Zhyever commited on
Commit
1f418ff
1 Parent(s): 4ac0a25
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. ControlNet/.gitignore +0 -143
  2. ControlNet/LICENSE +0 -201
  3. ControlNet/README.md +0 -348
  4. ControlNet/annotator/canny/__init__.py +0 -6
  5. ControlNet/annotator/ckpts/ckpts.txt +0 -1
  6. ControlNet/annotator/hed/__init__.py +0 -96
  7. ControlNet/annotator/midas/LICENSE +0 -21
  8. ControlNet/annotator/midas/__init__.py +0 -42
  9. ControlNet/annotator/midas/api.py +0 -169
  10. ControlNet/annotator/midas/midas/__init__.py +0 -0
  11. ControlNet/annotator/midas/midas/base_model.py +0 -16
  12. ControlNet/annotator/midas/midas/dpt_depth.py +0 -109
  13. ControlNet/annotator/midas/midas/midas_net.py +0 -76
  14. ControlNet/annotator/midas/midas/midas_net_custom.py +0 -128
  15. ControlNet/annotator/midas/midas/transforms.py +0 -234
  16. ControlNet/annotator/midas/midas/vit.py +0 -491
  17. ControlNet/annotator/midas/utils.py +0 -189
  18. ControlNet/annotator/mlsd/LICENSE +0 -201
  19. ControlNet/annotator/mlsd/__init__.py +0 -43
  20. ControlNet/annotator/mlsd/models/mbv2_mlsd_large.py +0 -292
  21. ControlNet/annotator/mlsd/models/mbv2_mlsd_tiny.py +0 -275
  22. ControlNet/annotator/mlsd/utils.py +0 -580
  23. ControlNet/annotator/openpose/LICENSE +0 -108
  24. ControlNet/annotator/openpose/__init__.py +0 -49
  25. ControlNet/annotator/openpose/body.py +0 -219
  26. ControlNet/annotator/openpose/hand.py +0 -86
  27. ControlNet/annotator/openpose/model.py +0 -219
  28. ControlNet/annotator/openpose/util.py +0 -164
  29. ControlNet/annotator/uniformer/LICENSE +0 -203
  30. ControlNet/annotator/uniformer/__init__.py +0 -27
  31. ControlNet/annotator/uniformer/configs/_base_/default_runtime.py +0 -14
  32. ControlNet/annotator/uniformer/configs/_base_/models/ann_r50-d8.py +0 -46
  33. ControlNet/annotator/uniformer/configs/_base_/models/apcnet_r50-d8.py +0 -44
  34. ControlNet/annotator/uniformer/configs/_base_/models/ccnet_r50-d8.py +0 -44
  35. ControlNet/annotator/uniformer/configs/_base_/models/cgnet.py +0 -35
  36. ControlNet/annotator/uniformer/configs/_base_/models/danet_r50-d8.py +0 -44
  37. ControlNet/annotator/uniformer/configs/_base_/models/deeplabv3_r50-d8.py +0 -44
  38. ControlNet/annotator/uniformer/configs/_base_/models/deeplabv3_unet_s5-d16.py +0 -50
  39. ControlNet/annotator/uniformer/configs/_base_/models/deeplabv3plus_r50-d8.py +0 -46
  40. ControlNet/annotator/uniformer/configs/_base_/models/dmnet_r50-d8.py +0 -44
  41. ControlNet/annotator/uniformer/configs/_base_/models/dnl_r50-d8.py +0 -46
  42. ControlNet/annotator/uniformer/configs/_base_/models/emanet_r50-d8.py +0 -47
  43. ControlNet/annotator/uniformer/configs/_base_/models/encnet_r50-d8.py +0 -48
  44. ControlNet/annotator/uniformer/configs/_base_/models/fast_scnn.py +0 -57
  45. ControlNet/annotator/uniformer/configs/_base_/models/fcn_hr18.py +0 -52
  46. ControlNet/annotator/uniformer/configs/_base_/models/fcn_r50-d8.py +0 -45
  47. ControlNet/annotator/uniformer/configs/_base_/models/fcn_unet_s5-d16.py +0 -51
  48. ControlNet/annotator/uniformer/configs/_base_/models/fpn_r50.py +0 -36
  49. ControlNet/annotator/uniformer/configs/_base_/models/fpn_uniformer.py +0 -35
  50. ControlNet/annotator/uniformer/configs/_base_/models/gcnet_r50-d8.py +0 -46
ControlNet/.gitignore DELETED
@@ -1,143 +0,0 @@
1
- .idea/
2
-
3
- training/
4
- lightning_logs/
5
- image_log/
6
-
7
- *.pth
8
- *.pt
9
- *.ckpt
10
- *.safetensors
11
-
12
- gradio_pose2image_private.py
13
- gradio_canny2image_private.py
14
-
15
- # Byte-compiled / optimized / DLL files
16
- __pycache__/
17
- *.py[cod]
18
- *$py.class
19
-
20
- # C extensions
21
- *.so
22
-
23
- # Distribution / packaging
24
- .Python
25
- build/
26
- develop-eggs/
27
- dist/
28
- downloads/
29
- eggs/
30
- .eggs/
31
- lib/
32
- lib64/
33
- parts/
34
- sdist/
35
- var/
36
- wheels/
37
- pip-wheel-metadata/
38
- share/python-wheels/
39
- *.egg-info/
40
- .installed.cfg
41
- *.egg
42
- MANIFEST
43
-
44
- # PyInstaller
45
- # Usually these files are written by a python script from a template
46
- # before PyInstaller builds the exe, so as to inject date/other infos into it.
47
- *.manifest
48
- *.spec
49
-
50
- # Installer logs
51
- pip-log.txt
52
- pip-delete-this-directory.txt
53
-
54
- # Unit test / coverage reports
55
- htmlcov/
56
- .tox/
57
- .nox/
58
- .coverage
59
- .coverage.*
60
- .cache
61
- nosetests.xml
62
- coverage.xml
63
- *.cover
64
- *.py,cover
65
- .hypothesis/
66
- .pytest_cache/
67
-
68
- # Translations
69
- *.mo
70
- *.pot
71
-
72
- # Django stuff:
73
- *.log
74
- local_settings.py
75
- db.sqlite3
76
- db.sqlite3-journal
77
-
78
- # Flask stuff:
79
- instance/
80
- .webassets-cache
81
-
82
- # Scrapy stuff:
83
- .scrapy
84
-
85
- # Sphinx documentation
86
- docs/_build/
87
-
88
- # PyBuilder
89
- target/
90
-
91
- # Jupyter Notebook
92
- .ipynb_checkpoints
93
-
94
- # IPython
95
- profile_default/
96
- ipython_config.py
97
-
98
- # pyenv
99
- .python-version
100
-
101
- # pipenv
102
- # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
103
- # However, in case of collaboration, if having platform-specific dependencies or dependencies
104
- # having no cross-platform support, pipenv may install dependencies that don't work, or not
105
- # install all needed dependencies.
106
- #Pipfile.lock
107
-
108
- # PEP 582; used by e.g. github.com/David-OConnor/pyflow
109
- __pypackages__/
110
-
111
- # Celery stuff
112
- celerybeat-schedule
113
- celerybeat.pid
114
-
115
- # SageMath parsed files
116
- *.sage.py
117
-
118
- # Environments
119
- .env
120
- .venv
121
- env/
122
- venv/
123
- ENV/
124
- env.bak/
125
- venv.bak/
126
-
127
- # Spyder project settings
128
- .spyderproject
129
- .spyproject
130
-
131
- # Rope project settings
132
- .ropeproject
133
-
134
- # mkdocs documentation
135
- /site
136
-
137
- # mypy
138
- .mypy_cache/
139
- .dmypy.json
140
- dmypy.json
141
-
142
- # Pyre type checker
143
- .pyre/
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ControlNet/LICENSE DELETED
@@ -1,201 +0,0 @@
1
- Apache License
2
- Version 2.0, January 2004
3
- http://www.apache.org/licenses/
4
-
5
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
-
7
- 1. Definitions.
8
-
9
- "License" shall mean the terms and conditions for use, reproduction,
10
- and distribution as defined by Sections 1 through 9 of this document.
11
-
12
- "Licensor" shall mean the copyright owner or entity authorized by
13
- the copyright owner that is granting the License.
14
-
15
- "Legal Entity" shall mean the union of the acting entity and all
16
- other entities that control, are controlled by, or are under common
17
- control with that entity. For the purposes of this definition,
18
- "control" means (i) the power, direct or indirect, to cause the
19
- direction or management of such entity, whether by contract or
20
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
- outstanding shares, or (iii) beneficial ownership of such entity.
22
-
23
- "You" (or "Your") shall mean an individual or Legal Entity
24
- exercising permissions granted by this License.
25
-
26
- "Source" form shall mean the preferred form for making modifications,
27
- including but not limited to software source code, documentation
28
- source, and configuration files.
29
-
30
- "Object" form shall mean any form resulting from mechanical
31
- transformation or translation of a Source form, including but
32
- not limited to compiled object code, generated documentation,
33
- and conversions to other media types.
34
-
35
- "Work" shall mean the work of authorship, whether in Source or
36
- Object form, made available under the License, as indicated by a
37
- copyright notice that is included in or attached to the work
38
- (an example is provided in the Appendix below).
39
-
40
- "Derivative Works" shall mean any work, whether in Source or Object
41
- form, that is based on (or derived from) the Work and for which the
42
- editorial revisions, annotations, elaborations, or other modifications
43
- represent, as a whole, an original work of authorship. For the purposes
44
- of this License, Derivative Works shall not include works that remain
45
- separable from, or merely link (or bind by name) to the interfaces of,
46
- the Work and Derivative Works thereof.
47
-
48
- "Contribution" shall mean any work of authorship, including
49
- the original version of the Work and any modifications or additions
50
- to that Work or Derivative Works thereof, that is intentionally
51
- submitted to Licensor for inclusion in the Work by the copyright owner
52
- or by an individual or Legal Entity authorized to submit on behalf of
53
- the copyright owner. For the purposes of this definition, "submitted"
54
- means any form of electronic, verbal, or written communication sent
55
- to the Licensor or its representatives, including but not limited to
56
- communication on electronic mailing lists, source code control systems,
57
- and issue tracking systems that are managed by, or on behalf of, the
58
- Licensor for the purpose of discussing and improving the Work, but
59
- excluding communication that is conspicuously marked or otherwise
60
- designated in writing by the copyright owner as "Not a Contribution."
61
-
62
- "Contributor" shall mean Licensor and any individual or Legal Entity
63
- on behalf of whom a Contribution has been received by Licensor and
64
- subsequently incorporated within the Work.
65
-
66
- 2. Grant of Copyright License. Subject to the terms and conditions of
67
- this License, each Contributor hereby grants to You a perpetual,
68
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
- copyright license to reproduce, prepare Derivative Works of,
70
- publicly display, publicly perform, sublicense, and distribute the
71
- Work and such Derivative Works in Source or Object form.
72
-
73
- 3. Grant of Patent License. Subject to the terms and conditions of
74
- this License, each Contributor hereby grants to You a perpetual,
75
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
- (except as stated in this section) patent license to make, have made,
77
- use, offer to sell, sell, import, and otherwise transfer the Work,
78
- where such license applies only to those patent claims licensable
79
- by such Contributor that are necessarily infringed by their
80
- Contribution(s) alone or by combination of their Contribution(s)
81
- with the Work to which such Contribution(s) was submitted. If You
82
- institute patent litigation against any entity (including a
83
- cross-claim or counterclaim in a lawsuit) alleging that the Work
84
- or a Contribution incorporated within the Work constitutes direct
85
- or contributory patent infringement, then any patent licenses
86
- granted to You under this License for that Work shall terminate
87
- as of the date such litigation is filed.
88
-
89
- 4. Redistribution. You may reproduce and distribute copies of the
90
- Work or Derivative Works thereof in any medium, with or without
91
- modifications, and in Source or Object form, provided that You
92
- meet the following conditions:
93
-
94
- (a) You must give any other recipients of the Work or
95
- Derivative Works a copy of this License; and
96
-
97
- (b) You must cause any modified files to carry prominent notices
98
- stating that You changed the files; and
99
-
100
- (c) You must retain, in the Source form of any Derivative Works
101
- that You distribute, all copyright, patent, trademark, and
102
- attribution notices from the Source form of the Work,
103
- excluding those notices that do not pertain to any part of
104
- the Derivative Works; and
105
-
106
- (d) If the Work includes a "NOTICE" text file as part of its
107
- distribution, then any Derivative Works that You distribute must
108
- include a readable copy of the attribution notices contained
109
- within such NOTICE file, excluding those notices that do not
110
- pertain to any part of the Derivative Works, in at least one
111
- of the following places: within a NOTICE text file distributed
112
- as part of the Derivative Works; within the Source form or
113
- documentation, if provided along with the Derivative Works; or,
114
- within a display generated by the Derivative Works, if and
115
- wherever such third-party notices normally appear. The contents
116
- of the NOTICE file are for informational purposes only and
117
- do not modify the License. You may add Your own attribution
118
- notices within Derivative Works that You distribute, alongside
119
- or as an addendum to the NOTICE text from the Work, provided
120
- that such additional attribution notices cannot be construed
121
- as modifying the License.
122
-
123
- You may add Your own copyright statement to Your modifications and
124
- may provide additional or different license terms and conditions
125
- for use, reproduction, or distribution of Your modifications, or
126
- for any such Derivative Works as a whole, provided Your use,
127
- reproduction, and distribution of the Work otherwise complies with
128
- the conditions stated in this License.
129
-
130
- 5. Submission of Contributions. Unless You explicitly state otherwise,
131
- any Contribution intentionally submitted for inclusion in the Work
132
- by You to the Licensor shall be under the terms and conditions of
133
- this License, without any additional terms or conditions.
134
- Notwithstanding the above, nothing herein shall supersede or modify
135
- the terms of any separate license agreement you may have executed
136
- with Licensor regarding such Contributions.
137
-
138
- 6. Trademarks. This License does not grant permission to use the trade
139
- names, trademarks, service marks, or product names of the Licensor,
140
- except as required for reasonable and customary use in describing the
141
- origin of the Work and reproducing the content of the NOTICE file.
142
-
143
- 7. Disclaimer of Warranty. Unless required by applicable law or
144
- agreed to in writing, Licensor provides the Work (and each
145
- Contributor provides its Contributions) on an "AS IS" BASIS,
146
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
- implied, including, without limitation, any warranties or conditions
148
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
- PARTICULAR PURPOSE. You are solely responsible for determining the
150
- appropriateness of using or redistributing the Work and assume any
151
- risks associated with Your exercise of permissions under this License.
152
-
153
- 8. Limitation of Liability. In no event and under no legal theory,
154
- whether in tort (including negligence), contract, or otherwise,
155
- unless required by applicable law (such as deliberate and grossly
156
- negligent acts) or agreed to in writing, shall any Contributor be
157
- liable to You for damages, including any direct, indirect, special,
158
- incidental, or consequential damages of any character arising as a
159
- result of this License or out of the use or inability to use the
160
- Work (including but not limited to damages for loss of goodwill,
161
- work stoppage, computer failure or malfunction, or any and all
162
- other commercial damages or losses), even if such Contributor
163
- has been advised of the possibility of such damages.
164
-
165
- 9. Accepting Warranty or Additional Liability. While redistributing
166
- the Work or Derivative Works thereof, You may choose to offer,
167
- and charge a fee for, acceptance of support, warranty, indemnity,
168
- or other liability obligations and/or rights consistent with this
169
- License. However, in accepting such obligations, You may act only
170
- on Your own behalf and on Your sole responsibility, not on behalf
171
- of any other Contributor, and only if You agree to indemnify,
172
- defend, and hold each Contributor harmless for any liability
173
- incurred by, or claims asserted against, such Contributor by reason
174
- of your accepting any such warranty or additional liability.
175
-
176
- END OF TERMS AND CONDITIONS
177
-
178
- APPENDIX: How to apply the Apache License to your work.
179
-
180
- To apply the Apache License to your work, attach the following
181
- boilerplate notice, with the fields enclosed by brackets "[]"
182
- replaced with your own identifying information. (Don't include
183
- the brackets!) The text should be enclosed in the appropriate
184
- comment syntax for the file format. We also recommend that a
185
- file or class name and description of purpose be included on the
186
- same "printed page" as the copyright notice for easier
187
- identification within third-party archives.
188
-
189
- Copyright [yyyy] [name of copyright owner]
190
-
191
- Licensed under the Apache License, Version 2.0 (the "License");
192
- you may not use this file except in compliance with the License.
193
- You may obtain a copy of the License at
194
-
195
- http://www.apache.org/licenses/LICENSE-2.0
196
-
197
- Unless required by applicable law or agreed to in writing, software
198
- distributed under the License is distributed on an "AS IS" BASIS,
199
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
- See the License for the specific language governing permissions and
201
- limitations under the License.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ControlNet/README.md DELETED
@@ -1,348 +0,0 @@
1
- # News: A nightly version of ControlNet 1.1 is released!
2
-
3
- [ControlNet 1.1](https://github.com/lllyasviel/ControlNet-v1-1-nightly) is released. Those new models will be merged to this repo after we make sure that everything is good.
4
-
5
- # Below is ControlNet 1.0
6
-
7
- Official implementation of [Adding Conditional Control to Text-to-Image Diffusion Models](https://arxiv.org/abs/2302.05543).
8
-
9
- ControlNet is a neural network structure to control diffusion models by adding extra conditions.
10
-
11
- ![img](github_page/he.png)
12
-
13
- It copys the weights of neural network blocks into a "locked" copy and a "trainable" copy.
14
-
15
- The "trainable" one learns your condition. The "locked" one preserves your model.
16
-
17
- Thanks to this, training with small dataset of image pairs will not destroy the production-ready diffusion models.
18
-
19
- The "zero convolution" is 1×1 convolution with both weight and bias initialized as zeros.
20
-
21
- Before training, all zero convolutions output zeros, and ControlNet will not cause any distortion.
22
-
23
- No layer is trained from scratch. You are still fine-tuning. Your original model is safe.
24
-
25
- This allows training on small-scale or even personal devices.
26
-
27
- This is also friendly to merge/replacement/offsetting of models/weights/blocks/layers.
28
-
29
- ### FAQ
30
-
31
- **Q:** But wait, if the weight of a conv layer is zero, the gradient will also be zero, and the network will not learn anything. Why "zero convolution" works?
32
-
33
- **A:** This is not true. [See an explanation here](docs/faq.md).
34
-
35
- # Stable Diffusion + ControlNet
36
-
37
- By repeating the above simple structure 14 times, we can control stable diffusion in this way:
38
-
39
- ![img](github_page/sd.png)
40
-
41
- In this way, the ControlNet can **reuse** the SD encoder as a **deep, strong, robust, and powerful backbone** to learn diverse controls. Many evidences (like [this](https://jerryxu.net/ODISE/) and [this](https://vpd.ivg-research.xyz/)) validate that the SD encoder is an excellent backbone.
42
-
43
- Note that the way we connect layers is computational efficient. The original SD encoder does not need to store gradients (the locked original SD Encoder Block 1234 and Middle). The required GPU memory is not much larger than original SD, although many layers are added. Great!
44
-
45
- # Features & News
46
-
47
- 2023/0/14 - We released [ControlNet 1.1](https://github.com/lllyasviel/ControlNet-v1-1-nightly). Those new models will be merged to this repo after we make sure that everything is good.
48
-
49
- 2023/03/03 - We released a discussion - [Precomputed ControlNet: Speed up ControlNet by 45%, but is it necessary?](https://github.com/lllyasviel/ControlNet/discussions/216)
50
-
51
- 2023/02/26 - We released a blog - [Ablation Study: Why ControlNets use deep encoder? What if it was lighter? Or even an MLP?](https://github.com/lllyasviel/ControlNet/discussions/188)
52
-
53
- 2023/02/20 - Implementation for non-prompt mode released. See also [Guess Mode / Non-Prompt Mode](#guess-anchor).
54
-
55
- 2023/02/12 - Now you can play with any community model by [Transferring the ControlNet](https://github.com/lllyasviel/ControlNet/discussions/12).
56
-
57
- 2023/02/11 - [Low VRAM mode](docs/low_vram.md) is added. Please use this mode if you are using 8GB GPU(s) or if you want larger batch size.
58
-
59
- # Production-Ready Pretrained Models
60
-
61
- First create a new conda environment
62
-
63
- conda env create -f environment.yaml
64
- conda activate control
65
-
66
- All models and detectors can be downloaded from [our Hugging Face page](https://huggingface.co/lllyasviel/ControlNet). Make sure that SD models are put in "ControlNet/models" and detectors are put in "ControlNet/annotator/ckpts". Make sure that you download all necessary pretrained weights and detector models from that Hugging Face page, including HED edge detection model, Midas depth estimation model, Openpose, and so on.
67
-
68
- We provide 9 Gradio apps with these models.
69
-
70
- All test images can be found at the folder "test_imgs".
71
-
72
- ## ControlNet with Canny Edge
73
-
74
- Stable Diffusion 1.5 + ControlNet (using simple Canny edge detection)
75
-
76
- python gradio_canny2image.py
77
-
78
- The Gradio app also allows you to change the Canny edge thresholds. Just try it for more details.
79
-
80
- Prompt: "bird"
81
- ![p](github_page/p1.png)
82
-
83
- Prompt: "cute dog"
84
- ![p](github_page/p2.png)
85
-
86
- ## ControlNet with M-LSD Lines
87
-
88
- Stable Diffusion 1.5 + ControlNet (using simple M-LSD straight line detection)
89
-
90
- python gradio_hough2image.py
91
-
92
- The Gradio app also allows you to change the M-LSD thresholds. Just try it for more details.
93
-
94
- Prompt: "room"
95
- ![p](github_page/p3.png)
96
-
97
- Prompt: "building"
98
- ![p](github_page/p4.png)
99
-
100
- ## ControlNet with HED Boundary
101
-
102
- Stable Diffusion 1.5 + ControlNet (using soft HED Boundary)
103
-
104
- python gradio_hed2image.py
105
-
106
- The soft HED Boundary will preserve many details in input images, making this app suitable for recoloring and stylizing. Just try it for more details.
107
-
108
- Prompt: "oil painting of handsome old man, masterpiece"
109
- ![p](github_page/p5.png)
110
-
111
- Prompt: "Cyberpunk robot"
112
- ![p](github_page/p6.png)
113
-
114
- ## ControlNet with User Scribbles
115
-
116
- Stable Diffusion 1.5 + ControlNet (using Scribbles)
117
-
118
- python gradio_scribble2image.py
119
-
120
- Note that the UI is based on Gradio, and Gradio is somewhat difficult to customize. Right now you need to draw scribbles outside the UI (using your favorite drawing software, for example, MS Paint) and then import the scribble image to Gradio.
121
-
122
- Prompt: "turtle"
123
- ![p](github_page/p7.png)
124
-
125
- Prompt: "hot air balloon"
126
- ![p](github_page/p8.png)
127
-
128
- ### Interactive Interface
129
-
130
- We actually provide an interactive interface
131
-
132
- python gradio_scribble2image_interactive.py
133
-
134
- ~~However, because gradio is very [buggy](https://github.com/gradio-app/gradio/issues/3166) and difficult to customize, right now, user need to first set canvas width and heights and then click "Open drawing canvas" to get a drawing area. Please do not upload image to that drawing canvas. Also, the drawing area is very small; it should be bigger. But I failed to find out how to make it larger. Again, gradio is really buggy.~~ (Now fixed, will update asap)
135
-
136
- The below dog sketch is drawn by me. Perhaps we should draw a better dog for showcase.
137
-
138
- Prompt: "dog in a room"
139
- ![p](github_page/p20.png)
140
-
141
- ## ControlNet with Fake Scribbles
142
-
143
- Stable Diffusion 1.5 + ControlNet (using fake scribbles)
144
-
145
- python gradio_fake_scribble2image.py
146
-
147
- Sometimes we are lazy, and we do not want to draw scribbles. This script use the exactly same scribble-based model but use a simple algorithm to synthesize scribbles from input images.
148
-
149
- Prompt: "bag"
150
- ![p](github_page/p9.png)
151
-
152
- Prompt: "shose" (Note that "shose" is a typo; it should be "shoes". But it still seems to work.)
153
- ![p](github_page/p10.png)
154
-
155
- ## ControlNet with Human Pose
156
-
157
- Stable Diffusion 1.5 + ControlNet (using human pose)
158
-
159
- python gradio_pose2image.py
160
-
161
- Apparently, this model deserves a better UI to directly manipulate pose skeleton. However, again, Gradio is somewhat difficult to customize. Right now you need to input an image and then the Openpose will detect the pose for you.
162
-
163
- Prompt: "Chief in the kitchen"
164
- ![p](github_page/p11.png)
165
-
166
- Prompt: "An astronaut on the moon"
167
- ![p](github_page/p12.png)
168
-
169
- ## ControlNet with Semantic Segmentation
170
-
171
- Stable Diffusion 1.5 + ControlNet (using semantic segmentation)
172
-
173
- python gradio_seg2image.py
174
-
175
- This model use ADE20K's segmentation protocol. Again, this model deserves a better UI to directly draw the segmentations. However, again, Gradio is somewhat difficult to customize. Right now you need to input an image and then a model called Uniformer will detect the segmentations for you. Just try it for more details.
176
-
177
- Prompt: "House"
178
- ![p](github_page/p13.png)
179
-
180
- Prompt: "River"
181
- ![p](github_page/p14.png)
182
-
183
- ## ControlNet with Depth
184
-
185
- Stable Diffusion 1.5 + ControlNet (using depth map)
186
-
187
- python gradio_depth2image.py
188
-
189
- Great! Now SD 1.5 also have a depth control. FINALLY. So many possibilities (considering SD1.5 has much more community models than SD2).
190
-
191
- Note that different from Stability's model, the ControlNet receive the full 512×512 depth map, rather than 64×64 depth. Note that Stability's SD2 depth model use 64*64 depth maps. This means that the ControlNet will preserve more details in the depth map.
192
-
193
- This is always a strength because if users do not want to preserve more details, they can simply use another SD to post-process an i2i. But if they want to preserve more details, ControlNet becomes their only choice. Again, SD2 uses 64×64 depth, we use 512×512.
194
-
195
- Prompt: "Stormtrooper's lecture"
196
- ![p](github_page/p15.png)
197
-
198
- ## ControlNet with Normal Map
199
-
200
- Stable Diffusion 1.5 + ControlNet (using normal map)
201
-
202
- python gradio_normal2image.py
203
-
204
- This model use normal map. Rightnow in the APP, the normal is computed from the midas depth map and a user threshold (to determine how many area is background with identity normal face to viewer, tune the "Normal background threshold" in the gradio app to get a feeling).
205
-
206
- Prompt: "Cute toy"
207
- ![p](github_page/p17.png)
208
-
209
- Prompt: "Plaster statue of Abraham Lincoln"
210
- ![p](github_page/p18.png)
211
-
212
- Compared to depth model, this model seems to be a bit better at preserving the geometry. This is intuitive: minor details are not salient in depth maps, but are salient in normal maps. Below is the depth result with same inputs. You can see that the hairstyle of the man in the input image is modified by depth model, but preserved by the normal model.
213
-
214
- Prompt: "Plaster statue of Abraham Lincoln"
215
- ![p](github_page/p19.png)
216
-
217
- ## ControlNet with Anime Line Drawing
218
-
219
- We also trained a relatively simple ControlNet for anime line drawings. This tool may be useful for artistic creations. (Although the image details in the results is a bit modified, since it still diffuse latent images.)
220
-
221
- This model is not available right now. We need to evaluate the potential risks before releasing this model. Nevertheless, you may be interested in [transferring the ControlNet to any community model](https://github.com/lllyasviel/ControlNet/discussions/12).
222
-
223
- ![p](github_page/p21.png)
224
-
225
- <a id="guess-anchor"></a>
226
-
227
- # Guess Mode / Non-Prompt Mode
228
-
229
- The "guess mode" (or called non-prompt mode) will completely unleash all the power of the very powerful ControlNet encoder.
230
-
231
- See also the blog - [Ablation Study: Why ControlNets use deep encoder? What if it was lighter? Or even an MLP?](https://github.com/lllyasviel/ControlNet/discussions/188)
232
-
233
- You need to manually check the "Guess Mode" toggle to enable this mode.
234
-
235
- In this mode, the ControlNet encoder will try best to recognize the content of the input control map, like depth map, edge map, scribbles, etc, even if you remove all prompts.
236
-
237
- **Let's have fun with some very challenging experimental settings!**
238
-
239
- **No prompts. No "positive" prompts. No "negative" prompts. No extra caption detector. One single diffusion loop.**
240
-
241
- For this mode, we recommend to use 50 steps and guidance scale between 3 and 5.
242
-
243
- ![p](github_page/uc2a.png)
244
-
245
- No prompts:
246
-
247
- ![p](github_page/uc2b.png)
248
-
249
- Note that the below example is 768×768. No prompts. No "positive" prompts. No "negative" prompts.
250
-
251
- ![p](github_page/uc1.png)
252
-
253
- By tuning the parameters, you can get some very intereting results like below:
254
-
255
- ![p](github_page/uc3.png)
256
-
257
- Because no prompt is available, the ControlNet encoder will "guess" what is in the control map. Sometimes the guess result is really interesting. Because diffusion algorithm can essentially give multiple results, the ControlNet seems able to give multiple guesses, like this:
258
-
259
- ![p](github_page/uc4.png)
260
-
261
- Without prompt, the HED seems good at generating images look like paintings when the control strength is relatively low:
262
-
263
- ![p](github_page/uc6.png)
264
-
265
- The Guess Mode is also supported in [WebUI Plugin](https://github.com/Mikubill/sd-webui-controlnet):
266
-
267
- ![p](github_page/uci1.png)
268
-
269
- No prompts. Default WebUI parameters. Pure random results with the seed being 12345. Standard SD1.5. Input scribble is in "test_imgs" folder to reproduce.
270
-
271
- ![p](github_page/uci2.png)
272
-
273
- Below is another challenging example:
274
-
275
- ![p](github_page/uci3.png)
276
-
277
- No prompts. Default WebUI parameters. Pure random results with the seed being 12345. Standard SD1.5. Input scribble is in "test_imgs" folder to reproduce.
278
-
279
- ![p](github_page/uci4.png)
280
-
281
- Note that in the guess mode, you will still be able to input prompts. The only difference is that the model will "try harder" to guess what is in the control map even if you do not provide the prompt. Just try it yourself!
282
-
283
- Besides, if you write some scripts (like BLIP) to generate image captions from the "guess mode" images, and then use the generated captions as prompts to diffuse again, you will get a SOTA pipeline for fully automatic conditional image generating.
284
-
285
- # Combining Multiple ControlNets
286
-
287
- ControlNets are composable: more than one ControlNet can be easily composed to multi-condition control.
288
-
289
- Right now this feature is in experimental stage in the [Mikubill' A1111 Webui Plugin](https://github.com/Mikubill/sd-webui-controlnet):
290
-
291
- ![p](github_page/multi2.png)
292
-
293
- ![p](github_page/multi.png)
294
-
295
- As long as the models are controlling the same SD, the "boundary" between different research projects does not even exist. This plugin also allows different methods to work together!
296
-
297
- # Use ControlNet in Any Community Model (SD1.X)
298
-
299
- This is an experimental feature.
300
-
301
- [See the steps here](https://github.com/lllyasviel/ControlNet/discussions/12).
302
-
303
- Or you may want to use the [Mikubill' A1111 Webui Plugin](https://github.com/Mikubill/sd-webui-controlnet) which is plug-and-play and does not need manual merging.
304
-
305
- # Annotate Your Own Data
306
-
307
- We provide simple python scripts to process images.
308
-
309
- [See a gradio example here](docs/annotator.md).
310
-
311
- # Train with Your Own Data
312
-
313
- Training a ControlNet is as easy as (or even easier than) training a simple pix2pix.
314
-
315
- [See the steps here](docs/train.md).
316
-
317
- # Related Resources
318
-
319
- Special Thank to the great project - [Mikubill' A1111 Webui Plugin](https://github.com/Mikubill/sd-webui-controlnet) !
320
-
321
- We also thank Hysts for making [Hugging Face Space](https://huggingface.co/spaces/hysts/ControlNet) as well as more than 65 models in that amazing [Colab list](https://github.com/camenduru/controlnet-colab)!
322
-
323
- Thank haofanwang for making [ControlNet-for-Diffusers](https://github.com/haofanwang/ControlNet-for-Diffusers)!
324
-
325
- We also thank all authors for making Controlnet DEMOs, including but not limited to [fffiloni](https://huggingface.co/spaces/fffiloni/ControlNet-Video), [other-model](https://huggingface.co/spaces/hysts/ControlNet-with-other-models), [ThereforeGames](https://github.com/AUTOMATIC1111/stable-diffusion-webui/discussions/7784), [RamAnanth1](https://huggingface.co/spaces/RamAnanth1/ControlNet), etc!
326
-
327
- Besides, you may also want to read these amazing related works:
328
-
329
- [Composer: Creative and Controllable Image Synthesis with Composable Conditions](https://github.com/damo-vilab/composer): A much bigger model to control diffusion!
330
-
331
- [T2I-Adapter: Learning Adapters to Dig out More Controllable Ability for Text-to-Image Diffusion Models](https://github.com/TencentARC/T2I-Adapter): A much smaller model to control stable diffusion!
332
-
333
- [ControlLoRA: A Light Neural Network To Control Stable Diffusion Spatial Information](https://github.com/HighCWu/ControlLoRA): Implement Controlnet using LORA!
334
-
335
- And these amazing recent projects: [InstructPix2Pix Learning to Follow Image Editing Instructions](https://www.timothybrooks.com/instruct-pix2pix), [Pix2pix-zero: Zero-shot Image-to-Image Translation](https://github.com/pix2pixzero/pix2pix-zero), [Plug-and-Play Diffusion Features for Text-Driven Image-to-Image Translation](https://github.com/MichalGeyer/plug-and-play), [MaskSketch: Unpaired Structure-guided Masked Image Generation](https://arxiv.org/abs/2302.05496), [SEGA: Instructing Diffusion using Semantic Dimensions](https://arxiv.org/abs/2301.12247), [Universal Guidance for Diffusion Models](https://github.com/arpitbansal297/Universal-Guided-Diffusion), [Region-Aware Diffusion for Zero-shot Text-driven Image Editing](https://github.com/haha-lisa/RDM-Region-Aware-Diffusion-Model), [Domain Expansion of Image Generators](https://arxiv.org/abs/2301.05225), [Image Mixer](https://twitter.com/LambdaAPI/status/1626327289288957956), [MultiDiffusion: Fusing Diffusion Paths for Controlled Image Generation](https://multidiffusion.github.io/)
336
-
337
- # Citation
338
-
339
- @misc{zhang2023adding,
340
- title={Adding Conditional Control to Text-to-Image Diffusion Models},
341
- author={Lvmin Zhang and Anyi Rao and Maneesh Agrawala},
342
- booktitle={IEEE International Conference on Computer Vision (ICCV)}
343
- year={2023},
344
- }
345
-
346
- [Arxiv Link](https://arxiv.org/abs/2302.05543)
347
-
348
- [Supplementary Materials](https://lllyasviel.github.io/misc/202309/cnet_supp.pdf)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ControlNet/annotator/canny/__init__.py DELETED
@@ -1,6 +0,0 @@
1
- import cv2
2
-
3
-
4
- class CannyDetector:
5
- def __call__(self, img, low_threshold, high_threshold):
6
- return cv2.Canny(img, low_threshold, high_threshold)
 
 
 
 
 
 
 
ControlNet/annotator/ckpts/ckpts.txt DELETED
@@ -1 +0,0 @@
1
- Weights here.
 
 
ControlNet/annotator/hed/__init__.py DELETED
@@ -1,96 +0,0 @@
1
- # This is an improved version and model of HED edge detection with Apache License, Version 2.0.
2
- # Please use this implementation in your products
3
- # This implementation may produce slightly different results from Saining Xie's official implementations,
4
- # but it generates smoother edges and is more suitable for ControlNet as well as other image-to-image translations.
5
- # Different from official models and other implementations, this is an RGB-input model (rather than BGR)
6
- # and in this way it works better for gradio's RGB protocol
7
-
8
- import os
9
- import cv2
10
- import torch
11
- import numpy as np
12
-
13
- from einops import rearrange
14
- from annotator.util import annotator_ckpts_path
15
-
16
-
17
- class DoubleConvBlock(torch.nn.Module):
18
- def __init__(self, input_channel, output_channel, layer_number):
19
- super().__init__()
20
- self.convs = torch.nn.Sequential()
21
- self.convs.append(torch.nn.Conv2d(in_channels=input_channel, out_channels=output_channel, kernel_size=(3, 3), stride=(1, 1), padding=1))
22
- for i in range(1, layer_number):
23
- self.convs.append(torch.nn.Conv2d(in_channels=output_channel, out_channels=output_channel, kernel_size=(3, 3), stride=(1, 1), padding=1))
24
- self.projection = torch.nn.Conv2d(in_channels=output_channel, out_channels=1, kernel_size=(1, 1), stride=(1, 1), padding=0)
25
-
26
- def __call__(self, x, down_sampling=False):
27
- h = x
28
- if down_sampling:
29
- h = torch.nn.functional.max_pool2d(h, kernel_size=(2, 2), stride=(2, 2))
30
- for conv in self.convs:
31
- h = conv(h)
32
- h = torch.nn.functional.relu(h)
33
- return h, self.projection(h)
34
-
35
-
36
- class ControlNetHED_Apache2(torch.nn.Module):
37
- def __init__(self):
38
- super().__init__()
39
- self.norm = torch.nn.Parameter(torch.zeros(size=(1, 3, 1, 1)))
40
- self.block1 = DoubleConvBlock(input_channel=3, output_channel=64, layer_number=2)
41
- self.block2 = DoubleConvBlock(input_channel=64, output_channel=128, layer_number=2)
42
- self.block3 = DoubleConvBlock(input_channel=128, output_channel=256, layer_number=3)
43
- self.block4 = DoubleConvBlock(input_channel=256, output_channel=512, layer_number=3)
44
- self.block5 = DoubleConvBlock(input_channel=512, output_channel=512, layer_number=3)
45
-
46
- def __call__(self, x):
47
- h = x - self.norm
48
- h, projection1 = self.block1(h)
49
- h, projection2 = self.block2(h, down_sampling=True)
50
- h, projection3 = self.block3(h, down_sampling=True)
51
- h, projection4 = self.block4(h, down_sampling=True)
52
- h, projection5 = self.block5(h, down_sampling=True)
53
- return projection1, projection2, projection3, projection4, projection5
54
-
55
-
56
- class HEDdetector:
57
- def __init__(self):
58
- remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/ControlNetHED.pth"
59
- modelpath = os.path.join(annotator_ckpts_path, "ControlNetHED.pth")
60
- if not os.path.exists(modelpath):
61
- from basicsr.utils.download_util import load_file_from_url
62
- load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path)
63
- self.netNetwork = ControlNetHED_Apache2().float().cuda().eval()
64
- self.netNetwork.load_state_dict(torch.load(modelpath))
65
-
66
- def __call__(self, input_image):
67
- assert input_image.ndim == 3
68
- H, W, C = input_image.shape
69
- with torch.no_grad():
70
- image_hed = torch.from_numpy(input_image.copy()).float().cuda()
71
- image_hed = rearrange(image_hed, 'h w c -> 1 c h w')
72
- edges = self.netNetwork(image_hed)
73
- edges = [e.detach().cpu().numpy().astype(np.float32)[0, 0] for e in edges]
74
- edges = [cv2.resize(e, (W, H), interpolation=cv2.INTER_LINEAR) for e in edges]
75
- edges = np.stack(edges, axis=2)
76
- edge = 1 / (1 + np.exp(-np.mean(edges, axis=2).astype(np.float64)))
77
- edge = (edge * 255.0).clip(0, 255).astype(np.uint8)
78
- return edge
79
-
80
-
81
- def nms(x, t, s):
82
- x = cv2.GaussianBlur(x.astype(np.float32), (0, 0), s)
83
-
84
- f1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8)
85
- f2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8)
86
- f3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8)
87
- f4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8)
88
-
89
- y = np.zeros_like(x)
90
-
91
- for f in [f1, f2, f3, f4]:
92
- np.putmask(y, cv2.dilate(x, kernel=f) == x, x)
93
-
94
- z = np.zeros_like(y, dtype=np.uint8)
95
- z[y > t] = 255
96
- return z
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ControlNet/annotator/midas/LICENSE DELETED
@@ -1,21 +0,0 @@
1
- MIT License
2
-
3
- Copyright (c) 2019 Intel ISL (Intel Intelligent Systems Lab)
4
-
5
- Permission is hereby granted, free of charge, to any person obtaining a copy
6
- of this software and associated documentation files (the "Software"), to deal
7
- in the Software without restriction, including without limitation the rights
8
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
- copies of the Software, and to permit persons to whom the Software is
10
- furnished to do so, subject to the following conditions:
11
-
12
- The above copyright notice and this permission notice shall be included in all
13
- copies or substantial portions of the Software.
14
-
15
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
- SOFTWARE.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ControlNet/annotator/midas/__init__.py DELETED
@@ -1,42 +0,0 @@
1
- # Midas Depth Estimation
2
- # From https://github.com/isl-org/MiDaS
3
- # MIT LICENSE
4
-
5
- import cv2
6
- import numpy as np
7
- import torch
8
-
9
- from einops import rearrange
10
- from .api import MiDaSInference
11
-
12
-
13
- class MidasDetector:
14
- def __init__(self):
15
- self.model = MiDaSInference(model_type="dpt_hybrid").cuda()
16
-
17
- def __call__(self, input_image, a=np.pi * 2.0, bg_th=0.1):
18
- assert input_image.ndim == 3
19
- image_depth = input_image
20
- with torch.no_grad():
21
- image_depth = torch.from_numpy(image_depth).float().cuda()
22
- image_depth = image_depth / 127.5 - 1.0
23
- image_depth = rearrange(image_depth, 'h w c -> 1 c h w')
24
- depth = self.model(image_depth)[0]
25
-
26
- depth_pt = depth.clone()
27
- depth_pt -= torch.min(depth_pt)
28
- depth_pt /= torch.max(depth_pt)
29
- depth_pt = depth_pt.cpu().numpy()
30
- depth_image = (depth_pt * 255.0).clip(0, 255).astype(np.uint8)
31
-
32
- depth_np = depth.cpu().numpy()
33
- x = cv2.Sobel(depth_np, cv2.CV_32F, 1, 0, ksize=3)
34
- y = cv2.Sobel(depth_np, cv2.CV_32F, 0, 1, ksize=3)
35
- z = np.ones_like(x) * a
36
- x[depth_pt < bg_th] = 0
37
- y[depth_pt < bg_th] = 0
38
- normal = np.stack([x, y, z], axis=2)
39
- normal /= np.sum(normal ** 2.0, axis=2, keepdims=True) ** 0.5
40
- normal_image = (normal * 127.5 + 127.5).clip(0, 255).astype(np.uint8)
41
-
42
- return depth_image, normal_image
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ControlNet/annotator/midas/api.py DELETED
@@ -1,169 +0,0 @@
1
- # based on https://github.com/isl-org/MiDaS
2
-
3
- import cv2
4
- import os
5
- import torch
6
- import torch.nn as nn
7
- from torchvision.transforms import Compose
8
-
9
- from .midas.dpt_depth import DPTDepthModel
10
- from .midas.midas_net import MidasNet
11
- from .midas.midas_net_custom import MidasNet_small
12
- from .midas.transforms import Resize, NormalizeImage, PrepareForNet
13
- from annotator.util import annotator_ckpts_path
14
-
15
-
16
- ISL_PATHS = {
17
- "dpt_large": os.path.join(annotator_ckpts_path, "dpt_large-midas-2f21e586.pt"),
18
- "dpt_hybrid": os.path.join(annotator_ckpts_path, "dpt_hybrid-midas-501f0c75.pt"),
19
- "midas_v21": "",
20
- "midas_v21_small": "",
21
- }
22
-
23
- remote_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/dpt_hybrid-midas-501f0c75.pt"
24
-
25
-
26
- def disabled_train(self, mode=True):
27
- """Overwrite model.train with this function to make sure train/eval mode
28
- does not change anymore."""
29
- return self
30
-
31
-
32
- def load_midas_transform(model_type):
33
- # https://github.com/isl-org/MiDaS/blob/master/run.py
34
- # load transform only
35
- if model_type == "dpt_large": # DPT-Large
36
- net_w, net_h = 384, 384
37
- resize_mode = "minimal"
38
- normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
39
-
40
- elif model_type == "dpt_hybrid": # DPT-Hybrid
41
- net_w, net_h = 384, 384
42
- resize_mode = "minimal"
43
- normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
44
-
45
- elif model_type == "midas_v21":
46
- net_w, net_h = 384, 384
47
- resize_mode = "upper_bound"
48
- normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
49
-
50
- elif model_type == "midas_v21_small":
51
- net_w, net_h = 256, 256
52
- resize_mode = "upper_bound"
53
- normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
54
-
55
- else:
56
- assert False, f"model_type '{model_type}' not implemented, use: --model_type large"
57
-
58
- transform = Compose(
59
- [
60
- Resize(
61
- net_w,
62
- net_h,
63
- resize_target=None,
64
- keep_aspect_ratio=True,
65
- ensure_multiple_of=32,
66
- resize_method=resize_mode,
67
- image_interpolation_method=cv2.INTER_CUBIC,
68
- ),
69
- normalization,
70
- PrepareForNet(),
71
- ]
72
- )
73
-
74
- return transform
75
-
76
-
77
- def load_model(model_type):
78
- # https://github.com/isl-org/MiDaS/blob/master/run.py
79
- # load network
80
- model_path = ISL_PATHS[model_type]
81
- if model_type == "dpt_large": # DPT-Large
82
- model = DPTDepthModel(
83
- path=model_path,
84
- backbone="vitl16_384",
85
- non_negative=True,
86
- )
87
- net_w, net_h = 384, 384
88
- resize_mode = "minimal"
89
- normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
90
-
91
- elif model_type == "dpt_hybrid": # DPT-Hybrid
92
- if not os.path.exists(model_path):
93
- from basicsr.utils.download_util import load_file_from_url
94
- load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path)
95
-
96
- model = DPTDepthModel(
97
- path=model_path,
98
- backbone="vitb_rn50_384",
99
- non_negative=True,
100
- )
101
- net_w, net_h = 384, 384
102
- resize_mode = "minimal"
103
- normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
104
-
105
- elif model_type == "midas_v21":
106
- model = MidasNet(model_path, non_negative=True)
107
- net_w, net_h = 384, 384
108
- resize_mode = "upper_bound"
109
- normalization = NormalizeImage(
110
- mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
111
- )
112
-
113
- elif model_type == "midas_v21_small":
114
- model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True,
115
- non_negative=True, blocks={'expand': True})
116
- net_w, net_h = 256, 256
117
- resize_mode = "upper_bound"
118
- normalization = NormalizeImage(
119
- mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
120
- )
121
-
122
- else:
123
- print(f"model_type '{model_type}' not implemented, use: --model_type large")
124
- assert False
125
-
126
- transform = Compose(
127
- [
128
- Resize(
129
- net_w,
130
- net_h,
131
- resize_target=None,
132
- keep_aspect_ratio=True,
133
- ensure_multiple_of=32,
134
- resize_method=resize_mode,
135
- image_interpolation_method=cv2.INTER_CUBIC,
136
- ),
137
- normalization,
138
- PrepareForNet(),
139
- ]
140
- )
141
-
142
- return model.eval(), transform
143
-
144
-
145
- class MiDaSInference(nn.Module):
146
- MODEL_TYPES_TORCH_HUB = [
147
- "DPT_Large",
148
- "DPT_Hybrid",
149
- "MiDaS_small"
150
- ]
151
- MODEL_TYPES_ISL = [
152
- "dpt_large",
153
- "dpt_hybrid",
154
- "midas_v21",
155
- "midas_v21_small",
156
- ]
157
-
158
- def __init__(self, model_type):
159
- super().__init__()
160
- assert (model_type in self.MODEL_TYPES_ISL)
161
- model, _ = load_model(model_type)
162
- self.model = model
163
- self.model.train = disabled_train
164
-
165
- def forward(self, x):
166
- with torch.no_grad():
167
- prediction = self.model(x)
168
- return prediction
169
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ControlNet/annotator/midas/midas/__init__.py DELETED
File without changes
ControlNet/annotator/midas/midas/base_model.py DELETED
@@ -1,16 +0,0 @@
1
- import torch
2
-
3
-
4
- class BaseModel(torch.nn.Module):
5
- def load(self, path):
6
- """Load model from file.
7
-
8
- Args:
9
- path (str): file path
10
- """
11
- parameters = torch.load(path, map_location=torch.device('cpu'))
12
-
13
- if "optimizer" in parameters:
14
- parameters = parameters["model"]
15
-
16
- self.load_state_dict(parameters)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ControlNet/annotator/midas/midas/dpt_depth.py DELETED
@@ -1,109 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- import torch.nn.functional as F
4
-
5
- from .base_model import BaseModel
6
- from .blocks import (
7
- FeatureFusionBlock,
8
- FeatureFusionBlock_custom,
9
- Interpolate,
10
- _make_encoder,
11
- forward_vit,
12
- )
13
-
14
-
15
- def _make_fusion_block(features, use_bn):
16
- return FeatureFusionBlock_custom(
17
- features,
18
- nn.ReLU(False),
19
- deconv=False,
20
- bn=use_bn,
21
- expand=False,
22
- align_corners=True,
23
- )
24
-
25
-
26
- class DPT(BaseModel):
27
- def __init__(
28
- self,
29
- head,
30
- features=256,
31
- backbone="vitb_rn50_384",
32
- readout="project",
33
- channels_last=False,
34
- use_bn=False,
35
- ):
36
-
37
- super(DPT, self).__init__()
38
-
39
- self.channels_last = channels_last
40
-
41
- hooks = {
42
- "vitb_rn50_384": [0, 1, 8, 11],
43
- "vitb16_384": [2, 5, 8, 11],
44
- "vitl16_384": [5, 11, 17, 23],
45
- }
46
-
47
- # Instantiate backbone and reassemble blocks
48
- self.pretrained, self.scratch = _make_encoder(
49
- backbone,
50
- features,
51
- False, # Set to true of you want to train from scratch, uses ImageNet weights
52
- groups=1,
53
- expand=False,
54
- exportable=False,
55
- hooks=hooks[backbone],
56
- use_readout=readout,
57
- )
58
-
59
- self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
60
- self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
61
- self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
62
- self.scratch.refinenet4 = _make_fusion_block(features, use_bn)
63
-
64
- self.scratch.output_conv = head
65
-
66
-
67
- def forward(self, x):
68
- if self.channels_last == True:
69
- x.contiguous(memory_format=torch.channels_last)
70
-
71
- layer_1, layer_2, layer_3, layer_4 = forward_vit(self.pretrained, x)
72
-
73
- layer_1_rn = self.scratch.layer1_rn(layer_1)
74
- layer_2_rn = self.scratch.layer2_rn(layer_2)
75
- layer_3_rn = self.scratch.layer3_rn(layer_3)
76
- layer_4_rn = self.scratch.layer4_rn(layer_4)
77
-
78
- path_4 = self.scratch.refinenet4(layer_4_rn)
79
- path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
80
- path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
81
- path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
82
-
83
- out = self.scratch.output_conv(path_1)
84
-
85
- return out
86
-
87
-
88
- class DPTDepthModel(DPT):
89
- def __init__(self, path=None, non_negative=True, **kwargs):
90
- features = kwargs["features"] if "features" in kwargs else 256
91
-
92
- head = nn.Sequential(
93
- nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1),
94
- Interpolate(scale_factor=2, mode="bilinear", align_corners=True),
95
- nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
96
- nn.ReLU(True),
97
- nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
98
- nn.ReLU(True) if non_negative else nn.Identity(),
99
- nn.Identity(),
100
- )
101
-
102
- super().__init__(head, **kwargs)
103
-
104
- if path is not None:
105
- self.load(path)
106
-
107
- def forward(self, x):
108
- return super().forward(x).squeeze(dim=1)
109
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ControlNet/annotator/midas/midas/midas_net.py DELETED
@@ -1,76 +0,0 @@
1
- """MidashNet: Network for monocular depth estimation trained by mixing several datasets.
2
- This file contains code that is adapted from
3
- https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
4
- """
5
- import torch
6
- import torch.nn as nn
7
-
8
- from .base_model import BaseModel
9
- from .blocks import FeatureFusionBlock, Interpolate, _make_encoder
10
-
11
-
12
- class MidasNet(BaseModel):
13
- """Network for monocular depth estimation.
14
- """
15
-
16
- def __init__(self, path=None, features=256, non_negative=True):
17
- """Init.
18
-
19
- Args:
20
- path (str, optional): Path to saved model. Defaults to None.
21
- features (int, optional): Number of features. Defaults to 256.
22
- backbone (str, optional): Backbone network for encoder. Defaults to resnet50
23
- """
24
- print("Loading weights: ", path)
25
-
26
- super(MidasNet, self).__init__()
27
-
28
- use_pretrained = False if path is None else True
29
-
30
- self.pretrained, self.scratch = _make_encoder(backbone="resnext101_wsl", features=features, use_pretrained=use_pretrained)
31
-
32
- self.scratch.refinenet4 = FeatureFusionBlock(features)
33
- self.scratch.refinenet3 = FeatureFusionBlock(features)
34
- self.scratch.refinenet2 = FeatureFusionBlock(features)
35
- self.scratch.refinenet1 = FeatureFusionBlock(features)
36
-
37
- self.scratch.output_conv = nn.Sequential(
38
- nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1),
39
- Interpolate(scale_factor=2, mode="bilinear"),
40
- nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1),
41
- nn.ReLU(True),
42
- nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
43
- nn.ReLU(True) if non_negative else nn.Identity(),
44
- )
45
-
46
- if path:
47
- self.load(path)
48
-
49
- def forward(self, x):
50
- """Forward pass.
51
-
52
- Args:
53
- x (tensor): input data (image)
54
-
55
- Returns:
56
- tensor: depth
57
- """
58
-
59
- layer_1 = self.pretrained.layer1(x)
60
- layer_2 = self.pretrained.layer2(layer_1)
61
- layer_3 = self.pretrained.layer3(layer_2)
62
- layer_4 = self.pretrained.layer4(layer_3)
63
-
64
- layer_1_rn = self.scratch.layer1_rn(layer_1)
65
- layer_2_rn = self.scratch.layer2_rn(layer_2)
66
- layer_3_rn = self.scratch.layer3_rn(layer_3)
67
- layer_4_rn = self.scratch.layer4_rn(layer_4)
68
-
69
- path_4 = self.scratch.refinenet4(layer_4_rn)
70
- path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
71
- path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
72
- path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
73
-
74
- out = self.scratch.output_conv(path_1)
75
-
76
- return torch.squeeze(out, dim=1)
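
As a rough, self-contained illustration of the top-down pattern the four refinenet stages above implement (each stage merges the coarser path with the next encoder feature and upsamples), here is a simplified stand-in; it is not the real FeatureFusionBlock, and the channel count and spatial sizes are assumptions.

```python
import torch
import torch.nn.functional as F

def fuse(coarse, skip=None):
    # Simplified stand-in for FeatureFusionBlock: add the skip feature, then upsample 2x.
    if skip is not None:
        coarse = coarse + skip
    return F.interpolate(coarse, scale_factor=2, mode="bilinear", align_corners=True)

C = 256  # every encoder output is first projected to `features` channels by scratch.layerN_rn
layer_1_rn = torch.randn(1, C, 96, 96)   # stride 4
layer_2_rn = torch.randn(1, C, 48, 48)   # stride 8
layer_3_rn = torch.randn(1, C, 24, 24)   # stride 16
layer_4_rn = torch.randn(1, C, 12, 12)   # stride 32

path_4 = fuse(layer_4_rn)                # -> 24x24
path_3 = fuse(path_4, layer_3_rn)        # -> 48x48
path_2 = fuse(path_3, layer_2_rn)        # -> 96x96
path_1 = fuse(path_2, layer_1_rn)        # -> 192x192; output_conv then maps this to 1-channel depth
print(path_1.shape)                      # torch.Size([1, 256, 192, 192])
```
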
ControlNet/annotator/midas/midas/midas_net_custom.py DELETED
@@ -1,128 +0,0 @@
1
- """MidashNet: Network for monocular depth estimation trained by mixing several datasets.
2
- This file contains code that is adapted from
3
- https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
4
- """
5
- import torch
6
- import torch.nn as nn
7
-
8
- from .base_model import BaseModel
9
- from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder
10
-
11
-
12
- class MidasNet_small(BaseModel):
13
- """Network for monocular depth estimation.
14
- """
15
-
16
- def __init__(self, path=None, features=64, backbone="efficientnet_lite3", non_negative=True, exportable=True, channels_last=False, align_corners=True,
17
- blocks={'expand': True}):
18
- """Init.
19
-
20
- Args:
21
- path (str, optional): Path to saved model. Defaults to None.
22
- features (int, optional): Number of features. Defaults to 256.
23
- backbone (str, optional): Backbone network for encoder. Defaults to resnet50
24
- """
25
- print("Loading weights: ", path)
26
-
27
- super(MidasNet_small, self).__init__()
28
-
29
- use_pretrained = False if path else True
30
-
31
- self.channels_last = channels_last
32
- self.blocks = blocks
33
- self.backbone = backbone
34
-
35
- self.groups = 1
36
-
37
- features1=features
38
- features2=features
39
- features3=features
40
- features4=features
41
- self.expand = False
42
- if "expand" in self.blocks and self.blocks['expand'] == True:
43
- self.expand = True
44
- features1=features
45
- features2=features*2
46
- features3=features*4
47
- features4=features*8
48
-
49
- self.pretrained, self.scratch = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups, expand=self.expand, exportable=exportable)
50
-
51
- self.scratch.activation = nn.ReLU(False)
52
-
53
- self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
54
- self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
55
- self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
56
- self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners)
57
-
58
-
59
- self.scratch.output_conv = nn.Sequential(
60
- nn.Conv2d(features, features//2, kernel_size=3, stride=1, padding=1, groups=self.groups),
61
- Interpolate(scale_factor=2, mode="bilinear"),
62
- nn.Conv2d(features//2, 32, kernel_size=3, stride=1, padding=1),
63
- self.scratch.activation,
64
- nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
65
- nn.ReLU(True) if non_negative else nn.Identity(),
66
- nn.Identity(),
67
- )
68
-
69
- if path:
70
- self.load(path)
71
-
72
-
73
- def forward(self, x):
74
- """Forward pass.
75
-
76
- Args:
77
- x (tensor): input data (image)
78
-
79
- Returns:
80
- tensor: depth
81
- """
82
- if self.channels_last==True:
83
- print("self.channels_last = ", self.channels_last)
84
- x.contiguous(memory_format=torch.channels_last)
85
-
86
-
87
- layer_1 = self.pretrained.layer1(x)
88
- layer_2 = self.pretrained.layer2(layer_1)
89
- layer_3 = self.pretrained.layer3(layer_2)
90
- layer_4 = self.pretrained.layer4(layer_3)
91
-
92
- layer_1_rn = self.scratch.layer1_rn(layer_1)
93
- layer_2_rn = self.scratch.layer2_rn(layer_2)
94
- layer_3_rn = self.scratch.layer3_rn(layer_3)
95
- layer_4_rn = self.scratch.layer4_rn(layer_4)
96
-
97
-
98
- path_4 = self.scratch.refinenet4(layer_4_rn)
99
- path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
100
- path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
101
- path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
102
-
103
- out = self.scratch.output_conv(path_1)
104
-
105
- return torch.squeeze(out, dim=1)
106
-
107
-
108
-
109
- def fuse_model(m):
110
- prev_previous_type = nn.Identity()
111
- prev_previous_name = ''
112
- previous_type = nn.Identity()
113
- previous_name = ''
114
- for name, module in m.named_modules():
115
- if prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d and type(module) == nn.ReLU:
116
- # print("FUSED ", prev_previous_name, previous_name, name)
117
- torch.quantization.fuse_modules(m, [prev_previous_name, previous_name, name], inplace=True)
118
- elif prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d:
119
- # print("FUSED ", prev_previous_name, previous_name)
120
- torch.quantization.fuse_modules(m, [prev_previous_name, previous_name], inplace=True)
121
- # elif previous_type == nn.Conv2d and type(module) == nn.ReLU:
122
- # print("FUSED ", previous_name, name)
123
- # torch.quantization.fuse_modules(m, [previous_name, name], inplace=True)
124
-
125
- prev_previous_type = previous_type
126
- prev_previous_name = previous_name
127
- previous_type = type(module)
128
- previous_name = name
 
ControlNet/annotator/midas/midas/transforms.py DELETED
@@ -1,234 +0,0 @@
1
- import numpy as np
2
- import cv2
3
- import math
4
-
5
-
6
- def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
7
- """Rezise the sample to ensure the given size. Keeps aspect ratio.
8
-
9
- Args:
10
- sample (dict): sample
11
- size (tuple): image size
12
-
13
- Returns:
14
- tuple: new size
15
- """
16
- shape = list(sample["disparity"].shape)
17
-
18
- if shape[0] >= size[0] and shape[1] >= size[1]:
19
- return sample
20
-
21
- scale = [0, 0]
22
- scale[0] = size[0] / shape[0]
23
- scale[1] = size[1] / shape[1]
24
-
25
- scale = max(scale)
26
-
27
- shape[0] = math.ceil(scale * shape[0])
28
- shape[1] = math.ceil(scale * shape[1])
29
-
30
- # resize
31
- sample["image"] = cv2.resize(
32
- sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method
33
- )
34
-
35
- sample["disparity"] = cv2.resize(
36
- sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST
37
- )
38
- sample["mask"] = cv2.resize(
39
- sample["mask"].astype(np.float32),
40
- tuple(shape[::-1]),
41
- interpolation=cv2.INTER_NEAREST,
42
- )
43
- sample["mask"] = sample["mask"].astype(bool)
44
-
45
- return tuple(shape)
46
-
47
-
48
- class Resize(object):
49
- """Resize sample to given size (width, height).
50
- """
51
-
52
- def __init__(
53
- self,
54
- width,
55
- height,
56
- resize_target=True,
57
- keep_aspect_ratio=False,
58
- ensure_multiple_of=1,
59
- resize_method="lower_bound",
60
- image_interpolation_method=cv2.INTER_AREA,
61
- ):
62
- """Init.
63
-
64
- Args:
65
- width (int): desired output width
66
- height (int): desired output height
67
- resize_target (bool, optional):
68
- True: Resize the full sample (image, mask, target).
69
- False: Resize image only.
70
- Defaults to True.
71
- keep_aspect_ratio (bool, optional):
72
- True: Keep the aspect ratio of the input sample.
73
- Output sample might not have the given width and height, and
74
- resize behaviour depends on the parameter 'resize_method'.
75
- Defaults to False.
76
- ensure_multiple_of (int, optional):
77
- Output width and height is constrained to be multiple of this parameter.
78
- Defaults to 1.
79
- resize_method (str, optional):
80
- "lower_bound": Output will be at least as large as the given size.
81
- "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
82
- "minimal": Scale as least as possible. (Output size might be smaller than given size.)
83
- Defaults to "lower_bound".
84
- """
85
- self.__width = width
86
- self.__height = height
87
-
88
- self.__resize_target = resize_target
89
- self.__keep_aspect_ratio = keep_aspect_ratio
90
- self.__multiple_of = ensure_multiple_of
91
- self.__resize_method = resize_method
92
- self.__image_interpolation_method = image_interpolation_method
93
-
94
- def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
95
- y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)
96
-
97
- if max_val is not None and y > max_val:
98
- y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)
99
-
100
- if y < min_val:
101
- y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)
102
-
103
- return y
104
-
105
- def get_size(self, width, height):
106
- # determine new height and width
107
- scale_height = self.__height / height
108
- scale_width = self.__width / width
109
-
110
- if self.__keep_aspect_ratio:
111
- if self.__resize_method == "lower_bound":
112
- # scale such that output size is lower bound
113
- if scale_width > scale_height:
114
- # fit width
115
- scale_height = scale_width
116
- else:
117
- # fit height
118
- scale_width = scale_height
119
- elif self.__resize_method == "upper_bound":
120
- # scale such that output size is upper bound
121
- if scale_width < scale_height:
122
- # fit width
123
- scale_height = scale_width
124
- else:
125
- # fit height
126
- scale_width = scale_height
127
- elif self.__resize_method == "minimal":
128
- # scale as least as possbile
129
- if abs(1 - scale_width) < abs(1 - scale_height):
130
- # fit width
131
- scale_height = scale_width
132
- else:
133
- # fit height
134
- scale_width = scale_height
135
- else:
136
- raise ValueError(
137
- f"resize_method {self.__resize_method} not implemented"
138
- )
139
-
140
- if self.__resize_method == "lower_bound":
141
- new_height = self.constrain_to_multiple_of(
142
- scale_height * height, min_val=self.__height
143
- )
144
- new_width = self.constrain_to_multiple_of(
145
- scale_width * width, min_val=self.__width
146
- )
147
- elif self.__resize_method == "upper_bound":
148
- new_height = self.constrain_to_multiple_of(
149
- scale_height * height, max_val=self.__height
150
- )
151
- new_width = self.constrain_to_multiple_of(
152
- scale_width * width, max_val=self.__width
153
- )
154
- elif self.__resize_method == "minimal":
155
- new_height = self.constrain_to_multiple_of(scale_height * height)
156
- new_width = self.constrain_to_multiple_of(scale_width * width)
157
- else:
158
- raise ValueError(f"resize_method {self.__resize_method} not implemented")
159
-
160
- return (new_width, new_height)
161
-
162
- def __call__(self, sample):
163
- width, height = self.get_size(
164
- sample["image"].shape[1], sample["image"].shape[0]
165
- )
166
-
167
- # resize sample
168
- sample["image"] = cv2.resize(
169
- sample["image"],
170
- (width, height),
171
- interpolation=self.__image_interpolation_method,
172
- )
173
-
174
- if self.__resize_target:
175
- if "disparity" in sample:
176
- sample["disparity"] = cv2.resize(
177
- sample["disparity"],
178
- (width, height),
179
- interpolation=cv2.INTER_NEAREST,
180
- )
181
-
182
- if "depth" in sample:
183
- sample["depth"] = cv2.resize(
184
- sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST
185
- )
186
-
187
- sample["mask"] = cv2.resize(
188
- sample["mask"].astype(np.float32),
189
- (width, height),
190
- interpolation=cv2.INTER_NEAREST,
191
- )
192
- sample["mask"] = sample["mask"].astype(bool)
193
-
194
- return sample
195
-
196
-
197
- class NormalizeImage(object):
198
- """Normlize image by given mean and std.
199
- """
200
-
201
- def __init__(self, mean, std):
202
- self.__mean = mean
203
- self.__std = std
204
-
205
- def __call__(self, sample):
206
- sample["image"] = (sample["image"] - self.__mean) / self.__std
207
-
208
- return sample
209
-
210
-
211
- class PrepareForNet(object):
212
- """Prepare sample for usage as network input.
213
- """
214
-
215
- def __init__(self):
216
- pass
217
-
218
- def __call__(self, sample):
219
- image = np.transpose(sample["image"], (2, 0, 1))
220
- sample["image"] = np.ascontiguousarray(image).astype(np.float32)
221
-
222
- if "mask" in sample:
223
- sample["mask"] = sample["mask"].astype(np.float32)
224
- sample["mask"] = np.ascontiguousarray(sample["mask"])
225
-
226
- if "disparity" in sample:
227
- disparity = sample["disparity"].astype(np.float32)
228
- sample["disparity"] = np.ascontiguousarray(disparity)
229
-
230
- if "depth" in sample:
231
- depth = sample["depth"].astype(np.float32)
232
- sample["depth"] = np.ascontiguousarray(depth)
233
-
234
- return sample
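
These three transforms are normally chained into a single preprocessing pipeline operating on a sample dict. A hedged sketch of that usage, assuming the module is importable under this repository layout and torchvision is available; the mean/std values are the usual ImageNet statistics, not something defined in this file:

```python
import numpy as np
from torchvision.transforms import Compose

# Import path is an assumption based on this repository's layout.
from annotator.midas.midas.transforms import Resize, NormalizeImage, PrepareForNet

transform = Compose([
    Resize(384, 384, resize_target=False, keep_aspect_ratio=True,
           ensure_multiple_of=32, resize_method="upper_bound"),
    NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    PrepareForNet(),
])

img = np.random.rand(480, 640, 3).astype(np.float32)   # RGB image scaled to [0, 1]
sample = transform({"image": img})
print(sample["image"].shape)   # (3, H, W) with H, W multiples of 32 and at most 384
```
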
ControlNet/annotator/midas/midas/vit.py DELETED
@@ -1,491 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- import timm
4
- import types
5
- import math
6
- import torch.nn.functional as F
7
-
8
-
9
- class Slice(nn.Module):
10
- def __init__(self, start_index=1):
11
- super(Slice, self).__init__()
12
- self.start_index = start_index
13
-
14
- def forward(self, x):
15
- return x[:, self.start_index :]
16
-
17
-
18
- class AddReadout(nn.Module):
19
- def __init__(self, start_index=1):
20
- super(AddReadout, self).__init__()
21
- self.start_index = start_index
22
-
23
- def forward(self, x):
24
- if self.start_index == 2:
25
- readout = (x[:, 0] + x[:, 1]) / 2
26
- else:
27
- readout = x[:, 0]
28
- return x[:, self.start_index :] + readout.unsqueeze(1)
29
-
30
-
31
- class ProjectReadout(nn.Module):
32
- def __init__(self, in_features, start_index=1):
33
- super(ProjectReadout, self).__init__()
34
- self.start_index = start_index
35
-
36
- self.project = nn.Sequential(nn.Linear(2 * in_features, in_features), nn.GELU())
37
-
38
- def forward(self, x):
39
- readout = x[:, 0].unsqueeze(1).expand_as(x[:, self.start_index :])
40
- features = torch.cat((x[:, self.start_index :], readout), -1)
41
-
42
- return self.project(features)
43
-
44
-
45
- class Transpose(nn.Module):
46
- def __init__(self, dim0, dim1):
47
- super(Transpose, self).__init__()
48
- self.dim0 = dim0
49
- self.dim1 = dim1
50
-
51
- def forward(self, x):
52
- x = x.transpose(self.dim0, self.dim1)
53
- return x
54
-
55
-
56
- def forward_vit(pretrained, x):
57
- b, c, h, w = x.shape
58
-
59
- glob = pretrained.model.forward_flex(x)
60
-
61
- layer_1 = pretrained.activations["1"]
62
- layer_2 = pretrained.activations["2"]
63
- layer_3 = pretrained.activations["3"]
64
- layer_4 = pretrained.activations["4"]
65
-
66
- layer_1 = pretrained.act_postprocess1[0:2](layer_1)
67
- layer_2 = pretrained.act_postprocess2[0:2](layer_2)
68
- layer_3 = pretrained.act_postprocess3[0:2](layer_3)
69
- layer_4 = pretrained.act_postprocess4[0:2](layer_4)
70
-
71
- unflatten = nn.Sequential(
72
- nn.Unflatten(
73
- 2,
74
- torch.Size(
75
- [
76
- h // pretrained.model.patch_size[1],
77
- w // pretrained.model.patch_size[0],
78
- ]
79
- ),
80
- )
81
- )
82
-
83
- if layer_1.ndim == 3:
84
- layer_1 = unflatten(layer_1)
85
- if layer_2.ndim == 3:
86
- layer_2 = unflatten(layer_2)
87
- if layer_3.ndim == 3:
88
- layer_3 = unflatten(layer_3)
89
- if layer_4.ndim == 3:
90
- layer_4 = unflatten(layer_4)
91
-
92
- layer_1 = pretrained.act_postprocess1[3 : len(pretrained.act_postprocess1)](layer_1)
93
- layer_2 = pretrained.act_postprocess2[3 : len(pretrained.act_postprocess2)](layer_2)
94
- layer_3 = pretrained.act_postprocess3[3 : len(pretrained.act_postprocess3)](layer_3)
95
- layer_4 = pretrained.act_postprocess4[3 : len(pretrained.act_postprocess4)](layer_4)
96
-
97
- return layer_1, layer_2, layer_3, layer_4
98
-
99
-
100
- def _resize_pos_embed(self, posemb, gs_h, gs_w):
101
- posemb_tok, posemb_grid = (
102
- posemb[:, : self.start_index],
103
- posemb[0, self.start_index :],
104
- )
105
-
106
- gs_old = int(math.sqrt(len(posemb_grid)))
107
-
108
- posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
109
- posemb_grid = F.interpolate(posemb_grid, size=(gs_h, gs_w), mode="bilinear")
110
- posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1)
111
-
112
- posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
113
-
114
- return posemb
115
-
116
-
117
- def forward_flex(self, x):
118
- b, c, h, w = x.shape
119
-
120
- pos_embed = self._resize_pos_embed(
121
- self.pos_embed, h // self.patch_size[1], w // self.patch_size[0]
122
- )
123
-
124
- B = x.shape[0]
125
-
126
- if hasattr(self.patch_embed, "backbone"):
127
- x = self.patch_embed.backbone(x)
128
- if isinstance(x, (list, tuple)):
129
- x = x[-1] # last feature if backbone outputs list/tuple of features
130
-
131
- x = self.patch_embed.proj(x).flatten(2).transpose(1, 2)
132
-
133
- if getattr(self, "dist_token", None) is not None:
134
- cls_tokens = self.cls_token.expand(
135
- B, -1, -1
136
- ) # stole cls_tokens impl from Phil Wang, thanks
137
- dist_token = self.dist_token.expand(B, -1, -1)
138
- x = torch.cat((cls_tokens, dist_token, x), dim=1)
139
- else:
140
- cls_tokens = self.cls_token.expand(
141
- B, -1, -1
142
- ) # stole cls_tokens impl from Phil Wang, thanks
143
- x = torch.cat((cls_tokens, x), dim=1)
144
-
145
- x = x + pos_embed
146
- x = self.pos_drop(x)
147
-
148
- for blk in self.blocks:
149
- x = blk(x)
150
-
151
- x = self.norm(x)
152
-
153
- return x
154
-
155
-
156
- activations = {}
157
-
158
-
159
- def get_activation(name):
160
- def hook(model, input, output):
161
- activations[name] = output
162
-
163
- return hook
164
-
165
-
166
- def get_readout_oper(vit_features, features, use_readout, start_index=1):
167
- if use_readout == "ignore":
168
- readout_oper = [Slice(start_index)] * len(features)
169
- elif use_readout == "add":
170
- readout_oper = [AddReadout(start_index)] * len(features)
171
- elif use_readout == "project":
172
- readout_oper = [
173
- ProjectReadout(vit_features, start_index) for out_feat in features
174
- ]
175
- else:
176
- assert (
177
- False
178
- ), "wrong operation for readout token, use_readout can be 'ignore', 'add', or 'project'"
179
-
180
- return readout_oper
181
-
182
-
183
- def _make_vit_b16_backbone(
184
- model,
185
- features=[96, 192, 384, 768],
186
- size=[384, 384],
187
- hooks=[2, 5, 8, 11],
188
- vit_features=768,
189
- use_readout="ignore",
190
- start_index=1,
191
- ):
192
- pretrained = nn.Module()
193
-
194
- pretrained.model = model
195
- pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
196
- pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
197
- pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
198
- pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))
199
-
200
- pretrained.activations = activations
201
-
202
- readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)
203
-
204
- # 32, 48, 136, 384
205
- pretrained.act_postprocess1 = nn.Sequential(
206
- readout_oper[0],
207
- Transpose(1, 2),
208
- nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
209
- nn.Conv2d(
210
- in_channels=vit_features,
211
- out_channels=features[0],
212
- kernel_size=1,
213
- stride=1,
214
- padding=0,
215
- ),
216
- nn.ConvTranspose2d(
217
- in_channels=features[0],
218
- out_channels=features[0],
219
- kernel_size=4,
220
- stride=4,
221
- padding=0,
222
- bias=True,
223
- dilation=1,
224
- groups=1,
225
- ),
226
- )
227
-
228
- pretrained.act_postprocess2 = nn.Sequential(
229
- readout_oper[1],
230
- Transpose(1, 2),
231
- nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
232
- nn.Conv2d(
233
- in_channels=vit_features,
234
- out_channels=features[1],
235
- kernel_size=1,
236
- stride=1,
237
- padding=0,
238
- ),
239
- nn.ConvTranspose2d(
240
- in_channels=features[1],
241
- out_channels=features[1],
242
- kernel_size=2,
243
- stride=2,
244
- padding=0,
245
- bias=True,
246
- dilation=1,
247
- groups=1,
248
- ),
249
- )
250
-
251
- pretrained.act_postprocess3 = nn.Sequential(
252
- readout_oper[2],
253
- Transpose(1, 2),
254
- nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
255
- nn.Conv2d(
256
- in_channels=vit_features,
257
- out_channels=features[2],
258
- kernel_size=1,
259
- stride=1,
260
- padding=0,
261
- ),
262
- )
263
-
264
- pretrained.act_postprocess4 = nn.Sequential(
265
- readout_oper[3],
266
- Transpose(1, 2),
267
- nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
268
- nn.Conv2d(
269
- in_channels=vit_features,
270
- out_channels=features[3],
271
- kernel_size=1,
272
- stride=1,
273
- padding=0,
274
- ),
275
- nn.Conv2d(
276
- in_channels=features[3],
277
- out_channels=features[3],
278
- kernel_size=3,
279
- stride=2,
280
- padding=1,
281
- ),
282
- )
283
-
284
- pretrained.model.start_index = start_index
285
- pretrained.model.patch_size = [16, 16]
286
-
287
- # We inject this function into the VisionTransformer instances so that
288
- # we can use it with interpolated position embeddings without modifying the library source.
289
- pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
290
- pretrained.model._resize_pos_embed = types.MethodType(
291
- _resize_pos_embed, pretrained.model
292
- )
293
-
294
- return pretrained
295
-
296
-
297
- def _make_pretrained_vitl16_384(pretrained, use_readout="ignore", hooks=None):
298
- model = timm.create_model("vit_large_patch16_384", pretrained=pretrained)
299
-
300
- hooks = [5, 11, 17, 23] if hooks == None else hooks
301
- return _make_vit_b16_backbone(
302
- model,
303
- features=[256, 512, 1024, 1024],
304
- hooks=hooks,
305
- vit_features=1024,
306
- use_readout=use_readout,
307
- )
308
-
309
-
310
- def _make_pretrained_vitb16_384(pretrained, use_readout="ignore", hooks=None):
311
- model = timm.create_model("vit_base_patch16_384", pretrained=pretrained)
312
-
313
- hooks = [2, 5, 8, 11] if hooks == None else hooks
314
- return _make_vit_b16_backbone(
315
- model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
316
- )
317
-
318
-
319
- def _make_pretrained_deitb16_384(pretrained, use_readout="ignore", hooks=None):
320
- model = timm.create_model("vit_deit_base_patch16_384", pretrained=pretrained)
321
-
322
- hooks = [2, 5, 8, 11] if hooks == None else hooks
323
- return _make_vit_b16_backbone(
324
- model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
325
- )
326
-
327
-
328
- def _make_pretrained_deitb16_distil_384(pretrained, use_readout="ignore", hooks=None):
329
- model = timm.create_model(
330
- "vit_deit_base_distilled_patch16_384", pretrained=pretrained
331
- )
332
-
333
- hooks = [2, 5, 8, 11] if hooks == None else hooks
334
- return _make_vit_b16_backbone(
335
- model,
336
- features=[96, 192, 384, 768],
337
- hooks=hooks,
338
- use_readout=use_readout,
339
- start_index=2,
340
- )
341
-
342
-
343
- def _make_vit_b_rn50_backbone(
344
- model,
345
- features=[256, 512, 768, 768],
346
- size=[384, 384],
347
- hooks=[0, 1, 8, 11],
348
- vit_features=768,
349
- use_vit_only=False,
350
- use_readout="ignore",
351
- start_index=1,
352
- ):
353
- pretrained = nn.Module()
354
-
355
- pretrained.model = model
356
-
357
- if use_vit_only == True:
358
- pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
359
- pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
360
- else:
361
- pretrained.model.patch_embed.backbone.stages[0].register_forward_hook(
362
- get_activation("1")
363
- )
364
- pretrained.model.patch_embed.backbone.stages[1].register_forward_hook(
365
- get_activation("2")
366
- )
367
-
368
- pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
369
- pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))
370
-
371
- pretrained.activations = activations
372
-
373
- readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)
374
-
375
- if use_vit_only == True:
376
- pretrained.act_postprocess1 = nn.Sequential(
377
- readout_oper[0],
378
- Transpose(1, 2),
379
- nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
380
- nn.Conv2d(
381
- in_channels=vit_features,
382
- out_channels=features[0],
383
- kernel_size=1,
384
- stride=1,
385
- padding=0,
386
- ),
387
- nn.ConvTranspose2d(
388
- in_channels=features[0],
389
- out_channels=features[0],
390
- kernel_size=4,
391
- stride=4,
392
- padding=0,
393
- bias=True,
394
- dilation=1,
395
- groups=1,
396
- ),
397
- )
398
-
399
- pretrained.act_postprocess2 = nn.Sequential(
400
- readout_oper[1],
401
- Transpose(1, 2),
402
- nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
403
- nn.Conv2d(
404
- in_channels=vit_features,
405
- out_channels=features[1],
406
- kernel_size=1,
407
- stride=1,
408
- padding=0,
409
- ),
410
- nn.ConvTranspose2d(
411
- in_channels=features[1],
412
- out_channels=features[1],
413
- kernel_size=2,
414
- stride=2,
415
- padding=0,
416
- bias=True,
417
- dilation=1,
418
- groups=1,
419
- ),
420
- )
421
- else:
422
- pretrained.act_postprocess1 = nn.Sequential(
423
- nn.Identity(), nn.Identity(), nn.Identity()
424
- )
425
- pretrained.act_postprocess2 = nn.Sequential(
426
- nn.Identity(), nn.Identity(), nn.Identity()
427
- )
428
-
429
- pretrained.act_postprocess3 = nn.Sequential(
430
- readout_oper[2],
431
- Transpose(1, 2),
432
- nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
433
- nn.Conv2d(
434
- in_channels=vit_features,
435
- out_channels=features[2],
436
- kernel_size=1,
437
- stride=1,
438
- padding=0,
439
- ),
440
- )
441
-
442
- pretrained.act_postprocess4 = nn.Sequential(
443
- readout_oper[3],
444
- Transpose(1, 2),
445
- nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
446
- nn.Conv2d(
447
- in_channels=vit_features,
448
- out_channels=features[3],
449
- kernel_size=1,
450
- stride=1,
451
- padding=0,
452
- ),
453
- nn.Conv2d(
454
- in_channels=features[3],
455
- out_channels=features[3],
456
- kernel_size=3,
457
- stride=2,
458
- padding=1,
459
- ),
460
- )
461
-
462
- pretrained.model.start_index = start_index
463
- pretrained.model.patch_size = [16, 16]
464
-
465
- # We inject this function into the VisionTransformer instances so that
466
- # we can use it with interpolated position embeddings without modifying the library source.
467
- pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
468
-
469
- # We inject this function into the VisionTransformer instances so that
470
- # we can use it with interpolated position embeddings without modifying the library source.
471
- pretrained.model._resize_pos_embed = types.MethodType(
472
- _resize_pos_embed, pretrained.model
473
- )
474
-
475
- return pretrained
476
-
477
-
478
- def _make_pretrained_vitb_rn50_384(
479
- pretrained, use_readout="ignore", hooks=None, use_vit_only=False
480
- ):
481
- model = timm.create_model("vit_base_resnet50_384", pretrained=pretrained)
482
-
483
- hooks = [0, 1, 8, 11] if hooks == None else hooks
484
- return _make_vit_b_rn50_backbone(
485
- model,
486
- features=[256, 512, 768, 768],
487
- size=[384, 384],
488
- hooks=hooks,
489
- use_vit_only=use_vit_only,
490
- use_readout=use_readout,
491
- )
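
The Slice / AddReadout / ProjectReadout modules near the top of this file decide what happens to the ViT readout ([CLS]) token before the patch tokens are reshaped into feature maps. A small self-contained illustration of the three strategies on a dummy token sequence (the Linear+GELU stands in for ProjectReadout's projection; the shapes are assumptions):

```python
import torch
import torch.nn as nn

B, N, C = 1, 576, 768                     # 24x24 patch tokens, ViT-Base width
tokens = torch.randn(B, 1 + N, C)         # readout token followed by patch tokens

cls_tok, patches = tokens[:, 0], tokens[:, 1:]

ignored = patches                                        # "ignore": Slice drops the readout token
added = patches + cls_tok.unsqueeze(1)                   # "add": AddReadout broadcast-adds it
project = nn.Sequential(nn.Linear(2 * C, C), nn.GELU())  # "project": concat, then project back to C
projected = project(torch.cat((patches, cls_tok.unsqueeze(1).expand_as(patches)), dim=-1))

print(ignored.shape, added.shape, projected.shape)       # each torch.Size([1, 576, 768])
```
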
ControlNet/annotator/midas/utils.py DELETED
@@ -1,189 +0,0 @@
1
- """Utils for monoDepth."""
2
- import sys
3
- import re
4
- import numpy as np
5
- import cv2
6
- import torch
7
-
8
-
9
- def read_pfm(path):
10
- """Read pfm file.
11
-
12
- Args:
13
- path (str): path to file
14
-
15
- Returns:
16
- tuple: (data, scale)
17
- """
18
- with open(path, "rb") as file:
19
-
20
- color = None
21
- width = None
22
- height = None
23
- scale = None
24
- endian = None
25
-
26
- header = file.readline().rstrip()
27
- if header.decode("ascii") == "PF":
28
- color = True
29
- elif header.decode("ascii") == "Pf":
30
- color = False
31
- else:
32
- raise Exception("Not a PFM file: " + path)
33
-
34
- dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("ascii"))
35
- if dim_match:
36
- width, height = list(map(int, dim_match.groups()))
37
- else:
38
- raise Exception("Malformed PFM header.")
39
-
40
- scale = float(file.readline().decode("ascii").rstrip())
41
- if scale < 0:
42
- # little-endian
43
- endian = "<"
44
- scale = -scale
45
- else:
46
- # big-endian
47
- endian = ">"
48
-
49
- data = np.fromfile(file, endian + "f")
50
- shape = (height, width, 3) if color else (height, width)
51
-
52
- data = np.reshape(data, shape)
53
- data = np.flipud(data)
54
-
55
- return data, scale
56
-
57
-
58
- def write_pfm(path, image, scale=1):
59
- """Write pfm file.
60
-
61
- Args:
62
- path (str): pathto file
63
- image (array): data
64
- scale (int, optional): Scale. Defaults to 1.
65
- """
66
-
67
- with open(path, "wb") as file:
68
- color = None
69
-
70
- if image.dtype.name != "float32":
71
- raise Exception("Image dtype must be float32.")
72
-
73
- image = np.flipud(image)
74
-
75
- if len(image.shape) == 3 and image.shape[2] == 3: # color image
76
- color = True
77
- elif (
78
- len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1
79
- ): # greyscale
80
- color = False
81
- else:
82
- raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")
83
-
84
- file.write("PF\n" if color else "Pf\n".encode())
85
- file.write("%d %d\n".encode() % (image.shape[1], image.shape[0]))
86
-
87
- endian = image.dtype.byteorder
88
-
89
- if endian == "<" or endian == "=" and sys.byteorder == "little":
90
- scale = -scale
91
-
92
- file.write("%f\n".encode() % scale)
93
-
94
- image.tofile(file)
95
-
96
-
97
- def read_image(path):
98
- """Read image and output RGB image (0-1).
99
-
100
- Args:
101
- path (str): path to file
102
-
103
- Returns:
104
- array: RGB image (0-1)
105
- """
106
- img = cv2.imread(path)
107
-
108
- if img.ndim == 2:
109
- img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
110
-
111
- img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0
112
-
113
- return img
114
-
115
-
116
- def resize_image(img):
117
- """Resize image and make it fit for network.
118
-
119
- Args:
120
- img (array): image
121
-
122
- Returns:
123
- tensor: data ready for network
124
- """
125
- height_orig = img.shape[0]
126
- width_orig = img.shape[1]
127
-
128
- if width_orig > height_orig:
129
- scale = width_orig / 384
130
- else:
131
- scale = height_orig / 384
132
-
133
- height = (np.ceil(height_orig / scale / 32) * 32).astype(int)
134
- width = (np.ceil(width_orig / scale / 32) * 32).astype(int)
135
-
136
- img_resized = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)
137
-
138
- img_resized = (
139
- torch.from_numpy(np.transpose(img_resized, (2, 0, 1))).contiguous().float()
140
- )
141
- img_resized = img_resized.unsqueeze(0)
142
-
143
- return img_resized
144
-
145
-
146
- def resize_depth(depth, width, height):
147
- """Resize depth map and bring to CPU (numpy).
148
-
149
- Args:
150
- depth (tensor): depth
151
- width (int): image width
152
- height (int): image height
153
-
154
- Returns:
155
- array: processed depth
156
- """
157
- depth = torch.squeeze(depth[0, :, :, :]).to("cpu")
158
-
159
- depth_resized = cv2.resize(
160
- depth.numpy(), (width, height), interpolation=cv2.INTER_CUBIC
161
- )
162
-
163
- return depth_resized
164
-
165
- def write_depth(path, depth, bits=1):
166
- """Write depth map to pfm and png file.
167
-
168
- Args:
169
- path (str): filepath without extension
170
- depth (array): depth
171
- """
172
- write_pfm(path + ".pfm", depth.astype(np.float32))
173
-
174
- depth_min = depth.min()
175
- depth_max = depth.max()
176
-
177
- max_val = (2**(8*bits))-1
178
-
179
- if depth_max - depth_min > np.finfo("float").eps:
180
- out = max_val * (depth - depth_min) / (depth_max - depth_min)
181
- else:
182
- out = np.zeros(depth.shape, dtype=depth.type)
183
-
184
- if bits == 1:
185
- cv2.imwrite(path + ".png", out.astype("uint8"))
186
- elif bits == 2:
187
- cv2.imwrite(path + ".png", out.astype("uint16"))
188
-
189
- return
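
The heart of write_depth above is a min-max rescale of the depth map into the integer range of the output PNG. A standalone numpy sketch of that step for bits=2 (16-bit output); note that the deleted code's `dtype=depth.type` fallback would raise AttributeError on a constant depth map, so the sketch uses np.zeros_like instead:

```python
import numpy as np

depth = (np.random.rand(480, 640) * 10.0).astype(np.float32)   # arbitrary example depth
bits = 2
max_val = (2 ** (8 * bits)) - 1                                 # 65535 for a 16-bit PNG

d_min, d_max = depth.min(), depth.max()
if d_max - d_min > np.finfo("float").eps:
    out = max_val * (depth - d_min) / (d_max - d_min)           # rescale to [0, max_val]
else:
    out = np.zeros_like(depth)                                  # degenerate (constant) map

out = out.astype("uint16")                                      # what cv2.imwrite(...) receives
print(out.min(), out.max())                                     # 0 65535
```
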
ControlNet/annotator/mlsd/LICENSE DELETED
@@ -1,201 +0,0 @@
1
- Apache License
2
- Version 2.0, January 2004
3
- http://www.apache.org/licenses/
4
-
5
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
-
7
- 1. Definitions.
8
-
9
- "License" shall mean the terms and conditions for use, reproduction,
10
- and distribution as defined by Sections 1 through 9 of this document.
11
-
12
- "Licensor" shall mean the copyright owner or entity authorized by
13
- the copyright owner that is granting the License.
14
-
15
- "Legal Entity" shall mean the union of the acting entity and all
16
- other entities that control, are controlled by, or are under common
17
- control with that entity. For the purposes of this definition,
18
- "control" means (i) the power, direct or indirect, to cause the
19
- direction or management of such entity, whether by contract or
20
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
- outstanding shares, or (iii) beneficial ownership of such entity.
22
-
23
- "You" (or "Your") shall mean an individual or Legal Entity
24
- exercising permissions granted by this License.
25
-
26
- "Source" form shall mean the preferred form for making modifications,
27
- including but not limited to software source code, documentation
28
- source, and configuration files.
29
-
30
- "Object" form shall mean any form resulting from mechanical
31
- transformation or translation of a Source form, including but
32
- not limited to compiled object code, generated documentation,
33
- and conversions to other media types.
34
-
35
- "Work" shall mean the work of authorship, whether in Source or
36
- Object form, made available under the License, as indicated by a
37
- copyright notice that is included in or attached to the work
38
- (an example is provided in the Appendix below).
39
-
40
- "Derivative Works" shall mean any work, whether in Source or Object
41
- form, that is based on (or derived from) the Work and for which the
42
- editorial revisions, annotations, elaborations, or other modifications
43
- represent, as a whole, an original work of authorship. For the purposes
44
- of this License, Derivative Works shall not include works that remain
45
- separable from, or merely link (or bind by name) to the interfaces of,
46
- the Work and Derivative Works thereof.
47
-
48
- "Contribution" shall mean any work of authorship, including
49
- the original version of the Work and any modifications or additions
50
- to that Work or Derivative Works thereof, that is intentionally
51
- submitted to Licensor for inclusion in the Work by the copyright owner
52
- or by an individual or Legal Entity authorized to submit on behalf of
53
- the copyright owner. For the purposes of this definition, "submitted"
54
- means any form of electronic, verbal, or written communication sent
55
- to the Licensor or its representatives, including but not limited to
56
- communication on electronic mailing lists, source code control systems,
57
- and issue tracking systems that are managed by, or on behalf of, the
58
- Licensor for the purpose of discussing and improving the Work, but
59
- excluding communication that is conspicuously marked or otherwise
60
- designated in writing by the copyright owner as "Not a Contribution."
61
-
62
- "Contributor" shall mean Licensor and any individual or Legal Entity
63
- on behalf of whom a Contribution has been received by Licensor and
64
- subsequently incorporated within the Work.
65
-
66
- 2. Grant of Copyright License. Subject to the terms and conditions of
67
- this License, each Contributor hereby grants to You a perpetual,
68
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
- copyright license to reproduce, prepare Derivative Works of,
70
- publicly display, publicly perform, sublicense, and distribute the
71
- Work and such Derivative Works in Source or Object form.
72
-
73
- 3. Grant of Patent License. Subject to the terms and conditions of
74
- this License, each Contributor hereby grants to You a perpetual,
75
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
- (except as stated in this section) patent license to make, have made,
77
- use, offer to sell, sell, import, and otherwise transfer the Work,
78
- where such license applies only to those patent claims licensable
79
- by such Contributor that are necessarily infringed by their
80
- Contribution(s) alone or by combination of their Contribution(s)
81
- with the Work to which such Contribution(s) was submitted. If You
82
- institute patent litigation against any entity (including a
83
- cross-claim or counterclaim in a lawsuit) alleging that the Work
84
- or a Contribution incorporated within the Work constitutes direct
85
- or contributory patent infringement, then any patent licenses
86
- granted to You under this License for that Work shall terminate
87
- as of the date such litigation is filed.
88
-
89
- 4. Redistribution. You may reproduce and distribute copies of the
90
- Work or Derivative Works thereof in any medium, with or without
91
- modifications, and in Source or Object form, provided that You
92
- meet the following conditions:
93
-
94
- (a) You must give any other recipients of the Work or
95
- Derivative Works a copy of this License; and
96
-
97
- (b) You must cause any modified files to carry prominent notices
98
- stating that You changed the files; and
99
-
100
- (c) You must retain, in the Source form of any Derivative Works
101
- that You distribute, all copyright, patent, trademark, and
102
- attribution notices from the Source form of the Work,
103
- excluding those notices that do not pertain to any part of
104
- the Derivative Works; and
105
-
106
- (d) If the Work includes a "NOTICE" text file as part of its
107
- distribution, then any Derivative Works that You distribute must
108
- include a readable copy of the attribution notices contained
109
- within such NOTICE file, excluding those notices that do not
110
- pertain to any part of the Derivative Works, in at least one
111
- of the following places: within a NOTICE text file distributed
112
- as part of the Derivative Works; within the Source form or
113
- documentation, if provided along with the Derivative Works; or,
114
- within a display generated by the Derivative Works, if and
115
- wherever such third-party notices normally appear. The contents
116
- of the NOTICE file are for informational purposes only and
117
- do not modify the License. You may add Your own attribution
118
- notices within Derivative Works that You distribute, alongside
119
- or as an addendum to the NOTICE text from the Work, provided
120
- that such additional attribution notices cannot be construed
121
- as modifying the License.
122
-
123
- You may add Your own copyright statement to Your modifications and
124
- may provide additional or different license terms and conditions
125
- for use, reproduction, or distribution of Your modifications, or
126
- for any such Derivative Works as a whole, provided Your use,
127
- reproduction, and distribution of the Work otherwise complies with
128
- the conditions stated in this License.
129
-
130
- 5. Submission of Contributions. Unless You explicitly state otherwise,
131
- any Contribution intentionally submitted for inclusion in the Work
132
- by You to the Licensor shall be under the terms and conditions of
133
- this License, without any additional terms or conditions.
134
- Notwithstanding the above, nothing herein shall supersede or modify
135
- the terms of any separate license agreement you may have executed
136
- with Licensor regarding such Contributions.
137
-
138
- 6. Trademarks. This License does not grant permission to use the trade
139
- names, trademarks, service marks, or product names of the Licensor,
140
- except as required for reasonable and customary use in describing the
141
- origin of the Work and reproducing the content of the NOTICE file.
142
-
143
- 7. Disclaimer of Warranty. Unless required by applicable law or
144
- agreed to in writing, Licensor provides the Work (and each
145
- Contributor provides its Contributions) on an "AS IS" BASIS,
146
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
- implied, including, without limitation, any warranties or conditions
148
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
- PARTICULAR PURPOSE. You are solely responsible for determining the
150
- appropriateness of using or redistributing the Work and assume any
151
- risks associated with Your exercise of permissions under this License.
152
-
153
- 8. Limitation of Liability. In no event and under no legal theory,
154
- whether in tort (including negligence), contract, or otherwise,
155
- unless required by applicable law (such as deliberate and grossly
156
- negligent acts) or agreed to in writing, shall any Contributor be
157
- liable to You for damages, including any direct, indirect, special,
158
- incidental, or consequential damages of any character arising as a
159
- result of this License or out of the use or inability to use the
160
- Work (including but not limited to damages for loss of goodwill,
161
- work stoppage, computer failure or malfunction, or any and all
162
- other commercial damages or losses), even if such Contributor
163
- has been advised of the possibility of such damages.
164
-
165
- 9. Accepting Warranty or Additional Liability. While redistributing
166
- the Work or Derivative Works thereof, You may choose to offer,
167
- and charge a fee for, acceptance of support, warranty, indemnity,
168
- or other liability obligations and/or rights consistent with this
169
- License. However, in accepting such obligations, You may act only
170
- on Your own behalf and on Your sole responsibility, not on behalf
171
- of any other Contributor, and only if You agree to indemnify,
172
- defend, and hold each Contributor harmless for any liability
173
- incurred by, or claims asserted against, such Contributor by reason
174
- of your accepting any such warranty or additional liability.
175
-
176
- END OF TERMS AND CONDITIONS
177
-
178
- APPENDIX: How to apply the Apache License to your work.
179
-
180
- To apply the Apache License to your work, attach the following
181
- boilerplate notice, with the fields enclosed by brackets "{}"
182
- replaced with your own identifying information. (Don't include
183
- the brackets!) The text should be enclosed in the appropriate
184
- comment syntax for the file format. We also recommend that a
185
- file or class name and description of purpose be included on the
186
- same "printed page" as the copyright notice for easier
187
- identification within third-party archives.
188
-
189
- Copyright 2021-present NAVER Corp.
190
-
191
- Licensed under the Apache License, Version 2.0 (the "License");
192
- you may not use this file except in compliance with the License.
193
- You may obtain a copy of the License at
194
-
195
- http://www.apache.org/licenses/LICENSE-2.0
196
-
197
- Unless required by applicable law or agreed to in writing, software
198
- distributed under the License is distributed on an "AS IS" BASIS,
199
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
- See the License for the specific language governing permissions and
201
- limitations under the License.
ControlNet/annotator/mlsd/__init__.py DELETED
@@ -1,43 +0,0 @@
1
- # MLSD Line Detection
2
- # From https://github.com/navervision/mlsd
3
- # Apache-2.0 license
4
-
5
- import cv2
6
- import numpy as np
7
- import torch
8
- import os
9
-
10
- from einops import rearrange
11
- from .models.mbv2_mlsd_tiny import MobileV2_MLSD_Tiny
12
- from .models.mbv2_mlsd_large import MobileV2_MLSD_Large
13
- from .utils import pred_lines
14
-
15
- from annotator.util import annotator_ckpts_path
16
-
17
-
18
- remote_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/mlsd_large_512_fp32.pth"
19
-
20
-
21
- class MLSDdetector:
22
- def __init__(self):
23
- model_path = os.path.join(annotator_ckpts_path, "mlsd_large_512_fp32.pth")
24
- if not os.path.exists(model_path):
25
- from basicsr.utils.download_util import load_file_from_url
26
- load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path)
27
- model = MobileV2_MLSD_Large()
28
- model.load_state_dict(torch.load(model_path), strict=True)
29
- self.model = model.cuda().eval()
30
-
31
- def __call__(self, input_image, thr_v, thr_d):
32
- assert input_image.ndim == 3
33
- img = input_image
34
- img_output = np.zeros_like(img)
35
- try:
36
- with torch.no_grad():
37
- lines = pred_lines(img, self.model, [img.shape[0], img.shape[1]], thr_v, thr_d)
38
- for line in lines:
39
- x_start, y_start, x_end, y_end = [int(val) for val in line]
40
- cv2.line(img_output, (x_start, y_start), (x_end, y_end), [255, 255, 255], 1)
41
- except Exception as e:
42
- pass
43
- return img_output[:, :, 0]
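
A hedged usage sketch for MLSDdetector as deleted here. Assumptions: the script runs from the ControlNet repository root so `annotator` is importable, a CUDA device is present (the constructor calls .cuda()), basicsr is installed for the checkpoint download, and `input.png` is a placeholder path. thr_v and thr_d are forwarded to pred_lines as the line score and distance thresholds:

```python
import cv2

# Assumption: running from the ControlNet repo root so `annotator` is importable.
from annotator.mlsd import MLSDdetector

apply_mlsd = MLSDdetector()                  # fetches mlsd_large_512_fp32.pth on first use

img = cv2.imread("input.png")                # HWC uint8 image (placeholder path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)   # ControlNet feeds RGB images to the annotators

line_map = apply_mlsd(img, thr_v=0.1, thr_d=0.1)
print(line_map.shape, line_map.dtype)        # (H, W) uint8 line drawing, white lines on black
```
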
ControlNet/annotator/mlsd/models/mbv2_mlsd_large.py DELETED
@@ -1,292 +0,0 @@
1
- import os
2
- import sys
3
- import torch
4
- import torch.nn as nn
5
- import torch.utils.model_zoo as model_zoo
6
- from torch.nn import functional as F
7
-
8
-
9
- class BlockTypeA(nn.Module):
10
- def __init__(self, in_c1, in_c2, out_c1, out_c2, upscale = True):
11
- super(BlockTypeA, self).__init__()
12
- self.conv1 = nn.Sequential(
13
- nn.Conv2d(in_c2, out_c2, kernel_size=1),
14
- nn.BatchNorm2d(out_c2),
15
- nn.ReLU(inplace=True)
16
- )
17
- self.conv2 = nn.Sequential(
18
- nn.Conv2d(in_c1, out_c1, kernel_size=1),
19
- nn.BatchNorm2d(out_c1),
20
- nn.ReLU(inplace=True)
21
- )
22
- self.upscale = upscale
23
-
24
- def forward(self, a, b):
25
- b = self.conv1(b)
26
- a = self.conv2(a)
27
- if self.upscale:
28
- b = F.interpolate(b, scale_factor=2.0, mode='bilinear', align_corners=True)
29
- return torch.cat((a, b), dim=1)
30
-
31
-
32
- class BlockTypeB(nn.Module):
33
- def __init__(self, in_c, out_c):
34
- super(BlockTypeB, self).__init__()
35
- self.conv1 = nn.Sequential(
36
- nn.Conv2d(in_c, in_c, kernel_size=3, padding=1),
37
- nn.BatchNorm2d(in_c),
38
- nn.ReLU()
39
- )
40
- self.conv2 = nn.Sequential(
41
- nn.Conv2d(in_c, out_c, kernel_size=3, padding=1),
42
- nn.BatchNorm2d(out_c),
43
- nn.ReLU()
44
- )
45
-
46
- def forward(self, x):
47
- x = self.conv1(x) + x
48
- x = self.conv2(x)
49
- return x
50
-
51
- class BlockTypeC(nn.Module):
52
- def __init__(self, in_c, out_c):
53
- super(BlockTypeC, self).__init__()
54
- self.conv1 = nn.Sequential(
55
- nn.Conv2d(in_c, in_c, kernel_size=3, padding=5, dilation=5),
56
- nn.BatchNorm2d(in_c),
57
- nn.ReLU()
58
- )
59
- self.conv2 = nn.Sequential(
60
- nn.Conv2d(in_c, in_c, kernel_size=3, padding=1),
61
- nn.BatchNorm2d(in_c),
62
- nn.ReLU()
63
- )
64
- self.conv3 = nn.Conv2d(in_c, out_c, kernel_size=1)
65
-
66
- def forward(self, x):
67
- x = self.conv1(x)
68
- x = self.conv2(x)
69
- x = self.conv3(x)
70
- return x
71
-
72
- def _make_divisible(v, divisor, min_value=None):
73
- """
74
- This function is taken from the original tf repo.
75
- It ensures that all layers have a channel number that is divisible by 8
76
- It can be seen here:
77
- https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
78
- :param v:
79
- :param divisor:
80
- :param min_value:
81
- :return:
82
- """
83
- if min_value is None:
84
- min_value = divisor
85
- new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
86
- # Make sure that round down does not go down by more than 10%.
87
- if new_v < 0.9 * v:
88
- new_v += divisor
89
- return new_v
90
-
91
-
92
- class ConvBNReLU(nn.Sequential):
93
- def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
94
- self.channel_pad = out_planes - in_planes
95
- self.stride = stride
96
- #padding = (kernel_size - 1) // 2
97
-
98
- # TFLite uses slightly different padding than PyTorch
99
- if stride == 2:
100
- padding = 0
101
- else:
102
- padding = (kernel_size - 1) // 2
103
-
104
- super(ConvBNReLU, self).__init__(
105
- nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),
106
- nn.BatchNorm2d(out_planes),
107
- nn.ReLU6(inplace=True)
108
- )
109
- self.max_pool = nn.MaxPool2d(kernel_size=stride, stride=stride)
110
-
111
-
112
- def forward(self, x):
113
- # TFLite uses different padding
114
- if self.stride == 2:
115
- x = F.pad(x, (0, 1, 0, 1), "constant", 0)
116
- #print(x.shape)
117
-
118
- for module in self:
119
- if not isinstance(module, nn.MaxPool2d):
120
- x = module(x)
121
- return x
122
-
123
-
124
- class InvertedResidual(nn.Module):
125
- def __init__(self, inp, oup, stride, expand_ratio):
126
- super(InvertedResidual, self).__init__()
127
- self.stride = stride
128
- assert stride in [1, 2]
129
-
130
- hidden_dim = int(round(inp * expand_ratio))
131
- self.use_res_connect = self.stride == 1 and inp == oup
132
-
133
- layers = []
134
- if expand_ratio != 1:
135
- # pw
136
- layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
137
- layers.extend([
138
- # dw
139
- ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),
140
- # pw-linear
141
- nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
142
- nn.BatchNorm2d(oup),
143
- ])
144
- self.conv = nn.Sequential(*layers)
145
-
146
- def forward(self, x):
147
- if self.use_res_connect:
148
- return x + self.conv(x)
149
- else:
150
- return self.conv(x)
151
-
152
-
153
- class MobileNetV2(nn.Module):
154
- def __init__(self, pretrained=True):
155
- """
156
- MobileNet V2 main class
157
- Args:
158
- num_classes (int): Number of classes
159
- width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
160
- inverted_residual_setting: Network structure
161
- round_nearest (int): Round the number of channels in each layer to be a multiple of this number
162
- Set to 1 to turn off rounding
163
- block: Module specifying inverted residual building block for mobilenet
164
- """
165
- super(MobileNetV2, self).__init__()
166
-
167
- block = InvertedResidual
168
- input_channel = 32
169
- last_channel = 1280
170
- width_mult = 1.0
171
- round_nearest = 8
172
-
173
- inverted_residual_setting = [
174
- # t, c, n, s
175
- [1, 16, 1, 1],
176
- [6, 24, 2, 2],
177
- [6, 32, 3, 2],
178
- [6, 64, 4, 2],
179
- [6, 96, 3, 1],
180
- #[6, 160, 3, 2],
181
- #[6, 320, 1, 1],
182
- ]
183
-
184
- # only check the first element, assuming user knows t,c,n,s are required
185
- if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
186
- raise ValueError("inverted_residual_setting should be non-empty "
187
- "or a 4-element list, got {}".format(inverted_residual_setting))
188
-
189
- # building first layer
190
- input_channel = _make_divisible(input_channel * width_mult, round_nearest)
191
- self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
192
- features = [ConvBNReLU(4, input_channel, stride=2)]
193
- # building inverted residual blocks
194
- for t, c, n, s in inverted_residual_setting:
195
- output_channel = _make_divisible(c * width_mult, round_nearest)
196
- for i in range(n):
197
- stride = s if i == 0 else 1
198
- features.append(block(input_channel, output_channel, stride, expand_ratio=t))
199
- input_channel = output_channel
200
-
201
- self.features = nn.Sequential(*features)
202
- self.fpn_selected = [1, 3, 6, 10, 13]
203
- # weight initialization
204
- for m in self.modules():
205
- if isinstance(m, nn.Conv2d):
206
- nn.init.kaiming_normal_(m.weight, mode='fan_out')
207
- if m.bias is not None:
208
- nn.init.zeros_(m.bias)
209
- elif isinstance(m, nn.BatchNorm2d):
210
- nn.init.ones_(m.weight)
211
- nn.init.zeros_(m.bias)
212
- elif isinstance(m, nn.Linear):
213
- nn.init.normal_(m.weight, 0, 0.01)
214
- nn.init.zeros_(m.bias)
215
- if pretrained:
216
- self._load_pretrained_model()
217
-
218
- def _forward_impl(self, x):
219
- # This exists since TorchScript doesn't support inheritance, so the superclass method
220
- # (this one) needs to have a name other than `forward` that can be accessed in a subclass
221
- fpn_features = []
222
- for i, f in enumerate(self.features):
223
- if i > self.fpn_selected[-1]:
224
- break
225
- x = f(x)
226
- if i in self.fpn_selected:
227
- fpn_features.append(x)
228
-
229
- c1, c2, c3, c4, c5 = fpn_features
230
- return c1, c2, c3, c4, c5
231
-
232
-
233
- def forward(self, x):
234
- return self._forward_impl(x)
235
-
236
- def _load_pretrained_model(self):
237
- pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/mobilenet_v2-b0353104.pth')
238
- model_dict = {}
239
- state_dict = self.state_dict()
240
- for k, v in pretrain_dict.items():
241
- if k in state_dict:
242
- model_dict[k] = v
243
- state_dict.update(model_dict)
244
- self.load_state_dict(state_dict)
245
-
246
-
247
- class MobileV2_MLSD_Large(nn.Module):
248
- def __init__(self):
249
- super(MobileV2_MLSD_Large, self).__init__()
250
-
251
- self.backbone = MobileNetV2(pretrained=False)
252
- ## A, B
253
- self.block15 = BlockTypeA(in_c1= 64, in_c2= 96,
254
- out_c1= 64, out_c2=64,
255
- upscale=False)
256
- self.block16 = BlockTypeB(128, 64)
257
-
258
- ## A, B
259
- self.block17 = BlockTypeA(in_c1 = 32, in_c2 = 64,
260
- out_c1= 64, out_c2= 64)
261
- self.block18 = BlockTypeB(128, 64)
262
-
263
- ## A, B
264
- self.block19 = BlockTypeA(in_c1=24, in_c2=64,
265
- out_c1=64, out_c2=64)
266
- self.block20 = BlockTypeB(128, 64)
267
-
268
- ## A, B, C
269
- self.block21 = BlockTypeA(in_c1=16, in_c2=64,
270
- out_c1=64, out_c2=64)
271
- self.block22 = BlockTypeB(128, 64)
272
-
273
- self.block23 = BlockTypeC(64, 16)
274
-
275
- def forward(self, x):
276
- c1, c2, c3, c4, c5 = self.backbone(x)
277
-
278
- x = self.block15(c4, c5)
279
- x = self.block16(x)
280
-
281
- x = self.block17(c3, x)
282
- x = self.block18(x)
283
-
284
- x = self.block19(c2, x)
285
- x = self.block20(x)
286
-
287
- x = self.block21(c1, x)
288
- x = self.block22(x)
289
- x = self.block23(x)
290
- x = x[:, 7:, :, :]
291
-
292
- return x
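
A quick, CPU-only shape check for MobileV2_MLSD_Large as defined above (the backbone is constructed with pretrained=False, so nothing is downloaded). The first ConvBNReLU expects a 4-channel input, and the head keeps the last 9 of 16 output maps at half the input resolution; the 512x512 size below is just an example:

```python
import torch

# Assumption: running from the ControlNet repo root so the models package is importable.
from annotator.mlsd.models.mbv2_mlsd_large import MobileV2_MLSD_Large

model = MobileV2_MLSD_Large().eval()

x = torch.randn(1, 4, 512, 512)      # 4 channels, as required by ConvBNReLU(4, ...)
with torch.no_grad():
    y = model(x)
print(y.shape)                       # expected: torch.Size([1, 9, 256, 256])
```
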
ControlNet/annotator/mlsd/models/mbv2_mlsd_tiny.py DELETED
@@ -1,275 +0,0 @@
1
- import os
2
- import sys
3
- import torch
4
- import torch.nn as nn
5
- import torch.utils.model_zoo as model_zoo
6
- from torch.nn import functional as F
7
-
8
-
9
- class BlockTypeA(nn.Module):
10
- def __init__(self, in_c1, in_c2, out_c1, out_c2, upscale = True):
11
- super(BlockTypeA, self).__init__()
12
- self.conv1 = nn.Sequential(
13
- nn.Conv2d(in_c2, out_c2, kernel_size=1),
14
- nn.BatchNorm2d(out_c2),
15
- nn.ReLU(inplace=True)
16
- )
17
- self.conv2 = nn.Sequential(
18
- nn.Conv2d(in_c1, out_c1, kernel_size=1),
19
- nn.BatchNorm2d(out_c1),
20
- nn.ReLU(inplace=True)
21
- )
22
- self.upscale = upscale
23
-
24
- def forward(self, a, b):
25
- b = self.conv1(b)
26
- a = self.conv2(a)
27
- b = F.interpolate(b, scale_factor=2.0, mode='bilinear', align_corners=True)
28
- return torch.cat((a, b), dim=1)
29
-
30
-
31
- class BlockTypeB(nn.Module):
32
- def __init__(self, in_c, out_c):
33
- super(BlockTypeB, self).__init__()
34
- self.conv1 = nn.Sequential(
35
- nn.Conv2d(in_c, in_c, kernel_size=3, padding=1),
36
- nn.BatchNorm2d(in_c),
37
- nn.ReLU()
38
- )
39
- self.conv2 = nn.Sequential(
40
- nn.Conv2d(in_c, out_c, kernel_size=3, padding=1),
41
- nn.BatchNorm2d(out_c),
42
- nn.ReLU()
43
- )
44
-
45
- def forward(self, x):
46
- x = self.conv1(x) + x
47
- x = self.conv2(x)
48
- return x
49
-
50
- class BlockTypeC(nn.Module):
51
- def __init__(self, in_c, out_c):
52
- super(BlockTypeC, self).__init__()
53
- self.conv1 = nn.Sequential(
54
- nn.Conv2d(in_c, in_c, kernel_size=3, padding=5, dilation=5),
55
- nn.BatchNorm2d(in_c),
56
- nn.ReLU()
57
- )
58
- self.conv2 = nn.Sequential(
59
- nn.Conv2d(in_c, in_c, kernel_size=3, padding=1),
60
- nn.BatchNorm2d(in_c),
61
- nn.ReLU()
62
- )
63
- self.conv3 = nn.Conv2d(in_c, out_c, kernel_size=1)
64
-
65
- def forward(self, x):
66
- x = self.conv1(x)
67
- x = self.conv2(x)
68
- x = self.conv3(x)
69
- return x
70
-
71
- def _make_divisible(v, divisor, min_value=None):
72
- """
73
- This function is taken from the original tf repo.
74
- It ensures that all layers have a channel number that is divisible by 8
75
- It can be seen here:
76
- https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
77
- :param v:
78
- :param divisor:
79
- :param min_value:
80
- :return:
81
- """
82
- if min_value is None:
83
- min_value = divisor
84
- new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
85
- # Make sure that round down does not go down by more than 10%.
86
- if new_v < 0.9 * v:
87
- new_v += divisor
88
- return new_v
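The helper above snaps a channel count to the nearest multiple of divisor while refusing to shrink it by more than 10%. Evaluating the function exactly as written:

    _make_divisible(32 * 1.0, 8)   # -> 32
    _make_divisible(10, 8)         # -> 16: rounding down to 8 would lose more than 10%, so it rounds up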
89
-
90
-
91
- class ConvBNReLU(nn.Sequential):
92
- def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
93
- self.channel_pad = out_planes - in_planes
94
- self.stride = stride
95
- #padding = (kernel_size - 1) // 2
96
-
97
- # TFLite uses slightly different padding than PyTorch
98
- if stride == 2:
99
- padding = 0
100
- else:
101
- padding = (kernel_size - 1) // 2
102
-
103
- super(ConvBNReLU, self).__init__(
104
- nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),
105
- nn.BatchNorm2d(out_planes),
106
- nn.ReLU6(inplace=True)
107
- )
108
- self.max_pool = nn.MaxPool2d(kernel_size=stride, stride=stride)
109
-
110
-
111
- def forward(self, x):
112
- # TFLite uses different padding
113
- if self.stride == 2:
114
- x = F.pad(x, (0, 1, 0, 1), "constant", 0)
115
- #print(x.shape)
116
-
117
- for module in self:
118
- if not isinstance(module, nn.MaxPool2d):
119
- x = module(x)
120
- return x
121
-
122
-
123
- class InvertedResidual(nn.Module):
124
- def __init__(self, inp, oup, stride, expand_ratio):
125
- super(InvertedResidual, self).__init__()
126
- self.stride = stride
127
- assert stride in [1, 2]
128
-
129
- hidden_dim = int(round(inp * expand_ratio))
130
- self.use_res_connect = self.stride == 1 and inp == oup
131
-
132
- layers = []
133
- if expand_ratio != 1:
134
- # pw
135
- layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
136
- layers.extend([
137
- # dw
138
- ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),
139
- # pw-linear
140
- nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
141
- nn.BatchNorm2d(oup),
142
- ])
143
- self.conv = nn.Sequential(*layers)
144
-
145
- def forward(self, x):
146
- if self.use_res_connect:
147
- return x + self.conv(x)
148
- else:
149
- return self.conv(x)
150
-
151
-
152
- class MobileNetV2(nn.Module):
153
- def __init__(self, pretrained=True):
154
- """
155
- MobileNet V2 main class
156
- Args:
157
- num_classes (int): Number of classes
158
- width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
159
- inverted_residual_setting: Network structure
160
- round_nearest (int): Round the number of channels in each layer to be a multiple of this number
161
- Set to 1 to turn off rounding
162
- block: Module specifying inverted residual building block for mobilenet
163
- """
164
- super(MobileNetV2, self).__init__()
165
-
166
- block = InvertedResidual
167
- input_channel = 32
168
- last_channel = 1280
169
- width_mult = 1.0
170
- round_nearest = 8
171
-
172
- inverted_residual_setting = [
173
- # t, c, n, s
174
- [1, 16, 1, 1],
175
- [6, 24, 2, 2],
176
- [6, 32, 3, 2],
177
- [6, 64, 4, 2],
178
- #[6, 96, 3, 1],
179
- #[6, 160, 3, 2],
180
- #[6, 320, 1, 1],
181
- ]
182
-
183
- # only check the first element, assuming user knows t,c,n,s are required
184
- if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
185
- raise ValueError("inverted_residual_setting should be non-empty "
186
- "or a 4-element list, got {}".format(inverted_residual_setting))
187
-
188
- # building first layer
189
- input_channel = _make_divisible(input_channel * width_mult, round_nearest)
190
- self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
191
- features = [ConvBNReLU(4, input_channel, stride=2)]
192
- # building inverted residual blocks
193
- for t, c, n, s in inverted_residual_setting:
194
- output_channel = _make_divisible(c * width_mult, round_nearest)
195
- for i in range(n):
196
- stride = s if i == 0 else 1
197
- features.append(block(input_channel, output_channel, stride, expand_ratio=t))
198
- input_channel = output_channel
199
- self.features = nn.Sequential(*features)
200
-
201
- self.fpn_selected = [3, 6, 10]
202
- # weight initialization
203
- for m in self.modules():
204
- if isinstance(m, nn.Conv2d):
205
- nn.init.kaiming_normal_(m.weight, mode='fan_out')
206
- if m.bias is not None:
207
- nn.init.zeros_(m.bias)
208
- elif isinstance(m, nn.BatchNorm2d):
209
- nn.init.ones_(m.weight)
210
- nn.init.zeros_(m.bias)
211
- elif isinstance(m, nn.Linear):
212
- nn.init.normal_(m.weight, 0, 0.01)
213
- nn.init.zeros_(m.bias)
214
-
215
- #if pretrained:
216
- # self._load_pretrained_model()
217
-
218
- def _forward_impl(self, x):
219
- # This exists since TorchScript doesn't support inheritance, so the superclass method
220
- # (this one) needs to have a name other than `forward` that can be accessed in a subclass
221
- fpn_features = []
222
- for i, f in enumerate(self.features):
223
- if i > self.fpn_selected[-1]:
224
- break
225
- x = f(x)
226
- if i in self.fpn_selected:
227
- fpn_features.append(x)
228
-
229
- c2, c3, c4 = fpn_features
230
- return c2, c3, c4
231
-
232
-
233
- def forward(self, x):
234
- return self._forward_impl(x)
235
-
236
- def _load_pretrained_model(self):
237
- pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/mobilenet_v2-b0353104.pth')
238
- model_dict = {}
239
- state_dict = self.state_dict()
240
- for k, v in pretrain_dict.items():
241
- if k in state_dict:
242
- model_dict[k] = v
243
- state_dict.update(model_dict)
244
- self.load_state_dict(state_dict)
245
-
246
-
247
- class MobileV2_MLSD_Tiny(nn.Module):
248
- def __init__(self):
249
- super(MobileV2_MLSD_Tiny, self).__init__()
250
-
251
- self.backbone = MobileNetV2(pretrained=True)
252
-
253
- self.block12 = BlockTypeA(in_c1= 32, in_c2= 64,
254
- out_c1= 64, out_c2=64)
255
- self.block13 = BlockTypeB(128, 64)
256
-
257
- self.block14 = BlockTypeA(in_c1 = 24, in_c2 = 64,
258
- out_c1= 32, out_c2= 32)
259
- self.block15 = BlockTypeB(64, 64)
260
-
261
- self.block16 = BlockTypeC(64, 16)
262
-
263
- def forward(self, x):
264
- c2, c3, c4 = self.backbone(x)
265
-
266
- x = self.block12(c3, c4)
267
- x = self.block13(x)
268
- x = self.block14(c2, x)
269
- x = self.block15(x)
270
- x = self.block16(x)
271
- x = x[:, 7:, :, :]
272
- #print(x.shape)
273
- x = F.interpolate(x, scale_factor=2.0, mode='bilinear', align_corners=True)
274
-
275
- return x
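Usage note: unlike the large variant, MobileV2_MLSD_Tiny bilinearly upsamples its 9-channel output by a factor of two before returning it, and its BlockTypeA stores an upscale flag that forward never reads (the interpolation always runs). A minimal smoke-test sketch under the same assumed import-path convention as above:

    import torch
    from ControlNet.annotator.mlsd.models.mbv2_mlsd_tiny import MobileV2_MLSD_Tiny  # assumed import path

    model = MobileV2_MLSD_Tiny().eval()
    with torch.no_grad():
        out = model(torch.randn(1, 4, 512, 512))  # RGB plus a ones plane, 4 channels
    print(out.shape)  # 9 channels at twice the resolution of the pre-interpolation feature map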
ControlNet/annotator/mlsd/utils.py DELETED
@@ -1,580 +0,0 @@
1
- '''
2
- modified by lihaoweicv
3
- pytorch version
4
- '''
5
-
6
- '''
7
- M-LSD
8
- Copyright 2021-present NAVER Corp.
9
- Apache License v2.0
10
- '''
11
-
12
- import os
13
- import numpy as np
14
- import cv2
15
- import torch
16
- from torch.nn import functional as F
17
-
18
-
19
- def deccode_output_score_and_ptss(tpMap, topk_n = 200, ksize = 5):
20
- '''
21
- tpMap:
22
- center: tpMap[1, 0, :, :]
23
- displacement: tpMap[1, 1:5, :, :]
24
- '''
25
- b, c, h, w = tpMap.shape
26
- assert b==1, 'only support bsize==1'
27
- displacement = tpMap[:, 1:5, :, :][0]
28
- center = tpMap[:, 0, :, :]
29
- heat = torch.sigmoid(center)
30
- hmax = F.max_pool2d( heat, (ksize, ksize), stride=1, padding=(ksize-1)//2)
31
- keep = (hmax == heat).float()
32
- heat = heat * keep
33
- heat = heat.reshape(-1, )
34
-
35
- scores, indices = torch.topk(heat, topk_n, dim=-1, largest=True)
36
- yy = torch.floor_divide(indices, w).unsqueeze(-1)
37
- xx = torch.fmod(indices, w).unsqueeze(-1)
38
- ptss = torch.cat((yy, xx),dim=-1)
39
-
40
- ptss = ptss.detach().cpu().numpy()
41
- scores = scores.detach().cpu().numpy()
42
- displacement = displacement.detach().cpu().numpy()
43
- displacement = displacement.transpose((1,2,0))
44
- return ptss, scores, displacement
45
-
46
-
47
- def pred_lines(image, model,
48
- input_shape=[512, 512],
49
- score_thr=0.10,
50
- dist_thr=20.0):
51
- h, w, _ = image.shape
52
- h_ratio, w_ratio = [h / input_shape[0], w / input_shape[1]]
53
-
54
- resized_image = np.concatenate([cv2.resize(image, (input_shape[1], input_shape[0]), interpolation=cv2.INTER_AREA),
55
- np.ones([input_shape[0], input_shape[1], 1])], axis=-1)
56
-
57
- resized_image = resized_image.transpose((2,0,1))
58
- batch_image = np.expand_dims(resized_image, axis=0).astype('float32')
59
- batch_image = (batch_image / 127.5) - 1.0
60
-
61
- batch_image = torch.from_numpy(batch_image).float().cuda()
62
- outputs = model(batch_image)
63
- pts, pts_score, vmap = deccode_output_score_and_ptss(outputs, 200, 3)
64
- start = vmap[:, :, :2]
65
- end = vmap[:, :, 2:]
66
- dist_map = np.sqrt(np.sum((start - end) ** 2, axis=-1))
67
-
68
- segments_list = []
69
- for center, score in zip(pts, pts_score):
70
- y, x = center
71
- distance = dist_map[y, x]
72
- if score > score_thr and distance > dist_thr:
73
- disp_x_start, disp_y_start, disp_x_end, disp_y_end = vmap[y, x, :]
74
- x_start = x + disp_x_start
75
- y_start = y + disp_y_start
76
- x_end = x + disp_x_end
77
- y_end = y + disp_y_end
78
- segments_list.append([x_start, y_start, x_end, y_end])
79
-
80
- lines = 2 * np.array(segments_list) # 256 > 512
81
- lines[:, 0] = lines[:, 0] * w_ratio
82
- lines[:, 1] = lines[:, 1] * h_ratio
83
- lines[:, 2] = lines[:, 2] * w_ratio
84
- lines[:, 3] = lines[:, 3] * h_ratio
85
-
86
- return lines
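Usage note: pred_lines moves the batch to CUDA internally, so as written it needs a GPU, and it returns an N x 4 array of (x_start, y_start, x_end, y_end) segments already rescaled to the original image size. A sketch of a typical call; the checkpoint file name below is an assumption, not something this diff guarantees:

    import cv2
    import torch
    from ControlNet.annotator.mlsd.models.mbv2_mlsd_large import MobileV2_MLSD_Large  # assumed import path
    from ControlNet.annotator.mlsd.utils import pred_lines                            # assumed import path

    model = MobileV2_MLSD_Large().cuda().eval()
    model.load_state_dict(torch.load('mlsd_large_512_fp32.pth'), strict=True)  # assumed checkpoint name

    img = cv2.imread('input.jpg')                 # H x W x 3
    lines = pred_lines(img, model, [512, 512], score_thr=0.1, dist_thr=20.0)
    for x_start, y_start, x_end, y_end in lines:
        cv2.line(img, (int(x_start), int(y_start)), (int(x_end), int(y_end)), (0, 255, 0), 1)
    cv2.imwrite('lines.jpg', img)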
87
-
88
-
89
- def pred_squares(image,
90
- model,
91
- input_shape=[512, 512],
92
- params={'score': 0.06,
93
- 'outside_ratio': 0.28,
94
- 'inside_ratio': 0.45,
95
- 'w_overlap': 0.0,
96
- 'w_degree': 1.95,
97
- 'w_length': 0.0,
98
- 'w_area': 1.86,
99
- 'w_center': 0.14}):
100
- '''
101
- shape = [height, width]
102
- '''
103
- h, w, _ = image.shape
104
- original_shape = [h, w]
105
-
106
- resized_image = np.concatenate([cv2.resize(image, (input_shape[0], input_shape[1]), interpolation=cv2.INTER_AREA),
107
- np.ones([input_shape[0], input_shape[1], 1])], axis=-1)
108
- resized_image = resized_image.transpose((2, 0, 1))
109
- batch_image = np.expand_dims(resized_image, axis=0).astype('float32')
110
- batch_image = (batch_image / 127.5) - 1.0
111
-
112
- batch_image = torch.from_numpy(batch_image).float().cuda()
113
- outputs = model(batch_image)
114
-
115
- pts, pts_score, vmap = deccode_output_score_and_ptss(outputs, 200, 3)
116
- start = vmap[:, :, :2] # (x, y)
117
- end = vmap[:, :, 2:] # (x, y)
118
- dist_map = np.sqrt(np.sum((start - end) ** 2, axis=-1))
119
-
120
- junc_list = []
121
- segments_list = []
122
- for junc, score in zip(pts, pts_score):
123
- y, x = junc
124
- distance = dist_map[y, x]
125
- if score > params['score'] and distance > 20.0:
126
- junc_list.append([x, y])
127
- disp_x_start, disp_y_start, disp_x_end, disp_y_end = vmap[y, x, :]
128
- d_arrow = 1.0
129
- x_start = x + d_arrow * disp_x_start
130
- y_start = y + d_arrow * disp_y_start
131
- x_end = x + d_arrow * disp_x_end
132
- y_end = y + d_arrow * disp_y_end
133
- segments_list.append([x_start, y_start, x_end, y_end])
134
-
135
- segments = np.array(segments_list)
136
-
137
- ####### post processing for squares
138
- # 1. get unique lines
139
- point = np.array([[0, 0]])
140
- point = point[0]
141
- start = segments[:, :2]
142
- end = segments[:, 2:]
143
- diff = start - end
144
- a = diff[:, 1]
145
- b = -diff[:, 0]
146
- c = a * start[:, 0] + b * start[:, 1]
147
-
148
- d = np.abs(a * point[0] + b * point[1] - c) / np.sqrt(a ** 2 + b ** 2 + 1e-10)
149
- theta = np.arctan2(diff[:, 0], diff[:, 1]) * 180 / np.pi
150
- theta[theta < 0.0] += 180
151
- hough = np.concatenate([d[:, None], theta[:, None]], axis=-1)
152
-
153
- d_quant = 1
154
- theta_quant = 2
155
- hough[:, 0] //= d_quant
156
- hough[:, 1] //= theta_quant
157
- _, indices, counts = np.unique(hough, axis=0, return_index=True, return_counts=True)
158
-
159
- acc_map = np.zeros([512 // d_quant + 1, 360 // theta_quant + 1], dtype='float32')
160
- idx_map = np.zeros([512 // d_quant + 1, 360 // theta_quant + 1], dtype='int32') - 1
161
- yx_indices = hough[indices, :].astype('int32')
162
- acc_map[yx_indices[:, 0], yx_indices[:, 1]] = counts
163
- idx_map[yx_indices[:, 0], yx_indices[:, 1]] = indices
164
-
165
- acc_map_np = acc_map
166
- # acc_map = acc_map[None, :, :, None]
167
- #
168
- # ### fast suppression using tensorflow op
169
- # acc_map = tf.constant(acc_map, dtype=tf.float32)
170
- # max_acc_map = tf.keras.layers.MaxPool2D(pool_size=(5, 5), strides=1, padding='same')(acc_map)
171
- # acc_map = acc_map * tf.cast(tf.math.equal(acc_map, max_acc_map), tf.float32)
172
- # flatten_acc_map = tf.reshape(acc_map, [1, -1])
173
- # topk_values, topk_indices = tf.math.top_k(flatten_acc_map, k=len(pts))
174
- # _, h, w, _ = acc_map.shape
175
- # y = tf.expand_dims(topk_indices // w, axis=-1)
176
- # x = tf.expand_dims(topk_indices % w, axis=-1)
177
- # yx = tf.concat([y, x], axis=-1)
178
-
179
- ### fast suppression using pytorch op
180
- acc_map = torch.from_numpy(acc_map_np).unsqueeze(0).unsqueeze(0)
181
- _,_, h, w = acc_map.shape
182
- max_acc_map = F.max_pool2d(acc_map,kernel_size=5, stride=1, padding=2)
183
- acc_map = acc_map * ( (acc_map == max_acc_map).float() )
184
- flatten_acc_map = acc_map.reshape([-1, ])
185
-
186
- scores, indices = torch.topk(flatten_acc_map, len(pts), dim=-1, largest=True)
187
- yy = torch.div(indices, w, rounding_mode='floor').unsqueeze(-1)
188
- xx = torch.fmod(indices, w).unsqueeze(-1)
189
- yx = torch.cat((yy, xx), dim=-1)
190
-
191
- yx = yx.detach().cpu().numpy()
192
-
193
- topk_values = scores.detach().cpu().numpy()
194
- indices = idx_map[yx[:, 0], yx[:, 1]]
195
- basis = 5 // 2
196
-
197
- merged_segments = []
198
- for yx_pt, max_indice, value in zip(yx, indices, topk_values):
199
- y, x = yx_pt
200
- if max_indice == -1 or value == 0:
201
- continue
202
- segment_list = []
203
- for y_offset in range(-basis, basis + 1):
204
- for x_offset in range(-basis, basis + 1):
205
- indice = idx_map[y + y_offset, x + x_offset]
206
- cnt = int(acc_map_np[y + y_offset, x + x_offset])
207
- if indice != -1:
208
- segment_list.append(segments[indice])
209
- if cnt > 1:
210
- check_cnt = 1
211
- current_hough = hough[indice]
212
- for new_indice, new_hough in enumerate(hough):
213
- if (current_hough == new_hough).all() and indice != new_indice:
214
- segment_list.append(segments[new_indice])
215
- check_cnt += 1
216
- if check_cnt == cnt:
217
- break
218
- group_segments = np.array(segment_list).reshape([-1, 2])
219
- sorted_group_segments = np.sort(group_segments, axis=0)
220
- x_min, y_min = sorted_group_segments[0, :]
221
- x_max, y_max = sorted_group_segments[-1, :]
222
-
223
- deg = theta[max_indice]
224
- if deg >= 90:
225
- merged_segments.append([x_min, y_max, x_max, y_min])
226
- else:
227
- merged_segments.append([x_min, y_min, x_max, y_max])
228
-
229
- # 2. get intersections
230
- new_segments = np.array(merged_segments) # (x1, y1, x2, y2)
231
- start = new_segments[:, :2] # (x1, y1)
232
- end = new_segments[:, 2:] # (x2, y2)
233
- new_centers = (start + end) / 2.0
234
- diff = start - end
235
- dist_segments = np.sqrt(np.sum(diff ** 2, axis=-1))
236
-
237
- # ax + by = c
238
- a = diff[:, 1]
239
- b = -diff[:, 0]
240
- c = a * start[:, 0] + b * start[:, 1]
241
- pre_det = a[:, None] * b[None, :]
242
- det = pre_det - np.transpose(pre_det)
243
-
244
- pre_inter_y = a[:, None] * c[None, :]
245
- inter_y = (pre_inter_y - np.transpose(pre_inter_y)) / (det + 1e-10)
246
- pre_inter_x = c[:, None] * b[None, :]
247
- inter_x = (pre_inter_x - np.transpose(pre_inter_x)) / (det + 1e-10)
248
- inter_pts = np.concatenate([inter_x[:, :, None], inter_y[:, :, None]], axis=-1).astype('int32')
249
-
250
- # 3. get corner information
251
- # 3.1 get distance
252
- '''
253
- dist_segments:
254
- | dist(0), dist(1), dist(2), ...|
255
- dist_inter_to_segment1:
256
- | dist(inter,0), dist(inter,0), dist(inter,0), ... |
257
- | dist(inter,1), dist(inter,1), dist(inter,1), ... |
258
- ...
259
- dist_inter_to_segment2:
260
- | dist(inter,0), dist(inter,1), dist(inter,2), ... |
261
- | dist(inter,0), dist(inter,1), dist(inter,2), ... |
262
- ...
263
- '''
264
-
265
- dist_inter_to_segment1_start = np.sqrt(
266
- np.sum(((inter_pts - start[:, None, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1]
267
- dist_inter_to_segment1_end = np.sqrt(
268
- np.sum(((inter_pts - end[:, None, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1]
269
- dist_inter_to_segment2_start = np.sqrt(
270
- np.sum(((inter_pts - start[None, :, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1]
271
- dist_inter_to_segment2_end = np.sqrt(
272
- np.sum(((inter_pts - end[None, :, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1]
273
-
274
- # sort ascending
275
- dist_inter_to_segment1 = np.sort(
276
- np.concatenate([dist_inter_to_segment1_start, dist_inter_to_segment1_end], axis=-1),
277
- axis=-1) # [n_batch, n_batch, 2]
278
- dist_inter_to_segment2 = np.sort(
279
- np.concatenate([dist_inter_to_segment2_start, dist_inter_to_segment2_end], axis=-1),
280
- axis=-1) # [n_batch, n_batch, 2]
281
-
282
- # 3.2 get degree
283
- inter_to_start = new_centers[:, None, :] - inter_pts
284
- deg_inter_to_start = np.arctan2(inter_to_start[:, :, 1], inter_to_start[:, :, 0]) * 180 / np.pi
285
- deg_inter_to_start[deg_inter_to_start < 0.0] += 360
286
- inter_to_end = new_centers[None, :, :] - inter_pts
287
- deg_inter_to_end = np.arctan2(inter_to_end[:, :, 1], inter_to_end[:, :, 0]) * 180 / np.pi
288
- deg_inter_to_end[deg_inter_to_end < 0.0] += 360
289
-
290
- '''
291
- B -- G
292
- | |
293
- C -- R
294
- B : blue / G: green / C: cyan / R: red
295
-
296
- 0 -- 1
297
- | |
298
- 3 -- 2
299
- '''
300
- # rename variables
301
- deg1_map, deg2_map = deg_inter_to_start, deg_inter_to_end
302
- # sort deg ascending
303
- deg_sort = np.sort(np.concatenate([deg1_map[:, :, None], deg2_map[:, :, None]], axis=-1), axis=-1)
304
-
305
- deg_diff_map = np.abs(deg1_map - deg2_map)
306
- # we only consider the smallest degree of intersect
307
- deg_diff_map[deg_diff_map > 180] = 360 - deg_diff_map[deg_diff_map > 180]
308
-
309
- # define available degree range
310
- deg_range = [60, 120]
311
-
312
- corner_dict = {corner_info: [] for corner_info in range(4)}
313
- inter_points = []
314
- for i in range(inter_pts.shape[0]):
315
- for j in range(i + 1, inter_pts.shape[1]):
316
- # i, j > line index, always i < j
317
- x, y = inter_pts[i, j, :]
318
- deg1, deg2 = deg_sort[i, j, :]
319
- deg_diff = deg_diff_map[i, j]
320
-
321
- check_degree = deg_diff > deg_range[0] and deg_diff < deg_range[1]
322
-
323
- outside_ratio = params['outside_ratio'] # over ratio >>> drop it!
324
- inside_ratio = params['inside_ratio'] # over ratio >>> drop it!
325
- check_distance = ((dist_inter_to_segment1[i, j, 1] >= dist_segments[i] and \
326
- dist_inter_to_segment1[i, j, 0] <= dist_segments[i] * outside_ratio) or \
327
- (dist_inter_to_segment1[i, j, 1] <= dist_segments[i] and \
328
- dist_inter_to_segment1[i, j, 0] <= dist_segments[i] * inside_ratio)) and \
329
- ((dist_inter_to_segment2[i, j, 1] >= dist_segments[j] and \
330
- dist_inter_to_segment2[i, j, 0] <= dist_segments[j] * outside_ratio) or \
331
- (dist_inter_to_segment2[i, j, 1] <= dist_segments[j] and \
332
- dist_inter_to_segment2[i, j, 0] <= dist_segments[j] * inside_ratio))
333
-
334
- if check_degree and check_distance:
335
- corner_info = None
336
-
337
- if (deg1 >= 0 and deg1 <= 45 and deg2 >= 45 and deg2 <= 120) or \
338
- (deg2 >= 315 and deg1 >= 45 and deg1 <= 120):
339
- corner_info, color_info = 0, 'blue'
340
- elif (deg1 >= 45 and deg1 <= 125 and deg2 >= 125 and deg2 <= 225):
341
- corner_info, color_info = 1, 'green'
342
- elif (deg1 >= 125 and deg1 <= 225 and deg2 >= 225 and deg2 <= 315):
343
- corner_info, color_info = 2, 'black'
344
- elif (deg1 >= 0 and deg1 <= 45 and deg2 >= 225 and deg2 <= 315) or \
345
- (deg2 >= 315 and deg1 >= 225 and deg1 <= 315):
346
- corner_info, color_info = 3, 'cyan'
347
- else:
348
- corner_info, color_info = 4, 'red' # we don't use it
349
- continue
350
-
351
- corner_dict[corner_info].append([x, y, i, j])
352
- inter_points.append([x, y])
353
-
354
- square_list = []
355
- connect_list = []
356
- segments_list = []
357
- for corner0 in corner_dict[0]:
358
- for corner1 in corner_dict[1]:
359
- connect01 = False
360
- for corner0_line in corner0[2:]:
361
- if corner0_line in corner1[2:]:
362
- connect01 = True
363
- break
364
- if connect01:
365
- for corner2 in corner_dict[2]:
366
- connect12 = False
367
- for corner1_line in corner1[2:]:
368
- if corner1_line in corner2[2:]:
369
- connect12 = True
370
- break
371
- if connect12:
372
- for corner3 in corner_dict[3]:
373
- connect23 = False
374
- for corner2_line in corner2[2:]:
375
- if corner2_line in corner3[2:]:
376
- connect23 = True
377
- break
378
- if connect23:
379
- for corner3_line in corner3[2:]:
380
- if corner3_line in corner0[2:]:
381
- # SQUARE!!!
382
- '''
383
- 0 -- 1
384
- | |
385
- 3 -- 2
386
- square_list:
387
- order: 0 > 1 > 2 > 3
388
- | x0, y0, x1, y1, x2, y2, x3, y3 |
389
- | x0, y0, x1, y1, x2, y2, x3, y3 |
390
- ...
391
- connect_list:
392
- order: 01 > 12 > 23 > 30
393
- | line_idx01, line_idx12, line_idx23, line_idx30 |
394
- | line_idx01, line_idx12, line_idx23, line_idx30 |
395
- ...
396
- segments_list:
397
- order: 0 > 1 > 2 > 3
398
- | line_idx0_i, line_idx0_j, line_idx1_i, line_idx1_j, line_idx2_i, line_idx2_j, line_idx3_i, line_idx3_j |
399
- | line_idx0_i, line_idx0_j, line_idx1_i, line_idx1_j, line_idx2_i, line_idx2_j, line_idx3_i, line_idx3_j |
400
- ...
401
- '''
402
- square_list.append(corner0[:2] + corner1[:2] + corner2[:2] + corner3[:2])
403
- connect_list.append([corner0_line, corner1_line, corner2_line, corner3_line])
404
- segments_list.append(corner0[2:] + corner1[2:] + corner2[2:] + corner3[2:])
405
-
406
- def check_outside_inside(segments_info, connect_idx):
407
- # return 'outside or inside', min distance, cover_param, peri_param
408
- if connect_idx == segments_info[0]:
409
- check_dist_mat = dist_inter_to_segment1
410
- else:
411
- check_dist_mat = dist_inter_to_segment2
412
-
413
- i, j = segments_info
414
- min_dist, max_dist = check_dist_mat[i, j, :]
415
- connect_dist = dist_segments[connect_idx]
416
- if max_dist > connect_dist:
417
- return 'outside', min_dist, 0, 1
418
- else:
419
- return 'inside', min_dist, -1, -1
420
-
421
- top_square = None
422
-
423
- try:
424
- map_size = input_shape[0] / 2
425
- squares = np.array(square_list).reshape([-1, 4, 2])
426
- score_array = []
427
- connect_array = np.array(connect_list)
428
- segments_array = np.array(segments_list).reshape([-1, 4, 2])
429
-
430
- # get degree of corners:
431
- squares_rollup = np.roll(squares, 1, axis=1)
432
- squares_rolldown = np.roll(squares, -1, axis=1)
433
- vec1 = squares_rollup - squares
434
- normalized_vec1 = vec1 / (np.linalg.norm(vec1, axis=-1, keepdims=True) + 1e-10)
435
- vec2 = squares_rolldown - squares
436
- normalized_vec2 = vec2 / (np.linalg.norm(vec2, axis=-1, keepdims=True) + 1e-10)
437
- inner_products = np.sum(normalized_vec1 * normalized_vec2, axis=-1) # [n_squares, 4]
438
- squares_degree = np.arccos(inner_products) * 180 / np.pi # [n_squares, 4]
439
-
440
- # get square score
441
- overlap_scores = []
442
- degree_scores = []
443
- length_scores = []
444
-
445
- for connects, segments, square, degree in zip(connect_array, segments_array, squares, squares_degree):
446
- '''
447
- 0 -- 1
448
- | |
449
- 3 -- 2
450
-
451
- # segments: [4, 2]
452
- # connects: [4]
453
- '''
454
-
455
- ###################################### OVERLAP SCORES
456
- cover = 0
457
- perimeter = 0
458
- # check 0 > 1 > 2 > 3
459
- square_length = []
460
-
461
- for start_idx in range(4):
462
- end_idx = (start_idx + 1) % 4
463
-
464
- connect_idx = connects[start_idx] # segment idx of segment01
465
- start_segments = segments[start_idx]
466
- end_segments = segments[end_idx]
467
-
468
- start_point = square[start_idx]
469
- end_point = square[end_idx]
470
-
471
- # check whether outside or inside
472
- start_position, start_min, start_cover_param, start_peri_param = check_outside_inside(start_segments,
473
- connect_idx)
474
- end_position, end_min, end_cover_param, end_peri_param = check_outside_inside(end_segments, connect_idx)
475
-
476
- cover += dist_segments[connect_idx] + start_cover_param * start_min + end_cover_param * end_min
477
- perimeter += dist_segments[connect_idx] + start_peri_param * start_min + end_peri_param * end_min
478
-
479
- square_length.append(
480
- dist_segments[connect_idx] + start_peri_param * start_min + end_peri_param * end_min)
481
-
482
- overlap_scores.append(cover / perimeter)
483
- ######################################
484
- ###################################### DEGREE SCORES
485
- '''
486
- deg0 vs deg2
487
- deg1 vs deg3
488
- '''
489
- deg0, deg1, deg2, deg3 = degree
490
- deg_ratio1 = deg0 / deg2
491
- if deg_ratio1 > 1.0:
492
- deg_ratio1 = 1 / deg_ratio1
493
- deg_ratio2 = deg1 / deg3
494
- if deg_ratio2 > 1.0:
495
- deg_ratio2 = 1 / deg_ratio2
496
- degree_scores.append((deg_ratio1 + deg_ratio2) / 2)
497
- ######################################
498
- ###################################### LENGTH SCORES
499
- '''
500
- len0 vs len2
501
- len1 vs len3
502
- '''
503
- len0, len1, len2, len3 = square_length
504
- len_ratio1 = len0 / len2 if len2 > len0 else len2 / len0
505
- len_ratio2 = len1 / len3 if len3 > len1 else len3 / len1
506
- length_scores.append((len_ratio1 + len_ratio2) / 2)
507
-
508
- ######################################
509
-
510
- overlap_scores = np.array(overlap_scores)
511
- overlap_scores /= np.max(overlap_scores)
512
-
513
- degree_scores = np.array(degree_scores)
514
- # degree_scores /= np.max(degree_scores)
515
-
516
- length_scores = np.array(length_scores)
517
-
518
- ###################################### AREA SCORES
519
- area_scores = np.reshape(squares, [-1, 4, 2])
520
- area_x = area_scores[:, :, 0]
521
- area_y = area_scores[:, :, 1]
522
- correction = area_x[:, -1] * area_y[:, 0] - area_y[:, -1] * area_x[:, 0]
523
- area_scores = np.sum(area_x[:, :-1] * area_y[:, 1:], axis=-1) - np.sum(area_y[:, :-1] * area_x[:, 1:], axis=-1)
524
- area_scores = 0.5 * np.abs(area_scores + correction)
525
- area_scores /= (map_size * map_size) # np.max(area_scores)
526
- ######################################
527
-
528
- ###################################### CENTER SCORES
529
- centers = np.array([[256 // 2, 256 // 2]], dtype='float32') # [1, 2]
530
- # squares: [n, 4, 2]
531
- square_centers = np.mean(squares, axis=1) # [n, 2]
532
- center2center = np.sqrt(np.sum((centers - square_centers) ** 2))
533
- center_scores = center2center / (map_size / np.sqrt(2.0))
534
-
535
- '''
536
- score_w = [overlap, degree, area, center, length]
537
- '''
538
- score_w = [0.0, 1.0, 10.0, 0.5, 1.0]
539
- score_array = params['w_overlap'] * overlap_scores \
540
- + params['w_degree'] * degree_scores \
541
- + params['w_area'] * area_scores \
542
- - params['w_center'] * center_scores \
543
- + params['w_length'] * length_scores
544
-
545
- best_square = []
546
-
547
- sorted_idx = np.argsort(score_array)[::-1]
548
- score_array = score_array[sorted_idx]
549
- squares = squares[sorted_idx]
550
-
551
- except Exception as e:
552
- pass
553
-
554
- '''return list
555
- merged_lines, squares, scores
556
- '''
557
-
558
- try:
559
- new_segments[:, 0] = new_segments[:, 0] * 2 / input_shape[1] * original_shape[1]
560
- new_segments[:, 1] = new_segments[:, 1] * 2 / input_shape[0] * original_shape[0]
561
- new_segments[:, 2] = new_segments[:, 2] * 2 / input_shape[1] * original_shape[1]
562
- new_segments[:, 3] = new_segments[:, 3] * 2 / input_shape[0] * original_shape[0]
563
- except:
564
- new_segments = []
565
-
566
- try:
567
- squares[:, :, 0] = squares[:, :, 0] * 2 / input_shape[1] * original_shape[1]
568
- squares[:, :, 1] = squares[:, :, 1] * 2 / input_shape[0] * original_shape[0]
569
- except:
570
- squares = []
571
- score_array = []
572
-
573
- try:
574
- inter_points = np.array(inter_points)
575
- inter_points[:, 0] = inter_points[:, 0] * 2 / input_shape[1] * original_shape[1]
576
- inter_points[:, 1] = inter_points[:, 1] * 2 / input_shape[0] * original_shape[0]
577
- except:
578
- inter_points = []
579
-
580
- return new_segments, squares, score_array, inter_points
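pred_squares returns the merged line segments, the candidate squares (sorted by the weighted score built from the w_* parameters), the per-square scores, and the raw corner intersection points; the spatial outputs are already rescaled to the original image size, and each output silently falls back to an empty list when no candidate survives the try/except blocks above. A brief unpacking sketch, continuing the hypothetical model and img from the previous note:

    from ControlNet.annotator.mlsd.utils import pred_squares  # assumed import path

    segments, squares, scores, corners = pred_squares(img, model)
    if len(squares) > 0:
        best = squares[0]   # highest-scoring quadrilateral, shape (4, 2), corner order 0 > 1 > 2 > 3
    else:
        best = None         # nothing passed the degree/distance checks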
ControlNet/annotator/openpose/LICENSE DELETED
@@ -1,108 +0,0 @@
1
- OPENPOSE: MULTIPERSON KEYPOINT DETECTION
2
- SOFTWARE LICENSE AGREEMENT
3
- ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
4
-
5
- BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT. IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
6
-
7
- This is a license agreement ("Agreement") between your academic institution or non-profit organization or self (called "Licensee" or "You" in this Agreement) and Carnegie Mellon University (called "Licensor" in this Agreement). All rights not specifically granted to you in this Agreement are reserved for Licensor.
8
-
9
- RESERVATION OF OWNERSHIP AND GRANT OF LICENSE:
10
- Licensor retains exclusive ownership of any copy of the Software (as defined below) licensed under this Agreement and hereby grants to Licensee a personal, non-exclusive,
11
- non-transferable license to use the Software for noncommercial research purposes, without the right to sublicense, pursuant to the terms and conditions of this Agreement. As used in this Agreement, the term "Software" means (i) the actual copy of all or any portion of code for program routines made accessible to Licensee by Licensor pursuant to this Agreement, inclusive of backups, updates, and/or merged copies permitted hereunder or subsequently supplied by Licensor, including all or any file structures, programming instructions, user interfaces and screen formats and sequences as well as any and all documentation and instructions related to it, and (ii) all or any derivatives and/or modifications created or made by You to any of the items specified in (i).
12
-
13
- CONFIDENTIALITY: Licensee acknowledges that the Software is proprietary to Licensor, and as such, Licensee agrees to receive all such materials in confidence and use the Software only in accordance with the terms of this Agreement. Licensee agrees to use reasonable effort to protect the Software from unauthorized use, reproduction, distribution, or publication.
14
-
15
- COPYRIGHT: The Software is owned by Licensor and is protected by United
16
- States copyright laws and applicable international treaties and/or conventions.
17
-
18
- PERMITTED USES: The Software may be used for your own noncommercial internal research purposes. You understand and agree that Licensor is not obligated to implement any suggestions and/or feedback you might provide regarding the Software, but to the extent Licensor does so, you are not entitled to any compensation related thereto.
19
-
20
- DERIVATIVES: You may create derivatives of or make modifications to the Software, however, You agree that all and any such derivatives and modifications will be owned by Licensor and become a part of the Software licensed to You under this Agreement. You may only use such derivatives and modifications for your own noncommercial internal research purposes, and you may not otherwise use, distribute or copy such derivatives and modifications in violation of this Agreement.
21
-
22
- BACKUPS: If Licensee is an organization, it may make that number of copies of the Software necessary for internal noncommercial use at a single site within its organization provided that all information appearing in or on the original labels, including the copyright and trademark notices are copied onto the labels of the copies.
23
-
24
- USES NOT PERMITTED: You may not distribute, copy or use the Software except as explicitly permitted herein. Licensee has not been granted any trademark license as part of this Agreement and may not use the name or mark “OpenPose", "Carnegie Mellon" or any renditions thereof without the prior written permission of Licensor.
25
-
26
- You may not sell, rent, lease, sublicense, lend, time-share or transfer, in whole or in part, or provide third parties access to prior or present versions (or any parts thereof) of the Software.
27
-
28
- ASSIGNMENT: You may not assign this Agreement or your rights hereunder without the prior written consent of Licensor. Any attempted assignment without such consent shall be null and void.
29
-
30
- TERM: The term of the license granted by this Agreement is from Licensee's acceptance of this Agreement by downloading the Software or by using the Software until terminated as provided below.
31
-
32
- The Agreement automatically terminates without notice if you fail to comply with any provision of this Agreement. Licensee may terminate this Agreement by ceasing using the Software. Upon any termination of this Agreement, Licensee will delete any and all copies of the Software. You agree that all provisions which operate to protect the proprietary rights of Licensor shall remain in force should breach occur and that the obligation of confidentiality described in this Agreement is binding in perpetuity and, as such, survives the term of the Agreement.
33
-
34
- FEE: Provided Licensee abides completely by the terms and conditions of this Agreement, there is no fee due to Licensor for Licensee's use of the Software in accordance with this Agreement.
35
-
36
- DISCLAIMER OF WARRANTIES: THE SOFTWARE IS PROVIDED "AS-IS" WITHOUT WARRANTY OF ANY KIND INCLUDING ANY WARRANTIES OF PERFORMANCE OR MERCHANTABILITY OR FITNESS FOR A PARTICULAR USE OR PURPOSE OR OF NON-INFRINGEMENT. LICENSEE BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF THE SOFTWARE AND RELATED MATERIALS.
37
-
38
- SUPPORT AND MAINTENANCE: No Software support or training by the Licensor is provided as part of this Agreement.
39
-
40
- EXCLUSIVE REMEDY AND LIMITATION OF LIABILITY: To the maximum extent permitted under applicable law, Licensor shall not be liable for direct, indirect, special, incidental, or consequential damages or lost profits related to Licensee's use of and/or inability to use the Software, even if Licensor is advised of the possibility of such damage.
41
-
42
- EXPORT REGULATION: Licensee agrees to comply with any and all applicable
43
- U.S. export control laws, regulations, and/or other laws related to embargoes and sanction programs administered by the Office of Foreign Assets Control.
44
-
45
- SEVERABILITY: If any provision(s) of this Agreement shall be held to be invalid, illegal, or unenforceable by a court or other tribunal of competent jurisdiction, the validity, legality and enforceability of the remaining provisions shall not in any way be affected or impaired thereby.
46
-
47
- NO IMPLIED WAIVERS: No failure or delay by Licensor in enforcing any right or remedy under this Agreement shall be construed as a waiver of any future or other exercise of such right or remedy by Licensor.
48
-
49
- GOVERNING LAW: This Agreement shall be construed and enforced in accordance with the laws of the Commonwealth of Pennsylvania without reference to conflict of laws principles. You consent to the personal jurisdiction of the courts of this County and waive their rights to venue outside of Allegheny County, Pennsylvania.
50
-
51
- ENTIRE AGREEMENT AND AMENDMENTS: This Agreement constitutes the sole and entire agreement between Licensee and Licensor as to the matter set forth herein and supersedes any previous agreements, understandings, and arrangements between the parties relating hereto.
52
-
53
-
54
-
55
- ************************************************************************
56
-
57
- THIRD-PARTY SOFTWARE NOTICES AND INFORMATION
58
-
59
- This project incorporates material from the project(s) listed below (collectively, "Third Party Code"). This Third Party Code is licensed to you under their original license terms set forth below. We reserves all other rights not expressly granted, whether by implication, estoppel or otherwise.
60
-
61
- 1. Caffe, version 1.0.0, (https://github.com/BVLC/caffe/)
62
-
63
- COPYRIGHT
64
-
65
- All contributions by the University of California:
66
- Copyright (c) 2014-2017 The Regents of the University of California (Regents)
67
- All rights reserved.
68
-
69
- All other contributions:
70
- Copyright (c) 2014-2017, the respective contributors
71
- All rights reserved.
72
-
73
- Caffe uses a shared copyright model: each contributor holds copyright over
74
- their contributions to Caffe. The project versioning records all such
75
- contribution and copyright details. If a contributor wants to further mark
76
- their specific copyright on a particular contribution, they should indicate
77
- their copyright solely in the commit message of the change when it is
78
- committed.
79
-
80
- LICENSE
81
-
82
- Redistribution and use in source and binary forms, with or without
83
- modification, are permitted provided that the following conditions are met:
84
-
85
- 1. Redistributions of source code must retain the above copyright notice, this
86
- list of conditions and the following disclaimer.
87
- 2. Redistributions in binary form must reproduce the above copyright notice,
88
- this list of conditions and the following disclaimer in the documentation
89
- and/or other materials provided with the distribution.
90
-
91
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
92
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
93
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
94
- DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
95
- ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
96
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
97
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
98
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
99
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
100
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
101
-
102
- CONTRIBUTION AGREEMENT
103
-
104
- By contributing to the BVLC/caffe repository through pull-request, comment,
105
- or otherwise, the contributor releases their content to the
106
- license and copyright terms herein.
107
-
108
- ************END OF THIRD-PARTY SOFTWARE NOTICES AND INFORMATION**********
ControlNet/annotator/openpose/__init__.py DELETED
@@ -1,49 +0,0 @@
1
- # Openpose
2
- # Original from CMU https://github.com/CMU-Perceptual-Computing-Lab/openpose
3
- # 2nd Edited by https://github.com/Hzzone/pytorch-openpose
4
- # 3rd Edited by ControlNet
5
-
6
- import os
7
- os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
8
-
9
- import torch
10
- import numpy as np
11
- from . import util
12
- from .body import Body
13
- from .hand import Hand
14
- from annotator.util import annotator_ckpts_path
15
-
16
-
17
- body_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/body_pose_model.pth"
18
- hand_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/hand_pose_model.pth"
19
-
20
-
21
- class OpenposeDetector:
22
- def __init__(self):
23
- body_modelpath = os.path.join(annotator_ckpts_path, "body_pose_model.pth")
24
- hand_modelpath = os.path.join(annotator_ckpts_path, "hand_pose_model.pth")
25
-
26
- if not os.path.exists(hand_modelpath):
27
- from basicsr.utils.download_util import load_file_from_url
28
- load_file_from_url(body_model_path, model_dir=annotator_ckpts_path)
29
- load_file_from_url(hand_model_path, model_dir=annotator_ckpts_path)
30
-
31
- self.body_estimation = Body(body_modelpath)
32
- self.hand_estimation = Hand(hand_modelpath)
33
-
34
- def __call__(self, oriImg, hand=False):
35
- oriImg = oriImg[:, :, ::-1].copy()
36
- with torch.no_grad():
37
- candidate, subset = self.body_estimation(oriImg)
38
- canvas = np.zeros_like(oriImg)
39
- canvas = util.draw_bodypose(canvas, candidate, subset)
40
- if hand:
41
- hands_list = util.handDetect(candidate, subset, oriImg)
42
- all_hand_peaks = []
43
- for x, y, w, is_left in hands_list:
44
- peaks = self.hand_estimation(oriImg[y:y+w, x:x+w, :])
45
- peaks[:, 0] = np.where(peaks[:, 0] == 0, peaks[:, 0], peaks[:, 0] + x)
46
- peaks[:, 1] = np.where(peaks[:, 1] == 0, peaks[:, 1], peaks[:, 1] + y)
47
- all_hand_peaks.append(peaks)
48
- canvas = util.draw_handpose(canvas, all_hand_peaks)
49
- return canvas, dict(candidate=candidate.tolist(), subset=subset.tolist())
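Usage note: OpenposeDetector fetches the two pose checkpoints from the lllyasviel/ControlNet repository when they are not already cached, reverses the channel order of the input array internally, and returns the rendered pose canvas together with the raw keypoints. A sketch, assuming the package is importable as laid out in this repository:

    import cv2
    from ControlNet.annotator.openpose import OpenposeDetector  # assumed import path

    apply_openpose = OpenposeDetector()
    img = cv2.imread('person.jpg')                 # H x W x 3 uint8 array
    pose_map, pose = apply_openpose(img, hand=True)
    cv2.imwrite('pose.png', pose_map)
    # pose['candidate'] holds one [x, y, score, id] entry per detected joint;
    # pose['subset'] holds, per person, the candidate index assigned to each body part.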
ControlNet/annotator/openpose/body.py DELETED
@@ -1,219 +0,0 @@
1
- import cv2
2
- import numpy as np
3
- import math
4
- import time
5
- from scipy.ndimage.filters import gaussian_filter
6
- import matplotlib.pyplot as plt
7
- import matplotlib
8
- import torch
9
- from torchvision import transforms
10
-
11
- from . import util
12
- from .model import bodypose_model
13
-
14
- class Body(object):
15
- def __init__(self, model_path):
16
- self.model = bodypose_model()
17
- if torch.cuda.is_available():
18
- self.model = self.model.cuda()
19
- print('cuda')
20
- model_dict = util.transfer(self.model, torch.load(model_path))
21
- self.model.load_state_dict(model_dict)
22
- self.model.eval()
23
-
24
- def __call__(self, oriImg):
25
- # scale_search = [0.5, 1.0, 1.5, 2.0]
26
- scale_search = [0.5]
27
- boxsize = 368
28
- stride = 8
29
- padValue = 128
30
- thre1 = 0.1
31
- thre2 = 0.05
32
- multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]
33
- heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
34
- paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
35
-
36
- for m in range(len(multiplier)):
37
- scale = multiplier[m]
38
- imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
39
- imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue)
40
- im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5
41
- im = np.ascontiguousarray(im)
42
-
43
- data = torch.from_numpy(im).float()
44
- if torch.cuda.is_available():
45
- data = data.cuda()
46
- # data = data.permute([2, 0, 1]).unsqueeze(0).float()
47
- with torch.no_grad():
48
- Mconv7_stage6_L1, Mconv7_stage6_L2 = self.model(data)
49
- Mconv7_stage6_L1 = Mconv7_stage6_L1.cpu().numpy()
50
- Mconv7_stage6_L2 = Mconv7_stage6_L2.cpu().numpy()
51
-
52
- # extract outputs, resize, and remove padding
53
- # heatmap = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[1]].data), (1, 2, 0)) # output 1 is heatmaps
54
- heatmap = np.transpose(np.squeeze(Mconv7_stage6_L2), (1, 2, 0)) # output 1 is heatmaps
55
- heatmap = cv2.resize(heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
56
- heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
57
- heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
58
-
59
- # paf = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[0]].data), (1, 2, 0)) # output 0 is PAFs
60
- paf = np.transpose(np.squeeze(Mconv7_stage6_L1), (1, 2, 0)) # output 0 is PAFs
61
- paf = cv2.resize(paf, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
62
- paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
63
- paf = cv2.resize(paf, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
64
-
65
- heatmap_avg = heatmap_avg + heatmap / len(multiplier)
66
- paf_avg = paf_avg + paf / len(multiplier)
67
-
68
- all_peaks = []
69
- peak_counter = 0
70
-
71
- for part in range(18):
72
- map_ori = heatmap_avg[:, :, part]
73
- one_heatmap = gaussian_filter(map_ori, sigma=3)
74
-
75
- map_left = np.zeros(one_heatmap.shape)
76
- map_left[1:, :] = one_heatmap[:-1, :]
77
- map_right = np.zeros(one_heatmap.shape)
78
- map_right[:-1, :] = one_heatmap[1:, :]
79
- map_up = np.zeros(one_heatmap.shape)
80
- map_up[:, 1:] = one_heatmap[:, :-1]
81
- map_down = np.zeros(one_heatmap.shape)
82
- map_down[:, :-1] = one_heatmap[:, 1:]
83
-
84
- peaks_binary = np.logical_and.reduce(
85
- (one_heatmap >= map_left, one_heatmap >= map_right, one_heatmap >= map_up, one_heatmap >= map_down, one_heatmap > thre1))
86
- peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])) # note reverse
87
- peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
88
- peak_id = range(peak_counter, peak_counter + len(peaks))
89
- peaks_with_score_and_id = [peaks_with_score[i] + (peak_id[i],) for i in range(len(peak_id))]
90
-
91
- all_peaks.append(peaks_with_score_and_id)
92
- peak_counter += len(peaks)
93
-
94
- # find connection in the specified sequence, center 29 is in the position 15
95
- limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \
96
- [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \
97
- [1, 16], [16, 18], [3, 17], [6, 18]]
98
- # the middle joints heatmap correspondence
99
- mapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44], [19, 20], [21, 22], \
100
- [23, 24], [25, 26], [27, 28], [29, 30], [47, 48], [49, 50], [53, 54], [51, 52], \
101
- [55, 56], [37, 38], [45, 46]]
102
-
103
- connection_all = []
104
- special_k = []
105
- mid_num = 10
106
-
107
- for k in range(len(mapIdx)):
108
- score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]]
109
- candA = all_peaks[limbSeq[k][0] - 1]
110
- candB = all_peaks[limbSeq[k][1] - 1]
111
- nA = len(candA)
112
- nB = len(candB)
113
- indexA, indexB = limbSeq[k]
114
- if (nA != 0 and nB != 0):
115
- connection_candidate = []
116
- for i in range(nA):
117
- for j in range(nB):
118
- vec = np.subtract(candB[j][:2], candA[i][:2])
119
- norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
120
- norm = max(0.001, norm)
121
- vec = np.divide(vec, norm)
122
-
123
- startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \
124
- np.linspace(candA[i][1], candB[j][1], num=mid_num)))
125
-
126
- vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \
127
- for I in range(len(startend))])
128
- vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \
129
- for I in range(len(startend))])
130
-
131
- score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
132
- score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min(
133
- 0.5 * oriImg.shape[0] / norm - 1, 0)
134
- criterion1 = len(np.nonzero(score_midpts > thre2)[0]) > 0.8 * len(score_midpts)
135
- criterion2 = score_with_dist_prior > 0
136
- if criterion1 and criterion2:
137
- connection_candidate.append(
138
- [i, j, score_with_dist_prior, score_with_dist_prior + candA[i][2] + candB[j][2]])
139
-
140
- connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
141
- connection = np.zeros((0, 5))
142
- for c in range(len(connection_candidate)):
143
- i, j, s = connection_candidate[c][0:3]
144
- if (i not in connection[:, 3] and j not in connection[:, 4]):
145
- connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])
146
- if (len(connection) >= min(nA, nB)):
147
- break
148
-
149
- connection_all.append(connection)
150
- else:
151
- special_k.append(k)
152
- connection_all.append([])
153
-
154
- # last number in each row is the total parts number of that person
155
- # the second last number in each row is the score of the overall configuration
156
- subset = -1 * np.ones((0, 20))
157
- candidate = np.array([item for sublist in all_peaks for item in sublist])
158
-
159
- for k in range(len(mapIdx)):
160
- if k not in special_k:
161
- partAs = connection_all[k][:, 0]
162
- partBs = connection_all[k][:, 1]
163
- indexA, indexB = np.array(limbSeq[k]) - 1
164
-
165
- for i in range(len(connection_all[k])): # = 1:size(temp,1)
166
- found = 0
167
- subset_idx = [-1, -1]
168
- for j in range(len(subset)): # 1:size(subset,1):
169
- if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
170
- subset_idx[found] = j
171
- found += 1
172
-
173
- if found == 1:
174
- j = subset_idx[0]
175
- if subset[j][indexB] != partBs[i]:
176
- subset[j][indexB] = partBs[i]
177
- subset[j][-1] += 1
178
- subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
179
- elif found == 2: # if found 2 and disjoint, merge them
180
- j1, j2 = subset_idx
181
- membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2]
182
- if len(np.nonzero(membership == 2)[0]) == 0: # merge
183
- subset[j1][:-2] += (subset[j2][:-2] + 1)
184
- subset[j1][-2:] += subset[j2][-2:]
185
- subset[j1][-2] += connection_all[k][i][2]
186
- subset = np.delete(subset, j2, 0)
187
- else: # as like found == 1
188
- subset[j1][indexB] = partBs[i]
189
- subset[j1][-1] += 1
190
- subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
191
-
192
- # if find no partA in the subset, create a new subset
193
- elif not found and k < 17:
194
- row = -1 * np.ones(20)
195
- row[indexA] = partAs[i]
196
- row[indexB] = partBs[i]
197
- row[-1] = 2
198
- row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2]
199
- subset = np.vstack([subset, row])
200
- # delete some rows of subset which has few parts occur
201
- deleteIdx = []
202
- for i in range(len(subset)):
203
- if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
204
- deleteIdx.append(i)
205
- subset = np.delete(subset, deleteIdx, axis=0)
206
-
207
- # subset: n*20 array, 0-17 is the index in candidate, 18 is the total score, 19 is the total parts
208
- # candidate: x, y, score, id
209
- return candidate, subset
210
-
211
- if __name__ == "__main__":
212
- body_estimation = Body('../model/body_pose_model.pth')
213
-
214
- test_image = '../images/ski.jpg'
215
- oriImg = cv2.imread(test_image) # B,G,R order
216
- candidate, subset = body_estimation(oriImg)
217
- canvas = util.draw_bodypose(oriImg, candidate, subset)
218
- plt.imshow(canvas[:, :, [2, 1, 0]])
219
- plt.show()
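The (candidate, subset) pair returned above is the raw multi-person assignment: candidate rows are [x, y, score, id], and each subset row stores, for one person, the candidate index of body parts 0-17 (-1 when a part was not found), followed by the overall score and the part count. A small sketch for turning that into per-person keypoint lists:

    # candidate, subset come from body_estimation(oriImg) as in the __main__ block above
    people = []
    for person in subset:
        keypoints = []
        for part in range(18):
            idx = int(person[part])
            keypoints.append(None if idx == -1 else tuple(candidate[idx][:3]))  # (x, y, score) or missing
        people.append(keypoints)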
ControlNet/annotator/openpose/hand.py DELETED
@@ -1,86 +0,0 @@
1
- import cv2
2
- import json
3
- import numpy as np
4
- import math
5
- import time
6
- from scipy.ndimage.filters import gaussian_filter
7
- import matplotlib.pyplot as plt
8
- import matplotlib
9
- import torch
10
- from skimage.measure import label
11
-
12
- from .model import handpose_model
13
- from . import util
14
-
15
- class Hand(object):
16
- def __init__(self, model_path):
17
- self.model = handpose_model()
18
- if torch.cuda.is_available():
19
- self.model = self.model.cuda()
20
- print('cuda')
21
- model_dict = util.transfer(self.model, torch.load(model_path))
22
- self.model.load_state_dict(model_dict)
23
- self.model.eval()
24
-
25
- def __call__(self, oriImg):
26
- scale_search = [0.5, 1.0, 1.5, 2.0]
27
- # scale_search = [0.5]
28
- boxsize = 368
29
- stride = 8
30
- padValue = 128
31
- thre = 0.05
32
- multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]
33
- heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 22))
34
- # paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
35
-
36
- for m in range(len(multiplier)):
37
- scale = multiplier[m]
38
- imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
39
- imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue)
40
- im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5
41
- im = np.ascontiguousarray(im)
42
-
43
- data = torch.from_numpy(im).float()
44
- if torch.cuda.is_available():
45
- data = data.cuda()
46
- # data = data.permute([2, 0, 1]).unsqueeze(0).float()
47
- with torch.no_grad():
48
- output = self.model(data).cpu().numpy()
49
- # output = self.model(data).numpy()q
50
-
51
- # extract outputs, resize, and remove padding
52
- heatmap = np.transpose(np.squeeze(output), (1, 2, 0)) # output 1 is heatmaps
53
- heatmap = cv2.resize(heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
54
- heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
55
- heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
56
-
57
- heatmap_avg += heatmap / len(multiplier)
58
-
59
- all_peaks = []
60
- for part in range(21):
61
- map_ori = heatmap_avg[:, :, part]
62
- one_heatmap = gaussian_filter(map_ori, sigma=3)
63
- binary = np.ascontiguousarray(one_heatmap > thre, dtype=np.uint8)
64
- # all values are below the threshold
65
- if np.sum(binary) == 0:
66
- all_peaks.append([0, 0])
67
- continue
68
- label_img, label_numbers = label(binary, return_num=True, connectivity=binary.ndim)
69
- max_index = np.argmax([np.sum(map_ori[label_img == i]) for i in range(1, label_numbers + 1)]) + 1
70
- label_img[label_img != max_index] = 0
71
- map_ori[label_img == 0] = 0
72
-
73
- y, x = util.npmax(map_ori)
74
- all_peaks.append([x, y])
75
- return np.array(all_peaks)
76
-
77
- if __name__ == "__main__":
78
- hand_estimation = Hand('../model/hand_pose_model.pth')
79
-
80
- # test_image = '../images/hand.jpg'
81
- test_image = '../images/hand.jpg'
82
- oriImg = cv2.imread(test_image) # B,G,R order
83
- peaks = hand_estimation(oriImg)
84
- canvas = util.draw_handpose(oriImg, peaks, True)
85
- cv2.imshow('', canvas)
86
- cv2.waitKey(0)
 
ControlNet/annotator/openpose/model.py DELETED
@@ -1,219 +0,0 @@
1
- import torch
2
- from collections import OrderedDict
3
-
4
- import torch
5
- import torch.nn as nn
6
-
7
- def make_layers(block, no_relu_layers):
8
- layers = []
9
- for layer_name, v in block.items():
10
- if 'pool' in layer_name:
11
- layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1],
12
- padding=v[2])
13
- layers.append((layer_name, layer))
14
- else:
15
- conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1],
16
- kernel_size=v[2], stride=v[3],
17
- padding=v[4])
18
- layers.append((layer_name, conv2d))
19
- if layer_name not in no_relu_layers:
20
- layers.append(('relu_'+layer_name, nn.ReLU(inplace=True)))
21
-
22
- return nn.Sequential(OrderedDict(layers))
23
-
24
- class bodypose_model(nn.Module):
25
- def __init__(self):
26
- super(bodypose_model, self).__init__()
27
-
28
- # these layers are not followed by a ReLU
29
- no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2', 'Mconv7_stage2_L1',\
30
- 'Mconv7_stage2_L2', 'Mconv7_stage3_L1', 'Mconv7_stage3_L2',\
31
- 'Mconv7_stage4_L1', 'Mconv7_stage4_L2', 'Mconv7_stage5_L1',\
32
- 'Mconv7_stage5_L2', 'Mconv7_stage6_L1', 'Mconv7_stage6_L2']
33
- blocks = {}
34
- block0 = OrderedDict([
35
- ('conv1_1', [3, 64, 3, 1, 1]),
36
- ('conv1_2', [64, 64, 3, 1, 1]),
37
- ('pool1_stage1', [2, 2, 0]),
38
- ('conv2_1', [64, 128, 3, 1, 1]),
39
- ('conv2_2', [128, 128, 3, 1, 1]),
40
- ('pool2_stage1', [2, 2, 0]),
41
- ('conv3_1', [128, 256, 3, 1, 1]),
42
- ('conv3_2', [256, 256, 3, 1, 1]),
43
- ('conv3_3', [256, 256, 3, 1, 1]),
44
- ('conv3_4', [256, 256, 3, 1, 1]),
45
- ('pool3_stage1', [2, 2, 0]),
46
- ('conv4_1', [256, 512, 3, 1, 1]),
47
- ('conv4_2', [512, 512, 3, 1, 1]),
48
- ('conv4_3_CPM', [512, 256, 3, 1, 1]),
49
- ('conv4_4_CPM', [256, 128, 3, 1, 1])
50
- ])
51
-
52
-
53
- # Stage 1
54
- block1_1 = OrderedDict([
55
- ('conv5_1_CPM_L1', [128, 128, 3, 1, 1]),
56
- ('conv5_2_CPM_L1', [128, 128, 3, 1, 1]),
57
- ('conv5_3_CPM_L1', [128, 128, 3, 1, 1]),
58
- ('conv5_4_CPM_L1', [128, 512, 1, 1, 0]),
59
- ('conv5_5_CPM_L1', [512, 38, 1, 1, 0])
60
- ])
61
-
62
- block1_2 = OrderedDict([
63
- ('conv5_1_CPM_L2', [128, 128, 3, 1, 1]),
64
- ('conv5_2_CPM_L2', [128, 128, 3, 1, 1]),
65
- ('conv5_3_CPM_L2', [128, 128, 3, 1, 1]),
66
- ('conv5_4_CPM_L2', [128, 512, 1, 1, 0]),
67
- ('conv5_5_CPM_L2', [512, 19, 1, 1, 0])
68
- ])
69
- blocks['block1_1'] = block1_1
70
- blocks['block1_2'] = block1_2
71
-
72
- self.model0 = make_layers(block0, no_relu_layers)
73
-
74
- # Stages 2 - 6
75
- for i in range(2, 7):
76
- blocks['block%d_1' % i] = OrderedDict([
77
- ('Mconv1_stage%d_L1' % i, [185, 128, 7, 1, 3]),
78
- ('Mconv2_stage%d_L1' % i, [128, 128, 7, 1, 3]),
79
- ('Mconv3_stage%d_L1' % i, [128, 128, 7, 1, 3]),
80
- ('Mconv4_stage%d_L1' % i, [128, 128, 7, 1, 3]),
81
- ('Mconv5_stage%d_L1' % i, [128, 128, 7, 1, 3]),
82
- ('Mconv6_stage%d_L1' % i, [128, 128, 1, 1, 0]),
83
- ('Mconv7_stage%d_L1' % i, [128, 38, 1, 1, 0])
84
- ])
85
-
86
- blocks['block%d_2' % i] = OrderedDict([
87
- ('Mconv1_stage%d_L2' % i, [185, 128, 7, 1, 3]),
88
- ('Mconv2_stage%d_L2' % i, [128, 128, 7, 1, 3]),
89
- ('Mconv3_stage%d_L2' % i, [128, 128, 7, 1, 3]),
90
- ('Mconv4_stage%d_L2' % i, [128, 128, 7, 1, 3]),
91
- ('Mconv5_stage%d_L2' % i, [128, 128, 7, 1, 3]),
92
- ('Mconv6_stage%d_L2' % i, [128, 128, 1, 1, 0]),
93
- ('Mconv7_stage%d_L2' % i, [128, 19, 1, 1, 0])
94
- ])
95
-
96
- for k in blocks.keys():
97
- blocks[k] = make_layers(blocks[k], no_relu_layers)
98
-
99
- self.model1_1 = blocks['block1_1']
100
- self.model2_1 = blocks['block2_1']
101
- self.model3_1 = blocks['block3_1']
102
- self.model4_1 = blocks['block4_1']
103
- self.model5_1 = blocks['block5_1']
104
- self.model6_1 = blocks['block6_1']
105
-
106
- self.model1_2 = blocks['block1_2']
107
- self.model2_2 = blocks['block2_2']
108
- self.model3_2 = blocks['block3_2']
109
- self.model4_2 = blocks['block4_2']
110
- self.model5_2 = blocks['block5_2']
111
- self.model6_2 = blocks['block6_2']
112
-
113
-
114
- def forward(self, x):
115
-
116
- out1 = self.model0(x)
117
-
118
- out1_1 = self.model1_1(out1)
119
- out1_2 = self.model1_2(out1)
120
- out2 = torch.cat([out1_1, out1_2, out1], 1)
121
-
122
- out2_1 = self.model2_1(out2)
123
- out2_2 = self.model2_2(out2)
124
- out3 = torch.cat([out2_1, out2_2, out1], 1)
125
-
126
- out3_1 = self.model3_1(out3)
127
- out3_2 = self.model3_2(out3)
128
- out4 = torch.cat([out3_1, out3_2, out1], 1)
129
-
130
- out4_1 = self.model4_1(out4)
131
- out4_2 = self.model4_2(out4)
132
- out5 = torch.cat([out4_1, out4_2, out1], 1)
133
-
134
- out5_1 = self.model5_1(out5)
135
- out5_2 = self.model5_2(out5)
136
- out6 = torch.cat([out5_1, out5_2, out1], 1)
137
-
138
- out6_1 = self.model6_1(out6)
139
- out6_2 = self.model6_2(out6)
140
-
141
- return out6_1, out6_2
142
-
143
- class handpose_model(nn.Module):
144
- def __init__(self):
145
- super(handpose_model, self).__init__()
146
-
147
- # these layers are not followed by a ReLU
148
- no_relu_layers = ['conv6_2_CPM', 'Mconv7_stage2', 'Mconv7_stage3',\
149
- 'Mconv7_stage4', 'Mconv7_stage5', 'Mconv7_stage6']
150
- # stage 1
151
- block1_0 = OrderedDict([
152
- ('conv1_1', [3, 64, 3, 1, 1]),
153
- ('conv1_2', [64, 64, 3, 1, 1]),
154
- ('pool1_stage1', [2, 2, 0]),
155
- ('conv2_1', [64, 128, 3, 1, 1]),
156
- ('conv2_2', [128, 128, 3, 1, 1]),
157
- ('pool2_stage1', [2, 2, 0]),
158
- ('conv3_1', [128, 256, 3, 1, 1]),
159
- ('conv3_2', [256, 256, 3, 1, 1]),
160
- ('conv3_3', [256, 256, 3, 1, 1]),
161
- ('conv3_4', [256, 256, 3, 1, 1]),
162
- ('pool3_stage1', [2, 2, 0]),
163
- ('conv4_1', [256, 512, 3, 1, 1]),
164
- ('conv4_2', [512, 512, 3, 1, 1]),
165
- ('conv4_3', [512, 512, 3, 1, 1]),
166
- ('conv4_4', [512, 512, 3, 1, 1]),
167
- ('conv5_1', [512, 512, 3, 1, 1]),
168
- ('conv5_2', [512, 512, 3, 1, 1]),
169
- ('conv5_3_CPM', [512, 128, 3, 1, 1])
170
- ])
171
-
172
- block1_1 = OrderedDict([
173
- ('conv6_1_CPM', [128, 512, 1, 1, 0]),
174
- ('conv6_2_CPM', [512, 22, 1, 1, 0])
175
- ])
176
-
177
- blocks = {}
178
- blocks['block1_0'] = block1_0
179
- blocks['block1_1'] = block1_1
180
-
181
- # stage 2-6
182
- for i in range(2, 7):
183
- blocks['block%d' % i] = OrderedDict([
184
- ('Mconv1_stage%d' % i, [150, 128, 7, 1, 3]),
185
- ('Mconv2_stage%d' % i, [128, 128, 7, 1, 3]),
186
- ('Mconv3_stage%d' % i, [128, 128, 7, 1, 3]),
187
- ('Mconv4_stage%d' % i, [128, 128, 7, 1, 3]),
188
- ('Mconv5_stage%d' % i, [128, 128, 7, 1, 3]),
189
- ('Mconv6_stage%d' % i, [128, 128, 1, 1, 0]),
190
- ('Mconv7_stage%d' % i, [128, 22, 1, 1, 0])
191
- ])
192
-
193
- for k in blocks.keys():
194
- blocks[k] = make_layers(blocks[k], no_relu_layers)
195
-
196
- self.model1_0 = blocks['block1_0']
197
- self.model1_1 = blocks['block1_1']
198
- self.model2 = blocks['block2']
199
- self.model3 = blocks['block3']
200
- self.model4 = blocks['block4']
201
- self.model5 = blocks['block5']
202
- self.model6 = blocks['block6']
203
-
204
- def forward(self, x):
205
- out1_0 = self.model1_0(x)
206
- out1_1 = self.model1_1(out1_0)
207
- concat_stage2 = torch.cat([out1_1, out1_0], 1)
208
- out_stage2 = self.model2(concat_stage2)
209
- concat_stage3 = torch.cat([out_stage2, out1_0], 1)
210
- out_stage3 = self.model3(concat_stage3)
211
- concat_stage4 = torch.cat([out_stage3, out1_0], 1)
212
- out_stage4 = self.model4(concat_stage4)
213
- concat_stage5 = torch.cat([out_stage4, out1_0], 1)
214
- out_stage5 = self.model5(concat_stage5)
215
- concat_stage6 = torch.cat([out_stage5, out1_0], 1)
216
- out_stage6 = self.model6(concat_stage6)
217
- return out_stage6
218
-
219
-
 
ControlNet/annotator/openpose/util.py DELETED
@@ -1,164 +0,0 @@
1
- import math
2
- import numpy as np
3
- import matplotlib
4
- import cv2
5
-
6
-
7
- def padRightDownCorner(img, stride, padValue):
8
- h = img.shape[0]
9
- w = img.shape[1]
10
-
11
- pad = 4 * [None]
12
- pad[0] = 0 # up
13
- pad[1] = 0 # left
14
- pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down
15
- pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right
16
-
17
- img_padded = img
18
- pad_up = np.tile(img_padded[0:1, :, :]*0 + padValue, (pad[0], 1, 1))
19
- img_padded = np.concatenate((pad_up, img_padded), axis=0)
20
- pad_left = np.tile(img_padded[:, 0:1, :]*0 + padValue, (1, pad[1], 1))
21
- img_padded = np.concatenate((pad_left, img_padded), axis=1)
22
- pad_down = np.tile(img_padded[-2:-1, :, :]*0 + padValue, (pad[2], 1, 1))
23
- img_padded = np.concatenate((img_padded, pad_down), axis=0)
24
- pad_right = np.tile(img_padded[:, -2:-1, :]*0 + padValue, (1, pad[3], 1))
25
- img_padded = np.concatenate((img_padded, pad_right), axis=1)
26
-
27
- return img_padded, pad
28
-
29
- # transfer Caffe model weights to PyTorch, matching the layer names
30
- def transfer(model, model_weights):
31
- transfered_model_weights = {}
32
- for weights_name in model.state_dict().keys():
33
- transfered_model_weights[weights_name] = model_weights['.'.join(weights_name.split('.')[1:])]
34
- return transfered_model_weights
35
-
36
- # draw the body keypoints and limbs
37
- def draw_bodypose(canvas, candidate, subset):
38
- stickwidth = 4
39
- limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \
40
- [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \
41
- [1, 16], [16, 18], [3, 17], [6, 18]]
42
-
43
- colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
44
- [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
45
- [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
46
- for i in range(18):
47
- for n in range(len(subset)):
48
- index = int(subset[n][i])
49
- if index == -1:
50
- continue
51
- x, y = candidate[index][0:2]
52
- cv2.circle(canvas, (int(x), int(y)), 4, colors[i], thickness=-1)
53
- for i in range(17):
54
- for n in range(len(subset)):
55
- index = subset[n][np.array(limbSeq[i]) - 1]
56
- if -1 in index:
57
- continue
58
- cur_canvas = canvas.copy()
59
- Y = candidate[index.astype(int), 0]
60
- X = candidate[index.astype(int), 1]
61
- mX = np.mean(X)
62
- mY = np.mean(Y)
63
- length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
64
- angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
65
- polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
66
- cv2.fillConvexPoly(cur_canvas, polygon, colors[i])
67
- canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
68
- # plt.imsave("preview.jpg", canvas[:, :, [2, 1, 0]])
69
- # plt.imshow(canvas[:, :, [2, 1, 0]])
70
- return canvas
71
-
72
-
73
- # images drawn by OpenCV alone do not look good.
74
- def draw_handpose(canvas, all_hand_peaks, show_number=False):
75
- edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], \
76
- [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
77
-
78
- for peaks in all_hand_peaks:
79
- for ie, e in enumerate(edges):
80
- if np.sum(np.all(peaks[e], axis=1)==0)==0:
81
- x1, y1 = peaks[e[0]]
82
- x2, y2 = peaks[e[1]]
83
- cv2.line(canvas, (x1, y1), (x2, y2), matplotlib.colors.hsv_to_rgb([ie/float(len(edges)), 1.0, 1.0])*255, thickness=2)
84
-
85
- for i, keypoint in enumerate(peaks):
86
- x, y = keypoint
87
- cv2.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1)
88
- if show_number:
89
- cv2.putText(canvas, str(i), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0), lineType=cv2.LINE_AA)
90
- return canvas
91
-
92
- # detect hand according to body pose keypoints
93
- # please refer to https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/hand/handDetector.cpp
94
- def handDetect(candidate, subset, oriImg):
95
- # right hand: wrist 4, elbow 3, shoulder 2
96
- # left hand: wrist 7, elbow 6, shoulder 5
97
- ratioWristElbow = 0.33
98
- detect_result = []
99
- image_height, image_width = oriImg.shape[0:2]
100
- for person in subset.astype(int):
101
- # if any of three not detected
102
- has_left = np.sum(person[[5, 6, 7]] == -1) == 0
103
- has_right = np.sum(person[[2, 3, 4]] == -1) == 0
104
- if not (has_left or has_right):
105
- continue
106
- hands = []
107
- #left hand
108
- if has_left:
109
- left_shoulder_index, left_elbow_index, left_wrist_index = person[[5, 6, 7]]
110
- x1, y1 = candidate[left_shoulder_index][:2]
111
- x2, y2 = candidate[left_elbow_index][:2]
112
- x3, y3 = candidate[left_wrist_index][:2]
113
- hands.append([x1, y1, x2, y2, x3, y3, True])
114
- # right hand
115
- if has_right:
116
- right_shoulder_index, right_elbow_index, right_wrist_index = person[[2, 3, 4]]
117
- x1, y1 = candidate[right_shoulder_index][:2]
118
- x2, y2 = candidate[right_elbow_index][:2]
119
- x3, y3 = candidate[right_wrist_index][:2]
120
- hands.append([x1, y1, x2, y2, x3, y3, False])
121
-
122
- for x1, y1, x2, y2, x3, y3, is_left in hands:
123
- # pos_hand = pos_wrist + ratio * (pos_wrist - pos_elbow) = (1 + ratio) * pos_wrist - ratio * pos_elbow
124
- # handRectangle.x = posePtr[wrist*3] + ratioWristElbow * (posePtr[wrist*3] - posePtr[elbow*3]);
125
- # handRectangle.y = posePtr[wrist*3+1] + ratioWristElbow * (posePtr[wrist*3+1] - posePtr[elbow*3+1]);
126
- # const auto distanceWristElbow = getDistance(poseKeypoints, person, wrist, elbow);
127
- # const auto distanceElbowShoulder = getDistance(poseKeypoints, person, elbow, shoulder);
128
- # handRectangle.width = 1.5f * fastMax(distanceWristElbow, 0.9f * distanceElbowShoulder);
129
- x = x3 + ratioWristElbow * (x3 - x2)
130
- y = y3 + ratioWristElbow * (y3 - y2)
131
- distanceWristElbow = math.sqrt((x3 - x2) ** 2 + (y3 - y2) ** 2)
132
- distanceElbowShoulder = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
133
- width = 1.5 * max(distanceWristElbow, 0.9 * distanceElbowShoulder)
134
- # x-y refers to the center --> offset to topLeft point
135
- # handRectangle.x -= handRectangle.width / 2.f;
136
- # handRectangle.y -= handRectangle.height / 2.f;
137
- x -= width / 2
138
- y -= width / 2 # width = height
139
- # clip the box so it does not overflow the image
140
- if x < 0: x = 0
141
- if y < 0: y = 0
142
- width1 = width
143
- width2 = width
144
- if x + width > image_width: width1 = image_width - x
145
- if y + width > image_height: width2 = image_height - y
146
- width = min(width1, width2)
147
- # discard hand boxes smaller than 20 pixels
148
- if width >= 20:
149
- detect_result.append([int(x), int(y), int(width), is_left])
150
-
151
- '''
152
- return value: [[x, y, w, True if left hand else False]].
153
- width = height since the network requires a square input.
154
- x, y are the coordinates of the top-left corner
155
- '''
156
- return detect_result
157
-
158
- # get the (row, col) index of the maximum value in a 2D array
159
- def npmax(array):
160
- arrayindex = array.argmax(1)
161
- arrayvalue = array.max(1)
162
- i = arrayvalue.argmax()
163
- j = arrayindex[i]
164
- return i, j
 
ControlNet/annotator/uniformer/LICENSE DELETED
@@ -1,203 +0,0 @@
1
- Copyright 2022 SenseTime X-Lab. All rights reserved.
2
-
3
- Apache License
4
- Version 2.0, January 2004
5
- http://www.apache.org/licenses/
6
-
7
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
8
-
9
- 1. Definitions.
10
-
11
- "License" shall mean the terms and conditions for use, reproduction,
12
- and distribution as defined by Sections 1 through 9 of this document.
13
-
14
- "Licensor" shall mean the copyright owner or entity authorized by
15
- the copyright owner that is granting the License.
16
-
17
- "Legal Entity" shall mean the union of the acting entity and all
18
- other entities that control, are controlled by, or are under common
19
- control with that entity. For the purposes of this definition,
20
- "control" means (i) the power, direct or indirect, to cause the
21
- direction or management of such entity, whether by contract or
22
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
23
- outstanding shares, or (iii) beneficial ownership of such entity.
24
-
25
- "You" (or "Your") shall mean an individual or Legal Entity
26
- exercising permissions granted by this License.
27
-
28
- "Source" form shall mean the preferred form for making modifications,
29
- including but not limited to software source code, documentation
30
- source, and configuration files.
31
-
32
- "Object" form shall mean any form resulting from mechanical
33
- transformation or translation of a Source form, including but
34
- not limited to compiled object code, generated documentation,
35
- and conversions to other media types.
36
-
37
- "Work" shall mean the work of authorship, whether in Source or
38
- Object form, made available under the License, as indicated by a
39
- copyright notice that is included in or attached to the work
40
- (an example is provided in the Appendix below).
41
-
42
- "Derivative Works" shall mean any work, whether in Source or Object
43
- form, that is based on (or derived from) the Work and for which the
44
- editorial revisions, annotations, elaborations, or other modifications
45
- represent, as a whole, an original work of authorship. For the purposes
46
- of this License, Derivative Works shall not include works that remain
47
- separable from, or merely link (or bind by name) to the interfaces of,
48
- the Work and Derivative Works thereof.
49
-
50
- "Contribution" shall mean any work of authorship, including
51
- the original version of the Work and any modifications or additions
52
- to that Work or Derivative Works thereof, that is intentionally
53
- submitted to Licensor for inclusion in the Work by the copyright owner
54
- or by an individual or Legal Entity authorized to submit on behalf of
55
- the copyright owner. For the purposes of this definition, "submitted"
56
- means any form of electronic, verbal, or written communication sent
57
- to the Licensor or its representatives, including but not limited to
58
- communication on electronic mailing lists, source code control systems,
59
- and issue tracking systems that are managed by, or on behalf of, the
60
- Licensor for the purpose of discussing and improving the Work, but
61
- excluding communication that is conspicuously marked or otherwise
62
- designated in writing by the copyright owner as "Not a Contribution."
63
-
64
- "Contributor" shall mean Licensor and any individual or Legal Entity
65
- on behalf of whom a Contribution has been received by Licensor and
66
- subsequently incorporated within the Work.
67
-
68
- 2. Grant of Copyright License. Subject to the terms and conditions of
69
- this License, each Contributor hereby grants to You a perpetual,
70
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
71
- copyright license to reproduce, prepare Derivative Works of,
72
- publicly display, publicly perform, sublicense, and distribute the
73
- Work and such Derivative Works in Source or Object form.
74
-
75
- 3. Grant of Patent License. Subject to the terms and conditions of
76
- this License, each Contributor hereby grants to You a perpetual,
77
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
78
- (except as stated in this section) patent license to make, have made,
79
- use, offer to sell, sell, import, and otherwise transfer the Work,
80
- where such license applies only to those patent claims licensable
81
- by such Contributor that are necessarily infringed by their
82
- Contribution(s) alone or by combination of their Contribution(s)
83
- with the Work to which such Contribution(s) was submitted. If You
84
- institute patent litigation against any entity (including a
85
- cross-claim or counterclaim in a lawsuit) alleging that the Work
86
- or a Contribution incorporated within the Work constitutes direct
87
- or contributory patent infringement, then any patent licenses
88
- granted to You under this License for that Work shall terminate
89
- as of the date such litigation is filed.
90
-
91
- 4. Redistribution. You may reproduce and distribute copies of the
92
- Work or Derivative Works thereof in any medium, with or without
93
- modifications, and in Source or Object form, provided that You
94
- meet the following conditions:
95
-
96
- (a) You must give any other recipients of the Work or
97
- Derivative Works a copy of this License; and
98
-
99
- (b) You must cause any modified files to carry prominent notices
100
- stating that You changed the files; and
101
-
102
- (c) You must retain, in the Source form of any Derivative Works
103
- that You distribute, all copyright, patent, trademark, and
104
- attribution notices from the Source form of the Work,
105
- excluding those notices that do not pertain to any part of
106
- the Derivative Works; and
107
-
108
- (d) If the Work includes a "NOTICE" text file as part of its
109
- distribution, then any Derivative Works that You distribute must
110
- include a readable copy of the attribution notices contained
111
- within such NOTICE file, excluding those notices that do not
112
- pertain to any part of the Derivative Works, in at least one
113
- of the following places: within a NOTICE text file distributed
114
- as part of the Derivative Works; within the Source form or
115
- documentation, if provided along with the Derivative Works; or,
116
- within a display generated by the Derivative Works, if and
117
- wherever such third-party notices normally appear. The contents
118
- of the NOTICE file are for informational purposes only and
119
- do not modify the License. You may add Your own attribution
120
- notices within Derivative Works that You distribute, alongside
121
- or as an addendum to the NOTICE text from the Work, provided
122
- that such additional attribution notices cannot be construed
123
- as modifying the License.
124
-
125
- You may add Your own copyright statement to Your modifications and
126
- may provide additional or different license terms and conditions
127
- for use, reproduction, or distribution of Your modifications, or
128
- for any such Derivative Works as a whole, provided Your use,
129
- reproduction, and distribution of the Work otherwise complies with
130
- the conditions stated in this License.
131
-
132
- 5. Submission of Contributions. Unless You explicitly state otherwise,
133
- any Contribution intentionally submitted for inclusion in the Work
134
- by You to the Licensor shall be under the terms and conditions of
135
- this License, without any additional terms or conditions.
136
- Notwithstanding the above, nothing herein shall supersede or modify
137
- the terms of any separate license agreement you may have executed
138
- with Licensor regarding such Contributions.
139
-
140
- 6. Trademarks. This License does not grant permission to use the trade
141
- names, trademarks, service marks, or product names of the Licensor,
142
- except as required for reasonable and customary use in describing the
143
- origin of the Work and reproducing the content of the NOTICE file.
144
-
145
- 7. Disclaimer of Warranty. Unless required by applicable law or
146
- agreed to in writing, Licensor provides the Work (and each
147
- Contributor provides its Contributions) on an "AS IS" BASIS,
148
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
149
- implied, including, without limitation, any warranties or conditions
150
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
151
- PARTICULAR PURPOSE. You are solely responsible for determining the
152
- appropriateness of using or redistributing the Work and assume any
153
- risks associated with Your exercise of permissions under this License.
154
-
155
- 8. Limitation of Liability. In no event and under no legal theory,
156
- whether in tort (including negligence), contract, or otherwise,
157
- unless required by applicable law (such as deliberate and grossly
158
- negligent acts) or agreed to in writing, shall any Contributor be
159
- liable to You for damages, including any direct, indirect, special,
160
- incidental, or consequential damages of any character arising as a
161
- result of this License or out of the use or inability to use the
162
- Work (including but not limited to damages for loss of goodwill,
163
- work stoppage, computer failure or malfunction, or any and all
164
- other commercial damages or losses), even if such Contributor
165
- has been advised of the possibility of such damages.
166
-
167
- 9. Accepting Warranty or Additional Liability. While redistributing
168
- the Work or Derivative Works thereof, You may choose to offer,
169
- and charge a fee for, acceptance of support, warranty, indemnity,
170
- or other liability obligations and/or rights consistent with this
171
- License. However, in accepting such obligations, You may act only
172
- on Your own behalf and on Your sole responsibility, not on behalf
173
- of any other Contributor, and only if You agree to indemnify,
174
- defend, and hold each Contributor harmless for any liability
175
- incurred by, or claims asserted against, such Contributor by reason
176
- of your accepting any such warranty or additional liability.
177
-
178
- END OF TERMS AND CONDITIONS
179
-
180
- APPENDIX: How to apply the Apache License to your work.
181
-
182
- To apply the Apache License to your work, attach the following
183
- boilerplate notice, with the fields enclosed by brackets "[]"
184
- replaced with your own identifying information. (Don't include
185
- the brackets!) The text should be enclosed in the appropriate
186
- comment syntax for the file format. We also recommend that a
187
- file or class name and description of purpose be included on the
188
- same "printed page" as the copyright notice for easier
189
- identification within third-party archives.
190
-
191
- Copyright 2022 SenseTime X-Lab.
192
-
193
- Licensed under the Apache License, Version 2.0 (the "License");
194
- you may not use this file except in compliance with the License.
195
- You may obtain a copy of the License at
196
-
197
- http://www.apache.org/licenses/LICENSE-2.0
198
-
199
- Unless required by applicable law or agreed to in writing, software
200
- distributed under the License is distributed on an "AS IS" BASIS,
201
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
202
- See the License for the specific language governing permissions and
203
- limitations under the License.
 
ControlNet/annotator/uniformer/__init__.py DELETED
@@ -1,27 +0,0 @@
1
- # Uniformer
2
- # From https://github.com/Sense-X/UniFormer
3
- # Apache-2.0 license
4
-
5
- import os
6
-
7
- from annotator.uniformer.mmseg.apis import init_segmentor, inference_segmentor, show_result_pyplot
8
- from annotator.uniformer.mmseg.core.evaluation import get_palette
9
- from annotator.util import annotator_ckpts_path
10
-
11
-
12
- checkpoint_file = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/upernet_global_small.pth"
13
-
14
-
15
- class UniformerDetector:
16
- def __init__(self):
17
- modelpath = os.path.join(annotator_ckpts_path, "upernet_global_small.pth")
18
- if not os.path.exists(modelpath):
19
- from basicsr.utils.download_util import load_file_from_url
20
- load_file_from_url(checkpoint_file, model_dir=annotator_ckpts_path)
21
- config_file = os.path.join(os.path.dirname(annotator_ckpts_path), "uniformer", "exp", "upernet_global_small", "config.py")
22
- self.model = init_segmentor(config_file, modelpath).cuda()
23
-
24
- def __call__(self, img):
25
- result = inference_segmentor(self.model, img)
26
- res_img = show_result_pyplot(self.model, img, result, get_palette('ade'), opacity=1)
27
- return res_img
 
ControlNet/annotator/uniformer/configs/_base_/default_runtime.py DELETED
@@ -1,14 +0,0 @@
1
- # yapf:disable
2
- log_config = dict(
3
- interval=50,
4
- hooks=[
5
- dict(type='TextLoggerHook', by_epoch=False),
6
- # dict(type='TensorboardLoggerHook')
7
- ])
8
- # yapf:enable
9
- dist_params = dict(backend='nccl')
10
- log_level = 'INFO'
11
- load_from = None
12
- resume_from = None
13
- workflow = [('train', 1)]
14
- cudnn_benchmark = True
 
ControlNet/annotator/uniformer/configs/_base_/models/ann_r50-d8.py DELETED
@@ -1,46 +0,0 @@
1
- # model settings
2
- norm_cfg = dict(type='SyncBN', requires_grad=True)
3
- model = dict(
4
- type='EncoderDecoder',
5
- pretrained='open-mmlab://resnet50_v1c',
6
- backbone=dict(
7
- type='ResNetV1c',
8
- depth=50,
9
- num_stages=4,
10
- out_indices=(0, 1, 2, 3),
11
- dilations=(1, 1, 2, 4),
12
- strides=(1, 2, 1, 1),
13
- norm_cfg=norm_cfg,
14
- norm_eval=False,
15
- style='pytorch',
16
- contract_dilation=True),
17
- decode_head=dict(
18
- type='ANNHead',
19
- in_channels=[1024, 2048],
20
- in_index=[2, 3],
21
- channels=512,
22
- project_channels=256,
23
- query_scales=(1, ),
24
- key_pool_scales=(1, 3, 6, 8),
25
- dropout_ratio=0.1,
26
- num_classes=19,
27
- norm_cfg=norm_cfg,
28
- align_corners=False,
29
- loss_decode=dict(
30
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
31
- auxiliary_head=dict(
32
- type='FCNHead',
33
- in_channels=1024,
34
- in_index=2,
35
- channels=256,
36
- num_convs=1,
37
- concat_input=False,
38
- dropout_ratio=0.1,
39
- num_classes=19,
40
- norm_cfg=norm_cfg,
41
- align_corners=False,
42
- loss_decode=dict(
43
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
44
- # model training and testing settings
45
- train_cfg=dict(),
46
- test_cfg=dict(mode='whole'))
 
ControlNet/annotator/uniformer/configs/_base_/models/apcnet_r50-d8.py DELETED
@@ -1,44 +0,0 @@
1
- # model settings
2
- norm_cfg = dict(type='SyncBN', requires_grad=True)
3
- model = dict(
4
- type='EncoderDecoder',
5
- pretrained='open-mmlab://resnet50_v1c',
6
- backbone=dict(
7
- type='ResNetV1c',
8
- depth=50,
9
- num_stages=4,
10
- out_indices=(0, 1, 2, 3),
11
- dilations=(1, 1, 2, 4),
12
- strides=(1, 2, 1, 1),
13
- norm_cfg=norm_cfg,
14
- norm_eval=False,
15
- style='pytorch',
16
- contract_dilation=True),
17
- decode_head=dict(
18
- type='APCHead',
19
- in_channels=2048,
20
- in_index=3,
21
- channels=512,
22
- pool_scales=(1, 2, 3, 6),
23
- dropout_ratio=0.1,
24
- num_classes=19,
25
- norm_cfg=dict(type='SyncBN', requires_grad=True),
26
- align_corners=False,
27
- loss_decode=dict(
28
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
29
- auxiliary_head=dict(
30
- type='FCNHead',
31
- in_channels=1024,
32
- in_index=2,
33
- channels=256,
34
- num_convs=1,
35
- concat_input=False,
36
- dropout_ratio=0.1,
37
- num_classes=19,
38
- norm_cfg=norm_cfg,
39
- align_corners=False,
40
- loss_decode=dict(
41
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
42
- # model training and testing settings
43
- train_cfg=dict(),
44
- test_cfg=dict(mode='whole'))
 
ControlNet/annotator/uniformer/configs/_base_/models/ccnet_r50-d8.py DELETED
@@ -1,44 +0,0 @@
1
- # model settings
2
- norm_cfg = dict(type='SyncBN', requires_grad=True)
3
- model = dict(
4
- type='EncoderDecoder',
5
- pretrained='open-mmlab://resnet50_v1c',
6
- backbone=dict(
7
- type='ResNetV1c',
8
- depth=50,
9
- num_stages=4,
10
- out_indices=(0, 1, 2, 3),
11
- dilations=(1, 1, 2, 4),
12
- strides=(1, 2, 1, 1),
13
- norm_cfg=norm_cfg,
14
- norm_eval=False,
15
- style='pytorch',
16
- contract_dilation=True),
17
- decode_head=dict(
18
- type='CCHead',
19
- in_channels=2048,
20
- in_index=3,
21
- channels=512,
22
- recurrence=2,
23
- dropout_ratio=0.1,
24
- num_classes=19,
25
- norm_cfg=norm_cfg,
26
- align_corners=False,
27
- loss_decode=dict(
28
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
29
- auxiliary_head=dict(
30
- type='FCNHead',
31
- in_channels=1024,
32
- in_index=2,
33
- channels=256,
34
- num_convs=1,
35
- concat_input=False,
36
- dropout_ratio=0.1,
37
- num_classes=19,
38
- norm_cfg=norm_cfg,
39
- align_corners=False,
40
- loss_decode=dict(
41
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
42
- # model training and testing settings
43
- train_cfg=dict(),
44
- test_cfg=dict(mode='whole'))
 
ControlNet/annotator/uniformer/configs/_base_/models/cgnet.py DELETED
@@ -1,35 +0,0 @@
1
- # model settings
2
- norm_cfg = dict(type='SyncBN', eps=1e-03, requires_grad=True)
3
- model = dict(
4
- type='EncoderDecoder',
5
- backbone=dict(
6
- type='CGNet',
7
- norm_cfg=norm_cfg,
8
- in_channels=3,
9
- num_channels=(32, 64, 128),
10
- num_blocks=(3, 21),
11
- dilations=(2, 4),
12
- reductions=(8, 16)),
13
- decode_head=dict(
14
- type='FCNHead',
15
- in_channels=256,
16
- in_index=2,
17
- channels=256,
18
- num_convs=0,
19
- concat_input=False,
20
- dropout_ratio=0,
21
- num_classes=19,
22
- norm_cfg=norm_cfg,
23
- loss_decode=dict(
24
- type='CrossEntropyLoss',
25
- use_sigmoid=False,
26
- loss_weight=1.0,
27
- class_weight=[
28
- 2.5959933, 6.7415504, 3.5354059, 9.8663225, 9.690899, 9.369352,
29
- 10.289121, 9.953208, 4.3097677, 9.490387, 7.674431, 9.396905,
30
- 10.347791, 6.3927646, 10.226669, 10.241062, 10.280587,
31
- 10.396974, 10.055647
32
- ])),
33
- # model training and testing settings
34
- train_cfg=dict(sampler=None),
35
- test_cfg=dict(mode='whole'))
 
ControlNet/annotator/uniformer/configs/_base_/models/danet_r50-d8.py DELETED
@@ -1,44 +0,0 @@
1
- # model settings
2
- norm_cfg = dict(type='SyncBN', requires_grad=True)
3
- model = dict(
4
- type='EncoderDecoder',
5
- pretrained='open-mmlab://resnet50_v1c',
6
- backbone=dict(
7
- type='ResNetV1c',
8
- depth=50,
9
- num_stages=4,
10
- out_indices=(0, 1, 2, 3),
11
- dilations=(1, 1, 2, 4),
12
- strides=(1, 2, 1, 1),
13
- norm_cfg=norm_cfg,
14
- norm_eval=False,
15
- style='pytorch',
16
- contract_dilation=True),
17
- decode_head=dict(
18
- type='DAHead',
19
- in_channels=2048,
20
- in_index=3,
21
- channels=512,
22
- pam_channels=64,
23
- dropout_ratio=0.1,
24
- num_classes=19,
25
- norm_cfg=norm_cfg,
26
- align_corners=False,
27
- loss_decode=dict(
28
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
29
- auxiliary_head=dict(
30
- type='FCNHead',
31
- in_channels=1024,
32
- in_index=2,
33
- channels=256,
34
- num_convs=1,
35
- concat_input=False,
36
- dropout_ratio=0.1,
37
- num_classes=19,
38
- norm_cfg=norm_cfg,
39
- align_corners=False,
40
- loss_decode=dict(
41
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
42
- # model training and testing settings
43
- train_cfg=dict(),
44
- test_cfg=dict(mode='whole'))
 
ControlNet/annotator/uniformer/configs/_base_/models/deeplabv3_r50-d8.py DELETED
@@ -1,44 +0,0 @@
1
- # model settings
2
- norm_cfg = dict(type='SyncBN', requires_grad=True)
3
- model = dict(
4
- type='EncoderDecoder',
5
- pretrained='open-mmlab://resnet50_v1c',
6
- backbone=dict(
7
- type='ResNetV1c',
8
- depth=50,
9
- num_stages=4,
10
- out_indices=(0, 1, 2, 3),
11
- dilations=(1, 1, 2, 4),
12
- strides=(1, 2, 1, 1),
13
- norm_cfg=norm_cfg,
14
- norm_eval=False,
15
- style='pytorch',
16
- contract_dilation=True),
17
- decode_head=dict(
18
- type='ASPPHead',
19
- in_channels=2048,
20
- in_index=3,
21
- channels=512,
22
- dilations=(1, 12, 24, 36),
23
- dropout_ratio=0.1,
24
- num_classes=19,
25
- norm_cfg=norm_cfg,
26
- align_corners=False,
27
- loss_decode=dict(
28
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
29
- auxiliary_head=dict(
30
- type='FCNHead',
31
- in_channels=1024,
32
- in_index=2,
33
- channels=256,
34
- num_convs=1,
35
- concat_input=False,
36
- dropout_ratio=0.1,
37
- num_classes=19,
38
- norm_cfg=norm_cfg,
39
- align_corners=False,
40
- loss_decode=dict(
41
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
42
- # model training and testing settings
43
- train_cfg=dict(),
44
- test_cfg=dict(mode='whole'))
 
ControlNet/annotator/uniformer/configs/_base_/models/deeplabv3_unet_s5-d16.py DELETED
@@ -1,50 +0,0 @@
1
- # model settings
2
- norm_cfg = dict(type='SyncBN', requires_grad=True)
3
- model = dict(
4
- type='EncoderDecoder',
5
- pretrained=None,
6
- backbone=dict(
7
- type='UNet',
8
- in_channels=3,
9
- base_channels=64,
10
- num_stages=5,
11
- strides=(1, 1, 1, 1, 1),
12
- enc_num_convs=(2, 2, 2, 2, 2),
13
- dec_num_convs=(2, 2, 2, 2),
14
- downsamples=(True, True, True, True),
15
- enc_dilations=(1, 1, 1, 1, 1),
16
- dec_dilations=(1, 1, 1, 1),
17
- with_cp=False,
18
- conv_cfg=None,
19
- norm_cfg=norm_cfg,
20
- act_cfg=dict(type='ReLU'),
21
- upsample_cfg=dict(type='InterpConv'),
22
- norm_eval=False),
23
- decode_head=dict(
24
- type='ASPPHead',
25
- in_channels=64,
26
- in_index=4,
27
- channels=16,
28
- dilations=(1, 12, 24, 36),
29
- dropout_ratio=0.1,
30
- num_classes=2,
31
- norm_cfg=norm_cfg,
32
- align_corners=False,
33
- loss_decode=dict(
34
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
35
- auxiliary_head=dict(
36
- type='FCNHead',
37
- in_channels=128,
38
- in_index=3,
39
- channels=64,
40
- num_convs=1,
41
- concat_input=False,
42
- dropout_ratio=0.1,
43
- num_classes=2,
44
- norm_cfg=norm_cfg,
45
- align_corners=False,
46
- loss_decode=dict(
47
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
48
- # model training and testing settings
49
- train_cfg=dict(),
50
- test_cfg=dict(mode='slide', crop_size=256, stride=170))
 
ControlNet/annotator/uniformer/configs/_base_/models/deeplabv3plus_r50-d8.py DELETED
@@ -1,46 +0,0 @@
1
- # model settings
2
- norm_cfg = dict(type='SyncBN', requires_grad=True)
3
- model = dict(
4
- type='EncoderDecoder',
5
- pretrained='open-mmlab://resnet50_v1c',
6
- backbone=dict(
7
- type='ResNetV1c',
8
- depth=50,
9
- num_stages=4,
10
- out_indices=(0, 1, 2, 3),
11
- dilations=(1, 1, 2, 4),
12
- strides=(1, 2, 1, 1),
13
- norm_cfg=norm_cfg,
14
- norm_eval=False,
15
- style='pytorch',
16
- contract_dilation=True),
17
- decode_head=dict(
18
- type='DepthwiseSeparableASPPHead',
19
- in_channels=2048,
20
- in_index=3,
21
- channels=512,
22
- dilations=(1, 12, 24, 36),
23
- c1_in_channels=256,
24
- c1_channels=48,
25
- dropout_ratio=0.1,
26
- num_classes=19,
27
- norm_cfg=norm_cfg,
28
- align_corners=False,
29
- loss_decode=dict(
30
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
31
- auxiliary_head=dict(
32
- type='FCNHead',
33
- in_channels=1024,
34
- in_index=2,
35
- channels=256,
36
- num_convs=1,
37
- concat_input=False,
38
- dropout_ratio=0.1,
39
- num_classes=19,
40
- norm_cfg=norm_cfg,
41
- align_corners=False,
42
- loss_decode=dict(
43
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
44
- # model training and testing settings
45
- train_cfg=dict(),
46
- test_cfg=dict(mode='whole'))
 
ControlNet/annotator/uniformer/configs/_base_/models/dmnet_r50-d8.py DELETED
@@ -1,44 +0,0 @@
1
- # model settings
2
- norm_cfg = dict(type='SyncBN', requires_grad=True)
3
- model = dict(
4
- type='EncoderDecoder',
5
- pretrained='open-mmlab://resnet50_v1c',
6
- backbone=dict(
7
- type='ResNetV1c',
8
- depth=50,
9
- num_stages=4,
10
- out_indices=(0, 1, 2, 3),
11
- dilations=(1, 1, 2, 4),
12
- strides=(1, 2, 1, 1),
13
- norm_cfg=norm_cfg,
14
- norm_eval=False,
15
- style='pytorch',
16
- contract_dilation=True),
17
- decode_head=dict(
18
- type='DMHead',
19
- in_channels=2048,
20
- in_index=3,
21
- channels=512,
22
- filter_sizes=(1, 3, 5, 7),
23
- dropout_ratio=0.1,
24
- num_classes=19,
25
- norm_cfg=dict(type='SyncBN', requires_grad=True),
26
- align_corners=False,
27
- loss_decode=dict(
28
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
29
- auxiliary_head=dict(
30
- type='FCNHead',
31
- in_channels=1024,
32
- in_index=2,
33
- channels=256,
34
- num_convs=1,
35
- concat_input=False,
36
- dropout_ratio=0.1,
37
- num_classes=19,
38
- norm_cfg=norm_cfg,
39
- align_corners=False,
40
- loss_decode=dict(
41
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
42
- # model training and testing settings
43
- train_cfg=dict(),
44
- test_cfg=dict(mode='whole'))
 
ControlNet/annotator/uniformer/configs/_base_/models/dnl_r50-d8.py DELETED
@@ -1,46 +0,0 @@
1
- # model settings
2
- norm_cfg = dict(type='SyncBN', requires_grad=True)
3
- model = dict(
4
- type='EncoderDecoder',
5
- pretrained='open-mmlab://resnet50_v1c',
6
- backbone=dict(
7
- type='ResNetV1c',
8
- depth=50,
9
- num_stages=4,
10
- out_indices=(0, 1, 2, 3),
11
- dilations=(1, 1, 2, 4),
12
- strides=(1, 2, 1, 1),
13
- norm_cfg=norm_cfg,
14
- norm_eval=False,
15
- style='pytorch',
16
- contract_dilation=True),
17
- decode_head=dict(
18
- type='DNLHead',
19
- in_channels=2048,
20
- in_index=3,
21
- channels=512,
22
- dropout_ratio=0.1,
23
- reduction=2,
24
- use_scale=True,
25
- mode='embedded_gaussian',
26
- num_classes=19,
27
- norm_cfg=norm_cfg,
28
- align_corners=False,
29
- loss_decode=dict(
30
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
31
- auxiliary_head=dict(
32
- type='FCNHead',
33
- in_channels=1024,
34
- in_index=2,
35
- channels=256,
36
- num_convs=1,
37
- concat_input=False,
38
- dropout_ratio=0.1,
39
- num_classes=19,
40
- norm_cfg=norm_cfg,
41
- align_corners=False,
42
- loss_decode=dict(
43
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
44
- # model training and testing settings
45
- train_cfg=dict(),
46
- test_cfg=dict(mode='whole'))
 
ControlNet/annotator/uniformer/configs/_base_/models/emanet_r50-d8.py DELETED
@@ -1,47 +0,0 @@
1
- # model settings
2
- norm_cfg = dict(type='SyncBN', requires_grad=True)
3
- model = dict(
4
- type='EncoderDecoder',
5
- pretrained='open-mmlab://resnet50_v1c',
6
- backbone=dict(
7
- type='ResNetV1c',
8
- depth=50,
9
- num_stages=4,
10
- out_indices=(0, 1, 2, 3),
11
- dilations=(1, 1, 2, 4),
12
- strides=(1, 2, 1, 1),
13
- norm_cfg=norm_cfg,
14
- norm_eval=False,
15
- style='pytorch',
16
- contract_dilation=True),
17
- decode_head=dict(
18
- type='EMAHead',
19
- in_channels=2048,
20
- in_index=3,
21
- channels=256,
22
- ema_channels=512,
23
- num_bases=64,
24
- num_stages=3,
25
- momentum=0.1,
26
- dropout_ratio=0.1,
27
- num_classes=19,
28
- norm_cfg=norm_cfg,
29
- align_corners=False,
30
- loss_decode=dict(
31
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
32
- auxiliary_head=dict(
33
- type='FCNHead',
34
- in_channels=1024,
35
- in_index=2,
36
- channels=256,
37
- num_convs=1,
38
- concat_input=False,
39
- dropout_ratio=0.1,
40
- num_classes=19,
41
- norm_cfg=norm_cfg,
42
- align_corners=False,
43
- loss_decode=dict(
44
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
45
- # model training and testing settings
46
- train_cfg=dict(),
47
- test_cfg=dict(mode='whole'))
 
ControlNet/annotator/uniformer/configs/_base_/models/encnet_r50-d8.py DELETED
@@ -1,48 +0,0 @@
1
- # model settings
2
- norm_cfg = dict(type='SyncBN', requires_grad=True)
3
- model = dict(
4
- type='EncoderDecoder',
5
- pretrained='open-mmlab://resnet50_v1c',
6
- backbone=dict(
7
- type='ResNetV1c',
8
- depth=50,
9
- num_stages=4,
10
- out_indices=(0, 1, 2, 3),
11
- dilations=(1, 1, 2, 4),
12
- strides=(1, 2, 1, 1),
13
- norm_cfg=norm_cfg,
14
- norm_eval=False,
15
- style='pytorch',
16
- contract_dilation=True),
17
- decode_head=dict(
18
- type='EncHead',
19
- in_channels=[512, 1024, 2048],
20
- in_index=(1, 2, 3),
21
- channels=512,
22
- num_codes=32,
23
- use_se_loss=True,
24
- add_lateral=False,
25
- dropout_ratio=0.1,
26
- num_classes=19,
27
- norm_cfg=norm_cfg,
28
- align_corners=False,
29
- loss_decode=dict(
30
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
31
- loss_se_decode=dict(
32
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.2)),
33
- auxiliary_head=dict(
34
- type='FCNHead',
35
- in_channels=1024,
36
- in_index=2,
37
- channels=256,
38
- num_convs=1,
39
- concat_input=False,
40
- dropout_ratio=0.1,
41
- num_classes=19,
42
- norm_cfg=norm_cfg,
43
- align_corners=False,
44
- loss_decode=dict(
45
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
46
- # model training and testing settings
47
- train_cfg=dict(),
48
- test_cfg=dict(mode='whole'))
 
ControlNet/annotator/uniformer/configs/_base_/models/fast_scnn.py DELETED
@@ -1,57 +0,0 @@
1
- # model settings
2
- norm_cfg = dict(type='SyncBN', requires_grad=True, momentum=0.01)
3
- model = dict(
4
- type='EncoderDecoder',
5
- backbone=dict(
6
- type='FastSCNN',
7
- downsample_dw_channels=(32, 48),
8
- global_in_channels=64,
9
- global_block_channels=(64, 96, 128),
10
- global_block_strides=(2, 2, 1),
11
- global_out_channels=128,
12
- higher_in_channels=64,
13
- lower_in_channels=128,
14
- fusion_out_channels=128,
15
- out_indices=(0, 1, 2),
16
- norm_cfg=norm_cfg,
17
- align_corners=False),
18
- decode_head=dict(
19
- type='DepthwiseSeparableFCNHead',
20
- in_channels=128,
21
- channels=128,
22
- concat_input=False,
23
- num_classes=19,
24
- in_index=-1,
25
- norm_cfg=norm_cfg,
26
- align_corners=False,
27
- loss_decode=dict(
28
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)),
29
- auxiliary_head=[
30
- dict(
31
- type='FCNHead',
32
- in_channels=128,
33
- channels=32,
34
- num_convs=1,
35
- num_classes=19,
36
- in_index=-2,
37
- norm_cfg=norm_cfg,
38
- concat_input=False,
39
- align_corners=False,
40
- loss_decode=dict(
41
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)),
42
- dict(
43
- type='FCNHead',
44
- in_channels=64,
45
- channels=32,
46
- num_convs=1,
47
- num_classes=19,
48
- in_index=-3,
49
- norm_cfg=norm_cfg,
50
- concat_input=False,
51
- align_corners=False,
52
- loss_decode=dict(
53
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)),
54
- ],
55
- # model training and testing settings
56
- train_cfg=dict(),
57
- test_cfg=dict(mode='whole'))
 
ControlNet/annotator/uniformer/configs/_base_/models/fcn_hr18.py DELETED
@@ -1,52 +0,0 @@
1
- # model settings
2
- norm_cfg = dict(type='SyncBN', requires_grad=True)
3
- model = dict(
4
- type='EncoderDecoder',
5
- pretrained='open-mmlab://msra/hrnetv2_w18',
6
- backbone=dict(
7
- type='HRNet',
8
- norm_cfg=norm_cfg,
9
- norm_eval=False,
10
- extra=dict(
11
- stage1=dict(
12
- num_modules=1,
13
- num_branches=1,
14
- block='BOTTLENECK',
15
- num_blocks=(4, ),
16
- num_channels=(64, )),
17
- stage2=dict(
18
- num_modules=1,
19
- num_branches=2,
20
- block='BASIC',
21
- num_blocks=(4, 4),
22
- num_channels=(18, 36)),
23
- stage3=dict(
24
- num_modules=4,
25
- num_branches=3,
26
- block='BASIC',
27
- num_blocks=(4, 4, 4),
28
- num_channels=(18, 36, 72)),
29
- stage4=dict(
30
- num_modules=3,
31
- num_branches=4,
32
- block='BASIC',
33
- num_blocks=(4, 4, 4, 4),
34
- num_channels=(18, 36, 72, 144)))),
35
- decode_head=dict(
36
- type='FCNHead',
37
- in_channels=[18, 36, 72, 144],
38
- in_index=(0, 1, 2, 3),
39
- channels=sum([18, 36, 72, 144]),
40
- input_transform='resize_concat',
41
- kernel_size=1,
42
- num_convs=1,
43
- concat_input=False,
44
- dropout_ratio=-1,
45
- num_classes=19,
46
- norm_cfg=norm_cfg,
47
- align_corners=False,
48
- loss_decode=dict(
49
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
50
- # model training and testing settings
51
- train_cfg=dict(),
52
- test_cfg=dict(mode='whole'))
 
ControlNet/annotator/uniformer/configs/_base_/models/fcn_r50-d8.py DELETED
@@ -1,45 +0,0 @@
1
- # model settings
2
- norm_cfg = dict(type='SyncBN', requires_grad=True)
3
- model = dict(
4
- type='EncoderDecoder',
5
- pretrained='open-mmlab://resnet50_v1c',
6
- backbone=dict(
7
- type='ResNetV1c',
8
- depth=50,
9
- num_stages=4,
10
- out_indices=(0, 1, 2, 3),
11
- dilations=(1, 1, 2, 4),
12
- strides=(1, 2, 1, 1),
13
- norm_cfg=norm_cfg,
14
- norm_eval=False,
15
- style='pytorch',
16
- contract_dilation=True),
17
- decode_head=dict(
18
- type='FCNHead',
19
- in_channels=2048,
20
- in_index=3,
21
- channels=512,
22
- num_convs=2,
23
- concat_input=True,
24
- dropout_ratio=0.1,
25
- num_classes=19,
26
- norm_cfg=norm_cfg,
27
- align_corners=False,
28
- loss_decode=dict(
29
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
30
- auxiliary_head=dict(
31
- type='FCNHead',
32
- in_channels=1024,
33
- in_index=2,
34
- channels=256,
35
- num_convs=1,
36
- concat_input=False,
37
- dropout_ratio=0.1,
38
- num_classes=19,
39
- norm_cfg=norm_cfg,
40
- align_corners=False,
41
- loss_decode=dict(
42
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
43
- # model training and testing settings
44
- train_cfg=dict(),
45
- test_cfg=dict(mode='whole'))
 
ControlNet/annotator/uniformer/configs/_base_/models/fcn_unet_s5-d16.py DELETED
@@ -1,51 +0,0 @@
1
- # model settings
2
- norm_cfg = dict(type='SyncBN', requires_grad=True)
3
- model = dict(
4
- type='EncoderDecoder',
5
- pretrained=None,
6
- backbone=dict(
7
- type='UNet',
8
- in_channels=3,
9
- base_channels=64,
10
- num_stages=5,
11
- strides=(1, 1, 1, 1, 1),
12
- enc_num_convs=(2, 2, 2, 2, 2),
13
- dec_num_convs=(2, 2, 2, 2),
14
- downsamples=(True, True, True, True),
15
- enc_dilations=(1, 1, 1, 1, 1),
16
- dec_dilations=(1, 1, 1, 1),
17
- with_cp=False,
18
- conv_cfg=None,
19
- norm_cfg=norm_cfg,
20
- act_cfg=dict(type='ReLU'),
21
- upsample_cfg=dict(type='InterpConv'),
22
- norm_eval=False),
23
- decode_head=dict(
24
- type='FCNHead',
25
- in_channels=64,
26
- in_index=4,
27
- channels=64,
28
- num_convs=1,
29
- concat_input=False,
30
- dropout_ratio=0.1,
31
- num_classes=2,
32
- norm_cfg=norm_cfg,
33
- align_corners=False,
34
- loss_decode=dict(
35
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
36
- auxiliary_head=dict(
37
- type='FCNHead',
38
- in_channels=128,
39
- in_index=3,
40
- channels=64,
41
- num_convs=1,
42
- concat_input=False,
43
- dropout_ratio=0.1,
44
- num_classes=2,
45
- norm_cfg=norm_cfg,
46
- align_corners=False,
47
- loss_decode=dict(
48
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
49
- # model training and testing settings
50
- train_cfg=dict(),
51
- test_cfg=dict(mode='slide', crop_size=256, stride=170))
 
ControlNet/annotator/uniformer/configs/_base_/models/fpn_r50.py DELETED
@@ -1,36 +0,0 @@
1
- # model settings
2
- norm_cfg = dict(type='SyncBN', requires_grad=True)
3
- model = dict(
4
- type='EncoderDecoder',
5
- pretrained='open-mmlab://resnet50_v1c',
6
- backbone=dict(
7
- type='ResNetV1c',
8
- depth=50,
9
- num_stages=4,
10
- out_indices=(0, 1, 2, 3),
11
- dilations=(1, 1, 1, 1),
12
- strides=(1, 2, 2, 2),
13
- norm_cfg=norm_cfg,
14
- norm_eval=False,
15
- style='pytorch',
16
- contract_dilation=True),
17
- neck=dict(
18
- type='FPN',
19
- in_channels=[256, 512, 1024, 2048],
20
- out_channels=256,
21
- num_outs=4),
22
- decode_head=dict(
23
- type='FPNHead',
24
- in_channels=[256, 256, 256, 256],
25
- in_index=[0, 1, 2, 3],
26
- feature_strides=[4, 8, 16, 32],
27
- channels=128,
28
- dropout_ratio=0.1,
29
- num_classes=19,
30
- norm_cfg=norm_cfg,
31
- align_corners=False,
32
- loss_decode=dict(
33
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
34
- # model training and testing settings
35
- train_cfg=dict(),
36
- test_cfg=dict(mode='whole'))
 
ControlNet/annotator/uniformer/configs/_base_/models/fpn_uniformer.py DELETED
@@ -1,35 +0,0 @@
1
- # model settings
2
- norm_cfg = dict(type='SyncBN', requires_grad=True)
3
- model = dict(
4
- type='EncoderDecoder',
5
- backbone=dict(
6
- type='UniFormer',
7
- embed_dim=[64, 128, 320, 512],
8
- layers=[3, 4, 8, 3],
9
- head_dim=64,
10
- mlp_ratio=4.,
11
- qkv_bias=True,
12
- drop_rate=0.,
13
- attn_drop_rate=0.,
14
- drop_path_rate=0.1),
15
- neck=dict(
16
- type='FPN',
17
- in_channels=[64, 128, 320, 512],
18
- out_channels=256,
19
- num_outs=4),
20
- decode_head=dict(
21
- type='FPNHead',
22
- in_channels=[256, 256, 256, 256],
23
- in_index=[0, 1, 2, 3],
24
- feature_strides=[4, 8, 16, 32],
25
- channels=128,
26
- dropout_ratio=0.1,
27
- num_classes=150,
28
- norm_cfg=norm_cfg,
29
- align_corners=False,
30
- loss_decode=dict(
31
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
32
- # model training and testing settings
33
- train_cfg=dict(),
34
- test_cfg=dict(mode='whole')
35
- )
 
ControlNet/annotator/uniformer/configs/_base_/models/gcnet_r50-d8.py DELETED
@@ -1,46 +0,0 @@
1
- # model settings
2
- norm_cfg = dict(type='SyncBN', requires_grad=True)
3
- model = dict(
4
- type='EncoderDecoder',
5
- pretrained='open-mmlab://resnet50_v1c',
6
- backbone=dict(
7
- type='ResNetV1c',
8
- depth=50,
9
- num_stages=4,
10
- out_indices=(0, 1, 2, 3),
11
- dilations=(1, 1, 2, 4),
12
- strides=(1, 2, 1, 1),
13
- norm_cfg=norm_cfg,
14
- norm_eval=False,
15
- style='pytorch',
16
- contract_dilation=True),
17
- decode_head=dict(
18
- type='GCHead',
19
- in_channels=2048,
20
- in_index=3,
21
- channels=512,
22
- ratio=1 / 4.,
23
- pooling_type='att',
24
- fusion_types=('channel_add', ),
25
- dropout_ratio=0.1,
26
- num_classes=19,
27
- norm_cfg=norm_cfg,
28
- align_corners=False,
29
- loss_decode=dict(
30
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
31
- auxiliary_head=dict(
32
- type='FCNHead',
33
- in_channels=1024,
34
- in_index=2,
35
- channels=256,
36
- num_convs=1,
37
- concat_input=False,
38
- dropout_ratio=0.1,
39
- num_classes=19,
40
- norm_cfg=norm_cfg,
41
- align_corners=False,
42
- loss_decode=dict(
43
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
44
- # model training and testing settings
45
- train_cfg=dict(),
46
- test_cfg=dict(mode='whole'))