rbanfield committed
Commit 5ed15ed
Parent(s): 05f1405

Upload folder using huggingface_hub

Browse files
Files changed (32)
  1. .gitattributes +1 -0
  2. README.md +3 -9
  3. app.py +125 -0
  4. opencv_zoo/LICENSE +201 -0
  5. opencv_zoo/README.md +112 -0
  6. opencv_zoo/models/__init__.py +96 -0
  7. opencv_zoo/models/face_detection_yunet/.demo.py.swp +0 -0
  8. opencv_zoo/models/face_detection_yunet/CMakeLists.txt +11 -0
  9. opencv_zoo/models/face_detection_yunet/LICENSE +21 -0
  10. opencv_zoo/models/face_detection_yunet/README.md +67 -0
  11. opencv_zoo/models/face_detection_yunet/__pycache__/yunet.cpython-311.pyc +0 -0
  12. opencv_zoo/models/face_detection_yunet/demo.cpp +220 -0
  13. opencv_zoo/models/face_detection_yunet/demo.py +145 -0
  14. opencv_zoo/models/face_detection_yunet/example_outputs/largest_selfie.jpg +3 -0
  15. opencv_zoo/models/face_detection_yunet/example_outputs/yunet_demo.gif +0 -0
  16. opencv_zoo/models/face_detection_yunet/face_detection_yunet_2023mar.onnx +3 -0
  17. opencv_zoo/models/face_detection_yunet/face_detection_yunet_2023mar_int8.onnx +3 -0
  18. opencv_zoo/models/face_detection_yunet/yunet.py +55 -0
  19. opencv_zoo/models/license_plate_detection_yunet/LICENSE +203 -0
  20. opencv_zoo/models/license_plate_detection_yunet/README.md +30 -0
  21. opencv_zoo/models/license_plate_detection_yunet/__pycache__/lpd_yunet.cpython-311.pyc +0 -0
  22. opencv_zoo/models/license_plate_detection_yunet/demo.py +129 -0
  23. opencv_zoo/models/license_plate_detection_yunet/example_outputs/lpd_yunet_demo.gif +0 -0
  24. opencv_zoo/models/license_plate_detection_yunet/example_outputs/result-1.jpg +0 -0
  25. opencv_zoo/models/license_plate_detection_yunet/example_outputs/result-2.jpg +0 -0
  26. opencv_zoo/models/license_plate_detection_yunet/example_outputs/result-3.jpg +0 -0
  27. opencv_zoo/models/license_plate_detection_yunet/example_outputs/result-4.jpg +0 -0
  28. opencv_zoo/models/license_plate_detection_yunet/license_plate_detection_lpd_yunet_2023mar.onnx +3 -0
  29. opencv_zoo/models/license_plate_detection_yunet/license_plate_detection_lpd_yunet_2023mar_int8.onnx +3 -0
  30. opencv_zoo/models/license_plate_detection_yunet/lpd_yunet.py +133 -0
  31. opencv_zoo/models/license_plate_detection_yunet/result.jpg +0 -0
  32. requirements.txt +2 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+opencv_zoo/models/face_detection_yunet/example_outputs/largest_selfie.jpg filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,12 +1,6 @@
 ---
-title: Face And License Plate Obfuscator - YuNet
-emoji: 🐢
-colorFrom: indigo
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.47.0
+title: Face_and_License_Plate_Obfuscator_-_YuNet
 app_file: app.py
-pinned: false
+sdk: gradio
+sdk_version: 3.42.0
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
app.py ADDED
@@ -0,0 +1,125 @@
+import numpy as np
+import cv2
+import gradio as gr
+
+from opencv_zoo.models.face_detection_yunet.yunet import YuNet
+from opencv_zoo.models.license_plate_detection_yunet.lpd_yunet import LPD_YuNet
+
+# Instantiate the face detection YuNet
+face_model = YuNet(
+    modelPath="opencv_zoo/models/face_detection_yunet/face_detection_yunet_2023mar.onnx",
+    inputSize=[320, 320],
+    confThreshold=0.9,
+    nmsThreshold=0.3,
+    topK=5000,
+    backendId=cv2.dnn.DNN_BACKEND_OPENCV,
+    targetId=cv2.dnn.DNN_TARGET_CPU,
+)
+
+# Instantiate the license plate detection YuNet
+lpd_model = LPD_YuNet(
+    modelPath="opencv_zoo/models/license_plate_detection_yunet/license_plate_detection_lpd_yunet_2023mar.onnx",
+    confThreshold=0.9,
+    nmsThreshold=0.3,
+    topK=5000,
+    keepTopK=750,
+    backendId=cv2.dnn.DNN_BACKEND_OPENCV,
+    targetId=cv2.dnn.DNN_TARGET_CPU,
+)
+
+
+def json_detections(face_results, lpd_results):
+    json_result = {}
+    json_result["faces"] = []
+    json_result["license_plates"] = []
+
+    for det in face_results if face_results is not None else []:
+        bbox = det[0:4].astype(np.int32)  # x, y, w, h
+        json_result["faces"].append(
+            {
+                "xmin": int(bbox[0]),
+                "ymin": int(bbox[1]),
+                "xmax": int(bbox[0]) + int(bbox[2]),
+                "ymax": int(bbox[1]) + int(bbox[3]),
+            }
+        )
+
+    for det in lpd_results if lpd_results is not None else []:
+        bbox = det[:-1].astype(np.int32)  # four corner points
+        x1, y1, x2, y2, x3, y3, x4, y4 = bbox
+
+        xmin = min(x1, x2, x3, x4)
+        xmax = max(x1, x2, x3, x4)
+        ymin = min(y1, y2, y3, y4)
+        ymax = max(y1, y2, y3, y4)
+
+        json_result["license_plates"].append(
+            {
+                "xmin": int(xmin),
+                "ymin": int(ymin),
+                "xmax": int(xmax),
+                "ymax": int(ymax),
+            }
+        )
+
+    return json_result
+
+
+def overlay_results(image, face_results, lpd_results):
+    # Black out detected faces on the input image
+    for det in face_results if face_results is not None else []:
+        bbox = det[0:4].astype(np.int32)
+        cv2.rectangle(
+            image,
+            (bbox[0], bbox[1]),
+            (bbox[0] + bbox[2], bbox[1] + bbox[3]),
+            (0, 0, 0),
+            -1,
+        )
+
+    # Black out detected license plates on the input image
+    for det in lpd_results if lpd_results is not None else []:
+        bbox = det[:-1].astype(np.int32)
+        x1, y1, x2, y2, x3, y3, x4, y4 = bbox
+
+        # The detected region is technically a quadrilateral, but we
+        # simply black out its axis-aligned bounding rectangle
+        xmin = min(x1, x2, x3, x4)
+        xmax = max(x1, x2, x3, x4)
+        ymin = min(y1, y2, y3, y4)
+        ymax = max(y1, y2, y3, y4)
+
+        cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (0, 0, 0), -1)
+
+    return image
+
+
+def predict(image):
+
+    h, w, _ = image.shape
+
+    # Inference; Gradio supplies RGB, the models expect BGR
+    face_model.setInputSize([w, h])
+    lpd_model.setInputSize([w, h])
+    infer_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+    face_results = face_model.infer(infer_image)
+    lpd_results = lpd_model.infer(infer_image)
+
+    # Process output
+    image = overlay_results(image, face_results, lpd_results)
+    detections = json_detections(face_results, lpd_results)
+
+    return image, detections
+
+
+demo = gr.Interface(
+    title="Face and License Plate Obfuscator - YuNet",
+    fn=predict,
+    inputs=gr.Image(type="numpy", label="Original Image"),
+    outputs=[
+        gr.Image(type="numpy", label="Output Image"),
+        gr.JSON(),
+    ],
+)
+
+demo.launch()
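
A minimal local smoke test of `predict` might look like the sketch below. This is not part of the commit: it assumes `app.py` is importable from the repo root, that a test image exists at the hypothetical path `sample.jpg`, and it glosses over the fact that importing `app.py` as written also runs `demo.launch()` (in practice you would guard that call with `if __name__ == "__main__":` first). Note that `predict` expects an RGB array while `cv2.imread` returns BGR.

```python
# Hedged sketch: exercise predict() directly, outside Gradio.
# "sample.jpg" is a stand-in path, not a file shipped in this commit.
import cv2
from app import predict  # caveat: top-level demo.launch() runs on import as-is

bgr = cv2.imread("sample.jpg")
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)       # predict() expects RGB

obfuscated, detections = predict(rgb)            # (RGB ndarray, dict of boxes)
print(detections["faces"], detections["license_plates"])
cv2.imwrite("obfuscated.jpg", cv2.cvtColor(obfuscated, cv2.COLOR_RGB2BGR))
```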
opencv_zoo/LICENSE ADDED
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
opencv_zoo/README.md ADDED
@@ -0,0 +1,112 @@
+# OpenCV Zoo and Benchmark
+
+A zoo of models tuned for OpenCV DNN, with benchmarks on different platforms.
+
+Guidelines:
+
+- Install the latest `opencv-python`:
+  ```shell
+  python3 -m pip install opencv-python
+  # Or upgrade to the latest version
+  python3 -m pip install --upgrade opencv-python
+  ```
+- Clone this repo to download all models and demo scripts:
+  ```shell
+  # Install git-lfs from https://git-lfs.github.com/
+  git clone https://github.com/opencv/opencv_zoo && cd opencv_zoo
+  git lfs install
+  git lfs pull
+  ```
+- To run benchmarks on your hardware settings, please refer to [benchmark/README](./benchmark/README.md).
+
+## Models & Benchmark Results
+
+![](benchmark/color_table.svg?raw=true)
+
+Hardware Setup:
+
+- [Intel Core i7-12700K](https://www.intel.com/content/www/us/en/products/sku/134594/intel-core-i712700k-processor-25m-cache-up-to-5-00-ghz/specifications.html): 8 Performance-cores (3.60 GHz, turbo up to 4.90 GHz), 4 Efficient-cores (2.70 GHz, turbo up to 3.80 GHz), 20 threads.
+- [Raspberry Pi 4B](https://www.raspberrypi.com/products/raspberry-pi-4-model-b/specifications/): Broadcom BCM2711 SoC with a quad-core Cortex-A72 (ARM v8) 64-bit CPU @ 1.5 GHz.
+- [Toybrick RV1126](https://t.rock-chips.com/en/portal.php?mod=view&aid=26): Rockchip RV1126 SoC with a quad-core ARM Cortex-A7 CPU and a 2.0 TOPS NPU.
+- [Khadas Edge 2](https://www.khadas.com/edge2): Rockchip RK3588S SoC with a 2.25 GHz quad-core ARM Cortex-A76 + 1.8 GHz quad-core Cortex-A55 CPU, and a 6 TOPS NPU.
+- [Horizon Sunrise X3](https://developer.horizon.ai/sunrise): an SoC from Horizon Robotics with a quad-core ARM Cortex-A53 1.2 GHz CPU and a 5 TOPS BPU (a.k.a. NPU).
+- [MAIX-III AXera-Pi](https://wiki.sipeed.com/hardware/en/maixIII/ax-pi/axpi.html#Hardware): Axera AX620A SoC with a quad-core ARM Cortex-A7 CPU and a 3.6 TOPS @ int8 NPU.
+- [StarFive VisionFive 2](https://doc-en.rvspace.org/VisionFive2/Product_Brief/VisionFive_2/specification_pb.html): `StarFive JH7110` SoC with a quad-core RISC-V CPU that can turbo up to 1.5 GHz, and an `IMG BXE-4-32 MC1` GPU from Imagination with a clock frequency of up to 600 MHz.
+- [NVIDIA Jetson Nano B01](https://developer.nvidia.com/embedded/jetson-nano-developer-kit): a quad-core ARM A57 @ 1.43 GHz CPU, and a 128-core NVIDIA Maxwell GPU.
+- [Khadas VIM3](https://www.khadas.com/vim3): Amlogic A311D SoC with a 2.2 GHz quad-core ARM Cortex-A73 + 1.8 GHz dual-core Cortex-A53 CPU, and a 5 TOPS NPU. Benchmarks are done using **per-tensor quantized** models. Follow [this guide](https://github.com/opencv/opencv/wiki/TIM-VX-Backend-For-Running-OpenCV-On-NPU) to build OpenCV with TIM-VX backend enabled.
+- [Atlas 200 DK](https://e.huawei.com/en/products/computing/ascend/atlas-200): Ascend 310 NPU with 22 TOPS @ INT8. Follow [this guide](https://github.com/opencv/opencv/wiki/Huawei-CANN-Backend) to build OpenCV with CANN backend enabled.
+- [Allwinner Nezha D1](https://d1.docs.aw-ol.com/en): Allwinner D1 SoC with a 1.0 GHz single-core RISC-V [Xuantie C906 CPU](https://www.t-head.cn/product/C906?spm=a2ouz.12986968.0.0.7bfc1384auGNPZ) with RVV 0.7.1 support. Only YuNet is tested for now. Visit [here](https://github.com/fengyuentau/opencv_zoo_cpp) for more details.
+
+***Important Notes***:
+
+- Each hardware column in the table above reports the elapsed time of one inference (preprocess, forward and postprocess).
+- The time data is the mean of 10 runs after some warmup runs. Different metrics may be applied to some specific models.
+- Batch size is 1 for all benchmark results.
+- `---` means the model is not available to run on that device.
+- View [benchmark/config](./benchmark/config) for more details on benchmarking different models.
+
+## Some Examples
+
+Some examples are listed below. You can find more in the directory of each model!
+
+### Face Detection with [YuNet](./models/face_detection_yunet/)
+
+![largest selfie](./models/face_detection_yunet/example_outputs/largest_selfie.jpg)
+
+### Facial Expression Recognition with [Progressive Teacher](./models/facial_expression_recognition/)
+
+![fer demo](./models/facial_expression_recognition/example_outputs/selfie.jpg)
+
+### Human Segmentation with [PP-HumanSeg](./models/human_segmentation_pphumanseg/)
+
+![messi](./models/human_segmentation_pphumanseg/example_outputs/messi.jpg)
+
+### License Plate Detection with [LPD_YuNet](./models/license_plate_detection_yunet/)
+
+![license plate detection](./models/license_plate_detection_yunet/example_outputs/lpd_yunet_demo.gif)
+
+### Object Detection with [NanoDet](./models/object_detection_nanodet/) & [YOLOX](./models/object_detection_yolox/)
+
+![nanodet demo](./models/object_detection_nanodet/example_outputs/1_res.jpg)
+
+![yolox demo](./models/object_detection_yolox/example_outputs/3_res.jpg)
+
+### Object Tracking with [DaSiamRPN](./models/object_tracking_dasiamrpn/)
+
+![webcam demo](./models/object_tracking_dasiamrpn/example_outputs/dasiamrpn_demo.gif)
+
+### Palm Detection with [MP-PalmDet](./models/palm_detection_mediapipe/)
+
+![palm det](./models/palm_detection_mediapipe/example_outputs/mppalmdet_demo.gif)
+
+### Hand Pose Estimation with [MP-HandPose](models/handpose_estimation_mediapipe/)
+
+![handpose estimation](models/handpose_estimation_mediapipe/example_outputs/mphandpose_demo.webp)
+
+### Person Detection with [MP-PersonDet](./models/person_detection_mediapipe)
+
+![person det](./models/person_detection_mediapipe/example_outputs/mppersondet_demo.webp)
+
+### Pose Estimation with [MP-Pose](models/pose_estimation_mediapipe)
+
+![pose_estimation](models/pose_estimation_mediapipe/example_outputs/mpposeest_demo.webp)
+
+### QR Code Detection and Parsing with [WeChatQRCode](./models/qrcode_wechatqrcode/)
+
+![qrcode](./models/qrcode_wechatqrcode/example_outputs/wechat_qrcode_demo.gif)
+
+### Chinese Text Detection with [DB](./models/text_detection_db/)
+
+![mask](./models/text_detection_db/example_outputs/mask.jpg)
+
+### English Text Detection with [DB](./models/text_detection_db/)
+
+![gsoc](./models/text_detection_db/example_outputs/gsoc.jpg)
+
+### Text Recognition with [CRNN](./models/text_recognition_crnn/)
+
+![crnn_demo](./models/text_recognition_crnn/example_outputs/CRNNCTC.gif)
+
+## License
+
+OpenCV Zoo is licensed under the [Apache 2.0 license](./LICENSE). Please also refer to the licenses of the individual models.
opencv_zoo/models/__init__.py ADDED
@@ -0,0 +1,96 @@
+from pathlib import Path
+import glob
+import os
+
+from .face_detection_yunet.yunet import YuNet
+from .text_detection_db.db import DB
+from .text_recognition_crnn.crnn import CRNN
+from .face_recognition_sface.sface import SFace
+from .image_classification_ppresnet.ppresnet import PPResNet
+from .human_segmentation_pphumanseg.pphumanseg import PPHumanSeg
+from .person_detection_mediapipe.mp_persondet import MPPersonDet
+from .pose_estimation_mediapipe.mp_pose import MPPose
+from .qrcode_wechatqrcode.wechatqrcode import WeChatQRCode
+from .object_tracking_dasiamrpn.dasiamrpn import DaSiamRPN
+from .person_reid_youtureid.youtureid import YoutuReID
+from .image_classification_mobilenet.mobilenet import MobileNet
+from .palm_detection_mediapipe.mp_palmdet import MPPalmDet
+from .handpose_estimation_mediapipe.mp_handpose import MPHandPose
+from .license_plate_detection_yunet.lpd_yunet import LPD_YuNet
+from .object_detection_nanodet.nanodet import NanoDet
+from .object_detection_yolox.yolox import YoloX
+from .facial_expression_recognition.facial_fer_model import FacialExpressionRecog
+
+class ModuleRegistery:
+    def __init__(self, name):
+        self._name = name
+        self._dict = dict()
+
+        self._base_path = Path(__file__).parent
+
+    def get(self, key):
+        '''
+        Returns a tuple with:
+        - a module handler,
+        - a dict of model file paths keyed by precision (fp32, fp16, int8)
+        '''
+        return self._dict[key]
+
+    def register(self, item):
+        '''
+        Registers the given module handler along with the paths of its model files
+        '''
+        # search for model files
+        model_dir = str(self._base_path / item.__module__.split(".")[1])
+        fp32_model_paths = []
+        fp16_model_paths = []
+        int8_model_paths = []
+        # onnx
+        ret_onnx = sorted(glob.glob(os.path.join(model_dir, "*.onnx")))
+        if "object_tracking" in item.__module__:
+            # object tracking models usually have multiple parts
+            fp32_model_paths = [ret_onnx]
+        else:
+            for r in ret_onnx:
+                if "int8" in r:
+                    int8_model_paths.append([r])
+                elif "fp16" in r:  # exclude fp16 for now
+                    fp16_model_paths.append([r])
+                else:
+                    fp32_model_paths.append([r])
+        # caffe
+        ret_caffemodel = sorted(glob.glob(os.path.join(model_dir, "*.caffemodel")))
+        ret_prototxt = sorted(glob.glob(os.path.join(model_dir, "*.prototxt")))
+        caffe_models = []
+        for caffemodel, prototxt in zip(ret_caffemodel, ret_prototxt):
+            caffe_models += [prototxt, caffemodel]
+        if caffe_models:
+            fp32_model_paths.append(caffe_models)
+
+        all_model_paths = dict(
+            fp32=fp32_model_paths,
+            fp16=fp16_model_paths,
+            int8=int8_model_paths,
+        )
+
+        self._dict[item.__name__] = (item, all_model_paths)
+
+MODELS = ModuleRegistery('Models')
+MODELS.register(YuNet)
+MODELS.register(DB)
+MODELS.register(CRNN)
+MODELS.register(SFace)
+MODELS.register(PPResNet)
+MODELS.register(PPHumanSeg)
+MODELS.register(MPPersonDet)
+MODELS.register(MPPose)
+MODELS.register(WeChatQRCode)
+MODELS.register(DaSiamRPN)
+MODELS.register(YoutuReID)
+MODELS.register(MobileNet)
+MODELS.register(MPPalmDet)
+MODELS.register(MPHandPose)
+MODELS.register(LPD_YuNet)
+MODELS.register(NanoDet)
+MODELS.register(YoloX)
+MODELS.register(FacialExpressionRecog)
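
As a hedged sketch of how this registry is meant to be consumed: the package-level imports above reference many model directories that are not part of this upload (this Space only ships the two YuNet models), so the snippet below assumes a full `opencv_zoo` checkout with the LFS model files pulled.

```python
# Hedged sketch: look up a registered model by its class name.
# Assumes a full opencv_zoo checkout so that models/__init__.py imports succeed.
from opencv_zoo.models import MODELS

handler, model_paths = MODELS.get("YuNet")   # keys are the registered __name__s
fp32_files = model_paths["fp32"]             # list of per-model file lists
print(fp32_files)                            # e.g. [['.../face_detection_yunet_2023mar.onnx']]

# Instantiate the handler with the first fp32 model file
model = handler(modelPath=fp32_files[0][0])
```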
opencv_zoo/models/face_detection_yunet/.demo.py.swp ADDED
Binary file (16.4 kB)
 
opencv_zoo/models/face_detection_yunet/CMakeLists.txt ADDED
@@ -0,0 +1,11 @@
+cmake_minimum_required(VERSION 3.24.0)
+project(opencv_zoo_face_detection_yunet)
+
+set(OPENCV_VERSION "4.8.0")
+set(OPENCV_INSTALLATION_PATH "" CACHE PATH "Where to look for OpenCV installation")
+
+# Find OpenCV
+find_package(OpenCV ${OPENCV_VERSION} REQUIRED HINTS ${OPENCV_INSTALLATION_PATH})
+
+add_executable(demo demo.cpp)
+target_link_libraries(demo ${OpenCV_LIBS})
opencv_zoo/models/face_detection_yunet/LICENSE ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 Shiqi Yu <shiqi.yu@gmail.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
opencv_zoo/models/face_detection_yunet/README.md ADDED
@@ -0,0 +1,67 @@
+# YuNet
+
+YuNet is a lightweight, fast and accurate face detection model, which achieves 0.834 (AP_easy), 0.824 (AP_medium) and 0.708 (AP_hard) on the WIDER Face validation set.
+
+Notes:
+
+- Model source: [here](https://github.com/ShiqiYu/libfacedetection.train/blob/a61a428929148171b488f024b5d6774f93cdbc13/tasks/task1/onnx/yunet.onnx).
+- Due to its training scheme, this model can detect **faces of roughly 10x10 to 300x300 pixels**.
+- For details on training this model, please visit https://github.com/ShiqiYu/libfacedetection.train.
+- This ONNX model has a fixed input shape, but OpenCV DNN infers on the exact shape of the input image. See https://github.com/opencv/opencv_zoo/issues/44 for more information.
+
+Results of accuracy evaluation with [tools/eval](../../tools/eval):
+
+| Models      | Easy AP | Medium AP | Hard AP |
+| ----------- | ------- | --------- | ------- |
+| YuNet       | 0.8871  | 0.8710    | 0.7681  |
+| YuNet quant | 0.8838  | 0.8683    | 0.7676  |
+
+\*: 'quant' stands for 'quantized'.
+
+## Demo
+
+### Python
+
+Run the following commands to try the demo:
+
+```shell
+# detect on camera input
+python demo.py
+# detect on an image
+python demo.py --input /path/to/image -v
+
+# get help regarding various parameters
+python demo.py --help
+```
+
+### C++
+
+Install the latest OpenCV and CMake >= 3.24.0 to get started:
+
+```shell
+# A typical and default installation path of OpenCV is /usr/local
+cmake -B build -D OPENCV_INSTALLATION_PATH=/path/to/opencv/installation .
+cmake --build build
+
+# detect on camera input
+./build/demo
+# detect on an image
+./build/demo -i=/path/to/image -v
+# get help messages
+./build/demo -h
+```
+
+### Example outputs
+
+![webcam demo](./example_outputs/yunet_demo.gif)
+
+![largest selfie](./example_outputs/largest_selfie.jpg)
+
+## License
+
+All files in this directory are licensed under the [MIT License](./LICENSE).
+
+## Reference
+
+- https://github.com/ShiqiYu/libfacedetection
+- https://github.com/ShiqiYu/libfacedetection.train
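
To make the fixed-input-shape note above concrete, here is a small hedged sketch (the model and image paths are placeholders) using OpenCV's `cv.FaceDetectorYN` directly: the detector is created with the nominal 320x320 size, then `setInputSize` is called with each image's actual width and height before `detect`.

```python
# Hedged sketch of the input-size note; paths are placeholders.
import cv2 as cv

detector = cv.FaceDetectorYN.create(
    "face_detection_yunet_2023mar.onnx", "", (320, 320),
    score_threshold=0.9, nms_threshold=0.3, top_k=5000)

img = cv.imread("/path/to/image")
h, w, _ = img.shape
detector.setInputSize((w, h))       # must match the actual image size
_, faces = detector.detect(img)     # faces: N x 15 float array, or None
print(0 if faces is None else faces.shape[0], "faces detected")
```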
opencv_zoo/models/face_detection_yunet/__pycache__/yunet.cpython-311.pyc ADDED
Binary file (2.66 kB)
 
opencv_zoo/models/face_detection_yunet/demo.cpp ADDED
@@ -0,0 +1,220 @@
+#include "opencv2/opencv.hpp"
+
+#include <map>
+#include <vector>
+#include <string>
+#include <iostream>
+
+const std::map<std::string, int> str2backend{
+    {"opencv", cv::dnn::DNN_BACKEND_OPENCV}, {"cuda", cv::dnn::DNN_BACKEND_CUDA},
+    {"timvx",  cv::dnn::DNN_BACKEND_TIMVX},  {"cann", cv::dnn::DNN_BACKEND_CANN}
+};
+const std::map<std::string, int> str2target{
+    {"cpu", cv::dnn::DNN_TARGET_CPU}, {"cuda", cv::dnn::DNN_TARGET_CUDA},
+    {"npu", cv::dnn::DNN_TARGET_NPU}, {"cuda_fp16", cv::dnn::DNN_TARGET_CUDA_FP16}
+};
+
+class YuNet
+{
+public:
+    YuNet(const std::string& model_path,
+          const cv::Size& input_size = cv::Size(320, 320),
+          float conf_threshold = 0.6f,
+          float nms_threshold = 0.3f,
+          int top_k = 5000,
+          int backend_id = 0,
+          int target_id = 0)
+        : model_path_(model_path), input_size_(input_size),
+          conf_threshold_(conf_threshold), nms_threshold_(nms_threshold),
+          top_k_(top_k), backend_id_(backend_id), target_id_(target_id)
+    {
+        model = cv::FaceDetectorYN::create(model_path_, "", input_size_, conf_threshold_, nms_threshold_, top_k_, backend_id_, target_id_);
+    }
+
+    void setBackendAndTarget(int backend_id, int target_id)
+    {
+        backend_id_ = backend_id;
+        target_id_ = target_id;
+        model = cv::FaceDetectorYN::create(model_path_, "", input_size_, conf_threshold_, nms_threshold_, top_k_, backend_id_, target_id_);
+    }
+
+    /* Overwrite the input size when creating the model. Size format: [Width, Height].
+    */
+    void setInputSize(const cv::Size& input_size)
+    {
+        input_size_ = input_size;
+        model->setInputSize(input_size_);
+    }
+
+    cv::Mat infer(const cv::Mat& image)
+    {
+        cv::Mat res;
+        model->detect(image, res);
+        return res;
+    }
+
+private:
+    cv::Ptr<cv::FaceDetectorYN> model;
+
+    std::string model_path_;
+    cv::Size input_size_;
+    float conf_threshold_;
+    float nms_threshold_;
+    int top_k_;
+    int backend_id_;
+    int target_id_;
+};
+
+cv::Mat visualize(const cv::Mat& image, const cv::Mat& faces, float fps = -1.f)
+{
+    static cv::Scalar box_color{0, 255, 0};
+    static std::vector<cv::Scalar> landmark_color{
+        cv::Scalar(255,   0,   0), // right eye
+        cv::Scalar(  0,   0, 255), // left eye
+        cv::Scalar(  0, 255,   0), // nose tip
+        cv::Scalar(255,   0, 255), // right mouth corner
+        cv::Scalar(  0, 255, 255)  // left mouth corner
+    };
+    static cv::Scalar text_color{0, 255, 0};
+
+    auto output_image = image.clone();
+
+    if (fps >= 0)
+    {
+        cv::putText(output_image, cv::format("FPS: %.2f", fps), cv::Point(0, 15), cv::FONT_HERSHEY_SIMPLEX, 0.5, text_color, 2);
+    }
+
+    for (int i = 0; i < faces.rows; ++i)
+    {
+        // Draw bounding boxes
+        int x1 = static_cast<int>(faces.at<float>(i, 0));
+        int y1 = static_cast<int>(faces.at<float>(i, 1));
+        int w = static_cast<int>(faces.at<float>(i, 2));
+        int h = static_cast<int>(faces.at<float>(i, 3));
+        cv::rectangle(output_image, cv::Rect(x1, y1, w, h), box_color, 2);
+
+        // Confidence as text
+        float conf = faces.at<float>(i, 14);
+        cv::putText(output_image, cv::format("%.4f", conf), cv::Point(x1, y1+12), cv::FONT_HERSHEY_DUPLEX, 0.5, text_color);
+
+        // Draw landmarks
+        for (int j = 0; j < static_cast<int>(landmark_color.size()); ++j)
+        {
+            int x = static_cast<int>(faces.at<float>(i, 2*j+4)), y = static_cast<int>(faces.at<float>(i, 2*j+5));
+            cv::circle(output_image, cv::Point(x, y), 2, landmark_color[j], 2);
+        }
+    }
+    return output_image;
+}
+
+int main(int argc, char** argv)
+{
+    cv::CommandLineParser parser(argc, argv,
+        "{help h | | Print this message}"
+        "{input i | | Set input to a certain image, omit if using camera}"
+        "{model m | face_detection_yunet_2023mar.onnx | Set path to the model}"
+        "{backend b | opencv | Set DNN backend}"
+        "{target t | cpu | Set DNN target}"
+        "{save s | false | Whether to save result image or not}"
+        "{vis v | false | Whether to visualize result image or not}"
+        /* model params below */
+        "{conf_threshold | 0.9 | Set the minimum confidence for the model to identify a face. Filter out faces of conf < conf_threshold}"
+        "{nms_threshold | 0.3 | Set the threshold to suppress overlapped boxes. Suppress boxes if IoU(box1, box2) >= nms_threshold; the one with the higher score is kept.}"
+        "{top_k | 5000 | Keep top_k bounding boxes before NMS. Setting a lower value may help speed up postprocessing.}"
+    );
+    if (parser.has("help"))
+    {
+        parser.printMessage();
+        return 0;
+    }
+
+    std::string input_path = parser.get<std::string>("input");
+    std::string model_path = parser.get<std::string>("model");
+    std::string backend = parser.get<std::string>("backend");
+    std::string target = parser.get<std::string>("target");
+    bool save_flag = parser.get<bool>("save");
+    bool vis_flag = parser.get<bool>("vis");
+
+    // model params
+    float conf_threshold = parser.get<float>("conf_threshold");
+    float nms_threshold = parser.get<float>("nms_threshold");
+    int top_k = parser.get<int>("top_k");
+    const int backend_id = str2backend.at(backend);
+    const int target_id = str2target.at(target);
+
+    // Instantiate YuNet
+    YuNet model(model_path, cv::Size(320, 320), conf_threshold, nms_threshold, top_k, backend_id, target_id);
+
+    // If input is an image
+    if (!input_path.empty())
+    {
+        auto image = cv::imread(input_path);
+
+        // Inference
+        model.setInputSize(image.size());
+        auto faces = model.infer(image);
+
+        // Print faces
+        std::cout << cv::format("%d faces detected:\n", faces.rows);
+        for (int i = 0; i < faces.rows; ++i)
+        {
+            int x1 = static_cast<int>(faces.at<float>(i, 0));
+            int y1 = static_cast<int>(faces.at<float>(i, 1));
+            int w = static_cast<int>(faces.at<float>(i, 2));
+            int h = static_cast<int>(faces.at<float>(i, 3));
+            float conf = faces.at<float>(i, 14);
+            std::cout << cv::format("%d: x1=%d, y1=%d, w=%d, h=%d, conf=%.4f\n", i, x1, y1, w, h, conf);
+        }
+
+        // Draw results on the input image
+        if (save_flag || vis_flag)
+        {
+            auto res_image = visualize(image, faces);
+            if (save_flag)
+            {
+                std::cout << "Results are saved to result.jpg\n";
+                cv::imwrite("result.jpg", res_image);
+            }
+            if (vis_flag)
+            {
+                cv::namedWindow(input_path, cv::WINDOW_AUTOSIZE);
+                cv::imshow(input_path, res_image);
+                cv::waitKey(0);
+            }
+        }
+    }
+    else // Call default camera
+    {
+        int device_id = 0;
+        auto cap = cv::VideoCapture(device_id);
+        int w = static_cast<int>(cap.get(cv::CAP_PROP_FRAME_WIDTH));
+        int h = static_cast<int>(cap.get(cv::CAP_PROP_FRAME_HEIGHT));
+        model.setInputSize(cv::Size(w, h));
+
+        auto tick_meter = cv::TickMeter();
+        cv::Mat frame;
+        while (cv::waitKey(1) < 0)
+        {
+            bool has_frame = cap.read(frame);
+            if (!has_frame)
+            {
+                std::cout << "No frames grabbed! Exiting ...\n";
+                break;
+            }
+
+            // Inference
+            tick_meter.start();
+            cv::Mat faces = model.infer(frame);
+            tick_meter.stop();
+
+            // Draw results on the input image
+            auto res_image = visualize(frame, faces, (float)tick_meter.getFPS());
+            // Visualize in a new window
+            cv::imshow("YuNet Demo", res_image);
+
+            tick_meter.reset();
+        }
+    }
+
+    return 0;
+}
opencv_zoo/models/face_detection_yunet/demo.py ADDED
@@ -0,0 +1,145 @@
+# This file is part of OpenCV Zoo project.
+# It is subject to the license terms in the LICENSE file found in the same directory.
+#
+# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved.
+# Third party copyrights are property of their respective owners.
+
+import argparse
+
+import numpy as np
+import cv2 as cv
+
+from yunet import YuNet
+
+# Check OpenCV version
+assert cv.__version__ >= "4.8.0", \
+       "Please install latest opencv-python to try this demo: python3 -m pip install --upgrade opencv-python"
+
+# Valid combinations of backends and targets
+backend_target_pairs = [
+    [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU],
+    [cv.dnn.DNN_BACKEND_CUDA,   cv.dnn.DNN_TARGET_CUDA],
+    [cv.dnn.DNN_BACKEND_CUDA,   cv.dnn.DNN_TARGET_CUDA_FP16],
+    [cv.dnn.DNN_BACKEND_TIMVX,  cv.dnn.DNN_TARGET_NPU],
+    [cv.dnn.DNN_BACKEND_CANN,   cv.dnn.DNN_TARGET_NPU]
+]
+
+parser = argparse.ArgumentParser(description='YuNet: A Fast and Accurate CNN-based Face Detector (https://github.com/ShiqiYu/libfacedetection).')
+parser.add_argument('--input', '-i', type=str,
+                    help='Usage: Set input to a certain image, omit if using camera.')
+parser.add_argument('--model', '-m', type=str, default='face_detection_yunet_2023mar.onnx',
+                    help="Usage: Set model type, defaults to 'face_detection_yunet_2023mar.onnx'.")
+parser.add_argument('--backend_target', '-bt', type=int, default=0,
+                    help='''Choose one of the backend-target pairs to run this demo:
+                        {:d}: (default) OpenCV implementation + CPU,
+                        {:d}: CUDA + GPU (CUDA),
+                        {:d}: CUDA + GPU (CUDA FP16),
+                        {:d}: TIM-VX + NPU,
+                        {:d}: CANN + NPU
+                    '''.format(*[x for x in range(len(backend_target_pairs))]))
+parser.add_argument('--conf_threshold', type=float, default=0.9,
+                    help='Usage: Set the minimum needed confidence for the model to identify a face, defaults to 0.9. Smaller values may result in faster detection, but will limit accuracy. Filter out faces of confidence < conf_threshold.')
+parser.add_argument('--nms_threshold', type=float, default=0.3,
+                    help='Usage: Suppress bounding boxes of iou >= nms_threshold. Default = 0.3.')
+parser.add_argument('--top_k', type=int, default=5000,
+                    help='Usage: Keep top_k bounding boxes before NMS.')
+parser.add_argument('--save', '-s', action='store_true',
+                    help='Usage: Specify to save file with results (i.e. bounding box, confidence level). Invalid in case of camera input.')
+parser.add_argument('--vis', '-v', action='store_true',
+                    help='Usage: Specify to open a new window to show results. Invalid in case of camera input.')
+args = parser.parse_args()
+
+def visualize(image, results, box_color=(0, 255, 0), text_color=(0, 0, 255), fps=None):
+    output = image.copy()
+    landmark_color = [
+        (255,   0,   0), # right eye
+        (  0,   0, 255), # left eye
+        (  0, 255,   0), # nose tip
+        (255,   0, 255), # right mouth corner
+        (  0, 255, 255)  # left mouth corner
+    ]
+
+    if fps is not None:
+        cv.putText(output, 'FPS: {:.2f}'.format(fps), (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, text_color)
+
+    for det in (results if results is not None else []):
+        bbox = det[0:4].astype(np.int32)
+        cv.rectangle(output, (bbox[0], bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), box_color, 2)
+
+        conf = det[-1]
+        cv.putText(output, '{:.4f}'.format(conf), (bbox[0], bbox[1]+12), cv.FONT_HERSHEY_DUPLEX, 0.5, text_color)
+
+        landmarks = det[4:14].astype(np.int32).reshape((5, 2))
+        for idx, landmark in enumerate(landmarks):
+            cv.circle(output, landmark, 2, landmark_color[idx], 2)
+
+    return output
+
+if __name__ == '__main__':
+    backend_id = backend_target_pairs[args.backend_target][0]
+    target_id = backend_target_pairs[args.backend_target][1]
+
+    # Instantiate YuNet
+    model = YuNet(modelPath=args.model,
+                  inputSize=[320, 320],
+                  confThreshold=args.conf_threshold,
+                  nmsThreshold=args.nms_threshold,
+                  topK=args.top_k,
+                  backendId=backend_id,
+                  targetId=target_id)
+
+    # If input is an image
+    if args.input is not None:
+        image = cv.imread(args.input)
+        h, w, _ = image.shape
+
+        # Inference
+        model.setInputSize([w, h])
+        results = model.infer(image)
+
+        # Print results (results is None if no face is detected)
+        print('{} faces detected.'.format(0 if results is None else results.shape[0]))
+        for idx, det in enumerate(results if results is not None else []):
+            print('{}: {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f}'.format(
+                idx, *det[:-1])
+            )
+
+        # Draw results on the input image
+        image = visualize(image, results)
+
+        # Save results if save is true
+        if args.save:
+            print('Results saved to result.jpg\n')
+            cv.imwrite('result.jpg', image)
+
+        # Visualize results in a new window
+        if args.vis:
+            cv.namedWindow(args.input, cv.WINDOW_AUTOSIZE)
+            cv.imshow(args.input, image)
+            cv.waitKey(0)
+    else: # Omit input to call default camera
+        deviceId = 0
+        cap = cv.VideoCapture(deviceId)
+        w = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
+        h = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
+        model.setInputSize([w, h])
+
+        tm = cv.TickMeter()
+        while cv.waitKey(1) < 0:
+            hasFrame, frame = cap.read()
+            if not hasFrame:
+                print('No frames grabbed!')
+                break
+
+            # Inference
+            tm.start()
+            results = model.infer(frame)  # N x 15 ndarray, or None
+            tm.stop()
+
+            # Draw results on the input image
+            frame = visualize(frame, results, fps=tm.getFPS())
+
+            # Visualize results in a new window
+            cv.imshow('YuNet Demo', frame)
+
+            tm.reset()
opencv_zoo/models/face_detection_yunet/example_outputs/largest_selfie.jpg ADDED

Git LFS Details

  • SHA256: ab8413ad9bb4f53068f4fb63c6747e5989991dd02241c923d5595b614ecf2bf6
  • Pointer size: 132 Bytes
  • Size of remote file: 1.15 MB
opencv_zoo/models/face_detection_yunet/example_outputs/yunet_demo.gif ADDED
opencv_zoo/models/face_detection_yunet/face_detection_yunet_2023mar.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f2383e4dd3cfbb4553ea8718107fc0423210dc964f9f4280604804ed2552fa4
+size 232589
opencv_zoo/models/face_detection_yunet/face_detection_yunet_2023mar_int8.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:321aa5a6afabf7ecc46a3d06bfab2b579dc96eb5c3be7edd365fa04502ad9294
+size 100416
opencv_zoo/models/face_detection_yunet/yunet.py ADDED
@@ -0,0 +1,55 @@
+# This file is part of OpenCV Zoo project.
+# It is subject to the license terms in the LICENSE file found in the same directory.
+#
+# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved.
+# Third party copyrights are property of their respective owners.
+
+from itertools import product
+
+import numpy as np
+import cv2 as cv
+
+class YuNet:
+    def __init__(self, modelPath, inputSize=[320, 320], confThreshold=0.6, nmsThreshold=0.3, topK=5000, backendId=0, targetId=0):
+        self._modelPath = modelPath
+        self._inputSize = tuple(inputSize)  # [w, h]
+        self._confThreshold = confThreshold
+        self._nmsThreshold = nmsThreshold
+        self._topK = topK
+        self._backendId = backendId
+        self._targetId = targetId
+
+        self._model = cv.FaceDetectorYN.create(
+            model=self._modelPath,
+            config="",
+            input_size=self._inputSize,
+            score_threshold=self._confThreshold,
+            nms_threshold=self._nmsThreshold,
+            top_k=self._topK,
+            backend_id=self._backendId,
+            target_id=self._targetId)
+
+    @property
+    def name(self):
+        return self.__class__.__name__
+
+    def setBackendAndTarget(self, backendId, targetId):
+        self._backendId = backendId
+        self._targetId = targetId
+        self._model = cv.FaceDetectorYN.create(
+            model=self._modelPath,
+            config="",
+            input_size=self._inputSize,
+            score_threshold=self._confThreshold,
+            nms_threshold=self._nmsThreshold,
+            top_k=self._topK,
+            backend_id=self._backendId,
+            target_id=self._targetId)
+
+    def setInputSize(self, input_size):
+        self._model.setInputSize(tuple(input_size))
+
+    def infer(self, image):
+        # Forward
+        faces = self._model.detect(image)
+        return faces[1]
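
Each row returned by `infer` packs one face's box, five landmarks, and a score into 15 floats. A small hedged helper, not part of the commit, that unpacks one row following the same slicing used by `demo.py` above:

```python
# Hedged helper: unpack one YuNet detection row (layout as parsed in demo.py).
import numpy as np

def unpack_detection(det):
    """det: 1-D array of 15 floats -> (bbox, landmarks, score)."""
    bbox = det[0:4].astype(np.int32)                       # x, y, w, h
    landmarks = det[4:14].astype(np.int32).reshape(5, 2)   # eyes, nose, mouth corners
    score = float(det[-1])
    return bbox, landmarks, score
```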
opencv_zoo/models/license_plate_detection_yunet/LICENSE ADDED
@@ -0,0 +1,203 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
158
+ liable to You for damages, including any direct, indirect, special,
159
+ incidental, or consequential damages of any character arising as a
160
+ result of this License or out of the use or inability to use the
161
+ Work (including but not limited to damages for loss of goodwill,
162
+ work stoppage, computer failure or malfunction, or any and all
163
+ other commercial damages or losses), even if such Contributor
164
+ has been advised of the possibility of such damages.
165
+
166
+ 9. Accepting Warranty or Additional Liability. While redistributing
167
+ the Work or Derivative Works thereof, You may choose to offer,
168
+ and charge a fee for, acceptance of support, warranty, indemnity,
169
+ or other liability obligations and/or rights consistent with this
170
+ License. However, in accepting such obligations, You may act only
171
+ on Your own behalf and on Your sole responsibility, not on behalf
172
+ of any other Contributor, and only if You agree to indemnify,
173
+ defend, and hold each Contributor harmless for any liability
174
+ incurred by, or claims asserted against, such Contributor by reason
175
+ of your accepting any such warranty or additional liability.
176
+
177
+ END OF TERMS AND CONDITIONS
178
+
179
+ APPENDIX: How to apply the Apache License to your work.
180
+
181
+ To apply the Apache License to your work, attach the following
182
+ boilerplate notice, with the fields enclosed by brackets "[]"
183
+ replaced with your own identifying information. (Don't include
184
+ the brackets!) The text should be enclosed in the appropriate
185
+ comment syntax for the file format. We also recommend that a
186
+ file or class name and description of purpose be included on the
187
+ same "printed page" as the copyright notice for easier
188
+ identification within third-party archives.
189
+
190
+ Copyright (c) 2022 WATRIX
191
+ Author: Dong Xu
192
+
193
+ Licensed under the Apache License, Version 2.0 (the "License");
194
+ you may not use this file except in compliance with the License.
195
+ You may obtain a copy of the License at
196
+
197
+ http://www.apache.org/licenses/LICENSE-2.0
198
+
199
+ Unless required by applicable law or agreed to in writing, software
200
+ distributed under the License is distributed on an "AS IS" BASIS,
201
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
202
+ See the License for the specific language governing permissions and
203
+ limitations under the License.
opencv_zoo/models/license_plate_detection_yunet/README.md ADDED
@@ -0,0 +1,30 @@
+ # License Plate Detection with YuNet
+
+ This model is contributed by Dong Xu (徐栋) from [watrix.ai](https://watrix.ai) (银河水滴).
+
+ Please note that the model is trained on Chinese license plates, so detection performance on license plates from other regions may be limited.
+
+ ## Demo
+
+ Run any of the following commands to try the demo:
+
+ ```shell
+ # detect on camera input
+ python demo.py
+ # detect on an image and show the result in a window
+ python demo.py --input /path/to/image -v
+ # get help regarding various parameters
+ python demo.py --help
+ ```
+
+ ### Example outputs
+
+ ![lpd](./example_outputs/lpd_yunet_demo.gif)
+
+ ## License
+
+ All files in this directory are licensed under the [Apache 2.0 License](./LICENSE).
+
+ ## Reference
+
+ - https://github.com/ShiqiYu/libfacedetection.train
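For use outside the command-line demo, the detector class in `lpd_yunet.py` can also be driven directly from Python. The following is a minimal sketch mirroring what `demo.py` does for image input; the input file name `cars.jpg` is an illustrative assumption, not a file shipped with this directory.

```python
import cv2 as cv

from lpd_yunet import LPD_YuNet

# Load the FP32 model shipped in this directory
model = LPD_YuNet(modelPath='license_plate_detection_lpd_yunet_2023mar.onnx')

image = cv.imread('cars.jpg')  # hypothetical input image
h, w, _ = image.shape
model.setInputSize([w, h])     # infer() asserts the image matches this size

# dets is an ndarray of shape (num_plates, 9): four corner points plus a confidence score
dets = model.infer(image)
print('{} license plates detected.'.format(dets.shape[0]))
```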
opencv_zoo/models/license_plate_detection_yunet/__pycache__/lpd_yunet.cpython-311.pyc ADDED
Binary file (8.64 kB)
opencv_zoo/models/license_plate_detection_yunet/demo.py ADDED
@@ -0,0 +1,129 @@
+ import argparse
+
+ import numpy as np
+ import cv2 as cv
+
+ from lpd_yunet import LPD_YuNet
+
+ # Check OpenCV version
+ assert cv.__version__ >= "4.8.0", \
+        "Please install latest opencv-python to try this demo: python3 -m pip install --upgrade opencv-python"
+
+ # Valid combinations of backends and targets
+ backend_target_pairs = [
+     [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU],
+     [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA],
+     [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16],
+     [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU],
+     [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU]
+ ]
+
+ parser = argparse.ArgumentParser(description='LPD-YuNet for License Plate Detection')
+ parser.add_argument('--input', '-i', type=str,
+                     help='Usage: Set path to the input image. Omit for using default camera.')
+ parser.add_argument('--model', '-m', type=str, default='license_plate_detection_lpd_yunet_2023mar.onnx',
+                     help='Usage: Set model path, defaults to license_plate_detection_lpd_yunet_2023mar.onnx.')
+ parser.add_argument('--backend_target', '-bt', type=int, default=0,
+                     help='''Choose one of the backend-target pairs to run this demo:
+                         {:d}: (default) OpenCV implementation + CPU,
+                         {:d}: CUDA + GPU (CUDA),
+                         {:d}: CUDA + GPU (CUDA FP16),
+                         {:d}: TIM-VX + NPU,
+                         {:d}: CANN + NPU
+                     '''.format(*[x for x in range(len(backend_target_pairs))]))
+ parser.add_argument('--conf_threshold', type=float, default=0.9,
+                     help='Usage: Set the minimum confidence needed for the model to identify a license plate, defaults to 0.9. Detections with confidence < conf_threshold are filtered out; lower values keep more candidates but may admit false positives.')
+ parser.add_argument('--nms_threshold', type=float, default=0.3,
+                     help='Usage: Suppress bounding boxes of iou >= nms_threshold, defaults to 0.3.')
+ parser.add_argument('--top_k', type=int, default=5000,
+                     help='Usage: Keep top_k bounding boxes before NMS.')
+ parser.add_argument('--keep_top_k', type=int, default=750,
+                     help='Usage: Keep keep_top_k bounding boxes after NMS.')
+ parser.add_argument('--save', '-s', action='store_true',
+                     help='Usage: Specify to save a file with results (i.e. bounding box, confidence level). Invalid in case of camera input.')
+ parser.add_argument('--vis', '-v', action='store_true',
+                     help='Usage: Specify to open a new window to show results. Invalid in case of camera input.')
+ args = parser.parse_args()
+
+ def visualize(image, dets, line_color=(0, 255, 0), text_color=(0, 0, 255), fps=None):
+     output = image.copy()
+
+     if fps is not None:
+         cv.putText(output, 'FPS: {:.2f}'.format(fps), (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, text_color)
+
+     for det in dets:
+         bbox = det[:-1].astype(np.int32)
+         x1, y1, x2, y2, x3, y3, x4, y4 = bbox
+
+         # Draw the border of the license plate quadrilateral
+         cv.line(output, (x1, y1), (x2, y2), line_color, 2)
+         cv.line(output, (x2, y2), (x3, y3), line_color, 2)
+         cv.line(output, (x3, y3), (x4, y4), line_color, 2)
+         cv.line(output, (x4, y4), (x1, y1), line_color, 2)
+
+     return output
+
+ if __name__ == '__main__':
+     backend_id = backend_target_pairs[args.backend_target][0]
+     target_id = backend_target_pairs[args.backend_target][1]
+
+     # Instantiate LPD-YuNet
+     model = LPD_YuNet(modelPath=args.model,
+                       confThreshold=args.conf_threshold,
+                       nmsThreshold=args.nms_threshold,
+                       topK=args.top_k,
+                       keepTopK=args.keep_top_k,
+                       backendId=backend_id,
+                       targetId=target_id)
+
+     # If input is an image
+     if args.input is not None:
+         image = cv.imread(args.input)
+         h, w, _ = image.shape
+
+         # Inference
+         model.setInputSize([w, h])
+         results = model.infer(image)
+
+         # Print results
+         print('{} license plates detected.'.format(results.shape[0]))
+
+         # Draw results on the input image
+         image = visualize(image, results)
+
+         # Save results if save is true
+         if args.save:
+             print('Results saved to result.jpg')
+             cv.imwrite('result.jpg', image)
+
+         # Visualize results in a new window
+         if args.vis:
+             cv.namedWindow(args.input, cv.WINDOW_AUTOSIZE)
+             cv.imshow(args.input, image)
+             cv.waitKey(0)
+     else:  # Omit input to call default camera
+         deviceId = 0
+         cap = cv.VideoCapture(deviceId)
+         w = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
+         h = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
+         model.setInputSize([w, h])
+
+         tm = cv.TickMeter()
+         while cv.waitKey(1) < 0:
+             hasFrame, frame = cap.read()
+             if not hasFrame:
+                 print('No frames grabbed!')
+                 break
+
+             # Inference
+             tm.start()
+             results = model.infer(frame)  # ndarray of shape (num_plates, 9)
+             tm.stop()
+
+             # Draw results on the input image
+             frame = visualize(frame, results, fps=tm.getFPS())
+
+             # Visualize results in a new window
+             cv.imshow('LPD-YuNet Demo', frame)
+
+             tm.reset()
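Since this Space uses the detections for obfuscation rather than just visualization, here is a minimal companion sketch of one way the quadrilaterals returned by `infer()` could be blurred. `blur_plates` and its kernel size are illustrative assumptions, not part of `demo.py`.

```python
import numpy as np
import cv2 as cv

def blur_plates(image, dets, ksize=(51, 51)):
    # dets: ndarray of shape (num_plates, 9); det[:-1] holds the 4 corner points
    output = image.copy()
    for det in dets:
        quad = det[:-1].astype(np.int32).reshape(4, 2)
        x, y, w, h = cv.boundingRect(quad)
        x, y = max(x, 0), max(y, 0)  # clamp in case a corner falls outside the image
        roi = output[y:y + h, x:x + w]
        if roi.size > 0:
            output[y:y + h, x:x + w] = cv.GaussianBlur(roi, ksize, 0)
    return output
```

Blurring the axis-aligned bounding rectangle of the quadrilateral is simpler than masking the exact four-point polygon and is usually sufficient for obfuscation.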
opencv_zoo/models/license_plate_detection_yunet/example_outputs/lpd_yunet_demo.gif ADDED
opencv_zoo/models/license_plate_detection_yunet/example_outputs/result-1.jpg ADDED
opencv_zoo/models/license_plate_detection_yunet/example_outputs/result-2.jpg ADDED
opencv_zoo/models/license_plate_detection_yunet/example_outputs/result-3.jpg ADDED
opencv_zoo/models/license_plate_detection_yunet/example_outputs/result-4.jpg ADDED
opencv_zoo/models/license_plate_detection_yunet/license_plate_detection_lpd_yunet_2023mar.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d4978a7b6d25514d5e24811b82bfb511d166bdd8ca3b03aa63c1623d4d039c7
+ size 4146213
opencv_zoo/models/license_plate_detection_yunet/license_plate_detection_lpd_yunet_2023mar_int8.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d67982a014fe93ad04612f565ed23ca010dcb0fd925d880ef0edf9cd7bdf931a
+ size 1087142
opencv_zoo/models/license_plate_detection_yunet/lpd_yunet.py ADDED
@@ -0,0 +1,133 @@
+ from itertools import product
+
+ import numpy as np
+ import cv2 as cv
+
+ class LPD_YuNet:
+     def __init__(self, modelPath, inputSize=[320, 240], confThreshold=0.8, nmsThreshold=0.3, topK=5000, keepTopK=750, backendId=0, targetId=0):
+         self.model_path = modelPath
+         self.input_size = np.array(inputSize)
+         self.confidence_threshold = confThreshold
+         self.nms_threshold = nmsThreshold
+         self.top_k = topK
+         self.keep_top_k = keepTopK
+         self.backend_id = backendId
+         self.target_id = targetId
+
+         self.output_names = ['loc', 'conf', 'iou']
+         self.min_sizes = [[10, 16, 24], [32, 48], [64, 96], [128, 192, 256]]
+         self.steps = [8, 16, 32, 64]
+         self.variance = [0.1, 0.2]
+
+         # load model
+         self.model = cv.dnn.readNet(self.model_path)
+         # apply the requested backend and target (otherwise the backendId/targetId
+         # constructor arguments would have no effect)
+         self.model.setPreferableBackend(self.backend_id)
+         self.model.setPreferableTarget(self.target_id)
+         # generate anchors/priorboxes
+         self._priorGen()
+
+     @property
+     def name(self):
+         return self.__class__.__name__
+
+     def setBackendAndTarget(self, backendId, targetId):
+         self.backend_id = backendId
+         self.target_id = targetId
+         self.model.setPreferableBackend(self.backend_id)
+         self.model.setPreferableTarget(self.target_id)
+
+     def setInputSize(self, inputSize):
+         self.input_size = inputSize
+         # re-generate anchors/priorboxes
+         self._priorGen()
+
+     def _preprocess(self, image):
+         return cv.dnn.blobFromImage(image)
+
+     def infer(self, image):
+         assert image.shape[0] == self.input_size[1], '{} (height of input image) != {} (preset height)'.format(image.shape[0], self.input_size[1])
+         assert image.shape[1] == self.input_size[0], '{} (width of input image) != {} (preset width)'.format(image.shape[1], self.input_size[0])
+
+         # Preprocess
+         inputBlob = self._preprocess(image)
+
+         # Forward
+         self.model.setInput(inputBlob)
+         outputBlob = self.model.forward(self.output_names)
+
+         # Postprocess
+         results = self._postprocess(outputBlob)
+
+         return results
+
+     def _postprocess(self, blob):
+         # Decode
+         dets = self._decode(blob)
+
+         # NMS
+         keepIdx = cv.dnn.NMSBoxes(
+             bboxes=dets[:, 0:4].tolist(),
+             scores=dets[:, -1].tolist(),
+             score_threshold=self.confidence_threshold,
+             nms_threshold=self.nms_threshold,
+             top_k=self.top_k
+         )  # indices of the boxes kept after NMS
+         if len(keepIdx) > 0:
+             dets = dets[keepIdx]
+             return dets[:self.keep_top_k]
+         else:
+             return np.empty(shape=(0, 9))
+
+     def _priorGen(self):
+         w, h = self.input_size
+         feature_map_2th = [int(int((h + 1) / 2) / 2),
+                            int(int((w + 1) / 2) / 2)]
+         feature_map_3th = [int(feature_map_2th[0] / 2),
+                            int(feature_map_2th[1] / 2)]
+         feature_map_4th = [int(feature_map_3th[0] / 2),
+                            int(feature_map_3th[1] / 2)]
+         feature_map_5th = [int(feature_map_4th[0] / 2),
+                            int(feature_map_4th[1] / 2)]
+         feature_map_6th = [int(feature_map_5th[0] / 2),
+                            int(feature_map_5th[1] / 2)]
+
+         feature_maps = [feature_map_3th, feature_map_4th,
+                         feature_map_5th, feature_map_6th]
+
+         priors = []
+         for k, f in enumerate(feature_maps):
+             min_sizes = self.min_sizes[k]
+             for i, j in product(range(f[0]), range(f[1])):  # i->h, j->w
+                 for min_size in min_sizes:
+                     s_kx = min_size / w
+                     s_ky = min_size / h
+
+                     cx = (j + 0.5) * self.steps[k] / w
+                     cy = (i + 0.5) * self.steps[k] / h
+
+                     priors.append([cx, cy, s_kx, s_ky])
+         self.priors = np.array(priors, dtype=np.float32)
+
+     def _decode(self, blob):
+         loc, conf, iou = blob
+         # get score
+         cls_scores = conf[:, 1]
+         iou_scores = iou[:, 0]
+         # clamp iou scores to [0, 1]
+         _idx = np.where(iou_scores < 0.)
+         iou_scores[_idx] = 0.
+         _idx = np.where(iou_scores > 1.)
+         iou_scores[_idx] = 1.
+         scores = np.sqrt(cls_scores * iou_scores)
+         scores = scores[:, np.newaxis]
+
+         scale = self.input_size
+
+         # get the four corner points of the license plate quadrilateral
+         bboxes = np.hstack((
+             (self.priors[:, 0:2] + loc[:, 4: 6] * self.variance[0] * self.priors[:, 2:4]) * scale,
+             (self.priors[:, 0:2] + loc[:, 6: 8] * self.variance[0] * self.priors[:, 2:4]) * scale,
+             (self.priors[:, 0:2] + loc[:, 10:12] * self.variance[0] * self.priors[:, 2:4]) * scale,
+             (self.priors[:, 0:2] + loc[:, 12:14] * self.variance[0] * self.priors[:, 2:4]) * scale
+         ))
+
+         dets = np.hstack((bboxes, scores))
+         return dets
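As a sanity check on the decoding arithmetic in `_decode`, the following standalone sketch decodes a single corner for one hypothetical prior; all numeric values are illustrative, not taken from the model.

```python
import numpy as np

# One prior as produced by _priorGen: (cx, cy, s_kx, s_ky), all normalized to [0, 1]
prior = np.array([0.5, 0.5, 0.1, 0.1], dtype=np.float32)
# One corner offset from the model's 'loc' output (e.g. loc[:, 4:6] for the first corner)
offset = np.array([0.4, -0.2], dtype=np.float32)

variance = 0.1                # self.variance[0]
scale = np.array([320, 240])  # self.input_size as (w, h)

# corner_pixels = (prior_center + offset * variance * prior_size) * scale
corner = (prior[0:2] + offset * variance * prior[2:4]) * scale
print(corner)  # approximately [161.28 119.52], the corner in pixel coordinates
```

Note also the score formula: `sqrt(cls_score * iou_score)` rewards detections that are both confidently classified and well localized, rather than ranking on classification confidence alone.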
opencv_zoo/models/license_plate_detection_yunet/result.jpg ADDED
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ opencv-python-headless
+ numpy