Spaces:
Runtime error
Runtime error
Upload folder using huggingface_hub
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +6 -35
- .gitignore +1 -0
- EulerZ.py +51 -0
- README.md +128 -7
- README_CN.md +102 -0
- __pycache__/EulerZ.cpython-38.pyc +0 -0
- __pycache__/error.cpython-38.pyc +0 -0
- __pycache__/face_judgement_align.cpython-38.pyc +0 -0
- __pycache__/imageTransform.cpython-38.pyc +0 -0
- __pycache__/layoutCreate.cpython-38.pyc +0 -0
- __pycache__/move_image.cpython-38.pyc +0 -0
- app.py +216 -0
- beautyPlugin/GrindSkin.py +43 -0
- beautyPlugin/MakeBeautiful.py +45 -0
- beautyPlugin/MakeWhiter.py +108 -0
- beautyPlugin/ThinFace.py +267 -0
- beautyPlugin/__init__.py +4 -0
- beautyPlugin/__pycache__/GrindSkin.cpython-310.pyc +0 -0
- beautyPlugin/__pycache__/GrindSkin.cpython-38.pyc +0 -0
- beautyPlugin/__pycache__/MakeBeautiful.cpython-310.pyc +0 -0
- beautyPlugin/__pycache__/MakeBeautiful.cpython-38.pyc +0 -0
- beautyPlugin/__pycache__/MakeWhiter.cpython-310.pyc +0 -0
- beautyPlugin/__pycache__/MakeWhiter.cpython-38.pyc +0 -0
- beautyPlugin/__pycache__/ThinFace.cpython-310.pyc +0 -0
- beautyPlugin/__pycache__/ThinFace.cpython-38.pyc +0 -0
- beautyPlugin/__pycache__/__init__.cpython-310.pyc +0 -0
- beautyPlugin/__pycache__/__init__.cpython-38.pyc +0 -0
- beautyPlugin/lut_image/1.png +0 -0
- beautyPlugin/lut_image/3.png +0 -0
- beautyPlugin/lut_image/lutOrigin.png +0 -0
- cuny_tools.py +621 -0
- error.py +27 -0
- face_judgement_align.py +578 -0
- hivision_modnet.onnx +3 -0
- hivision_modnet.onnx.1 +3 -0
- hivisionai/__init__.py +0 -0
- hivisionai/__pycache__/__init__.cpython-310.pyc +0 -0
- hivisionai/__pycache__/__init__.cpython-38.pyc +0 -0
- hivisionai/__pycache__/app.cpython-310.pyc +0 -0
- hivisionai/app.py +452 -0
- hivisionai/hyService/__init__.py +0 -0
- hivisionai/hyService/__pycache__/__init__.cpython-310.pyc +0 -0
- hivisionai/hyService/__pycache__/__init__.cpython-38.pyc +0 -0
- hivisionai/hyService/__pycache__/cloudService.cpython-310.pyc +0 -0
- hivisionai/hyService/__pycache__/dbTools.cpython-310.pyc +0 -0
- hivisionai/hyService/__pycache__/error.cpython-310.pyc +0 -0
- hivisionai/hyService/__pycache__/error.cpython-38.pyc +0 -0
- hivisionai/hyService/__pycache__/serviceTest.cpython-310.pyc +0 -0
- hivisionai/hyService/__pycache__/utils.cpython-310.pyc +0 -0
- hivisionai/hyService/cloudService.py +406 -0
.gitattributes
CHANGED
@@ -1,35 +1,6 @@
|
|
1 |
-
|
2 |
-
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
-
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
-
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
-
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
-
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
-
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
-
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
-
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
-
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
-
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
-
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
-
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
-
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
-
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
-
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
-
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
-
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
-
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
-
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
-
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
-
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
-
*.tar filter=lfs diff=lfs merge=lfs -text
|
29 |
-
*.tflite filter=lfs diff=lfs merge=lfs -text
|
30 |
-
*.tgz filter=lfs diff=lfs merge=lfs -text
|
31 |
-
*.wasm filter=lfs diff=lfs merge=lfs -text
|
32 |
-
*.xz filter=lfs diff=lfs merge=lfs -text
|
33 |
-
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
-
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
-
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
1 |
+
hivision_modnet.onnx filter=lfs diff=lfs merge=lfs -text
|
2 |
+
hivision_modnet.onnx.1 filter=lfs diff=lfs merge=lfs -text
|
3 |
+
hivisionai/hycv/mtcnn_onnx/weights/onet.onnx filter=lfs diff=lfs merge=lfs -text
|
4 |
+
hivisionai/hycv/mtcnn_onnx/weights/pnet.onnx filter=lfs diff=lfs merge=lfs -text
|
5 |
+
hivisionai/hycv/mtcnn_onnx/weights/rnet.onnx filter=lfs diff=lfs merge=lfs -text
|
6 |
+
sources/demoImage.png filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
.gitignore
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
*.pyc
|
EulerZ.py
ADDED
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
@author: cuny
|
3 |
+
@file: EulerX.py
|
4 |
+
@time: 2022/4/1 13:54
|
5 |
+
@description:
|
6 |
+
寻找三维z轴旋转角roll,实现:
|
7 |
+
1. 输入一张三通道图片(四通道、单通道将默认转为三通道)
|
8 |
+
2. 输出人脸在x轴的转角roll,顺时针为正方向,角度制
|
9 |
+
"""
|
10 |
+
import cv2
|
11 |
+
import numpy as np
|
12 |
+
from math import asin, pi # -pi/2 ~ pi/2
|
13 |
+
|
14 |
+
|
15 |
+
# 获得人脸的关键点信息
|
16 |
+
def get_facePoints(src: np.ndarray, fd68):
|
17 |
+
if len(src.shape) == 2:
|
18 |
+
src = cv2.cvtColor(src, cv2.COLOR_GRAY2BGR)
|
19 |
+
elif src.shape[2] == 4:
|
20 |
+
src = cv2.cvtColor(src, cv2.COLOR_BGRA2BGR)
|
21 |
+
status, dets, landmarks, _ = fd68.facePointsEuler(src)
|
22 |
+
|
23 |
+
if status == 0:
|
24 |
+
return 0, None, None
|
25 |
+
elif status == 2:
|
26 |
+
return 2, None, None
|
27 |
+
else:
|
28 |
+
return 1, dets, np.fliplr(landmarks)
|
29 |
+
|
30 |
+
|
31 |
+
def eulerZ(landmark: np.matrix):
|
32 |
+
# 我们规定顺时针为正方向
|
33 |
+
def get_pi_2(r):
|
34 |
+
pi_2 = pi / 2.
|
35 |
+
if r >= 0.0:
|
36 |
+
return pi_2
|
37 |
+
else:
|
38 |
+
return -pi_2
|
39 |
+
orbit_points = np.array([[landmark[21, 0], landmark[21, 1]], [landmark[71, 0], landmark[71, 1]],
|
40 |
+
[landmark[25, 0], landmark[25, 1]], [landmark[67, 0], landmark[67, 1]]])
|
41 |
+
# [[cos a],[sin a],[point_x],[point_y]]
|
42 |
+
# 前面两项是有关直线与Y正半轴夹角a的三角函数,所以对于眼睛部分来讲sin a应该接近1
|
43 |
+
# "我可以认为"cv2.fitLine的y轴正方向为竖直向下,且生成的拟合直线的方向为从起点指向终点
|
44 |
+
# 与y轴的夹角为y轴夹角与直线方向的夹角,方向从y指向直线,逆时针为正方向
|
45 |
+
# 所以最后对于鼻梁的计算结果需要取个负号
|
46 |
+
orbit_line = cv2.fitLine(orbit_points, cv2.DIST_L2, 0, 0.01, 0.01)
|
47 |
+
orbit_a = asin(orbit_line[1][0])
|
48 |
+
nose_points = np.array([[landmark[55, 0], landmark[55, 1]], [landmark[69, 0], landmark[69, 1]]])
|
49 |
+
nose_line = cv2.fitLine(nose_points, cv2.DIST_L2, 0, 0.01, 0.01)
|
50 |
+
nose_a = asin(nose_line[1][0])
|
51 |
+
return (orbit_a + nose_a) * (180.0 / (2 * pi))
|
README.md
CHANGED
@@ -1,12 +1,133 @@
|
|
1 |
---
|
2 |
title: HivisionIDPhotos
|
3 |
-
emoji: ⚡
|
4 |
-
colorFrom: yellow
|
5 |
-
colorTo: red
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.42.0
|
8 |
app_file: app.py
|
9 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
10 |
---
|
11 |
|
12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
---
|
2 |
title: HivisionIDPhotos
|
|
|
|
|
|
|
|
|
|
|
3 |
app_file: app.py
|
4 |
+
sdk: gradio
|
5 |
+
sdk_version: 3.40.1
|
6 |
+
---
|
7 |
+
<div align="center">
|
8 |
+
<h1><img src="https://linimages.oss-cn-beijing.aliyuncs.com/hivision_photo_logo.png" width=80>HivisionIDPhoto</h1>
|
9 |
+
|
10 |
+
|
11 |
+
English / [中文](README_CN.md)
|
12 |
+
|
13 |
+
[![GitHub](https://img.shields.io/static/v1?label=Github&message=GitHub&color=black)](https://github.com/xiaolin199912/HivisionIDPhotos)
|
14 |
+
[![SwanHub Demo](https://swanhub.co/git/repo/SwanHub%2FAuto-README/file/preview?ref=main&path=swanhub.svg)](https://swanhub.co/ZeYiLin/HivisionIDPhotos/demo)
|
15 |
+
[![zhihu](https://img.shields.io/static/v1?label=知乎&message=zhihu&color=blue)](https://zhuanlan.zhihu.com/p/638254028)
|
16 |
+
|
17 |
+
<img src="./sources/demoImage.png" width=900>
|
18 |
+
|
19 |
+
</div>
|
20 |
+
|
21 |
+
|
22 |
+
# 🤩Project Update
|
23 |
+
|
24 |
+
- Online Demo: [![SwanHub Demo](https://swanhub.co/git/repo/SwanHub%2FAuto-README/file/preview?ref=main&path=swanhub.svg)](https://swanhub.co/ZeYiLin/HivisionIDPhotos/demo)
|
25 |
+
- 2023.7.15: A Python library is planned for release!
|
26 |
+
- 2023.6.20: Updated **Forecast Size Selection**
|
27 |
+
- 2023.6.19: Updated **Layout photo**
|
28 |
+
- 2023.6.13: Updated **center gradient color**
|
29 |
+
- 2023.6.11: Updated **top and bottom gradient color**
|
30 |
+
- 2023.6.8: Updated **custom size**
|
31 |
+
- 2023.6.4: Updated **custom background color and face detection bug notification**
|
32 |
+
- 2023.5.10: Updated **change background without changing size**
|
33 |
+
<br>
|
34 |
+
|
35 |
+
|
36 |
+
# Overview
|
37 |
+
|
38 |
+
> 🚀Thank you for your interest in our work. You may also want to check out our other achievements in the field of image processing. Please feel free to contact us at zeyi.lin@swanhub.co.
|
39 |
+
|
40 |
+
HivisionIDPhoto aims to develop a practical intelligent algorithm for producing ID photos. It uses a complete set of model workflows to recognize various user photo scenarios, perform image segmentation, and generate ID photos.
|
41 |
+
|
42 |
+
**HivisionIDPhoto can:**
|
43 |
+
|
44 |
+
1. Perform lightweight image segmentation
|
45 |
+
2. Generate standard ID photos and six-inch layout photos according to different size specifications
|
46 |
+
3. Provide beauty features (waiting)
|
47 |
+
4. Provide intelligent formal wear replacement (waiting)
|
48 |
+
|
49 |
+
<div align="center">
|
50 |
+
<img src="sources/gradio-image.jpeg" width=900>
|
51 |
+
</div>
|
52 |
+
|
53 |
+
|
54 |
---
|
55 |
|
56 |
+
If HivisionIDPhoto is helpful to you, please star this repo or recommend it to your friends to solve the problem of emergency ID photo production!
|
57 |
+
|
58 |
+
|
59 |
+
# 🔧Environment Dependencies and Installation
|
60 |
+
|
61 |
+
- Python >= 3.7 (Recommend to use [Anaconda](https://www.anaconda.com/download/#linux) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html))
|
62 |
+
- onnxruntime
|
63 |
+
- OpenCV
|
64 |
+
- Option: Linux, Windows, MacOS
|
65 |
+
|
66 |
+
### Installation
|
67 |
+
|
68 |
+
1. Clone repo
|
69 |
+
|
70 |
+
```bash
|
71 |
+
git lfs install && git clone https://swanhub.co/ZeYiLin/HivisionIDPhotos.git
|
72 |
+
cd HivisionIDPhotos
|
73 |
+
```
|
74 |
+
|
75 |
+
2. Install dependent packages
|
76 |
+
|
77 |
+
```
|
78 |
+
pip install numpy
|
79 |
+
pip install opencv-python
|
80 |
+
pip install onnxruntime
|
81 |
+
pip install gradio
|
82 |
+
```
|
83 |
+
|
84 |
+
|
85 |
+
|
86 |
+
# ⚡️Quick Inference
|
87 |
+
|
88 |
+
### 1. Download Pre-trained Models
|
89 |
+
|
90 |
+
**SwanHub:**
|
91 |
+
|
92 |
+
The model and code are downloaded through git-lfs.
|
93 |
+
|
94 |
+
```
|
95 |
+
git lfs install
|
96 |
+
git clone https://swanhub.co/ZeYiLin/HivisionIDPhotos.git
|
97 |
+
```
|
98 |
+
|
99 |
+
**GitHub:**
|
100 |
+
|
101 |
+
```
|
102 |
+
git clone https://github.com/xiaolin199912/HivisionIDPhotos.git
|
103 |
+
```
|
104 |
+
|
105 |
+
| Model | Parameters | Dir | Download Link |
|
106 |
+
| :------------------: | :--------: | :-------: | :----------------------------------------------------------: |
|
107 |
+
| hivision_modnet.onnx | 25 M | `./` | [Download](https://github.com/xiaolin199912/HivisionIDPhotos/releases/download/pretrained-model/hivision_modnet.onnx) |
|
108 |
+
|
109 |
+
### **2. Inference!**
|
110 |
+
|
111 |
+
Run a Gradio Demo:
|
112 |
+
|
113 |
+
```
|
114 |
+
python app.py
|
115 |
+
```
|
116 |
+
|
117 |
+
Running the program will generate a local web page where you can complete ID photo operations and interactions.
|
118 |
+
|
119 |
+
|
120 |
+
# Reference Projects
|
121 |
+
|
122 |
+
1. MTCNN: https://github.com/ipazc/mtcnn
|
123 |
+
2. MTCNN-ONNX:https://swanhub.co/ZeYiLin/MTCNN-ONNX
|
124 |
+
3. ModNet: https://github.com/ZHKKKe/MODNet
|
125 |
+
|
126 |
+
|
127 |
+
# 📧Contact
|
128 |
+
|
129 |
+
If you have any questions, please email Zeyi.lin@swanhub.co
|
130 |
+
|
131 |
+
|
132 |
+
Copyright © 2023, ZeYiLin. All Rights Reserved.
|
133 |
+
|
README_CN.md
ADDED
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<div align="center">
|
2 |
+
<h1><img src="https://linimages.oss-cn-beijing.aliyuncs.com/hivision_photo_logo.png" width=80>HivisionIDPhoto</h1>
|
3 |
+
|
4 |
+
[English](README.md) / 中文
|
5 |
+
|
6 |
+
[![GitHub](https://img.shields.io/static/v1?label=GitHub&message=GitHub&color=black)](https://github.com/xiaolin199912/HivisionIDPhotos)
|
7 |
+
[![SwanHub Demo](https://img.shields.io/static/v1?label=在线体验&message=SwanHub%20Demo&color=blue)](https://swanhub.co/ZeYiLin/HivisionIDPhotos/demo)
|
8 |
+
[![zhihu](https://img.shields.io/static/v1?label=知乎&message=知乎&color=blue)](https://zhuanlan.zhihu.com/p/638254028)
|
9 |
+
|
10 |
+
<img src="sources/demoImage.png" width=900>
|
11 |
+
</div>
|
12 |
+
|
13 |
+
|
14 |
+
# 🤩项目更新
|
15 |
+
- 在线体验: [![SwanHub Demo](https://img.shields.io/static/v1?label=Demo&message=SwanHub%20Demo&color=blue)](https://swanhub.co/ZeYiLin/HivisionIDPhotos/demo)
|
16 |
+
- 2023.6.20: 更新**预设尺寸菜单**
|
17 |
+
- 2023.6.19: 更新**排版照**
|
18 |
+
- 2023.6.13: 更新**中心渐变色**
|
19 |
+
- 2023.6.11: 更新**上下渐变色**
|
20 |
+
- 2023.6.8: 更新**自定义尺寸**
|
21 |
+
- 2023.6.4: 更新**自定义底色、人脸检测Bug通知**
|
22 |
+
- 2023.5.10: 更新**不改尺寸只换底**
|
23 |
+
|
24 |
+
# Overview
|
25 |
+
|
26 |
+
> 🚀谢谢你对我们的工作感兴趣。您可能还想查看我们在图像领域的其他成果,欢迎来信:zeyi.lin@swanhub.co.
|
27 |
+
|
28 |
+
HivisionIDPhoto旨在开发一种实用的证件照智能制作算法。
|
29 |
+
|
30 |
+
它利用一套完善的模型工作流程,实现对多种用户拍照场景的识别、抠图与证件照生成。
|
31 |
+
|
32 |
+
|
33 |
+
**HivisionIDPhoto可以做到:**
|
34 |
+
|
35 |
+
1. 轻量级抠图
|
36 |
+
2. 根据不同尺寸规格生成不同的标准证件照、六寸排版照
|
37 |
+
3. 美颜(waiting)
|
38 |
+
4. 智能换正装(waiting)
|
39 |
+
|
40 |
+
<div align="center">
|
41 |
+
<img src="sources/gradio-image.jpeg" width=900>
|
42 |
+
</div>
|
43 |
+
|
44 |
+
|
45 |
+
---
|
46 |
+
|
47 |
+
如果HivisionIDPhoto对你有帮助,请star这个repo或推荐给你的朋友,解决证件照应急制作问题!
|
48 |
+
|
49 |
+
|
50 |
+
# 🔧环境安装与依赖
|
51 |
+
|
52 |
+
- Python >= 3.7 (Recommend to use [Anaconda](https://www.anaconda.com/download/#linux) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html))
|
53 |
+
- onnxruntime
|
54 |
+
- OpenCV
|
55 |
+
- Option: Linux, Windows, MacOS
|
56 |
+
|
57 |
+
### Installation
|
58 |
+
|
59 |
+
1. Clone repo
|
60 |
+
|
61 |
+
```bash
|
62 |
+
git lfs install && git clone https://swanhub.co/ZeYiLin/HivisionIDPhotos.git
|
63 |
+
cd HivisionIDPhotos
|
64 |
+
```
|
65 |
+
|
66 |
+
2. Install dependent packages
|
67 |
+
|
68 |
+
```
|
69 |
+
pip install numpy
|
70 |
+
pip install opencv-python
|
71 |
+
pip install onnxruntime
|
72 |
+
pip install gradio
|
73 |
+
```
|
74 |
+
|
75 |
+
|
76 |
+
|
77 |
+
# ⚡️快速推理
|
78 |
+
|
79 |
+
模型与代码通过git-lfs下载。
|
80 |
+
|
81 |
+
```
|
82 |
+
git lfs install
|
83 |
+
git clone https://swanhub.co/ZeYiLin/HivisionIDPhotos.git
|
84 |
+
```
|
85 |
+
|
86 |
+
**推理!**
|
87 |
+
|
88 |
+
```
|
89 |
+
python app.py
|
90 |
+
```
|
91 |
+
|
92 |
+
运行程序将生成一个本地Web页面,在页面中可完成证件照的操作与交互。
|
93 |
+
|
94 |
+
|
95 |
+
# 引用项目
|
96 |
+
|
97 |
+
1. MTCNN: https://github.com/ipazc/mtcnn
|
98 |
+
2. ModNet: https://github.com/ZHKKKe/MODNet
|
99 |
+
|
100 |
+
# 📧联系我们
|
101 |
+
|
102 |
+
如果您有任何问题,请发邮件至 zeyi.lin@swanhub.co
|
__pycache__/EulerZ.cpython-38.pyc
ADDED
Binary file (1.73 kB). View file
|
|
__pycache__/error.cpython-38.pyc
ADDED
Binary file (1.01 kB). View file
|
|
__pycache__/face_judgement_align.cpython-38.pyc
ADDED
Binary file (15 kB). View file
|
|
__pycache__/imageTransform.cpython-38.pyc
ADDED
Binary file (6.69 kB). View file
|
|
__pycache__/layoutCreate.cpython-38.pyc
ADDED
Binary file (2.56 kB). View file
|
|
__pycache__/move_image.cpython-38.pyc
ADDED
Binary file (3.28 kB). View file
|
|
app.py
ADDED
@@ -0,0 +1,216 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
|
2 |
+
import gradio as gr
|
3 |
+
import onnxruntime
|
4 |
+
from face_judgement_align import IDphotos_create
|
5 |
+
from hivisionai.hycv.vision import add_background
|
6 |
+
from layoutCreate import generate_layout_photo, generate_layout_image
|
7 |
+
import pathlib
|
8 |
+
import numpy as np
|
9 |
+
|
10 |
+
size_list_dict = {"一寸": (413, 295), "二寸": (626, 413),
|
11 |
+
"教师资格证": (413, 295), "国家公务员考试": (413, 295), "初级会计考试": (413, 295)}
|
12 |
+
color_list_dict = {"蓝色": (86, 140, 212), "白色": (255, 255, 255), "红色": (233, 51, 35)}
|
13 |
+
|
14 |
+
|
15 |
+
# 设置Gradio examples
|
16 |
+
def set_example_image(example: list) -> dict:
|
17 |
+
return gr.Image.update(value=example[0])
|
18 |
+
|
19 |
+
|
20 |
+
# 检测RGB是否超出范围,如果超出则约束到0~255之间
|
21 |
+
def range_check(value, min_value=0, max_value=255):
|
22 |
+
value = int(value)
|
23 |
+
if value <= min_value:
|
24 |
+
value = min_value
|
25 |
+
elif value > max_value:
|
26 |
+
value = max_value
|
27 |
+
return value
|
28 |
+
|
29 |
+
|
30 |
+
def idphoto_inference(input_image,
|
31 |
+
mode_option,
|
32 |
+
size_list_option,
|
33 |
+
color_option,
|
34 |
+
render_option,
|
35 |
+
custom_color_R,
|
36 |
+
custom_color_G,
|
37 |
+
custom_color_B,
|
38 |
+
custom_size_height,
|
39 |
+
custom_size_width,
|
40 |
+
head_measure_ratio=0.2,
|
41 |
+
head_height_ratio=0.45,
|
42 |
+
top_distance_max=0.12,
|
43 |
+
top_distance_min=0.10):
|
44 |
+
idphoto_json = {
|
45 |
+
"size_mode": mode_option,
|
46 |
+
"color_mode": color_option,
|
47 |
+
"render_mode": render_option,
|
48 |
+
}
|
49 |
+
|
50 |
+
# 如果尺寸模式选择的是尺寸列表
|
51 |
+
if idphoto_json["size_mode"] == "尺寸列表":
|
52 |
+
idphoto_json["size"] = size_list_dict[size_list_option]
|
53 |
+
# 如果尺寸模式选择的是自定义尺寸
|
54 |
+
elif idphoto_json["size_mode"] == "自定义尺寸":
|
55 |
+
id_height = int(custom_size_height)
|
56 |
+
id_width = int(custom_size_width)
|
57 |
+
if id_height < id_width or min(id_height, id_width) < 100 or max(id_height, id_width) > 1800:
|
58 |
+
return {
|
59 |
+
img_output_standard: gr.update(value=None),
|
60 |
+
img_output_standard_hd: gr.update(value=None),
|
61 |
+
notification: gr.update(value="宽度应不大于长度;长宽不应小于100,大于1800", visible=True)}
|
62 |
+
idphoto_json["size"] = (id_height, id_width)
|
63 |
+
else:
|
64 |
+
idphoto_json["size"] = (None, None)
|
65 |
+
|
66 |
+
# 如果颜色模式选择的是自定义底色
|
67 |
+
if idphoto_json["color_mode"] == "自定义底色":
|
68 |
+
idphoto_json["color_bgr"] = (range_check(custom_color_R),
|
69 |
+
range_check(custom_color_G),
|
70 |
+
range_check(custom_color_B))
|
71 |
+
else:
|
72 |
+
idphoto_json["color_bgr"] = color_list_dict[color_option]
|
73 |
+
|
74 |
+
result_image_hd, result_image_standard, typography_arr, typography_rotate, \
|
75 |
+
_, _, _, _, status = IDphotos_create(input_image,
|
76 |
+
mode=idphoto_json["size_mode"],
|
77 |
+
size=idphoto_json["size"],
|
78 |
+
head_measure_ratio=head_measure_ratio,
|
79 |
+
head_height_ratio=head_height_ratio,
|
80 |
+
align=False,
|
81 |
+
beauty=False,
|
82 |
+
fd68=None,
|
83 |
+
human_sess=sess,
|
84 |
+
IS_DEBUG=False,
|
85 |
+
top_distance_max=top_distance_max,
|
86 |
+
top_distance_min=top_distance_min)
|
87 |
+
|
88 |
+
# 如果检测到人脸数量不等于1
|
89 |
+
if status == 0:
|
90 |
+
result_messgae = {
|
91 |
+
img_output_standard: gr.update(value=None),
|
92 |
+
img_output_standard_hd: gr.update(value=None),
|
93 |
+
notification: gr.update(value="人脸数量不等于1", visible=True)
|
94 |
+
}
|
95 |
+
|
96 |
+
# 如果检测到人脸数量等于1
|
97 |
+
else:
|
98 |
+
if idphoto_json["render_mode"] == "纯色":
|
99 |
+
result_image_standard = np.uint8(
|
100 |
+
add_background(result_image_standard, bgr=idphoto_json["color_bgr"]))
|
101 |
+
result_image_hd = np.uint8(add_background(result_image_hd, bgr=idphoto_json["color_bgr"]))
|
102 |
+
elif idphoto_json["render_mode"] == "上下渐变(白)":
|
103 |
+
result_image_standard = np.uint8(
|
104 |
+
add_background(result_image_standard, bgr=idphoto_json["color_bgr"], mode="updown_gradient"))
|
105 |
+
result_image_hd = np.uint8(
|
106 |
+
add_background(result_image_hd, bgr=idphoto_json["color_bgr"], mode="updown_gradient"))
|
107 |
+
else:
|
108 |
+
result_image_standard = np.uint8(
|
109 |
+
add_background(result_image_standard, bgr=idphoto_json["color_bgr"], mode="center_gradient"))
|
110 |
+
result_image_hd = np.uint8(
|
111 |
+
add_background(result_image_hd, bgr=idphoto_json["color_bgr"], mode="center_gradient"))
|
112 |
+
|
113 |
+
if idphoto_json["size_mode"] == "只换底":
|
114 |
+
result_layout_image = gr.update(visible=False)
|
115 |
+
else:
|
116 |
+
typography_arr, typography_rotate = generate_layout_photo(input_height=idphoto_json["size"][0],
|
117 |
+
input_width=idphoto_json["size"][1])
|
118 |
+
|
119 |
+
result_layout_image = generate_layout_image(result_image_standard, typography_arr,
|
120 |
+
typography_rotate,
|
121 |
+
height=idphoto_json["size"][0],
|
122 |
+
width=idphoto_json["size"][1])
|
123 |
+
|
124 |
+
result_messgae = {
|
125 |
+
img_output_standard: result_image_standard,
|
126 |
+
img_output_standard_hd: result_image_hd,
|
127 |
+
img_output_layout: result_layout_image,
|
128 |
+
notification: gr.update(visible=False)}
|
129 |
+
|
130 |
+
return result_messgae
|
131 |
+
|
132 |
+
|
133 |
+
if __name__ == "__main__":
|
134 |
+
HY_HUMAN_MATTING_WEIGHTS_PATH = "./hivision_modnet.onnx"
|
135 |
+
sess = onnxruntime.InferenceSession(HY_HUMAN_MATTING_WEIGHTS_PATH)
|
136 |
+
size_mode = ["尺寸列表", "只换底", "自定义尺寸"]
|
137 |
+
size_list = ["一寸", "二寸", "教师资格证", "国家公务员考试", "初级会计考试"]
|
138 |
+
colors = ["蓝色", "白色", "红色", "自定义底色"]
|
139 |
+
render = ["纯色", "上下渐变(白)", "中心渐变(白)"]
|
140 |
+
|
141 |
+
title = "<h1 id='title'>HivisionIDPhotos</h1>"
|
142 |
+
description = "<h3>😎6.20更新:新增尺寸选择列表</h3>"
|
143 |
+
css = '''
|
144 |
+
h1#title, h3 {
|
145 |
+
text-align: center;
|
146 |
+
}
|
147 |
+
'''
|
148 |
+
|
149 |
+
demo = gr.Blocks(css=css)
|
150 |
+
|
151 |
+
with demo:
|
152 |
+
gr.Markdown(title)
|
153 |
+
gr.Markdown(description)
|
154 |
+
with gr.Row():
|
155 |
+
with gr.Column():
|
156 |
+
img_input = gr.Image().style(height=350)
|
157 |
+
mode_options = gr.Radio(choices=size_mode, label="证件照尺寸选项", value="尺寸列表", elem_id="size")
|
158 |
+
# 预设尺寸下拉菜单
|
159 |
+
with gr.Row(visible=True) as size_list_row:
|
160 |
+
size_list_options = gr.Dropdown(choices=size_list, label="预设尺寸", value="一寸", elem_id="size_list")
|
161 |
+
|
162 |
+
with gr.Row(visible=False) as custom_size:
|
163 |
+
custom_size_height = gr.Number(value=413, label="height", interactive=True)
|
164 |
+
custom_size_wdith = gr.Number(value=295, label="width", interactive=True)
|
165 |
+
|
166 |
+
color_options = gr.Radio(choices=colors, label="背景色", value="蓝色", elem_id="color")
|
167 |
+
with gr.Row(visible=False) as custom_color:
|
168 |
+
custom_color_R = gr.Number(value=0, label="R", interactive=True)
|
169 |
+
custom_color_G = gr.Number(value=0, label="G", interactive=True)
|
170 |
+
custom_color_B = gr.Number(value=0, label="B", interactive=True)
|
171 |
+
|
172 |
+
render_options = gr.Radio(choices=render, label="渲染方式", value="纯色", elem_id="render")
|
173 |
+
|
174 |
+
img_but = gr.Button('开始制作')
|
175 |
+
# 案例图片
|
176 |
+
example_images = gr.Dataset(components=[img_input],
|
177 |
+
samples=[[path.as_posix()]
|
178 |
+
for path in sorted(pathlib.Path('images').rglob('*.jpg'))])
|
179 |
+
|
180 |
+
with gr.Column():
|
181 |
+
notification = gr.Text(label="状态", visible=False)
|
182 |
+
with gr.Row():
|
183 |
+
img_output_standard = gr.Image(label="标准照").style(height=350)
|
184 |
+
img_output_standard_hd = gr.Image(label="高清照").style(height=350)
|
185 |
+
img_output_layout = gr.Image(label="六寸排版照").style(height=350)
|
186 |
+
|
187 |
+
|
188 |
+
def change_color(colors):
|
189 |
+
if colors == "自定义底色":
|
190 |
+
return {custom_color: gr.update(visible=True)}
|
191 |
+
else:
|
192 |
+
return {custom_color: gr.update(visible=False)}
|
193 |
+
|
194 |
+
def change_size_mode(size_option_item):
|
195 |
+
if size_option_item == "自定义尺寸":
|
196 |
+
return {custom_size: gr.update(visible=True),
|
197 |
+
size_list_row: gr.update(visible=False)}
|
198 |
+
elif size_option_item == "只换底":
|
199 |
+
return {custom_size: gr.update(visible=False),
|
200 |
+
size_list_row: gr.update(visible=False)}
|
201 |
+
else:
|
202 |
+
return {custom_size: gr.update(visible=False),
|
203 |
+
size_list_row: gr.update(visible=True)}
|
204 |
+
|
205 |
+
color_options.input(change_color, inputs=[color_options], outputs=[custom_color])
|
206 |
+
mode_options.input(change_size_mode, inputs=[mode_options], outputs=[custom_size, size_list_row])
|
207 |
+
|
208 |
+
img_but.click(idphoto_inference,
|
209 |
+
inputs=[img_input, mode_options, size_list_options, color_options, render_options,
|
210 |
+
custom_color_R, custom_color_G, custom_color_B,
|
211 |
+
custom_size_height, custom_size_wdith],
|
212 |
+
outputs=[img_output_standard, img_output_standard_hd, img_output_layout, notification],
|
213 |
+
queue=True)
|
214 |
+
example_images.click(fn=set_example_image, inputs=[example_images], outputs=[img_input])
|
215 |
+
|
216 |
+
demo.launch(enable_queue=True)
|
beautyPlugin/GrindSkin.py
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
@author: cuny
|
3 |
+
@file: GrindSkin.py
|
4 |
+
@time: 2022/7/2 14:44
|
5 |
+
@description:
|
6 |
+
磨皮算法
|
7 |
+
"""
|
8 |
+
import cv2
|
9 |
+
import numpy as np
|
10 |
+
|
11 |
+
|
12 |
+
def grindSkin(src, grindDegree: int = 3, detailDegree: int = 1, strength: int = 9):
|
13 |
+
"""
|
14 |
+
Dest =(Src * (100 - Opacity) + (Src + 2 * GaussBlur(EPFFilter(Src) - Src)) * Opacity) /100
|
15 |
+
人像磨皮方案,后续会考虑使用一些皮肤区域检测算法来实现仅皮肤区域磨皮,增加算法的精细程度——或者使用人脸关键点
|
16 |
+
https://www.cnblogs.com/Imageshop/p/4709710.html
|
17 |
+
Args:
|
18 |
+
src: 原图
|
19 |
+
grindDegree: 磨皮程度调节参数
|
20 |
+
detailDegree: 细节程度调节参数
|
21 |
+
strength: 融合程度,作为磨皮强度(0 - 10)
|
22 |
+
|
23 |
+
Returns:
|
24 |
+
磨皮后的图像
|
25 |
+
"""
|
26 |
+
if strength <= 0:
|
27 |
+
return src
|
28 |
+
dst = src.copy()
|
29 |
+
opacity = min(10., strength) / 10.
|
30 |
+
dx = grindDegree * 5 # 双边滤波参数之一
|
31 |
+
fc = grindDegree * 12.5 # 双边滤波参数之一
|
32 |
+
temp1 = cv2.bilateralFilter(src[:, :, :3], dx, fc, fc)
|
33 |
+
temp2 = cv2.subtract(temp1, src[:, :, :3])
|
34 |
+
temp3 = cv2.GaussianBlur(temp2, (2 * detailDegree - 1, 2 * detailDegree - 1), 0)
|
35 |
+
temp4 = cv2.add(cv2.add(temp3, temp3), src[:, :, :3])
|
36 |
+
dst[:, :, :3] = cv2.addWeighted(temp4, opacity, src[:, :, :3], 1 - opacity, 0.0)
|
37 |
+
return dst
|
38 |
+
|
39 |
+
|
40 |
+
if __name__ == "__main__":
|
41 |
+
input_image = cv2.imread("test_image/7.jpg")
|
42 |
+
output_image = grindSkin(src=input_image)
|
43 |
+
cv2.imwrite("grindSkinCompare.png", np.hstack((input_image, output_image)))
|
beautyPlugin/MakeBeautiful.py
ADDED
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
@author: cuny
|
3 |
+
@file: MakeBeautiful.py
|
4 |
+
@time: 2022/7/7 20:23
|
5 |
+
@description:
|
6 |
+
美颜工具集合文件,作为暴露在外的插件接口
|
7 |
+
"""
|
8 |
+
from .GrindSkin import grindSkin
|
9 |
+
from .MakeWhiter import MakeWhiter
|
10 |
+
from .ThinFace import thinFace
|
11 |
+
import numpy as np
|
12 |
+
|
13 |
+
|
14 |
+
def makeBeautiful(input_image: np.ndarray,
|
15 |
+
landmark,
|
16 |
+
thinStrength: int,
|
17 |
+
thinPlace: int,
|
18 |
+
grindStrength: int,
|
19 |
+
whiterStrength: int
|
20 |
+
) -> np.ndarray:
|
21 |
+
"""
|
22 |
+
美颜工具的接口函数,用于实现美颜效果
|
23 |
+
Args:
|
24 |
+
input_image: 输入的图像
|
25 |
+
landmark: 瘦脸需要的人脸关键点信息,为fd68返回的第二个参数
|
26 |
+
thinStrength: 瘦脸强度,为0-10(如果更高其实也没什么问题),当强度为0或者更低时,则不瘦脸
|
27 |
+
thinPlace: 选择瘦脸区域,为0-2之间的值,越大瘦脸的点越靠下
|
28 |
+
grindStrength: 磨皮强度,为0-10(如果更高其实也没什么问题),当强度为0或者更低时,则不磨皮
|
29 |
+
whiterStrength: 美白强度,为0-10(如果更高其实也没什么问题),当强度为0或者更低时,则不美白
|
30 |
+
Returns:
|
31 |
+
output_image 输出图像
|
32 |
+
"""
|
33 |
+
try:
|
34 |
+
_, _, _ = input_image.shape
|
35 |
+
except ValueError:
|
36 |
+
raise TypeError("输入图像必须为3通道或者4通道!")
|
37 |
+
# 三通道或者四通道图像
|
38 |
+
# 首先进行瘦脸
|
39 |
+
input_image = thinFace(input_image, landmark, place=thinPlace, strength=thinStrength)
|
40 |
+
# 其次进行磨皮
|
41 |
+
input_image = grindSkin(src=input_image, strength=grindStrength)
|
42 |
+
# 最后进行美白
|
43 |
+
makeWhiter = MakeWhiter()
|
44 |
+
input_image = makeWhiter.run(input_image, strength=whiterStrength)
|
45 |
+
return input_image
|
beautyPlugin/MakeWhiter.py
ADDED
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
@author: cuny
|
3 |
+
@file: MakeWhiter.py
|
4 |
+
@time: 2022/7/2 14:28
|
5 |
+
@description:
|
6 |
+
美白算法
|
7 |
+
"""
|
8 |
+
import os
|
9 |
+
import cv2
|
10 |
+
import math
|
11 |
+
import numpy as np
|
12 |
+
local_path = os.path.dirname(__file__)
|
13 |
+
|
14 |
+
|
15 |
+
class MakeWhiter(object):
    """
    Skin whitening via a 3D color lookup table (LUT).

    The LUT is supplied as a 512x512 image arranged as an 8x8 grid of
    64x64 tiles (a standard color-cube layout).  ``run`` maps the image
    through the LUT and blends the result with the original according to
    the requested strength.
    """

    class __LutWhite:
        """
        Internal helper: expands a 512x512 LUT image into a dense
        256x256x256 color cube and applies it to BGR images.
        """

        def __init__(self, lut):
            cube64rows = 8    # tiles per row in the 512x512 LUT image
            cube64size = 64   # edge length of one tile
            cube256size = 256
            cubeScale = int(cube256size / cube64size)  # 4

            reshapeLut = np.zeros((cube256size, cube256size, cube256size, 3))
            for i in range(cube64size):
                tmp = math.floor(i / cube64rows)
                cx = int((i - tmp * cube64rows) * cube64size)
                cy = int(tmp * cube64size)
                # One 64x64 tile inside the 512x512 (8*64) LUT image.
                cube64 = lut[cy:cy + cube64size, cx:cx + cube64size]
                _rows, _cols, _ = cube64.shape
                if _rows == 0 or _cols == 0:
                    continue
                cube256 = cv2.resize(cube64, (cube256size, cube256size))
                i = i * cubeScale
                for k in range(cubeScale):
                    reshapeLut[i + k] = cube256
            self.lut = reshapeLut

        def imageInLut(self, src):
            """Map every BGR pixel of ``src`` through the expanded LUT."""
            arr = src.copy()
            bs = arr[:, :, 0]
            gs = arr[:, :, 1]
            rs = arr[:, :, 2]
            arr[:, :] = self.lut[bs, gs, rs]
            return arr

    def __init__(self, lutImage: np.ndarray = None):
        self.__lutWhiten = None
        if lutImage is not None:
            self.__lutWhiten = self.__LutWhite(lutImage)

    def setLut(self, lutImage: np.ndarray):
        """Install ``lutImage`` as the active lookup table."""
        self.__lutWhiten = self.__LutWhite(lutImage)

    @staticmethod
    def generate_identify_color_matrix(size: int = 512, channel: int = 3) -> np.ndarray:
        """
        Generate an identity lookup-table image.

        Args:
            size: LUT image size, default 512
            channel: number of channels, default 3

        Returns:
            The generated LUT image (BGR, uint8).
        """
        img = np.zeros((size, size, channel), dtype=np.uint8)
        for by in range(size // 64):
            for bx in range(size // 64):
                for g in range(64):
                    for r in range(64):
                        x = r + bx * 64
                        y = g + by * 64
                        img[y][x][0] = int(r * 255.0 / 63.0 + 0.5)
                        img[y][x][1] = int(g * 255.0 / 63.0 + 0.5)
                        img[y][x][2] = int((bx + by * 8.0) * 255.0 / 63.0 + 0.5)
        return cv2.cvtColor(img, cv2.COLOR_RGB2BGR).clip(0, 255).astype('uint8')

    def run(self, src: np.ndarray, strength: int) -> np.ndarray:
        """
        Whiten an image.

        Args:
            src: source image (3- or 4-channel; only the BGR channels are processed)
            strength: whitening strength, 0 - 10

        Returns:
            The whitened image.
        """
        dst = src.copy()
        strength = min(10, int(strength)) / 10.
        if strength <= 0:
            return dst
        # Bug fix: the bundled default LUT used to be reloaded on *every*
        # call, silently discarding any LUT supplied via __init__ or
        # setLut().  Load the default only when no LUT has been set yet.
        if self.__lutWhiten is None:
            self.setLut(cv2.imread(f"{local_path}/lut_image/3.png", -1))
        img = self.__lutWhiten.imageInLut(src[:, :, :3])
        dst[:, :, :3] = cv2.addWeighted(src[:, :, :3], 1 - strength, img, strength, 0)
        return dst
|
99 |
+
|
100 |
+
|
101 |
+
if __name__ == "__main__":
    # Manual smoke test: whiten a sample image at full strength (10) and
    # save a side-by-side comparison.  Paths are relative to the CWD.
    # makeLut = MakeWhiter()
    # cv2.imwrite("lutOrigin.png", makeLut.generate_identify_color_matrix())
    input_image = cv2.imread("test_image/7.jpg", -1)
    lut_image = cv2.imread("lut_image/3.png")
    makeWhiter = MakeWhiter(lut_image)
    output_image = makeWhiter.run(input_image, 10)
    cv2.imwrite("makeWhiterCompare.png", np.hstack((input_image, output_image)))
|
beautyPlugin/ThinFace.py
ADDED
@@ -0,0 +1,267 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
@author: cuny
|
3 |
+
@file: ThinFace.py
|
4 |
+
@time: 2022/7/2 15:50
|
5 |
+
@description:
|
6 |
+
瘦脸算法,用到了图像局部平移法
|
7 |
+
先使用人脸关键点检测,然后再使用图像局部平移法
|
8 |
+
需要注意的是,这部分不会包含dlib人脸关键点检测,因为考虑到模型载入的问题
|
9 |
+
"""
|
10 |
+
import cv2
|
11 |
+
import math
|
12 |
+
import numpy as np
|
13 |
+
|
14 |
+
|
15 |
+
class TranslationWarp(object):
    """
    Face thinning via local translation warping.

    Several historical versions are kept in this class:
      * localTranslationWarp          - naive per-pixel implementation.
      * localTranslationWarpLimitFor  - same, but limits the loops to the
        bounding square of the warp circle.
      * a numpy-ufunc variant (commented out) that gave some speed-up.
      * localTranslationWarpFastWithStrength - uses cv2.remap, much faster.
    """

    # Face thinning: naive per-pixel local translation warp.
    @staticmethod
    def localTranslationWarp(srcImg, startX, startY, endX, endY, radius):
        # Bilinear interpolation of src at the sub-pixel point (ux, uy).
        def BilinearInsert(src, ux, uy):
            w, h, c = src.shape  # NOTE(review): numpy shape is (rows, cols, channels); names look swapped, but only c is used
            if c == 3:
                x1 = int(ux)
                x2 = x1 + 1
                y1 = int(uy)
                y2 = y1 + 1
                part1 = src[y1, x1].astype(np.float64) * (float(x2) - ux) * (float(y2) - uy)
                part2 = src[y1, x2].astype(np.float64) * (ux - float(x1)) * (float(y2) - uy)
                part3 = src[y2, x1].astype(np.float64) * (float(x2) - ux) * (uy - float(y1))
                part4 = src[y2, x2].astype(np.float64) * (ux - float(x1)) * (uy - float(y1))
                insertValue = part1 + part2 + part3 + part4
                # NOTE(review): int8 wraps values > 127; the wrap cancels out
                # when stored back into a uint8 image, but uint8 would be the
                # clearer dtype — confirm before changing.
                return insertValue.astype(np.int8)

        ddradius = float(radius * radius)  # squared radius of the warp circle
        copyImg = srcImg.copy()  # output image
        # |m-c|^2 in the warp formula (squared drag-vector length)
        ddmc = (endX - startX) * (endX - startX) + (endY - startY) * (endY - startY)
        H, W, C = srcImg.shape  # image shape
        for i in range(W):
            for j in range(H):
                # # Is this pixel inside the warp circle?
                # # Optimization step 1: reject pixels outside the bounding square of (startX, startY).
                if math.fabs(i - startX) > radius and math.fabs(j - startY) > radius:
                    continue
                distance = (i - startX) * (i - startX) + (j - startY) * (j - startY)
                if distance < ddradius:
                    # Compute the source coordinate that maps onto (i, j):
                    # the squared term on the right-hand side of the formula.
                    ratio = (ddradius - distance) / (ddradius - distance + ddmc)
                    ratio = ratio * ratio
                    # Map back to the original position.
                    UX = i - ratio * (endX - startX)
                    UY = j - ratio * (endY - startY)

                    # Sample the source at (UX, UY) with bilinear interpolation.
                    # start_ = time.time()
                    value = BilinearInsert(srcImg, UX, UY)
                    # print(f"bilinear interpolation took: {time.time() - start_}")
                    # Write the warped value at (i, j).
                    copyImg[j, i] = value
        return copyImg

    # Face thinning pro1: limits the loops to the warp circle's bounding square.
    @staticmethod
    def localTranslationWarpLimitFor(srcImg, startP: np.matrix, endP: np.matrix, radius: float):
        startX, startY = startP[0, 0], startP[0, 1]
        endX, endY = endP[0, 0], endP[0, 1]

        # Bilinear interpolation of src at the sub-pixel point (ux, uy).
        def BilinearInsert(src, ux, uy):
            w, h, c = src.shape  # NOTE(review): same (rows, cols, channels) naming caveat as above
            if c == 3:
                x1 = int(ux)
                x2 = x1 + 1
                y1 = int(uy)
                y2 = y1 + 1
                part1 = src[y1, x1].astype(np.float64) * (float(x2) - ux) * (float(y2) - uy)
                part2 = src[y1, x2].astype(np.float64) * (ux - float(x1)) * (float(y2) - uy)
                part3 = src[y2, x1].astype(np.float64) * (float(x2) - ux) * (uy - float(y1))
                part4 = src[y2, x2].astype(np.float64) * (ux - float(x1)) * (uy - float(y1))
                insertValue = part1 + part2 + part3 + part4
                return insertValue.astype(np.int8)

        ddradius = float(radius * radius)  # squared radius of the warp circle
        copyImg = srcImg.copy()  # output image
        # |m-c|^2 in the warp formula
        ddmc = (endX - startX) ** 2 + (endY - startY) ** 2
        # Top-left corner of the bounding square.
        startTX, startTY = (startX - math.floor(radius + 1), startY - math.floor((radius + 1)))
        # Bottom-right corner of the bounding square.
        endTX, endTY = (startX + math.floor(radius + 1), startY + math.floor((radius + 1)))
        # Crop srcImg to the bounding square.
        srcImg = srcImg[startTY: endTY + 1, startTX: endTX + 1, :]
        # db.cv_show(srcImg)
        # After cropping, every coordinate shifts by startX - math.floor(radius + 1).
        # endX, endY expressed in cropped coordinates:
        endX, endY = (endX - startX + math.floor(radius + 1), endY - startY + math.floor(radius + 1))
        # startX, startY expressed in cropped coordinates:
        startX, startY = (math.floor(radius + 1), math.floor(radius + 1))
        H, W, C = srcImg.shape  # cropped image shape
        for i in range(W):
            for j in range(H):
                # Is this pixel inside the warp circle?
                # Optimization step 1: bounding-square rejection (now redundant after cropping).
                # if math.fabs(i - startX) > radius and math.fabs(j - startY) > radius:
                #     continue
                distance = (i - startX) * (i - startX) + (j - startY) * (j - startY)
                if distance < ddradius:
                    # Compute the source coordinate that maps onto (i, j):
                    # the squared term on the right-hand side of the formula.
                    ratio = (ddradius - distance) / (ddradius - distance + ddmc)
                    ratio = ratio * ratio
                    # Map back to the original position.
                    UX = i - ratio * (endX - startX)
                    UY = j - ratio * (endY - startY)

                    # Sample with bilinear interpolation.
                    # start_ = time.time()
                    value = BilinearInsert(srcImg, UX, UY)
                    # print(f"bilinear interpolation took: {time.time() - start_}")
                    # Write back in full-image coordinates.
                    copyImg[j + startTY, i + startTX] = value
        return copyImg

    # # Face thinning pro2: used a numpy ufunc for processing.
    # def localTranslationWarpNumpy(self, srcImg, startP: np.matrix, endP: np.matrix, radius: float):
    #     startX , startY = startP[0, 0], startP[0, 1]
    #     endX, endY = endP[0, 0], endP[0, 1]
    #     ddradius = float(radius * radius)  # squared radius of the warp circle
    #     copyImg = srcImg.copy()  # output image
    #     # |m-c|^2 in the warp formula
    #     ddmc = (endX - startX)**2 + (endY - startY)**2
    #     # Top-left corner of the bounding square.
    #     startTX, startTY = (startX - math.floor(radius + 1), startY - math.floor((radius + 1)))
    #     # Bottom-right corner of the bounding square.
    #     endTX, endTY = (startX + math.floor(radius + 1), startY + math.floor((radius + 1)))
    #     # Crop srcImg.
    #     self.thinImage = srcImg[startTY : endTY + 1, startTX : endTX + 1, :]
    #     # s = self.thinImage
    #     # db.cv_show(srcImg)
    #     # After cropping, coordinates shift by startX - math.floor(radius + 1).
    #     # endX, endY in cropped coordinates:
    #     endX, endY = (endX - startX + math.floor(radius + 1), endY - startY + math.floor(radius + 1))
    #     # startX, startY in cropped coordinates:
    #     startX ,startY = (math.floor(radius + 1), math.floor(radius + 1))
    #     H, W, C = self.thinImage.shape  # cropped image shape
    #     index_m = np.arange(H * W).reshape((H, W))
    #     triangle_ufunc = np.frompyfunc(self.process, 9, 3)
    #     # start_ = time.time()
    #     finalImgB, finalImgG, finalImgR = triangle_ufunc(index_m, self, W, ddradius, ddmc, startX, startY, endX, endY)
    #     finaleImg = np.dstack((finalImgB, finalImgG, finalImgR)).astype(np.uint8)
    #     finaleImg = np.fliplr(np.rot90(finaleImg, -1))
    #     copyImg[startTY: endTY + 1, startTX: endTX + 1, :] = finaleImg
    #     # print(f"image processing took: {time.time() - start_}")
    #     # db.cv_show(copyImg)
    #     return copyImg

    # Face thinning pro3: uses OpenCV's built-in remap.
    @staticmethod
    def localTranslationWarpFastWithStrength(srcImg, startP: np.matrix, endP: np.matrix, radius, strength: float = 100.):
        """
        Local translation warp implemented with cv2.remap.

        Args:
            srcImg: source image
            startP: start point (1x2 matrix-like; [0,0]=x, [0,1]=y)
            endP: end point (same layout)
            radius: warp radius in pixels
            strength: thinning strength, typically 100 or more

        Returns:
            The warped image.
        """
        startX, startY = startP[0, 0], startP[0, 1]
        endX, endY = endP[0, 0], endP[0, 1]
        ddradius = float(radius * radius)
        # copyImg = np.zeros(srcImg.shape, np.uint8)
        # copyImg = srcImg.copy()

        # Circular mask: only pixels inside the warp circle are remapped.
        maskImg = np.zeros(srcImg.shape[:2], np.uint8)
        cv2.circle(maskImg, (startX, startY), math.ceil(radius), (255, 255, 255), -1)

        # Strength factor: larger strength -> smaller K0 -> stronger warp.
        K0 = 100 / strength

        # |m-c|^2 in the warp formula, split per axis.
        ddmc_x = (endX - startX) * (endX - startX)
        ddmc_y = (endY - startY) * (endY - startY)
        H, W, C = srcImg.shape

        # Identity coordinate maps (mapX[y,x]=x, mapY[y,x]=y) for cv2.remap.
        mapX = np.vstack([np.arange(W).astype(np.float32).reshape(1, -1)] * H)
        mapY = np.hstack([np.arange(H).astype(np.float32).reshape(-1, 1)] * W)

        distance_x = (mapX - startX) * (mapX - startX)
        distance_y = (mapY - startY) * (mapY - startY)
        distance = distance_x + distance_y
        K1 = np.sqrt(distance)
        ratio_x = (ddradius - distance_x) / (ddradius - distance_x + K0 * ddmc_x)
        ratio_y = (ddradius - distance_y) / (ddradius - distance_y + K0 * ddmc_y)
        ratio_x = ratio_x * ratio_x
        ratio_y = ratio_y * ratio_y

        # Displaced source coordinates; the (1 - K1/radius) term fades the
        # displacement to zero at the circle boundary.
        UX = mapX - ratio_x * (endX - startX) * (1 - K1 / radius)
        UY = mapY - ratio_y * (endY - startY) * (1 - K1 / radius)

        # Outside the mask, keep the identity mapping.
        np.copyto(UX, mapX, where=maskImg == 0)
        np.copyto(UY, mapY, where=maskImg == 0)
        UX = UX.astype(np.float32)
        UY = UY.astype(np.float32)
        copyImg = cv2.remap(srcImg, UX, UY, interpolation=cv2.INTER_LINEAR)
        return copyImg
|
217 |
+
|
218 |
+
|
219 |
+
def thinFace(src, landmark, place: int = 0, strength=30.):
    """
    Face-thinning entry point: given facial landmarks and a strength,
    thin both sides of the face.  Handles 4-channel images as well.

    Args:
        src: source image
        landmark: facial landmark information (68-point layout)
        place: thinning region selector, an integer in [0, 4]
        strength: thinning strength in [0, 10]; values <= 0 are a no-op

    Returns:
        The thinned image.
    """
    strength = min(100., strength * 10.)
    if strength <= 0.:
        return src
    # Clamp the region selector into the supported range.
    place = max(0, min(4, int(place)))

    def _edge_radius(upper, lower):
        # Distance between a landmark pair, used as the warp radius.
        return math.sqrt((upper[0, 0] - lower[0, 0]) ** 2 +
                         (upper[0, 1] - lower[0, 1]) ** 2)

    # Landmark pairs: (4..8, 6..10) on the left cheek, (13..17, 15..19) on
    # the right; point 58 is the common warp target.
    pairs = (
        (landmark[4 + place], landmark[6 + place]),    # left side
        (landmark[13 + place], landmark[15 + place]),  # right side
    )
    anchor = landmark[58]

    # Warp the left side first, then the right, each toward the anchor.
    result = src
    for upper, lower in pairs:
        result = TranslationWarp.localTranslationWarpFastWithStrength(
            result, upper[0], anchor[0], _edge_radius(upper, lower), strength)
    return result
|
256 |
+
|
257 |
+
|
258 |
+
if __name__ == "__main__":
    # Manual smoke test: detect 68 facial landmarks with the dlib-based
    # detector, thin the face, and save a side-by-side comparison image.
    import os
    from hycv.FaceDetection68.faceDetection68 import FaceDetection68
    local_file = os.path.dirname(__file__)
    PREDICTOR_PATH = f"{local_file}/weights/shape_predictor_68_face_landmarks.dat"  # landmark model path
    fd68 = FaceDetection68(model_path=PREDICTOR_PATH)
    input_image = cv2.imread("test_image/4.jpg", -1)
    _, landmark_, _ = fd68.facePoints(input_image)
    output_image = thinFace(input_image, landmark_, strength=30.2)
    cv2.imwrite("thinFaceCompare.png", np.hstack((input_image, output_image)))
|
beautyPlugin/__init__.py
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from .MakeBeautiful import makeBeautiful
|
2 |
+
|
3 |
+
|
4 |
+
|
beautyPlugin/__pycache__/GrindSkin.cpython-310.pyc
ADDED
Binary file (1.74 kB). View file
|
|
beautyPlugin/__pycache__/GrindSkin.cpython-38.pyc
ADDED
Binary file (1.69 kB). View file
|
|
beautyPlugin/__pycache__/MakeBeautiful.cpython-310.pyc
ADDED
Binary file (1.76 kB). View file
|
|
beautyPlugin/__pycache__/MakeBeautiful.cpython-38.pyc
ADDED
Binary file (1.71 kB). View file
|
|
beautyPlugin/__pycache__/MakeWhiter.cpython-310.pyc
ADDED
Binary file (3.87 kB). View file
|
|
beautyPlugin/__pycache__/MakeWhiter.cpython-38.pyc
ADDED
Binary file (3.8 kB). View file
|
|
beautyPlugin/__pycache__/ThinFace.cpython-310.pyc
ADDED
Binary file (6.24 kB). View file
|
|
beautyPlugin/__pycache__/ThinFace.cpython-38.pyc
ADDED
Binary file (6.48 kB). View file
|
|
beautyPlugin/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (234 Bytes). View file
|
|
beautyPlugin/__pycache__/__init__.cpython-38.pyc
ADDED
Binary file (195 Bytes). View file
|
|
beautyPlugin/lut_image/1.png
ADDED
beautyPlugin/lut_image/3.png
ADDED
beautyPlugin/lut_image/lutOrigin.png
ADDED
cuny_tools.py
ADDED
@@ -0,0 +1,621 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
|
2 |
+
import numpy as np
|
3 |
+
from hivisionai.hycv.utils import get_box_pro
|
4 |
+
from hivisionai.hycv.vision import cover_image, draw_picture_dots
|
5 |
+
from math import fabs, sin, radians, cos
|
6 |
+
|
7 |
+
def opencv_rotate(img, angle):
    """
    Rotate ``img`` by ``angle`` degrees about its center, enlarging the
    canvas so no content is clipped (black border fill).
    """
    rows, cols = img.shape[:2]
    # Rotation matrix about the image center, no scaling:
    #   [ cosA  -sinA  (1-cosA)*cx + sinA*cy ]
    #   [ sinA   cosA  -sinA*cx + (1-cosA)*cy ]
    matrix = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1.0)
    # Bounding-box size of the rotated image.
    theta = radians(angle)
    abs_sin = fabs(sin(theta))
    abs_cos = fabs(cos(theta))
    out_h = int(cols * abs_sin + rows * abs_cos)
    out_w = int(rows * abs_sin + cols * abs_cos)
    # Translate so the rotated content is centered in the enlarged canvas.
    matrix[0, 2] += (out_w - cols) / 2
    matrix[1, 2] += (out_h - rows) / 2
    return cv2.warpAffine(img, matrix, (out_w, out_h), borderValue=(0, 0, 0))
|
28 |
+
|
29 |
+
|
30 |
+
def transformationNeck2(image: np.ndarray, per_to_side: float = 0.8) -> np.ndarray:
    """
    Perspective-warp the neck region of a 4-channel (BGRA) portrait.

    The rectangle framed by four automatically chosen neck points may have
    transparent corners; its content is stretched into a full rectangle via
    a perspective transform and pasted back over the original.

    Steps: pick the four neck points -> pick their stretched target
    positions -> perspective transform -> paste over the original image.

    Args:
        image: 4-channel (BGRA) image.
        per_to_side: vertical position (0-1) of the lower point pair within
            the neck region; must be <= 1.

    Returns:
        The image with the warped neck region pasted back in.

    Raises:
        ValueError: if per_to_side > 1.
    """
    if per_to_side > 1:
        # Bug fix: this used to be `assert ValueError(...)`, which never
        # raises (an exception instance is always truthy); validate early.
        raise ValueError("per_to_side 必须小于1!")
    _, _, _, a = cv2.split(image)  # expects a 4-channel image; a = alpha
    height, width = a.shape

    def locate_side(image_: np.ndarray, x_: int, y_max: int) -> int:
        # Scan column x = x_ upward from y_max for the first non-zero
        # pixel; returns 0 if none is found.
        y_ = 0
        for y_ in range(y_max - 1, -1, -1):
            if image_[y_][x_] != 0:
                break
        return y_

    def locate_width(image_: np.ndarray, y_: int, mode, left_or_right: int = None):
        # Find the first non-zero pixel on the horizontal line y = y_.
        # left_or_right narrows the scan start; it exists for check_jaw below.
        if mode == 1:  # scan left-to-right
            x_ = 0
            if left_or_right is None:
                left_or_right = 0
            for x_ in range(left_or_right, width):
                if image_[y_][x_] != 0:
                    break
        else:  # scan right-to-left
            x_ = width
            if left_or_right is None:
                left_or_right = width - 1
            for x_ in range(left_or_right, -1, -1):
                if image_[y_][x_] != 0:
                    break
        return x_

    def check_jaw(image_: np.ndarray, left_, right_):
        # Check whether the chosen horizontal line cuts through the jaw
        # (crosses a transparent gap); if so, step down and retry.
        f = True  # True means the jaw was not cut
        # points are [x, y]
        for x_cell in range(left_[0] + 1, right_[0]):
            if image_[left_[1]][x_cell] == 0:
                f = False
                break
        if f is True:
            return left_, right_
        y_ = left_[1] + 2
        x_left_ = locate_width(image_, y_, mode=1, left_or_right=left_[0])
        x_right_ = locate_width(image_, y_, mode=2, left_or_right=right_[0])
        left_, right_ = check_jaw(image_, [x_left_, y_], [x_right_, y_])
        return left_, right_

    # Pick the four neck points; the key step is finding the upper pair,
    # which should be the two "widest" points of the neck.
    # (The redundant second cv2.split(image) present in the original was removed.)
    ret, a_thresh = cv2.threshold(a, 127, 255, cv2.THRESH_BINARY)
    y_high, y_low, x_left, x_right = get_box_pro(image=image, model=1)  # bounding-box info
    y_left_side = locate_side(image_=a_thresh, x_=x_left, y_max=y_low)    # y of the left point
    y_right_side = locate_side(image_=a_thresh, x_=x_right, y_max=y_low)  # y of the right point
    y = min(y_left_side, y_right_side)  # align both points on the same row
    cell_left_above, cell_right_above = check_jaw(a_thresh, [x_left, y], [x_right, y])
    x_left, x_right = cell_left_above[0], cell_right_above[0]
    # Upper pair found; now define the target rectangle and the lower pair.
    height_ = 150  # target height; fixed at 150 for now (TODO: derive from stretch length)
    width_ = x_right - x_left  # i.e. cell_right_above[0] - cell_left_above[0]
    y = int((y_low - y) * per_to_side + y)  # y of the lower pair
    cell_left_below, cell_right_bellow = ([locate_width(a_thresh, y_=y, mode=1), y],
                                          [locate_width(a_thresh, y_=y, mode=2), y])
    # All four points found: perspective-transform them into a rectangle.
    # (The commented-out "final position" variant from the original is dropped.)
    rect = np.array([cell_left_above, cell_right_above, cell_left_below, cell_right_bellow],
                    dtype='float32')
    # Target coordinates after the transform.
    dst = np.array([[0, 0], [width_, 0], [0, height_], [width_, height_]],
                   dtype='float32')
    # Compute and apply the perspective transform.
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (width_, height_))
    final = cover_image(image=warped, background=image, mode=3,
                        x=cell_left_above[0], y=cell_left_above[1])
    return final
|
116 |
+
|
117 |
+
|
118 |
+
def transformationNeck(image: np.ndarray, cutNeckHeight: int, neckBelow: int,
                       toHeight: int, per_to_side: float = 0.75) -> np.ndarray:
    """
    Neck-stretching: perspective-warp a cut-out neck region to a new height.

    Args:
        image: 4-channel (BGRA) image containing the neck cut-out.
        cutNeckHeight: y coordinate of the horizontal cut line (upper edge).
        neckBelow: y coordinate of the bottom of the neck.
        toHeight: target height of the stretched region.
        per_to_side: vertical position (0-1) of the lower point pair between
            cutNeckHeight and neckBelow; must be <= 1.

    Returns:
        The image with the stretched neck pasted back in.

    Raises:
        ValueError: if per_to_side > 1.
    """
    if per_to_side > 1:
        # Bug fix: this used to be `assert ValueError(...)`, which never
        # raises (an exception instance is always truthy); validate early.
        raise ValueError("per_to_side 必须小于1!")
    height, width, channels = image.shape
    _, _, _, a = cv2.split(image)  # expects a 4-channel image; a = alpha
    ret, a_thresh = cv2.threshold(a, 127, 255, cv2.THRESH_BINARY)  # binarize alpha

    def locate_width(image_: np.ndarray, y_: int, mode, left_or_right: int = None):
        # Find the first non-zero pixel on the horizontal line y = y_.
        # left_or_right narrows the scan start point.
        if mode == 1:  # scan left-to-right
            x_ = 0
            if left_or_right is None:
                left_or_right = 0
            for x_ in range(left_or_right, width):
                if image_[y_][x_] != 0:
                    break
        else:  # scan right-to-left
            x_ = width
            if left_or_right is None:
                left_or_right = width - 1
            for x_ in range(left_or_right, -1, -1):
                if image_[y_][x_] != 0:
                    break
        return x_

    # NOTE: the jaw-crossing check (check_jaw, see transformationNeck2) was
    # intentionally dropped here because cutNeckHeight is fixed by the
    # caller; the now-unused nested helper was removed as dead code.
    x_left = locate_width(image_=a_thresh, mode=1, y_=cutNeckHeight)
    x_right = locate_width(image_=a_thresh, mode=2, y_=cutNeckHeight)
    cell_left_above, cell_right_above = [x_left, cutNeckHeight], [x_right, cutNeckHeight]
    toWidth = x_right - x_left  # rectangle width
    # Upper pair fixed; locate the lower pair.
    y_below = int((neckBelow - cutNeckHeight) * per_to_side + cutNeckHeight)  # y of the lower pair
    cell_left_below = [locate_width(a_thresh, y_=y_below, mode=1), y_below]
    cell_right_bellow = [locate_width(a_thresh, y_=y_below, mode=2), y_below]
    # All four points found: perspective transform into the target rectangle.
    rect = np.array([cell_left_above, cell_right_above, cell_left_below, cell_right_bellow],
                    dtype='float32')
    # Target coordinates after the transform.
    dst = np.array([[0, 0], [toWidth, 0], [0, toHeight], [toWidth, toHeight]],
                   dtype='float32')
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (toWidth, toHeight))
    # Paste the warped region back over the original image.
    final = cover_image(image=warped, background=image, mode=3,
                        x=cell_left_above[0], y=cell_left_above[1])
    return final
|
186 |
+
|
187 |
+
|
188 |
+
def bestJunctionCheck_beta(image:np.ndarray, stepSize:int=4, if_per:bool=False):
    """
    Best-junction detection: find the "inflection points" of the neck by
    walking down its left and right contours and watching the slope.

    Args:
        image: 4-channel (BGRA) image containing the neck.
        stepSize: vertical step used to estimate the contour slope.
        if_per: if True, return the junction height as a fraction of the
            neck's bounding-box height instead of two coordinate points.

    Returns:
        Either a float (if_per=True) or the pair [x_left, y], [x_right, y].
    """
    point_k = 1  # slope threshold for detecting the inflection
    _, _, _, a = cv2.split(image)  # expects a 4-channel image; a = alpha
    height, width = a.shape
    ret, a_thresh = cv2.threshold(a, 127, 255, cv2.THRESH_BINARY)  # binarize alpha
    y_high, y_low, x_left, x_right = get_box_pro(image=image, model=1)  # bounding-box info
    def scan(y_:int, max_num:int=2):
        # Count foreground hits on row y_, at most one per half (left/right).
        num = 0
        left = False
        right = False
        for x_ in range(width):
            if a_thresh[y_][x_] != 0:
                if x_ < width // 2 and left is False:
                    num += 1
                    left = True
                elif x_ > width // 2 and right is False:
                    num += 1
                    right = True
        return True if num >= max_num else False
    def locate_neck_above():
        """
        Locate the topmost row where both sides of the neck are present.
        """
        # NOTE(review): implicitly returns None if no row matches, which
        # would break the tuple unpacking below — confirm inputs guarantee a hit.
        for y_ in range( y_high - 2, height):
            if scan(y_):
                return y_, y_
    y_high_left, y_high_right = locate_neck_above()
    def locate_width_pro(image_:np.ndarray, y_:int, mode):
        """
        Generator yielding the neck contour on one side, one row at a time.
        x_, y_ start at the given point; each iteration advances y_ by 1.
        mode==1 traces the left edge: stop when image_[y_][x_] == 0 and
        image_[y_][x_ + 1] != 0; otherwise move x_ left while on foreground,
        or right while both x_ and x_+1 are background.
        mode==2 traces the right edge symmetrically (stop when
        image_[y_][x_] == 0 and image_[y_][x_ - 1] != 0).
        Yields [y_, x_] pairs, then False when the scan runs out.
        """
        y_ += 1
        if mode == 1:
            x_ = 0
            while 0 <= y_ < height and 0 <= x_ < width:
                while image_[y_][x_] != 0 and x_ >= 0:
                    x_ -= 1
                while image_[y_][x_] == 0 and image_[y_][x_ + 1] == 0 and x_ < width - 2:
                    x_ += 1
                yield [y_, x_]
                y_ += 1
        elif mode == 2:
            x_ = width-1
            while 0 <= y_ < height and 0 <= x_ < width:
                while image_[y_][x_] != 0 and x_ < width - 2: x_ += 1
                while image_[y_][x_] == 0 and image_[y_][x_ - 1] == 0 and x_ >= 0: x_ -= 1
                yield [y_, x_]
                y_ += 1
        yield False
    def kGenerator(image_:np.ndarray, mode):
        """
        Slope generator: for each contour point, yield the slope toward the
        point stepSize rows further down, plus the point itself.
        """
        y_ = y_high_left if mode == 1 else y_high_right
        c_generator = locate_width_pro(image_=image_, y_=y_, mode=mode)
        for cell in c_generator:
            nc = locate_width_pro(image_=image_, y_=cell[0] + stepSize, mode=mode)
            nextCell = next(nc)
            if nextCell is False:
                yield False, False
            else:
                k = (cell[1] - nextCell[1]) / stepSize
                yield k, cell
        def findPt(image_:np.ndarray, mode):
            pass
    def findPt(image_:np.ndarray, mode):
        # Walk the slope stream until it leaves the accepted band
        # (-1/stepSize .. point_k); the last accepted point is the junction.
        k_generator = kGenerator(image_=image_, mode=mode)
        k, cell = next(k_generator)
        k_next, cell_next = next(k_generator)
        if k is False:
            raise ValueError("无法找到拐点!")
        while k_next is not False:
            k_next, cell_next = next(k_generator)
            if (k_next < - 1 / stepSize) or k_next > point_k:
                break
            cell = cell_next
        # return int(cell[0] + stepSize / 2)
        return cell[0]
    # First find the left-side inflection point:
    pointY_left = findPt(image_=a_thresh, mode=1)
    # Then the right-side inflection point:
    pointY_right = findPt(image_=a_thresh, mode=2)
    point = (pointY_left + pointY_right) // 2
    if if_per is True:
        point = (pointY_left + pointY_right) // 2
        return point / (y_low - y_high)
    pointX_left = next(locate_width_pro(image_=a_thresh, y_= point - 1, mode=1))[1]
    pointX_right = next(locate_width_pro(image_=a_thresh, y_=point- 1, mode=2))[1]
    return [pointX_left, point], [pointX_right, point]
|
283 |
+
|
284 |
+
|
285 |
+
def bestJunctionCheck(image: np.ndarray, offset: int, stepSize: int = 4):
    """
    Optimal neck/clothing junction detector.

    Takes a neck image (assumed not yet binarized — the alpha channel is
    thresholded here) and returns a fraction:
    (distance from the neck top to the junction point) / (neck image length).

    Compared with the beta version this adds a threshold constraint: starting
    from the located neck top, the contour is traversed downwards, using
    `stepSize` rows to estimate a slope at each contour point; traversal stops
    when the slope exceeds a limit or when the point drifts horizontally more
    than `offset` pixels from the running base x-coordinate.

    Args:
        image: 4-channel image (BGRA); only the alpha channel is used.
        offset: horizontal drift threshold (pixels) that ends the traversal.
        stepSize: vertical distance (rows) used for slope estimation.

    Returns:
        float: junction position as a fraction of the neck-box height.

    Raises:
        ValueError: when the neck region is shorter than `stepSize`, or when
            no turning point can be found.
    """
    point_k = 1  # slope limit that terminates the traversal
    _, _, _, a = cv2.split(image)  # expects a 4-channel image
    height, width = a.shape
    ret, a_thresh = cv2.threshold(a, 127, 255, cv2.THRESH_BINARY)  # binarize the alpha layer
    # get_box_pro binarizes internally, so the original image is passed in;
    # only the top/bottom y values of the neck box are used below.
    y_high, y_low, _, _ = get_box_pro(image=image, model=1, correction_factor=0)

    def scan(y_: int, max_num: int = 2):
        """Return True when row y_ has opaque pixels on both the left and right halves."""
        num = 0
        left = False
        right = False
        for x_ in range(width):
            if a_thresh[y_][x_] != 0:
                if x_ < width // 2 and left is False:
                    num += 1
                    left = True
                elif x_ > width // 2 and right is False:
                    num += 1
                    right = True
        return True if num >= max_num else False

    def locate_neck_above():
        """Locate the neck tip: first row at/below y_high satisfying scan()."""
        for y_ in range(y_high, height):
            if scan(y_):
                return y_

    y_start = locate_neck_above()  # starting row for the traversal
    # Fix: the original `assert ValueError(...)` was a no-op (an exception
    # instance is truthy, and asserts vanish under -O); raise explicitly.
    if y_low - y_start < stepSize:
        raise ValueError("脖子太小!")

    # Initial left/right contour x-coordinates on the starting row.
    x_left, x_right = 0, width
    for x_left_ in range(0, width):
        if a_thresh[y_start][x_left_] != 0:
            x_left = x_left_
            break
    for x_right_ in range(width - 1, -1, -1):
        if a_thresh[y_start][x_right_] != 0:
            x_right = x_right_
            break

    def contoursGenerator(image_: np.ndarray, y_: int, mode):
        """
        Generator of neck contour points, one row (y_+1, y_+2, ...) per next().

        mode==1 walks the left edge: stops a step when image_[y_][x_] == 0 and
        image_[y_][x_+1] != 0; mode==2 is the mirrored right-edge walk.
        Yields [y, x]; yields False once an IndexError ends the walk.
        """
        y_ += 1
        try:
            if mode == 1:
                x_ = 0
                while 0 <= y_ < height and 0 <= x_ < width:
                    while image_[y_][x_] != 0 and x_ >= 0: x_ -= 1
                    # NOTE(review): x_+1 can index one past the last column;
                    # the IndexError handler below absorbs it (original comment
                    # acknowledged this as a known, tolerated bug).
                    while x_ < width and image_[y_][x_] == 0 and image_[y_][x_ + 1] == 0: x_ += 1
                    yield [y_, x_]
                    y_ += 1
            elif mode == 2:
                x_ = width - 1
                while 0 <= y_ < height and 0 <= x_ < width:
                    while x_ < width and image_[y_][x_] != 0: x_ += 1
                    while x_ >= 0 and image_[y_][x_] == 0 and image_[y_][x_ - 1] == 0: x_ -= 1
                    yield [y_, x_]
                    y_ += 1
        except IndexError:
            # Walk failed (ran off the image) — signal with False.
            yield False

    def kGenerator(image_: np.ndarray, mode):
        """Slope generator: for each contour point yields (slope, point), or (False, False)."""
        y_ = y_start
        c_generator = contoursGenerator(image_=image_, y_=y_, mode=mode)
        for cell in c_generator:
            # Contour point stepSize rows further down, for the finite difference.
            kc = contoursGenerator(image_=image_, y_=cell[0] + stepSize, mode=mode)
            kCell = next(kc)
            if kCell is False:
                yield False, False
            else:
                # Left edge: previous-x minus next-x; right edge: the reverse.
                k = (cell[1] - kCell[1]) / stepSize if mode == 1 else (kCell[1] - cell[1]) / stepSize
                yield k, cell

    def findPt(image_: np.ndarray, mode):
        """Traverse one edge (mode 1=left, 2=right) and return the junction row."""
        x_base = x_left if mode == 1 else x_right
        k_generator = kGenerator(image_=image_, mode=mode)
        k, cell = k_generator.__next__()
        if k is False:
            raise ValueError("无法找到拐点!")
        k_next, cell_next = k_generator.__next__()
        while k_next is not False:
            cell = cell_next
            # Track the extreme x seen so far as the drift baseline.
            if cell[1] > x_base and mode == 2:
                x_base = cell[1]
            elif cell[1] < x_base and mode == 1:
                x_base = cell[1]
            # Exit 1: slope limit exceeded.
            if k_next > point_k:
                print("K out")
                break
            # Exit 2: horizontal drift beyond the offset threshold.
            elif abs(cell[1] - x_base) > offset:
                print("O out")
                break
            k_next, cell_next = k_generator.__next__()
        if abs(cell[1] - x_base) > offset:
            # Drift exit: back the row up past the drifted region.
            cell[0] = cell[0] - offset - 1
        return cell[0]

    # Find the turning point on each side, keep the higher (smaller y) one.
    pointY_left = findPt(image_=a_thresh, mode=1)
    pointY_right = findPt(image_=a_thresh, mode=2)
    point = min(pointY_right, pointY_left)
    per = (point - y_high) / (y_low - y_high)
    return per
|
425 |
+
|
426 |
+
|
427 |
+
def checkSharpCorner(image: np.ndarray):
    """
    Locate the y-coordinate of the neck "tip": the first row (scanning down
    from the top of the neck box) that has opaque pixels in both the left and
    right halves of the image. Returns None when no such row exists.
    """
    _, _, _, alpha = cv2.split(image)  # expects a 4-channel image
    img_h, img_w = alpha.shape
    _, alpha_bin = cv2.threshold(alpha, 127, 255, cv2.THRESH_BINARY)  # binarize the alpha layer
    # get_box_pro binarizes internally, so the raw image is passed straight in;
    # only the top edge of the returned neck box is used here.
    y_high, y_low, _, _ = get_box_pro(image=image, model=1, correction_factor=0)

    def row_has_both_sides(row: int, needed: int = 2):
        """True when the row contains opaque pixels on both halves of the image."""
        hits = 0
        seen_left = seen_right = False
        for col in range(img_w):
            if alpha_bin[row][col] != 0:
                if col < img_w // 2 and not seen_left:
                    hits += 1
                    seen_left = True
                elif col > img_w // 2 and not seen_right:
                    hits += 1
                    seen_right = True
        return hits >= needed

    # Walk down from the top of the neck box until a qualifying row appears.
    for row in range(y_high, img_h):
        if row_has_both_sides(row):
            return row
|
459 |
+
|
460 |
+
|
461 |
+
def checkJaw(image: np.ndarray, y_start: int):
    """
    Find the "saddle point" (lowest point of the jaw line) below row y_start.

    Args:
        image: 4-channel image; only the alpha channel is used.
        y_start: row from which to start the downward search (typically the
            neck tip returned by checkSharpCorner).

    Returns:
        (x, y): coordinates of the lowest first-opaque-pixel found between the
        inner left and right edges of row y_start.

    Raises:
        TypeError: when the image is too narrow to process.
    """
    _, _, _, a = cv2.split(image)  # expects a 4-channel image
    height, width = a.shape
    ret, a_thresh = cv2.threshold(a, 127, 255, cv2.THRESH_BINARY)  # binarize the alpha layer
    if width <=1: raise TypeError("图像太小!")
    x_left, x_right = 0, width - 1
    # Walk inwards past the first opaque run from the left on row y_start.
    # NOTE(review): if the whole row is opaque these inner while-loops can walk
    # past the array edge (IndexError) — confirm inputs always have transparent margins.
    for x_left in range(width):
        if a_thresh[y_start][x_left] != 0:
            while a_thresh[y_start][x_left] != 0: x_left += 1
            break
    # Same from the right.
    for x_right in range(width-1, -1, -1):
        if a_thresh[y_start][x_right] != 0:
            while a_thresh[y_start][x_right] != 0: x_right -= 1
            break
    point_list_y = []
    point_list_x = []
    # For each column between the inner edges, record the first opaque row at/below y_start.
    # NOTE(review): a fully transparent column would also walk past the bottom edge — confirm.
    for x in range(x_left, x_right):
        y = y_start
        while a_thresh[y][x] == 0: y += 1
        point_list_y.append(y)
        point_list_x.append(x)
    # The saddle point is the deepest (largest y) of those first-contact points.
    y = max(point_list_y)
    x = point_list_x[point_list_y.index(y)]
    return x, y
|
486 |
+
|
487 |
+
|
488 |
+
def checkHairLOrR(cloth_image_input_cut,
                  input_a,
                  neck_a,
                  cloth_image_input_top_y,
                  cutbar_top=0.4,
                  cutbar_bottom=0.5,
                  threshold=0.3):
    """
    Detect whether the clothing is occluded by hair. Only whole-side occlusion
    is considered (the bar is split cleanly into a left and a right half).

    The test strip is the [cutbar_top, cutbar_bottom] height band of the
    clothing cutout; the matching bands of the full-portrait alpha and the
    neck alpha are binarized and additively merged, then each half's dark-pixel
    ratio is compared with `threshold`.

    Args:
        cloth_image_input_cut: 4-channel clothing cutout.
        input_a: full-portrait alpha matte (grayscale, binarized by convention).
        neck_a: neck alpha matte (grayscale, binarized by convention).
        cloth_image_input_top_y: y-offset of the clothing cutout in the full image.
        cutbar_top / cutbar_bottom: fractional height band used for the test strip.
        threshold: dark-pixel ratio above which a side counts as occluded (0..1).

    Returns:
        int: 0 = not occluded, 1 = left occluded, 2 = right occluded, 3 = both.

    Raises:
        TypeError: when threshold is outside [0, 1].
    """
    def per_darkPoint(img: np.ndarray) -> float:
        """
        Fraction of black (value 0) pixels in the image.
        Fixes: vectorized (the original iterated every pixel in Python) and the
        return annotation corrected from int to float (a ratio is returned).
        """
        h, w = img.shape
        return float(np.count_nonzero(img == 0)) / (h * w)

    if threshold < 0 or threshold > 1: raise TypeError("阈值设置必须在0和1之间!")

    # Cut the 40%~50% height band of the clothing cutout, take its alpha, binarize.
    cloth_image_input_height = cloth_image_input_cut.shape[0]
    _, _, _, cloth_image_input_cutbar = cv2.split(cloth_image_input_cut[
                                                  int(cloth_image_input_height * cutbar_top):int(
                                                      cloth_image_input_height * cutbar_bottom), :])
    _, cloth_image_input_cutbar = cv2.threshold(cloth_image_input_cutbar, 127, 255, cv2.THRESH_BINARY)

    # Cut the corresponding bands of the portrait alpha and neck alpha, binarize.
    input_a_cutbar = input_a[cloth_image_input_top_y + int(cloth_image_input_height * cutbar_top):
                             cloth_image_input_top_y + int(cloth_image_input_height * cutbar_bottom), :]
    _, input_a_cutbar = cv2.threshold(input_a_cutbar, 127, 255, cv2.THRESH_BINARY)
    neck_a_cutbar = neck_a[cloth_image_input_top_y + int(cloth_image_input_height * cutbar_top):
                           cloth_image_input_top_y + int(cloth_image_input_height * cutbar_bottom), :]
    _, neck_a_cutbar = cv2.threshold(neck_a_cutbar, 50, 255, cv2.THRESH_BINARY)

    # Merge the three bars: invert the portrait bar, then saturating-add the others.
    input_a_cutbar = np.uint8(255 - input_a_cutbar)
    result_a_cutbar = cv2.add(input_a_cutbar, cloth_image_input_cutbar)
    result_a_cutbar = cv2.add(result_a_cutbar, neck_a_cutbar)

    if_mask = 0
    # Split the merged bar into a left and a right half and test each.
    height, width = result_a_cutbar.shape  # single-channel image
    left_image = result_a_cutbar[:, :width // 2]
    right_image = result_a_cutbar[:, width // 2:]
    if per_darkPoint(left_image) > threshold:
        if_mask = 1
    if per_darkPoint(right_image) > threshold:
        if_mask = 3 if if_mask == 1 else 2
    return if_mask
|
549 |
+
|
550 |
+
|
551 |
+
def find_black(image):
    """
    Return the row index of the first row (top to bottom) that contains a dark
    pixel (value < 127), or None when the image has no dark pixel.

    Fix: vectorized with NumPy — the original scanned every pixel in a Python
    double loop; behavior is identical for the 2-D grayscale inputs it expects.
    """
    dark_rows = np.where((np.asarray(image) < 127).any(axis=1))[0]
    return int(dark_rows[0]) if dark_rows.size else None
|
561 |
+
|
562 |
+
|
563 |
+
def convert_black_array(image):
    """
    For each column, mark with 1 every row from the first bright pixel
    (value > 127) downwards; columns with no bright pixel stay 0.

    Args:
        image: 2-D grayscale array.

    Returns:
        numpy.ndarray (float64, same h×w): the 0/1 mask described above.

    Fix: vectorized — the original used a per-column Python scan. np.argmax
    returns 0 for all-False columns, so those columns are masked out with
    `has_bright` to preserve the original behavior exactly.
    """
    bright = np.asarray(image) > 127
    height = bright.shape[0]
    first_bright = np.argmax(bright, axis=0)   # first bright row per column (0 when none)
    has_bright = bright.any(axis=0)            # guards the argmax-of-all-False case
    mask = ((np.arange(height)[:, None] >= first_bright) & has_bright).astype(np.float64)
    return mask
|
572 |
+
|
573 |
+
|
574 |
+
def checkLongHair(neck_image, head_bottom_y, neck_top_y):
    """
    Long-hair detector. Uses the head/neck image to decide whether the jaw is
    the lowest point of the head region.

    Returns:
        0 — short hair; 1 — long hair.
    """
    tip_y = checkSharpCorner(neck_image)
    saddle_y = checkJaw(neck_image, y_start=tip_y)[1]
    jaw_y = neck_top_y + saddle_y
    # Short hair when the jaw is (within 3px of) the lowest point of the head.
    return 0 if jaw_y >= head_bottom_y - 3 else 1
|
585 |
+
|
586 |
+
|
587 |
+
def checkLongHair2(head_bottom_y, cloth_top_y):
    """
    Secondary long-hair detector: hair is considered long (1) when the head
    region extends more than 10 pixels below the clothing top, else short (0).
    """
    return 1 if head_bottom_y > cloth_top_y + 10 else 0
|
592 |
+
|
593 |
+
|
594 |
+
if __name__ == "__main__":
    # Ad-hoc visual harness: for each sample neck image, locate the neck tip
    # and jaw saddle point, draw them, and show the result window by window.
    for i in range(1, 8):
        img = cv2.imread(f"./neck_temp/neck_image{i}.png", cv2.IMREAD_UNCHANGED)
        # Earlier experiments, kept for reference:
        # new = transformationNeck(image=img, cutNeckHeight=419,neckBelow=472, toHeight=150)
        # point_list = bestJunctionCheck(img, offset=5, stepSize=3)
        # per = bestJunctionCheck(img, offset=5, stepSize=3)
        # # per is a fraction; the block below converted it to two edge points
        point_list = []
        # y_high_, y_low_, _, _ = get_box_pro(image=img, model=1, conreection_factor=0)
        # _y = y_high_ + int((y_low_ - y_high_) * per)
        # _, _, _, a_ = cv2.split(img)  # expects a 4-channel image
        # h, w = a_.shape
        # r, a_t = cv2.threshold(a_, 127, 255, cv2.THRESH_BINARY)  # binarize the alpha layer
        # _x = 0
        # for _x in range(w):
        #     if a_t[_y][_x] != 0:
        #         break
        # point_list.append([_x, _y])
        # for _x in range(w - 1, -1, -1):
        #     if a_t[_y][_x] != 0:
        #         break
        # point_list.append([_x, _y])
        y = checkSharpCorner(img)                      # neck tip row
        point = checkJaw(image=img, y_start=y)         # jaw saddle point (x, y)
        point_list.append(point)
        new = draw_picture_dots(img, point_list, pen_size=2)
        cv2.imshow(f"{i}", new)
        cv2.waitKey(0)  # wait for a key press before the next sample
|
error.py
ADDED
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
@author: cuny
|
3 |
+
@file: error.py
|
4 |
+
@time: 2022/4/7 15:50
|
5 |
+
@description:
|
6 |
+
定义证件照制作的错误类
|
7 |
+
"""
|
8 |
+
from hivisionai.hyService.error import ProcessError
|
9 |
+
|
10 |
+
|
11 |
+
class IDError(ProcessError):
    """Domain error raised while creating an ID photo."""

    def __init__(self, err, diary=None, face_num=-1, status_id: str = "1500"):
        """
        Report a processing failure.

        Args:
            err: error description.
            diary: run log of the failing function; defaults to an empty dict.
            face_num: number of faces detected at failure time; -1 means the
                error cause is unknown / unrelated to face count.
            status_id: machine-readable status code (default "1500").
        """
        super().__init__(err)
        if diary is None:
            diary = {}
        self.err = err
        self.diary = diary
        self.face_num = face_num
        self.status_id = status_id
|
27 |
+
|
face_judgement_align.py
ADDED
@@ -0,0 +1,578 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import math
|
2 |
+
import cv2
|
3 |
+
import numpy as np
|
4 |
+
from hivisionai.hycv.face_tools import face_detect_mtcnn
|
5 |
+
from hivisionai.hycv.utils import get_box_pro, CV2Bytes
|
6 |
+
from hivisionai.hycv.vision import resize_image_esp, IDphotos_cut, add_background, calTime, resize_image_by_min, \
|
7 |
+
rotate_bound_4channels
|
8 |
+
import onnxruntime
|
9 |
+
from EulerZ import eulerZ
|
10 |
+
from beautyPlugin import makeBeautiful
|
11 |
+
from error import IDError
|
12 |
+
from imageTransform import standard_photo_resize, hollowOutFix, get_modnet_matting, draw_picture_dots, detect_distance
|
13 |
+
from layoutCreate import generate_layout_photo
|
14 |
+
from move_image import move
|
15 |
+
|
16 |
+
testImages = []
|
17 |
+
|
18 |
+
|
19 |
+
class LinearFunction_TwoDots(object):
    """
    A line constructed from two coordinate points (objects exposing .x and .y).

    When the two x-coordinates differ the line is y = k*x + b (mode "normal");
    otherwise it is the vertical line x = d1.x (mode "x=1").
    """

    def __init__(self, dot1, dot2):
        self.d1 = dot1
        self.d2 = dot2
        self.mode = "normal"
        if self.d2.x != self.d1.x:
            # Fix: the previous `max((d2.x - d1.x), 1)` clamp produced a wrong
            # slope whenever d2.x < d1.x; the equal-x case is already handled
            # by this branch's guard, so plain division is safe and correct.
            self.k = (self.d2.y - self.d1.y) / (self.d2.x - self.d1.x)
            self.b = self.d2.y - self.k * self.d2.x
        else:
            self.mode = "x=1"

    def forward(self, input_, mode="x"):
        """Evaluate the line: mode="x" maps x -> y, mode="y" maps y -> x."""
        if mode == "x":
            return self.forward_x(input_)
        elif mode == "y":
            return self.forward_y(input_)

    def forward_x(self, x):
        """Return y for the given x (0 for a vertical line, as before)."""
        if self.mode == "normal":
            return self.k * x + self.b
        else:
            return 0

    def forward_y(self, y):
        """
        Return x for the given y (d1.x for a vertical line).
        NOTE(review): divides by k — a horizontal line (k == 0) raises
        ZeroDivisionError; confirm callers never build horizontal lines.
        """
        if self.mode == "normal":
            return (y - self.b) / self.k
        else:
            return self.d1.x
|
57 |
+
|
58 |
+
|
59 |
+
class Coordinate(object):
    """A simple 2-D point holding x and y components."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __str__(self):
        return f"({self.x}, {self.y})"
|
66 |
+
|
67 |
+
|
68 |
+
@calTime
def face_number_and_angle_detection(input_image):
    """
    Detect faces (and originally their in-plane rotation) with a learned model.
    If the detected face count != 1, an IDError is raised and processing stops.

    Args:
        input_image: numpy.array (3 channels), the user-uploaded image
            (already lightly resized upstream).

    Returns:
        dets: dict with 'top'/'left'/'width'/'height' locating the single face.

    Raises:
        IDError: status "1101" when no face is found, "1102" for multiple faces.
    """

    # Previous Face++ cloud implementation, kept for reference:
    # input_image_bytes = CV2Bytes.cv2_byte(input_image, ".jpg")
    # face_num, face_rectangle, landmarks, headpose = megvii_face_detector(input_image_bytes)
    # print(face_rectangle)

    faces, landmarks = face_detect_mtcnn(input_image)
    face_num = len(faces)

    # Reject any photo whose face count is not exactly 1.
    if face_num != 1:
        status_id_ = "1101" if face_num == 0 else "1102"
        raise IDError(f"人脸检测出错!检测出了{face_num}张人脸", face_num=face_num, status_id=status_id_)

    # Collect face bounding boxes.
    # Fix: the loop previously bound its index to `iter`, shadowing the builtin;
    # the index was unused, so enumerate is dropped entirely.
    face_rectangle = []
    for x1, y1, x2, y2, _ in faces:
        x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
        # NOTE(review): 'top' is assigned x1 and 'left' y1 — these look swapped
        # relative to the usual top=y/left=x convention; confirm how downstream
        # consumers read this dict before changing it.
        face_rectangle.append({'top': x1, 'left': y1, 'width': x2 - x1, 'height': y2 - y1})

    # Only the first (single) face's box is returned.
    dets = face_rectangle[0]
    # landmark = landmarks[0]
    #
    # # In-plane rotation computation (currently disabled):
    # rotation = eulerZ(landmark)
    # return dets, rotation, landmark
    return dets
|
112 |
+
|
113 |
+
@calTime
def image_matting(input_image, params):
    """
    Whole-image portrait matting.

    Args:
        input_image: numpy.array (3 channels), the original user image.
        params: configuration dict; params["modnet"]["human_sess"] holds the
            ONNX session used for matting.

    Returns:
        numpy.array (4 channels): the matted portrait image.
    """
    print("抠图采用本地模型")
    matted = get_modnet_matting(input_image, sess=params["modnet"]["human_sess"])
    # Patch small holes left behind by the matting model.
    return hollowOutFix(matted)
|
129 |
+
|
130 |
+
|
131 |
+
@calTime
def rotation_ajust(input_image, rotation, a, IS_DEBUG=False):
    """
    Losslessly rotate the original image by the face roll angle and return the
    rotated images plus the geometry needed to crop away "rotation triangles".

    Args:
        input_image: numpy.array (3 channels), user image (resized/beautified).
        rotation: float, roll angle of the face away from upright.
        a: numpy.array (1 channel), the matte of the matting result.
        IS_DEBUG: debug-mode switch.

    Returns:
        result_jpg_image: numpy.array (3 channels), rotated original.
        result_png_image: numpy.array (4 channels), rotated matting image.
        L1, L2: LinearFunction_TwoDots built from the rotation corner points.
        dotL3: Coordinate, a special cut point.
        clockwise: int, 1 = clockwise deviation, -1 = counter-clockwise.
        drawed_dots_image: numpy.array (3 channels), result_jpg_image with the
            4 rotation points marked (for DEBUG mode).
    """

    # Step1. Prepare data.
    # Sign flip: positive `rotation` input means clockwise deviation of the
    # original, negative means counter-clockwise.
    rotation = -1 * rotation
    h, w = input_image.copy().shape[:2]

    # Step2. Lossless rotation (canvas grows so nothing is clipped).
    result_jpg_image, result_png_image, cos, sin = rotate_bound_4channels(input_image, a, rotation)

    # Step3. Derived geometry.
    nh, nw = result_jpg_image.shape[:2]  # new size after rotation
    clockwise = -1 if rotation < 0 else 1  # 1 = clockwise, -1 = counter-clockwise
    # Counter-clockwise deviation:
    if rotation < 0:
        # p1..p4 are the rotated corners of the original rectangle inside the
        # enlarged canvas, expressed via the rotation's cos/sin.
        p1 = Coordinate(0, int(w * sin))
        p2 = Coordinate(int(w * cos), 0)
        p3 = Coordinate(nw, int(h * cos))
        p4 = Coordinate(int(h * sin), nh)
        L1 = LinearFunction_TwoDots(p1, p4)
        L2 = LinearFunction_TwoDots(p4, p3)
        # dotL3 is a point 3/4 of the way from p2 to p3.
        dotL3 = Coordinate(int(0.25 * p2.x + 0.75 * p3.x), int(0.25 * p2.y + 0.75 * p3.y))

    # Clockwise deviation (mirrored corner layout):
    else:
        p1 = Coordinate(int(h * sin), 0)
        p2 = Coordinate(nw, int(w * sin))
        p3 = Coordinate(int(w * cos), nh)
        p4 = Coordinate(0, int(h * cos))
        L1 = LinearFunction_TwoDots(p4, p3)
        L2 = LinearFunction_TwoDots(p3, p2)
        dotL3 = Coordinate(int(0.75 * p4.x + 0.25 * p1.x), int(0.75 * p4.y + 0.25 * p1.y))

    # Step4. Draw the 4 rotation points (plus dotL3) for DEBUG verification.
    drawed_dots_image = draw_picture_dots(result_jpg_image, [(p1.x, p1.y), (p2.x, p2.y), (p3.x, p3.y),
                                                             (p4.x, p4.y), (dotL3.x, dotL3.y)])
    if IS_DEBUG:
        testImages.append(["drawed_dots_image", drawed_dots_image])

    return result_jpg_image, result_png_image, L1, L2, dotL3, clockwise, drawed_dots_image
|
188 |
+
|
189 |
+
|
190 |
+
@calTime
def face_number_detection_mtcnn(input_image):
    """
    Run MTCNN face detection on the rotation-corrected image.

    Args:
        input_image: numpy.array (3 channels), rotation-corrected image.

    Returns:
        faces: list, MTCNN detections including face location info.
    """
    # If either image side >= 1500px, detect on a half-size copy first to
    # speed up MTCNN.
    if max(input_image.shape[0], input_image.shape[1]) >= 1500:
        input_image_resize = cv2.resize(input_image,
                                        (input_image.shape[1] // 2, input_image.shape[0] // 2),
                                        interpolation=cv2.INTER_AREA)
        faces, _ = face_detect_mtcnn(input_image_resize, filter=True)  # MTCNN face detection
        # Exactly one face on the downscaled image -> trust it and scale the
        # detection back up by 2.
        # NOTE(review): this multiplies every element of the detection tuple by
        # 2, including the 5th element (looks like a confidence score) — confirm
        # downstream readers only use the first 4 coordinates.
        if len(faces) == 1:
            for item, param in enumerate(faces[0]):
                faces[0][item] = param * 2
        # Otherwise the downscaled detection is considered unreliable; re-run
        # MTCNN on the full-size image as a safety net.
        else:
            faces, _ = face_detect_mtcnn(input_image, filter=True)
    # Small images are detected directly.
    else:
        faces, _ = face_detect_mtcnn(input_image, filter=True)

    return faces
|
218 |
+
|
219 |
+
|
220 |
+
@calTime
def cutting_rect_pan(x1, y1, x2, y2, width, height, L1, L2, L3, clockwise, standard_size):
    """
    Adjust the crop rectangle of a rotation-corrected image so the crop avoids
    the empty "rotation triangles" introduced by the lossless rotation.

    Args:
        x1, y1: top-left corner of the crop box.
        x2, y2: bottom-right corner of the crop box.
        width, height: size of the image being cropped.
        L1, L2: LinearFunction_TwoDots built from the rotation corner points.
        L3: Coordinate, a special cut point.
        clockwise: int, rotation direction (1 = clockwise, else counter-clockwise).
        standard_size: tuple, target ID-photo size (h, w).

    Returns:
        x1, y1, x2, y2: the adjusted crop box.
        x_bias, y_bias: the horizontal / vertical offsets that were applied.
    """
    # Clamp the working coordinates to the image bounds.
    x1_std = x1 if x1 > 0 else 0
    x2_std = x2 if x2 < width else width
    # y1_std = y1 if y1 > 0 else 0
    y2_std = y2 if y2 < height else height

    # Offsets accumulated by the adjustments below.
    x_bias = 0
    y_bias = 0

    # Clockwise deviation:
    if clockwise == 1:
        # Bottom edge dips below line L1 -> pull the bottom up.
        if y2 > L1.forward_x(x1_std):
            y_bias = int(-(y2_std - L1.forward_x(x1_std)))
        # Bottom-right corner crosses L2 -> pull the right edge in.
        if y2 > L2.forward_x(x2_std):
            x_bias = int(-(x2_std - L2.forward_y(y2_std)))
        x2 = x2_std + x_bias
        if x1 < L3.x:
            x1 = L3.x
    # Counter-clockwise deviation (mirrored adjustments):
    else:
        if y2 > L1.forward_x(x1_std):
            x_bias = int(L1.forward_y(y2_std) - x1_std)
        if y2 > L2.forward_x(x2_std):
            y_bias = int(-(y2_std - L2.forward_x(x2_std)))
        x1 = x1_std + x_bias
        if x2 > L3.x:
            x2 = L3.x

    # Recompute the vertical extent so the box keeps the standard aspect ratio.
    y2 = int(y2_std + y_bias)
    new_cut_width = x2 - x1
    new_cut_height = int(new_cut_width / standard_size[1] * standard_size[0])
    y1 = y2 - new_cut_height

    return x1, y1, x2, y2, x_bias, y_bias
|
281 |
+
|
282 |
+
|
283 |
+
@calTime
def idphoto_cutting(faces, head_measure_ratio, standard_size, head_height_ratio, origin_png_image, origin_png_image_pre,
                    rotation_params, align=False, IS_DEBUG=False, top_distance_max=0.12, top_distance_min=0.10):
    """
    Adaptive ID-photo cropping, driven by the configured ratios and the input
    image's own geometry.

    Args:
        faces: list, face location info.
        head_measure_ratio: float, expected face-area / full-image-area ratio.
        standard_size: tuple, standard photo size, e.g. (413, 295).
        head_height_ratio: float, expected face-center height as a fraction of
            the full image height.
        origin_png_image: numpy.array (4 channels), transformed user input.
        origin_png_image_pre: numpy.array (4 channels), same but without the
            rotation correction.
        rotation_params: dict of rotation geometry (L1, L2, L3, clockwise,
            drawed_image).
        align: bool, whether the image was rotation-corrected.
        IS_DEBUG: debug-mode switch.
        top_distance_max: float, max allowed head-to-top distance ratio.
        top_distance_min: float, min allowed head-to-top distance ratio.

    Returns:
        result_image_hd: numpy.array (4 channels), high-definition photo.
        result_image_standard: numpy.array (4 channels), standard photo.
        clothing_params: dict, parameters for the later clothes-replacement step.
    """
    # Step0. Unpack rotation parameters.
    L1 = rotation_params["L1"]
    L2 = rotation_params["L2"]
    L3 = rotation_params["L3"]
    clockwise = rotation_params["clockwise"]
    drawed_image = rotation_params["drawed_image"]

    # Step1. Face parameters.
    face_rect = faces[0]
    x, y = face_rect[0], face_rect[1]
    w, h = face_rect[2] - x + 1, face_rect[3] - y + 1
    height, width = origin_png_image.shape[:2]
    width_height_ratio = standard_size[0] / standard_size[1]  # height/width ratio of the target

    # Step2. Derived quantities.
    face_center = (x + w / 2, y + h / 2)                    # face center
    face_measure = w * h                                    # face area
    crop_measure = face_measure / head_measure_ratio        # crop-box area (face area / ratio)
    resize_ratio = crop_measure / (standard_size[0] * standard_size[1])  # crop-box scale (area)
    resize_ratio_single = math.sqrt(resize_ratio)           # per-axis scale (sqrt of the area scale)
    crop_size = (int(standard_size[0] * resize_ratio_single),
                 int(standard_size[1] * resize_ratio_single))  # crop-box size

    # Crop-box placement: centered horizontally on the face, with the face
    # center at head_height_ratio of the box height.
    x1 = int(face_center[0] - crop_size[1] / 2)
    y1 = int(face_center[1] - crop_size[0] * head_height_ratio)
    y2 = y1 + crop_size[0]
    x2 = x1 + crop_size[1]

    # Step3. Rotation-corrected cropping (currently disabled, kept for reference):
    # if align:
    #     y_top_pre, _, _, _ = get_box_pro(origin_png_image.astype(np.uint8), model=2,
    #                                      correction_factor=0)  # top margin of the matting result
    #     # Recompute crop params to remove the "rotation triangles" with
    #     # minimal image loss.
    #     x1, y1, x2, y2, x_bias, y_bias = cutting_rect_pan(x1, y1, x2, y2, width, height, L1, L2, L3, clockwise,
    #                                                       standard_size)
    #     # Rejection rule: if the crop box cuts into the face box, skip rotation.
    #     if y1 > y_top_pre:
    #         y2 = y2 - (y1 - y_top_pre)
    #         y1 = y_top_pre
    #     # If the face would be cropped, fall back to the non-rotated crop.
    #     if x1 > x or x2 < (x + w) or y1 > y or y2 < (y + h):
    #         return idphoto_cutting(faces, head_measure_ratio, standard_size, head_height_ratio, origin_png_image_pre,
    #                                origin_png_image_pre, rotation_params, align=False, IS_DEBUG=False)
    #
    #     if y_bias != 0:
    #         origin_png_image = origin_png_image[:y2, :]
    #     if x_bias > 0:  # counter-clockwise
    #         origin_png_image = origin_png_image[:, x1:]
    #         if drawed_image is not None and IS_DEBUG:
    #             drawed_x = x1
    #         x = x - x1
    #         x2 = x2 - x1
    #         x1 = 0
    #     else:  # clockwise
    #         origin_png_image = origin_png_image[:, :x2]
    #
    #     if drawed_image is not None and IS_DEBUG:
    #         drawed_x = drawed_x if x_bias > 0 else 0
    #         drawed_image = draw_picture_dots(drawed_image, [(x1 + drawed_x, y1), (x1 + drawed_x, y2),
    #                                                         (x2 + drawed_x, y1), (x2 + drawed_x, y2)],
    #                                          pen_color=(255, 0, 0))
    #         testImages.append(["drawed_image", drawed_image])

    # Step4. First crop pass.
    cut_image = IDphotos_cut(x1, y1, x2, y2, origin_png_image)
    cut_image = cv2.resize(cut_image, (crop_size[1], crop_size[0]))
    # Top/bottom/left/right margins of the person inside cut_image.
    y_top, y_bottom, x_left, x_right = get_box_pro(cut_image.astype(np.uint8), model=2,
                                                   correction_factor=0)
    if IS_DEBUG:
        testImages.append(["firstCut", cut_image])

    # Step5. Check whether the person sits reasonably inside the crop; if not,
    # prepare correction values for the second pass.
    # Gap between the person and the left or right edge of the crop box?
    if x_left > 0 or x_right > 0:
        status_left_right = 1
        # Shrinking horizontally requires a matching vertical shrink to keep
        # the aspect ratio.
        cut_value_top = int(((x_left + x_right) * width_height_ratio) / 2)
    else:
        status_left_right = 0
        cut_value_top = 0

    """
    Head-to-top distance check:
    - status == 0: distance acceptable, no move needed
    - status == 1: distance too large, move the person up
    - status == 2: distance too small, move the person down
    """
    status_top, move_value = detect_distance(y_top - cut_value_top, crop_size[0], max=top_distance_max,
                                             min=top_distance_min)

    # Step6. Second crop pass (only when a correction is needed).
    if status_left_right == 0 and status_top == 0:
        result_image = cut_image
    else:
        result_image = IDphotos_cut(x1 + x_left,
                                    y1 + cut_value_top + status_top * move_value,
                                    x2 - x_right,
                                    y2 - cut_value_top + status_top * move_value,
                                    origin_png_image)
    if IS_DEBUG:
        testImages.append(["result_image_pre", result_image])

    # Face position relative to the final crop — needed by clothes replacement.
    relative_x = x - (x1 + x_left)
    relative_y = y - (y1 + cut_value_top + status_top * move_value)

    # Step7. If there is a gap at the bottom, slide the person down to the edge.
    result_image, y_high = move(result_image.astype(np.uint8))
    relative_y = relative_y + y_high  # keep the clothes-replacement params in sync

    # cv2.imwrite("./temp_image.png", result_image)

    # Step8. Produce the standard and HD variants.
    result_image_standard = standard_photo_resize(result_image, standard_size)
    result_image_hd, resize_ratio_max = resize_image_by_min(result_image, esp=max(600, standard_size[1]))

    # Step9. Parameters for the clothes-replacement feature.
    clothing_params = {
        "relative_x": relative_x * resize_ratio_max,
        "relative_y": relative_y * resize_ratio_max,
        "w": w * resize_ratio_max,
        "h": h * resize_ratio_max
    }

    return result_image_hd, result_image_standard, clothing_params
|
437 |
+
|
438 |
+
|
439 |
+
@calTime
def debug_mode_process(testImages):
    """
    Build a single side-by-side strip of all collected debug images.

    Each (label, image) pair is flattened onto white if it has an alpha
    channel, labelled with putText, resized to the first image's height, and
    horizontally concatenated. Returns the strip (or None for an empty list).
    """
    canvas = None
    target_height = 0
    last_index = len(testImages) - 1
    for index, (label, img) in enumerate(testImages):
        h, w = img.shape[:2]
        if img.shape[2] == 4:
            # Flatten RGBA debug images onto a white background.
            img = np.uint8(add_background(img, bgr=(255, 255, 255)))
        if index == 0:
            # The first image fixes the strip height.
            target_height = h
            canvas = cv2.putText(img, label, (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1.0,
                                 (200, 100, 100), 3)
        else:
            img = cv2.resize(img, (int(w * target_height / h), target_height))
            img = cv2.putText(img, label, (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1.0, (200, 100, 100),
                              3)
            canvas = cv2.hconcat([canvas, img])
        if index == last_index:
            return canvas
|
459 |
+
|
460 |
+
|
461 |
+
@calTime("主函数")
def IDphotos_create(input_image,
                    mode="ID",
                    size=(413, 295),
                    head_measure_ratio=0.2,
                    head_height_ratio=0.45,
                    align=False,
                    beauty=True,
                    fd68=None,
                    human_sess=None,
                    IS_DEBUG=False,
                    top_distance_max=0.12,
                    top_distance_min=0.10):
    """
    ID-photo creation entry point.

    Args:
        input_image: input image matrix (as read by cv2).
        mode: "ID" for the full pipeline; "只换底" ("background only") returns
            the matting result immediately.
        size: output size as (h, w).
        head_measure_ratio: target head-area ratio in the photo.
        head_height_ratio: target vertical position ratio of the head.
        align: whether to apply roll-angle face alignment. NOTE(review): the
            signature default is False and the rotation step (Step4) is
            commented out, so this flag is currently only forwarded to
            idphoto_cutting — confirm intended behavior.
        beauty: whether to beautify. Currently inactive (Step2 is commented out).
        fd68: 68-landmark face detector (currently unused here),
            see hycv.FaceDetection68.faceDetection68.
        human_sess: human-matting onnx InferenceSession used by image_matting.
        IS_DEBUG: forwarded to idphoto_cutting to collect intermediate images.
        top_distance_max: float, max allowed head-to-top distance ratio.
        top_distance_min: float, min allowed head-to-top distance ratio.

    Returns:
        A 9-tuple:
        (result_image_hd, result_image_standard, typography_arr,
         typography_rotate, relative_x, relative_y, w, h, status)
        - status == 1: success.
        - status == 0: the MTCNN face count != 1; all other fields are None.
        - mode == "只换底": the matting result is returned for both image
          slots, the layout/clothing fields are None, status is 1.
    """

    # Step0. data preparation / preprocessing
    matting_params = {"modnet": {"human_sess": human_sess}}
    rotation_params = {"L1": None, "L2": None, "L3": None, "clockwise": None, "drawed_image": None}
    input_image = resize_image_esp(input_image, 2000)  # resize so the longest side is at most 2000

    # Step1. face detection (disabled)
    # dets, rotation, landmark = face_number_and_angle_detection(input_image)
    # dets = face_number_and_angle_detection(input_image)

    # Step2. beautification (disabled)
    # if beauty:
    #     input_image = makeBeautiful(input_image, landmark, 2, 2, 5, 4)

    # Step3. matting
    origin_png_image = image_matting(input_image, matting_params)
    if mode == "只换底":
        return origin_png_image, origin_png_image, None, None, None, None, None, None, 1

    origin_png_image_pre = origin_png_image.copy()  # keep a copy of the matting result for idphoto_cutting

    # Step4. rotation correction (disabled)
    # skip rotation when the angle is within 2 degrees
    # if abs(rotation) <= 2:
    #     align = False
    # # otherwise rotate to correct
    # if align:
    #     input_image_candidate, origin_png_image_candidate, L1, L2, L3, clockwise, drawed_image \
    #         = rotation_ajust(input_image, rotation, cv2.split(origin_png_image)[-1], IS_DEBUG=IS_DEBUG)  # rotate image
    #
    #     origin_png_image_pre = origin_png_image.copy()
    #     input_image = input_image_candidate.copy()
    #     origin_png_image = origin_png_image_candidate.copy()
    #
    #     rotation_params["L1"] = L1
    #     rotation_params["L2"] = L2
    #     rotation_params["L3"] = L3
    #     rotation_params["clockwise"] = clockwise
    #     rotation_params["drawed_image"] = drawed_image

    # Step5. MTCNN face detection
    faces = face_number_detection_mtcnn(input_image)

    # Step6. adaptive ID-photo cropping
    face_num = len(faces)
    # reject images whose detected face count is not exactly 1
    if face_num != 1:
        return None, None, None, None, None, None, None, None, 0
    # qualified images continue to the cropping stage
    else:
        result_image_hd, result_image_standard, clothing_params = \
            idphoto_cutting(faces, head_measure_ratio, size, head_height_ratio, origin_png_image,
                            origin_png_image_pre, rotation_params, align=align, IS_DEBUG=IS_DEBUG,
                            top_distance_max=top_distance_max, top_distance_min=top_distance_min)

    # Step7. layout-photo parameters
    typography_arr, typography_rotate = generate_layout_photo(input_height=size[0], input_width=size[1])

    return result_image_hd, result_image_standard, typography_arr, typography_rotate, \
           clothing_params["relative_x"], clothing_params["relative_y"], clothing_params["w"], clothing_params["h"], 1
|
556 |
+
|
557 |
+
|
558 |
+
if __name__ == "__main__":
    # Smoke-test entry point: matting model + one image from the working dir.
    HY_HUMAN_MATTING_WEIGHTS_PATH = "./hivision_modnet.onnx"
    sess = onnxruntime.InferenceSession(HY_HUMAN_MATTING_WEIGHTS_PATH)

    input_image = cv2.imread("test.jpg")
    # cv2.imread returns None on failure; fail fast with a clear message
    # instead of crashing deep inside the pipeline.
    if input_image is None:
        raise FileNotFoundError("test.jpg not found or unreadable")

    # Fixed: the original passed oss_image_name=... and user=..., which are
    # not parameters of IDphotos_create and raised a TypeError at call time.
    result_image_hd, result_image_standard, typography_arr, typography_rotate, \
        _, _, _, _, _ = IDphotos_create(input_image,
                                        size=(413, 295),
                                        head_measure_ratio=0.2,
                                        head_height_ratio=0.45,
                                        align=True,
                                        beauty=True,
                                        fd68=None,
                                        human_sess=sess,
                                        IS_DEBUG=False,
                                        top_distance_max=0.12,
                                        top_distance_min=0.10)
    cv2.imwrite("result_image_hd.png", result_image_hd)
|
hivision_modnet.onnx
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:7e0cb9a2a841b426dd0daf1a788ec398dab059bc039041d62b15636c0783bc56
|
3 |
+
size 25888609
|
hivision_modnet.onnx.1
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:7e0cb9a2a841b426dd0daf1a788ec398dab059bc039041d62b15636c0783bc56
|
3 |
+
size 25888609
|
hivisionai/__init__.py
ADDED
File without changes
|
hivisionai/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (203 Bytes). View file
|
|
hivisionai/__pycache__/__init__.cpython-38.pyc
ADDED
Binary file (142 Bytes). View file
|
|
hivisionai/__pycache__/app.cpython-310.pyc
ADDED
Binary file (15.7 kB). View file
|
|
hivisionai/app.py
ADDED
@@ -0,0 +1,452 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# -*- coding: utf-8 -*-
|
2 |
+
|
3 |
+
"""
|
4 |
+
@Time : 2022/8/27 14:17
|
5 |
+
@Author : cuny
|
6 |
+
@File : app.py
|
7 |
+
@Software : PyCharm
|
8 |
+
@Introduce:
|
9 |
+
查看包版本等一系列操作
|
10 |
+
"""
|
11 |
+
import os
import sys
import json
import shutil
import zipfile
import requests
from argparse import ArgumentParser
from importlib.metadata import version
try:  # this try exists because local and cloud-function environments import differently
    from qcloud_cos import CosConfig
    from qcloud_cos import CosS3Client
    # CosServiceError is needed by download_version in both import branches;
    # previously it was only imported in the fallback branch, causing a
    # NameError when the primary qcloud_cos import succeeded.
    from qcloud_cos.cos_exception import CosServiceError
except ImportError:
    try:
        from qcloud_cos_v5 import CosConfig
        from qcloud_cos_v5 import CosS3Client
        from qcloud_cos.cos_exception import CosServiceError
    except ImportError:
        raise ImportError("请下载腾讯云COS相关代码包:pip install cos-python-sdk-v5")
|
29 |
+
|
30 |
+
|
31 |
+
class HivisionaiParams(object):
    """
    Shared constants for the HY-sdk CLI tooling: package/bucket names,
    local cache locations, and the HY-func dependency manifest.
    """
    # file locations
    # package name
    package_name = "HY-sdk"
    # Tencent Cloud settings
    region = "ap-beijing"
    zip_key = "HY-sdk/"  # cloud folder holding the release zips; publish.yml must change too if this changes
    # cloud user config; downloaded when cloud_config_save does not exist yet
    user_url = "https://hy-sdk-config-1305323352.cos.ap-beijing.myqcloud.com/sdk-user/user_config.json"
    bucket = "cloud-public-static-1306602019"
    # archive type
    file_format = ".zip"
    # download root (parent of the .hivisionai folder)
    # NOTE(review): $HOME expansion is POSIX-only; on Windows this stays literal
    download_path = os.path.expandvars('$HOME')
    # where release zips and their extracted contents are cached
    save_folder = f"{os.path.expandvars('$HOME')}/.hivisionai/sdk"
    # Tencent Cloud config file location
    cloud_config_save = f"{os.path.expandvars('$HOME')}/.hivisionai/user_config.json"
    # installed package path (two levels above this file)
    hivisionai_path = os.path.dirname(os.path.dirname(__file__))
    # directory hivisionai was invoked from
    getcwd = os.getcwd()
    # HY-func dependency manifest
    # each dependency carries a save path (relative to HY-func) and a download url
    functionDependence = {
        "configs": [
            # --------- configuration files
            # _lib
            {
                "url": "https://hy-sdk-config-1305323352.cos.ap-beijing.myqcloud.com/hy-func/_lib/config/aliyun-human-matting-api.json",
                "save_path": "_lib/config/aliyun-human-matting-api.json"
            },
            {
                "url": "https://hy-sdk-config-1305323352.cos.ap-beijing.myqcloud.com/hy-func/_lib/config/megvii-face-plus-api.json",
                "save_path": "_lib/config/megvii-face-plus-api.json"
            },
            {
                "url": "https://hy-sdk-config-1305323352.cos.ap-beijing.myqcloud.com/hy-func/_lib/config/volcano-face-change-api.json",
                "save_path": "_lib/config/volcano-face-change-api.json"
            },
            # _service
            {
                "url": "https://hy-sdk-config-1305323352.cos.ap-beijing.myqcloud.com/hy-func/_service/config/func_error_conf.json",
                "save_path": "_service/utils/config/func_error_conf.json"
            },
            {
                "url": "https://hy-sdk-config-1305323352.cos.ap-beijing.myqcloud.com/hy-func/_service/config/service_config.json",
                "save_path": "_service/utils/config/service_config.json"
            },
            # --------- model files
            # model weights are listed in the Notion document:
            # https://www.notion.so/HY-func-cc6cc41ba6e94b36b8fa5f5d67d1683f
        ],
        "weights": "https://www.notion.so/HY-func-cc6cc41ba6e94b36b8fa5f5d67d1683f"
    }
|
89 |
+
|
90 |
+
|
91 |
+
class HivisionaiUtils(object):
    """
    Shared utility helpers for the CLI commands: COS client construction,
    version listing/download, and dependency download.
    """
    @staticmethod
    def get_client():
        """Build and return a COS client.

        On first use the user config is downloaded from the cloud and cached
        at HivisionaiParams.cloud_config_save.
        """
        def get_secret():
            # download the user config first if it is not cached locally
            if not os.path.exists(HivisionaiParams.cloud_config_save):
                print("Downloading user_config...")
                resp = requests.get(HivisionaiParams.user_url)
                # use context managers so the handles are always closed
                # (the original leaked both file objects)
                with open(HivisionaiParams.cloud_config_save, "wb") as fd:
                    fd.write(resp.content)
            with open(HivisionaiParams.cloud_config_save, "r") as fd:
                config = json.load(fd)
            return config["secret_id"], config["secret_key"]
        # todo integrate HY-Auth-Sync
        secret_id, secret_key = get_secret()
        return CosS3Client(CosConfig(Region=HivisionaiParams.region, Secret_id=secret_id, Secret_key=secret_key))

    def get_all_versions(self):
        """Return every version name stored under the bucket's zip prefix."""
        def getAllVersion_base():
            """
            Fetch one page of the bucket listing under the configured prefix and
            extend versions_list with the version names found (zero-size keys,
            i.e. folder markers, are skipped).
            Returns the marker for the next page, or "" when the listing is
            exhausted. Responses are paginated; see the COS SDK docs.
            """
            resp = client.list_objects(
                Bucket=HivisionaiParams.bucket,
                Prefix=HivisionaiParams.zip_key,
                Marker=marker
            )
            versions_list.extend(
                [x["Key"].split("/")[-1].split(HivisionaiParams.file_format)[0]
                 for x in resp["Contents"] if int(x["Size"]) > 0])
            if resp['IsTruncated'] == 'false':  # no more data, stop polling
                return ""
            else:
                return resp['NextMarker']
        client = self.get_client()
        marker = ""
        versions_list = []
        while True:  # poll page by page
            try:
                marker = getAllVersion_base()
            except KeyError as e:
                print(e)
                raise
            if len(marker) == 0:  # listing exhausted
                break
        return versions_list

    def get_newest_version(self):
        """Return the highest version number by semantic (major, minor, patch) order."""
        versions_list = self.get_all_versions()
        # One sort on the numeric version tuple replaces the original three
        # cascaded stable sorts (patch, then minor, then major) — same order.
        versions_list.sort(key=lambda x: tuple(int(p) for p in x.split(".")[-3:]), reverse=True)
        return versions_list[0]

    def download_version(self, v):
        """
        Download the zip for version *v* from the bucket into
        HivisionaiParams.save_folder.
        Args:
            v: version string, x.x.x

        Returns:
            None
        """
        file_name = v + HivisionaiParams.file_format
        client = self.get_client()
        print(f"Download to {HivisionaiParams.save_folder}...")
        try:
            resp = client.get_object(HivisionaiParams.bucket, HivisionaiParams.zip_key + "/" + file_name)
            contents = resp["Body"].get_raw_stream().read()
        except CosServiceError:
            print(f"[{file_name}.zip] does not exist, please check your version!")
            sys.exit()
        # makedirs with exist_ok covers both the missing and existing cases
        os.makedirs(HivisionaiParams.save_folder, exist_ok=True)
        with open(os.path.join(HivisionaiParams.save_folder, file_name), "wb") as fd:
            fd.write(contents)
        print("Download success!")

    @staticmethod
    def download_dependence(path=None):
        """
        Download every dependency HY-sdk needs. Must run after ``pip install``
        (the hivisionai folder only exists once the package is installed).
        Args:
            path: directory containing the hivisionai folder; when None, the
                installed package location is used.

        Returns:
            None (files are written to the target location).
        """
        print("Dependence downloading...")
        if path is None:
            path = HivisionaiParams.hivisionai_path
        # ---------------- download the mtcnn model files
        mtcnn_path = os.path.join(path, "hivisionai/hycv/mtcnn_onnx/weights")
        base_url = "https://linimages.oss-cn-beijing.aliyuncs.com/"
        onnx_files = ["pnet.onnx", "rnet.onnx", "onet.onnx"]
        print(f"Downloading mtcnn model in {mtcnn_path}")
        # makedirs, not mkdir: intermediate directories may not exist yet
        os.makedirs(mtcnn_path, exist_ok=True)
        for onnx_file in onnx_files:
            if not os.path.exists(os.path.join(mtcnn_path, onnx_file)):
                # download the onnx model
                onnx_url = base_url + onnx_file
                print("Downloading Onnx Model in:", onnx_url)
                r = requests.get(onnx_url, stream=True)
                if r.status_code == 200:
                    with open(os.path.join(mtcnn_path, onnx_file), 'wb') as fd:
                        fd.write(r.content)  # write the payload to disk
                    print(f"Download finished -- {onnx_file}")
                del r
        # ----------------
        print("Dependence download finished...")
|
209 |
+
|
210 |
+
|
211 |
+
class HivisionaiApps(object):
    """
    CLI-facing commands; grouped in a class to keep the exposed interface tidy.
    """
    @staticmethod
    def show_cloud_version():
        """List the HY-sdk versions stored in COS, newest first (up to 10)."""
        print("Connect to COS...")
        versions_list = hivisionai_utils.get_all_versions()
        # three cascaded stable sorts == descending (major, minor, patch)
        versions_list.sort(key=lambda x: int(x.split(".")[-1]), reverse=True)
        versions_list.sort(key=lambda x: int(x.split(".")[-2]), reverse=True)
        versions_list.sort(key=lambda x: int(x.split(".")[-3]), reverse=True)
        if len(versions_list) == 0:
            print("There is no version currently, please release it first!")
            sys.exit()
        versions = "The currently existing versions (Keep 10): \n"
        for i, v in enumerate(versions_list):
            versions += str(v) + " "
            if i == 9:
                break
        print(versions)

    @staticmethod
    def upgrade(v: str, enforce: bool = False, save_cached: bool = False):
        """
        Upgrade HY-sdk to the given version.
        Args:
            v: target version "x.x.x"; "-1" selects the newest cloud version.
            enforce: force reinstall even if this version is already installed.
            save_cached: whether to keep the downloaded wheel files (default: no).
        Returns:
            None
        """
        def check_format():
            # noinspection PyBroadException
            try:
                major, minor, patch = v.split(".")
                int(major)
                int(minor)
                int(patch)
            except Exception as e:
                # Fixed: the original printed the error and fell through
                # (pass), then tried to install the malformed version anyway.
                print(f"Illegal version number!\n{e}")
                sys.exit()
        print("Upgrading, please wait a moment...")
        if v == "-1":
            v = hivisionai_utils.get_newest_version()
        # validate the version format
        check_format()
        if v == version(HivisionaiParams.package_name) and not enforce:
            print(f"Current version: {v} already exists, skip installation.")
            sys.exit()
        hivisionai_utils.download_version(v)
        # downloaded into save_folder; now decompress
        target_zip = os.path.join(HivisionaiParams.save_folder, f"{v}.zip")
        assert zipfile.is_zipfile(target_zip), "Decompression failed, and the target was not a zip file."
        new_dir = target_zip.replace('.zip', '')  # name of the extracted folder
        if os.path.exists(new_dir):
            shutil.rmtree(new_dir)
        os.mkdir(new_dir)
        # context manager so the archive handle is closed (original leaked it)
        with zipfile.ZipFile(target_zip) as zf:
            zf.extractall(new_dir)
        print("Decompressed, begin to install...")
        os.system(f'pip3 install {os.path.join(new_dir, "**.whl")}')
        # automatically fetch the required model dependencies
        hivisionai_utils.download_dependence()
        # Remove the cached downloads unless the user asked to keep them.
        # Fixed: the original deleted the cache when save_cached was True —
        # the exact opposite of the documented meaning.
        if not save_cached:
            os.system(f'rm -rf {HivisionaiParams.save_folder}/**')

    @staticmethod
    def export(path):
        """
        Export the newest version's files into *path* (resolved relative to
        the invocation directory).
        Args:
            path: user-supplied target directory

        Returns:
            None; the newest hivisionai files are installed into path.
        """
        export_path = os.path.join(HivisionaiParams.getcwd, path)
        # the target directory must already exist
        assert os.path.exists(export_path), f"{export_path} dose not Exists!"
        v = hivisionai_utils.get_newest_version()
        # download into .hivisionai/sdk
        hivisionai_utils.download_version(v)
        # downloaded into save_folder; now decompress
        target_zip = os.path.join(HivisionaiParams.save_folder, f"{v}.zip")
        assert zipfile.is_zipfile(target_zip), "Decompression failed, and the target was not a zip file."
        new_dir = os.path.basename(target_zip.replace('.zip', ''))  # extracted folder name
        new_dir = os.path.join(export_path, new_dir)  # extracted folder path
        if os.path.exists(new_dir):
            shutil.rmtree(new_dir)
        os.mkdir(new_dir)
        with zipfile.ZipFile(target_zip) as zf:
            zf.extractall(new_dir)
        print("Decompressed, begin to export...")
        # force-remove bin/hivisionai, hivisionai/ and HY_sdk-** before reinstalling
        bin_path = os.path.join(export_path, "bin")
        hivisionai_path = os.path.join(export_path, "hivisionai")
        sdk_path = os.path.join(export_path, "HY_sdk-**")
        os.system(f"rm -rf {bin_path} {hivisionai_path} {sdk_path}")
        # cleanup done; start the export
        os.system(f'pip3 install {os.path.join(new_dir, "**.whl")} -t {export_path}')
        hivisionai_utils.download_dependence(export_path)
        # delete the downloaded archive and extracted folder
        os.system(f'rm -rf {target_zip} && rm -rf {new_dir}')
        print("Done.")

    @staticmethod
    def hy_func_init(force):
        """
        Run ``hivisionai --init`` inside the HY-func directory to download the
        required dependencies to their expected locations.
        Large repair models still need a manual download.
        Args:
            force: when True, re-download everything even if it already exists
        Returns:
            None; the necessary config dependencies are downloaded.
        """
        cwd = HivisionaiParams.getcwd
        # make sure the current folder really is HY-func
        dirName = os.path.basename(cwd)
        assert dirName == "HY-func", "请在正确的文件目录下初始化HY-func!"
        # everything to download is declared in HivisionaiParams.functionDependence
        functionDependence = HivisionaiParams.functionDependence
        # download the config files
        configs = functionDependence["configs"]
        print("正在下载配置文件...")
        for config in configs:
            if not force and os.path.exists(config['save_path']):
                print(f"[pass]: {os.path.basename(config['url'])}")
                continue
            print(f"[Download]: {config['url']}")
            resp = requests.get(config['url'])
            # json payloads live in resp.text; other file types may not
            with open(os.path.join(cwd, config['save_path']), 'w') as fd:
                fd.write(resp.text)
        # remaining (model) files: point the user at the Notion document
        print(f"[NOTICE]: 一切准备就绪,请访问下面的文档下载剩下的模型文件:\n{functionDependence['weights']}")

    @staticmethod
    def hy_func_deploy(functionName: str = None, functionPath: str = None):
        """
        Run inside HY-func with a function name to copy a deployable version
        of that function to the Desktop. Repair functions are excluded and
        still need a manually built image.
        Args:
            functionName: name of the function to deploy
            functionPath: HY-func path to register globally

        Returns:
            None; a folder named after the function appears on the Desktop.
        """
        # only model files are stripped; the other config files are kept
        # to allow "hivisionai --deploy funcName" from anywhere, the HY-func
        # path is registered in .hivisionai/user_config.json first
        # print(functionName, functionPath)
        if functionPath is not None:
            # register/update the path (relative to the invocation directory)
            assert os.path.basename(functionPath) == "HY-func", "所指向路径非HY-func!"
            func_path = os.path.join(HivisionaiParams.getcwd, functionPath)
            # Fixed: the original asserted os.path.join(func_path), which is
            # always a truthy string, so the existence check never fired.
            assert os.path.exists(func_path), f"路径不存在: {func_path}"
            # persist functionPath into user_config
            user_config = json.load(open(HivisionaiParams.cloud_config_save, 'rb'))
            user_config["func_path"] = func_path
            open(HivisionaiParams.cloud_config_save, 'w').write(json.dumps(user_config))
            print("HY-func全局路径保存成功!")
        try:
            user_config = json.load(open(HivisionaiParams.cloud_config_save, 'rb'))
            func_path = user_config['func_path']
        except KeyError:
            return print("请先使用-p命令注册全局HY-func路径!")
        # func_path is guaranteed to be set at this point
        # print(os.listdir(func_path))
        assert functionName in os.listdir(func_path), functionName + "功能不存在!"
        func_path_deploy = os.path.join(func_path, functionName)
        # copy the files to the target directory
        # the default target is the Desktop; it must exist already
        target_dir = os.path.join(HivisionaiParams.download_path, "Desktop")
        assert os.path.exists(target_dir), target_dir + "文件路径不存在,你需要先创建一下!"
        # start copying
        target_dir = os.path.join(target_dir, functionName)
        print("正在复制需要部署的文件...")
        os.system(f"rm -rf {target_dir}")
        os.system(f'cp -rf {func_path_deploy} {target_dir}')
        os.system(f"cp -rf {os.path.join(func_path, '_lib')} {target_dir}")
        os.system(f"cp -rf {os.path.join(func_path, '_service')} {target_dir}")
        # generate the latest hivisionai package
        print("正在生成hivisionai代码包...")
        os.system(f'hivisionai -t {target_dir}')
        # copying done; delete files not needed for deployment
        print("移动完毕,正在删除不需要的文件...")
        # model files
        os.system(f"rm -rf {os.path.join(target_dir, '_lib', 'weights', '**')}")
        # leftovers from the hivisionai generation step
        os.system(f"rm -rf {os.path.join(target_dir, 'bin')} {os.path.join(target_dir, 'HY_sdk**')}")
        print("部署文件生成成功,你可以开始部署了!")
|
408 |
+
|
409 |
+
|
410 |
+
# Module-level singleton shared by HivisionaiApps and entry_point below.
hivisionai_utils = HivisionaiUtils()
|
411 |
+
|
412 |
+
|
413 |
+
def entry_point():
    """Parse CLI arguments and dispatch to the matching HivisionaiApps command."""
    parser = ArgumentParser()
    # show the installed version
    parser.add_argument("-v", "--version", action="store_true", help="View the current HY-sdk version, which does not represent the final cloud version.")
    # auto-upgrade
    parser.add_argument("-u", "--upgrade", nargs='?', const="-1", type=str, help="Automatically update HY-sdk to the latest version")
    # list the cloud HY-sdk versions
    parser.add_argument("-l", "--list", action="store_true", help="Find HY-sdk versions of the cloud, and keep up to ten")
    # export a cloud version to a local path
    parser.add_argument("-t", "--export", nargs='?', const="./", help="Add a path parameter to automatically download the latest version of sdk to this path. If there are no parameters, the default is the current path")
    # force flag; attach it when another command must be re-run from scratch
    parser.add_argument("-f", "--force", action="store_true", help="Enforcement of other functions, execution of a single parameter is meaningless")
    # initialize HY-func
    parser.add_argument("--init", action="store_true", help="Initialization HY-func")
    # deploy HY-func
    parser.add_argument("-d", "--deploy", nargs='?', const="-1", type=str, help="Deploy HY-func")
    # extra parameter for commands that register custom content
    parser.add_argument("-p", "--param", nargs='?', const="-1", type=str, help="When registering some custom content, you need to attach this parameter and write the custom content.")
    args = parser.parse_args()
    # Dispatch order matters: the first matching command runs and exits.
    if args.version:
        print(version(HivisionaiParams.package_name))
        sys.exit()
    if args.upgrade:
        # args.force maps to the `enforce` parameter of upgrade()
        HivisionaiApps.upgrade(args.upgrade, args.force)
        sys.exit()
    if args.list:
        HivisionaiApps.show_cloud_version()
        sys.exit()
    if args.export:
        HivisionaiApps.export(args.export)
        sys.exit()
    if args.init:
        HivisionaiApps.hy_func_init(args.force)
        sys.exit()
    if args.deploy:
        # -p/--param supplies the HY-func path to register, when given
        HivisionaiApps.hy_func_deploy(args.deploy, args.param)


if __name__ == "__main__":
    entry_point()
|
hivisionai/hyService/__init__.py
ADDED
File without changes
|
hivisionai/hyService/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (213 Bytes). View file
|
|
hivisionai/hyService/__pycache__/__init__.cpython-38.pyc
ADDED
Binary file (152 Bytes). View file
|
|
hivisionai/hyService/__pycache__/cloudService.cpython-310.pyc
ADDED
Binary file (14.3 kB). View file
|
|
hivisionai/hyService/__pycache__/dbTools.cpython-310.pyc
ADDED
Binary file (11.8 kB). View file
|
|
hivisionai/hyService/__pycache__/error.cpython-310.pyc
ADDED
Binary file (1.16 kB). View file
|
|
hivisionai/hyService/__pycache__/error.cpython-38.pyc
ADDED
Binary file (1.17 kB). View file
|
|
hivisionai/hyService/__pycache__/serviceTest.cpython-310.pyc
ADDED
Binary file (1.64 kB). View file
|
|
hivisionai/hyService/__pycache__/utils.cpython-310.pyc
ADDED
Binary file (2.87 kB). View file
|
|
hivisionai/hyService/cloudService.py
ADDED
@@ -0,0 +1,406 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
焕影小程序功能服务端的基本工具函数,以类的形式封装
|
3 |
+
"""
|
4 |
+
try: # 加上这个try的原因在于本地环境和云函数端的import形式有所不同
|
5 |
+
from qcloud_cos import CosConfig
|
6 |
+
from qcloud_cos import CosS3Client
|
7 |
+
except ImportError:
|
8 |
+
try:
|
9 |
+
from qcloud_cos_v5 import CosConfig
|
10 |
+
from qcloud_cos_v5 import CosS3Client
|
11 |
+
except ImportError:
|
12 |
+
raise ImportError("请下载腾讯云COS相关代码包:pip install cos-python-sdk-v5")
|
13 |
+
import requests
|
14 |
+
import datetime
|
15 |
+
import json
|
16 |
+
from .error import ProcessError
|
17 |
+
import os
|
18 |
+
local_path_ = os.path.dirname(__file__)
|
19 |
+
|
20 |
+
|
21 |
+
class GetConfig(object):
    """Helpers for fetching and caching COS-hosted configuration files.

    NOTE(review): on a cache miss these methods prompt interactively for
    COS credentials via input() — unsuitable for non-interactive contexts.
    """
    @staticmethod
    def hy_sdk_client(Id:str, Key:str):
        # Build a COS client bound to the fixed sdk-config bucket.
        REGION: str = 'ap-beijing'
        TOKEN = None
        SCHEME: str = 'https'
        BUCKET: str = 'hy-sdk-config-1305323352'
        client_config = CosConfig(Region=REGION,
                                  SecretId=Id,
                                  SecretKey=Key,
                                  Token=TOKEN,
                                  Scheme=SCHEME)
        return CosS3Client(client_config), BUCKET

    def load_json(self, path:str, default_download=False):
        """Load a JSON config from *path*; on a miss (or when
        default_download is True) download it from COS, cache it at *path*,
        and return the parsed dict. Returns {} when the cached file is empty
        or not valid JSON.
        """
        try:
            if os.path.isdir(path):
                raise ProcessError("请输入具体的配置文件路径,而非文件夹!")
            if default_download is True:
                # force a fresh download by faking a cache miss
                print(f"\033[34m 默认强制重新下载配置文件...\033[0m")
                raise FileNotFoundError
            with open(path) as f:
                config = json.load(f)
            return config
        except FileNotFoundError:
            dir_name = os.path.dirname(path)
            try:
                os.makedirs(dir_name)
            except FileExistsError:
                pass
            base_name = os.path.basename(path)
            print(f"\033[34m 正在从COS中下载配置文件...\033[0m")
            print(f"\033[31m 请注意,接下来会在{dir_name}路径下生成文件{base_name}...\033[0m")
            Id = input("请输入SecretId:")
            Key = input("请输入SecretKey:")
            client, bucket = self.hy_sdk_client(Id, Key)
            data_bytes = client.get_object(Bucket=bucket,Key=base_name)["Body"].get_raw_stream().read()
            data = json.loads(data_bytes.decode("utf-8"))
            # data["SecretId"] = Id  # could be added in the future
            # data["SecretKey"] = Key
            with open(path, "w") as f:
                data_str = json.dumps(data, ensure_ascii=False)
                # ensure_ascii=False keeps non-ASCII characters as-is
                # (the default True would escape them all)
                f.write(data_str)
                f.close()
            print(f"\033[32m 配置文件保存成功\033[0m")
            return data
        except json.decoder.JSONDecodeError:
            print(f"\033[31m WARNING: 配置文件为空!\033[0m")
            return {}

    def load_file(self, cloud_path:str, local_path:str):
        """
        Download a file from COS to the local machine. This runs
        unconditionally; add guards at the call site where appropriate.
        :param cloud_path: file path inside the cloud bucket
        :param local_path: local path to save the cloud file to
        """
        if os.path.isdir(cloud_path):
            raise ProcessError("请输入具体的云端文件路径,而非文件夹!")
        if os.path.isdir(local_path):
            raise ProcessError("请输入具体的本地文件路径,而非文件夹!")
        dir_name = os.path.dirname(local_path)
        base_name = os.path.basename(local_path)
        try:
            os.makedirs(dir_name)
        except FileExistsError:
            pass
        cloud_name = os.path.basename(cloud_path)
        print(f"\033[31m 请注意,接下来会在{dir_name}路径下生成文件{base_name}\033[0m")
        Id = input("请输入SecretId:")
        Key = input("请输入SecretKey:")
        client, bucket = self.hy_sdk_client(Id, Key)
        print(f"\033[34m 正在从COS中下载文件: {cloud_name}, 此过程可能耗费一些时间...\033[0m")
        data_bytes = client.get_object(Bucket=bucket,Key=cloud_path)["Body"].get_raw_stream().read()
        # data["SecretId"] = Id  # could be added in the future
        # data["SecretKey"] = Key
        with open(local_path, "wb") as f:
            # write the raw bytes straight to disk
            f.write(data_bytes)
            f.close()
        print(f"\033[32m 文件保存成功\033[0m")
|
105 |
+
|
106 |
+
|
107 |
+
class CosConf(GetConfig):
    """
    Connects to a COS bucket and downloads the service configuration.

    For safety the default config files are stored in COS; the first use of
    this class (or a subclass) asks for the id/key issued by Cuny. When
    service_default_download is False and conf/service_config.json already
    exists under the run path, no download (and no id/key prompt) is needed.
    In practice the download happens only once, because the config file is
    saved into the source folder. To customize the location, write __init__
    in a subclass and point service_path at the desired file.
    """
    def __init__(self) -> None:
        # Shared connection parameters, resolved lazily from the service config.
        self.__SECRET_ID: str = None  # service id
        self.__SECRET_KEY: str = None  # service key
        self.__REGION: str = None  # bucket region
        self.__TOKEN: str = None  # credentials token; currently always None
        self.__SCHEME: str = None  # access protocol; effectively https
        self.__BUCKET: str = None  # bucket name
        self.__SERVICE_CONFIG: dict = None  # parsed service configuration
        self.service_path: str = f"{local_path_}/conf/service_config.json"
        # Config file path: the conf folder under the run path by default.
        self.service_default_download = False  # re-download the config on every access?

    @property
    def service_config(self):
        # Lazy-load (or force-reload) the service configuration.
        if self.__SERVICE_CONFIG is None or self.service_default_download is True:
            self.__SERVICE_CONFIG = self.load_json(self.service_path, self.service_default_download)
        return self.__SERVICE_CONFIG

    @property
    def client(self):
        # A fresh CosS3Client built from the lazily-resolved credentials.
        client_config = CosConfig(Region=self.region,
                                  SecretId=self.secret_id,
                                  SecretKey=self.secret_key,
                                  Token=self.token,
                                  Scheme=self.scheme)
        return CosS3Client(client_config)

    def get_key(self, key:str):
        """Look up *key* in the service config.

        The literal string "None" is mapped to None; a missing key prints a
        warning and returns None as well.
        """
        try:
            data = self.service_config[key]
            if data == "None":
                return None
            else:
                return data
        except KeyError:
            print(f"\033[31m没有对应键值{key},默认返回None\033[0m")
            return None

    @property
    def secret_id(self):
        if self.__SECRET_ID is None:
            self.__SECRET_ID = self.get_key("SECRET_ID")
        return self.__SECRET_ID

    @secret_id.setter
    def secret_id(self, value:str):
        self.__SECRET_ID = value

    @property
    def secret_key(self):
        if self.__SECRET_KEY is None:
            self.__SECRET_KEY = self.get_key("SECRET_KEY")
        return self.__SECRET_KEY

    @secret_key.setter
    def secret_key(self, value:str):
        self.__SECRET_KEY = value

    @property
    def region(self):
        if self.__REGION is None:
            self.__REGION = self.get_key("REGION")
        return self.__REGION

    @region.setter
    def region(self, value:str):
        self.__REGION = value

    @property
    def token(self):
        # if self.__TOKEN is None:
        #     self.__TOKEN = self.get_key("TOKEN")
        # Intentionally not read from the config — always returns None unless set.
        return self.__TOKEN

    @token.setter
    def token(self, value:str):
        self.__TOKEN= value

    @property
    def scheme(self):
        if self.__SCHEME is None:
            self.__SCHEME = self.get_key("SCHEME")
        return self.__SCHEME

    @scheme.setter
    def scheme(self, value:str):
        self.__SCHEME = value

    @property
    def bucket(self):
        if self.__BUCKET is None:
            self.__BUCKET = self.get_key("BUCKET")
        return self.__BUCKET

    @bucket.setter
    def bucket(self, value):
        self.__BUCKET = value

    def downloadFile_COS(self, key, bucket:str=None, if_read:bool=False):
        """
        Download an object (binary data) from COS; returns None on failure.

        :param key: object key on COS
        :param bucket: bucket name; falls back to the default bucket when None
        :param if_read: when True return the raw bytes, otherwise the response object
        """
        CosBucket = self.bucket if bucket is None else bucket
        try:
            # The Debug facility of this class was dropped:
            # self.debug_print(f"Download from {CosBucket}", font_color="blue")
            obj = self.client.get_object(
                Bucket=CosBucket,
                Key=key
            )
            if if_read is True:
                data = obj["Body"].get_raw_stream().read()  # bytes
                return data
            else:
                return obj
        except Exception as e:
            print(f"\033[31m下载失败! 错误描述:{e}\033[0m")
            return None

    def showFileList_COS_base(self, key, bucket, marker:str=""):
        """
        List the entries under a folder inside a COS bucket (one page).
        :param key: storage path prefix on COS
        :param bucket: bucket name (callers pass the default bucket when unspecified)
        :param marker: pagination marker recording where the previous query stopped
        NOTE: to change the default bucket, set ``s.bucket = <name>`` on the
        instance at runtime. The entries are in response["Contents"]; the
        response size is capped — see the official SDK docs for details.
        """
        response = self.client.list_objects(
            Bucket=bucket,
            Prefix=key,
            Marker=marker
        )
        return response

    def showFileList_COS(self, key, bucket:str=None)->list:
        """
        Enumerate every object under *key*, paging past the SDK's
        per-response size limit by polling with the NextMarker.
        """
        marker = ""
        file_list = []
        CosBucket = self.bucket if bucket is None else bucket
        while True:  # poll page by page
            response = self.showFileList_COS_base(key, CosBucket, marker)
            try:
                file_list.extend(response["Contents"])
            except KeyError as e:
                print(e)
                raise
            # IsTruncated is a string in the SDK response; 'false' means no more pages.
            if response['IsTruncated'] == 'false':
                break
            marker = response['NextMarker']
        return file_list

    def uploadFile_COS(self, buffer, key, bucket:str=None):
        """
        Upload data to COS; *buffer* must be binary (or str accepted by the SDK).
        Returns True on success, False on any failure.
        """
        CosBucket = self.bucket if bucket is None else bucket
        try:
            self.client.put_object(
                Bucket=CosBucket,
                Body=buffer,
                Key=key
            )
            return True
        except Exception as e:
            print(e)
            return False
|
287 |
+
|
288 |
+
|
289 |
+
class FuncDiary(CosConf):
    """Per-invocation diary: collects one log payload and uploads it to the COS log bucket."""

    # Known uid -> human-readable owner name (used for filtering logs).
    filter_dict = {"60a5e13da00e6e0001fd53c8": "Cuny",
                   "612c290f3a9af4000170faad": "守望平凡",
                   "614de96e1259260001506d6c": "林泽毅-焕影一新"}

    def __init__(self, func_name: str, uid: str, error_conf_path: str = f"{local_path_}/conf/func_error_conf.json"):
        """
        Instantiate the diary.
        Args:
            func_name: feature name; determines the upload path of the log
            uid: id of the invoking user
            error_conf_path: local path of the status-code -> description table
        """
        super().__init__()
        # Config file path: defaults to the conf folder next to error_conf_path.
        self.service_path: str = os.path.join(os.path.dirname(error_conf_path), "service_config.json")
        self.error_dict = self.load_json(path=error_conf_path)
        self.__up: str = f"wx/invokeFunction_c/{datetime.datetime.now().strftime('%Y/%m/%d/%H')}/{func_name}/"
        self.func_name: str = func_name
        # The prefix of the log name: whole seconds elapsed since the top of
        # the current hour, followed by "_".
        self.__start_time = datetime.datetime.now().timestamp()
        h_point = datetime.datetime.strptime(datetime.datetime.now().strftime('%Y/%m/%d/%H'), '%Y/%m/%d/%H')
        h_point_timestamp = h_point.timestamp()
        self.__prefix = int(self.__start_time - h_point_timestamp).__str__() + "_"
        self.__uid = uid
        self.__diary = None  # log payload dict; settable exactly once via `content`

    def __str__(self):
        return f"<{self.func_name}> DIARY for {self.__uid}"

    @property
    def content(self):
        # The current log payload (None until set).
        return self.__diary

    @content.setter
    def content(self, value: str):
        # Accepts a dict only; "status" is reserved and the payload is write-once.
        if not isinstance(value, dict):
            raise TypeError("content 只能是字典!")
        if "status" in value:
            raise KeyError("status字段已被默认占用,请在日志信息中更换字段名称!")
        if self.__diary is None:
            self.__diary = value
        else:
            raise PermissionError("为了减小日志对整体代码的影响,<content>只能被覆写一次!")

    def uploadDiary_COS(self, status_id: str, suffix: str = "", bucket: str = "hy-hcy-data-logs-1306602019"):
        """
        Finalize the diary and upload it to COS.
        :param status_id: key into error_dict; "0000" marks success and selects the True/ path
        :param suffix: optional extra suffix appended to the object name
        :param bucket: target log bucket
        """
        if self.__diary is None:
            self.__diary = {"status": self.error_dict[status_id]}
        if status_id == "0000":
            self.__up += f"True/{self.__uid}/"
        else:
            self.__up += f"False/{self.__uid}/"
        # Elapsed time since __init__, in tenths of a second.
        interval = int(10 * (datetime.datetime.now().timestamp() - self.__start_time))
        prefix = self.__prefix + status_id + "_" + interval.__str__()
        self.__diary["status"] = self.error_dict[status_id]
        name = prefix + "_" + suffix if len(suffix) != 0 else prefix
        self.uploadFile_COS(buffer=json.dumps(self.__diary), key=self.__up + name, bucket=bucket)
        print(f"{self}上传成功.")
|
345 |
+
|
346 |
+
|
347 |
+
class ResponseWebSocket(CosConf):
    """Mixin over CosConf that pushes messages back to clients through the API-gateway WebSocket channel."""

    # Gateway push endpoint; resolved lazily from the service config.
    __HOST: str = None

    @property
    def sendBackHost(self):
        """Gateway host URL, fetched once via get_key("HOST") and then cached."""
        host = self.__HOST
        if host is None:
            host = self.get_key("HOST")
            self.__HOST = host
        return host

    @sendBackHost.setter
    def sendBackHost(self, value):
        self.__HOST = value

    def sendMsg_toWebSocket(self, message, connectionID: str = None):
        """POST *message* to the gateway for the given connection.

        Silently does nothing when connectionID is None.
        """
        if connectionID is None:
            return
        payload = {
            'websocket': {
                'action': "data send",
                'secConnectionID': connectionID,
                'dataType': 'text',
                'data': json.dumps(message),
            }
        }
        requests.post(self.sendBackHost, json=payload)
        print("send success!")

    @staticmethod
    def create_Msg(status, msg):
        """Decorate *msg* for the WebSocket client.

        Adds to the incoming dict (which already carries fields such as the
        cloud obj_key):
        1. status — 'true' unless *status* is -1, in which case "false"
        2. id — fixed "async-back-msg"
        3. type / format — fixed tags for the receiver
        Returns the same (mutated) dict.
        """
        msg['status'] = 'true' if status != -1 else "false"  # a bool would really be nicer
        msg['id'] = "async-back-msg"
        msg['type'] = "funcType"
        msg["format"] = "imageType"
        return msg
|
386 |
+
|
387 |
+
|
388 |
+
# 功能服务类
|
389 |
+
# Functional service class.
class Service(ResponseWebSocket):
    """
    Base class for a deployable service: bundles the COS upload/download
    helpers with one-call gateway messaging. Instances are callable, so
    running the service is just invoking the object; subclass and override
    as needed.
    """

    @classmethod
    def process(cls, *args, **kwargs):
        """Main processing hook — reimplement this in subclasses."""
        return None

    @classmethod
    def __call__(cls, *args, **kwargs):
        # Entry point stub; subclasses provide the real behavior.
        return None
|
405 |
+
|
406 |
+
|