diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..817437959b7a75003670b78762dda3d4d2e7e645 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,35 +1,6 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+hivision_modnet.onnx filter=lfs diff=lfs merge=lfs -text
+hivision_modnet.onnx.1 filter=lfs diff=lfs merge=lfs -text
+hivisionai/hycv/mtcnn_onnx/weights/onet.onnx filter=lfs diff=lfs merge=lfs -text
+hivisionai/hycv/mtcnn_onnx/weights/pnet.onnx filter=lfs diff=lfs merge=lfs -text
+hivisionai/hycv/mtcnn_onnx/weights/rnet.onnx filter=lfs diff=lfs merge=lfs -text
+sources/demoImage.png filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..7e99e367f8443d86e5e8825b9fda39dfbb39630d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+*.pyc
\ No newline at end of file
diff --git a/EulerZ.py b/EulerZ.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e0bbaf5c081ea35aa56e2cc7e60cd58105ea3a0
--- /dev/null
+++ b/EulerZ.py
@@ -0,0 +1,51 @@
+"""
+@author: cuny
+@file: EulerZ.py
+@time: 2022/4/1 13:54
+@description:
+寻找三维z轴旋转角roll,实现:
+1. 输入一张三通道图片(四通道、单通道将默认转为三通道)
+2. 输出人脸绕z轴的转角roll,顺时针为正方向,角度制
+"""
+import cv2
+import numpy as np
+from math import asin, pi # -pi/2 ~ pi/2
+
+
+# 获得人脸的关键点信息
+def get_facePoints(src: np.ndarray, fd68):
+ if len(src.shape) == 2:
+ src = cv2.cvtColor(src, cv2.COLOR_GRAY2BGR)
+ elif src.shape[2] == 4:
+ src = cv2.cvtColor(src, cv2.COLOR_BGRA2BGR)
+ status, dets, landmarks, _ = fd68.facePointsEuler(src)
+
+ if status == 0:
+ return 0, None, None
+ elif status == 2:
+ return 2, None, None
+ else:
+ return 1, dets, np.fliplr(landmarks)
+
+
+def eulerZ(landmark: np.matrix):
+ # 我们规定顺时针为正方向
+ def get_pi_2(r):
+ pi_2 = pi / 2.
+ if r >= 0.0:
+ return pi_2
+ else:
+ return -pi_2
+ orbit_points = np.array([[landmark[21, 0], landmark[21, 1]], [landmark[71, 0], landmark[71, 1]],
+ [landmark[25, 0], landmark[25, 1]], [landmark[67, 0], landmark[67, 1]]])
+ # [[cos a],[sin a],[point_x],[point_y]]
+ # 前面两项是有关直线与Y正半轴夹角a的三角函数,所以对于眼睛部分来讲sin a应该接近1
+ # "我可以认为"cv2.fitLine的y轴正方向为竖直向下,且生成的拟合直线的方向为从起点指向终点
+ # 与y轴的夹角为y轴夹角与直线方向的夹角,方向从y指向直线,逆时针为正方向
+ # 所以最后对于鼻梁的计算结果需要取个负号
+ orbit_line = cv2.fitLine(orbit_points, cv2.DIST_L2, 0, 0.01, 0.01)
+ orbit_a = asin(orbit_line[1][0])
+ nose_points = np.array([[landmark[55, 0], landmark[55, 1]], [landmark[69, 0], landmark[69, 1]]])
+ nose_line = cv2.fitLine(nose_points, cv2.DIST_L2, 0, 0.01, 0.01)
+ nose_a = asin(nose_line[1][0])
+ return (orbit_a + nose_a) * (180.0 / (2 * pi))
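+
+
+# Minimal usage sketch (assumptions: `fd68` is a 68-point face detector exposing
+# facePointsEuler(), as expected by get_facePoints above, and the image path is a placeholder):
+# status, dets, landmarks = get_facePoints(cv2.imread("face.jpg"), fd68)
+# if status == 1:
+#     roll = eulerZ(landmarks)  # clockwise-positive roll angle, in degrees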
diff --git a/README.md b/README.md
index 0ce392b537c27f5509f79bbff4870596d932ac64..9aac129ea26334d372fbe233c0631a32c2a342f7 100644
--- a/README.md
+++ b/README.md
@@ -1,12 +1,133 @@
---
title: HivisionIDPhotos
-emoji: ⚡
-colorFrom: yellow
-colorTo: red
-sdk: gradio
-sdk_version: 3.42.0
app_file: app.py
-pinned: false
+sdk: gradio
+sdk_version: 3.40.1
+---
+
+
+# HivisionIDPhoto
+
+
+English / [中文](README_CN.md)
+
+[![GitHub](https://img.shields.io/static/v1?label=Github&message=GitHub&color=black)](https://github.com/xiaolin199912/HivisionIDPhotos)
+[![SwanHub Demo](https://swanhub.co/git/repo/SwanHub%2FAuto-README/file/preview?ref=main&path=swanhub.svg)](https://swanhub.co/ZeYiLin/HivisionIDPhotos/demo)
+[![zhihu](https://img.shields.io/static/v1?label=知乎&message=zhihu&color=blue)](https://zhuanlan.zhihu.com/p/638254028)
+
+
+
+
+
+
+# 🤩Project Update
+
+- Online Demo: [![SwanHub Demo](https://swanhub.co/git/repo/SwanHub%2FAuto-README/file/preview?ref=main&path=swanhub.svg)](https://swanhub.co/ZeYiLin/HivisionIDPhotos/demo)
+- 2023.7.15: A Python library is planned for release!
+- 2023.6.20: Updated **Preset Size Menu**
+- 2023.6.19: Updated **Layout photo**
+- 2023.6.13: Updated **center gradient color**
+- 2023.6.11: Updated **top and bottom gradient color**
+- 2023.6.8: Updated **custom size**
+- 2023.6.4: Updated **custom background color and face detection bug notification**
+- 2023.5.10: Updated **change background without changing size**
+
+
+
+# Overview
+
+> 🚀Thank you for your interest in our work. You may also want to check out our other achievements in the field of image processing. Please feel free to contact us at zeyi.lin@swanhub.co.
+
+HivisionIDPhoto aims to develop a practical intelligent algorithm for producing ID photos. It uses a complete set of model workflows to recognize various user photo scenarios, perform image segmentation, and generate ID photos.
+
+**HivisionIDPhoto can:**
+
+1. Perform lightweight image segmentation
+2. Generate standard ID photos and six-inch layout photos according to different size specifications
+3. Provide beauty features (coming soon)
+4. Provide intelligent formal wear replacement (coming soon)
+
+
+
+
+
+
---
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+If HivisionIDPhoto is helpful to you, please star this repo or recommend it to friends who need an ID photo in a hurry!
+
+
+# 🔧Environment Dependencies and Installation
+
+- Python >= 3.7 (we recommend [Anaconda](https://www.anaconda.com/download/#linux) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html))
+- onnxruntime
+- OpenCV
+- OS: Linux, Windows, macOS
+
+### Installation
+
+1. Clone repo
+
+```bash
+git lfs install && git clone https://swanhub.co/ZeYiLin/HivisionIDPhotos.git
+cd HivisionIDPhotos
+```
+
+2. Install dependent packages
+
+```
+pip install numpy
+pip install opencv-python
+pip install onnxruntime
+pip install gradio
+```
+
+
+
+# ⚡️Quick Inference
+
+### 1. Download Pre-trained Models
+
+**SwanHub:**
+
+The model and code are downloaded through git-lfs.
+
+```
+git lfs install
+git clone https://swanhub.co/ZeYiLin/HivisionIDPhotos.git
+```
+
+**GitHub:**
+
+```
+git clone https://github.com/xiaolin199912/HivisionIDPhotos.git
+```
+
+| Model | Parameters | Dir | Download Link |
+| :------------------: | :--------: | :-------: | :----------------------------------------------------------: |
+| hivision_modnet.onnx | 25 M | `./` | [Download](https://github.com/xiaolin199912/HivisionIDPhotos/releases/download/pretrained-model/hivision_modnet.onnx) |
+
+### 2. Inference!
+
+Run a Gradio Demo:
+
+```
+python app.py
+```
+
+Running the program opens a local web page where you can create and adjust ID photos interactively.
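+
+You can also call the pipeline from Python instead of the web UI. The following is a minimal sketch that mirrors the call `app.py` makes to `IDphotos_create`; the input image path, the chosen size, and the background color are placeholder assumptions:
+
+```python
+import cv2
+import numpy as np
+import onnxruntime
+from face_judgement_align import IDphotos_create
+from hivisionai.hycv.vision import add_background
+
+sess = onnxruntime.InferenceSession("./hivision_modnet.onnx")
+img = cv2.imread("demo.jpg")  # placeholder input photo
+hd, standard, _, _, _, _, _, _, status = IDphotos_create(
+    img, mode="尺寸列表", size=(413, 295),
+    head_measure_ratio=0.2, head_height_ratio=0.45,
+    align=False, beauty=False, fd68=None, human_sess=sess,
+    IS_DEBUG=False, top_distance_max=0.12, top_distance_min=0.10)
+if status:  # status == 0 means the detected face count is not 1
+    out = np.uint8(add_background(standard, bgr=(86, 140, 212)))  # blue background
+    cv2.imwrite("idphoto.png", out)
+```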
+
+
+# Reference Projects
+
+1. MTCNN: https://github.com/ipazc/mtcnn
+2. MTCNN-ONNX: https://swanhub.co/ZeYiLin/MTCNN-ONNX
+3. ModNet: https://github.com/ZHKKKe/MODNet
+
+
+# 📧Contact
+
+If you have any questions, please email zeyi.lin@swanhub.co
+
+
+Copyright © 2023, ZeYiLin. All Rights Reserved.
+
diff --git a/README_CN.md b/README_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..d0d8dbe7334c53af04b9b937072252bcf21d2242
--- /dev/null
+++ b/README_CN.md
@@ -0,0 +1,102 @@
+
+
+# HivisionIDPhoto
+
+[English](README.md) / 中文
+
+[![GitHub](https://img.shields.io/static/v1?label=GitHub&message=GitHub&color=black)](https://github.com/xiaolin199912/HivisionIDPhotos)
+[![SwanHub Demo](https://img.shields.io/static/v1?label=在线体验&message=SwanHub%20Demo&color=blue)](https://swanhub.co/ZeYiLin/HivisionIDPhotos/demo)
+[![zhihu](https://img.shields.io/static/v1?label=知乎&message=知乎&color=blue)](https://zhuanlan.zhihu.com/p/638254028)
+
+
+
+
+
+# 🤩项目更新
+- 在线体验: [![SwanHub Demo](https://img.shields.io/static/v1?label=Demo&message=SwanHub%20Demo&color=blue)](https://swanhub.co/ZeYiLin/HivisionIDPhotos/demo)
+- 2023.6.20: 更新**预设尺寸菜单**
+- 2023.6.19: 更新**排版照**
+- 2023.6.13: 更新**中心渐变色**
+- 2023.6.11: 更新**上下渐变色**
+- 2023.6.8: 更新**自定义尺寸**
+- 2023.6.4: 更新**自定义底色、人脸检测Bug通知**
+- 2023.5.10: 更新**不改尺寸只换底**
+
+# Overview
+
+> 🚀谢谢你对我们的工作感兴趣。您可能还想查看我们在图像领域的其他成果,欢迎来信:zeyi.lin@swanhub.co.
+
+HivisionIDPhoto旨在开发一种实用的证件照智能制作算法。
+
+它利用一套完善的模型工作流程,实现对多种用户拍照场景的识别、抠图与证件照生成。
+
+
+**HivisionIDPhoto可以做到:**
+
+1. 轻量级抠图
+2. 根据不同尺寸规格生成不同的标准证件照、六寸排版照
+3. 美颜(waiting)
+4. 智能换正装(waiting)
+
+
+
+
+
+
+---
+
+如果HivisionIDPhoto对你有帮助,请star这个repo或推荐给你的朋友,解决证件照应急制作问题!
+
+
+# 🔧环境安装与依赖
+
+- Python >= 3.7 (we recommend [Anaconda](https://www.anaconda.com/download/#linux) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html))
+- onnxruntime
+- OpenCV
+- OS: Linux, Windows, macOS
+
+### Installation
+
+1. Clone repo
+
+```bash
+git lfs install && git clone https://swanhub.co/ZeYiLin/HivisionIDPhotos.git
+cd HivisionIDPhotos
+```
+
+2. Install dependent packages
+
+```
+pip install numpy
+pip install opencv-python
+pip install onnxruntime
+pip install gradio
+```
+
+
+
+# ⚡️快速推理
+
+模型与代码通过git-lfs下载。
+
+```
+git lfs install
+git clone https://swanhub.co/ZeYiLin/HivisionIDPhotos.git
+```
+
+**推理!**
+
+```
+python app.py
+```
+
+运行程序将生成一个本地Web页面,在页面中可完成证件照的操作与交互。
+
+
+# 引用项目
+
+1. MTCNN: https://github.com/ipazc/mtcnn
+2. ModNet: https://github.com/ZHKKKe/MODNet
+
+# 📧联系我们
+
+如果您有任何问题,请发邮件至 zeyi.lin@swanhub.co
\ No newline at end of file
diff --git a/__pycache__/EulerZ.cpython-38.pyc b/__pycache__/EulerZ.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..df782a99064fd14f74d092386d62c4a6e7ac15e6
Binary files /dev/null and b/__pycache__/EulerZ.cpython-38.pyc differ
diff --git a/__pycache__/error.cpython-38.pyc b/__pycache__/error.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eaf2ed0bbe67b2f98eb986076df464ce131fd124
Binary files /dev/null and b/__pycache__/error.cpython-38.pyc differ
diff --git a/__pycache__/face_judgement_align.cpython-38.pyc b/__pycache__/face_judgement_align.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f560da716eed57725c9e7f77768e0c8cf10ad90e
Binary files /dev/null and b/__pycache__/face_judgement_align.cpython-38.pyc differ
diff --git a/__pycache__/imageTransform.cpython-38.pyc b/__pycache__/imageTransform.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7bc9908d0631b596a802ba958580645c69906ce7
Binary files /dev/null and b/__pycache__/imageTransform.cpython-38.pyc differ
diff --git a/__pycache__/layoutCreate.cpython-38.pyc b/__pycache__/layoutCreate.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..77ad1967ca7c1857aa045287dc893decb39c9965
Binary files /dev/null and b/__pycache__/layoutCreate.cpython-38.pyc differ
diff --git a/__pycache__/move_image.cpython-38.pyc b/__pycache__/move_image.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..991679cb8ba01ed9cf495a0d4013c486f66f606e
Binary files /dev/null and b/__pycache__/move_image.cpython-38.pyc differ
diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..34653567d7ba47d16e9aa2bd977b6a01096c94a3
--- /dev/null
+++ b/app.py
@@ -0,0 +1,216 @@
+import cv2
+import gradio as gr
+import onnxruntime
+from face_judgement_align import IDphotos_create
+from hivisionai.hycv.vision import add_background
+from layoutCreate import generate_layout_photo, generate_layout_image
+import pathlib
+import numpy as np
+
+size_list_dict = {"一寸": (413, 295), "二寸": (626, 413),
+ "教师资格证": (413, 295), "国家公务员考试": (413, 295), "初级会计考试": (413, 295)}
+color_list_dict = {"蓝色": (86, 140, 212), "白色": (255, 255, 255), "红色": (233, 51, 35)}
+
+
+# 设置Gradio examples
+def set_example_image(example: list) -> dict:
+ return gr.Image.update(value=example[0])
+
+
+# 检测RGB是否超出范围,如果超出则约束到0~255之间
+def range_check(value, min_value=0, max_value=255):
+ value = int(value)
+ if value <= min_value:
+ value = min_value
+ elif value > max_value:
+ value = max_value
+ return value
+
+
+def idphoto_inference(input_image,
+ mode_option,
+ size_list_option,
+ color_option,
+ render_option,
+ custom_color_R,
+ custom_color_G,
+ custom_color_B,
+ custom_size_height,
+ custom_size_width,
+ head_measure_ratio=0.2,
+ head_height_ratio=0.45,
+ top_distance_max=0.12,
+ top_distance_min=0.10):
+ idphoto_json = {
+ "size_mode": mode_option,
+ "color_mode": color_option,
+ "render_mode": render_option,
+ }
+
+ # 如果尺寸模式选择的是尺寸列表
+ if idphoto_json["size_mode"] == "尺寸列表":
+ idphoto_json["size"] = size_list_dict[size_list_option]
+ # 如果尺寸模式选择的是自定义尺寸
+ elif idphoto_json["size_mode"] == "自定义尺寸":
+ id_height = int(custom_size_height)
+ id_width = int(custom_size_width)
+ if id_height < id_width or min(id_height, id_width) < 100 or max(id_height, id_width) > 1800:
+ return {
+ img_output_standard: gr.update(value=None),
+ img_output_standard_hd: gr.update(value=None),
+ notification: gr.update(value="宽度应不大于长度;长宽不应小于100,大于1800", visible=True)}
+ idphoto_json["size"] = (id_height, id_width)
+ else:
+ idphoto_json["size"] = (None, None)
+
+ # 如果颜色模式选择的是自定义底色
+ if idphoto_json["color_mode"] == "自定义底色":
+ idphoto_json["color_bgr"] = (range_check(custom_color_R),
+ range_check(custom_color_G),
+ range_check(custom_color_B))
+ else:
+ idphoto_json["color_bgr"] = color_list_dict[color_option]
+
+ result_image_hd, result_image_standard, typography_arr, typography_rotate, \
+ _, _, _, _, status = IDphotos_create(input_image,
+ mode=idphoto_json["size_mode"],
+ size=idphoto_json["size"],
+ head_measure_ratio=head_measure_ratio,
+ head_height_ratio=head_height_ratio,
+ align=False,
+ beauty=False,
+ fd68=None,
+ human_sess=sess,
+ IS_DEBUG=False,
+ top_distance_max=top_distance_max,
+ top_distance_min=top_distance_min)
+
+ # 如果检测到人脸数量不等于1
+ if status == 0:
+        result_message = {
+ img_output_standard: gr.update(value=None),
+ img_output_standard_hd: gr.update(value=None),
+ notification: gr.update(value="人脸数量不等于1", visible=True)
+ }
+
+ # 如果检测到人脸数量等于1
+ else:
+ if idphoto_json["render_mode"] == "纯色":
+ result_image_standard = np.uint8(
+ add_background(result_image_standard, bgr=idphoto_json["color_bgr"]))
+ result_image_hd = np.uint8(add_background(result_image_hd, bgr=idphoto_json["color_bgr"]))
+ elif idphoto_json["render_mode"] == "上下渐变(白)":
+ result_image_standard = np.uint8(
+ add_background(result_image_standard, bgr=idphoto_json["color_bgr"], mode="updown_gradient"))
+ result_image_hd = np.uint8(
+ add_background(result_image_hd, bgr=idphoto_json["color_bgr"], mode="updown_gradient"))
+ else:
+ result_image_standard = np.uint8(
+ add_background(result_image_standard, bgr=idphoto_json["color_bgr"], mode="center_gradient"))
+ result_image_hd = np.uint8(
+ add_background(result_image_hd, bgr=idphoto_json["color_bgr"], mode="center_gradient"))
+
+ if idphoto_json["size_mode"] == "只换底":
+ result_layout_image = gr.update(visible=False)
+ else:
+ typography_arr, typography_rotate = generate_layout_photo(input_height=idphoto_json["size"][0],
+ input_width=idphoto_json["size"][1])
+
+ result_layout_image = generate_layout_image(result_image_standard, typography_arr,
+ typography_rotate,
+ height=idphoto_json["size"][0],
+ width=idphoto_json["size"][1])
+
+        result_message = {
+ img_output_standard: result_image_standard,
+ img_output_standard_hd: result_image_hd,
+ img_output_layout: result_layout_image,
+ notification: gr.update(visible=False)}
+
+    return result_message
+
+
+if __name__ == "__main__":
+ HY_HUMAN_MATTING_WEIGHTS_PATH = "./hivision_modnet.onnx"
+ sess = onnxruntime.InferenceSession(HY_HUMAN_MATTING_WEIGHTS_PATH)
+ size_mode = ["尺寸列表", "只换底", "自定义尺寸"]
+ size_list = ["一寸", "二寸", "教师资格证", "国家公务员考试", "初级会计考试"]
+ colors = ["蓝色", "白色", "红色", "自定义底色"]
+ render = ["纯色", "上下渐变(白)", "中心渐变(白)"]
+
+    title = "<h1 id='title'>HivisionIDPhotos</h1>"
+    description = "<h3>😎6.20更新:新增尺寸选择列表</h3>"
+ css = '''
+ h1#title, h3 {
+ text-align: center;
+ }
+ '''
+
+ demo = gr.Blocks(css=css)
+
+ with demo:
+ gr.Markdown(title)
+ gr.Markdown(description)
+ with gr.Row():
+ with gr.Column():
+ img_input = gr.Image().style(height=350)
+ mode_options = gr.Radio(choices=size_mode, label="证件照尺寸选项", value="尺寸列表", elem_id="size")
+ # 预设尺寸下拉菜单
+ with gr.Row(visible=True) as size_list_row:
+ size_list_options = gr.Dropdown(choices=size_list, label="预设尺寸", value="一寸", elem_id="size_list")
+
+ with gr.Row(visible=False) as custom_size:
+ custom_size_height = gr.Number(value=413, label="height", interactive=True)
+                custom_size_width = gr.Number(value=295, label="width", interactive=True)
+
+ color_options = gr.Radio(choices=colors, label="背景色", value="蓝色", elem_id="color")
+ with gr.Row(visible=False) as custom_color:
+ custom_color_R = gr.Number(value=0, label="R", interactive=True)
+ custom_color_G = gr.Number(value=0, label="G", interactive=True)
+ custom_color_B = gr.Number(value=0, label="B", interactive=True)
+
+ render_options = gr.Radio(choices=render, label="渲染方式", value="纯色", elem_id="render")
+
+ img_but = gr.Button('开始制作')
+ # 案例图片
+ example_images = gr.Dataset(components=[img_input],
+ samples=[[path.as_posix()]
+ for path in sorted(pathlib.Path('images').rglob('*.jpg'))])
+
+ with gr.Column():
+ notification = gr.Text(label="状态", visible=False)
+ with gr.Row():
+ img_output_standard = gr.Image(label="标准照").style(height=350)
+ img_output_standard_hd = gr.Image(label="高清照").style(height=350)
+ img_output_layout = gr.Image(label="六寸排版照").style(height=350)
+
+
+ def change_color(colors):
+ if colors == "自定义底色":
+ return {custom_color: gr.update(visible=True)}
+ else:
+ return {custom_color: gr.update(visible=False)}
+
+ def change_size_mode(size_option_item):
+ if size_option_item == "自定义尺寸":
+ return {custom_size: gr.update(visible=True),
+ size_list_row: gr.update(visible=False)}
+ elif size_option_item == "只换底":
+ return {custom_size: gr.update(visible=False),
+ size_list_row: gr.update(visible=False)}
+ else:
+ return {custom_size: gr.update(visible=False),
+ size_list_row: gr.update(visible=True)}
+
+ color_options.input(change_color, inputs=[color_options], outputs=[custom_color])
+ mode_options.input(change_size_mode, inputs=[mode_options], outputs=[custom_size, size_list_row])
+
+ img_but.click(idphoto_inference,
+ inputs=[img_input, mode_options, size_list_options, color_options, render_options,
+ custom_color_R, custom_color_G, custom_color_B,
+                          custom_size_height, custom_size_width],
+ outputs=[img_output_standard, img_output_standard_hd, img_output_layout, notification],
+ queue=True)
+ example_images.click(fn=set_example_image, inputs=[example_images], outputs=[img_input])
+
+ demo.launch(enable_queue=True)
diff --git a/beautyPlugin/GrindSkin.py b/beautyPlugin/GrindSkin.py
new file mode 100644
index 0000000000000000000000000000000000000000..66b938b40418f9622f6879d6068bc01a4abe51c6
--- /dev/null
+++ b/beautyPlugin/GrindSkin.py
@@ -0,0 +1,43 @@
+"""
+@author: cuny
+@file: GrindSkin.py
+@time: 2022/7/2 14:44
+@description:
+磨皮算法
+"""
+import cv2
+import numpy as np
+
+
+def grindSkin(src, grindDegree: int = 3, detailDegree: int = 1, strength: int = 9):
+ """
+ Dest =(Src * (100 - Opacity) + (Src + 2 * GaussBlur(EPFFilter(Src) - Src)) * Opacity) /100
+ 人像磨皮方案,后续会考虑使用一些皮肤区域检测算法来实现仅皮肤区域磨皮,增加算法的精细程度——或者使用人脸关键点
+ https://www.cnblogs.com/Imageshop/p/4709710.html
+ Args:
+ src: 原图
+ grindDegree: 磨皮程度调节参数
+ detailDegree: 细节程度调节参数
+ strength: 融合程度,作为磨皮强度(0 - 10)
+
+ Returns:
+ 磨皮后的图像
+ """
+ if strength <= 0:
+ return src
+ dst = src.copy()
+ opacity = min(10., strength) / 10.
+ dx = grindDegree * 5 # 双边滤波参数之一
+ fc = grindDegree * 12.5 # 双边滤波参数之一
+ temp1 = cv2.bilateralFilter(src[:, :, :3], dx, fc, fc)
+ temp2 = cv2.subtract(temp1, src[:, :, :3])
+ temp3 = cv2.GaussianBlur(temp2, (2 * detailDegree - 1, 2 * detailDegree - 1), 0)
+ temp4 = cv2.add(cv2.add(temp3, temp3), src[:, :, :3])
+ dst[:, :, :3] = cv2.addWeighted(temp4, opacity, src[:, :, :3], 1 - opacity, 0.0)
+ return dst
+
+
+if __name__ == "__main__":
+ input_image = cv2.imread("test_image/7.jpg")
+ output_image = grindSkin(src=input_image)
+ cv2.imwrite("grindSkinCompare.png", np.hstack((input_image, output_image)))
diff --git a/beautyPlugin/MakeBeautiful.py b/beautyPlugin/MakeBeautiful.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec83777fa4edd04858eb0660cfdb168367ab4f5b
--- /dev/null
+++ b/beautyPlugin/MakeBeautiful.py
@@ -0,0 +1,45 @@
+"""
+@author: cuny
+@file: MakeBeautiful.py
+@time: 2022/7/7 20:23
+@description:
+美颜工具集合文件,作为暴露在外的插件接口
+"""
+from .GrindSkin import grindSkin
+from .MakeWhiter import MakeWhiter
+from .ThinFace import thinFace
+import numpy as np
+
+
+def makeBeautiful(input_image: np.ndarray,
+ landmark,
+ thinStrength: int,
+ thinPlace: int,
+ grindStrength: int,
+ whiterStrength: int
+ ) -> np.ndarray:
+ """
+ 美颜工具的接口函数,用于实现美颜效果
+ Args:
+ input_image: 输入的图像
+ landmark: 瘦脸需要的人脸关键点信息,为fd68返回的第二个参数
+ thinStrength: 瘦脸强度,为0-10(如果更高其实也没什么问题),当强度为0或者更低时,则不瘦脸
+ thinPlace: 选择瘦脸区域,为0-2之间的值,越大瘦脸的点越靠下
+ grindStrength: 磨皮强度,为0-10(如果更高其实也没什么问题),当强度为0或者更低时,则不磨皮
+ whiterStrength: 美白强度,为0-10(如果更高其实也没什么问题),当强度为0或者更低时,则不美白
+ Returns:
+ output_image 输出图像
+ """
+ try:
+ _, _, _ = input_image.shape
+ except ValueError:
+ raise TypeError("输入图像必须为3通道或者4通道!")
+ # 三通道或者四通道图像
+ # 首先进行瘦脸
+ input_image = thinFace(input_image, landmark, place=thinPlace, strength=thinStrength)
+ # 其次进行磨皮
+ input_image = grindSkin(src=input_image, strength=grindStrength)
+ # 最后进行美白
+ makeWhiter = MakeWhiter()
+ input_image = makeWhiter.run(input_image, strength=whiterStrength)
+ return input_image
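+
+
+# Minimal usage sketch (assumptions: `landmark` comes from an fd68-style 68-point
+# detector as described in the docstring, and the image path is a placeholder):
+# import cv2
+# img = cv2.imread("test_image/7.jpg", -1)
+# out = makeBeautiful(img, landmark, thinStrength=3, thinPlace=1,
+#                     grindStrength=3, whiterStrength=3)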
diff --git a/beautyPlugin/MakeWhiter.py b/beautyPlugin/MakeWhiter.py
new file mode 100644
index 0000000000000000000000000000000000000000..65d17c8fa3d4c6fec646fd37afc652c21183119f
--- /dev/null
+++ b/beautyPlugin/MakeWhiter.py
@@ -0,0 +1,108 @@
+"""
+@author: cuny
+@file: MakeWhiter.py
+@time: 2022/7/2 14:28
+@description:
+美白算法
+"""
+import os
+import cv2
+import math
+import numpy as np
+local_path = os.path.dirname(__file__)
+
+
+class MakeWhiter(object):
+ class __LutWhite:
+ """
+ 美白的内部类
+ """
+
+ def __init__(self, lut):
+ cube64rows = 8
+ cube64size = 64
+ cube256size = 256
+ cubeScale = int(cube256size / cube64size) # 4
+
+ reshapeLut = np.zeros((cube256size, cube256size, cube256size, 3))
+ for i in range(cube64size):
+ tmp = math.floor(i / cube64rows)
+ cx = int((i - tmp * cube64rows) * cube64size)
+ cy = int(tmp * cube64size)
+ cube64 = lut[cy:cy + cube64size, cx:cx + cube64size] # cube64 in lut(512*512 (512=8*64))
+ _rows, _cols, _ = cube64.shape
+ if _rows == 0 or _cols == 0:
+ continue
+ cube256 = cv2.resize(cube64, (cube256size, cube256size))
+ i = i * cubeScale
+ for k in range(cubeScale):
+ reshapeLut[i + k] = cube256
+ self.lut = reshapeLut
+
+ def imageInLut(self, src):
+ arr = src.copy()
+ bs = arr[:, :, 0]
+ gs = arr[:, :, 1]
+ rs = arr[:, :, 2]
+ arr[:, :] = self.lut[bs, gs, rs]
+ return arr
+
+ def __init__(self, lutImage: np.ndarray = None):
+ self.__lutWhiten = None
+ if lutImage is not None:
+ self.__lutWhiten = self.__LutWhite(lutImage)
+
+ def setLut(self, lutImage: np.ndarray):
+ self.__lutWhiten = self.__LutWhite(lutImage)
+
+ @staticmethod
+ def generate_identify_color_matrix(size: int = 512, channel: int = 3) -> np.ndarray:
+ """
+ 用于生成一张初始的查找表
+ Args:
+ size: 查找表尺寸,默认为512
+ channel: 查找表通道数,默认为3
+
+ Returns:
+ 返回生成的查找表图像
+ """
+ img = np.zeros((size, size, channel), dtype=np.uint8)
+ for by in range(size // 64):
+ for bx in range(size // 64):
+ for g in range(64):
+ for r in range(64):
+ x = r + bx * 64
+ y = g + by * 64
+ img[y][x][0] = int(r * 255.0 / 63.0 + 0.5)
+ img[y][x][1] = int(g * 255.0 / 63.0 + 0.5)
+ img[y][x][2] = int((bx + by * 8.0) * 255.0 / 63.0 + 0.5)
+ return cv2.cvtColor(img, cv2.COLOR_RGB2BGR).clip(0, 255).astype('uint8')
+
+ def run(self, src: np.ndarray, strength: int) -> np.ndarray:
+ """
+ 美白图像
+ Args:
+ src: 原图
+ strength: 美白强度,0 - 10
+ Returns:
+ 美白后的图像
+ """
+ dst = src.copy()
+ strength = min(10, int(strength)) / 10.
+ if strength <= 0:
+ return dst
+ self.setLut(cv2.imread(f"{local_path}/lut_image/3.png", -1))
+ _, _, c = src.shape
+ img = self.__lutWhiten.imageInLut(src[:, :, :3])
+ dst[:, :, :3] = cv2.addWeighted(src[:, :, :3], 1 - strength, img, strength, 0)
+ return dst
+
+
+if __name__ == "__main__":
+ # makeLut = MakeWhiter()
+ # cv2.imwrite("lutOrigin.png", makeLut.generate_identify_color_matrix())
+ input_image = cv2.imread("test_image/7.jpg", -1)
+ lut_image = cv2.imread("lut_image/3.png")
+ makeWhiter = MakeWhiter(lut_image)
+ output_image = makeWhiter.run(input_image, 10)
+ cv2.imwrite("makeWhiterCompare.png", np.hstack((input_image, output_image)))
diff --git a/beautyPlugin/ThinFace.py b/beautyPlugin/ThinFace.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d229cce8ff024d3b1a0b034543e1363cacd832c
--- /dev/null
+++ b/beautyPlugin/ThinFace.py
@@ -0,0 +1,267 @@
+"""
+@author: cuny
+@file: ThinFace.py
+@time: 2022/7/2 15:50
+@description:
+瘦脸算法,用到了图像局部平移法
+先使用人脸关键点检测,然后再使用图像局部平移法
+需要注意的是,这部分不会包含dlib人脸关键点检测,因为考虑到模型载入的问题
+"""
+import cv2
+import math
+import numpy as np
+
+
+class TranslationWarp(object):
+ """
+ 本类包含瘦脸算法,由于瘦脸算法包含了很多个版本,所以以类的方式呈现
+ 前两个算法没什么好讲的,网上资料很多
+ 第三个采用numpy内部的自定义函数处理,在处理速度上有一些提升
+ 最后采用cv2.map算法,处理速度大幅度提升
+ """
+
+ # 瘦脸
+ @staticmethod
+ def localTranslationWarp(srcImg, startX, startY, endX, endY, radius):
+ # 双线性插值法
+ def BilinearInsert(src, ux, uy):
+ w, h, c = src.shape
+ if c == 3:
+ x1 = int(ux)
+ x2 = x1 + 1
+ y1 = int(uy)
+ y2 = y1 + 1
+ part1 = src[y1, x1].astype(np.float64) * (float(x2) - ux) * (float(y2) - uy)
+ part2 = src[y1, x2].astype(np.float64) * (ux - float(x1)) * (float(y2) - uy)
+ part3 = src[y2, x1].astype(np.float64) * (float(x2) - ux) * (uy - float(y1))
+ part4 = src[y2, x2].astype(np.float64) * (ux - float(x1)) * (uy - float(y1))
+ insertValue = part1 + part2 + part3 + part4
+                return insertValue.astype(np.uint8)
+
+ ddradius = float(radius * radius) # 圆的半径
+ copyImg = srcImg.copy() # copy后的图像矩阵
+ # 计算公式中的|m-c|^2
+ ddmc = (endX - startX) * (endX - startX) + (endY - startY) * (endY - startY)
+ H, W, C = srcImg.shape # 获取图像的形状
+ for i in range(W):
+ for j in range(H):
+ # # 计算该点是否在形变圆的范围之内
+ # # 优化,第一步,直接判断是会在(startX,startY)的矩阵框中
+ if math.fabs(i - startX) > radius and math.fabs(j - startY) > radius:
+ continue
+ distance = (i - startX) * (i - startX) + (j - startY) * (j - startY)
+ if distance < ddradius:
+ # 计算出(i,j)坐标的原坐标
+ # 计算公式中右边平方号里的部分
+ ratio = (ddradius - distance) / (ddradius - distance + ddmc)
+ ratio = ratio * ratio
+ # 映射原位置
+ UX = i - ratio * (endX - startX)
+ UY = j - ratio * (endY - startY)
+
+ # 根据双线性插值法得到UX,UY的值
+ # start_ = time.time()
+ value = BilinearInsert(srcImg, UX, UY)
+ # print(f"双线性插值耗时;{time.time() - start_}")
+ # 改变当前 i ,j的值
+ copyImg[j, i] = value
+ return copyImg
+
+ # 瘦脸pro1, 限制了for循环的遍历次数
+ @staticmethod
+ def localTranslationWarpLimitFor(srcImg, startP: np.matrix, endP: np.matrix, radius: float):
+ startX, startY = startP[0, 0], startP[0, 1]
+ endX, endY = endP[0, 0], endP[0, 1]
+
+ # 双线性插值法
+ def BilinearInsert(src, ux, uy):
+ w, h, c = src.shape
+ if c == 3:
+ x1 = int(ux)
+ x2 = x1 + 1
+ y1 = int(uy)
+ y2 = y1 + 1
+ part1 = src[y1, x1].astype(np.float64) * (float(x2) - ux) * (float(y2) - uy)
+ part2 = src[y1, x2].astype(np.float64) * (ux - float(x1)) * (float(y2) - uy)
+ part3 = src[y2, x1].astype(np.float64) * (float(x2) - ux) * (uy - float(y1))
+ part4 = src[y2, x2].astype(np.float64) * (ux - float(x1)) * (uy - float(y1))
+ insertValue = part1 + part2 + part3 + part4
+                return insertValue.astype(np.uint8)
+
+ ddradius = float(radius * radius) # 圆的半径
+ copyImg = srcImg.copy() # copy后的图像矩阵
+ # 计算公式中的|m-c|^2
+ ddmc = (endX - startX) ** 2 + (endY - startY) ** 2
+ # 计算正方形的左上角起始点
+ startTX, startTY = (startX - math.floor(radius + 1), startY - math.floor((radius + 1)))
+ # 计算正方形的右下角的结束点
+ endTX, endTY = (startX + math.floor(radius + 1), startY + math.floor((radius + 1)))
+ # 剪切srcImg
+ srcImg = srcImg[startTY: endTY + 1, startTX: endTX + 1, :]
+ # db.cv_show(srcImg)
+ # 裁剪后的图像相当于在x,y都减少了startX - math.floor(radius + 1)
+ # 原本的endX, endY在切后的坐标点
+ endX, endY = (endX - startX + math.floor(radius + 1), endY - startY + math.floor(radius + 1))
+ # 原本的startX, startY剪切后的坐标点
+ startX, startY = (math.floor(radius + 1), math.floor(radius + 1))
+ H, W, C = srcImg.shape # 获取图像的形状
+ for i in range(W):
+ for j in range(H):
+ # 计算该点是否在形变圆的范围之内
+ # 优化,第一步,直接判断是会在(startX,startY)的矩阵框中
+ # if math.fabs(i - startX) > radius and math.fabs(j - startY) > radius:
+ # continue
+ distance = (i - startX) * (i - startX) + (j - startY) * (j - startY)
+ if distance < ddradius:
+ # 计算出(i,j)坐标的原坐标
+ # 计算公式中右边平方号里的部分
+ ratio = (ddradius - distance) / (ddradius - distance + ddmc)
+ ratio = ratio * ratio
+ # 映射原位置
+ UX = i - ratio * (endX - startX)
+ UY = j - ratio * (endY - startY)
+
+ # 根据双线性插值法得到UX,UY的值
+ # start_ = time.time()
+ value = BilinearInsert(srcImg, UX, UY)
+ # print(f"双线性插值耗时;{time.time() - start_}")
+ # 改变当前 i ,j的值
+ copyImg[j + startTY, i + startTX] = value
+ return copyImg
+
+ # # 瘦脸pro2,采用了numpy自定义函数做处理
+ # def localTranslationWarpNumpy(self, srcImg, startP: np.matrix, endP: np.matrix, radius: float):
+ # startX , startY = startP[0, 0], startP[0, 1]
+ # endX, endY = endP[0, 0], endP[0, 1]
+ # ddradius = float(radius * radius) # 圆的半径
+ # copyImg = srcImg.copy() # copy后的图像矩阵
+ # # 计算公式中的|m-c|^2
+ # ddmc = (endX - startX)**2 + (endY - startY)**2
+ # # 计算正方形的左上角起始点
+ # startTX, startTY = (startX - math.floor(radius + 1), startY - math.floor((radius + 1)))
+ # # 计算正方形的右下角的结束点
+ # endTX, endTY = (startX + math.floor(radius + 1), startY + math.floor((radius + 1)))
+ # # 剪切srcImg
+ # self.thinImage = srcImg[startTY : endTY + 1, startTX : endTX + 1, :]
+ # # s = self.thinImage
+ # # db.cv_show(srcImg)
+ # # 裁剪后的图像相当于在x,y都减少了startX - math.floor(radius + 1)
+ # # 原本的endX, endY在切后的坐标点
+ # endX, endY = (endX - startX + math.floor(radius + 1), endY - startY + math.floor(radius + 1))
+ # # 原本的startX, startY剪切后的坐标点
+ # startX ,startY = (math.floor(radius + 1), math.floor(radius + 1))
+ # H, W, C = self.thinImage.shape # 获取图像的形状
+ # index_m = np.arange(H * W).reshape((H, W))
+ # triangle_ufunc = np.frompyfunc(self.process, 9, 3)
+ # # start_ = time.time()
+ # finalImgB, finalImgG, finalImgR = triangle_ufunc(index_m, self, W, ddradius, ddmc, startX, startY, endX, endY)
+ # finaleImg = np.dstack((finalImgB, finalImgG, finalImgR)).astype(np.uint8)
+ # finaleImg = np.fliplr(np.rot90(finaleImg, -1))
+ # copyImg[startTY: endTY + 1, startTX: endTX + 1, :] = finaleImg
+ # # print(f"图像处理耗时;{time.time() - start_}")
+ # # db.cv_show(copyImg)
+ # return copyImg
+
+ # 瘦脸pro3,采用opencv内置函数
+ @staticmethod
+ def localTranslationWarpFastWithStrength(srcImg, startP: np.matrix, endP: np.matrix, radius, strength: float = 100.):
+ """
+ 采用opencv内置函数
+ Args:
+ srcImg: 源图像
+ startP: 起点位置
+ endP: 终点位置
+ radius: 处理半径
+ strength: 瘦脸强度,一般取100以上
+
+ Returns:
+
+ """
+ startX, startY = startP[0, 0], startP[0, 1]
+ endX, endY = endP[0, 0], endP[0, 1]
+ ddradius = float(radius * radius)
+ # copyImg = np.zeros(srcImg.shape, np.uint8)
+ # copyImg = srcImg.copy()
+
+ maskImg = np.zeros(srcImg.shape[:2], np.uint8)
+ cv2.circle(maskImg, (startX, startY), math.ceil(radius), (255, 255, 255), -1)
+
+ K0 = 100 / strength
+
+ # 计算公式中的|m-c|^2
+ ddmc_x = (endX - startX) * (endX - startX)
+ ddmc_y = (endY - startY) * (endY - startY)
+ H, W, C = srcImg.shape
+
+ mapX = np.vstack([np.arange(W).astype(np.float32).reshape(1, -1)] * H)
+ mapY = np.hstack([np.arange(H).astype(np.float32).reshape(-1, 1)] * W)
+
+ distance_x = (mapX - startX) * (mapX - startX)
+ distance_y = (mapY - startY) * (mapY - startY)
+ distance = distance_x + distance_y
+ K1 = np.sqrt(distance)
+ ratio_x = (ddradius - distance_x) / (ddradius - distance_x + K0 * ddmc_x)
+ ratio_y = (ddradius - distance_y) / (ddradius - distance_y + K0 * ddmc_y)
+ ratio_x = ratio_x * ratio_x
+ ratio_y = ratio_y * ratio_y
+
+ UX = mapX - ratio_x * (endX - startX) * (1 - K1 / radius)
+ UY = mapY - ratio_y * (endY - startY) * (1 - K1 / radius)
+
+ np.copyto(UX, mapX, where=maskImg == 0)
+ np.copyto(UY, mapY, where=maskImg == 0)
+ UX = UX.astype(np.float32)
+ UY = UY.astype(np.float32)
+ copyImg = cv2.remap(srcImg, UX, UY, interpolation=cv2.INTER_LINEAR)
+ return copyImg
+
+
+def thinFace(src, landmark, place: int = 0, strength=30.):
+ """
+ 瘦脸程序接口,输入人脸关键点信息和强度,即可实现瘦脸
+ 注意处理四通道图像
+ Args:
+ src: 原图
+ landmark: 关键点信息
+ place: 选择瘦脸区域,为0-4之间的值
+ strength: 瘦脸强度,输入值在0-10之间,如果小于或者等于0,则不瘦脸
+
+ Returns:
+ 瘦脸后的图像
+ """
+ strength = min(100., strength * 10.)
+ if strength <= 0.:
+ return src
+ # 也可以设置瘦脸区域
+ place = max(0, min(4, int(place)))
+ left_landmark = landmark[4 + place]
+ left_landmark_down = landmark[6 + place]
+ right_landmark = landmark[13 + place]
+ right_landmark_down = landmark[15 + place]
+ endPt = landmark[58]
+ # 计算第4个点到第6个点的距离作为瘦脸距离
+ r_left = math.sqrt(
+ (left_landmark[0, 0] - left_landmark_down[0, 0]) ** 2 +
+ (left_landmark[0, 1] - left_landmark_down[0, 1]) ** 2
+ )
+
+ # 计算第14个点到第16个点的距离作为瘦脸距离
+ r_right = math.sqrt((right_landmark[0, 0] - right_landmark_down[0, 0]) ** 2 +
+ (right_landmark[0, 1] - right_landmark_down[0, 1]) ** 2)
+ # 瘦左边脸
+ thin_image = TranslationWarp.localTranslationWarpFastWithStrength(src, left_landmark[0], endPt[0], r_left, strength)
+ # 瘦右边脸
+ thin_image = TranslationWarp.localTranslationWarpFastWithStrength(thin_image, right_landmark[0], endPt[0], r_right, strength)
+ return thin_image
+
+
+if __name__ == "__main__":
+ import os
+ from hycv.FaceDetection68.faceDetection68 import FaceDetection68
+ local_file = os.path.dirname(__file__)
+ PREDICTOR_PATH = f"{local_file}/weights/shape_predictor_68_face_landmarks.dat" # 关键点检测模型路径
+ fd68 = FaceDetection68(model_path=PREDICTOR_PATH)
+ input_image = cv2.imread("test_image/4.jpg", -1)
+ _, landmark_, _ = fd68.facePoints(input_image)
+ output_image = thinFace(input_image, landmark_, strength=30.2)
+ cv2.imwrite("thinFaceCompare.png", np.hstack((input_image, output_image)))
diff --git a/beautyPlugin/__init__.py b/beautyPlugin/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..94cc58ad2a2f5514acfd0a5945e70840a0f3b1c7
--- /dev/null
+++ b/beautyPlugin/__init__.py
@@ -0,0 +1,4 @@
+from .MakeBeautiful import makeBeautiful
+
+
+
diff --git a/beautyPlugin/__pycache__/GrindSkin.cpython-310.pyc b/beautyPlugin/__pycache__/GrindSkin.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7e4b335c07b41146541e17d7d311dd39e4c0645d
Binary files /dev/null and b/beautyPlugin/__pycache__/GrindSkin.cpython-310.pyc differ
diff --git a/beautyPlugin/__pycache__/GrindSkin.cpython-38.pyc b/beautyPlugin/__pycache__/GrindSkin.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ab26356d473b769c95a6f9acae22c213e41a6c4a
Binary files /dev/null and b/beautyPlugin/__pycache__/GrindSkin.cpython-38.pyc differ
diff --git a/beautyPlugin/__pycache__/MakeBeautiful.cpython-310.pyc b/beautyPlugin/__pycache__/MakeBeautiful.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e697ead6b2191554d10ecc20f5f07d9538e9c2a3
Binary files /dev/null and b/beautyPlugin/__pycache__/MakeBeautiful.cpython-310.pyc differ
diff --git a/beautyPlugin/__pycache__/MakeBeautiful.cpython-38.pyc b/beautyPlugin/__pycache__/MakeBeautiful.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3829c5f0939de65d1f4a36ee40cf429d236218fa
Binary files /dev/null and b/beautyPlugin/__pycache__/MakeBeautiful.cpython-38.pyc differ
diff --git a/beautyPlugin/__pycache__/MakeWhiter.cpython-310.pyc b/beautyPlugin/__pycache__/MakeWhiter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..93d702c49cb9b3e403cdffc29b3247ba629cdd98
Binary files /dev/null and b/beautyPlugin/__pycache__/MakeWhiter.cpython-310.pyc differ
diff --git a/beautyPlugin/__pycache__/MakeWhiter.cpython-38.pyc b/beautyPlugin/__pycache__/MakeWhiter.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..80e4f3413d0de7629a88bbb224eaf58bdfd411b1
Binary files /dev/null and b/beautyPlugin/__pycache__/MakeWhiter.cpython-38.pyc differ
diff --git a/beautyPlugin/__pycache__/ThinFace.cpython-310.pyc b/beautyPlugin/__pycache__/ThinFace.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e00e6be48aa90256e9f5a3acceff9c61feed12a0
Binary files /dev/null and b/beautyPlugin/__pycache__/ThinFace.cpython-310.pyc differ
diff --git a/beautyPlugin/__pycache__/ThinFace.cpython-38.pyc b/beautyPlugin/__pycache__/ThinFace.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dc89ab45ef40159841c0a24ce3f5f4c252ab1020
Binary files /dev/null and b/beautyPlugin/__pycache__/ThinFace.cpython-38.pyc differ
diff --git a/beautyPlugin/__pycache__/__init__.cpython-310.pyc b/beautyPlugin/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d310faa207ad4f7156cd89092ceb6c1a0b4ab227
Binary files /dev/null and b/beautyPlugin/__pycache__/__init__.cpython-310.pyc differ
diff --git a/beautyPlugin/__pycache__/__init__.cpython-38.pyc b/beautyPlugin/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..35132a08ef642ea8a2c2a0332b02df9a5e687e18
Binary files /dev/null and b/beautyPlugin/__pycache__/__init__.cpython-38.pyc differ
diff --git a/beautyPlugin/lut_image/1.png b/beautyPlugin/lut_image/1.png
new file mode 100644
index 0000000000000000000000000000000000000000..7ad5b0c9b03cfd189aa741a196b6708f34c82c56
Binary files /dev/null and b/beautyPlugin/lut_image/1.png differ
diff --git a/beautyPlugin/lut_image/3.png b/beautyPlugin/lut_image/3.png
new file mode 100644
index 0000000000000000000000000000000000000000..743fc12be388fab214c416b888b5abff892e9218
Binary files /dev/null and b/beautyPlugin/lut_image/3.png differ
diff --git a/beautyPlugin/lut_image/lutOrigin.png b/beautyPlugin/lut_image/lutOrigin.png
new file mode 100644
index 0000000000000000000000000000000000000000..6bb7dbd2f1f4a1b0276cc0926ac6c38c63c41ec6
Binary files /dev/null and b/beautyPlugin/lut_image/lutOrigin.png differ
diff --git a/cuny_tools.py b/cuny_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4418ad52beaef28063e8ad7e7767d8be96e6b98
--- /dev/null
+++ b/cuny_tools.py
@@ -0,0 +1,621 @@
+import cv2
+import numpy as np
+from hivisionai.hycv.utils import get_box_pro
+from hivisionai.hycv.vision import cover_image, draw_picture_dots
+from math import fabs, sin, radians, cos
+
+def opencv_rotate(img, angle):
+ h, w = img.shape[:2]
+ center = (w / 2, h / 2)
+ scale = 1.0
+ # 2.1获取M矩阵
+ """
+ M矩阵
+ [
+ cosA -sinA (1-cosA)*centerX+sinA*centerY
+ sinA cosA -sinA*centerX+(1-cosA)*centerY
+ ]
+ """
+ M = cv2.getRotationMatrix2D(center, angle, scale)
+ # 2.2 新的宽高,radians(angle) 把角度转为弧度 sin(弧度)
+ new_H = int(w * fabs(sin(radians(angle))) + h * fabs(cos(radians(angle))))
+ new_W = int(h * fabs(sin(radians(angle))) + w * fabs(cos(radians(angle))))
+ # 2.3 平移
+ M[0, 2] += (new_W - w) / 2
+ M[1, 2] += (new_H - h) / 2
+ rotate = cv2.warpAffine(img, M, (new_W, new_H), borderValue=(0, 0, 0))
+ return rotate
+
+
+def transformationNeck2(image:np.ndarray, per_to_side:float=0.8)->np.ndarray:
+ """
+ 透视变换脖子函数,输入图像和四个点(矩形框)
+ 矩形框内的图像可能是不完整的(边角有透明区域)
+ 我们将根据透视变换将矩形框内的图像拉伸成和矩形框一样的形状.
+ 算法分为几个步骤: 选择脖子的四个点 -> 选定这四个点拉伸后的坐标 -> 透视变换 -> 覆盖原图
+ """
+ _, _, _, a = cv2.split(image) # 这应该是一个四通道的图像
+ height, width = a.shape
+ def locate_side(image_:np.ndarray, x_:int, y_max:int) -> int:
+ # 寻找x=y, 且 y <= y_max 上从下往上第一个非0的点,如果没找到就返回0
+ y_ = 0
+ for y_ in range(y_max - 1, -1, -1):
+ if image_[y_][x_] != 0:
+ break
+ return y_
+ def locate_width(image_:np.ndarray, y_:int, mode, left_or_right:int=None):
+ # 从y=y这个水平线上寻找两边的非零点
+ # 增加left_or_right的原因在于为下面check_jaw服务
+ if mode==1: # 左往右
+ x_ = 0
+ if left_or_right is None:
+ left_or_right = 0
+ for x_ in range(left_or_right, width):
+ if image_[y_][x_] != 0:
+ break
+ else: # 右往左
+ x_ = width
+ if left_or_right is None:
+ left_or_right = width - 1
+ for x_ in range(left_or_right, -1, -1):
+ if image_[y_][x_] != 0:
+ break
+ return x_
+ def check_jaw(image_:np.ndarray, left_, right_):
+ """
+        检查选择的点是否截到下巴,如果截到了,就往下平移一个单位
+ """
+ f= True # True代表没截到下巴
+ # [x, y]
+ for x_cell in range(left_[0] + 1, right_[0]):
+ if image_[left_[1]][x_cell] == 0:
+ f = False
+ break
+ if f is True:
+ return left_, right_
+ else:
+ y_ = left_[1] + 2
+ x_left_ = locate_width(image_, y_, mode=1, left_or_right=left_[0])
+ x_right_ = locate_width(image_, y_, mode=2, left_or_right=right_[0])
+ left_, right_ = check_jaw(image_, [x_left_, y_], [x_right_, y_])
+ return left_, right_
+ # 选择脖子的四个点,核心在于选择上面的两个点,这两个点的确定的位置应该是"宽出来的"两个点
+ _, _ ,_, a = cv2.split(image) # 这应该是一个四通道的图像
+ ret,a_thresh = cv2.threshold(a,127,255,cv2.THRESH_BINARY)
+ y_high, y_low, x_left, x_right = get_box_pro(image=image, model=1) # 直接返回矩阵信息
+ y_left_side = locate_side(image_=a_thresh, x_=x_left, y_max=y_low) # 左边的点的y轴坐标
+ y_right_side = locate_side(image_=a_thresh, x_=x_right, y_max=y_low) # 右边的点的y轴坐标
+ y = min(y_left_side, y_right_side) # 将两点的坐标保持相同
+ cell_left_above, cell_right_above = check_jaw(a_thresh,[x_left, y], [x_right, y])
+ x_left, x_right = cell_left_above[0], cell_right_above[0]
+ # 此时我们寻找到了脖子的"宽出来的"两个点,这两个点作为上面的两个点, 接下来寻找下面的两个点
+ if per_to_side >1:
+        raise ValueError("per_to_side 必须小于1!")
+ # 在后面的透视变换中我会把它拉成矩形, 在这里我先获取四个点的高和宽
+ height_ = 150 # 这个值应该是个变化的值,与拉伸的长度有关,但是现在先规定为150
+ width_ = x_right - x_left # 其实也就是 cell_right_above[1] - cell_left_above[1]
+ y = int((y_low - y)*per_to_side + y) # 定位y轴坐标
+ cell_left_below, cell_right_bellow = ([locate_width(a_thresh, y_=y, mode=1), y], [locate_width(a_thresh, y_=y, mode=2), y])
+ # 四个点全齐,开始透视变换
+ # 寻找透视变换后的四个点,只需要变换below的两个点即可
+ # cell_left_below_final, cell_right_bellow_final = ([cell_left_above[1], y_low], [cell_right_above[1], y_low])
+ # 需要变换的四个点为 cell_left_above, cell_right_above, cell_left_below, cell_right_bellow
+ rect = np.array([cell_left_above, cell_right_above, cell_left_below, cell_right_bellow],
+ dtype='float32')
+ # 变化后的坐标点
+ dst = np.array([[0, 0], [width_, 0], [0 , height_], [width_, height_]],
+ dtype='float32')
+ # 计算变换矩阵
+ M = cv2.getPerspectiveTransform(rect, dst)
+ warped = cv2.warpPerspective(image, M, (width_, height_))
+ final = cover_image(image=warped, background=image, mode=3, x=cell_left_above[0], y=cell_left_above[1])
+ # tmp = np.zeros(image.shape)
+ # final = cover_image(image=warped, background=tmp, mode=3, x=cell_left_above[0], y=cell_left_above[1])
+ # final = cover_image(image=image, background=final, mode=3, x=0, y=0)
+ return final
+
+
+def transformationNeck(image:np.ndarray, cutNeckHeight:int, neckBelow:int,
+ toHeight:int,per_to_side:float=0.75) -> np.ndarray:
+ """
+ 脖子扩充算法, 其实需要输入的只是脖子扣出来的部分以及需要被扩充的高度/需要被扩充成的高度.
+ """
+ height, width, channels = image.shape
+ _, _, _, a = cv2.split(image) # 这应该是一个四通道的图像
+ ret, a_thresh = cv2.threshold(a, 127, 255, cv2.THRESH_BINARY) # 将透明图层二值化
+ def locate_width(image_:np.ndarray, y_:int, mode, left_or_right:int=None):
+ # 从y=y这个水平线上寻找两边的非零点
+ # 增加left_or_right的原因在于为下面check_jaw服务
+ if mode==1: # 左往右
+ x_ = 0
+ if left_or_right is None:
+ left_or_right = 0
+ for x_ in range(left_or_right, width):
+ if image_[y_][x_] != 0:
+ break
+ else: # 右往左
+ x_ = width
+ if left_or_right is None:
+ left_or_right = width - 1
+ for x_ in range(left_or_right, -1, -1):
+ if image_[y_][x_] != 0:
+ break
+ return x_
+ def check_jaw(image_:np.ndarray, left_, right_):
+ """
+        检查选择的点是否截到下巴,如果截到了,就往下平移一个单位
+ """
+ f= True # True代表没截到下巴
+ # [x, y]
+ for x_cell in range(left_[0] + 1, right_[0]):
+ if image_[left_[1]][x_cell] == 0:
+ f = False
+ break
+ if f is True:
+ return left_, right_
+ else:
+ y_ = left_[1] + 2
+ x_left_ = locate_width(image_, y_, mode=1, left_or_right=left_[0])
+ x_right_ = locate_width(image_, y_, mode=2, left_or_right=right_[0])
+ left_, right_ = check_jaw(image_, [x_left_, y_], [x_right_, y_])
+ return left_, right_
+ x_left = locate_width(image_=a_thresh, mode=1, y_=cutNeckHeight)
+ x_right = locate_width(image_=a_thresh, mode=2, y_=cutNeckHeight)
+ # 在这里我们取消了对下巴的检查,原因在于输入的imageHeight并不能改变
+ # cell_left_above, cell_right_above = check_jaw(a_thresh, [x_left, imageHeight], [x_right, imageHeight])
+ cell_left_above, cell_right_above = [x_left, cutNeckHeight], [x_right, cutNeckHeight]
+ toWidth = x_right - x_left # 矩形宽
+ # 此时我们寻找到了脖子的"宽出来的"两个点,这两个点作为上面的两个点, 接下来寻找下面的两个点
+ if per_to_side >1:
+        raise ValueError("per_to_side 必须小于1!")
+ y_below = int((neckBelow - cutNeckHeight) * per_to_side + cutNeckHeight) # 定位y轴坐标
+ cell_left_below = [locate_width(a_thresh, y_=y_below, mode=1), y_below]
+ cell_right_bellow = [locate_width(a_thresh, y_=y_below, mode=2), y_below]
+ # 四个点全齐,开始透视变换
+ # 需要变换的四个点为 cell_left_above, cell_right_above, cell_left_below, cell_right_bellow
+ rect = np.array([cell_left_above, cell_right_above, cell_left_below, cell_right_bellow],
+ dtype='float32')
+ # 变化后的坐标点
+ dst = np.array([[0, 0], [toWidth, 0], [0 , toHeight], [toWidth, toHeight]],
+ dtype='float32')
+ M = cv2.getPerspectiveTransform(rect, dst)
+ warped = cv2.warpPerspective(image, M, (toWidth, toHeight))
+ # 将变换后的图像覆盖到原图上
+ final = cover_image(image=warped, background=image, mode=3, x=cell_left_above[0], y=cell_left_above[1])
+ return final
+
+
+def bestJunctionCheck_beta(image:np.ndarray, stepSize:int=4, if_per:bool=False):
+ """
+ 最优衔接点检测算法, 去寻找脖子的"拐点"
+ """
+ point_k = 1
+ _, _, _, a = cv2.split(image) # 这应该是一个四通道的图像
+ height, width = a.shape
+ ret, a_thresh = cv2.threshold(a, 127, 255, cv2.THRESH_BINARY) # 将透明图层二值化
+ y_high, y_low, x_left, x_right = get_box_pro(image=image, model=1) # 直接返回矩阵信息
+ def scan(y_:int, max_num:int=2):
+ num = 0
+ left = False
+ right = False
+ for x_ in range(width):
+ if a_thresh[y_][x_] != 0:
+ if x_ < width // 2 and left is False:
+ num += 1
+ left = True
+ elif x_ > width // 2 and right is False:
+ num += 1
+ right = True
+ return True if num >= max_num else False
+ def locate_neck_above():
+ """
+ 定位脖子的尖尖脚
+ """
+ for y_ in range( y_high - 2, height):
+ if scan(y_):
+ return y_, y_
+ y_high_left, y_high_right = locate_neck_above()
+ def locate_width_pro(image_:np.ndarray, y_:int, mode):
+ """
+ 这会是一个生成器,用于生成脖子两边的轮廓
+ x_, y_ 是启始点的坐标,每一次寻找都会让y_+1
+ mode==1说明是找左边的边,即,image_[y_][x_] == 0 且image_[y_][x_ + 1] !=0 时跳出;
+ 否则 当image_[y_][x_] != 0 时, x_ - 1; 当image_[y_][x_] == 0 且 image_[y_][x_ + 1] ==0 时x_ + 1
+ mode==2说明是找右边的边,即,image_[y_][x_] == 0 且image_[y_][x_ - 1] !=0 时跳出
+ 否则 当image_[y_][x_] != 0 时, x_ + 1; 当image_[y_][x_] == 0 且 image_[y_][x_ - 1] ==0 时x_ - 1
+ """
+ y_ += 1
+ if mode == 1:
+ x_ = 0
+ while 0 <= y_ < height and 0 <= x_ < width:
+ while image_[y_][x_] != 0 and x_ >= 0:
+ x_ -= 1
+ while image_[y_][x_] == 0 and image_[y_][x_ + 1] == 0 and x_ < width - 2:
+ x_ += 1
+ yield [y_, x_]
+ y_ += 1
+ elif mode == 2:
+ x_ = width-1
+ while 0 <= y_ < height and 0 <= x_ < width:
+ while image_[y_][x_] != 0 and x_ < width - 2: x_ += 1
+ while image_[y_][x_] == 0 and image_[y_][x_ - 1] == 0 and x_ >= 0: x_ -= 1
+ yield [y_, x_]
+ y_ += 1
+ yield False
+ def kGenerator(image_:np.ndarray, mode):
+ """
+ 导数生成器,用来生成每一个点对应的导数
+ """
+ y_ = y_high_left if mode == 1 else y_high_right
+ c_generator = locate_width_pro(image_=image_, y_=y_, mode=mode)
+ for cell in c_generator:
+ nc = locate_width_pro(image_=image_, y_=cell[0] + stepSize, mode=mode)
+ nextCell = next(nc)
+ if nextCell is False:
+ yield False, False
+ else:
+ k = (cell[1] - nextCell[1]) / stepSize
+ yield k, cell
+ def findPt(image_:np.ndarray, mode):
+ k_generator = kGenerator(image_=image_, mode=mode)
+ k, cell = next(k_generator)
+ k_next, cell_next = next(k_generator)
+ if k is False:
+ raise ValueError("无法找到拐点!")
+ while k_next is not False:
+ k_next, cell_next = next(k_generator)
+ if (k_next < - 1 / stepSize) or k_next > point_k:
+ break
+ cell = cell_next
+ # return int(cell[0] + stepSize / 2)
+ return cell[0]
+ # 先找左边的拐点:
+ pointY_left = findPt(image_=a_thresh, mode=1)
+ # 再找右边的拐点:
+ pointY_right = findPt(image_=a_thresh, mode=2)
+ point = (pointY_left + pointY_right) // 2
+ if if_per is True:
+ point = (pointY_left + pointY_right) // 2
+ return point / (y_low - y_high)
+ pointX_left = next(locate_width_pro(image_=a_thresh, y_= point - 1, mode=1))[1]
+ pointX_right = next(locate_width_pro(image_=a_thresh, y_=point- 1, mode=2))[1]
+ return [pointX_left, point], [pointX_right, point]
+
+
+def bestJunctionCheck(image:np.ndarray, offset:int, stepSize:int=4):
+ """
+    最优衔接点检测算法:输入一张脖子图片(无论这张图片是否已经被二值化,都按未二值化处理),输出一个小数(脖子最上方到衔接点的距离/脖子图像长度)
+    与beta版不同的是它新增了一个阈值限定内容.
+    对于脖子而言,我们首先可以定位到上面的部分,然后根据上面的这个点向下进行遍历检测.
+    与beta版类似,我们使用一个stepSize来用作斜率的检测.
+    但是对于遍历检测而言,与beta版不同的是,我们需要对遍历的地方进行一定的限制.
+    限制的标准是,如果当前遍历的点的横坐标和起始点横坐标的差值超过了某个阈值,则认为是越界.
+ """
+ point_k = 1
+ _, _, _, a = cv2.split(image) # 这应该是一个四通道的图像
+ height, width = a.shape
+ ret, a_thresh = cv2.threshold(a, 127, 255, cv2.THRESH_BINARY) # 将透明图层二值化
+ # 直接返回脖子的位置信息, 修正系数为0, get_box_pro内部也封装了二值化,所以直接输入原图
+ y_high, y_low, _, _ = get_box_pro(image=image, model=1, correction_factor=0)
+ # 真正有用的只有上下y轴的两个值...
+ # 首先当然是确定起始点的位置,我们用同样的scan扫描函数进行行遍历.
+ def scan(y_:int, max_num:int=2):
+ num = 0
+ # 设定两个值,分别代表脖子的左边和右边
+ left = False
+ right = False
+ for x_ in range(width):
+ if a_thresh[y_][x_] != 0:
+ # 检测左边
+ if x_ < width // 2 and left is False:
+ num += 1
+ left = True
+ # 检测右边
+ elif x_ > width // 2 and right is False:
+ num += 1
+ right = True
+ return True if num >= max_num else False
+ def locate_neck_above():
+ """
+ 定位脖子的尖尖脚
+ """
+ # y_high就是脖子的最高点
+ for y_ in range(y_high, height):
+ if scan(y_):
+ return y_
+ y_start = locate_neck_above() # 得到遍历的初始高度
+    if y_low - y_start < stepSize: raise ValueError("脖子太小!")
+ # 然后获取一下初始的坐标点
+ x_left, x_right = 0, width
+ for x_left_ in range(0, width):
+ if a_thresh[y_start][x_left_] != 0:
+ x_left = x_left_
+ break
+ for x_right_ in range(width -1 , -1, -1):
+ if a_thresh[y_start][x_right_] != 0:
+ x_right = x_right_
+ break
+ # 接下来我定义两个生成器,首先是脖子轮廓(向下寻找的)生成器,每进行一次next,生成器会返回y+1的脖子轮廓点
+ def contoursGenerator(image_:np.ndarray, y_:int, mode):
+ """
+ 这会是一个生成器,用于生成脖子两边的轮廓
+ y_ 是启始点的y坐标,每一次寻找都会让y_+1
+ mode==1说明是找左边的边,即,image_[y_][x_] == 0 且image_[y_][x_ + 1] !=0 时跳出;
+ 否则 当image_[y_][x_] != 0 时, x_ - 1; 当image_[y_][x_] == 0 且 image_[y_][x_ + 1] ==0 时x_ + 1
+ mode==2说明是找右边的边,即,image_[y_][x_] == 0 且image_[y_][x_ - 1] !=0 时跳出
+ 否则 当image_[y_][x_] != 0 时, x_ + 1; 当image_[y_][x_] == 0 且 image_[y_][x_ - 1] ==0 时x_ - 1
+ """
+ y_ += 1
+ try:
+ if mode == 1:
+ x_ = 0
+ while 0 <= y_ < height and 0 <= x_ < width:
+ while image_[y_][x_] != 0 and x_ >= 0: x_ -= 1
+ # 这里其实会有bug,不过可以不管
+ while x_ < width and image_[y_][x_] == 0 and image_[y_][x_ + 1] == 0: x_ += 1
+ yield [y_, x_]
+ y_ += 1
+ elif mode == 2:
+ x_ = width-1
+ while 0 <= y_ < height and 0 <= x_ < width:
+ while x_ < width and image_[y_][x_] != 0: x_ += 1
+ while x_ >= 0 and image_[y_][x_] == 0 and image_[y_][x_ - 1] == 0: x_ -= 1
+ yield [y_, x_]
+ y_ += 1
+ # 当处理失败则返回False
+ except IndexError:
+ yield False
+ # 然后是斜率生成器,这个生成器依赖子轮廓生成器,每一次生成轮廓后会计算斜率,另一个点的选取和stepSize有关
+ def kGenerator(image_: np.ndarray, mode):
+ """
+ 导数生成器,用来生成每一个点对应的导数
+ """
+ y_ = y_start
+ # 对起始点建立一个生成器, mode=1时是左边轮廓,mode=2时是右边轮廓
+ c_generator = contoursGenerator(image_=image_, y_=y_, mode=mode)
+ for cell in c_generator:
+ # 寻找距离当前cell距离为stepSize的轮廓点
+ kc = contoursGenerator(image_=image_, y_=cell[0] + stepSize, mode=mode)
+ kCell = next(kc)
+ if kCell is False:
+ # 寻找失败
+ yield False, False
+ else:
+ # 寻找成功,返回当坐标点和斜率值
+ # 对于左边而言,斜率必然是前一个点的坐标减去后一个点的坐标
+ # 对于右边而言,斜率必然是后一个点的坐标减去前一个点的坐标
+ k = (cell[1] - kCell[1]) / stepSize if mode == 1 else (kCell[1] - cell[1]) / stepSize
+ yield k, cell
+ # 接着开始写寻找算法,需要注意的是我们是分两边选择的
+ def findPt(image_:np.ndarray, mode):
+ x_base = x_left if mode == 1 else x_right
+ k_generator = kGenerator(image_=image_, mode=mode)
+ k, cell = k_generator.__next__()
+ if k is False:
+ raise ValueError("无法找到拐点!")
+ k_next, cell_next = k_generator.__next__()
+ while k_next is not False:
+ cell = cell_next
+ if cell[1] > x_base and mode == 2:
+ x_base = cell[1]
+ elif cell[1] < x_base and mode == 1:
+ x_base = cell[1]
+ # 跳出循环的方式一:斜率超过了某个值
+ if k_next > point_k:
+ print("K out")
+ break
+ # 跳出循环的方式二:超出阈值
+ elif abs(cell[1] - x_base) > offset:
+ print("O out")
+ break
+ k_next, cell_next = k_generator.__next__()
+ if abs(cell[1] - x_base) > offset:
+ cell[0] = cell[0] - offset - 1
+ return cell[0]
+ # 先找左边的拐点:
+ pointY_left = findPt(image_=a_thresh, mode=1)
+ # 再找右边的拐点:
+ pointY_right = findPt(image_=a_thresh, mode=2)
+ point = min(pointY_right, pointY_left)
+ per = (point - y_high) / (y_low - y_high)
+ # pointX_left = next(contoursGenerator(image_=a_thresh, y_= point- 1, mode=1))[1]
+ # pointX_right = next(contoursGenerator(image_=a_thresh, y_=point - 1, mode=2))[1]
+ # return [pointX_left, point], [pointX_right, point]
+ return per
+
+
+def checkSharpCorner(image:np.ndarray):
+ _, _, _, a = cv2.split(image) # 这应该是一个四通道的图像
+ height, width = a.shape
+ ret, a_thresh = cv2.threshold(a, 127, 255, cv2.THRESH_BINARY) # 将透明图层二值化
+ # 直接返回脖子的位置信息, 修正系数为0, get_box_pro内部也封装了二值化,所以直接输入原图
+ y_high, y_low, _, _ = get_box_pro(image=image, model=1, correction_factor=0)
+ def scan(y_:int, max_num:int=2):
+ num = 0
+ # 设定两个值,分别代表脖子的左边和右边
+ left = False
+ right = False
+ for x_ in range(width):
+ if a_thresh[y_][x_] != 0:
+ # 检测左边
+ if x_ < width // 2 and left is False:
+ num += 1
+ left = True
+ # 检测右边
+ elif x_ > width // 2 and right is False:
+ num += 1
+ right = True
+ return True if num >= max_num else False
+ def locate_neck_above():
+ """
+ 定位脖子的尖尖脚
+ """
+ # y_high就是脖子的最高点
+ for y_ in range(y_high, height):
+ if scan(y_):
+ return y_
+ y_start = locate_neck_above()
+ return y_start
+
+
+def checkJaw(image:np.ndarray, y_start:int):
+ # 寻找"马鞍点"
+ _, _, _, a = cv2.split(image) # 这应该是一个四通道的图像
+ height, width = a.shape
+ ret, a_thresh = cv2.threshold(a, 127, 255, cv2.THRESH_BINARY) # 将透明图层二值化
+ if width <=1: raise TypeError("图像太小!")
+ x_left, x_right = 0, width - 1
+ for x_left in range(width):
+ if a_thresh[y_start][x_left] != 0:
+ while a_thresh[y_start][x_left] != 0: x_left += 1
+ break
+ for x_right in range(width-1, -1, -1):
+ if a_thresh[y_start][x_right] != 0:
+ while a_thresh[y_start][x_right] != 0: x_right -= 1
+ break
+ point_list_y = []
+ point_list_x = []
+ for x in range(x_left, x_right):
+ y = y_start
+ while a_thresh[y][x] == 0: y += 1
+ point_list_y.append(y)
+ point_list_x.append(x)
+ y = max(point_list_y)
+ x = point_list_x[point_list_y.index(y)]
+ return x, y
+
+
+def checkHairLOrR(cloth_image_input_cut,
+ input_a,
+ neck_a,
+ cloth_image_input_top_y,
+ cutbar_top=0.4,
+ cutbar_bottom=0.5,
+ threshold=0.3):
+ """
+ 本函数用于检测衣服是否被头发遮挡,当前只考虑左右是否被遮挡,即"一刀切"
+ 返回int
+ 0代表没有被遮挡
+ 1代表左边被遮挡
+ 2代表右边被遮挡
+ 3代表全被遮挡了
+ 约定,输入的图像是一张灰度图,且被二值化过.
+ """
+ def per_darkPoint(img:np.ndarray) -> int:
+ """
+ 用于遍历相加图像上的黑点.
+ 然后返回黑点数/图像面积
+ """
+ h, w = img.shape
+ sum_darkPoint = 0
+ for y in range(h):
+ for x in range(w):
+ if img[y][x] == 0:
+ sum_darkPoint += 1
+ return sum_darkPoint / (h * w)
+
+ if threshold < 0 or threshold > 1: raise TypeError("阈值设置必须在0和1之间!")
+
+ # 裁出cloth_image_input_cut按高度40%~50%的区域-cloth_image_input_cutbar,并转换为A矩阵,做二值化
+ cloth_image_input_height = cloth_image_input_cut.shape[0]
+ _, _, _, cloth_image_input_cutbar = cv2.split(cloth_image_input_cut[
+ int(cloth_image_input_height * cutbar_top):int(
+ cloth_image_input_height * cutbar_bottom), :])
+ _, cloth_image_input_cutbar = cv2.threshold(cloth_image_input_cutbar, 127, 255, cv2.THRESH_BINARY)
+
+ # 裁出input_image、neck_image的A矩阵的对应区域,并做二值化
+ input_a_cutbar = input_a[cloth_image_input_top_y + int(cloth_image_input_height * cutbar_top):
+ cloth_image_input_top_y + int(cloth_image_input_height * cutbar_bottom), :]
+ _, input_a_cutbar = cv2.threshold(input_a_cutbar, 127, 255, cv2.THRESH_BINARY)
+ neck_a_cutbar = neck_a[cloth_image_input_top_y + int(cloth_image_input_height * cutbar_top):
+ cloth_image_input_top_y + int(cloth_image_input_height * cutbar_bottom), :]
+ _, neck_a_cutbar = cv2.threshold(neck_a_cutbar, 50, 255, cv2.THRESH_BINARY)
+
+ # 将三个cutbar合到一起-result_a_cutbar
+ input_a_cutbar = np.uint8(255 - input_a_cutbar)
+ result_a_cutbar = cv2.add(input_a_cutbar, cloth_image_input_cutbar)
+ result_a_cutbar = cv2.add(result_a_cutbar, neck_a_cutbar)
+
+ if_mask = 0
+ # 我们将图像 一刀切,分为左边和右边
+ height, width = result_a_cutbar.shape # 一通道图像
+ left_image = result_a_cutbar[:, :width//2]
+ right_image = result_a_cutbar[:, width//2:]
+ if per_darkPoint(left_image) > threshold:
+ if_mask = 1
+ if per_darkPoint(right_image) > threshold:
+ if_mask = 3 if if_mask == 1 else 2
+ return if_mask
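+
+# Illustrative call sketch for checkHairLOrR (the inputs below are placeholders,
+# assumed to be prepared by the clothing-replacement pipeline, not by this file):
+#   _, _, _, input_a = cv2.split(matting_png)   # alpha of the full matting result
+#   _, _, _, neck_a = cv2.split(neck_png)       # alpha of the generated neck image
+#   state = checkHairLOrR(cloth_png_cut, input_a, neck_a, cloth_top_y)
+#   # state: 0 = clear, 1 = left covered, 2 = right covered, 3 = both covered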
+
+
+def find_black(image):
+ """
+ 找黑色点函数,遇到输入矩阵中的第一个黑点,返回它的y值
+ """
+ height, width = image.shape[0], image.shape[1]
+ for i in range(height):
+ for j in range(width):
+ if image[i, j] < 127:
+ return i
+ return None
+
+
+def convert_black_array(image):
+ height, width = image.shape[0], image.shape[1]
+ mask = np.zeros([height, width])
+ for j in range(width):
+ for i in range(height):
+ if image[i, j] > 127:
+ mask[i:, j] = 1
+ break
+ return mask
+
+
+def checkLongHair(neck_image, head_bottom_y, neck_top_y):
+ """
+ 长发检测函数,输入为head/neck图像,通过下巴是否为最低点,来判断是否为长发
+ :return 0 : 短发
+ :return 1 : 长发
+ """
+ jaw_y = neck_top_y + checkJaw(neck_image, y_start=checkSharpCorner(neck_image))[1]
+ if jaw_y >= head_bottom_y-3:
+ return 0
+ else:
+ return 1
+
+
+def checkLongHair2(head_bottom_y, cloth_top_y):
+ if head_bottom_y > cloth_top_y+10:
+ return 1
+ else:
+ return 0
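+
+# Illustrative sketch of the two long-hair checks above (hypothetical inputs;
+# head_bottom_y, neck_top_y and cloth_top_y would come from earlier box detection):
+#   is_long_hair = checkLongHair(neck_png, head_bottom_y, neck_top_y)   # 0 short, 1 long
+#   is_long_hair2 = checkLongHair2(head_bottom_y, cloth_top_y)          # simpler fallback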
+
+
+if __name__ == "__main__":
+ for i in range(1, 8):
+ img = cv2.imread(f"./neck_temp/neck_image{i}.png", cv2.IMREAD_UNCHANGED)
+ # new = transformationNeck(image=img, cutNeckHeight=419,neckBelow=472, toHeight=150)
+ # point_list = bestJunctionCheck(img, offset=5, stepSize=3)
+ # per = bestJunctionCheck(img, offset=5, stepSize=3)
+ # # 返回一个小数的形式, 接下来我将它处理为两个点
+ point_list = []
+ # y_high_, y_low_, _, _ = get_box_pro(image=img, model=1, conreection_factor=0)
+ # _y = y_high_ + int((y_low_ - y_high_) * per)
+ # _, _, _, a_ = cv2.split(img) # 这应该是一个四通道的图像
+ # h, w = a_.shape
+ # r, a_t = cv2.threshold(a_, 127, 255, cv2.THRESH_BINARY) # 将透明图层二值化
+ # _x = 0
+ # for _x in range(w):
+ # if a_t[_y][_x] != 0:
+ # break
+ # point_list.append([_x, _y])
+ # for _x in range(w - 1, -1, -1):
+ # if a_t[_y][_x] != 0:
+ # break
+ # point_list.append([_x, _y])
+ y = checkSharpCorner(img)
+ point = checkJaw(image=img, y_start=y)
+ point_list.append(point)
+ new = draw_picture_dots(img, point_list, pen_size=2)
+ cv2.imshow(f"{i}", new)
+ cv2.waitKey(0)
\ No newline at end of file
diff --git a/error.py b/error.py
new file mode 100644
index 0000000000000000000000000000000000000000..cbfba9c6a4238b5e6a5a1b3849eaefd17793cf54
--- /dev/null
+++ b/error.py
@@ -0,0 +1,27 @@
+"""
+@author: cuny
+@file: error.py
+@time: 2022/4/7 15:50
+@description:
+定义证件照制作的错误类
+"""
+from hivisionai.hyService.error import ProcessError
+
+
+class IDError(ProcessError):
+ def __init__(self, err, diary=None, face_num=-1, status_id: str = "1500"):
+ """
+ 用于报错
+ Args:
+ err: 错误描述
+ diary: 函数运行日志,默认为None
+ face_num: 告诉此时识别到的人像个数,如果为-1则说明为未知错误
+ """
+ super().__init__(err)
+ if diary is None:
+ diary = {}
+ self.err = err
+ self.diary = diary
+ self.face_num = face_num
+ self.status_id = status_id
+
diff --git a/face_judgement_align.py b/face_judgement_align.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ea0f9880fe88dd4884312513cee5ae51e8bd1e7
--- /dev/null
+++ b/face_judgement_align.py
@@ -0,0 +1,578 @@
+import math
+import cv2
+import numpy as np
+from hivisionai.hycv.face_tools import face_detect_mtcnn
+from hivisionai.hycv.utils import get_box_pro, CV2Bytes
+from hivisionai.hycv.vision import resize_image_esp, IDphotos_cut, add_background, calTime, resize_image_by_min, \
+ rotate_bound_4channels
+import onnxruntime
+from EulerZ import eulerZ
+from beautyPlugin import makeBeautiful
+from error import IDError
+from imageTransform import standard_photo_resize, hollowOutFix, get_modnet_matting, draw_picture_dots, detect_distance
+from layoutCreate import generate_layout_photo
+from move_image import move
+
+testImages = []
+
+
+class LinearFunction_TwoDots(object):
+ """
+ 通过两个坐标点构建线性函数
+ """
+
+ def __init__(self, dot1, dot2):
+ self.d1 = dot1
+ self.d2 = dot2
+ self.mode = "normal"
+ if self.d2.x != self.d1.x:
+ self.k = (self.d2.y - self.d1.y) / max((self.d2.x - self.d1.x), 1)
+ self.b = self.d2.y - self.k * self.d2.x
+ else:
+ self.mode = "x=1"
+
+ def forward(self, input_, mode="x"):
+ if mode == "x":
+ if self.mode == "normal":
+ return self.k * input_ + self.b
+ else:
+ return 0
+ elif mode == "y":
+ if self.mode == "normal":
+ return (input_ - self.b) / self.k
+ else:
+ return self.d1.x
+
+ def forward_x(self, x):
+ if self.mode == "normal":
+ return self.k * x + self.b
+ else:
+ return 0
+
+ def forward_y(self, y):
+ if self.mode == "normal":
+ return (y - self.b) / self.k
+ else:
+ return self.d1.x
+
+
+class Coordinate(object):
+ def __init__(self, x, y):
+ self.x = x
+ self.y = y
+
+ def __str__(self):
+ return "({}, {})".format(self.x, self.y)
+
+
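+# Minimal sketch of how the two helper classes above are combined (arbitrary
+# values, for illustration only):
+#   p1 = Coordinate(0, 100)
+#   p2 = Coordinate(200, 300)
+#   line = LinearFunction_TwoDots(p1, p2)   # k = 1.0, b = 100
+#   y_at_x50 = line.forward_x(50)           # -> 150.0
+#   x_at_y150 = line.forward_y(150)         # -> 50.0
+
+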
+@calTime
+def face_number_and_angle_detection(input_image):
+ """
+ 本函数的功能是利用机器学习算法计算图像中人脸的数目与关键点,并通过关键点信息来计算人脸在平面上的旋转角度。
+ 当前人脸数目!=1时,将raise一个错误信息并终止全部程序。
+ Args:
+ input_image: numpy.array(3 channels),用户上传的原图(经过了一些简单的resize)
+
+ Returns:
+ - dets: list,人脸定位信息(x1, y1, x2, y2)
+ - rotation: int,旋转角度,正数代表逆时针偏离,负数代表顺时针偏离
+ - landmark: list,人脸关键点信息
+ """
+
+ # face++人脸检测
+ # input_image_bytes = CV2Bytes.cv2_byte(input_image, ".jpg")
+ # face_num, face_rectangle, landmarks, headpose = megvii_face_detector(input_image_bytes)
+ # print(face_rectangle)
+
+ faces, landmarks = face_detect_mtcnn(input_image)
+ face_num = len(faces)
+
+ # 排除不合人脸数目要求(必须是1)的照片
+ if face_num == 0 or face_num >= 2:
+ if face_num == 0:
+ status_id_ = "1101"
+ else:
+ status_id_ = "1102"
+ raise IDError(f"人脸检测出错!检测出了{face_num}张人脸", face_num=face_num, status_id=status_id_)
+
+ # 获得人脸定位坐标
+ face_rectangle = []
+    for (x1, y1, x2, y2, _) in faces:
+        x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
+        face_rectangle.append({'top': y1, 'left': x1, 'width': x2 - x1, 'height': y2 - y1})
+
+ # 获取人脸定位坐标与关键点信息
+ dets = face_rectangle[0]
+ # landmark = landmarks[0]
+ #
+ # # 人脸旋转角度计算
+ # rotation = eulerZ(landmark)
+ # return dets, rotation, landmark
+ return dets
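+
+# Sketch of the intended error handling around the detector above (IDError carries
+# err / face_num / status_id, see error.py; status ids "1101"/"1102" are set here):
+#   try:
+#       dets = face_number_and_angle_detection(input_image)
+#   except IDError as e:
+#       print(e.err, e.face_num, e.status_id)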
+
+@calTime
+def image_matting(input_image, params):
+ """
+ 本函数的功能为全局人像抠图。
+ Args:
+ - input_image: numpy.array(3 channels),用户原图
+
+ Returns:
+ - origin_png_image: numpy.array(4 channels), 抠好的图
+ """
+
+ print("抠图采用本地模型")
+ origin_png_image = get_modnet_matting(input_image, sess=params["modnet"]["human_sess"])
+
+ origin_png_image = hollowOutFix(origin_png_image) # 抠图洞洞修补
+ return origin_png_image
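+
+# Sketch of how the matting step is driven (mirrors the params dict built in
+# IDphotos_create and the model path used in the __main__ block below):
+#   sess = onnxruntime.InferenceSession("./hivision_modnet.onnx")
+#   matting_params = {"modnet": {"human_sess": sess}}
+#   png_4ch = image_matting(bgr_image, matting_params)   # 4-channel result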
+
+
+@calTime
+def rotation_ajust(input_image, rotation, a, IS_DEBUG=False):
+ """
+ 本函数的功能是根据旋转角对原图进行无损旋转,并返回结果图与附带信息。
+ Args:
+ - input_image: numpy.array(3 channels), 用户上传的原图(经过了一些简单的resize、美颜)
+ - rotation: float, 人的五官偏离"端正"形态的旋转角
+ - a: numpy.array(1 channel), matting图的matte
+ - IS_DEBUG: DEBUG模式开关
+
+ Returns:
+ - result_jpg_image: numpy.array(3 channels), 原图旋转的结果图
+ - result_png_image: numpy.array(4 channels), matting图旋转的结果图
+ - L1: CLassObject, 根据旋转点连线所构造函数
+ - L2: ClassObject, 根据旋转点连线所构造函数
+ - dotL3: ClassObject, 一个特殊裁切点的坐标
+ - clockwise: int, 表示照片是顺时针偏离还是逆时针偏离
+ - drawed_dots_image: numpy.array(3 channels), 在result_jpg_image上标定了4个旋转点的结果图,用于DEBUG模式
+ """
+
+ # Step1. 数据准备
+ rotation = -1 * rotation # rotation为正数->原图顺时针偏离,为负数->逆时针偏离
+ h, w = input_image.copy().shape[:2]
+
+ # Step2. 无损旋转
+ result_jpg_image, result_png_image, cos, sin = rotate_bound_4channels(input_image, a, rotation)
+
+ # Step3. 附带信息计算
+ nh, nw = result_jpg_image.shape[:2] # 旋转后的新的长宽
+ clockwise = -1 if rotation < 0 else 1 # clockwise代表时针,即1为顺时针,-1为逆时针
+ # 如果逆时针偏离:
+ if rotation < 0:
+ p1 = Coordinate(0, int(w * sin))
+ p2 = Coordinate(int(w * cos), 0)
+ p3 = Coordinate(nw, int(h * cos))
+ p4 = Coordinate(int(h * sin), nh)
+ L1 = LinearFunction_TwoDots(p1, p4)
+ L2 = LinearFunction_TwoDots(p4, p3)
+ dotL3 = Coordinate(int(0.25 * p2.x + 0.75 * p3.x), int(0.25 * p2.y + 0.75 * p3.y))
+
+ # 如果顺时针偏离:
+ else:
+ p1 = Coordinate(int(h * sin), 0)
+ p2 = Coordinate(nw, int(w * sin))
+ p3 = Coordinate(int(w * cos), nh)
+ p4 = Coordinate(0, int(h * cos))
+ L1 = LinearFunction_TwoDots(p4, p3)
+ L2 = LinearFunction_TwoDots(p3, p2)
+ dotL3 = Coordinate(int(0.75 * p4.x + 0.25 * p1.x), int(0.75 * p4.y + 0.25 * p1.y))
+
+ # Step4. 根据附带信息进行图像绘制(4个旋转点),便于DEBUG模式验证
+ drawed_dots_image = draw_picture_dots(result_jpg_image, [(p1.x, p1.y), (p2.x, p2.y), (p3.x, p3.y),
+ (p4.x, p4.y), (dotL3.x, dotL3.y)])
+ if IS_DEBUG:
+ testImages.append(["drawed_dots_image", drawed_dots_image])
+
+ return result_jpg_image, result_png_image, L1, L2, dotL3, clockwise, drawed_dots_image
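+
+# Sketch of how the outputs above feed the cropping stage (this mirrors the
+# currently commented-out Step4 of IDphotos_create; it is not active code):
+#   jpg_rot, png_rot, L1, L2, L3, clockwise, drawed = \
+#       rotation_ajust(input_image, rotation, cv2.split(origin_png_image)[-1])
+#   rotation_params = {"L1": L1, "L2": L2, "L3": L3,
+#                      "clockwise": clockwise, "drawed_image": drawed}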
+
+
+@calTime
+def face_number_detection_mtcnn(input_image):
+ """
+ 本函数的功能是对旋转矫正的结果图进行基于MTCNN模型的人脸检测。
+ Args:
+ - input_image: numpy.array(3 channels), 旋转矫正(rotation_adjust)的3通道结果图
+
+ Returns:
+ - faces: list, 人脸检测的结果,包含人脸位置信息
+ """
+ # 如果图像的长或宽>1500px,则对图像进行1/2的resize再做MTCNN人脸检测,以加快处理速度
+ if max(input_image.shape[0], input_image.shape[1]) >= 1500:
+ input_image_resize = cv2.resize(input_image,
+ (input_image.shape[1] // 2, input_image.shape[0] // 2),
+ interpolation=cv2.INTER_AREA)
+ faces, _ = face_detect_mtcnn(input_image_resize, filter=True) # MTCNN人脸检测
+ # 如果缩放后图像的MTCNN人脸数目检测结果等于1->两次人脸检测结果没有偏差,则对定位数据x2
+ if len(faces) == 1:
+ for item, param in enumerate(faces[0]):
+ faces[0][item] = param * 2
+ # 如果两次人脸检测结果有偏差,则默认缩放后图像的MTCNN检测存在误差,则将原图输入再做一次MTCNN(保险措施)
+ else:
+ faces, _ = face_detect_mtcnn(input_image, filter=True)
+ # 如果图像的长或宽<1500px,则直接进行MTCNN检测
+ else:
+ faces, _ = face_detect_mtcnn(input_image, filter=True)
+
+ return faces
+
+
+@calTime
+def cutting_rect_pan(x1, y1, x2, y2, width, height, L1, L2, L3, clockwise, standard_size):
+ """
+ 本函数的功能是对旋转矫正结果图的裁剪框进行修正 ———— 解决"旋转三角形"现象。
+ Args:
+ - x1: int, 裁剪框左上角的横坐标
+ - y1: int, 裁剪框左上角的纵坐标
+ - x2: int, 裁剪框右下角的横坐标
+ - y2: int, 裁剪框右下角的纵坐标
+ - width: int, 待裁剪图的宽度
+ - height:int, 待裁剪图的高度
+ - L1: CLassObject, 根据旋转点连线所构造函数
+ - L2: CLassObject, 根据旋转点连线所构造函数
+ - L3: ClassObject, 一个特殊裁切点的坐标
+ - clockwise: int, 旋转时针状态
+ - standard_size: tuple, 标准照的尺寸
+
+ Returns:
+ - x1: int, 新的裁剪框左上角的横坐标
+ - y1: int, 新的裁剪框左上角的纵坐标
+ - x2: int, 新的裁剪框右下角的横坐标
+ - y2: int, 新的裁剪框右下角的纵坐标
+ - x_bias: int, 裁剪框横坐标方向上的计算偏置量
+ - y_bias: int, 裁剪框纵坐标方向上的计算偏置量
+ """
+ # 用于计算的裁剪框坐标x1_cal,x2_cal,y1_cal,y2_cal(如果裁剪框超出了图像范围,则缩小直至在范围内)
+ x1_std = x1 if x1 > 0 else 0
+ x2_std = x2 if x2 < width else width
+ # y1_std = y1 if y1 > 0 else 0
+ y2_std = y2 if y2 < height else height
+
+ # 初始化x和y的计算偏置项x_bias和y_bias
+ x_bias = 0
+ y_bias = 0
+
+ # 如果顺时针偏转
+ if clockwise == 1:
+ if y2 > L1.forward_x(x1_std):
+ y_bias = int(-(y2_std - L1.forward_x(x1_std)))
+ if y2 > L2.forward_x(x2_std):
+ x_bias = int(-(x2_std - L2.forward_y(y2_std)))
+ x2 = x2_std + x_bias
+ if x1 < L3.x:
+ x1 = L3.x
+ # 如果逆时针偏转
+ else:
+ if y2 > L1.forward_x(x1_std):
+ x_bias = int(L1.forward_y(y2_std) - x1_std)
+ if y2 > L2.forward_x(x2_std):
+ y_bias = int(-(y2_std - L2.forward_x(x2_std)))
+ x1 = x1_std + x_bias
+ if x2 > L3.x:
+ x2 = L3.x
+
+ # 计算裁剪框的y的变化
+ y2 = int(y2_std + y_bias)
+ new_cut_width = x2 - x1
+ new_cut_height = int(new_cut_width / standard_size[1] * standard_size[0])
+ y1 = y2 - new_cut_height
+
+ return x1, y1, x2, y2, x_bias, y_bias
+
+
+@calTime
+def idphoto_cutting(faces, head_measure_ratio, standard_size, head_height_ratio, origin_png_image, origin_png_image_pre,
+ rotation_params, align=False, IS_DEBUG=False, top_distance_max=0.12, top_distance_min=0.10):
+ """
+ 本函数的功能为进行证件照的自适应裁剪,自适应依据Setting.json的控制参数,以及输入图像的自身情况。
+ Args:
+ - faces: list, 人脸位置信息
+ - head_measure_ratio: float, 人脸面积与全图面积的期望比值
+ - standard_size: tuple, 标准照尺寸, 如(413, 295)
+ - head_height_ratio: float, 人脸中心处在全图高度的比例期望值
+ - origin_png_image: numpy.array(4 channels), 经过一系列转换后的用户输入图
+ - origin_png_image_pre:numpy.array(4 channels),经过一系列转换(但没有做旋转矫正)的用户输入图
+ - rotation_params:旋转参数字典
+ - L1: classObject, 来自rotation_ajust的L1线性函数
+ - L2: classObject, 来自rotation_ajust的L2线性函数
+ - L3: classObject, 来自rotation_ajust的dotL3点
+ - clockwise: int, (顺/逆)时针偏差
+ - drawed_image: numpy.array, 红点标定4个旋转点的图像
+ - align: bool, 是否图像做过旋转矫正
+ - IS_DEBUG: DEBUG模式开关
+ - top_distance_max: float, 头距离顶部的最大比例
+ - top_distance_min: float, 头距离顶部的最小比例
+
+ Returns:
+ - result_image_hd: numpy.array(4 channels), 高清照
+ - result_image_standard: numpy.array(4 channels), 标准照
+ - clothing_params: json, 换装配置参数,便于后续换装功能的使用
+
+ """
+ # Step0. 旋转参数准备
+ L1 = rotation_params["L1"]
+ L2 = rotation_params["L2"]
+ L3 = rotation_params["L3"]
+ clockwise = rotation_params["clockwise"]
+ drawed_image = rotation_params["drawed_image"]
+
+ # Step1. 准备人脸参数
+ face_rect = faces[0]
+ x, y = face_rect[0], face_rect[1]
+ w, h = face_rect[2] - x + 1, face_rect[3] - y + 1
+ height, width = origin_png_image.shape[:2]
+ width_height_ratio = standard_size[0] / standard_size[1] # 高宽比
+
+ # Step2. 计算高级参数
+ face_center = (x + w / 2, y + h / 2) # 面部中心坐标
+ face_measure = w * h # 面部面积
+    crop_measure = face_measure / head_measure_ratio  # 裁剪框面积:为面部面积的(1/head_measure_ratio)倍
+ resize_ratio = crop_measure / (standard_size[0] * standard_size[1]) # 裁剪框缩放率
+ resize_ratio_single = math.sqrt(resize_ratio) # 长和宽的缩放率(resize_ratio的开方)
+ crop_size = (int(standard_size[0] * resize_ratio_single),
+ int(standard_size[1] * resize_ratio_single)) # 裁剪框大小
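+    # Worked example (illustrative numbers): a face box of w=200, h=250 gives
+    # face_measure = 50000; with head_measure_ratio = 0.2, crop_measure = 250000;
+    # for standard_size = (413, 295) the area is 121835, so resize_ratio ≈ 2.05,
+    # resize_ratio_single ≈ 1.43 and crop_size ≈ (591, 422).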
+
+ # 裁剪框的定位信息
+ x1 = int(face_center[0] - crop_size[1] / 2)
+ y1 = int(face_center[1] - crop_size[0] * head_height_ratio)
+ y2 = y1 + crop_size[0]
+ x2 = x1 + crop_size[1]
+
+ # Step3. 对于旋转矫正图片的裁切处理
+ # if align:
+ # y_top_pre, _, _, _ = get_box_pro(origin_png_image.astype(np.uint8), model=2,
+ # correction_factor=0) # 获取matting结果图的顶距
+ # # 裁剪参数重新计算,目标是以最小的图像损失来消除"旋转三角形"
+ # x1, y1, x2, y2, x_bias, y_bias = cutting_rect_pan(x1, y1, x2, y2, width, height, L1, L2, L3, clockwise,
+ # standard_size)
+ # # 这里设定一个拒绝判定条件,如果裁剪框切进了人脸检测框的话,就不进行旋转
+ # if y1 > y_top_pre:
+ # y2 = y2 - (y1 - y_top_pre)
+ # y1 = y_top_pre
+ # # 如何遇到裁剪到人脸的情况,则转为不旋转裁切
+ # if x1 > x or x2 < (x + w) or y1 > y or y2 < (y + h):
+ # return idphoto_cutting(faces, head_measure_ratio, standard_size, head_height_ratio, origin_png_image_pre,
+ # origin_png_image_pre, rotation_params, align=False, IS_DEBUG=False)
+ #
+ # if y_bias != 0:
+ # origin_png_image = origin_png_image[:y2, :]
+ # if x_bias > 0: # 逆时针
+ # origin_png_image = origin_png_image[:, x1:]
+ # if drawed_image is not None and IS_DEBUG:
+ # drawed_x = x1
+ # x = x - x1
+ # x2 = x2 - x1
+ # x1 = 0
+ # else: # 顺时针
+ # origin_png_image = origin_png_image[:, :x2]
+ #
+ # if drawed_image is not None and IS_DEBUG:
+ # drawed_x = drawed_x if x_bias > 0 else 0
+ # drawed_image = draw_picture_dots(drawed_image, [(x1 + drawed_x, y1), (x1 + drawed_x, y2),
+ # (x2 + drawed_x, y1), (x2 + drawed_x, y2)],
+ # pen_color=(255, 0, 0))
+ # testImages.append(["drawed_image", drawed_image])
+
+ # Step4. 对照片的第一轮裁剪
+ cut_image = IDphotos_cut(x1, y1, x2, y2, origin_png_image)
+ cut_image = cv2.resize(cut_image, (crop_size[1], crop_size[0]))
+ y_top, y_bottom, x_left, x_right = get_box_pro(cut_image.astype(np.uint8), model=2,
+ correction_factor=0) # 得到cut_image中人像的上下左右距离信息
+ if IS_DEBUG:
+ testImages.append(["firstCut", cut_image])
+
+ # Step5. 判定cut_image中的人像是否处于合理的位置,若不合理,则处理数据以便之后调整位置
+ # 检测人像与裁剪框左边或右边是否存在空隙
+ if x_left > 0 or x_right > 0:
+ status_left_right = 1
+ cut_value_top = int(((x_left + x_right) * width_height_ratio) / 2) # 减去左右,为了保持比例,上下也要相应减少cut_value_top
+ else:
+ status_left_right = 0
+ cut_value_top = 0
+
+ """
+ 检测人头顶与照片的顶部是否在合适的距离内:
+ - status==0: 距离合适, 无需移动
+ - status=1: 距离过大, 人像应向上移动
+ - status=2: 距离过小, 人像应向下移动
+ """
+ status_top, move_value = detect_distance(y_top - cut_value_top, crop_size[0], max=top_distance_max,
+ min=top_distance_min)
+
+ # Step6. 对照片的第二轮裁剪
+ if status_left_right == 0 and status_top == 0:
+ result_image = cut_image
+ else:
+ result_image = IDphotos_cut(x1 + x_left,
+ y1 + cut_value_top + status_top * move_value,
+ x2 - x_right,
+ y2 - cut_value_top + status_top * move_value,
+ origin_png_image)
+ if IS_DEBUG:
+ testImages.append(["result_image_pre", result_image])
+
+ # 换装参数准备
+ relative_x = x - (x1 + x_left)
+ relative_y = y - (y1 + cut_value_top + status_top * move_value)
+
+ # Step7. 当照片底部存在空隙时,下拉至底部
+ result_image, y_high = move(result_image.astype(np.uint8))
+ relative_y = relative_y + y_high # 更新换装参数
+
+ # cv2.imwrite("./temp_image.png", result_image)
+
+ # Step8. 标准照与高清照转换
+ result_image_standard = standard_photo_resize(result_image, standard_size)
+ result_image_hd, resize_ratio_max = resize_image_by_min(result_image, esp=max(600, standard_size[1]))
+
+ # Step9. 参数准备-为换装服务
+ clothing_params = {
+ "relative_x": relative_x * resize_ratio_max,
+ "relative_y": relative_y * resize_ratio_max,
+ "w": w * resize_ratio_max,
+ "h": h * resize_ratio_max
+ }
+
+ return result_image_hd, result_image_standard, clothing_params
+
+
+@calTime
+def debug_mode_process(testImages):
+ for item, (text, imageItem) in enumerate(testImages):
+ channel = imageItem.shape[2]
+ (height, width) = imageItem.shape[:2]
+ if channel == 4:
+ imageItem = add_background(imageItem, bgr=(255, 255, 255))
+ imageItem = np.uint8(imageItem)
+ if item == 0:
+ testHeight = height
+ result_image_test = imageItem
+ result_image_test = cv2.putText(result_image_test, text, (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1.0,
+ (200, 100, 100), 3)
+ else:
+ imageItem = cv2.resize(imageItem, (int(width * testHeight / height), testHeight))
+ imageItem = cv2.putText(imageItem, text, (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1.0, (200, 100, 100),
+ 3)
+ result_image_test = cv2.hconcat([result_image_test, imageItem])
+ if item == len(testImages) - 1:
+ return result_image_test
+
+
+@calTime("主函数")
+def IDphotos_create(input_image,
+ mode="ID",
+ size=(413, 295),
+ head_measure_ratio=0.2,
+ head_height_ratio=0.45,
+ align=False,
+ beauty=True,
+ fd68=None,
+ human_sess=None,
+ IS_DEBUG=False,
+ top_distance_max=0.12,
+ top_distance_min=0.10):
+ """
+ 证件照制作主函数
+ Args:
+ input_image: 输入图像矩阵
+ size: (h, w)
+        head_measure_ratio: 人脸面积与全图面积的期望比值
+        head_height_ratio: 人脸中心处在全图高度的期望比例
+        align: 是否进行人脸旋转矫正(roll),默认为False(否)
+ fd68: 人脸68关键点检测类,详情参见hycv.FaceDetection68.faceDetection68
+        human_sess: 人像抠图模型类,由onnx载入
+ top_distance_max: float, 头距离顶部的最大比例
+ top_distance_min: float, 头距离顶部的最小比例
+ Returns:
+        result_image_hd(高清版), result_image_standard(标准版), 排版照参数typography_arr(list),
+        排版照是否旋转参数typography_rotate, 换装参数relative_x/relative_y/w/h, 以及状态码(1为成功, 0为人脸数异常)
+ 在函数不出错的情况下,函数会因为一些原因主动抛出异常:
+ 1. 无人脸(或者只有半张,dlib无法检测出来),抛出IDError异常,内部参数face_num为0
+ 2. 人脸数量超过1,抛出IDError异常,内部参数face_num为2
+ 3. 抠图api请求失败,抛出IDError异常,内部参数face_num为-1
+ """
+
+ # Step0. 数据准备/图像预处理
+ matting_params = {"modnet": {"human_sess": human_sess}}
+ rotation_params = {"L1": None, "L2": None, "L3": None, "clockwise": None, "drawed_image": None}
+ input_image = resize_image_esp(input_image, 2000) # 将输入图片resize到最大边长为2000
+
+ # Step1. 人脸检测
+ # dets, rotation, landmark = face_number_and_angle_detection(input_image)
+ # dets = face_number_and_angle_detection(input_image)
+
+ # Step2. 美颜
+ # if beauty:
+ # input_image = makeBeautiful(input_image, landmark, 2, 2, 5, 4)
+
+ # Step3. 抠图
+ origin_png_image = image_matting(input_image, matting_params)
+ if mode == "只换底":
+ return origin_png_image, origin_png_image, None, None, None, None, None, None, 1
+
+ origin_png_image_pre = origin_png_image.copy() # 备份一下现在抠图结果图,之后在iphoto_cutting函数有用
+
+ # Step4. 旋转矫正
+ # 如果旋转角不大于2, 则不做旋转
+ # if abs(rotation) <= 2:
+ # align = False
+ # # 否则,进行旋转矫正
+ # if align:
+ # input_image_candidate, origin_png_image_candidate, L1, L2, L3, clockwise, drawed_image \
+ # = rotation_ajust(input_image, rotation, cv2.split(origin_png_image)[-1], IS_DEBUG=IS_DEBUG) # 图像旋转
+ #
+ # origin_png_image_pre = origin_png_image.copy()
+ # input_image = input_image_candidate.copy()
+ # origin_png_image = origin_png_image_candidate.copy()
+ #
+ # rotation_params["L1"] = L1
+ # rotation_params["L2"] = L2
+ # rotation_params["L3"] = L3
+ # rotation_params["clockwise"] = clockwise
+ # rotation_params["drawed_image"] = drawed_image
+
+ # Step5. MTCNN人脸检测
+ faces = face_number_detection_mtcnn(input_image)
+
+ # Step6. 证件照自适应裁剪
+ face_num = len(faces)
+ # 报错MTCNN检测结果不等于1的图片
+ if face_num != 1:
+ return None, None, None, None, None, None, None, None, 0
+ # 符合条件的进入下一环
+ else:
+ result_image_hd, result_image_standard, clothing_params = \
+ idphoto_cutting(faces, head_measure_ratio, size, head_height_ratio, origin_png_image,
+ origin_png_image_pre, rotation_params, align=align, IS_DEBUG=IS_DEBUG,
+ top_distance_max=top_distance_max, top_distance_min=top_distance_min)
+
+ # Step7. 排版照参数获取
+ typography_arr, typography_rotate = generate_layout_photo(input_height=size[0], input_width=size[1])
+
+ return result_image_hd, result_image_standard, typography_arr, typography_rotate, \
+ clothing_params["relative_x"], clothing_params["relative_y"], clothing_params["w"], clothing_params["h"], 1
+
+
+if __name__ == "__main__":
+ HY_HUMAN_MATTING_WEIGHTS_PATH = "./hivision_modnet.onnx"
+ sess = onnxruntime.InferenceSession(HY_HUMAN_MATTING_WEIGHTS_PATH)
+
+ input_image = cv2.imread("test.jpg")
+
+ result_image_hd, result_image_standard, typography_arr, typography_rotate, \
+ _, _, _, _, _ = IDphotos_create(input_image,
+ size=(413, 295),
+ head_measure_ratio=0.2,
+ head_height_ratio=0.45,
+ align=True,
+ beauty=True,
+ fd68=None,
+                                    human_sess=sess,
+                                    IS_DEBUG=False,
+ top_distance_max=0.12,
+ top_distance_min=0.10)
+ cv2.imwrite("result_image_hd.png", result_image_hd)
diff --git a/hivision_modnet.onnx b/hivision_modnet.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..bb7efedf84eab9ac5b267fe7205b9c8a1ff82be7
--- /dev/null
+++ b/hivision_modnet.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e0cb9a2a841b426dd0daf1a788ec398dab059bc039041d62b15636c0783bc56
+size 25888609
diff --git a/hivision_modnet.onnx.1 b/hivision_modnet.onnx.1
new file mode 100644
index 0000000000000000000000000000000000000000..bb7efedf84eab9ac5b267fe7205b9c8a1ff82be7
--- /dev/null
+++ b/hivision_modnet.onnx.1
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e0cb9a2a841b426dd0daf1a788ec398dab059bc039041d62b15636c0783bc56
+size 25888609
diff --git a/hivisionai/__init__.py b/hivisionai/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/hivisionai/__pycache__/__init__.cpython-310.pyc b/hivisionai/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..875c9412c29ae4499cc69b86bdad0dec63d6bdfb
Binary files /dev/null and b/hivisionai/__pycache__/__init__.cpython-310.pyc differ
diff --git a/hivisionai/__pycache__/__init__.cpython-38.pyc b/hivisionai/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8bd19211ab5bb6dbeb972b353d638a62965b1908
Binary files /dev/null and b/hivisionai/__pycache__/__init__.cpython-38.pyc differ
diff --git a/hivisionai/__pycache__/app.cpython-310.pyc b/hivisionai/__pycache__/app.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ef691afce32ee9d6bd0eefd547d5fab94c7398e6
Binary files /dev/null and b/hivisionai/__pycache__/app.cpython-310.pyc differ
diff --git a/hivisionai/app.py b/hivisionai/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..c22fbf6ad638e1b8f2abc0208d77677f0ffc1b23
--- /dev/null
+++ b/hivisionai/app.py
@@ -0,0 +1,452 @@
+# -*- coding: utf-8 -*-
+
+"""
+@Time : 2022/8/27 14:17
+@Author : cuny
+@File : app.py
+@Software : PyCharm
+@Introduce:
+查看包版本等一系列操作
+"""
+import os
+import sys
+import json
+import shutil
+import zipfile
+import requests
+from argparse import ArgumentParser
+from importlib.metadata import version
+try: # 加上这个try的原因在于本地环境和云函数端的import形式有所不同
+ from qcloud_cos import CosConfig
+ from qcloud_cos import CosS3Client
+except ImportError:
+ try:
+ from qcloud_cos_v5 import CosConfig
+ from qcloud_cos_v5 import CosS3Client
+ from qcloud_cos.cos_exception import CosServiceError
+ except ImportError:
+ raise ImportError("请下载腾讯云COS相关代码包:pip install cos-python-sdk-v5")
+
+
+class HivisionaiParams(object):
+ """
+ 定义一些基本常量
+ """
+ # 文件所在路径
+ # 包名称
+ package_name = "HY-sdk"
+ # 腾讯云相关变量
+ region = "ap-beijing"
+ zip_key = "HY-sdk/" # zip存储的云端文件夹路径,这里改了publish.yml也需要更改
+ # 云端用户配置,如果在cloud_config_save不存在,就需要下载此文件
+ user_url = "https://hy-sdk-config-1305323352.cos.ap-beijing.myqcloud.com/sdk-user/user_config.json"
+ bucket = "cloud-public-static-1306602019"
+ # 压缩包类型
+ file_format = ".zip"
+ # 下载路径(.hivisionai文件夹路径)
+ download_path = os.path.expandvars('$HOME')
+ # zip文件、zip解压缩文件的存放路径
+ save_folder = f"{os.path.expandvars('$HOME')}/.hivisionai/sdk"
+ # 腾讯云配置文件存放路径
+ cloud_config_save = f"{os.path.expandvars('$HOME')}/.hivisionai/user_config.json"
+ # 项目路径
+ hivisionai_path = os.path.dirname(os.path.dirname(__file__))
+ # 使用hivisionai的路径
+ getcwd = os.getcwd()
+ # HY-func的依赖配置
+ # 每个依赖会包含三个参数,保存路径(save_path,相对于HY_func的路径)、下载url(url)
+ functionDependence = {
+ "configs": [
+ # --------- 配置文件部分
+ # _lib
+ {
+ "url": "https://hy-sdk-config-1305323352.cos.ap-beijing.myqcloud.com/hy-func/_lib/config/aliyun-human-matting-api.json",
+ "save_path": "_lib/config/aliyun-human-matting-api.json"
+ },
+ {
+ "url": "https://hy-sdk-config-1305323352.cos.ap-beijing.myqcloud.com/hy-func/_lib/config/megvii-face-plus-api.json",
+ "save_path": "_lib/config/megvii-face-plus-api.json"
+ },
+ {
+ "url": "https://hy-sdk-config-1305323352.cos.ap-beijing.myqcloud.com/hy-func/_lib/config/volcano-face-change-api.json",
+ "save_path": "_lib/config/volcano-face-change-api.json"
+ },
+ # _service
+ {
+ "url": "https://hy-sdk-config-1305323352.cos.ap-beijing.myqcloud.com/hy-func/_service/config/func_error_conf.json",
+ "save_path": "_service/utils/config/func_error_conf.json"
+ },
+ {
+ "url": "https://hy-sdk-config-1305323352.cos.ap-beijing.myqcloud.com/hy-func/_service/config/service_config.json",
+ "save_path": "_service/utils/config/service_config.json"
+ },
+ # --------- 模型部分
+ # 模型部分存储在Notion文档当中
+ # https://www.notion.so/HY-func-cc6cc41ba6e94b36b8fa5f5d67d1683f
+ ],
+ "weights": "https://www.notion.so/HY-func-cc6cc41ba6e94b36b8fa5f5d67d1683f"
+ }
+
+
+class HivisionaiUtils(object):
+ """
+ 本类为一些基本工具类,包含代码复用相关内容
+ """
+ @staticmethod
+ def get_client():
+ """获取cos客户端对象"""
+ def get_secret():
+ # 首先判断cloud_config_save下是否存在
+ if not os.path.exists(HivisionaiParams.cloud_config_save):
+ print("Downloading user_config...")
+ resp = requests.get(HivisionaiParams.user_url)
+ open(HivisionaiParams.cloud_config_save, "wb").write(resp.content)
+ config = json.load(open(HivisionaiParams.cloud_config_save, "r"))
+ return config["secret_id"], config["secret_key"]
+ # todo 接入HY-Auth-Sync
+ secret_id, secret_key = get_secret()
+ return CosS3Client(CosConfig(Region=HivisionaiParams.region, Secret_id=secret_id, Secret_key=secret_key))
+
+ def get_all_versions(self):
+ """获取云端的所有版本号"""
+ def getAllVersion_base():
+ """
+ 返回cos存储桶内部的某个文件夹的内部名称
+ ps:如果需要修改默认的存储桶配置,请在代码运行的时候加入代码 s.bucket = 存储桶名称 (s是对象实例)
+ 返回的内容存储在response["Content"],不过返回的数据大小是有限制的,具体内容还是请看官方文档。
+ Returns:
+ [版本列表]
+ """
+ resp = client.list_objects(
+ Bucket=HivisionaiParams.bucket,
+ Prefix=HivisionaiParams.zip_key,
+ Marker=marker
+ )
+ versions_list.extend([x["Key"].split("/")[-1].split(HivisionaiParams.file_format)[0] for x in resp["Contents"] if int(x["Size"]) > 0])
+ if resp['IsTruncated'] == 'false': # 接下来没有数据了,就退出
+ return ""
+ else:
+ return resp['NextMarker']
+ client = self.get_client()
+ marker = ""
+ versions_list = []
+ while True: # 轮询
+ try:
+ marker = getAllVersion_base()
+ except KeyError as e:
+ print(e)
+ raise
+ if len(marker) == 0: # 没有数据了
+ break
+ return versions_list
+
+ def get_newest_version(self):
+ """获取最新的版本号"""
+ versions_list = self.get_all_versions()
+ # reverse=True,降序
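+        # Three stable sorts (patch, then minor, then major) leave the list in
+        # descending semantic-version order, e.g. ["1.2.10", "1.10.2", "2.0.1"]
+        # ends up as ["2.0.1", "1.10.2", "1.2.10"], so index 0 is the newest.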
+ versions_list.sort(key=lambda x: int(x.split(".")[-1]), reverse=True)
+ versions_list.sort(key=lambda x: int(x.split(".")[-2]), reverse=True)
+ versions_list.sort(key=lambda x: int(x.split(".")[-3]), reverse=True)
+ return versions_list[0]
+
+ def download_version(self, v):
+ """
+ 在存储桶中下载文件,将下载好的文件解压至本地
+ Args:
+ v: 版本号,x.x.x
+
+ Returns:
+ None
+ """
+ file_name = v + HivisionaiParams.file_format
+ client = self.get_client()
+ print(f"Download to {HivisionaiParams.save_folder}...")
+ try:
+            resp = client.get_object(HivisionaiParams.bucket, HivisionaiParams.zip_key + file_name)
+ contents = resp["Body"].get_raw_stream().read()
+ except CosServiceError:
+            print(f"[{file_name}] does not exist, please check your version!")
+ sys.exit()
+ if not os.path.exists(HivisionaiParams.save_folder):
+ os.makedirs(HivisionaiParams.save_folder)
+ open(os.path.join(HivisionaiParams.save_folder, file_name), "wb").write(contents)
+ print("Download success!")
+
+ @staticmethod
+ def download_dependence(path=None):
+ """
+ 一键下载HY-sdk所需要的所有依赖,需要注意的是,本方法必须在运行pip install之后使用(运行完pip install之后才会出现hivisionai文件夹)
+ Args:
+ path: 文件路径,精确到hivisionai文件夹的上一个目录,如果为None,则默认下载到python环境下hivisionai安装的目录
+
+ Returns:
+ 下载相应内容到指定位置
+ """
+ # print("指定的下载路径:", path) # 此时在path路径下必然存在一个hivisionai文件夹
+ # print("系统安装的hivisionai库的路径:", HivisionaiParams.hivisionai_path)
+ print("Dependence downloading...")
+ if path is None:
+ path = HivisionaiParams.hivisionai_path
+ # ----------------下载mtcnn模型文件
+ mtcnn_path = os.path.join(path, "hivisionai/hycv/mtcnn_onnx/weights")
+ base_url = "https://linimages.oss-cn-beijing.aliyuncs.com/"
+ onnx_files = ["pnet.onnx", "rnet.onnx", "onet.onnx"]
+ print(f"Downloading mtcnn model in {mtcnn_path}")
+ if not os.path.exists(mtcnn_path):
+ os.mkdir(mtcnn_path)
+ for onnx_file in onnx_files:
+ if not os.path.exists(os.path.join(mtcnn_path, onnx_file)):
+ # download onnx model
+ onnx_url = base_url + onnx_file
+ print("Downloading Onnx Model in:", onnx_url)
+ r = requests.get(onnx_url, stream=True)
+ if r.status_code == 200:
+ open(os.path.join(mtcnn_path, onnx_file), 'wb').write(r.content) # 将内容写入文件
+ print(f"Download finished -- {onnx_file}")
+ del r
+ # ----------------
+ print("Dependence download finished...")
+
+
+class HivisionaiApps(object):
+ """
+ 本类为app对外暴露的接口,为了代码规整性,这里使用类来对暴露接口进行调整
+ """
+ @staticmethod
+ def show_cloud_version():
+ """查看在cos中的所有HY-sdk版本"""
+ print("Connect to COS...")
+ versions_list = hivisionai_utils.get_all_versions()
+ # reverse=True,降序
+ versions_list.sort(key=lambda x: int(x.split(".")[-1]), reverse=True)
+ versions_list.sort(key=lambda x: int(x.split(".")[-2]), reverse=True)
+ versions_list.sort(key=lambda x: int(x.split(".")[-3]), reverse=True)
+ if len(versions_list) == 0:
+ print("There is no version currently, please release it first!")
+ sys.exit()
+ versions = "The currently existing versions (Keep 10): \n"
+ for i, v in enumerate(versions_list):
+ versions += str(v) + " "
+ if i == 9:
+ break
+ print(versions)
+
+ @staticmethod
+ def upgrade(v: str, enforce: bool = False, save_cached: bool = False):
+ """
+ 自动升级HY-sdk到指定版本
+ Args:
+ v: 指定的版本号,格式为x.x.x
+ enforce: 是否需要强制执行更新命令
+ save_cached: 是否保存下载的wheel文件,默认为否
+ Returns:
+ None
+ """
+ def check_format():
+ # noinspection PyBroadException
+ try:
+ major, minor, patch = v.split(".")
+ int(major)
+ int(minor)
+ int(patch)
+            except Exception as e:
+                print(f"Illegal version number!\n{e}")
+                sys.exit()
+ print("Upgrading, please wait a moment...")
+ if v == "-1":
+ v = hivisionai_utils.get_newest_version()
+ # 检查format的格式
+ check_format()
+ if v == version(HivisionaiParams.package_name) and not enforce:
+ print(f"Current version: {v} already exists, skip installation.")
+ sys.exit()
+ hivisionai_utils.download_version(v)
+ # 下载完毕(下载至save_folder),解压文件
+ target_zip = os.path.join(HivisionaiParams.save_folder, f"{v}.zip")
+ assert zipfile.is_zipfile(target_zip), "Decompression failed, and the target was not a zip file."
+ new_dir = target_zip.replace('.zip', '') # 解压的文件名
+ if os.path.exists(new_dir): # 判断文件夹是否存在
+ shutil.rmtree(new_dir)
+ os.mkdir(new_dir) # 新建文件夹
+ f = zipfile.ZipFile(target_zip)
+ f.extractall(new_dir) # 提取zip文件
+ print("Decompressed, begin to install...")
+ os.system(f'pip3 install {os.path.join(new_dir, "**.whl")}')
+ # 开始自动下载必要的模型依赖
+ hivisionai_utils.download_dependence()
+        # 安装完毕,如果save_cached为假,删除"$HOME/.hivisionai/sdk"内部的所有文件元素
+        if save_cached is False:
+            os.system(f'rm -rf {HivisionaiParams.save_folder}/**')
+
+ @staticmethod
+ def export(path):
+ """
+ 输出最新版本的文件到命令运行的path目录
+ Args:
+ path: 用户输入的路径
+
+ Returns:
+ 输出最新的hivisionai到path目录
+ """
+ # print(f"当前路径: {os.path.join(HivisionaiParams.getcwd, path)}")
+ # print(f"文件路径: {os.path.dirname(__file__)}")
+ export_path = os.path.join(HivisionaiParams.getcwd, path)
+ # 判断输出路径存不存在,如果不存在,就报错
+        assert os.path.exists(export_path), f"{export_path} does not exist!"
+ v = hivisionai_utils.get_newest_version()
+ # 下载文件到.hivisionai/sdk当中
+ hivisionai_utils.download_version(v)
+ # 下载完毕(下载至save_folder),解压文件
+ target_zip = os.path.join(HivisionaiParams.save_folder, f"{v}.zip")
+ assert zipfile.is_zipfile(target_zip), "Decompression failed, and the target was not a zip file."
+ new_dir = os.path.basename(target_zip.replace('.zip', '')) # 解压的文件名
+ new_dir = os.path.join(export_path, new_dir) # 解压的文件路径
+ if os.path.exists(new_dir): # 判断文件夹是否存在
+ shutil.rmtree(new_dir)
+ os.mkdir(new_dir) # 新建文件夹
+ f = zipfile.ZipFile(target_zip)
+ f.extractall(new_dir) # 提取zip文件
+ print("Decompressed, begin to export...")
+ # 强制删除bin/hivisionai和hivisionai/以及HY_sdk-**
+ bin_path = os.path.join(export_path, "bin")
+ hivisionai_path = os.path.join(export_path, "hivisionai")
+ sdk_path = os.path.join(export_path, "HY_sdk-**")
+ os.system(f"rm -rf {bin_path} {hivisionai_path} {sdk_path}")
+ # 删除完毕,开始export
+ os.system(f'pip3 install {os.path.join(new_dir, "**.whl")} -t {export_path}')
+ hivisionai_utils.download_dependence(export_path)
+ # 将下载下来的文件夹删除
+ os.system(f'rm -rf {target_zip} && rm -rf {new_dir}')
+ print("Done.")
+
+ @staticmethod
+ def hy_func_init(force):
+ """
+ 在HY-func目录下使用hivisionai --init,可以自动将需要的依赖下载到指定位置
+ 不过对于比较大的模型——修复模型而言,需要手动下载
+ Args:
+ force: 如果force为True,则会强制重新下载所有的内容,包括修复模型这种比较大的模型
+ Returns:
+ 程序执行完毕,会将一些必要的依赖也下载完毕
+ """
+ cwd = HivisionaiParams.getcwd
+ # 判断当前文件夹是否是HY-func
+ dirName = os.path.basename(cwd)
+ assert dirName == "HY-func", "请在正确的文件目录下初始化HY-func!"
+ # 需要下载的内容会存放在HivisionaiParams的functionDependence变量下
+ functionDependence = HivisionaiParams.functionDependence
+ # 下载配置文件
+ configs = functionDependence["configs"]
+ print("正在下载配置文件...")
+ for config in configs:
+ if not force and os.path.exists(config['save_path']):
+ print(f"[pass]: {os.path.basename(config['url'])}")
+ continue
+ print(f"[Download]: {config['url']}")
+ resp = requests.get(config['url'])
+ # json文件存储在text区域,但是其他的不一定
+ open(os.path.join(cwd, config['save_path']), 'w').write(resp.text)
+ # 其他文件,提示访问notion文档
+ print(f"[NOTICE]: 一切准备就绪,请访问下面的文档下载剩下的模型文件:\n{functionDependence['weights']}")
+
+ @staticmethod
+ def hy_func_deploy(functionName: str = None, functionPath: str = None):
+ """
+ 在HY-func目录下使用此命令,并且随附功能函数的名称,就可以将HY-func的部署版放到桌面上
+ 但是需要注意的是,本方式不适合修复功能使用,修复功能依旧需要手动制作镜像
+ Args:
+ functionName: 功能函数名称
+ functionPath: 需要注册的HY-func路径
+
+ Returns:
+ 程序执行完毕,桌面会出现一个同名文件夹
+ """
+ # 为了代码撰写的方便,这里仅仅把模型文件删除,其余配置文件保留
+ # 为了实现在任意位置输入hivisionai --deploy funcName都能成功,在使用前需要在.hivisionai/user_config.json中注册
+ # print(functionName, functionPath)
+ if functionPath is not None:
+ # 更新/添加路径
+ # functionPath为相对于使用路径的路径
+ assert os.path.basename(functionPath) == "HY-func", "所指向路径非HY-func!"
+ func_path = os.path.join(HivisionaiParams.getcwd, functionPath)
+            assert os.path.exists(func_path), f"路径不存在: {func_path}"
+ # functionPath的路径写到user_config当中
+ user_config = json.load(open(HivisionaiParams.cloud_config_save, 'rb'))
+ user_config["func_path"] = func_path
+ open(HivisionaiParams.cloud_config_save, 'w').write(json.dumps(user_config))
+ print("HY-func全局路径保存成功!")
+ try:
+ user_config = json.load(open(HivisionaiParams.cloud_config_save, 'rb'))
+ func_path = user_config['func_path']
+ except KeyError:
+ return print("请先使用-p命令注册全局HY-func路径!")
+ # 此时func_path必然存在
+ # print(os.listdir(func_path))
+ assert functionName in os.listdir(func_path), functionName + "功能不存在!"
+ func_path_deploy = os.path.join(func_path, functionName)
+ # 开始复制文件到指定目录
+ # 我们默认移动到Desktop目录下,如果没有此目录,需要先创建一个
+ target_dir = os.path.join(HivisionaiParams.download_path, "Desktop")
+ assert os.path.exists(target_dir), target_dir + "文件路径不存在,你需要先创建一下!"
+ # 开始移动
+ target_dir = os.path.join(target_dir, functionName)
+ print("正在复制需要部署的文件...")
+ os.system(f"rm -rf {target_dir}")
+ os.system(f'cp -rf {func_path_deploy} {target_dir}')
+ os.system(f"cp -rf {os.path.join(func_path, '_lib')} {target_dir}")
+ os.system(f"cp -rf {os.path.join(func_path, '_service')} {target_dir}")
+ # 生成最新的hivisionai
+ print("正在生成hivisionai代码包...")
+ os.system(f'hivisionai -t {target_dir}')
+ # 移动完毕,删除模型文件
+ print("移动完毕,正在删除不需要的文件...")
+ # 模型文件
+ os.system(f"rm -rf {os.path.join(target_dir, '_lib', 'weights', '**')}")
+ # hivisionai生成时的多余文件
+ os.system(f"rm -rf {os.path.join(target_dir, 'bin')} {os.path.join(target_dir, 'HY_sdk**')}")
+ print("部署文件生成成功,你可以开始部署了!")
+
+
+hivisionai_utils = HivisionaiUtils()
+
+
+def entry_point():
+ parser = ArgumentParser()
+ # 查看版本号
+ parser.add_argument("-v", "--version", action="store_true", help="View the current HY-sdk version, which does not represent the final cloud version.")
+ # 自动更新
+ parser.add_argument("-u", "--upgrade", nargs='?', const="-1", type=str, help="Automatically update HY-sdk to the latest version")
+ # 查找云端的HY-sdk版本
+ parser.add_argument("-l", "--list", action="store_true", help="Find HY-sdk versions of the cloud, and keep up to ten")
+ # 下载云端的版本到本地路径
+ parser.add_argument("-t", "--export", nargs='?', const="./", help="Add a path parameter to automatically download the latest version of sdk to this path. If there are no parameters, the default is the current path")
+ # 强制更新附带参数,当一个功能需要强制执行一遍的时候,需要附带此参数
+ parser.add_argument("-f", "--force", action="store_true", help="Enforcement of other functions, execution of a single parameter is meaningless")
+ # 初始化HY-func
+ parser.add_argument("--init", action="store_true", help="Initialization HY-func")
+ # 部署HY-func
+ parser.add_argument("-d", "--deploy", nargs='?', const="-1", type=str, help="Deploy HY-func")
+ # 涉及注册一些自定义内容的时候,需要附带此参数,并写上自定义内容
+ parser.add_argument("-p", "--param", nargs='?', const="-1", type=str, help="When registering some custom content, you need to attach this parameter and write the custom content.")
+ args = parser.parse_args()
+ if args.version:
+ print(version(HivisionaiParams.package_name))
+ sys.exit()
+ if args.upgrade:
+ HivisionaiApps.upgrade(args.upgrade, args.force)
+ sys.exit()
+ if args.list:
+ HivisionaiApps.show_cloud_version()
+ sys.exit()
+ if args.export:
+ HivisionaiApps.export(args.export)
+ sys.exit()
+ if args.init:
+ HivisionaiApps.hy_func_init(args.force)
+ sys.exit()
+ if args.deploy:
+ HivisionaiApps.hy_func_deploy(args.deploy, args.param)
+
+
+if __name__ == "__main__":
+ entry_point()
diff --git a/hivisionai/hyService/__init__.py b/hivisionai/hyService/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/hivisionai/hyService/__pycache__/__init__.cpython-310.pyc b/hivisionai/hyService/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a74559611b70d1d813f3dc19d5a9b37862a6c529
Binary files /dev/null and b/hivisionai/hyService/__pycache__/__init__.cpython-310.pyc differ
diff --git a/hivisionai/hyService/__pycache__/__init__.cpython-38.pyc b/hivisionai/hyService/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9168dc7711a582d4534e7eaf6cdbf68bdbe989d1
Binary files /dev/null and b/hivisionai/hyService/__pycache__/__init__.cpython-38.pyc differ
diff --git a/hivisionai/hyService/__pycache__/cloudService.cpython-310.pyc b/hivisionai/hyService/__pycache__/cloudService.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bf7ed6fcac5a05adfadbf7cf3a4f83777ba1dcbe
Binary files /dev/null and b/hivisionai/hyService/__pycache__/cloudService.cpython-310.pyc differ
diff --git a/hivisionai/hyService/__pycache__/dbTools.cpython-310.pyc b/hivisionai/hyService/__pycache__/dbTools.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4e6852499feefbe3772ca1f87a0ebfacf29c096d
Binary files /dev/null and b/hivisionai/hyService/__pycache__/dbTools.cpython-310.pyc differ
diff --git a/hivisionai/hyService/__pycache__/error.cpython-310.pyc b/hivisionai/hyService/__pycache__/error.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1792c5262e3116edc43c1c146ed5ec1e2246ee3a
Binary files /dev/null and b/hivisionai/hyService/__pycache__/error.cpython-310.pyc differ
diff --git a/hivisionai/hyService/__pycache__/error.cpython-38.pyc b/hivisionai/hyService/__pycache__/error.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..043a7abdfbde3eef664bbec1a0446bf949a18bc8
Binary files /dev/null and b/hivisionai/hyService/__pycache__/error.cpython-38.pyc differ
diff --git a/hivisionai/hyService/__pycache__/serviceTest.cpython-310.pyc b/hivisionai/hyService/__pycache__/serviceTest.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a47acb353812ba6425b03da62756496b680ea87a
Binary files /dev/null and b/hivisionai/hyService/__pycache__/serviceTest.cpython-310.pyc differ
diff --git a/hivisionai/hyService/__pycache__/utils.cpython-310.pyc b/hivisionai/hyService/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c0211525810579a61ef68b2a5a3506373e783281
Binary files /dev/null and b/hivisionai/hyService/__pycache__/utils.cpython-310.pyc differ
diff --git a/hivisionai/hyService/cloudService.py b/hivisionai/hyService/cloudService.py
new file mode 100644
index 0000000000000000000000000000000000000000..f987e6c84dd0741dbea03b6a2b5f7fdea12e0ce0
--- /dev/null
+++ b/hivisionai/hyService/cloudService.py
@@ -0,0 +1,406 @@
+"""
+焕影小程序功能服务端的基本工具函数,以类的形式封装
+"""
+try: # 加上这个try的原因在于本地环境和云函数端的import形式有所不同
+ from qcloud_cos import CosConfig
+ from qcloud_cos import CosS3Client
+except ImportError:
+ try:
+ from qcloud_cos_v5 import CosConfig
+ from qcloud_cos_v5 import CosS3Client
+ except ImportError:
+ raise ImportError("请下载腾讯云COS相关代码包:pip install cos-python-sdk-v5")
+import requests
+import datetime
+import json
+from .error import ProcessError
+import os
+local_path_ = os.path.dirname(__file__)
+
+
+class GetConfig(object):
+ @staticmethod
+ def hy_sdk_client(Id:str, Key:str):
+ # 从cos中寻找文件
+ REGION: str = 'ap-beijing'
+ TOKEN = None
+ SCHEME: str = 'https'
+ BUCKET: str = 'hy-sdk-config-1305323352'
+ client_config = CosConfig(Region=REGION,
+ SecretId=Id,
+ SecretKey=Key,
+ Token=TOKEN,
+ Scheme=SCHEME)
+ return CosS3Client(client_config), BUCKET
+
+ def load_json(self, path:str, default_download=False):
+ try:
+ if os.path.isdir(path):
+ raise ProcessError("请输入具体的配置文件路径,而非文件夹!")
+ if default_download is True:
+ print(f"\033[34m 默认强制重新下载配置文件...\033[0m")
+ raise FileNotFoundError
+ with open(path) as f:
+ config = json.load(f)
+ return config
+ except FileNotFoundError:
+ dir_name = os.path.dirname(path)
+ try:
+ os.makedirs(dir_name)
+ except FileExistsError:
+ pass
+ base_name = os.path.basename(path)
+ print(f"\033[34m 正在从COS中下载配置文件...\033[0m")
+ print(f"\033[31m 请注意,接下来会在{dir_name}路径下生成文件{base_name}...\033[0m")
+ Id = input("请输入SecretId:")
+ Key = input("请输入SecretKey:")
+ client, bucket = self.hy_sdk_client(Id, Key)
+ data_bytes = client.get_object(Bucket=bucket,Key=base_name)["Body"].get_raw_stream().read()
+ data = json.loads(data_bytes.decode("utf-8"))
+ # data["SecretId"] = Id # 未来可以把这个加上
+ # data["SecretKey"] = Key
+ with open(path, "w") as f:
+ data_str = json.dumps(data, ensure_ascii=False)
+ # 如果 ensure_ascii 是 true (即默认值),输出保证将所有输入的非 ASCII 字符转义。
+ # 如果 ensure_ascii 是 false,这些字符会原样输出。
+ f.write(data_str)
+ f.close()
+ print(f"\033[32m 配置文件保存成功\033[0m")
+ return data
+ except json.decoder.JSONDecodeError:
+ print(f"\033[31m WARNING: 配置文件为空!\033[0m")
+ return {}
+
+ def load_file(self, cloud_path:str, local_path:str):
+ """
+ 从COS中下载文件到本地,本函数将会被默认执行的,在使用的时候建议加一些限制.
+ :param cloud_path: 云端的文件路径
+ :param local_path: 将云端文件保存在本地的路径
+ """
+ if os.path.isdir(cloud_path):
+ raise ProcessError("请输入具体的云端文件路径,而非文件夹!")
+ if os.path.isdir(local_path):
+ raise ProcessError("请输入具体的本地文件路径,而非文件夹!")
+ dir_name = os.path.dirname(local_path)
+ base_name = os.path.basename(local_path)
+ try:
+ os.makedirs(dir_name)
+ except FileExistsError:
+ pass
+ cloud_name = os.path.basename(cloud_path)
+ print(f"\033[31m 请注意,接下来会在{dir_name}路径下生成文件{base_name}\033[0m")
+ Id = input("请输入SecretId:")
+ Key = input("请输入SecretKey:")
+ client, bucket = self.hy_sdk_client(Id, Key)
+ print(f"\033[34m 正在从COS中下载文件: {cloud_name}, 此过程可能耗费一些时间...\033[0m")
+ data_bytes = client.get_object(Bucket=bucket,Key=cloud_path)["Body"].get_raw_stream().read()
+ # data["SecretId"] = Id # 未来可以把这个加上
+ # data["SecretKey"] = Key
+ with open(local_path, "wb") as f:
+ f.write(data_bytes)
+ f.close()
+ print(f"\033[32m 文件保存成功\033[0m")
+
+
+class CosConf(GetConfig):
+ """
+ 从安全的角度出发,将一些默认配置文件上传至COS中,接下来使用COS和它的子类的时候,在第一次使用时需要输入Cuny给的id和key
+ 用于连接cos存储桶,下载配置文件.
+ 当然,在service_default_download = False的时候,如果在运行路径下已经有conf/service_config.json文件了,
+ 那么就不用再次下载了,也不用输入id和key
+ 事实上这只需要运行一次,因为配置文件将会被下载至源码文件夹中
+ 如果要自定义路径,请在继承的子类中编写__init__函数,将service_path定向到指定路径
+ """
+ def __init__(self) -> None:
+ # 下面这些参数是类的共享参数
+ self.__SECRET_ID: str = None # 服务的id
+ self.__SECRET_KEY: str = None # 服务的key
+ self.__REGION: str = None # 服务的存储桶地区
+ self.__TOKEN: str = None # 服务的token,目前一直是None
+ self.__SCHEME: str = None # 服务的访问协议,默认实际上是https
+ self.__BUCKET: str = None # 服务的存储桶
+ self.__SERVICE_CONFIG: dict = None # 服务的配置文件
+ self.service_path: str = f"{local_path_}/conf/service_config.json"
+ # 配置文件路径,默认是函数运行的路径下的conf文件夹
+ self.service_default_download = False # 是否在每次访问配置的时候都重新下载文件
+
+ @property
+ def service_config(self):
+ if self.__SERVICE_CONFIG is None or self.service_default_download is True:
+ self.__SERVICE_CONFIG = self.load_json(self.service_path, self.service_default_download)
+ return self.__SERVICE_CONFIG
+
+ @property
+ def client(self):
+ client_config = CosConfig(Region=self.region,
+ SecretId=self.secret_id,
+ SecretKey=self.secret_key,
+ Token=self.token,
+ Scheme=self.scheme)
+ return CosS3Client(client_config)
+
+ def get_key(self, key:str):
+ try:
+ data = self.service_config[key]
+ if data == "None":
+ return None
+ else:
+ return data
+ except KeyError:
+ print(f"\033[31m没有对应键值{key},默认返回None\033[0m")
+ return None
+
+ @property
+ def secret_id(self):
+ if self.__SECRET_ID is None:
+ self.__SECRET_ID = self.get_key("SECRET_ID")
+ return self.__SECRET_ID
+
+ @secret_id.setter
+ def secret_id(self, value:str):
+ self.__SECRET_ID = value
+
+ @property
+ def secret_key(self):
+ if self.__SECRET_KEY is None:
+ self.__SECRET_KEY = self.get_key("SECRET_KEY")
+ return self.__SECRET_KEY
+
+ @secret_key.setter
+ def secret_key(self, value:str):
+ self.__SECRET_KEY = value
+
+ @property
+ def region(self):
+ if self.__REGION is None:
+ self.__REGION = self.get_key("REGION")
+ return self.__REGION
+
+ @region.setter
+ def region(self, value:str):
+ self.__REGION = value
+
+ @property
+ def token(self):
+ # if self.__TOKEN is None:
+ # self.__TOKEN = self.get_key("TOKEN")
+ # 这里可以注释掉
+ return self.__TOKEN
+
+ @token.setter
+ def token(self, value:str):
+        self.__TOKEN = value
+
+ @property
+ def scheme(self):
+ if self.__SCHEME is None:
+ self.__SCHEME = self.get_key("SCHEME")
+ return self.__SCHEME
+
+ @scheme.setter
+ def scheme(self, value:str):
+ self.__SCHEME = value
+
+ @property
+ def bucket(self):
+ if self.__BUCKET is None:
+ self.__BUCKET = self.get_key("BUCKET")
+ return self.__BUCKET
+
+ @bucket.setter
+ def bucket(self, value):
+ self.__BUCKET = value
+
+ def downloadFile_COS(self, key, bucket:str=None, if_read:bool=False):
+ """
+ 从COS下载对象(二进制数据), 如果下载失败就返回None
+ """
+ CosBucket = self.bucket if bucket is None else bucket
+ try:
+ # 将本类的Debug继承给抛弃了
+ # self.debug_print(f"Download from {CosBucket}", font_color="blue")
+ obj = self.client.get_object(
+ Bucket=CosBucket,
+ Key=key
+ )
+ if if_read is True:
+ data = obj["Body"].get_raw_stream().read() # byte
+ return data
+ else:
+ return obj
+ except Exception as e:
+ print(f"\033[31m下载失败! 错误描述:{e}\033[0m")
+ return None
+
+ def showFileList_COS_base(self, key, bucket, marker:str=""):
+ """
+ 返回cos存储桶内部的某个文件夹的内部名称
+ :param key: cos云端的存储路径
+ :param bucket: cos存储桶名称,如果没指定名称(None)就会寻找默认的存储桶
+ :param marker: 标记,用于记录上次查询到哪里了
+ ps:如果需要修改默认的存储桶配置,请在代码运行的时候加入代码 s.bucket = 存储桶名称 (s是对象实例)
+ 返回的内容存储在response["Content"],不过返回的数据大小是有限制的,具体内容还是请看官方文档。
+ """
+ response = self.client.list_objects(
+ Bucket=bucket,
+ Prefix=key,
+ Marker=marker
+ )
+ return response
+
+ def showFileList_COS(self, key, bucket:str=None)->list:
+ """
+ 实现查询存储桶中所有对象的操作,因为cos的sdk有返回数据包大小的限制,所以我们需要进行一定的改动
+ """
+ marker = ""
+ file_list = []
+ CosBucket = self.bucket if bucket is None else bucket
+ while True: # 轮询
+ response = self.showFileList_COS_base(key, CosBucket, marker)
+ try:
+ file_list.extend(response["Contents"])
+ except KeyError as e:
+ print(e)
+ raise
+ if response['IsTruncated'] == 'false': # 接下来没有数据了,就退出
+ break
+ marker = response['NextMarker']
+ return file_list
+
+ def uploadFile_COS(self, buffer, key, bucket:str=None):
+ """
+ 从COS上传数据,需要注意的是必须得是二进制文件
+ """
+ CosBucket = self.bucket if bucket is None else bucket
+ try:
+ self.client.put_object(
+ Bucket=CosBucket,
+ Body=buffer,
+ Key=key
+ )
+ return True
+ except Exception as e:
+ print(e)
+ return False
+
+
+class FuncDiary(CosConf):
+ filter_dict = {"60a5e13da00e6e0001fd53c8": "Cuny",
+ "612c290f3a9af4000170faad": "守望平凡",
+ "614de96e1259260001506d6c": "林泽毅-焕影一新"}
+
+ def __init__(self, func_name: str, uid: str, error_conf_path: str = f"{local_path_}/conf/func_error_conf.json"):
+ """
+ 日志类的实例化
+ Args:
+ func_name: 功能名称,影响了日志投递的路径
+ """
+ super().__init__()
+ # 配置文件路径,默认是函数运行的路径下的conf文件夹
+ self.service_path: str = os.path.join(os.path.dirname(error_conf_path), "service_config.json")
+ self.error_dict = self.load_json(path=error_conf_path)
+ self.__up: str = f"wx/invokeFunction_c/{datetime.datetime.now().strftime('%Y/%m/%d/%H')}/{func_name}/"
+ self.func_name: str = func_name
+ # 下面这个属性是的日志名称的前缀
+ self.__start_time = datetime.datetime.now().timestamp()
+ h_point = datetime.datetime.strptime(datetime.datetime.now().strftime('%Y/%m/%d/%H'), '%Y/%m/%d/%H')
+ h_point_timestamp = h_point.timestamp()
+ self.__prefix = int(self.__start_time - h_point_timestamp).__str__() + "_"
+ self.__uid = uid
+ self.__diary = None
+
+ def __str__(self):
+ return f"<{self.func_name}> DIARY for {self.__uid}"
+
+ @property
+ def content(self):
+ return self.__diary
+
+ @content.setter
+ def content(self, value: str):
+ if not isinstance(value, dict):
+ raise TypeError("content 只能是字典!")
+ if "status" in value:
+ raise KeyError("status字段已被默认占用,请在日志信息中更换字段名称!")
+ if self.__diary is None:
+ self.__diary = value
+ else:
+ raise PermissionError("为了减小日志对整体代码的影响,只能被覆写一次!")
+
+ def uploadDiary_COS(self, status_id: str, suffix: str = "", bucket: str = "hy-hcy-data-logs-1306602019"):
+ if self.__diary is None:
+ self.__diary = {"status": self.error_dict[status_id]}
+ if status_id == "0000":
+ self.__up += f"True/{self.__uid}/"
+ else:
+ self.__up += f"False/{self.__uid}/"
+ interval = int(10 * (datetime.datetime.now().timestamp() - self.__start_time))
+ prefix = self.__prefix + status_id + "_" + interval.__str__()
+ self.__diary["status"] = self.error_dict[status_id]
+ name = prefix + "_" + suffix if len(suffix) != 0 else prefix
+ self.uploadFile_COS(buffer=json.dumps(self.__diary), key=self.__up + name, bucket=bucket)
+ print(f"{self}上传成功.")
+
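+# Illustrative sketch of the diary life cycle (function name, uid and status id
+# below are placeholders; status ids are resolved via conf/func_error_conf.json):
+#   diary = FuncDiary(func_name="idphoto", uid="user-123")
+#   diary.content = {"input_size": "413x295"}   # may be set at most once
+#   diary.uploadDiary_COS(status_id="0000")     # "0000" routes the log to the True/ path
+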
+
+class ResponseWebSocket(CosConf):
+ # 网关推送地址
+ __HOST:str = None
+ @property
+ def sendBackHost(self):
+ if self.__HOST is None:
+ self.__HOST = self.get_key("HOST")
+ return self.__HOST
+
+ @sendBackHost.setter
+ def sendBackHost(self, value):
+ self.__HOST = value
+
+    def sendMsg_toWebSocket(self, message, connectionID: str = None):
+ if connectionID is not None:
+ retmsg = {'websocket': {}}
+ retmsg['websocket']['action'] = "data send"
+ retmsg['websocket']['secConnectionID'] = connectionID
+ retmsg['websocket']['dataType'] = 'text'
+ retmsg['websocket']['data'] = json.dumps(message)
+ requests.post(self.sendBackHost, json=retmsg)
+ print("send success!")
+ else:
+ pass
+
+ @staticmethod
+ def create_Msg(status, msg):
+ """
+ 本方法用于创建一个用于发送到WebSocket客户端的数据
+ 输入的信息部分,需要有如下几个参数:
+ 1. id,固定为"return-result"
+ 2. status,如果输入为1则status=true, 如果输入为-1则status=false
+ 3. obj_key, 图片的云端路径, 这是输入的msg本身自带的
+ """
+ msg['status'] = "false" if status == -1 else 'true' # 其实最好还是用bool
+ msg['id'] = "async-back-msg"
+ msg['type'] = "funcType"
+ msg["format"] = "imageType"
+ return msg
+
+
+# 功能服务类
+class Service(ResponseWebSocket):
+ """
+ 服务的主函数,封装了cos上传/下载功能以及与api网关的一键通讯
+ 将类的实例变成一个可被调用的对象,在服务运行的时候,只需要运行该对象即可
+ 当然,因为是类,所以支持继承和修改
+ """
+ @classmethod
+ def process(cls, *args, **kwargs):
+ """
+ 处理函数,在使用的时候请将之重构
+ """
+ pass
+
+ @classmethod
+ def __call__(cls, *args, **kwargs):
+ pass
+
+
diff --git a/hivisionai/hyService/dbTools.py b/hivisionai/hyService/dbTools.py
new file mode 100644
index 0000000000000000000000000000000000000000..643f2a28c9c24232166010ce80d3e14a697f5ba8
--- /dev/null
+++ b/hivisionai/hyService/dbTools.py
@@ -0,0 +1,337 @@
+import os
+import pymongo
+import datetime
+import time
+from .cloudService import GetConfig
+local_path = os.path.dirname(__file__)
+
+
+class DBUtils(GetConfig):
+ """
+ 从安全的角度出发,将一些默认配置文件上传至COS中,接下来使用COS和它的子类的时候,在第一次使用时需要输入Cuny给的id和key
+ 用于连接数据库等对象
+ 当然,在db_default_download = False的时候,如果在运行路径下已经有配置文件了,
+ 那么就不用再次下载了,也不用输入id和key
+ 事实上这只需要运行一次,因为配置文件将会被下载至源码文件夹中
+ 如果要自定义路径,请在继承的子类中编写__init__函数,将service_path定向到指定路径
+ """
+ __BASE_DIR: dict = None
+ __PARAMS_DIR: dict = None
+ db_base_path: str = f"{local_path}/conf/base_config.json"
+ db_params_path: str = f"{local_path}/conf/params.json"
+ db_default_download: bool = False
+
+ @property
+ def base_config(self):
+ if self.__BASE_DIR is None:
+ self.__BASE_DIR = self.load_json(self.db_base_path, self.db_default_download)
+ return self.__BASE_DIR
+
+ @property
+ def db_config(self):
+ return self.base_config["database_config"]
+
+ @property
+ def params_config(self):
+ if self.__PARAMS_DIR is None:
+ self.__PARAMS_DIR = self.load_json(self.db_params_path, self.db_default_download)
+ return self.__PARAMS_DIR
+
+ @property
+ def size_dir(self):
+ return self.params_config["size_config"]
+
+ @property
+ def func_dir(self):
+ return self.params_config["func_config"]
+
+ @property
+ def wx_config(self):
+ return self.base_config["wx_config"]
+
+ def get_dbClient(self):
+ return pymongo.MongoClient(self.db_config["connect_url"])
+
+ @staticmethod
+ def get_time(yyyymmdd=None, delta_date=0):
+ """
+ 给出当前的时间
+ :param yyyymmdd: 以yyyymmdd给出的日期时间
+ :param delta_date: 获取减去delta_day后的时间,默认为0就是当天
+ 时间格式:yyyy_mm_dd
+ """
+ if yyyymmdd is None:
+ now_time = (datetime.datetime.now() - datetime.timedelta(delta_date)).strftime("%Y-%m-%d")
+ return now_time
+ # 输入了yyyymmdd的数据和delta_date,通过这两个数据返回距离yyyymmdd delta_date天的时间
+ pre_time = datetime.datetime(int(yyyymmdd[0:4]), int(yyyymmdd[4:6]), int(yyyymmdd[6:8]))
+ return (pre_time - datetime.timedelta(delta_date)).strftime("%Y-%m-%d")
+
+ # 获得时间戳
+ def get_timestamp(self, date_time:str=None) -> int:
+ """
+ 输入的日期形式为:"2021-11-29 16:39:45.999"
+ 真正必须输入的是前十个字符,及精确到日期,后面的时间可以不输入,不输入则默认置零
+ """
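+        # Examples (illustrative): get_timestamp("2021-11-29") pads the missing time
+        # to "2021-11-29 00:00:00.0" and returns the epoch value in milliseconds;
+        # get_timestamp() uses the current local time instead.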
+ def standardDateTime(dt:str) -> str:
+ """
+ 规范化时间字符串
+ """
+ if len(dt) < 10:
+ raise ValueError("你必须至少输入准确到天的日期!比如:2021-11-29")
+ elif len(dt) == 10:
+ return dt + " 00:00:00.0"
+ else:
+ try:
+ date, time = dt.split(" ")
+ except ValueError:
+ raise ValueError("你只能也必须在日期与具体时间之间增加一个空格,其他地方不能出现空格!")
+ while len(time) < 10:
+ if len(time) in (2, 5):
+ time += ":"
+ elif len(time) == 8:
+ time += "."
+ else:
+ time += "0"
+ return date + " " + time
+ if date_time is None:
+ # 默认返回当前时间(str), date_time精确到毫秒
+ date_time = datetime.datetime.now()
+ # 转换成时间戳
+ else:
+ date_time = standardDateTime(dt=date_time)
+ date_time = datetime.datetime.strptime(date_time, "%Y-%m-%d %H:%M:%S.%f")
+ timestamp_ms = int(time.mktime(date_time.timetuple()) * 1000.0 + date_time.microsecond / 1000.0)
+ return timestamp_ms
+
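+    # Illustrative usage sketch (assumes a reachable config; not executed by the module):
+    # dbu = DBUtils()
+    # dbu.get_timestamp("2021-11-29")               # same as "2021-11-29 00:00:00.0"
+    # dbu.get_timestamp("2021-11-29 16:39:45.999")  # full datetime, millisecond precision
+    # dbu.get_timestamp()                           # defaults to the current time
+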
+ @staticmethod
+ def get_standardTime(yyyy_mm_dd: str):
+ return yyyy_mm_dd[0:4] + yyyy_mm_dd[5:7] + yyyy_mm_dd[8:10]
+
+ def find_oneDay_data(self, db_name: str, collection_name: str, date: str = None) -> dict:
+ """
+ 获取指定天数的数据,如果date is None,就自动寻找距今最近的有数据的那一天的数据
+ """
+ df = None # 应该被返回的数据
+ collection = self.get_dbClient()[db_name][collection_name]
+ if date is None: # 自动寻找前几天的数据,最多三十天
+ for delta_date in range(1, 31):
+ date_yyyymmdd = self.get_standardTime(self.get_time(delta_date=delta_date))
+ filter_ = {"date": date_yyyymmdd}
+ df = collection.find_one(filter=filter_)
+ if df is not None:
+ del df["_id"]
+ break
+ else:
+ filter_ = {"date": date}
+ df = collection.find_one(filter=filter_)
+ if df is not None:
+ del df["_id"]
+ return df
+
+ def find_daysData_byPeriod(self, date_period: tuple, db_name: str, col_name: str):
+ # 给出一个指定的范围日期,返回相应的数据(日期的两头都会被寻找)
+ # 这个函数我们默认数据库中的数据是连续的,即不会出现在 20211221 到 20211229 之间有一天没有数据的情况
+ if len(date_period) != 2:
+ raise ValueError("date_period数据结构:(开始日期,截止日期)")
+ start, end = date_period # yyyymmdd
+ delta_date = int(end) - int(start)
+ if delta_date < 0:
+ raise ValueError("传入的日期有误!")
+ collection = self.get_dbClient()[db_name][col_name]
+ date = start
+ while int(date) <= int(end):
+ yield collection.find_one(filter={"date": date})
+ date = self.get_standardTime(self.get_time(date, -1))
+
+ @staticmethod
+ def find_biggest_valueDict(dict_: dict):
+ # 寻找字典中数值最大的字段,要求输入的字典的字段值全为数字
+ while len(dict_) > 0:
+ max_value = 0
+ p = None
+ for key in dict_:
+ if dict_[key] > max_value:
+ p = key
+ max_value = dict_[key]
+ yield p, max_value
+ del dict_[p]
+
+ def copy_andAdd_dict(self, dict_base, dict_):
+        # Merge dict_ into dict_base and return it (dict_base is modified in place; this is not a deep copy).
+        # Values for keys that already exist are summed if numeric, or merged recursively if they are nested dicts.
+ for key in dict_:
+ if key not in dict_base:
+ dict_base[key] = dict_[key]
+ else:
+ if isinstance(dict_[key], int) or isinstance(dict_[key], float):
+ dict_base[key] = round(dict_[key] + dict_base[key], 2)
+ else:
+ dict_base[key] = self.copy_andAdd_dict(dict_base[key], dict_[key])
+ return dict_base
+
+ @staticmethod
+ def compare_data(dict1: dict, dict2: dict, suffix: str, save: int, **kwargs):
+ """
+ 有两个字典,并且通过kwargs会传输一个新的字典,根据字典中的键值我们进行比对,处理成相应的数据格式
+ 并且在dict1中,生成一个新的键值,为kwargs中的元素+suffix
+ save:保留几位小数
+ """
+ new_dict = dict1.copy()
+ for key in kwargs:
+ try:
+ if kwargs[key] not in dict2 or int(dict2[kwargs[key]]) == -1 or float(dict1[kwargs[key]]) <= 0.0:
+ # 数据不存在
+ data_new = 5002
+ else:
+ try:
+ data_new = round(
+ ((float(dict1[kwargs[key]]) - float(dict2[kwargs[key]])) / float(dict2[kwargs[key]])) * 100
+ , save)
+ except ZeroDivisionError:
+ data_new = 5002
+ if data_new == 0.0:
+ data_new = 0
+ except TypeError as e:
+ print(e)
+                data_new = 5002  # sentinel value returned when the previous data is missing or malformed
+ new_dict[kwargs[key] + suffix] = data_new
+ return new_dict
+
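+    # Hedged example of how compare_data builds the "<key><suffix>" fields (illustrative values):
+    # today = {"pv": 120, "uv": 80}
+    # yesterday = {"pv": 100, "uv": 0}
+    # DBUtils.compare_data(today, yesterday, suffix="_rate", save=2, k1="pv", k2="uv")
+    # # -> {"pv": 120, "uv": 80, "pv_rate": 20.0, "uv_rate": 5002}   (5002 marks missing/invalid data)
+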
+ @staticmethod
+ def sum_dictList_byKey(dictList: list, **kwargs) -> dict:
+ """
+ 有一个列表,列表中的元素为字典,并且所有字典都有一个键值为key的字段,字段值为数字
+ 我们将每一个字典的key字段提取后相加,得到该字段值之和.
+ """
+ sum_num = {}
+        if not kwargs:
+            raise ValueError("Please input at least ONE key")
+ for key in kwargs:
+ sum_num[kwargs[key]] = 0
+ for dict_ in dictList:
+ if not isinstance(dict_, dict):
+ raise TypeError("object is not DICT!")
+ for key in kwargs:
+ sum_num[kwargs[key]] += dict_[kwargs[key]]
+ return sum_num
+
+ @staticmethod
+ def sum_2ListDict(list_dict1: list, list_dict2: list, key_name, data_name):
+ """
+ 有两个列表,列表内的元素为字典,我们根据key所对应的键值寻找列表中键值相同的两个元素,将他们的data对应的键值相加
+ 生成新的列表字典(其余键值被删除)
+ key仅在一个列表中存在,则直接加入新的列表字典
+ """
+ sum_list = []
+
+ def find_sameKey(kn, key_, ld: list) -> int:
+ for dic_ in ld:
+ if dic_[kn] == key_:
+ post_ = ld.index(dic_)
+ return post_
+ return -1
+
+ for dic in list_dict1:
+ key = dic[key_name] # 键名
+ post = find_sameKey(key_name, key, list_dict2) # 在list2中寻找相同的位置
+ data = dic[data_name] + list_dict2[post][data_name] if post != -1 else dic[data_name]
+ sum_list.append({key_name: key, data_name: data})
+ return sum_list
+
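+    # Hedged example of merging two list-of-dicts by a shared key (illustrative data):
+    # a = [{"page": "home", "pv": 3}, {"page": "help", "pv": 1}]
+    # b = [{"page": "home", "pv": 2}]
+    # DBUtils.sum_2ListDict(a, b, key_name="page", data_name="pv")
+    # # -> [{"page": "home", "pv": 5}, {"page": "help", "pv": 1}]
+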
+ @staticmethod
+ def find_biggest_dictList(dictList: list, key: str = "key", data: str = "value"):
+ """
+ 有一个列表,里面每一个元素都是一个字典
+ 这些字典有一些共通性质,那就是里面都有一个key键名和一个data键名,后者的键值必须是数字
+ 我们根据data键值的大小进行生成,每一次返回列表中data键值最大的数和它的key键值
+ """
+ while len(dictList) > 0:
+ point = 0
+ biggest_num = int(dictList[0][data])
+ biggest_key = dictList[0][key]
+ for i in range(len(dictList)):
+ num = int(dictList[i][data])
+ if num > biggest_num:
+ point = i
+ biggest_num = int(dictList[i][data])
+ biggest_key = dictList[i][key]
+ yield str(biggest_key), biggest_num
+ del dictList[point]
+
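+    # Hedged sketch: the generator repeatedly yields the entry with the largest "value" field
+    # and removes it, so iterating gives a descending ranking (illustrative data):
+    # ranking = [{"key": "a", "value": 3}, {"key": "b", "value": 7}]
+    # list(DBUtils.find_biggest_dictList(ranking))  # -> [("b", 7), ("a", 3)]
+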
+ def get_share_data(self, date_yyyymmdd: str):
+ # 获得用户界面情况
+ visitPage = self.find_oneDay_data(date=date_yyyymmdd,
+ db_name="cuny-user-analysis",
+ collection_name="daily-userVisitPage")
+ if visitPage is not None:
+ # 这一部分没有得到数据是可以容忍的.不用抛出模态框错误
+ # 获得昨日用户分享情况
+ sum_num = self.sum_dictList_byKey(dictList=visitPage["data_list"],
+ key1="page_share_pv",
+ key2="page_share_uv")
+ else:
+ # 此时将分享次数等置为-1
+ sum_num = {"page_share_pv": -1, "page_share_uv": -1}
+ return sum_num
+
+ @staticmethod
+ def compare_date(date1_yyyymmdd: str, date2_yyyymmdd: str):
+        # Return True if date1 is exactly the day before date2 (parsing the dates handles month/year boundaries).
+        date1 = datetime.datetime.strptime(date1_yyyymmdd, "%Y%m%d")
+        date2 = datetime.datetime.strptime(date2_yyyymmdd, "%Y%m%d")
+        return (date2 - date1).days == 1
+
+ def change_time(self, date_yyyymmdd: str, mode: int):
+ # 将yyyymmdd的数据分开为相应的数据形式
+ if mode == 1:
+ if self.compare_date(date_yyyymmdd, self.get_standardTime(self.get_time(delta_date=0))) is False:
+ return date_yyyymmdd[0:4] + "年" + date_yyyymmdd[4:6] + "月" + date_yyyymmdd[6:8] + "日"
+ else:
+ return "昨日"
+ elif mode == 2:
+ date = date_yyyymmdd[0:4] + "." + date_yyyymmdd[4:6] + "." + date_yyyymmdd[6:8]
+ if self.compare_date(date_yyyymmdd, self.get_standardTime(self.get_time(delta_date=0))) is True:
+ return date + "~" + date + " | 昨日"
+ else:
+ return date + "~" + date
+
+ @staticmethod
+ def changeList_dict2List_list(dl: list, order: list):
+ """
+ 列表内是一个个字典,本函数将字典拆解,以order的形式排列键值为列表
+ 考虑到一些格式的问题,这里我采用生成器的形式封装
+ """
+ for dic in dl:
+ # dic是列表内的字典元素
+ tmp = []
+ for key_name in order:
+ key = dic[key_name]
+ tmp.append(key)
+ yield tmp
+
+ def dict_mapping(self, dict_name: str, id_: str):
+ """
+ 进行字典映射,输入字典名称和键名,返回具体的键值
+ 如果不存在,则原路返回键名
+ """
+ try:
+ return getattr(self, dict_name)[id_]
+ except KeyError:
+ return id_
+ except AttributeError:
+ print(f"[WARNING]: 本对象内部不存在{dict_name}!")
+ return id_
+
+ @staticmethod
+ def dictAddKey(dic: dict, dic_tmp: dict, **kwargs):
+ """
+ 往字典中加入参数,可迭代
+ """
+ for key in kwargs:
+ dic[key] = dic_tmp[key]
+ return dic
+
+
+if __name__ == "__main__":
+ dbu = DBUtils()
\ No newline at end of file
diff --git a/hivisionai/hyService/error.py b/hivisionai/hyService/error.py
new file mode 100644
index 0000000000000000000000000000000000000000..008bec5b234cb287c665dfb896f7b68a69687269
--- /dev/null
+++ b/hivisionai/hyService/error.py
@@ -0,0 +1,20 @@
+"""
+@author: cuny
+@fileName: error.py
+@create_time: 2022/03/10 下午3:14
+@introduce:
+保存一些定义的错误类型
+"""
+class ProcessError(Exception):
+ def __init__(self, err):
+ super().__init__(err)
+ self.err = err
+ def __str__(self):
+ return self.err
+
+class WrongImageType(TypeError):
+ def __init__(self, err):
+ super().__init__(err)
+ self.err = err
+ def __str__(self):
+ return self.err
\ No newline at end of file
diff --git a/hivisionai/hyService/serviceTest.py b/hivisionai/hyService/serviceTest.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ba75ad5e6889058cdcb62d2a0eb1f01046dd4cc
--- /dev/null
+++ b/hivisionai/hyService/serviceTest.py
@@ -0,0 +1,34 @@
+"""
+用于测试云端或者本地服务的运行是否成功
+"""
+import requests
+import functools
+import cv2
+import time
+
+def httpPostTest(url, msg:dict):
+ """
+ 以post请求访问api,携带msg(dict)信息
+ """
+ re = requests.post(url=url, json=msg)
+ print(re.text)
+ return re
+
+
+def localTestImageFunc(path):
+ """
+ 在本地端测试算法,需要注意的是本装饰器只支持测试和图像相关算法
+ path代表测试图像的路径,其余参数请写入被装饰的函数中,并且只支持标签形式输入
+ 被测试的函数的第一个输入参数必须为图像矩阵(以cv2读入)
+ """
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(**kwargs):
+ start = time.time()
+ image = cv2.imread(path)
+            # keyword arguments are forwarded to the decorated function
+            image_out = func(image) if len(kwargs) == 0 else func(image, **kwargs)
+            print("END.\nProcessing time (model loading excluded): {} s".format(round(time.time() - start, 2)))
+            cv2.imshow("test", image_out)
+            cv2.waitKey(0)
+            return image_out
+        return wrapper
+ return decorator
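+
+
+# Hedged usage sketch for the decorator above (the image path and parameter are illustrative):
+# @localTestImageFunc(path="./test.jpg")
+# def my_algorithm(image, brightness=1.0):
+#     return cv2.convertScaleAbs(image, alpha=brightness)
+#
+# my_algorithm()                  # runs the algorithm on ./test.jpg and shows the result
+# my_algorithm(brightness=1.2)    # keyword arguments are forwarded to the algorithm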
diff --git a/hivisionai/hyService/utils.py b/hivisionai/hyService/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..88bf67a92690c4463387e19d506b732053d34877
--- /dev/null
+++ b/hivisionai/hyService/utils.py
@@ -0,0 +1,92 @@
+"""
+@author: cuny
+@fileName: utils.py
+@create_time: 2021/12/29 下午1:29
+@introduce:
+焕影服务的一些工具函数,涉及两类:
+1. 开发debug时候的工具函数
+2. 初始化COS配置时的工具函数
+"""
+import cv2
+from .error import WrongImageType, ProcessError
+import numpy as np
+
+class Debug(object):
+ color_dir:dict = {
+ "red":"31m",
+ "green":"32m",
+ "yellow":"33m",
+ "blue":"34m",
+ "common":"38m"
+ } # 颜色值
+ __DEBUG:bool = True
+
+ @property
+ def debug(self):
+ return self.__DEBUG
+
+ @debug.setter
+ def debug(self, value):
+ if not isinstance(value, bool):
+ raise TypeError("你必须设定debug的值为bool的True或者False")
+ print(f"设置debug为: {value}")
+ self.__DEBUG = value
+
+ def debug_print(self, text, **kwargs):
+ if self.debug is True:
+ key = self.color_dir["common"] if "font_color" not in kwargs else self.color_dir[kwargs["font_color"]]
+ print(f"\033[{key}{text}\033[0m")
+
+ @staticmethod
+ def resize_image_esp(input_image, esp=2000):
+ """
+ 输入:
+ input_path:numpy图片
+ esp:限制的最大边长
+ """
+ # resize函数=>可以让原图压缩到最大边为esp的尺寸(不改变比例)
+ width = input_image.shape[0]
+ length = input_image.shape[1]
+ max_num = max(width, length)
+
+ if max_num > esp:
+ print("Image resizing...")
+ if width == max_num:
+ length = int((esp / width) * length)
+ width = esp
+
+ else:
+ width = int((esp / length) * width)
+ length = esp
+ print(length, width)
+ im_resize = cv2.resize(input_image, (length, width), interpolation=cv2.INTER_AREA)
+ return im_resize
+ else:
+ return input_image
+
+ def cv_show(self, *args, **kwargs):
+ def check_images(img):
+ # 判断是否是矩阵类型
+ if not isinstance(img, np.ndarray):
+ raise WrongImageType("输入的图像必须是 np.ndarray 类型!")
+ if self.debug is True:
+ size = 500 if "size" not in kwargs else kwargs["size"] # 默认缩放尺寸为最大边500像素点
+ if len(args) == 0:
+ raise ProcessError("你必须传入若干图像信息!")
+ flag = False
+ base = None
+ for image in args:
+ check_images(image)
+ if flag is False:
+ image = self.resize_image_esp(image, size)
+ h, w = image.shape[0], image.shape[1]
+ flag = (w, h)
+ base = image
+ else:
+ image = cv2.resize(image, flag)
+ base = np.hstack((base, image))
+ title = "cv_show" if "winname" not in kwargs else kwargs["winname"]
+ cv2.imshow(title, base)
+ cv2.waitKey(0)
+ else:
+ pass
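+
+
+# Hedged usage sketch (illustrative file names; cv_show only displays when debug is True):
+# dg = Debug()
+# img_a = cv2.imread("a.jpg")
+# img_b = cv2.imread("b.jpg")
+# dg.debug_print("comparing results", font_color="green")
+# dg.cv_show(img_a, img_b, size=400, winname="compare")  # images are resized and stacked side by side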
diff --git a/hivisionai/hyTrain/APIs.py b/hivisionai/hyTrain/APIs.py
new file mode 100644
index 0000000000000000000000000000000000000000..48b1d227fd90685ee70851ca34cf70aa3e789687
--- /dev/null
+++ b/hivisionai/hyTrain/APIs.py
@@ -0,0 +1,197 @@
+import requests, os
+import json
+import hashlib, base64, hmac
+import sys
+import oss2
+from aliyunsdkimageseg.request.v20191230.SegmentBodyRequest import SegmentBodyRequest
+from aliyunsdkimageseg.request.v20191230.SegmentSkinRequest import SegmentSkinRequest
+from aliyunsdkfacebody.request.v20191230.DetectFaceRequest import DetectFaceRequest
+from aliyunsdkcore.client import AcsClient
+
+# 头像抠图参数配置
+def params_of_head(photo_base64, photo_type):
+ print ('测试头像抠图接口 ...')
+ host = 'https://person.market.alicloudapi.com'
+ uri = '/segment/person/headrgba' # 头像抠图返回透明PNG图
+ # uri = '/segment/person/head' # 头像抠图返回alpha图
+ # uri = '/segment/person/headborder' # 头像抠图返回带白边的透明PNG图
+ return host, uri, {
+ 'photo': photo_base64,
+ 'type': photo_type,
+ 'face_required': 0, # 可选,检测是否必须带有人脸才进行抠图处理,0为检测,1为不检测,默认为0
+ 'border_ratio': 0.3, # 可选,仅带白边接口可用,
+ # 在头像边缘增加白边(或者其他颜色)宽度,取值为0-0.5,
+ # 这个宽度是相对于图片宽度和高度最大值的比例,
+ # 比如原图尺寸为640x480,border_ratio为0.2,
+ # 则添加的白边的宽度为:max(640,480) * 0.2 = 96个像素
+ 'margin_color': '#ff0000' # 可选,仅带白边接口可用,
+ # 在头像边缘增加边框的颜色,默认为白色
+
+ }
+
+# 头像抠图API
+def wanxing_get_head_api(file_name='/home/parallels/Desktop/change_cloth/input_image/03.jpg',
+ output_path="./head.png",
+ app_key='204014294',
+ secret="pI2uo7AhCFjnaZWYrCCAEjmsZJbK6vzy",
+ stage='RELEASE'):
+ info = sys.version_info
+ if info[0] < 3:
+ is_python3 = False
+ else:
+ is_python3 = True
+
+ with open(file_name, 'rb') as fp:
+ photo_base64 = base64.b64encode(fp.read())
+ if is_python3:
+ photo_base64 = photo_base64.decode('utf8')
+
+ _, photo_type = os.path.splitext(file_name)
+ photo_type = photo_type.lstrip('.')
+ # print(photo_type)
+ # print(photo_base64)
+
+ # host, uri, body_json = params_of_portrait_matting(photo_base64, photo_type)
+ # host, uri, body_json = params_of_object_matting(photo_base64)
+ # host, uri, body_json = params_of_idphoto(photo_base64, photo_type)
+ host, uri, body_json = params_of_head(photo_base64, photo_type)
+ # host, uri, body_json = params_of_crop(photo_base64)
+ api = host + uri
+
+ body = json.dumps(body_json)
+ md5lib = hashlib.md5()
+ if is_python3:
+ md5lib.update(body.encode('utf8'))
+ else:
+ md5lib.update(body)
+ body_md5 = md5lib.digest()
+ body_md5 = base64.b64encode(body_md5)
+ if is_python3:
+ body_md5 = body_md5.decode('utf8')
+
+ method = 'POST'
+ accept = 'application/json'
+ content_type = 'application/octet-stream; charset=utf-8'
+ date_str = ''
+ headers = ''
+
+ string_to_sign = method + '\n' \
+ + accept + '\n' \
+ + body_md5 + '\n' \
+ + content_type + '\n' \
+ + date_str + '\n' \
+ + headers \
+ + uri
+ if is_python3:
+ signed = hmac.new(secret.encode('utf8'),
+ string_to_sign.encode('utf8'),
+ digestmod=hashlib.sha256).digest()
+ else:
+ signed = hmac.new(secret, string_to_sign, digestmod=hashlib.sha256).digest()
+ signed = base64.b64encode(signed)
+ if is_python3:
+ signed = signed.decode('utf8')
+
+ headers = {
+ 'Accept': accept,
+ 'Content-MD5': body_md5,
+ 'Content-Type': content_type,
+ 'X-Ca-Key': app_key,
+ 'X-Ca-Stage': stage,
+ 'X-Ca-Signature': signed
+ }
+ #print signed
+
+
+ resp = requests.post(api, data=body, headers=headers)
+ # for u,v in resp.headers.items():
+ # print(u+": " + v)
+ try:
+ res = resp.content
+ res = json.loads(res)
+ # print ('res:', res)
+ if str(res['status']) == '0':
+ # print ('成功!')
+ file_object = requests.get(res["data"]["result"])
+ # print(file_object)
+ with open(output_path, 'wb') as local_file:
+ local_file.write(file_object.content)
+
+ # image = cv2.imread("./test_head.png", -1)
+ # return image
+ else:
+ pass
+ # print ('失败!')
+    except Exception as e:
+        print('failed to parse response:', e, resp)
+
+# 阿里云抠图API
+def aliyun_human_matting_api(input_path, output_path, type="human"):
+ auth = oss2.Auth('LTAI5tP2NxdzSFfpKYxZFCuJ', 'VzbGdUbRawuMAitekP3ORfrw0i3NEX')
+ bucket = oss2.Bucket(auth, 'https://oss-cn-shanghai.aliyuncs.com', 'huanying-api')
+ key = os.path.basename(input_path)
+ origin_image = input_path
+ try:
+ bucket.put_object_from_file(key, origin_image, headers={"Connection":"close"})
+ except Exception as e:
+ print(e)
+
+ url = bucket.sign_url('GET', key, 10 * 60)
+ client = AcsClient('LTAI5tP2NxdzSFfpKYxZFCuJ', 'VzbGdUbRawuMAitekP3ORfrw0i3NEX', 'cn-shanghai')
+ if type == "human":
+ request = SegmentBodyRequest()
+ elif type == "skin":
+ request = SegmentSkinRequest()
+ request.set_accept_format('json')
+ request.set_ImageURL(url)
+
+ try:
+ response = client.do_action_with_exception(request)
+        response_dict = json.loads(str(response, encoding='utf-8'))  # the response is JSON; avoid eval
+ if type == "human":
+ output_url = response_dict['Data']['ImageURL']
+ elif type == "skin":
+ output_url = response_dict['Data']['Elements'][0]['URL']
+ file_object = requests.get(output_url)
+ with open(output_path, 'wb') as local_file:
+ local_file.write(file_object.content)
+ bucket.delete_object(key)
+ except Exception as e:
+ print(e)
+ response = client.do_action_with_exception(request)
+        response_dict = json.loads(str(response, encoding='utf-8'))
+ print(response_dict)
+ output_url = response_dict['Data']['ImageURL']
+ file_object = requests.get(output_url)
+ with open(output_path, 'wb') as local_file:
+ local_file.write(file_object.content)
+ bucket.delete_object(key)
+
+# 阿里云人脸检测API
+def aliyun_face_detect_api(input_path, type="human"):
+ auth = oss2.Auth('LTAI5tP2NxdzSFfpKYxZFCuJ', 'VzbGdUbRawuMAitekP3ORfrw0i3NEX')
+ bucket = oss2.Bucket(auth, 'https://oss-cn-shanghai.aliyuncs.com', 'huanying-api')
+ key = os.path.basename(input_path)
+ origin_image = input_path
+ try:
+ bucket.put_object_from_file(key, origin_image, headers={"Connection":"close"})
+ except Exception as e:
+ print(e)
+
+ url = bucket.sign_url('GET', key, 10 * 60)
+ client = AcsClient('LTAI5tP2NxdzSFfpKYxZFCuJ', 'VzbGdUbRawuMAitekP3ORfrw0i3NEX', 'cn-shanghai')
+ if type == "human":
+ request = DetectFaceRequest()
+ request.set_accept_format('json')
+ request.set_ImageURL(url)
+ try:
+ response = client.do_action_with_exception(request)
+ response_json = json.loads(str(response, encoding='utf-8'))
+ print(response_json["Data"]["PoseList"][-1])
+ bucket.delete_object(key)
+ return response_json["Data"]["PoseList"][-1]
+ except Exception as e:
+ print(e)
+
+if __name__ == "__main__":
+ wanxing_get_head_api()
\ No newline at end of file
diff --git a/hivisionai/hyTrain/DataProcessing.py b/hivisionai/hyTrain/DataProcessing.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d38c53a9be5a829d2c4926b5af4e7208c83ca3f
--- /dev/null
+++ b/hivisionai/hyTrain/DataProcessing.py
@@ -0,0 +1,37 @@
+import cv2
+import random
+from scipy.ndimage import grey_erosion, grey_dilation
+import numpy as np
+from glob import glob
+
+
+def make_a_and_trimaps(input_image, resize=(512, 512)):
+ image = cv2.resize(input_image, resize)
+ b, g, r, a = cv2.split(image)
+
+ a_scale_resize = a / 255
+ trimap = (a_scale_resize >= 0.95).astype("float32")
+ not_bg = (a_scale_resize > 0).astype("float32")
+ d_size = a.shape[0] // 256 * random.randint(10, 20)
+ e_size = a.shape[0] // 256 * random.randint(10, 20)
+ trimap[np.where((grey_dilation(not_bg, size=(d_size, d_size))
+ - grey_erosion(trimap, size=(e_size, e_size))) != 0)] = 0.5
+
+ return a, trimap*255
+
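+# Hedged sketch of the trimap construction above (illustrative input): pixels whose alpha is
+# near-opaque (>= 0.95) become foreground, fully transparent pixels stay background, and a band
+# obtained from dilating the non-background mask and eroding the foreground mask is marked
+# unknown (0.5) before the result is scaled to 0 / ~128 / 255.
+# rgba = cv2.imread("person.png", cv2.IMREAD_UNCHANGED)   # must be a 4-channel image
+# alpha, trimap = make_a_and_trimaps(rgba, resize=(512, 512))
+# cv2.imwrite("trimap.png", trimap)
+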
+
+def get_filedir_filelist(input_path):
+ return glob(input_path+"/*")
+
+
+def extChange(filedir, ext="png"):
+    # only swap the trailing extension; str.replace could also hit an identical substring earlier in the path
+    ext_origin = str(filedir).split(".")[-1]
+    return str(filedir)[:-len(ext_origin)] + ext
+
+def random_image_crop(input_image: np.ndarray, crop_size=(512, 512)):
+ height, width = input_image.shape[0], input_image.shape[1]
+ crop_height, crop_width = crop_size[0], crop_size[1]
+ x = random.randint(0, width-crop_width)
+ y = random.randint(0, height-crop_height)
+ return input_image[y:y+crop_height, x:x+crop_width]
\ No newline at end of file
diff --git a/hivisionai/hyTrain/__init__.py b/hivisionai/hyTrain/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/hivisionai/hyTrain/__pycache__/APIs.cpython-310.pyc b/hivisionai/hyTrain/__pycache__/APIs.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..beff39ab4fcf30167d8f57ee9f2f7a31b47b9192
Binary files /dev/null and b/hivisionai/hyTrain/__pycache__/APIs.cpython-310.pyc differ
diff --git a/hivisionai/hyTrain/__pycache__/DataProcessing.cpython-310.pyc b/hivisionai/hyTrain/__pycache__/DataProcessing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7c570959d30a62121d3ad3e56215268a698d8565
Binary files /dev/null and b/hivisionai/hyTrain/__pycache__/DataProcessing.cpython-310.pyc differ
diff --git a/hivisionai/hyTrain/__pycache__/__init__.cpython-310.pyc b/hivisionai/hyTrain/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3167ade11cc765a1edaeea0cb7d029f63fd658a4
Binary files /dev/null and b/hivisionai/hyTrain/__pycache__/__init__.cpython-310.pyc differ
diff --git a/hivisionai/hycv/FaceDetection68/__init__.py b/hivisionai/hycv/FaceDetection68/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab9d7abfc8ac8da85b7cd873e1262cb57af5180b
--- /dev/null
+++ b/hivisionai/hycv/FaceDetection68/__init__.py
@@ -0,0 +1,8 @@
+"""
+@author: cuny
+@fileName: __init__.py
+@create_time: 2022/01/03 下午9:39
+@introduce:
+人脸68关键点检测sdk的__init__包,实际上是对dlib的封装
+"""
+from .faceDetection68 import FaceDetection68, PoseEstimator68
diff --git a/hivisionai/hycv/FaceDetection68/__pycache__/__init__.cpython-310.pyc b/hivisionai/hycv/FaceDetection68/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4d0766b28704472a70beb814e8b5b75a6dedfa5e
Binary files /dev/null and b/hivisionai/hycv/FaceDetection68/__pycache__/__init__.cpython-310.pyc differ
diff --git a/hivisionai/hycv/FaceDetection68/__pycache__/faceDetection68.cpython-310.pyc b/hivisionai/hycv/FaceDetection68/__pycache__/faceDetection68.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3ef60457af37a6f00d6db7abb668e78f55873f68
Binary files /dev/null and b/hivisionai/hycv/FaceDetection68/__pycache__/faceDetection68.cpython-310.pyc differ
diff --git a/hivisionai/hycv/FaceDetection68/faceDetection68.py b/hivisionai/hycv/FaceDetection68/faceDetection68.py
new file mode 100644
index 0000000000000000000000000000000000000000..999c511f16ecd6a8020743debbde5e42b4401f36
--- /dev/null
+++ b/hivisionai/hycv/FaceDetection68/faceDetection68.py
@@ -0,0 +1,443 @@
+"""
+@author: cuny
+@fileName: faceDetection68.py
+@create_time: 2022/01/03 下午10:20
+@introduce:
+人脸68关键点检测主文件,以类的形式封装
+"""
+from hivisionai.hyService.cloudService import GetConfig
+import os
+import cv2
+import dlib
+import numpy as np
+local_file = os.path.dirname(__file__)
+PREDICTOR_PATH = f"{local_file}/weights/shape_predictor_68_face_landmarks.dat" # 关键点检测模型路径
+MODULE3D_PATH = f"{local_file}/weights/68_points_3D_model.txt" # 3d的68点配置文件路径
+
+# 定义一个人脸检测错误的错误类
+class FaceError(Exception):
+ def __init__(self, err):
+ super().__init__(err)
+ self.err = err
+ def __str__(self):
+ return self.err
+
+class FaceConfig68(object):
+ face_area:list = None # 一些其他的参数,在本类中实际没啥用
+ FACE_POINTS = list(range(17, 68)) # 人脸轮廓点索引
+ MOUTH_POINTS = list(range(48, 61)) # 嘴巴点索引
+ RIGHT_BROW_POINTS = list(range(17, 22)) # 右眉毛索引
+ LEFT_BROW_POINTS = list(range(22, 27)) # 左眉毛索引
+ RIGHT_EYE_POINTS = list(range(36, 42)) # 右眼索引
+ LEFT_EYE_POINTS = list(range(42, 48)) # 左眼索引
+ NOSE_POINTS = list(range(27, 35)) # 鼻子索引
+ JAW_POINTS = list(range(0, 17)) # 下巴索引
+ LEFT_FACE = list(range(42, 48)) + list(range(22, 27)) # 左半边脸索引
+ RIGHT_FACE = list(range(36, 42)) + list(range(17, 22)) # 右半边脸索引
+ JAW_END = 17 # 下巴结束点
+ FACE_START = 0 # 人脸识别开始
+ FACE_END = 68 # 人脸识别结束
+ # 下面这个是整张脸的mark点,可以用:
+ # for group in self.OVERLAY_POINTS:
+ # cv2.fillConvexPoly(face_mask, cv2.convexHull(dst_matrix[group]), (255, 255, 255))
+ # 来形成人脸蒙版
+ OVERLAY_POINTS = [
+ JAW_POINTS,
+ LEFT_FACE,
+ RIGHT_FACE
+ ]
+
+class FaceDetection68(FaceConfig68):
+ """
+ 人脸68关键点检测主类,当然使用的是dlib开源包
+ """
+ def __init__(self, model_path:str=None, default_download:bool=False, *args, **kwargs):
+ # 初始化,检查并下载模型
+ self.model_path = PREDICTOR_PATH if model_path is None else model_path
+ if not os.path.exists(self.model_path) or default_download: # 下载配置
+ gc = GetConfig()
+ gc.load_file(cloud_path="weights/shape_predictor_68_face_landmarks.dat",
+ local_path=self.model_path)
+ self.__detector = None
+ self.__predictor = None
+
+ @property
+ def detector(self):
+ if self.__detector is None:
+ self.__detector = dlib.get_frontal_face_detector() # 获取人脸分类器
+ return self.__detector
+ @property
+ def predictor(self):
+ if self.__predictor is None:
+ self.__predictor = dlib.shape_predictor(self.model_path) # 输入模型,构建特征提取器
+ return self.__predictor
+
+ @staticmethod
+ def draw_face(img:np.ndarray, dets:dlib.rectangles, *args, **kwargs):
+ # 画人脸检测框, 为了一些兼容操作我没有设置默认显示,可以在运行完本函数后将返回值进行self.cv_show()
+ tmp = img.copy()
+ for face in dets:
+ # 左上角(x1,y1),右下角(x2,y2)
+ x1, y1, x2, y2 = face.left(), face.top(), face.right(), face.bottom()
+ # print(x1, y1, x2, y2)
+ cv2.rectangle(tmp, (x1, y1), (x2, y2), (0, 255, 0), 2)
+ return tmp
+
+ @staticmethod
+    def draw_points(img:np.ndarray, landmarks:np.matrix, if_num: bool = False, *args, **kwargs):
+ """
+ 画人脸关键点, 为了一些兼容操作我没有设置默认显示,可以在运行完本函数后将返回值进行self.cv_show()
+ :param img: 输入的是人脸检测的图,必须是3通道或者灰度图
+ :param if_num: 是否在画关键点的同时画上编号
+ :param landmarks: 输入的关键点矩阵信息
+ """
+ tmp = img.copy()
+        h, w = tmp.shape[:2]  # works for both 3-channel and grayscale images
+        r = max(int(max(h, w) / 100) - 2, 1)  # keep the point radius positive for small images
+ for idx, point in enumerate(landmarks):
+ # 68点的坐标
+ pos = (point[0, 0], point[0, 1])
+ # 利用cv2.circle给每个特征点画一个圈,共68个
+ cv2.circle(tmp, pos, r, color=(0, 0, 255), thickness=-1) # bgr
+ if if_num is True:
+ # 利用cv2.putText输出1-68
+ font = cv2.FONT_HERSHEY_SIMPLEX
+ cv2.putText(tmp, str(idx + 1), pos, font, 0.8, (0, 0, 255), 1, cv2.LINE_AA)
+ return tmp
+
+ @staticmethod
+ def resize_image_esp(input_image_, esp=2000):
+ """
+ 输入:
+ input_path:numpy图片
+ esp:限制的最大边长
+ """
+ # resize函数=>可以让原图压缩到最大边为esp的尺寸(不改变比例)
+ width = input_image_.shape[0]
+
+ length = input_image_.shape[1]
+ max_num = max(width, length)
+
+ if max_num > esp:
+ print("Image resizing...")
+ if width == max_num:
+ length = int((esp / width) * length)
+ width = esp
+
+ else:
+ width = int((esp / length) * width)
+ length = esp
+ print(length, width)
+ im_resize = cv2.resize(input_image_, (length, width), interpolation=cv2.INTER_AREA)
+ return im_resize
+ else:
+ return input_image_
+
+ def facesPoints(self, img:np.ndarray, esp:int=None, det_num:int=1,*args, **kwargs):
+ """
+ :param img: 输入的是人脸检测的图,必须是3通道或者灰度图
+ :param esp: 如果输入了具体数值,会将图片的最大边长缩放至esp,另一边等比例缩放
+ :param det_num: 人脸检测的迭代次数, 采样次数越多,越有利于检测到更多的人脸
+ :return
+ 返回人脸检测框对象dets, 人脸关键点矩阵列表(列表中每个元素为一个人脸的关键点矩阵), 人脸关键点元组列表(列表中每个元素为一个人脸的关键点列表)
+ """
+ # win = dlib.image_window()
+ # win.clear_overlay()
+ # win.set_image(img)
+ # dlib的人脸检测装置
+ if esp is not None:
+ img = self.resize_image_esp(input_image_=img, esp=esp)
+ dets = self.detector(img, det_num)
+ # self.draw_face(img, dets)
+ # font_color = "green" if len(dets) == 1 else "red"
+ # dg.debug_print("Number of faces detected: {}".format(len(dets)), font_color=font_color)
+ landmarkList = []
+ pointsList = []
+ for d in dets:
+ shape = self.predictor(img, d)
+ landmark = np.matrix([[p.x, p.y] for p in shape.parts()])
+ landmarkList.append(landmark)
+ point_list = []
+ for p in landmark.tolist():
+ point_list.append((p[0], p[1]))
+ pointsList.append(point_list)
+ # dg.debug_print("Key point detection SUCCESS.", font_color="green")
+ return dets, landmarkList, pointsList
+
+ def facePoints(self, img:np.ndarray, esp:int=None, det_num:int=1, *args, **kwargs):
+ """
+ 本函数与facesPoints大致类似,主要区别在于本函数默认只能返回一个人脸关键点参数
+ """
+ # win = dlib.image_window()
+ # win.clear_overlay()
+ # win.set_image(img)
+ # dlib的人脸检测装置, 参数1表示对图片进行上采样一次,采样次数越多,越有利于检测到更多的人脸
+ if esp is not None:
+ img = self.resize_image_esp(input_image_=img, esp=esp)
+ dets = self.detector(img, det_num)
+ # self.draw_face(img, dets)
+        # dg.debug_print("Number of faces detected: {}".format(len(dets)))
+        if len(dets) != 1:
+            # this function requires exactly one detected face
+            raise FaceError("Face detection error!!!")
+ d = dets[0] # 唯一人脸
+ shape = self.predictor(img, d)
+ landmark = np.matrix([[p.x, p.y] for p in shape.parts()])
+ # print("face_landmark:", landmark) # 打印关键点矩阵
+ # shape = predictor(img, )
+ # dlib.hit_enter_to_continue()
+ # 返回关键点矩阵,关键点,
+ point_list = []
+ for p in landmark.tolist():
+ point_list.append((p[0], p[1]))
+ # dg.debug_print("Key point detection SUCCESS.", font_color="green")
+ # 最后的一个返回参数只会被计算一次,用于标明脸部框的位置
+ # [人脸框左上角纵坐标(top),左上角横坐标(left),人脸框宽度(width),人脸框高度(height)]
+ return dets, landmark, point_list
+
+class PoseEstimator68(object):
+ """
+ Estimate head pose according to the facial landmarks
+    Estimates the head pose (Euler angles) of a single face in the input image.
+ """
+ def __init__(self, img:np.ndarray, params_path:str=None, default_download:bool=False):
+ self.params_path = MODULE3D_PATH if params_path is None else params_path
+ if not os.path.exists(self.params_path) or default_download:
+ gc = GetConfig()
+ gc.load_file(cloud_path="weights/68_points_3D_model.txt",
+ local_path=self.params_path)
+ h, w, c = img.shape
+ self.size = (h, w)
+ # 3D model points.
+ self.model_points = np.array([
+ (0.0, 0.0, 0.0), # Nose tip
+ (0.0, -330.0, -65.0), # Chin
+ (-225.0, 170.0, -135.0), # Left eye left corner
+ (225.0, 170.0, -135.0), # Right eye right corner
+ (-150.0, -150.0, -125.0), # Mouth left corner
+ (150.0, -150.0, -125.0) # Mouth right corner
+ ]) / 4.5
+ self.model_points_68 = self._get_full_model_points()
+
+ # Camera internals
+ self.focal_length = self.size[1]
+ self.camera_center = (self.size[1] / 2, self.size[0] / 2)
+ self.camera_matrix = np.array(
+ [[self.focal_length, 0, self.camera_center[0]],
+ [0, self.focal_length, self.camera_center[1]],
+ [0, 0, 1]], dtype="double")
+
+ # Assuming no lens distortion
+ self.dist_coeefs = np.zeros((4, 1))
+
+ # Rotation vector and translation vector
+ self.r_vec = np.array([[0.01891013], [0.08560084], [-3.14392813]])
+ self.t_vec = np.array(
+ [[-14.97821226], [-10.62040383], [-2053.03596872]])
+ # self.r_vec = None
+ # self.t_vec = None
+
+ def _get_full_model_points(self):
+ """Get all 68 3D model points from file"""
+ raw_value = []
+ with open(self.params_path) as file:
+ for line in file:
+ raw_value.append(line)
+ model_points = np.array(raw_value, dtype=np.float32)
+ model_points = np.reshape(model_points, (3, -1)).T
+
+ # Transform the model into a front view.
+ # model_points[:, 0] *= -1
+ model_points[:, 1] *= -1
+ model_points[:, 2] *= -1
+ return model_points
+
+ def show_3d_model(self):
+ from matplotlib import pyplot
+ from mpl_toolkits.mplot3d import Axes3D
+ fig = pyplot.figure()
+ ax = Axes3D(fig)
+
+ x = self.model_points_68[:, 0]
+ y = self.model_points_68[:, 1]
+ z = self.model_points_68[:, 2]
+
+ ax.scatter(x, y, z)
+ ax.axis('auto')
+ pyplot.xlabel('x')
+ pyplot.ylabel('y')
+ pyplot.show()
+
+ def solve_pose(self, image_points):
+ """
+ Solve pose from image points
+ Return (rotation_vector, translation_vector) as pose.
+ """
+ assert image_points.shape[0] == self.model_points_68.shape[0], "3D points and 2D points should be of same number."
+ (_, rotation_vector, translation_vector) = cv2.solvePnP(
+ self.model_points, image_points, self.camera_matrix, self.dist_coeefs)
+
+ # (success, rotation_vector, translation_vector) = cv2.solvePnP(
+ # self.model_points,
+ # image_points,
+ # self.camera_matrix,
+ # self.dist_coeefs,
+ # rvec=self.r_vec,
+ # tvec=self.t_vec,
+ # useExtrinsicGuess=True)
+ return rotation_vector, translation_vector
+
+ def solve_pose_by_68_points(self, image_points):
+ """
+ Solve pose from all the 68 image points
+ Return (rotation_vector, translation_vector) as pose.
+ """
+ if self.r_vec is None:
+ (_, rotation_vector, translation_vector) = cv2.solvePnP(
+ self.model_points_68, image_points, self.camera_matrix, self.dist_coeefs)
+ self.r_vec = rotation_vector
+ self.t_vec = translation_vector
+
+ (_, rotation_vector, translation_vector) = cv2.solvePnP(
+ self.model_points_68,
+ image_points,
+ self.camera_matrix,
+ self.dist_coeefs,
+ rvec=self.r_vec,
+ tvec=self.t_vec,
+ useExtrinsicGuess=True)
+
+ return rotation_vector, translation_vector
+
+ # def draw_annotation_box(self, image, rotation_vector, translation_vector, color=(255, 255, 255), line_width=2):
+ # """Draw a 3D box as annotation of pose"""
+ # point_3d = []
+ # rear_size = 75
+ # rear_depth = 0
+ # point_3d.append((-rear_size, -rear_size, rear_depth))
+ # point_3d.append((-rear_size, rear_size, rear_depth))
+ # point_3d.append((rear_size, rear_size, rear_depth))
+ # point_3d.append((rear_size, -rear_size, rear_depth))
+ # point_3d.append((-rear_size, -rear_size, rear_depth))
+ #
+ # front_size = 100
+ # front_depth = 100
+ # point_3d.append((-front_size, -front_size, front_depth))
+ # point_3d.append((-front_size, front_size, front_depth))
+ # point_3d.append((front_size, front_size, front_depth))
+ # point_3d.append((front_size, -front_size, front_depth))
+ # point_3d.append((-front_size, -front_size, front_depth))
+ # point_3d = np.array(point_3d, dtype=np.float64).reshape(-1, 3)
+ #
+ # # Map to 2d image points
+ # (point_2d, _) = cv2.projectPoints(point_3d,
+ # rotation_vector,
+ # translation_vector,
+ # self.camera_matrix,
+ # self.dist_coeefs)
+ # point_2d = np.int32(point_2d.reshape(-1, 2))
+ #
+ # # Draw all the lines
+ # cv2.polylines(image, [point_2d], True, color, line_width, cv2.LINE_AA)
+ # cv2.line(image, tuple(point_2d[1]), tuple(
+ # point_2d[6]), color, line_width, cv2.LINE_AA)
+ # cv2.line(image, tuple(point_2d[2]), tuple(
+ # point_2d[7]), color, line_width, cv2.LINE_AA)
+ # cv2.line(image, tuple(point_2d[3]), tuple(
+ # point_2d[8]), color, line_width, cv2.LINE_AA)
+ #
+ # def draw_axis(self, img, R, t):
+ # points = np.float32(
+ # [[30, 0, 0], [0, 30, 0], [0, 0, 30], [0, 0, 0]]).reshape(-1, 3)
+ #
+ # axisPoints, _ = cv2.projectPoints(
+ # points, R, t, self.camera_matrix, self.dist_coeefs)
+ #
+ # img = cv2.line(img, tuple(axisPoints[3].ravel()), tuple(
+ # axisPoints[0].ravel()), (255, 0, 0), 3)
+ # img = cv2.line(img, tuple(axisPoints[3].ravel()), tuple(
+ # axisPoints[1].ravel()), (0, 255, 0), 3)
+ # img = cv2.line(img, tuple(axisPoints[3].ravel()), tuple(
+ # axisPoints[2].ravel()), (0, 0, 255), 3)
+
+ def draw_axes(self, img, R, t):
+ """
+ OX is drawn in red, OY in green and OZ in blue.
+ """
+ return cv2.drawFrameAxes(img, self.camera_matrix, self.dist_coeefs, R, t, 30)
+
+ @staticmethod
+ def get_pose_marks(marks):
+ """Get marks ready for pose estimation from 68 marks"""
+ pose_marks = [marks[30], marks[8], marks[36], marks[45], marks[48], marks[54]]
+ return pose_marks
+
+ @staticmethod
+ def rot_params_rm(R):
+ from math import pi,atan2,asin, fabs
+ # x轴
+ pitch = (180 * atan2(-R[2][1], R[2][2]) / pi)
+ f = (0 > pitch) - (0 < pitch)
+ pitch = f * (180 - fabs(pitch))
+ # y轴
+ yaw = -(180 * asin(R[2][0]) / pi)
+ # z轴
+ roll = (180 * atan2(-R[1][0], R[0][0]) / pi)
+ f = (0 > roll) - (0 < roll)
+ roll = f * (180 - fabs(roll))
+ if not fabs(roll) < 90.0:
+ roll = f * (180 - fabs(roll))
+ rot_params = [pitch, yaw, roll]
+ return rot_params
+
+ @staticmethod
+ def rot_params_rv(rvec_):
+ from math import pi, atan2, asin, fabs
+ R = cv2.Rodrigues(rvec_)[0]
+ # x轴
+ pitch = (180 * atan2(-R[2][1], R[2][2]) / pi)
+ f = (0 > pitch) - (0 < pitch)
+ pitch = f * (180 - fabs(pitch))
+ # y轴
+ yaw = -(180 * asin(R[2][0]) / pi)
+ # z轴
+ roll = (180 * atan2(-R[1][0], R[0][0]) / pi)
+ f = (0 > roll) - (0 < roll)
+ roll = f * (180 - fabs(roll))
+ rot_params = [pitch, yaw, roll]
+ return rot_params
+
+ def imageEulerAngle(self, img_points):
+ # 这里的img_points对应的是facePoints的第三个返回值,注意是facePoints而非facesPoints
+ # 对于facesPoints而言,需要把第三个返回值逐一取出再输入
+ # 把列表转为矩阵,且编码形式为float64
+ img_points = np.array(img_points, dtype=np.float64)
+ rvec, tvec = self.solve_pose_by_68_points(img_points)
+ # 旋转向量转旋转矩阵
+ R = cv2.Rodrigues(rvec)[0]
+ # theta = np.linalg.norm(rvec)
+ # r = rvec / theta
+ # R_ = np.array([[0, -r[2][0], r[1][0]],
+ # [r[2][0], 0, -r[0][0]],
+ # [-r[1][0], r[0][0], 0]])
+ # R = np.cos(theta) * np.eye(3) + (1 - np.cos(theta)) * r * r.T + np.sin(theta) * R_
+ # 旋转矩阵转欧拉角
+ eulerAngle = self.rot_params_rm(R)
+ # 返回一个元组和欧拉角列表
+ return (rvec, tvec, R), eulerAngle
+
+
+# if __name__ == "__main__":
+# # 示例
+# from hyService.utils import Debug
+# dg = Debug()
+# image_input = cv2.imread("./test.jpg") # 读取一张图片, 必须是三通道或者灰度图
+# fd68 = FaceDetection68() # 初始化人脸关键点检测类
+# dets_, landmark_, point_list_ = fd68.facePoints(image_input) # 输入图片. 检测单张人脸
+# # dets_, landmark_, point_list_ = fd68.facesPoints(input_image) # 输入图片. 检测多张人脸
+# img = fd68.draw_points(image_input, landmark_)
+# dg.cv_show(img)
+# pe = PoseEstimator68(image_input)
+# _, ea = pe.imageEulerAngle(point_list_) # 输入关键点列表, 如果要使用facesPoints,则输入的是point_list_[i]
+# print(ea) # 结果
\ No newline at end of file
diff --git a/hivisionai/hycv/__init__.py b/hivisionai/hycv/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3be8cd900606d8144e2972a1e018921231427eb1
--- /dev/null
+++ b/hivisionai/hycv/__init__.py
@@ -0,0 +1 @@
+from .utils import cover_mask, get_box, get_box_pro, filtering, cut, zoom_image_without_change_size
diff --git a/hivisionai/hycv/__pycache__/__init__.cpython-310.pyc b/hivisionai/hycv/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dc87c53c5eb2f0cca69cf9819885ce62bee6e2e2
Binary files /dev/null and b/hivisionai/hycv/__pycache__/__init__.cpython-310.pyc differ
diff --git a/hivisionai/hycv/__pycache__/__init__.cpython-38.pyc b/hivisionai/hycv/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7eec4b7a01323a6e3278b6e3efea7d2ccf14f5e7
Binary files /dev/null and b/hivisionai/hycv/__pycache__/__init__.cpython-38.pyc differ
diff --git a/hivisionai/hycv/__pycache__/error.cpython-310.pyc b/hivisionai/hycv/__pycache__/error.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ebb84b8710c163df963954b225137282ddb535db
Binary files /dev/null and b/hivisionai/hycv/__pycache__/error.cpython-310.pyc differ
diff --git a/hivisionai/hycv/__pycache__/face_tools.cpython-310.pyc b/hivisionai/hycv/__pycache__/face_tools.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5c784a0536b230016b75a5b41f8a882ca078511f
Binary files /dev/null and b/hivisionai/hycv/__pycache__/face_tools.cpython-310.pyc differ
diff --git a/hivisionai/hycv/__pycache__/face_tools.cpython-38.pyc b/hivisionai/hycv/__pycache__/face_tools.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8c143616f351071d6e91344e2f4a2606c2955056
Binary files /dev/null and b/hivisionai/hycv/__pycache__/face_tools.cpython-38.pyc differ
diff --git a/hivisionai/hycv/__pycache__/idphoto.cpython-310.pyc b/hivisionai/hycv/__pycache__/idphoto.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9ad6a67135ad5da7351f45a69ca3efbc8c1468c9
Binary files /dev/null and b/hivisionai/hycv/__pycache__/idphoto.cpython-310.pyc differ
diff --git a/hivisionai/hycv/__pycache__/matting_tools.cpython-310.pyc b/hivisionai/hycv/__pycache__/matting_tools.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..774727f7ae56bc7205e43efa83fbdba45372d007
Binary files /dev/null and b/hivisionai/hycv/__pycache__/matting_tools.cpython-310.pyc differ
diff --git a/hivisionai/hycv/__pycache__/matting_tools.cpython-38.pyc b/hivisionai/hycv/__pycache__/matting_tools.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5328331f0e61ccd84ac54450e5087ee481e1bcd0
Binary files /dev/null and b/hivisionai/hycv/__pycache__/matting_tools.cpython-38.pyc differ
diff --git a/hivisionai/hycv/__pycache__/tensor2numpy.cpython-310.pyc b/hivisionai/hycv/__pycache__/tensor2numpy.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..950f449a852a2924f5f87278e357427717224c76
Binary files /dev/null and b/hivisionai/hycv/__pycache__/tensor2numpy.cpython-310.pyc differ
diff --git a/hivisionai/hycv/__pycache__/tensor2numpy.cpython-38.pyc b/hivisionai/hycv/__pycache__/tensor2numpy.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..04f20d8dc9eae5f2ebdd291b949dcbbd083b652c
Binary files /dev/null and b/hivisionai/hycv/__pycache__/tensor2numpy.cpython-38.pyc differ
diff --git a/hivisionai/hycv/__pycache__/utils.cpython-310.pyc b/hivisionai/hycv/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c27f34fca27facf0ffdd774b222621178f7944df
Binary files /dev/null and b/hivisionai/hycv/__pycache__/utils.cpython-310.pyc differ
diff --git a/hivisionai/hycv/__pycache__/utils.cpython-38.pyc b/hivisionai/hycv/__pycache__/utils.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..782695e56b68d37506f4da87f8429a1b23e6b666
Binary files /dev/null and b/hivisionai/hycv/__pycache__/utils.cpython-38.pyc differ
diff --git a/hivisionai/hycv/__pycache__/vision.cpython-310.pyc b/hivisionai/hycv/__pycache__/vision.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3b6467e9ac91ea09b9271a310f5833f781b56779
Binary files /dev/null and b/hivisionai/hycv/__pycache__/vision.cpython-310.pyc differ
diff --git a/hivisionai/hycv/__pycache__/vision.cpython-38.pyc b/hivisionai/hycv/__pycache__/vision.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ba5b86258413d7c25ae5085ba01278b1bed4e62e
Binary files /dev/null and b/hivisionai/hycv/__pycache__/vision.cpython-38.pyc differ
diff --git a/hivisionai/hycv/error.py b/hivisionai/hycv/error.py
new file mode 100644
index 0000000000000000000000000000000000000000..26e1ba1faacf175d3409cceb9f66ea57c58947b9
--- /dev/null
+++ b/hivisionai/hycv/error.py
@@ -0,0 +1,16 @@
+"""
+定义hycv的一些错误类型,其实和hyService大致相同
+"""
+class ProcessError(Exception):
+ def __init__(self, err):
+ super().__init__(err)
+ self.err = err
+ def __str__(self):
+ return self.err
+
+class WrongImageType(TypeError):
+ def __init__(self, err):
+ super().__init__(err)
+ self.err = err
+ def __str__(self):
+ return self.err
\ No newline at end of file
diff --git a/hivisionai/hycv/face_tools.py b/hivisionai/hycv/face_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..6bb156ee70bf5142cf2a61f04094dcfe8c80319a
--- /dev/null
+++ b/hivisionai/hycv/face_tools.py
@@ -0,0 +1,427 @@
+import cv2
+import os
+import onnxruntime
+from .mtcnn_onnx.detector import detect_faces
+from .tensor2numpy import *
+from PIL import Image
+import requests
+from os.path import exists
+
+
+def download_img(img_url, base_dir):
+ print("Downloading Onnx Model in:", img_url)
+ r = requests.get(img_url, stream=True)
+ filename = img_url.split("/")[-1]
+ # print(r.status_code) # 返回状态码
+ if r.status_code == 200:
+        with open(f'{base_dir}/{filename}', 'wb') as f:  # write the downloaded content to a local file
+            f.write(r.content)
+        print(f"Download Finished -- {filename}")
+ del r
+
+class BBox(object):
+ # bbox is a list of [left, right, top, bottom]
+ def __init__(self, bbox):
+ self.left = bbox[0]
+ self.right = bbox[1]
+ self.top = bbox[2]
+ self.bottom = bbox[3]
+ self.x = bbox[0]
+ self.y = bbox[2]
+ self.w = bbox[1] - bbox[0]
+ self.h = bbox[3] - bbox[2]
+
+ # scale to [0,1]
+ def projectLandmark(self, landmark):
+ landmark_= np.asarray(np.zeros(landmark.shape))
+ for i, point in enumerate(landmark):
+ landmark_[i] = ((point[0]-self.x)/self.w, (point[1]-self.y)/self.h)
+ return landmark_
+
+ # landmark of (5L, 2L) from [0,1] to real range
+ def reprojectLandmark(self, landmark):
+ landmark_= np.asarray(np.zeros(landmark.shape))
+ for i, point in enumerate(landmark):
+ x = point[0] * self.w + self.x
+ y = point[1] * self.h + self.y
+ landmark_[i] = (x, y)
+ return landmark_
+
+
+def face_detect_mtcnn(input_image, color_key=None, filter=None):
+ """
+ Inputs:
+ - input_image: OpenCV Numpy.array
+ - color_key: 当color_key等于"RGB"时,将不进行转换操作
+ - filter:当filter等于True时,将抛弃掉置信度小于0.98或人脸框面积小于3600的人脸
+ return:
+ - faces: 带有人脸信息的变量
+ - landmarks: face alignment
+ """
+ if color_key != "RGB":
+ input_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)
+
+ input_image = Image.fromarray(input_image)
+ faces, landmarks = detect_faces(input_image)
+
+ if filter:
+ face_clean = []
+ for face in faces:
+ confidence = face[-1]
+ x1 = face[0]
+ y1 = face[1]
+ x2 = face[2]
+ y2 = face[3]
+ w = x2 - x1 + 1
+ h = y2 - y1 + 1
+ measure = w * h
+ if confidence >= 0.98 and measure > 3600:
+                # keep only faces with confidence >= 0.98 and a bounding-box area > 3600; others are discarded
+ face_clean.append(face)
+ faces = face_clean
+
+ return faces, landmarks
+
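+# Hedged usage sketch (illustrative path; requires the MTCNN ONNX weights to be available):
+# img = cv2.imread("some_face.jpg")                        # BGR image from OpenCV
+# faces, landmarks = face_detect_mtcnn(img, filter=True)   # keep confident, reasonably large faces
+# for face in faces:
+#     x1, y1, x2, y2, confidence = face                    # each face is [x1, y1, x2, y2, score]
+#     print(confidence, (x2 - x1) * (y2 - y1))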
+
+def mtcnn_bbox(face, width, height):
+ x1 = face[0]
+ y1 = face[1]
+ x2 = face[2]
+ y2 = face[3]
+ w = x2 - x1 + 1
+ h = y2 - y1 + 1
+
+ size = int(max([w, h]) * 1.1)
+ cx = x1 + w // 2
+ cy = y1 + h // 2
+ x1 = cx - size // 2
+ x2 = x1 + size
+ y1 = cy - size // 2
+ y2 = y1 + size
+
+ dx = max(0, -x1)
+ dy = max(0, -y1)
+ x1 = max(0, x1)
+ y1 = max(0, y1)
+
+ edx = max(0, x2 - width)
+ edy = max(0, y2 - height)
+ x2 = min(width, x2)
+ y2 = min(height, y2)
+
+ return x1, x2, y1, y2, dx, dy, edx, edy
+
+
+def mtcnn_cropped_face(face_box, image, width, height):
+ x1, x2, y1, y2, dx, dy, edx, edy = mtcnn_bbox(face_box, width, height)
+ new_bbox = list(map(int, [x1, x2, y1, y2]))
+ new_bbox = BBox(new_bbox)
+ cropped = image[new_bbox.top:new_bbox.bottom, new_bbox.left:new_bbox.right]
+ if (dx > 0 or dy > 0 or edx > 0 or edy > 0):
+ cropped = cv2.copyMakeBorder(cropped, int(dy), int(edy), int(dx), int(edx), cv2.BORDER_CONSTANT, 0)
+ return cropped, new_bbox
+
+
+def face_landmark_56(input_image, faces_box=None):
+    basedir = os.path.dirname(os.path.realpath(__file__))
+ mean = np.asarray([0.485, 0.456, 0.406])
+ std = np.asarray([0.229, 0.224, 0.225])
+ base_url = "https://linimages.oss-cn-beijing.aliyuncs.com/"
+
+ if not exists(f"{basedir}/mtcnn_onnx/weights/landmark_detection_56_se_external.onnx"):
+ # download onnx model
+ download_img(img_url=base_url + "landmark_detection_56_se_external.onnx",
+ base_dir=f"{basedir}/mtcnn_onnx/weights")
+
+ ort_session = onnxruntime.InferenceSession(f"{basedir}/mtcnn_onnx/weights/landmark_detection_56_se_external.onnx")
+ out_size = 56
+
+ height, width, _ = input_image.shape
+ if faces_box is None:
+ faces_box, _ = face_detect_mtcnn(input_image)
+
+ if len(faces_box) == 0:
+ print('NO face is detected!')
+ return None
+ else:
+ landmarks = []
+ for face_box in faces_box:
+ cropped, new_bbox = mtcnn_cropped_face(face_box, input_image, width, height)
+ cropped_face = cv2.resize(cropped, (out_size, out_size))
+
+ test_face = NNormalize(cropped_face, mean=mean, std=std)
+ test_face = NTo_Tensor(test_face)
+ test_face = NUnsqueeze(test_face)
+
+ ort_inputs = {ort_session.get_inputs()[0].name: test_face}
+ ort_outs = ort_session.run(None, ort_inputs)
+
+ landmark = ort_outs[0]
+
+ landmark = landmark.reshape(-1, 2)
+ landmark = new_bbox.reprojectLandmark(landmark)
+ landmarks.append(landmark)
+
+ return landmarks
+
+
+
+REFERENCE_FACIAL_POINTS = [
+ [30.29459953, 51.69630051],
+ [65.53179932, 51.50139999],
+ [48.02519989, 71.73660278],
+ [33.54930115, 92.3655014],
+ [62.72990036, 92.20410156]
+]
+
+DEFAULT_CROP_SIZE = (96, 112)
+
+
+def _umeyama(src, dst, estimate_scale=True, scale=1.0):
+ """Estimate N-D similarity transformation with or without scaling.
+ Parameters
+ ----------
+ src : (M, N) array
+ Source coordinates.
+ dst : (M, N) array
+ Destination coordinates.
+ estimate_scale : bool
+ Whether to estimate scaling factor.
+ Returns
+ -------
+ T : (N + 1, N + 1)
+ The homogeneous similarity transformation matrix. The matrix contains
+ NaN values only if the problem is not well-conditioned.
+ References
+ ----------
+ .. [1] "Least-squares estimation of transformation parameters between two
+ point patterns", Shinji Umeyama, PAMI 1991, :DOI:`10.1109/34.88573`
+ """
+
+ num = src.shape[0]
+ dim = src.shape[1]
+
+ # Compute mean of src and dst.
+ src_mean = src.mean(axis=0)
+ dst_mean = dst.mean(axis=0)
+
+ # Subtract mean from src and dst.
+ src_demean = src - src_mean
+ dst_demean = dst - dst_mean
+
+ # Eq. (38).
+ A = dst_demean.T @ src_demean / num
+
+ # Eq. (39).
+ d = np.ones((dim,), dtype=np.double)
+ if np.linalg.det(A) < 0:
+ d[dim - 1] = -1
+
+ T = np.eye(dim + 1, dtype=np.double)
+
+ U, S, V = np.linalg.svd(A)
+
+ # Eq. (40) and (43).
+ rank = np.linalg.matrix_rank(A)
+ if rank == 0:
+        return np.nan * T, scale  # keep the (T, scale) return signature even for degenerate input
+ elif rank == dim - 1:
+ if np.linalg.det(U) * np.linalg.det(V) > 0:
+ T[:dim, :dim] = U @ V
+ else:
+ s = d[dim - 1]
+ d[dim - 1] = -1
+ T[:dim, :dim] = U @ np.diag(d) @ V
+ d[dim - 1] = s
+ else:
+ T[:dim, :dim] = U @ np.diag(d) @ V
+
+ if estimate_scale:
+ # Eq. (41) and (42).
+ scale = 1.0 / src_demean.var(axis=0).sum() * (S @ d)
+    # otherwise keep the caller-supplied scale unchanged
+
+ T[:dim, dim] = dst_mean - scale * (T[:dim, :dim] @ src_mean.T)
+ T[:dim, :dim] *= scale
+
+ return T, scale
+
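+# Hedged sketch of what _umeyama produces (illustrative points): the returned T is a 3x3
+# homogeneous similarity transform (rotation + uniform scale + translation) mapping src to dst.
+# src = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
+# dst = src * 2.0 + 5.0                       # scaled by 2 and translated by (5, 5)
+# T, scale = _umeyama(src, dst)               # scale ~= 2.0
+# mapped = (T @ np.hstack([src, np.ones((3, 1))]).T).T[:, :2]   # ~= dst
+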
+
+class FaceWarpException(Exception):
+ def __str__(self):
+        return 'In File {}:{}'.format(
+            __file__, super().__str__())
+
+
+def get_reference_facial_points_5(output_size=None,
+ inner_padding_factor=0.0,
+ outer_padding=(0, 0),
+ default_square=False):
+ tmp_5pts = np.array(REFERENCE_FACIAL_POINTS)
+ tmp_crop_size = np.array(DEFAULT_CROP_SIZE)
+
+ # 0) make the inner region a square
+ if default_square:
+ size_diff = max(tmp_crop_size) - tmp_crop_size
+ tmp_5pts += size_diff / 2
+ tmp_crop_size += size_diff
+
+ if (output_size and
+ output_size[0] == tmp_crop_size[0] and
+ output_size[1] == tmp_crop_size[1]):
+ print('output_size == DEFAULT_CROP_SIZE {}: return default reference points'.format(tmp_crop_size))
+ return tmp_5pts
+
+ if (inner_padding_factor == 0 and
+ outer_padding == (0, 0)):
+ if output_size is None:
+ print('No paddings to do: return default reference points')
+ return tmp_5pts
+ else:
+ raise FaceWarpException(
+ 'No paddings to do, output_size must be None or {}'.format(tmp_crop_size))
+
+ # check output size
+ if not (0 <= inner_padding_factor <= 1.0):
+ raise FaceWarpException('Not (0 <= inner_padding_factor <= 1.0)')
+
+ if ((inner_padding_factor > 0 or outer_padding[0] > 0 or outer_padding[1] > 0)
+ and output_size is None):
+        output_size = (tmp_crop_size * (1 + inner_padding_factor * 2)).astype(np.int32)
+ output_size += np.array(outer_padding)
+ print(' deduced from paddings, output_size = ', output_size)
+
+ if not (outer_padding[0] < output_size[0]
+ and outer_padding[1] < output_size[1]):
+ raise FaceWarpException('Not (outer_padding[0] < output_size[0]'
+ 'and outer_padding[1] < output_size[1])')
+
+ # 1) pad the inner region according inner_padding_factor
+ # print('---> STEP1: pad the inner region according inner_padding_factor')
+ if inner_padding_factor > 0:
+ size_diff = tmp_crop_size * inner_padding_factor * 2
+ tmp_5pts += size_diff / 2
+ tmp_crop_size += np.round(size_diff).astype(np.int32)
+
+ # print(' crop_size = ', tmp_crop_size)
+ # print(' reference_5pts = ', tmp_5pts)
+
+ # 2) resize the padded inner region
+ # print('---> STEP2: resize the padded inner region')
+ size_bf_outer_pad = np.array(output_size) - np.array(outer_padding) * 2
+ # print(' crop_size = ', tmp_crop_size)
+ # print(' size_bf_outer_pad = ', size_bf_outer_pad)
+
+ if size_bf_outer_pad[0] * tmp_crop_size[1] != size_bf_outer_pad[1] * tmp_crop_size[0]:
+ raise FaceWarpException('Must have (output_size - outer_padding)'
+ '= some_scale * (crop_size * (1.0 + inner_padding_factor)')
+
+ scale_factor = size_bf_outer_pad[0].astype(np.float32) / tmp_crop_size[0]
+ # print(' resize scale_factor = ', scale_factor)
+ tmp_5pts = tmp_5pts * scale_factor
+ # size_diff = tmp_crop_size * (scale_factor - min(scale_factor))
+ # tmp_5pts = tmp_5pts + size_diff / 2
+ tmp_crop_size = size_bf_outer_pad
+ # print(' crop_size = ', tmp_crop_size)
+ # print(' reference_5pts = ', tmp_5pts)
+
+ # 3) add outer_padding to make output_size
+ reference_5point = tmp_5pts + np.array(outer_padding)
+ tmp_crop_size = output_size
+ # print('---> STEP3: add outer_padding to make output_size')
+ # print(' crop_size = ', tmp_crop_size)
+ # print(' reference_5pts = ', tmp_5pts)
+ #
+ # print('===> end get_reference_facial_points\n')
+
+ return reference_5point
+
+
+def get_affine_transform_matrix(src_pts, dst_pts):
+ tfm = np.float32([[1, 0, 0], [0, 1, 0]])
+ n_pts = src_pts.shape[0]
+ ones = np.ones((n_pts, 1), src_pts.dtype)
+ src_pts_ = np.hstack([src_pts, ones])
+ dst_pts_ = np.hstack([dst_pts, ones])
+
+    A, res, rank, s = np.linalg.lstsq(src_pts_, dst_pts_, rcond=None)
+
+ if rank == 3:
+ tfm = np.float32([
+ [A[0, 0], A[1, 0], A[2, 0]],
+ [A[0, 1], A[1, 1], A[2, 1]]
+ ])
+ elif rank == 2:
+ tfm = np.float32([
+ [A[0, 0], A[1, 0], 0],
+ [A[0, 1], A[1, 1], 0]
+ ])
+
+ return tfm
+
+
+def warp_and_crop_face(src_img,
+ facial_pts,
+ reference_pts=None,
+ crop_size=(96, 112),
+                       align_type='similarity'):  # similarity | cv2_affine | affine
+ if reference_pts is None:
+ if crop_size[0] == 96 and crop_size[1] == 112:
+ reference_pts = REFERENCE_FACIAL_POINTS
+ else:
+ default_square = False
+ inner_padding_factor = 0
+ outer_padding = (0, 0)
+ output_size = crop_size
+
+ reference_pts = get_reference_facial_points_5(output_size,
+ inner_padding_factor,
+ outer_padding,
+ default_square)
+
+ ref_pts = np.float32(reference_pts)
+ ref_pts_shp = ref_pts.shape
+ if max(ref_pts_shp) < 3 or min(ref_pts_shp) != 2:
+ raise FaceWarpException(
+ 'reference_pts.shape must be (K,2) or (2,K) and K>2')
+
+ if ref_pts_shp[0] == 2:
+ ref_pts = ref_pts.T
+
+ src_pts = np.float32(facial_pts)
+ src_pts_shp = src_pts.shape
+ if max(src_pts_shp) < 3 or min(src_pts_shp) != 2:
+ raise FaceWarpException(
+ 'facial_pts.shape must be (K,2) or (2,K) and K>2')
+
+ if src_pts_shp[0] == 2:
+ src_pts = src_pts.T
+
+ if src_pts.shape != ref_pts.shape:
+ raise FaceWarpException(
+ 'facial_pts and reference_pts must have the same shape')
+
+ if align_type == 'cv2_affine':
+ tfm = cv2.getAffineTransform(src_pts[0:3], ref_pts[0:3])
+ tfm_inv = cv2.getAffineTransform(ref_pts[0:3], src_pts[0:3])
+ elif align_type == 'affine':
+ tfm = get_affine_transform_matrix(src_pts, ref_pts)
+ tfm_inv = get_affine_transform_matrix(ref_pts, src_pts)
+ else:
+ params, scale = _umeyama(src_pts, ref_pts)
+ tfm = params[:2, :]
+
+ params, _ = _umeyama(ref_pts, src_pts, False, scale=1.0/scale)
+ tfm_inv = params[:2, :]
+
+ face_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1]), flags=3)
+
+ return face_img, tfm_inv
+
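+# Hedged usage sketch: warp a face so that its 5 landmarks land on the 96x112 reference
+# template (pts5 is assumed to already be an array of 5 (x, y) points for one face; values are illustrative).
+# img = cv2.imread("some_face.jpg")
+# pts5 = np.float32([[196, 236], [320, 233], [258, 301], [212, 357], [306, 360]])
+# aligned, tfm_inv = warp_and_crop_face(img, pts5, crop_size=(96, 112))
+# cv2.imwrite("aligned.png", aligned)
+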
+
+if __name__ == "__main__":
+ image = cv2.imread("/home/parallels/Desktop/IDPhotos/input_image/03.jpg")
+ face_detect_mtcnn(image)
+
+
diff --git a/hivisionai/hycv/idphoto.py b/hivisionai/hycv/idphoto.py
new file mode 100644
index 0000000000000000000000000000000000000000..848cae6fadeb01499ef393df4b2df4093850048e
--- /dev/null
+++ b/hivisionai/hycv/idphoto.py
@@ -0,0 +1,2 @@
+from .idphotoTool.idphoto_cut import IDphotos_create
+from .idphotoTool.idphoto_change_cloth import change_cloth
\ No newline at end of file
diff --git a/hivisionai/hycv/idphotoTool/__init__.py b/hivisionai/hycv/idphotoTool/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/hivisionai/hycv/idphotoTool/__pycache__/__init__.cpython-310.pyc b/hivisionai/hycv/idphotoTool/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dd6a61ad3539e54bb3593f14612509154c4838b9
Binary files /dev/null and b/hivisionai/hycv/idphotoTool/__pycache__/__init__.cpython-310.pyc differ
diff --git a/hivisionai/hycv/idphotoTool/__pycache__/cuny_tools.cpython-310.pyc b/hivisionai/hycv/idphotoTool/__pycache__/cuny_tools.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bc25000d58e3bee0282524868657f71e9764efa1
Binary files /dev/null and b/hivisionai/hycv/idphotoTool/__pycache__/cuny_tools.cpython-310.pyc differ
diff --git a/hivisionai/hycv/idphotoTool/__pycache__/idphoto_change_cloth.cpython-310.pyc b/hivisionai/hycv/idphotoTool/__pycache__/idphoto_change_cloth.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b794e50da57719458ea43011ad9ced8475b0b053
Binary files /dev/null and b/hivisionai/hycv/idphotoTool/__pycache__/idphoto_change_cloth.cpython-310.pyc differ
diff --git a/hivisionai/hycv/idphotoTool/__pycache__/idphoto_cut.cpython-310.pyc b/hivisionai/hycv/idphotoTool/__pycache__/idphoto_cut.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1f2f8f5b87ac0cfec65c5eb1a0ad1af23140d0ca
Binary files /dev/null and b/hivisionai/hycv/idphotoTool/__pycache__/idphoto_cut.cpython-310.pyc differ
diff --git a/hivisionai/hycv/idphotoTool/__pycache__/move_image.cpython-310.pyc b/hivisionai/hycv/idphotoTool/__pycache__/move_image.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9255bc94f2b280821250ed077bcd91c7f9db6ac9
Binary files /dev/null and b/hivisionai/hycv/idphotoTool/__pycache__/move_image.cpython-310.pyc differ
diff --git a/hivisionai/hycv/idphotoTool/__pycache__/neck_processing.cpython-310.pyc b/hivisionai/hycv/idphotoTool/__pycache__/neck_processing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..16625d185ae534a34905ec84719155473d2c528e
Binary files /dev/null and b/hivisionai/hycv/idphotoTool/__pycache__/neck_processing.cpython-310.pyc differ
diff --git a/hivisionai/hycv/idphotoTool/cuny_tools.py b/hivisionai/hycv/idphotoTool/cuny_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f9920e4c27c95be03a5a2d1ae62781797a7e87c
--- /dev/null
+++ b/hivisionai/hycv/idphotoTool/cuny_tools.py
@@ -0,0 +1,593 @@
+import cv2
+import numpy as np
+from ..utils import get_box_pro
+from ..vision import cover_image, draw_picture_dots
+
+
+def transformationNeck2(image:np.ndarray, per_to_side:float=0.8)->np.ndarray:
+ """
+ 透视变换脖子函数,输入图像和四个点(矩形框)
+ 矩形框内的图像可能是不完整的(边角有透明区域)
+ 我们将根据透视变换将矩形框内的图像拉伸成和矩形框一样的形状.
+ 算法分为几个步骤: 选择脖子的四个点 -> 选定这四个点拉伸后的坐标 -> 透视变换 -> 覆盖原图
+ """
+ _, _, _, a = cv2.split(image) # 这应该是一个四通道的图像
+ height, width = a.shape
+ def locate_side(image_:np.ndarray, x_:int, y_max:int) -> int:
+ # 寻找x=y, 且 y <= y_max 上从下往上第一个非0的点,如果没找到就返回0
+ y_ = 0
+ for y_ in range(y_max - 1, -1, -1):
+ if image_[y_][x_] != 0:
+ break
+ return y_
+ def locate_width(image_:np.ndarray, y_:int, mode, left_or_right:int=None):
+ # 从y=y这个水平线上寻找两边的非零点
+ # 增加left_or_right的原因在于为下面check_jaw服务
+ if mode==1: # 左往右
+ x_ = 0
+ if left_or_right is None:
+ left_or_right = 0
+ for x_ in range(left_or_right, width):
+ if image_[y_][x_] != 0:
+ break
+ else: # 右往左
+ x_ = width
+ if left_or_right is None:
+ left_or_right = width - 1
+ for x_ in range(left_or_right, -1, -1):
+ if image_[y_][x_] != 0:
+ break
+ return x_
+ def check_jaw(image_:np.ndarray, left_, right_):
+ """
+ 检查选择的点是否与截到下巴,如果截到了,就往下平移一个单位
+ """
+ f= True # True代表没截到下巴
+ # [x, y]
+ for x_cell in range(left_[0] + 1, right_[0]):
+ if image_[left_[1]][x_cell] == 0:
+ f = False
+ break
+ if f is True:
+ return left_, right_
+ else:
+ y_ = left_[1] + 2
+ x_left_ = locate_width(image_, y_, mode=1, left_or_right=left_[0])
+ x_right_ = locate_width(image_, y_, mode=2, left_or_right=right_[0])
+ left_, right_ = check_jaw(image_, [x_left_, y_], [x_right_, y_])
+ return left_, right_
+ # 选择脖子的四个点,核心在于选择上面的两个点,这两个点的确定的位置应该是"宽出来的"两个点
+ _, _ ,_, a = cv2.split(image) # 这应该是一个四通道的图像
+ ret,a_thresh = cv2.threshold(a,127,255,cv2.THRESH_BINARY)
+ y_high, y_low, x_left, x_right = get_box_pro(image=image, model=1) # 直接返回矩阵信息
+ y_left_side = locate_side(image_=a_thresh, x_=x_left, y_max=y_low) # 左边的点的y轴坐标
+ y_right_side = locate_side(image_=a_thresh, x_=x_right, y_max=y_low) # 右边的点的y轴坐标
+ y = min(y_left_side, y_right_side) # 将两点的坐标保持相同
+ cell_left_above, cell_right_above = check_jaw(a_thresh,[x_left, y], [x_right, y])
+ x_left, x_right = cell_left_above[0], cell_right_above[0]
+ # 此时我们寻找到了脖子的"宽出来的"两个点,这两个点作为上面的两个点, 接下来寻找下面的两个点
+    if per_to_side > 1:
+        # `assert ValueError(...)` never fails because the exception object is truthy; raise is intended
+        raise ValueError("per_to_side must be less than 1!")
+    # The perspective transform below stretches this region into a rectangle; record its height and width first.
+    height_ = 150  # should really depend on how far we stretch, but is fixed to 150 for now
+    width_ = x_right - x_left  # i.e. cell_right_above[1] - cell_left_above[1]
+    y = int((y_low - y) * per_to_side + y)  # y coordinate of the lower pair of points
+    cell_left_below = [locate_width(a_thresh, y_=y, mode=1), y]
+    cell_right_bellow = [locate_width(a_thresh, y_=y, mode=2), y]
+ # 四个点全齐,开始透视变换
+ # 寻找透视变换后的四个点,只需要变换below的两个点即可
+ # cell_left_below_final, cell_right_bellow_final = ([cell_left_above[1], y_low], [cell_right_above[1], y_low])
+ # 需要变换的四个点为 cell_left_above, cell_right_above, cell_left_below, cell_right_bellow
+ rect = np.array([cell_left_above, cell_right_above, cell_left_below, cell_right_bellow],
+ dtype='float32')
+ # 变化后的坐标点
+ dst = np.array([[0, 0], [width_, 0], [0 , height_], [width_, height_]],
+ dtype='float32')
+ # 计算变换矩阵
+ M = cv2.getPerspectiveTransform(rect, dst)
+ warped = cv2.warpPerspective(image, M, (width_, height_))
+ final = cover_image(image=warped, background=image, mode=3, x=cell_left_above[0], y=cell_left_above[1])
+ # tmp = np.zeros(image.shape)
+ # final = cover_image(image=warped, background=tmp, mode=3, x=cell_left_above[0], y=cell_left_above[1])
+ # final = cover_image(image=image, background=final, mode=3, x=0, y=0)
+ return final
+
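+# Illustrative sketch (not part of the original algorithm) of the perspective-warp step used above:
+# four source points are mapped onto an axis-aligned rectangle with cv2.getPerspectiveTransform /
+# cv2.warpPerspective. The quadrilateral here is synthetic; in transformationNeck2 it comes from the neck contour.
+def _perspective_warp_demo():
+    canvas = np.zeros((200, 200, 4), dtype=np.uint8)
+    cv2.rectangle(canvas, (40, 40), (160, 160), (255, 255, 255, 255), -1)
+    # source quadrilateral in the same order as `rect`: top-left, top-right, bottom-left, bottom-right
+    src = np.array([[50, 60], [150, 55], [45, 170], [155, 175]], dtype='float32')
+    to_w, to_h = 110, 120
+    dst = np.array([[0, 0], [to_w, 0], [0, to_h], [to_w, to_h]], dtype='float32')
+    M = cv2.getPerspectiveTransform(src, dst)
+    return cv2.warpPerspective(canvas, M, (to_w, to_h))
+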
+def transformationNeck(image:np.ndarray, cutNeckHeight:int, neckBelow:int,
+ toHeight:int,per_to_side:float=0.75) -> np.ndarray:
+ """
+ 脖子扩充算法, 其实需要输入的只是脖子扣出来的部分以及需要被扩充的高度/需要被扩充成的高度.
+ """
+ height, width, channels = image.shape
+ _, _, _, a = cv2.split(image) # 这应该是一个四通道的图像
+ ret, a_thresh = cv2.threshold(a, 127, 255, cv2.THRESH_BINARY) # 将透明图层二值化
+ def locate_width(image_:np.ndarray, y_:int, mode, left_or_right:int=None):
+ # 从y=y这个水平线上寻找两边的非零点
+ # 增加left_or_right的原因在于为下面check_jaw服务
+ if mode==1: # 左往右
+ x_ = 0
+ if left_or_right is None:
+ left_or_right = 0
+ for x_ in range(left_or_right, width):
+ if image_[y_][x_] != 0:
+ break
+ else: # 右往左
+ x_ = width
+ if left_or_right is None:
+ left_or_right = width - 1
+ for x_ in range(left_or_right, -1, -1):
+ if image_[y_][x_] != 0:
+ break
+ return x_
+ def check_jaw(image_:np.ndarray, left_, right_):
+ """
+ 检查选择的点是否与截到下巴,如果截到了,就往下平移一个单位
+ """
+ f= True # True代表没截到下巴
+ # [x, y]
+ for x_cell in range(left_[0] + 1, right_[0]):
+ if image_[left_[1]][x_cell] == 0:
+ f = False
+ break
+ if f is True:
+ return left_, right_
+ else:
+ y_ = left_[1] + 2
+ x_left_ = locate_width(image_, y_, mode=1, left_or_right=left_[0])
+ x_right_ = locate_width(image_, y_, mode=2, left_or_right=right_[0])
+ left_, right_ = check_jaw(image_, [x_left_, y_], [x_right_, y_])
+ return left_, right_
+ x_left = locate_width(image_=a_thresh, mode=1, y_=cutNeckHeight)
+ x_right = locate_width(image_=a_thresh, mode=2, y_=cutNeckHeight)
+ # 在这里我们取消了对下巴的检查,原因在于输入的imageHeight并不能改变
+ # cell_left_above, cell_right_above = check_jaw(a_thresh, [x_left, imageHeight], [x_right, imageHeight])
+ cell_left_above, cell_right_above = [x_left, cutNeckHeight], [x_right, cutNeckHeight]
+ toWidth = x_right - x_left # 矩形宽
+ # 此时我们寻找到了脖子的"宽出来的"两个点,这两个点作为上面的两个点, 接下来寻找下面的两个点
+    if per_to_side > 1:
+        raise ValueError("per_to_side must be less than 1!")
+ y_below = int((neckBelow - cutNeckHeight) * per_to_side + cutNeckHeight) # 定位y轴坐标
+ cell_left_below = [locate_width(a_thresh, y_=y_below, mode=1), y_below]
+ cell_right_bellow = [locate_width(a_thresh, y_=y_below, mode=2), y_below]
+ # 四个点全齐,开始透视变换
+ # 需要变换的四个点为 cell_left_above, cell_right_above, cell_left_below, cell_right_bellow
+ rect = np.array([cell_left_above, cell_right_above, cell_left_below, cell_right_bellow],
+ dtype='float32')
+ # 变化后的坐标点
+ dst = np.array([[0, 0], [toWidth, 0], [0 , toHeight], [toWidth, toHeight]],
+ dtype='float32')
+ M = cv2.getPerspectiveTransform(rect, dst)
+ warped = cv2.warpPerspective(image, M, (toWidth, toHeight))
+ # 将变换后的图像覆盖到原图上
+ final = cover_image(image=warped, background=image, mode=3, x=cell_left_above[0], y=cell_left_above[1])
+ return final
+
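+# Illustrative sketch (not used by the functions above) of the scan-line idea behind locate_width:
+# on one row of a binarized alpha channel, find the first non-zero column from the left and from the right.
+def _scanline_demo():
+    row = np.array([0, 0, 0, 255, 255, 255, 0, 0], dtype=np.uint8)
+    nonzero = np.flatnonzero(row)
+    left = int(nonzero[0]) if nonzero.size else -1    # -> 3
+    right = int(nonzero[-1]) if nonzero.size else -1  # -> 5
+    return left, right
+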
+def bestJunctionCheck_beta(image:np.ndarray, stepSize:int=4, if_per:bool=False):
+ """
+ 最优衔接点检测算法, 去寻找脖子的"拐点"
+ """
+ point_k = 1
+ _, _, _, a = cv2.split(image) # 这应该是一个四通道的图像
+ height, width = a.shape
+ ret, a_thresh = cv2.threshold(a, 127, 255, cv2.THRESH_BINARY) # 将透明图层二值化
+ y_high, y_low, x_left, x_right = get_box_pro(image=image, model=1) # 直接返回矩阵信息
+ def scan(y_:int, max_num:int=2):
+ num = 0
+ left = False
+ right = False
+ for x_ in range(width):
+ if a_thresh[y_][x_] != 0:
+ if x_ < width // 2 and left is False:
+ num += 1
+ left = True
+ elif x_ > width // 2 and right is False:
+ num += 1
+ right = True
+ return True if num >= max_num else False
+ def locate_neck_above():
+ """
+ 定位脖子的尖尖脚
+ """
+ for y_ in range( y_high - 2, height):
+ if scan(y_):
+ return y_, y_
+ y_high_left, y_high_right = locate_neck_above()
+ def locate_width_pro(image_:np.ndarray, y_:int, mode):
+ """
+ 这会是一个生成器,用于生成脖子两边的轮廓
+ x_, y_ 是启始点的坐标,每一次寻找都会让y_+1
+ mode==1说明是找左边的边,即,image_[y_][x_] == 0 且image_[y_][x_ + 1] !=0 时跳出;
+ 否则 当image_[y_][x_] != 0 时, x_ - 1; 当image_[y_][x_] == 0 且 image_[y_][x_ + 1] ==0 时x_ + 1
+ mode==2说明是找右边的边,即,image_[y_][x_] == 0 且image_[y_][x_ - 1] !=0 时跳出
+ 否则 当image_[y_][x_] != 0 时, x_ + 1; 当image_[y_][x_] == 0 且 image_[y_][x_ - 1] ==0 时x_ - 1
+ """
+ y_ += 1
+ if mode == 1:
+ x_ = 0
+ while 0 <= y_ < height and 0 <= x_ < width:
+ while image_[y_][x_] != 0 and x_ >= 0:
+ x_ -= 1
+ while image_[y_][x_] == 0 and image_[y_][x_ + 1] == 0 and x_ < width - 2:
+ x_ += 1
+ yield [y_, x_]
+ y_ += 1
+ elif mode == 2:
+ x_ = width-1
+ while 0 <= y_ < height and 0 <= x_ < width:
+ while image_[y_][x_] != 0 and x_ < width - 2: x_ += 1
+ while image_[y_][x_] == 0 and image_[y_][x_ - 1] == 0 and x_ >= 0: x_ -= 1
+ yield [y_, x_]
+ y_ += 1
+ yield False
+ def kGenerator(image_:np.ndarray, mode):
+ """
+ 导数生成器,用来生成每一个点对应的导数
+ """
+ y_ = y_high_left if mode == 1 else y_high_right
+ c_generator = locate_width_pro(image_=image_, y_=y_, mode=mode)
+ for cell in c_generator:
+ nc = locate_width_pro(image_=image_, y_=cell[0] + stepSize, mode=mode)
+ nextCell = next(nc)
+ if nextCell is False:
+ yield False, False
+ else:
+ k = (cell[1] - nextCell[1]) / stepSize
+ yield k, cell
+ def findPt(image_:np.ndarray, mode):
+ k_generator = kGenerator(image_=image_, mode=mode)
+ k, cell = next(k_generator)
+ k_next, cell_next = next(k_generator)
+ if k is False:
+ raise ValueError("无法找到拐点!")
+ while k_next is not False:
+ k_next, cell_next = next(k_generator)
+ if (k_next < - 1 / stepSize) or k_next > point_k:
+ break
+ cell = cell_next
+ # return int(cell[0] + stepSize / 2)
+ return cell[0]
+ # 先找左边的拐点:
+ pointY_left = findPt(image_=a_thresh, mode=1)
+ # 再找右边的拐点:
+ pointY_right = findPt(image_=a_thresh, mode=2)
+ point = (pointY_left + pointY_right) // 2
+ if if_per is True:
+ point = (pointY_left + pointY_right) // 2
+ return point / (y_low - y_high)
+ pointX_left = next(locate_width_pro(image_=a_thresh, y_= point - 1, mode=1))[1]
+ pointX_right = next(locate_width_pro(image_=a_thresh, y_=point- 1, mode=2))[1]
+ return [pointX_left, point], [pointX_right, point]
+
+
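+# Toy sketch of the slope test used by bestJunctionCheck_beta / bestJunctionCheck below: given the
+# left contour as an array xs indexed by row, the "turning point" is the first row where the
+# finite-difference slope over stepSize rows exceeds a threshold. The contour here is synthetic.
+def _corner_by_slope_demo(step_size: int = 4, point_k: float = 1.0):
+    xs = np.array([50] * 20 + [50 - 3 * i for i in range(1, 11)])  # nearly vertical, then widening
+    for y in range(len(xs) - step_size):
+        k = (xs[y] - xs[y + step_size]) / step_size
+        if k > point_k:
+            return y  # -> 17 with these numbers
+    return None
+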
+def bestJunctionCheck(image:np.ndarray, offset:int, stepSize:int=4):
+ """
+ 最优点检测算算法输入一张脖子图片(无论这张图片是否已经被二值化,我都认为没有被二值化),输出一个小数(脖子最上方与衔接点位置/脖子图像长度)
+ 与beta版不同的是它新增了一个阈值限定内容.
+ 对于脖子而言,我我们首先可以定位到上面的部分,然后根据上面的这个点向下进行遍历检测.
+ 与beta版类似,我们使用一个stepSize来用作斜率的检测
+ 但是对于遍历检测而言,与beta版不同的是,我们需要对遍历的地方进行一定的限制.
+ 限制的标准是,如果当前遍历的点的横坐标和起始点横坐标的插值超过了某个阈值,则认为是越界.
+ """
+ point_k = 1
+ _, _, _, a = cv2.split(image) # 这应该是一个四通道的图像
+ height, width = a.shape
+ ret, a_thresh = cv2.threshold(a, 127, 255, cv2.THRESH_BINARY) # 将透明图层二值化
+ # 直接返回脖子的位置信息, 修正系数为0, get_box_pro内部也封装了二值化,所以直接输入原图
+ y_high, y_low, _, _ = get_box_pro(image=image, model=1, correction_factor=0)
+ # 真正有用的只有上下y轴的两个值...
+ # 首先当然是确定起始点的位置,我们用同样的scan扫描函数进行行遍历.
+ def scan(y_:int, max_num:int=2):
+ num = 0
+ # 设定两个值,分别代表脖子的左边和右边
+ left = False
+ right = False
+ for x_ in range(width):
+ if a_thresh[y_][x_] != 0:
+ # 检测左边
+ if x_ < width // 2 and left is False:
+ num += 1
+ left = True
+ # 检测右边
+ elif x_ > width // 2 and right is False:
+ num += 1
+ right = True
+ return True if num >= max_num else False
+ def locate_neck_above():
+ """
+ 定位脖子的尖尖脚
+ """
+ # y_high就是脖子的最高点
+ for y_ in range(y_high, height):
+ if scan(y_):
+ return y_
+ y_start = locate_neck_above() # 得到遍历的初始高度
+    if y_low - y_start < stepSize:
+        raise ValueError("The neck region is too small!")
+ # 然后获取一下初始的坐标点
+ x_left, x_right = 0, width
+ for x_left_ in range(0, width):
+ if a_thresh[y_start][x_left_] != 0:
+ x_left = x_left_
+ break
+ for x_right_ in range(width -1 , -1, -1):
+ if a_thresh[y_start][x_right_] != 0:
+ x_right = x_right_
+ break
+ # 接下来我定义两个生成器,首先是脖子轮廓(向下寻找的)生成器,每进行一次next,生成器会返回y+1的脖子轮廓点
+ def contoursGenerator(image_:np.ndarray, y_:int, mode):
+ """
+ 这会是一个生成器,用于生成脖子两边的轮廓
+ y_ 是启始点的y坐标,每一次寻找都会让y_+1
+ mode==1说明是找左边的边,即,image_[y_][x_] == 0 且image_[y_][x_ + 1] !=0 时跳出;
+ 否则 当image_[y_][x_] != 0 时, x_ - 1; 当image_[y_][x_] == 0 且 image_[y_][x_ + 1] ==0 时x_ + 1
+ mode==2说明是找右边的边,即,image_[y_][x_] == 0 且image_[y_][x_ - 1] !=0 时跳出
+ 否则 当image_[y_][x_] != 0 时, x_ + 1; 当image_[y_][x_] == 0 且 image_[y_][x_ - 1] ==0 时x_ - 1
+ """
+ y_ += 1
+ try:
+ if mode == 1:
+ x_ = 0
+ while 0 <= y_ < height and 0 <= x_ < width:
+ while image_[y_][x_] != 0 and x_ >= 0: x_ -= 1
+ # 这里其实会有bug,不过可以不管
+ while x_ < width and image_[y_][x_] == 0 and image_[y_][x_ + 1] == 0: x_ += 1
+ yield [y_, x_]
+ y_ += 1
+ elif mode == 2:
+ x_ = width-1
+ while 0 <= y_ < height and 0 <= x_ < width:
+ while x_ < width and image_[y_][x_] != 0: x_ += 1
+ while x_ >= 0 and image_[y_][x_] == 0 and image_[y_][x_ - 1] == 0: x_ -= 1
+ yield [y_, x_]
+ y_ += 1
+ # 当处理失败则返回False
+ except IndexError:
+ yield False
+ # 然后是斜率生成器,这个生成器依赖子轮廓生成器,每一次生成轮廓后会计算斜率,另一个点的选取和stepSize有关
+ def kGenerator(image_: np.ndarray, mode):
+ """
+ 导数生成器,用来生成每一个点对应的导数
+ """
+ y_ = y_start
+ # 对起始点建立一个生成器, mode=1时是左边轮廓,mode=2时是右边轮廓
+ c_generator = contoursGenerator(image_=image_, y_=y_, mode=mode)
+ for cell in c_generator:
+ # 寻找距离当前cell距离为stepSize的轮廓点
+ kc = contoursGenerator(image_=image_, y_=cell[0] + stepSize, mode=mode)
+ kCell = next(kc)
+ if kCell is False:
+ # 寻找失败
+ yield False, False
+ else:
+ # 寻找成功,返回当坐标点和斜率值
+ # 对于左边而言,斜率必然是前一个点的坐标减去后一个点的坐标
+ # 对于右边而言,斜率必然是后一个点的坐标减去前一个点的坐标
+ k = (cell[1] - kCell[1]) / stepSize if mode == 1 else (kCell[1] - cell[1]) / stepSize
+ yield k, cell
+ # 接着开始写寻找算法,需要注意的是我们是分两边选择的
+ def findPt(image_:np.ndarray, mode):
+ x_base = x_left if mode == 1 else x_right
+ k_generator = kGenerator(image_=image_, mode=mode)
+ k, cell = k_generator.__next__()
+ if k is False:
+ raise ValueError("无法找到拐点!")
+ k_next, cell_next = k_generator.__next__()
+ while k_next is not False:
+ cell = cell_next
+ if cell[1] > x_base and mode == 2:
+ x_base = cell[1]
+ elif cell[1] < x_base and mode == 1:
+ x_base = cell[1]
+ # 跳出循环的方式一:斜率超过了某个值
+ if k_next > point_k:
+ print("K out")
+ break
+ # 跳出循环的方式二:超出阈值
+ elif abs(cell[1] - x_base) > offset:
+ print("O out")
+ break
+ k_next, cell_next = k_generator.__next__()
+ if abs(cell[1] - x_base) > offset:
+ cell[0] = cell[0] - offset - 1
+ return cell[0]
+ # 先找左边的拐点:
+ pointY_left = findPt(image_=a_thresh, mode=1)
+ # 再找右边的拐点:
+ pointY_right = findPt(image_=a_thresh, mode=2)
+ point = min(pointY_right, pointY_left)
+ per = (point - y_high) / (y_low - y_high)
+ # pointX_left = next(contoursGenerator(image_=a_thresh, y_= point- 1, mode=1))[1]
+ # pointX_right = next(contoursGenerator(image_=a_thresh, y_=point - 1, mode=2))[1]
+ # return [pointX_left, point], [pointX_right, point]
+ return per
+
+
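+# bestJunctionCheck returns a ratio rather than a pixel row. Converting it back to an absolute row
+# (as the commented-out code in the __main__ block further below does) is just the inverse mapping;
+# y_high / y_low are the top and bottom of the neck box and `per` is the returned ratio.
+def _ratio_to_row(y_high: int, y_low: int, per: float) -> int:
+    return y_high + int((y_low - y_high) * per)
+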
+def checkSharpCorner(image:np.ndarray):
+ _, _, _, a = cv2.split(image) # 这应该是一个四通道的图像
+ height, width = a.shape
+ ret, a_thresh = cv2.threshold(a, 127, 255, cv2.THRESH_BINARY) # 将透明图层二值化
+ # 直接返回脖子的位置信息, 修正系数为0, get_box_pro内部也封装了二值化,所以直接输入原图
+ y_high, y_low, _, _ = get_box_pro(image=image, model=1, correction_factor=0)
+ def scan(y_:int, max_num:int=2):
+ num = 0
+ # 设定两个值,分别代表脖子的左边和右边
+ left = False
+ right = False
+ for x_ in range(width):
+ if a_thresh[y_][x_] != 0:
+ # 检测左边
+ if x_ < width // 2 and left is False:
+ num += 1
+ left = True
+ # 检测右边
+ elif x_ > width // 2 and right is False:
+ num += 1
+ right = True
+ return True if num >= max_num else False
+ def locate_neck_above():
+ """
+ 定位脖子的尖尖脚
+ """
+ # y_high就是脖子的最高点
+ for y_ in range(y_high, height):
+ if scan(y_):
+ return y_
+ y_start = locate_neck_above()
+ return y_start
+
+def checkJaw(image:np.ndarray, y_start:int):
+    # Find the "saddle point": the lowest point of the jaw/chin outline between the two sides of the neck
+ _, _, _, a = cv2.split(image) # 这应该是一个四通道的图像
+ height, width = a.shape
+ ret, a_thresh = cv2.threshold(a, 127, 255, cv2.THRESH_BINARY) # 将透明图层二值化
+ if width <=1: raise TypeError("图像太小!")
+ x_left, x_right = 0, width - 1
+ for x_left in range(width):
+ if a_thresh[y_start][x_left] != 0:
+ while a_thresh[y_start][x_left] != 0: x_left += 1
+ break
+ for x_right in range(width-1, -1, -1):
+ if a_thresh[y_start][x_right] != 0:
+ while a_thresh[y_start][x_right] != 0: x_right -= 1
+ break
+ point_list_y = []
+ point_list_x = []
+ for x in range(x_left, x_right):
+ y = y_start
+ while a_thresh[y][x] == 0: y += 1
+ point_list_y.append(y)
+ point_list_x.append(x)
+ y = max(point_list_y)
+ x = point_list_x[point_list_y.index(y)]
+ return x, y
+
+
+def checkHairLOrR(cloth_image_input_cut,
+ input_a,
+ neck_a,
+ cloth_image_input_top_y,
+ cutbar_top=0.4,
+ cutbar_bottom=0.5,
+ threshold=0.3):
+ """
+ 本函数用于检测衣服是否被头发遮挡,当前只考虑左右是否被遮挡,即"一刀切"
+ 返回int
+ 0代表没有被遮挡
+ 1代表左边被遮挡
+ 2代表右边被遮挡
+ 3代表全被遮挡了
+ 约定,输入的图像是一张灰度图,且被二值化过.
+ """
+ def per_darkPoint(img:np.ndarray) -> int:
+ """
+ 用于遍历相加图像上的黑点.
+ 然后返回黑点数/图像面积
+ """
+ h, w = img.shape
+ sum_darkPoint = 0
+ for y in range(h):
+ for x in range(w):
+ if img[y][x] == 0:
+ sum_darkPoint += 1
+ return sum_darkPoint / (h * w)
+
+ if threshold < 0 or threshold > 1: raise TypeError("阈值设置必须在0和1之间!")
+
+ # 裁出cloth_image_input_cut按高度40%~50%的区域-cloth_image_input_cutbar,并转换为A矩阵,做二值化
+ cloth_image_input_height = cloth_image_input_cut.shape[0]
+ _, _, _, cloth_image_input_cutbar = cv2.split(cloth_image_input_cut[
+ int(cloth_image_input_height * cutbar_top):int(
+ cloth_image_input_height * cutbar_bottom), :])
+ _, cloth_image_input_cutbar = cv2.threshold(cloth_image_input_cutbar, 127, 255, cv2.THRESH_BINARY)
+
+ # 裁出input_image、neck_image的A矩阵的对应区域,并做二值化
+ input_a_cutbar = input_a[cloth_image_input_top_y + int(cloth_image_input_height * cutbar_top):
+ cloth_image_input_top_y + int(cloth_image_input_height * cutbar_bottom), :]
+ _, input_a_cutbar = cv2.threshold(input_a_cutbar, 127, 255, cv2.THRESH_BINARY)
+ neck_a_cutbar = neck_a[cloth_image_input_top_y + int(cloth_image_input_height * cutbar_top):
+ cloth_image_input_top_y + int(cloth_image_input_height * cutbar_bottom), :]
+ _, neck_a_cutbar = cv2.threshold(neck_a_cutbar, 50, 255, cv2.THRESH_BINARY)
+
+ # 将三个cutbar合到一起-result_a_cutbar
+ input_a_cutbar = np.uint8(255 - input_a_cutbar)
+ result_a_cutbar = cv2.add(input_a_cutbar, cloth_image_input_cutbar)
+ result_a_cutbar = cv2.add(result_a_cutbar, neck_a_cutbar)
+
+ if_mask = 0
+ # 我们将图像 一刀切,分为左边和右边
+ height, width = result_a_cutbar.shape # 一通道图像
+ left_image = result_a_cutbar[:, :width//2]
+ right_image = result_a_cutbar[:, width//2:]
+ if per_darkPoint(left_image) > threshold:
+ if_mask = 1
+ if per_darkPoint(right_image) > threshold:
+ if_mask = 3 if if_mask == 1 else 2
+ return if_mask
+
+
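+# per_darkPoint above counts zero-valued pixels with an explicit double loop. An equivalent
+# (and much faster) NumPy formulation is sketched here as a suggestion; it is not used by the code above.
+def _per_dark_point_vectorized(img: np.ndarray) -> float:
+    return float(np.count_nonzero(img == 0)) / img.size
+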
+if __name__ == "__main__":
+ for i in range(1, 8):
+ img = cv2.imread(f"./neck_temp/neck_image{i}.png", cv2.IMREAD_UNCHANGED)
+ # new = transformationNeck(image=img, cutNeckHeight=419,neckBelow=472, toHeight=150)
+ # point_list = bestJunctionCheck(img, offset=5, stepSize=3)
+ # per = bestJunctionCheck(img, offset=5, stepSize=3)
+ # # 返回一个小数的形式, 接下来我将它处理为两个点
+ point_list = []
+ # y_high_, y_low_, _, _ = get_box_pro(image=img, model=1, conreection_factor=0)
+ # _y = y_high_ + int((y_low_ - y_high_) * per)
+ # _, _, _, a_ = cv2.split(img) # 这应该是一个四通道的图像
+ # h, w = a_.shape
+ # r, a_t = cv2.threshold(a_, 127, 255, cv2.THRESH_BINARY) # 将透明图层二值化
+ # _x = 0
+ # for _x in range(w):
+ # if a_t[_y][_x] != 0:
+ # break
+ # point_list.append([_x, _y])
+ # for _x in range(w - 1, -1, -1):
+ # if a_t[_y][_x] != 0:
+ # break
+ # point_list.append([_x, _y])
+ y = checkSharpCorner(img)
+ point = checkJaw(image=img, y_start=y)
+ point_list.append(point)
+ new = draw_picture_dots(img, point_list, pen_size=2)
+ cv2.imshow(f"{i}", new)
+ cv2.waitKey(0)
+
+def find_black(image):
+ """
+ 找黑色点函数,遇到输入矩阵中的第一个黑点,返回它的y值
+ """
+ height, width = image.shape[0], image.shape[1]
+ for i in range(height):
+ for j in range(width):
+ if image[i, j] < 127:
+ return i
+ return None
+
+def convert_black_array(image):
+ height, width = image.shape[0], image.shape[1]
+ mask = np.zeros([height, width])
+ for j in range(width):
+ for i in range(height):
+ if image[i, j] > 127:
+ mask[i:, j] = 1
+ break
+ return mask
+
+def checkLongHair(neck_image, head_bottom_y, neck_top_y):
+ """
+ 长发检测函数,输入为head/neck图像,通过下巴是否为最低点,来判断是否为长发
+ :return 0 : 短发
+ :return 1 : 长发
+ """
+ jaw_y = neck_top_y + checkJaw(neck_image, y_start=checkSharpCorner(neck_image))[1]
+ if jaw_y >= head_bottom_y-3:
+ return 0
+ else:
+ return 1
+
+def checkLongHair2(head_bottom_y, cloth_top_y):
+ if head_bottom_y > cloth_top_y+10:
+ return 1
+ else:
+ return 0
+
+
diff --git a/hivisionai/hycv/idphotoTool/idphoto_change_cloth.py b/hivisionai/hycv/idphotoTool/idphoto_change_cloth.py
new file mode 100644
index 0000000000000000000000000000000000000000..e90c0c04dc85f4464dc099b15496d948f2276988
--- /dev/null
+++ b/hivisionai/hycv/idphotoTool/idphoto_change_cloth.py
@@ -0,0 +1,271 @@
+import cv2
+import numpy as np
+from ..utils import get_box_pro, cut_BiggestAreas, locate_neck, get_cutbox_image
+from .move_image import move
+from ..vision import add_background, cover_image
+from ..matting_tools import get_modnet_matting
+from .neck_processing import transformationNeck
+from .cuny_tools import checkSharpCorner, checkJaw, checkHairLOrR,\
+ checkLongHair, checkLongHair2, convert_black_array, find_black
+
+test_image_path = "./supple_image/"
+
+def change_cloth(input_image:np.array,
+ cloth_model,
+ CLOTH_WIDTH,
+ CLOTH_X,
+ CLOTH_WIDTH_CHANGE,
+ CLOTH_X_CHANGE,
+ CLOTH_Y,
+ neck_ratio=0.2,
+ space_adjust=None,
+ hair_front=True
+ ):
+
+ # ============= 1. 得到头脖子图、纯头图、纯脖子图的相关信息 =============== #
+ # 1.1 获取原图input_image属性
+ input_height, input_width = input_image.shape[0], input_image.shape[1]
+ # print("input_height:", input_height)
+ # print("input_width", input_width)
+ b, g, r, input_a = cv2.split(input_image)
+
+ # 1.2 得到头脖子图headneck_image、纯头图head_image
+ input_image = add_background(input_image, bgr=(255, 255, 255))
+ headneck_image = get_modnet_matting(input_image, checkpoint_path="./checkpoint/huanying_headneck3.onnx")
+ head_image = get_modnet_matting(input_image, checkpoint_path="./checkpoint/huanying_head3.onnx")
+
+ # 1.3 得到优化后的脖子图neck_threshold_image
+ _, _, _, headneck_a = cv2.split(headneck_image)
+ _, _, _, head_a = cv2.split(head_image)
+ neck_a = cv2.subtract(headneck_a, head_a)
+ _, neck_threshold_a = cv2.threshold(neck_a, 180, 255, cv2.THRESH_BINARY)
+ neck_threshold_image = cut_BiggestAreas(cv2.merge(
+ (np.uint8(b), np.uint8(g), np.uint8(r), np.uint8(neck_threshold_a))))
+
+ # 1.4 得到优化后的头脖子图headneck_threshold_image
+ _, headneck_threshold_a = cv2.threshold(headneck_a, 180, 255, cv2.THRESH_BINARY)
+ headneck_threshold_image = cut_BiggestAreas(
+ cv2.merge((np.uint8(b), np.uint8(g), np.uint8(r), np.uint8(headneck_threshold_a))))
+
+ # 1.5 获取脖子图、头脖子图的A矩阵
+ _, _, _, neck_threshold_a2 = cv2.split(neck_threshold_image)
+ _, _, _, headneck_threshold_a2 = cv2.split(headneck_threshold_image)
+
+ # 1.6 获取头发的底部坐标信息,以及头的左右坐标信息
+ _, headneck_y_bottom, headneck_x_left, headneck_x_right = get_box_pro(headneck_threshold_image,
+ model=2, correction_factor=0)
+ headneck_y_bottom = input_height-headneck_y_bottom
+ headneck_x_right = input_width-headneck_x_right
+
+
+
+ # ============= 2. 得到原来的衣服的相关信息 =============== #
+ # 2.1 抠出原来衣服cloth_image_input
+ cloth_origin_image_a = cv2.subtract(np.uint8(input_a), np.uint8(headneck_a))
+ _, cloth_origin_image_a = cv2.threshold(cloth_origin_image_a, 180, 255, cv2.THRESH_BINARY)
+ cloth_image_input = cut_BiggestAreas(cv2.merge((np.uint8(b), np.uint8(g), np.uint8(r), np.uint8(cloth_origin_image_a))))
+
+ # 2.2 对cloth_image_input做裁剪(减去上面的大片透明区域)
+ cloth_image_input_top_y, _, _, _ = get_box_pro(cloth_image_input, model=2)
+ cloth_image_input_cut = cloth_image_input[cloth_image_input_top_y:, :]
+
+
+
+ # ============= 3.计算脖子的衔接点信息,为新服装拼接作准备 ===============#
+ # 3.1 得到裁剪透明区域后的脖子图neck_cut_image,以及它的坐标信息
+ neck_y_top, neck_y_bottom, neck_x_left, neck_x_right = get_box_pro(neck_threshold_image, model=2)
+ neck_cut_image = get_cutbox_image(neck_threshold_image)
+ neck_height = input_height - (neck_y_top + neck_y_bottom)
+ neck_width = input_width - (neck_x_right + neck_x_left)
+
+ # 3.2 对neck_cut_image做“尖尖”检测,得到较低的“尖尖”对于脖子高度的比率y_neck_corner_ratio
+ y_neck_corner = checkSharpCorner(neck_cut_image)
+ y_neck_corner_ratio = y_neck_corner / neck_height
+
+ # 3.3 取y_neck_corner_ratio与新衣服预先设定好的neck_ratio的最大值,作为最终的neck_ratio
+ neck_ratio = max(neck_ratio, y_neck_corner_ratio)
+
+ # 3.4 计算在neck_ratio下的脖子左衔接点坐标neck_left_x_byRatio,neck_left_y_byRatio、宽度neck_width_byRatio
+ neck_coordinate1, neck_coordinate2, neck_width_byRatio = locate_neck(neck_cut_image, float(neck_ratio))
+ neck_width_byRatio = neck_width_byRatio + CLOTH_WIDTH_CHANGE
+ neck_left_x_byRatio = neck_x_left + neck_coordinate1[1] + CLOTH_X_CHANGE
+ neck_left_y_byRatio = neck_y_top + neck_coordinate1[0]
+
+
+
+ # ============= 4.读取新衣服图,调整大小 =============== #
+ # 4.1 得到新衣服图片的拼贴坐标x, y以及脖子最底部的坐标y_neckline
+ CLOTH_HEIGHT = CLOTH_Y
+ RESIZE_RATIO = neck_width_byRatio / CLOTH_WIDTH
+ x, y = int(neck_left_x_byRatio - CLOTH_X * RESIZE_RATIO), neck_left_y_byRatio
+ y_neckline = y + int(CLOTH_HEIGHT * RESIZE_RATIO)
+
+ # 4.2 读取新衣服,并进行缩放
+ cloth = cv2.imread(cloth_model, -1)
+ cloth_height, cloth_width = cloth.shape[0], cloth.shape[1]
+ cloth = cv2.resize(cloth, (int(cloth_width * RESIZE_RATIO),
+ int(cloth_height * RESIZE_RATIO)), interpolation=cv2.INTER_AREA)
+
+ # 4.3 获得新衣服的A矩阵
+ _, _, _, cloth_a = cv2.split(cloth)
+
+
+
+ # ============= 5. 判断头发的前后关系,以及对于长发的背景填充、判定是否为长发等 =============== #
+ # 5.1 根据hair_number, 判断是0:头发披在后面、1:左前右后、2:左后右前还是3:都在前面
+ hair_number = checkHairLOrR(cloth_image_input_cut, input_a, neck_a, cloth_image_input_top_y)
+
+ # 5.2 对于长发的背景填充-将原衣服区域的部分变成黑色,并填充到最终图片作为背景
+ cloth_image_input_save = cloth_origin_image_a[:int(y+cloth_height*RESIZE_RATIO),
+ max(0, headneck_x_left-1):min(headneck_x_right+1, input_width)]
+ headneck_threshold_a_save = headneck_a[:int(y+cloth_height*RESIZE_RATIO),
+ max(0, headneck_x_left-1):min(headneck_x_right+1, input_width)]
+ headneck_mask = convert_black_array(headneck_threshold_a_save)
+ kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (15, 15))
+ cloth_image_input_save = cv2.dilate(cloth_image_input_save, kernel)
+ cloth_image_input_save = np.uint8(cloth_image_input_save*headneck_mask)
+
+ # 5.3 检测是否为长发
+ head_bottom_y = input_height - get_box_pro(head_image, model=2, correction_factor=0)[1]
+ isLongHair01 = checkLongHair(neck_cut_image, head_bottom_y, neck_top_y=neck_y_top)
+ isLongHair02 = checkLongHair2(head_bottom_y, cloth_image_input_top_y)
+ isLongHair = isLongHair01 and isLongHair02
+
+
+
+ # ============= 6.第一轮服装拼贴 =============== #
+ # 6.1 创建一个空白背景background
+ background = np.uint8((np.zeros([input_height, input_width, 4])))
+
+ # 6.2 盖上headneck_image
+ result_headneck_image = cover_image(headneck_image, background, 0, 0, mode=3)
+
+ # 6.3 如果space_adjust开启的话,background的底部将增加一些行数
+ if space_adjust is not None:
+ insert_array = np.uint8(np.zeros((space_adjust, input_width, 4)))
+ result_headneck_image = np.r_[result_headneck_image, insert_array]
+
+ # 6.4 盖上新衣服
+ result_cloth_image = cover_image(cloth, result_headneck_image, x, y, mode=3)
+
+ # 6.5 截出脖子与衣服交接的区域neck_cloth_image,以及它的A矩阵neck_cloth_a
+ neck_cloth_image = result_cloth_image[y:y_neckline,
+ neck_left_x_byRatio:neck_left_x_byRatio+neck_width_byRatio]
+ _, _, _, neck_cloth_a = cv2.split(neck_cloth_image)
+ _, neck_cloth_a = cv2.threshold(neck_cloth_a, 127, 255, cv2.THRESH_BINARY)
+
+
+
+ # ============= 7.第二轮服装拼贴 =============== #
+ # 7.1 检测neck_cloth_a中是否有黑点(即镂空区域)
+ # 如果black_dots_y不为None,说明存在镂空区域——需要进行脖子拉伸;反而则不存在,不需要
+ black_dots_y = find_black(neck_cloth_a)
+ # cv2.imwrite(test_image_path+"neck_cloth_a.jpg", neck_cloth_a)
+
+ # flag: 用于指示是否进行拉伸
+ flag = 0
+
+ # 7.2 如果存在镂空区域,则进行拉伸
+ if black_dots_y != None:
+ flag = 1
+ # cutNeckHeight:要拉伸的区域的顶部y值
+ # neckBelow:脖子底部的y值
+ # toHeight:拉伸区域的高度
+ cutNeckHeight = black_dots_y + y - 6
+ # if cutNeckHeight < neck_y_top+checkJaw(neck_cut_image, y_start=checkSharpCorner(neck_cut_image))[1]:
+ # print("拒绝!!!!!!")
+ # return 0, 0, 0, 0, 0
+
+ neckBelow = input_height-neck_y_bottom
+ toHeight = y_neckline-cutNeckHeight
+ print("cutNeckHeight:", cutNeckHeight)
+ print("toHeight:", toHeight)
+ print("neckBelow:", neckBelow)
+ # cv2.imwrite(test_image_path+"neck_image.png", neck_threshold_image)
+
+ # 对原有的脖子做拉伸,得到new_neck_image
+ new_neck_image = transformationNeck(neck_threshold_image,
+ cutNeckHeight=cutNeckHeight,
+ neckBelow=neckBelow,
+ toHeight=toHeight)
+ # cv2.imwrite(test_image_path+"new_neck_image.png", new_neck_image)
+
+
+ # 重新进行拼贴
+ result_headneck_image = cover_image(new_neck_image, result_headneck_image, 0, 0, mode=3)
+ result_headneck_image = cover_image(head_image, result_headneck_image, 0, 0, mode=3)
+ result_cloth_image = cover_image(cloth, result_headneck_image, x, y, mode=3)
+
+ _, _, _, neck_a = cv2.split(new_neck_image)
+
+
+ # 7.3 下面是对最终图的A矩阵进行处理
+ # 首先将neck_a与新衣服衔接点的左边两边区域删去,得到neck_a_leftright
+ neck_a_copy = neck_a.copy()
+ neck_a_copy[neck_left_y_byRatio:, :max(0, neck_left_x_byRatio-4)] = 0
+ neck_a_copy[neck_left_y_byRatio:,
+ min(input_width, neck_left_x_byRatio + neck_width_byRatio - CLOTH_X_CHANGE+4):] = 0
+ n_a_leftright = cv2.subtract(neck_a, neck_a_copy)
+
+ # 7.4 如果存在镂空区域,则对headneck_a做进一步处理
+ if black_dots_y != None:
+ neck_a = cv2.subtract(neck_a, n_a_leftright)
+ # 得到去掉脖子两翼的新的headneck_a
+ headneck_a = cv2.subtract(headneck_a, n_a_leftright)
+ # 将headneck_a覆盖上拉伸后的脖子A矩阵
+ headneck_a = np.uint8(cover_image(neck_a, headneck_a, 0, 0, mode=2))
+ else:
+ headneck_a = cv2.subtract(headneck_a, n_a_leftright)
+
+
+
+ # 7.5 如果是长发
+ if isLongHair:
+ # 在背景加入黑色矩形,填充抠头模型可能会出现的,部分长发没有抠全的部分
+ black_background_x1 = int(neck_left_x_byRatio - neck_width_byRatio * 0.1)
+ black_background_x2 = int(neck_left_x_byRatio + neck_width_byRatio * 1.1)
+ black_background_y1 = int(neck_y_top - neck_height * 0.1)
+ black_background_y2 = min(input_height - neck_y_bottom - 3, head_bottom_y)
+ headneck_a[black_background_y1:black_background_y2, black_background_x1:black_background_x2] = 255
+
+ # 在背景中,将原本衣服区域置为黑色
+ headneck_a = cover_image(cloth_image_input_save, headneck_a, max(0, headneck_x_left-1), 0, mode=2)
+
+ # 7.6 如果space_adjust开启的话,headneck_a的底部将增加一些行数
+ if space_adjust is not None:
+ insert_array = np.uint8(np.zeros((space_adjust, input_width)))
+ headneck_a = np.r_[headneck_a, insert_array]
+
+ # 7.7 盖上新衣服
+ new_a = np.uint8(cover_image(cloth_a, headneck_a, x, y, mode=2))
+
+ # neck_cloth_a = new_a[y:y_neckline, neck_left_x_byRatio:neck_left_x_byRatio + neck_width_byRatio]
+ # _, neck_cloth_a = cv2.threshold(neck_cloth_a, 127, 255, cv2.THRESH_BINARY)
+ # cv2.imwrite(test_image_path + "neck_cloth_a2.jpg", neck_cloth_a)
+ #
+ # if find_black(neck_cloth_a) != None:
+ # print("拒绝!!!!")
+ # return "拒绝"
+
+ # 7.8 如果有头发披在前面
+ if hair_front:
+ # 如果头发披在左边
+ if hair_number == 1:
+ result_cloth_image = cover_image(head_image[:, :head_image.shape[1] // 2], result_cloth_image, 0, 0, mode=3)
+ # 如果头发披在右边
+ elif hair_number == 2:
+ result_cloth_image = cover_image(head_image[:, head_image.shape[1] // 2:], result_cloth_image, head_image.shape[1] // 2, 0, mode=3)
+ # 如果头发披在两边
+ elif hair_number == 3:
+ result_cloth_image = cover_image(head_image, result_cloth_image, 0, 0, mode=3)
+
+    # 7.9 Compose the final image and shift away the gap at the bottom
+    b, g, r, _ = cv2.split(result_cloth_image)  # OpenCV channel order is BGR
+    result_image = move(cv2.merge((b, g, r, new_a)))
+
+    # 7.10 Return: result image, whether the neck was stretched, hair front/back state, whether the hair is long
+ return 1, result_image, flag, hair_number, isLongHair
+
+
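+# A hedged usage sketch for change_cloth. The paths and the CLOTH_* layout constants below are
+# placeholders (they depend on the cloth template in use); only the call signature and the
+# 5-tuple returned on success come from the function above.
+def _change_cloth_example():
+    input_image = cv2.imread("input_image/person.png", cv2.IMREAD_UNCHANGED)  # 4-channel BGRA input (placeholder path)
+    status, result_image, stretched, hair_number, is_long_hair = change_cloth(
+        input_image,
+        cloth_model="cloth_templates/suit.png",  # placeholder template path
+        CLOTH_WIDTH=200, CLOTH_X=100, CLOTH_WIDTH_CHANGE=0, CLOTH_X_CHANGE=0, CLOTH_Y=50,
+    )
+    return status, result_image
+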
+if __name__ == "__main__":
+ pass
diff --git a/hivisionai/hycv/idphotoTool/idphoto_cut.py b/hivisionai/hycv/idphotoTool/idphoto_cut.py
new file mode 100644
index 0000000000000000000000000000000000000000..348b4bf33b196897446d2030faf3210637bc02db
--- /dev/null
+++ b/hivisionai/hycv/idphotoTool/idphoto_cut.py
@@ -0,0 +1,420 @@
+import cv2
+import math
+from ..utils import get_box_pro
+from ..face_tools import face_detect_mtcnn
+from ..vision import IDphotos_cut, detect_distance, resize_image_esp, draw_picture_dots
+from ..matting_tools import get_modnet_matting
+from .move_image import move
+from src.hivisionai.hyTrain.APIs import aliyun_face_detect_api
+import numpy as np
+import json
+
+
+def get_max(height, width, d1, d2, d3, d4, rotation_flag):
+ if rotation_flag:
+ height1 = height
+ height2 = height - int(d1.y) # d2
+ height3 = int(d4.y) # d3
+ height4 = int(d4.y) - int(d1.x)
+
+ width1 = width
+ width2 = width - int(d3.x)
+ width3 = int(d2.x)
+ width4 = int(d2.x) - int(d3.x)
+
+ else:
+ height1 = height
+ height2 = height - int(d2.y)
+ height3 = int(d3.y)
+ height4 = int(d3.y) - int(d2.y)
+
+ width1 = width
+ width2 = width - int(d1.x)
+ width3 = int(d4.x)
+ width4 = int(d4.x) - int(d1.x)
+
+ height_list = [height1, height2, height3, height4]
+ width_list = [width1, width2, width3, width4]
+
+ background_height = max(height_list)
+ status_height = height_list.index(background_height)
+
+ background_width = max(width_list)
+ status_width = width_list.index(background_width)
+
+ height_change = 0
+ width_change = 0
+ height_change2 = 0
+ width_change2 = 0
+ if status_height == 1 or status_height == 3:
+ if rotation_flag:
+ height_change = abs(d1.y)
+ height_change2 = d1.y
+ else:
+ height_change = abs(d2.y)
+ height_change2 = d2.y
+
+ if status_width == 1 or status_width == 3:
+ if rotation_flag:
+ width_change = abs(d3.x)
+ width_change2 = d3.x
+ else:
+ width_change = abs(d1.x)
+ width_change2 = d1.x
+
+ return background_height, status_height, background_width, status_width, height_change, width_change,\
+ height_change2, width_change2
+
+class LinearFunction_TwoDots(object):
+ """
+ 通过两个坐标点构建线性函数
+ """
+ def __init__(self, dot1, dot2):
+ self.d1 = dot1
+ self.d2 = dot2
+ self.k = (self.d2.y - self.d1.y) / (self.d2.x - self.d1.x)
+ self.b = self.d2.y - self.k * self.d2.x
+
+ def forward(self, input, mode="x"):
+ if mode == "x":
+ return self.k * input + self.b
+ elif mode == "y":
+ return (input - self.b) / self.k
+
+ def forward_x(self, x):
+ return self.k * x + self.b
+
+ def forward_y(self, y):
+ return (y - self.b) / self.k
+
+class Coordinate(object):
+ def __init__(self, x, y):
+ self.x = x
+ self.y = y
+
+ def __str__(self):
+ return "({}, {})".format(self.x, self.y)
+
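+# Short illustration of the two helper classes above; the numbers are arbitrary.
+def _linear_function_demo():
+    d1, d2 = Coordinate(0, 0), Coordinate(10, 20)
+    line = LinearFunction_TwoDots(d1, d2)  # y = 2 * x
+    y_at_5 = line.forward_x(5)             # -> 10.0
+    x_at_8 = line.forward_y(8)             # -> 4.0
+    return y_at_5, x_at_8
+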
+def IDphotos_create(input_image, size=(413, 295), head_measure_ratio=0.2, head_height_ratio=0.45,
+ checkpoint_path="checkpoint/ModNet1.0.onnx", align=True):
+ """
+ input_path: 输入图像路径
+ output_path: 输出图像路径
+ size: 裁剪尺寸,格式应该如(413,295),竖直距离在前,水平距离在后
+ head_measure_ratio: 人头面积占照片面积的head_ratio
+ head_height_ratio: 人头中心处于照片从上到下的head_height
+ align: 是否进行人脸矫正
+ """
+
+    input_image = resize_image_esp(input_image, 2000)  # shrink the input so its longest edge is at most 2000 px
+    # cv2.imwrite("./temp_input_image.jpg", input_image)
+    origin_png_image = get_modnet_matting(input_image, checkpoint_path)
+    # cv2.imwrite("./test_image/origin_png_image.png", origin_png_image)
+    _, _, _, a = cv2.split(origin_png_image)
+    width_length_ratio = size[0]/size[1]  # height/width ratio of the target size
+    # NOTE: this call expects "./temp_input_image.jpg" on disk; the commented-out imwrite above would create it
+    rotation = aliyun_face_detect_api("./temp_input_image.jpg")
+
+    # Skip the correction when the rotation angle is very small
+ if abs(rotation) < 0.025:
+ align=False
+
+ if align:
+ print("开始align")
+ if rotation > 0:
+ rotation_flag = 0 # 逆时针旋转
+ else:
+ rotation_flag = 1 # 顺时针旋转
+ width, height, channels = input_image.shape
+
+ p_list = [(0, 0), (0, height), (width, 0), (width, height)]
+ rotate_list = []
+ rotate = cv2.getRotationMatrix2D((height * 0.5, width * 0.5), rotation, 0.75)
+ for p in p_list:
+ p_m = np.array([[p[1]], [p[0]], [1]])
+ rotate_list.append(np.dot(rotate[:2], p_m))
+ # print("旋转角的四个顶点", rotate_list)
+
+ input_image = cv2.warpAffine(input_image, rotate, (height, width), flags=cv2.INTER_AREA)
+ new_a = cv2.warpAffine(a, rotate, (height, width), flags=cv2.INTER_AREA)
+ # cv2.imwrite("./test_image/rotation.jpg", input_image)
+
+ # ===================== 开始人脸检测 ===================== #
+ faces, _ = face_detect_mtcnn(input_image, filter=True)
+ face_num = len(faces)
+ print("检测到的人脸数目为:", len(faces))
+ # ===================== 人脸检测结束 ===================== #
+
+ if face_num == 1:
+ face_rect = faces[0]
+ x, y = face_rect[0], face_rect[1]
+ w, h = face_rect[2] - x + 1, face_rect[3] - y + 1
+ elif face_num == 0:
+ print("无人脸,返回0!!!")
+ return 0
+ else:
+ print("太多人脸,返回2!!!")
+ return 2
+
+ d1, d2, d3, d4 = rotate_list[0], rotate_list[1], rotate_list[2], rotate_list[3]
+ d1 = Coordinate(int(d1[0]), int(d1[1]))
+ d2 = Coordinate(int(d2[0]), int(d2[1]))
+ d3 = Coordinate(int(d3[0]), int(d3[1]))
+ d4 = Coordinate(int(d4[0]), int(d4[1]))
+ print("d1:", d1)
+ print("d2:", d2)
+ print("d3:", d3)
+ print("d4:", d4)
+
+ background_height, status_height, background_width, status_width,\
+ height_change, width_change, height_change2, width_change2 = get_max(width, height, d1, d2, d3, d4, rotation_flag)
+
+ print("background_height:", background_height)
+ print("background_width:", background_width)
+ print("status_height:", status_height)
+ print("status_width:", status_width)
+ print("height_change:", height_change)
+ print("width_change:", width_change)
+
+ background = np.zeros([background_height, background_width, 3])
+ background_a = np.zeros([background_height, background_width])
+
+ background[height_change:height_change+width, width_change:width_change+height] = input_image
+ background_a[height_change:height_change+width, width_change:width_change+height] = new_a
+ d1 = Coordinate(int(d1.x)-width_change2, int(d1.y)-height_change2)
+ d2 = Coordinate(int(d2.x)-width_change2, int(d2.y)-height_change2)
+ d3 = Coordinate(int(d3.x)-width_change2, int(d3.y)-height_change2)
+ d4 = Coordinate(int(d4.x)-width_change2, int(d4.y)-height_change2)
+ print("d1:", d1)
+ print("d2:", d2)
+ print("d3:", d3)
+ print("d4:", d4)
+
+ if rotation_flag:
+ f13 = LinearFunction_TwoDots(d1, d3)
+ d5 = Coordinate(max(0, d3.x), f13.forward_x(max(0, d3.x)))
+ print("d5:", d5)
+
+ f42 = LinearFunction_TwoDots(d4, d2)
+ d7 = Coordinate(f42.forward_y(d5.y), d5.y)
+ print("d7", d7)
+
+ background_draw = draw_picture_dots(background, dots=[(d1.x, d1.y),
+ (d2.x, d2.y),
+ (d3.x, d3.y),
+ (d4.x, d4.y),
+ (d5.x, d5.y),
+ (d7.x, d7.y)])
+ # cv2.imwrite("./test_image/rotation_background.jpg", background_draw)
+
+            # sanity check: the face box must lie inside the cropped region [d5.x, d7.x]
+            if x < d5.x or x + w > d7.x:
+                print("return 6")
+                return 6
+
+ background_output = background[:int(d5.y), int(d5.x):int(d7.x)]
+ background_a_output = background_a[:int(d5.y), int(d5.x):int(d7.x)]
+ # cv2.imwrite("./test_image/rotation_background_cut.jpg", background_output)
+
+ else:
+ f34 = LinearFunction_TwoDots(d3, d4)
+ d5 = Coordinate(min(width_change+height, d4.x), f34.forward_x(min(width_change+height, d4.x)))
+ print("d5:", d5)
+
+ f13 = LinearFunction_TwoDots(d1, d3)
+ d7 = Coordinate(f13.forward_y(d5.y), d5.y)
+ print("d7", d7)
+
+            # sanity check: the face box must lie inside the cropped region [d7.x, d5.x]
+            if x < d7.x or x + w > d5.x:
+                print("return 6")
+                return 6
+
+ background_draw = draw_picture_dots(background, dots=[(d1.x, d1.y),
+ (d2.x, d2.y),
+ (d3.x, d3.y),
+ (d4.x, d4.y),
+ (d5.x, d5.y),
+ (d7.x, d7.y)])
+
+ # cv2.imwrite("./test_image/rotation_background.jpg", background_draw)
+
+ background_output = background[:int(d5.y), int(d7.x):int(d5.x)]
+ background_a_output = background_a[:int(d5.y), int(d7.x):int(d5.x)]
+ # cv2.imwrite("./test_image/rotation_background_cut.jpg", background_output)
+
+ input_image = np.uint8(background_output)
+ b, g, r = cv2.split(input_image)
+ origin_png_image = cv2.merge((b, g, r, np.uint8(background_a_output)))
+
+ # ===================== 开始人脸检测 ===================== #
+ width, length = input_image.shape[0], input_image.shape[1]
+ faces, _ = face_detect_mtcnn(input_image, filter=True)
+ face_num = len(faces)
+ print("检测到的人脸数目为:", len(faces))
+ # ===================== 人脸检测结束 ===================== #
+
+ if face_num == 1:
+
+ face_rect = faces[0]
+ x, y = face_rect[0], face_rect[1]
+ w, h = face_rect[2] - x + 1, face_rect[3] - y + 1
+
+ # x,y,w,h代表人脸框的左上角坐标和宽高
+
+ # 检测头顶下方空隙,如果头顶下方空隙过小,则拒绝
+ if y+h >= 0.85*width:
+ # print("face bottom too short! y+h={} width={}".format(y+h, width))
+ print("在人脸下方的空间太少,返回值3!!!")
+ return 3
+
+            # First crop
+            # Basic crop parameters (a worked numeric example follows this function)
+            face_center = (x+w/2, y+h/2)  # center of the face
+            face_measure = w*h  # face area
+            crop_measure = face_measure/head_measure_ratio  # crop-box area: 1/head_measure_ratio times the face area (5x by default)
+            resize_ratio = crop_measure/(size[0]*size[1])  # crop-box scale relative to the requested output size
+            resize_ratio_single = math.sqrt(resize_ratio)
+            crop_size = (int(size[0]*resize_ratio_single), int(size[1]*resize_ratio_single))  # crop-box size
+            print("crop_size:", crop_size)
+
+            # Crop rule: (x1, y1) is the top-left corner of the crop box, (x2, y2) the bottom-right corner
+            # y1 is chosen so that the face center sits at head_height_ratio (45% by default) of the photo height
+            x1 = int(face_center[0]-crop_size[1]/2)
+            y1 = int(face_center[1]-crop_size[0]*head_height_ratio)
+            y2 = y1+crop_size[0]
+            x2 = x1+crop_size[1]
+
+ # 对原图进行抠图,得到透明图img
+ print("开始进行抠图")
+ # origin_png_image => 对原图的抠图结果
+ # cut_image => 第一次裁剪后的图片
+ # result_image => 第二次裁剪后的图片/输出图片
+ # origin_png_image = get_human_matting(input_image, get_file_dir(checkpoint_path))
+
+ cut_image = IDphotos_cut(x1, y1, x2, y2, origin_png_image)
+ # cv2.imwrite("./temp.png", cut_image)
+ # 对裁剪得到的图片temp_path,我们将image=temp_path resize为裁剪框大小,这样方便进行后续计算
+ cut_image = cv2.resize(cut_image, (crop_size[1], crop_size[0]))
+ y_top, y_bottom, x_left, x_right = get_box_pro(cut_image, model=2) # 得到透明图中人像的上下左右距离信息
+ print("y_top:{}, y_bottom:{}, x_left:{}, x_right:{}".format(y_top, y_bottom, x_left, x_right))
+
+ # 判断左右是否有间隙
+ if x_left > 0 or x_right > 0:
+ # 左右有空隙, 我们需要减掉它
+ print("左右有空隙!")
+ status_left_right = 1
+ cut_value_top = int(((x_left + x_right) * width_length_ratio) / 2) # 减去左右,为了保持比例,上下也要相应减少cut_value_top
+ print("cut_value_top:", cut_value_top)
+
+ else:
+ # 左右没有空隙, 则不管
+ status_left_right = 0
+ cut_value_top = 0
+ print("cut_value_top:", cut_value_top)
+
+ # 检测人头顶与照片的顶部是否在合适的距离内
+ print("y_top:", y_top)
+ status_top, move_value = detect_distance(y_top-int((x_left+x_right)*width_length_ratio/2), crop_size[0])
+ # status=0 => 距离合适, 无需移动
+ # status=1 => 距离过大, 人像应向上移动
+ # status=2 => 距离过小, 人像应向下移动
+ # move_value => 上下移动的距离
+ print("status_top:", status_top)
+ print("move_value:", move_value)
+
+ # 开始第二次裁剪
+ if status_top == 0:
+ # 如果上下距离合适,则无需移动
+ if status_left_right:
+ # 如果左右有空隙,则需要用到cut_value_top
+ result_image = IDphotos_cut(x1 + x_left,
+ y1 + cut_value_top,
+ x2 - x_right,
+ y2 - cut_value_top,
+ origin_png_image)
+
+ else:
+ # 如果左右没有空隙,那么则无需改动
+ result_image = cut_image
+
+ elif status_top == 1:
+ # 如果头顶离照片顶部距离过大,需要人像向上移动,则需要用到move_value
+ if status_left_right:
+ # 左右存在距离,则需要cut_value_top
+ result_image = IDphotos_cut(x1 + x_left,
+ y1 + cut_value_top + move_value,
+ x2 - x_right,
+ y2 - cut_value_top + move_value,
+ origin_png_image)
+ else:
+ # 左右不存在距离
+ result_image = IDphotos_cut(x1 + x_left,
+ y1 + move_value,
+ x2 - x_right,
+ y2 + move_value,
+ origin_png_image)
+
+ else:
+ # 如果头顶离照片顶部距离过小,则需要人像向下移动,则需要用到move_value
+ if status_left_right:
+ # 左右存在距离,则需要cut_value_top
+ result_image = IDphotos_cut(x1 + x_left,
+ y1 + cut_value_top - move_value,
+ x2 - x_right,
+ y2 - cut_value_top - move_value,
+ origin_png_image)
+ else:
+ # 左右不存在距离
+ result_image = IDphotos_cut(x1 + x_left,
+ y1 - move_value,
+ x2 - x_right,
+ y2 - move_value,
+ origin_png_image)
+
+ # 调节头顶位置————防止底部空一块儿
+ result_image = move(result_image)
+
+ # 高清保存
+ # cv2.imwrite(output_path.replace(".png", "_HD.png"), result_image)
+
+ # 普清保存
+ result_image2 = cv2.resize(result_image, (size[1], size[0]), interpolation=cv2.INTER_AREA)
+ # cv2.imwrite("./output_image.png", result_image)
+ print("完成.返回1")
+ return 1, result_image, result_image2
+
+ elif face_num == 0:
+ print("无人脸,返回0!!!")
+ return 0
+ else:
+ print("太多人脸,返回2!!!")
+ return 2
+
+
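+# A worked numeric example of the first-crop arithmetic used inside IDphotos_create (face box -> crop box).
+# The face rectangle below is made up; the formulas and the default ratios (head_measure_ratio=0.2,
+# head_height_ratio=0.45, size=(413, 295)) come from the function above.
+def _crop_box_example():
+    size = (413, 295)
+    head_measure_ratio, head_height_ratio = 0.2, 0.45
+    x, y, w, h = 500, 300, 200, 260                            # hypothetical face rectangle
+    face_center = (x + w / 2, y + h / 2)                       # (600.0, 430.0)
+    face_measure = w * h                                       # 52000
+    crop_measure = face_measure / head_measure_ratio           # 260000, i.e. 5x the face area
+    resize_ratio = crop_measure / (size[0] * size[1])          # ~2.134
+    resize_ratio_single = math.sqrt(resize_ratio)              # ~1.461
+    crop_size = (int(size[0] * resize_ratio_single),
+                 int(size[1] * resize_ratio_single))           # (603, 430)
+    x1 = int(face_center[0] - crop_size[1] / 2)                # 385
+    y1 = int(face_center[1] - crop_size[0] * head_height_ratio)  # 158
+    return crop_size, (x1, y1, x1 + crop_size[1], y1 + crop_size[0])
+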
+if __name__ == "__main__":
+ with open("./Setting.json") as json_file:
+ # file_list = get_filedir_filelist("./input_image")
+ setting = json.load(json_file)
+ filedir = "../IDPhotos/input_image/linzeyi.jpg"
+ file_list = [filedir]
+ for filedir in file_list:
+ print(filedir)
+ # try:
+ status_id, result_image, result_image2 = IDphotos_create(cv2.imread(filedir),
+ size=(setting["size_height"], setting["size_width"]),
+ head_height_ratio=setting["head_height_ratio"],
+ head_measure_ratio=setting["head_measure_ratio"],
+ checkpoint_path=setting["checkpoint_path"],
+ align=True)
+ # cv2.imwrite("./result_image.png", result_image)
+
+ if status_id == 1:
+ print("处理完毕!")
+ elif status_id == 0:
+ print("没有人脸!请重新上传有人脸的照片.")
+ elif status_id == 2:
+ print("人脸不只一张!请重新上传单独人脸的照片.")
+ elif status_id == 3:
+ print("人头下方空隙不足!")
+ elif status_id == 4:
+ print("此照片不能制作该规格!")
+ # except Exception as e:
+ # print(e)
\ No newline at end of file
diff --git a/hivisionai/hycv/idphotoTool/move_image.py b/hivisionai/hycv/idphotoTool/move_image.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9f1f8a18955aaa95fdecf0f1ab6bc940c6c61c3
--- /dev/null
+++ b/hivisionai/hycv/idphotoTool/move_image.py
@@ -0,0 +1,121 @@
+"""
+有一些png图像下部也会有一些透明的区域,使得图像无法对其底部边框
+本程序实现移动图像,使其下部与png图像实际大小相对齐
+"""
+import os
+import cv2
+import numpy as np
+from ..utils import get_box_pro
+
+path_pre = os.path.join(os.getcwd(), 'pre')
+path_final = os.path.join(os.getcwd(), 'final')
+
+
+def merge(boxes):
+ """
+ 生成的边框可能不止只有一个,需要将边框合并
+ """
+ x, y, h, w = boxes[0]
+ # x和y应该是整个boxes里面最小的值
+ if len(boxes) > 1:
+ for tmp in boxes:
+ x_tmp, y_tmp, h_tmp, w_tmp = tmp
+ if x > x_tmp:
+ x_max = x_tmp + w_tmp if x_tmp + w_tmp > x + w else x + w
+ x = x_tmp
+ w = x_max - x
+ if y > y_tmp:
+ y_max = y_tmp + h_tmp if y_tmp + h_tmp > y + h else y + h
+ y = y_tmp
+ h = y_max - y
+ return tuple((x, y, h, w))
+
+
+def get_box(png_img):
+ """
+ 获取矩形边框最终返回一个元组(x,y,h,w),分别对应矩形左上角的坐标和矩形的高和宽
+ """
+ r, g, b , a = cv2.split(png_img)
+ gray_img = a
+ th, binary = cv2.threshold(gray_img, 127 , 255, cv2.THRESH_BINARY) # 二值化
+ # cv2.imshow("name", binary)
+ # cv2.waitKey(0)
+ contours, hierarchy = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # 得到轮廓列表contours
+ bounding_boxes = merge([cv2.boundingRect(cnt) for cnt in contours]) # 轮廓合并
+ # print(bounding_boxes)
+ return bounding_boxes
+
+def get_box_2(png_img):
+ """
+ 不用opencv内置算法生成矩形了,改用自己的算法(for循环)
+ """
+ _, _, _, a = cv2.split(png_img)
+ _, a = cv2.threshold(a, 127, 255, cv2.THRESH_BINARY)
+ # 将r,g,b通道丢弃,只留下透明度通道
+ # cv2.imshow("name", a)
+ # cv2.waitKey(0)
+ # 在透明度矩阵中,0代表完全透明
+ height,width=a.shape # 高和宽
+ f=0
+ tmp1 = 0
+
+ """
+ 获取上下
+ """
+ for tmp1 in range(0,height):
+ tmp_a_high= a[tmp1:tmp1+1,:][0]
+ for tmp2 in range(width):
+ # a = tmp_a_low[tmp2]
+ if tmp_a_high[tmp2]!=0:
+ f=1
+ if f == 1:
+ break
+ delta_y_high = tmp1 + 1
+ f = 0
+ for tmp1 in range(height,-1, -1):
+ tmp_a_low= a[tmp1-1:tmp1+1,:][0]
+ for tmp2 in range(width):
+ # a = tmp_a_low[tmp2]
+ if tmp_a_low[tmp2]!=0:
+ f=1
+ if f == 1:
+ break
+ delta_y_bottom = height - tmp1 + 3
+ """
+ 获取左右
+ """
+ f = 0
+ for tmp1 in range(width):
+ tmp_a_left = a[:, tmp1:tmp1+1]
+ for tmp2 in range(height):
+ if tmp_a_left[tmp2] != 0:
+ f = 1
+ if f==1:
+ break
+ delta_x_left = tmp1 + 1
+ f = 0
+ for tmp1 in range(width, -1, -1):
+ tmp_a_left = a[:, tmp1-1:tmp1]
+ for tmp2 in range(height):
+ if tmp_a_left[tmp2] != 0:
+ f = 1
+ if f==1:
+ break
+ delta_x_right = width - tmp1 + 1
+ return delta_y_high, delta_y_bottom, delta_x_left, delta_x_right
+
+def move(input_image):
+ """
+ 裁剪主函数,输入一张png图像,该图像周围是透明的
+ """
+ png_img = input_image # 获取图像
+
+ height, width, channels = png_img.shape # 高y、宽x
+ y_low,y_high, _, _ = get_box_pro(png_img, model=2) # for循环
+ base = np.zeros((y_high, width, channels),dtype=np.uint8) # for循环
+ png_img = png_img[0:height - y_high, :, :] # for循环
+ png_img = np.concatenate((base, png_img), axis=0)
+ return png_img
+
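+# Tiny synthetic check of what move() does: the content is shifted down by the size of the
+# transparent gap below it, so the subject ends on the last row. Purely illustrative.
+def _move_demo():
+    img = np.zeros((6, 4, 4), dtype=np.uint8)
+    img[1:4, :, 3] = 255                       # opaque block with a 2-row transparent gap below it
+    bottom_gap = 2
+    pad = np.zeros((bottom_gap, 4, 4), dtype=np.uint8)
+    return np.concatenate((pad, img[:-bottom_gap]), axis=0)  # the opaque block now ends on the last row
+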
+if __name__ == "__main__":
+ pass
diff --git a/hivisionai/hycv/idphotoTool/neck_processing.py b/hivisionai/hycv/idphotoTool/neck_processing.py
new file mode 100644
index 0000000000000000000000000000000000000000..b26cdf76809fc19b4bf736df9f5627801f097c16
--- /dev/null
+++ b/hivisionai/hycv/idphotoTool/neck_processing.py
@@ -0,0 +1,320 @@
+import cv2
+import numpy as np
+from ..utils import get_box_pro
+from ..vision import cover_image
+
+
+def transformationNeck(image:np.ndarray, cutNeckHeight:int, neckBelow:int,
+ toHeight:int,per_to_side:float=0.75) -> np.ndarray:
+ """
+ 脖子扩充算法, 其实需要输入的只是脖子扣出来的部分以及需要被扩充的高度/需要被扩充成的高度.
+ """
+ height, width, channels = image.shape
+ _, _, _, a = cv2.split(image) # 这应该是一个四通道的图像
+ ret, a_thresh = cv2.threshold(a, 20, 255, cv2.THRESH_BINARY) # 将透明图层二值化
+ def locate_width(image_:np.ndarray, y_:int, mode, left_or_right:int=None):
+ # 从y=y这个水平线上寻找两边的非零点
+ # 增加left_or_right的原因在于为下面check_jaw服务
+ if mode==1: # 左往右
+ x_ = 0
+ if left_or_right is None:
+ left_or_right = 0
+ for x_ in range(left_or_right, width):
+ if image_[y_][x_] != 0:
+ break
+ else: # 右往左
+ x_ = width
+ if left_or_right is None:
+ left_or_right = width - 1
+ for x_ in range(left_or_right, -1, -1):
+ if image_[y_][x_] != 0:
+ break
+ return x_
+ def check_jaw(image_:np.ndarray, left_, right_):
+ """
+ 检查选择的点是否与截到下巴,如果截到了,就往下平移一个单位
+ """
+ f= True # True代表没截到下巴
+ # [x, y]
+ for x_cell in range(left_[0] + 1, right_[0]):
+ if image_[left_[1]][x_cell] == 0:
+ f = False
+ break
+ if f is True:
+ return left_, right_
+ else:
+ y_ = left_[1] + 2
+ x_left_ = locate_width(image_, y_, mode=1, left_or_right=left_[0])
+ x_right_ = locate_width(image_, y_, mode=2, left_or_right=right_[0])
+ left_, right_ = check_jaw(image_, [x_left_, y_], [x_right_, y_])
+ return left_, right_
+ x_left = locate_width(image_=a_thresh, mode=1, y_=cutNeckHeight)
+ x_right = locate_width(image_=a_thresh, mode=2, y_=cutNeckHeight)
+ # 在这里我们取消了对下巴的检查,原因在于输入的imageHeight并不能改变
+ # cell_left_above, cell_right_above = check_jaw(a_thresh, [x_left, imageHeight], [x_right, imageHeight])
+ cell_left_above, cell_right_above = [x_left, cutNeckHeight], [x_right, cutNeckHeight]
+ toWidth = x_right - x_left # 矩形宽
+ # 此时我们寻找到了脖子的"宽出来的"两个点,这两个点作为上面的两个点, 接下来寻找下面的两个点
+    if per_to_side > 1:
+        raise ValueError("per_to_side must be less than 1!")
+ y_below = int((neckBelow - cutNeckHeight) * per_to_side + cutNeckHeight) # 定位y轴坐标
+ cell_left_below = [locate_width(a_thresh, y_=y_below, mode=1), y_below]
+ cell_right_bellow = [locate_width(a_thresh, y_=y_below, mode=2), y_below]
+ # 四个点全齐,开始透视变换
+ # 需要变换的四个点为 cell_left_above, cell_right_above, cell_left_below, cell_right_bellow
+ rect = np.array([cell_left_above, cell_right_above, cell_left_below, cell_right_bellow],
+ dtype='float32')
+ # 变化后的坐标点
+ dst = np.array([[0, 0], [toWidth, 0], [0 , toHeight], [toWidth, toHeight]],
+ dtype='float32')
+ M = cv2.getPerspectiveTransform(rect, dst)
+ warped = cv2.warpPerspective(image, M, (toWidth, toHeight))
+ # 将变换后的图像覆盖到原图上
+ final = cover_image(image=warped, background=image, mode=3, x=cell_left_above[0], y=cell_left_above[1])
+ return final
+
+
+def transformationNeck2(image:np.ndarray, per_to_side:float=0.8)->np.ndarray:
+ """
+ 透视变换脖子函数,输入图像和四个点(矩形框)
+ 矩形框内的图像可能是不完整的(边角有透明区域)
+ 我们将根据透视变换将矩形框内的图像拉伸成和矩形框一样的形状.
+ 算法分为几个步骤: 选择脖子的四个点 -> 选定这四个点拉伸后的坐标 -> 透视变换 -> 覆盖原图
+ """
+ b, g, r, a = cv2.split(image) # 这应该是一个四通道的图像
+ height, width = a.shape
+ def locate_side(image_:np.ndarray, x_:int, y_max:int) -> int:
+ # 寻找x=y, 且 y <= y_max 上从下往上第一个非0的点,如果没找到就返回0
+ y_ = 0
+ for y_ in range(y_max - 1, -1, -1):
+ if image_[y_][x_] != 0:
+ break
+ return y_
+ def locate_width(image_:np.ndarray, y_:int, mode, left_or_right:int=None):
+ # 从y=y这个水平线上寻找两边的非零点
+ # 增加left_or_right的原因在于为下面check_jaw服务
+ if mode==1: # 左往右
+ x_ = 0
+ if left_or_right is None:
+ left_or_right = 0
+ for x_ in range(left_or_right, width):
+ if image_[y_][x_] != 0:
+ break
+ else: # 右往左
+ x_ = width
+ if left_or_right is None:
+ left_or_right = width - 1
+ for x_ in range(left_or_right, -1, -1):
+ if image_[y_][x_] != 0:
+ break
+ return x_
+ def check_jaw(image_:np.ndarray, left_, right_):
+ """
+ 检查选择的点是否与截到下巴,如果截到了,就往下平移一个单位
+ """
+ f= True # True代表没截到下巴
+ # [x, y]
+ for x_cell in range(left_[0] + 1, right_[0]):
+ if image_[left_[1]][x_cell] == 0:
+ f = False
+ break
+ if f is True:
+ return left_, right_
+ else:
+ y_ = left_[1] + 2
+ x_left_ = locate_width(image_, y_, mode=1, left_or_right=left_[0])
+ x_right_ = locate_width(image_, y_, mode=2, left_or_right=right_[0])
+ left_, right_ = check_jaw(image_, [x_left_, y_], [x_right_, y_])
+ return left_, right_
+ # 选择脖子的四个点,核心在于选择上面的两个点,这两个点的确定的位置应该是"宽出来的"两个点
+ _, _ ,_, a = cv2.split(image) # 这应该是一个四通道的图像
+ ret,a_thresh = cv2.threshold(a,127,255,cv2.THRESH_BINARY)
+ y_high, y_low, x_left, x_right = get_box_pro(image=image, model=1) # 直接返回矩阵信息
+ y_left_side = locate_side(image_=a_thresh, x_=x_left, y_max=y_low) # 左边的点的y轴坐标
+ y_right_side = locate_side(image_=a_thresh, x_=x_right, y_max=y_low) # 右边的点的y轴坐标
+ y = min(y_left_side, y_right_side) # 将两点的坐标保持相同
+ cell_left_above, cell_right_above = check_jaw(a_thresh,[x_left, y], [x_right, y])
+ x_left, x_right = cell_left_above[0], cell_right_above[0]
+ # 此时我们寻找到了脖子的"宽出来的"两个点,这两个点作为上面的两个点, 接下来寻找下面的两个点
+    if per_to_side > 1:
+        raise ValueError("per_to_side must be less than 1!")
+    # The perspective transform below stretches this region into a rectangle; record its height and width first.
+    height_ = 100  # should really depend on how far we stretch, but is fixed to 100 for now
+    width_ = x_right - x_left  # i.e. cell_right_above[1] - cell_left_above[1]
+    y = int((y_low - y) * per_to_side + y)  # y coordinate of the lower pair of points
+    cell_left_below = [locate_width(a_thresh, y_=y, mode=1), y]
+    cell_right_bellow = [locate_width(a_thresh, y_=y, mode=2), y]
+ # 四个点全齐,开始透视变换
+ # 寻找透视变换后的四个点,只需要变换below的两个点即可
+ # cell_left_below_final, cell_right_bellow_final = ([cell_left_above[1], y_low], [cell_right_above[1], y_low])
+ # 需要变换的四个点为 cell_left_above, cell_right_above, cell_left_below, cell_right_bellow
+ rect = np.array([cell_left_above, cell_right_above, cell_left_below, cell_right_bellow],
+ dtype='float32')
+ # 变化后的坐标点
+ dst = np.array([[0, 0], [width_, 0], [0 , height_], [width_, height_]],
+ dtype='float32')
+ # 计算变换矩阵
+ M = cv2.getPerspectiveTransform(rect, dst)
+ warped = cv2.warpPerspective(image, M, (width_, height_))
+
+ # a = cv2.erode(a, (10, 10))
+ # image = cv2.merge((r, g, b, a))
+ final = cover_image(image=warped, background=image, mode=3, x=cell_left_above[0], y=cell_left_above[1])
+ # tmp = np.zeros(image.shape)
+ # final = cover_image(image=warped, background=tmp, mode=3, x=cell_left_above[0], y=cell_left_above[1])
+ # final = cover_image(image=image, background=final, mode=3, x=0, y=0)
+ return final
+
+
+def bestJunctionCheck(image:np.ndarray, offset:int, stepSize:int=2):
+ """
+ 最优点检测算算法输入一张脖子图片(无论这张图片是否已经被二值化,我都认为没有被二值化),输出一个小数(脖子最上方与衔接点位置/脖子图像长度)
+ 与beta版不同的是它新增了一个阈值限定内容.
+ 对于脖子而言,我我们首先可以定位到上面的部分,然后根据上面的这个点向下进行遍历检测.
+ 与beta版类似,我们使用一个stepSize来用作斜率的检测
+ 但是对于遍历检测而言,与beta版不同的是,我们需要对遍历的地方进行一定的限制.
+ 限制的标准是,如果当前遍历的点的横坐标和起始点横坐标的插值超过了某个阈值,则认为是越界.
+ """
+ point_k = 1
+ _, _, _, a = cv2.split(image) # 这应该是一个四通道的图像
+ height, width = a.shape
+ ret, a_thresh = cv2.threshold(a, 127, 255, cv2.THRESH_BINARY) # 将透明图层二值化
+ # 直接返回脖子的位置信息, 修正系数为0, get_box_pro内部也封装了二值化,所以直接输入原图
+ y_high, y_low, _, _ = get_box_pro(image=image, model=1, correction_factor=0)
+ # 真正有用的只有上下y轴的两个值...
+ # 首先当然是确定起始点的位置,我们用同样的scan扫描函数进行行遍历.
+ def scan(y_:int, max_num:int=2):
+ num = 0
+ # 设定两个值,分别代表脖子的左边和右边
+ left = False
+ right = False
+ for x_ in range(width):
+ if a_thresh[y_][x_] != 0:
+ # 检测左边
+ if x_ < width // 2 and left is False:
+ num += 1
+ left = True
+ # 检测右边
+ elif x_ > width // 2 and right is False:
+ num += 1
+ right = True
+ return True if num >= max_num else False
+ def locate_neck_above():
+ """
+ 定位脖子的尖尖脚
+ """
+ # y_high就是脖子的最高点
+ for y_ in range(y_high, height):
+ if scan(y_):
+ return y_
+ y_start = locate_neck_above() # 得到遍历的初始高度
+    if y_low - y_start < stepSize:
+        raise ValueError("The neck region is too small!")
+ # 然后获取一下初始的坐标点
+ x_left, x_right = 0, width
+ for x_left_ in range(0, width):
+ if a_thresh[y_start][x_left_] != 0:
+ x_left = x_left_
+ break
+ for x_right_ in range(width -1 , -1, -1):
+ if a_thresh[y_start][x_right_] != 0:
+ x_right = x_right_
+ break
+ # 接下来我定义两个生成器,首先是脖子轮廓(向下寻找的)生成器,每进行一次next,生成器会返回y+1的脖子轮廓点
+ def contoursGenerator(image_:np.ndarray, y_:int, mode):
+ """
+ 这会是一个生成器,用于生成脖子两边的轮廓
+ y_ 是启始点的y坐标,每一次寻找都会让y_+1
+ mode==1说明是找左边的边,即,image_[y_][x_] == 0 且image_[y_][x_ + 1] !=0 时跳出;
+ 否则 当image_[y_][x_] != 0 时, x_ - 1; 当image_[y_][x_] == 0 且 image_[y_][x_ + 1] ==0 时x_ + 1
+ mode==2说明是找右边的边,即,image_[y_][x_] == 0 且image_[y_][x_ - 1] !=0 时跳出
+ 否则 当image_[y_][x_] != 0 时, x_ + 1; 当image_[y_][x_] == 0 且 image_[y_][x_ - 1] ==0 时x_ - 1
+ """
+ y_ += 1
+ try:
+ if mode == 1:
+ x_ = 0
+ while 0 <= y_ < height and 0 <= x_ < width:
+ while image_[y_][x_] != 0 and x_ >= 0: x_ -= 1
+ # 这里其实会有bug,不过可以不管
+ while x_ < width and image_[y_][x_] == 0 and image_[y_][x_ + 1] == 0: x_ += 1
+ yield [y_, x_]
+ y_ += 1
+ elif mode == 2:
+ x_ = width-1
+ while 0 <= y_ < height and 0 <= x_ < width:
+ while x_ < width and image_[y_][x_] != 0: x_ += 1
+ while x_ >= 0 and image_[y_][x_] == 0 and image_[y_][x_ - 1] == 0: x_ -= 1
+ yield [y_, x_]
+ y_ += 1
+ # 当处理失败则返回False
+ except IndexError:
+ yield False
+ # 然后是斜率生成器,这个生成器依赖子轮廓生成器,每一次生成轮廓后会计算斜率,另一个点的选取和stepSize有关
+ def kGenerator(image_: np.ndarray, mode):
+ """
+ 导数生成器,用来生成每一个点对应的导数
+ """
+ y_ = y_start
+ # 对起始点建立一个生成器, mode=1时是左边轮廓,mode=2时是右边轮廓
+ c_generator = contoursGenerator(image_=image_, y_=y_, mode=mode)
+ for cell in c_generator:
+ # 寻找距离当前cell距离为stepSize的轮廓点
+ kc = contoursGenerator(image_=image_, y_=cell[0] + stepSize, mode=mode)
+ kCell = next(kc)
+ if kCell is False:
+ # 寻找失败
+ yield False, False
+ else:
+ # 寻找成功,返回当坐标点和斜率值
+ # 对于左边而言,斜率必然是前一个点的坐标减去后一个点的坐标
+ # 对于右边而言,斜率必然是后一个点的坐标减去前一个点的坐标
+ k = (cell[1] - kCell[1]) / stepSize if mode == 1 else (kCell[1] - cell[1]) / stepSize
+ yield k, cell
+ # 接着开始写寻找算法,需要注意的是我们是分两边选择的
+ def findPt(image_:np.ndarray, mode):
+ x_base = x_left if mode == 1 else x_right
+ k_generator = kGenerator(image_=image_, mode=mode)
+ k, cell = k_generator.__next__()
+ if k is False:
+ raise ValueError("无法找到拐点!")
+ k_next, cell_next = k_generator.__next__()
+ while k_next is not False:
+ cell = cell_next
+ # if cell[1] > x_base and mode == 2:
+ # x_base = cell[1]
+ # elif cell[1] < x_base and mode == 1:
+ # x_base = cell[1]
+ # 跳出循环的方式一:斜率超过了某个值
+ if k_next > point_k:
+ print("K out")
+ break
+ # 跳出循环的方式二:超出阈值
+ elif abs(cell[1] - x_base) > offset:
+ print("O out")
+ break
+ k_next, cell_next = k_generator.__next__()
+ if abs(cell[1] - x_base) > offset:
+ cell[0] = cell[0] - offset - 1
+ return cell[0]
+ # 先找左边的拐点:
+ pointY_left = findPt(image_=a_thresh, mode=1)
+ # 再找右边的拐点:
+ pointY_right = findPt(image_=a_thresh, mode=2)
+ point = min(pointY_right, pointY_left)
+ per = (point - y_high) / (y_low - y_high)
+ # pointX_left = next(contoursGenerator(image_=a_thresh, y_= point- 1, mode=1))[1]
+ # pointX_right = next(contoursGenerator(image_=a_thresh, y_=point - 1, mode=2))[1]
+ # return [pointX_left, point], [pointX_right, point]
+ return per
+
+
+
+
+
+if __name__ == "__main__":
+ img = cv2.imread("./neck_temp/neck_image6.png", cv2.IMREAD_UNCHANGED)
+ new = transformationNeck(img)
+ cv2.imwrite("./1.png", new)
+
+
+
+
diff --git a/hivisionai/hycv/matting_tools.py b/hivisionai/hycv/matting_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..647c351451594e04ca0493f121bfc99ee95ba9e7
--- /dev/null
+++ b/hivisionai/hycv/matting_tools.py
@@ -0,0 +1,39 @@
+import numpy as np
+from PIL import Image
+import cv2
+import onnxruntime
+from .tensor2numpy import NNormalize, NTo_Tensor, NUnsqueeze
+from .vision import image2bgr
+
+
+def read_modnet_image(input_image, ref_size=512):
+ im = Image.fromarray(np.uint8(input_image))
+ width, length = im.size[0], im.size[1]
+ im = np.asarray(im)
+ im = image2bgr(im)
+ im = cv2.resize(im, (ref_size, ref_size), interpolation=cv2.INTER_AREA)
+ im = NNormalize(im, mean=np.array([0.5, 0.5, 0.5]), std=np.array([0.5, 0.5, 0.5]))
+ im = NUnsqueeze(NTo_Tensor(im))
+
+ return im, width, length
+
+
+def get_modnet_matting(input_image, checkpoint_path="./test.onnx", ref_size=512):
+
+ print("checkpoint_path:", checkpoint_path)
+ sess = onnxruntime.InferenceSession(checkpoint_path)
+
+ input_name = sess.get_inputs()[0].name
+ output_name = sess.get_outputs()[0].name
+
+ im, width, length = read_modnet_image(input_image=input_image, ref_size=ref_size)
+
+ matte = sess.run([output_name], {input_name: im})
+ matte = (matte[0] * 255).astype('uint8')
+ matte = np.squeeze(matte)
+ mask = cv2.resize(matte, (width, length), interpolation=cv2.INTER_AREA)
+ b, g, r = cv2.split(np.uint8(input_image))
+
+ output_image = cv2.merge((b, g, r, mask))
+
+ return output_image
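+
+
+# Illustrative usage sketch added for documentation; not part of the original module.
+# "portrait.jpg" and "modnet.onnx" are placeholder paths, not files shipped with this repo.
+def _matting_example():
+    img = cv2.imread("portrait.jpg")  # 3-channel BGR input
+    rgba = get_modnet_matting(img, checkpoint_path="modnet.onnx", ref_size=512)
+    cv2.imwrite("portrait_matting.png", rgba)  # 4-channel output: BGR plus alpha matte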
\ No newline at end of file
diff --git a/hivisionai/hycv/mtcnn_onnx/__init__.py b/hivisionai/hycv/mtcnn_onnx/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..617ba38c34b1801b2db2e0209b4e886c9d24c490
--- /dev/null
+++ b/hivisionai/hycv/mtcnn_onnx/__init__.py
@@ -0,0 +1,2 @@
+from .visualization_utils import show_bboxes
+from .detector import detect_faces
diff --git a/hivisionai/hycv/mtcnn_onnx/__pycache__/__init__.cpython-310.pyc b/hivisionai/hycv/mtcnn_onnx/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4384d74ed4b623edf8c1623eda96e29a96b6e2e6
Binary files /dev/null and b/hivisionai/hycv/mtcnn_onnx/__pycache__/__init__.cpython-310.pyc differ
diff --git a/hivisionai/hycv/mtcnn_onnx/__pycache__/__init__.cpython-38.pyc b/hivisionai/hycv/mtcnn_onnx/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9d2545b9f79b5661aba499207ee476cdb6c38574
Binary files /dev/null and b/hivisionai/hycv/mtcnn_onnx/__pycache__/__init__.cpython-38.pyc differ
diff --git a/hivisionai/hycv/mtcnn_onnx/__pycache__/box_utils.cpython-310.pyc b/hivisionai/hycv/mtcnn_onnx/__pycache__/box_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fffbde10833a6915a1672edb116a42a54bc004ff
Binary files /dev/null and b/hivisionai/hycv/mtcnn_onnx/__pycache__/box_utils.cpython-310.pyc differ
diff --git a/hivisionai/hycv/mtcnn_onnx/__pycache__/box_utils.cpython-38.pyc b/hivisionai/hycv/mtcnn_onnx/__pycache__/box_utils.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2b6b545ca5d7ff20da136858fc17f5f18d9067b7
Binary files /dev/null and b/hivisionai/hycv/mtcnn_onnx/__pycache__/box_utils.cpython-38.pyc differ
diff --git a/hivisionai/hycv/mtcnn_onnx/__pycache__/detector.cpython-310.pyc b/hivisionai/hycv/mtcnn_onnx/__pycache__/detector.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ae2b531c7c10c7e66a0f1bcb0efbc950bfa1bcdb
Binary files /dev/null and b/hivisionai/hycv/mtcnn_onnx/__pycache__/detector.cpython-310.pyc differ
diff --git a/hivisionai/hycv/mtcnn_onnx/__pycache__/detector.cpython-38.pyc b/hivisionai/hycv/mtcnn_onnx/__pycache__/detector.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5030dea6267fa2bdbf5855e3bfb5596b4ee08ef0
Binary files /dev/null and b/hivisionai/hycv/mtcnn_onnx/__pycache__/detector.cpython-38.pyc differ
diff --git a/hivisionai/hycv/mtcnn_onnx/__pycache__/first_stage.cpython-310.pyc b/hivisionai/hycv/mtcnn_onnx/__pycache__/first_stage.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a16ce7801b83f810f4ff5043385f064498e4e827
Binary files /dev/null and b/hivisionai/hycv/mtcnn_onnx/__pycache__/first_stage.cpython-310.pyc differ
diff --git a/hivisionai/hycv/mtcnn_onnx/__pycache__/first_stage.cpython-38.pyc b/hivisionai/hycv/mtcnn_onnx/__pycache__/first_stage.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..833ec384b2f5b12e004fd5aae84721df952bd590
Binary files /dev/null and b/hivisionai/hycv/mtcnn_onnx/__pycache__/first_stage.cpython-38.pyc differ
diff --git a/hivisionai/hycv/mtcnn_onnx/__pycache__/visualization_utils.cpython-310.pyc b/hivisionai/hycv/mtcnn_onnx/__pycache__/visualization_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0c7f10a81a69c4ae3941149ae5949201658d9161
Binary files /dev/null and b/hivisionai/hycv/mtcnn_onnx/__pycache__/visualization_utils.cpython-310.pyc differ
diff --git a/hivisionai/hycv/mtcnn_onnx/__pycache__/visualization_utils.cpython-38.pyc b/hivisionai/hycv/mtcnn_onnx/__pycache__/visualization_utils.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8891a490123a307d1ff38e43b4b99d0ee8358a7b
Binary files /dev/null and b/hivisionai/hycv/mtcnn_onnx/__pycache__/visualization_utils.cpython-38.pyc differ
diff --git a/hivisionai/hycv/mtcnn_onnx/box_utils.py b/hivisionai/hycv/mtcnn_onnx/box_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..3557387fa71e919099b9c7afa3e034f4e90b25f3
--- /dev/null
+++ b/hivisionai/hycv/mtcnn_onnx/box_utils.py
@@ -0,0 +1,238 @@
+import numpy as np
+from PIL import Image
+
+
+def nms(boxes, overlap_threshold=0.5, mode='union'):
+ """Non-maximum suppression.
+
+ Arguments:
+ boxes: a float numpy array of shape [n, 5],
+ where each row is (xmin, ymin, xmax, ymax, score).
+ overlap_threshold: a float number.
+ mode: 'union' or 'min'.
+
+ Returns:
+ list with indices of the selected boxes
+ """
+
+ # if there are no boxes, return the empty list
+ if len(boxes) == 0:
+ return []
+
+ # list of picked indices
+ pick = []
+
+ # grab the coordinates of the bounding boxes
+ x1, y1, x2, y2, score = [boxes[:, i] for i in range(5)]
+
+ area = (x2 - x1 + 1.0)*(y2 - y1 + 1.0)
+ ids = np.argsort(score) # in increasing order
+
+ while len(ids) > 0:
+
+ # grab index of the largest value
+ last = len(ids) - 1
+ i = ids[last]
+ pick.append(i)
+
+ # compute intersections
+ # of the box with the largest score
+ # with the rest of boxes
+
+ # left top corner of intersection boxes
+ ix1 = np.maximum(x1[i], x1[ids[:last]])
+ iy1 = np.maximum(y1[i], y1[ids[:last]])
+
+ # right bottom corner of intersection boxes
+ ix2 = np.minimum(x2[i], x2[ids[:last]])
+ iy2 = np.minimum(y2[i], y2[ids[:last]])
+
+ # width and height of intersection boxes
+ w = np.maximum(0.0, ix2 - ix1 + 1.0)
+ h = np.maximum(0.0, iy2 - iy1 + 1.0)
+
+ # intersections' areas
+ inter = w * h
+ if mode == 'min':
+ overlap = inter/np.minimum(area[i], area[ids[:last]])
+ elif mode == 'union':
+ # intersection over union (IoU)
+ overlap = inter/(area[i] + area[ids[:last]] - inter)
+
+ # delete all boxes where overlap is too big
+ ids = np.delete(
+ ids,
+ np.concatenate([[last], np.where(overlap > overlap_threshold)[0]])
+ )
+
+ return pick
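+
+
+def _nms_example():
+    """Hedged illustration of the routine above; not used by the detector itself."""
+    # Two heavily overlapping boxes collapse onto the higher-scoring one, while the
+    # distant box survives, so the expected result is the index list [0, 2].
+    boxes = np.array([[0, 0, 10, 10, 0.9],
+                      [1, 1, 11, 11, 0.8],
+                      [50, 50, 60, 60, 0.7]], dtype='float32')
+    return nms(boxes, overlap_threshold=0.5)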
+
+
+def convert_to_square(bboxes):
+ """Convert bounding boxes to a square form.
+
+ Arguments:
+ bboxes: a float numpy array of shape [n, 5].
+
+ Returns:
+ a float numpy array of shape [n, 5],
+ squared bounding boxes.
+ """
+
+ square_bboxes = np.zeros_like(bboxes)
+ x1, y1, x2, y2 = [bboxes[:, i] for i in range(4)]
+ h = y2 - y1 + 1.0
+ w = x2 - x1 + 1.0
+ max_side = np.maximum(h, w)
+ square_bboxes[:, 0] = x1 + w*0.5 - max_side*0.5
+ square_bboxes[:, 1] = y1 + h*0.5 - max_side*0.5
+ square_bboxes[:, 2] = square_bboxes[:, 0] + max_side - 1.0
+ square_bboxes[:, 3] = square_bboxes[:, 1] + max_side - 1.0
+ return square_bboxes
+
+
+def calibrate_box(bboxes, offsets):
+ """Transform bounding boxes to be more like true bounding boxes.
+ 'offsets' is one of the outputs of the nets.
+
+ Arguments:
+ bboxes: a float numpy array of shape [n, 5].
+ offsets: a float numpy array of shape [n, 4].
+
+ Returns:
+ a float numpy array of shape [n, 5].
+ """
+ x1, y1, x2, y2 = [bboxes[:, i] for i in range(4)]
+ w = x2 - x1 + 1.0
+ h = y2 - y1 + 1.0
+ w = np.expand_dims(w, 1)
+ h = np.expand_dims(h, 1)
+
+ # this is what happening here:
+ # tx1, ty1, tx2, ty2 = [offsets[:, i] for i in range(4)]
+ # x1_true = x1 + tx1*w
+ # y1_true = y1 + ty1*h
+ # x2_true = x2 + tx2*w
+ # y2_true = y2 + ty2*h
+ # below is just more compact form of this
+
+ # are offsets always such that
+ # x1 < x2 and y1 < y2 ?
+
+ translation = np.hstack([w, h, w, h])*offsets
+ bboxes[:, 0:4] = bboxes[:, 0:4] + translation
+ return bboxes
+
+
+def get_image_boxes(bounding_boxes, img, size=24):
+ """Cut out boxes from the image.
+
+ Arguments:
+ bounding_boxes: a float numpy array of shape [n, 5].
+ img: an instance of PIL.Image.
+ size: an integer, size of cutouts.
+
+ Returns:
+ a float numpy array of shape [n, 3, size, size].
+ """
+
+ num_boxes = len(bounding_boxes)
+ width, height = img.size
+
+ [dy, edy, dx, edx, y, ey, x, ex, w, h] = correct_bboxes(bounding_boxes, width, height)
+ img_boxes = np.zeros((num_boxes, 3, size, size), 'float32')
+
+ for i in range(num_boxes):
+ img_box = np.zeros((h[i], w[i], 3), 'uint8')
+
+ img_array = np.asarray(img, 'uint8')
+ img_box[dy[i]:(edy[i] + 1), dx[i]:(edx[i] + 1), :] =\
+ img_array[y[i]:(ey[i] + 1), x[i]:(ex[i] + 1), :]
+
+ # resize
+ img_box = Image.fromarray(img_box)
+ img_box = img_box.resize((size, size), Image.BILINEAR)
+ img_box = np.asarray(img_box, 'float32')
+
+ img_boxes[i, :, :, :] = _preprocess(img_box)
+
+ return img_boxes
+
+
+def correct_bboxes(bboxes, width, height):
+ """Crop boxes that are too big and get coordinates
+ with respect to cutouts.
+
+ Arguments:
+ bboxes: a float numpy array of shape [n, 5],
+ where each row is (xmin, ymin, xmax, ymax, score).
+ width: a float number.
+ height: a float number.
+
+ Returns:
+ dy, dx, edy, edx: a int numpy arrays of shape [n],
+ coordinates of the boxes with respect to the cutouts.
+ y, x, ey, ex: a int numpy arrays of shape [n],
+ corrected ymin, xmin, ymax, xmax.
+ h, w: a int numpy arrays of shape [n],
+ just heights and widths of boxes.
+
+ in the following order:
+ [dy, edy, dx, edx, y, ey, x, ex, w, h].
+ """
+
+ x1, y1, x2, y2 = [bboxes[:, i] for i in range(4)]
+ w, h = x2 - x1 + 1.0, y2 - y1 + 1.0
+ num_boxes = bboxes.shape[0]
+
+ # 'e' stands for end
+ # (x, y) -> (ex, ey)
+ x, y, ex, ey = x1, y1, x2, y2
+
+ # we need to cut out a box from the image.
+ # (x, y, ex, ey) are corrected coordinates of the box
+ # in the image.
+ # (dx, dy, edx, edy) are coordinates of the box in the cutout
+ # from the image.
+ dx, dy = np.zeros((num_boxes,)), np.zeros((num_boxes,))
+ edx, edy = w.copy() - 1.0, h.copy() - 1.0
+
+ # if box's bottom right corner is too far right
+ ind = np.where(ex > width - 1.0)[0]
+ edx[ind] = w[ind] + width - 2.0 - ex[ind]
+ ex[ind] = width - 1.0
+
+ # if box's bottom right corner is too low
+ ind = np.where(ey > height - 1.0)[0]
+ edy[ind] = h[ind] + height - 2.0 - ey[ind]
+ ey[ind] = height - 1.0
+
+ # if box's top left corner is too far left
+ ind = np.where(x < 0.0)[0]
+ dx[ind] = 0.0 - x[ind]
+ x[ind] = 0.0
+
+ # if box's top left corner is too high
+ ind = np.where(y < 0.0)[0]
+ dy[ind] = 0.0 - y[ind]
+ y[ind] = 0.0
+
+ return_list = [dy, edy, dx, edx, y, ey, x, ex, w, h]
+ return_list = [i.astype('int32') for i in return_list]
+
+ return return_list
+
+
+def _preprocess(img):
+ """Preprocessing step before feeding the network.
+
+ Arguments:
+ img: a float numpy array of shape [h, w, c].
+
+ Returns:
+ a float numpy array of shape [1, c, h, w].
+ """
+ img = img.transpose((2, 0, 1))
+ img = np.expand_dims(img, 0)
+ img = (img - 127.5)*0.0078125
+ return img
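+
+
+def _preprocess_example():
+    # Hedged sketch of the preprocessing contract above: an HxWxC crop becomes a
+    # 1xCxHxW float blob scaled roughly into [-1, 1].
+    crop = np.zeros((24, 24, 3), 'float32')
+    blob = _preprocess(crop)
+    return blob.shape  # (1, 3, 24, 24); every value equals (0 - 127.5) * 0.0078125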
diff --git a/hivisionai/hycv/mtcnn_onnx/detector.py b/hivisionai/hycv/mtcnn_onnx/detector.py
new file mode 100644
index 0000000000000000000000000000000000000000..bdcfb36b8306019011e2559cfc7abbb09af8db2e
--- /dev/null
+++ b/hivisionai/hycv/mtcnn_onnx/detector.py
@@ -0,0 +1,166 @@
+import numpy as np
+from .box_utils import nms, calibrate_box, get_image_boxes, convert_to_square
+from .first_stage import run_first_stage
+import onnxruntime
+import os
+from os.path import exists
+import requests
+
+
+def download_img(img_url, base_dir):
+ print("Downloading Onnx Model in:",img_url)
+ r = requests.get(img_url, stream=True)
+ filename = img_url.split("/")[-1]
+ # print(r.status_code) # 返回状态码
+    if r.status_code == 200:
+        with open(f'{base_dir}/{filename}', 'wb') as f:  # 将内容写入文件
+            f.write(r.content)
+        print(f"Download Finished -- {filename}")
+ del r
+
+
+def detect_faces(image, min_face_size=20.0, thresholds=None, nms_thresholds=None):
+ """
+ Arguments:
+ image: an instance of PIL.Image.
+ min_face_size: a float number.
+ thresholds: a list of length 3.
+ nms_thresholds: a list of length 3.
+
+ Returns:
+ two float numpy arrays of shapes [n_boxes, 4] and [n_boxes, 10],
+ bounding boxes and facial landmarks.
+ """
+ if nms_thresholds is None:
+ nms_thresholds = [0.7, 0.7, 0.7]
+ if thresholds is None:
+ thresholds = [0.6, 0.7, 0.8]
+ base_url = "https://linimages.oss-cn-beijing.aliyuncs.com/"
+ onnx_filedirs = ["pnet.onnx", "rnet.onnx", "onet.onnx"]
+
+ # LOAD MODELS
+    basedir = os.path.dirname(os.path.realpath(__file__))
+
+ for onnx_filedir in onnx_filedirs:
+ if not exists(f"{basedir}/weights"):
+ os.mkdir(f"{basedir}/weights")
+ if not exists(f"{basedir}/weights/{onnx_filedir}"):
+ # download onnx model
+ download_img(img_url=base_url+onnx_filedir, base_dir=f"{basedir}/weights")
+
+ pnet = onnxruntime.InferenceSession(f"{basedir}/weights/pnet.onnx") # Load a ONNX model
+ input_name_pnet = pnet.get_inputs()[0].name
+ output_name_pnet1 = pnet.get_outputs()[0].name
+ output_name_pnet2 = pnet.get_outputs()[1].name
+ pnet = [pnet, input_name_pnet, [output_name_pnet1, output_name_pnet2]]
+
+ rnet = onnxruntime.InferenceSession(f"{basedir}/weights/rnet.onnx") # Load a ONNX model
+ input_name_rnet = rnet.get_inputs()[0].name
+ output_name_rnet1 = rnet.get_outputs()[0].name
+ output_name_rnet2 = rnet.get_outputs()[1].name
+ rnet = [rnet, input_name_rnet, [output_name_rnet1, output_name_rnet2]]
+
+ onet = onnxruntime.InferenceSession(f"{basedir}/weights/onet.onnx") # Load a ONNX model
+ input_name_onet = onet.get_inputs()[0].name
+ output_name_onet1 = onet.get_outputs()[0].name
+ output_name_onet2 = onet.get_outputs()[1].name
+ output_name_onet3 = onet.get_outputs()[2].name
+ onet = [onet, input_name_onet, [output_name_onet1, output_name_onet2, output_name_onet3]]
+
+ # BUILD AN IMAGE PYRAMID
+ width, height = image.size
+ min_length = min(height, width)
+
+ min_detection_size = 12
+ factor = 0.707 # sqrt(0.5)
+
+ # scales for scaling the image
+ scales = []
+
+ # scales the image so that
+ # minimum size that we can detect equals to
+ # minimum face size that we want to detect
+ m = min_detection_size/min_face_size
+ min_length *= m
+
+ factor_count = 0
+ while min_length > min_detection_size:
+ scales.append(m*factor**factor_count)
+ min_length *= factor
+ factor_count += 1
+
+ # STAGE 1
+
+ # it will be returned
+ bounding_boxes = []
+
+ # run P-Net on different scales
+ for s in scales:
+ boxes = run_first_stage(image, pnet, scale=s, threshold=thresholds[0])
+ bounding_boxes.append(boxes)
+
+ # collect boxes (and offsets, and scores) from different scales
+ bounding_boxes = [i for i in bounding_boxes if i is not None]
+ bounding_boxes = np.vstack(bounding_boxes)
+
+ keep = nms(bounding_boxes[:, 0:5], nms_thresholds[0])
+ bounding_boxes = bounding_boxes[keep]
+
+ # use offsets predicted by pnet to transform bounding boxes
+ bounding_boxes = calibrate_box(bounding_boxes[:, 0:5], bounding_boxes[:, 5:])
+ # shape [n_boxes, 5]
+
+ bounding_boxes = convert_to_square(bounding_boxes)
+ bounding_boxes[:, 0:4] = np.round(bounding_boxes[:, 0:4])
+
+ # STAGE 2
+
+ img_boxes = get_image_boxes(bounding_boxes, image, size=24)
+
+ output = rnet[0].run([rnet[2][0], rnet[2][1]], {rnet[1]: img_boxes})
+ offsets = output[0] # shape [n_boxes, 4]
+ probs = output[1] # shape [n_boxes, 2]
+
+ keep = np.where(probs[:, 1] > thresholds[1])[0]
+ bounding_boxes = bounding_boxes[keep]
+ bounding_boxes[:, 4] = probs[keep, 1].reshape((-1,))
+ offsets = offsets[keep]
+
+ keep = nms(bounding_boxes, nms_thresholds[1])
+ bounding_boxes = bounding_boxes[keep]
+ bounding_boxes = calibrate_box(bounding_boxes, offsets[keep])
+ bounding_boxes = convert_to_square(bounding_boxes)
+ bounding_boxes[:, 0:4] = np.round(bounding_boxes[:, 0:4])
+
+ # STAGE 3
+
+ img_boxes = get_image_boxes(bounding_boxes, image, size=48)
+ if len(img_boxes) == 0:
+ return [], []
+ #img_boxes = Variable(torch.FloatTensor(img_boxes), volatile=True)
+ # with torch.no_grad():
+ # img_boxes = torch.FloatTensor(img_boxes)
+ # output = onet(img_boxes)
+    output = onet[0].run([onet[2][0], onet[2][1], onet[2][2]], {onet[1]: img_boxes})
+ landmarks = output[0] # shape [n_boxes, 10]
+ offsets = output[1] # shape [n_boxes, 4]
+ probs = output[2] # shape [n_boxes, 2]
+
+ keep = np.where(probs[:, 1] > thresholds[2])[0]
+ bounding_boxes = bounding_boxes[keep]
+ bounding_boxes[:, 4] = probs[keep, 1].reshape((-1,))
+ offsets = offsets[keep]
+ landmarks = landmarks[keep]
+
+ # compute landmark points
+ width = bounding_boxes[:, 2] - bounding_boxes[:, 0] + 1.0
+ height = bounding_boxes[:, 3] - bounding_boxes[:, 1] + 1.0
+ xmin, ymin = bounding_boxes[:, 0], bounding_boxes[:, 1]
+ landmarks[:, 0:5] = np.expand_dims(xmin, 1) + np.expand_dims(width, 1)*landmarks[:, 0:5]
+ landmarks[:, 5:10] = np.expand_dims(ymin, 1) + np.expand_dims(height, 1)*landmarks[:, 5:10]
+
+ bounding_boxes = calibrate_box(bounding_boxes, offsets)
+ keep = nms(bounding_boxes, nms_thresholds[2], mode='min')
+ bounding_boxes = bounding_boxes[keep]
+ landmarks = landmarks[keep]
+
+ return bounding_boxes, landmarks
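+
+
+def _detect_faces_example(image_path="face.jpg"):
+    """Hedged usage sketch; "face.jpg" is a placeholder path, not a bundled asset."""
+    from PIL import Image
+    img = Image.open(image_path).convert('RGB')
+    bounding_boxes, landmarks = detect_faces(img, min_face_size=20.0)
+    # bounding_boxes: [n, 5] rows of (xmin, ymin, xmax, ymax, score)
+    # landmarks: [n, 10] rows of (x1..x5, y1..y5) for eyes, nose tip and mouth corners
+    return bounding_boxes, landmarks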
diff --git a/hivisionai/hycv/mtcnn_onnx/first_stage.py b/hivisionai/hycv/mtcnn_onnx/first_stage.py
new file mode 100644
index 0000000000000000000000000000000000000000..440e3ffa39de669285e1150bf5036fb42873640b
--- /dev/null
+++ b/hivisionai/hycv/mtcnn_onnx/first_stage.py
@@ -0,0 +1,97 @@
+import math
+from PIL import Image
+import numpy as np
+from .box_utils import nms, _preprocess
+
+
+def run_first_stage(image, net, scale, threshold):
+ """Run P-Net, generate bounding boxes, and do NMS.
+
+ Arguments:
+ image: an instance of PIL.Image.
+ net: an instance of pytorch's nn.Module, P-Net.
+ scale: a float number,
+ scale width and height of the image by this number.
+ threshold: a float number,
+ threshold on the probability of a face when generating
+ bounding boxes from predictions of the net.
+
+ Returns:
+ a float numpy array of shape [n_boxes, 9],
+ bounding boxes with scores and offsets (4 + 1 + 4).
+ """
+
+ # scale the image and convert it to a float array
+
+ width, height = image.size
+ sw, sh = math.ceil(width*scale), math.ceil(height*scale)
+ img = image.resize((sw, sh), Image.BILINEAR)
+ img = np.asarray(img, 'float32')
+ img = _preprocess(img)
+ # with torch.no_grad():
+ # img = torch.FloatTensor(_preprocess(img))
+ output = net[0].run([net[2][0],net[2][1]], {net[1]: img})
+ probs = output[1][0, 1, :, :]
+ offsets = output[0]
+ # probs: probability of a face at each sliding window
+ # offsets: transformations to true bounding boxes
+
+ boxes = _generate_bboxes(probs, offsets, scale, threshold)
+ if len(boxes) == 0:
+ return None
+
+ keep = nms(boxes[:, 0:5], overlap_threshold=0.5)
+ return boxes[keep]
+
+
+def _generate_bboxes(probs, offsets, scale, threshold):
+ """Generate bounding boxes at places
+ where there is probably a face.
+
+ Arguments:
+ probs: a float numpy array of shape [n, m].
+ offsets: a float numpy array of shape [1, 4, n, m].
+ scale: a float number,
+ width and height of the image were scaled by this number.
+ threshold: a float number.
+
+ Returns:
+ a float numpy array of shape [n_boxes, 9]
+ """
+
+ # applying P-Net is equivalent, in some sense, to
+ # moving 12x12 window with stride 2
+ stride = 2
+ cell_size = 12
+
+ # indices of boxes where there is probably a face
+ inds = np.where(probs > threshold)
+
+ if inds[0].size == 0:
+ return np.array([])
+
+ # transformations of bounding boxes
+ tx1, ty1, tx2, ty2 = [offsets[0, i, inds[0], inds[1]] for i in range(4)]
+ # they are defined as:
+ # w = x2 - x1 + 1
+ # h = y2 - y1 + 1
+ # x1_true = x1 + tx1*w
+ # x2_true = x2 + tx2*w
+ # y1_true = y1 + ty1*h
+ # y2_true = y2 + ty2*h
+
+ offsets = np.array([tx1, ty1, tx2, ty2])
+ score = probs[inds[0], inds[1]]
+
+ # P-Net is applied to scaled images
+ # so we need to rescale bounding boxes back
+ bounding_boxes = np.vstack([
+ np.round((stride*inds[1] + 1.0)/scale),
+ np.round((stride*inds[0] + 1.0)/scale),
+ np.round((stride*inds[1] + 1.0 + cell_size)/scale),
+ np.round((stride*inds[0] + 1.0 + cell_size)/scale),
+ score, offsets
+ ])
+ # why one is added?
+
+ return bounding_boxes.T
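+
+
+def _bbox_mapping_example():
+    # Hedged sketch of the coordinate mapping above, added for clarity only: a P-Net
+    # output cell at (row, col) = (3, 7) on an image scaled by 0.5 corresponds to a
+    # 12x12 window in the original image.
+    stride, cell_size, scale = 2, 12, 0.5
+    row, col = 3, 7
+    x1 = round((stride * col + 1.0) / scale)              # 30
+    y1 = round((stride * row + 1.0) / scale)              # 14
+    x2 = round((stride * col + 1.0 + cell_size) / scale)  # 54
+    y2 = round((stride * row + 1.0 + cell_size) / scale)  # 38
+    return x1, y1, x2, y2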
diff --git a/hivisionai/hycv/mtcnn_onnx/visualization_utils.py b/hivisionai/hycv/mtcnn_onnx/visualization_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..bab02be31a6ca44486f98d57de4ab4bfa89394b7
--- /dev/null
+++ b/hivisionai/hycv/mtcnn_onnx/visualization_utils.py
@@ -0,0 +1,31 @@
+from PIL import ImageDraw
+
+
+def show_bboxes(img, bounding_boxes, facial_landmarks=[]):
+ """Draw bounding boxes and facial landmarks.
+
+ Arguments:
+ img: an instance of PIL.Image.
+ bounding_boxes: a float numpy array of shape [n, 5].
+ facial_landmarks: a float numpy array of shape [n, 10].
+
+ Returns:
+ an instance of PIL.Image.
+ """
+
+ img_copy = img.copy()
+ draw = ImageDraw.Draw(img_copy)
+
+ for b in bounding_boxes:
+ draw.rectangle([
+ (b[0], b[1]), (b[2], b[3])
+ ], outline='white')
+
+ for p in facial_landmarks:
+ for i in range(5):
+ draw.ellipse([
+ (p[i] - 1.0, p[i + 5] - 1.0),
+ (p[i] + 1.0, p[i + 5] + 1.0)
+ ], outline='blue')
+
+ return img_copy
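+
+
+def _show_bboxes_example(img, bounding_boxes, facial_landmarks):
+    # Hedged sketch: typically called with the outputs of detect_faces() on the same
+    # PIL image; "annotated.jpg" is a placeholder output path.
+    annotated = show_bboxes(img, bounding_boxes, facial_landmarks)
+    annotated.save("annotated.jpg")
+    return annotated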
diff --git a/hivisionai/hycv/mtcnn_onnx/weights/onet.onnx b/hivisionai/hycv/mtcnn_onnx/weights/onet.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..3e72b2a494edac43815151b665e72306830ba93c
--- /dev/null
+++ b/hivisionai/hycv/mtcnn_onnx/weights/onet.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a42f0cbde62f8032ccc3299fd989a9909c8b185a72a360e247c6f953af366efd
+size 1558989
diff --git a/hivisionai/hycv/mtcnn_onnx/weights/pnet.onnx b/hivisionai/hycv/mtcnn_onnx/weights/pnet.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..3a85efb64f37b80517351e7869b2af8f8567524a
--- /dev/null
+++ b/hivisionai/hycv/mtcnn_onnx/weights/pnet.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b2f1dc55be37d8f73d08cd4038c723d12b9fa90fceb7fa5a4461c4353fb87c5
+size 28316
diff --git a/hivisionai/hycv/mtcnn_onnx/weights/rnet.onnx b/hivisionai/hycv/mtcnn_onnx/weights/rnet.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..1b48098013263f80971a356c20ab64bcb21d8482
--- /dev/null
+++ b/hivisionai/hycv/mtcnn_onnx/weights/rnet.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d8bed44df8fde6bf359806e8ff06f30dda2a9d63cf57aaff8a835d49edcbaee
+size 402933
diff --git a/hivisionai/hycv/tensor2numpy.py b/hivisionai/hycv/tensor2numpy.py
new file mode 100644
index 0000000000000000000000000000000000000000..94edbe3f5d89cedeafd62be6ec9a68f86e1b223e
--- /dev/null
+++ b/hivisionai/hycv/tensor2numpy.py
@@ -0,0 +1,63 @@
+"""
+作者:林泽毅
+建这个开源库的起源呢,是因为在做onnx推理的时候,需要将原来的tensor转换成numpy.array
+问题是Tensor和Numpy的矩阵排布逻辑不同
+包括Tensor推理经常会进行Transform,比如ToTensor,Normalize等
+就想做一些等价转换的函数。
+"""
+import numpy as np
+
+
+def NTo_Tensor(array):
+ """
+ :param array: opencv/PIL读取的numpy矩阵
+ :return:返回一个形如Tensor的numpy矩阵
+ Example:
+ Inputs:array.shape = (512,512,3)
+ Outputs:output.shape = (3,512,512)
+ """
+ output = array.transpose((2, 0, 1))
+ return output
+
+
+def NNormalize(array, mean=np.array([0.5, 0.5, 0.5]), std=np.array([0.5, 0.5, 0.5]), dtype=np.float32):
+ """
+ :param array: opencv/PIL读取的numpy矩阵
+ mean: 归一化均值,np.array格式
+ std: 归一化标准差,np.array格式
+ dtype:输出的numpy数据格式,一般onnx需要float32
+ :return:numpy矩阵
+ Example:
+ Inputs:array为opencv/PIL读取的一张图片
+ mean=np.array([0.5,0.5,0.5])
+ std=np.array([0.5,0.5,0.5])
+ dtype=np.float32
+ Outputs:output为归一化后的numpy矩阵
+ """
+ im = array / 255.0
+ im = np.divide(np.subtract(im, mean), std)
+ output = np.asarray(im, dtype=dtype)
+
+ return output
+
+
+def NUnsqueeze(array, axis=0):
+ """
+ :param array: opencv/PIL读取的numpy矩阵
+ axis:要增加的维度
+ :return:numpy矩阵
+ Example:
+ Inputs:array为opencv/PIL读取的一张图片,array.shape为[512,512,3]
+ axis=0
+ Outputs:output为array在第0维增加一个维度,shape转为[1,512,512,3]
+ """
+ if axis == 0:
+ output = array[None, :, :, :]
+ elif axis == 1:
+ output = array[:, None, :, :]
+ elif axis == 2:
+ output = array[:, :, None, :]
+ else:
+ output = array[:, :, :, None]
+
+ return output
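+
+
+def _to_onnx_input_example():
+    """Hedged sketch of chaining the helpers above into an ONNX-ready input blob."""
+    fake_bgr = np.zeros((512, 512, 3), dtype=np.uint8)  # stand-in for a cv2-read image
+    blob = NNormalize(fake_bgr, mean=np.array([0.5, 0.5, 0.5]), std=np.array([0.5, 0.5, 0.5]))
+    blob = NUnsqueeze(NTo_Tensor(blob))  # HWC -> CHW -> 1xCxHxW, float32
+    return blob.shape  # (1, 3, 512, 512)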
diff --git a/hivisionai/hycv/utils.py b/hivisionai/hycv/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..9bbcd6024e4ff05fa4597511703c96dc994712db
--- /dev/null
+++ b/hivisionai/hycv/utils.py
@@ -0,0 +1,452 @@
+"""
+本文件存放一些自制的简单的图像处理函数
+"""
+from PIL import Image
+import cv2
+import numpy as np
+import math
+import warnings
+import csv
+import glob
+
+
+def cover_mask(image_path, mask_path, alpha=0.85, rate=0.1, if_save=True):
+ """
+ 在图片右下角盖上水印
+ :param image_path:
+ :param mask_path: 水印路径,以PNG方式读取
+ :param alpha: 不透明度,默认为0.85
+ :param rate: 水印比例,越小水印也越小,默认为0.1
+ :param if_save: 是否将裁剪后的图片保存,如果为True,则保存并返回新图路径,否则不保存,返回截取后的图片对象
+ :return: 新的图片路径
+ """
+ # 生成新的图片路径,我们默认图片后缀存在且必然包含“.”
+ path_len = len(image_path)
+ index = 0
+ for index in range(path_len - 1, -1, -1):
+ if image_path[index] == ".":
+ break
+    if path_len - index <= 3 or path_len - index >= 6:
+ raise TypeError("输入的图片格式有误!")
+ new_path = image_path[0:index] + "_with_mask" + image_path[index:path_len]
+ # 以png方式读取水印图
+ mask = Image.open(mask_path).convert('RGBA')
+ mask_h, mask_w = mask.size
+ # 以png的方式读取原图
+ im = Image.open(image_path).convert('RGBA')
+ # 我采取的策略是,先拷贝一张原图im为base作为基底,然后在im上利用paste函数添加水印
+ # 此时的水印是完全不透明的,我需要利用blend函数内置参数alpha进行不透明度调整
+ base = im.copy()
+ # layer = Image.new('RGBA', im.size, (0, 0, 0, ))
+ # tmp = Image.new('RGBA', im.size, (0, 0, 0, 0))
+ h, w = im.size
+ # 根据原图大小缩放水印图
+    mask = mask.resize((int(rate*math.sqrt(w*h*mask_h/mask_w)), int(rate*math.sqrt(w*h*mask_w/mask_h))), Image.LANCZOS)  # Image.ANTIALIAS在新版Pillow中已移除,LANCZOS与其等价
+ mh, mw = mask.size
+ r, g, b, a = mask.split()
+ im.paste(mask, (h-mh, w-mw), mask=a)
+ # im.show()
+ out = Image.blend(base, im, alpha=alpha).convert('RGB')
+ # out = Image.alpha_composite(im, layer).convert('RGB')
+ if if_save:
+ out.save(new_path)
+ return new_path
+ else:
+ return out
+
+def check_image(image) ->np.ndarray:
+ """
+ 判断某一对象是否为图像/矩阵类型,最终返回图像/矩阵
+ """
+ if not isinstance(image, np.ndarray):
+ image = cv2.imread(image, cv2.IMREAD_UNCHANGED)
+ return image
+
+def get_box(image) -> list:
+ """
+ 这是一个简单的扣图后图像定位函数,不考虑噪点影响
+ 我们使用遍历的方法,碰到非透明点以后立即返回位置坐标
+ :param image:图像信息,可以是图片路径,也可以是已经读取后的图像
+ 如果传入的是图片路径,我会首先通过读取图片、二值化,然后再进行图像处理
+ 如果传入的是图像,直接处理,不会二值化
+    :return: 回传一个列表,分别是图像的上下(y)左右(x)四个值
+ """
+ image = check_image(image)
+ height, width, _ = image.shape
+ try:
+ b, g, r, a = cv2.split(image)
+ # 二值化处理
+ a = (a > 127).astype(np.int_)
+ except ValueError:
+ # 说明传入的是无透明图层的图像,直接返回图像尺寸
+ warnings.warn("你传入了一张非四通道格式的图片!")
+ return [0, height, 0, width]
+ flag1, flag2 = 0, 0
+ box = [0, 0, 0, 0] # 上下左右
+ # 采用两面夹击战术,使用flag1和2确定两面的裁剪程度
+ # 先得到上下
+ for i in range(height):
+ for j in range(width):
+ if flag1 == 0 and a[i][j] != 0:
+ flag1 = 1
+ box[0] = i
+ if flag2 == 0 and a[height - i -1][j] != 0:
+ flag2 = 1
+ box[1] = height - i - 1
+ if flag2 * flag1 == 1:
+ break
+ # 再得到左右
+ flag1, flag2 = 0, 0
+ for j in range(width):
+ for i in range(height):
+ if flag1 == 0 and a[i][j] != 0:
+ flag1 = 1
+ box[2] = j
+ if flag2 == 0 and a[i][width - j - 1] != 0:
+ flag2 = 1
+ box[3] = width - j - 1
+ if flag2 * flag1 == 1:
+ break
+ return box
+
+def filtering(img, f, x, y, x_max, y_max, x_min, y_min, area=0, noise_size=50) ->tuple:
+ """
+ filtering将使用递归的方法得到一个连续图像(这个连续矩阵必须得是单通道的)的范围(坐标)
+ :param img: 传入的矩阵
+ :param f: 和img相同尺寸的全零矩阵,用于标记递归递归过的点
+ :param x: 当前递归到的x轴坐标
+ :param y: 当前递归到的y轴坐标
+ :param x_max: 递归过程中x轴坐标的最大值
+ :param y_max: 递归过程中y轴坐标的最大值
+ :param x_min: 递归过程中x轴坐标的最小值
+ :param y_min: 递归过程中y轴坐标的最小值
+ :param area: 当前递归区域面积大小
+ :param noise_size: 最大递归区域面积大小,当area大于noise_size时,函数返回(0, 1)
+ :return: 分两种情况,当area大于noise_size时,函数返回(0, 1),当area小于等于noise_size时,函数返回(box, 0)
+ 其中box是连续图像的坐标和像素点面积(上下左右,面积)
+ 理论上来讲,我们可以用这个函数递归出任一图像的形状和坐标,但是从计算机内存、计算速度上考虑,这并不是一个好的选择
+ 所以这个函数一般用于判断和过滤噪点
+ """
+ dire_dir = [(1, 0), (-1, 0), (0, 1), (0, -1), (1, 1), (1, -1), (-1, -1), (-1, 1)]
+ height, width = img.shape
+ f[x][y] = 1
+ for dire in dire_dir:
+ delta_x, delta_y = dire
+ tmp_x, tmp_y = (x + delta_x, y + delta_y)
+ if height > tmp_x >= 0 and width > tmp_y >= 0:
+ if img[tmp_x][tmp_y] != 0 and f[tmp_x][tmp_y] == 0:
+ f[tmp_x][tmp_y] = 1
+ # cv2.imshow("test", f)
+ # cv2.waitKey(3)
+ area += 1
+ if area > noise_size:
+ return 0, 1
+ else:
+ x_max = tmp_x if tmp_x > x_max else x_max
+ x_min = tmp_x if tmp_x < x_min else x_min
+ y_max = tmp_y if tmp_y > y_max else y_max
+ y_min = tmp_y if tmp_y < y_min else y_min
+ box, flag = filtering(img, f, tmp_x, tmp_y, x_max, y_max, x_min, y_min, area=area, noise_size=noise_size)
+ if flag == 1:
+ return 0, 1
+ else:
+ (x_max, x_min, y_max, y_min, area) = box
+ return [x_min, x_max, y_min, y_max, area], 0
+
+
+def get_box_pro(image: np.ndarray, model: int = 1, correction_factor=None, thresh: int = 127):
+ """
+ 本函数能够实现输入一张四通道图像,返回图像中最大连续非透明面积的区域的矩形坐标
+ 本函数将采用opencv内置函数来解析整个图像的mask,并提供一些参数,用于读取图像的位置信息
+ Args:
+ image: 四通道矩阵图像
+ model: 返回值模式
+ correction_factor: 提供一些边缘扩张接口,输入格式为list或者int:[up, down, left, right]。
+ 举个例子,假设我们希望剪切出的矩形框左边能够偏左1个像素,则输入[0, 0, 1, 0];
+ 如果希望右边偏右1个像素,则输入[0, 0, 0, 1]
+ 如果输入为int,则默认只会对左右两边做拓展,比如输入2,则和[0, 0, 2, 2]是等效的
+ thresh: 二值化阈值,为了保持一些羽化效果,thresh必须要小
+ Returns:
+ model为1时,将会返回切割出的矩形框的四个坐标点信息
+ model为2时,将会返回矩形框四边相距于原图四边的距离
+ """
+ # ------------ 数据格式规范部分 -------------- #
+ # 输入必须为四通道
+ if correction_factor is None:
+ correction_factor = [0, 0, 0, 0]
+ if not isinstance(image, np.ndarray) or len(cv2.split(image)) != 4:
+ raise TypeError("输入的图像必须为四通道np.ndarray类型矩阵!")
+ # correction_factor规范化
+ if isinstance(correction_factor, int):
+ correction_factor = [0, 0, correction_factor, correction_factor]
+ elif not isinstance(correction_factor, list):
+ raise TypeError("correction_factor 必须为int或者list类型!")
+ # ------------ 数据格式规范完毕 -------------- #
+ # 分离mask
+ _, _, _, mask = cv2.split(image)
+ # mask二值化处理
+ _, mask = cv2.threshold(mask, thresh=thresh, maxval=255, type=0)
+ contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+ temp = np.ones(image.shape, np.uint8)*255
+ cv2.drawContours(temp, contours, -1, (0, 0, 255), -1)
+ contours_area = []
+ for cnt in contours:
+ contours_area.append(cv2.contourArea(cnt))
+ idx = contours_area.index(max(contours_area))
+ x, y, w, h = cv2.boundingRect(contours[idx]) # 框出图像
+ # ------------ 开始输出数据 -------------- #
+ height, width, _ = image.shape
+ y_up = y - correction_factor[0] if y - correction_factor[0] >= 0 else 0
+ y_down = y + h + correction_factor[1] if y + h + correction_factor[1] < height else height - 1
+ x_left = x - correction_factor[2] if x - correction_factor[2] >= 0 else 0
+ x_right = x + w + correction_factor[3] if x + w + correction_factor[3] < width else width - 1
+ if model == 1:
+ # model=1,将会返回切割出的矩形框的四个坐标点信息
+ return [y_up, y_down, x_left, x_right]
+ elif model == 2:
+ # model=2, 将会返回矩形框四边相距于原图四边的距离
+ return [y_up, height - y_down, x_left, width - x_right]
+ else:
+        raise ValueError("请选择正确的模式!")
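+
+
+def _get_box_pro_example():
+    # Hedged usage sketch with a synthetic RGBA image; not part of the original module.
+    canvas = np.zeros((100, 100, 4), dtype=np.uint8)
+    canvas[20:60, 30:80, 3] = 255  # opaque block
+    y_up, y_down, x_left, x_right = get_box_pro(canvas, model=1, correction_factor=[0, 0, 2, 2])
+    # model=1 returns the box corners; the left/right edges are padded outwards by 2 pixels
+    return y_up, y_down, x_left, x_right  # (20, 60, 28, 82)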
+
+
+def cut(image_path:str, box:list, if_save=True):
+ """
+ 根据box,裁剪对应的图片区域后保存
+ :param image_path: 原图路径
+ :param box: 坐标列表,上下左右
+ :param if_save:是否将裁剪后的图片保存,如果为True,则保存并返回新图路径,否则不保存,返回截取后的图片对象
+ :return: 新图路径或者是新图对象
+ """
+ index = 0
+ path_len = len(image_path)
+ up, down, left, right = box
+ image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
+ new_image = image[up: down, left: right]
+ if if_save:
+ for index in range(path_len - 1, -1, -1):
+ if image_path[index] == ".":
+ break
+        if path_len - index <= 3 or path_len - index >= 6:
+ raise TypeError("输入的图片格式有误!")
+ new_path = image_path[0:index] + "_cut" + image_path[index:path_len]
+ cv2.imwrite(new_path, new_image, [cv2.IMWRITE_PNG_COMPRESSION, 9])
+ return new_path
+ else:
+ return new_image
+
+
+def zoom_image_without_change_size(image:np.ndarray, zoom_rate, interpolation=cv2.INTER_NEAREST) ->np.ndarray:
+ """
+ 在不改变原图大小的情况下,对图像进行放大,目前只支持从图像中心放大
+ :param image: 传入的图像对象
+ :param zoom_rate: 放大比例,单位为倍(初始为1倍)
+ :param interpolation: 插值方式,与opencv的resize内置参数相对应,默认为最近邻插值
+ :return: 裁剪后的图像实例
+ """
+ height, width, _ = image.shape
+ if zoom_rate < 1:
+ # zoom_rate不能小于1
+ raise ValueError("zoom_rate不能小于1!")
+ height_tmp = int(height * zoom_rate)
+ width_tmp = int(width * zoom_rate)
+    image_tmp = cv2.resize(image, (width_tmp, height_tmp), interpolation=interpolation)  # cv2.resize的尺寸顺序为(宽, 高)
+ # 定位一下被裁剪的位置,实际上是裁剪框的左上角的点的坐标
+ delta_x = (width_tmp - width) // 2 # 横向
+ delta_y = (height_tmp - height) // 2 # 纵向
+ return image_tmp[delta_y : delta_y + height, delta_x : delta_x + width]
+
+
+def filedir2csv(scan_filedir, csv_filedir):
+ file_list = glob.glob(scan_filedir+"/*")
+
+ with open(csv_filedir, "w") as csv_file:
+        writer = csv.writer(csv_file)
+        for file_dir in file_list:
+            writer.writerow([file_dir])
+
+ print("filedir2csv success!")
+
+
+def full_ties(image_pre:np.ndarray):
+ height, width = image_pre.shape
+ # 先膨胀
+ kernel = np.ones((5, 5), dtype=np.uint8)
+ dilate = cv2.dilate(image_pre, kernel, 1)
+ # cv2.imshow("dilate", dilate)
+ def FillHole(image):
+ # 复制 image 图像
+ im_floodFill = image.copy()
+ # Mask 用于 floodFill,官方要求长宽+2
+ mask = np.zeros((height + 2, width + 2), np.uint8)
+ seedPoint = (0, 0)
+ # floodFill函数中的seedPoint对应像素必须是背景
+ is_break = False
+ for i in range(im_floodFill.shape[0]):
+ for j in range(im_floodFill.shape[1]):
+ if (im_floodFill[i][j] == 0):
+                    seedPoint = (j, i)  # floodFill的seedPoint按(x, y)即(列, 行)顺序
+ is_break = True
+ break
+ if (is_break):
+ break
+ # 得到im_floodFill 255填充非孔洞值
+ cv2.floodFill(im_floodFill, mask, seedPoint, 255)
+ # cv2.imshow("tmp1", im_floodFill)
+ # 得到im_floodFill的逆im_floodFill_inv
+ im_floodFill_inv = cv2.bitwise_not(im_floodFill)
+ # cv2.imshow("tmp2", im_floodFill_inv)
+ # 把image、im_floodFill_inv这两幅图像结合起来得到前景
+ im_out = image | im_floodFill_inv
+ return im_out
+ # 洪流算法填充
+ image_floodFill = FillHole(dilate)
+ # 填充图和原图合并
+ image_final = image_floodFill | image_pre
+ # 再腐蚀
+ kernel = np.ones((5, 5), np.uint8)
+ erosion= cv2.erode(image_final, kernel, iterations=6)
+ # cv2.imshow("erosion", erosion)
+ # 添加高斯模糊
+ blur = cv2.GaussianBlur(erosion, (5, 5), 2.5)
+ # cv2.imshow("blur", blur)
+ # image_final = merge_image(image_pre, erosion)
+ # 再与原图合并
+ image_final = image_pre | blur
+ # cv2.imshow("final", image_final)
+ return image_final
+
+
+def cut_BiggestAreas(image):
+ # 裁剪出整张图轮廓最大的部分
+ def find_BiggestAreas(image_pre):
+ # 定义一个三乘三的卷积核
+ kernel = np.ones((3, 3), dtype=np.uint8)
+ # 将输入图片膨胀
+ # dilate = cv2.dilate(image_pre, kernel, 3)
+ # cv2.imshow("dilate", dilate)
+ # 将输入图片二值化
+ _, thresh = cv2.threshold(image_pre, 127, 255, cv2.THRESH_BINARY)
+ # cv2.imshow("thresh", thresh)
+ # 将二值化后的图片膨胀
+ dilate_afterThresh = cv2.dilate(thresh, kernel, 5)
+ # cv2.imshow("thresh_afterThresh", dilate_afterThresh)
+ # 找轮廓
+ contours_, hierarchy = cv2.findContours(dilate_afterThresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+ # 识别出最大的轮廓
+        # 需要注意的是,在低版本的findContours当中返回的结果是tuple,不支持pop,所以需要先将其转为list
+ contours = [x for x in contours_]
+ area = map(cv2.contourArea, contours)
+ area_list = list(area)
+ area_max = max(area_list)
+ post = area_list.index(area_max)
+ # 将最大的区域保留,其余全部填黑
+ contours.pop(post)
+ for i in range(len(contours)):
+ cv2.drawContours(image_pre, contours, i, 0, cv2.FILLED)
+ # cv2.imshow("cut", image_pre)
+ return image_pre
+ b, g, r, a = cv2.split(image)
+ a_new = find_BiggestAreas(a)
+ new_image = cv2.merge((b, g, r, a_new))
+ return new_image
+
+
+def locate_neck(image:np.ndarray, proportion):
+ """
+ 根据输入的图片(四通道)和proportion(自上而下)的比例,定位到相应的y点,然后向内收缩,直到两边的像素点不透明
+ """
+ if image.shape[-1] != 4:
+ raise TypeError("请输入一张png格式的四通道图片!")
+ if proportion > 1 or proportion <=0:
+ raise ValueError("proportion 必须在0~1之间!")
+ _, _, _, a = cv2.split(image)
+ height, width = a.shape
+ _, a = cv2.threshold(a, 127, 255, cv2.THRESH_BINARY)
+ y = int(height * proportion)
+ x = 0
+ for x in range(width):
+ if a[y][x] == 255:
+ break
+ left = (y, x)
+ for x in range(width - 1, -1 , -1):
+ if a[y][x] == 255:
+ break
+ right = (y, x)
+ return left, right, right[1] - left[1]
+
+
+def get_cutbox_image(input_image):
+ height, width = input_image.shape[0], input_image.shape[1]
+ y_top, y_bottom, x_left, x_right = get_box_pro(input_image, model=2)
+ result_image = input_image[y_top:height - y_bottom, x_left:width - x_right]
+ return result_image
+
+
+def brightnessAdjustment(image: np.ndarray, bright_factor: int=0):
+ """
+ 图像亮度调节
+ :param image: 输入的图像矩阵
+ :param bright_factor:亮度调节因子,可正可负,没有范围限制
+ 当bright_factor ---> +无穷 时,图像全白
+ 当bright_factor ---> -无穷 时,图像全黑
+ :return: 处理后的图片
+ """
+ res = np.uint8(np.clip(np.int16(image) + bright_factor, 0, 255))
+ return res
+
+
+def contrastAdjustment(image: np.ndarray, contrast_factor: int = 0):
+ """
+ 图像对比度调节,实际上调节对比度的同时对亮度也有一定的影响
+ :param image: 输入的图像矩阵
+    :param contrast_factor: 对比度调节因子,可正可负,范围在[-100, +100]之间
+ 当contrast_factor=-100时,图像变为灰色
+ :return: 处理后的图片
+ """
+    if contrast_factor > 0:
+        contrast_factor = 1 + min(contrast_factor, 100) / 100
+    else:
+        contrast_factor = 1 + max(contrast_factor, -100) / 100
+ image_b = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+ bright_ = image_b.mean()
+ res = np.uint8(np.clip(contrast_factor * (np.int16(image) - bright_) + bright_, 0, 255))
+ return res
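+
+
+def _adjustment_example():
+    # Hedged sketch combining the two adjustments above on a synthetic gray image.
+    img = np.full((10, 10, 3), 128, dtype=np.uint8)
+    brighter = brightnessAdjustment(img, bright_factor=30)   # every channel becomes 158
+    flatter = contrastAdjustment(img, contrast_factor=-100)  # collapses towards the mean gray
+    return brighter.mean(), flatter.mean()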
+
+
+class CV2Bytes(object):
+ @staticmethod
+ def byte_cv2(image_byte, flags=cv2.IMREAD_COLOR) ->np.ndarray:
+ """
+ 将传入的字节流解码为图像, 当flags为 -1 的时候为无损解码
+ """
+ np_arr = np.frombuffer(image_byte,np.uint8)
+ image = cv2.imdecode(np_arr, flags)
+ return image
+
+ @staticmethod
+ def cv2_byte(image:np.ndarray, imageType:str=".jpg"):
+ """
+ 将传入的图像解码为字节流
+ """
+ _, image_encode = cv2.imencode(imageType, image)
+ image_byte = image_encode.tobytes()
+ return image_byte
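+
+
+def _cv2bytes_example():
+    # Hedged sketch of a lossless byte round-trip using the helpers above.
+    img = np.zeros((8, 8, 3), dtype=np.uint8)
+    data = CV2Bytes.cv2_byte(img, imageType=".png")
+    back = CV2Bytes.byte_cv2(data, flags=cv2.IMREAD_UNCHANGED)
+    return (img == back).all()  # PNG encoding is lossless, so this should be True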
+
+
+def comb2images(src_white:np.ndarray, src_black:np.ndarray, mask:np.ndarray) -> np.ndarray:
+ """输入两张图片,将这两张图片根据输入的mask进行叠加处理
+ 这里并非简单的cv2.add(),因为也考虑了羽化部分,所以需要进行一些其他的处理操作
+ 核心的算法为: dst = (mask * src_white + (1 - mask) * src_black).astype(np.uint8)
+
+ Args:
+ src_white (np.ndarray): 第一张图像,代表的是mask中的白色区域,三通道
+ src_black (np.ndarray): 第二张图像,代表的是mask中的黑色区域,三通道
+ mask (np.ndarray): mask.输入为单通道,后续会归一化并转为三通道
+ 需要注意的是这三者的尺寸应该是一样的
+
+ Returns:
+ np.ndarray: 返回的三通道图像
+ """
+ # 函数内部不检查相关参数是否一样,使用的时候需要注意一下
+ mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR).astype(np.float32) / 255
+ return (mask * src_white + (1 - mask) * src_black).astype(np.uint8)
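+
+
+def _comb2images_example():
+    # Hedged sketch: a half-white mask takes the left half from src_white and the right
+    # half from src_black; intermediate gray mask values would give a feathered blend.
+    white = np.full((4, 8, 3), 255, dtype=np.uint8)
+    black = np.zeros((4, 8, 3), dtype=np.uint8)
+    mask = np.zeros((4, 8), dtype=np.uint8)
+    mask[:, :4] = 255
+    return comb2images(white, black, mask)  # left half 255, right half 0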
+
diff --git a/hivisionai/hycv/vision.py b/hivisionai/hycv/vision.py
new file mode 100644
index 0000000000000000000000000000000000000000..a862e9c7b2dbbc0a2f3061d8a48d5f58904ee7f9
--- /dev/null
+++ b/hivisionai/hycv/vision.py
@@ -0,0 +1,446 @@
+import cv2
+from PIL import Image
+import numpy as np
+import functools
+import time
+
+def calTime(mark):
+ """
+ 一个输出函数时间的装饰器.
+ :param mark: str, 可选填, 如果填了就会在print开头加上mark标签。
+ """
+ if isinstance(mark, str):
+ def decorater(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kw):
+ start_time = time.time()
+ return_param = func(*args, **kw)
+ print("[Mark-{}] {} 函数花费的时间为 {:.2f}.".format(mark, func.__name__, time.time() - start_time))
+ return return_param
+
+ return wrapper
+
+ return decorater
+ else:
+ func = mark
+
+ @functools.wraps(func)
+ def wrapper(*args, **kw):
+ start_time = time.time()
+ return_param = func(*args, **kw)
+ print("{} 函数花费的时间为 {:.2f}.".format(func.__name__, time.time() - start_time))
+ return return_param
+
+ return wrapper
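+
+
+def _calTime_example():
+    """Hedged sketch of the decorator above: bare use and use with a mark label."""
+    @calTime
+    def _plain():
+        return 1
+
+    @calTime("demo")
+    def _marked():
+        return 2
+
+    return _plain(), _marked()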
+
+
+def ChangeImageDPI(input_path, output_path, dpi=300):
+ """
+ 改变输入图像的dpi.
+ input_path: 输入图像路径
+ output_path: 输出图像路径
+ dpi:打印分辨率
+ """
+ image = Image.open(input_path)
+ image.save(output_path, dpi=(dpi, dpi))
+ # print(1)
+ print("Your Image's DPI have been changed. The last DPI = ({},{}) ".format(dpi,dpi))
+
+
+def IDphotos_cut(x1, y1, x2, y2, img):
+ """
+    在图片上进行滑动裁剪
+    输入:一张图片img,和裁剪框信息(x1,y1,x2,y2)
+    输出:裁剪好的图片;如果裁剪框超出了图像范围,超出部分将用透明背景补位
+ ------------------------------------
+ x:裁剪框左上的横坐标
+ y:裁剪框左上的纵坐标
+ x2:裁剪框右下的横坐标
+ y2:裁剪框右下的纵坐标
+ crop_size:裁剪框大小
+ img:裁剪图像(numpy.array)
+ output_path:裁剪图片的输出路径
+ ------------------------------------
+ """
+
+ crop_size = (y2-y1, x2-x1)
+ """
+ ------------------------------------
+ temp_x_1:裁剪框左边超出图像部分
+ temp_y_1:裁剪框上边超出图像部分
+ temp_x_2:裁剪框右边超出图像部分
+ temp_y_2:裁剪框下边超出图像部分
+ ------------------------------------
+ """
+ temp_x_1 = 0
+ temp_y_1 = 0
+ temp_x_2 = 0
+ temp_y_2 = 0
+
+ if y1 < 0:
+ temp_y_1 = abs(y1)
+ y1 = 0
+ if y2 > img.shape[0]:
+ temp_y_2 = y2
+ y2 = img.shape[0]
+ temp_y_2 = temp_y_2 - y2
+
+ if x1 < 0:
+ temp_x_1 = abs(x1)
+ x1 = 0
+ if x2 > img.shape[1]:
+ temp_x_2 = x2
+ x2 = img.shape[1]
+ temp_x_2 = temp_x_2 - x2
+
+ # 生成一张全透明背景
+ print("crop_size:", crop_size)
+ background_bgr = np.full((crop_size[0], crop_size[1]), 255, dtype=np.uint8)
+ background_a = np.full((crop_size[0], crop_size[1]), 0, dtype=np.uint8)
+ background = cv2.merge((background_bgr, background_bgr, background_bgr, background_a))
+
+ background[temp_y_1: crop_size[0] - temp_y_2, temp_x_1: crop_size[1] - temp_x_2] = img[y1:y2, x1:x2]
+
+ return background
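+
+
+def _IDphotos_cut_example():
+    # Hedged sketch: a crop box that starts above the image (y1 < 0) is padded with a
+    # transparent border instead of failing, so the output keeps the requested size.
+    img = np.zeros((100, 100, 4), dtype=np.uint8)
+    out = IDphotos_cut(10, -20, 90, 60, img)
+    return out.shape  # (80, 80, 4)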
+
+
+def resize_image_esp(input_image, esp=2000):
+ """
+ 输入:
+ input_path:numpy图片
+ esp:限制的最大边长
+ """
+ # resize函数=>可以让原图压缩到最大边为esp的尺寸(不改变比例)
+ width = input_image.shape[0]
+
+ length = input_image.shape[1]
+ max_num = max(width, length)
+
+ if max_num > esp:
+ print("Image resizing...")
+ if width == max_num:
+ length = int((esp / width) * length)
+ width = esp
+
+ else:
+ width = int((esp / length) * width)
+ length = esp
+ print(length, width)
+ im_resize = cv2.resize(input_image, (length, width), interpolation=cv2.INTER_AREA)
+ return im_resize
+ else:
+ return input_image
+
+
+def resize_image_by_min(input_image, esp=600):
+ """
+ 将图像缩放为最短边至少为esp的图像。
+ :param input_image: 输入图像(OpenCV矩阵)
+ :param esp: 缩放后的最短边长
+ :return: 缩放后的图像,缩放倍率
+ """
+ height, width = input_image.shape[0], input_image.shape[1]
+ min_border = min(height, width)
+ if min_border < esp:
+ if height >= width:
+ new_width = esp
+ new_height = height * esp // width
+ else:
+ new_height = esp
+ new_width = width * esp // height
+
+ return cv2.resize(input_image, (new_width, new_height), interpolation=cv2.INTER_AREA), new_height / height
+
+ else:
+ return input_image, 1
+
+
+def detect_distance(value, crop_heigh, max=0.06, min=0.04):
+ """
+ 检测人头顶与照片顶部的距离是否在适当范围内。
+ 输入:与顶部的差值
+ 输出:(status, move_value)
+ status=0 不动
+ status=1 人脸应向上移动(裁剪框向下移动)
+    status=-1 人脸应向下移动(裁剪框向上移动)
+    ---------------------------------------
+    value:头顶与照片顶部的距离
+ crop_heigh: 裁剪框的高度
+ max: 距离的最大值
+ min: 距离的最小值
+ ---------------------------------------
+ """
+ value = value / crop_heigh # 头顶往上的像素占图像的比例
+ if min <= value <= max:
+ return 0, 0
+ elif value > max:
+ # 头顶往上的像素比例高于max
+ move_value = value - max
+ move_value = int(move_value * crop_heigh)
+ # print("上移{}".format(move_value))
+ return 1, move_value
+ else:
+ # 头顶往上的像素比例低于min
+ move_value = min - value
+ move_value = int(move_value * crop_heigh)
+ # print("下移{}".format(move_value))
+ return -1, move_value
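+
+
+def _detect_distance_example():
+    # Hedged sketch: with a 1000-pixel crop, a 100-pixel top margin is 10% of the crop
+    # height, above the 6% maximum, so the face should move up by about 4% (40 pixels).
+    status, move_value = detect_distance(100, 1000, max=0.06, min=0.04)
+    return status, move_value  # (1, 40)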
+
+
+def draw_picture_dots(image, dots, pen_size=10, pen_color=(0, 0, 255)):
+ """
+ 给一张照片上绘制点。
+ image: Opencv图像矩阵
+ dots: 一堆点,形如[(100,100),(150,100)]
+ pen_size: 画笔的大小
+ pen_color: 画笔的颜色
+ """
+ if isinstance(dots, dict):
+ dots = [v for u, v in dots.items()]
+ image = image.copy()
+ for x, y in dots:
+ cv2.circle(image, (int(x), int(y)), pen_size, pen_color, -1)
+ return image
+
+
+def draw_picture_rectangle(image, bbox, pen_size=2, pen_color=(0, 0, 255)):
+ image = image.copy()
+ x1 = int(bbox[0])
+ y1 = int(bbox[1])
+ x2 = int(bbox[2])
+ y2 = int(bbox[3])
+ cv2.rectangle(image, (x1,y1), (x2, y2), pen_color, pen_size)
+ return image
+
+
+def generate_gradient(start_color, width, height, mode="updown"):
+ # 定义背景颜色
+ end_color = (255, 255, 255) # 白色
+
+ # 创建一个空白图像
+ r_out = np.zeros((height, width), dtype=int)
+ g_out = np.zeros((height, width), dtype=int)
+ b_out = np.zeros((height, width), dtype=int)
+
+ if mode == "updown":
+ # 生成上下渐变色
+ for y in range(height):
+ r = int((y / height) * end_color[0] + ((height - y) / height) * start_color[0])
+ g = int((y / height) * end_color[1] + ((height - y) / height) * start_color[1])
+ b = int((y / height) * end_color[2] + ((height - y) / height) * start_color[2])
+ r_out[y, :] = r
+ g_out[y, :] = g
+ b_out[y, :] = b
+
+ else:
+ # 生成中心渐变色
+ img = np.zeros((height, width, 3))
+ # 定义椭圆中心和半径
+ center = (width//2, height//2)
+ end_axies = max(height, width)
+ # 定义渐变色
+ end_color = (255, 255, 255)
+ # 绘制椭圆
+ for y in range(end_axies):
+ axes = (end_axies - y, end_axies - y)
+ r = int((y / end_axies) * end_color[0] + ((end_axies - y) / end_axies) * start_color[0])
+ g = int((y / end_axies) * end_color[1] + ((end_axies - y) / end_axies) * start_color[1])
+ b = int((y / end_axies) * end_color[2] + ((end_axies - y) / end_axies) * start_color[2])
+
+ cv2.ellipse(img, center, axes, 0, 0, 360, (b, g, r), -1)
+ b_out, g_out, r_out = cv2.split(np.uint64(img))
+
+ return r_out, g_out, b_out
+
+
+def add_background(input_image, bgr=(0, 0, 0), mode="pure_color"):
+ """
+ 本函数的功能为为透明图像加上背景。
+    :param input_image: numpy.array(4 channels), 透明图像
+    :param bgr: tuple, 合成纯色底时的BGR值
+    :param mode: str, "pure_color"为纯色填充,"updown_gradient"为上下渐变,其余为中心渐变
+    :return: output: 合成好的输出图像
+ """
+ height, width = input_image.shape[0], input_image.shape[1]
+ b, g, r, a = cv2.split(input_image)
+ a_cal = a / 255
+ if mode == "pure_color":
+ # 纯色填充
+ b2 = np.full([height, width], bgr[0], dtype=int)
+ g2 = np.full([height, width], bgr[1], dtype=int)
+ r2 = np.full([height, width], bgr[2], dtype=int)
+ elif mode == "updown_gradient":
+ b2, g2, r2 = generate_gradient(bgr, width, height, mode="updown")
+ else:
+ b2, g2, r2 = generate_gradient(bgr, width, height, mode="center")
+
+ output = cv2.merge(((b - b2) * a_cal + b2, (g - g2) * a_cal + g2, (r - r2) * a_cal + r2))
+
+ return output
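+
+
+def _add_background_example():
+    # Hedged sketch: composite a transparent RGBA image over a pure red backdrop.
+    # The function returns a float array, so it is cast back to uint8 here.
+    rgba = np.zeros((10, 10, 4), dtype=np.uint8)
+    rgba[2:8, 2:8] = (255, 255, 255, 255)  # opaque white square on a transparent canvas
+    out = add_background(rgba, bgr=(0, 0, 255), mode="pure_color")
+    return np.uint8(out)  # red background with a white square in the middle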
+
+
+def rotate_bound(image, angle):
+ """
+ 一个旋转函数,输入一张图片和一个旋转角,可以实现不损失图像信息的旋转。
+ - image: numpy.array(3 channels)
+ - angle: 旋转角(度)
+ """
+ (h, w) = image.shape[:2]
+ (cX, cY) = (w / 2, h / 2)
+
+ M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
+ cos = np.abs(M[0, 0])
+ sin = np.abs(M[0, 1])
+
+ nW = int((h * sin) + (w * cos))
+ nH = int((h * cos) + (w * sin))
+
+ M[0, 2] += (nW / 2) - cX
+ M[1, 2] += (nH / 2) - cY
+
+ return cv2.warpAffine(image, M, (nW, nH)), cos, sin
+
+
+def rotate_bound_4channels(image, a, angle):
+ """
+    【rotate_bound的4通道版本】
+ 一个旋转函数,输入一张图片和一个旋转角,可以实现不损失图像信息的旋转。
+ Inputs:
+ - image: numpy.array(3 channels), 输入图像
+ - a: numpy.array(1 channels), 输入图像的A矩阵
+ - angle: 旋转角(度)
+ Returns:
+ - input_image: numpy.array(3 channels), 对image进行旋转后的图像
+ - result_image: numpy.array(4 channels), 旋转且透明的图像
+ - cos: float, 旋转角的余弦值
+ - sin: float, 旋转角的正弦值
+ """
+ input_image, cos, sin = rotate_bound(image, angle)
+ new_a, _, _ = rotate_bound(a, angle) # 对做matte旋转,以便之后merge
+ b, g, r = cv2.split(input_image)
+ result_image = cv2.merge((b, g, r, new_a)) # 得到抠图结果图的无损旋转结果
+
+ return input_image, result_image, cos, sin
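+
+
+def _rotate_example():
+    # Hedged sketch: rotating a 100x200 image by 90 degrees without cropping yields a
+    # 200x100 canvas, i.e. width and height swap and nothing is cut off.
+    image = np.zeros((100, 200, 3), dtype=np.uint8)
+    alpha = np.full((100, 200), 255, dtype=np.uint8)
+    rotated, rotated_rgba, cos, sin = rotate_bound_4channels(image, alpha, 90)
+    return rotated.shape, rotated_rgba.shape  # ((200, 100, 3), (200, 100, 4))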
+
+
+def cover_image(image, background, x, y, mode=1):
+ """
+ mode = 1: directly cover
+ mode = 2: cv2.add
+ mode = 3: bgra cover
+ """
+ image = image.copy()
+ background = background.copy()
+ height1, width1 = background.shape[0], background.shape[1]
+ height2, width2 = image.shape[0], image.shape[1]
+ wuqiong_bg_y = height1 + 1
+ wuqiong_bg_x = width1 + 1
+ wuqiong_img_y = height2 + 1
+ wuqiong_img_x = width2 + 1
+
+ def cover_mode(image, background, imgy1=0, imgy2=-1, imgx1=0, imgx2=-1, bgy1=0, bgy2=-1, bgx1=0, bgx2=-1, mode=1):
+ if mode == 1:
+ background[bgy1:bgy2, bgx1:bgx2] = image[imgy1:imgy2, imgx1:imgx2]
+ elif mode == 2:
+ background[bgy1:bgy2, bgx1:bgx2] = cv2.add(background[bgy1:bgy2, bgx1:bgx2], image[imgy1:imgy2, imgx1:imgx2])
+ elif mode == 3:
+ b, g, r, a = cv2.split(image[imgy1:imgy2, imgx1:imgx2])
+ b2, g2, r2, a2 = cv2.split(background[bgy1:bgy2, bgx1:bgx2])
+ background[bgy1:bgy2, bgx1:bgx2, 0] = b * (a / 255) + b2 * (1 - a / 255)
+ background[bgy1:bgy2, bgx1:bgx2, 1] = g * (a / 255) + g2 * (1 - a / 255)
+ background[bgy1:bgy2, bgx1:bgx2, 2] = r * (a / 255) + r2 * (1 - a / 255)
+ background[bgy1:bgy2, bgx1:bgx2, 3] = cv2.add(a, a2)
+
+ return background
+
+ if x >= 0 and y >= 0:
+ x2 = x + width2
+ y2 = y + height2
+
+ if x2 <= width1 and y2 <= height1:
+ background = cover_mode(image, background,0,wuqiong_img_y,0,wuqiong_img_x,y,y2,x,x2,mode)
+
+ elif x2 > width1 and y2 <= height1:
+ # background[y:y2, x:] = image[:, :width1 - x]
+ background = cover_mode(image, background, 0, wuqiong_img_y, 0, width1-x, y, y2, x, wuqiong_bg_x,mode)
+
+ elif x2 <= width1 and y2 > height1:
+ # background[y:, x:x2] = image[:height1 - y, :]
+ background = cover_mode(image, background, 0, height1-y, 0, wuqiong_img_x, y, wuqiong_bg_y, x, x2,mode)
+ else:
+ # background[y:, x:] = image[:height1 - y, :width1 - x]
+ background = cover_mode(image, background, 0, height1-y, 0, width1-x, y, wuqiong_bg_y, x, wuqiong_bg_x,mode)
+
+ elif x < 0 and y >= 0:
+ x2 = x + width2
+ y2 = y + height2
+
+ if x2 <= width1 and y2 <= height1:
+ # background[y:y2, :x + width2] = image[:, abs(x):]
+ background = cover_mode(image, background, 0, wuqiong_img_y, abs(x), wuqiong_img_x, y, y2, 0, x+width2,mode)
+ elif x2 > width1 and y2 <= height1:
+ background = cover_mode(image, background, 0, wuqiong_img_y, abs(x), width1+abs(x), y, y2, 0, wuqiong_bg_x,mode)
+ elif x2 <= 0:
+ pass
+ elif x2 <= width1 and y2 > height1:
+ background = cover_mode(image, background, 0, height1-y, abs(x), wuqiong_img_x, y, wuqiong_bg_y, 0, x2, mode)
+ else:
+ # background[y:, :] = image[:height1 - y, abs(x):width1 + abs(x)]
+ background = cover_mode(image, background, 0, height1-y, abs(x), width1+abs(x), y, wuqiong_bg_y, 0, wuqiong_bg_x,mode)
+
+ elif x >= 0 and y < 0:
+ x2 = x + width2
+ y2 = y + height2
+ if y2 <= 0:
+ pass
+ if x2 <= width1 and y2 <= height1:
+ # background[:y2, x:x2] = image[abs(y):, :]
+ background = cover_mode(image, background, abs(y), wuqiong_img_y, 0, wuqiong_img_x, 0, y2, x, x2,mode)
+ elif x2 > width1 and y2 <= height1:
+ # background[:y2, x:] = image[abs(y):, :width1 - x]
+ background = cover_mode(image, background, abs(y), wuqiong_img_y, 0, width1-x, 0, y2, x, wuqiong_bg_x,mode)
+ elif x2 <= width1 and y2 > height1:
+ # background[:, x:x2] = image[abs(y):height1 + abs(y), :]
+ background = cover_mode(image, background, abs(y), height1+abs(y), 0, wuqiong_img_x, 0, wuqiong_bg_y, x, x2,mode)
+ else:
+ # background[:, x:] = image[abs(y):height1 + abs(y), :width1 - abs(x)]
+ background = cover_mode(image, background, abs(y), height1+abs(y), 0, width1-abs(x), 0, wuqiong_bg_x, x, wuqiong_bg_x,mode)
+
+ else:
+ x2 = x + width2
+ y2 = y + height2
+ if y2 <= 0 or x2 <= 0:
+ pass
+ if x2 <= width1 and y2 <= height1:
+ # background[:y2, :x2] = image[abs(y):, abs(x):]
+ background = cover_mode(image, background, abs(y), wuqiong_img_y, abs(x), wuqiong_img_x, 0, y2, 0, x2,mode)
+ elif x2 > width1 and y2 <= height1:
+ # background[:y2, :] = image[abs(y):, abs(x):width1 + abs(x)]
+ background = cover_mode(image, background, abs(y), wuqiong_img_y, abs(x), width1+abs(x), 0, y2, 0, wuqiong_bg_x,mode)
+ elif x2 <= width1 and y2 > height1:
+ # background[:, :x2] = image[abs(y):height1 + abs(y), abs(x):]
+ background = cover_mode(image, background, abs(y), height1+abs(y), abs(x), wuqiong_img_x, 0, wuqiong_bg_y, 0, x2,mode)
+ else:
+ # background[:, :] = image[abs(y):height1 - abs(y), abs(x):width1 + abs(x)]
+ background = cover_mode(image, background, abs(y), height1-abs(y), abs(x), width1+abs(x), 0, wuqiong_bg_y, 0, wuqiong_bg_x,mode)
+
+ return background
+
+
+def image2bgr(input_image):
+ if len(input_image.shape) == 2:
+ input_image = input_image[:, :, None]
+ if input_image.shape[2] == 1:
+ result_image = np.repeat(input_image, 3, axis=2)
+ elif input_image.shape[2] == 4:
+ result_image = input_image[:, :, 0:3]
+ else:
+ result_image = input_image
+
+ return result_image
+
+
+if __name__ == "__main__":
+ image = cv2.imread("./03.png", -1)
+ result_image = add_background(image, bgr=(255, 255, 255))
+ cv2.imwrite("test.jpg", result_image)
\ No newline at end of file
diff --git a/idPhotoCreateDebug.py b/idPhotoCreateDebug.py
new file mode 100644
index 0000000000000000000000000000000000000000..a24cdbc47c4735a9c0c8ddcdbf09a641ba2715fa
--- /dev/null
+++ b/idPhotoCreateDebug.py
@@ -0,0 +1,62 @@
+"""
+@author: cuny
+@file: idPhotoCreateDebug.py
+@time: 2022/4/25 17:43
+@description:
+证件照制作的本地调试文件
+"""
+import time
+import cv2
+from hivisionai.hyService.utils import Debug
+from hivisionai.hycv.vision import resize_image_esp
+from idPhotoCreateUtils import IdPhotoCreateService
+
+cbs = IdPhotoCreateService()
+db = Debug()
+msg = {"uid": "60a5e13da00e6e0001fd53c8",
+ "send_msg": {"index": 1,
+ "platform": "test",
+ "obj_key": "wx/certificatePhoto/62b31e4fa866f7af5d361390/61d70e3ba866f7af5df28a3b/old-image/xyz165621950.png",
+ "template_info": {"height": 192, "width": 144, "name": "一寸"},
+ "cloth_number": "girl02",
+ "size": {"name": "一寸",
+ "w": 144,
+ "h": 192},
+ "time": "test",
+ "uid": "60a5e13da00e6e0001fd53c8",
+ }
+ }
+
+# ----------------- 本地调试方式 ----------------- #
+
+image_byte = open("../idPhotoCreate/test_image/21.jpg", "rb").read()
+# ---------------------------------------------- #
+# 开始从云端下载图像,首先获取一些基本数据,这一行基本别动
+(w, h, name), (download_path, upload_path_hd, upload_path_common), image_name, send_msg, \
+ (uid, connectionID) = cbs.checkKey(msg)
+print("upload_path_common", upload_path_common)
+# 在这一步我们获得到了用户的数据1
+image_pre = cbs.byte_cv2(image_byte, flags=cv2.IMREAD_COLOR)
+image_pre = resize_image_esp(image_pre, esp=2000)
+# 数据图片下载完毕,开始功能处理
+db.debug_print("INFO: processing...", font_color="yellow")
+# 开始处理
+db.debug_print("INFO: 取消上传照片...", font_color="yellow")
+start = time.time()
+result_image_HD, result_image, typography_arr, typography_rotate, relative_x, relative_y, w, h, id_temp_info = \
+ cbs.process(image_pre=image_pre,
+ oss_image_name=image_name,
+ w=w,
+ h=h,
+ beauty=False,
+ upload_path_hd=upload_path_hd,
+ upload_path_common=upload_path_common,
+ if_upload=False)
+
+db.debug_print(f"INFO: 图像处理时间: {round(time.time() - start, 2)}秒", font_color="blue")
+db.debug_print("INFO: success.", font_color="green")
+# cv2.imshow("test", result_image)
+# cv2.waitKey(0)
+# ---------------------------------------------- #
+cv2.imwrite("result_image_standard.png", result_image)
+cv2.imwrite("result_image_HD.png", result_image_HD)
diff --git a/idPhotoCreateService.py b/idPhotoCreateService.py
new file mode 100644
index 0000000000000000000000000000000000000000..8bd81bd3be87589b3ce237db09c11656fd209aa8
--- /dev/null
+++ b/idPhotoCreateService.py
@@ -0,0 +1,39 @@
+"""
+@author: cuny
+@file: idPhotoCreateService.py
+@time: 2022/4/3 18:07
+@description:
+证件照制作服务文件
+"""
+from idPhotoCreateUtils import IdPhotoCreateService
+
+ipcs = IdPhotoCreateService()
+
+
+def service(msg: dict = None, context=None):
+ if "send_msg" not in msg:
+ print("冷启动/测试模式,不做任何函数处理")
+ else:
+ ipcs(msg)
+ return "Already invoked a function!"
+
+
+if __name__ == "__main__":
+ # 这个路径为本地日常调试用
+ obj_key = "hyyx-wx/certificatePhoto/60a5e13da00e6e0001fd53c8/61ace447bb291a20b30796f1/old-image/1640490682087.png"
+
+ # obj_key = "wx/certificatePhoto/60a5e13da00e6e0001fd53c8/61d70e3ba866f7af5df28a37/old-image/1649428342890.jpeg"
+ # obj_key = "wx/certificatePhoto/624fc15ca866f7af5ddcdd10/61d70e3ba866f7af5df28a7d/old-image/test.heif"
+ # obj_key = "wx/certificatePhoto/61832a9ce99f3b00016883cb/61d70e3ba866f7af5df28a37/old-image/1650332506715.png"
+ msg_ = {"uid": "60a5e13da00e6e0001fd53c8",
+ "send_msg": {"index": 1,
+ "platform": "test",
+ "obj_key": obj_key,
+ "template_info": {"height": 413, "width": 295, "name": "一寸"},
+ "size": {"name": "一寸",
+ "w": 295,
+ "h": 431},
+ "time": "test",
+ }
+ }
+ service(msg_)
diff --git a/idPhotoCreateUtils.py b/idPhotoCreateUtils.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b1e8cba707f862aad3c2c33516bb4d6ff98deca
--- /dev/null
+++ b/idPhotoCreateUtils.py
@@ -0,0 +1,209 @@
+"""
+@author: cuny
+@file: idPhotoCreateUtils.py
+@time: 2022/4/4 14:37
+@description:
+Service class for ID photo creation; adds a face-alignment step
+"""
+from _service import *
+from hivisionai.hycv.utils import CV2Bytes
+from _lib import AliyunUser, HY_HUMAN_MATTING_WEIGHTS_PATH
+from face_judgement_align import IDphotos_create
+from error import IDError
+import onnxruntime
+import time
+import cv2
+
+
+class IdPhotoCreateService(Service, CV2Bytes):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+        # Set up the preloaded model parameters: dlib, head matting, neck matting, etc.
+        print("Initializing the ID photo creation object...")
+ start = time.time()
+ self.__human_sess = None
+        self.fd68 = None  # reserved hook for a local face detector
+ self.user = AliyunUser()
+ print(f"初始化完毕,总耗时{round(time.time() - start, 2)}秒")
+
+ @property
+ def human_sess(self):
+ if self.__human_sess is None:
+ print("加载模型...")
+ self.__human_sess = onnxruntime.InferenceSession(HY_HUMAN_MATTING_WEIGHTS_PATH)
+ return self.__human_sess
+
+ def createMsg(self, status, msg, *args, **kwargs):
+ """
+ 本方法用于创建一个用于发送到WebSocket客户端的数据
+ 输入的信息部分,需要有如下几个参数:
+ 1. id,固定为"return-result"
+ 2. status,如果输入为1则status=true, 如果输入为-1则status=false
+ 3. obj_key, 图片的云端路径, 这是输入的msg本身自带的
+ """
+ msg['status'] = True if status >= 1 else False # 最好还是用bool
+ msg['id'] = "async-back-msg"
+ msg['type'] = "certificatePhoto"
+ msg["format"] = "png"
+ return msg
+
+ def process(self,
+ image_pre,
+ oss_image_name,
+ w=295,
+ h=413,
+ beauty=False,
+ upload_path_hd=None,
+ upload_path_common=None,
+ if_upload: bool = True):
+ """
+ 处理函数
+ Args:
+ image_pre: 输入的原图
+ oss_image_name: 上传阿里云api的尺寸图像
+ w: 证件照尺寸-宽
+ h: 证件照尺寸-高
+ beauty: 是否美颜
+ upload_path_hd: 高清图上传cos路径
+ upload_path_common: 标清图上传cos路径
+ if_upload: 是否上传,不同选择返回的参数不同
+
+ Returns:
+ 1. if if_upload is True:
+ 函数会将图像上传,不返回图像仅返回参数
+ 2. if if_upload is False:
+ 函数不会将图像上传,返回图像和一些参数
+ """
+ print("oss_name:", oss_image_name)
+ result_image_HD, result_image, _, \
+ typography_arr, typography_rotate, \
+ relative_x, relative_y, w, h, id_temp_info = IDphotos_create(image_pre,
+ size=(h, w),
+ head_height_ratio=0.45,
+ head_measure_ratio=0.2,
+ align=True,
+ beauty=beauty,
+ fd68=self.fd68,
+ human_sess=self.load_sess_generator("human_sess"),
+ oss_image_name=oss_image_name,
+ user=self.user)
+
+ if if_upload:
+            # Upload the images (cloud mode)
+ print("[图像尺寸]: ", result_image_HD.shape)
+ result_image_HD_byte = self.cv2_byte(result_image_HD, imageType=".png")
+ self.uploadFile_COS(buffer=result_image_HD_byte, key=upload_path_hd)
+ result_image_byte = self.cv2_byte(result_image, imageType=".png")
+ self.uploadFile_COS(buffer=result_image_byte, key=upload_path_common)
+ print("[image send success]")
+ return typography_arr, typography_rotate, relative_x, relative_y, w, h, id_temp_info
+ else:
+            # Do not upload; return the processed images and parameters
+ return result_image_HD, result_image, typography_arr, typography_rotate, relative_x, relative_y, w, h, id_temp_info
+
+ def checkKey(self, msg):
+ print("GET", msg)
+ try:
+ uid, send_msg = msg["uid"], msg["send_msg"]
+ connectionID = None
+ except KeyError:
+ connectionID, send_msg = msg["connectionID"], msg["send_msg"]
+ uid = send_msg["uid"]
+        download_path: str = send_msg["obj_key"]  # COS download path
+        # platform = send_msg["platform"] if "platform" in send_msg else "undefined"  # outfit-change count
+        # Get the target ID photo size
+ template_info = send_msg["template_info"]
+ w, h, name = int(template_info["width"]), int(template_info["height"]), template_info["name"]
+        # Build the COS upload (result) paths
+ img_format = send_msg['obj_key'][send_msg['obj_key'].rfind('.') + 1:]
+ tr = send_msg['obj_key'].replace(img_format, 'png')
+ upload_path_hd: str = tr.replace("old-image", "new-image/hd")
+ upload_path_common: str = tr.replace("old-image", "new-image/common")
+ image_name = f"{uid}_{upload_path_common.split('/')[-1]}"
+ send_msg["hd_key"] = upload_path_hd # 回传云端结果图片路径(高清照)
+ send_msg["common_key"] = upload_path_common # 回传云端结果图片路径(高清照)
+ return (w, h, name), (download_path, upload_path_hd, upload_path_common), image_name, send_msg, (
+ uid, connectionID)
+
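+    # Worked example for checkKey (illustrative): given an obj_key such as
+    # ".../old-image/1650332506715.png", the upload paths become
+    # ".../new-image/hd/1650332506715.png" and ".../new-image/common/1650332506715.png",
+    # and image_name becomes "<uid>_1650332506715.png".
+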
+ def __call__(self, msg, *args, **kwargs):
+ """
+        Service entry for the ID photo creation algorithm.
+        """
+        # -------------- Initialize some data -------------- #
+ print(msg)
+ backMsg, uid = None, ""
+ status_id = "0000"
+ funcDiary = FuncDiary("certificatePhoto")
+ # noinspection PyBroadException
+ try:
+ (w, h, name), (download_path, upload_path_hd, upload_path_common), image_name, backMsg, uid = self.checkKey(
+ msg)
+            # ---------------- Data retrieved ---------------- #
+            # Start processing
+            print("start...")
+            # start = time.time()
+            resp = self.downloadFile_COS(download_path, if_read=False)  # download the image
+            image_byte = resp['Body'].get_raw_stream().read()  # read the image bytes
+            # Convert the bytes to a cv2 image (lossless)
+            image_pre = self.byte_cv2(image_byte, flags=cv2.IMREAD_COLOR)
+            # cv2.imwrite(f"test_image/cloud_img.{img_format}", image_pre)
+            # np_arr = np.frombuffer(image_byte, np.uint8)
+            # image = cv2.imdecode(np_arr, -1)
+            # Image downloaded; start the actual processing
+            print("processing...")
+            # ID photo creation
+            # Note: the returned w and h are not the same as the input w and h
+ backMsg["typography_arr"], backMsg["typography_rotate"], \
+ backMsg["relative_x"], backMsg["relative_y"], \
+ backMsg["w_create"], backMsg["h_create"], \
+ backMsg["id_temp_info"] = self.process(image_pre=image_pre,
+ oss_image_name=image_name,
+ w=w,
+ h=h,
+ upload_path_hd=upload_path_hd,
+ upload_path_common=upload_path_common)
+ except IDError as e:
+            # ------------ Processing failed; there are two error types --------------- #
+            # 1. A portrait error: the user uploaded a photo with no face (or one too blurry)
+            #    or with two or more faces. In that case face_num = 0 or 2 and
+            #    back_msg["status"] is True.
+            # 2. Otherwise it is an unknown error: face_num is absent from back_msg and
+            #    back_msg["status"] is False.
+            # ----------------------------------------------- #
+ # print(type(e), e.err)
+ status_id = e.status_id
+ if e.face_num != -1:
+ backMsg["face_num"] = e.face_num
+ backMsg = self.createMsg(status=1, msg=backMsg) # back_msg["status"] is True
+ else:
+                # Matting failed
+ backMsg = self.createMsg(status=-1, msg=backMsg)
+ print("fail!")
+ except cv2.error:
+ status_id = "1103"
+ backMsg = self.createMsg(status=-1, msg=backMsg)
+ print("fail!")
+ except Exception as e:
+ status_id = "1500"
+ print("[ERROR] ", e)
+ backMsg["problem"] = str(e)
+ backMsg = self.createMsg(status=-1, msg=backMsg)
+ print("fail!")
+ else:
+            # No error
+            backMsg = self.createMsg(status=1, msg=backMsg)
+            # Processing succeeded; add the success fields to the returned message
+            backMsg["face_num"] = 1  # number of faces; it is necessarily 1 on success
+ print("success!")
+ finally:
+            # print(back_msg)  # print the returned data for debugging
+            self.sendMsg(backMsg, uid)
+            # ------------------ Upload the diary ------------------- #
+            funcDiary.content = backMsg
+            funcDiary.uploadDiary_COS(status_id=status_id, uid=uid[0])
+            # ------------------ Upload finished ------------------- #
+            assert status_id == "0000", f"The function raised an exception: {status_id}"
+
+
+def load_sess(idPhotoCreateService: IdPhotoCreateService):
+ while True:
+ yield idPhotoCreateService.human_sess
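+
+
+# Illustrative note (an assumption about intended use, not exercised elsewhere in
+# this file): `human_sess` is created lazily, so a generator like load_sess defers
+# loading the ONNX weights until the first inference actually needs them, e.g.
+#
+#   ipcs = IdPhotoCreateService()
+#   sess_gen = load_sess(ipcs)   # nothing loaded yet
+#   sess = next(sess_gen)        # first next() builds the InferenceSession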
diff --git a/imageTransform.py b/imageTransform.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a6fae0a79c4984965b0dfd366f52912b4d637df
--- /dev/null
+++ b/imageTransform.py
@@ -0,0 +1,218 @@
+import numpy as np
+import cv2
+import functools
+import time
+from hivisionai.hycv.matting_tools import read_modnet_image
+
+
+def calTime(mark):
+ if isinstance(mark, str):
+        def decorator(func):
+            @functools.wraps(func)
+            def wrapper(*args, **kw):
+                start_time = time.time()
+                return_param = func(*args, **kw)
+                print("[Mark-{}] {} took {:.2f} s.".format(mark, func.__name__, time.time() - start_time))
+                return return_param
+
+            return wrapper
+
+        return decorator
+ else:
+ func = mark
+
+ @functools.wraps(func)
+ def wrapper(*args, **kw):
+ start_time = time.time()
+ return_param = func(*args, **kw)
+ print("{} 函数花费的时间为 {:.2f}.".format(func.__name__, time.time() - start_time))
+ return return_param
+
+ return wrapper
+
+
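+# Usage sketch for calTime (illustrative; `some_step` is a hypothetical function,
+# not defined in this repo). The decorator works with or without a mark string:
+#
+#   @calTime("resize")      # prints "[Mark-resize] some_step took 0.12 s."
+#   def some_step(image): ...
+#
+#   @calTime                # prints "some_step took 0.12 s."
+#   def some_step(image): ...
+
+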
+def standard_photo_resize(input_image: np.ndarray, size):
+    """
+    Resize the HD photo down to the standard-photo size, stepping down gradually
+    with INTER_AREA when the scale factor is 2 or larger.
+    input_image: input image, i.e. the HD photo
+    size: target size of the standard photo, (height, width)
+    """
+ resize_ratio = input_image.shape[0] / size[0]
+ resize_item = int(round(input_image.shape[0] / size[0]))
+ if resize_ratio >= 2:
+ for i in range(resize_item - 1):
+ if i == 0:
+ result_image = cv2.resize(input_image,
+ (size[1] * (resize_item - i - 1), size[0] * (resize_item - i - 1)),
+ interpolation=cv2.INTER_AREA)
+ else:
+ result_image = cv2.resize(result_image,
+ (size[1] * (resize_item - i - 1), size[0] * (resize_item - i - 1)),
+ interpolation=cv2.INTER_AREA)
+ else:
+ result_image = cv2.resize(input_image, (size[1], size[0]), interpolation=cv2.INTER_AREA)
+
+ return result_image
+
+
+def hollowOutFix(src: np.ndarray) -> np.ndarray:
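+    """
+    Fill hollow (transparent) regions inside the subject of a BGRA image: the
+    alpha channel is padded, thresholded and eroded, the largest external
+    contour is drawn, the area outside that contour is flood-filled from the
+    border, and every pixel not reachable from the border is forced opaque.
+    """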
+ b, g, r, a = cv2.split(src)
+ src_bgr = cv2.merge((b, g, r))
+ # -----------padding---------- #
+ add_area = np.zeros((10, a.shape[1]), np.uint8)
+ a = np.vstack((add_area, a, add_area))
+ add_area = np.zeros((a.shape[0], 10), np.uint8)
+ a = np.hstack((add_area, a, add_area))
+ # -------------end------------ #
+ _, a_threshold = cv2.threshold(a, 127, 255, 0)
+ a_erode = cv2.erode(a_threshold, kernel=cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5)), iterations=3)
+ contours, hierarchy = cv2.findContours(a_erode, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
+ contours = [x for x in contours]
+ # contours = np.squeeze(contours)
+ contours.sort(key=lambda c: cv2.contourArea(c), reverse=True)
+ a_contour = cv2.drawContours(np.zeros(a.shape, np.uint8), contours[0], -1, 255, 2)
+ # a_base = a_contour[1:-1, 1:-1]
+ h, w = a.shape[:2]
+    mask = np.zeros([h + 2, w + 2], np.uint8)  # the mask must be 2 px larger in each dimension and a single-channel uint8 array
+ cv2.floodFill(a_contour, mask=mask, seedPoint=(0, 0), newVal=255)
+ a = cv2.add(a, 255 - a_contour)
+ return cv2.merge((src_bgr, a[10:-10, 10:-10]))
+
+
+def resize_image_by_min(input_image, esp=600):
+ """
+    Scale the image so that its shorter side is at least `esp` pixels (600 by default).
+    :param input_image: input image (OpenCV matrix)
+    :param esp: minimum length of the shorter side after scaling
+    :return: the scaled image and the scale factor
+ """
+ height, width = input_image.shape[0], input_image.shape[1]
+ min_border = min(height, width)
+ if min_border < esp:
+ if height >= width:
+ new_width = esp
+ new_height = height * esp // width
+ else:
+ new_height = esp
+ new_width = width * esp // height
+
+ return cv2.resize(input_image, (new_width, new_height), interpolation=cv2.INTER_AREA), new_height / height
+
+ else:
+ return input_image, 1
+
+
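+# Worked example for resize_image_by_min (illustrative): for an 800x500
+# (height x width) input with esp=600, the shorter side is 500 < 600 and
+# height >= width, so new_width = 600 and new_height = 800 * 600 // 500 = 960;
+# the function returns the resized 960x600 image and the scale factor
+# 960 / 800 = 1.2.
+
+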
+def rotate_bound(image, angle):
+ """
+    Rotate an image by a given angle without losing any image content (the canvas is enlarged to fit).
+ """
+ # grab the dimensions of the image and then determine the
+ # center
+ (h, w) = image.shape[:2]
+ (cX, cY) = (w / 2, h / 2)
+
+ # grab the rotation matrix (applying the negative of the
+ # angle to rotate clockwise), then grab the sine and cosine
+ # (i.e., the rotation components of the matrix)
+ M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
+ cos = np.abs(M[0, 0])
+ sin = np.abs(M[0, 1])
+
+ # compute the new bounding dimensions of the image
+ nW = int((h * sin) + (w * cos))
+ nH = int((h * cos) + (w * sin))
+
+ # adjust the rotation matrix to take into account translation
+ M[0, 2] += (nW / 2) - cX
+ M[1, 2] += (nH / 2) - cY
+
+ # perform the actual rotation and return the image
+ return cv2.warpAffine(image, M, (nW, nH)), cos, sin
+
+
+def rotate_bound_4channels(image, a, angle):
+ """
+    Lossless rotation for a 3-channel image plus its matte: rotates both and
+    merges them back into a 4-channel result.
+    """
+    input_image, cos, sin = rotate_bound(image, angle)
+    new_a, _, _ = rotate_bound(a, angle)  # rotate the matte as well so it can be merged afterwards
+    b, g, r = cv2.split(input_image)
+    result_image = cv2.merge((b, g, r, new_a))  # lossless rotation of the matting result
+
+    # return the rotated image, the 4-channel result, and the rotation components
+ return input_image, result_image, cos, sin
+
+
+def draw_picture_dots(image, dots, pen_size=10, pen_color=(0, 0, 255)):
+ """
+    Draw dots on an image.
+    image: OpenCV image matrix
+    dots: a collection of points, e.g. [(100, 100), (150, 100)]
+    pen_size: radius of the pen
+    pen_color: color of the pen (BGR)
+ """
+ if isinstance(dots, dict):
+ dots = [v for u, v in dots.items()]
+ image = image.copy()
+ dots = list(dots)
+ for dot in dots:
+ # print("dot: ", dot)
+ x = dot[0]
+ y = dot[1]
+ cv2.circle(image, (int(x), int(y)), pen_size, pen_color, -1)
+ return image
+
+
+def get_modnet_matting(input_image, sess, ref_size=512):
+ """
+    Run MODNet matting on the image.
+    :param input_image: input image (OpenCV matrix)
+    :param sess: onnxruntime inference session
+    :param ref_size: reference size used for scaling
+    :return: the matted 4-channel image
+ """
+ input_name = sess.get_inputs()[0].name
+ output_name = sess.get_outputs()[0].name
+
+ im, width, length = read_modnet_image(input_image=input_image, ref_size=ref_size)
+
+ matte = sess.run([output_name], {input_name: im})
+ matte = (matte[0] * 255).astype('uint8')
+ matte = np.squeeze(matte)
+ mask = cv2.resize(matte, (width, length), interpolation=cv2.INTER_AREA)
+ b, g, r = cv2.split(np.uint8(input_image))
+
+ output_image = cv2.merge((b, g, r, mask))
+
+ return output_image
+
+
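+# Usage sketch for get_modnet_matting (illustrative). The session is a plain
+# onnxruntime InferenceSession; the weight-file path below is a placeholder,
+# not a path guaranteed to exist in this repo:
+#
+#   import onnxruntime
+#   sess = onnxruntime.InferenceSession("path/to/modnet_weights.onnx")
+#   cutout = get_modnet_matting(cv2.imread("input.jpg"), sess, ref_size=512)
+#   cv2.imwrite("cutout.png", cutout)   # 4-channel BGRA result
+
+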
+def detect_distance(value, crop_heigh, max=0.06, min=0.04):
+ """
+    Check whether the distance between the top of the head and the top of the
+    photo is within the proper range.
+    Input: the offset from the top
+    Output: (status, move_value)
+    status=0  no movement needed
+    status=1  the face should move up (the crop box moves down)
+    status=-1 the face should move down (the crop box moves up)
+    ---------------------------------------
+    value: distance between the top of the head and the top of the photo
+    crop_heigh: height of the crop box
+    max: upper bound of the ratio (distance / crop height)
+    min: lower bound of the ratio (distance / crop height)
+ ---------------------------------------
+ """
+    value = value / crop_heigh  # ratio of the space above the head to the crop height
+ if min <= value <= max:
+ return 0, 0
+ elif value > max:
+        # The ratio above the head is larger than max
+        move_value = value - max
+        move_value = int(move_value * crop_heigh)
+        # print("move up by {}".format(move_value))
+ return 1, move_value
+ else:
+        # The ratio above the head is smaller than min
+        move_value = min - value
+        move_value = int(move_value * crop_heigh)
+        # print("move down by {}".format(move_value))
+ return -1, move_value
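+
+
+# Worked example for detect_distance (illustrative): with value=100 and
+# crop_heigh=1000 the ratio is 0.1 > max=0.06, so the function returns
+# (1, int((0.1 - 0.06) * 1000)) == (1, 40), i.e. move the face up by 40 px
+# (the crop box moves down); any ratio inside [0.04, 0.06] returns (0, 0).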
diff --git a/images/test.jpg b/images/test.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..bb7127a64108e5cc8a0062586e255c62c9e5e934
Binary files /dev/null and b/images/test.jpg differ
diff --git a/images/test2.jpg b/images/test2.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..298a9c0803331ce8c9e884002062d5807057e90d
Binary files /dev/null and b/images/test2.jpg differ
diff --git a/images/test3.jpg b/images/test3.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e48b1684f556573cfa2d65b385482e9ecfd35c5e
Binary files /dev/null and b/images/test3.jpg differ
diff --git a/images/test4.jpg b/images/test4.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..845c8fb4de33a414f597660926428249d0b73b15
Binary files /dev/null and b/images/test4.jpg differ
diff --git a/layoutCreate.py b/layoutCreate.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c8ea41cffb3f9d9aed8bb29cea270ee25a373eb
--- /dev/null
+++ b/layoutCreate.py
@@ -0,0 +1,113 @@
+import cv2
+import numpy as np
+
+def judge_layout(input_width, input_height, PHOTO_INTERVAL_W, PHOTO_INTERVAL_H, LIMIT_BLOCK_W, LIMIT_BLOCK_H):
+    centerBlockHeight_1, centerBlockWidth_1 = input_height, input_width  # central block formed by the ID photos (1 = non-transposed layout)
+    centerBlockHeight_2, centerBlockWidth_2 = input_width, input_height  # central block formed by the ID photos (2 = transposed layout)
+
+    # 1. Non-transposed layout:
+    layout_col_no_transpose = 0  # columns
+    layout_row_no_transpose = 0  # rows
+ for i in range(1, 4):
+ centerBlockHeight_temp = input_height * i + PHOTO_INTERVAL_H * (i-1)
+ if centerBlockHeight_temp < LIMIT_BLOCK_H:
+ centerBlockHeight_1 = centerBlockHeight_temp
+ layout_row_no_transpose = i
+ else:
+ break
+ for j in range(1, 9):
+ centerBlockWidth_temp = input_width * j + PHOTO_INTERVAL_W * (j-1)
+ if centerBlockWidth_temp < LIMIT_BLOCK_W:
+ centerBlockWidth_1 = centerBlockWidth_temp
+ layout_col_no_transpose = j
+ else:
+ break
+ layout_number_no_transpose = layout_row_no_transpose*layout_col_no_transpose
+
+    # 2. Transposed layout:
+    layout_col_transpose = 0  # columns
+    layout_row_transpose = 0  # rows
+ for i in range(1, 4):
+ centerBlockHeight_temp = input_width * i + PHOTO_INTERVAL_H * (i-1)
+ if centerBlockHeight_temp < LIMIT_BLOCK_H:
+ centerBlockHeight_2 = centerBlockHeight_temp
+ layout_row_transpose = i
+ else:
+ break
+ for j in range(1, 9):
+ centerBlockWidth_temp = input_height * j + PHOTO_INTERVAL_W * (j-1)
+ if centerBlockWidth_temp < LIMIT_BLOCK_W:
+ centerBlockWidth_2 = centerBlockWidth_temp
+ layout_col_transpose = j
+ else:
+ break
+ layout_number_transpose = layout_row_transpose*layout_col_transpose
+
+ if layout_number_transpose > layout_number_no_transpose:
+ layout_mode = (layout_col_transpose, layout_row_transpose, 2)
+ return layout_mode, centerBlockWidth_2, centerBlockHeight_2
+ else:
+ layout_mode = (layout_col_no_transpose, layout_row_no_transpose, 1)
+ return layout_mode, centerBlockWidth_1, centerBlockHeight_1
+
+
+def generate_layout_photo(input_height, input_width):
+    # 1. Basic parameters
+    LAYOUT_WIDTH = 1746
+    LAYOUT_HEIGHT = 1180
+    PHOTO_INTERVAL_H = 30  # vertical spacing between adjacent ID photos
+    PHOTO_INTERVAL_W = 30  # horizontal spacing between adjacent ID photos
+    SIDES_INTERVAL_H = 50  # vertical margin between the photos and the canvas edge
+    SIDES_INTERVAL_W = 70  # horizontal margin between the photos and the canvas edge
+    LIMIT_BLOCK_W = LAYOUT_WIDTH - 2*SIDES_INTERVAL_W
+    LIMIT_BLOCK_H = LAYOUT_HEIGHT - 2*SIDES_INTERVAL_H
+
+    # 2. Create a blank 1180x1746 canvas
+ white_background = np.zeros([LAYOUT_HEIGHT, LAYOUT_WIDTH, 3], np.uint8)
+ white_background.fill(255)
+
+    # 3. Compute the photo layout (columns, rows, orientation) and the resolution of the central block formed by the ID photos
+ layout_mode, centerBlockWidth, centerBlockHeight = judge_layout(input_width, input_height, PHOTO_INTERVAL_W,
+ PHOTO_INTERVAL_H, LIMIT_BLOCK_W, LIMIT_BLOCK_H)
+    # 4. Arrange the photos
+ x11 = (LAYOUT_WIDTH - centerBlockWidth)//2
+ y11 = (LAYOUT_HEIGHT - centerBlockHeight)//2
+ typography_arr = []
+ typography_rotate = False
+ if layout_mode[2] == 2:
+ input_height, input_width = input_width, input_height
+ typography_rotate = True
+
+ for j in range(layout_mode[1]):
+ for i in range(layout_mode[0]):
+ xi = x11 + i*input_width + i*PHOTO_INTERVAL_W
+ yi = y11 + j*input_height + j*PHOTO_INTERVAL_H
+ typography_arr.append([xi, yi])
+
+ return typography_arr, typography_rotate
+
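+# Worked example for generate_layout_photo (illustrative): with a 295x413 photo
+# the usable block is 1606x1080 (1746 - 2*70 wide, 1180 - 2*50 high). In the
+# non-transposed orientation 5 columns x 2 rows fit (10 photos); transposed,
+# only 3 x 3 fit (9), so judge_layout returns layout_mode = (5, 2, 1) and
+# typography_arr holds the 10 top-left corners on the 1746x1180 canvas.
+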
+def generate_layout_image(input_image, typography_arr, typography_rotate, width=295, height=413):
+ LAYOUT_WIDTH = 1746
+ LAYOUT_HEIGHT = 1180
+ white_background = np.zeros([LAYOUT_HEIGHT, LAYOUT_WIDTH, 3], np.uint8)
+ white_background.fill(255)
+ if input_image.shape[0] != height:
+ input_image = cv2.resize(input_image, (width, height))
+ if typography_rotate:
+ input_image = cv2.transpose(input_image)
+ height, width = width, height
+ for arr in typography_arr:
+ locate_x, locate_y = arr[0], arr[1]
+ white_background[locate_y:locate_y+height, locate_x:locate_x+width] = input_image
+
+ return white_background
+
+
+if __name__ == "__main__":
+ typography_arr, typography_rotate = generate_layout_photo(input_height=413, input_width=295)
+ print("typography_arr:", typography_arr)
+ print("typography_rotate:", typography_rotate)
+ result_image = generate_layout_image(cv2.imread("./32.jpg"), typography_arr, typography_rotate, width=295, height=413)
+ cv2.imwrite("./result_image.jpg", result_image)
+
+
diff --git a/move_image.py b/move_image.py
new file mode 100644
index 0000000000000000000000000000000000000000..9db807d7c65283080d9efbd7a32f3ad0c7eb8cbc
--- /dev/null
+++ b/move_image.py
@@ -0,0 +1,134 @@
+"""
+Some PNG images have a transparent strip at the bottom, so the subject does not reach the bottom border.
+This script shifts the image so that its content is aligned with the actual bottom of the PNG.
+"""
+import os
+import cv2
+import numpy as np
+from hivisionai.hycv.utils import get_box_pro
+
+path_pre = os.path.join(os.getcwd(), 'pre')
+path_final = os.path.join(os.getcwd(), 'final')
+
+
+def merge(boxes):
+ """
+    More than one bounding box may be produced; merge them into a single box.
+    """
+    x, y, w, h = boxes[0]  # cv2.boundingRect returns (x, y, w, h)
+    # x and y should be the smallest values among all the boxes
+ if len(boxes) > 1:
+ for tmp in boxes:
+            x_tmp, y_tmp, w_tmp, h_tmp = tmp
+ if x > x_tmp:
+ x_max = x_tmp + w_tmp if x_tmp + w_tmp > x + w else x + w
+ x = x_tmp
+ w = x_max - x
+ if y > y_tmp:
+ y_max = y_tmp + h_tmp if y_tmp + h_tmp > y + h else y + h
+ y = y_tmp
+ h = y_max - y
+ return tuple((x, y, h, w))
+
+
+def get_box(png_img):
+ """
+    Compute the bounding rectangle and return a tuple (x, y, h, w): the coordinates
+    of the top-left corner plus the height and width of the rectangle.
+    """
+    b, g, r, a = cv2.split(png_img)
+    gray_img = a
+    th, binary = cv2.threshold(gray_img, 127, 255, cv2.THRESH_BINARY)  # binarize
+    # cv2.imshow("name", binary)
+    # cv2.waitKey(0)
+    contours, hierarchy = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # list of contours
+    bounding_boxes = merge([cv2.boundingRect(cnt) for cnt in contours])  # merge the contours' boxes
+ # print(bounding_boxes)
+ return bounding_boxes
+
+
+def get_box_2(png_img):
+ """
+    Find the rectangle without OpenCV's built-in contour algorithm, using plain for-loops instead.
+    """
+    _, _, _, a = cv2.split(png_img)
+    _, a = cv2.threshold(a, 127, 255, cv2.THRESH_BINARY)
+    # Drop the r, g, b channels and keep only the alpha channel
+    # cv2.imshow("name", a)
+    # cv2.waitKey(0)
+    # In the alpha matrix, 0 means fully transparent
+    height, width = a.shape  # height and width
+ f=0
+ tmp1 = 0
+
+ """
+    Find the top and bottom bounds
+ """
+ for tmp1 in range(0,height):
+ tmp_a_high= a[tmp1:tmp1+1,:][0]
+ for tmp2 in range(width):
+ # a = tmp_a_low[tmp2]
+ if tmp_a_high[tmp2]!=0:
+ f=1
+ if f == 1:
+ break
+ delta_y_high = tmp1 + 1
+ f = 0
+ for tmp1 in range(height,-1, -1):
+ tmp_a_low= a[tmp1-1:tmp1+1,:][0]
+ for tmp2 in range(width):
+ # a = tmp_a_low[tmp2]
+ if tmp_a_low[tmp2]!=0:
+ f=1
+ if f == 1:
+ break
+ delta_y_bottom = height - tmp1 + 3
+ """
+    Find the left and right bounds
+ """
+ f = 0
+ for tmp1 in range(width):
+ tmp_a_left = a[:, tmp1:tmp1+1]
+ for tmp2 in range(height):
+ if tmp_a_left[tmp2] != 0:
+ f = 1
+ if f==1:
+ break
+ delta_x_left = tmp1 + 1
+ f = 0
+ for tmp1 in range(width, -1, -1):
+ tmp_a_left = a[:, tmp1-1:tmp1]
+ for tmp2 in range(height):
+ if tmp_a_left[tmp2] != 0:
+ f = 1
+ if f==1:
+ break
+ delta_x_right = width - tmp1 + 1
+ return delta_y_high, delta_y_bottom, delta_x_left, delta_x_right
+
+
+def move(input_image):
+ """
+    Main shifting function: takes a PNG image whose borders are transparent.
+ """
+    png_img = input_image  # the input image
+
+    height, width, channels = png_img.shape  # height (y), width (x)
+    y_low, y_high, _, _ = get_box_pro(png_img, model=2)  # loop-based box detection
+    base = np.zeros((y_high, width, channels), dtype=np.uint8)  # transparent strip to prepend at the top
+    png_img = png_img[0:height - y_high, :, :]  # drop the empty rows at the bottom
+ png_img = np.concatenate((base, png_img), axis=0)
+ return png_img, y_high
+
+
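+# Worked example for move (illustrative, assuming get_box_pro(model=2) reports the
+# number of empty rows at the bottom as its second value): if y_high == 25, the
+# bottom 25 rows are dropped, 25 transparent rows are prepended at the top, and the
+# function returns the shifted image together with 25, so the subject now touches
+# the bottom edge.
+
+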
+def main():
+ if not os.path.exists(path_pre):
+ os.makedirs(path_pre)
+ if not os.path.exists(path_final):
+ os.makedirs(path_final)
+ for name in os.listdir(path_pre):
+ pass
+ # move(name)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a6b53bd37a97cf771be048673d5730354b345c28
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,4 @@
+opencv-python
+onnx
+onnxruntime
+numpy
\ No newline at end of file
diff --git a/sources/.gitkeep b/sources/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..0519ecba6ea913e21689ec692e81e9e4973fbf73
--- /dev/null
+++ b/sources/.gitkeep
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/sources/demoImage.png b/sources/demoImage.png
new file mode 100644
index 0000000000000000000000000000000000000000..8c99538d5e6d8509d60786aeae2aa99e03abb37b
--- /dev/null
+++ b/sources/demoImage.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18aba5dc15d286156cd06c3c977f3e029e94e8fd48afb9b53264048cc2a4d429
+size 1621741
diff --git a/sources/gradio-image.jpeg b/sources/gradio-image.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..347ea3fd5429d1407e08c1f2b830cf8615bc6757
Binary files /dev/null and b/sources/gradio-image.jpeg differ
diff --git a/testParam.json b/testParam.json
new file mode 100644
index 0000000000000000000000000000000000000000..a1486cc17852d40247a2092f443f8b6f9e09bc37
--- /dev/null
+++ b/testParam.json
@@ -0,0 +1 @@
+{"payTime": {"face_number_and_angle_detection": "1.61", "aliyun_matting": "2.73", "MTCNN": "0.23", "IDphoto_cutting": "0.03", "Total": "4.64"}, "running": {"matting_model": "API", "status_top": 0, "move_value": 0, "cutting_mode": "01", "x1": 80, "y1": -69, "x2": 1354, "y2": 1714}}
\ No newline at end of file