Commit 7173af9 ("whitening") by TheEeeeLin
Parent: b2cb163
Files changed:
- demo/locals.py +16 -0
- demo/processor.py +19 -8
- demo/ui.py +36 -2
- demo/utils.py +0 -17
- hivision/creator/__init__.py +35 -4
- hivision/creator/context.py +30 -1
- hivision/creator/retinaface/inference.py +34 -5
- hivision/plugin/beauty/__init__.py +1 -0
- hivision/plugin/beauty/beauty_tools.py +49 -0
- hivision/plugin/beauty/grind_skin.py +44 -0
- hivision/plugin/beauty/lut/lut_origin.png +0 -0
- hivision/plugin/beauty/thin_face.py +304 -0
- hivision/plugin/beauty/whitening.py +83 -0
- hivision/utils.py +26 -7
demo/locals.py
CHANGED
@@ -292,4 +292,20 @@ LOCALES = {
             "label": "抠图图像",
         },
     },
+    "beauty_tab": {
+        "en": {
+            "label": "Beauty",
+        },
+        "zh": {
+            "label": "美颜",
+        },
+    },
+    "whitening_strength": {
+        "en": {
+            "label": "whitening strength",
+        },
+        "zh": {
+            "label": "美白强度",
+        },
+    },
 }
demo/processor.py
CHANGED
@@ -1,13 +1,13 @@
 import numpy as np
 from hivision import IDCreator
 from hivision.error import FaceError, APIError
-from hivision.utils import add_background, resize_image_to_kb
+from hivision.utils import add_background, resize_image_to_kb, add_watermark
 from hivision.creator.layout_calculator import (
     generate_layout_photo,
     generate_layout_image,
 )
 from hivision.creator.choose_handler import choose_handler
-from demo.utils import range_check, add_watermark
+from demo.utils import range_check
 import gradio as gr
 import os
 import time
@@ -41,7 +41,7 @@ class IDPhotoProcessor:
         face_detect_option,
         head_measure_ratio=0.2,
         top_distance_max=0.12,
-
+        whitening_strength=0,
     ):
         top_distance_min = top_distance_max - 0.02
 
@@ -70,6 +70,8 @@ class IDPhotoProcessor:
             return [
                 gr.update(value=None),  # img_output_standard
                 gr.update(value=None),  # img_output_standard_hd
+                gr.update(value=None),  # img_output_standard_png
+                gr.update(value=None),  # img_output_standard_hd_png
                 None,  # img_output_layout (assuming it should be None or not updated)
                 gr.update(  # notification
                     value=LOCALES["size_mode"][language]["custom_size_eror"],
@@ -113,6 +115,7 @@
             idphoto_json["size_mode"] in LOCALES["size_mode"][language]["choices"][1]
         )
 
+        # Generate the ID photo
        try:
            result = creator(
                input_image,
@@ -120,11 +123,15 @@
                 size=idphoto_json["size"],
                 head_measure_ratio=head_measure_ratio,
                 head_top_range=(top_distance_max, top_distance_min),
+                whitening_strength=whitening_strength,
             )
+        # If the number of detected faces is not exactly 1
         except FaceError:
             return [
                 gr.update(value=None),  # img_output_standard
                 gr.update(value=None),  # img_output_standard_hd
+                gr.update(value=None),  # img_output_standard_png
+                gr.update(value=None),  # img_output_standard_hd_png
                 gr.update(visible=False),  # img_output_layout
                 gr.update(  # notification
                     value=LOCALES["notification"][language]["face_error"],
@@ -132,11 +139,12 @@
                 ),
                 None,  # file_download (assuming it should be None or have no update)
             ]
-
+        # If an API error occurred
         except APIError as e:
             return [
                 gr.update(value=None),  # img_output_standard
                 gr.update(value=None),  # img_output_standard_hd
+                gr.update(value=None),  # img_output_standard_png
                 gr.update(value=None),  # img_output_standard_hd_png
                 gr.update(visible=False),  # img_output_layout
                 gr.update(  # notification
@@ -145,13 +153,14 @@
                 ),
                 None,  # file_download (assuming it should be None or have no update)
             ]
-
+        # ID photo generated successfully
         else:
-            (result_image_standard, result_image_hd, _, _) = result
+            (result_image_standard, result_image_hd, _, _, _, _) = result
 
             result_image_standard_png = np.uint8(result_image_standard)
             result_image_hd_png = np.uint8(result_image_hd)
 
+            # Solid-color rendering
             if (
                 idphoto_json["render_mode"]
                 == LOCALES["render_mode"][language]["choices"][0]
@@ -162,6 +171,7 @@
                 result_image_hd = np.uint8(
                     add_background(result_image_hd, bgr=idphoto_json["color_bgr"])
                 )
+            # Top-to-bottom gradient rendering
             elif (
                 idphoto_json["render_mode"]
                 == LOCALES["render_mode"][language]["choices"][1]
@@ -180,6 +190,7 @@
                         mode="updown_gradient",
                     )
                 )
+            # Center gradient rendering
             else:
                 result_image_standard = np.uint8(
                     add_background(
@@ -277,7 +288,7 @@
             return [
                 result_image_standard,  # img_output_standard
                 result_image_hd,  # img_output_standard_hd
-                result_image_standard_png,  #
+                result_image_standard_png,  # img_output_standard_png
                 result_image_hd_png,  # img_output_standard_hd_png
                 result_layout_image,  # img_output_layout
                 gr.update(visible=False),  # notification
@@ -287,7 +298,7 @@
             return [
                 result_image_standard,  # img_output_standard
                 result_image_hd,  # img_output_standard_hd
-                result_image_standard_png,  #
+                result_image_standard_png,  # img_output_standard_png
                 result_image_hd_png,  # img_output_standard_hd_png
                 result_layout_image,  # img_output_layout
                 gr.update(visible=False),  # notification
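A note on the shape of these returns: each list is consumed positionally by the `outputs=[...]` wiring in demo/ui.py, so when this commit adds the two `*_png` slots, every branch (success and both error paths) must grow by the same two entries in the same position. A reduced sketch of that contract follows; component names and counts are illustrative, not the demo's full list:

```python
import gradio as gr

def process(img):
    # Every branch must return one value per output component, in outputs=[...] order.
    if img is None:  # error branch
        return [gr.update(value=None)] * 4 + [gr.update(visible=True, value="error")]
    return [img, img, img, img, gr.update(visible=False)]  # success branch

with gr.Blocks() as demo:
    inp = gr.Image()
    outputs = [gr.Image() for _ in range(4)] + [gr.Textbox(visible=False)]
    gr.Button("Run").click(process, inputs=[inp], outputs=outputs)
```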
demo/ui.py
CHANGED
@@ -2,7 +2,12 @@ import gradio as gr
 import os
 import pathlib
 from demo.locals import LOCALES
-from ...
+from demo.processor import IDPhotoProcessor
+
+"""
+Crop-only mode:
+1. If the photo is re-uploaded and the button is clicked, the first click runs the non-crop mode and the second click runs the crop mode
+"""
 
 
 def load_description(fp):
@@ -12,7 +17,10 @@ def load_description(fp):
 
 
 def create_ui(
-    processor, ...
+    processor: IDPhotoProcessor,
+    root_dir: str,
+    human_matting_models: list,
+    face_detect_models: list,
 ):
     DEFAULT_LANG = "zh"
     DEFAULT_HUMAN_MATTING_MODEL = "modnet_photographic_portrait_matting"
@@ -55,6 +63,7 @@ def create_ui(
                 value=human_matting_models[0],
             )
 
+            # TAB1 - Key parameters
             with gr.Tab(
                 LOCALES["key_param"][DEFAULT_LANG]["label"]
             ) as key_parameter_tab:
@@ -97,6 +106,7 @@ def create_ui(
                     value=LOCALES["render_mode"][DEFAULT_LANG]["choices"][0],
                 )
 
+            # TAB2 - Advanced parameters
             with gr.Tab(
                 LOCALES["advance_param"][DEFAULT_LANG]["label"]
             ) as advance_parameter_tab:
@@ -132,6 +142,20 @@ def create_ui(
                     interactive=True,
                 )
 
+            # TAB3 - Beauty
+            with gr.Tab(
+                LOCALES["beauty_tab"][DEFAULT_LANG]["label"]
+            ) as beauty_parameter_tab:
+                whitening_option = gr.Slider(
+                    label=LOCALES["whitening_strength"][DEFAULT_LANG]["label"],
+                    minimum=0,
+                    maximum=10,
+                    value=2,
+                    step=1,
+                    interactive=True,
+                )
+
+            # TAB4 - Watermark
             with gr.Tab(
                 LOCALES["watermark_tab"][DEFAULT_LANG]["label"]
             ) as watermark_parameter_tab:
@@ -379,6 +403,12 @@ def create_ui(
             matting_image_accordion: gr.update(
                 label=LOCALES["matting_image"][language]["label"]
             ),
+            beauty_parameter_tab: gr.update(
+                label=LOCALES["beauty_tab"][language]["label"]
+            ),
+            whitening_option: gr.update(
+                label=LOCALES["whitening_strength"][language]["label"]
+            ),
         }
 
     def change_color(colors):
@@ -417,6 +447,7 @@ def create_ui(
         return {custom_image_kb: gr.update(visible=False)}
 
     # ---------------- Bind events ----------------
+    # Language switching
     language_options.input(
         change_language,
         inputs=[language_options],
@@ -450,6 +481,8 @@ def create_ui(
             watermark_text_space,
             watermark_options,
             matting_image_accordion,
+            beauty_parameter_tab,
+            whitening_option,
         ],
     )
 
@@ -494,6 +527,7 @@ def create_ui(
             face_detect_model_options,
             head_measure_ratio_option,
             top_distance_option,
+            whitening_option,
         ],
         outputs=[
            img_output_standard,
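For context, create_ui now takes the processor plus the model lists explicitly. A hypothetical launcher might look like the following; the argument values are illustrative and the repository's actual entry script may differ:

```python
import os
from demo.processor import IDPhotoProcessor
from demo.ui import create_ui

demo = create_ui(
    processor=IDPhotoProcessor(),
    root_dir=os.path.dirname(os.path.abspath(__file__)),
    human_matting_models=["modnet_photographic_portrait_matting"],
    face_detect_models=["mtcnn", "retinaface"],
)  # assuming create_ui returns the gr.Blocks app
demo.launch()
```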
demo/utils.py
CHANGED
@@ -42,20 +42,3 @@ def csv_to_color_list(csv_file: str) -> dict:
 def range_check(value, min_value=0, max_value=255):
     value = int(value)
     return max(min_value, min(value, max_value))
-
-
-def add_watermark(
-    image, text, size=50, opacity=0.5, angle=45, color="#8B8B1B", space=75
-):
-    image = Image.fromarray(image)
-    watermarker = Watermarker(
-        input_image=image,
-        text=text,
-        style=WatermarkerStyles.STRIPED,
-        angle=angle,
-        color=color,
-        opacity=opacity,
-        size=size,
-        space=space,
-    )
-    return np.array(watermarker.image.convert("RGB"))
hivision/creator/__init__.py
CHANGED
@@ -12,7 +12,8 @@ from typing import Tuple
 import hivision.creator.utils as U
 from .context import Context, ContextHandler, Params, Result
 from .human_matting import extract_human
-from .face_detector import detect_face_mtcnn
+from .face_detector import detect_face_mtcnn
+from hivision.plugin.beauty.whitening import make_whitening
 from .photo_adjuster import adjust_photo
 
@@ -51,18 +52,24 @@ class IDCreator:
         image: np.ndarray,
         size: Tuple[int, int] = (413, 295),
         change_bg_only: bool = False,
+        crop_only: bool = False,
         head_measure_ratio: float = 0.2,
         head_height_ratio: float = 0.45,
         head_top_range: float = (0.12, 0.1),
+        face: Tuple[int, int, int, int] = None,
+        whitening_strength: int = 0,
     ) -> Result:
         """
         ID-photo processing function
         :param image: input image
-        :param change_bg_only:
+        :param change_bg_only: whether to only replace the background (matting only)
+        :param crop_only: whether to only crop
         :param size: output image size (h, w)
         :param head_measure_ratio: expected ratio of face area to the full image area
         :param head_height_ratio: expected ratio of the face-center height within the image
        :param head_top_range: ratio of the head's distance to the top (max, min)
+        :param face: face coordinates
+        :param whitening_strength: whitening strength
 
         :return: the processed ID photo and a set of parameters
         """
@@ -73,6 +80,9 @@ class IDCreator:
             head_measure_ratio=head_measure_ratio,
             head_height_ratio=head_height_ratio,
             head_top_range=head_top_range,
+            crop_only=crop_only,
+            face=face,
+            whitening_strength=whitening_strength,
         )
         self.ctx = Context(params)
         ctx = self.ctx
@@ -82,30 +92,51 @@ class IDCreator:
         )  # resize the input image so its longest side is at most 2000
         ctx.origin_image = ctx.processing_image.copy()
         self.before_all and self.before_all(ctx)
+
+        # Whitening
+        if ctx.params.whitening_strength > 0:
+            ctx.processing_image = make_whitening(
+                ctx.processing_image, ctx.params.whitening_strength
+            )
+
         # 1. Human matting
-        self.matting_handler(ctx)
-        self.after_matting and self.after_matting(ctx)
+        if not ctx.params.crop_only:
+            # Run the matting workflow
+            self.matting_handler(ctx)
+            self.after_matting and self.after_matting(ctx)
+        else:
+            ctx.matting_image = ctx.processing_image
+
+        # If only replacing the background, return the matting result directly
         if ctx.params.change_bg_only:
             ctx.result = Result(
                 standard=ctx.matting_image,
                 hd=ctx.matting_image,
+                matting=ctx.matting_image,
                 clothing_params=None,
                 typography_params=None,
+                face=None,
             )
             self.after_all and self.after_all(ctx)
             return ctx.result
+
         # 2. Face detection
         self.detection_handler(ctx)
         self.after_detect and self.after_detect(ctx)
+
         # 3. Photo adjustment
         result_image_hd, result_image_standard, clothing_params, typography_params = (
             adjust_photo(ctx)
         )
+
+        # 4. Return the result
         ctx.result = Result(
             standard=result_image_standard,
             hd=result_image_hd,
+            matting=ctx.matting_image,
             clothing_params=clothing_params,
             typography_params=typography_params,
+            face=ctx.face,
         )
         self.after_all and self.after_all(ctx)
         return ctx.result
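Taken together, the pipeline is now: optional whitening on the working image, matting unless crop_only, an early return for change_bg_only, then detection and adjustment. A minimal call through the new whitening path might look like the sketch below; the handler wiring mirrors the demo's choose_handler usage, but its exact signature and the input path are assumptions:

```python
import cv2
from hivision import IDCreator
from hivision.creator.choose_handler import choose_handler

creator = IDCreator()
# The demo assigns matting/detection handlers via choose_handler;
# the positional arguments here are assumed.
choose_handler(creator, "modnet_photographic_portrait_matting", "mtcnn")

image = cv2.imread("portrait.jpg")  # illustrative input path
result = creator(image, size=(413, 295), whitening_strength=5)

# Result now iterates as six items (see context.py below).
standard, hd, matting, clothing_params, typography_params, face = result
cv2.imwrite("idphoto_standard.png", standard)
```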
hivision/creator/context.py
CHANGED
@@ -16,15 +16,21 @@ class Params:
         self,
         size: Tuple[int, int] = (413, 295),
         change_bg_only: bool = False,
+        crop_only: bool = False,
         head_measure_ratio: float = 0.2,
         head_height_ratio: float = 0.45,
         head_top_range: float = (0.12, 0.1),
+        face: Tuple[int, int, int, int] = None,
+        whitening_strength: int = 0,
     ):
         self.__size = size
         self.__change_bg_only = change_bg_only
+        self.__crop_only = crop_only
         self.__head_measure_ratio = head_measure_ratio
         self.__head_height_ratio = head_height_ratio
         self.__head_top_range = head_top_range
+        self.__face = face
+        self.__whitening_strength = whitening_strength
 
     @property
     def size(self):
@@ -46,17 +52,32 @@ class Params:
     def head_top_range(self):
         return self.__head_top_range
 
+    @property
+    def crop_only(self):
+        return self.__crop_only
+
+    @property
+    def face(self):
+        return self.__face
+
+    @property
+    def whitening_strength(self):
+        return self.__whitening_strength
+
 
 class Result:
     def __init__(
         self,
         standard: np.ndarray,
         hd: np.ndarray,
+        matting: np.ndarray,
         clothing_params: Optional[dict],
         typography_params: Optional[dict],
+        face: Optional[Tuple[int, int, int, int, float]],
     ):
         self.standard = standard
         self.hd = hd
+        self.matting = matting
         self.clothing_params = clothing_params
         """
         Clothing parameters; None when only replacing the background
@@ -65,10 +86,18 @@ class Result:
         """
         Typography (layout) parameters; None when only replacing the background
         """
+        self.face = face
 
     def __iter__(self):
         return iter(
-            [self.standard, self.hd, self.clothing_params, self.typography_params]
+            [
+                self.standard,
+                self.hd,
+                self.matting,
+                self.clothing_params,
+                self.typography_params,
+                self.face,
+            ]
         )
 
 
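Because __iter__ fixes the tuple order, positional unpacking elsewhere (e.g. the `(result_image_standard, result_image_hd, _, _, _, _) = result` in demo/processor.py) silently depends on this list. A quick self-check with dummy arrays:

```python
import numpy as np
from hivision.creator.context import Result

img = np.zeros((1, 1, 3), dtype=np.uint8)  # dummy image
res = Result(standard=img, hd=img, matting=img,
             clothing_params=None, typography_params=None, face=None)

standard, hd, matting, clothing, typography, face = res  # new 6-element order
assert matting is img and face is None
```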
hivision/creator/retinaface/inference.py
CHANGED
@@ -1,6 +1,6 @@
 import numpy as np
 import cv2
-import onnxruntime
+import onnxruntime
 from hivision.creator.retinaface.box_utils import decode, decode_landm
 from hivision.creator.retinaface.prior_box import PriorBox
 
@@ -46,10 +46,39 @@ keep_top_k = 750
 save_image = True
 vis_thres = 0.6
 
+ONNX_DEVICE = (
+    "CUDAExecutionProvider"
+    if onnxruntime.get_device() == "GPU"
+    else "CPUExecutionProvider"
+)
 
-...
-...
-...
+
+def load_onnx_model(checkpoint_path, set_cpu=False):
+    providers = (
+        ["CUDAExecutionProvider", "CPUExecutionProvider"]
+        if ONNX_DEVICE == "CUDAExecutionProvider"
+        else ["CPUExecutionProvider"]
+    )
+
+    if set_cpu:
+        sess = onnxruntime.InferenceSession(
+            checkpoint_path, providers=["CPUExecutionProvider"]
+        )
+    else:
+        try:
+            sess = onnxruntime.InferenceSession(checkpoint_path, providers=providers)
+        except Exception as e:
+            if ONNX_DEVICE == "CUDAExecutionProvider":
+                print(f"Failed to load model with CUDAExecutionProvider: {e}")
+                print("Falling back to CPUExecutionProvider")
+                # Try loading the model on CPU
+                sess = onnxruntime.InferenceSession(
+                    checkpoint_path, providers=["CPUExecutionProvider"]
+                )
+            else:
+                raise e  # if it failed on CPU, re-raise the exception
+
+    return sess
 
 
 def retinaface_detect_faces(image, model_path: str, sess=None):
@@ -75,7 +104,7 @@ def retinaface_detect_faces(image, model_path: str, sess=None):
 
     # Load ONNX model
     if sess is None:
-        retinaface = ...
+        retinaface = load_onnx_model(model_path, set_cpu=False)
     else:
         retinaface = sess
 
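The point of load_onnx_model is that onnxruntime.get_device() only reports what the installed build supports; creating a CUDA session can still fail at runtime (missing driver or CUDA libraries), so the except branch retries on CPU instead of crashing. Usage sketch, with an illustrative model path:

```python
from hivision.creator.retinaface.inference import load_onnx_model

sess = load_onnx_model("retinaface.onnx")   # tries CUDA first on GPU builds
print(sess.get_providers())                 # e.g. ['CPUExecutionProvider'] after a fallback
cpu_sess = load_onnx_model("retinaface.onnx", set_cpu=True)  # force CPU explicitly
```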
hivision/plugin/beauty/__init__.py
ADDED
@@ -0,0 +1 @@
+from .beauty_tools import BeautyTools
hivision/plugin/beauty/beauty_tools.py
ADDED
@@ -0,0 +1,49 @@
+"""
+@author: cuny
+@file: MakeBeautiful.py
+@time: 2022/7/7 20:23
+@description:
+Collection of beauty tools, serving as the externally exposed plugin interface
+"""
+
+from .grind_skin import grindSkin
+from .whitening import MakeWhiter
+from .thin_face import thinFace
+import numpy as np
+
+
+def BeautyTools(
+    input_image: np.ndarray,
+    landmark,
+    thinStrength: int,
+    thinPlace: int,
+    grindStrength: int,
+    whiterStrength: int,
+) -> np.ndarray:
+    """
+    Interface function of the beauty tools, used to apply the beauty effects
+    Args:
+        input_image: input image
+        landmark: face landmarks required for face thinning; the second value returned by fd68
+        thinStrength: thinning strength, 0-10 (higher also works); 0 or below disables thinning
+        thinPlace: thinning region, a value between 0-2; larger values move the thinning point lower
+        grindStrength: skin-smoothing strength, 0-10 (higher also works); 0 or below disables smoothing
+        whiterStrength: whitening strength, 0-10 (higher also works); 0 or below disables whitening
+    Returns:
+        output_image: the output image
+    """
+    try:
+        _, _, _ = input_image.shape
+    except ValueError:
+        raise TypeError("The input image must have 3 or 4 channels!")
+    # 3- or 4-channel image
+    # First, thin the face
+    input_image = thinFace(
+        input_image, landmark, place=thinPlace, strength=thinStrength
+    )
+    # Then smooth the skin
+    input_image = grindSkin(src=input_image, strength=grindStrength)
+    # Finally, whiten
+    makeWhiter = MakeWhiter()
+    input_image = makeWhiter.run(input_image, strength=whiterStrength)
+    return input_image
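One wrinkle: BeautyTools instantiates MakeWhiter() with no arguments, while the MakeWhiter added in whitening.py (below) requires a lut_image, so this call would fail as committed unless a default is added. Skipping the landmark-dependent thinning step, the smoothing-plus-whitening part of the chain can be exercised like this (paths illustrative):

```python
import cv2
from hivision.plugin.beauty.grind_skin import grindSkin
from hivision.plugin.beauty.whitening import MakeWhiter

img = cv2.imread("portrait.jpg")                               # illustrative path
img = grindSkin(src=img, strength=5)                           # skin smoothing
lut = cv2.imread("hivision/plugin/beauty/lut/lut_origin.png")  # LUT shipped in this commit
img = MakeWhiter(lut).run(img, strength=5)                     # whitening
cv2.imwrite("beautified.png", img)
```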
hivision/plugin/beauty/grind_skin.py
ADDED
@@ -0,0 +1,44 @@
+"""
+@author: cuny
+@file: GrindSkin.py
+@time: 2022/7/2 14:44
+@description:
+Skin-smoothing algorithm
+"""
+
+import cv2
+import numpy as np
+
+
+def grindSkin(src, grindDegree: int = 3, detailDegree: int = 1, strength: int = 9):
+    """
+    Dest = (Src * (100 - Opacity) + (Src + 2 * GaussBlur(EPFFilter(Src) - Src)) * Opacity) / 100
+    Portrait skin-smoothing scheme; a skin-region detection algorithm (or face landmarks)
+    may be used later to restrict smoothing to skin regions and refine the algorithm
+    https://www.cnblogs.com/Imageshop/p/4709710.html
+    Args:
+        src: source image
+        grindDegree: smoothing-degree tuning parameter
+        detailDegree: detail-degree tuning parameter
+        strength: blend amount, used as the smoothing strength (0 - 10)
+
+    Returns:
+        the smoothed image
+    """
+    if strength <= 0:
+        return src
+    dst = src.copy()
+    opacity = min(10.0, strength) / 10.0
+    dx = grindDegree * 5  # one of the bilateral-filter parameters
+    fc = grindDegree * 12.5  # one of the bilateral-filter parameters
+    temp1 = cv2.bilateralFilter(src[:, :, :3], dx, fc, fc)
+    temp2 = cv2.subtract(temp1, src[:, :, :3])
+    temp3 = cv2.GaussianBlur(temp2, (2 * detailDegree - 1, 2 * detailDegree - 1), 0)
+    temp4 = cv2.add(cv2.add(temp3, temp3), src[:, :, :3])
+    dst[:, :, :3] = cv2.addWeighted(temp4, opacity, src[:, :, :3], 1 - opacity, 0.0)
+    return dst
+
+
+if __name__ == "__main__":
+    input_image = cv2.imread("test_image/7.jpg")
+    output_image = grindSkin(src=input_image)
+    cv2.imwrite("grindSkinCompare.png", np.hstack((input_image, output_image)))
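The code follows the docstring formula line by line: temp1 is the edge-preserving filter (a bilateral filter here), temp2 = EPF(Src) - Src, temp3 is its Gaussian blur, temp4 = Src + 2*temp3, and addWeighted blends with Opacity = strength/10. On a perfectly flat patch the EPF output equals the input, so the whole pipeline is the identity; that makes a cheap sanity check:

```python
import numpy as np
from hivision.plugin.beauty.grind_skin import grindSkin

# On a constant image, bilateralFilter(src) == src, so Dest must equal Src.
flat = np.full((32, 32, 3), 128, dtype=np.uint8)
assert np.array_equal(grindSkin(flat, strength=9), flat)
```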
hivision/plugin/beauty/lut/lut_origin.png
ADDED
hivision/plugin/beauty/thin_face.py
ADDED
@@ -0,0 +1,304 @@
+"""
+@author: cuny
+@file: ThinFace.py
+@time: 2022/7/2 15:50
+@description:
+Face-thinning algorithm using local image translation warping
+Face landmarks are detected first, then the local translation warp is applied
+Note: dlib landmark detection is not included here, because of the model-loading cost
+"""
+
+import cv2
+import math
+import numpy as np
+
+
+class TranslationWarp(object):
+    """
+    This class contains the face-thinning algorithm; since several versions exist, it is presented as a class
+    There is little to say about the first two algorithms; material on them is easy to find online
+    The third uses numpy's custom-ufunc machinery, which improves processing speed somewhat
+    The last uses cv2.remap, which improves processing speed substantially
+    """
+
+    # Face thinning
+    @staticmethod
+    def localTranslationWarp(srcImg, startX, startY, endX, endY, radius):
+        # Bilinear interpolation
+        def BilinearInsert(src, ux, uy):
+            w, h, c = src.shape
+            if c == 3:
+                x1 = int(ux)
+                x2 = x1 + 1
+                y1 = int(uy)
+                y2 = y1 + 1
+                part1 = (
+                    src[y1, x1].astype(np.float64) * (float(x2) - ux) * (float(y2) - uy)
+                )
+                part2 = (
+                    src[y1, x2].astype(np.float64) * (ux - float(x1)) * (float(y2) - uy)
+                )
+                part3 = (
+                    src[y2, x1].astype(np.float64) * (float(x2) - ux) * (uy - float(y1))
+                )
+                part4 = (
+                    src[y2, x2].astype(np.float64) * (ux - float(x1)) * (uy - float(y1))
+                )
+                insertValue = part1 + part2 + part3 + part4
+                return insertValue.astype(np.int8)
+
+        ddradius = float(radius * radius)  # squared radius of the circle
+        copyImg = srcImg.copy()  # copied image matrix
+        # |m-c|^2 from the formula
+        ddmc = (endX - startX) * (endX - startX) + (endY - startY) * (endY - startY)
+        H, W, C = srcImg.shape  # image shape
+        for i in range(W):
+            for j in range(H):
+                # # Check whether the point lies inside the warp circle
+                # # Optimization step 1: first test against the square around (startX, startY)
+                if math.fabs(i - startX) > radius and math.fabs(j - startY) > radius:
+                    continue
+                distance = (i - startX) * (i - startX) + (j - startY) * (j - startY)
+                if distance < ddradius:
+                    # Compute the original coordinates of (i, j)
+                    # The squared part on the right-hand side of the formula
+                    ratio = (ddradius - distance) / (ddradius - distance + ddmc)
+                    ratio = ratio * ratio
+                    # Map back to the source position
+                    UX = i - ratio * (endX - startX)
+                    UY = j - ratio * (endY - startY)
+
+                    # Get the value at (UX, UY) by bilinear interpolation
+                    # start_ = time.time()
+                    value = BilinearInsert(srcImg, UX, UY)
+                    # print(f"bilinear interpolation took: {time.time() - start_}")
+                    # Update the current (i, j)
+                    copyImg[j, i] = value
+        return copyImg
+
+    # Thinning pro1: limits how far the for loops iterate
+    @staticmethod
+    def localTranslationWarpLimitFor(
+        srcImg, startP: np.matrix, endP: np.matrix, radius: float
+    ):
+        startX, startY = startP[0, 0], startP[0, 1]
+        endX, endY = endP[0, 0], endP[0, 1]
+
+        # Bilinear interpolation
+        def BilinearInsert(src, ux, uy):
+            w, h, c = src.shape
+            if c == 3:
+                x1 = int(ux)
+                x2 = x1 + 1
+                y1 = int(uy)
+                y2 = y1 + 1
+                part1 = (
+                    src[y1, x1].astype(np.float64) * (float(x2) - ux) * (float(y2) - uy)
+                )
+                part2 = (
+                    src[y1, x2].astype(np.float64) * (ux - float(x1)) * (float(y2) - uy)
+                )
+                part3 = (
+                    src[y2, x1].astype(np.float64) * (float(x2) - ux) * (uy - float(y1))
+                )
+                part4 = (
+                    src[y2, x2].astype(np.float64) * (ux - float(x1)) * (uy - float(y1))
+                )
+                insertValue = part1 + part2 + part3 + part4
+                return insertValue.astype(np.int8)
+
+        ddradius = float(radius * radius)  # squared radius of the circle
+        copyImg = srcImg.copy()  # copied image matrix
+        # |m-c|^2 from the formula
+        ddmc = (endX - startX) ** 2 + (endY - startY) ** 2
+        # Top-left starting point of the bounding square
+        startTX, startTY = (
+            startX - math.floor(radius + 1),
+            startY - math.floor((radius + 1)),
+        )
+        # Bottom-right end point of the bounding square
+        endTX, endTY = (
+            startX + math.floor(radius + 1),
+            startY + math.floor((radius + 1)),
+        )
+        # Crop srcImg
+        srcImg = srcImg[startTY : endTY + 1, startTX : endTX + 1, :]
+        # db.cv_show(srcImg)
+        # After cropping, both x and y are reduced by startX - math.floor(radius + 1)
+        # The original endX, endY in the cropped coordinates
+        endX, endY = (
+            endX - startX + math.floor(radius + 1),
+            endY - startY + math.floor(radius + 1),
+        )
+        # The original startX, startY in the cropped coordinates
+        startX, startY = (math.floor(radius + 1), math.floor(radius + 1))
+        H, W, C = srcImg.shape  # image shape
+        for i in range(W):
+            for j in range(H):
+                # Check whether the point lies inside the warp circle
+                # Optimization step 1: first test against the square around (startX, startY)
+                # if math.fabs(i - startX) > radius and math.fabs(j - startY) > radius:
+                #     continue
+                distance = (i - startX) * (i - startX) + (j - startY) * (j - startY)
+                if distance < ddradius:
+                    # Compute the original coordinates of (i, j)
+                    # The squared part on the right-hand side of the formula
+                    ratio = (ddradius - distance) / (ddradius - distance + ddmc)
+                    ratio = ratio * ratio
+                    # Map back to the source position
+                    UX = i - ratio * (endX - startX)
+                    UY = j - ratio * (endY - startY)
+
+                    # Get the value at (UX, UY) by bilinear interpolation
+                    # start_ = time.time()
+                    value = BilinearInsert(srcImg, UX, UY)
+                    # print(f"bilinear interpolation took: {time.time() - start_}")
+                    # Update the current (i, j)
+                    copyImg[j + startTY, i + startTX] = value
+        return copyImg
+
+    # # Thinning pro2: processes with numpy's custom-ufunc machinery
+    # def localTranslationWarpNumpy(self, srcImg, startP: np.matrix, endP: np.matrix, radius: float):
+    #     startX , startY = startP[0, 0], startP[0, 1]
+    #     endX, endY = endP[0, 0], endP[0, 1]
+    #     ddradius = float(radius * radius)  # squared radius of the circle
+    #     copyImg = srcImg.copy()  # copied image matrix
+    #     # |m-c|^2 from the formula
+    #     ddmc = (endX - startX)**2 + (endY - startY)**2
+    #     # Top-left starting point of the bounding square
+    #     startTX, startTY = (startX - math.floor(radius + 1), startY - math.floor((radius + 1)))
+    #     # Bottom-right end point of the bounding square
+    #     endTX, endTY = (startX + math.floor(radius + 1), startY + math.floor((radius + 1)))
+    #     # Crop srcImg
+    #     self.thinImage = srcImg[startTY : endTY + 1, startTX : endTX + 1, :]
+    #     # s = self.thinImage
+    #     # db.cv_show(srcImg)
+    #     # After cropping, both x and y are reduced by startX - math.floor(radius + 1)
+    #     # The original endX, endY in the cropped coordinates
+    #     endX, endY = (endX - startX + math.floor(radius + 1), endY - startY + math.floor(radius + 1))
+    #     # The original startX, startY in the cropped coordinates
+    #     startX ,startY = (math.floor(radius + 1), math.floor(radius + 1))
+    #     H, W, C = self.thinImage.shape  # image shape
+    #     index_m = np.arange(H * W).reshape((H, W))
+    #     triangle_ufunc = np.frompyfunc(self.process, 9, 3)
+    #     # start_ = time.time()
+    #     finalImgB, finalImgG, finalImgR = triangle_ufunc(index_m, self, W, ddradius, ddmc, startX, startY, endX, endY)
+    #     finaleImg = np.dstack((finalImgB, finalImgG, finalImgR)).astype(np.uint8)
+    #     finaleImg = np.fliplr(np.rot90(finaleImg, -1))
+    #     copyImg[startTY: endTY + 1, startTX: endTX + 1, :] = finaleImg
+    #     # print(f"image processing took: {time.time() - start_}")
+    #     # db.cv_show(copyImg)
+    #     return copyImg
+
+    # Thinning pro3: uses OpenCV built-in functions
+    @staticmethod
+    def localTranslationWarpFastWithStrength(
+        srcImg, startP: np.matrix, endP: np.matrix, radius, strength: float = 100.0
+    ):
+        """
+        Uses OpenCV built-in functions
+        Args:
+            srcImg: source image
+            startP: start point
+            endP: end point
+            radius: processing radius
+            strength: thinning strength, usually 100 or above
+
+        Returns:
+
+        """
+        startX, startY = startP[0, 0], startP[0, 1]
+        endX, endY = endP[0, 0], endP[0, 1]
+        ddradius = float(radius * radius)
+        # copyImg = np.zeros(srcImg.shape, np.uint8)
+        # copyImg = srcImg.copy()
+
+        maskImg = np.zeros(srcImg.shape[:2], np.uint8)
+        cv2.circle(maskImg, (startX, startY), math.ceil(radius), (255, 255, 255), -1)
+
+        K0 = 100 / strength
+
+        # |m-c|^2 from the formula
+        ddmc_x = (endX - startX) * (endX - startX)
+        ddmc_y = (endY - startY) * (endY - startY)
+        H, W, C = srcImg.shape
+
+        mapX = np.vstack([np.arange(W).astype(np.float32).reshape(1, -1)] * H)
+        mapY = np.hstack([np.arange(H).astype(np.float32).reshape(-1, 1)] * W)
+
+        distance_x = (mapX - startX) * (mapX - startX)
+        distance_y = (mapY - startY) * (mapY - startY)
+        distance = distance_x + distance_y
+        K1 = np.sqrt(distance)
+        ratio_x = (ddradius - distance_x) / (ddradius - distance_x + K0 * ddmc_x)
+        ratio_y = (ddradius - distance_y) / (ddradius - distance_y + K0 * ddmc_y)
+        ratio_x = ratio_x * ratio_x
+        ratio_y = ratio_y * ratio_y
+
+        UX = mapX - ratio_x * (endX - startX) * (1 - K1 / radius)
+        UY = mapY - ratio_y * (endY - startY) * (1 - K1 / radius)
+
+        np.copyto(UX, mapX, where=maskImg == 0)
+        np.copyto(UY, mapY, where=maskImg == 0)
+        UX = UX.astype(np.float32)
+        UY = UY.astype(np.float32)
+        copyImg = cv2.remap(srcImg, UX, UY, interpolation=cv2.INTER_LINEAR)
+        return copyImg
+
+
+def thinFace(src, landmark, place: int = 0, strength=30.0):
+    """
+    Face-thinning interface: given face-landmark information and a strength, thins the face
+    Note: this handles 4-channel images
+    Args:
+        src: source image
+        landmark: landmark information
+        place: thinning region, a value between 0-4
+        strength: thinning strength, a value between 0-10; 0 or below disables thinning
+
+    Returns:
+        the thinned image
+    """
+    strength = min(100.0, strength * 10.0)
+    if strength <= 0.0:
+        return src
+    # The thinning region can also be configured
+    place = max(0, min(4, int(place)))
+    left_landmark = landmark[4 + place]
+    left_landmark_down = landmark[6 + place]
+    right_landmark = landmark[13 + place]
+    right_landmark_down = landmark[15 + place]
+    endPt = landmark[58]
+    # Distance from point 4 to point 6, used as the left thinning distance
+    r_left = math.sqrt(
+        (left_landmark[0, 0] - left_landmark_down[0, 0]) ** 2
+        + (left_landmark[0, 1] - left_landmark_down[0, 1]) ** 2
+    )
+
+    # Distance from point 14 to point 16, used as the right thinning distance
+    r_right = math.sqrt(
+        (right_landmark[0, 0] - right_landmark_down[0, 0]) ** 2
+        + (right_landmark[0, 1] - right_landmark_down[0, 1]) ** 2
+    )
+    # Thin the left side of the face
+    thin_image = TranslationWarp.localTranslationWarpFastWithStrength(
+        src, left_landmark[0], endPt[0], r_left, strength
+    )
+    # Thin the right side of the face
+    thin_image = TranslationWarp.localTranslationWarpFastWithStrength(
+        thin_image, right_landmark[0], endPt[0], r_right, strength
+    )
+    return thin_image
+
+
+# if __name__ == "__main__":
+#     import os
+#     from hycv.FaceDetection68.faceDetection68 import FaceDetection68
+
+#     local_file = os.path.dirname(__file__)
+#     PREDICTOR_PATH = f"{local_file}/weights/shape_predictor_68_face_landmarks.dat"  # landmark-detection model path
+#     fd68 = FaceDetection68(model_path=PREDICTOR_PATH)
+#     input_image = cv2.imread("test_image/4.jpg", -1)
+#     _, landmark_, _ = fd68.facePoints(input_image)
+#     output_image = thinFace(input_image, landmark_, strength=30.2)
+#     cv2.imwrite("thinFaceCompare.png", np.hstack((input_image, output_image)))
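All three variants implement the classic local translation warp: pixels inside a circle of radius r around the landmark are pulled toward it with weight ((r^2 - d^2) / (r^2 - d^2 + K0 * |m - c|^2))^2, and the fast version evaluates that field once per axis and resamples with cv2.remap. A hypothetical call, assuming 68-point landmarks indexed as 1x2 np.matrix rows (the landmark detector itself is not part of this commit):

```python
import cv2
import numpy as np
from hivision.plugin.beauty.thin_face import thinFace

img = cv2.imread("portrait.jpg")  # illustrative path
points = detect_68_points(img)    # hypothetical 68-point landmark detector
landmark = [np.matrix([[int(x), int(y)]]) for x, y in points]
out = thinFace(img, landmark, place=0, strength=3.0)
cv2.imwrite("thin_face.png", out)
```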
hivision/plugin/beauty/whitening.py
ADDED
@@ -0,0 +1,83 @@
+import cv2
+import numpy as np
+import os
+import gradio as gr
+
+
+class LutWhite:
+    CUBE64_ROWS = 8
+    CUBE64_SIZE = 64
+    CUBE256_SIZE = 256
+    CUBE_SCALE = CUBE256_SIZE // CUBE64_SIZE
+
+    def __init__(self, lut_image):
+        self.lut = self._create_lut(lut_image)
+
+    def _create_lut(self, lut_image):
+        reshape_lut = np.zeros(
+            (self.CUBE256_SIZE, self.CUBE256_SIZE, self.CUBE256_SIZE, 3), dtype=np.uint8
+        )
+        for i in range(self.CUBE64_SIZE):
+            tmp = i // self.CUBE64_ROWS
+            cx = (i % self.CUBE64_ROWS) * self.CUBE64_SIZE
+            cy = tmp * self.CUBE64_SIZE
+            cube64 = lut_image[cy : cy + self.CUBE64_SIZE, cx : cx + self.CUBE64_SIZE]
+            if cube64.size == 0:
+                continue
+            cube256 = cv2.resize(cube64, (self.CUBE256_SIZE, self.CUBE256_SIZE))
+            reshape_lut[i * self.CUBE_SCALE : (i + 1) * self.CUBE_SCALE] = cube256
+        return reshape_lut
+
+    def apply(self, src):
+        b, g, r = src[:, :, 0], src[:, :, 1], src[:, :, 2]
+        return self.lut[b, g, r]
+
+
+class MakeWhiter:
+    def __init__(self, lut_image):
+        self.lut_white = LutWhite(lut_image)
+
+    def run(self, src: np.ndarray, strength: int) -> np.ndarray:
+        strength = np.clip(strength / 10.0, 0, 1)
+        if strength <= 0:
+            return src
+        img = self.lut_white.apply(src[:, :, :3])
+        return cv2.addWeighted(src[:, :, :3], 1 - strength, img, strength, 0)
+
+
+base_dir = os.path.dirname(os.path.abspath(__file__))
+default_lut = cv2.imread(os.path.join(base_dir, "lut/lut_origin.png"))
+make_whiter = MakeWhiter(default_lut)
+
+
+def make_whitening(image, strength):
+    image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
+    output_image = make_whiter.run(image, strength)
+    return cv2.cvtColor(output_image, cv2.COLOR_BGR2RGB)
+
+
+def make_whitening_png(image, strength):
+    image = cv2.cvtColor(np.array(image), cv2.COLOR_RGBA2BGRA)
+
+    b, g, r, a = cv2.split(image)
+    bgr_image = cv2.merge((b, g, r))
+
+    b_w, g_w, r_w = cv2.split(make_whiter.run(bgr_image, strength))
+    output_image = cv2.merge((b_w, g_w, r_w, a))
+
+    return cv2.cvtColor(output_image, cv2.COLOR_RGBA2BGRA)
+
+
+# Launch the Gradio app
+if __name__ == "__main__":
+    demo = gr.Interface(
+        fn=make_whitening_png,
+        inputs=[
+            gr.Image(type="pil", image_mode="RGBA", label="Input Image"),
+            gr.Slider(0, 10, step=1, label="Whitening Strength"),
+        ],
+        outputs=gr.Image(type="pil"),
+        title="Image Whitening Demo",
+        description="Upload an image and adjust the whitening strength to see the effect.",
+    )
+    demo.launch()
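The LUT format assumed by _create_lut is a 512x512 image of 64 tiles (an 8x8 grid of 64x64 tiles): tile i holds the slice for coarse blue level i, rows encode green and columns encode red, and each tile is upsampled to 256x256 so the final table is a full 256^3 lookup indexed as lut[b, g, r]. Building an identity LUT in that layout and checking that it approximately reproduces the input confirms the orientation; the only error comes from the 4-step quantization:

```python
import numpy as np
from hivision.plugin.beauty.whitening import LutWhite

# Identity LUT in the assumed 512x512 layout: tile i holds blue = i*4,
# rows encode green, columns encode red (values quantized in steps of 4).
lut = np.zeros((512, 512, 3), dtype=np.uint8)
for i in range(64):
    cy, cx = (i // 8) * 64, (i % 8) * 64
    g, r = np.mgrid[0:256:4, 0:256:4].astype(np.uint8)
    lut[cy : cy + 64, cx : cx + 64] = np.dstack(
        [np.full((64, 64), i * 4, dtype=np.uint8), g, r]
    )

img = np.random.randint(0, 256, (16, 16, 3), dtype=np.uint8)
out = LutWhite(lut).apply(img)
assert np.max(np.abs(out.astype(int) - img.astype(int))) <= 4  # quantization error only
```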
hivision/utils.py
CHANGED
@@ -12,6 +12,7 @@ import io
 import numpy as np
 import cv2
 import base64
+from hivision.plugin.watermark import Watermarker, WatermarkerStyles
 
 
 def resize_image_to_kb(input_image, output_image_path, target_size_kb):
@@ -74,12 +75,6 @@ def resize_image_to_kb(input_image, output_image_path, target_size_kb):
     quality = 1
 
 
-import numpy as np
-from PIL import Image
-import io
-import base64
-
-
 def resize_image_to_kb_base64(input_image, target_size_kb, mode="exact"):
     """
     Resize an image to a target size in KB and return it as a base64 encoded string.
@@ -153,13 +148,20 @@ def resize_image_to_kb_base64(input_image, target_size_kb, mode="exact"):
     return img_base64
 
 
-def numpy_2_base64(img: np.ndarray):
+def numpy_2_base64(img: np.ndarray) -> str:
     _, buffer = cv2.imencode(".png", img)
     base64_image = base64.b64encode(buffer).decode("utf-8")
 
     return base64_image
 
 
+def base64_2_numpy(base64_image: str) -> np.ndarray:
+    img = base64.b64decode(base64_image)
+    img = np.frombuffer(img, np.uint8)
+
+    return img
+
+
 def save_numpy_image(numpy_img, file_path):
     # Check the array shape
     if numpy_img.shape[2] == 4:
@@ -273,3 +275,20 @@ def add_background(input_image, bgr=(0, 0, 0), mode="pure_color"):
     )
 
     return output
+
+
+def add_watermark(
+    image, text, size=50, opacity=0.5, angle=45, color="#8B8B1B", space=75
+):
+    image = Image.fromarray(image)
+    watermarker = Watermarker(
+        input_image=image,
+        text=text,
+        style=WatermarkerStyles.STRIPED,
+        angle=angle,
+        color=color,
+        opacity=opacity,
+        size=size,
+        space=space,
+    )
+    return np.array(watermarker.image.convert("RGB"))