iniit
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the rest.
- .gitattributes +1 -0
- .gitignore +2 -0
- .gradio/cached_examples/25/Output/41d0a13d3c5d3c05accc/00003245_00.webp +0 -0
- .gradio/cached_examples/25/log.csv +2 -0
- app.py +232 -0
- close_eyes.py +120 -0
- close_lip.py +150 -0
- create_bottom_lip.py +212 -0
- create_chin_image.py +86 -0
- create_hole_image.py +149 -0
- create_no_mouth.py +97 -0
- create_top_lip.py +284 -0
- demo_footer.html +3 -0
- demo_header.html +16 -0
- demo_tools.html +10 -0
- draw_landmarks68.py +516 -0
- examples/00002062.jpg +0 -0
- examples/00002062.webp +0 -0
- examples/00003245_00.jpg +0 -0
- examples/00003245_00.webp +0 -0
- examples/00100265.jpg +0 -0
- examples/00100265.webp +0 -0
- examples/00824006.jpg +0 -0
- examples/00824006.webp +0 -0
- examples/00824008.jpg +0 -0
- examples/00824008.webp +0 -0
- examples/00825000.jpg +0 -0
- examples/00825000.webp +0 -0
- examples/00826007.jpg +0 -0
- examples/00826007.webp +0 -0
- examples/00827009.jpg +0 -0
- examples/00827009.webp +0 -0
- examples/00828003.jpg +0 -0
- examples/00828003.webp +0 -0
- face_landmarker.task +3 -0
- face_landmarker.task.txt +8 -0
- glibvision/common_utils.py +112 -0
- glibvision/cv2_utils.py +138 -0
- glibvision/glandmark_utils.py +48 -0
- glibvision/numpy_utils.py +110 -0
- glibvision/pil_utils.py +14 -0
- gradio_utils.py +60 -0
- hole_images/black.jpg +0 -0
- hole_images/dark01.jpg +0 -0
- hole_images/mid01.jpg +0 -0
- hole_images/mid02.jpg +0 -0
- landmarks68_utils.py +147 -0
- lip_utils.py +781 -0
- mp_box.py +133 -0
- mp_constants.py +320 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.task filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,2 @@
+__pycache__
+files
.gradio/cached_examples/25/Output/41d0a13d3c5d3c05accc/00003245_00.webp
ADDED
.gradio/cached_examples/25/log.csv
ADDED
@@ -0,0 +1,2 @@
+Output,timestamp
+"[{""image"": {""path"": "".gradio\\cached_examples\\25\\Output\\41d0a13d3c5d3c05accc\\00003245_00.webp"", ""url"": ""/gradio_api/file=C:\\Users\\owner\\AppData\\Local\\Temp\\gradio\\931aec3e3a5351edb4bba6660e3848db2c5b49fd47c1f07afe9f05213b87363b\\00003245_00.webp"", ""size"": null, ""orig_name"": ""00003245_00.webp"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, ""caption"": ""animation""}]",2024-11-16 16:03:27.862651
app.py
ADDED
@@ -0,0 +1,232 @@
+import spaces
+import gradio as gr
+import subprocess
+from PIL import Image
+import json
+import os
+import time
+
+import mp_box
+import draw_landmarks68
+import landmarks68_utils
+import io
+import numpy as np
+
+from glibvision.cv2_utils import pil_to_bgr_image, bgr_to_rgb
+from gradio_utils import save_image, save_buffer, clear_old_files, read_file
+from close_eyes import process_close_eyes_image
+from open_mouth import process_open_mouth
+'''
+Face detection based on face-landmark detection.
+https://ai.google.dev/edge/mediapipe/solutions/vision/face_landmarker
+From the model card:
+https://storage.googleapis.com/mediapipe-assets/MediaPipe%20BlazeFace%20Model%20Card%20(Short%20Range).pdf
+Licensed under the Apache License, Version 2.0.
+Trained on Google's dataset (see the model card for details).
+'''
+
+#@spaces.GPU(duration=120)
+def process_images(image, eyelid_thick=1, eyelid_blur=9, inpaint_radius=10, inpaint_blur=30, mask_dilate=10, dilate_blur=10,
+                   open_size_y=8, inside_layer_low_depth=False, hole_image_name="dark01",
+                   make_animation=True, eyes_duration=200, mouth_duration=40,
+                   progress=gr.Progress(track_tqdm=True)):
+    clear_old_files()
+    if image is None:
+        raise gr.Error("Need Image")
+
+    progress(0, desc="Start Making Animation")
+    boxes, mp_image, face_landmarker_result = mp_box.mediapipe_to_box(image)
+    annotated_image, bbox, landmark_points = draw_landmarks68.draw_landmarks_on_image(image, face_landmarker_result)
+    landmark_list = draw_landmarks68.convert_to_landmark_group_json(landmark_points)
+
+    galleries = []
+
+    progressed = 0
+    progress_step = 0.8 / open_size_y
+    animations = []
+    np_image = pil_to_bgr_image(image)
+    if make_animation:
+        start_index = 0
+    else:
+        start_index = open_size_y - 1
+
+    for i in range(start_index, open_size_y):
+        mouth_opened = process_open_mouth(np_image, landmark_list, 0, i, True, inside_layer_low_depth, 0, hole_image_name + ".jpg")
+        animations.append(mouth_opened)
+        mouth_opened_path = save_image(mouth_opened)
+        galleries.append((mouth_opened_path, f"mouth-opened {i}"))
+        progressed += progress_step
+        progress(progressed)
+
+    if make_animation:
+        np_image = pil_to_bgr_image(animations[0])  # TODO option
+
+    eyes_closed_np, mask_np = process_close_eyes_image(np_image, landmark_list, eyelid_thick, eyelid_blur, inpaint_radius, inpaint_blur, mask_dilate, dilate_blur)
+    eyes_closed = Image.fromarray(bgr_to_rgb(eyes_closed_np))
+
+    eyes_closed_path = save_image(eyes_closed)
+    galleries.append((eyes_closed_path, "eyes-closed"))
+
+    eyes_closed_mask_path = save_image(Image.fromarray(mask_np))
+    galleries.append((eyes_closed_mask_path, "eyes-closed-mask"))
+
+    durations = [mouth_duration] * len(animations) * 2 + [eyes_duration]
+    if make_animation:
+        animations = animations + animations[::-1] + [eyes_closed]
+    output_buffer = io.BytesIO()
+    animations[0].save(output_buffer,
+                       save_all=True,
+                       append_images=animations[1:],
+                       duration=durations,
+                       loop=0,
+                       format='WebP')
+    webp_path = save_buffer(output_buffer)
+    #galleries.append((webp_path, "animation"))
+
+    return webp_path, galleries
+
+
+css = """
+#col-left {
+    margin: 0 auto;
+    max-width: 640px;
+}
+#col-right {
+    margin: 0 auto;
+    max-width: 640px;
+}
+.grid-container {
+    display: flex;
+    align-items: center;
+    justify-content: center;
+    gap: 10px;
+}
+
+.image {
+    width: 128px;
+    height: 128px;
+    object-fit: cover;
+}
+
+.text {
+    font-size: 16px;
+}
+"""
+
+
+with gr.Blocks(css=css, elem_id="demo-container") as demo:
+    with gr.Column():
+        gr.HTML(read_file("demo_header.html"))
+        gr.HTML(read_file("demo_tools.html"))
+        with gr.Row():
+            with gr.Column():
+                image = gr.Image(height=800, sources=['upload', 'clipboard'], image_mode='RGB', elem_id="image_upload", type="pil", label="Upload")
+                with gr.Row(elem_id="prompt-container", equal_height=False):
+                    with gr.Row():
+                        btn = gr.Button("Create Closed-eye and Mouth-opened", elem_id="run_button", variant="primary")
+
+                with gr.Accordion(label="Eyes-Closed Advanced Settings", open=False):
+                    with gr.Row(equal_height=True):
+                        eyelid_thick = gr.Slider(
+                            label="Eyelid thick",
+                            minimum=0,
+                            maximum=20,
+                            step=1,
+                            value=1)
+                        eyelid_blur = gr.Slider(
+                            label="Eyelid blur",
+                            minimum=0,
+                            maximum=30,
+                            step=1,
+                            value=7)
+                    with gr.Row(equal_height=True):
+                        inpaint_radius = gr.Slider(
+                            label="Inpaint Radius",
+                            minimum=1,
+                            maximum=20,
+                            step=1,
+                            value=10, info="increasing makes it smoother but slower")
+                        inpaint_blur = gr.Slider(
+                            label="Inpaint blur",
+                            minimum=0,
+                            maximum=30,
+                            step=1,
+                            value=20)
+                    with gr.Row(equal_height=True):
+                        mask_dilate = gr.Slider(
+                            label="Mask dilate",
+                            minimum=0,
+                            maximum=20,
+                            step=1,
+                            value=10)
+
+                        dilate_blur = gr.Slider(
+                            label="Dilate blur",
+                            minimum=0,
+                            maximum=20,
+                            step=1,
+                            value=10)
+                    with gr.Row(equal_height=True):
+                        eyes_duration = gr.Slider(
+                            label="Eyes-closed animation duration",
+                            minimum=1,
+                            maximum=500,
+                            step=1,
+                            value=200)
+                with gr.Accordion(label="Mouth-Opened Advanced Settings", open=False):
+                    with gr.Row(equal_height=True):
+                        make_animation = gr.Checkbox(label="animation", value=True, info="takes a long time if open size is large")
+                        open_size_y = gr.Slider(
+                            label="Open Size",
+                            minimum=1,
+                            maximum=40,
+                            step=1,
+                            value=8, info="large sizes are for img2img/inpaint")
+                        inside_layer_low_depth = gr.Checkbox(label="Inner Layer Low", value=False, info="check this for a better result if open size > 20")
+
+                        hole_image_name = gr.Dropdown(label="inner image name", choices=["dark01", "black", "mid01", "mid02"], value="dark01", info="black is better for img2img")
+                    with gr.Row(equal_height=True):
+                        mouth_duration = gr.Slider(
+                            label="mouth-open animation duration", info="per frame",
+                            minimum=1,
+                            maximum=500,
+                            step=1,
+                            value=40)
+            with gr.Column():
+                animation_out = gr.Image(height=760, label="Animation", elem_id="output-animation")
+                image_out = gr.Gallery(label="Output", elem_id="output-img", preview=True)
+
+        btn.click(fn=process_images, inputs=[image, eyelid_thick, eyelid_blur, inpaint_radius, inpaint_blur, mask_dilate, dilate_blur,
+                                             open_size_y, inside_layer_low_depth, hole_image_name, make_animation,
+                                             eyes_duration, mouth_duration], outputs=[animation_out, image_out], api_name='infer')
+        gr.Examples(
+            examples=[
+                ["examples/00003245_00.jpg", "examples/00003245_00.webp"],
+                ["examples/00002062.jpg", "examples/00002062.webp"],
+                ["examples/00100265.jpg", "examples/00100265.webp"],
+                ["examples/00824006.jpg", "examples/00824006.webp"],
+                ["examples/00824008.jpg", "examples/00824008.webp"],
+                ["examples/00825000.jpg", "examples/00825000.webp"],
+                ["examples/00826007.jpg", "examples/00826007.webp"],
+                ["examples/00827009.jpg", "examples/00827009.webp"],
+                ["examples/00828003.jpg", "examples/00828003.webp"],
+            ],
+            inputs=[image, animation_out], examples_per_page=5
+        )
+        gr.HTML(read_file("demo_footer.html"))
+
+if __name__ == "__main__":
+    demo.launch()
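
For reference, `process_images` leans on Pillow's ability to take a per-frame `duration` list when writing an animated WebP. A minimal standalone sketch of that save call, using synthetic placeholder frames in place of the generated mouth/eye frames:

```python
# Minimal sketch of the per-frame-duration WebP save used in process_images.
# The gradient frames are placeholders; only the save() call mirrors app.py.
from PIL import Image

frames = [Image.new("RGB", (64, 64), (i * 30 % 256, 0, 0)) for i in range(8)]
# forward + reverse + a final held frame, like the mouth open/close loop
frames = frames + frames[::-1] + [frames[0]]
durations = [40] * (len(frames) - 1) + [200]  # milliseconds per frame; last frame held longer

frames[0].save(
    "animation.webp",
    save_all=True,
    append_images=frames[1:],
    duration=durations,  # Pillow accepts a list: one duration per frame
    loop=0,              # loop forever
    format="WebP",
)
```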
close_eyes.py
ADDED
@@ -0,0 +1,120 @@
+
+"""
+close_eye.py
+
+Creates an image with the eyes closed.
+
+The eye and upper-eyelash regions are taken as points and inpainted.
+The inpainted area is blurred, pasted back onto the original image, and blurred again.
+
+Author: Akihito Miyazaki
+Created: 2024-04-23
+Changelog:
+ - 2024-04-23: first release
+ - 2024-09-24: added name suffix
+ - 2024-11-15: changed for huggingface
+"""
+import os
+import cv2
+import numpy as np
+
+
+from glibvision.numpy_utils import bulge_polygon
+
+from glibvision.cv2_utils import fill_points, get_image_size, gray3d_to_2d, blend_rgb_images, create_color_image
+from landmarks68_utils import get_left_upper_eyelid_points, get_right_upper_eyelid_points, get_bulged_eyes, get_close_eyelid_point
+
+
+def create_eyelid_mask(image, face_landmarks_list, thick=1, bulge=0.2):
+    black = create_color_image(image, (0, 0, 0))
+    left_eyelid = get_left_upper_eyelid_points(face_landmarks_list)
+    left_eyelid = bulge_polygon(left_eyelid, bulge)
+    fill_points(black, left_eyelid)
+
+    print("right")
+    right_eyelid = get_right_upper_eyelid_points(face_landmarks_list)
+    print(right_eyelid)
+    right_eyelid = bulge_polygon(right_eyelid, bulge)
+    fill_points(black, right_eyelid)
+
+    eyes_points = get_bulged_eyes(face_landmarks_list)
+    for points in eyes_points:
+        np_points = np.array(points, dtype=np.int32)
+        cv2.fillPoly(black, [np_points], (255, 255, 255))  # add the eye interiors to the mask
+        if thick > 0:
+            cv2.polylines(black, [np_points], isClosed=False, color=(255, 255, 255), thickness=thick)
+
+    return cv2.cvtColor(black, cv2.COLOR_BGR2GRAY)
+
+DEBUG = False
+def process_close_eyes_image(img, landmarks_list, eyelid_thick=1, eyelid_blur=9, inpaint_radius=10, inpaint_blur=30, mask_dilate=10, dilate_blur=10):
+    img_h, img_w = get_image_size(img)
+
+    eyelid_mask = create_eyelid_mask(img, landmarks_list)
+    if DEBUG:
+        cv2.imwrite("close_eye_mask.jpg", eyelid_mask)
+
+    mask = gray3d_to_2d(eyelid_mask)
+
+    img_inpainted = cv2.inpaint(img, mask, inpaint_radius, cv2.INPAINT_TELEA)
+    if DEBUG:
+        cv2.imwrite("close_eye_inpaint.jpg", img_inpainted)
+
+    ## Blur the inpainted image.
+    if inpaint_blur > 0:
+        if inpaint_blur % 2 == 0:  # an even kernel size would raise an error
+            inpaint_blur += 1
+        blurred_image = cv2.GaussianBlur(img_inpainted, (inpaint_blur, inpaint_blur), 0)
+        if DEBUG:
+            cv2.imwrite("close_eye_inpaint_burred.jpg", blurred_image)
+    else:
+        blurred_image = img_inpainted
+
+    # draw the eyelashes
+    if eyelid_thick > 0:
+        left, right = get_close_eyelid_point(landmarks_list)
+        for points in [left, right]:
+            print("## draw eyelid")
+            print(points)
+            cv2.polylines(blurred_image, [np.array(points)], isClosed=False, color=(0, 0, 0), thickness=eyelid_thick, lineType=cv2.LINE_AA)
+        if DEBUG:
+            cv2.imwrite("close_eye_inpaint_burred_eyeline.jpg", blurred_image)
+
+    if eyelid_thick > 0 and eyelid_blur > 0:
+        if eyelid_blur % 2 == 0:
+            eyelid_blur += 1
+        # blur the eyelid line
+        blurred_image = cv2.GaussianBlur(blurred_image, (eyelid_blur, eyelid_blur), 2)
+
+    print(mask_dilate, dilate_blur)
+    if mask_dilate > 0:
+        # expand a little beyond the inpaint boundary
+        kernel = np.ones((mask_dilate, mask_dilate), np.uint8)
+        extend_mask = cv2.dilate(mask, kernel, iterations=1)
+
+        if dilate_blur > 0:
+            if dilate_blur % 2 == 0:
+                dilate_blur += 1
+            extend_burred_mask = cv2.GaussianBlur(extend_mask, (dilate_blur, dilate_blur), 1)
+        else:
+            extend_burred_mask = extend_mask
+    else:
+        extend_burred_mask = mask
+
+    img_inpainted = blend_rgb_images(img, blurred_image, extend_burred_mask)
+
+    if DEBUG:
+        cv2.imwrite("create_no_mouth_image_merged.jpg", img_inpainted)
+
+    return img_inpainted, extend_burred_mask
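
The pipeline above is mask, then `cv2.inpaint`, then blur, then blend back under a dilated, blurred mask. A self-contained sketch of the same pattern on a placeholder image; `blend_rgb_images` is not part of this commit, so the final blend is assumed to be a plain per-pixel linear blend and is written out inline:

```python
# Sketch of the inpaint -> blur -> masked-blend pattern from process_close_eyes_image.
import cv2
import numpy as np

img = np.full((128, 128, 3), 180, np.uint8)                  # placeholder photo
mask = np.zeros((128, 128), np.uint8)
cv2.ellipse(mask, (64, 64), (30, 10), 0, 0, 360, 255, -1)    # fake eyelid region

inpainted = cv2.inpaint(img, mask, 10, cv2.INPAINT_TELEA)
blurred = cv2.GaussianBlur(inpainted, (31, 31), 0)           # kernel size must be odd

# dilate + blur the mask so the blend fades out past the inpaint boundary
soft = cv2.GaussianBlur(cv2.dilate(mask, np.ones((10, 10), np.uint8)), (11, 11), 1)
alpha = soft.astype(np.float32)[..., None] / 255.0
result = (img * (1.0 - alpha) + blurred * alpha).astype(np.uint8)
```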
close_lip.py
ADDED
@@ -0,0 +1,150 @@
+
+"""
+close_lip.py
+
+Creates an image with the lips closed.
+
+The open area of the lips is inpainted,
+then the hole region is put back as a shadow.
+
+Author: Akihito Miyazaki
+Created: 2024-04-23
+Changelog:
+ - 2024-04-23: first release
+ - 2024-11-16: converted to huggingface-space (but args broken)
+"""
+import os
+import cv2
+import numpy as np
+from PIL import Image
+import lip_utils
+
+
+from glibvision.cv2_utils import blend_rgb_images
+from glibvision.numpy_utils import apply_binary_mask_to_color, create_2d_image
+
+import argparse
+
+
+def create_top_lip_low_mask(image, face_landmarks_list, line_thick=1):
+    black = create_2d_image(image.shape)
+    lip_utils.fill_top_lower(black, face_landmarks_list, line_thick, lip_utils.COLOR_WHITE)
+    return black
+
+def create_lip_hole_mask(image, face_landmarks_list, line_thick=1):
+    black = create_2d_image(image.shape)
+    lip_utils.fill_lip_hole(black, face_landmarks_list, line_thick, lip_utils.COLOR_WHITE)
+    return black
+
+def process_close_lip_image(img, landmarks_list):
+    img_h, img_w = lip_utils.get_image_size(img)
+
+    hole_mask = create_lip_hole_mask(img, landmarks_list, 0)
+
+    lower_lip_mask = create_top_lip_low_mask(img, landmarks_list)
+
+    # these make the result dirty
+    #kernel = np.ones((3, 3), np.uint8)
+    #lower_lip_mask = cv2.erode(lower_lip_mask, kernel, iterations=1)
+
+    if lip_utils.DEBUG:
+        cv2.imwrite("close_lip_01_mask.jpg", lower_lip_mask)
+
+    mixed_mask = cv2.bitwise_or(hole_mask, lower_lip_mask)
+    if lip_utils.DEBUG:
+        cv2.imwrite("close_lip_01_mask_mixed.jpg", mixed_mask)
+
+    img_inpainted = cv2.inpaint(img, mixed_mask, 3, cv2.INPAINT_NS)
+    if lip_utils.DEBUG:
+        cv2.imwrite("close_lip_02_inpaint.jpg", img_inpainted)
+
+    copy_inpainted = img_inpainted.copy()
+    apply_binary_mask_to_color(copy_inpainted, (0, 8, 50), hole_mask)
+    #lip_utils.fill_lip_hole(img_inpainted, landmarks_list, 0, (0, 8, 50))  # BGR
+    if lip_utils.DEBUG:
+        cv2.imwrite("close_lip_03_hole.jpg", copy_inpainted)
+
+    ## Blur the inpainted image.
+    blurred_image = cv2.GaussianBlur(copy_inpainted, (9, 9), 0)  # the kernel size sometimes must be odd or an error occurs
+    if lip_utils.DEBUG:
+        cv2.imwrite("close_lip_04_burred.jpg", blurred_image)
+
+    # shrink a little from the inpaint boundary
+    kernel = np.ones((3, 3), np.uint8)
+    shrink_mask = cv2.erode(hole_mask, kernel, iterations=1)
+
+    shrink_burred_mask = cv2.GaussianBlur(shrink_mask, (3, 3), 0)
+    if lip_utils.DEBUG:
+        cv2.imwrite("close_lip_05_final_hole_mask.jpg", shrink_burred_mask)
+
+    img_inpainted = blend_rgb_images(img_inpainted, blurred_image, shrink_burred_mask)
+    if lip_utils.DEBUG:
+        cv2.imwrite("close_lip_05_final_hole.jpg", img_inpainted)
+    # expand a little beyond the inpaint boundary
+    kernel = np.ones((3, 3), np.uint8)
+    extend_mask = cv2.dilate(lower_lip_mask, kernel, iterations=1)
+
+    extend_burred_mask = cv2.GaussianBlur(extend_mask, (3, 3), 0)
+    if lip_utils.DEBUG:
+        cv2.imwrite("close_lip_05_final_lip_mask.jpg", extend_burred_mask)
+    img_inpainted = blend_rgb_images(img_inpainted, blurred_image, extend_burred_mask)
+
+    if lip_utils.DEBUG:
+        cv2.imwrite("close_lip_05_final_lip.jpg", img_inpainted)
+
+    mixed_mask = cv2.bitwise_or(shrink_burred_mask, extend_burred_mask)
+    mixed_mask[mixed_mask > 0] = 255
+    mixed_mask = cv2.dilate(mixed_mask, (1, 1), iterations=1)
+    # mixed_mask = cv2.GaussianBlur(mixed_mask, (3, 3), 0)
+
+    if lip_utils.DEBUG:
+        cv2.imwrite("close_lip_05_final_mixed_mask.jpg", mixed_mask)
+
+    return img_inpainted, mixed_mask
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description='Open Mouth')
+    parser.add_argument('--input', "-i", help='source image to convert (required); the mouth must be closed', required=True)
+    parser.add_argument('--output', "-o", help='where to save the image (temporary layer files are also created)')
+    parser.add_argument('--landmark', "-l", help='landmark data')
+    parser.add_argument('--scale', "-sc", help='scale; higher values improve accuracy', default=4, type=int)
+
+    args = parser.parse_args()
+    # path to the image file
+    img_path = args.input
+    img = cv2.imread(img_path)
+    #landmarks_list = landmark_utils.load_landmarks(img, args.scale, args.landmark)
+    landmarks_list = None  # landmark loading is broken since the huggingface conversion (see changelog)
+    lip_closed_image, mask = process_close_lip_image(img, landmarks_list)
+
+    output_path = args.output
+    if output_path is None:
+        parent_path, file = os.path.split(img_path)
+        name, ext = os.path.splitext(file)
+        output_path = os.path.join(parent_path, f"{name}_lipclose{ext}")
+
+    parent_path, file = os.path.split(output_path)
+    name, ext = os.path.splitext(file)
+
+    mask_path = os.path.join(parent_path, f"{name}_mask{ext}")
+    cv2.imwrite(mask_path, mask)
+    cv2.imwrite(output_path, lip_closed_image)
+    print(f"complete image {output_path} and mask {mask_path}")
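
The mask bookkeeping above erodes the hole mask (so the shadow blend stays well inside the hole), dilates the lower-lip mask (so the blend reaches slightly past the inpaint boundary), and ORs the two into the returned mask. A tiny self-contained sketch of that pattern with placeholder circle masks:

```python
# Sketch of the erode / dilate / OR mask handling in process_close_lip_image.
import cv2
import numpy as np

hole_mask = np.zeros((100, 100), np.uint8)
lip_mask = np.zeros((100, 100), np.uint8)
cv2.circle(hole_mask, (50, 60), 12, 255, -1)   # placeholder hole region
cv2.circle(lip_mask, (50, 50), 20, 255, -1)    # placeholder lower-lip region

kernel = np.ones((3, 3), np.uint8)
shrunk = cv2.erode(hole_mask, kernel, iterations=1)   # blend only well inside the hole
grown = cv2.dilate(lip_mask, kernel, iterations=1)    # blend slightly past the boundary

mixed = cv2.bitwise_or(shrunk, grown)
mixed[mixed > 0] = 255   # re-binarize, as the real code does after blurring
```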
create_bottom_lip.py
ADDED
@@ -0,0 +1,212 @@
+"""
+bottom_lip.py
+
+Part of open_mouth: generates the lower-lip layer.
+Does not work standalone yet.
+
+Author: Akihito Miyazaki
+Created: 2024-04-23
+Changelog:
+ - 2024-04-23: first release
+
+"""
+
+import cv2
+import numpy as np
+from PIL import Image
+from lip_utils import *
+import lip_utils
+from scipy.ndimage import binary_dilation, gaussian_filter
+
+def process_lip_image(img, landmarks_list, crop_image_margin, open_size_y, open_size_x):
+    """
+    Processes the lip image.
+
+    open_size_x cuts the edges in the final image;
+    its value is copied into side_tips.
+    """
+    side_tips = 0  # remove cropped pixels
+    edge_x = 0  # remove side pixels from the final image
+
+    # Tried lowering the center as the mouth opens, but a sagging middle looks
+    # extremely unnatural, so it was dropped. Eventually this should be used to
+    # deform the center of a 5-way split.
+    mid_lip_move_ratio = open_size_y / 80.0 if open_size_y > 0 else 0
+    mid_lip_move_ratio = 0
+
+    if open_size_x > 0:  # mysterious code? should be verified eventually
+        print("only support shorten use minus open")
+        side_tips = open_size_x
+        edge_x = int(open_size_x * 1.5)  # some magic number
+        #edge_x = 0
+        open_size_x = 0  # TODO move (some transform broken)
+
+    # Meant to cut the sides that stick out unnaturally, but it does not work
+    # well, so it is disabled. OFF because TOP and HOLE do not support it.
+    side_tips = 0  # remove cropped pixels
+    edge_x = 0  # remove side pixels from the final image
+
+    img_h, img_w = lip_utils.get_image_size(img)
+
+    # Extract and process the lip region (a margin is added; the transparency handling is questionable).
+    gaus = 4
+    box = lip_utils.get_points_box(landmarks_list, lip_utils.POINTS_BOTTOM_LIP, crop_image_margin)
+    align_points = lip_utils.get_bottom_lip_align_points(landmarks_list)
+    alpha_image, rec = get_alpha_image(img, landmarks_list, lip_utils.POINTS_BOTTOM_LIP, crop_image_margin, crop_image_margin, gaus)
+
+    # create the base of the image returned at the end
+    h, w = lip_utils.get_image_size(alpha_image)
+    if lip_utils.DEBUG:
+        cv2.imwrite("debug/bottom_lip_alpha.png", alpha_image)
+        print(f"bottom-lip cropped w = {w} h = {h}")
+    bottom_lip_final_image = lip_utils.create_rgba(w, h + open_size_y + 1)  # somehow the transform expands the image; TODO check whether this is still needed
+    bottom_lip_final_image_h, bottom_lip_final_image_w = lip_utils.get_image_size(bottom_lip_final_image)
+    print(f"bottom_lip_final_image:w = {bottom_lip_final_image_w} h = {bottom_lip_final_image_h}")
+
+    #local_align_points = lip_utils.offset_points(align_points, box[0])
+    #print(align_points)
+
+    # lip position, still under consideration https://github.com/akjava/lip_recognition_tools/issues/2
+    mid_left = int(w / 5 * 2)
+    mid_left = int(w / 3)
+
+    mid_right = bottom_lip_final_image_w - mid_left
+    print(f"image width = {bottom_lip_final_image_w} mid_left = {mid_left} mid_right ={mid_right}")
+
+    mid_center = int((mid_right + mid_left) / 2)  # used for lowering the middle in the past; probably no longer needed
+    mid_move_y_divided = 5  # 0 means no move
+
+    # Deform the middle-left lip; needlessly split in two for the center lowering. https://github.com/akjava/lip_recognition_tools/issues/3
+    mid_image_left = lip_utils.crop_image(alpha_image, mid_left, 0, mid_center, h)
+    mid_image_left_h, mid_image_left_w = lip_utils.get_image_size(mid_image_left)
+    max_w = mid_image_left_w
+    max_h = mid_image_left_h
+    opend_mid_lip_left = lip_utils.create_moved_image(mid_image_left,
+                                                      [(0, 0), (max_w, 0),
+                                                       (0, max_h), (max_w, max_h)],
+                                                      [(-0, -0), (max_w, int(max_h * mid_lip_move_ratio)),  # int(max_h/2)
+                                                       (0, max_h), (max_w, max_h)]
+                                                      )
+    # Unexpected size growth causes errors and should be avoided, but this needs more verification.
+    #opend_mid_lip_left = cv2.resize(opend_mid_lip_left, (max_w, max_h), interpolation=cv2.INTER_AREA)
+    lip_utils.print_width_height(mid_image_left, "mid-left")
+    lip_utils.print_width_height(opend_mid_lip_left, "moved-mid-left")
+
+    # deform the middle-right lip
+    mid_image_right = lip_utils.crop_image(alpha_image, mid_center, 0, mid_right, h)
+    mid_image_right_h, mid_image_right_w = lip_utils.get_image_size(mid_image_right)
+    max_w = mid_image_right_w
+    max_h = mid_image_right_h
+
+    opend_mid_lip_right = lip_utils.create_moved_image(mid_image_right,
+                                                       [(0, 0), (max_w, 0),
+                                                        (0, max_h), (max_w, max_h)],
+                                                       [(-0, int(max_h * mid_lip_move_ratio)), (max_w, 0),  # int(max_h/2)
+                                                        (0, max_h), (max_w, max_h)]
+                                                       )
+
+    #opend_mid_lip_right = cv2.resize(opend_mid_lip_right, (max_w, max_h), interpolation=cv2.INTER_AREA)
+    lip_utils.print_width_height(mid_image_right, "mid-right")
+    lip_utils.print_width_height(opend_mid_lip_right, "moved-mid-right")
+
+    # do not remove the side-tip area
+    left_image = lip_utils.crop_image(alpha_image, side_tips, 0, mid_left, h)
+    right_image = lip_utils.crop_image(alpha_image, mid_right, 0, w - side_tips, h)
+
+    # lower the left lip; the left edge stays fixed
+    left_lip_image_h, left_lip_image_w = lip_utils.get_image_size(left_image)
+    print(f"left-image:w = {left_lip_image_w} h = {left_lip_image_h}")
+
+    max_w = left_lip_image_w
+    max_h = left_lip_image_h
+    opend_lip_left = lip_utils.create_moved_image(left_image,
+                                                  [(0, 0), (max_w, 0),
+                                                   (0, max_h), (max_w, max_h)],
+                                                  [(0, -0), (max_w + open_size_x, open_size_y),
+                                                   (0, max_h - 0), (max_w + open_size_x, max_h + open_size_y)]
+                                                  )
+    left_lip_image_h, left_lip_image_w = lip_utils.get_image_size(opend_lip_left)
+    max_w = left_lip_image_w
+    max_h = left_lip_image_h
+
+    new_h, new_w = lip_utils.get_image_size(opend_lip_left)
+    print(f"left-image moved:w = {new_w} h = {new_h}")
+    if lip_utils.DEBUG:
+        cv2.imwrite("open_botto_lip_left.png", opend_lip_left)
+
+    # lower the right lip; the right edge stays fixed
+    right_lip_image_h, right_lip_image_w = lip_utils.get_image_size(right_image)
+    max_w = right_lip_image_w
+    max_h = right_lip_image_h
+    opend_lip_right = lip_utils.create_moved_image(right_image,
+                                                   [(0, 0), (max_w, 0),
+                                                    (0, max_h), (max_w, max_h)],
+                                                   [(0, open_size_y), (max_w + open_size_x, -0),
+                                                    (0, max_h + open_size_y), (0 + max_w + open_size_x, max_h - 0)]
+                                                   )
+    new_h, new_w = lip_utils.get_image_size(opend_lip_right)
+    print(f"right-image moved :w = {new_w} h = {new_h}")
+    if lip_utils.DEBUG:
+        cv2.imwrite("open_botto_lip_right.png", opend_lip_right)
+
+    # Draw each deformed image. Note this copies everything including alpha (the destination alpha is ignored).
+    #is this ok?
+    #lip_utils.copy_image(bottom_lip_final_image, mid_image, mid_left - 1, open_size_y)
+    #lip_utils.copy_image(bottom_lip_final_image, mid_image, mid_left, open_size_y)
+    ## center part
+    lip_utils.copy_image(bottom_lip_final_image, opend_mid_lip_left, mid_left, open_size_y)
+    lip_utils.copy_image(bottom_lip_final_image, opend_mid_lip_right, mid_center, open_size_y)
+
+    print(lip_utils.get_image_size(opend_lip_left))
+    print(lip_utils.get_image_size(bottom_lip_final_image))
+
+    ## left/right edges (side_tips)
+    lip_utils.copy_image(bottom_lip_final_image, opend_lip_left, open_size_x + side_tips, 0)
+    lip_utils.copy_image(bottom_lip_final_image, opend_lip_right, mid_right, 0)
+
+    #edge_x = 22  # for 40, still testing
+
+    # handle both edges https://github.com/akjava/lip_recognition_tools/issues/6
+    lip_utils.fade_in_x(bottom_lip_final_image, edge_x * 2)
+    lip_utils.fade_out_x(bottom_lip_final_image, edge_x * 2)
+
+    # create and save the final image
+    if lip_utils.DEBUG:
+        cv2.imwrite("bottom_lip_opend.png", bottom_lip_final_image)
+    face_size_image = lip_utils.create_rgba(img_w, img_h)
+    lip_utils.copy_image(face_size_image, bottom_lip_final_image, box[0][0], box[0][1])
+    if lip_utils.DEBUG:
+        cv2.imwrite("bottom_lip_layer.png", face_size_image)
+    return face_size_image
+
+
+if __name__ == "__main__":
+    # path to the image file
+    img_path = "straight.jpg"
+    img = cv2.imread(img_path)
+    landmarks_list = lip_utils.image_to_landmarks_list(img)
+    # parameters
+    crop_image_margin = 16
+    open_size_y = 10
+    open_size_x = -10
+
+    # call the function
+    process_lip_image(img, landmarks_list, crop_image_margin, open_size_y, open_size_x)
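
`create_moved_image` comes from `lip_utils`, which is not included in this diff. Judging by its (image, source corners, destination corners) arguments, it is presumably a 4-point perspective warp; a hypothetical sketch under that assumption:

```python
# Hypothetical stand-in for lip_utils.create_moved_image: warp the image so the
# four source corners land on the four destination corners.
import cv2
import numpy as np

def moved_image_sketch(image, src_points, dst_points):
    src = np.float32(src_points)
    dst = np.float32(dst_points)
    m = cv2.getPerspectiveTransform(src, dst)
    # grow the output canvas so the destination points fit
    out_w = int(max(p[0] for p in dst_points)) + 1
    out_h = int(max(p[1] for p in dst_points)) + 1
    return cv2.warpPerspective(image, m, (out_w, out_h))
```

This matches how the callers use it: the destination quad is the source quad with one or two corners shifted by `open_size_y`, which drops one side of the lip strip while the other stays fixed.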
create_chin_image.py
ADDED
@@ -0,0 +1,86 @@
+"""
+create_chin_image.py
+
+Part of open_mouth.py.
+
+Simply stretches the chin image vertically to match how far the mouth opens.
+
+The contour could be extracted more cleanly:
+https://github.com/akjava/lip_recognition_tools/issues/7
+
+Author: Akihito Miyazaki
+Created: 2024-04-23
+Changelog:
+ - 2024-04-23: first release
+
+"""
+
+import cv2
+import numpy as np
+from PIL import Image
+import lip_utils
+
+def process_chin_image(img, landmarks_list, margin, open_size_y, open_size_x):
+    img_h, img_w = lip_utils.get_image_size(img)
+
+    open_size_x = 0  # stop supporting this
+    if open_size_x > 0:
+        print("currently stop support open-size x")
+
+    jaw_points = lip_utils.get_jaw_points(landmarks_list)
+    print("### JAW POINT")
+    print(jaw_points)
+
+    box = lip_utils.points_to_box(jaw_points)
+    print(box)
+
+    cropped = lip_utils.crop_image_by_box(img, box)
+    cropped_img_h, cropped_img_w = lip_utils.get_image_size(cropped)
+    if lip_utils.DEBUG_CHIN:
+        cv2.imwrite("chin_cropped.jpg", cropped)
+    cropped_jaw_points = lip_utils.offset_points(jaw_points, box[0])
+    # what the hell is this?
+    #points = np.array(jaw_points, dtype=np.float32)
+    # get the rotated rectangle
+    #rect = cv2.minAreaRect(points)
+    #print(rect)
+
+    mask = lip_utils.create_mask_from_points(cropped, cropped_jaw_points, 4, 2)
+    if lip_utils.DEBUG_CHIN:
+        cv2.imwrite("chin_mask.jpg", mask)
+
+    #lip_utils.print_numpy(mask)
+    chin_image = lip_utils.apply_mask(cropped, mask)
+    chin_image_resized = cv2.resize(chin_image, (cropped_img_w, cropped_img_h + open_size_y), interpolation=cv2.INTER_LANCZOS4)
+    #chin_mask_image_resized = cv2.resize(mask, (cropped_img_w, cropped_img_h + open_size_y), interpolation=cv2.INTER_LANCZOS4)
+
+    #lip_utils.print_numpy(chin_image)
+    if lip_utils.DEBUG_CHIN:
+        cv2.imwrite("chin_resized.png", chin_image_resized)  # alpha images must be saved as png
+
+    full_rgba = lip_utils.create_rgba(img_w, img_h)
+    lip_utils.copy_image(full_rgba, chin_image_resized, box[0][0], box[0][1])
+    if lip_utils.DEBUG_CHIN:
+        cv2.imwrite("chin_full.png", full_rgba)
+
+    #mask_gray = lip_utils.create_gray(img_w, img_h)
+    #lip_utils.copy_image(mask_gray, chin_mask_image_resized, box[0][0], box[0][1])
+    # the chin mask is useless
+
+    return full_rgba
+
+if __name__ == "__main__":
+    # path to the image file
+    img_path = "straight.jpg"
+    img = cv2.imread(img_path)
+    img_h, img_w = lip_utils.get_image_size(img)
+    landmarks_list = lip_utils.image_to_landmarks_list(img)
+
+    # parameters
+    margin = 4
+    open_size_y = 20
+    open_size_x = 0
+
+    # call the function
+    process_chin_image(img, landmarks_list, margin, open_size_y, open_size_x)
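
`create_mask_from_points` is also defined in `lip_utils` and not shown here. From its call sites (image, points, dilation, blur) it presumably fills the point polygon, dilates it, and optionally softens the edge; a hypothetical sketch under that assumption:

```python
# Hypothetical stand-in for lip_utils.create_mask_from_points.
import cv2
import numpy as np

def mask_from_points_sketch(image, points, dilation=4, blur=2):
    mask = np.zeros(image.shape[:2], np.uint8)
    cv2.fillPoly(mask, [np.array(points, np.int32)], 255)   # fill the polygon
    if dilation > 0:
        mask = cv2.dilate(mask, np.ones((dilation, dilation), np.uint8), iterations=1)
    if blur > 0:
        k = blur if blur % 2 == 1 else blur + 1             # Gaussian kernel must be odd
        mask = cv2.GaussianBlur(mask, (k, k), 0)
    return mask
```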
create_hole_image.py
ADDED
@@ -0,0 +1,149 @@
+
+"""
+create_hole_image.py
+
+Part of open_mouth.py.
+
+Author: Akihito Miyazaki
+Created: 2024-04-23
+Changelog:
+ - 2024-04-23: first release
+ - 2024-09-15: added slide_amount
+"""
+
+import cv2
+import numpy as np
+from PIL import Image
+import lip_utils
+
+
+def vertical_slide(image, slide_amount):
+    height, width = image.shape[:2]
+
+    # clamp the slide amount to the image height
+    slide_amount = min(slide_amount, height)
+    slide_amount = max(slide_amount, -height)
+
+    slide_image = np.zeros_like(image)  # same size and dtype as the input
+
+    if slide_amount > 0:  # slide down
+        slide_image[slide_amount:, :] = image[:height - slide_amount, :]
+    elif slide_amount < 0:  # slide up
+        slide_image[:height + slide_amount, :] = image[-slide_amount:, :]
+    else:
+        slide_image = image.copy()
+
+    return slide_image
+
+
+def file_name_check(path):
+    max_name_limit = 50
+    check = True
+    if path.find("..") != -1:
+        check = False
+    if path.find("/") != -1:
+        check = False
+    if path.find("\\") != -1:
+        check = False
+    if path.find(":") != -1:
+        check = False
+    if len(path) > max_name_limit:
+        print(f"name is limited to {max_name_limit}")
+        check = False
+    if not check:
+        raise ValueError(f"Invalid Name {path}")
+
+
+def process_create_hole_image(img, landmarks_list, open_size_y=0, open_size_x=0, hole_offset=0, hole_image_name="dark01.jpg"):
+    file_name_check(hole_image_name)
+    img_h, img_w = lip_utils.get_image_size(img)
+
+    # Copy the image and draw the align points; the align box is the lip range with tilt taken into account.
+    img_lined = np.copy(img)
+
+    points = lip_utils.get_top_lip_align_points(landmarks_list)
+    img_lined1 = np.copy(img)
+    print(points)
+    cv2.polylines(img_lined1, [np.array(points)], isClosed=True, color=(0, 255, 0), thickness=1)
+    if lip_utils.DEBUG:
+        cv2.imwrite("create_hole_top_lip_align_line.jpg", img_lined1)
+
+    print(f"align point = {points}")
+    diff_align_x = points[0][0] - points[2][0]
+    print(f"diff_align_x = {diff_align_x}")
+    np_points = np.array(points)
+
+    diff_left = np_points[2] - np_points[0]   # left-bottom, left-up
+    diff_right = np_points[3] - np_points[1]  # right-bottom, right-up
+    print(f"diff left-y = {diff_left},diff right-y ={diff_right}")
+
+    top_lip_thicks = lip_utils.get_top_lip_thicks(landmarks_list)  # this ignores rotation
+    top_lip_thicks2 = lip_utils.get_top_lip_thicks(landmarks_list, True)
+
+    lip_thick = np.mean(top_lip_thicks)
+    lip_thick2 = np.mean(top_lip_thicks2)
+
+    base_mouth_size = lip_thick2 * 1.5
+
+    mouth_angle = lip_utils.calculate_clockwise_angle(points[2], points[3])
+    angled_point = lip_utils.calculate_new_point((0, 0), base_mouth_size, mouth_angle + 90)
+    angled_mouth_size = angled_point[1] + open_size_y
+    #print(f"lip_thick2={lip_thick2}")
+
+    print(f"lip thick2 ={lip_thick2} base_mouth_size={base_mouth_size} mouth_angle={mouth_angle} angled_mouth_size={angled_mouth_size}")
+    # The whole mouth is defined from the upper-lip extent, so it is multiplied by 1.x with no real justification. https://github.com/akjava/lip_recognition_tools/issues/8
+    diff_left[1] = angled_mouth_size
+    diff_right[1] = angled_mouth_size
+    diff_left[0] *= 0
+    diff_right[0] *= 0
+    expand_left = np_points[2] + diff_left
+    expand_right = np_points[3] + diff_right
+
+    # The X coordinates are also expanded, but they are basically unused and can be ignored.
+    expand_points = np.array([np_points[0], np_points[1], expand_left, expand_right])
+    print(f"expand_points = {[np_points[0], np_points[1], expand_left, expand_right]}")
+    cv2.polylines(img_lined, [expand_points], isClosed=True, color=(0, 255, 0), thickness=1)
+    if lip_utils.DEBUG:
+        cv2.imwrite("create_hole_image_top-align_line.jpg", img_lined)
+
+    # The hole image is fitted to this size rather loosely, so its position
+    # adjustment is very fiddly.
+    # TODO argument to specify the image https://github.com/akjava/lip_recognition_tools/issues/9
+    # https://github.com/akjava/lip_recognition_tools/issues/10
+    #hole_image = cv2.imread("hole_images/hole_01_light_dark.jpg")
+    hole_image = cv2.imread(f"hole_images/{hole_image_name}")
+    hole_image = vertical_slide(hole_image, hole_offset)
+    if lip_utils.DEBUG:
+        cv2.imwrite("create_hole_image-slided_hole_image.jpg", hole_image)
+    #exit(0)
+
+    hole_image_h, hole_image_w = lip_utils.get_image_size(hole_image)
+    max_w = hole_image_w
+    max_h = hole_image_h
+    expand_list = expand_points.tolist()
+    aligned_hole_image = lip_utils.create_moved_image(hole_image, [(0, 0), (max_w, 0),
+                                                                   (0, max_h), (max_w, max_h)],
+                                                      expand_list
+                                                      )
+    if lip_utils.DEBUG:
+        cv2.imwrite("create_hole_image_top-align_image.jpg", aligned_hole_image)
+
+    img_face = np.copy(img)
+    lip_utils.copy_image(img_face, aligned_hole_image, expand_list[0][0] - diff_align_x, (expand_list[0][1] + expand_list[1][1]) // 2)
+    if lip_utils.DEBUG:
+        cv2.imwrite("create_hole_image_top-align_face.jpg", img_face)
+    return img_face
+
+
+if __name__ == "__main__":
+    # path to the image file
+    img_path = "00012245.jpg"  # "straight.jpg"
+    img = cv2.imread(img_path)
+    landmarks_list = lip_utils.image_to_landmarks_list(img)
+    process_create_hole_image(img, landmarks_list)
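
A quick check of `vertical_slide` on a small array (with the function as defined above in scope):

```python
import numpy as np

img = np.arange(5 * 5 * 3, dtype=np.uint8).reshape(5, 5, 3)
down = vertical_slide(img, 2)    # content shifts down; the top 2 rows become zeros
up = vertical_slide(img, -2)     # content shifts up; the bottom 2 rows become zeros
assert (down[:2] == 0).all() and (up[-2:] == 0).all()
```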
create_no_mouth.py
ADDED
@@ -0,0 +1,97 @@
+
+"""
+create_no_mouth.py
+
+Part of open_mouth.py.
+
+Creates an image without a mouth.
+
+The landmark points of the mouth area are taken, and the area is erased and inpainted.
+The inpainted area is blurred, pasted back onto the original image, and blurred again.
+
+Author: Akihito Miyazaki
+Created: 2024-04-23
+Changelog:
+ - 2024-04-23: first release
+
+"""
+
+import cv2
+import numpy as np
+from PIL import Image
+import lip_utils
+
+from glibvision.cv2_utils import blend_rgb_images
+from glibvision.numpy_utils import apply_binary_mask_to_image
+
+def process_create_no_mouth_image(img, landmarks_list):
+    img_h, img_w = lip_utils.get_image_size(img)
+
+    ## Erase the mouth region with inpainting.
+    (bottom_width, bottom_height) = lip_utils.get_bottom_lip_width_height(landmarks_list)
+    lip_points = lip_utils.get_lip_mask_points(landmarks_list)
+
+    # Tried rounding the selection, but it did not help much.
+    #lip_points = lip_utils.bulge_polygon(lip_points, 0.1)
+
+    # verify the lip mask region
+    if lip_utils.DEBUG:
+        img_lined = np.copy(img)
+        cv2.polylines(img_lined, [np.array(lip_points)], isClosed=True, color=(0, 255, 0), thickness=1)
+        cv2.imwrite("create_no_mouth_image_polyline.jpg", img_lined)
+
+    # The lip edges contain shadows and pink tones; the lower-lip height is used
+    # to trim them away. The 0.5 has no real justification.
+    dilation_size = int(bottom_height * 0.5)
+    lip_mask = lip_utils.create_mask_from_points(img, lip_points, dilation_size, 0)  # used for inpainting, so no blur is needed
+    if lip_utils.DEBUG:
+        lip_utils.print_numpy(lip_mask, "lip mask")
+
+    if lip_utils.DEBUG:
+        cv2.imwrite("create_no_mouth_image_mask.jpg", lip_mask)
+
+    img_inpainted = cv2.inpaint(img, lip_mask, 3, cv2.INPAINT_TELEA)
+    if lip_utils.DEBUG:
+        cv2.imwrite("create_no_mouth_image_inpaint.jpg", img_inpainted)
+
+    ## Blur the inpainted area.
+    blurred_image = cv2.GaussianBlur(img_inpainted, (29, 29), 0)  # the kernel size sometimes must be odd or an error occurs
+    if lip_utils.DEBUG:
+        cv2.imwrite("create_no_mouth_image_blurred.jpg", blurred_image)
+
+    apply_binary_mask_to_image(img_inpainted, blurred_image, lip_mask)
+    if lip_utils.DEBUG:
+        cv2.imwrite("create_no_mouth_image_blurred1_applied.jpg", blurred_image)
+
+    # blur the whole image slightly
+    blurred_image2 = cv2.GaussianBlur(img_inpainted, (9, 9), 0)
+    if lip_utils.DEBUG:
+        cv2.imwrite("create_no_mouth_image_blurred2.jpg", blurred_image2)
+
+    # expand a little beyond the inpaint boundary
+    kernel = np.ones((8, 8), np.uint8)
+    lip_mask = cv2.dilate(lip_mask, kernel, iterations=1)
+
+    # blur the mask slightly
+    blurred_mask = cv2.GaussianBlur(lip_mask, (19, 19), 0)
+    if lip_utils.DEBUG:
+        cv2.imwrite("create_no_mouth_image_blurred_mask.jpg", blurred_mask)
+
+    # https://github.com/akjava/lip_recognition_tools/issues/12
+    #cv2_utils.apply_binary_mask_to_image(img_inpainted, blurred_image2, lip_mask)
+
+    img_inpainted = blend_rgb_images(img_inpainted, blurred_image2, lip_mask)
+
+    if lip_utils.DEBUG:
+        cv2.imwrite("create_no_mouth_image_merged.jpg", img_inpainted)
+
+    return img_inpainted
+
+if __name__ == "__main__":
+    # path to the image file
+    img_path = "straight.jpg"
+    img = cv2.imread(img_path)
+    landmarks_list = lip_utils.image_to_landmarks_list(img)
+    process_create_no_mouth_image(img, landmarks_list)
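
`blend_rgb_images` from `glibvision.cv2_utils` is the final compositing step here and in the other files, but its source is not part of this diff; given how it is called, it presumably weights each pixel by mask/255. A hypothetical sketch:

```python
# Hypothetical stand-in for glibvision.cv2_utils.blend_rgb_images:
# mask value 0 keeps the base pixel, 255 takes the overlay pixel.
import numpy as np

def blend_rgb_images_sketch(base, overlay, mask):
    alpha = mask.astype(np.float32)[..., None] / 255.0
    return (base * (1.0 - alpha) + overlay * alpha).astype(np.uint8)
```

With a blurred mask, this gives the soft falloff that hides the inpaint boundary.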
create_top_lip.py
ADDED
@@ -0,0 +1,284 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
|
2 |
+
import numpy as np
|
3 |
+
from PIL import Image
|
4 |
+
import lip_utils
|
5 |
+
|
6 |
+
def process_lip_image(img,landmarks_list, margin, open_size_y, open_size_x):
|
7 |
+
print(open_size_x)
|
8 |
+
"""
|
9 |
+
唇画像を処理する関数
|
10 |
+
"""
|
11 |
+
img_h, img_w = lip_utils.get_image_size(img)
|
12 |
+
|
13 |
+
|
14 |
+
open_size_x = 0 #stop support this
|
15 |
+
print("currently stop support open-sizex")
|
16 |
+
|
17 |
+
#for 40 # TODO recheck later issues/91
|
18 |
+
side_tips = 0 # TODO depent on size or point
|
19 |
+
#side_tips = 0
|
20 |
+
side_tips = open_size_x
|
21 |
+
edge_x = int(open_size_x*1.4) #som magic number
|
22 |
+
|
23 |
+
mid_lip_move_artio = open_size_y/80.0 if open_size_y>0 else 0
|
24 |
+
mid_lip_shrink_artio = open_size_x/4 if open_size_x>0 else 0
|
25 |
+
|
26 |
+
# 上唇の抽出と処理
|
27 |
+
top_lip_rgba, cropped_box = lip_utils.get_alpha_image(img, landmarks_list, lip_utils.POINTS_TOP_LIP, margin, margin, 4)
|
28 |
+
if lip_utils.DEBUG:
|
29 |
+
cv2.imwrite("top_lip_rgba.png",top_lip_rgba)
|
30 |
+
new_h,new_w = lip_utils.get_image_size(top_lip_rgba)
|
31 |
+
w = new_w
|
32 |
+
h = new_h
|
33 |
+
print(f"top-lip-alpha-margined-size:w = {new_w} h = {new_h} margin = {margin}")
|
34 |
+
align_points = lip_utils.get_top_lip_align_points(landmarks_list)
|
35 |
+
|
36 |
+
box = cropped_box
|
37 |
+
top_points=lip_utils.get_landmark_points(landmarks_list,lip_utils.POINTS_TOP_LIP)
|
38 |
+
cropped_lip_points = [(point[0] - box[0][0], point[1] - box[0][1]) for point in top_points]
|
39 |
+
lip_points = [(point[0] - box[0][0], point[1] - box[0][1]) for point in align_points]
|
40 |
+
middle_lip = ((lip_points[0][0] + lip_points[1][0]) / 2, (lip_points[0][1] + lip_points[1][1]) / 2)
|
41 |
+
print(f"middle:{middle_lip}")
|
42 |
+
|
43 |
+
|
44 |
+
#DEV
|
45 |
+
print(f"box {cropped_box[0][0]},{cropped_box[0][1]}")
|
46 |
+
face_size_image=lip_utils.create_rgba(img_w,img_h)
|
47 |
+
lip_utils.copy_image(face_size_image,top_lip_rgba,cropped_box[0][0]-margin,cropped_box[0][1]-margin)
|
48 |
+
if lip_utils.DEBUG:
|
49 |
+
cv2.imwrite("top_lip_layer.png",face_size_image)
|
50 |
+
|
51 |
+
|
52 |
+
|
53 |
+
|
54 |
+
|
55 |
+
|
56 |
+
#intではないとエラー
|
57 |
+
middle_y = max(1,int(middle_lip[1]-5)) # force move up
|
58 |
+
|
59 |
+
|
60 |
+
# 3分割
|
61 |
+
mid_x1=int(w/3) # LEFT
|
62 |
+
mid_x2 = w -mid_x1 # RIGHT
|
63 |
+
print(f"image width = {new_w} mid_left = {mid_x1} mid_right ={mid_x2}")
|
64 |
+
|
65 |
+
cx, cy, cx2, cy2 = 0, middle_y,mid_x1, h
|
66 |
+
|
67 |
+
print("###",w,",",middle_y)
|
68 |
+
crop_top = lip_utils.crop_image(top_lip_rgba,0,0,w,middle_y)#full top
|
69 |
+
|
70 |
+
#if use mid only left right change control of up
|
71 |
+
#crop_top = lip_utils.crop_image(top_lip_rgba,mid_x1,0,mid_x2,middle_y)
|
72 |
+
if lip_utils.DEBUG:
|
73 |
+
cv2.imwrite("crop_top.png",crop_top)
|
74 |
+
#cv2.imwrite("top_lip_mid.png",crop_mid)
|
75 |
+
|
76 |
+
below_top_lip_image = lip_utils.crop_image(top_lip_rgba,0,middle_y,w,h)
|
77 |
+
below_top_lip_image_h,below_top_lip_image_w = lip_utils.get_image_size(below_top_lip_image)
|
78 |
+
if lip_utils.DEBUG:
|
79 |
+
cv2.imwrite("below_top_lip_image.png",below_top_lip_image)
|
80 |
+
print(f"below_top_lip_image w = {below_top_lip_image_w}, h= {below_top_lip_image_h}")
|
81 |
+
|
82 |
+
#中央部分を切り抜く
|
83 |
+
|
84 |
+
mid_x1_x2_half = int((mid_x2+mid_x1)/2)
|
85 |
+
print(mid_x1_x2_half)
|
86 |
+
crop_mid_left = lip_utils.crop_image(below_top_lip_image,mid_x1,0,mid_x1_x2_half,below_top_lip_image_h)
|
87 |
+
lip_utils.print_width_height(crop_mid_left,"crop_mid_left")
|
88 |
+
crop_mid_h,crop_mid_w = lip_utils.get_image_size(crop_mid_left)
|
89 |
+
max_w = crop_mid_w
|
90 |
+
max_h = crop_mid_h
|
91 |
+
moveup_lip_mid = lip_utils.create_moved_image(crop_mid_left, [(0,0),(max_w,0),
|
92 |
+
(0,max_h),(max_w,max_h)],
|
93 |
+
|
94 |
+
[(0,0),(crop_mid_w,0),
|
95 |
+
(0,int(max_h)),(max_w,int(crop_mid_h*(1.0 - mid_lip_move_artio)))]# TODO ratio
|
96 |
+
)
|
97 |
+
lip_utils.print_width_height(moveup_lip_mid,"crop_mid_left-moved")
|
98 |
+
|
99 |
+
if lip_utils.DEBUG:
|
100 |
+
cv2.imwrite("moveup_lip_mid_left.png",moveup_lip_mid)
|
101 |
+
|
102 |
+
crop_mid_right = lip_utils.crop_image(below_top_lip_image,mid_x1_x2_half,0,mid_x2,below_top_lip_image_h)
|
103 |
+
crop_mid_h,crop_mid_w = lip_utils.get_image_size(crop_mid_right)
|
104 |
+
max_w = crop_mid_w
|
105 |
+
max_h = crop_mid_h
|
106 |
+
lip_utils.print_width_height(crop_mid_right,"crop_mid_right")
|
107 |
+
moveup_lip_mid_right = lip_utils.create_moved_image(crop_mid_right, [(0,0),(max_w,0),
|
108 |
+
(0,max_h),(max_w,max_h)],
|
109 |
+
|
110 |
+
[(0,0),(max_w,0),
|
111 |
+
(0,int(max_h*(1.0-mid_lip_move_artio))),(max_w,int(max_h))]
|
112 |
+
)
|
113 |
+
lip_utils.print_width_height(moveup_lip_mid_right,"crop_mid_right-moved")
|
114 |
+
if lip_utils.DEBUG:
|
115 |
+
cv2.imwrite("moveup_lip_mid_right.png",moveup_lip_mid_right)
|
116 |
+
|
117 |
+
|
118 |
+
|
119 |
+
|
120 |
+
# 最終画像 サイズは、最初の 切り抜きに 口を開く + open_size_y
|
121 |
+
top_lip_final_image=lip_utils.create_rgba(w,h+open_size_y+1)#some how transform image expand
|
122 |
+
final_image_h,final_image_w = lip_utils.get_image_size(top_lip_final_image)
|
123 |
+
print(f"final-image-size:w = {final_image_w} h = {final_image_h}")
|
124 |
+
|
125 |
+
|
126 |
+
# left block
|
127 |
+
left_lip_image = lip_utils.crop_image(below_top_lip_image,side_tips,0,mid_x1,below_top_lip_image_h)
|
128 |
+
left_lip_image_h,left_lip_image_w = lip_utils.get_image_size(left_lip_image)
|
129 |
+
print(f"left-image-cropped:w = {left_lip_image_w} h = {left_lip_image_h}")
|
130 |
+
# this left transofom is very important change result (dont feel strange +open_size_x)
|
131 |
+
max_w = left_lip_image_w
|
132 |
+
max_h = left_lip_image_h
|
133 |
+
|
134 |
+
opend_lip_left = lip_utils.create_moved_image(left_lip_image,
|
135 |
+
[(0,0),(max_w,0),
|
136 |
+
(0,max_h),(max_w,max_h)],
|
137 |
+
|
138 |
+
[(0,0+open_size_y),(max_w+open_size_x,0),
|
139 |
+
(0,max_h+open_size_y),(max_w+open_size_x,max_h)]
|
140 |
+
)
|
141 |
+
|
142 |
+
|
143 |
+
new_h,new_w = lip_utils.get_image_size(opend_lip_left)
|
144 |
+
print(f"left-image-moved:w = {new_w} h = {new_h}")
|
145 |
+
if lip_utils.DEBUG:
|
146 |
+
cv2.imwrite("top_lip_opend_left.png",opend_lip_left)
|
147 |
+
|
148 |
+
right_lip_image = lip_utils.crop_image(below_top_lip_image,mid_x2,0,below_top_lip_image_w-side_tips,below_top_lip_image_h)
|
149 |
+
right_lip_image_h,right_lip_image_w = lip_utils.get_image_size(right_lip_image)
|
150 |
+
print(f"right-image-cropped:w = {right_lip_image_w} h = {right_lip_image_h}")
|
151 |
+
max_w = right_lip_image_w
|
152 |
+
#cv2.imwrite("top_lip_opend_left.png",opend_lip_left)
|
153 |
+
# right block
|
154 |
+
#right-image-cropped:w = 39 h = 32
|
155 |
+
opend_lip_right = lip_utils.create_moved_image(right_lip_image,
|
156 |
+
[(0,0),(right_lip_image_w-1,0),
|
157 |
+
(0,right_lip_image_h-1),(right_lip_image_w-1,right_lip_image_h-1)],
|
158 |
+
|
159 |
+
[(-0,0),(right_lip_image_w-1+open_size_x,open_size_y), # remove corner shrink it broke image
|
160 |
+
(0,int(crop_mid_h-1)),(right_lip_image_w+open_size_x-1,right_lip_image_h-1+open_size_y)]
|
161 |
+
#,(39+open_size_x,right_lip_image_h+open_size_y) #TOD
|
162 |
+
)
|
163 |
+
|
164 |
+
|
165 |
+
new_h,new_w = lip_utils.get_image_size(opend_lip_right)
|
166 |
+
right_image_w_changed = new_w-right_lip_image_w
|
167 |
+
|
168 |
+
print(f"right-image-moved:w = {new_w} h = {new_h}")
|
169 |
+
if lip_utils.DEBUG:
|
170 |
+
cv2.imwrite("top_lip_opend_right.png",opend_lip_right)
|
171 |
+
|
172 |
+
|
173 |
+
move_x = open_size_x +(open_size_x-right_image_w_changed)
|
174 |
+
print(f"right_image_w_changed ={right_image_w_changed} open_size_x ={open_size_x} move_x ={move_x}")
|
175 |
+
|
176 |
+
lip_utils.copy_image(top_lip_final_image,crop_top,0,0) # full version
|
177 |
+
#lip_utils.copy_image(top_lip_final_image,crop_top,mid_x1,0)
|
178 |
+
print(f"open size x = {open_size_x}")
|
179 |
+
lip_utils.copy_image(top_lip_final_image,opend_lip_left,side_tips,middle_y)# open_size_x must slided and minus value
|
180 |
+
|
181 |
+
#mid
|
182 |
+
lip_utils.copy_image(top_lip_final_image,moveup_lip_mid,mid_x1,middle_y)
|
183 |
+
lip_utils.copy_image(top_lip_final_image,moveup_lip_mid_right,mid_x1_x2_half,middle_y)
|
184 |
+
|
185 |
+
lip_utils.copy_image(top_lip_final_image,opend_lip_right,mid_x2,middle_y)
|
186 |
+
|
187 |
+
|
188 |
+
if lip_utils.DEBUG:
|
189 |
+
cv2.imwrite("top_lip_opend.png",top_lip_final_image)
|
190 |
+
face_size_image=lip_utils.create_rgba(img_w,img_h)
|
191 |
+
lip_utils.copy_image(face_size_image,top_lip_final_image,box[0][0],box[0][1])
|
192 |
+
if lip_utils.DEBUG:
|
193 |
+
cv2.imwrite("top_lip_layer.png",face_size_image)
|
194 |
+
|
195 |
+
|
196 |
+
# possible bug inverted
|
197 |
+
points = lip_utils.get_lip_hole_points(landmarks_list)
|
198 |
+
#points = lip_utils.get_lip_hole_top_points(landmarks_list)
|
199 |
+
|
200 |
+
statics=[1,2,3]
|
201 |
+
half =[0,4,5,9]
|
202 |
+
|
203 |
+
# no effect for now: open_size_x is set to 0 at the beginning
|
204 |
+
m = 1
|
205 |
+
|
206 |
+
|
207 |
+
## TOP lip move up: the upper lip thickness is basically 1.5 x the lower lip; the rest are teeth
|
208 |
+
(bottom_width,bottom_height)=lip_utils.get_bottom_lip_width_height(landmarks_list)
|
209 |
+
left_thick,mid_thick,right_thick = lip_utils.get_top_lip_thicks(landmarks_list)
|
210 |
+
bottom_base = bottom_height/1.5
|
211 |
+
|
212 |
+
diff_left = max(0,int(left_thick - bottom_base))
|
213 |
+
diff_right = max(0,int(right_thick - bottom_base))
|
214 |
+
diff_mid = max(0,int((diff_right+diff_left)*0.4))
|
215 |
+
print(f"bottom base = {bottom_base} left thick ={left_thick} diff ={diff_left}")
|
216 |
+
print(f"bottom base = {bottom_base} left thick ={left_thick} mid ={diff_mid}")
|
217 |
+
|
218 |
+
|
219 |
+
(bottom_width,bottom_height)=lip_utils.get_bottom_lip_width_height(landmarks_list)
|
220 |
+
|
221 |
+
mid_lip_drop_size = lip_utils.get_bottom_mid_drop_size(open_size_y,bottom_height)
|
222 |
+
print(f"mid_lip_drop_size = {mid_lip_drop_size}")
|
223 |
+
moved_points = []
|
224 |
+
for idx,point in enumerate(points):
|
225 |
+
if idx not in statics:
|
226 |
+
if idx in half:
|
227 |
+
plus_x = 0
|
228 |
+
if idx == 5 :#or idx ==0
|
229 |
+
plus_x = open_size_x*m
|
230 |
+
elif idx == 9:#idx == 4 or
|
231 |
+
plus_x = -open_size_x*m
|
232 |
+
elif idx == 0:
|
233 |
+
plus_x = open_size_x*m
|
234 |
+
elif idx == 4:
|
235 |
+
plus_x =-open_size_x*m
|
236 |
+
print(f"idx ={idx} plus {plus_x}")
|
237 |
+
|
238 |
+
moved_points.append((point[0]+plus_x,point[1]+open_size_y/2))
|
239 |
+
else:
|
240 |
+
#bottom
|
241 |
+
moved_points.append((point[0],point[1]+int(open_size_y*2)+mid_lip_drop_size))
|
242 |
+
else:
|
243 |
+
print(f"static ? {idx}")
|
244 |
+
#static top
|
245 |
+
#moved_points.append((point[0],point[1]-crop_mid_h/4)) #for open 40
|
246 |
+
#moved_points.append((point[0],point[1]))
|
247 |
+
if idx == 3:
|
248 |
+
moved_points.append((point[0],point[1]-diff_left))
|
249 |
+
print(f"left top lip move up {diff_left}")
|
250 |
+
elif idx == 2:
|
251 |
+
moved_points.append((point[0],point[1]-diff_mid))
|
252 |
+
elif idx == 1:
|
253 |
+
moved_points.append((point[0],point[1]-diff_right))
|
254 |
+
print(f"right top lip move up {diff_right}")
|
255 |
+
|
256 |
+
|
257 |
+
|
258 |
+
|
259 |
+
|
260 |
+
# force moved
|
261 |
+
#moved_points[1][1] = moved_points[1][1] -4
|
262 |
+
|
263 |
+
tmp = lip_utils.create_mask_from_points(img,points,int(open_size_y/2),0)
|
264 |
+
if lip_utils.DEBUG:
|
265 |
+
cv2.imwrite("lip_hole_mask_base.jpg",tmp)
|
266 |
+
|
267 |
+
gaus = 2 # ADD OPTION
|
268 |
+
mask = lip_utils.create_mask_from_points(img,moved_points,int(open_size_y/2),gaus)
|
269 |
+
if lip_utils.DEBUG:
|
270 |
+
cv2.imwrite("lip_hole_mask.jpg",mask)
|
271 |
+
|
272 |
+
|
273 |
+
return face_size_image,mask
|
274 |
+
|
275 |
+
if __name__ == "__main__":
|
276 |
+
# path to the image file
|
277 |
+
img_path = "straight.jpg"
|
278 |
+
# parameters
|
279 |
+
margin = 4
|
280 |
+
open_size_y = 20
|
281 |
+
open_size_x = 0
|
282 |
+
|
283 |
+
# call the function
|
284 |
+
process_lip_image(img_path, margin, open_size_y, open_size_x)
|
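A minimal sketch of capturing the two return values of process_lip_image (output file names here are hypothetical):

    import cv2
    # same sample parameters as above: margin=4, open_size_y=20, open_size_x=0
    face_size_image, mask = process_lip_image("straight.jpg", 4, 20, 0)
    cv2.imwrite("top_lip_layer_out.png", face_size_image)  # face-sized RGBA top-lip layer
    cv2.imwrite("lip_hole_mask_out.jpg", mask)             # blurred lip-hole mask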
demo_footer.html
ADDED
@@ -0,0 +1,3 @@
1 |
+
<div>
|
2 |
+
<p>Images are generated with <a href="https://huggingface.co/black-forest-labs/FLUX.1-schnell">FLUX.1-schnell</a> and licensed under <a href="http://www.apache.org/licenses/LICENSE-2.0">the Apache 2.0 License</a></p>
|
3 |
+
</div>
|
demo_header.html
ADDED
@@ -0,0 +1,16 @@
1 |
+
<div style="text-align: center;">
|
2 |
+
<h1>
|
3 |
+
Mediapipe 68-points Eyes-Closed and Mouth-Opened
|
4 |
+
</h1>
|
5 |
+
<div class="grid-container">
|
6 |
+
<img src="https://akjava.github.io/AIDiagramChatWithVoice-FaceCharacter/webp/128/00538245.webp" alt="Mediapipe Face Detection" class="image">
|
7 |
+
|
8 |
+
<p class="text">
|
9 |
+
This Space uses the <a href="http://www.apache.org/licenses/LICENSE-2.0">Apache 2.0</a>-licensed <a href="https://ai.google.dev/edge/mediapipe/solutions/vision/face_landmarker">Mediapipe FaceLandmarker</a>.<br>
|
10 |
+
One of the JSON formats comes from the MIT-licensed <a href="https://github.com/ageitgey/face_recognition">face_recognition</a>.<br>
|
11 |
+
I should clarify because it is confusing: I'm not using dlib's non-MIT licensed 68-point model at all.<br>
|
12 |
+
This is 10-year-old technology. However, most of the amazing talking-head models,<br> while often having their core code under MIT/Apache licenses, rely on datasets or NVIDIA libraries with more restrictive licenses.
|
13 |
+
</p>
|
14 |
+
</div>
|
15 |
+
|
16 |
+
</div>
|
demo_tools.html
ADDED
@@ -0,0 +1,10 @@
1 |
+
<div style="text-align: center;">
|
2 |
+
<p>
|
3 |
+
<a href="https://huggingface.co/spaces/Akjava/flux1-schnell-img2img">Flux1-Img2Img(GPU)</a> |
|
4 |
+
<a href="https://huggingface.co/spaces/Akjava/flux1-schnell-mask-inpaint">Flux1-Inpaint(GPU)</a> |
|
5 |
+
<a href="https://huggingface.co/spaces/Akjava/mediapipe-68-points-facial-mask">Create 68 points Parts Mask</a> |
|
6 |
+
<a href="https://huggingface.co/spaces/Akjava/histgram-color-matching">Histgram Color Matching</a> |
|
7 |
+
<a href="https://huggingface.co/spaces/Akjava/WebPTalkHead">WebP anime with 3 images</a>
|
8 |
+
</p>
|
9 |
+
<p></p>
|
10 |
+
</div>
|
draw_landmarks68.py
ADDED
@@ -0,0 +1,516 @@
1 |
+
|
2 |
+
import mediapipe as mp
|
3 |
+
from mediapipe.tasks import python
|
4 |
+
from mediapipe.tasks.python import vision
|
5 |
+
from mediapipe.framework.formats import landmark_pb2
|
6 |
+
from mediapipe import solutions
|
7 |
+
import numpy as np
|
8 |
+
import time
|
9 |
+
import cv2
|
10 |
+
import argparse
|
11 |
+
import os
|
12 |
+
import math
|
13 |
+
|
14 |
+
# modified in gradio
|
15 |
+
|
16 |
+
from mp_constants import *
|
17 |
+
from mp_utils import divide_line_to_points,points_to_bbox,expand_bbox
|
18 |
+
|
19 |
+
import logging
|
20 |
+
|
21 |
+
# for the shared lib; TODO: make it a proper module
|
22 |
+
#import sys
|
23 |
+
#sys.path.append("C:\\Users\\owner\\Documents\\pythons\\glibvision")
|
24 |
+
from glibvision.glandmark_utils import bbox_to_glandmarks,convert_to_landmark_group_json
|
25 |
+
from glibvision.cv2_utils import draw_bbox,plot_points,set_plot_text
|
26 |
+
|
27 |
+
def parse_arguments():
|
28 |
+
"""
|
29 |
+
Parse the command-line arguments.
|
30 |
+
|
31 |
+
"""
|
32 |
+
parser = argparse.ArgumentParser(
|
33 |
+
description="draw 68 points"
|
34 |
+
)
|
35 |
+
parser.add_argument(
|
36 |
+
"--input_file","-i",required=True,help="Input file"
|
37 |
+
)
|
38 |
+
parser.add_argument(
|
39 |
+
"--model_path","-m",default="face_landmarker.task",help="model path"
|
40 |
+
)
|
41 |
+
parser.add_argument(
|
42 |
+
"--save_glandmark","-g",action="store_true",help="save godot-landmark json"
|
43 |
+
)
|
44 |
+
parser.add_argument(
|
45 |
+
"--save_group_landmark","-landmark",action="store_true",help="save group-landmark json"
|
46 |
+
)
|
47 |
+
return parser.parse_args()
|
48 |
+
|
49 |
+
|
50 |
+
|
51 |
+
|
52 |
+
|
53 |
+
def draw_landmarks_on_image(rgb_image, detection_result,draw_number=True,font_scale=0.5,text_color=(200,200,200),dot_size=3,dot_color=(255,0,0),line_size=1,line_color=(0,0,255),box_size=1,box_color=(200,200,200)):
|
54 |
+
#print(f"dot_size={dot_size},dot_color={dot_color},line_size={line_size},line_color={line_color}")
|
55 |
+
iamge_height,image_width = rgb_image.shape[:2] # the input is a numpy image; shape is (h, w, c)
|
56 |
+
face_landmarks_list = detection_result.face_landmarks
|
57 |
+
annotated_image = np.copy(rgb_image)
|
58 |
+
|
59 |
+
def get_cordinate(index):
|
60 |
+
x=face_landmarks_list[0][index].x
|
61 |
+
y=face_landmarks_list[0][index].y
|
62 |
+
return x,y
|
63 |
+
|
64 |
+
def get_distance(x1,y1,x2,y2):
|
65 |
+
return math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
|
66 |
+
|
67 |
+
def get_centers():
|
68 |
+
center_indices =[
|
69 |
+
#(POINT_LEFT_HEAD_OUTER,POINT_RIGHT_HEAD_OUTER,POINT_FOREHEAD_TOP),
|
70 |
+
#(POINT_LEFT_HEAD_OUTER,POINT_RIGHT_HEAD_OUTER,POINT_CHIN_BOTTOM),
|
71 |
+
[POINT_NOSE_CENTER_MIDDLE],
|
72 |
+
#[POINT_LOWER_LIP_CENTER_BOTTOM]
|
73 |
+
#(POINT_UPPER_LIP_CENTER_BOTTOM,POINT_LOWER_LIP_CENTER_TOP)
|
74 |
+
]
|
75 |
+
centers = []
|
76 |
+
for indices in center_indices:
|
77 |
+
total_x = 0
|
78 |
+
total_y = 0
|
79 |
+
for index in indices:
|
80 |
+
x,y = get_cordinate(index)
|
81 |
+
total_x+=x
|
82 |
+
total_y+=y
|
83 |
+
centers.append ((total_x/len(indices),total_y/len(indices)))
|
84 |
+
return centers
|
85 |
+
|
86 |
+
centers = get_centers()
|
87 |
+
for center in centers:
|
88 |
+
center_x,center_y = center
|
89 |
+
|
90 |
+
pt = int(center_x*image_width),int(center_y*iamge_height)
|
91 |
+
|
92 |
+
#cv2.circle(annotated_image,pt,20,(0,0,255),-1)
|
93 |
+
|
94 |
+
def get_closed_center(x,y):
|
95 |
+
closed = None
|
96 |
+
closed_distance = 0
|
97 |
+
for center in centers:
|
98 |
+
distance = get_distance(center[0],center[1],x,y)
|
99 |
+
if closed is None:
|
100 |
+
closed = center
|
101 |
+
closed_distance = distance
|
102 |
+
else:
|
103 |
+
if distance<closed_distance:
|
104 |
+
closed_distance = distance
|
105 |
+
closed = center
|
106 |
+
return closed
|
107 |
+
|
108 |
+
|
109 |
+
#landmark is [index-upper,index-lower]
|
110 |
+
def get_mean_point(landmark,width=image_width,height=iamge_height):
|
111 |
+
xs=[]
|
112 |
+
ys=[]
|
113 |
+
for index in landmark:
|
114 |
+
x,y = get_cordinate(index) #inner cordinate
|
115 |
+
xs.append(x)
|
116 |
+
ys.append(y)
|
117 |
+
|
118 |
+
return int(np.mean(xs)*width),int(np.mean(ys)*height)
|
119 |
+
|
120 |
+
def get_cordinate_point(landmark,width=image_width,height=iamge_height):
|
121 |
+
point = get_cordinate(landmark)
|
122 |
+
|
123 |
+
return int(point[0]*width),int(point[1]*height)
|
124 |
+
# TODO: rename and explain; for the contour this chooses the most outer point
|
125 |
+
def get_point(landmark,width=image_width,height=iamge_height):
|
126 |
+
xs=[]
|
127 |
+
ys=[]
|
128 |
+
|
129 |
+
|
130 |
+
def get_outer_point(indexes):
|
131 |
+
outer_point = None
|
132 |
+
max_distance = None
|
133 |
+
if len(indexes) == 0:
|
134 |
+
return None
|
135 |
+
|
136 |
+
ratio = 0.5
|
137 |
+
x,y = get_cordinate(indexes[-1]) # on the contour there are 3 lines: outer, center, inner coordinates
|
138 |
+
|
139 |
+
#x,y = get_cordinate(indexes[0])
|
140 |
+
center_x,center_y = get_closed_center(x,y)
|
141 |
+
x-=(center_x-x)*ratio
|
142 |
+
y-=(center_y-y)*ratio
|
143 |
+
|
144 |
+
outer_x = x
|
145 |
+
outer_y = y
|
146 |
+
|
147 |
+
for index in indexes:
|
148 |
+
x,y = get_cordinate(index)
|
149 |
+
|
150 |
+
distance = get_distance(outer_x,outer_y,x,y)
|
151 |
+
#print(f"{distance} index={index} x={x},y={y}")
|
152 |
+
if outer_point is None:
|
153 |
+
outer_point = (x,y)
|
154 |
+
max_distance = distance
|
155 |
+
else:
|
156 |
+
if distance<max_distance:
|
157 |
+
outer_point = (x,y)
|
158 |
+
return outer_point
|
159 |
+
|
160 |
+
|
161 |
+
|
162 |
+
for group in landmark:
|
163 |
+
outer_point = get_outer_point(group)
|
164 |
+
xs.append(outer_point[0])
|
165 |
+
ys.append(outer_point[1])
|
166 |
+
|
167 |
+
|
168 |
+
return int(np.mean(xs)*width),int(np.mean(ys)*height)
|
169 |
+
|
170 |
+
# Loop through the detected faces to visualize.
|
171 |
+
for idx in range(len(face_landmarks_list)):
|
172 |
+
face_landmarks = face_landmarks_list[idx]
|
173 |
+
|
174 |
+
# Draw the face landmarks. #something change format
|
175 |
+
face_landmarks_proto = landmark_pb2.NormalizedLandmarkList()
|
176 |
+
face_landmarks_proto.landmark.extend([
|
177 |
+
landmark_pb2.NormalizedLandmark(x=landmark.x, y=landmark.y, z=landmark.z) for landmark in face_landmarks
|
178 |
+
])
|
179 |
+
|
180 |
+
|
181 |
+
def draw_sets(draw_set,color=(0,255,0)):
|
182 |
+
solutions.drawing_utils.draw_landmarks(
|
183 |
+
image=annotated_image,
|
184 |
+
landmark_list=face_landmarks_proto,
|
185 |
+
connections=draw_set,
|
186 |
+
landmark_drawing_spec=None,
|
187 |
+
connection_drawing_spec=mp.solutions.drawing_styles.DrawingSpec(color=color, thickness=1 ))
|
188 |
+
def draw_triangle(index1,index2,index3):
|
189 |
+
draw_sets({(index1,index2),(index2,index3),(index3,index1)})
|
190 |
+
|
191 |
+
def draw_lines(array,color=(0,0,128)):
|
192 |
+
my_set = set()
|
193 |
+
for i in range(len(array)-1):
|
194 |
+
v = (array[i],array[i+1])
|
195 |
+
my_set.add(v)
|
196 |
+
draw_sets(my_set,color)
|
197 |
+
|
198 |
+
def convert_to_box(face_landmarks_list,indices,w=1024,h=1024):
|
199 |
+
x1=0
|
200 |
+
y1=0
|
201 |
+
x2=w
|
202 |
+
y2=h
|
203 |
+
for index in indices:
|
204 |
+
x=min(w,max(0,(face_landmarks_list[0][index].x*w)))
|
205 |
+
y=min(h,max(0,(face_landmarks_list[0][index].y*h)))
|
206 |
+
if x>x1:
|
207 |
+
x1=x
|
208 |
+
if y>y1:
|
209 |
+
y1=y
|
210 |
+
|
211 |
+
if x<x2:
|
212 |
+
x2=x
|
213 |
+
if y<y2:
|
214 |
+
y2=y
|
215 |
+
|
216 |
+
return [x1,y1,x2-x1,y2-y1]
|
217 |
+
|
218 |
+
|
219 |
+
my_set ={(362,382),(382,398),(398,362)}
|
220 |
+
my_set = mp.solutions.face_mesh.FACEMESH_RIGHT_EYE
|
221 |
+
|
222 |
+
#mediapipe to 5point
|
223 |
+
"""
|
224 |
+
draw_triangle(362,382,398)
|
225 |
+
draw_triangle(173,133,155)
|
226 |
+
draw_triangle(33,246,7)
|
227 |
+
draw_triangle(249,263,466)
|
228 |
+
|
229 |
+
draw_triangle(94,2,164)
|
230 |
+
|
231 |
+
draw_triangle(61,76,61)
|
232 |
+
draw_triangle(291,306,291)
|
233 |
+
|
234 |
+
draw_lines([17,18,200,199,175,152])
|
235 |
+
|
236 |
+
draw_lines([127,234,93,132,58,172,136,150,149,176,148,152],(255,0,0))
|
237 |
+
#draw_lines([127,234,132,172,150,176,152],(0,0,255))
|
238 |
+
"""
|
239 |
+
|
240 |
+
#
|
241 |
+
#draw_lines([9,107])
|
242 |
+
"""
|
243 |
+
draw_lines([148,171,208])
|
244 |
+
draw_lines([176,140])
|
245 |
+
draw_lines([149,170,211])
|
246 |
+
draw_lines([150,169,])
|
247 |
+
|
248 |
+
draw_lines([150,169])
|
249 |
+
draw_lines([136,135,214])
|
250 |
+
draw_lines([172,138,192])
|
251 |
+
draw_lines([58,215])
|
252 |
+
draw_lines([132,177,147])
|
253 |
+
draw_lines([58,215,213])
|
254 |
+
draw_lines([93,137,123])
|
255 |
+
#draw_lines([234,227])
|
256 |
+
#draw_lines([127,34,143])
|
257 |
+
|
258 |
+
"""
|
259 |
+
#draw_lines([378,288,356,251,151,21,127,58,150,152])
|
260 |
+
#draw_lines(LINE_RIGHT_CONTOUR_OUTER_EYE_TO_CHIN)
|
261 |
+
#draw_lines(LINE_RIGHT_CONTOUR_EYE_TO_CHIN)
|
262 |
+
#draw_lines(LINE_RIGHT_CONTOUR_INNER_EYE_TO_CHIN,(0,255,0))
|
263 |
+
"""
|
264 |
+
draw_lines(LINE_RIGHT_CONTOUR_0)
|
265 |
+
draw_lines(LINE_RIGHT_CONTOUR_1)
|
266 |
+
draw_lines(LINE_RIGHT_CONTOUR_2)
|
267 |
+
draw_lines(LINE_RIGHT_CONTOUR_3)
|
268 |
+
draw_lines(LINE_RIGHT_CONTOUR_4)
|
269 |
+
draw_lines(LINE_RIGHT_CONTOUR_5)
|
270 |
+
draw_lines(LINE_RIGHT_CONTOUR_6)
|
271 |
+
draw_lines(LINE_RIGHT_CONTOUR_7)
|
272 |
+
draw_lines(LINE_RIGHT_CONTOUR_8)
|
273 |
+
draw_lines(LINE_RIGHT_CONTOUR_9)
|
274 |
+
draw_lines(LINE_RIGHT_CONTOUR_10)
|
275 |
+
draw_lines(LINE_RIGHT_CONTOUR_11)
|
276 |
+
|
277 |
+
|
278 |
+
draw_lines(LINE_LEFT_CONTOUR_1)
|
279 |
+
draw_lines(LINE_LEFT_CONTOUR_2)
|
280 |
+
draw_lines(LINE_LEFT_CONTOUR_3)
|
281 |
+
draw_lines(LINE_LEFT_CONTOUR_4)
|
282 |
+
draw_lines(LINE_LEFT_CONTOUR_5)
|
283 |
+
draw_lines(LINE_LEFT_CONTOUR_6)
|
284 |
+
draw_lines(LINE_LEFT_CONTOUR_7)
|
285 |
+
draw_lines(LINE_LEFT_CONTOUR_8)
|
286 |
+
draw_lines(LINE_LEFT_CONTOUR_9)
|
287 |
+
draw_lines(LINE_LEFT_CONTOUR_10)
|
288 |
+
draw_lines(LINE_LEFT_CONTOUR_11)
|
289 |
+
#draw_lines(LINE_LEFT_CONTOUR_12)
|
290 |
+
"""
|
291 |
+
|
292 |
+
#draw_lines(LINE_RIGHT_CONTOUR_6,(255,0,0))
|
293 |
+
|
294 |
+
def get_eye_brow_points(landmarks):
|
295 |
+
result_points= []
|
296 |
+
for landmark in landmarks:
|
297 |
+
point=get_mean_point(landmark)
|
298 |
+
result_points.append(point)
|
299 |
+
|
300 |
+
return result_points
|
301 |
+
|
302 |
+
def get_mean_points(landmarks):
|
303 |
+
result_points= []
|
304 |
+
for landmark in landmarks:
|
305 |
+
point=get_mean_point(landmark)
|
306 |
+
result_points.append(point)
|
307 |
+
|
308 |
+
return result_points
|
309 |
+
|
310 |
+
def get_divided_points(landmarks,divided=3):
|
311 |
+
result_points= []
|
312 |
+
landmark_points = []
|
313 |
+
for landmark in landmarks:
|
314 |
+
if isinstance(landmark, int):
|
315 |
+
pt=get_cordinate_point(landmark)
|
316 |
+
else:
|
317 |
+
pt =get_mean_point(landmark)
|
318 |
+
landmark_points.append(pt)
|
319 |
+
|
320 |
+
divided_points = divide_line_to_points(landmark_points,divided)
|
321 |
+
|
322 |
+
#print(centers[0][0]*1024,",",centers[0][1]*1024)
|
323 |
+
return divided_points
|
324 |
+
|
325 |
+
|
326 |
+
def get_half_contour(landmarks):
|
327 |
+
result_points= []
|
328 |
+
landmark_points = []
|
329 |
+
for landmark in landmarks:
|
330 |
+
pt =get_point(landmark)
|
331 |
+
landmark_points.append(pt)
|
332 |
+
|
333 |
+
divided_points = divide_line_to_points(landmark_points,8)#9
|
334 |
+
#for pt in divided_points:
|
335 |
+
# cv2.circle(annotated_image,pt,3,(255,0,0),-1)
|
336 |
+
# result_points.append((pt[0],pt[1]))
|
337 |
+
|
338 |
+
#print(centers[0][0]*1024,",",centers[0][1]*1024)
|
339 |
+
return divided_points
|
340 |
+
|
341 |
+
right_landmarks =[
|
342 |
+
#[LANDMARK_68_CONTOUR_5]
|
343 |
+
[LANDMARK_68_CONTOUR_1],[LANDMARK_68_CONTOUR_2_PART1,LANDMARK_68_CONTOUR_2_PART2],[LANDMARK_68_CONTOUR_3],[LANDMARK_68_CONTOUR_4],[LANDMARK_68_CONTOUR_5],[LANDMARK_68_CONTOUR_6_PART1,LANDMARK_68_CONTOUR_6_PART2],[LANDMARK_68_CONTOUR_7],[LANDMARK_68_CONTOUR_8_PART1,LANDMARK_68_CONTOUR_8_PART2],[LANDMARK_68_CONTOUR_9],
|
344 |
+
|
345 |
+
]
|
346 |
+
contour_right_points=get_half_contour(right_landmarks)
|
347 |
+
|
348 |
+
left_landmarks =[
|
349 |
+
[LANDMARK_68_CONTOUR_9], [LINE_LEFT_CONTOUR_1], [LINE_LEFT_CONTOUR_2], [LINE_LEFT_CONTOUR_3], [LINE_LEFT_CONTOUR_4],[LINE_LEFT_CONTOUR_5],[LINE_LEFT_CONTOUR_6],[LINE_LEFT_CONTOUR_7],[LINE_LEFT_CONTOUR_8],[LINE_LEFT_CONTOUR_9],[LINE_LEFT_CONTOUR_10],[LINE_LEFT_CONTOUR_11]
|
350 |
+
]
|
351 |
+
contour_left_points=get_half_contour(left_landmarks)
|
352 |
+
|
353 |
+
set_plot_text(draw_number,font_scale,text_color) # for reset
|
354 |
+
plot_points(annotated_image,contour_right_points+contour_left_points[1:],False,dot_size,dot_color,line_size,line_color)
|
355 |
+
|
356 |
+
right_eye_brow_points=get_eye_brow_points([
|
357 |
+
LANDMARK_68_RIGHT_EYEBROW_18,LANDMARK_68_RIGHT_EYEBROW_19,LANDMARK_68_RIGHT_EYEBROW_20,LANDMARK_68_RIGHT_EYEBROW_21,LANDMARK_68_RIGHT_EYEBROW_22
|
358 |
+
])
|
359 |
+
plot_points(annotated_image,right_eye_brow_points,False,dot_size,dot_color,line_size,line_color)
|
360 |
+
left_eye_brow_points=get_eye_brow_points([
|
361 |
+
LANDMARK_68_LEFT_EYEBROW_23,LANDMARK_68_LEFT_EYEBROW_24,LANDMARK_68_LEFT_EYEBROW_25,LANDMARK_68_LEFT_EYEBROW_26,LANDMARK_68_LEFT_EYEBROW_27
|
362 |
+
])
|
363 |
+
plot_points(annotated_image,left_eye_brow_points,False,dot_size,dot_color,line_size,line_color)
|
364 |
+
|
365 |
+
vertical_nose_points = get_divided_points([LANDMARK_68_VERTICAL_NOSE_28,LANDMARK_68_VERTICAL_NOSE_29,LANDMARK_68_VERTICAL_NOSE_30,LANDMARK_68_VERTICAL_NOSE_31],3)
|
366 |
+
plot_points(annotated_image,vertical_nose_points,False,dot_size,dot_color,line_size,line_color)
|
367 |
+
|
368 |
+
horizontal_nose_points = get_mean_points([LANDMARK_68_HORIZONTAL_NOSE_32,LANDMARK_68_HORIZONTAL_NOSE_33,LANDMARK_68_HORIZONTAL_NOSE_34,LANDMARK_68_HORIZONTAL_NOSE_35,LANDMARK_68_HORIZONTAL_NOSE_36])
|
369 |
+
plot_points(annotated_image,horizontal_nose_points,False,dot_size,dot_color,line_size,line_color)
|
370 |
+
|
371 |
+
right_upper_eye_points = get_divided_points(LINE_RIGHT_UPPER_MIXED_EYE2,3)
|
372 |
+
right_lower_eye_points = get_divided_points(LINE_RIGHT_LOWER_MIXED_EYE,3)
|
373 |
+
#right_eye_points = right_upper_eye_points+right_lower_eye_points # first and last is same as above
|
374 |
+
right_eye_points = right_upper_eye_points+right_lower_eye_points[1:-1]
|
375 |
+
plot_points(annotated_image,right_eye_points,True,dot_size,dot_color,line_size,line_color)
|
376 |
+
|
377 |
+
#draw_lines(LINE_RIGHT_LOWER_OUTER_EYE,(0,255,0))
|
378 |
+
#draw_lines(LINE_RIGHT_LOWER_INNER_EYE,(0,255,0))
|
379 |
+
#draw_lines(LINE_RIGHT_UPPER_OUTER_EYE,(0,255,0))
|
380 |
+
#draw_lines(LINE_RIGHT_UPPER_INNER_EYE,(0,255,0))
|
381 |
+
|
382 |
+
left_upper_eye_points = get_divided_points(LINE_LEFT_UPPER_MIXED_EYE2,3)
|
383 |
+
left_lower_eye_points = get_divided_points(LINE_LEFT_LOWER_MIXED_EYE,3)
|
384 |
+
#left_eye_points = left_upper_eye_points+left_lower_eye_points# first and last is same as above
|
385 |
+
left_eye_points = left_upper_eye_points+left_lower_eye_points[1:-1]
|
386 |
+
plot_points(annotated_image,left_eye_points,True,dot_size,dot_color,line_size,line_color)
|
387 |
+
# first and last is same as above
|
388 |
+
|
389 |
+
#draw_lines(LINE_LEFT_LOWER_OUTER_EYE,(0,255,0))
|
390 |
+
#draw_lines(LINE_LEFT_LOWER_INNER_EYE,(0,255,0))
|
391 |
+
#draw_lines(LINE_LEFT_UPPER_OUTER_EYE,(0,255,0))
|
392 |
+
#draw_lines(LINE_LEFT_UPPER_INNER_EYE,(0,255,0))
|
393 |
+
|
394 |
+
left_upper_outer_lip_points = get_divided_points(LINE_RIGHT_UPPER_OUTER_LIP,3)
|
395 |
+
right_upper_outer_lip_points = get_divided_points(LINE_LEFT_UPPER_OUTER_LIP,3)
|
396 |
+
upper_outer_lip_points = left_upper_outer_lip_points+right_upper_outer_lip_points[1:]# first and last is same as above
|
397 |
+
#plot_points(annotated_image,upper_outer_lip_points)
|
398 |
+
upper_outer_lip_points = get_mean_points([LANDMARK_68_UPPER_OUTER_LIP_49,LANDMARK_68_UPPER_OUTER_LIP_50,LANDMARK_68_UPPER_OUTER_LIP_51,LANDMARK_68_UPPER_OUTER_LIP_52,LANDMARK_68_UPPER_OUTER_LIP_53,LANDMARK_68_UPPER_OUTER_LIP_54,LANDMARK_68_UPPER_OUTER_LIP_55])
|
399 |
+
#plot_points(annotated_image,upper_outer_lip_points)
|
400 |
+
|
401 |
+
lower_outer_lip_points = get_mean_points([LANDMARK_68_UPPER_OUTER_LIP_55,LANDMARK_68_LOWER_OUTER_LIP_56,LANDMARK_68_LOWER_OUTER_LIP_57,LANDMARK_68_LOWER_OUTER_LIP_58,LANDMARK_68_LOWER_OUTER_LIP_59,LANDMARK_68_LOWER_OUTER_LIP_60,LANDMARK_68_UPPER_OUTER_LIP_49])
|
402 |
+
outer_lip_points = upper_outer_lip_points+lower_outer_lip_points[1:-1]
|
403 |
+
plot_points(annotated_image,outer_lip_points,True,dot_size,dot_color,line_size,line_color)
|
404 |
+
|
405 |
+
upper_inner_lip_points = get_mean_points([LANDMARK_68_UPPER_INNER_LIP_61,LANDMARK_68_UPPER_INNER_LIP_62,LANDMARK_68_UPPER_INNER_LIP_63,LANDMARK_68_UPPER_INNER_LIP_64,LANDMARK_68_UPPER_INNER_LIP_65])
|
406 |
+
#plot_points(annotated_image,upper_inner_lip_points)
|
407 |
+
|
408 |
+
lower_inner_lip_points = get_mean_points([LANDMARK_68_UPPER_INNER_LIP_65,LANDMARK_68_LOWER_INNER_LIP_66,LANDMARK_68_LOWER_INNER_LIP_67,LANDMARK_68_LOWER_INNER_LIP_68,LANDMARK_68_UPPER_INNER_LIP_61])
|
409 |
+
inner_lip_points = upper_inner_lip_points+lower_inner_lip_points[1:-1]
|
410 |
+
plot_points(annotated_image,inner_lip_points,True,dot_size,dot_color,line_size,line_color)
|
411 |
+
|
412 |
+
landmark_points = contour_right_points+contour_left_points[1:]
|
413 |
+
landmark_points += right_eye_brow_points + left_eye_brow_points
|
414 |
+
landmark_points += vertical_nose_points + horizontal_nose_points
|
415 |
+
landmark_points += right_eye_points + left_eye_points
|
416 |
+
landmark_points += outer_lip_points + inner_lip_points
|
417 |
+
|
418 |
+
#plot_points(annotated_image,landmark_points,20) # for debug
|
419 |
+
bbox = points_to_bbox(landmark_points)
|
420 |
+
bbox = expand_bbox(bbox,5,7,5,5)
|
421 |
+
|
422 |
+
draw_bbox(annotated_image,bbox,box_color,box_size)
|
423 |
+
#draw_lines([POINT_LEFT_HEAD_OUTER_EX,POINT_LEFT_EYE_OUTER_EX,POINT_LEFT_MOUTH_OUTER_EX,POINT_LEFT_CHIN_OUTER,POINT_CHIN_BOTTOM])
|
424 |
+
#draw_lines([LANDMARK_68_CONTOUR_1,LANDMARK_68_CONTOUR_2,LANDMARK_68_CONTOUR_3,LANDMARK_68_CONTOUR_4,LANDMARK_68_CONTOUR_5,LANDMARK_68_CONTOUR_6,LANDMARK_68_CONTOUR_7,LANDMARK_68_CONTOUR_8,LANDMARK_68_CONTOUR_9])
|
425 |
+
"""solutions.drawing_utils.draw_landmarks(
|
426 |
+
image=annotated_image,
|
427 |
+
landmark_list=face_landmarks_proto,
|
428 |
+
connections=mp.solutions.face_mesh.FACEMESH_LEFT_EYE,#FACE_OVAL
|
429 |
+
landmark_drawing_spec=None,
|
430 |
+
connection_drawing_spec=mp.solutions.drawing_styles
|
431 |
+
.get_default_face_mesh_contours_style())"""
|
432 |
+
|
433 |
+
"""solutions.drawing_utils.draw_landmarks(
|
434 |
+
image=annotated_image,
|
435 |
+
landmark_list=face_landmarks_proto,
|
436 |
+
connections=mp.solutions.face_mesh.FACEMESH_CONTOURS,# mix all
|
437 |
+
landmark_drawing_spec=None,
|
438 |
+
connection_drawing_spec=mp.solutions.drawing_styles
|
439 |
+
.get_default_face_mesh_contours_style())
|
440 |
+
"""
|
441 |
+
"""solutions.drawing_utils.draw_landmarks(
|
442 |
+
image=annotated_image,
|
443 |
+
landmark_list=face_landmarks_proto,
|
444 |
+
connections=mp.solutions.face_mesh.FACEMESH_IRISES,
|
445 |
+
landmark_drawing_spec=None,
|
446 |
+
connection_drawing_spec=mp.solutions.drawing_styles
|
447 |
+
.get_default_face_mesh_iris_connections_style()) """
|
448 |
+
|
449 |
+
return annotated_image,bbox,landmark_points
|
450 |
+
|
451 |
+
|
452 |
+
if __name__ == "__main__":
|
453 |
+
args = parse_arguments()
|
454 |
+
input_file = args.input_file
|
455 |
+
|
456 |
+
#file checks
|
457 |
+
if not os.path.isfile(input_file):
|
458 |
+
print(f"input is not file '{input_file}'")
|
459 |
+
exit(0)
|
460 |
+
|
461 |
+
model_path = args.model_path
|
462 |
+
|
463 |
+
BaseOptions = mp.tasks.BaseOptions
|
464 |
+
FaceLandmarker = mp.tasks.vision.FaceLandmarker
|
465 |
+
FaceLandmarkerOptions = mp.tasks.vision.FaceLandmarkerOptions
|
466 |
+
VisionRunningMode = mp.tasks.vision.RunningMode
|
467 |
+
|
468 |
+
options = FaceLandmarkerOptions(
|
469 |
+
base_options=BaseOptions(model_asset_path=model_path),
|
470 |
+
running_mode=VisionRunningMode.IMAGE
|
471 |
+
,min_face_detection_confidence=0, min_face_presence_confidence=0
|
472 |
+
)
|
473 |
+
|
474 |
+
|
475 |
+
with FaceLandmarker.create_from_options(options) as landmarker:
|
476 |
+
|
477 |
+
start = time.time()
|
478 |
+
mp_image = mp.Image.create_from_file(input_file)
|
479 |
+
face_landmarker_result = landmarker.detect(mp_image)
|
480 |
+
detect_time = time.time()-start
|
481 |
+
print(detect_time)
|
482 |
+
|
483 |
+
annotated_image,bbox,landmark_points = draw_landmarks_on_image(mp_image.numpy_view(), face_landmarker_result)
|
484 |
+
#print(annotated_image)
|
485 |
+
#annotated_image=cv2.resize(annotated_image, (800, 800))
|
486 |
+
annotated_image=cv2.cvtColor(annotated_image, cv2.COLOR_RGB2BGR)
|
487 |
+
cv2.imwrite(input_file.replace(".jpg","_la68.jpg"),annotated_image)
|
488 |
+
|
489 |
+
if args.save_glandmark:
|
490 |
+
parent_path,file = os.path.split(input_file)
|
491 |
+
glandmark = bbox_to_glandmarks(file,bbox,landmark_points)
|
492 |
+
glandmark_path = input_file.replace(".jpg",f".json")
|
493 |
+
if os.path.exists(glandmark_path):
|
494 |
+
print(f"glandmark exist skipped {glandmark_path}")
|
495 |
+
else:
|
496 |
+
import json
|
497 |
+
with open(glandmark_path,"w") as f:
|
498 |
+
json.dump(glandmark,f)
|
499 |
+
|
500 |
+
# _landmark.json is always overwritten because it is not designed for editing
|
501 |
+
if args.save_group_landmark:
|
502 |
+
result=convert_to_landmark_group_json(landmark_points)
|
503 |
+
total = 0
|
504 |
+
for key in result[0].keys():
|
505 |
+
total += len(result[0][key])
|
506 |
+
|
507 |
+
print(total)
|
508 |
+
import json
|
509 |
+
group_landmark_path = input_file.replace(".jpg",f"_landmark.json")
|
510 |
+
with open(group_landmark_path,"w") as f:
|
511 |
+
json.dump(result,f)
|
512 |
+
|
513 |
+
#cv2.imshow("image",)
|
514 |
+
#cv2.waitKey(0)
|
515 |
+
#cv2.destroyAllWindows()
|
516 |
+
|
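A minimal sketch of calling draw_landmarks_on_image directly with custom drawing options; it mirrors the __main__ flow above (a numpy view as input) and uses one of the bundled example images:

    import cv2
    import mediapipe as mp
    from draw_landmarks68 import draw_landmarks_on_image

    options = mp.tasks.vision.FaceLandmarkerOptions(
        base_options=mp.tasks.BaseOptions(model_asset_path="face_landmarker.task"),
        running_mode=mp.tasks.vision.RunningMode.IMAGE)
    with mp.tasks.vision.FaceLandmarker.create_from_options(options) as landmarker:
        mp_image = mp.Image.create_from_file("examples/00002062.jpg")
        result = landmarker.detect(mp_image)
        # numbers off, smaller green dots, no bounding box
        annotated, bbox, points = draw_landmarks_on_image(
            mp_image.numpy_view(), result,
            draw_number=False, dot_size=2, dot_color=(0, 255, 0), box_size=0)
        cv2.imwrite("annotated.jpg", cv2.cvtColor(annotated, cv2.COLOR_RGB2BGR))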
examples/00002062.jpg
ADDED
examples/00002062.webp
ADDED
examples/00003245_00.jpg
ADDED
examples/00003245_00.webp
ADDED
examples/00100265.jpg
ADDED
examples/00100265.webp
ADDED
examples/00824006.jpg
ADDED
examples/00824006.webp
ADDED
examples/00824008.jpg
ADDED
examples/00824008.webp
ADDED
examples/00825000.jpg
ADDED
examples/00825000.webp
ADDED
examples/00826007.jpg
ADDED
examples/00826007.webp
ADDED
examples/00827009.jpg
ADDED
examples/00827009.webp
ADDED
examples/00828003.jpg
ADDED
examples/00828003.webp
ADDED
face_landmarker.task
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:64184e229b263107bc2b804c6625db1341ff2bb731874b0bcc2fe6544e0bc9ff
|
3 |
+
size 3758596
|
face_landmarker.task.txt
ADDED
@@ -0,0 +1,8 @@
1 |
+
Face landmark detection
|
2 |
+
https://ai.google.dev/edge/mediapipe/solutions/vision/face_landmarker
|
3 |
+
|
4 |
+
The model card page is
|
5 |
+
https://storage.googleapis.com/mediapipe-assets/MediaPipe%20BlazeFace%20Model%20Card%20(Short%20Range).pdf
|
6 |
+
|
7 |
+
The license is Apache 2.0
|
8 |
+
https://www.apache.org/licenses/LICENSE-2.0.html
|
glibvision/common_utils.py
ADDED
@@ -0,0 +1,112 @@
1 |
+
import os
|
2 |
+
def check_exists_files(files,dirs,exit_on_error=True):
|
3 |
+
if files is not None:
|
4 |
+
if isinstance(files, str):
|
5 |
+
files = [files]
|
6 |
+
for file in files:
|
7 |
+
if not os.path.isfile(file):
|
8 |
+
print(f"File {file} not found")
|
9 |
+
if exit_on_error:
|
10 |
+
exit(1)
|
11 |
+
else:
|
12 |
+
return 1
|
13 |
+
if dirs is not None:
|
14 |
+
if isinstance(dirs, str):
|
15 |
+
dirs = [dirs]
|
16 |
+
for dir in dirs:
|
17 |
+
if not os.path.isdir(dir):
|
18 |
+
print(f"Dir {dir} not found")
|
19 |
+
if exit_on_error:
|
20 |
+
exit(1)
|
21 |
+
else:
|
22 |
+
return 1
|
23 |
+
return 0
|
24 |
+
|
25 |
+
image_extensions =[".jpg"]
|
26 |
+
|
27 |
+
def add_name_suffix(file_name,suffix,replace_suffix=False):
|
28 |
+
if not suffix.startswith("_"):#force add
|
29 |
+
suffix="_"+suffix
|
30 |
+
|
31 |
+
name,ext = os.path.splitext(file_name)
|
32 |
+
if replace_suffix:
|
33 |
+
index = name.rfind("_")
|
34 |
+
if index!=-1:
|
35 |
+
return f"{name[0:index]}{suffix}{ext}"
|
36 |
+
|
37 |
+
return f"{name}{suffix}{ext}"
|
38 |
+
|
39 |
+
def replace_extension(file_name,new_extension,suffix=None,replace_suffix=False):
|
40 |
+
if not new_extension.startswith("."):
|
41 |
+
new_extension="."+new_extension
|
42 |
+
|
43 |
+
name,ext = os.path.splitext(file_name)
|
44 |
+
new_file = f"{name}{new_extension}"
|
45 |
+
if suffix:
|
46 |
+
return add_name_suffix(name+new_extension,suffix,replace_suffix)
|
47 |
+
return new_file
|
48 |
+
|
49 |
+
def list_digit_images(input_dir,sort=True):
|
50 |
+
digit_images = []
|
51 |
+
global image_extensions
|
52 |
+
files = os.listdir(input_dir)
|
53 |
+
for file in files:
|
54 |
+
if file.endswith(".jpg"):#TODO check image
|
55 |
+
base,ext = os.path.splitext(file)
|
56 |
+
if not base.isdigit():
|
57 |
+
continue
|
58 |
+
digit_images.append(file)
|
59 |
+
|
60 |
+
if sort:
|
61 |
+
digit_images.sort()
|
62 |
+
|
63 |
+
return digit_images
|
64 |
+
def list_suffix_images(input_dir,suffix,is_digit=True,sort=True):
|
65 |
+
digit_images = []
|
66 |
+
global image_extensions
|
67 |
+
files = os.listdir(input_dir)
|
68 |
+
for file in files:
|
69 |
+
if file.endswith(".jpg"):#TODO check image
|
70 |
+
base,ext = os.path.splitext(file)
|
71 |
+
if base.endswith(suffix):
|
72 |
+
if is_digit:
|
73 |
+
if not base.replace(suffix,"").isdigit():
|
74 |
+
continue
|
75 |
+
digit_images.append(file)
|
76 |
+
|
77 |
+
if sort:
|
78 |
+
digit_images.sort()
|
79 |
+
|
80 |
+
return digit_images
|
81 |
+
|
82 |
+
import time
|
83 |
+
|
84 |
+
class ProgressTracker:
|
85 |
+
"""
|
86 |
+
Tracks processing progress and prints the elapsed and estimated remaining time.
|
87 |
+
"""
|
88 |
+
|
89 |
+
def __init__(self,key, total_target):
|
90 |
+
"""
|
91 |
+
Constructor.
|
92 |
+
|
93 |
+
Args:
|
94 |
+
key (str): label printed with each progress step
total_target (int): total number of items to process
|
95 |
+
"""
|
96 |
+
self.key = key
|
97 |
+
self.total_target = total_target
|
98 |
+
self.complete_target = 0
|
99 |
+
self.start_time = time.time()
|
100 |
+
|
101 |
+
def update(self):
|
102 |
+
"""
|
103 |
+
Advance the progress count by one.
|
104 |
+
Print the elapsed time and the estimated remaining time.
|
105 |
+
"""
|
106 |
+
self.complete_target += 1
|
107 |
+
current_time = time.time()
|
108 |
+
consumed_time = current_time - self.start_time
|
109 |
+
remain_time = (consumed_time / self.complete_target) * (self.total_target - self.complete_target) if self.complete_target > 0 else 0
|
110 |
+
print(f"stepped {self.key} {self.total_target} of {self.complete_target}, consumed {(consumed_time / 60):.1f} min, remain {(remain_time / 60):.1f} min")
|
111 |
+
|
112 |
+
|
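A minimal usage sketch for ProgressTracker (the item list is hypothetical):

    from glibvision.common_utils import ProgressTracker

    items = ["00001.jpg", "00002.jpg", "00003.jpg"]  # hypothetical work items
    tracker = ProgressTracker("convert", len(items))
    for item in items:
        # ... process one item here ...
        tracker.update()  # prints step count plus consumed/remaining minutes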
glibvision/cv2_utils.py
ADDED
@@ -0,0 +1,138 @@
1 |
+
import cv2
|
2 |
+
import numpy as np
|
3 |
+
|
4 |
+
|
5 |
+
|
6 |
+
def draw_bbox(image,box,color=(255,0,0),thickness=1):
|
7 |
+
if thickness==0:
|
8 |
+
return
|
9 |
+
|
10 |
+
left = int(box[0])
|
11 |
+
top = int(box[1])
|
12 |
+
right = int(box[0]+box[2])
|
13 |
+
bottom = int(box[1]+box[3])
|
14 |
+
box_points =[(left,top),(right,top),(right,bottom),(left,bottom)]
|
15 |
+
|
16 |
+
cv2.polylines(image, [np.array(box_points)], isClosed=True, color=color, thickness=thickness)
|
17 |
+
|
18 |
+
|
19 |
+
def to_int_points(points):
|
20 |
+
int_points=[]
|
21 |
+
for point in points:
|
22 |
+
int_points.append([int(point[0]),int(point[1])])
|
23 |
+
return int_points
|
24 |
+
|
25 |
+
def draw_text(img, text, point, font_scale=0.5, color=(200, 200, 200), thickness=1):
|
26 |
+
font = cv2.FONT_HERSHEY_SIMPLEX
|
27 |
+
cv2.putText(img, str(text), point, font, font_scale, color, thickness, cv2.LINE_AA)
|
28 |
+
|
29 |
+
plot_text_color = (200, 200, 200)
|
30 |
+
plot_text_font_scale = 0.5
|
31 |
+
plot_index = 1
|
32 |
+
plot_text = True
|
33 |
+
|
34 |
+
def set_plot_text(is_plot,text_font_scale,text_color):
|
35 |
+
global plot_index,plot_text,plot_text_font_scale,plot_text_color
|
36 |
+
plot_text = is_plot
|
37 |
+
plot_index = 1
|
38 |
+
plot_text_font_scale = text_font_scale
|
39 |
+
plot_text_color = text_color
|
40 |
+
|
41 |
+
def plot_points(image,points,isClosed=False,circle_size=3,circle_color=(255,0,0),line_size=1,line_color=(0,0,255)):
|
42 |
+
global plot_index,plot_text
|
43 |
+
int_points = to_int_points(points)
|
44 |
+
if circle_size>0:
|
45 |
+
for point in int_points:
|
46 |
+
cv2.circle(image,point,circle_size,circle_color,-1)
|
47 |
+
if plot_text:
|
48 |
+
draw_text(image,plot_index,point,plot_text_font_scale,plot_text_color)
|
49 |
+
plot_index+=1
|
50 |
+
if line_size>0:
|
51 |
+
cv2.polylines(image, [np.array(int_points)], isClosed=isClosed, color=line_color, thickness=line_size)
|
52 |
+
|
53 |
+
def fill_points(image,points,thickness=1,line_color=(255,255,255),fill_color = (255,255,255)):
|
54 |
+
np_points = np.array(points,dtype=np.int32)
|
55 |
+
cv2.fillPoly(image, [np_points], fill_color)
|
56 |
+
cv2.polylines(image, [np_points], isClosed=True, color=line_color, thickness=thickness)
|
57 |
+
|
58 |
+
def get_image_size(cv2_image):
|
59 |
+
return cv2_image.shape[:2]
|
60 |
+
|
61 |
+
def get_channel(np_array):
|
62 |
+
return np_array.shape[2] if np_array.ndim == 3 else 1
|
63 |
+
|
64 |
+
def get_numpy_text(np_array,key=""):
|
65 |
+
channel = get_channel(np_array)
|
66 |
+
return f"{key} shape = {np_array.shape} channel = {channel} ndim = {np_array.ndim} size = {np_array.size}"
|
67 |
+
|
68 |
+
|
69 |
+
def gray3d_to_2d(grayscale: np.ndarray) -> np.ndarray:
|
70 |
+
channel = get_channel(grayscale)
|
71 |
+
if channel!=1:
|
72 |
+
raise ValueError(f"color maybe rgb or rgba {get_numpy_text(grayscale)}")
|
73 |
+
"""
|
74 |
+
Convert a 3-dimensional grayscale image (1 channel) to 2 dimensions.
|
75 |
+
|
76 |
+
Args:
|
77 |
+
grayscale (np.ndarray): a 3-dimensional grayscale image (1 channel).
|
78 |
+
|
79 |
+
Returns:
|
80 |
+
np.ndarray: the 2-dimensional grayscale image.
|
81 |
+
"""
|
82 |
+
|
83 |
+
if grayscale.ndim == 2:
|
84 |
+
return grayscale
|
85 |
+
return np.squeeze(grayscale)
|
86 |
+
|
87 |
+
def blend_rgb_images(image1: np.ndarray, image2: np.ndarray, mask: np.ndarray) -> np.ndarray:
|
88 |
+
"""
|
89 |
+
Blend two RGB images using a mask image.
|
90 |
+
|
91 |
+
Args:
|
92 |
+
image1 (np.ndarray): the first image (RGB).
|
93 |
+
image2 (np.ndarray): the second image (RGB).
|
94 |
+
mask (np.ndarray): the mask image (grayscale).
|
95 |
+
|
96 |
+
Returns:
|
97 |
+
np.ndarray: the blended image (RGB).
|
98 |
+
|
99 |
+
Raises:
|
100 |
+
ValueError: if the input image shapes do not match.
|
101 |
+
"""
|
102 |
+
|
103 |
+
if image1.shape != image2.shape or image1.shape[:2] != mask.shape:
|
104 |
+
raise ValueError("入力画像の形状が一致しません。")
|
105 |
+
|
106 |
+
# convert the images to float
|
107 |
+
image1 = image1.astype(float)
|
108 |
+
image2 = image2.astype(float)
|
109 |
+
|
110 |
+
# convert the mask to 3 channels and scale it to the 0-1 range
|
111 |
+
alpha = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR).astype(float) / 255.0
|
112 |
+
|
113 |
+
# blend calculation
|
114 |
+
blended = (1 - alpha) * image1 + alpha * image2
|
115 |
+
|
116 |
+
return blended.astype(np.uint8)
|
117 |
+
|
118 |
+
def create_color_image(img,color=(255,255,255)):
|
119 |
+
mask = np.zeros_like(img)
|
120 |
+
|
121 |
+
h, w = img.shape[:2]
|
122 |
+
cv2.rectangle(mask, (0, 0), (w, h), color, -1)
|
123 |
+
return mask
|
124 |
+
|
125 |
+
def pil_to_bgr_image(image):
|
126 |
+
np_image = np.array(image, dtype=np.uint8)
|
127 |
+
if np_image.shape[2] == 4:
|
128 |
+
bgr_img = cv2.cvtColor(np_image, cv2.COLOR_RGBA2BGRA)
|
129 |
+
else:
|
130 |
+
bgr_img = cv2.cvtColor(np_image, cv2.COLOR_RGB2BGR)
|
131 |
+
return bgr_img
|
132 |
+
|
133 |
+
def bgr_to_rgb(np_image):
|
134 |
+
if np_image.shape[2] == 4:
|
135 |
+
bgr_img = cv2.cvtColor(np_image, cv2.COLOR_BGRA2RGBA) # fixed: cv2.COLOR_RBGRA2RGBA does not exist
|
136 |
+
else:
|
137 |
+
bgr_img = cv2.cvtColor(np_image, cv2.COLOR_BGR2RGB)
|
138 |
+
return bgr_img
|
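A minimal sketch of blend_rgb_images, blending a solid color into one region of a photo (the input file name is hypothetical and must be at least 200x200):

    import cv2
    import numpy as np
    from glibvision.cv2_utils import blend_rgb_images, create_color_image

    img = cv2.imread("face.jpg")                      # BGR, shape (h, w, 3)
    red = create_color_image(img, color=(0, 0, 255))  # solid red, same shape
    mask = np.zeros(img.shape[:2], dtype=np.uint8)    # grayscale mask
    mask[100:200, 100:200] = 255                      # blend only this square
    cv2.imwrite("blended.jpg", blend_rgb_images(img, red, mask))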
glibvision/glandmark_utils.py
ADDED
@@ -0,0 +1,48 @@
1 |
+
|
2 |
+
import os
|
3 |
+
|
4 |
+
#simple single version
|
5 |
+
def bbox_to_glandmarks(file_name,bbox,points = None):
|
6 |
+
base,ext = os.path.splitext(file_name)
|
7 |
+
glandmark = {"image":{
|
8 |
+
"boxes":[{
|
9 |
+
"left":int(bbox[0]),"top":int(bbox[1]),"width":int(bbox[2]),"height":int(bbox[3])
|
10 |
+
}],
|
11 |
+
"file":file_name,
|
12 |
+
"id":int(base)
|
13 |
+
# width,height are ignored here
|
14 |
+
}}
|
15 |
+
if points is not None:
|
16 |
+
parts=[
|
17 |
+
]
|
18 |
+
for point in points:
|
19 |
+
parts.append({"x":int(point[0]),"y":int(point[1])})
|
20 |
+
glandmark["image"]["boxes"][0]["parts"] = parts
|
21 |
+
return glandmark
|
22 |
+
|
23 |
+
# technically this is not the g-landmark/dlib format,
|
24 |
+
def convert_to_landmark_group_json(points):
|
25 |
+
if len(points)!=68:
|
26 |
+
print(f"points must be 68 but {len(points)}")
|
27 |
+
return None
|
28 |
+
new_points=list(points)
|
29 |
+
|
30 |
+
result = [ # possibly multiple people; kept as a list so any function can support multiple people
|
31 |
+
|
32 |
+
{ # list indices start at 0 but the landmark numbering starts at 1
|
33 |
+
"chin":new_points[0:17],
|
34 |
+
"left_eyebrow":new_points[17:22],
|
35 |
+
"right_eyebrow":new_points[22:27],
|
36 |
+
"nose_bridge":new_points[27:31],
|
37 |
+
"nose_tip":new_points[31:36],
|
38 |
+
"left_eye":new_points[36:42],
|
39 |
+
"right_eye":new_points[42:48],
|
40 |
+
|
41 |
+
# lip points customized structure
|
42 |
+
# MIT licensed face_recognition
|
43 |
+
# https://github.com/ageitgey/face_recognition
|
44 |
+
"top_lip":new_points[48:55]+[new_points[64]]+[new_points[63]]+[new_points[62]]+[new_points[61]]+[new_points[60]],
|
45 |
+
"bottom_lip":new_points[54:60]+[new_points[48]]+[new_points[60]]+[new_points[67]]+[new_points[66]]+[new_points[65]]+[new_points[64]],
|
46 |
+
}
|
47 |
+
]
|
48 |
+
return result
|
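A minimal sketch of reading a group-landmark JSON built by convert_to_landmark_group_json (assumes draw_landmarks68.py was run with --save_group_landmark on straight.jpg):

    import json

    with open("straight_landmark.json") as f:
        groups = json.load(f)   # a list with one dict per face

    face = groups[0]
    print(list(face.keys()))    # chin, left_eyebrow, ..., top_lip, bottom_lip
    print(len(face["chin"]))    # 17 contour points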
glibvision/numpy_utils.py
ADDED
@@ -0,0 +1,110 @@
1 |
+
import numpy as np
|
2 |
+
|
3 |
+
|
4 |
+
def apply_binary_mask_to_color(base_image,color,mask):
|
5 |
+
"""
|
6 |
+
Fill the masked region of an image with a solid color.
|
7 |
+
|
8 |
+
Args:
|
9 |
+
base_image (np.ndarray): the image to modify.
|
10 |
+
color: the color to write where the mask is 255.
|
11 |
+
mask (np.ndarray): the binary mask image.
|
12 |
+
|
13 |
+
Returns:
|
14 |
+
np.ndarray: the image with the mask applied.
|
15 |
+
|
16 |
+
"""
|
17 |
+
# TODO check all shape
|
18 |
+
#print_numpy(base_image)
|
19 |
+
#print_numpy(paste_image)
|
20 |
+
#print_numpy(mask)
|
21 |
+
if mask.ndim == 2:
|
22 |
+
condition = mask == 255
|
23 |
+
else:
|
24 |
+
condition = mask[:,:,0] == 255
|
25 |
+
|
26 |
+
base_image[condition] = color
|
27 |
+
return base_image
|
28 |
+
|
29 |
+
def apply_binary_mask_to_image(base_image,paste_image,mask):
|
30 |
+
"""
|
31 |
+
Copy part of one image onto another using a binary mask.
|
32 |
+
|
33 |
+
Args:
|
34 |
+
base_image (np.ndarray): the destination image.
|
35 |
+
paste_image (np.ndarray): the source image.
|
36 |
+
mask (np.ndarray): the binary mask image.
|
37 |
+
|
38 |
+
Returns:
|
39 |
+
np.ndarray: the image with the mask applied.
|
40 |
+
|
41 |
+
"""
|
42 |
+
# TODO check all shape
|
43 |
+
#print_numpy(base_image)
|
44 |
+
#print_numpy(paste_image)
|
45 |
+
#print_numpy(mask)
|
46 |
+
if mask.ndim == 2:
|
47 |
+
condition = mask == 255
|
48 |
+
else:
|
49 |
+
condition = mask[:,:,0] == 255
|
50 |
+
|
51 |
+
base_image[condition] = paste_image[condition]
|
52 |
+
return base_image
|
53 |
+
|
54 |
+
def pil_to_numpy(image):
|
55 |
+
return np.array(image, dtype=np.uint8)
|
56 |
+
|
57 |
+
def extruce_points(points,index,ratio=1.5):
|
58 |
+
"""
|
59 |
+
Push the point at index outward from the centroid of the point set by a factor of ratio.
|
60 |
+
"""
|
61 |
+
center_point = np.mean(points, axis=0)
|
62 |
+
if index < 0 or index >= len(points): # >= : points[len(points)] would raise IndexError
|
63 |
+
raise ValueError(f"index must be range(0,{len(points)} but value = {index})")
|
64 |
+
point1 =points[index]
|
65 |
+
print(f"center = {center_point}")
|
66 |
+
vec_to_center = point1 - center_point
|
67 |
+
return vec_to_center*ratio + center_point
|
68 |
+
|
69 |
+
|
70 |
+
def bulge_polygon(points, bulge_factor=0.1,isClosed=True):
|
71 |
+
"""
|
72 |
+
Add a point at the middle of each polygon edge and bulge it outward.
|
73 |
+
Note: returns an ndarray.
|
74 |
+
"""
|
75 |
+
# convert the input points to a NumPy array
|
76 |
+
points = np.array(points)
|
77 |
+
|
78 |
+
# ポリゴン全体の重心を求める
|
79 |
+
center_point = np.mean(points, axis=0)
|
80 |
+
#print(f"center = {center_point}")
|
81 |
+
new_points = []
|
82 |
+
num_points = len(points)
|
83 |
+
for i in range(num_points):
|
84 |
+
if i == num_points -1 and not isClosed:
|
85 |
+
break
|
86 |
+
p1 = points[i]
|
87 |
+
#print(f"p{i} = {p1}")
|
88 |
+
# vector from the centroid to the vertex
|
89 |
+
#vec_to_center = p1 - center_point
|
90 |
+
|
91 |
+
# compute the edge vector
|
92 |
+
mid_diff = points[(i + 1) % num_points] - p1
|
93 |
+
mid = p1+(mid_diff/2)
|
94 |
+
|
95 |
+
#print(f"mid = {mid}")
|
96 |
+
out_vec = mid - center_point
|
97 |
+
|
98 |
+
# push the midpoint outward along the vector from the centroid
|
99 |
+
new_point = mid + out_vec * bulge_factor
|
100 |
+
|
101 |
+
new_points.append(p1)
|
102 |
+
new_points.append(new_point.astype(np.int32))
|
103 |
+
|
104 |
+
return np.array(new_points)
|
105 |
+
|
106 |
+
|
107 |
+
# image.shape rgb are (1024,1024,3) use 1024,1024 as 2-dimensional
|
108 |
+
def create_2d_image(shape):
|
109 |
+
grayscale_image = np.zeros(shape[:2], dtype=np.uint8)
|
110 |
+
return grayscale_image
|
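A minimal sketch of bulge_polygon on a square; each edge gains one outward-pushed midpoint:

    from glibvision.numpy_utils import bulge_polygon

    square = [(0, 0), (100, 0), (100, 100), (0, 100)]
    bulged = bulge_polygon(square, bulge_factor=0.1)
    print(bulged.shape)  # (8, 2): original corners plus one bulged midpoint per edge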
glibvision/pil_utils.py
ADDED
@@ -0,0 +1,14 @@
1 |
+
from PIL import Image,ImageDraw
|
2 |
+
|
3 |
+
def create_color_image(width, height, color=(255,255,255)):
|
4 |
+
img = Image.new('RGB', (width, height), color)
|
5 |
+
return img
|
6 |
+
|
7 |
+
def fill_points(image,points,color=(255,255,255)):
|
8 |
+
draw = ImageDraw.Draw(image)
|
9 |
+
int_points = [(int(x), int(y)) for x, y in points]
|
10 |
+
draw.polygon(int_points, fill=color)
|
11 |
+
return image
|
12 |
+
|
13 |
+
def from_numpy(numpy_array):
|
14 |
+
return Image.fromarray(numpy_array)
|
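A minimal sketch of the PIL helpers (the output file name is hypothetical):

    from glibvision.pil_utils import create_color_image, fill_points

    img = create_color_image(200, 200, (0, 0, 0))  # black canvas
    fill_points(img, [(20, 20), (180, 40), (100, 160)], (255, 255, 255))  # white triangle
    img.save("triangle.png")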
gradio_utils.py
ADDED
@@ -0,0 +1,60 @@
1 |
+
|
2 |
+
|
3 |
+
import os
|
4 |
+
import time
|
5 |
+
import io
|
6 |
+
import hashlib
|
7 |
+
|
8 |
+
def clear_old_files(dir="files",passed_time=60*60):
|
9 |
+
try:
|
10 |
+
files = os.listdir(dir)
|
11 |
+
current_time = time.time()
|
12 |
+
for file in files:
|
13 |
+
file_path = os.path.join(dir,file)
|
14 |
+
|
15 |
+
ctime = os.stat(file_path).st_ctime
|
16 |
+
diff = current_time - ctime
|
17 |
+
#print(f"ctime={ctime},current_time={current_time},passed_time={passed_time},diff={diff}")
|
18 |
+
if diff > passed_time:
|
19 |
+
os.remove(file_path)
|
20 |
+
except Exception:
|
21 |
+
print("maybe still gallery using error")
|
22 |
+
|
23 |
+
def get_buffer_id(buffer):
|
24 |
+
hash_object = hashlib.sha256(buffer.getvalue())
|
25 |
+
hex_dig = hash_object.hexdigest()
|
26 |
+
unique_id = hex_dig[:32]
|
27 |
+
return unique_id
|
28 |
+
|
29 |
+
def get_image_id(image):
|
30 |
+
buffer = io.BytesIO()
|
31 |
+
image.save(buffer, format='PNG')
|
32 |
+
return get_buffer_id(buffer)
|
33 |
+
|
34 |
+
def save_image(image,extension="jpg",dir_name="files"):
|
35 |
+
id = get_image_id(image)
|
36 |
+
os.makedirs(dir_name,exist_ok=True)
|
37 |
+
file_path = f"{dir_name}/{id}.{extension}"
|
38 |
+
|
39 |
+
image.save(file_path)
|
40 |
+
return file_path
|
41 |
+
|
42 |
+
def save_buffer(buffer,extension="webp",dir_name="files"):
|
43 |
+
id = get_buffer_id(buffer)
|
44 |
+
os.makedirs(dir_name,exist_ok=True)
|
45 |
+
file_path = f"{dir_name}/{id}.{extension}"
|
46 |
+
|
47 |
+
with open(file_path,"wb") as f:
|
48 |
+
f.write(buffer.getvalue())
|
49 |
+
return file_path
|
50 |
+
|
51 |
+
def write_file(file_path,text):
|
52 |
+
with open(file_path, 'w', encoding='utf-8') as f:
|
53 |
+
f.write(text)
|
54 |
+
|
55 |
+
def read_file(file_path):
|
56 |
+
"""read the text of target file
|
57 |
+
"""
|
58 |
+
with open(file_path, 'r', encoding='utf-8') as f:
|
59 |
+
content = f.read()
|
60 |
+
return content
|
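A minimal sketch of the cache helpers: clear_old_files prunes stale outputs and save_image writes an image to a content-hashed path (the red test image is hypothetical):

    from PIL import Image
    from gradio_utils import clear_old_files, save_image

    clear_old_files("files", 60 * 60)              # drop cached files older than 1 hour
    img = Image.new("RGB", (64, 64), (255, 0, 0))
    print(save_image(img, extension="webp"))       # e.g. files/<sha256 prefix>.webp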
hole_images/black.jpg
ADDED
hole_images/dark01.jpg
ADDED
hole_images/mid01.jpg
ADDED
hole_images/mid02.jpg
ADDED
landmarks68_utils.py
ADDED
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
from PIL import Image,ImageDraw
|
3 |
+
from glibvision.numpy_utils import extruce_points,bulge_polygon
|
4 |
+
|
5 |
+
|
6 |
+
def minus_point(pt1,pt2):
|
7 |
+
return [pt1[0]-pt2[0],pt1[1]-pt2[1]]
|
8 |
+
|
9 |
+
def lerp_point(pt1,pt2,pt2_ratio):
|
10 |
+
return [int(pt1[0]*(1.0-pt2_ratio)+pt2[0]*pt2_ratio),int(pt1[1]*(1.0-pt2_ratio)+pt2[1]*pt2_ratio)] # int() on both coordinates
|
11 |
+
|
12 |
+
def mean_point(points):
|
13 |
+
xs = 0
|
14 |
+
ys = 0
|
15 |
+
for pt in points:
|
16 |
+
xs +=pt[0]
|
17 |
+
ys +=pt[1]
|
18 |
+
return [int(xs/len(points)),int(ys/len(points))]
|
19 |
+
|
20 |
+
def get_face_points(face_landmarks_list):
|
21 |
+
contour_points=get_landmark_points(face_landmarks_list,PARTS_CONTOUR)
|
22 |
+
left_eyebrow_points=get_landmark_points(face_landmarks_list,PARTS_LEFT_EYEBROW)
|
23 |
+
|
24 |
+
right_eyebrow_points=get_landmark_points(face_landmarks_list,PARTS_RIGHT_EYEBROW)
|
25 |
+
|
26 |
+
nose_points=get_landmark_points(face_landmarks_list,PARTS_NOSE_BRIDGE)
|
27 |
+
|
28 |
+
diff_right = minus_point(contour_points[1],contour_points[0])
|
29 |
+
right_minus_corner = minus_point(contour_points[0] , diff_right)
|
30 |
+
right_contour = lerp_point(right_minus_corner,left_eyebrow_points[0],0.3)
|
31 |
+
|
32 |
+
diff_left = minus_point(contour_points[15],contour_points[16])
|
33 |
+
left_minus_corner = minus_point(contour_points[16] , diff_left)
|
34 |
+
left_contour = lerp_point(left_minus_corner,right_eyebrow_points[-1],0.3)
|
35 |
+
|
36 |
+
middle_face = mean_point([nose_points[0],right_eyebrow_points[0],left_eyebrow_points[-1]])
|
37 |
+
return [right_contour]+list(contour_points)+[left_contour,middle_face]
|
38 |
+
|
39 |
+
|
40 |
+
def get_innner_mouth_points(face_landmarks_list):
|
41 |
+
top_points=get_landmark_points(face_landmarks_list,PARTS_UPPER_LIP)
|
42 |
+
bottom_points=get_landmark_points(face_landmarks_list,PARTS_LOWER_LIP)
|
43 |
+
return top_points[7:]+bottom_points[7:]#[::-1]
|
44 |
+
|
45 |
+
|
46 |
+
PARTS_UPPER_LIP = "top_lip"
|
47 |
+
PARTS_LOWER_LIP = "bottom_lip"
|
48 |
+
PARTS_CONTOUR ="chin"
|
49 |
+
PARTS_LEFT_EYEBROW ="left_eyebrow"
|
50 |
+
PARTS_RIGHT_EYEBROW ="right_eyebrow"
|
51 |
+
PARTS_LEFT_EYE ="left_eye"
|
52 |
+
PARTS_RIGHT_EYE ="right_eye"
|
53 |
+
PARTS_NOSE_TIP ="nose_tip"
|
54 |
+
PARTS_NOSE_BRIDGE ="nose_bridge"
|
55 |
+
|
56 |
+
def get_landmark_points(face_landmarks_list,key):
|
57 |
+
matching_landmark_points = []
|
58 |
+
for face_landmarks in face_landmarks_list:
|
59 |
+
for landmark_name, landmark_points in face_landmarks.items():
|
60 |
+
matching_landmark_points = landmark_points.copy()
|
61 |
+
if landmark_name ==key:
|
62 |
+
return tuple(matching_landmark_points)
|
63 |
+
|
64 |
+
def get_left_upper_eyelid_points(face_landmarks_list,bulge_factor = 0.2):
|
65 |
+
eye_points=get_landmark_points(face_landmarks_list,PARTS_LEFT_EYE)
|
66 |
+
extruded_points=[]
|
67 |
+
|
68 |
+
need_extrude =[0,1,2]
|
69 |
+
for index in range(len(eye_points)):
|
70 |
+
if index in need_extrude:
|
71 |
+
ratio = 1.3
|
72 |
+
else:
|
73 |
+
ratio = 1.1
|
74 |
+
ex_point=extruce_points(eye_points,index,ratio)
|
75 |
+
extruded_points.append(ex_point)
|
76 |
+
return extruded_points
|
77 |
+
|
78 |
+
def get_right_upper_eyelid_points(face_landmarks_list,bulge_factor = 0.2):
|
79 |
+
eye_points=get_landmark_points(face_landmarks_list,PARTS_RIGHT_EYE)
|
80 |
+
extruded_points=[]
|
81 |
+
|
82 |
+
need_extrude =[1,2,3]
|
83 |
+
for index in range(len(eye_points)):
|
84 |
+
if index in need_extrude:
|
85 |
+
ratio = 1.3
|
86 |
+
else:
|
87 |
+
ratio = 1.1
|
88 |
+
ex_point=extruce_points(eye_points,index,ratio)
|
89 |
+
extruded_points.append(ex_point)
|
90 |
+
#return list(eye_points[0:4])+extruded_points
|
91 |
+
return extruded_points
|
92 |
+
|
93 |
+
def get_bulged_eyes(face_landmarks_list,bulge_factor=0.2):
|
94 |
+
points1=get_landmark_points(face_landmarks_list,PARTS_LEFT_EYE)
|
95 |
+
points2=get_landmark_points(face_landmarks_list,PARTS_RIGHT_EYE)
|
96 |
+
|
97 |
+
return bulge_polygon(points1, bulge_factor=bulge_factor),bulge_polygon(points2, bulge_factor=bulge_factor)
|
98 |
+
|
99 |
+
|
100 |
+
def get_lerp(point1,point2,ratio1=0.5):
|
101 |
+
x = point1[0]*ratio1 + point2[0]*(1.0-ratio1)
|
102 |
+
y = point1[1]*ratio1 + point2[1]*(1.0-ratio1)
|
103 |
+
return [int(x),int(y)]
|
104 |
+
|
105 |
+
def get_close_eyelid_point(landmarks_list,bulge = 0.01):
|
106 |
+
left = get_landmark_points(landmarks_list,PARTS_LEFT_EYE)
|
107 |
+
left_points = [get_lerp(left[3],left[4],0.6)]+list(left[4:6])+[left[0]]
|
108 |
+
right = get_landmark_points(landmarks_list,PARTS_RIGHT_EYE)
|
109 |
+
right_points = [get_lerp(right[0],right[5],0.6)]+list(right[3:6][::-1])
|
110 |
+
|
111 |
+
#print("right points")
|
112 |
+
#print(right_points)
|
113 |
+
last2 = right_points[-2:]
|
114 |
+
#print(last2[0])
|
115 |
+
#print(last2[1])
|
116 |
+
extra_dvidied = 10
|
117 |
+
diff = ((last2[0][0]-last2[1][0])/extra_dvidied,(last2[0][1]-last2[1][1])/extra_dvidied)
|
118 |
+
extra = [int(last2[1][0] - diff[0]),int(last2[1][1] - diff[1])]
|
119 |
+
|
120 |
+
height = abs(right_points[0][1]-right_points[-1][1])
|
121 |
+
print(f"height = {height}")
|
122 |
+
move_down = int(height/5)
|
123 |
+
print("diff")
|
124 |
+
print(diff)
|
125 |
+
print(right_points[-1])
|
126 |
+
print(extra)
|
127 |
+
right_points.append(extra)
|
128 |
+
for pt in right_points:
|
129 |
+
pt[1]+=move_down
|
130 |
+
|
131 |
+
last2 = left_points[-2:]
|
132 |
+
diff = ((last2[0][0]-last2[1][0])/extra_dvidied,(last2[0][1]-last2[1][1])/extra_dvidied)
|
133 |
+
extra = [int(last2[1][0] - diff[0]),int(last2[1][1] - diff[1])]
|
134 |
+
left_points.append(extra)
|
135 |
+
for pt in left_points:
|
136 |
+
pt[1]+=move_down
|
137 |
+
|
138 |
+
print(right_points)
|
139 |
+
if bulge:
|
140 |
+
left_points = bulge_polygon(left_points,0.1,False).tolist()
|
141 |
+
right_points = bulge_polygon(right_points,0.1,False).tolist()
|
142 |
+
###LEFT
|
143 |
+
print("####RIGHT")
|
144 |
+
# last 2 points
|
145 |
+
|
146 |
+
|
147 |
+
return left_points,right_points
|
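A minimal sketch of get_landmark_points with a hand-written face_recognition-style landmark dict (the coordinates are hypothetical):

    from landmarks68_utils import get_landmark_points, PARTS_LEFT_EYE

    face_landmarks_list = [{
        "left_eye": [(368, 300), (380, 292), (394, 292), (406, 300), (394, 306), (380, 306)],
    }]
    print(get_landmark_points(face_landmarks_list, PARTS_LEFT_EYE))  # tuple of 6 points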
lip_utils.py
ADDED
@@ -0,0 +1,781 @@
import face_recognition
import os
import json
import cv2
#from glibvision.cv2_utils import get_numpy_text
from glibvision.numpy_utils import bulge_polygon
import numpy as np
import math

USE_CACHE = True

# face structures are the same
# MIT LICENSED
# https://github.com/ageitgey/face_recognition

TOP_LIP = "top_lip"
BOTTOM_LIP = "bottom_lip"
PARTS_CHIN = "chin"
PARTS_LEFT_EYEBROW = "left_eyebrow"
PARTS_RIGHT_EYEBROW = "right_eyebrow"
PARTS_LEFT_EYE = "left_eye"
PARTS_RIGHT_EYE = "right_eye"

POINTS_TOP_LIP = "top_lip"
POINTS_BOTTOM_LIP = "bottom_lip"
POINTS_CHIN = "chin"

COLOR_WHITE = (255, 255, 255)
COLOR_BLACK = (0, 0, 0)
COLOR_ALPHA = (0, 0, 0, 0)

DEBUG = False
DEBUG_CHIN = False

def load_image_file(path):
    image = face_recognition.load_image_file(path)
    data_path = path + ".json"
    if USE_CACHE and os.path.exists(data_path):
        with open(data_path, "r") as f:
            face_landmarks_list = json.loads(f.read())
    else:
        face_landmarks_list = image_to_landmarks_list(image)
        if USE_CACHE:
            json_data = json.dumps(face_landmarks_list)
            with open(data_path, "w") as f:
                f.write(json_data)

    return image, face_landmarks_list

def save_landmarks(face_landmarks, out_path):
    json_data = json.dumps(face_landmarks)
    with open(out_path, "w") as f:
        f.write(json_data)

def load_landmarks(input_path):
    with open(input_path, "r") as f:
        face_landmarks_list = json.loads(f.read())
    return face_landmarks_list


def image_to_landmarks_list(image):
    face_landmarks_list = face_recognition.face_landmarks(image)
    return face_landmarks_list
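A quick sketch of the cache behavior above (an editor's example, not part of the commit; "input.jpg" is a placeholder):

# The first call runs face_recognition and writes input.jpg.json next to the
# image; later calls load the cached landmarks instead of re-detecting.
image, landmarks = load_image_file("input.jpg")
save_landmarks(landmarks, "backup.json")            # explicit save
landmarks_again = load_landmarks("input.jpg.json")  # read the cache directly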

def fill_polygon(image, face_landmarks_list, key, thickness=1, line_color=(255,255,255), fill_color=(255,255,255)):
    points = get_landmark_points(face_landmarks_list, key)
    np_points = np.array(points, dtype=np.int32)
    cv2.fillPoly(image, [np_points], fill_color)
    cv2.polylines(image, [np_points], isClosed=True, color=line_color, thickness=thickness)

def fill_lip(image, face_landmarks_list, thickness=1, line_color=(255,255,255), fill_color=(255,255,255)):
    points1 = get_landmark_points(face_landmarks_list, TOP_LIP)[0:7]
    points2 = get_landmark_points(face_landmarks_list, BOTTOM_LIP)[0:7]

    np_points = np.array(points1 + points2[::-1], dtype=np.int32)

    cv2.fillPoly(image, [np_points], fill_color)
    if thickness > 0:
        cv2.polylines(image, [np_points], isClosed=False, color=line_color, thickness=thickness)

def fill_top(image, face_landmarks_list, thickness=1, line_color=(255,255,255), fill_color=(255,255,255)):
    points1 = get_landmark_points(face_landmarks_list, TOP_LIP)[0:7]

    np_points = np.array(points1, dtype=np.int32)

    cv2.fillPoly(image, [np_points], fill_color)
    if thickness > 0:
        cv2.polylines(image, [np_points], isClosed=False, color=line_color, thickness=thickness)

def fill_top_lower(image, face_landmarks_list, thickness=1, line_color=(255,255,255), fill_color=(255,255,255)):
    top_lip_points = get_landmark_points(face_landmarks_list, TOP_LIP)  # uses indices 5 to 7 and 1 to 11
    points1 = [lerp_points(top_lip_points[5], top_lip_points[7], 0.7)] + \
              [mid_points(top_lip_points[7], top_lip_points[8])] + \
              list(top_lip_points[8:11]) + \
              [mid_points(top_lip_points[10], top_lip_points[11])] + \
              [lerp_points(top_lip_points[1], top_lip_points[11], 0.7)] + \
              [mid_points(top_lip_points[2], top_lip_points[10])] + \
              [mid_points(top_lip_points[3], top_lip_points[9])] + \
              [mid_points(top_lip_points[4], top_lip_points[8])]

    np_points = np.array(points1, dtype=np.int32)

    cv2.fillPoly(image, [np_points], fill_color)
    if thickness > 0:
        cv2.polylines(image, [np_points], isClosed=False, color=line_color, thickness=thickness)

def get_lip_mask_points(face_landmarks_list):
    points1 = get_landmark_points(face_landmarks_list, TOP_LIP)[0:7]
    points2 = get_landmark_points(face_landmarks_list, BOTTOM_LIP)[0:7]
    return points1 + points2


from scipy.special import comb

def bernstein_poly(i, n, t):
    """
    Compute the i-th Bernstein basis polynomial of a degree-n Bezier curve.
    """
    return comb(n, i) * (t**(n-i)) * (1 - t)**i

def bezier_curve(points, num_points=100):
    """
    Compute a Bezier curve from the given control points.
    """
    nPoints = len(points)
    xPoints = np.array([p[0] for p in points])
    yPoints = np.array([p[1] for p in points])

    t = np.linspace(0.0, 1.0, num_points)

    polynomial_array = np.array([bernstein_poly(i, nPoints-1, t) for i in range(0, nPoints)])

    xvals = np.dot(xPoints, polynomial_array)
    yvals = np.dot(yPoints, polynomial_array)

    return np.array(list(zip(xvals, yvals)))
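A small worked example for the Bezier helper (editor's sketch): three control points give a quadratic curve. Note that with this Bernstein exponent ordering the samples run from the last control point to the first.

pts = [(0, 0), (50, 100), (100, 0)]
curve = bezier_curve(pts, num_points=5)
# curve[0] is (100.0, 0.0) and curve[-1] is (0.0, 0.0); the middle sample
# bends toward the (50, 100) control point: curve[2] == (50.0, 50.0).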
def fill_eyes(image, face_landmarks_list, thickness=1, line_color=(255,255,255), fill_color=(255,255,255)):
    points1 = get_landmark_points(face_landmarks_list, PARTS_LEFT_EYE)
    points2 = get_landmark_points(face_landmarks_list, PARTS_RIGHT_EYE)

    for points in [points1, points2]:
        #points = bezier_curve(points, num_points=10)
        points = bulge_polygon(points, bulge_factor=0.2)
        np_points = np.array(points, dtype=np.int32)

        cv2.fillPoly(image, [np_points], fill_color)
        if thickness > 0:
            cv2.polylines(image, [np_points], isClosed=False, color=line_color, thickness=thickness)


def fill_face(image, face_landmarks_list, thickness=1, line_color=(255,255,255), fill_color=(255,255,255)):
    points1 = get_landmark_points(face_landmarks_list, PARTS_LEFT_EYEBROW)
    points2 = get_landmark_points(face_landmarks_list, PARTS_RIGHT_EYEBROW)
    points3 = get_landmark_points(face_landmarks_list, PARTS_CHIN)

    np_points = np.array(points1 + points2 + points3[::-1], dtype=np.int32)

    cv2.fillPoly(image, [np_points], fill_color)
    cv2.polylines(image, [np_points], isClosed=False, color=line_color, thickness=thickness)

def fill_face_inside(image, face_landmarks_list, thickness=1, line_color=(255,255,255), fill_color=(255,255,255)):
    print("not supported yet")
    return None
    # unreachable draft code below
    points1 = get_landmark_points(face_landmarks_list, PARTS_LEFT_EYEBROW)
    points2 = get_landmark_points(face_landmarks_list, PARTS_RIGHT_EYEBROW)
    points3 = get_landmark_points(face_landmarks_list, PARTS_CHIN)

    np_points = np.array(points1 + points2 + points3[::-1], dtype=np.int32)

    cv2.fillPoly(image, [np_points], fill_color)
    cv2.polylines(image, [np_points], isClosed=False, color=line_color, thickness=thickness)

def half_pt(point1, point2):
    return [sum(x) / 2 for x in zip(point1, point2)]


def line_lip(image, face_landmarks_list, key, thickness=1, line_color=(255,255,255)):
    points = get_landmark_points(face_landmarks_list, key)
    #st=[(points[0]+points[11])/2]
    st = [sum(x) / 2 for x in zip(points[0], points[11])]
    #et=[(points[6]+points[7])/2]
    et = [sum(x) / 2 for x in zip(points[6], points[7])]
    np_points = np.array([st] + list(points[1:6]) + [et], dtype=np.int32)
    cv2.polylines(image, [np_points], isClosed=False, color=line_color, thickness=thickness)

def get_lip_hole_points(face_landmarks_list):
    top_points = get_landmark_points(face_landmarks_list, TOP_LIP)
    bottom_points = get_landmark_points(face_landmarks_list, BOTTOM_LIP)
    return top_points[7:] + bottom_points[7:]  #[::-1]

def get_lip_hole_top_points(face_landmarks_list):
    top_points = get_landmark_points(face_landmarks_list, TOP_LIP)
    return top_points[7:]

def get_lip_hole_bottom_points(face_landmarks_list):
    bottom_points = get_landmark_points(face_landmarks_list, BOTTOM_LIP)
    # inverted so it connects with the top points
    return bottom_points[7:][::-1]

# to hide a too-long tooth
def get_lip_hole_bottom_half_points(face_landmarks_list):
    bottom_points = get_landmark_points(face_landmarks_list, BOTTOM_LIP)
    # inverted so it connects with the top points
    st = [sum(x) / 2 for x in zip(bottom_points[7], bottom_points[8])]
    et = [sum(x) / 2 for x in zip(bottom_points[10], bottom_points[11])]
    points = [st] + list(bottom_points[8:11]) + [et]
    return points[::-1]

def fill_points(points, image, thickness=1, line_color=(255,255,255), fill_color=(255,255,255)):
    np_points = np.array(points, dtype=np.int32)

    cv2.fillPoly(image, [np_points], fill_color)
    if thickness > 0:
        cv2.polylines(image, [np_points], isClosed=False, color=line_color, thickness=thickness)

def fill_lip_hole_top(image, face_landmarks_list, thickness=1, line_color=(255,255,255), fill_color=(255,255,255)):
    np_points = np.array(get_lip_hole_top_points(face_landmarks_list), dtype=np.int32)

    cv2.fillPoly(image, [np_points], fill_color)
    cv2.polylines(image, [np_points], isClosed=False, color=line_color, thickness=thickness)


def fill_lip_hole(image, face_landmarks_list, thickness=1, line_color=(255,255,255), fill_color=(255,255,255)):
    np_points = np.array(get_lip_hole_points(face_landmarks_list), dtype=np.int32)
    cv2.fillPoly(image, [np_points], fill_color)
    cv2.polylines(image, [np_points], isClosed=False, color=line_color, thickness=thickness)


def get_landmark_points(face_landmarks_list, key):
    matching_landmark_points = []
    for face_landmarks in face_landmarks_list:
        for landmark_name, landmark_points in face_landmarks.items():
            if landmark_name == key:
                for value in landmark_points:
                    matching_landmark_points.append([value[0], value[1]])
    return tuple(matching_landmark_points)
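A small sketch of the landmark accessor (editor's example): face_recognition returns a list of dicts keyed by part name, and this helper flattens one part into a tuple of [x, y] pairs.

landmarks_list = [{"top_lip": [(10, 20), (12, 18)], "bottom_lip": [(10, 30)]}]
print(get_landmark_points(landmarks_list, "top_lip"))
# -> ([10, 20], [12, 18])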

def get_image_size(cv2_image):
    return cv2_image.shape[:2]

def get_top_lip_box(face_landmarks_list, margin=0):
    points = get_landmark_points(face_landmarks_list, TOP_LIP)
    box = points_to_box(points)
    if margin > 0:
        return ((box[0][0] - margin, box[0][1] - margin), (box[1][0] + margin, box[1][1] + margin))
    else:
        return box

def get_points_box(face_landmarks_list, key, margin=0):
    points = get_landmark_points(face_landmarks_list, key)
    box = points_to_box(points)
    if margin > 0:
        return ((box[0][0] - margin, box[0][1] - margin), (box[1][0] + margin, box[1][1] + margin))
    else:
        return box

# for size up
def create_moved_image(image, src_points, dst_points, force_size=None):
    # keeps the top of the lip stable, but the perspective warp needs 4 points
    src_pts = np.array([src_points], dtype=np.float32)
    dst_pts = np.array([dst_points], dtype=np.float32)
    return warp_with_auto_resize(image, src_pts, dst_pts, cv2.BORDER_REPLICATE, force_size)

# lip-index layout
"""
   1  2  3  4  5
 0                6
  11 10  9  8  7
"""
def get_top_lip_align_points(face_landmarks_list):
    landmark = get_landmark_points(face_landmarks_list, TOP_LIP)
    index_center = 3
    index_right = 0  # mirrored
    index_left = 6
    # 0 is the right edge
    x1 = landmark[index_right][0]
    y1 = landmark[index_right][1]
    # 6 is the left edge
    x2 = landmark[index_left][0]
    y2 = landmark[index_left][1]

    # bottom center position
    cx = (x1 + x2) / 2
    cy = (y1 + y2) / 2

    diffx = (landmark[index_center][0] - cx)
    diffy = (landmark[index_center][1] - cy)

    return ((int(x1 + diffx), int(y1 + diffy)), (int(x2 + diffx), int(y2 + diffy)), (x1, y1), (x2, y2))


def calculate_new_point(start_point, distance, angle):
    x1, y1 = start_point
    angle_rad = math.radians(angle)

    # compute the coordinates of the new point
    new_x = x1 + distance * math.cos(angle_rad)
    new_y = y1 + distance * math.sin(angle_rad)

    return (new_x, new_y)

def calculate_clockwise_angle(point1, point2):
    x1, y1 = point1
    x2, y2 = point2

    # compute the angle with atan2
    angle_rad = math.atan2(y2 - y1, x2 - x1)

    # convert counter-clockwise to clockwise
    if angle_rad < 0:
        angle_rad += 2 * math.pi

    # convert radians to degrees
    angle_deg = math.degrees(angle_rad)

    return angle_deg

def get_bottom_lip_align_points(landmarks_list):
    points = get_landmark_points(landmarks_list, POINTS_BOTTOM_LIP)
    return (points[0], points[3], points[6], points[9])

def get_bottom_lip_width_height(landmarks_list):
    points = get_landmark_points(landmarks_list, POINTS_BOTTOM_LIP)
    return (points[0][0] - points[6][0], points[3][1] - points[9][1])


def crop_image(image, x1, y1, x2, y2):
    return image[y1:y2, x1:x2]

def crop_cv2_image_by_box(image, box):
    return crop_image_by_box(image, box)

def crop_image_by_box(image, box):
    return image[box[0][1]:box[1][1], box[0][0]:box[1][0]]

def get_top_lip_datas(img, margin=4):
    landmarks_list = image_to_landmarks_list(img)
    box = get_top_lip_box(landmarks_list, margin)
    cropped_img = crop_cv2_image_by_box(img, box)
    points = get_top_lip_align_points(landmarks_list)  # a rectangle, but not a square
    return landmarks_list, cropped_img, points, box

def get_bottom_lip_datas(img, margin=4):
    landmarks_list = image_to_landmarks_list(img)
    box = get_points_box(landmarks_list, POINTS_BOTTOM_LIP, margin)
    cropped_img = crop_cv2_image_by_box(img, box)
    points = get_bottom_lip_align_points(landmarks_list)  # a rectangle, but not a square
    return landmarks_list, cropped_img, points, box

def offset_points(points, offset):
    new_points = []
    for point in points:
        new_points.append((point[0] - offset[0], point[1] - offset[1]))
    return new_points


def points_to_box(points):
    min_x = float('inf')
    min_y = float('inf')
    max_x = 0
    max_y = 0
    for point in points:
        if point[0] > max_x:
            max_x = int(point[0])
        if point[1] > max_y:
            max_y = int(point[1])
        if point[0] < min_x:
            min_x = int(point[0])
        if point[1] < min_y:
            min_y = int(point[1])
    return ((min_x, min_y), (max_x, max_y))
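For clarity, the bounding-box helper in action (editor's example):

box = points_to_box([(10, 40), (30, 20), (25, 35)])
print(box)  # ((10, 20), (30, 40)): (min corner), (max corner)
print(points_to_box(offset_points([(10, 40), (30, 20)], box[0])))
# offsetting by the min corner moves the box to the origin: ((0, 0), (20, 20))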

def warp_with_auto_resize(img, src_pts, dst_pts, borderMode=cv2.BORDER_TRANSPARENT, force_size=None):
    """
    Warp an image with a perspective transform, automatically resizing
    the output when the result would overflow the original bounds.

    Args:
        img: image to transform (numpy array)
        src_pts: corners of the source quadrilateral (numpy array)
        dst_pts: corners of the destination quadrilateral (numpy array)

    Returns:
        the warped image (numpy array)
    """
    # compute the transform matrix
    mat = cv2.getPerspectiveTransform(src_pts, dst_pts)

    # compute the size of the warped image
    h, w = img.shape[:2]
    corners = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
    warped_corners = cv2.perspectiveTransform(corners.reshape(-1, 1, 2), mat).reshape(-1, 2)

    # bounding rectangle of the warped image (including the original corners)
    min_x = np.min(warped_corners[:, 0])
    min_y = np.min(warped_corners[:, 1])
    max_x = np.max(warped_corners[:, 0])
    max_y = np.max(warped_corners[:, 1])
    new_w, new_h = int(max_x - min_x), int(max_y - min_y)

    # update the matrix with a translation component
    mat[0, 2] += -min_x
    mat[1, 2] += -min_y

    if force_size:
        new_w = force_size[0]
        new_h = force_size[1]

    warped_img = cv2.warpPerspective(img, mat, (new_w, new_h), flags=cv2.INTER_LANCZOS4, borderMode=borderMode)

    return warped_img
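A usage sketch for the warp helper (editor's example; the corner coordinates are made up). Mapping the image's corners to a sheared quadrilateral grows the canvas automatically unless force_size pins it:

img = create_rgb(100, 100)
src = np.float32([[0, 0], [100, 0], [100, 100], [0, 100]])
dst = np.float32([[20, 0], [120, 0], [100, 100], [0, 100]])
warped = warp_with_auto_resize(img, src, dst, cv2.BORDER_REPLICATE)
# warped.shape[1] is 120 here, because the sheared corners extend past x=100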

def warp_with_auto_resize1(img, src_pts, dst_pts, borderMode=cv2.BORDER_TRANSPARENT, force_size=None):
    """
    Warp an image with a perspective transform, automatically resizing
    the output when the result would overflow the original bounds.

    Args:
        img: image to transform (numpy array)
        src_pts: corners of the source quadrilateral (numpy array)
        dst_pts: corners of the destination quadrilateral (numpy array)

    Returns:
        the warped image (numpy array)
    """
    # compute the transform matrix
    mat = cv2.getPerspectiveTransform(src_pts, dst_pts)

    # compute the size of the warped image
    h, w = img.shape[:2]
    corners = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
    warped_corners = cv2.perspectiveTransform(corners.reshape(-1, 1, 2), mat).reshape(-1, 2)

    # bounding rectangle of the warped corners
    min_x, min_y = np.min(warped_corners, axis=0)
    max_x, max_y = np.max(warped_corners, axis=0)
    new_w, new_h = int(max_x - min_x), int(max_y - min_y)

    # update the matrix with a translation component
    mat[0, 2] += -min_x
    mat[1, 2] += -min_y

    if force_size:
        new_w = force_size[0]
        new_h = force_size[1]

    warped_img = cv2.warpPerspective(img, mat, (new_w, new_h), flags=cv2.INTER_LANCZOS4, borderMode=borderMode)

    return warped_img

def get_channel(np_array):
    return np_array.shape[2] if np_array.ndim == 3 else 1

def print_numpy(np_array, key=""):
    channel = get_channel(np_array)
    print(f"{key} shape = {np_array.shape} channel = {channel} ndim = {np_array.ndim} size = {np_array.size}")

def create_color_image(img, color=(255,255,255)):
    mask = np.zeros_like(img)
    h, w = img.shape[:2]
    cv2.rectangle(mask, (0, 0), (w, h), color, -1)
    return mask

def create_mask(img, color=(255,255,255)):
    mask = np.zeros_like(img)
    h, w = img.shape[:2]
    cv2.rectangle(mask, (0, 0), (w, h), color, -1)
    return mask

def create_rgba(width, height):
    return np.zeros((height, width, 4), dtype=np.uint8)

def create_rgb(width, height):
    return np.zeros((height, width, 3), dtype=np.uint8)

def create_gray(width, height):
    return np.zeros((height, width), dtype=np.uint8)

def copy_image(img1, img2, x, y):
    img1[y:y+img2.shape[0], x:x+img2.shape[1]] = img2

def copy_color(img1, x, y, x2, y2, color):
    color_img = np.full((y2-y, x2-x, 4), color, dtype=np.uint8)
    img1[y:y2, x:x2] = color_img


def multiply_point(point, multiply):
    return int(point[0]*multiply), int(point[1]*multiply)

def get_resized_top_pos(points, multiply=0.5):
    diff_left = multiply_point((points[0][0]-points[2][0], points[0][1]-points[2][1]), multiply)
    diff_right = multiply_point((points[1][0]-points[3][0], points[1][1]-points[3][1]), multiply)
    return (diff_right, diff_left)


def get_alpha_image(base_image, landmarks_list, key, margin=0, dilation_size=2, gaussian_size=2):
    box = get_points_box(landmarks_list, key, margin)  # box expanded by margin
    cropped_img = crop_cv2_image_by_box(base_image, box)
    # convert to RGBA
    if cropped_img.shape[2] == 3:  # the image has 3 channels (BGR)
        image_rgba = cv2.cvtColor(cropped_img, cv2.COLOR_BGR2BGRA)
        mask = np.zeros(cropped_img.shape[:2], dtype="uint8")
    else:
        # already has an alpha channel, skip the conversion
        image_rgba = cropped_img
        mask = np.zeros(cropped_img.shape[:2], dtype="uint8")
        #mask = cropped_img[:, :, 3].copy()  # reusing the alpha somehow comes out blocky

    global_points = get_landmark_points(landmarks_list, key)
    local_points = offset_points(global_points, box[0])

    # create the lip mask
    np_points = np.array(local_points, dtype=np.int32)
    cv2.fillPoly(mask, [np_points], 255)

    kernel = np.ones((dilation_size, dilation_size), np.uint8)
    dilated_mask = cv2.dilate(mask, kernel, iterations=1)
    #dilated_mask = cv2.erode(mask, kernel, iterations=1)  # TODO support negative dilation_size

    # Gaussian blur
    if gaussian_size > 0:
        smooth_mask = cv2.GaussianBlur(dilated_mask, (0, 0), sigmaX=gaussian_size, sigmaY=gaussian_size)
        expanded_mask = np.expand_dims(smooth_mask, axis=-1)
    else:
        expanded_mask = np.expand_dims(dilated_mask, axis=-1)

    image_rgba[..., 3] = expanded_mask[..., 0]

    return image_rgba, box
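How the alpha-cutout helper is typically driven (editor's sketch; "input.jpg" is a placeholder and the margin/blur values are arbitrary):

img, landmarks_list = load_image_file("input.jpg")
lip_rgba, lip_box = get_alpha_image(img, landmarks_list, TOP_LIP, margin=4, dilation_size=2, gaussian_size=2)
# lip_rgba is a BGRA crop whose alpha follows the feathered lip polygon;
# lip_box locates the crop in the original image for later compositing.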

def apply_mask(image, mask):
    if len(mask.shape) == 3:
        expanded_mask = mask
    else:
        expanded_mask = np.expand_dims(mask, axis=-1)

    if len(image.shape) != 3:
        error = f"image must have 3 dimensions, got shape {image.shape}"
        raise ValueError(error)

    if get_channel(image) != 4:
        image_rgba = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA)
    else:
        image_rgba = image
    image_rgba[..., 3] = expanded_mask[..., 0]
    return image_rgba

def apply_mask_alpha(image, mask, invert=False):
    if len(mask.shape) == 3:
        expanded_mask = mask
    else:
        expanded_mask = np.expand_dims(mask, axis=-1)

    image_rgba = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA)
    # note: with invert=False the alpha channel becomes the inverted mask
    if invert:
        image_rgba[..., 3] = expanded_mask[..., 0]
    else:
        image_rgba[..., 3] = 255 - expanded_mask[..., 0]
    return image_rgba

def print_width_height(image, label):
    new_h, new_w = get_image_size(image)
    print(f"{label}: width = {new_w} height = {new_h}")


def create_mask_from_points(img, points, dilation_size=4, gaussian_size=4):
    np_points = np.array(points, dtype=np.int32)
    mask = np.zeros(img.shape[:2], dtype="uint8")
    cv2.fillPoly(mask, [np_points], 255)

    kernel = np.ones((abs(dilation_size), abs(dilation_size)), np.uint8)
    if dilation_size > 0:
        dilated_mask = cv2.dilate(mask, kernel, iterations=1)
    else:
        dilated_mask = cv2.erode(mask, kernel, iterations=1)
    # Gaussian blur
    if gaussian_size > 0:
        smooth_mask = cv2.GaussianBlur(dilated_mask, (0, 0), sigmaX=gaussian_size, sigmaY=gaussian_size)
        expanded_mask = np.expand_dims(smooth_mask, axis=-1)
    else:
        expanded_mask = np.expand_dims(dilated_mask, axis=-1)
    return expanded_mask

def mid_points(point1, point2):
    return [sum(x) / 2 for x in zip(point1, point2)]

def lerp_points(point1, point2, lerp):
    return [(1.0 - lerp) * p1 + lerp * p2 for p1, p2 in zip(point1, point2)]

def get_jaw_points(face_landmarks_list):
    chin_points = get_landmark_points(face_landmarks_list, POINTS_CHIN)
    bottom_lip_points = get_landmark_points(face_landmarks_list, POINTS_BOTTOM_LIP)

    points = []
    points.extend(chin_points[4:13])
    points.append(mid_points(chin_points[12], bottom_lip_points[0]))
    points.append(mid_points(chin_points[8], bottom_lip_points[3]))
    points.append(mid_points(chin_points[4], bottom_lip_points[6]))

    return points

def get_bottom_mid_drop_size(open_size_y, lip_height):
    # at full opening (open_size_y == 40) the mid lip drops by half its height
    mid_lip_move_ratio = open_size_y / 80.0 if open_size_y > 0 else 0
    return mid_lip_move_ratio * lip_height
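A worked example of the drop-size rule above (editor's note): the ratio is open_size_y / 80, so a full opening of 40 px gives 0.5, i.e. the mid lip drops by half the lip height.

print(get_bottom_mid_drop_size(40, 30))  # 0.5 * 30 = 15.0
print(get_bottom_mid_drop_size(0, 30))   # closed mouth: 0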


def fade_in_x(img, size):
    if size == 0:
        return
    per_pixel = 1.0 / size
    for y in range(img.shape[0]):
        for x in range(img.shape[1]):
            if x < size:
                alpha_base = per_pixel * x
                # scale down the alpha value for this pixel
                img[y, x, 3] = img[y, x, 3] * alpha_base

def fade_out_x(img, size):
    if size == 0:
        return
    per_pixel = 1.0 / size
    w = img.shape[1]

    for y in range(img.shape[0]):
        for x in range(img.shape[1]):
            if x > w - size:
                diff = x - (w - size)
                alpha_base = 1.0 - (per_pixel * diff)
                # scale down the alpha value for this pixel
                img[y, x, 3] = img[y, x, 3] * alpha_base


def alpha_blend_with_image2_alpha(image1, image2):
    return cv2.addWeighted(image1, 1, image2, 1, 0)

def numpy_alpha_blend_with_image2_alpha(image1, image2, invert=False):
    """
    Alpha-blend image1 with image2 using image2's alpha channel.
    """
    # check the image sizes and resize image1 if they differ
    if image1.shape[:2] != image2.shape[:2]:
        image1 = cv2.resize(image1, (image2.shape[1], image2.shape[0]))

    src1 = np.array(image1)
    src2 = np.array(image2)
    mask1 = np.array(image2[:, :, 3])
    mask1 = mask1 / 255
    mask1 = np.expand_dims(mask1, axis=-1)
    if invert:
        dst = src1 * (1 - mask1) + src2 * mask1
    else:
        dst = src1 * mask1 + src2 * (1 - mask1)
    dst = dst.astype(np.uint8)
    return dst

def distance_2d(point1, point2):
    return math.sqrt((point2[0] - point1[0])**2 + (point2[1] - point1[1])**2)

# points[index][x=0, y=1]; for the index layout see the landmark image from plot2.py
def get_top_lip_thicks(landmarks_list, is_distance_base=False):
    points = get_landmark_points(landmarks_list, POINTS_TOP_LIP)
    if is_distance_base:
        return (distance_2d(points[10], points[2]), distance_2d(points[9], points[3]), distance_2d(points[8], points[4]))
    return (points[10][1] - points[2][1], points[9][1] - points[3][1], points[8][1] - points[4][1])


def scale_down_values(data, scale_factor=0.25):
    """
    Scales down the values in a list of dictionaries by a given scale factor.

    Parameters:
    - data: A list of dictionaries where each dictionary represents facial landmarks.
    - scale_factor: The factor by which to scale down the values. Default is 0.25 (1/4).

    Returns:
    - A new list of dictionaries with scaled down values.
    """
    scaled_data = []
    for item in data:
        scaled_item = {}
        for key, values in item.items():
            scaled_values = [(int(x * scale_factor), int(y * scale_factor)) for x, y in values]
            scaled_item[key] = scaled_values
        scaled_data.append(scaled_item)
    return scaled_data
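Closing out lip_utils.py, a usage sketch for the scaling helper (editor's example):

data = [{"top_lip": [(100, 200), (120, 180)]}]
print(scale_down_values(data, scale_factor=0.25))
# -> [{'top_lip': [(25, 50), (30, 45)]}]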
mp_box.py
ADDED
@@ -0,0 +1,133 @@
import mediapipe as mp
from mediapipe.tasks import python
from mediapipe.tasks.python import vision
from mediapipe.framework.formats import landmark_pb2
from mediapipe import solutions
import numpy as np

# convert X,Y,W,H to x1,y1,x2,y2 (left-top / right-bottom style)
def xywh_to_xyxy(box):
    return [box[0], box[1], box[0]+box[2], box[1]+box[3]]

def convert_to_box(face_landmarks_list, indices, w=1024, h=1024):
    x1 = w
    y1 = h
    x2 = 0
    y2 = 0
    for index in indices:
        x = min(w, max(0, (face_landmarks_list[0][index].x * w)))
        y = min(h, max(0, (face_landmarks_list[0][index].y * h)))
        if x < x1:
            x1 = x
        if y < y1:
            y1 = y
        if x > x2:
            x2 = x
        if y > y2:
            y2 = y

    return [int(x1), int(y1), int(x2-x1), int(y2-y1)]


def box_to_square(bbox):
    box = list(bbox)
    if box[2] > box[3]:
        diff = box[2] - box[3]
        box[3] += diff
        box[1] -= diff / 2
    elif box[3] > box[2]:
        diff = box[3] - box[2]
        box[2] += diff
        box[0] -= diff / 2
    return box
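To illustrate the two box helpers (editor's example):

box = [10, 20, 100, 50]           # X, Y, W, H
print(xywh_to_xyxy(box))          # [10, 20, 110, 70]
print(box_to_square(box))         # [10, -5.0, 100, 100]: height padded to match width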


def face_landmark_result_to_box(face_landmarker_result, width=1024, height=1024):
    face_landmarks_list = face_landmarker_result.face_landmarks

    full_indices = list(range(456))

    MIDDLE_FOREHEAD = 151
    BOTTOM_CHIN_EX = 152
    BOTTOM_CHIN = 175
    CHIN_TO_MIDDLE_FOREHEAD = [200, 14, 1, 6, 18, 9]
    MOUTH_BOTTOM = [202, 200, 422]
    EYEBROW_CHEEK_LEFT_RIGHT = [46, 226, 50, 1, 280, 446, 276]

    LEFT_HEAD_OUTER_EX = 251  # on a side face almost the same as full
    LEFT_HEAD_OUTER = 301
    LEFT_EYE_OUTER_EX = 356
    LEFT_EYE_OUTER = 264
    LEFT_MOUTH_OUTER_EX = 288
    LEFT_MOUTH_OUTER = 288
    LEFT_CHIN_OUTER = 435
    RIGHT_HEAD_OUTER_EX = 21
    RIGHT_HEAD_OUTER = 71
    RIGHT_EYE_OUTER_EX = 127
    RIGHT_EYE_OUTER = 34
    RIGHT_MOUTH_OUTER_EX = 58
    RIGHT_MOUTH_OUTER = 215
    RIGHT_CHIN_OUTER = 150

    # TODO name these lines
    min_indices = CHIN_TO_MIDDLE_FOREHEAD + EYEBROW_CHEEK_LEFT_RIGHT + MOUTH_BOTTOM

    chin_to_brow_indices = [LEFT_CHIN_OUTER, LEFT_MOUTH_OUTER, LEFT_EYE_OUTER, LEFT_HEAD_OUTER, MIDDLE_FOREHEAD, RIGHT_HEAD_OUTER, RIGHT_EYE_OUTER, RIGHT_MOUTH_OUTER, RIGHT_CHIN_OUTER, BOTTOM_CHIN] + min_indices

    box1 = convert_to_box(face_landmarks_list, min_indices, width, height)
    box2 = convert_to_box(face_landmarks_list, chin_to_brow_indices, width, height)
    box3 = convert_to_box(face_landmarks_list, full_indices, width, height)

    return [box1, box2, box3, box_to_square(box1), box_to_square(box2), box_to_square(box3)]


def draw_landmarks_on_image(detection_result, rgb_image):
    face_landmarks_list = detection_result.face_landmarks
    annotated_image = np.copy(rgb_image)

    # loop through the detected faces to visualize
    for idx in range(len(face_landmarks_list)):
        face_landmarks = face_landmarks_list[idx]

        # draw the face landmarks
        face_landmarks_proto = landmark_pb2.NormalizedLandmarkList()
        face_landmarks_proto.landmark.extend([
            landmark_pb2.NormalizedLandmark(x=landmark.x, y=landmark.y, z=landmark.z) for landmark in face_landmarks
        ])

        solutions.drawing_utils.draw_landmarks(
            image=annotated_image,
            landmark_list=face_landmarks_proto,
            connections=mp.solutions.face_mesh.FACEMESH_TESSELATION,
            landmark_drawing_spec=None,
            connection_drawing_spec=mp.solutions.drawing_styles
            .get_default_face_mesh_tesselation_style())

    return annotated_image

def mediapipe_to_box(image_data, model_path="face_landmarker.task"):
    BaseOptions = mp.tasks.BaseOptions
    FaceLandmarker = mp.tasks.vision.FaceLandmarker
    FaceLandmarkerOptions = mp.tasks.vision.FaceLandmarkerOptions
    VisionRunningMode = mp.tasks.vision.RunningMode

    options = FaceLandmarkerOptions(
        base_options=BaseOptions(model_asset_path=model_path),
        running_mode=VisionRunningMode.IMAGE,
        min_face_detection_confidence=0, min_face_presence_confidence=0
    )

    with FaceLandmarker.create_from_options(options) as landmarker:
        if isinstance(image_data, str):
            mp_image = mp.Image.create_from_file(image_data)
        else:
            mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=np.asarray(image_data))
        face_landmarker_result = landmarker.detect(mp_image)
        boxes = face_landmark_result_to_box(face_landmarker_result, mp_image.width, mp_image.height)
    return boxes, mp_image, face_landmarker_result
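A minimal driver for mp_box.py (editor's sketch; it assumes face_landmarker.task sits next to the script, as in this repo, and "input.jpg" is a placeholder):

boxes, mp_image, result = mediapipe_to_box("input.jpg")
box_face, box_chin_to_brow, box_full = boxes[0], boxes[1], boxes[2]
# boxes[3:6] are the same three boxes squared with box_to_square
annotated = draw_landmarks_on_image(result, mp_image.numpy_view())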
mp_constants.py
ADDED
@@ -0,0 +1,320 @@

# contour
POINT_LEFT_HEAD_OUTER_EX = 251  # on a side face almost the same as full
POINT_LEFT_HEAD_OUTER = 301
POINT_LEFT_EYE_OUTER_EX = 356
POINT_LEFT_EYE_OUTER = 264
POINT_LEFT_MOUTH_OUTER_EX = 288
POINT_LEFT_MOUTH_OUTER = 435
POINT_LEFT_CHIN_OUTER = 379
POINT_RIGHT_HEAD_OUTER_EX = 21
POINT_RIGHT_HEAD_OUTER = 71
POINT_RIGHT_EYE_OUTER_EX = 127
POINT_RIGHT_EYE_OUTER = 34
POINT_RIGHT_MOUTH_OUTER_EX = 58
POINT_RIGHT_MOUTH_OUTER = 215
POINT_RIGHT_CHIN_OUTER = 150
POINT_CHIN_BOTTOM = 152

POINT_FOREHEAD_TOP = 10

POINT_UPPER_LIP_CENTER_BOTTOM = 13
POINT_LOWER_LIP_CENTER_TOP = 14
POINT_LOWER_LIP_CENTER_BOTTOM = 17
POINT_NOSE_CENTER_MIDDLE = 5

LINE_RIGHT_CONTOUR_OUTER_EYE_TO_CHIN = [127,234,93,132,58,172,136,150,149,176,148,152]
LINE_RIGHT_CONTOUR_EYE_TO_CHIN = [34,227,137,177,215,138,135,169,170,140,171,175]
LINE_RIGHT_CONTOUR_INNER_EYE_TO_CHIN = [143,116,123,147,213,192,214,210,211,32,208,199]


LINE_RIGHT_CONTOUR_0 = [152,175,199]
LINE_RIGHT_CONTOUR_1 = [148,171,208]
LINE_RIGHT_CONTOUR_2 = [176,140,32]
LINE_RIGHT_CONTOUR_3 = [149,170,211]
LINE_RIGHT_CONTOUR_4 = [150,169,210]
LINE_RIGHT_CONTOUR_5 = [136,135,214]
LINE_RIGHT_CONTOUR_6 = [172,138,192]
LINE_RIGHT_CONTOUR_7 = [58,215,213]
LINE_RIGHT_CONTOUR_8 = [132,177,147]
LINE_RIGHT_CONTOUR_9 = [93,137,123]
LINE_RIGHT_CONTOUR_10 = [234,227,116]
LINE_RIGHT_CONTOUR_11 = [127,34,143]

LANDMARK_68_CONTOUR_1 = LINE_RIGHT_CONTOUR_11
LANDMARK_68_CONTOUR_2_PART1 = LINE_RIGHT_CONTOUR_10
LANDMARK_68_CONTOUR_2_PART2 = LINE_RIGHT_CONTOUR_9
LANDMARK_68_CONTOUR_3 = LINE_RIGHT_CONTOUR_8
LANDMARK_68_CONTOUR_4 = LINE_RIGHT_CONTOUR_7
LANDMARK_68_CONTOUR_5 = LINE_RIGHT_CONTOUR_6
LANDMARK_68_CONTOUR_6_PART1 = LINE_RIGHT_CONTOUR_5
LANDMARK_68_CONTOUR_6_PART2 = LINE_RIGHT_CONTOUR_4

LANDMARK_68_CONTOUR_7 = LINE_RIGHT_CONTOUR_3
LANDMARK_68_CONTOUR_8_PART1 = LINE_RIGHT_CONTOUR_2
LANDMARK_68_CONTOUR_8_PART2 = LINE_RIGHT_CONTOUR_1
LANDMARK_68_CONTOUR_9 = LINE_RIGHT_CONTOUR_0


LINE_LEFT_CONTOUR_1 = [377,396,428]
LINE_LEFT_CONTOUR_2 = [400,369,262]
LINE_LEFT_CONTOUR_3 = [378,395,431]
LINE_LEFT_CONTOUR_4 = [379,394,430]
LINE_LEFT_CONTOUR_5 = [365,364,434]
LINE_LEFT_CONTOUR_6 = [397,367,416]
LINE_LEFT_CONTOUR_7 = [288,435,433]
LINE_LEFT_CONTOUR_8 = [361,401,376]
LINE_LEFT_CONTOUR_9 = [323,366,352]
LINE_LEFT_CONTOUR_10 = [454,447,345]
LINE_LEFT_CONTOUR_11 = [356,264,372]
LINE_LEFT_CONTOUR_12 = [389,368,383]

LANDMARK_68_CONTOUR_10 = LINE_LEFT_CONTOUR_1
LANDMARK_68_CONTOUR_11_PART1 = LINE_LEFT_CONTOUR_2
LANDMARK_68_CONTOUR_11_PART2 = LINE_LEFT_CONTOUR_3
LANDMARK_68_CONTOUR_12 = LINE_LEFT_CONTOUR_4
LANDMARK_68_CONTOUR_13 = LINE_LEFT_CONTOUR_5
LANDMARK_68_CONTOUR_14 = LINE_LEFT_CONTOUR_6
LANDMARK_68_CONTOUR_15_PART1 = LINE_LEFT_CONTOUR_7
LANDMARK_68_CONTOUR_15_PART2 = LINE_LEFT_CONTOUR_8

LANDMARK_68_CONTOUR_16 = LINE_LEFT_CONTOUR_9
LANDMARK_68_CONTOUR_17_PART1 = LINE_LEFT_CONTOUR_10
LANDMARK_68_CONTOUR_17_PART2 = LINE_LEFT_CONTOUR_11
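These LANDMARK_68_* entries map one dlib-style 68-point landmark to one or more MediaPipe mesh indices. When several indices are listed, a natural reading is to average the corresponding mesh points; the helper below is an editor's sketch of that interpretation, not code from this commit:

import numpy as np

def resolve_landmark(face_landmarks, indices, w, h):
    # average the listed mesh points into a single pixel coordinate
    pts = [(face_landmarks[i].x * w, face_landmarks[i].y * h) for i in indices]
    return tuple(np.mean(pts, axis=0))

# e.g. resolve_landmark(result.face_landmarks[0], LANDMARK_68_CONTOUR_9, 1024, 1024)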
LANDMARK_68_RIGHT_EYEBROW_18 = [70,46]  # upper, lower
LANDMARK_68_RIGHT_EYEBROW_19 = [63,53]
LANDMARK_68_RIGHT_EYEBROW_20 = [105,52]
LANDMARK_68_RIGHT_EYEBROW_21 = [66,65]
LANDMARK_68_RIGHT_EYEBROW_22 = [107,55]

LANDMARK_68_LEFT_EYEBROW_23 = [336,285]  # upper, lower
LANDMARK_68_LEFT_EYEBROW_24 = [296,295]
LANDMARK_68_LEFT_EYEBROW_25 = [334,282]
LANDMARK_68_LEFT_EYEBROW_26 = [293,283]
LANDMARK_68_LEFT_EYEBROW_27 = [300,276]

POINT_NOSE_0 = 8
POINT_NOSE_1 = 168
POINT_NOSE_2 = 6
POINT_NOSE_3 = 197
POINT_NOSE_4 = 195
POINT_NOSE_5 = 5
POINT_NOSE_6 = 4
POINT_NOSE_7 = 19
POINT_NOSE_8 = 94
POINT_NOSE_9 = 2

# side
POINT_NOSE_10 = 98
POINT_NOSE_11 = 97
POINT_NOSE_12 = 326
POINT_NOSE_13 = 327

LANDMARK_68_VERTICAL_NOSE_28 = [8,168]
LANDMARK_68_VERTICAL_NOSE_29 = [6]
LANDMARK_68_VERTICAL_NOSE_30 = [197,195]
LANDMARK_68_VERTICAL_NOSE_31 = [5,4]

LANDMARK_68_HORIZONTAL_NOSE_32 = [POINT_NOSE_10]
LANDMARK_68_HORIZONTAL_NOSE_33 = [POINT_NOSE_11]
LANDMARK_68_HORIZONTAL_NOSE_34 = [POINT_NOSE_9]
LANDMARK_68_HORIZONTAL_NOSE_35 = [POINT_NOSE_12]
LANDMARK_68_HORIZONTAL_NOSE_36 = [POINT_NOSE_13]


LINE_VERTICAL_NOSE = [POINT_NOSE_0, POINT_NOSE_1, POINT_NOSE_2, POINT_NOSE_3, POINT_NOSE_4, POINT_NOSE_5, POINT_NOSE_6, POINT_NOSE_7, POINT_NOSE_8, POINT_NOSE_9]
LINE_HORIZONTAL_NOSE = [POINT_NOSE_10, POINT_NOSE_11, POINT_NOSE_9, POINT_NOSE_12, POINT_NOSE_13]

### EYES
POINT_RIGHT_UPPER_INNER_EYE_1 = 33
POINT_RIGHT_UPPER_INNER_EYE_2 = 246
POINT_RIGHT_UPPER_INNER_EYE_3 = 161
POINT_RIGHT_UPPER_INNER_EYE_4 = 160
POINT_RIGHT_UPPER_INNER_EYE_5 = 159
POINT_RIGHT_UPPER_INNER_EYE_6 = 158
POINT_RIGHT_UPPER_INNER_EYE_7 = 157
POINT_RIGHT_UPPER_INNER_EYE_8 = 173
POINT_RIGHT_UPPER_INNER_EYE_9 = 133

LINE_RIGHT_UPPER_INNER_EYE = [POINT_RIGHT_UPPER_INNER_EYE_1, POINT_RIGHT_UPPER_INNER_EYE_2, POINT_RIGHT_UPPER_INNER_EYE_3, POINT_RIGHT_UPPER_INNER_EYE_4, POINT_RIGHT_UPPER_INNER_EYE_5, POINT_RIGHT_UPPER_INNER_EYE_6, POINT_RIGHT_UPPER_INNER_EYE_7, POINT_RIGHT_UPPER_INNER_EYE_8, POINT_RIGHT_UPPER_INNER_EYE_9]

POINT_RIGHT_LOWER_INNER_EYE_1 = 155
POINT_RIGHT_LOWER_INNER_EYE_2 = 154
POINT_RIGHT_LOWER_INNER_EYE_3 = 153
POINT_RIGHT_LOWER_INNER_EYE_4 = 145
POINT_RIGHT_LOWER_INNER_EYE_5 = 144
POINT_RIGHT_LOWER_INNER_EYE_6 = 163
POINT_RIGHT_LOWER_INNER_EYE_7 = 7

LINE_RIGHT_LOWER_INNER_EYE = [POINT_RIGHT_UPPER_INNER_EYE_9, POINT_RIGHT_LOWER_INNER_EYE_1, POINT_RIGHT_LOWER_INNER_EYE_2, POINT_RIGHT_LOWER_INNER_EYE_3, POINT_RIGHT_LOWER_INNER_EYE_4, POINT_RIGHT_LOWER_INNER_EYE_5, POINT_RIGHT_LOWER_INNER_EYE_6, POINT_RIGHT_LOWER_INNER_EYE_7, POINT_RIGHT_UPPER_INNER_EYE_1]


POINT_RIGHT_UPPER_OUTER_EYE_1 = 130
POINT_RIGHT_UPPER_OUTER_EYE_2 = 247
POINT_RIGHT_UPPER_OUTER_EYE_3 = 30
POINT_RIGHT_UPPER_OUTER_EYE_4 = 29
POINT_RIGHT_UPPER_OUTER_EYE_5 = 27
POINT_RIGHT_UPPER_OUTER_EYE_6 = 28
POINT_RIGHT_UPPER_OUTER_EYE_7 = 56
POINT_RIGHT_UPPER_OUTER_EYE_8 = 190
POINT_RIGHT_UPPER_OUTER_EYE_9 = 243

LINE_RIGHT_UPPER_OUTER_EYE = [POINT_RIGHT_UPPER_OUTER_EYE_1, POINT_RIGHT_UPPER_OUTER_EYE_2, POINT_RIGHT_UPPER_OUTER_EYE_3, POINT_RIGHT_UPPER_OUTER_EYE_4, POINT_RIGHT_UPPER_OUTER_EYE_5, POINT_RIGHT_UPPER_OUTER_EYE_6, POINT_RIGHT_UPPER_OUTER_EYE_7, POINT_RIGHT_UPPER_OUTER_EYE_8, POINT_RIGHT_UPPER_OUTER_EYE_9]

LINE_RIGHT_UPPER_MIXED_EYE = [  # the first eye_1/eye_2 entries intentionally pair inner with outer points to move the lid up
    [POINT_RIGHT_UPPER_INNER_EYE_1, POINT_RIGHT_UPPER_OUTER_EYE_2],
    [POINT_RIGHT_UPPER_INNER_EYE_2, POINT_RIGHT_UPPER_OUTER_EYE_2],
    [POINT_RIGHT_UPPER_INNER_EYE_3, POINT_RIGHT_UPPER_OUTER_EYE_3],
    [POINT_RIGHT_UPPER_INNER_EYE_4, POINT_RIGHT_UPPER_OUTER_EYE_4],
    [POINT_RIGHT_UPPER_INNER_EYE_5, POINT_RIGHT_UPPER_OUTER_EYE_5],
    [POINT_RIGHT_UPPER_INNER_EYE_6, POINT_RIGHT_UPPER_OUTER_EYE_6],
    [POINT_RIGHT_UPPER_INNER_EYE_8],
    [POINT_RIGHT_UPPER_INNER_EYE_8, POINT_RIGHT_UPPER_INNER_EYE_9]  # not sure whether POINT_RIGHT_LOWER_INNER_EYE_1 is also needed here
]

LINE_RIGHT_UPPER_MIXED_EYE2 = [  # the first eye_1/eye_2 entries intentionally pair inner with outer points to move the lid up
    [POINT_RIGHT_UPPER_INNER_EYE_1, POINT_RIGHT_UPPER_INNER_EYE_1, POINT_RIGHT_UPPER_OUTER_EYE_2],
    [POINT_RIGHT_UPPER_INNER_EYE_2, POINT_RIGHT_UPPER_INNER_EYE_2, POINT_RIGHT_UPPER_OUTER_EYE_2],
    [POINT_RIGHT_UPPER_INNER_EYE_3, POINT_RIGHT_UPPER_INNER_EYE_3, POINT_RIGHT_UPPER_OUTER_EYE_3],
    [POINT_RIGHT_UPPER_INNER_EYE_4, POINT_RIGHT_UPPER_INNER_EYE_4, POINT_RIGHT_UPPER_OUTER_EYE_4],
    [POINT_RIGHT_UPPER_INNER_EYE_5, POINT_RIGHT_UPPER_INNER_EYE_5, POINT_RIGHT_UPPER_OUTER_EYE_5],
    [POINT_RIGHT_UPPER_INNER_EYE_6, POINT_RIGHT_UPPER_INNER_EYE_6, POINT_RIGHT_UPPER_OUTER_EYE_6],
    [POINT_RIGHT_UPPER_INNER_EYE_8],
    [POINT_RIGHT_UPPER_INNER_EYE_8, POINT_RIGHT_UPPER_INNER_EYE_9]  # not sure whether POINT_RIGHT_LOWER_INNER_EYE_1 is also needed here
]

POINT_RIGHT_LOWER_OUTER_EYE_1 = 112
POINT_RIGHT_LOWER_OUTER_EYE_2 = 26
POINT_RIGHT_LOWER_OUTER_EYE_3 = 22
POINT_RIGHT_LOWER_OUTER_EYE_4 = 23
POINT_RIGHT_LOWER_OUTER_EYE_5 = 24
POINT_RIGHT_LOWER_OUTER_EYE_6 = 110
POINT_RIGHT_LOWER_OUTER_EYE_7 = 25

LINE_RIGHT_LOWER_OUTER_EYE = [POINT_RIGHT_UPPER_OUTER_EYE_9, POINT_RIGHT_LOWER_OUTER_EYE_1, POINT_RIGHT_LOWER_OUTER_EYE_2, POINT_RIGHT_LOWER_OUTER_EYE_3, POINT_RIGHT_LOWER_OUTER_EYE_4, POINT_RIGHT_LOWER_OUTER_EYE_5, POINT_RIGHT_LOWER_OUTER_EYE_6, POINT_RIGHT_LOWER_OUTER_EYE_7, POINT_RIGHT_UPPER_OUTER_EYE_1]

LINE_RIGHT_LOWER_MIXED_EYE = [
    [POINT_RIGHT_UPPER_INNER_EYE_8, POINT_RIGHT_UPPER_INNER_EYE_9, POINT_RIGHT_LOWER_INNER_EYE_1],
    [POINT_RIGHT_LOWER_INNER_EYE_2],
    POINT_RIGHT_LOWER_INNER_EYE_3, POINT_RIGHT_LOWER_INNER_EYE_4, POINT_RIGHT_LOWER_INNER_EYE_5, POINT_RIGHT_LOWER_INNER_EYE_6, POINT_RIGHT_LOWER_INNER_EYE_7,
    [POINT_RIGHT_UPPER_INNER_EYE_1, POINT_RIGHT_UPPER_OUTER_EYE_2]  # combine 1 and 2 to move up
]


POINT_LEFT_UPPER_INNER_EYE_1 = 362
POINT_LEFT_UPPER_INNER_EYE_2 = 398
POINT_LEFT_UPPER_INNER_EYE_3 = 384
POINT_LEFT_UPPER_INNER_EYE_4 = 385
POINT_LEFT_UPPER_INNER_EYE_5 = 386
POINT_LEFT_UPPER_INNER_EYE_6 = 387
POINT_LEFT_UPPER_INNER_EYE_7 = 388
POINT_LEFT_UPPER_INNER_EYE_8 = 466
POINT_LEFT_UPPER_INNER_EYE_9 = 263

LINE_LEFT_UPPER_INNER_EYE = [POINT_LEFT_UPPER_INNER_EYE_1, POINT_LEFT_UPPER_INNER_EYE_2, POINT_LEFT_UPPER_INNER_EYE_3, POINT_LEFT_UPPER_INNER_EYE_4, POINT_LEFT_UPPER_INNER_EYE_5, POINT_LEFT_UPPER_INNER_EYE_6, POINT_LEFT_UPPER_INNER_EYE_7, POINT_LEFT_UPPER_INNER_EYE_8, POINT_LEFT_UPPER_INNER_EYE_9]
LINE_LEFT_UPPER_INNER_EYE2 = [POINT_LEFT_UPPER_INNER_EYE_1, POINT_LEFT_UPPER_INNER_EYE_2, POINT_LEFT_UPPER_INNER_EYE_3, POINT_LEFT_UPPER_INNER_EYE_4, POINT_LEFT_UPPER_INNER_EYE_5, POINT_LEFT_UPPER_INNER_EYE_6, POINT_LEFT_UPPER_INNER_EYE_7, POINT_LEFT_UPPER_INNER_EYE_8, POINT_LEFT_UPPER_INNER_EYE_9]


POINT_LEFT_LOWER_INNER_EYE_1 = 249
POINT_LEFT_LOWER_INNER_EYE_2 = 390
POINT_LEFT_LOWER_INNER_EYE_3 = 373
POINT_LEFT_LOWER_INNER_EYE_4 = 374
POINT_LEFT_LOWER_INNER_EYE_5 = 380
POINT_LEFT_LOWER_INNER_EYE_6 = 381
POINT_LEFT_LOWER_INNER_EYE_7 = 382

LINE_LEFT_LOWER_INNER_EYE = [POINT_LEFT_UPPER_INNER_EYE_9, POINT_LEFT_LOWER_INNER_EYE_2, POINT_LEFT_LOWER_INNER_EYE_3, POINT_LEFT_LOWER_INNER_EYE_4, POINT_LEFT_LOWER_INNER_EYE_5, POINT_LEFT_LOWER_INNER_EYE_6, POINT_LEFT_LOWER_INNER_EYE_7, POINT_LEFT_UPPER_INNER_EYE_1]

# outer
POINT_LEFT_UPPER_OUTER_EYE_1 = 463
POINT_LEFT_UPPER_OUTER_EYE_2 = 414
POINT_LEFT_UPPER_OUTER_EYE_3 = 286
POINT_LEFT_UPPER_OUTER_EYE_4 = 258
POINT_LEFT_UPPER_OUTER_EYE_5 = 257
POINT_LEFT_UPPER_OUTER_EYE_6 = 259
POINT_LEFT_UPPER_OUTER_EYE_7 = 260
POINT_LEFT_UPPER_OUTER_EYE_8 = 467
POINT_LEFT_UPPER_OUTER_EYE_9 = 359

LINE_LEFT_UPPER_OUTER_EYE = [POINT_LEFT_UPPER_OUTER_EYE_1, POINT_LEFT_UPPER_OUTER_EYE_2, POINT_LEFT_UPPER_OUTER_EYE_3, POINT_LEFT_UPPER_OUTER_EYE_4, POINT_LEFT_UPPER_OUTER_EYE_5, POINT_LEFT_UPPER_OUTER_EYE_6, POINT_LEFT_UPPER_OUTER_EYE_7, POINT_LEFT_UPPER_OUTER_EYE_8, POINT_LEFT_UPPER_OUTER_EYE_9]


POINT_LEFT_LOWER_OUTER_EYE_1 = 255
POINT_LEFT_LOWER_OUTER_EYE_2 = 339
POINT_LEFT_LOWER_OUTER_EYE_3 = 254
POINT_LEFT_LOWER_OUTER_EYE_4 = 253
POINT_LEFT_LOWER_OUTER_EYE_5 = 252
POINT_LEFT_LOWER_OUTER_EYE_6 = 256
POINT_LEFT_LOWER_OUTER_EYE_7 = 341

LINE_LEFT_LOWER_OUTER_EYE = [POINT_LEFT_UPPER_OUTER_EYE_9, POINT_LEFT_LOWER_OUTER_EYE_1, POINT_LEFT_LOWER_OUTER_EYE_2, POINT_LEFT_LOWER_OUTER_EYE_3, POINT_LEFT_LOWER_OUTER_EYE_4, POINT_LEFT_LOWER_OUTER_EYE_5, POINT_LEFT_LOWER_OUTER_EYE_6, POINT_LEFT_LOWER_OUTER_EYE_7, POINT_LEFT_UPPER_OUTER_EYE_1]

LINE_LEFT_UPPER_MIXED_EYE = [  # the first eye_1/eye_2 entries intentionally pair inner with outer points to move the lid up
    [POINT_LEFT_UPPER_INNER_EYE_1, POINT_LEFT_UPPER_INNER_EYE_2, POINT_LEFT_LOWER_INNER_EYE_7],
    [POINT_LEFT_UPPER_INNER_EYE_2, POINT_LEFT_UPPER_OUTER_EYE_2],
    [POINT_LEFT_UPPER_INNER_EYE_3, POINT_LEFT_UPPER_INNER_EYE_3, POINT_LEFT_UPPER_OUTER_EYE_3],
    [POINT_LEFT_UPPER_INNER_EYE_4, POINT_LEFT_UPPER_OUTER_EYE_4],
    [POINT_LEFT_UPPER_INNER_EYE_5, POINT_LEFT_UPPER_OUTER_EYE_5],
    [POINT_LEFT_UPPER_INNER_EYE_6, POINT_LEFT_UPPER_OUTER_EYE_6],
    [POINT_LEFT_UPPER_INNER_EYE_8],
    [POINT_LEFT_UPPER_OUTER_EYE_8, POINT_LEFT_UPPER_INNER_EYE_9]
]

LINE_LEFT_UPPER_MIXED_EYE2 = [  # the first eye_1/eye_2 entries intentionally pair inner with outer points to move the lid up
    [POINT_LEFT_UPPER_INNER_EYE_1, POINT_LEFT_UPPER_INNER_EYE_1, POINT_LEFT_UPPER_INNER_EYE_2, POINT_LEFT_LOWER_INNER_EYE_7],
    [POINT_LEFT_UPPER_INNER_EYE_2, POINT_LEFT_UPPER_INNER_EYE_2, POINT_LEFT_UPPER_OUTER_EYE_2],
    [POINT_LEFT_UPPER_INNER_EYE_3, POINT_LEFT_UPPER_INNER_EYE_3, POINT_LEFT_UPPER_INNER_EYE_3, POINT_LEFT_UPPER_OUTER_EYE_3],
    [POINT_LEFT_UPPER_INNER_EYE_4, POINT_LEFT_UPPER_INNER_EYE_4, POINT_LEFT_UPPER_OUTER_EYE_4],
    [POINT_LEFT_UPPER_INNER_EYE_5, POINT_LEFT_UPPER_INNER_EYE_5, POINT_LEFT_UPPER_OUTER_EYE_5],
    [POINT_LEFT_UPPER_INNER_EYE_6, POINT_LEFT_UPPER_INNER_EYE_6, POINT_LEFT_UPPER_OUTER_EYE_6],
    [POINT_LEFT_UPPER_INNER_EYE_8],
    [POINT_LEFT_UPPER_OUTER_EYE_8, POINT_LEFT_UPPER_INNER_EYE_9]
]

LINE_LEFT_LOWER_MIXED_EYE = [
    [POINT_LEFT_UPPER_OUTER_EYE_8, POINT_LEFT_UPPER_INNER_EYE_9],
    [POINT_LEFT_LOWER_INNER_EYE_2],
    POINT_LEFT_LOWER_INNER_EYE_3, POINT_LEFT_LOWER_INNER_EYE_4, POINT_LEFT_LOWER_INNER_EYE_5, POINT_LEFT_LOWER_INNER_EYE_6, POINT_LEFT_LOWER_INNER_EYE_7,
    [POINT_LEFT_UPPER_INNER_EYE_1, POINT_LEFT_UPPER_INNER_EYE_2, POINT_LEFT_LOWER_INNER_EYE_7]  # combine 1 and 2 to move up
]


# LIP
LINE_RIGHT_UPPER_OUTER_LIP = [61, 185, 40, 39, 37, 0]
LINE_LEFT_UPPER_OUTER_LIP = [0, 267, 269, 270, 409, 291]


LINE_LOWER_OUTER_LIP = [291,  # upper
    375, 321, 405, 314, 17, 84, 181, 91, 146,
    61  # upper
]

LINE_UPPER_INNER_LIP = [61, 185, 40, 39, 37, 0, 267, 269, 270, 409, 291]

LINE_LOWER_INNER_LIP = [291,  # upper
    375, 321, 405, 314, 17, 84, 181, 91, 146,
    61  # upper
]

LANDMARK_68_UPPER_OUTER_LIP_49 = [61]
LANDMARK_68_UPPER_OUTER_LIP_50 = [40, 39]
LANDMARK_68_UPPER_OUTER_LIP_51 = [37]
LANDMARK_68_UPPER_OUTER_LIP_52 = [0]
LANDMARK_68_UPPER_OUTER_LIP_53 = [267]
LANDMARK_68_UPPER_OUTER_LIP_54 = [270, 269]
LANDMARK_68_UPPER_OUTER_LIP_55 = [291]

LANDMARK_68_LOWER_OUTER_LIP_56 = [375, 321]
LANDMARK_68_LOWER_OUTER_LIP_57 = [405, 314]
LANDMARK_68_LOWER_OUTER_LIP_58 = [17]
LANDMARK_68_LOWER_OUTER_LIP_59 = [84, 181]
LANDMARK_68_LOWER_OUTER_LIP_60 = [146, 91]

LANDMARK_68_UPPER_INNER_LIP_61 = [78]
LANDMARK_68_UPPER_INNER_LIP_62 = [81]
LANDMARK_68_UPPER_INNER_LIP_63 = [13]
LANDMARK_68_UPPER_INNER_LIP_64 = [311]
LANDMARK_68_UPPER_INNER_LIP_65 = [308]

LANDMARK_68_LOWER_INNER_LIP_66 = [402]
LANDMARK_68_LOWER_INNER_LIP_67 = [14]
LANDMARK_68_LOWER_INNER_LIP_68 = [178]