Commit 10ba879 • committed by root
1 Parent(s): 2e4018d

initial commit of

Browse files (this view is limited to 50 files because it contains too many changes; see the raw diff)

- LICENSE +21 -0
- app.py +215 -0
- assets/.gitignore +2 -0
- assets/gradio_description_animation.md +16 -0
- assets/gradio_description_retargeting.md +4 -0
- assets/gradio_description_upload.md +2 -0
- assets/gradio_title.md +11 -0
- inference.py +58 -0
- pretrained_weights/.gitkeep +0 -0
- readme.md +206 -0
- requirements.txt +22 -0
- speed.py +195 -0
- src/config/__init__.py +0 -0
- src/config/argument_config.py +48 -0
- src/config/base_config.py +29 -0
- src/config/crop_config.py +29 -0
- src/config/inference_config.py +52 -0
- src/config/models.yaml +43 -0
- src/gradio_pipeline.py +117 -0
- src/live_portrait_pipeline.py +285 -0
- src/live_portrait_wrapper.py +319 -0
- src/modules/__init__.py +0 -0
- src/modules/appearance_feature_extractor.py +48 -0
- src/modules/convnextv2.py +149 -0
- src/modules/dense_motion.py +104 -0
- src/modules/motion_extractor.py +35 -0
- src/modules/spade_generator.py +59 -0
- src/modules/stitching_retargeting_network.py +38 -0
- src/modules/util.py +441 -0
- src/modules/warping_network.py +77 -0
- src/utils/__init__.py +0 -0
- src/utils/camera.py +73 -0
- src/utils/crop.py +398 -0
- src/utils/cropper.py +196 -0
- src/utils/dependencies/insightface/__init__.py +20 -0
- src/utils/dependencies/insightface/app/__init__.py +1 -0
- src/utils/dependencies/insightface/app/common.py +49 -0
- src/utils/dependencies/insightface/app/face_analysis.py +110 -0
- src/utils/dependencies/insightface/data/__init__.py +2 -0
- src/utils/dependencies/insightface/data/image.py +27 -0
- src/utils/dependencies/insightface/data/images/Tom_Hanks_54745.png +0 -0
- src/utils/dependencies/insightface/data/images/mask_black.jpg +0 -0
- src/utils/dependencies/insightface/data/images/mask_blue.jpg +0 -0
- src/utils/dependencies/insightface/data/images/mask_green.jpg +0 -0
- src/utils/dependencies/insightface/data/images/mask_white.jpg +0 -0
- src/utils/dependencies/insightface/data/images/t1.jpg +0 -0
- src/utils/dependencies/insightface/data/objects/meanshape_68.pkl +3 -0
- src/utils/dependencies/insightface/data/pickle_object.py +17 -0
- src/utils/dependencies/insightface/data/rec_builder.py +71 -0
- src/utils/dependencies/insightface/model_zoo/__init__.py +6 -0
LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2024 Kuaishou Visual Generation and Interaction Center

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

app.py
ADDED
@@ -0,0 +1,215 @@
# coding: utf-8

"""
The entrance of the gradio
"""

import tyro
import subprocess
import gradio as gr
import os.path as osp
from src.utils.helper import load_description
from src.gradio_pipeline import GradioPipeline
from src.config.crop_config import CropConfig
from src.config.argument_config import ArgumentConfig
from src.config.inference_config import InferenceConfig


def partial_fields(target_class, kwargs):
    return target_class(**{k: v for k, v in kwargs.items() if hasattr(target_class, k)})


def fast_check_ffmpeg():
    try:
        subprocess.run(["ffmpeg", "-version"], capture_output=True, check=True)
        return True
    except:
        return False

# set tyro theme
tyro.extras.set_accent_color("bright_cyan")
args = tyro.cli(ArgumentConfig)

if not fast_check_ffmpeg():
    raise ImportError(
        "FFmpeg is not installed. Please install FFmpeg before running this script. https://ffmpeg.org/download.html"
    )

# specify configs for inference
inference_cfg = partial_fields(InferenceConfig, args.__dict__)  # use attributes of args to initialize InferenceConfig
crop_cfg = partial_fields(CropConfig, args.__dict__)  # use attributes of args to initialize CropConfig

gradio_pipeline = GradioPipeline(
    inference_cfg=inference_cfg,
    crop_cfg=crop_cfg,
    args=args
)


def gpu_wrapped_execute_video(*args, **kwargs):
    return gradio_pipeline.execute_video(*args, **kwargs)


def gpu_wrapped_execute_image(*args, **kwargs):
    return gradio_pipeline.execute_image(*args, **kwargs)


# assets
title_md = "assets/gradio_title.md"
example_portrait_dir = "assets/examples/source"
example_video_dir = "assets/examples/driving"
data_examples = [
    [osp.join(example_portrait_dir, "s9.jpg"), osp.join(example_video_dir, "d0.mp4"), True, True, True, False],
    [osp.join(example_portrait_dir, "s6.jpg"), osp.join(example_video_dir, "d0.mp4"), True, True, True, False],
    [osp.join(example_portrait_dir, "s10.jpg"), osp.join(example_video_dir, "d0.mp4"), True, True, True, False],
    [osp.join(example_portrait_dir, "s5.jpg"), osp.join(example_video_dir, "d18.mp4"), True, True, True, False],
    [osp.join(example_portrait_dir, "s7.jpg"), osp.join(example_video_dir, "d19.mp4"), True, True, True, False],
    [osp.join(example_portrait_dir, "s2.jpg"), osp.join(example_video_dir, "d13.mp4"), True, True, True, True],
]
#################### interface logic ####################

# Define components first
eye_retargeting_slider = gr.Slider(minimum=0, maximum=0.8, step=0.01, label="target eyes-open ratio")
lip_retargeting_slider = gr.Slider(minimum=0, maximum=0.8, step=0.01, label="target lip-open ratio")
retargeting_input_image = gr.Image(type="filepath")
output_image = gr.Image(type="numpy")
output_image_paste_back = gr.Image(type="numpy")
output_video = gr.Video()
output_video_concat = gr.Video()

with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.HTML(load_description(title_md))
    gr.Markdown(load_description("assets/gradio_description_upload.md"))
    with gr.Row():
        with gr.Accordion(open=True, label="Source Portrait"):
            image_input = gr.Image(type="filepath")
            gr.Examples(
                examples=[
                    [osp.join(example_portrait_dir, "s9.jpg")],
                    [osp.join(example_portrait_dir, "s6.jpg")],
                    [osp.join(example_portrait_dir, "s10.jpg")],
                    [osp.join(example_portrait_dir, "s5.jpg")],
                    [osp.join(example_portrait_dir, "s7.jpg")],
                    [osp.join(example_portrait_dir, "s12.jpg")],
                ],
                inputs=[image_input],
                cache_examples=False,
            )
        with gr.Accordion(open=True, label="Driving Video"):
            video_input = gr.Video()
            gr.Examples(
                examples=[
                    [osp.join(example_video_dir, "d0.mp4")],
                    [osp.join(example_video_dir, "d18.mp4")],
                    [osp.join(example_video_dir, "d19.mp4")],
                    [osp.join(example_video_dir, "d14.mp4")],
                    [osp.join(example_video_dir, "d6.mp4")],
                ],
                inputs=[video_input],
                cache_examples=False,
            )
    with gr.Row():
        with gr.Accordion(open=False, label="Animation Instructions and Options"):
            gr.Markdown(load_description("assets/gradio_description_animation.md"))
            with gr.Row():
                flag_relative_input = gr.Checkbox(value=True, label="relative motion")
                flag_do_crop_input = gr.Checkbox(value=True, label="do crop (source)")
                flag_remap_input = gr.Checkbox(value=True, label="paste-back")
                flag_crop_driving_video_input = gr.Checkbox(value=False, label="do crop (driving video)")
    with gr.Row():
        with gr.Column():
            process_button_animation = gr.Button("🚀 Animate", variant="primary")
        with gr.Column():
            process_button_reset = gr.ClearButton([image_input, video_input, output_video, output_video_concat], value="🧹 Clear")
    with gr.Row():
        with gr.Column():
            with gr.Accordion(open=True, label="The animated video in the original image space"):
                output_video.render()
        with gr.Column():
            with gr.Accordion(open=True, label="The animated video"):
                output_video_concat.render()
    with gr.Row():
        # Examples
        gr.Markdown("## You could also choose the examples below by one click ⬇️")
    with gr.Row():
        gr.Examples(
            examples=data_examples,
            fn=gpu_wrapped_execute_video,
            inputs=[
                image_input,
                video_input,
                flag_relative_input,
                flag_do_crop_input,
                flag_remap_input,
                flag_crop_driving_video_input
            ],
            outputs=[output_image, output_image_paste_back],
            examples_per_page=len(data_examples),
            cache_examples=False,
        )
    gr.Markdown(load_description("assets/gradio_description_retargeting.md"), visible=True)
    with gr.Row(visible=True):
        eye_retargeting_slider.render()
        lip_retargeting_slider.render()
    with gr.Row(visible=True):
        process_button_retargeting = gr.Button("🚗 Retargeting", variant="primary")
        process_button_reset_retargeting = gr.ClearButton(
            [
                eye_retargeting_slider,
                lip_retargeting_slider,
                retargeting_input_image,
                output_image,
                output_image_paste_back
            ],
            value="🧹 Clear"
        )
    with gr.Row(visible=True):
        with gr.Column():
            with gr.Accordion(open=True, label="Retargeting Input"):
                retargeting_input_image.render()
                gr.Examples(
                    examples=[
                        [osp.join(example_portrait_dir, "s9.jpg")],
                        [osp.join(example_portrait_dir, "s6.jpg")],
                        [osp.join(example_portrait_dir, "s10.jpg")],
                        [osp.join(example_portrait_dir, "s5.jpg")],
                        [osp.join(example_portrait_dir, "s7.jpg")],
                        [osp.join(example_portrait_dir, "s12.jpg")],
                    ],
                    inputs=[retargeting_input_image],
                    cache_examples=False,
                )
        with gr.Column():
            with gr.Accordion(open=True, label="Retargeting Result"):
                output_image.render()
        with gr.Column():
            with gr.Accordion(open=True, label="Paste-back Result"):
                output_image_paste_back.render()
    # binding functions for buttons
    process_button_retargeting.click(
        # fn=gradio_pipeline.execute_image,
        fn=gpu_wrapped_execute_image,
        inputs=[eye_retargeting_slider, lip_retargeting_slider, retargeting_input_image, flag_do_crop_input],
        outputs=[output_image, output_image_paste_back],
        show_progress=True
    )
    process_button_animation.click(
        fn=gpu_wrapped_execute_video,
        inputs=[
            image_input,
            video_input,
            flag_relative_input,
            flag_do_crop_input,
            flag_remap_input,
            flag_crop_driving_video_input
        ],
        outputs=[output_video, output_video_concat],
        show_progress=True
    )


demo.launch(
    server_port=args.server_port,
    share=args.share,
    server_name=args.server_name
)

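The `partial_fields` helper above is what routes the flat `ArgumentConfig` namespace into the per-module configs. The standalone sketch below shows that filtering behaviour in isolation; the `Demo*` dataclasses are illustrative stand-ins, not part of this commit.

```python
# Standalone sketch of the `partial_fields` filtering used in app.py and inference.py.
# The Demo* dataclasses are hypothetical stand-ins, not part of the repository.
from dataclasses import dataclass


def partial_fields(target_class, kwargs):
    # keep only the keys that the target dataclass declares (every field has a
    # default, so hasattr() on the class itself works)
    return target_class(**{k: v for k, v in kwargs.items() if hasattr(target_class, k)})


@dataclass
class DemoInferenceConfig:
    flag_use_half_precision: bool = True
    device_id: int = 0


@dataclass
class DemoCropConfig:
    scale: float = 2.3
    vx_ratio: float = 0.0


if __name__ == "__main__":
    flat_args = {"flag_use_half_precision": False, "device_id": 1, "scale": 2.0, "unknown_key": 42}
    print(partial_fields(DemoInferenceConfig, flat_args))  # DemoInferenceConfig(flag_use_half_precision=False, device_id=1)
    print(partial_fields(DemoCropConfig, flat_args))       # DemoCropConfig(scale=2.0, vx_ratio=0.0)
```
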
assets/.gitignore
ADDED
@@ -0,0 +1,2 @@
examples/driving/*.pkl
examples/driving/*_crop.mp4

assets/gradio_description_animation.md
ADDED
@@ -0,0 +1,16 @@
<span style="font-size: 1.2em;">🔥 To animate the source portrait with the driving video, please follow these steps:</span>
<div style="font-size: 1.2em; margin-left: 20px;">
1. In the <strong>Animation Options</strong> section, we recommend enabling the <strong>do crop (source)</strong> option if faces occupy a small portion of your image.
</div>
<div style="font-size: 1.2em; margin-left: 20px;">
2. Press the <strong>🚀 Animate</strong> button and wait a moment. Your animated video will appear in the result block. This may take a few moments.
</div>
<div style="font-size: 1.2em; margin-left: 20px;">
3. If you want to upload your own driving video, <strong>the best practice</strong> is to:

- Crop it to a 1:1 aspect ratio (e.g., 512x512 or 256x256 pixels), or enable auto-cropping by checking `do crop (driving video)`.
- Focus on the head area, similar to the example videos.
- Minimize shoulder movement.
- Make sure the first frame of the driving video shows a frontal face with a **neutral expression**.

</div>

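If you prefer to pre-crop a driving video to 1:1 offline instead of checking `do crop (driving video)`, a naive center-crop sketch using `imageio` (already in `requirements.txt`) could look like the following. The file names are placeholders, this helper is not part of the repository, and a plain center crop does not re-center on the head the way the built-in auto-cropping does.

```python
# Naive 1:1 center crop of a driving video with imageio (sketch only; paths are
# placeholders and the crop is not head-aware).
import imageio

reader = imageio.get_reader("driving.mp4")
fps = reader.get_meta_data().get("fps", 25)
writer = imageio.get_writer("driving_1x1.mp4", fps=fps)

for frame in reader:
    h, w = frame.shape[:2]
    s = min(h, w) // 2 * 2          # square side, kept even for the H.264 encoder
    y0, x0 = (h - s) // 2, (w - s) // 2
    writer.append_data(frame[y0:y0 + s, x0:x0 + s])

writer.close()
```
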
assets/gradio_description_retargeting.md
ADDED
@@ -0,0 +1,4 @@
<br>

## Retargeting
<span style="font-size: 1.2em;">🔥 To edit the eyes-open and lip-open ratios of the source portrait, drag the sliders and click the <strong>🚗 Retargeting</strong> button. You can try running it multiple times. <strong>😊 Set both ratios to 0.8 to see what's going on!</strong></span>

assets/gradio_description_upload.md
ADDED
@@ -0,0 +1,2 @@
## 🤗 This is the official gradio demo for **LivePortrait**.
<div style="font-size: 1.2em;">Please upload or use a webcam to get a <strong>Source Portrait</strong> (any aspect ratio) and upload a <strong>Driving Video</strong> (1:1 aspect ratio, or any aspect ratio with <code>do crop (driving video)</code> checked).</div>

assets/gradio_title.md
ADDED
@@ -0,0 +1,11 @@
<div style="display: flex; justify-content: center; align-items: center; text-align: center;">
  <div>
    <h1>LivePortrait: Efficient Portrait Animation with Stitching and Retargeting Control</h1>
    <div style="display: flex; justify-content: center; align-items: center; text-align: center;">
      <a href="https://arxiv.org/pdf/2407.03168"><img src="https://img.shields.io/badge/arXiv-2407.03168-red"></a>
      <a href="https://liveportrait.github.io"><img src="https://img.shields.io/badge/Project_Page-LivePortrait-green" alt="Project Page"></a>
      <a href="https://github.com/KwaiVGI/LivePortrait"><img src="https://img.shields.io/badge/Github-Code-blue"></a>
      <a href='https://huggingface.co/spaces/KwaiVGI/liveportrait'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue'></a>
    </div>
  </div>
</div>

inference.py
ADDED
@@ -0,0 +1,58 @@
# coding: utf-8

import os.path as osp
import tyro
import subprocess
from src.config.argument_config import ArgumentConfig
from src.config.inference_config import InferenceConfig
from src.config.crop_config import CropConfig
from src.live_portrait_pipeline import LivePortraitPipeline


def partial_fields(target_class, kwargs):
    return target_class(**{k: v for k, v in kwargs.items() if hasattr(target_class, k)})


def fast_check_ffmpeg():
    try:
        subprocess.run(["ffmpeg", "-version"], capture_output=True, check=True)
        return True
    except:
        return False


def fast_check_args(args: ArgumentConfig):
    if not osp.exists(args.source_image):
        raise FileNotFoundError(f"source image not found: {args.source_image}")
    if not osp.exists(args.driving_info):
        raise FileNotFoundError(f"driving info not found: {args.driving_info}")


def main():
    # set tyro theme
    tyro.extras.set_accent_color("bright_cyan")
    args = tyro.cli(ArgumentConfig)

    if not fast_check_ffmpeg():
        raise ImportError(
            "FFmpeg is not installed. Please install FFmpeg before running this script. https://ffmpeg.org/download.html"
        )

    # fast check the args
    fast_check_args(args)

    # specify configs for inference
    inference_cfg = partial_fields(InferenceConfig, args.__dict__)  # use attributes of args to initialize InferenceConfig
    crop_cfg = partial_fields(CropConfig, args.__dict__)  # use attributes of args to initialize CropConfig

    live_portrait_pipeline = LivePortraitPipeline(
        inference_cfg=inference_cfg,
        crop_cfg=crop_cfg
    )

    # run
    live_portrait_pipeline.execute(args)


if __name__ == "__main__":
    main()

pretrained_weights/.gitkeep
ADDED
File without changes

readme.md
ADDED
@@ -0,0 +1,206 @@
<h1 align="center">LivePortrait: Efficient Portrait Animation with Stitching and Retargeting Control</h1>

<div align='center'>
<a href='https://github.com/cleardusk' target='_blank'><strong>Jianzhu Guo</strong></a><sup> 1†</sup>
<a href='https://github.com/KwaiVGI' target='_blank'><strong>Dingyun Zhang</strong></a><sup> 1,2</sup>
<a href='https://github.com/KwaiVGI' target='_blank'><strong>Xiaoqiang Liu</strong></a><sup> 1</sup>
<a href='https://scholar.google.com/citations?user=t88nyvsAAAAJ&hl' target='_blank'><strong>Zhizhou Zhong</strong></a><sup> 1,3</sup>
<a href='https://scholar.google.com.hk/citations?user=_8k1ubAAAAAJ' target='_blank'><strong>Yuan Zhang</strong></a><sup> 1</sup>
</div>

<div align='center'>
<a href='https://scholar.google.com/citations?user=P6MraaYAAAAJ' target='_blank'><strong>Pengfei Wan</strong></a><sup> 1</sup>
<a href='https://openreview.net/profile?id=~Di_ZHANG3' target='_blank'><strong>Di Zhang</strong></a><sup> 1</sup>
</div>

<div align='center'>
<sup>1 </sup>Kuaishou Technology&emsp;<sup>2 </sup>University of Science and Technology of China&emsp;<sup>3 </sup>Fudan University
</div>

<br>
<div align="center">
<!-- <a href='LICENSE'><img src='https://img.shields.io/badge/license-MIT-yellow'></a> -->
<a href='https://arxiv.org/pdf/2407.03168'><img src='https://img.shields.io/badge/arXiv-LivePortrait-red'></a>
<a href='https://liveportrait.github.io'><img src='https://img.shields.io/badge/Project-LivePortrait-green'></a>
<a href='https://huggingface.co/spaces/KwaiVGI/liveportrait'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue'></a>
</div>
<br>

<p align="center">
  <img src="./assets/docs/showcase2.gif" alt="showcase">
  <br>
  🔥 For more results, visit our <a href="https://liveportrait.github.io/"><strong>homepage</strong></a> 🔥
</p>


## 🔥 Updates
- **`2024/07/10`**: 💪 We support audio and video concatenation, driving video auto-cropping, and template making to protect privacy. More details [here](assets/docs/changelog/2024-07-10.md).
- **`2024/07/09`**: 🤗 We released the [HuggingFace Space](https://huggingface.co/spaces/KwaiVGI/liveportrait), thanks to the HF team and [Gradio](https://github.com/gradio-app/gradio)!
- **`2024/07/04`**: 😊 We released the initial version of the inference code and models. Continuous updates, stay tuned!
- **`2024/07/04`**: 🔥 We released the [homepage](https://liveportrait.github.io) and technical report on [arXiv](https://arxiv.org/pdf/2407.03168).


## Introduction
This repo, named **LivePortrait**, contains the official PyTorch implementation of our paper [LivePortrait: Efficient Portrait Animation with Stitching and Retargeting Control](https://arxiv.org/pdf/2407.03168).
We are actively updating and improving this repository. If you find any bugs or have suggestions, welcome to raise issues or submit pull requests (PR) 💖.

## 🔥 Getting Started
### 1. Clone the code and prepare the environment
```bash
git clone https://github.com/KwaiVGI/LivePortrait
cd LivePortrait

# create env using conda
conda create -n LivePortrait python==3.9.18
conda activate LivePortrait
# install dependencies with pip
pip install -r requirements.txt
```

**Note:** make sure your system has [FFmpeg](https://ffmpeg.org/) installed!

### 2. Download pretrained weights

The easiest way to download the pretrained weights is from HuggingFace:
```bash
# first, ensure git-lfs is installed, see: https://docs.github.com/en/repositories/working-with-files/managing-large-files/installing-git-large-file-storage
git lfs install
# clone the weights
git clone https://huggingface.co/KwaiVGI/liveportrait pretrained_weights
```

Alternatively, you can download all pretrained weights from [Google Drive](https://drive.google.com/drive/folders/1UtKgzKjFAOmZkhNK-OYT0caJ_w2XAnib) or [Baidu Yun](https://pan.baidu.com/s/1MGctWmNla_vZxDbEp2Dtzw?pwd=z5cn). Unzip and place them in `./pretrained_weights`.

Ensure the directory structure is as follows, or contains:
```text
pretrained_weights
├── insightface
│   └── models
│       └── buffalo_l
│           ├── 2d106det.onnx
│           └── det_10g.onnx
└── liveportrait
    ├── base_models
    │   ├── appearance_feature_extractor.pth
    │   ├── motion_extractor.pth
    │   ├── spade_generator.pth
    │   └── warping_module.pth
    ├── landmark.onnx
    └── retargeting_models
        └── stitching_retargeting_module.pth
```

### 3. Inference 🚀

#### Fast hands-on
```bash
python inference.py
```

If the script runs successfully, you will get an output mp4 file named `animations/s6--d0_concat.mp4`. This file includes the following results: driving video, input image, and generated result.

<p align="center">
  <img src="./assets/docs/inference.gif" alt="image">
</p>

Or, you can change the input by specifying the `-s` and `-d` arguments:

```bash
python inference.py -s assets/examples/source/s9.jpg -d assets/examples/driving/d0.mp4

# disable pasting back to run faster
python inference.py -s assets/examples/source/s9.jpg -d assets/examples/driving/d0.mp4 --no_flag_pasteback

# more options to see
python inference.py -h
```

#### Driving video auto-cropping

📕 To use your own driving video, we **recommend**:
- Crop it to a **1:1** aspect ratio (e.g., 512x512 or 256x256 pixels), or enable auto-cropping with `--flag_crop_driving_video`.
- Focus on the head area, similar to the example videos.
- Minimize shoulder movement.
- Make sure the first frame of the driving video shows a frontal face with a **neutral expression**.

Below is an auto-cropping case using `--flag_crop_driving_video`:
```bash
python inference.py -s assets/examples/source/s9.jpg -d assets/examples/driving/d13.mp4 --flag_crop_driving_video
```

If you find the results of auto-cropping are not good, you can modify the `--scale_crop_video` and `--vy_ratio_crop_video` options to adjust the scale and offset, or do it manually.

#### Motion template making
You can also use the auto-generated motion template files ending with `.pkl` to speed up inference and **protect privacy**, for example:
```bash
python inference.py -s assets/examples/source/s9.jpg -d assets/examples/driving/d5.pkl
```

**Discover more interesting results on our [Homepage](https://liveportrait.github.io)** 😊

### 4. Gradio interface 🤗

We also provide a Gradio <a href='https://github.com/gradio-app/gradio'><img src='https://img.shields.io/github/stars/gradio-app/gradio'></a> interface for a better experience, just run:

```bash
python app.py
```

You can specify the `--server_port`, `--share`, `--server_name` arguments to satisfy your needs!

🚀 We also provide an acceleration option `--flag_do_torch_compile`. The first-time inference triggers an optimization process (about one minute), making subsequent inferences 20-30% faster. Performance gains may vary with different CUDA versions.
```bash
# enable torch.compile for faster inference
python app.py --flag_do_torch_compile
```
**Note**: This method has not been fully tested, e.g., on Windows.

**Or, try it out effortlessly on [HuggingFace](https://huggingface.co/spaces/KwaiVGI/LivePortrait) 🤗**

### 5. Inference speed evaluation 🚀🚀🚀
We have also provided a script to evaluate the inference speed of each module:

```bash
python speed.py
```

Below are the results of inferring one frame on an RTX 4090 GPU using the native PyTorch framework with `torch.compile`:

| Model                             | Parameters (M) | Model Size (MB) | Inference (ms) |
|-----------------------------------|:--------------:|:---------------:|:--------------:|
| Appearance Feature Extractor      | 0.84           | 3.3             | 0.82           |
| Motion Extractor                  | 28.12          | 108             | 0.84           |
| Spade Generator                   | 55.37          | 212             | 7.59           |
| Warping Module                    | 45.53          | 174             | 5.21           |
| Stitching and Retargeting Modules | 0.23           | 2.3             | 0.31           |

*Note: The values for the Stitching and Retargeting Modules represent the combined parameter counts and total inference time of three sequential MLP networks.*

## Community Resources 🤗

Discover the invaluable resources contributed by our community to enhance your LivePortrait experience:

- [ComfyUI-LivePortraitKJ](https://github.com/kijai/ComfyUI-LivePortraitKJ) by [@kijai](https://github.com/kijai)
- [comfyui-liveportrait](https://github.com/shadowcz007/comfyui-liveportrait) by [@shadowcz007](https://github.com/shadowcz007)
- [LivePortrait hands-on tutorial](https://www.youtube.com/watch?v=uyjSTAOY7yI) by [@AI Search](https://www.youtube.com/@theAIsearch)
- [ComfyUI tutorial](https://www.youtube.com/watch?v=8-IcDDmiUMM) by [@Sebastian Kamph](https://www.youtube.com/@sebastiankamph)
- [LivePortrait In ComfyUI](https://www.youtube.com/watch?v=aFcS31OWMjE) by [@Benji](https://www.youtube.com/@TheFutureThinker)
- [Replicate Playground](https://replicate.com/fofr/live-portrait) and [cog-comfyui](https://github.com/fofr/cog-comfyui) by [@fofr](https://github.com/fofr)

And many more amazing contributions from our community!

## Acknowledgements
We would like to thank the contributors of the [FOMM](https://github.com/AliaksandrSiarohin/first-order-model), [Open Facevid2vid](https://github.com/zhanglonghao1992/One-Shot_Free-View_Neural_Talking_Head_Synthesis), [SPADE](https://github.com/NVlabs/SPADE), and [InsightFace](https://github.com/deepinsight/insightface) repositories for their open research and contributions.

## Citation 💖
If you find LivePortrait useful for your research, welcome to 🌟 this repo and cite our work using the following BibTeX:
```bibtex
@article{guo2024liveportrait,
  title   = {LivePortrait: Efficient Portrait Animation with Stitching and Retargeting Control},
  author  = {Guo, Jianzhu and Zhang, Dingyun and Liu, Xiaoqiang and Zhong, Zhizhou and Zhang, Yuan and Wan, Pengfei and Zhang, Di},
  journal = {arXiv preprint arXiv:2407.03168},
  year    = {2024}
}
```

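Before running `inference.py`, it can help to confirm that the weights match the directory tree listed in the readme above. The check below is a hedged convenience sketch, not part of the repository; the paths are copied by hand from that tree and it assumes you run it from the repository root.

```python
# Verify the pretrained_weights layout described in the readme (sketch only;
# paths copied by hand from the documented tree, run from the repository root).
import os.path as osp

EXPECTED = [
    "pretrained_weights/insightface/models/buffalo_l/2d106det.onnx",
    "pretrained_weights/insightface/models/buffalo_l/det_10g.onnx",
    "pretrained_weights/liveportrait/base_models/appearance_feature_extractor.pth",
    "pretrained_weights/liveportrait/base_models/motion_extractor.pth",
    "pretrained_weights/liveportrait/base_models/spade_generator.pth",
    "pretrained_weights/liveportrait/base_models/warping_module.pth",
    "pretrained_weights/liveportrait/landmark.onnx",
    "pretrained_weights/liveportrait/retargeting_models/stitching_retargeting_module.pth",
]

missing = [p for p in EXPECTED if not osp.exists(p)]
if missing:
    print("Missing pretrained weights:")
    for p in missing:
        print(" -", p)
else:
    print("All expected pretrained weights are in place.")
```
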
requirements.txt
ADDED
@@ -0,0 +1,22 @@
--extra-index-url https://download.pytorch.org/whl/cu118
torch==2.3.0
torchvision==0.18.0
torchaudio==2.3.0

numpy==1.26.4
pyyaml==6.0.1
opencv-python==4.10.0.84
scipy==1.13.1
imageio==2.34.2
lmdb==1.4.1
tqdm==4.66.4
rich==13.7.1
ffmpeg-python==0.2.0
onnxruntime-gpu==1.18.0
onnx==1.16.1
scikit-image==0.24.0
albumentations==1.4.10
matplotlib==3.9.0
imageio-ffmpeg==0.5.1
tyro==0.8.5
gradio==4.37.1

speed.py
ADDED
@@ -0,0 +1,195 @@
# coding: utf-8

"""
Benchmark the inference speed of each module in LivePortrait.

TODO: heavy GPT style, need to refactor
"""

import torch
torch._dynamo.config.suppress_errors = True  # Suppress errors and fall back to eager execution

import yaml
import time
import numpy as np

from src.utils.helper import load_model, concat_feat
from src.config.inference_config import InferenceConfig


def initialize_inputs(batch_size=1, device_id=0):
    """
    Generate random input tensors and move them to GPU
    """
    feature_3d = torch.randn(batch_size, 32, 16, 64, 64).to(device_id).half()
    kp_source = torch.randn(batch_size, 21, 3).to(device_id).half()
    kp_driving = torch.randn(batch_size, 21, 3).to(device_id).half()
    source_image = torch.randn(batch_size, 3, 256, 256).to(device_id).half()
    generator_input = torch.randn(batch_size, 256, 64, 64).to(device_id).half()
    eye_close_ratio = torch.randn(batch_size, 3).to(device_id).half()
    lip_close_ratio = torch.randn(batch_size, 2).to(device_id).half()
    feat_stitching = concat_feat(kp_source, kp_driving).half()
    feat_eye = concat_feat(kp_source, eye_close_ratio).half()
    feat_lip = concat_feat(kp_source, lip_close_ratio).half()

    inputs = {
        'feature_3d': feature_3d,
        'kp_source': kp_source,
        'kp_driving': kp_driving,
        'source_image': source_image,
        'generator_input': generator_input,
        'feat_stitching': feat_stitching,
        'feat_eye': feat_eye,
        'feat_lip': feat_lip
    }

    return inputs


def load_and_compile_models(cfg, model_config):
    """
    Load and compile models for inference
    """
    appearance_feature_extractor = load_model(cfg.checkpoint_F, model_config, cfg.device_id, 'appearance_feature_extractor')
    motion_extractor = load_model(cfg.checkpoint_M, model_config, cfg.device_id, 'motion_extractor')
    warping_module = load_model(cfg.checkpoint_W, model_config, cfg.device_id, 'warping_module')
    spade_generator = load_model(cfg.checkpoint_G, model_config, cfg.device_id, 'spade_generator')
    stitching_retargeting_module = load_model(cfg.checkpoint_S, model_config, cfg.device_id, 'stitching_retargeting_module')

    models_with_params = [
        ('Appearance Feature Extractor', appearance_feature_extractor),
        ('Motion Extractor', motion_extractor),
        ('Warping Network', warping_module),
        ('SPADE Decoder', spade_generator)
    ]

    compiled_models = {}
    for name, model in models_with_params:
        model = model.half()
        model = torch.compile(model, mode='max-autotune')  # Optimize for inference
        model.eval()  # Switch to evaluation mode
        compiled_models[name] = model

    retargeting_models = ['stitching', 'eye', 'lip']
    for retarget in retargeting_models:
        module = stitching_retargeting_module[retarget].half()
        module = torch.compile(module, mode='max-autotune')  # Optimize for inference
        module.eval()  # Switch to evaluation mode
        stitching_retargeting_module[retarget] = module

    return compiled_models, stitching_retargeting_module


def warm_up_models(compiled_models, stitching_retargeting_module, inputs):
    """
    Warm up models to prepare them for benchmarking
    """
    print("Warm up start!")
    with torch.no_grad():
        for _ in range(10):
            compiled_models['Appearance Feature Extractor'](inputs['source_image'])
            compiled_models['Motion Extractor'](inputs['source_image'])
            compiled_models['Warping Network'](inputs['feature_3d'], inputs['kp_driving'], inputs['kp_source'])
            compiled_models['SPADE Decoder'](inputs['generator_input'])  # Adjust input as required
            stitching_retargeting_module['stitching'](inputs['feat_stitching'])
            stitching_retargeting_module['eye'](inputs['feat_eye'])
            stitching_retargeting_module['lip'](inputs['feat_lip'])
    print("Warm up end!")


def measure_inference_times(compiled_models, stitching_retargeting_module, inputs):
    """
    Measure inference times for each model
    """
    times = {name: [] for name in compiled_models.keys()}
    times['Stitching and Retargeting Modules'] = []

    overall_times = []

    with torch.no_grad():
        for _ in range(100):
            torch.cuda.synchronize()
            overall_start = time.time()

            start = time.time()
            compiled_models['Appearance Feature Extractor'](inputs['source_image'])
            torch.cuda.synchronize()
            times['Appearance Feature Extractor'].append(time.time() - start)

            start = time.time()
            compiled_models['Motion Extractor'](inputs['source_image'])
            torch.cuda.synchronize()
            times['Motion Extractor'].append(time.time() - start)

            start = time.time()
            compiled_models['Warping Network'](inputs['feature_3d'], inputs['kp_driving'], inputs['kp_source'])
            torch.cuda.synchronize()
            times['Warping Network'].append(time.time() - start)

            start = time.time()
            compiled_models['SPADE Decoder'](inputs['generator_input'])  # Adjust input as required
            torch.cuda.synchronize()
            times['SPADE Decoder'].append(time.time() - start)

            start = time.time()
            stitching_retargeting_module['stitching'](inputs['feat_stitching'])
            stitching_retargeting_module['eye'](inputs['feat_eye'])
            stitching_retargeting_module['lip'](inputs['feat_lip'])
            torch.cuda.synchronize()
            times['Stitching and Retargeting Modules'].append(time.time() - start)

            overall_times.append(time.time() - overall_start)

    return times, overall_times


def print_benchmark_results(compiled_models, stitching_retargeting_module, retargeting_models, times, overall_times):
    """
    Print benchmark results with average and standard deviation of inference times
    """
    average_times = {name: np.mean(times[name]) * 1000 for name in times.keys()}
    std_times = {name: np.std(times[name]) * 1000 for name in times.keys()}

    for name, model in compiled_models.items():
        num_params = sum(p.numel() for p in model.parameters())
        num_params_in_millions = num_params / 1e6
        print(f"Number of parameters for {name}: {num_params_in_millions:.2f} M")

    for index, retarget in enumerate(retargeting_models):
        num_params = sum(p.numel() for p in stitching_retargeting_module[retarget].parameters())
        num_params_in_millions = num_params / 1e6
        print(f"Number of parameters for part_{index} in Stitching and Retargeting Modules: {num_params_in_millions:.2f} M")

    for name, avg_time in average_times.items():
        std_time = std_times[name]
        print(f"Average inference time for {name} over 100 runs: {avg_time:.2f} ms (std: {std_time:.2f} ms)")


def main():
    """
    Main function to benchmark speed and model parameters
    """
    # Load configuration
    cfg = InferenceConfig()
    model_config_path = cfg.models_config
    with open(model_config_path, 'r') as file:
        model_config = yaml.safe_load(file)

    # Sample input tensors
    inputs = initialize_inputs(device_id=cfg.device_id)

    # Load and compile models
    compiled_models, stitching_retargeting_module = load_and_compile_models(cfg, model_config)

    # Warm up models
    warm_up_models(compiled_models, stitching_retargeting_module, inputs)

    # Measure inference times
    times, overall_times = measure_inference_times(compiled_models, stitching_retargeting_module, inputs)

    # Print benchmark results
    print_benchmark_results(compiled_models, stitching_retargeting_module, ['stitching', 'eye', 'lip'], times, overall_times)


if __name__ == "__main__":
    main()

src/config/__init__.py
ADDED
File without changes

src/config/argument_config.py
ADDED
@@ -0,0 +1,48 @@
# coding: utf-8

"""
All configs for user
"""

from dataclasses import dataclass
import tyro
from typing_extensions import Annotated
from typing import Optional
from .base_config import PrintableConfig, make_abs_path


@dataclass(repr=False)  # use repr from PrintableConfig
class ArgumentConfig(PrintableConfig):
    ########## input arguments ##########
    source_image: Annotated[str, tyro.conf.arg(aliases=["-s"])] = make_abs_path('../../assets/examples/source/s6.jpg')  # path to the source portrait
    driving_info: Annotated[str, tyro.conf.arg(aliases=["-d"])] = make_abs_path('../../assets/examples/driving/d0.mp4')  # path to driving video or template (.pkl format)
    output_dir: Annotated[str, tyro.conf.arg(aliases=["-o"])] = 'animations/'  # directory to save output video

    ########## inference arguments ##########
    flag_use_half_precision: bool = True  # whether to use half precision (FP16). If black boxes appear, it might be due to GPU incompatibility; set to False.
    flag_crop_driving_video: bool = False  # whether to crop the driving video, if the given driving info is a video
    device_id: int = 0  # gpu device id
    flag_force_cpu: bool = False  # force cpu inference, WIP!
    flag_lip_zero: bool = True  # whether to set the lip to the closed state before animation; only takes effect when flag_eye_retargeting and flag_lip_retargeting are False
    flag_eye_retargeting: bool = False  # not recommended to be True, WIP
    flag_lip_retargeting: bool = False  # not recommended to be True, WIP
    flag_stitching: bool = True  # recommended to be True if head movement is small, False if head movement is large
    flag_relative_motion: bool = True  # whether to use relative motion
    flag_pasteback: bool = True  # whether to paste-back/stitch the animated face crop from the face-cropping space back to the original image space
    flag_do_crop: bool = True  # whether to crop the source portrait to the face-cropping space
    flag_do_rot: bool = True  # whether to conduct the rotation when flag_do_crop is True

    ########## crop arguments ##########
    scale: float = 2.3  # the ratio of the face area is smaller if scale is larger
    vx_ratio: float = 0  # the ratio to move the face left or right in the cropping space
    vy_ratio: float = -0.125  # the ratio to move the face up or down in the cropping space

    scale_crop_video: float = 2.2  # scale factor for cropping video
    vx_ratio_crop_video: float = 0.  # adjust x offset
    vy_ratio_crop_video: float = -0.1  # adjust y offset

    ########## gradio arguments ##########
    server_port: Annotated[int, tyro.conf.arg(aliases=["-p"])] = 8890  # port for gradio server
    share: bool = False  # whether to share the server to public
    server_name: Optional[str] = "127.0.0.1"  # set the local server name, "0.0.0.0" to broadcast all
    flag_do_torch_compile: bool = False  # whether to use torch.compile to accelerate generation

src/config/base_config.py
ADDED
@@ -0,0 +1,29 @@
# coding: utf-8

"""
pretty printing class
"""

from __future__ import annotations
import os.path as osp
from typing import Tuple


def make_abs_path(fn):
    return osp.join(osp.dirname(osp.realpath(__file__)), fn)


class PrintableConfig:  # pylint: disable=too-few-public-methods
    """Printable Config defining str function"""

    def __repr__(self):
        lines = [self.__class__.__name__ + ":"]
        for key, val in vars(self).items():
            if isinstance(val, Tuple):
                flattened_val = "["
                for item in val:
                    flattened_val += str(item) + "\n"
                flattened_val = flattened_val.rstrip("\n")
                val = flattened_val + "]"
            lines += f"{key}: {str(val)}".split("\n")
        return "\n    ".join(lines)

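For illustration, every config dataclass in this commit that inherits from `PrintableConfig` prints as a `key: value` listing via the `__repr__` above. A minimal sketch, assuming it is run from the repository root; the output in the comments is abbreviated.

```python
# Minimal illustration of PrintableConfig.__repr__ via CropConfig from this commit
# (sketch only; run from the repository root, output abbreviated in the comments).
from src.config.crop_config import CropConfig

cfg = CropConfig(scale=2.5, dsize=512)
print(cfg)
# CropConfig:
#     insightface_root: ../../pretrained_weights/insightface
#     landmark_ckpt_path: ../../pretrained_weights/liveportrait/landmark.onnx
#     ...
```
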
src/config/crop_config.py
ADDED
@@ -0,0 +1,29 @@
# coding: utf-8

"""
parameters used for cropping faces
"""

from dataclasses import dataclass

from .base_config import PrintableConfig


@dataclass(repr=False)  # use repr from PrintableConfig
class CropConfig(PrintableConfig):
    insightface_root: str = "../../pretrained_weights/insightface"
    landmark_ckpt_path: str = "../../pretrained_weights/liveportrait/landmark.onnx"
    device_id: int = 0  # gpu device id
    flag_force_cpu: bool = False  # force cpu inference, WIP
    ########## source image cropping option ##########
    dsize: int = 512  # crop size
    scale: float = 2.5  # scale factor
    vx_ratio: float = 0  # vx ratio
    vy_ratio: float = -0.125  # vy ratio +up, -down
    max_face_num: int = 0  # max face number, 0 means no limit

    ########## driving video auto cropping option ##########
    scale_crop_video: float = 2.2  # 2.0 # scale factor for cropping video
    vx_ratio_crop_video: float = 0.0  # adjust x offset
    vy_ratio_crop_video: float = -0.1  # adjust y offset
    direction: str = "large-small"  # direction of cropping

src/config/inference_config.py
ADDED
@@ -0,0 +1,52 @@
# coding: utf-8

"""
config dataclass used for inference
"""

import os.path as osp
import cv2
from numpy import ndarray
from dataclasses import dataclass
from typing import Literal, Tuple
from .base_config import PrintableConfig, make_abs_path


@dataclass(repr=False)  # use repr from PrintableConfig
class InferenceConfig(PrintableConfig):
    # MODEL CONFIG, NOT EXPORTED PARAMS
    models_config: str = make_abs_path('./models.yaml')  # portrait animation config
    checkpoint_F: str = make_abs_path('../../pretrained_weights/liveportrait/base_models/appearance_feature_extractor.pth')  # path to checkpoint of F
    checkpoint_M: str = make_abs_path('../../pretrained_weights/liveportrait/base_models/motion_extractor.pth')  # path to checkpoint of M
    checkpoint_G: str = make_abs_path('../../pretrained_weights/liveportrait/base_models/spade_generator.pth')  # path to checkpoint of G
    checkpoint_W: str = make_abs_path('../../pretrained_weights/liveportrait/base_models/warping_module.pth')  # path to checkpoint of W
    checkpoint_S: str = make_abs_path('../../pretrained_weights/liveportrait/retargeting_models/stitching_retargeting_module.pth')  # path to checkpoint of S and R_eyes, R_lip

    # EXPORTED PARAMS
    flag_use_half_precision: bool = True
    flag_crop_driving_video: bool = False
    device_id: int = 0
    flag_lip_zero: bool = True
    flag_eye_retargeting: bool = False
    flag_lip_retargeting: bool = False
    flag_stitching: bool = True
    flag_relative_motion: bool = True
    flag_pasteback: bool = True
    flag_do_crop: bool = True
    flag_do_rot: bool = True
    flag_force_cpu: bool = False
    flag_do_torch_compile: bool = False

    # NOT EXPORTED PARAMS
    lip_zero_threshold: float = 0.03  # threshold for flag_lip_zero
    anchor_frame: int = 0  # TO IMPLEMENT

    input_shape: Tuple[int, int] = (256, 256)  # input shape
    output_format: Literal['mp4', 'gif'] = 'mp4'  # output video format
    crf: int = 15  # crf for output video
    output_fps: int = 25  # default output fps

    mask_crop: ndarray = cv2.imread(make_abs_path('../utils/resources/mask_template.png'), cv2.IMREAD_COLOR)
    size_gif: int = 256  # default gif size, TO IMPLEMENT
    source_max_dim: int = 1280  # the max dim of height and width of source image
    source_division: int = 2  # make sure the height and width of source image can be divided by this number

src/config/models.yaml
ADDED
@@ -0,0 +1,43 @@
model_params:
  appearance_feature_extractor_params: # the F in the paper
    image_channel: 3
    block_expansion: 64
    num_down_blocks: 2
    max_features: 512
    reshape_channel: 32
    reshape_depth: 16
    num_resblocks: 6
  motion_extractor_params: # the M in the paper
    num_kp: 21
    backbone: convnextv2_tiny
  warping_module_params: # the W in the paper
    num_kp: 21
    block_expansion: 64
    max_features: 512
    num_down_blocks: 2
    reshape_channel: 32
    estimate_occlusion_map: True
    dense_motion_params:
      block_expansion: 32
      max_features: 1024
      num_blocks: 5
      reshape_depth: 16
      compress: 4
  spade_generator_params: # the G in the paper
    upscale: 2 # represents upsample factor 256x256 -> 512x512
    block_expansion: 64
    max_features: 512
    num_down_blocks: 2
  stitching_retargeting_module_params: # the S in the paper
    stitching:
      input_size: 126 # (21*3)*2
      hidden_sizes: [128, 128, 64]
      output_size: 65 # (21*3)+2(tx,ty)
    lip:
      input_size: 65 # (21*3)+2
      hidden_sizes: [128, 128, 64]
      output_size: 63 # (21*3)
    eye:
      input_size: 66 # (21*3)+3
      hidden_sizes: [256, 256, 128, 128, 64]
      output_size: 63 # (21*3)

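The size comments in the `stitching_retargeting_module_params` block are simple keypoint arithmetic. The sketch below just re-derives them; the constants are copied by hand from the YAML above and nothing is read from disk.

```python
# Re-derive the dimension comments in models.yaml (sketch; constants mirror the YAML).
NUM_KP, KP_DIM = 21, 3            # 21 implicit keypoints with 3 coordinates each
kp_flat = NUM_KP * KP_DIM         # 63

assert 2 * kp_flat == 126         # stitching input: source keypoints + driving keypoints
assert kp_flat + 2 == 65          # stitching output: keypoint deltas + (tx, ty); also the lip input (keypoints + 2 lip ratios)
assert kp_flat + 3 == 66          # eye input: keypoints + 3 eye-ratio values
assert kp_flat == 63              # lip/eye output: keypoint deltas only
print("stitching/retargeting sizes check out:", 126, 65, 66, 63)
```
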
src/gradio_pipeline.py
ADDED
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding: utf-8
|
2 |
+
|
3 |
+
"""
|
4 |
+
Pipeline for gradio
|
5 |
+
"""
|
6 |
+
import gradio as gr
|
7 |
+
|
8 |
+
from .config.argument_config import ArgumentConfig
|
9 |
+
from .live_portrait_pipeline import LivePortraitPipeline
|
10 |
+
from .utils.io import load_img_online
|
11 |
+
from .utils.rprint import rlog as log
|
12 |
+
from .utils.crop import prepare_paste_back, paste_back
from .utils.camera import get_rotation_matrix


def update_args(args, user_args):
    """update the args according to user inputs
    """
    for k, v in user_args.items():
        if hasattr(args, k):
            setattr(args, k, v)
    return args


class GradioPipeline(LivePortraitPipeline):

    def __init__(self, inference_cfg, crop_cfg, args: ArgumentConfig):
        super().__init__(inference_cfg, crop_cfg)
        # self.live_portrait_wrapper = self.live_portrait_wrapper
        self.args = args

    def execute_video(
        self,
        input_image_path,
        input_video_path,
        flag_relative_input,
        flag_do_crop_input,
        flag_remap_input,
        flag_crop_driving_video_input
    ):
        """ for video-driven portrait animation
        """
        if input_image_path is not None and input_video_path is not None:
            args_user = {
                'source_image': input_image_path,
                'driving_info': input_video_path,
                'flag_relative': flag_relative_input,
                'flag_do_crop': flag_do_crop_input,
                'flag_pasteback': flag_remap_input,
                'flag_crop_driving_video': flag_crop_driving_video_input
            }
            # update config from user input
            self.args = update_args(self.args, args_user)
            self.live_portrait_wrapper.update_config(self.args.__dict__)
            self.cropper.update_config(self.args.__dict__)
            # video-driven animation
            video_path, video_path_concat = self.execute(self.args)
            gr.Info("Run successfully!", duration=2)
            return video_path, video_path_concat
        else:
            raise gr.Error("The input source portrait or driving video hasn't been prepared yet 💥!", duration=5)

    def execute_image(self, input_eye_ratio: float, input_lip_ratio: float, input_image, flag_do_crop=True):
        """ for single image retargeting
        """
        # disposable feature
        f_s_user, x_s_user, source_lmk_user, crop_M_c2o, mask_ori, img_rgb = \
            self.prepare_retargeting(input_image, flag_do_crop)

        if input_eye_ratio is None or input_lip_ratio is None:
            raise gr.Error("Invalid ratio input 💥!", duration=5)
        else:
            inference_cfg = self.live_portrait_wrapper.inference_cfg
            x_s_user = x_s_user.to(self.live_portrait_wrapper.device)
            f_s_user = f_s_user.to(self.live_portrait_wrapper.device)
            # ∆_eyes,i = R_eyes(x_s; c_s,eyes, c_d,eyes,i)
            combined_eye_ratio_tensor = self.live_portrait_wrapper.calc_combined_eye_ratio([[input_eye_ratio]], source_lmk_user)
            eyes_delta = self.live_portrait_wrapper.retarget_eye(x_s_user, combined_eye_ratio_tensor)
            # ∆_lip,i = R_lip(x_s; c_s,lip, c_d,lip,i)
            combined_lip_ratio_tensor = self.live_portrait_wrapper.calc_combined_lip_ratio([[input_lip_ratio]], source_lmk_user)
            lip_delta = self.live_portrait_wrapper.retarget_lip(x_s_user, combined_lip_ratio_tensor)
            num_kp = x_s_user.shape[1]
            # default: use x_s
            x_d_new = x_s_user + eyes_delta.reshape(-1, num_kp, 3) + lip_delta.reshape(-1, num_kp, 3)
            # D(W(f_s; x_s, x′_d))
            out = self.live_portrait_wrapper.warp_decode(f_s_user, x_s_user, x_d_new)
            out = self.live_portrait_wrapper.parse_output(out['out'])[0]
            out_to_ori_blend = paste_back(out, crop_M_c2o, img_rgb, mask_ori)
            gr.Info("Run successfully!", duration=2)
            return out, out_to_ori_blend

    def prepare_retargeting(self, input_image, flag_do_crop=True):
        """ for single image retargeting
        """
        if input_image is not None:
            # gr.Info("Upload successfully!", duration=2)
            inference_cfg = self.live_portrait_wrapper.inference_cfg
            ######## process source portrait ########
            img_rgb = load_img_online(input_image, mode='rgb', max_dim=1280, n=16)
            log(f"Load source image from {input_image}.")
            crop_info = self.cropper.crop_source_image(img_rgb, self.cropper.crop_cfg)
            if flag_do_crop:
                I_s = self.live_portrait_wrapper.prepare_source(crop_info['img_crop_256x256'])
            else:
                I_s = self.live_portrait_wrapper.prepare_source(img_rgb)
            x_s_info = self.live_portrait_wrapper.get_kp_info(I_s)
            R_s = get_rotation_matrix(x_s_info['pitch'], x_s_info['yaw'], x_s_info['roll'])
            ############################################
            f_s_user = self.live_portrait_wrapper.extract_feature_3d(I_s)
            x_s_user = self.live_portrait_wrapper.transform_keypoint(x_s_info)
            source_lmk_user = crop_info['lmk_crop']
            crop_M_c2o = crop_info['M_c2o']
            mask_ori = prepare_paste_back(inference_cfg.mask_crop, crop_info['M_c2o'], dsize=(img_rgb.shape[1], img_rgb.shape[0]))
            return f_s_user, x_s_user, source_lmk_user, crop_M_c2o, mask_ori, img_rgb
        else:
            # when the clear button is pressed, go here
            raise gr.Error("The retargeting input hasn't been prepared yet 💥!", duration=5)
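Note: for orientation, a minimal sketch of how GradioPipeline could be wired into a Gradio UI follows. It is an illustration based only on the imports and method signatures above, not the app.py shipped in this Space; the config classes are assumed to be constructible with defaults, and the component labels are arbitrary.

# Hypothetical wiring sketch for GradioPipeline (not the shipped app.py)
import gradio as gr

from src.config.argument_config import ArgumentConfig
from src.config.inference_config import InferenceConfig
from src.config.crop_config import CropConfig
from src.gradio_pipeline import GradioPipeline

pipeline = GradioPipeline(
    inference_cfg=InferenceConfig(),   # assumed default-constructible
    crop_cfg=CropConfig(),
    args=ArgumentConfig(),
)

with gr.Blocks() as demo:
    image_input = gr.Image(type="filepath", label="source portrait")
    video_input = gr.Video(label="driving video")
    flag_relative = gr.Checkbox(value=True, label="relative motion")
    flag_do_crop = gr.Checkbox(value=True, label="do crop")
    flag_remap = gr.Checkbox(value=True, label="paste back")
    flag_crop_driving = gr.Checkbox(value=False, label="crop driving video")
    out_video = gr.Video(label="result")
    out_video_concat = gr.Video(label="result (concat)")
    gr.Button("Animate").click(
        fn=pipeline.execute_video,
        inputs=[image_input, video_input, flag_relative, flag_do_crop, flag_remap, flag_crop_driving],
        outputs=[out_video, out_video_concat],
    )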
src/live_portrait_pipeline.py
ADDED
@@ -0,0 +1,285 @@
# coding: utf-8

"""
Pipeline of LivePortrait
"""

import torch
torch.backends.cudnn.benchmark = True  # disable CUDNN_BACKEND_EXECUTION_PLAN_DESCRIPTOR warning

import cv2; cv2.setNumThreads(0); cv2.ocl.setUseOpenCL(False)
import numpy as np
import os
import os.path as osp
from rich.progress import track

from .config.argument_config import ArgumentConfig
from .config.inference_config import InferenceConfig
from .config.crop_config import CropConfig
from .utils.cropper import Cropper
from .utils.camera import get_rotation_matrix
from .utils.video import images2video, concat_frames, get_fps, add_audio_to_video, has_audio_stream
from .utils.crop import _transform_img, prepare_paste_back, paste_back
from .utils.io import load_image_rgb, load_driving_info, resize_to_limit, dump, load
from .utils.helper import mkdir, basename, dct2device, is_video, is_template, remove_suffix
from .utils.rprint import rlog as log
# from .utils.viz import viz_lmk
from .live_portrait_wrapper import LivePortraitWrapper


def make_abs_path(fn):
    return osp.join(osp.dirname(osp.realpath(__file__)), fn)


class LivePortraitPipeline(object):

    def __init__(self, inference_cfg: InferenceConfig, crop_cfg: CropConfig):
        self.live_portrait_wrapper: LivePortraitWrapper = LivePortraitWrapper(inference_cfg=inference_cfg)
        self.cropper: Cropper = Cropper(crop_cfg=crop_cfg)

    def execute(self, args: ArgumentConfig):
        # for convenience
        inf_cfg = self.live_portrait_wrapper.inference_cfg
        device = self.live_portrait_wrapper.device
        crop_cfg = self.cropper.crop_cfg

        ######## process source portrait ########
        img_rgb = load_image_rgb(args.source_image)
        img_rgb = resize_to_limit(img_rgb, inf_cfg.source_max_dim, inf_cfg.source_division)
        log(f"Load source image from {args.source_image}")

        crop_info = self.cropper.crop_source_image(img_rgb, crop_cfg)
        if crop_info is None:
            raise Exception("No face detected in the source image!")
        source_lmk = crop_info['lmk_crop']
        img_crop, img_crop_256x256 = crop_info['img_crop'], crop_info['img_crop_256x256']

        if inf_cfg.flag_do_crop:
            I_s = self.live_portrait_wrapper.prepare_source(img_crop_256x256)
        else:
            img_crop_256x256 = cv2.resize(img_rgb, (256, 256))  # force resize to 256x256
            I_s = self.live_portrait_wrapper.prepare_source(img_crop_256x256)
        x_s_info = self.live_portrait_wrapper.get_kp_info(I_s)
        x_c_s = x_s_info['kp']
        R_s = get_rotation_matrix(x_s_info['pitch'], x_s_info['yaw'], x_s_info['roll'])
        f_s = self.live_portrait_wrapper.extract_feature_3d(I_s)
        x_s = self.live_portrait_wrapper.transform_keypoint(x_s_info)

        flag_lip_zero = inf_cfg.flag_lip_zero  # not overwrite
        if flag_lip_zero:
            # let the lip-open scalar be 0 at first
            c_d_lip_before_animation = [0.]
            combined_lip_ratio_tensor_before_animation = self.live_portrait_wrapper.calc_combined_lip_ratio(c_d_lip_before_animation, source_lmk)
            if combined_lip_ratio_tensor_before_animation[0][0] < inf_cfg.lip_zero_threshold:
                flag_lip_zero = False
            else:
                lip_delta_before_animation = self.live_portrait_wrapper.retarget_lip(x_s, combined_lip_ratio_tensor_before_animation)
        ############################################

        ######## process driving info ########
        flag_load_from_template = is_template(args.driving_info)
        driving_rgb_crop_256x256_lst = None
        wfp_template = None

        if flag_load_from_template:
            # NOTE: loading from a template is fast, but the cropped driving video is None
            log(f"Load from template: {args.driving_info}, NOT the video, so the cropped video and audio are both NULL.", style='bold green')
            template_dct = load(args.driving_info)
            n_frames = template_dct['n_frames']

            # set output_fps
            output_fps = template_dct.get('output_fps', inf_cfg.output_fps)
            log(f'The FPS of template: {output_fps}')

            if args.flag_crop_driving_video:
                log("Warning: flag_crop_driving_video is True, but the driving info is a template, so it is ignored.")

        elif osp.exists(args.driving_info) and is_video(args.driving_info):
            # load from a video file, AND make the motion template
            log(f"Load video: {args.driving_info}")
            if osp.isdir(args.driving_info):
                output_fps = inf_cfg.output_fps
            else:
                output_fps = int(get_fps(args.driving_info))
                log(f'The FPS of {args.driving_info} is: {output_fps}')

            log(f"Load video file (mp4 mov avi etc...): {args.driving_info}")
            driving_rgb_lst = load_driving_info(args.driving_info)

            ######## make motion template ########
            log("Start making motion template...")
            if inf_cfg.flag_crop_driving_video:
                ret = self.cropper.crop_driving_video(driving_rgb_lst)
                log(f'Driving video is cropped, {len(ret["frame_crop_lst"])} frames are processed.')
                driving_rgb_crop_lst, driving_lmk_crop_lst = ret['frame_crop_lst'], ret['lmk_crop_lst']
                driving_rgb_crop_256x256_lst = [cv2.resize(_, (256, 256)) for _ in driving_rgb_crop_lst]
            else:
                driving_lmk_crop_lst = self.cropper.calc_lmks_from_cropped_video(driving_rgb_lst)
                driving_rgb_crop_256x256_lst = [cv2.resize(_, (256, 256)) for _ in driving_rgb_lst]  # force resize to 256x256

            c_d_eyes_lst, c_d_lip_lst = self.live_portrait_wrapper.calc_driving_ratio(driving_lmk_crop_lst)
            # save the motion template
            I_d_lst = self.live_portrait_wrapper.prepare_driving_videos(driving_rgb_crop_256x256_lst)
            template_dct = self.make_motion_template(I_d_lst, c_d_eyes_lst, c_d_lip_lst, output_fps=output_fps)

            wfp_template = remove_suffix(args.driving_info) + '.pkl'
            dump(wfp_template, template_dct)
            log(f"Dump motion template to {wfp_template}")

            n_frames = I_d_lst.shape[0]
        else:
            raise Exception(f"{args.driving_info} does not exist or is an unsupported driving info type!")
        #########################################

        ######## prepare for pasteback ########
        I_p_pstbk_lst = None
        if inf_cfg.flag_pasteback and inf_cfg.flag_do_crop and inf_cfg.flag_stitching:
            mask_ori_float = prepare_paste_back(inf_cfg.mask_crop, crop_info['M_c2o'], dsize=(img_rgb.shape[1], img_rgb.shape[0]))
            I_p_pstbk_lst = []
            log("Prepared pasteback mask done.")
        #########################################

        I_p_lst = []
        R_d_0, x_d_0_info = None, None

        for i in track(range(n_frames), description='🚀Animating...', total=n_frames):
            x_d_i_info = template_dct['motion'][i]
            x_d_i_info = dct2device(x_d_i_info, device)
            R_d_i = x_d_i_info['R_d']

            if i == 0:
                R_d_0 = R_d_i
                x_d_0_info = x_d_i_info

            if inf_cfg.flag_relative_motion:
                R_new = (R_d_i @ R_d_0.permute(0, 2, 1)) @ R_s
                delta_new = x_s_info['exp'] + (x_d_i_info['exp'] - x_d_0_info['exp'])
                scale_new = x_s_info['scale'] * (x_d_i_info['scale'] / x_d_0_info['scale'])
                t_new = x_s_info['t'] + (x_d_i_info['t'] - x_d_0_info['t'])
            else:
                R_new = R_d_i
                delta_new = x_d_i_info['exp']
                scale_new = x_s_info['scale']
                t_new = x_d_i_info['t']

            t_new[..., 2].fill_(0)  # zero tz
            x_d_i_new = scale_new * (x_c_s @ R_new + delta_new) + t_new

            # Algorithm 1:
            if not inf_cfg.flag_stitching and not inf_cfg.flag_eye_retargeting and not inf_cfg.flag_lip_retargeting:
                # without stitching or retargeting
                if flag_lip_zero:
                    x_d_i_new += lip_delta_before_animation.reshape(-1, x_s.shape[1], 3)
                else:
                    pass
            elif inf_cfg.flag_stitching and not inf_cfg.flag_eye_retargeting and not inf_cfg.flag_lip_retargeting:
                # with stitching and without retargeting
                if flag_lip_zero:
                    x_d_i_new = self.live_portrait_wrapper.stitching(x_s, x_d_i_new) + lip_delta_before_animation.reshape(-1, x_s.shape[1], 3)
                else:
                    x_d_i_new = self.live_portrait_wrapper.stitching(x_s, x_d_i_new)
            else:
                eyes_delta, lip_delta = None, None
                if inf_cfg.flag_eye_retargeting:
                    c_d_eyes_i = c_d_eyes_lst[i]
                    combined_eye_ratio_tensor = self.live_portrait_wrapper.calc_combined_eye_ratio(c_d_eyes_i, source_lmk)
                    # ∆_eyes,i = R_eyes(x_s; c_s,eyes, c_d,eyes,i)
                    eyes_delta = self.live_portrait_wrapper.retarget_eye(x_s, combined_eye_ratio_tensor)
                if inf_cfg.flag_lip_retargeting:
                    c_d_lip_i = c_d_lip_lst[i]
                    combined_lip_ratio_tensor = self.live_portrait_wrapper.calc_combined_lip_ratio(c_d_lip_i, source_lmk)
                    # ∆_lip,i = R_lip(x_s; c_s,lip, c_d,lip,i)
                    lip_delta = self.live_portrait_wrapper.retarget_lip(x_s, combined_lip_ratio_tensor)

                if inf_cfg.flag_relative_motion:  # use x_s
                    x_d_i_new = x_s + \
                        (eyes_delta.reshape(-1, x_s.shape[1], 3) if eyes_delta is not None else 0) + \
                        (lip_delta.reshape(-1, x_s.shape[1], 3) if lip_delta is not None else 0)
                else:  # use x_d,i
                    x_d_i_new = x_d_i_new + \
                        (eyes_delta.reshape(-1, x_s.shape[1], 3) if eyes_delta is not None else 0) + \
                        (lip_delta.reshape(-1, x_s.shape[1], 3) if lip_delta is not None else 0)

                if inf_cfg.flag_stitching:
                    x_d_i_new = self.live_portrait_wrapper.stitching(x_s, x_d_i_new)

            out = self.live_portrait_wrapper.warp_decode(f_s, x_s, x_d_i_new)
            I_p_i = self.live_portrait_wrapper.parse_output(out['out'])[0]
            I_p_lst.append(I_p_i)

            if inf_cfg.flag_pasteback and inf_cfg.flag_do_crop and inf_cfg.flag_stitching:
                # TODO: pasteback is slow; consider optimizing it with multi-threading or on the GPU
                I_p_pstbk = paste_back(I_p_i, crop_info['M_c2o'], img_rgb, mask_ori_float)
                I_p_pstbk_lst.append(I_p_pstbk)

        mkdir(args.output_dir)
        wfp_concat = None
        flag_has_audio = (not flag_load_from_template) and has_audio_stream(args.driving_info)

        ######### build the final concat result #########
        # driving frame | source image | generation, or source image | generation
        frames_concatenated = concat_frames(driving_rgb_crop_256x256_lst, img_crop_256x256, I_p_lst)
        wfp_concat = osp.join(args.output_dir, f'{basename(args.source_image)}--{basename(args.driving_info)}_concat.mp4')
        images2video(frames_concatenated, wfp=wfp_concat, fps=output_fps)

        if flag_has_audio:
            # final concat result with audio
            wfp_concat_with_audio = osp.join(args.output_dir, f'{basename(args.source_image)}--{basename(args.driving_info)}_concat_with_audio.mp4')
            add_audio_to_video(wfp_concat, args.driving_info, wfp_concat_with_audio)
            os.replace(wfp_concat_with_audio, wfp_concat)
            log(f"Replace {wfp_concat} with {wfp_concat_with_audio}")

        # save the driven result
        wfp = osp.join(args.output_dir, f'{basename(args.source_image)}--{basename(args.driving_info)}.mp4')
        if I_p_pstbk_lst is not None and len(I_p_pstbk_lst) > 0:
            images2video(I_p_pstbk_lst, wfp=wfp, fps=output_fps)
        else:
            images2video(I_p_lst, wfp=wfp, fps=output_fps)

        ######### build the final result #########
        if flag_has_audio:
            wfp_with_audio = osp.join(args.output_dir, f'{basename(args.source_image)}--{basename(args.driving_info)}_with_audio.mp4')
            add_audio_to_video(wfp, args.driving_info, wfp_with_audio)
            os.replace(wfp_with_audio, wfp)
            log(f"Replace {wfp} with {wfp_with_audio}")

        # final log
        if wfp_template not in (None, ''):
            log(f'Animated template: {wfp_template}. You can pass this template path to the `-d` argument next time to skip video cropping and motion-template creation, and to protect privacy.', style='bold green')
        log(f'Animated video: {wfp}')
        log(f'Animated video with concat: {wfp_concat}')

        return wfp, wfp_concat

    def make_motion_template(self, I_d_lst, c_d_eyes_lst, c_d_lip_lst, **kwargs):
        n_frames = I_d_lst.shape[0]
        template_dct = {
            'n_frames': n_frames,
            'output_fps': kwargs.get('output_fps', 25),
            'motion': [],
            'c_d_eyes_lst': [],
            'c_d_lip_lst': [],
        }

        for i in track(range(n_frames), description='Making motion templates...', total=n_frames):
            # collect s_d, R_d, δ_d and t_d for inference
            I_d_i = I_d_lst[i]
            x_d_i_info = self.live_portrait_wrapper.get_kp_info(I_d_i)
            R_d_i = get_rotation_matrix(x_d_i_info['pitch'], x_d_i_info['yaw'], x_d_i_info['roll'])

            item_dct = {
                'scale': x_d_i_info['scale'].cpu().numpy().astype(np.float32),
                'R_d': R_d_i.cpu().numpy().astype(np.float32),
                'exp': x_d_i_info['exp'].cpu().numpy().astype(np.float32),
                't': x_d_i_info['t'].cpu().numpy().astype(np.float32),
            }

            template_dct['motion'].append(item_dct)

            c_d_eyes = c_d_eyes_lst[i].astype(np.float32)
            template_dct['c_d_eyes_lst'].append(c_d_eyes)

            c_d_lip = c_d_lip_lst[i].astype(np.float32)
            template_dct['c_d_lip_lst'].append(c_d_lip)

        return template_dct
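Note: the heart of the animation loop above is the relative-motion update, where per-frame driving rotation, expression, scale and translation are expressed relative to the first driving frame and then composed with the source. The following is a shape-level sketch of that composition with random tensors standing in for model outputs; the keypoint count (21) is chosen purely for illustration, and the explicit broadcasting differs slightly from the batch-size-1 shortcut used in the loop.

# Shape-level sketch of the relative-motion keypoint composition (illustrative only)
import torch

bs, num_kp = 1, 21

x_c_s = torch.randn(bs, num_kp, 3)                      # canonical source keypoints
R_s, R_d_0, R_d_i = (torch.randn(bs, 3, 3) for _ in range(3))
exp_s, exp_d_0, exp_d_i = (torch.randn(bs, num_kp, 3) for _ in range(3))
scale_s, scale_d_0, scale_d_i = (torch.rand(bs, 1) + 0.5 for _ in range(3))
t_s, t_d_0, t_d_i = (torch.randn(bs, 3) for _ in range(3))

# driving deltas are taken w.r.t. the first driving frame, then applied to the source
R_new = (R_d_i @ R_d_0.permute(0, 2, 1)) @ R_s
delta_new = exp_s + (exp_d_i - exp_d_0)
scale_new = scale_s * (scale_d_i / scale_d_0)
t_new = t_s + (t_d_i - t_d_0)
t_new[..., 2] = 0                                       # tz is zeroed, as in the loop

x_d_i_new = scale_new[..., None] * (x_c_s @ R_new + delta_new) + t_new[:, None, :]
print(x_d_i_new.shape)                                  # torch.Size([1, 21, 3])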
src/live_portrait_wrapper.py
ADDED
@@ -0,0 +1,319 @@
# coding: utf-8

"""
Wrapper for LivePortrait core functions
"""

import os.path as osp
import numpy as np
import cv2
import torch
import yaml

from .utils.timer import Timer
from .utils.helper import load_model, concat_feat
from .utils.camera import headpose_pred_to_degree, get_rotation_matrix
from .utils.retargeting_utils import calc_eye_close_ratio, calc_lip_close_ratio
from .config.inference_config import InferenceConfig
from .utils.rprint import rlog as log


class LivePortraitWrapper(object):

    def __init__(self, inference_cfg: InferenceConfig):

        self.inference_cfg = inference_cfg
        self.device_id = inference_cfg.device_id
        self.compile = inference_cfg.flag_do_torch_compile
        if inference_cfg.flag_force_cpu:
            self.device = 'cpu'
        else:
            self.device = 'cuda:' + str(self.device_id)

        model_config = yaml.load(open(inference_cfg.models_config, 'r'), Loader=yaml.SafeLoader)
        # init F
        self.appearance_feature_extractor = load_model(inference_cfg.checkpoint_F, model_config, self.device, 'appearance_feature_extractor')
        log('Load appearance_feature_extractor done.')
        # init M
        self.motion_extractor = load_model(inference_cfg.checkpoint_M, model_config, self.device, 'motion_extractor')
        log('Load motion_extractor done.')
        # init W
        self.warping_module = load_model(inference_cfg.checkpoint_W, model_config, self.device, 'warping_module')
        log('Load warping_module done.')
        # init G
        self.spade_generator = load_model(inference_cfg.checkpoint_G, model_config, self.device, 'spade_generator')
        log('Load spade_generator done.')
        # init S and R
        if inference_cfg.checkpoint_S is not None and osp.exists(inference_cfg.checkpoint_S):
            self.stitching_retargeting_module = load_model(inference_cfg.checkpoint_S, model_config, self.device, 'stitching_retargeting_module')
            log('Load stitching_retargeting_module done.')
        else:
            self.stitching_retargeting_module = None
        # Optimize for inference
        if self.compile:
            torch._dynamo.config.suppress_errors = True  # Suppress errors and fall back to eager execution
            self.warping_module = torch.compile(self.warping_module, mode='max-autotune')
            self.spade_generator = torch.compile(self.spade_generator, mode='max-autotune')

        self.timer = Timer()

    def update_config(self, user_args):
        for k, v in user_args.items():
            if hasattr(self.inference_cfg, k):
                setattr(self.inference_cfg, k, v)

    def prepare_source(self, img: np.ndarray) -> torch.Tensor:
        """ construct the input as standard
        img: HxWx3, uint8, 256x256
        """
        h, w = img.shape[:2]
        if h != self.inference_cfg.input_shape[0] or w != self.inference_cfg.input_shape[1]:
            x = cv2.resize(img, (self.inference_cfg.input_shape[0], self.inference_cfg.input_shape[1]))
        else:
            x = img.copy()

        if x.ndim == 3:
            x = x[np.newaxis].astype(np.float32) / 255.  # HxWx3 -> 1xHxWx3, normalized to 0~1
        elif x.ndim == 4:
            x = x.astype(np.float32) / 255.  # BxHxWx3, normalized to 0~1
        else:
            raise ValueError(f'img ndim should be 3 or 4: {x.ndim}')
        x = np.clip(x, 0, 1)  # clip to 0~1
        x = torch.from_numpy(x).permute(0, 3, 1, 2)  # 1xHxWx3 -> 1x3xHxW
        x = x.to(self.device)
        return x

    def prepare_driving_videos(self, imgs) -> torch.Tensor:
        """ construct the input as standard
        imgs: NxBxHxWx3, uint8
        """
        if isinstance(imgs, list):
            _imgs = np.array(imgs)[..., np.newaxis]  # TxHxWx3x1
        elif isinstance(imgs, np.ndarray):
            _imgs = imgs
        else:
            raise ValueError(f'imgs type error: {type(imgs)}')

        y = _imgs.astype(np.float32) / 255.
        y = np.clip(y, 0, 1)  # clip to 0~1
        y = torch.from_numpy(y).permute(0, 4, 3, 1, 2)  # TxHxWx3x1 -> Tx1x3xHxW
        y = y.to(self.device)

        return y

    def extract_feature_3d(self, x: torch.Tensor) -> torch.Tensor:
        """ get the appearance feature of the image by F
        x: Bx3xHxW, normalized to 0~1
        """
        with torch.no_grad():
            with torch.autocast(device_type=self.device[:4], dtype=torch.float16, enabled=self.inference_cfg.flag_use_half_precision):
                feature_3d = self.appearance_feature_extractor(x)

        return feature_3d.float()

    def get_kp_info(self, x: torch.Tensor, **kwargs) -> dict:
        """ get the implicit keypoint information
        x: Bx3xHxW, normalized to 0~1
        flag_refine_info: whether to transform the pose into degrees and reshape the outputs
        return: a dict containing the keys 'pitch', 'yaw', 'roll', 't', 'exp', 'scale', 'kp'
        """
        with torch.no_grad():
            with torch.autocast(device_type=self.device[:4], dtype=torch.float16, enabled=self.inference_cfg.flag_use_half_precision):
                kp_info = self.motion_extractor(x)

            if self.inference_cfg.flag_use_half_precision:
                # float the dict
                for k, v in kp_info.items():
                    if isinstance(v, torch.Tensor):
                        kp_info[k] = v.float()

        flag_refine_info: bool = kwargs.get('flag_refine_info', True)
        if flag_refine_info:
            bs = kp_info['kp'].shape[0]
            kp_info['pitch'] = headpose_pred_to_degree(kp_info['pitch'])[:, None]  # Bx1
            kp_info['yaw'] = headpose_pred_to_degree(kp_info['yaw'])[:, None]  # Bx1
            kp_info['roll'] = headpose_pred_to_degree(kp_info['roll'])[:, None]  # Bx1
            kp_info['kp'] = kp_info['kp'].reshape(bs, -1, 3)  # BxNx3
            kp_info['exp'] = kp_info['exp'].reshape(bs, -1, 3)  # BxNx3

        return kp_info

    def get_pose_dct(self, kp_info: dict) -> dict:
        pose_dct = dict(
            pitch=headpose_pred_to_degree(kp_info['pitch']).item(),
            yaw=headpose_pred_to_degree(kp_info['yaw']).item(),
            roll=headpose_pred_to_degree(kp_info['roll']).item(),
        )
        return pose_dct

    def get_fs_and_kp_info(self, source_prepared, driving_first_frame):

        # get the canonical keypoints of the source image by M
        source_kp_info = self.get_kp_info(source_prepared, flag_refine_info=True)
        source_rotation = get_rotation_matrix(source_kp_info['pitch'], source_kp_info['yaw'], source_kp_info['roll'])

        # get the canonical keypoints of the first driving frame by M
        driving_first_frame_kp_info = self.get_kp_info(driving_first_frame, flag_refine_info=True)
        driving_first_frame_rotation = get_rotation_matrix(
            driving_first_frame_kp_info['pitch'],
            driving_first_frame_kp_info['yaw'],
            driving_first_frame_kp_info['roll']
        )

        # get the feature volume by F
        source_feature_3d = self.extract_feature_3d(source_prepared)

        return source_kp_info, source_rotation, source_feature_3d, driving_first_frame_kp_info, driving_first_frame_rotation

    def transform_keypoint(self, kp_info: dict):
        """
        transform the implicit keypoints with the pose, shift, and expression deformation
        kp: BxNx3
        """
        kp = kp_info['kp']  # (bs, k, 3)
        pitch, yaw, roll = kp_info['pitch'], kp_info['yaw'], kp_info['roll']

        t, exp = kp_info['t'], kp_info['exp']
        scale = kp_info['scale']

        pitch = headpose_pred_to_degree(pitch)
        yaw = headpose_pred_to_degree(yaw)
        roll = headpose_pred_to_degree(roll)

        bs = kp.shape[0]
        if kp.ndim == 2:
            num_kp = kp.shape[1] // 3  # Bx(num_kpx3)
        else:
            num_kp = kp.shape[1]  # Bxnum_kpx3

        rot_mat = get_rotation_matrix(pitch, yaw, roll)  # (bs, 3, 3)

        # Eqn.2: s * (R * x_c,s + exp) + t
        kp_transformed = kp.view(bs, num_kp, 3) @ rot_mat + exp.view(bs, num_kp, 3)
        kp_transformed *= scale[..., None]  # (bs, k, 3) * (bs, 1, 1) = (bs, k, 3)
        kp_transformed[:, :, 0:2] += t[:, None, 0:2]  # remove z, only apply tx and ty

        return kp_transformed

    def retarget_eye(self, kp_source: torch.Tensor, eye_close_ratio: torch.Tensor) -> torch.Tensor:
        """
        kp_source: BxNx3
        eye_close_ratio: Bx3
        Return: Bx(3*num_kp)
        """
        feat_eye = concat_feat(kp_source, eye_close_ratio)

        with torch.no_grad():
            delta = self.stitching_retargeting_module['eye'](feat_eye)

        return delta

    def retarget_lip(self, kp_source: torch.Tensor, lip_close_ratio: torch.Tensor) -> torch.Tensor:
        """
        kp_source: BxNx3
        lip_close_ratio: Bx2
        Return: Bx(3*num_kp)
        """
        feat_lip = concat_feat(kp_source, lip_close_ratio)

        with torch.no_grad():
            delta = self.stitching_retargeting_module['lip'](feat_lip)

        return delta

    def stitch(self, kp_source: torch.Tensor, kp_driving: torch.Tensor) -> torch.Tensor:
        """
        kp_source: BxNx3
        kp_driving: BxNx3
        Return: Bx(3*num_kp+2)
        """
        feat_stitching = concat_feat(kp_source, kp_driving)

        with torch.no_grad():
            delta = self.stitching_retargeting_module['stitching'](feat_stitching)

        return delta

    def stitching(self, kp_source: torch.Tensor, kp_driving: torch.Tensor) -> torch.Tensor:
        """ conduct the stitching
        kp_source: Bxnum_kpx3
        kp_driving: Bxnum_kpx3
        """

        if self.stitching_retargeting_module is not None:

            bs, num_kp = kp_source.shape[:2]

            kp_driving_new = kp_driving.clone()
            delta = self.stitch(kp_source, kp_driving_new)

            delta_exp = delta[..., :3*num_kp].reshape(bs, num_kp, 3)  # 1x20x3
            delta_tx_ty = delta[..., 3*num_kp:3*num_kp+2].reshape(bs, 1, 2)  # 1x1x2

            kp_driving_new += delta_exp
            kp_driving_new[..., :2] += delta_tx_ty

            return kp_driving_new

        return kp_driving

    def warp_decode(self, feature_3d: torch.Tensor, kp_source: torch.Tensor, kp_driving: torch.Tensor) -> torch.Tensor:
        """ get the image after warping the implicit keypoints
        feature_3d: Bx32x16x64x64, feature volume
        kp_source: BxNx3
        kp_driving: BxNx3
        """
        # Line 18 in Algorithm 1: D(W(f_s; x_s, x′_d,i))
        with torch.no_grad():
            with torch.autocast(device_type=self.device[:4], dtype=torch.float16, enabled=self.inference_cfg.flag_use_half_precision):
                if self.compile:
                    # Mark the beginning of a new CUDA Graph step
                    torch.compiler.cudagraph_mark_step_begin()
                # get decoder input
                ret_dct = self.warping_module(feature_3d, kp_source=kp_source, kp_driving=kp_driving)
                # decode
                ret_dct['out'] = self.spade_generator(feature=ret_dct['out'])

            # float the dict
            if self.inference_cfg.flag_use_half_precision:
                for k, v in ret_dct.items():
                    if isinstance(v, torch.Tensor):
                        ret_dct[k] = v.float()

        return ret_dct

    def parse_output(self, out: torch.Tensor) -> np.ndarray:
        """ construct the output as standard
        return: 1xHxWx3, uint8
        """
        out = np.transpose(out.data.cpu().numpy(), [0, 2, 3, 1])  # 1x3xHxW -> 1xHxWx3
        out = np.clip(out, 0, 1)  # clip to 0~1
        out = np.clip(out * 255, 0, 255).astype(np.uint8)  # 0~1 -> 0~255

        return out

    def calc_driving_ratio(self, driving_lmk_lst):
        input_eye_ratio_lst = []
        input_lip_ratio_lst = []
        for lmk in driving_lmk_lst:
            # for eyes retargeting
            input_eye_ratio_lst.append(calc_eye_close_ratio(lmk[None]))
            # for lip retargeting
            input_lip_ratio_lst.append(calc_lip_close_ratio(lmk[None]))
        return input_eye_ratio_lst, input_lip_ratio_lst

    def calc_combined_eye_ratio(self, c_d_eyes_i, source_lmk):
        c_s_eyes = calc_eye_close_ratio(source_lmk[None])
        c_s_eyes_tensor = torch.from_numpy(c_s_eyes).float().to(self.device)
        c_d_eyes_i_tensor = torch.Tensor([c_d_eyes_i[0][0]]).reshape(1, 1).to(self.device)
        # [c_s,eyes, c_d,eyes,i]
        combined_eye_ratio_tensor = torch.cat([c_s_eyes_tensor, c_d_eyes_i_tensor], dim=1)
        return combined_eye_ratio_tensor

    def calc_combined_lip_ratio(self, c_d_lip_i, source_lmk):
        c_s_lip = calc_lip_close_ratio(source_lmk[None])
        c_s_lip_tensor = torch.from_numpy(c_s_lip).float().to(self.device)
        c_d_lip_i_tensor = torch.Tensor([c_d_lip_i[0]]).to(self.device).reshape(1, 1)  # 1x1
        # [c_s,lip, c_d,lip,i]
        combined_lip_ratio_tensor = torch.cat([c_s_lip_tensor, c_d_lip_i_tensor], dim=1)  # 1x2
        return combined_lip_ratio_tensor
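Note: taken together, the wrapper exposes the full single-image inference path used by the pipeline. A condensed usage sketch follows; it assumes InferenceConfig is default-constructible with valid checkpoint paths, and uses a blank array as a stand-in for a real 256x256 face crop.

# Condensed single-image pass through the wrapper, mirroring the pipeline's call order
import numpy as np
from src.config.inference_config import InferenceConfig
from src.live_portrait_wrapper import LivePortraitWrapper

wrapper = LivePortraitWrapper(inference_cfg=InferenceConfig())

img = np.zeros((256, 256, 3), dtype=np.uint8)       # stand-in for a real 256x256 face crop
I_s = wrapper.prepare_source(img)                    # 1x3x256x256, float in [0, 1]
x_s_info = wrapper.get_kp_info(I_s)                  # pitch/yaw/roll/t/exp/scale/kp
f_s = wrapper.extract_feature_3d(I_s)                # 1x32x16x64x64 feature volume
x_s = wrapper.transform_keypoint(x_s_info)           # Eqn. 2: s * (x_c,s @ R + exp) + t

# driving keypoints would normally come from a driving frame;
# reusing x_s here simply reconstructs the source image
out = wrapper.warp_decode(f_s, x_s, x_s)
img_out = wrapper.parse_output(out['out'])[0]        # HxWx3, uint8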
src/modules/__init__.py
ADDED
File without changes
src/modules/appearance_feature_extractor.py
ADDED
@@ -0,0 +1,48 @@
# coding: utf-8

"""
Appearance extractor (F) defined in the paper, which maps the source image s to a 3D appearance feature volume.
"""

import torch
from torch import nn
from .util import SameBlock2d, DownBlock2d, ResBlock3d


class AppearanceFeatureExtractor(nn.Module):

    def __init__(self, image_channel, block_expansion, num_down_blocks, max_features, reshape_channel, reshape_depth, num_resblocks):
        super(AppearanceFeatureExtractor, self).__init__()
        self.image_channel = image_channel
        self.block_expansion = block_expansion
        self.num_down_blocks = num_down_blocks
        self.max_features = max_features
        self.reshape_channel = reshape_channel
        self.reshape_depth = reshape_depth

        self.first = SameBlock2d(image_channel, block_expansion, kernel_size=(3, 3), padding=(1, 1))

        down_blocks = []
        for i in range(num_down_blocks):
            in_features = min(max_features, block_expansion * (2 ** i))
            out_features = min(max_features, block_expansion * (2 ** (i + 1)))
            down_blocks.append(DownBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))
        self.down_blocks = nn.ModuleList(down_blocks)

        self.second = nn.Conv2d(in_channels=out_features, out_channels=max_features, kernel_size=1, stride=1)

        self.resblocks_3d = torch.nn.Sequential()
        for i in range(num_resblocks):
            self.resblocks_3d.add_module('3dr' + str(i), ResBlock3d(reshape_channel, kernel_size=3, padding=1))

    def forward(self, source_image):
        out = self.first(source_image)  # Bx3x256x256 -> Bx64x256x256

        for i in range(len(self.down_blocks)):
            out = self.down_blocks[i](out)
        out = self.second(out)
        bs, c, h, w = out.shape  # -> Bx512x64x64

        f_s = out.view(bs, self.reshape_channel, self.reshape_depth, h, w)  # -> Bx32x16x64x64
        f_s = self.resblocks_3d(f_s)  # -> Bx32x16x64x64
        return f_s
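Note: a quick shape check for the appearance extractor. The hyperparameters below are chosen to be consistent with the shape comments in forward() (Bx3x256x256 -> Bx512x64x64 -> Bx32x16x64x64); the authoritative values live in src/config/models.yaml, and num_resblocks here is illustrative.

# Shape check for AppearanceFeatureExtractor (hyperparameters inferred from the comments above)
import torch
from src.modules.appearance_feature_extractor import AppearanceFeatureExtractor

F_net = AppearanceFeatureExtractor(
    image_channel=3, block_expansion=64, num_down_blocks=2, max_features=512,
    reshape_channel=32, reshape_depth=16, num_resblocks=6,
)
x = torch.randn(1, 3, 256, 256)
with torch.no_grad():
    f_s = F_net(x)
print(f_s.shape)  # expected: torch.Size([1, 32, 16, 64, 64])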
src/modules/convnextv2.py
ADDED
@@ -0,0 +1,149 @@
# coding: utf-8

"""
This module is adapted from ConvNeXtV2 for the extraction of implicit keypoints, poses, and expression deformation.
"""

import torch
import torch.nn as nn
# from timm.models.layers import trunc_normal_, DropPath
from .util import LayerNorm, DropPath, trunc_normal_, GRN

__all__ = ['convnextv2_tiny']


class Block(nn.Module):
    """ ConvNeXtV2 Block.

    Args:
        dim (int): Number of input channels.
        drop_path (float): Stochastic depth rate. Default: 0.0
    """

    def __init__(self, dim, drop_path=0.):
        super().__init__()
        self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)  # depthwise conv
        self.norm = LayerNorm(dim, eps=1e-6)
        self.pwconv1 = nn.Linear(dim, 4 * dim)  # pointwise/1x1 convs, implemented with linear layers
        self.act = nn.GELU()
        self.grn = GRN(4 * dim)
        self.pwconv2 = nn.Linear(4 * dim, dim)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def forward(self, x):
        input = x
        x = self.dwconv(x)
        x = x.permute(0, 2, 3, 1)  # (N, C, H, W) -> (N, H, W, C)
        x = self.norm(x)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.grn(x)
        x = self.pwconv2(x)
        x = x.permute(0, 3, 1, 2)  # (N, H, W, C) -> (N, C, H, W)

        x = input + self.drop_path(x)
        return x


class ConvNeXtV2(nn.Module):
    """ ConvNeXt V2

    Args:
        in_chans (int): Number of input image channels. Default: 3
        num_classes (int): Number of classes for classification head. Default: 1000
        depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
        dims (int): Feature dimension at each stage. Default: [96, 192, 384, 768]
        drop_path_rate (float): Stochastic depth rate. Default: 0.
        head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
    """

    def __init__(
        self,
        in_chans=3,
        depths=[3, 3, 9, 3],
        dims=[96, 192, 384, 768],
        drop_path_rate=0.,
        **kwargs
    ):
        super().__init__()
        self.depths = depths
        self.downsample_layers = nn.ModuleList()  # stem and 3 intermediate downsampling conv layers
        stem = nn.Sequential(
            nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
            LayerNorm(dims[0], eps=1e-6, data_format="channels_first")
        )
        self.downsample_layers.append(stem)
        for i in range(3):
            downsample_layer = nn.Sequential(
                LayerNorm(dims[i], eps=1e-6, data_format="channels_first"),
                nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2),
            )
            self.downsample_layers.append(downsample_layer)

        self.stages = nn.ModuleList()  # 4 feature resolution stages, each consisting of multiple residual blocks
        dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
        cur = 0
        for i in range(4):
            stage = nn.Sequential(
                *[Block(dim=dims[i], drop_path=dp_rates[cur + j]) for j in range(depths[i])]
            )
            self.stages.append(stage)
            cur += depths[i]

        self.norm = nn.LayerNorm(dims[-1], eps=1e-6)  # final norm layer

        # NOTE: the output semantic items
        num_bins = kwargs.get('num_bins', 66)
        num_kp = kwargs.get('num_kp', 24)  # the number of implicit keypoints
        self.fc_kp = nn.Linear(dims[-1], 3 * num_kp)  # implicit keypoints

        # print('dims[-1]: ', dims[-1])
        self.fc_scale = nn.Linear(dims[-1], 1)  # scale
        self.fc_pitch = nn.Linear(dims[-1], num_bins)  # pitch bins
        self.fc_yaw = nn.Linear(dims[-1], num_bins)  # yaw bins
        self.fc_roll = nn.Linear(dims[-1], num_bins)  # roll bins
        self.fc_t = nn.Linear(dims[-1], 3)  # translation
        self.fc_exp = nn.Linear(dims[-1], 3 * num_kp)  # expression / delta

    def _init_weights(self, m):
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            trunc_normal_(m.weight, std=.02)
            nn.init.constant_(m.bias, 0)

    def forward_features(self, x):
        for i in range(4):
            x = self.downsample_layers[i](x)
            x = self.stages[i](x)
        return self.norm(x.mean([-2, -1]))  # global average pooling, (N, C, H, W) -> (N, C)

    def forward(self, x):
        x = self.forward_features(x)

        # implicit keypoints
        kp = self.fc_kp(x)

        # pose and expression deformation
        pitch = self.fc_pitch(x)
        yaw = self.fc_yaw(x)
        roll = self.fc_roll(x)
        t = self.fc_t(x)
        exp = self.fc_exp(x)
        scale = self.fc_scale(x)

        ret_dct = {
            'pitch': pitch,
            'yaw': yaw,
            'roll': roll,
            't': t,
            'exp': exp,
            'scale': scale,
            'kp': kp,  # canonical keypoints
        }

        return ret_dct


def convnextv2_tiny(**kwargs):
    model = ConvNeXtV2(depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], **kwargs)
    return model
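Note: the backbone's forward() returns one tensor per motion quantity rather than a single logit vector. The snippet below prints those output shapes; num_kp and num_bins are illustrative values, since the shipped numbers are configured in src/config/models.yaml.

# Inspect the motion-extractor backbone outputs (illustrative num_kp/num_bins)
import torch
from src.modules.convnextv2 import convnextv2_tiny

net = convnextv2_tiny(num_kp=21, num_bins=66)
with torch.no_grad():
    ret = net(torch.randn(1, 3, 256, 256))
for k, v in ret.items():
    print(k, tuple(v.shape))
# kp/exp: (1, 63), pitch/yaw/roll: (1, 66) bin logits, t: (1, 3), scale: (1, 1)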
src/modules/dense_motion.py
ADDED
@@ -0,0 +1,104 @@
# coding: utf-8

"""
The module that predicts a dense motion field from the sparse motion representation given by kp_source and kp_driving
"""

from torch import nn
import torch.nn.functional as F
import torch
from .util import Hourglass, make_coordinate_grid, kp2gaussian


class DenseMotionNetwork(nn.Module):
    def __init__(self, block_expansion, num_blocks, max_features, num_kp, feature_channel, reshape_depth, compress, estimate_occlusion_map=True):
        super(DenseMotionNetwork, self).__init__()
        self.hourglass = Hourglass(block_expansion=block_expansion, in_features=(num_kp+1)*(compress+1), max_features=max_features, num_blocks=num_blocks)  # ~60+G

        self.mask = nn.Conv3d(self.hourglass.out_filters, num_kp + 1, kernel_size=7, padding=3)  # 65G! NOTE: computation cost is large
        self.compress = nn.Conv3d(feature_channel, compress, kernel_size=1)  # 0.8G
        self.norm = nn.BatchNorm3d(compress, affine=True)
        self.num_kp = num_kp
        self.flag_estimate_occlusion_map = estimate_occlusion_map

        if self.flag_estimate_occlusion_map:
            self.occlusion = nn.Conv2d(self.hourglass.out_filters*reshape_depth, 1, kernel_size=7, padding=3)
        else:
            self.occlusion = None

    def create_sparse_motions(self, feature, kp_driving, kp_source):
        bs, _, d, h, w = feature.shape  # (bs, 4, 16, 64, 64)
        identity_grid = make_coordinate_grid((d, h, w), ref=kp_source)  # (16, 64, 64, 3)
        identity_grid = identity_grid.view(1, 1, d, h, w, 3)  # (1, 1, d=16, h=64, w=64, 3)
        coordinate_grid = identity_grid - kp_driving.view(bs, self.num_kp, 1, 1, 1, 3)

        k = coordinate_grid.shape[1]

        # NOTE: a first-order flow is missing here
        driving_to_source = coordinate_grid + kp_source.view(bs, self.num_kp, 1, 1, 1, 3)  # (bs, num_kp, d, h, w, 3)

        # adding background feature
        identity_grid = identity_grid.repeat(bs, 1, 1, 1, 1, 1)
        sparse_motions = torch.cat([identity_grid, driving_to_source], dim=1)  # (bs, 1+num_kp, d, h, w, 3)
        return sparse_motions

    def create_deformed_feature(self, feature, sparse_motions):
        bs, _, d, h, w = feature.shape
        feature_repeat = feature.unsqueeze(1).unsqueeze(1).repeat(1, self.num_kp+1, 1, 1, 1, 1, 1)  # (bs, num_kp+1, 1, c, d, h, w)
        feature_repeat = feature_repeat.view(bs * (self.num_kp+1), -1, d, h, w)  # (bs*(num_kp+1), c, d, h, w)
        sparse_motions = sparse_motions.view((bs * (self.num_kp+1), d, h, w, -1))  # (bs*(num_kp+1), d, h, w, 3)
        sparse_deformed = F.grid_sample(feature_repeat, sparse_motions, align_corners=False)
        sparse_deformed = sparse_deformed.view((bs, self.num_kp+1, -1, d, h, w))  # (bs, num_kp+1, c, d, h, w)

        return sparse_deformed

    def create_heatmap_representations(self, feature, kp_driving, kp_source):
        spatial_size = feature.shape[3:]  # (d=16, h=64, w=64)
        gaussian_driving = kp2gaussian(kp_driving, spatial_size=spatial_size, kp_variance=0.01)  # (bs, num_kp, d, h, w)
        gaussian_source = kp2gaussian(kp_source, spatial_size=spatial_size, kp_variance=0.01)  # (bs, num_kp, d, h, w)
        heatmap = gaussian_driving - gaussian_source  # (bs, num_kp, d, h, w)

        # adding background feature
        zeros = torch.zeros(heatmap.shape[0], 1, spatial_size[0], spatial_size[1], spatial_size[2]).type(heatmap.type()).to(heatmap.device)
        heatmap = torch.cat([zeros, heatmap], dim=1)
        heatmap = heatmap.unsqueeze(2)  # (bs, 1+num_kp, 1, d, h, w)
        return heatmap

    def forward(self, feature, kp_driving, kp_source):
        bs, _, d, h, w = feature.shape  # (bs, 32, 16, 64, 64)

        feature = self.compress(feature)  # (bs, 4, 16, 64, 64)
        feature = self.norm(feature)  # (bs, 4, 16, 64, 64)
        feature = F.relu(feature)  # (bs, 4, 16, 64, 64)

        out_dict = dict()

        # 1. deform the 3d feature
        sparse_motion = self.create_sparse_motions(feature, kp_driving, kp_source)  # (bs, 1+num_kp, d, h, w, 3)
        deformed_feature = self.create_deformed_feature(feature, sparse_motion)  # (bs, 1+num_kp, c=4, d=16, h=64, w=64)

        # 2. (bs, 1+num_kp, d, h, w)
        heatmap = self.create_heatmap_representations(deformed_feature, kp_driving, kp_source)  # (bs, 1+num_kp, 1, d, h, w)

        input = torch.cat([heatmap, deformed_feature], dim=2)  # (bs, 1+num_kp, c=5, d=16, h=64, w=64)
        input = input.view(bs, -1, d, h, w)  # (bs, (1+num_kp)*c=105, d=16, h=64, w=64)

        prediction = self.hourglass(input)

        mask = self.mask(prediction)
        mask = F.softmax(mask, dim=1)  # (bs, 1+num_kp, d=16, h=64, w=64)
        out_dict['mask'] = mask
        mask = mask.unsqueeze(2)  # (bs, num_kp+1, 1, d, h, w)
        sparse_motion = sparse_motion.permute(0, 1, 5, 2, 3, 4)  # (bs, num_kp+1, 3, d, h, w)
        deformation = (sparse_motion * mask).sum(dim=1)  # (bs, 3, d, h, w); the mask takes effect here
        deformation = deformation.permute(0, 2, 3, 4, 1)  # (bs, d, h, w, 3)

        out_dict['deformation'] = deformation

        if self.flag_estimate_occlusion_map:
            bs, _, d, h, w = prediction.shape
            prediction_reshape = prediction.view(bs, -1, h, w)
            occlusion_map = torch.sigmoid(self.occlusion(prediction_reshape))  # Bx1x64x64
            out_dict['occlusion_map'] = occlusion_map

        return out_dict
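Note: the key step in forward() is the mask-weighted combination, where softmax weights pick, per voxel, among the K+1 candidate sparse motions (background plus one per keypoint). A minimal, self-contained illustration on random tensors:

# Mask-weighted flow combination, isolated from the network (random tensors, num_kp illustrative)
import torch
import torch.nn.functional as F

bs, num_kp, d, h, w = 1, 21, 16, 64, 64
sparse_motion = torch.randn(bs, num_kp + 1, 3, d, h, w)   # candidate flows: background + per-keypoint
mask_logits = torch.randn(bs, num_kp + 1, d, h, w)

mask = F.softmax(mask_logits, dim=1).unsqueeze(2)          # (bs, num_kp+1, 1, d, h, w)
deformation = (sparse_motion * mask).sum(dim=1)            # (bs, 3, d, h, w)
deformation = deformation.permute(0, 2, 3, 4, 1)           # (bs, d, h, w, 3), ready for grid_sample
print(deformation.shape)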
src/modules/motion_extractor.py
ADDED
@@ -0,0 +1,35 @@
# coding: utf-8

"""
Motion extractor (M), which directly predicts the canonical keypoints, head pose and expression deformation of the input image
"""

from torch import nn
import torch

from .convnextv2 import convnextv2_tiny
from .util import filter_state_dict

model_dict = {
    'convnextv2_tiny': convnextv2_tiny,
}


class MotionExtractor(nn.Module):
    def __init__(self, **kwargs):
        super(MotionExtractor, self).__init__()

        # default backbone is convnextv2_tiny
        backbone = kwargs.get('backbone', 'convnextv2_tiny')
        self.detector = model_dict.get(backbone)(**kwargs)

    def load_pretrained(self, init_path: str):
        if init_path not in (None, ''):
            state_dict = torch.load(init_path, map_location=lambda storage, loc: storage)['model']
            state_dict = filter_state_dict(state_dict, remove_name='head')
            ret = self.detector.load_state_dict(state_dict, strict=False)
            print(f'Load pretrained model from {init_path}, ret: {ret}')

    def forward(self, x):
        out = self.detector(x)
        return out
src/modules/spade_generator.py
ADDED
@@ -0,0 +1,59 @@
# coding: utf-8

"""
SPADE decoder (G) defined in the paper, which takes the warped feature as input and generates the animated image.
"""

import torch
from torch import nn
import torch.nn.functional as F
from .util import SPADEResnetBlock


class SPADEDecoder(nn.Module):
    def __init__(self, upscale=1, max_features=256, block_expansion=64, out_channels=64, num_down_blocks=2):
        for i in range(num_down_blocks):
            input_channels = min(max_features, block_expansion * (2 ** (i + 1)))
        self.upscale = upscale
        super().__init__()
        norm_G = 'spadespectralinstance'
        label_num_channels = input_channels  # 256

        self.fc = nn.Conv2d(input_channels, 2 * input_channels, 3, padding=1)
        self.G_middle_0 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
        self.G_middle_1 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
        self.G_middle_2 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
        self.G_middle_3 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
        self.G_middle_4 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
        self.G_middle_5 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
        self.up_0 = SPADEResnetBlock(2 * input_channels, input_channels, norm_G, label_num_channels)
        self.up_1 = SPADEResnetBlock(input_channels, out_channels, norm_G, label_num_channels)
        self.up = nn.Upsample(scale_factor=2)

        if self.upscale is None or self.upscale <= 1:
            self.conv_img = nn.Conv2d(out_channels, 3, 3, padding=1)
        else:
            self.conv_img = nn.Sequential(
                nn.Conv2d(out_channels, 3 * (2 * 2), kernel_size=3, padding=1),
                nn.PixelShuffle(upscale_factor=2)
            )

    def forward(self, feature):
        seg = feature  # Bx256x64x64
        x = self.fc(feature)  # Bx512x64x64
        x = self.G_middle_0(x, seg)
        x = self.G_middle_1(x, seg)
        x = self.G_middle_2(x, seg)
        x = self.G_middle_3(x, seg)
        x = self.G_middle_4(x, seg)
        x = self.G_middle_5(x, seg)

        x = self.up(x)  # Bx512x64x64 -> Bx512x128x128
        x = self.up_0(x, seg)  # Bx512x128x128 -> Bx256x128x128
        x = self.up(x)  # Bx256x128x128 -> Bx256x256x256
        x = self.up_1(x, seg)  # Bx256x256x256 -> Bx64x256x256

        x = self.conv_img(F.leaky_relu(x, 2e-1))  # Bx64x256x256 -> Bx3xHxW
        x = torch.sigmoid(x)  # Bx3xHxW

        return x
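Note: when upscale > 1, conv_img predicts 3*(2*2) channels and PixelShuffle rearranges them into a 3-channel image at twice the spatial resolution. A minimal shape illustration of just that branch (the 64-channel input width matches the default out_channels above; the spatial size is illustrative):

# The PixelShuffle upscale branch of conv_img, in isolation
import torch
from torch import nn

x = torch.randn(1, 64, 256, 256)                     # decoder features before conv_img
conv_img = nn.Sequential(
    nn.Conv2d(64, 3 * (2 * 2), kernel_size=3, padding=1),
    nn.PixelShuffle(upscale_factor=2),
)
print(conv_img(x).shape)                             # torch.Size([1, 3, 512, 512])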
src/modules/stitching_retargeting_network.py
ADDED
@@ -0,0 +1,38 @@
# coding: utf-8

"""
Stitching module(S) and two retargeting modules(R) defined in the paper.

- The stitching module pastes the animated portrait back into the original image space without pixel misalignment, such as in
the stitching region.

- The eyes retargeting module is designed to address the issue of incomplete eye closure during cross-id reenactment, especially
when a person with small eyes drives a person with larger eyes.

- The lip retargeting module is designed similarly to the eye retargeting module, and can also normalize the input by ensuring that
the lips are in a closed state, which facilitates better animation driving.
"""
from torch import nn


class StitchingRetargetingNetwork(nn.Module):
    def __init__(self, input_size, hidden_sizes, output_size):
        super(StitchingRetargetingNetwork, self).__init__()
        layers = []
        for i in range(len(hidden_sizes)):
            if i == 0:
                layers.append(nn.Linear(input_size, hidden_sizes[i]))
            else:
                layers.append(nn.Linear(hidden_sizes[i - 1], hidden_sizes[i]))
            layers.append(nn.ReLU(inplace=True))
        layers.append(nn.Linear(hidden_sizes[-1], output_size))
        self.mlp = nn.Sequential(*layers)

    def initialize_weights_to_zero(self):
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.zeros_(m.weight)
                nn.init.zeros_(m.bias)

    def forward(self, x):
        return self.mlp(x)
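Note: the same MLP class backs all three heads loaded into stitching_retargeting_module in the wrapper. The sketch below instantiates it with sizes that assume 21 implicit keypoints (so 63 = 3*21 keypoint offsets, plus 2 for tx/ty on the stitching head); the hidden sizes written here are placeholders, since the shipped values come from src/config/models.yaml.

# Illustrative instantiation of the stitching head (sizes are assumptions, not the shipped config)
import torch
from src.modules.stitching_retargeting_network import StitchingRetargetingNetwork

stitcher = StitchingRetargetingNetwork(input_size=126, hidden_sizes=[128, 128, 64], output_size=65)
delta = stitcher(torch.randn(1, 126))   # 63 keypoint offsets + (tx, ty), as consumed by stitching()
print(delta.shape)                      # torch.Size([1, 65])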
src/modules/util.py
ADDED
@@ -0,0 +1,441 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding: utf-8
|
2 |
+
|
3 |
+
"""
|
4 |
+
This file defines various neural network modules and utility functions, including convolutional and residual blocks,
|
5 |
+
normalizations, and functions for spatial transformation and tensor manipulation.
|
6 |
+
"""
|
7 |
+
|
8 |
+
from torch import nn
|
9 |
+
import torch.nn.functional as F
|
10 |
+
import torch
|
11 |
+
import torch.nn.utils.spectral_norm as spectral_norm
|
12 |
+
import math
|
13 |
+
import warnings
|
14 |
+
|
15 |
+
|
16 |
+
def kp2gaussian(kp, spatial_size, kp_variance):
|
17 |
+
"""
|
18 |
+
Transform a keypoint into gaussian like representation
|
19 |
+
"""
|
20 |
+
mean = kp
|
21 |
+
|
22 |
+
coordinate_grid = make_coordinate_grid(spatial_size, mean)
|
23 |
+
number_of_leading_dimensions = len(mean.shape) - 1
|
24 |
+
shape = (1,) * number_of_leading_dimensions + coordinate_grid.shape
|
25 |
+
coordinate_grid = coordinate_grid.view(*shape)
|
26 |
+
repeats = mean.shape[:number_of_leading_dimensions] + (1, 1, 1, 1)
|
27 |
+
coordinate_grid = coordinate_grid.repeat(*repeats)
|
28 |
+
|
29 |
+
# Preprocess kp shape
|
30 |
+
shape = mean.shape[:number_of_leading_dimensions] + (1, 1, 1, 3)
|
31 |
+
mean = mean.view(*shape)
|
32 |
+
|
33 |
+
mean_sub = (coordinate_grid - mean)
|
34 |
+
|
35 |
+
out = torch.exp(-0.5 * (mean_sub ** 2).sum(-1) / kp_variance)
|
36 |
+
|
37 |
+
return out
|
38 |
+
|
39 |
+
|
40 |
+
def make_coordinate_grid(spatial_size, ref, **kwargs):
|
41 |
+
d, h, w = spatial_size
|
42 |
+
x = torch.arange(w).type(ref.dtype).to(ref.device)
|
43 |
+
y = torch.arange(h).type(ref.dtype).to(ref.device)
|
44 |
+
z = torch.arange(d).type(ref.dtype).to(ref.device)
|
45 |
+
|
46 |
+
# NOTE: must be right-down-in
|
47 |
+
x = (2 * (x / (w - 1)) - 1) # the x axis faces to the right
|
48 |
+
y = (2 * (y / (h - 1)) - 1) # the y axis faces to the bottom
|
49 |
+
z = (2 * (z / (d - 1)) - 1) # the z axis faces to the inner
|
50 |
+
|
51 |
+
yy = y.view(1, -1, 1).repeat(d, 1, w)
|
52 |
+
xx = x.view(1, 1, -1).repeat(d, h, 1)
|
53 |
+
zz = z.view(-1, 1, 1).repeat(1, h, w)
|
54 |
+
|
55 |
+
meshed = torch.cat([xx.unsqueeze_(3), yy.unsqueeze_(3), zz.unsqueeze_(3)], 3)
|
56 |
+
|
57 |
+
return meshed
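A minimal usage sketch of the two helpers above (not part of the diff; shapes and values are arbitrary, and the repo root is assumed to be on PYTHONPATH). Each implicit keypoint becomes one 3D gaussian over the feature volume:

import torch
from src.modules.util import kp2gaussian

kp = torch.rand(2, 21, 3) * 2 - 1                    # (bs, num_kp, 3), coordinates in [-1, 1]
heatmap = kp2gaussian(kp, spatial_size=(16, 64, 64), kp_variance=0.01)
print(heatmap.shape)                                 # torch.Size([2, 21, 16, 64, 64])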
|
58 |
+
|
59 |
+
|
60 |
+
class ConvT2d(nn.Module):
|
61 |
+
"""
|
62 |
+
Upsampling block for use in decoder.
|
63 |
+
"""
|
64 |
+
|
65 |
+
def __init__(self, in_features, out_features, kernel_size=3, stride=2, padding=1, output_padding=1):
|
66 |
+
super(ConvT2d, self).__init__()
|
67 |
+
|
68 |
+
self.convT = nn.ConvTranspose2d(in_features, out_features, kernel_size=kernel_size, stride=stride,
|
69 |
+
padding=padding, output_padding=output_padding)
|
70 |
+
self.norm = nn.InstanceNorm2d(out_features)
|
71 |
+
|
72 |
+
def forward(self, x):
|
73 |
+
out = self.convT(x)
|
74 |
+
out = self.norm(out)
|
75 |
+
out = F.leaky_relu(out)
|
76 |
+
return out
|
77 |
+
|
78 |
+
|
79 |
+
class ResBlock3d(nn.Module):
|
80 |
+
"""
|
81 |
+
Res block, preserve spatial resolution.
|
82 |
+
"""
|
83 |
+
|
84 |
+
def __init__(self, in_features, kernel_size, padding):
|
85 |
+
super(ResBlock3d, self).__init__()
|
86 |
+
self.conv1 = nn.Conv3d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size, padding=padding)
|
87 |
+
self.conv2 = nn.Conv3d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size, padding=padding)
|
88 |
+
self.norm1 = nn.BatchNorm3d(in_features, affine=True)
|
89 |
+
self.norm2 = nn.BatchNorm3d(in_features, affine=True)
|
90 |
+
|
91 |
+
def forward(self, x):
|
92 |
+
out = self.norm1(x)
|
93 |
+
out = F.relu(out)
|
94 |
+
out = self.conv1(out)
|
95 |
+
out = self.norm2(out)
|
96 |
+
out = F.relu(out)
|
97 |
+
out = self.conv2(out)
|
98 |
+
out += x
|
99 |
+
return out
|
100 |
+
|
101 |
+
|
102 |
+
class UpBlock3d(nn.Module):
|
103 |
+
"""
|
104 |
+
Upsampling block for use in decoder.
|
105 |
+
"""
|
106 |
+
|
107 |
+
def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
|
108 |
+
super(UpBlock3d, self).__init__()
|
109 |
+
|
110 |
+
self.conv = nn.Conv3d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
|
111 |
+
padding=padding, groups=groups)
|
112 |
+
self.norm = nn.BatchNorm3d(out_features, affine=True)
|
113 |
+
|
114 |
+
def forward(self, x):
|
115 |
+
out = F.interpolate(x, scale_factor=(1, 2, 2))
|
116 |
+
out = self.conv(out)
|
117 |
+
out = self.norm(out)
|
118 |
+
out = F.relu(out)
|
119 |
+
return out
|
120 |
+
|
121 |
+
|
122 |
+
class DownBlock2d(nn.Module):
|
123 |
+
"""
|
124 |
+
Downsampling block for use in encoder.
|
125 |
+
"""
|
126 |
+
|
127 |
+
def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
|
128 |
+
super(DownBlock2d, self).__init__()
|
129 |
+
self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size, padding=padding, groups=groups)
|
130 |
+
self.norm = nn.BatchNorm2d(out_features, affine=True)
|
131 |
+
self.pool = nn.AvgPool2d(kernel_size=(2, 2))
|
132 |
+
|
133 |
+
def forward(self, x):
|
134 |
+
out = self.conv(x)
|
135 |
+
out = self.norm(out)
|
136 |
+
out = F.relu(out)
|
137 |
+
out = self.pool(out)
|
138 |
+
return out
|
139 |
+
|
140 |
+
|
141 |
+
class DownBlock3d(nn.Module):
|
142 |
+
"""
|
143 |
+
Downsampling block for use in encoder.
|
144 |
+
"""
|
145 |
+
|
146 |
+
def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
|
147 |
+
super(DownBlock3d, self).__init__()
|
148 |
+
'''
|
149 |
+
self.conv = nn.Conv3d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
|
150 |
+
padding=padding, groups=groups, stride=(1, 2, 2))
|
151 |
+
'''
|
152 |
+
self.conv = nn.Conv3d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
|
153 |
+
padding=padding, groups=groups)
|
154 |
+
self.norm = nn.BatchNorm3d(out_features, affine=True)
|
155 |
+
self.pool = nn.AvgPool3d(kernel_size=(1, 2, 2))
|
156 |
+
|
157 |
+
def forward(self, x):
|
158 |
+
out = self.conv(x)
|
159 |
+
out = self.norm(out)
|
160 |
+
out = F.relu(out)
|
161 |
+
out = self.pool(out)
|
162 |
+
return out
|
163 |
+
|
164 |
+
|
165 |
+
class SameBlock2d(nn.Module):
|
166 |
+
"""
|
167 |
+
Simple block, preserve spatial resolution.
|
168 |
+
"""
|
169 |
+
|
170 |
+
def __init__(self, in_features, out_features, groups=1, kernel_size=3, padding=1, lrelu=False):
|
171 |
+
super(SameBlock2d, self).__init__()
|
172 |
+
self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size, padding=padding, groups=groups)
|
173 |
+
self.norm = nn.BatchNorm2d(out_features, affine=True)
|
174 |
+
if lrelu:
|
175 |
+
self.ac = nn.LeakyReLU()
|
176 |
+
else:
|
177 |
+
self.ac = nn.ReLU()
|
178 |
+
|
179 |
+
def forward(self, x):
|
180 |
+
out = self.conv(x)
|
181 |
+
out = self.norm(out)
|
182 |
+
out = self.ac(out)
|
183 |
+
return out
|
184 |
+
|
185 |
+
|
186 |
+
class Encoder(nn.Module):
|
187 |
+
"""
|
188 |
+
Hourglass Encoder
|
189 |
+
"""
|
190 |
+
|
191 |
+
def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
|
192 |
+
super(Encoder, self).__init__()
|
193 |
+
|
194 |
+
down_blocks = []
|
195 |
+
for i in range(num_blocks):
|
196 |
+
down_blocks.append(DownBlock3d(in_features if i == 0 else min(max_features, block_expansion * (2 ** i)), min(max_features, block_expansion * (2 ** (i + 1))), kernel_size=3, padding=1))
|
197 |
+
self.down_blocks = nn.ModuleList(down_blocks)
|
198 |
+
|
199 |
+
def forward(self, x):
|
200 |
+
outs = [x]
|
201 |
+
for down_block in self.down_blocks:
|
202 |
+
outs.append(down_block(outs[-1]))
|
203 |
+
return outs
|
204 |
+
|
205 |
+
|
206 |
+
class Decoder(nn.Module):
|
207 |
+
"""
|
208 |
+
Hourglass Decoder
|
209 |
+
"""
|
210 |
+
|
211 |
+
def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
|
212 |
+
super(Decoder, self).__init__()
|
213 |
+
|
214 |
+
up_blocks = []
|
215 |
+
|
216 |
+
for i in range(num_blocks)[::-1]:
|
217 |
+
in_filters = (1 if i == num_blocks - 1 else 2) * min(max_features, block_expansion * (2 ** (i + 1)))
|
218 |
+
out_filters = min(max_features, block_expansion * (2 ** i))
|
219 |
+
up_blocks.append(UpBlock3d(in_filters, out_filters, kernel_size=3, padding=1))
|
220 |
+
|
221 |
+
self.up_blocks = nn.ModuleList(up_blocks)
|
222 |
+
self.out_filters = block_expansion + in_features
|
223 |
+
|
224 |
+
self.conv = nn.Conv3d(in_channels=self.out_filters, out_channels=self.out_filters, kernel_size=3, padding=1)
|
225 |
+
self.norm = nn.BatchNorm3d(self.out_filters, affine=True)
|
226 |
+
|
227 |
+
def forward(self, x):
|
228 |
+
out = x.pop()
|
229 |
+
for up_block in self.up_blocks:
|
230 |
+
out = up_block(out)
|
231 |
+
skip = x.pop()
|
232 |
+
out = torch.cat([out, skip], dim=1)
|
233 |
+
out = self.conv(out)
|
234 |
+
out = self.norm(out)
|
235 |
+
out = F.relu(out)
|
236 |
+
return out
|
237 |
+
|
238 |
+
|
239 |
+
class Hourglass(nn.Module):
|
240 |
+
"""
|
241 |
+
Hourglass architecture.
|
242 |
+
"""
|
243 |
+
|
244 |
+
def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
|
245 |
+
super(Hourglass, self).__init__()
|
246 |
+
self.encoder = Encoder(block_expansion, in_features, num_blocks, max_features)
|
247 |
+
self.decoder = Decoder(block_expansion, in_features, num_blocks, max_features)
|
248 |
+
self.out_filters = self.decoder.out_filters
|
249 |
+
|
250 |
+
def forward(self, x):
|
251 |
+
return self.decoder(self.encoder(x))
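As a shape sanity check (illustrative only; the channel sizes are made up), the hourglass halves H and W at each of the num_blocks encoder stages, keeps the depth D, and returns block_expansion + in_features output channels:

import torch
from src.modules.util import Hourglass

hg = Hourglass(block_expansion=8, in_features=4, num_blocks=2, max_features=64)
x = torch.rand(1, 4, 4, 32, 32)        # (bs, C, D, H, W) feature volume
y = hg(x)
print(hg.out_filters, y.shape)         # 12 torch.Size([1, 12, 4, 32, 32])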
|
252 |
+
|
253 |
+
|
254 |
+
class SPADE(nn.Module):
|
255 |
+
def __init__(self, norm_nc, label_nc):
|
256 |
+
super().__init__()
|
257 |
+
|
258 |
+
self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
|
259 |
+
nhidden = 128
|
260 |
+
|
261 |
+
self.mlp_shared = nn.Sequential(
|
262 |
+
nn.Conv2d(label_nc, nhidden, kernel_size=3, padding=1),
|
263 |
+
nn.ReLU())
|
264 |
+
self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=3, padding=1)
|
265 |
+
self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=3, padding=1)
|
266 |
+
|
267 |
+
def forward(self, x, segmap):
|
268 |
+
normalized = self.param_free_norm(x)
|
269 |
+
segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest')
|
270 |
+
actv = self.mlp_shared(segmap)
|
271 |
+
gamma = self.mlp_gamma(actv)
|
272 |
+
beta = self.mlp_beta(actv)
|
273 |
+
out = normalized * (1 + gamma) + beta
|
274 |
+
return out
|
275 |
+
|
276 |
+
|
277 |
+
class SPADEResnetBlock(nn.Module):
|
278 |
+
def __init__(self, fin, fout, norm_G, label_nc, use_se=False, dilation=1):
|
279 |
+
super().__init__()
|
280 |
+
# Attributes
|
281 |
+
self.learned_shortcut = (fin != fout)
|
282 |
+
fmiddle = min(fin, fout)
|
283 |
+
self.use_se = use_se
|
284 |
+
# create conv layers
|
285 |
+
self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=dilation, dilation=dilation)
|
286 |
+
self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=dilation, dilation=dilation)
|
287 |
+
if self.learned_shortcut:
|
288 |
+
self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)
|
289 |
+
# apply spectral norm if specified
|
290 |
+
if 'spectral' in norm_G:
|
291 |
+
self.conv_0 = spectral_norm(self.conv_0)
|
292 |
+
self.conv_1 = spectral_norm(self.conv_1)
|
293 |
+
if self.learned_shortcut:
|
294 |
+
self.conv_s = spectral_norm(self.conv_s)
|
295 |
+
# define normalization layers
|
296 |
+
self.norm_0 = SPADE(fin, label_nc)
|
297 |
+
self.norm_1 = SPADE(fmiddle, label_nc)
|
298 |
+
if self.learned_shortcut:
|
299 |
+
self.norm_s = SPADE(fin, label_nc)
|
300 |
+
|
301 |
+
def forward(self, x, seg1):
|
302 |
+
x_s = self.shortcut(x, seg1)
|
303 |
+
dx = self.conv_0(self.actvn(self.norm_0(x, seg1)))
|
304 |
+
dx = self.conv_1(self.actvn(self.norm_1(dx, seg1)))
|
305 |
+
out = x_s + dx
|
306 |
+
return out
|
307 |
+
|
308 |
+
def shortcut(self, x, seg1):
|
309 |
+
if self.learned_shortcut:
|
310 |
+
x_s = self.conv_s(self.norm_s(x, seg1))
|
311 |
+
else:
|
312 |
+
x_s = x
|
313 |
+
return x_s
|
314 |
+
|
315 |
+
def actvn(self, x):
|
316 |
+
return F.leaky_relu(x, 2e-1)
|
317 |
+
|
318 |
+
|
319 |
+
def filter_state_dict(state_dict, remove_name='fc'):
|
320 |
+
new_state_dict = {}
|
321 |
+
for key in state_dict:
|
322 |
+
if remove_name in key:
|
323 |
+
continue
|
324 |
+
new_state_dict[key] = state_dict[key]
|
325 |
+
return new_state_dict
|
326 |
+
|
327 |
+
|
328 |
+
class GRN(nn.Module):
|
329 |
+
""" GRN (Global Response Normalization) layer
|
330 |
+
"""
|
331 |
+
|
332 |
+
def __init__(self, dim):
|
333 |
+
super().__init__()
|
334 |
+
self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim))
|
335 |
+
self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim))
|
336 |
+
|
337 |
+
def forward(self, x):
|
338 |
+
Gx = torch.norm(x, p=2, dim=(1, 2), keepdim=True)
|
339 |
+
Nx = Gx / (Gx.mean(dim=-1, keepdim=True) + 1e-6)
|
340 |
+
return self.gamma * (x * Nx) + self.beta + x
|
341 |
+
|
342 |
+
|
343 |
+
class LayerNorm(nn.Module):
|
344 |
+
r""" LayerNorm that supports two data formats: channels_last (default) or channels_first.
|
345 |
+
The ordering of the dimensions in the inputs. channels_last corresponds to inputs with
|
346 |
+
shape (batch_size, height, width, channels) while channels_first corresponds to inputs
|
347 |
+
with shape (batch_size, channels, height, width).
|
348 |
+
"""
|
349 |
+
|
350 |
+
def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
|
351 |
+
super().__init__()
|
352 |
+
self.weight = nn.Parameter(torch.ones(normalized_shape))
|
353 |
+
self.bias = nn.Parameter(torch.zeros(normalized_shape))
|
354 |
+
self.eps = eps
|
355 |
+
self.data_format = data_format
|
356 |
+
if self.data_format not in ["channels_last", "channels_first"]:
|
357 |
+
raise NotImplementedError
|
358 |
+
self.normalized_shape = (normalized_shape, )
|
359 |
+
|
360 |
+
def forward(self, x):
|
361 |
+
if self.data_format == "channels_last":
|
362 |
+
return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
|
363 |
+
elif self.data_format == "channels_first":
|
364 |
+
u = x.mean(1, keepdim=True)
|
365 |
+
s = (x - u).pow(2).mean(1, keepdim=True)
|
366 |
+
x = (x - u) / torch.sqrt(s + self.eps)
|
367 |
+
x = self.weight[:, None, None] * x + self.bias[:, None, None]
|
368 |
+
return x
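A quick check (illustrative only) that the channels_first branch is equivalent to F.layer_norm over the channel dimension after a permute:

import torch
import torch.nn.functional as F
from src.modules.util import LayerNorm

ln = LayerNorm(64, data_format="channels_first")
x = torch.randn(2, 64, 8, 8)
ref = F.layer_norm(x.permute(0, 2, 3, 1), (64,), ln.weight, ln.bias, ln.eps).permute(0, 3, 1, 2)
print(torch.allclose(ln(x), ref, atol=1e-5))   # True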
|
369 |
+
|
370 |
+
|
371 |
+
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
|
372 |
+
# Cut & paste from PyTorch official master until it's in a few official releases - RW
|
373 |
+
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
|
374 |
+
def norm_cdf(x):
|
375 |
+
# Computes standard normal cumulative distribution function
|
376 |
+
return (1. + math.erf(x / math.sqrt(2.))) / 2.
|
377 |
+
|
378 |
+
if (mean < a - 2 * std) or (mean > b + 2 * std):
|
379 |
+
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
|
380 |
+
"The distribution of values may be incorrect.",
|
381 |
+
stacklevel=2)
|
382 |
+
|
383 |
+
with torch.no_grad():
|
384 |
+
# Values are generated by using a truncated uniform distribution and
|
385 |
+
# then using the inverse CDF for the normal distribution.
|
386 |
+
# Get upper and lower cdf values
|
387 |
+
l = norm_cdf((a - mean) / std)
|
388 |
+
u = norm_cdf((b - mean) / std)
|
389 |
+
|
390 |
+
# Uniformly fill tensor with values from [l, u], then translate to
|
391 |
+
# [2l-1, 2u-1].
|
392 |
+
tensor.uniform_(2 * l - 1, 2 * u - 1)
|
393 |
+
|
394 |
+
# Use inverse cdf transform for normal distribution to get truncated
|
395 |
+
# standard normal
|
396 |
+
tensor.erfinv_()
|
397 |
+
|
398 |
+
# Transform to proper mean, std
|
399 |
+
tensor.mul_(std * math.sqrt(2.))
|
400 |
+
tensor.add_(mean)
|
401 |
+
|
402 |
+
# Clamp to ensure it's in the proper range
|
403 |
+
tensor.clamp_(min=a, max=b)
|
404 |
+
return tensor
|
405 |
+
|
406 |
+
|
407 |
+
def drop_path(x, drop_prob=0., training=False, scale_by_keep=True):
|
408 |
+
""" Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
|
409 |
+
|
410 |
+
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
|
411 |
+
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
|
412 |
+
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
|
413 |
+
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
|
414 |
+
'survival rate' as the argument.
|
415 |
+
|
416 |
+
"""
|
417 |
+
if drop_prob == 0. or not training:
|
418 |
+
return x
|
419 |
+
keep_prob = 1 - drop_prob
|
420 |
+
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
|
421 |
+
random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
|
422 |
+
if keep_prob > 0.0 and scale_by_keep:
|
423 |
+
random_tensor.div_(keep_prob)
|
424 |
+
return x * random_tensor
|
425 |
+
|
426 |
+
|
427 |
+
class DropPath(nn.Module):
|
428 |
+
""" Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
|
429 |
+
"""
|
430 |
+
|
431 |
+
def __init__(self, drop_prob=None, scale_by_keep=True):
|
432 |
+
super(DropPath, self).__init__()
|
433 |
+
self.drop_prob = drop_prob
|
434 |
+
self.scale_by_keep = scale_by_keep
|
435 |
+
|
436 |
+
def forward(self, x):
|
437 |
+
return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)
|
438 |
+
|
439 |
+
|
440 |
+
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
|
441 |
+
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
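A small sketch of the two remaining utilities (values chosen arbitrarily): trunc_normal_ clamps its samples to the [a, b] window, and DropPath zeroes whole samples during training while rescaling the survivors:

import torch
from src.modules.util import DropPath, trunc_normal_

w = torch.empty(256, 256)
trunc_normal_(w, std=1.0)                               # default window a=-2, b=2
print(float(w.min()) >= -2.0, float(w.max()) <= 2.0)    # True True

dp = DropPath(drop_prob=0.2).train()
y = dp(torch.ones(8, 16))                               # each row is either all 0 or all 1/0.8
print(sorted(set(y[:, 0].tolist())))                    # subset of [0.0, 1.25]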
|
src/modules/warping_network.py
ADDED
@@ -0,0 +1,77 @@
1 |
+
# coding: utf-8
|
2 |
+
|
3 |
+
"""
|
4 |
+
Warping field estimator (W) defined in the paper, which generates a warping field using the implicit
|
5 |
+
keypoint representations x_s and x_d, and employs this flow field to warp the source feature volume f_s.
|
6 |
+
"""
|
7 |
+
|
8 |
+
from torch import nn
|
9 |
+
import torch.nn.functional as F
|
10 |
+
from .util import SameBlock2d
|
11 |
+
from .dense_motion import DenseMotionNetwork
|
12 |
+
|
13 |
+
|
14 |
+
class WarpingNetwork(nn.Module):
|
15 |
+
def __init__(
|
16 |
+
self,
|
17 |
+
num_kp,
|
18 |
+
block_expansion,
|
19 |
+
max_features,
|
20 |
+
num_down_blocks,
|
21 |
+
reshape_channel,
|
22 |
+
estimate_occlusion_map=False,
|
23 |
+
dense_motion_params=None,
|
24 |
+
**kwargs
|
25 |
+
):
|
26 |
+
super(WarpingNetwork, self).__init__()
|
27 |
+
|
28 |
+
self.upscale = kwargs.get('upscale', 1)
|
29 |
+
self.flag_use_occlusion_map = kwargs.get('flag_use_occlusion_map', True)
|
30 |
+
|
31 |
+
if dense_motion_params is not None:
|
32 |
+
self.dense_motion_network = DenseMotionNetwork(
|
33 |
+
num_kp=num_kp,
|
34 |
+
feature_channel=reshape_channel,
|
35 |
+
estimate_occlusion_map=estimate_occlusion_map,
|
36 |
+
**dense_motion_params
|
37 |
+
)
|
38 |
+
else:
|
39 |
+
self.dense_motion_network = None
|
40 |
+
|
41 |
+
self.third = SameBlock2d(max_features, block_expansion * (2 ** num_down_blocks), kernel_size=(3, 3), padding=(1, 1), lrelu=True)
|
42 |
+
self.fourth = nn.Conv2d(in_channels=block_expansion * (2 ** num_down_blocks), out_channels=block_expansion * (2 ** num_down_blocks), kernel_size=1, stride=1)
|
43 |
+
|
44 |
+
self.estimate_occlusion_map = estimate_occlusion_map
|
45 |
+
|
46 |
+
def deform_input(self, inp, deformation):
|
47 |
+
return F.grid_sample(inp, deformation, align_corners=False)
|
48 |
+
|
49 |
+
def forward(self, feature_3d, kp_driving, kp_source):
|
50 |
+
if self.dense_motion_network is not None:
|
51 |
+
# Feature warper, Transforming feature representation according to deformation and occlusion
|
52 |
+
dense_motion = self.dense_motion_network(
|
53 |
+
feature=feature_3d, kp_driving=kp_driving, kp_source=kp_source
|
54 |
+
)
|
55 |
+
if 'occlusion_map' in dense_motion:
|
56 |
+
occlusion_map = dense_motion['occlusion_map'] # Bx1x64x64
|
57 |
+
else:
|
58 |
+
occlusion_map = None
|
59 |
+
|
60 |
+
deformation = dense_motion['deformation'] # Bx16x64x64x3
|
61 |
+
out = self.deform_input(feature_3d, deformation) # Bx32x16x64x64
|
62 |
+
|
63 |
+
bs, c, d, h, w = out.shape # Bx32x16x64x64
|
64 |
+
out = out.view(bs, c * d, h, w) # -> Bx512x64x64
|
65 |
+
out = self.third(out) # -> Bx256x64x64
|
66 |
+
out = self.fourth(out) # -> Bx256x64x64
|
67 |
+
|
68 |
+
if self.flag_use_occlusion_map and (occlusion_map is not None):
|
69 |
+
out = out * occlusion_map
|
70 |
+
|
71 |
+
ret_dct = {
|
72 |
+
'occlusion_map': occlusion_map,
|
73 |
+
'deformation': deformation,
|
74 |
+
'out': out,
|
75 |
+
}
|
76 |
+
|
77 |
+
return ret_dct
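The deformation convention used by deform_input can be illustrated with plain F.grid_sample on dummy tensors (not from the diff; shapes follow the comments above, values are random):

import torch
import torch.nn.functional as F

feature_3d = torch.rand(1, 32, 16, 64, 64)          # Bx32x16x64x64 source feature volume
deformation = torch.rand(1, 16, 64, 64, 3) * 2 - 1  # Bx16x64x64x3 sampling grid in [-1, 1]
warped = F.grid_sample(feature_3d, deformation, align_corners=False)
print(warped.shape)                                  # torch.Size([1, 32, 16, 64, 64])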
|
src/utils/__init__.py
ADDED
File without changes
|
src/utils/camera.py
ADDED
@@ -0,0 +1,73 @@
1 |
+
# coding: utf-8
|
2 |
+
|
3 |
+
"""
|
4 |
+
functions for processing and transforming 3D facial keypoints
|
5 |
+
"""
|
6 |
+
|
7 |
+
import numpy as np
|
8 |
+
import torch
|
9 |
+
import torch.nn.functional as F
|
10 |
+
|
11 |
+
PI = np.pi
|
12 |
+
|
13 |
+
|
14 |
+
def headpose_pred_to_degree(pred):
|
15 |
+
"""
|
16 |
+
pred: (bs, 66) or (bs, 1) or others
|
17 |
+
"""
|
18 |
+
if pred.ndim > 1 and pred.shape[1] == 66:
|
19 |
+
# NOTE: the average is modified to 97.5
|
20 |
+
device = pred.device
|
21 |
+
idx_tensor = [idx for idx in range(0, 66)]
|
22 |
+
idx_tensor = torch.FloatTensor(idx_tensor).to(device)
|
23 |
+
pred = F.softmax(pred, dim=1)
|
24 |
+
degree = torch.sum(pred*idx_tensor, axis=1) * 3 - 97.5
|
25 |
+
|
26 |
+
return degree
|
27 |
+
|
28 |
+
return pred
|
29 |
+
|
30 |
+
|
31 |
+
def get_rotation_matrix(pitch_, yaw_, roll_):
|
32 |
+
""" the input is in degree
|
33 |
+
"""
|
34 |
+
# transform to radian
|
35 |
+
pitch = pitch_ / 180 * PI
|
36 |
+
yaw = yaw_ / 180 * PI
|
37 |
+
roll = roll_ / 180 * PI
|
38 |
+
|
39 |
+
device = pitch.device
|
40 |
+
|
41 |
+
if pitch.ndim == 1:
|
42 |
+
pitch = pitch.unsqueeze(1)
|
43 |
+
if yaw.ndim == 1:
|
44 |
+
yaw = yaw.unsqueeze(1)
|
45 |
+
if roll.ndim == 1:
|
46 |
+
roll = roll.unsqueeze(1)
|
47 |
+
|
48 |
+
# calculate the euler matrix
|
49 |
+
bs = pitch.shape[0]
|
50 |
+
ones = torch.ones([bs, 1]).to(device)
|
51 |
+
zeros = torch.zeros([bs, 1]).to(device)
|
52 |
+
x, y, z = pitch, yaw, roll
|
53 |
+
|
54 |
+
rot_x = torch.cat([
|
55 |
+
ones, zeros, zeros,
|
56 |
+
zeros, torch.cos(x), -torch.sin(x),
|
57 |
+
zeros, torch.sin(x), torch.cos(x)
|
58 |
+
], dim=1).reshape([bs, 3, 3])
|
59 |
+
|
60 |
+
rot_y = torch.cat([
|
61 |
+
torch.cos(y), zeros, torch.sin(y),
|
62 |
+
zeros, ones, zeros,
|
63 |
+
-torch.sin(y), zeros, torch.cos(y)
|
64 |
+
], dim=1).reshape([bs, 3, 3])
|
65 |
+
|
66 |
+
rot_z = torch.cat([
|
67 |
+
torch.cos(z), -torch.sin(z), zeros,
|
68 |
+
torch.sin(z), torch.cos(z), zeros,
|
69 |
+
zeros, zeros, ones
|
70 |
+
], dim=1).reshape([bs, 3, 3])
|
71 |
+
|
72 |
+
rot = rot_z @ rot_y @ rot_x
|
73 |
+
return rot.permute(0, 2, 1) # transpose
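A minimal check (not part of the diff) that the returned matrices are valid rotations; the angles are arbitrary degrees and the repo root is assumed to be on PYTHONPATH:

import torch
from src.utils.camera import get_rotation_matrix

pitch, yaw, roll = torch.tensor([10.]), torch.tensor([-5.]), torch.tensor([3.])   # degrees
R = get_rotation_matrix(pitch, yaw, roll)            # (1, 3, 3)
print(R.shape, torch.allclose(R @ R.transpose(1, 2), torch.eye(3).expand(1, 3, 3), atol=1e-5))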
|
src/utils/crop.py
ADDED
@@ -0,0 +1,398 @@
1 |
+
# coding: utf-8
|
2 |
+
|
3 |
+
"""
|
4 |
+
cropping function and the related preprocess functions for cropping
|
5 |
+
"""
|
6 |
+
|
7 |
+
import numpy as np
|
8 |
+
import os.path as osp
|
9 |
+
from math import sin, cos, acos, degrees
|
10 |
+
import cv2; cv2.setNumThreads(0); cv2.ocl.setUseOpenCL(False) # NOTE: enforce single thread
|
11 |
+
from .rprint import rprint as print
|
12 |
+
|
13 |
+
DTYPE = np.float32
|
14 |
+
CV2_INTERP = cv2.INTER_LINEAR
|
15 |
+
|
16 |
+
def make_abs_path(fn):
|
17 |
+
return osp.join(osp.dirname(osp.realpath(__file__)), fn)
|
18 |
+
|
19 |
+
def _transform_img(img, M, dsize, flags=CV2_INTERP, borderMode=None):
|
20 |
+
""" conduct similarity or affine transformation to the image, do not do border operation!
|
21 |
+
img:
|
22 |
+
M: 2x3 matrix or 3x3 matrix
|
23 |
+
dsize: target shape (width, height)
|
24 |
+
"""
|
25 |
+
if isinstance(dsize, tuple) or isinstance(dsize, list):
|
26 |
+
_dsize = tuple(dsize)
|
27 |
+
else:
|
28 |
+
_dsize = (dsize, dsize)
|
29 |
+
|
30 |
+
if borderMode is not None:
|
31 |
+
return cv2.warpAffine(img, M[:2, :], dsize=_dsize, flags=flags, borderMode=borderMode, borderValue=(0, 0, 0))
|
32 |
+
else:
|
33 |
+
return cv2.warpAffine(img, M[:2, :], dsize=_dsize, flags=flags)
|
34 |
+
|
35 |
+
|
36 |
+
def _transform_pts(pts, M):
|
37 |
+
""" conduct similarity or affine transformation to the pts
|
38 |
+
pts: Nx2 ndarray
|
39 |
+
M: 2x3 matrix or 3x3 matrix
|
40 |
+
return: Nx2
|
41 |
+
"""
|
42 |
+
return pts @ M[:2, :2].T + M[:2, 2]
|
43 |
+
|
44 |
+
|
45 |
+
def parse_pt2_from_pt101(pt101, use_lip=True):
|
46 |
+
"""
|
47 |
+
parsing the 2 points according to the 101 points, which cancels the roll
|
48 |
+
"""
|
49 |
+
# the former version use the eye center, but it is not robust, now use interpolation
|
50 |
+
pt_left_eye = np.mean(pt101[[39, 42, 45, 48]], axis=0) # left eye center
|
51 |
+
pt_right_eye = np.mean(pt101[[51, 54, 57, 60]], axis=0) # right eye center
|
52 |
+
|
53 |
+
if use_lip:
|
54 |
+
# use lip
|
55 |
+
pt_center_eye = (pt_left_eye + pt_right_eye) / 2
|
56 |
+
pt_center_lip = (pt101[75] + pt101[81]) / 2
|
57 |
+
pt2 = np.stack([pt_center_eye, pt_center_lip], axis=0)
|
58 |
+
else:
|
59 |
+
pt2 = np.stack([pt_left_eye, pt_right_eye], axis=0)
|
60 |
+
return pt2
|
61 |
+
|
62 |
+
|
63 |
+
def parse_pt2_from_pt106(pt106, use_lip=True):
|
64 |
+
"""
|
65 |
+
parsing the 2 points according to the 106 points, which cancels the roll
|
66 |
+
"""
|
67 |
+
pt_left_eye = np.mean(pt106[[33, 35, 40, 39]], axis=0) # left eye center
|
68 |
+
pt_right_eye = np.mean(pt106[[87, 89, 94, 93]], axis=0) # right eye center
|
69 |
+
|
70 |
+
if use_lip:
|
71 |
+
# use lip
|
72 |
+
pt_center_eye = (pt_left_eye + pt_right_eye) / 2
|
73 |
+
pt_center_lip = (pt106[52] + pt106[61]) / 2
|
74 |
+
pt2 = np.stack([pt_center_eye, pt_center_lip], axis=0)
|
75 |
+
else:
|
76 |
+
pt2 = np.stack([pt_left_eye, pt_right_eye], axis=0)
|
77 |
+
return pt2
|
78 |
+
|
79 |
+
|
80 |
+
def parse_pt2_from_pt203(pt203, use_lip=True):
|
81 |
+
"""
|
82 |
+
parsing the 2 points according to the 203 points, which cancels the roll
|
83 |
+
"""
|
84 |
+
pt_left_eye = np.mean(pt203[[0, 6, 12, 18]], axis=0) # left eye center
|
85 |
+
pt_right_eye = np.mean(pt203[[24, 30, 36, 42]], axis=0) # right eye center
|
86 |
+
if use_lip:
|
87 |
+
# use lip
|
88 |
+
pt_center_eye = (pt_left_eye + pt_right_eye) / 2
|
89 |
+
pt_center_lip = (pt203[48] + pt203[66]) / 2
|
90 |
+
pt2 = np.stack([pt_center_eye, pt_center_lip], axis=0)
|
91 |
+
else:
|
92 |
+
pt2 = np.stack([pt_left_eye, pt_right_eye], axis=0)
|
93 |
+
return pt2
|
94 |
+
|
95 |
+
|
96 |
+
def parse_pt2_from_pt68(pt68, use_lip=True):
|
97 |
+
"""
|
98 |
+
parsing the 2 points according to the 68 points, which cancels the roll
|
99 |
+
"""
|
100 |
+
lm_idx = np.array([31, 37, 40, 43, 46, 49, 55], dtype=np.int32) - 1
|
101 |
+
if use_lip:
|
102 |
+
pt5 = np.stack([
|
103 |
+
np.mean(pt68[lm_idx[[1, 2]], :], 0), # left eye
|
104 |
+
np.mean(pt68[lm_idx[[3, 4]], :], 0), # right eye
|
105 |
+
pt68[lm_idx[0], :], # nose
|
106 |
+
pt68[lm_idx[5], :], # lip
|
107 |
+
pt68[lm_idx[6], :] # lip
|
108 |
+
], axis=0)
|
109 |
+
|
110 |
+
pt2 = np.stack([
|
111 |
+
(pt5[0] + pt5[1]) / 2,
|
112 |
+
(pt5[3] + pt5[4]) / 2
|
113 |
+
], axis=0)
|
114 |
+
else:
|
115 |
+
pt2 = np.stack([
|
116 |
+
np.mean(pt68[lm_idx[[1, 2]], :], 0), # left eye
|
117 |
+
np.mean(pt68[lm_idx[[3, 4]], :], 0), # right eye
|
118 |
+
], axis=0)
|
119 |
+
|
120 |
+
return pt2
|
121 |
+
|
122 |
+
|
123 |
+
def parse_pt2_from_pt5(pt5, use_lip=True):
|
124 |
+
"""
|
125 |
+
parsing the 2 points according to the 5 points, which cancels the roll
|
126 |
+
"""
|
127 |
+
if use_lip:
|
128 |
+
pt2 = np.stack([
|
129 |
+
(pt5[0] + pt5[1]) / 2,
|
130 |
+
(pt5[3] + pt5[4]) / 2
|
131 |
+
], axis=0)
|
132 |
+
else:
|
133 |
+
pt2 = np.stack([
|
134 |
+
pt5[0],
|
135 |
+
pt5[1]
|
136 |
+
], axis=0)
|
137 |
+
return pt2
|
138 |
+
|
139 |
+
|
140 |
+
def parse_pt2_from_pt_x(pts, use_lip=True):
|
141 |
+
if pts.shape[0] == 101:
|
142 |
+
pt2 = parse_pt2_from_pt101(pts, use_lip=use_lip)
|
143 |
+
elif pts.shape[0] == 106:
|
144 |
+
pt2 = parse_pt2_from_pt106(pts, use_lip=use_lip)
|
145 |
+
elif pts.shape[0] == 68:
|
146 |
+
pt2 = parse_pt2_from_pt68(pts, use_lip=use_lip)
|
147 |
+
elif pts.shape[0] == 5:
|
148 |
+
pt2 = parse_pt2_from_pt5(pts, use_lip=use_lip)
|
149 |
+
elif pts.shape[0] == 203:
|
150 |
+
pt2 = parse_pt2_from_pt203(pts, use_lip=use_lip)
|
151 |
+
elif pts.shape[0] > 101:
|
152 |
+
# take the first 101 points
|
153 |
+
pt2 = parse_pt2_from_pt101(pts[:101], use_lip=use_lip)
|
154 |
+
else:
|
155 |
+
raise Exception(f'Unknown shape: {pts.shape}')
|
156 |
+
|
157 |
+
if not use_lip:
|
158 |
+
# NOTE: to be compatible with the code below, pt2 needs to be rotated 90 degrees clockwise manually
|
159 |
+
v = pt2[1] - pt2[0]
|
160 |
+
pt2[1, 0] = pt2[0, 0] - v[1]
|
161 |
+
pt2[1, 1] = pt2[0, 1] + v[0]
|
162 |
+
|
163 |
+
return pt2
|
164 |
+
|
165 |
+
|
166 |
+
def parse_rect_from_landmark(
|
167 |
+
pts,
|
168 |
+
scale=1.5,
|
169 |
+
need_square=True,
|
170 |
+
vx_ratio=0,
|
171 |
+
vy_ratio=0,
|
172 |
+
use_deg_flag=False,
|
173 |
+
**kwargs
|
174 |
+
):
|
175 |
+
"""parsing center, size, angle from 101/68/5/x landmarks
|
176 |
+
vx_ratio: the offset ratio along the pupil axis x-axis, multiplied by size
|
177 |
+
vy_ratio: the offset ratio along the pupil axis y-axis, multiplied by size, which is used to contain more forehead area
|
178 |
+
|
179 |
+
judge with pts.shape
|
180 |
+
"""
|
181 |
+
pt2 = parse_pt2_from_pt_x(pts, use_lip=kwargs.get('use_lip', True))
|
182 |
+
|
183 |
+
uy = pt2[1] - pt2[0]
|
184 |
+
l = np.linalg.norm(uy)
|
185 |
+
if l <= 1e-3:
|
186 |
+
uy = np.array([0, 1], dtype=DTYPE)
|
187 |
+
else:
|
188 |
+
uy /= l
|
189 |
+
ux = np.array((uy[1], -uy[0]), dtype=DTYPE)
|
190 |
+
|
191 |
+
# the rotation degree of the x-axis, the clockwise is positive, the counterclockwise is negative (image coordinate system)
|
192 |
+
# print(uy)
|
193 |
+
# print(ux)
|
194 |
+
angle = acos(ux[0])
|
195 |
+
if ux[1] < 0:
|
196 |
+
angle = -angle
|
197 |
+
|
198 |
+
# rotation matrix
|
199 |
+
M = np.array([ux, uy])
|
200 |
+
|
201 |
+
# calculate the size which contains the angle degree of the bbox, and the center
|
202 |
+
center0 = np.mean(pts, axis=0)
|
203 |
+
rpts = (pts - center0) @ M.T # (M @ P.T).T = P @ M.T
|
204 |
+
lt_pt = np.min(rpts, axis=0)
|
205 |
+
rb_pt = np.max(rpts, axis=0)
|
206 |
+
center1 = (lt_pt + rb_pt) / 2
|
207 |
+
|
208 |
+
size = rb_pt - lt_pt
|
209 |
+
if need_square:
|
210 |
+
m = max(size[0], size[1])
|
211 |
+
size[0] = m
|
212 |
+
size[1] = m
|
213 |
+
|
214 |
+
size *= scale # scale size
|
215 |
+
center = center0 + ux * center1[0] + uy * center1[1] # counterclockwise rotation, equivalent to M.T @ center1.T
|
216 |
+
center = center + ux * (vx_ratio * size) + uy * \
|
217 |
+
(vy_ratio * size) # considering the offset in vx and vy direction
|
218 |
+
|
219 |
+
if use_deg_flag:
|
220 |
+
angle = degrees(angle)
|
221 |
+
|
222 |
+
return center, size, angle
|
223 |
+
|
224 |
+
|
225 |
+
def parse_bbox_from_landmark(pts, **kwargs):
|
226 |
+
center, size, angle = parse_rect_from_landmark(pts, **kwargs)
|
227 |
+
cx, cy = center
|
228 |
+
w, h = size
|
229 |
+
|
230 |
+
# calculate the vertex positions before rotation
|
231 |
+
bbox = np.array([
|
232 |
+
[cx-w/2, cy-h/2], # left, top
|
233 |
+
[cx+w/2, cy-h/2],
|
234 |
+
[cx+w/2, cy+h/2], # right, bottom
|
235 |
+
[cx-w/2, cy+h/2]
|
236 |
+
], dtype=DTYPE)
|
237 |
+
|
238 |
+
# construct rotation matrix
|
239 |
+
bbox_rot = bbox.copy()
|
240 |
+
R = np.array([
|
241 |
+
[np.cos(angle), -np.sin(angle)],
|
242 |
+
[np.sin(angle), np.cos(angle)]
|
243 |
+
], dtype=DTYPE)
|
244 |
+
|
245 |
+
# calculate the relative position of each vertex from the rotation center, then rotate these positions, and finally add the coordinates of the rotation center
|
246 |
+
bbox_rot = (bbox_rot - center) @ R.T + center
|
247 |
+
|
248 |
+
return {
|
249 |
+
'center': center, # 2x1
|
250 |
+
'size': size, # scalar
|
251 |
+
'angle': angle, # rad, counterclockwise
|
252 |
+
'bbox': bbox, # 4x2
|
253 |
+
'bbox_rot': bbox_rot, # 4x2
|
254 |
+
}
|
255 |
+
|
256 |
+
|
257 |
+
def crop_image_by_bbox(img, bbox, lmk=None, dsize=512, angle=None, flag_rot=False, **kwargs):
|
258 |
+
left, top, right, bot = bbox
|
259 |
+
if int(right - left) != int(bot - top):
|
260 |
+
print(f'right-left {right-left} != bot-top {bot-top}')
|
261 |
+
size = right - left
|
262 |
+
|
263 |
+
src_center = np.array([(left + right) / 2, (top + bot) / 2], dtype=DTYPE)
|
264 |
+
tgt_center = np.array([dsize / 2, dsize / 2], dtype=DTYPE)
|
265 |
+
|
266 |
+
s = dsize / size # scale
|
267 |
+
if flag_rot and angle is not None:
|
268 |
+
costheta, sintheta = cos(angle), sin(angle)
|
269 |
+
cx, cy = src_center[0], src_center[1] # ori center
|
270 |
+
tcx, tcy = tgt_center[0], tgt_center[1] # target center
|
271 |
+
# need to infer
|
272 |
+
M_o2c = np.array(
|
273 |
+
[[s * costheta, s * sintheta, tcx - s * (costheta * cx + sintheta * cy)],
|
274 |
+
[-s * sintheta, s * costheta, tcy - s * (-sintheta * cx + costheta * cy)]],
|
275 |
+
dtype=DTYPE
|
276 |
+
)
|
277 |
+
else:
|
278 |
+
M_o2c = np.array(
|
279 |
+
[[s, 0, tgt_center[0] - s * src_center[0]],
|
280 |
+
[0, s, tgt_center[1] - s * src_center[1]]],
|
281 |
+
dtype=DTYPE
|
282 |
+
)
|
283 |
+
|
284 |
+
# if flag_rot and angle is None:
|
285 |
+
# print('angle is None, but flag_rotate is True', style="bold yellow")
|
286 |
+
|
287 |
+
img_crop = _transform_img(img, M_o2c, dsize=dsize, borderMode=kwargs.get('borderMode', None))
|
288 |
+
lmk_crop = _transform_pts(lmk, M_o2c) if lmk is not None else None
|
289 |
+
|
290 |
+
M_o2c = np.vstack([M_o2c, np.array([0, 0, 1], dtype=DTYPE)])
|
291 |
+
M_c2o = np.linalg.inv(M_o2c)
|
292 |
+
|
293 |
+
# cv2.imwrite('crop.jpg', img_crop)
|
294 |
+
|
295 |
+
return {
|
296 |
+
'img_crop': img_crop,
|
297 |
+
'lmk_crop': lmk_crop,
|
298 |
+
'M_o2c': M_o2c,
|
299 |
+
'M_c2o': M_c2o,
|
300 |
+
}
|
301 |
+
|
302 |
+
|
303 |
+
def _estimate_similar_transform_from_pts(
|
304 |
+
pts,
|
305 |
+
dsize,
|
306 |
+
scale=1.5,
|
307 |
+
vx_ratio=0,
|
308 |
+
vy_ratio=-0.1,
|
309 |
+
flag_do_rot=True,
|
310 |
+
**kwargs
|
311 |
+
):
|
312 |
+
""" calculate the affine matrix of the cropped image from sparse points, the original image to the cropped image, the inverse is the cropped image to the original image
|
313 |
+
pts: landmark, 101 or 68 points or other points, Nx2
|
314 |
+
scale: the larger scale factor, the smaller face ratio
|
315 |
+
vx_ratio: x shift
|
316 |
+
vy_ratio: y shift, the smaller the y shift, the lower the face region
|
317 |
+
flag_do_rot: if True, conduct rotation correction
|
318 |
+
"""
|
319 |
+
center, size, angle = parse_rect_from_landmark(
|
320 |
+
pts, scale=scale, vx_ratio=vx_ratio, vy_ratio=vy_ratio,
|
321 |
+
use_lip=kwargs.get('use_lip', True)
|
322 |
+
)
|
323 |
+
|
324 |
+
s = dsize / size[0] # scale
|
325 |
+
tgt_center = np.array([dsize / 2, dsize / 2], dtype=DTYPE) # center of dsize
|
326 |
+
|
327 |
+
if flag_do_rot:
|
328 |
+
costheta, sintheta = cos(angle), sin(angle)
|
329 |
+
cx, cy = center[0], center[1] # ori center
|
330 |
+
tcx, tcy = tgt_center[0], tgt_center[1] # target center
|
331 |
+
# need to infer
|
332 |
+
M_INV = np.array(
|
333 |
+
[[s * costheta, s * sintheta, tcx - s * (costheta * cx + sintheta * cy)],
|
334 |
+
[-s * sintheta, s * costheta, tcy - s * (-sintheta * cx + costheta * cy)]],
|
335 |
+
dtype=DTYPE
|
336 |
+
)
|
337 |
+
else:
|
338 |
+
M_INV = np.array(
|
339 |
+
[[s, 0, tgt_center[0] - s * center[0]],
|
340 |
+
[0, s, tgt_center[1] - s * center[1]]],
|
341 |
+
dtype=DTYPE
|
342 |
+
)
|
343 |
+
|
344 |
+
M_INV_H = np.vstack([M_INV, np.array([0, 0, 1])])
|
345 |
+
M = np.linalg.inv(M_INV_H)
|
346 |
+
|
347 |
+
# M_INV is from the original image to the cropped image, M is from the cropped image to the original image
|
348 |
+
return M_INV, M[:2, ...]
|
349 |
+
|
350 |
+
|
351 |
+
def crop_image(img, pts: np.ndarray, **kwargs):
|
352 |
+
dsize = kwargs.get('dsize', 224)
|
353 |
+
scale = kwargs.get('scale', 1.5) # 1.5 | 1.6
|
354 |
+
vy_ratio = kwargs.get('vy_ratio', -0.1) # -0.0625 | -0.1
|
355 |
+
|
356 |
+
M_INV, _ = _estimate_similar_transform_from_pts(
|
357 |
+
pts,
|
358 |
+
dsize=dsize,
|
359 |
+
scale=scale,
|
360 |
+
vy_ratio=vy_ratio,
|
361 |
+
flag_do_rot=kwargs.get('flag_do_rot', True),
|
362 |
+
)
|
363 |
+
|
364 |
+
img_crop = _transform_img(img, M_INV, dsize) # origin to crop
|
365 |
+
pt_crop = _transform_pts(pts, M_INV)
|
366 |
+
|
367 |
+
M_o2c = np.vstack([M_INV, np.array([0, 0, 1], dtype=DTYPE)])
|
368 |
+
M_c2o = np.linalg.inv(M_o2c)
|
369 |
+
|
370 |
+
ret_dct = {
|
371 |
+
'M_o2c': M_o2c, # from the original image to the cropped image 3x3
|
372 |
+
'M_c2o': M_c2o, # from the cropped image to the original image 3x3
|
373 |
+
'img_crop': img_crop, # the cropped image
|
374 |
+
'pt_crop': pt_crop, # the landmarks of the cropped image
|
375 |
+
}
|
376 |
+
|
377 |
+
return ret_dct
|
378 |
+
|
379 |
+
def average_bbox_lst(bbox_lst):
|
380 |
+
if len(bbox_lst) == 0:
|
381 |
+
return None
|
382 |
+
bbox_arr = np.array(bbox_lst)
|
383 |
+
return np.mean(bbox_arr, axis=0).tolist()
|
384 |
+
|
385 |
+
def prepare_paste_back(mask_crop, crop_M_c2o, dsize):
|
386 |
+
"""prepare mask for later image paste back
|
387 |
+
"""
|
388 |
+
mask_ori = _transform_img(mask_crop, crop_M_c2o, dsize)
|
389 |
+
mask_ori = mask_ori.astype(np.float32) / 255.
|
390 |
+
return mask_ori
|
391 |
+
|
392 |
+
def paste_back(img_crop, M_c2o, img_ori, mask_ori):
|
393 |
+
"""paste back the image
|
394 |
+
"""
|
395 |
+
dsize = (img_ori.shape[1], img_ori.shape[0])
|
396 |
+
result = _transform_img(img_crop, M_c2o, dsize=dsize)
|
397 |
+
result = np.clip(mask_ori * result + (1 - mask_ori) * img_ori, 0, 255).astype(np.uint8)
|
398 |
+
return result
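A crop-and-paste round trip using a synthetic image and made-up 5-point landmarks (not from the diff; the scale/vy_ratio values are illustrative, the real defaults come from CropConfig):

import numpy as np
from src.utils.crop import crop_image, prepare_paste_back, paste_back

img = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
# hypothetical 5-point landmarks: left eye, right eye, nose, left/right mouth corner
pts = np.array([[260, 200], [380, 200], [320, 260], [280, 320], [360, 320]], dtype=np.float32)

ret = crop_image(img, pts, dsize=256, scale=2.3, vy_ratio=-0.125)
mask_crop = np.ones((256, 256, 3), dtype=np.uint8) * 255
mask_ori = prepare_paste_back(mask_crop, ret['M_c2o'], dsize=(img.shape[1], img.shape[0]))
blended = paste_back(ret['img_crop'], ret['M_c2o'], img, mask_ori)
print(ret['img_crop'].shape, blended.shape)          # (256, 256, 3) (480, 640, 3)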
|
src/utils/cropper.py
ADDED
@@ -0,0 +1,196 @@
1 |
+
# coding: utf-8
|
2 |
+
|
3 |
+
import os.path as osp
|
4 |
+
from dataclasses import dataclass, field
|
5 |
+
from typing import List, Tuple, Union
|
6 |
+
|
7 |
+
import cv2; cv2.setNumThreads(0); cv2.ocl.setUseOpenCL(False)
|
8 |
+
import numpy as np
|
9 |
+
|
10 |
+
from ..config.crop_config import CropConfig
|
11 |
+
from .crop import (
|
12 |
+
average_bbox_lst,
|
13 |
+
crop_image,
|
14 |
+
crop_image_by_bbox,
|
15 |
+
parse_bbox_from_landmark,
|
16 |
+
)
|
17 |
+
from .io import contiguous
|
18 |
+
from .rprint import rlog as log
|
19 |
+
from .face_analysis_diy import FaceAnalysisDIY
|
20 |
+
from .landmark_runner import LandmarkRunner
|
21 |
+
|
22 |
+
|
23 |
+
def make_abs_path(fn):
|
24 |
+
return osp.join(osp.dirname(osp.realpath(__file__)), fn)
|
25 |
+
|
26 |
+
|
27 |
+
@dataclass
|
28 |
+
class Trajectory:
|
29 |
+
start: int = -1 # start frame
|
30 |
+
end: int = -1 # end frame
|
31 |
+
lmk_lst: Union[Tuple, List, np.ndarray] = field(default_factory=list) # lmk list
|
32 |
+
bbox_lst: Union[Tuple, List, np.ndarray] = field(default_factory=list) # bbox list
|
33 |
+
|
34 |
+
frame_rgb_lst: Union[Tuple, List, np.ndarray] = field(default_factory=list) # frame list
|
35 |
+
lmk_crop_lst: Union[Tuple, List, np.ndarray] = field(default_factory=list) # lmk list
|
36 |
+
frame_rgb_crop_lst: Union[Tuple, List, np.ndarray] = field(default_factory=list) # frame crop list
|
37 |
+
|
38 |
+
|
39 |
+
class Cropper(object):
|
40 |
+
def __init__(self, **kwargs) -> None:
|
41 |
+
self.crop_cfg: CropConfig = kwargs.get("crop_cfg", None)
|
42 |
+
device_id = kwargs.get("device_id", 0)
|
43 |
+
flag_force_cpu = kwargs.get("flag_force_cpu", False)
|
44 |
+
if flag_force_cpu:
|
45 |
+
device = "cpu"
|
46 |
+
face_analysis_wrapper_provicer = ["CPUExecutionProvider"]
|
47 |
+
else:
|
48 |
+
device = "cuda"
|
49 |
+
face_analysis_wrapper_provicer = ["CUDAExecutionProvider"]
|
50 |
+
self.landmark_runner = LandmarkRunner(
|
51 |
+
ckpt_path=make_abs_path(self.crop_cfg.landmark_ckpt_path),
|
52 |
+
onnx_provider=device,
|
53 |
+
device_id=device_id,
|
54 |
+
)
|
55 |
+
self.landmark_runner.warmup()
|
56 |
+
|
57 |
+
self.face_analysis_wrapper = FaceAnalysisDIY(
|
58 |
+
name="buffalo_l",
|
59 |
+
root=make_abs_path(self.crop_cfg.insightface_root),
|
60 |
+
providers=face_analysis_wrapper_provider,
|
61 |
+
)
|
62 |
+
self.face_analysis_wrapper.prepare(ctx_id=device_id, det_size=(512, 512))
|
63 |
+
self.face_analysis_wrapper.warmup()
|
64 |
+
|
65 |
+
def update_config(self, user_args):
|
66 |
+
for k, v in user_args.items():
|
67 |
+
if hasattr(self.crop_cfg, k):
|
68 |
+
setattr(self.crop_cfg, k, v)
|
69 |
+
|
70 |
+
def crop_source_image(self, img_rgb_: np.ndarray, crop_cfg: CropConfig):
|
71 |
+
# crop a source image and get the necessary information
|
72 |
+
img_rgb = img_rgb_.copy() # copy it
|
73 |
+
|
74 |
+
img_bgr = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2BGR)
|
75 |
+
src_face = self.face_analysis_wrapper.get(
|
76 |
+
img_bgr,
|
77 |
+
flag_do_landmark_2d_106=True,
|
78 |
+
direction=crop_cfg.direction,
|
79 |
+
max_face_num=crop_cfg.max_face_num,
|
80 |
+
)
|
81 |
+
|
82 |
+
if len(src_face) == 0:
|
83 |
+
log("No face detected in the source image.")
|
84 |
+
return None
|
85 |
+
elif len(src_face) > 1:
|
86 |
+
log(f"More than one face detected in the image, only pick one face by rule {crop_cfg.direction}.")
|
87 |
+
|
88 |
+
# NOTE: temporarily only pick the first face, to support multiple face in the future
|
89 |
+
src_face = src_face[0]
|
90 |
+
lmk = src_face.landmark_2d_106 # these are the 106 2D landmarks from insightface
|
91 |
+
|
92 |
+
# crop the face
|
93 |
+
ret_dct = crop_image(
|
94 |
+
img_rgb, # ndarray
|
95 |
+
lmk, # 106x2 or Nx2
|
96 |
+
dsize=crop_cfg.dsize,
|
97 |
+
scale=crop_cfg.scale,
|
98 |
+
vx_ratio=crop_cfg.vx_ratio,
|
99 |
+
vy_ratio=crop_cfg.vy_ratio,
|
100 |
+
)
|
101 |
+
|
102 |
+
lmk = self.landmark_runner.run(img_rgb, lmk)
|
103 |
+
ret_dct["lmk_crop"] = lmk
|
104 |
+
|
105 |
+
# update a 256x256 version for network input
|
106 |
+
ret_dct["img_crop_256x256"] = cv2.resize(ret_dct["img_crop"], (256, 256), interpolation=cv2.INTER_AREA)
|
107 |
+
ret_dct["lmk_crop_256x256"] = ret_dct["lmk_crop"] * 256 / crop_cfg.dsize
|
108 |
+
|
109 |
+
return ret_dct
|
110 |
+
|
111 |
+
def crop_driving_video(self, driving_rgb_lst, **kwargs):
|
112 |
+
"""Tracking based landmarks/alignment and cropping"""
|
113 |
+
trajectory = Trajectory()
|
114 |
+
direction = kwargs.get("direction", "large-small")
|
115 |
+
for idx, frame_rgb in enumerate(driving_rgb_lst):
|
116 |
+
if idx == 0 or trajectory.start == -1:
|
117 |
+
src_face = self.face_analysis_wrapper.get(
|
118 |
+
contiguous(frame_rgb[..., ::-1]),
|
119 |
+
flag_do_landmark_2d_106=True,
|
120 |
+
direction=direction,
|
121 |
+
)
|
122 |
+
if len(src_face) == 0:
|
123 |
+
log(f"No face detected in the frame #{idx}")
|
124 |
+
continue
|
125 |
+
elif len(src_face) > 1:
|
126 |
+
log(f"More than one face detected in the driving frame_{idx}, only pick one face by rule {direction}.")
|
127 |
+
src_face = src_face[0]
|
128 |
+
lmk = src_face.landmark_2d_106
|
129 |
+
lmk = self.landmark_runner.run(frame_rgb, lmk)
|
130 |
+
trajectory.start, trajectory.end = idx, idx
|
131 |
+
else:
|
132 |
+
lmk = self.landmark_runner.run(frame_rgb, trajectory.lmk_lst[-1])
|
133 |
+
trajectory.end = idx
|
134 |
+
|
135 |
+
trajectory.lmk_lst.append(lmk)
|
136 |
+
ret_bbox = parse_bbox_from_landmark(
|
137 |
+
lmk,
|
138 |
+
scale=self.crop_cfg.scale_crop_video,
|
139 |
+
vx_ratio_crop_video=self.crop_cfg.vx_ratio_crop_video,
|
140 |
+
vy_ratio=self.crop_cfg.vy_ratio_crop_video,
|
141 |
+
)["bbox"]
|
142 |
+
bbox = [
|
143 |
+
ret_bbox[0, 0],
|
144 |
+
ret_bbox[0, 1],
|
145 |
+
ret_bbox[2, 0],
|
146 |
+
ret_bbox[2, 1],
|
147 |
+
] # 4,
|
148 |
+
trajectory.bbox_lst.append(bbox) # bbox
|
149 |
+
trajectory.frame_rgb_lst.append(frame_rgb)
|
150 |
+
|
151 |
+
global_bbox = average_bbox_lst(trajectory.bbox_lst)
|
152 |
+
|
153 |
+
for idx, (frame_rgb, lmk) in enumerate(zip(trajectory.frame_rgb_lst, trajectory.lmk_lst)):
|
154 |
+
ret_dct = crop_image_by_bbox(
|
155 |
+
frame_rgb,
|
156 |
+
global_bbox,
|
157 |
+
lmk=lmk,
|
158 |
+
dsize=kwargs.get("dsize", 512),
|
159 |
+
flag_rot=False,
|
160 |
+
borderValue=(0, 0, 0),
|
161 |
+
)
|
162 |
+
trajectory.frame_rgb_crop_lst.append(ret_dct["img_crop"])
|
163 |
+
trajectory.lmk_crop_lst.append(ret_dct["lmk_crop"])
|
164 |
+
|
165 |
+
return {
|
166 |
+
"frame_crop_lst": trajectory.frame_rgb_crop_lst,
|
167 |
+
"lmk_crop_lst": trajectory.lmk_crop_lst,
|
168 |
+
}
|
169 |
+
|
170 |
+
def calc_lmks_from_cropped_video(self, driving_rgb_crop_lst, **kwargs):
|
171 |
+
"""Tracking based landmarks/alignment"""
|
172 |
+
trajectory = Trajectory()
|
173 |
+
direction = kwargs.get("direction", "large-small")
|
174 |
+
|
175 |
+
for idx, frame_rgb_crop in enumerate(driving_rgb_crop_lst):
|
176 |
+
if idx == 0 or trajectory.start == -1:
|
177 |
+
src_face = self.face_analysis_wrapper.get(
|
178 |
+
contiguous(frame_rgb_crop[..., ::-1]), # convert to BGR
|
179 |
+
flag_do_landmark_2d_106=True,
|
180 |
+
direction=direction,
|
181 |
+
)
|
182 |
+
if len(src_face) == 0:
|
183 |
+
log(f"No face detected in the frame #{idx}")
|
184 |
+
raise Exception(f"No face detected in the frame #{idx}")
|
185 |
+
elif len(src_face) > 1:
|
186 |
+
log(f"More than one face detected in the driving frame_{idx}, only pick one face by rule {direction}.")
|
187 |
+
src_face = src_face[0]
|
188 |
+
lmk = src_face.landmark_2d_106
|
189 |
+
lmk = self.landmark_runner.run(frame_rgb_crop, lmk)
|
190 |
+
trajectory.start, trajectory.end = idx, idx
|
191 |
+
else:
|
192 |
+
lmk = self.landmark_runner.run(frame_rgb_crop, trajectory.lmk_lst[-1])
|
193 |
+
trajectory.end = idx
|
194 |
+
|
195 |
+
trajectory.lmk_lst.append(lmk)
|
196 |
+
return trajectory.lmk_lst
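A hypothetical end-to-end driver for this class (the paths, default config, and example image are assumptions; the landmark and insightface weights referenced in readme.md must already be under pretrained_weights for this to run):

import cv2
from src.config.crop_config import CropConfig
from src.utils.cropper import Cropper

crop_cfg = CropConfig()                               # assumed default config
cropper = Cropper(crop_cfg=crop_cfg, device_id=0, flag_force_cpu=True)

img_bgr = cv2.imread("assets/examples/source/s0.jpg") # hypothetical input path
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
crop_info = cropper.crop_source_image(img_rgb, crop_cfg)
if crop_info is not None:
    print(crop_info["img_crop_256x256"].shape)        # (256, 256, 3)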
|
src/utils/dependencies/insightface/__init__.py
ADDED
@@ -0,0 +1,20 @@
1 |
+
# coding: utf-8
|
2 |
+
# pylint: disable=wrong-import-position
|
3 |
+
"""InsightFace: A Face Analysis Toolkit."""
|
4 |
+
from __future__ import absolute_import
|
5 |
+
|
6 |
+
try:
|
7 |
+
#import mxnet as mx
|
8 |
+
import onnxruntime
|
9 |
+
except ImportError:
|
10 |
+
raise ImportError(
|
11 |
+
"Unable to import dependency onnxruntime. "
|
12 |
+
)
|
13 |
+
|
14 |
+
__version__ = '0.7.3'
|
15 |
+
|
16 |
+
from . import model_zoo
|
17 |
+
from . import utils
|
18 |
+
from . import app
|
19 |
+
from . import data
|
20 |
+
|
src/utils/dependencies/insightface/app/__init__.py
ADDED
@@ -0,0 +1 @@
1 |
+
from .face_analysis import *
|
src/utils/dependencies/insightface/app/common.py
ADDED
@@ -0,0 +1,49 @@
1 |
+
import numpy as np
|
2 |
+
from numpy.linalg import norm as l2norm
|
3 |
+
#from easydict import EasyDict
|
4 |
+
|
5 |
+
class Face(dict):
|
6 |
+
|
7 |
+
def __init__(self, d=None, **kwargs):
|
8 |
+
if d is None:
|
9 |
+
d = {}
|
10 |
+
if kwargs:
|
11 |
+
d.update(**kwargs)
|
12 |
+
for k, v in d.items():
|
13 |
+
setattr(self, k, v)
|
14 |
+
# Class attributes
|
15 |
+
#for k in self.__class__.__dict__.keys():
|
16 |
+
# if not (k.startswith('__') and k.endswith('__')) and not k in ('update', 'pop'):
|
17 |
+
# setattr(self, k, getattr(self, k))
|
18 |
+
|
19 |
+
def __setattr__(self, name, value):
|
20 |
+
if isinstance(value, (list, tuple)):
|
21 |
+
value = [self.__class__(x)
|
22 |
+
if isinstance(x, dict) else x for x in value]
|
23 |
+
elif isinstance(value, dict) and not isinstance(value, self.__class__):
|
24 |
+
value = self.__class__(value)
|
25 |
+
super(Face, self).__setattr__(name, value)
|
26 |
+
super(Face, self).__setitem__(name, value)
|
27 |
+
|
28 |
+
__setitem__ = __setattr__
|
29 |
+
|
30 |
+
def __getattr__(self, name):
|
31 |
+
return None
|
32 |
+
|
33 |
+
@property
|
34 |
+
def embedding_norm(self):
|
35 |
+
if self.embedding is None:
|
36 |
+
return None
|
37 |
+
return l2norm(self.embedding)
|
38 |
+
|
39 |
+
@property
|
40 |
+
def normed_embedding(self):
|
41 |
+
if self.embedding is None:
|
42 |
+
return None
|
43 |
+
return self.embedding / self.embedding_norm
|
44 |
+
|
45 |
+
@property
|
46 |
+
def sex(self):
|
47 |
+
if self.gender is None:
|
48 |
+
return None
|
49 |
+
return 'M' if self.gender==1 else 'F'
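Face is a dict with attribute-style access; a tiny illustration (values invented, repo root assumed on PYTHONPATH with onnxruntime installed):

import numpy as np
from src.utils.dependencies.insightface.app.common import Face

face = Face(bbox=np.array([10, 20, 110, 140]), det_score=0.98, gender=1)
print(face.det_score, face['det_score'])   # attribute and key access are interchangeable
print(face.kps)                            # unset fields resolve to None
print(face.sex)                            # 'M', since gender == 1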
|
src/utils/dependencies/insightface/app/face_analysis.py
ADDED
@@ -0,0 +1,110 @@
1 |
+
# -*- coding: utf-8 -*-
|
2 |
+
# @Organization : insightface.ai
|
3 |
+
# @Author : Jia Guo
|
4 |
+
# @Time : 2021-05-04
|
5 |
+
# @Function :
|
6 |
+
|
7 |
+
|
8 |
+
from __future__ import division
|
9 |
+
|
10 |
+
import glob
|
11 |
+
import os.path as osp
|
12 |
+
|
13 |
+
import numpy as np
|
14 |
+
import onnxruntime
|
15 |
+
from numpy.linalg import norm
|
16 |
+
|
17 |
+
from ..model_zoo import model_zoo
|
18 |
+
from ..utils import ensure_available
|
19 |
+
from .common import Face
|
20 |
+
|
21 |
+
|
22 |
+
DEFAULT_MP_NAME = 'buffalo_l'
|
23 |
+
__all__ = ['FaceAnalysis']
|
24 |
+
|
25 |
+
class FaceAnalysis:
|
26 |
+
def __init__(self, name=DEFAULT_MP_NAME, root='~/.insightface', allowed_modules=None, **kwargs):
|
27 |
+
onnxruntime.set_default_logger_severity(3)
|
28 |
+
self.models = {}
|
29 |
+
self.model_dir = ensure_available('models', name, root=root)
|
30 |
+
onnx_files = glob.glob(osp.join(self.model_dir, '*.onnx'))
|
31 |
+
onnx_files = sorted(onnx_files)
|
32 |
+
for onnx_file in onnx_files:
|
33 |
+
model = model_zoo.get_model(onnx_file, **kwargs)
|
34 |
+
if model is None:
|
35 |
+
print('model not recognized:', onnx_file)
|
36 |
+
elif allowed_modules is not None and model.taskname not in allowed_modules:
|
37 |
+
print('model ignore:', onnx_file, model.taskname)
|
38 |
+
del model
|
39 |
+
elif model.taskname not in self.models and (allowed_modules is None or model.taskname in allowed_modules):
|
40 |
+
# print('find model:', onnx_file, model.taskname, model.input_shape, model.input_mean, model.input_std)
|
41 |
+
self.models[model.taskname] = model
|
42 |
+
else:
|
43 |
+
print('duplicated model task type, ignore:', onnx_file, model.taskname)
|
44 |
+
del model
|
45 |
+
assert 'detection' in self.models
|
46 |
+
self.det_model = self.models['detection']
|
47 |
+
|
48 |
+
|
49 |
+
def prepare(self, ctx_id, det_thresh=0.5, det_size=(640, 640)):
|
50 |
+
self.det_thresh = det_thresh
|
51 |
+
assert det_size is not None
|
52 |
+
# print('set det-size:', det_size)
|
53 |
+
self.det_size = det_size
|
54 |
+
for taskname, model in self.models.items():
|
55 |
+
if taskname=='detection':
|
56 |
+
model.prepare(ctx_id, input_size=det_size, det_thresh=det_thresh)
|
57 |
+
else:
|
58 |
+
model.prepare(ctx_id)
|
59 |
+
|
60 |
+
def get(self, img, max_num=0):
|
61 |
+
bboxes, kpss = self.det_model.detect(img,
|
62 |
+
max_num=max_num,
|
63 |
+
metric='default')
|
64 |
+
if bboxes.shape[0] == 0:
|
65 |
+
return []
|
66 |
+
ret = []
|
67 |
+
for i in range(bboxes.shape[0]):
|
68 |
+
bbox = bboxes[i, 0:4]
|
69 |
+
det_score = bboxes[i, 4]
|
70 |
+
kps = None
|
71 |
+
if kpss is not None:
|
72 |
+
kps = kpss[i]
|
73 |
+
face = Face(bbox=bbox, kps=kps, det_score=det_score)
|
74 |
+
for taskname, model in self.models.items():
|
75 |
+
if taskname=='detection':
|
76 |
+
continue
|
77 |
+
model.get(img, face)
|
78 |
+
ret.append(face)
|
79 |
+
return ret
|
80 |
+
|
81 |
+
def draw_on(self, img, faces):
|
82 |
+
import cv2
|
83 |
+
dimg = img.copy()
|
84 |
+
for i in range(len(faces)):
|
85 |
+
face = faces[i]
|
86 |
+
box = face.bbox.astype(int)  # np.int was removed in NumPy 1.24+
|
87 |
+
color = (0, 0, 255)
|
88 |
+
cv2.rectangle(dimg, (box[0], box[1]), (box[2], box[3]), color, 2)
|
89 |
+
if face.kps is not None:
|
90 |
+
kps = face.kps.astype(int)  # np.int was removed in NumPy 1.24+
|
91 |
+
#print(landmark.shape)
|
92 |
+
for l in range(kps.shape[0]):
|
93 |
+
color = (0, 0, 255)
|
94 |
+
if l == 0 or l == 3:
|
95 |
+
color = (0, 255, 0)
|
96 |
+
cv2.circle(dimg, (kps[l][0], kps[l][1]), 1, color,
|
97 |
+
2)
|
98 |
+
if face.gender is not None and face.age is not None:
|
99 |
+
cv2.putText(dimg,'%s,%d'%(face.sex,face.age), (box[0]-1, box[1]-4),cv2.FONT_HERSHEY_COMPLEX,0.7,(0,255,0),1)
|
100 |
+
|
101 |
+
#for key, value in face.items():
|
102 |
+
# if key.startswith('landmark_3d'):
|
103 |
+
# print(key, value.shape)
|
104 |
+
# print(value[0:10,:])
|
105 |
+
# lmk = np.round(value).astype(np.int)
|
106 |
+
# for l in range(lmk.shape[0]):
|
107 |
+
# color = (255, 0, 0)
|
108 |
+
# cv2.circle(dimg, (lmk[l][0], lmk[l][1]), 1, color,
|
109 |
+
# 2)
|
110 |
+
return dimg
|
src/utils/dependencies/insightface/data/__init__.py
ADDED
@@ -0,0 +1,2 @@
1 |
+
from .image import get_image
|
2 |
+
from .pickle_object import get_object
|
src/utils/dependencies/insightface/data/image.py
ADDED
@@ -0,0 +1,27 @@
1 |
+
import cv2
|
2 |
+
import os
|
3 |
+
import os.path as osp
|
4 |
+
from pathlib import Path
|
5 |
+
|
6 |
+
class ImageCache:
|
7 |
+
data = {}
|
8 |
+
|
9 |
+
def get_image(name, to_rgb=False):
|
10 |
+
key = (name, to_rgb)
|
11 |
+
if key in ImageCache.data:
|
12 |
+
return ImageCache.data[key]
|
13 |
+
images_dir = osp.join(Path(__file__).parent.absolute(), 'images')
|
14 |
+
ext_names = ['.jpg', '.png', '.jpeg']
|
15 |
+
image_file = None
|
16 |
+
for ext_name in ext_names:
|
17 |
+
_image_file = osp.join(images_dir, "%s%s"%(name, ext_name))
|
18 |
+
if osp.exists(_image_file):
|
19 |
+
image_file = _image_file
|
20 |
+
break
|
21 |
+
assert image_file is not None, '%s not found'%name
|
22 |
+
img = cv2.imread(image_file)
|
23 |
+
if to_rgb:
|
24 |
+
img = img[:,:,::-1]
|
25 |
+
ImageCache.data[key] = img
|
26 |
+
return img
|
27 |
+
|
src/utils/dependencies/insightface/data/images/Tom_Hanks_54745.png
ADDED
src/utils/dependencies/insightface/data/images/mask_black.jpg
ADDED
src/utils/dependencies/insightface/data/images/mask_blue.jpg
ADDED
src/utils/dependencies/insightface/data/images/mask_green.jpg
ADDED
src/utils/dependencies/insightface/data/images/mask_white.jpg
ADDED
src/utils/dependencies/insightface/data/images/t1.jpg
ADDED
src/utils/dependencies/insightface/data/objects/meanshape_68.pkl
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:39ffecf84ba73f0d0d7e49380833ba88713c9fcdec51df4f7ac45a48b8f4cc51
|
3 |
+
size 974
|
src/utils/dependencies/insightface/data/pickle_object.py
ADDED
@@ -0,0 +1,17 @@
1 |
+
import cv2
|
2 |
+
import os
|
3 |
+
import os.path as osp
|
4 |
+
from pathlib import Path
|
5 |
+
import pickle
|
6 |
+
|
7 |
+
def get_object(name):
|
8 |
+
objects_dir = osp.join(Path(__file__).parent.absolute(), 'objects')
|
9 |
+
if not name.endswith('.pkl'):
|
10 |
+
name = name+".pkl"
|
11 |
+
filepath = osp.join(objects_dir, name)
|
12 |
+
if not osp.exists(filepath):
|
13 |
+
return None
|
14 |
+
with open(filepath, 'rb') as f:
|
15 |
+
obj = pickle.load(f)
|
16 |
+
return obj
|
17 |
+
|
src/utils/dependencies/insightface/data/rec_builder.py
ADDED
@@ -0,0 +1,71 @@
1 |
+
import pickle
|
2 |
+
import numpy as np
|
3 |
+
import os
|
4 |
+
import os.path as osp
|
5 |
+
import sys
|
6 |
+
import mxnet as mx
|
7 |
+
|
8 |
+
|
9 |
+
class RecBuilder():
|
10 |
+
def __init__(self, path, image_size=(112, 112)):
|
11 |
+
self.path = path
|
12 |
+
self.image_size = image_size
|
13 |
+
self.widx = 0
|
14 |
+
self.wlabel = 0
|
15 |
+
self.max_label = -1
|
16 |
+
assert not osp.exists(path), '%s exists' % path
|
17 |
+
os.makedirs(path)
|
18 |
+
self.writer = mx.recordio.MXIndexedRecordIO(os.path.join(path, 'train.idx'),
|
19 |
+
os.path.join(path, 'train.rec'),
|
20 |
+
'w')
|
21 |
+
self.meta = []
|
22 |
+
|
23 |
+
def add(self, imgs):
|
24 |
+
#!!! img should be BGR!!!!
|
25 |
+
#assert label >= 0
|
26 |
+
#assert label > self.last_label
|
27 |
+
assert len(imgs) > 0
|
28 |
+
label = self.wlabel
|
29 |
+
for img in imgs:
|
30 |
+
idx = self.widx
|
31 |
+
image_meta = {'image_index': idx, 'image_classes': [label]}
|
32 |
+
header = mx.recordio.IRHeader(0, label, idx, 0)
|
33 |
+
if isinstance(img, np.ndarray):
|
34 |
+
s = mx.recordio.pack_img(header,img,quality=95,img_fmt='.jpg')
|
35 |
+
else:
|
36 |
+
s = mx.recordio.pack(header, img)
|
37 |
+
self.writer.write_idx(idx, s)
|
38 |
+
self.meta.append(image_meta)
|
39 |
+
self.widx += 1
|
40 |
+
self.max_label = label
|
41 |
+
self.wlabel += 1
|
42 |
+
|
43 |
+
|
44 |
+
def add_image(self, img, label):
|
45 |
+
#!!! img should be BGR!!!!
|
46 |
+
#assert label >= 0
|
47 |
+
#assert label > self.last_label
|
48 |
+
idx = self.widx
|
49 |
+
header = mx.recordio.IRHeader(0, label, idx, 0)
|
50 |
+
if isinstance(label, list):
|
51 |
+
idlabel = label[0]
|
52 |
+
else:
|
53 |
+
idlabel = label
|
54 |
+
image_meta = {'image_index': idx, 'image_classes': [idlabel]}
|
55 |
+
if isinstance(img, np.ndarray):
|
56 |
+
s = mx.recordio.pack_img(header,img,quality=95,img_fmt='.jpg')
|
57 |
+
else:
|
58 |
+
s = mx.recordio.pack(header, img)
|
59 |
+
self.writer.write_idx(idx, s)
|
60 |
+
self.meta.append(image_meta)
|
61 |
+
self.widx += 1
|
62 |
+
self.max_label = max(self.max_label, idlabel)
|
63 |
+
|
64 |
+
def close(self):
|
65 |
+
with open(osp.join(self.path, 'train.meta'), 'wb') as pfile:
|
66 |
+
pickle.dump(self.meta, pfile, protocol=pickle.HIGHEST_PROTOCOL)
|
67 |
+
print('stat:', self.widx, self.wlabel)
|
68 |
+
with open(os.path.join(self.path, 'property'), 'w') as f:
|
69 |
+
f.write("%d,%d,%d\n" % (self.max_label+1, self.image_size[0], self.image_size[1]))
|
70 |
+
f.write("%d\n" % (self.widx))
|
71 |
+
|
src/utils/dependencies/insightface/model_zoo/__init__.py
ADDED
@@ -0,0 +1,6 @@
1 |
+
from .model_zoo import get_model
|
2 |
+
from .arcface_onnx import ArcFaceONNX
|
3 |
+
from .retinaface import RetinaFace
|
4 |
+
from .scrfd import SCRFD
|
5 |
+
from .landmark import Landmark
|
6 |
+
from .attribute import Attribute
|