Junjie96 committed on
Commit
dbac7c5
1 Parent(s): 52ddf89

Upload 38 files

.gitattributes CHANGED
@@ -33,3 +33,20 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ assets/examples/1-output-1.png filter=lfs diff=lfs merge=lfs -text
+ assets/examples/1-output-2.png filter=lfs diff=lfs merge=lfs -text
+ assets/examples/1-output-3.png filter=lfs diff=lfs merge=lfs -text
+ assets/examples/2-output-1.png filter=lfs diff=lfs merge=lfs -text
+ assets/examples/2-output-2.png filter=lfs diff=lfs merge=lfs -text
+ assets/examples/2-output-3.png filter=lfs diff=lfs merge=lfs -text
+ assets/examples/2-output-4.png filter=lfs diff=lfs merge=lfs -text
+ assets/examples/2-stylegan2-ffhq-0100.png filter=lfs diff=lfs merge=lfs -text
+ assets/examples/2-stylegan2-ffhq-0293.png filter=lfs diff=lfs merge=lfs -text
+ assets/examples/3-output-1.png filter=lfs diff=lfs merge=lfs -text
+ assets/examples/3-output-2.png filter=lfs diff=lfs merge=lfs -text
+ assets/examples/3-output-3.png filter=lfs diff=lfs merge=lfs -text
+ assets/examples/3-output-4.png filter=lfs diff=lfs merge=lfs -text
+ assets/examples/3-style-1.png filter=lfs diff=lfs merge=lfs -text
+ assets/examples/3-stylegan2-ffhq-0293.png filter=lfs diff=lfs merge=lfs -text
+ assets/examples/3-stylegan2-ffhq-0381.png filter=lfs diff=lfs merge=lfs -text
+ assets/examples/Trump-4.jpg filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,13 +1,12 @@
  ---
  title: UniPortrait
- emoji: 🐠
- colorFrom: yellow
+ emoji: 👩‍🎨
+ colorFrom: purple
  colorTo: red
  sdk: gradio
- sdk_version: 4.41.0
+ sdk_version: 4.31.2
  app_file: app.py
  pinned: false
  license: apache-2.0
  ---
-
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,382 @@
+ #!/usr/bin/python3
+ # -*- coding: utf-8 -*-
+ # @Time : 2024-07-31
+ # @Author : Junjie He
+ import gradio as gr
+
+ from src.process import (
+     text_to_single_id_generation_process,
+     text_to_multi_id_generation_process,
+     image_to_single_id_generation_process,
+ )
+
+
+ def text_to_single_id_generation_block():
+     gr.Markdown("## Text-to-Single-ID Generation")
+     gr.HTML(text_to_single_id_description)
+     gr.HTML(text_to_single_id_tips)
+     with gr.Row():
+         with gr.Column(scale=1, min_width=100):
+             prompt = gr.Textbox(value="", label='Prompt', lines=2)
+             negative_prompt = gr.Textbox(value="nsfw", label='Negative Prompt')
+             image_resolution = gr.Dropdown(choices=["768x512", "512x512", "512x768"], value="512x512",
+                                            label="Image Resolution (HxW)")
+             run_button = gr.Button(value="Run")
+
+             with gr.Accordion("Advanced Options", open=True):
+                 seed = gr.Slider(label="Seed (-1 indicates random)", minimum=-1, maximum=2147483647, step=1, value=-1)
+
+                 faceid_scale = gr.Slider(label="Face ID Scale", minimum=0.0, maximum=1.0, step=0.01, value=0.7)
+                 face_structure_scale = gr.Slider(label="Face Structure Scale", minimum=0.0, maximum=1.0,
+                                                  step=0.01, value=0.1)
+
+                 style_scale = gr.Slider(label="Style Scale", minimum=0.0, maximum=1.0, step=0.01, value=0.7)
+
+                 use_sr = gr.Checkbox(label="RealESRGAN 2x", value=True)
+
+         with gr.Column(scale=3, min_width=100):
+             with gr.Row(equal_height=False):
+                 pil_faceid = gr.Image(type="pil", label="ID Image")
+                 with gr.Accordion("ID Supplements", open=True):
+                     with gr.Row():
+                         pil_supp_faceids = gr.File(file_count="multiple", file_types=["image"],
+                                                    type="filepath", label="Additional ID Images")
+                     with gr.Row():
+                         with gr.Column(scale=1, min_width=100):
+                             pil_mix_faceid_1 = gr.Image(type="pil", label="Mix ID 1")
+                             mix_scale_1 = gr.Slider(label="Mix Scale 1", minimum=0.0, maximum=1.0, step=0.01, value=0.0)
+                         with gr.Column(scale=1, min_width=100):
+                             pil_mix_faceid_2 = gr.Image(type="pil", label="Mix ID 2")
+                             mix_scale_2 = gr.Slider(label="Mix Scale 2", minimum=0.0, maximum=1.0, step=0.01, value=0.0)
+                 pil_style = gr.Image(type="pil", label="Style")
+
+             with gr.Row():
+                 example_output = gr.Image(type="pil", label="(Example Output)", visible=False)
+                 result_gallery = gr.Gallery(label='Output', show_label=True, elem_id="gallery", columns=4, preview=True,
+                                             format="png")
+             with gr.Row():
+                 examples = [
+                     [
+                         "A young man with short black hair, wearing a black hoodie with a hood, was paired with a blue denim jacket with yellow details.",
+                         "assets/examples/1-newton.jpg",
+                         "assets/No-Image-Placeholder.png",
+                         "assets/examples/1-output-1.png",
+                     ],
+                     [
+                         "A young man with short black hair, wearing a black hoodie with a hood, was paired with a blue denim jacket with yellow details.",
+                         "assets/examples/1-newton.jpg",
+                         "assets/examples/1-style-1.jpg",
+                         "assets/examples/1-output-2.png",
+                     ],
+                 ]
+                 gr.Examples(
+                     label="Examples",
+                     examples=examples,
+                     inputs=[prompt, pil_faceid, pil_style, example_output],
+                 )
+     ips = [
+         pil_faceid, pil_supp_faceids,
+         pil_mix_faceid_1, mix_scale_1,
+         pil_mix_faceid_2, mix_scale_2,
+         faceid_scale, face_structure_scale,
+         prompt, negative_prompt,
+         pil_style, style_scale,
+         seed, image_resolution, use_sr,
+     ]
+     run_button.click(fn=text_to_single_id_generation_process, inputs=ips, outputs=[result_gallery])
+
+
+ def text_to_multi_id_generation_block():
+     gr.Markdown("## Text-to-Multi-ID Generation")
+     gr.HTML(text_to_multi_id_description)
+     gr.HTML(text_to_multi_id_tips)
+     with gr.Row():
+         with gr.Column(scale=1, min_width=100):
+             prompt = gr.Textbox(value="", label='Prompt', lines=2)
+             negative_prompt = gr.Textbox(value="nsfw", label='Negative Prompt')
+             image_resolution = gr.Dropdown(choices=["768x512", "512x512", "512x768"], value="512x512",
+                                            label="Image Resolution (HxW)")
+             run_button = gr.Button(value="Run")
+
+             with gr.Accordion("Advanced Options", open=True):
+                 seed = gr.Slider(label="Seed (-1 indicates random)", minimum=-1, maximum=2147483647, step=1, value=-1)
+
+                 faceid_scale = gr.Slider(label="Face ID Scale", minimum=0.0, maximum=1.0, step=0.01, value=0.7)
+                 face_structure_scale = gr.Slider(label="Face Structure Scale", minimum=0.0, maximum=1.0,
+                                                  step=0.01, value=0.3)
+                 style_scale = gr.Slider(label="Style Scale", minimum=0.0, maximum=1.0, step=0.01, value=0.7)
+
+                 use_sr = gr.Checkbox(label="RealESRGAN 2x", value=True)
+
+         with gr.Column(scale=3, min_width=100):
+             with gr.Row(equal_height=False):
+                 with gr.Column(scale=1, min_width=100):
+                     pil_faceid_1st = gr.Image(type="pil", label="First ID")
+                     with gr.Accordion("First ID Supplements", open=False):
+                         with gr.Row():
+                             pil_supp_faceids_1st = gr.File(file_count="multiple", file_types=["image"],
+                                                            type="filepath", label="Additional ID Images")
+                         with gr.Row():
+                             with gr.Column(scale=1, min_width=100):
+                                 pil_mix_faceid_1_1st = gr.Image(type="pil", label="Mix ID 1")
+                                 mix_scale_1_1st = gr.Slider(label="Mix Scale 1", minimum=0.0, maximum=1.0, step=0.01,
+                                                             value=0.0)
+                             with gr.Column(scale=1, min_width=100):
+                                 pil_mix_faceid_2_1st = gr.Image(type="pil", label="Mix ID 2")
+                                 mix_scale_2_1st = gr.Slider(label="Mix Scale 2", minimum=0.0, maximum=1.0, step=0.01,
+                                                             value=0.0)
+                 with gr.Column(scale=1, min_width=100):
+                     pil_faceid_2nd = gr.Image(type="pil", label="Second ID")
+                     with gr.Accordion("Second ID Supplements", open=False):
+                         with gr.Row():
+                             pil_supp_faceids_2nd = gr.File(file_count="multiple", file_types=["image"],
+                                                            type="filepath", label="Additional ID Images")
+                         with gr.Row():
+                             with gr.Column(scale=1, min_width=100):
+                                 pil_mix_faceid_1_2nd = gr.Image(type="pil", label="Mix ID 1")
+                                 mix_scale_1_2nd = gr.Slider(label="Mix Scale 1", minimum=0.0, maximum=1.0, step=0.01,
+                                                             value=0.0)
+                             with gr.Column(scale=1, min_width=100):
+                                 pil_mix_faceid_2_2nd = gr.Image(type="pil", label="Mix ID 2")
+                                 mix_scale_2_2nd = gr.Slider(label="Mix Scale 2", minimum=0.0, maximum=1.0, step=0.01,
+                                                             value=0.0)
+                 with gr.Column(scale=1, min_width=100):
+                     pil_style = gr.Image(type="pil", label="Style")
+
+             with gr.Row():
+                 example_output = gr.Image(type="pil", label="(Example Output)", visible=False)
+                 result_gallery = gr.Gallery(label='Output', show_label=True, elem_id="gallery", columns=4, preview=True,
+                                             format="png")
+             with gr.Row():
+                 examples = [
+                     [
+                         "两个女人在欢笑和快乐中被捕捉到,他们的脸上洋溢着真挚的幸福,背景是日落时分的宁静海滩。这幅画以柔和的风格描绘,捕捉了这一刻的温暖和宁静。",
+                         "assets/examples/2-stylegan2-ffhq-0100.png",
+                         "assets/examples/2-stylegan2-ffhq-0293.png",
+                         "assets/No-Image-Placeholder.png",
+                         "assets/examples/2-output-1.png",
+                     ],
+                     [
+                         "The two female models are drinking coffee. The background was off-white.",
+                         "assets/examples/2-stylegan2-ffhq-0100.png",
+                         "assets/examples/2-stylegan2-ffhq-0293.png",
+                         "assets/examples/2-style-1.jpg",
+                         "assets/examples/2-output-2.png",
+                     ],
+                 ]
+                 gr.Examples(
+                     label="Examples",
+                     examples=examples,
+                     inputs=[prompt, pil_faceid_1st, pil_faceid_2nd, pil_style, example_output],
+                 )
+             with gr.Row():
+                 examples = [
+                     [
+                         "Two men in an American poster.",
+                         "assets/examples/Trump-1.jpg",
+                         ["assets/examples/Trump-2.jpg", "assets/examples/Trump-3.jpg", "assets/examples/Trump-4.jpg"],
+                         "assets/examples/Biden-1.jpg",
+                         ["assets/examples/Biden-2.jpg", "assets/examples/Biden-3.jpg", "assets/examples/Biden-4.jpg"],
+                         "assets/examples/2-output-4.png",
+                     ],
+                     [
+                         "Two men engaged in a vigorous handshake, both wearing expressions of enthusiasm and determination, set against a backdrop of a bustling business district. The image is crafted in a sleek and modern digital art style, conveying the dynamic and competitive nature of their interaction.",
+                         "assets/examples/Trump-1.jpg",
+                         ["assets/examples/Trump-2.jpg", "assets/examples/Trump-3.jpg", "assets/examples/Trump-4.jpg"],
+                         "assets/examples/Biden-1.jpg",
+                         ["assets/examples/Biden-2.jpg", "assets/examples/Biden-3.jpg", "assets/examples/Biden-4.jpg"],
+                         "assets/examples/2-output-3.png",
+                     ],
+                 ]
+                 gr.Examples(
+                     label="Examples (Multiple References)",
+                     examples=examples,
+                     inputs=[prompt, pil_faceid_1st, pil_supp_faceids_1st, pil_faceid_2nd, pil_supp_faceids_2nd, example_output],
+                 )
+     ips = [
+         pil_faceid_1st, pil_supp_faceids_1st,
+         pil_mix_faceid_1_1st, mix_scale_1_1st,
+         pil_mix_faceid_2_1st, mix_scale_2_1st,
+         pil_faceid_2nd, pil_supp_faceids_2nd,
+         pil_mix_faceid_1_2nd, mix_scale_1_2nd,
+         pil_mix_faceid_2_2nd, mix_scale_2_2nd,
+         faceid_scale, face_structure_scale,
+         prompt, negative_prompt,
+         pil_style, style_scale,
+         seed, image_resolution, use_sr,
+     ]
+     run_button.click(fn=text_to_multi_id_generation_process, inputs=ips, outputs=[result_gallery])
+
+
+ def image_to_single_id_generation_block():
+     gr.Markdown("## Image-to-Single-ID Generation")
+     gr.HTML(image_to_single_id_description)
+     gr.HTML(image_to_single_id_tips)
+     with gr.Row():
+         with gr.Column(scale=1, min_width=100):
+             image_resolution = gr.Dropdown(choices=["768x512", "512x512", "512x768"], value="512x512",
+                                            label="Image Resolution (HxW)")
+             run_button = gr.Button(value="Run")
+
+             with gr.Accordion("Advanced Options", open=True):
+                 seed = gr.Slider(label="Seed (-1 indicates random)", minimum=-1, maximum=2147483647, step=1, value=-1)
+
+                 style_scale = gr.Slider(label="Reference Scale", minimum=0.0, maximum=1.0, step=0.01, value=0.7)
+                 faceid_scale = gr.Slider(label="Face ID Scale", minimum=0.0, maximum=1.0, step=0.01, value=0.7)
+                 face_structure_scale = gr.Slider(label="Face Structure Scale", minimum=0.0, maximum=1.0, step=0.01,
+                                                  value=0.3)
+
+                 use_sr = gr.Checkbox(label="RealESRGAN 2x", value=True)
+
+         with gr.Column(scale=3, min_width=100):
+             with gr.Row(equal_height=False):
+                 pil_style = gr.Image(type="pil", label="Portrait Reference")
+                 pil_faceid = gr.Image(type="pil", label="ID Image")
+                 with gr.Accordion("ID Supplements", open=True):
+                     with gr.Row():
+                         pil_supp_faceids = gr.File(file_count="multiple", file_types=["image"],
+                                                    type="filepath", label="Additional ID Images")
+                     with gr.Row():
+                         with gr.Column(scale=1, min_width=100):
+                             pil_mix_faceid_1 = gr.Image(type="pil", label="Mix ID 1")
+                             mix_scale_1 = gr.Slider(label="Mix Scale 1", minimum=0.0, maximum=1.0, step=0.01, value=0.0)
+                         with gr.Column(scale=1, min_width=100):
+                             pil_mix_faceid_2 = gr.Image(type="pil", label="Mix ID 2")
+                             mix_scale_2 = gr.Slider(label="Mix Scale 2", minimum=0.0, maximum=1.0, step=0.01, value=0.0)
+             with gr.Row():
+                 with gr.Column(scale=3, min_width=100):
+                     example_output = gr.Image(type="pil", label="(Example Output)", visible=False)
+                     result_gallery = gr.Gallery(label='Output', show_label=True, elem_id="gallery", columns=4,
+                                                 preview=True, format="png")
+             with gr.Row():
+                 examples = [
+                     [
+                         "assets/examples/3-style-1.png",
+                         "assets/examples/3-stylegan2-ffhq-0293.png",
+                         0.7,
+                         0.3,
+                         "assets/examples/3-output-1.png",
+                     ],
+                     [
+                         "assets/examples/3-style-1.png",
+                         "assets/examples/3-stylegan2-ffhq-0293.png",
+                         0.6,
+                         0.0,
+                         "assets/examples/3-output-2.png",
+                     ],
+                     [
+                         "assets/examples/3-style-2.jpg",
+                         "assets/examples/3-stylegan2-ffhq-0381.png",
+                         0.7,
+                         0.3,
+                         "assets/examples/3-output-3.png",
+                     ],
+                     [
+                         "assets/examples/3-style-3.jpg",
+                         "assets/examples/3-stylegan2-ffhq-0381.png",
+                         0.6,
+                         0.0,
+                         "assets/examples/3-output-4.png",
+                     ],
+                 ]
+                 gr.Examples(
+                     label="Examples",
+                     examples=examples,
+                     inputs=[pil_style, pil_faceid, faceid_scale, face_structure_scale, example_output],
+                 )
+     ips = [
+         pil_faceid, pil_supp_faceids,
+         pil_mix_faceid_1, mix_scale_1,
+         pil_mix_faceid_2, mix_scale_2,
+         faceid_scale, face_structure_scale,
+         pil_style, style_scale,
+         seed, image_resolution, use_sr,
+     ]
+     run_button.click(fn=image_to_single_id_generation_process, inputs=ips, outputs=[result_gallery])
+
+
+ if __name__ == "__main__":
+     title = r"""
+     <div style="text-align: center;">
+     <h1> UniPortrait: A Unified Framework for Identity-Preserving Single- and Multi-Human Image Personalization </h1>
+     <div style="display: flex; justify-content: center; align-items: center; text-align: center;">
+         <a href="https://arxiv.org/pdf/xxxx.xxxxx"><img src="https://img.shields.io/badge/arXiv-xxxx.xxxxx-red"></a>
+         &nbsp;
+         <a href='https://aigcdesigngroup.github.io/UniPortrait-Page/'><img src='https://img.shields.io/badge/Project_Page-UniPortrait-green' alt='Project Page'></a>
+         &nbsp;
+         <a href="https://github.com/junjiehe96/UniPortrait"><img src="https://img.shields.io/badge/Github-Code-blue"></a>
+     </div>
+     <br>
+     </div>
+     """
+
+     title_description = r"""
+     This is the <b>official 🤗 Gradio demo</b> for <a href='https://arxiv.org/pdf/xxxx.xxxxx' target='_blank'><b>UniPortrait: A Unified Framework for Identity-Preserving Single- and Multi-Human Image Personalization</b></a>.<br>
+     The demo provides three capabilities: text-to-single-ID personalization, text-to-multi-ID personalization, and image-to-single-ID personalization. All of these are based on the Stable Diffusion v1-5 model. Feel free to give them a try! 😊
+     """
+
+     text_to_single_id_description = r"""🚀🚀🚀Quick start:<br>
+     1. Enter a text prompt (Chinese or English), upload an image with a face, and click the <b>Run</b> button.<br>
+     2. (Optional) You can also upload an image as the style reference for the results. 🤗<br>
+     """
+
+     text_to_single_id_tips = r"""💡💡💡Tips:<br>
+     1. Try to avoid generating faces that are too small, as this may lead to artifacts. (Currently, the short side of the generated image is limited to 512.)<br>
+     2. It is a good idea to upload multiple reference photos of the face to improve prompt and ID consistency. Additional references can be uploaded under "ID Supplements".<br>
+     3. Appropriate values of "Face ID Scale" and "Face Structure Scale" are important for balancing ID fidelity and text alignment. We recommend a "Face ID Scale" of 0.5~0.7 and a "Face Structure Scale" of 0.0~0.4.<br>
+     """
+
+     text_to_multi_id_description = r"""🚀🚀🚀Quick start:<br>
+     1. Enter a text prompt (Chinese or English), upload an image with a face in the "First ID" and "Second ID" blocks respectively, and click the <b>Run</b> button.<br>
+     2. (Optional) You can also upload an image as the style reference for the results. 🤗<br>
+     """
+
+     text_to_multi_id_tips = r"""💡💡💡Tips:<br>
+     1. Try to avoid generating faces that are too small, as this may lead to artifacts. (Currently, the short side of the generated image is limited to 512.)<br>
+     2. It is a good idea to upload multiple reference photos of each face to improve prompt and ID consistency. Additional references can be uploaded under "ID Supplements".<br>
+     3. Appropriate values of "Face ID Scale" and "Face Structure Scale" are important for balancing ID fidelity and text alignment. We recommend a "Face ID Scale" of 0.3~0.7 and a "Face Structure Scale" of 0.0~0.4.<br>
+     """
+
+     image_to_single_id_description = r"""🚀🚀🚀Quick start: Upload an image as the portrait reference (it can be any style), upload a face image, and click the <b>Run</b> button. 🤗<br>"""
+
+     image_to_single_id_tips = r"""💡💡💡Tips:<br>
+     1. Try to avoid generating faces that are too small, as this may lead to artifacts. (Currently, the short side of the generated image is limited to 512.)<br>
+     2. It is a good idea to upload multiple reference photos of the face to improve ID consistency. Additional references can be uploaded under "ID Supplements".<br>
+     3. Appropriate values of "Face ID Scale" and "Face Structure Scale" are important for balancing the portrait reference and ID alignment. We recommend a "Face ID Scale" of 0.5~0.7 and a "Face Structure Scale" of 0.0~0.4.<br>
+     """
+
+     citation = r"""
+     ---
+     📝 **Citation**
+     <br>
+     If our work is helpful for your research or applications, please cite us via:
+     ```bibtex
+     @article{he2024uniportrait,
+         title={UniPortrait: A Unified Framework for Identity-Preserving Single- and Multi-Human Image Personalization},
+         author={He, Junjie and others},
+         journal={arXiv preprint arXiv:xxxx.xxxxx},
+         year={2024}
+     }
+     ```
+     📧 **Contact**
+     <br>
+     If you have any questions, please feel free to open an issue or reach out to us directly at <b>he_junjie@zju.edu.cn</b>.
+     """
+
+     block = gr.Blocks(title="UniPortrait").queue()
+     with block:
+         gr.HTML(title)
+         gr.HTML(title_description)
+
+         with gr.TabItem("Text-to-Single-ID"):
+             text_to_single_id_generation_block()
+
+         with gr.TabItem("Text-to-Multi-ID"):
+             text_to_multi_id_generation_block()
+
+         with gr.TabItem("Image-to-Single-ID (Stylization)"):
+             image_to_single_id_generation_block()
+
+     block.launch(share=True)
+     # block.launch(server_name='0.0.0.0', share=False, server_port=9999, allowed_paths=["/"])
+     # block.launch(server_name='127.0.0.1', share=False, server_port=9999, allowed_paths=["/"])
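
The `ips` lists above are positional: they must line up one-to-one with the parameters of the corresponding `*_generation_process` functions in `src/process.py`. For reference, a minimal sketch of the equivalent direct call for the single-ID tab, assuming the OSS and generation-service environment variables read by `src/util.py` and `src/generation.py` are already configured (the prompt and the default slider values below are illustrative, taken from the UI defaults):

```python
# Sketch only: bypasses the Gradio UI and calls the single-ID pipeline directly.
from PIL import Image

from src.process import text_to_single_id_generation_process

if __name__ == "__main__":
    images = text_to_single_id_generation_process(
        pil_faceid=Image.open("assets/examples/1-newton.jpg"),  # identity reference from the repo's examples
        faceid_scale=0.7,             # same defaults as the UI sliders
        face_structure_scale=0.1,
        prompt="A young man with short black hair, wearing a black hoodie.",
        negative_prompt="nsfw",
        pil_style=None,               # optional style reference image
        style_scale=0.7,
        seed=-1,                      # -1 lets the backend pick a random seed
        image_resolution="512x512",
        use_sr=True,                  # RealESRGAN 2x upscaling
    )
    for i, img in enumerate(images):  # the backend returns a list of PIL images
        img.save(f"output_{i}.png")
```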
assets/No-Image-Placeholder.png ADDED
assets/examples/1-newton.jpg ADDED
assets/examples/1-output-1.png ADDED

Git LFS Details

  • SHA256: 4ac173d31b0bd613bdf534e2414a8a681b8ca8afb1830bf4da721f3d0acf748c
  • Pointer size: 132 Bytes
  • Size of remote file: 1.12 MB
assets/examples/1-output-2.png ADDED

Git LFS Details

  • SHA256: e77bba07b1b2fc73bf2f6c569f1c1b8793b576586ec374c33e3dbb9052a02201
  • Pointer size: 132 Bytes
  • Size of remote file: 1.1 MB
assets/examples/1-output-3.png ADDED

Git LFS Details

  • SHA256: 2bfcae708bc5e428563efae6800006b874aa3d3a804e5bbf16b5fb88a956cff6
  • Pointer size: 132 Bytes
  • Size of remote file: 1.14 MB
assets/examples/1-style-1.jpg ADDED
assets/examples/2-output-1.png ADDED

Git LFS Details

  • SHA256: d737f89a12856ab445b2e73656b9003ff01b0faa2f9102010232c97fd3451e07
  • Pointer size: 132 Bytes
  • Size of remote file: 1.17 MB
assets/examples/2-output-2.png ADDED

Git LFS Details

  • SHA256: 742f8f46bac6b8adbdc64816b8a87d8114f0fc985d34b376460f7b025e25a458
  • Pointer size: 132 Bytes
  • Size of remote file: 1.15 MB
assets/examples/2-output-3.png ADDED

Git LFS Details

  • SHA256: 7b821808810cc3cec511123eacaf62c3b254a36a48a8f1e23818a701334de427
  • Pointer size: 132 Bytes
  • Size of remote file: 1.19 MB
assets/examples/2-output-4.png ADDED

Git LFS Details

  • SHA256: 5281571febe727a6a96582fef2ebae97f0a72d19820c7eb4ed43d4f87c211a71
  • Pointer size: 132 Bytes
  • Size of remote file: 1.24 MB
assets/examples/2-style-1.jpg ADDED
assets/examples/2-stylegan2-ffhq-0100.png ADDED

Git LFS Details

  • SHA256: 3620d6c5a3280dea0cf9e98b91ce5b505a6c6fee94a515c9cbff250f9d13f9c5
  • Pointer size: 132 Bytes
  • Size of remote file: 1.34 MB
assets/examples/2-stylegan2-ffhq-0293.png ADDED

Git LFS Details

  • SHA256: acd15320b4f75d12642b7916fd276d59dbcb592c7695cf5e09c3d710da973c7e
  • Pointer size: 132 Bytes
  • Size of remote file: 1.21 MB
assets/examples/3-output-1.png ADDED

Git LFS Details

  • SHA256: 1f2e095949cfaebe0014267df9a82513014ca0adb6cffa025c3bdc9c0b881f1d
  • Pointer size: 132 Bytes
  • Size of remote file: 1.14 MB
assets/examples/3-output-2.png ADDED

Git LFS Details

  • SHA256: a813a12a631fd070457597191ff63bbb7064d01e21c7151ebfc2ec8c3d03b4f0
  • Pointer size: 132 Bytes
  • Size of remote file: 1.1 MB
assets/examples/3-output-3.png ADDED

Git LFS Details

  • SHA256: 15e8891c71342bf54cda110e26a501381a91dc5660aa503b93f60b0dfbbb16fe
  • Pointer size: 132 Bytes
  • Size of remote file: 1.01 MB
assets/examples/3-output-4.png ADDED

Git LFS Details

  • SHA256: 945d2258769fde2d53c2ffbcc4ab7e68dba3dcd925a9346691e7481ab1774446
  • Pointer size: 132 Bytes
  • Size of remote file: 1.05 MB
assets/examples/3-style-1.png ADDED

Git LFS Details

  • SHA256: 864f7116528d90e8798d82c7a6fce50734759d46bf590554bbaa0177ff869c2c
  • Pointer size: 132 Bytes
  • Size of remote file: 2.05 MB
assets/examples/3-style-2.jpg ADDED
assets/examples/3-style-3.jpg ADDED
assets/examples/3-stylegan2-ffhq-0293.png ADDED

Git LFS Details

  • SHA256: acd15320b4f75d12642b7916fd276d59dbcb592c7695cf5e09c3d710da973c7e
  • Pointer size: 132 Bytes
  • Size of remote file: 1.21 MB
assets/examples/3-stylegan2-ffhq-0381.png ADDED

Git LFS Details

  • SHA256: b54010565c6f18618735b9dadaec16494fa7818c37d87003664b269b43c11d28
  • Pointer size: 132 Bytes
  • Size of remote file: 1.35 MB
assets/examples/Biden-1.jpg ADDED
assets/examples/Biden-2.jpg ADDED
assets/examples/Biden-3.jpg ADDED
assets/examples/Biden-4.jpg ADDED
assets/examples/Trump-1.jpg ADDED
assets/examples/Trump-2.jpg ADDED
assets/examples/Trump-3.jpg ADDED
assets/examples/Trump-4.jpg ADDED

Git LFS Details

  • SHA256: 0a46e35fa0dd47ccb1049621360e2272a705d1bf65c02dd08c367ef9b0eb2386
  • Pointer size: 132 Bytes
  • Size of remote file: 1.79 MB
requirements.txt ADDED
@@ -0,0 +1 @@
+ oss2
src/__init__.py ADDED
File without changes
src/generation.py ADDED
@@ -0,0 +1,79 @@
+ import json
+ import os
+ import time
+
+ import gradio as gr
+ import requests
+
+ from src.log import logger
+ from src.util import download_images
+
+
+ def call_generation(data):
+     url_task = os.getenv("URL_TASK")
+     api_key = os.getenv("API_KEY_GENERATION")
+     model_id = os.getenv("MODEL_ID")
+     url_query = os.getenv("URL_QUERY")
+
+     batch_size = 2
+     repeat_times = 2
+
+     headers = {
+         "Content-Type": "application/json",
+         "Accept": "application/json",
+         "Authorization": f"Bearer {api_key}",
+         "X-DashScope-Async": "enable",
+     }
+     data["model"] = model_id
+     data["parameters"]["n"] = batch_size
+
+     # Submit the generation task repeat_times times before polling any of them.
+     all_res_ = []
+     for i in range(repeat_times):
+         if data["parameters"]["seed"] != -1:
+             data["parameters"]["seed"] = data["parameters"]["seed"] * (i + 1)
+         res_ = requests.post(url_task, data=json.dumps(data), headers=headers)
+         all_res_.append(res_)
+
+     all_image_data = []
+     for res_ in all_res_:
+         response_code = res_.status_code
+         if 200 == response_code:
+             res = json.loads(res_.content.decode())
+             task_id = res['output']['task_id']
+             logger.info(f"task_id: {task_id}: Create request success. Params: {data}")
+
+             # Poll the asynchronous task until it succeeds or fails.
+             is_running = True
+             while is_running:
+                 res_ = requests.post(f'{url_query}/{task_id}', headers=headers)
+                 response_code = res_.status_code
+                 if 200 == response_code:
+                     res = json.loads(res_.content.decode())
+                     if "SUCCEEDED" == res['output']['task_status']:
+                         logger.info(f"task_id: {task_id}: Generation task query success.")
+                         results = res['output']['results']
+                         img_urls = [x['url'] for x in results]
+                         logger.info(f"task_id: {task_id}: {res}")
+                         break
+                     elif "FAILED" != res['output']['task_status']:
+                         logger.debug(f"task_id: {task_id}: query result...")
+                         time.sleep(1)
+                     else:
+                         raise gr.Error(
+                             "Failed to get results from the generation task. Make sure all the ID images contain a clear face. If it still does not work, you can contact us or open an issue.")
+
+                 else:
+                     logger.error(f'task_id: {task_id}: Failed to query the task result: {res_.content}')
+                     raise gr.Error("Failed to query the task result.")
+
+             logger.info(f"task_id: {task_id}: download generated images.")
+             img_data = download_images(img_urls, batch_size)
+             logger.info(f"task_id: {task_id}: Generate done.")
+             all_image_data += img_data
+         else:
+             logger.error(f'Failed to create the generation task: {res_.content}')
+             raise gr.Error("Failed to create the generation task.")
+
+     if len(all_image_data) != repeat_times * batch_size:
+         raise gr.Error("Generation failed.")
+     return all_image_data
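
`call_generation` follows a create-then-poll pattern: it POSTs the payload to the task-creation endpoint with `X-DashScope-Async: enable`, then polls `URL_QUERY/<task_id>` every second until the status is `SUCCEEDED` or `FAILED`, doing this `repeat_times` times with a batch size of 2. A minimal sketch of the environment it expects (variable names come from this file; the URL and key values are placeholders, not real endpoints):

```python
# Sketch: environment required by src/generation.py (placeholder values for illustration).
import os

os.environ.setdefault("URL_TASK", "https://example.com/api/v1/services/generation")  # task-creation endpoint
os.environ.setdefault("URL_QUERY", "https://example.com/api/v1/tasks")               # task-status endpoint
os.environ.setdefault("API_KEY_GENERATION", "sk-your-api-key")                       # bearer token for both calls
os.environ.setdefault("MODEL_ID", "your-model-id")                                   # injected as data["model"]
```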
src/log.py ADDED
@@ -0,0 +1,18 @@
+ import logging
+ import os
+ from logging.handlers import RotatingFileHandler
+
+ log_file_name = "workdir/UniPortrait.log"
+ os.makedirs(os.path.dirname(log_file_name), exist_ok=True)
+
+ log_format = '[%(levelname)s] %(asctime)s "%(filename)s", line %(lineno)d, %(message)s'
+ logging.basicConfig(
+     format=log_format,
+     datefmt="%Y-%m-%d %H:%M:%S",
+     level=logging.INFO)
+ logger = logging.getLogger(name="UniPortrait-Studio")
+
+ fh = RotatingFileHandler(log_file_name, maxBytes=20000000, backupCount=3)
+ formatter = logging.Formatter(log_format, datefmt="%Y-%m-%d %H:%M:%S")
+ fh.setFormatter(formatter)
+ logger.addHandler(fh)
src/process.py ADDED
@@ -0,0 +1,277 @@
+ import time
+ import uuid
+
+ import gradio as gr
+ import numpy as np
+ from PIL import Image, ImageChops
+
+ from .generation import call_generation
+ from .util import upload_np_2_oss
+
+
+ def get_image_url(request_id, pil_image, suffix):
+     np_image = np.array(pil_image.convert("RGB"))
+     image_url = upload_np_2_oss(np_image, request_id + suffix)
+     return image_url
+
+
+ def is_no_image_placeholder(pil_image):
+     no_image_placeholder = Image.open("assets/No-Image-Placeholder.png").convert("RGB")
+     pil_image = pil_image.convert("RGB")
+     if pil_image.size != no_image_placeholder.size:
+         # A real upload almost never matches the placeholder's size; avoid ImageChops errors.
+         return False
+     diff = ImageChops.difference(pil_image, no_image_placeholder)
+     return diff.getbbox() is None
+
+
+ def text_to_single_id_generation_process(
+         pil_faceid=None, pil_supp_faceids=None,
+         pil_mix_faceid_1=None, mix_scale_1=0.0,
+         pil_mix_faceid_2=None, mix_scale_2=0.0,
+         faceid_scale=0.0, face_structure_scale=0.0,
+         prompt="", negative_prompt="",
+         pil_style=None, style_scale=0.0,
+         seed=-1, image_resolution="512x512", use_sr=True,
+ ):
+     request_id = time.strftime('%Y%m%d-', time.localtime(time.time())) + str(uuid.uuid4())
+
+     if prompt == "":
+         raise gr.Error("Please enter the prompt")
+
+     if pil_style and is_no_image_placeholder(pil_style):
+         pil_style = None
+
+     faceid_url = []
+     if pil_faceid:
+         faceid_url.append(get_image_url(request_id, pil_faceid, suffix="_faceid.png"))
+     if pil_supp_faceids and len(pil_supp_faceids) > 0:
+         for idx, pil_supp_faceid in enumerate(pil_supp_faceids):
+             pil_supp_faceid = Image.open(pil_supp_faceid)
+             faceid_url.append(get_image_url(request_id, pil_supp_faceid, suffix=f"_supp_faceid_{idx}.png"))
+
+     mix_face_url = []
+     mix_scale = []
+     if pil_mix_faceid_1 is not None:
+         mix_face_url.append(
+             get_image_url(request_id, pil_mix_faceid_1, suffix=f"_mix_faceid_1_{mix_scale_1:.2f}.png")
+         )
+         mix_scale.append(mix_scale_1)
+
+     if pil_mix_faceid_2 is not None:
+         mix_face_url.append(
+             get_image_url(request_id, pil_mix_faceid_2, suffix=f"_mix_faceid_2_{mix_scale_2:.2f}.png")
+         )
+         mix_scale.append(mix_scale_2)
+
+     # The outer list indexes the IDs; the inner lists hold the mix-face images (and their scales) for each ID.
+     faceid_image_url, mix_faceid_image_url, mix_faceid_scale = [], [], []
+     if len(faceid_url) > 0:
+         faceid_image_url.append(faceid_url)
+         mix_faceid_image_url.append(mix_face_url)
+         mix_faceid_scale.append(mix_scale)
+
+     if pil_style is None:
+         style_image_url = ""
+     else:
+         style_image_url = get_image_url(request_id, pil_style, suffix="_style.png")
+
+     data = dict(
+         input=dict(
+             faceid_image_url=faceid_image_url,
+             mix_faceid_image_url=mix_faceid_image_url,
+             mix_faceid_scale=mix_faceid_scale,
+             style_image_url=style_image_url,
+             prompt=prompt,
+             negative_prompt=negative_prompt
+         ),
+         parameters=dict(
+             image_resolution=image_resolution,
+             faceid_scale=faceid_scale * 10.,
+             face_structure_scale=face_structure_scale * 10.,
+             style_scale=style_scale * 10.,
+             use_sr=int(use_sr),
+             seed=seed,
+             mode="text-to-image",
+         )
+     )
+
+     res = call_generation(data)
+
+     return res
+
+
+ def text_to_multi_id_generation_process(
+         pil_faceid_1st=None, pil_supp_faceids_1st=None,
+         pil_mix_faceid_1_1st=None, mix_scale_1_1st=0.0,
+         pil_mix_faceid_2_1st=None, mix_scale_2_1st=0.0,
+         pil_faceid_2nd=None, pil_supp_faceids_2nd=None,
+         pil_mix_faceid_1_2nd=None, mix_scale_1_2nd=0.0,
+         pil_mix_faceid_2_2nd=None, mix_scale_2_2nd=0.0,
+         faceid_scale=0.0, face_structure_scale=0.0,
+         prompt="", negative_prompt="",
+         pil_style=None, style_scale=0.0,
+         seed=-1, image_resolution="512x512", use_sr=True,
+ ):
+     request_id = time.strftime('%Y%m%d-', time.localtime(time.time())) + str(uuid.uuid4())
+
+     if prompt == "":
+         raise gr.Error("Please enter the prompt")
+
+     if pil_style and is_no_image_placeholder(pil_style):
+         pil_style = None
+
+     first_faceid_url = []
+     if pil_faceid_1st:
+         first_faceid_url.append(get_image_url(request_id, pil_faceid_1st, suffix="_faceid_1st.png"))
+     if pil_supp_faceids_1st and len(pil_supp_faceids_1st) > 0:
+         for idx, pil_supp_faceid_1st in enumerate(pil_supp_faceids_1st):
+             pil_supp_faceid_1st = Image.open(pil_supp_faceid_1st)
+             first_faceid_url.append(get_image_url(request_id, pil_supp_faceid_1st, suffix=f"_faceid_1st_{idx}.png"))
+
+     second_faceid_url = []
+     if pil_faceid_2nd:
+         second_faceid_url.append(get_image_url(request_id, pil_faceid_2nd, suffix="_faceid_2nd.png"))
+     if pil_supp_faceids_2nd and len(pil_supp_faceids_2nd) > 0:
+         for idx, pil_supp_faceid_2nd in enumerate(pil_supp_faceids_2nd):
+             pil_supp_faceid_2nd = Image.open(pil_supp_faceid_2nd)
+             second_faceid_url.append(get_image_url(request_id, pil_supp_faceid_2nd, suffix=f"_faceid_2nd_{idx}.png"))
+
+     first_mix_face_url, first_mix_scale = [], []
+     if pil_mix_faceid_1_1st:
+         first_mix_face_url.append(
+             get_image_url(request_id, pil_mix_faceid_1_1st, suffix=f"_mix_faceid_1_1st_{mix_scale_1_1st:.2f}.png")
+         )
+         first_mix_scale.append(mix_scale_1_1st)
+
+     if pil_mix_faceid_2_1st is not None:
+         first_mix_face_url.append(
+             get_image_url(request_id, pil_mix_faceid_2_1st, suffix=f"_mix_faceid_2_1st_{mix_scale_2_1st:.2f}.png")
+         )
+         first_mix_scale.append(mix_scale_2_1st)
+
+     second_mix_face_url, second_mix_scale = [], []
+     if pil_mix_faceid_1_2nd:
+         second_mix_face_url.append(
+             get_image_url(request_id, pil_mix_faceid_1_2nd, suffix=f"_mix_faceid_1_2nd_{mix_scale_1_2nd:.2f}.png")
+         )
+         second_mix_scale.append(mix_scale_1_2nd)
+
+     if pil_mix_faceid_2_2nd is not None:
+         second_mix_face_url.append(
+             get_image_url(request_id, pil_mix_faceid_2_2nd, suffix=f"_mix_faceid_2_2nd_{mix_scale_2_2nd:.2f}.png")
+         )
+         second_mix_scale.append(mix_scale_2_2nd)
+
+     # The outer list indexes the IDs; the inner lists hold the mix-face images (and their scales) for each ID.
+     faceid_image_url, mix_faceid_image_url, mix_faceid_scale = [], [], []
+     if len(first_faceid_url) > 0:
+         faceid_image_url.append(first_faceid_url)
+         mix_faceid_image_url.append(first_mix_face_url)
+         mix_faceid_scale.append(first_mix_scale)
+     if len(second_faceid_url) > 0:
+         faceid_image_url.append(second_faceid_url)
+         mix_faceid_image_url.append(second_mix_face_url)
+         mix_faceid_scale.append(second_mix_scale)
+
+     if pil_style is None:
+         style_image_url = ""
+     else:
+         style_image_url = get_image_url(request_id, pil_style, suffix="_style.png")
+
+     data = dict(
+         input=dict(
+             faceid_image_url=faceid_image_url,
+             mix_faceid_image_url=mix_faceid_image_url,
+             mix_faceid_scale=mix_faceid_scale,
+             style_image_url=style_image_url,
+             prompt=prompt,
+             negative_prompt=negative_prompt
+         ),
+         parameters=dict(
+             image_resolution=image_resolution,
+             faceid_scale=faceid_scale * 10.,
+             face_structure_scale=face_structure_scale * 10.,
+             style_scale=style_scale * 10.,
+             use_sr=int(use_sr),
+             seed=seed,
+             mode="text-to-image",
+         )
+     )
+
+     res = call_generation(data)
+
+     return res
+
+
+ def image_to_single_id_generation_process(
+         pil_faceid=None, pil_supp_faceids=None,
+         pil_mix_faceid_1=None, mix_scale_1=0.0,
+         pil_mix_faceid_2=None, mix_scale_2=0.0,
+         faceid_scale=0.0, face_structure_scale=0.0,
+         pil_style=None, style_scale=1.0,
+         seed=-1, image_resolution="768x512", use_sr=True,
+ ):
+     request_id = time.strftime('%Y%m%d-', time.localtime(time.time())) + str(uuid.uuid4())
+
+     if pil_faceid is None:
+         raise gr.Error("Please upload an ID image")
+
+     if pil_style is None:
+         raise gr.Error("Please upload a reference image")
+
+     faceid_url = []
+     if pil_faceid:
+         faceid_url.append(get_image_url(request_id, pil_faceid, suffix="_faceid.png"))
+     if pil_supp_faceids and len(pil_supp_faceids) > 0:
+         for idx, pil_supp_faceid in enumerate(pil_supp_faceids):
+             pil_supp_faceid = Image.open(pil_supp_faceid)
+             faceid_url.append(get_image_url(request_id, pil_supp_faceid, suffix=f"_supp_faceid_{idx}.png"))
+
+     mix_face_url = []
+     mix_scale = []
+     if pil_mix_faceid_1 is not None:
+         mix_face_url.append(
+             get_image_url(request_id, pil_mix_faceid_1, suffix=f"_mix_faceid_1_{mix_scale_1:.2f}.png")
+         )
+         mix_scale.append(mix_scale_1)
+
+     if pil_mix_faceid_2 is not None:
+         mix_face_url.append(
+             get_image_url(request_id, pil_mix_faceid_2, suffix=f"_mix_faceid_2_{mix_scale_2:.2f}.png")
+         )
+         mix_scale.append(mix_scale_2)
+
+     # The outer list indexes the IDs; the inner lists hold the mix-face images (and their scales) for each ID.
+     faceid_image_url, mix_faceid_image_url, mix_faceid_scale = [], [], []
+     if len(faceid_url) > 0:
+         faceid_image_url.append(faceid_url)
+         mix_faceid_image_url.append(mix_face_url)
+         mix_faceid_scale.append(mix_scale)
+
+     if pil_style is None:
+         style_image_url = ""
+     else:
+         style_image_url = get_image_url(request_id, pil_style, suffix="_style.png")
+
+     data = dict(
+         input=dict(
+             faceid_image_url=faceid_image_url,
+             mix_faceid_image_url=mix_faceid_image_url,
+             mix_faceid_scale=mix_faceid_scale,
+             style_image_url=style_image_url,
+         ),
+         parameters=dict(
+             image_resolution=image_resolution,
+             faceid_scale=faceid_scale * 10.,
+             face_structure_scale=face_structure_scale * 10.,
+             style_scale=style_scale * 10.,
+             use_sr=int(use_sr),
+             seed=seed,
+             mode="image-to-image",
+         )
+     )
+
+     res = call_generation(data)
+
+     return res
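
The "outer list / inner list" comment describes the nesting convention shared by all three process functions: one outer entry per ID, and per-ID inner lists for reference images, mix faces, and mix scales. A worked sketch of the structure for a two-ID request, where the first ID has two reference photos and one mix face and the second ID has a single reference (the URLs are illustrative placeholders, not real assets):

```python
# Sketch of the nested ID structure assembled by text_to_multi_id_generation_process.
faceid_image_url = [
    ["https://example.com/id1_ref1.png", "https://example.com/id1_ref2.png"],  # ID 1: main + supplementary refs
    ["https://example.com/id2_ref1.png"],                                      # ID 2: single reference
]
mix_faceid_image_url = [
    ["https://example.com/id1_mix1.png"],  # one face blended into ID 1
    [],                                    # no mix faces for ID 2
]
mix_faceid_scale = [
    [0.4],  # blend weight for ID 1's mix face (the "Mix Scale" slider value)
    [],
]
```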
src/util.py ADDED
@@ -0,0 +1,58 @@
+ import concurrent.futures
+ import io
+ import os
+
+ import oss2
+ import requests
+ from PIL import Image
+
+ from .log import logger
+
+ # OSS credentials and bucket configuration, read from the environment
+ access_key_id = os.getenv("ACCESS_KEY_ID")
+ access_key_secret = os.getenv("ACCESS_KEY_SECRET")
+ bucket_name = os.getenv("BUCKET_NAME")
+ endpoint = os.getenv("ENDPOINT")
+
+ bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)
+ oss_path = os.getenv("OSS_PATH")
+
+
+ def download_img_pil(index, img_url):
+     r = requests.get(img_url, stream=True)
+     if r.status_code == 200:
+         img = Image.open(io.BytesIO(r.content))
+         return (index, img)
+     else:
+         logger.error(f"Failed to download: {img_url}")
+         return (index, None)  # keep the tuple shape so callers can unpack safely
+
+
+ def download_images(img_urls, batch_size):
+     imgs_pil = [None] * batch_size
+     with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
+         to_do = []
+         for i, url in enumerate(img_urls):
+             future = executor.submit(download_img_pil, i, url)
+             to_do.append(future)
+
+         for future in concurrent.futures.as_completed(to_do):
+             ret = future.result()
+             index, img_pil = ret
+             imgs_pil[index] = img_pil
+
+     return imgs_pil
+
+
+ def upload_np_2_oss(input_image, name="cache.png"):
+     assert name.lower().endswith((".png", ".jpg")), name
+     imgByteArr = io.BytesIO()
+     if name.lower().endswith(".png"):
+         Image.fromarray(input_image).save(imgByteArr, format="PNG")
+     else:
+         Image.fromarray(input_image).save(imgByteArr, format="JPEG", quality=95)
+     imgByteArr = imgByteArr.getvalue()
+
+     bucket.put_object(oss_path + "/" + name, imgByteArr)
+     ret = bucket.sign_url('GET', oss_path + "/" + name, 60 * 60 * 24)  # signed GET URL, valid for 24 hours
+     del imgByteArr
+     return ret
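
These helpers implement the image round trip used by `src/process.py`: a numpy image is uploaded to OSS and exchanged for a 24-hour signed URL, and the generated results are later fetched back as PIL images. A minimal usage sketch, assuming the `ACCESS_KEY_ID`, `ACCESS_KEY_SECRET`, `BUCKET_NAME`, `ENDPOINT`, and `OSS_PATH` environment variables are set before `src.util` is imported (the blank image is only a stand-in):

```python
# Sketch: upload a numpy RGB image to OSS, then download it back via the signed URL.
import numpy as np

from src.util import download_images, upload_np_2_oss

np_image = np.zeros((512, 512, 3), dtype=np.uint8)               # placeholder RGB image
signed_url = upload_np_2_oss(np_image, name="demo_faceid.png")   # 24-hour signed GET URL
images = download_images([signed_url], batch_size=1)             # list of PIL images (None on failure)
print(signed_url, images[0].size)
```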