Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -161,6 +161,12 @@ def is_square_video(video_path):
     return gr.update(visible=True)
 
 
+def txt_to_driving_video(input_text):
+    audio_outpath = gpu_wrapped_elevenlabs_pipeline_generate_voice(input_text)
+    video_outpath = gpu_wrapped_stf_pipeline_execute(audio_outpath)
+    return video_outpath
+
+
 
 # assets
 title_md = "assets/gradio_title.md"
@@ -193,162 +199,161 @@ driving_video_path=gr.Video()
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
     #gr.HTML(load_description(title_md))
 
-
-
-
+
+    gr.Markdown("# Talk-GEN by ESTsoft")
+    gr.Markdown("## Text to talking video generation")
 
-
-
-
-    script_txt = gr.Text()
-    audio_gen_button = gr.Button("Audio generation", variant="primary")
-    # with gr.Column():
-    #     txt2video_gen_button = gr.Button("txt2video generation", variant="primary")
-
-    #with gr.Column():
-        #audio_gen_button = gr.Button("Audio generation", variant="primary")
-    with gr.Row():
-        output_audio = gr.Audio(label="Generated audio", type="filepath")
-
-
-    gr.Markdown("### 2. Audio to Driving-Video")
-    with gr.Row():
-        #audio_path_component = gr.Textbox(label="Input", value="assets/examples/driving/test_aud.mp3")
-        video_gen_button = gr.Button("Audio to Video generation", variant="primary")
-    # with gr.Row():
-    #     #a2v_output = gr.Video()
-    #     driving_video_path.render()
-
-
-    gr.Markdown("### 3. Image to Talking Video")
-    #gr.Markdown(load_description("assets/gradio_description_upload.md"))
-    with gr.Row():
-        with gr.Accordion(open=True, label="Source Portrait"):
-            image_input = gr.Image(type="filepath")
-            gr.Examples(
-                examples=[
-                    #[osp.join(example_portrait_dir, "01.webp")],
-                    [osp.join(example_portrait_dir, "02.webp")],
-                    [osp.join(example_portrait_dir, "03.jpg")],
-                    [osp.join(example_portrait_dir, "04.jpg")],
-                    [osp.join(example_portrait_dir, "05.jpg")],
-                    [osp.join(example_portrait_dir, "06.jpg")],
-                    [osp.join(example_portrait_dir, "07.jpg")],
-                    [osp.join(example_portrait_dir, "08.jpg")],
-                ],
-                inputs=[image_input],
-                cache_examples=False,
-            )
-        with gr.Accordion(open=True, label="Driving Video"):
-            video_input = gr.Video()
-            gr.Examples(
-                examples=[
-                    [osp.join(example_video_dir, "d0.mp4")],
-                    [osp.join(example_video_dir, "d18.mp4")],
-                    [osp.join(example_video_dir, "d19.mp4")],
-                    [osp.join(example_video_dir, "d14_trim.mp4")],
-                    [osp.join(example_video_dir, "d6_trim.mp4")],
-                ],
-                inputs=[video_input],
-                cache_examples=False,
-            )
-    with gr.Row():
-        with gr.Accordion(open=False, label="Animation Instructions and Options"):
-            gr.Markdown(load_description("assets/gradio_description_animation.md"))
-            with gr.Row():
-                flag_relative_input = gr.Checkbox(value=True, label="relative motion")
-                flag_do_crop_input = gr.Checkbox(value=True, label="do crop")
-                flag_remap_input = gr.Checkbox(value=True, label="paste-back")
-    #gr.Markdown(load_description("assets/gradio_description_animate_clear.md"))
-    with gr.Row():
-        with gr.Column():
-            process_button_animation = gr.Button("🚀 Animate", variant="primary")
-        with gr.Column():
-            process_button_reset = gr.ClearButton([image_input, video_input, output_video, output_video_concat], value="🧹 Clear")
-    with gr.Row():
-        with gr.Column():
-            with gr.Accordion(open=True, label="The animated video in the original image space"):
-                output_video.render()
-        with gr.Column():
-            with gr.Accordion(open=True, label="The animated video"):
-                output_video_concat.render()
-    # with gr.Row():
-    #     # Examples
-    #     gr.Markdown("## You could also choose the examples below by one click ⬇️")
-    #     with gr.Row():
-    #         gr.Examples(
-    #             examples=data_examples,
-    #             fn=gpu_wrapped_execute_video,
-    #             inputs=[
-    #                 image_input,
-    #                 video_input,
-    #                 flag_relative_input,
-    #                 flag_do_crop_input,
-    #                 flag_remap_input
-    #             ],
-    #             outputs=[output_image, output_image_paste_back],
-    #             examples_per_page=6,
-    #             cache_examples=False,
-    #         )
+    #gr.Markdown("### 1. Text to audio")
+    gr.Markdown("### 1. Text to Driving-Video")
+    with gr.Row():
 
-
-
-
-
-
-
-
-
+        script_txt = gr.Text()
+        # audio_gen_button = gr.Button("Audio generation", variant="primary")
+        # with gr.Column():
+        #     txt2video_gen_button = gr.Button("txt2video generation", variant="primary")
+        txt2video_gen_button = gr.Button("txt2video generation", variant="primary")
+
+    #with gr.Column():
+        #audio_gen_button = gr.Button("Audio generation", variant="primary")
+    # with gr.Row():
+    #     output_audio = gr.Audio(label="Generated audio", type="filepath")
+
+
+    # gr.Markdown("### 2. Audio to Driving-Video")
+    # with gr.Row():
+    #     #audio_path_component = gr.Textbox(label="Input", value="assets/examples/driving/test_aud.mp3")
+    #     video_gen_button = gr.Button("Audio to Video generation", variant="primary")
+    # with gr.Row():
+    #     #a2v_output = gr.Video()
+    #     driving_video_path.render()
+
+
+    gr.Markdown("### 2. Image to Talking-Video with Driving-Video")
+    #gr.Markdown(load_description("assets/gradio_description_upload.md"))
+    with gr.Row():
+        with gr.Accordion(open=True, label="Source Portrait"):
+            image_input = gr.Image(type="filepath")
+            gr.Examples(
+                examples=[
+                    #[osp.join(example_portrait_dir, "01.webp")],
+                    [osp.join(example_portrait_dir, "02.webp")],
+                    [osp.join(example_portrait_dir, "03.jpg")],
+                    [osp.join(example_portrait_dir, "04.jpg")],
+                    [osp.join(example_portrait_dir, "05.jpg")],
+                    [osp.join(example_portrait_dir, "06.jpg")],
+                    [osp.join(example_portrait_dir, "07.jpg")],
+                    [osp.join(example_portrait_dir, "08.jpg")],
                 ],
+                inputs=[image_input],
+                cache_examples=False,
             )
-
-
-
-
-
-
-
-
-
-        fn=gpu_wrapped_elevenlabs_pipeline_generate_voice,
-        inputs=[
-            script_txt
+        with gr.Accordion(open=True, label="Driving Video"):
+            video_input = gr.Video()
+            gr.Examples(
+                examples=[
+                    [osp.join(example_video_dir, "d0.mp4")],
+                    [osp.join(example_video_dir, "d18.mp4")],
+                    [osp.join(example_video_dir, "d19.mp4")],
+                    [osp.join(example_video_dir, "d14_trim.mp4")],
+                    [osp.join(example_video_dir, "d6_trim.mp4")],
                 ],
-
-
+                inputs=[video_input],
+                cache_examples=False,
             )
+    with gr.Row():
+        with gr.Accordion(open=False, label="Animation Instructions and Options"):
+            gr.Markdown(load_description("assets/gradio_description_animation.md"))
+            with gr.Row():
+                flag_relative_input = gr.Checkbox(value=True, label="relative motion")
+                flag_do_crop_input = gr.Checkbox(value=True, label="do crop")
+                flag_remap_input = gr.Checkbox(value=True, label="paste-back")
+    #gr.Markdown(load_description("assets/gradio_description_animate_clear.md"))
+    with gr.Row():
+        with gr.Column():
+            process_button_animation = gr.Button("🚀 Animate", variant="primary")
+        with gr.Column():
+            process_button_reset = gr.ClearButton([image_input, video_input, output_video, output_video_concat], value="🧹 Clear")
+    with gr.Row():
+        with gr.Column():
+            with gr.Accordion(open=True, label="The animated video in the original image space"):
+                output_video.render()
+        with gr.Column():
+            with gr.Accordion(open=True, label="The animated video"):
+                output_video_concat.render()
+    # with gr.Row():
+    #     # Examples
+    #     gr.Markdown("## You could also choose the examples below by one click ⬇️")
+    #     with gr.Row():
+    #         gr.Examples(
+    #             examples=data_examples,
+    #             fn=gpu_wrapped_execute_video,
+    #             inputs=[
+    #                 image_input,
+    #                 video_input,
+    #                 flag_relative_input,
+    #                 flag_do_crop_input,
+    #                 flag_remap_input
+    #             ],
+    #             outputs=[output_image, output_image_paste_back],
+    #             examples_per_page=6,
+    #             cache_examples=False,
+    #         )
+
+    process_button_animation.click(
+        fn=gpu_wrapped_execute_video,
+        inputs=[
+            image_input,
+            video_input,
+            flag_relative_input,
+            flag_do_crop_input,
+            flag_remap_input
+        ],
+        outputs=[output_video, output_video_concat],
+        show_progress=True
+    )
+    txt2video_gen_button.click(
+        fn=txt_to_driving_video,
+        inputs=[
+            script_txt
+        ],
+        outputs=[video_input],
+        show_progress=True
+    )
+    # audio_gen_button.click(
+    #     fn=gpu_wrapped_elevenlabs_pipeline_generate_voice,
+    #     inputs=[
+    #         script_txt
+    #     ],
+    #     outputs=[output_audio],
+    #     show_progress=True
+    # )
+
+    # video_gen_button.click(
+    #     fn=gpu_wrapped_stf_pipeline_execute,
+    #     inputs=[
+    #         output_audio
+    #         #audio_path_component
+    #     ],
+    #     outputs=[
+    #         video_input
+    #         #driving_video_path
+    #     ],
+    #     show_progress=True
+    # )
 
-    video_gen_button.click(
-        fn=gpu_wrapped_stf_pipeline_execute,
-        inputs=[
-            output_audio
-            #audio_path_component
-        ],
-        outputs=[
-            video_input
-            #driving_video_path
-        ],
-        show_progress=True
-    )
 
+
+    # image_input.change(
+    #     fn=gradio_pipeline.prepare_retargeting,
+    #     inputs=image_input,
+    #     outputs=[eye_retargeting_slider, lip_retargeting_slider, retargeting_input_image]
+    # )
+    video_input.upload(
+        fn=is_square_video,
+        inputs=video_input,
+        outputs=video_input
+    )
 
-
-    # image_input.change(
-    #     fn=gradio_pipeline.prepare_retargeting,
-    #     inputs=image_input,
-    #     outputs=[eye_retargeting_slider, lip_retargeting_slider, retargeting_input_image]
-    # )
-    video_input.upload(
-        fn=is_square_video,
-        inputs=video_input,
-        outputs=video_input
-    )
-
-    # # Third tab: FLUX development tab
-    # with gr.Tab("FLUX Image"):
-    #     flux_demo = create_flux_tab(image_input)  # create the FLUX development tab
 
 demo.launch(
     server_port=args.server_port,
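Routing the generated clip into the existing video_input component, rather than into a separate output slot, is the main wiring choice here: whatever lands in that component, whether produced by txt_to_driving_video or picked from the Driving Video examples, reaches the 🚀 Animate handler through exactly the same path. That is also why the old output_audio component and the separate audio_gen_button and video_gen_button handlers could be retired to comments.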
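The square-aspect guard is only partially visible in this diff: it shows the video_input.upload(fn=is_square_video, ...) wiring and, in the first hunk's context, the validator's final return gr.update(visible=True). A plausible shape for such a validator is sketched below; the OpenCV-based check is an assumption for illustration, not the code actually running in this Space.

import cv2
import gradio as gr

def is_square_video(video_path):
    # Hypothetical body -- the real checks are not part of this diff.
    cap = cv2.VideoCapture(video_path)
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    cap.release()
    if width != height:
        # Raising gr.Error surfaces a message in the UI and aborts the event,
        # keeping a non-square driving video out of the animation pipeline.
        raise gr.Error(f"Please upload a square (1:1) video; got {int(width)}x{int(height)}.")
    # Matches the return shown in the first hunk's context; with outputs=video_input
    # this leaves a valid upload in place.
    return gr.update(visible=True)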