# wav2lip / app.py
import os
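
# Install the PaddlePaddle GPU wheel and the wav2lip PaddleHub module at startup.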
os.system("python -m pip install paddlepaddle-gpu==2.2.1.post112 -f https://www.paddlepaddle.org.cn/whl/linux/mkl/avx/stable.html")
os.system("hub install wav2lip==1.0.0")
import gradio as gr
import paddlehub as hub
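# Load the pretrained wav2lip module from PaddleHub.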
module = hub.Module(name="wav2lip")

def inference(image, audio):
    # Run Wav2Lip: lip-sync the input face image with the uploaded audio clip.
    face_input_path = image
    audio_input_path = audio
    # wav2lip_transfer writes the synthesized video to result.mp4 in output_dir.
    module.wav2lip_transfer(face=face_input_path, audio=audio_input_path, output_dir='.', use_gpu=True)
    return "result.mp4"
title = "Wav2lip"
description = "Gradio demo for Wav2lip: Accurately Lip-syncing Videos In The Wild. To use it, simply upload your image and audio file, or click one of the examples to load them. Read more at the links below. Please trim audio file to maximum of 3-4 seconds"
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2008.10010' target='_blank'>A Lip Sync Expert Is All You Need for Speech to Lip Generation In The Wild</a> | <a href='https://github.com/PaddlePaddle/PaddleGAN/blob/develop/docs/en_US/tutorials/wav2lip.md' target='_blank'>Github Repo</a></p>"
examples = [["monatest.jpeg", "game.wav"]]

iface = gr.Interface(inference,
                     inputs=[gr.inputs.Image(type="filepath"), gr.inputs.Audio(source="upload", type="filepath")],
                     outputs=gr.outputs.Video(label="Output Video"),
                     examples=examples, enable_queue=True, title=title, article=article, description=description)
iface.launch(debug=True)