|
import gradio as gr |
|
from gradio_client import Client |
|
import traceback |
|
|
|
def get_speech(text, voice):
    """Synthesize speech for `text` via the remote WhisperSpeech Space.

    Args:
        text: Text to synthesize.
        voice: Filepath of a reference speaker audio clip (may be None/empty).

    Returns:
        The filepath of the generated audio, as returned by the Space.
    """
    whisper_client = Client("https://collabora-whisperspeech.hf.space/")
    audio_out = whisper_client.predict(
        text,       # text to speak
        voice,      # optional speaker-reference audio
        "",         # speaker URL (unused here)
        14,         # cps / speed setting expected by the Space
        api_name="/whisper_speech_demo",
    )
    print(audio_out)
    return audio_out
|
|
|
def get_dreamtalk(image_in, speech):
    """Animate a portrait image with the given speech via the DreamTalk Space.

    Args:
        image_in: Filepath of the portrait image to animate.
        speech: Filepath of the driving audio.

    Returns:
        Filepath of the generated talking-head video (the 'video' field of
        the Space's response dict).
    """
    dreamtalk_client = Client("https://fffiloni-dreamtalk.hf.space/")
    response = dreamtalk_client.predict(
        speech,                                  # driving audio
        image_in,                                # portrait to animate
        "M030_front_neutral_level1_001.mat",     # fixed emotion/style template
        api_name="/infer",
    )
    print(response)
    return response['video']
|
|
|
def pipe(text, voice, image_in):
    """Full pipeline: text -> speech (WhisperSpeech) -> talking-head video (DreamTalk).

    Args:
        text: Text to synthesize and lip-sync.
        voice: Optional speaker-reference audio filepath.
        image_in: Portrait image filepath to animate.

    Returns:
        Filepath of the generated video.

    Raises:
        gr.Error: If the DreamTalk step fails; the formatted traceback is
            included in the user-facing message.
    """
    speech = get_speech(text, voice)

    try:
        video = get_dreamtalk(image_in, speech)
    except Exception as e:
        # BUG FIX: the etype=/value=/tb= keyword arguments were removed from
        # traceback.format_exception in Python 3.10; passing them raised a
        # TypeError inside this handler and masked the real error. Positional
        # arguments work on all supported versions.
        tb = traceback.format_exception(type(e), e, e.__traceback__)
        print("".join(tb))
        # format_exception lines already end with '\n', so join with '' to
        # avoid blank lines between frames in the user-facing message.
        raise gr.Error('An error occurred while loading DreamTalk:\n{}'.format(''.join(tb))) from None

    return video
|
|
|
# UI layout: statement order inside the context managers defines the widget
# tree, so the structure below must not be reordered.
with gr.Blocks() as demo:

    with gr.Column():

        # Static page header.
        gr.HTML("""

        <h2 style="text-align: center;">

        Whisper Speech X Dreamtalk

        </h2>

        <p style="text-align: center;"></p>

        """)

        with gr.Row():

            with gr.Column():

                # Portrait to animate; defaults to a bundled sample image.
                image_in = gr.Image(label="Portrait IN", type="filepath", value="einstein.jpg")

            with gr.Column():

                # Optional reference audio for voice cloning.
                voice = gr.Audio(type="filepath", label="Upload or Record Speaker audio (Optional)")

                text = gr.Textbox(label="text")

                submit_btn = gr.Button('Submit')

            with gr.Column():

                video_o = gr.Video(label="Video result")

    # Wire the button to the TTS -> talking-head pipeline.
    # concurrency_limit caps simultaneous pipeline runs at 3.
    submit_btn.click(

        fn = pipe,

        inputs = [

            text, voice, image_in

        ],

        outputs = [

            video_o

        ],

        concurrency_limit = 3

    )

# Queueing is required for concurrency_limit to take effect; show_error
# surfaces gr.Error messages raised in pipe() to the user.
demo.queue(max_size=10).launch(show_error=True, show_api=False)