# test_audio / app.py
import gradio as gr
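
# The four translation models are loaded from the Hugging Face Inference API
# as callable Gradio interfaces (gr.Interface.load is the Gradio 3.x loader;
# newer releases expose the same functionality as gr.load).
# s2ut = speech-to-unit translation, unity = two-pass UnitY model;
# en-hk translates English -> Hokkien, hk-en translates Hokkien -> English.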
io1 = gr.Interface.load("huggingface/facebook/xm_transformer_s2ut_en-hk")
io2 = gr.Interface.load("huggingface/facebook/xm_transformer_s2ut_hk-en")
io3 = gr.Interface.load("huggingface/facebook/xm_transformer_unity_en-hk")
io4 = gr.Interface.load("huggingface/facebook/xm_transformer_unity_hk-en")
def inference(audio, model):
    """Run the selected translation model on the input audio and return the result."""
    print(audio)  # log the temporary filepath Gradio passes for the recording
    if model == "xm_transformer_s2ut_en-hk":
        out_audio = io1(audio)
    elif model == "xm_transformer_s2ut_hk-en":
        out_audio = io2(audio)
    elif model == "xm_transformer_unity_en-hk":
        out_audio = io3(audio)
    else:  # xm_transformer_unity_hk-en
        out_audio = io4(audio)
    return out_audio
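
# UI components: a dropdown to choose the model and a microphone recorder
# whose audio is handed to the model as a temporary filepath.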
model = gr.Dropdown(
    choices=["xm_transformer_unity_en-hk", "xm_transformer_unity_hk-en",
             "xm_transformer_s2ut_en-hk", "xm_transformer_s2ut_hk-en"],
    label="Model",
)
audio = gr.Audio(source="microphone", type="filepath", label="Input")
demo = gr.Interface(fn=inference, inputs=[audio, model], outputs=["audio"], examples=[
    ['audio1.wav', 'xm_transformer_unity_hk-en'],
    ['audio2.wav', 'xm_transformer_unity_hk-en'],
    ['audio3.wav', 'xm_transformer_unity_hk-en'],
    ['en_audio1.wav', 'xm_transformer_unity_en-hk'],
    ['en_audio2.wav', 'xm_transformer_unity_en-hk'],
])
demo.launch()