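# Hugging Face Space: Gradio demo for fairseq S2S speech-to-speech translation,
# backed by the hosted facebook/xm_transformer_600m-* models.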
import os
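# Install the Gradio version this demo was written against before importing it.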
os.system("pip install gradio==2.8.0b2")
import gradio as gr
title = "fairseq S2S"
description = "Gradio demo for fairseq S2S speech-to-speech translation models. To use it, upload your own audio or click one of the examples to load it. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2010.05171' target='_blank'>fairseq S2T: Fast Speech-to-Text Modeling with fairseq</a> | <a href='https://github.com/pytorch/fairseq/tree/main/examples/speech_to_text' target='_blank'>Github Repo</a></p>"
examples = [
    ["common_voice_es_en.flac", "xm_transformer_600m-es_en-multi_domain"],
    ["common_voice_ru_18945535.flac", "xm_transformer_600m-ru_en-multi_domain"],
    ["common_voice_fr_19731305.mp3", "xm_transformer_600m-fr_en-multi_domain"],
    ["common_voice_en_ru.mp3", "xm_transformer_600m-en_ru-multi_domain"],
    ["common_voice_en_es.mp3", "xm_transformer_600m-en_es-multi_domain"],
    ["common_voice_en_ar.mp3", "xm_transformer_600m-en_ar-multi_domain"],
    ["common_voice_en_tr.mp3", "xm_transformer_600m-en_tr-multi_domain"],
    ["common_voice_en_fr.mp3", "xm_transformer_600m-en_fr-multi_domain"],
    ["common_voice_en_vi.mp3", "xm_transformer_600m-en_vi-multi_domain"],
]
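# Load each translation model from the Hugging Face Hub as a callable Gradio interface.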
io1 = gr.Interface.load("huggingface/facebook/xm_transformer_600m-es_en-multi_domain")
io2 = gr.Interface.load("huggingface/facebook/xm_transformer_600m-ru_en-multi_domain")
io3 = gr.Interface.load("huggingface/facebook/xm_transformer_600m-en_ru-multi_domain")
io4 = gr.Interface.load("huggingface/facebook/xm_transformer_600m-en_es-multi_domain")
io5 = gr.Interface.load("huggingface/facebook/xm_transformer_600m-en_zh-multi_domain")
io6 = gr.Interface.load("huggingface/facebook/xm_transformer_600m-fr_en-multi_domain")
io7 = gr.Interface.load("huggingface/facebook/xm_transformer_600m-en_ar-multi_domain")
io8 = gr.Interface.load("huggingface/facebook/xm_transformer_600m-en_tr-multi_domain")
io9 = gr.Interface.load("huggingface/facebook/xm_transformer_600m-en_fr-multi_domain")
io10 = gr.Interface.load("huggingface/facebook/xm_transformer_600m-en_vi-multi_domain")
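# Dispatch the input audio to the interface that matches the selected model name.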
def inference(audio, model):
    if model == "xm_transformer_600m-es_en-multi_domain":
        out_audio = io1(audio)
    elif model == "xm_transformer_600m-ru_en-multi_domain":
        out_audio = io2(audio)
    elif model == "xm_transformer_600m-en_ru-multi_domain":
        out_audio = io3(audio)
    elif model == "xm_transformer_600m-en_es-multi_domain":
        out_audio = io4(audio)
    elif model == "xm_transformer_600m-en_zh-multi_domain":
        out_audio = io5(audio)
    elif model == "xm_transformer_600m-fr_en-multi_domain":
        out_audio = io6(audio)
    elif model == "xm_transformer_600m-en_ar-multi_domain":
        out_audio = io7(audio)
    elif model == "xm_transformer_600m-en_tr-multi_domain":
        out_audio = io8(audio)
    elif model == "xm_transformer_600m-en_fr-multi_domain":
        out_audio = io9(audio)
    else:
        # Any other choice (en_vi) falls through to the en-vi model.
        out_audio = io10(audio)
    return out_audio
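# Build the demo UI: audio input plus a model dropdown in, translated speech out.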
gr.Interface(
    inference,
    [gr.inputs.Audio(label="Input", type="filepath"),
     gr.inputs.Dropdown(
         choices=["xm_transformer_600m-es_en-multi_domain", "xm_transformer_600m-ru_en-multi_domain",
                  "xm_transformer_600m-en_ru-multi_domain", "xm_transformer_600m-en_es-multi_domain",
                  "xm_transformer_600m-en_zh-multi_domain", "xm_transformer_600m-fr_en-multi_domain",
                  "xm_transformer_600m-en_ar-multi_domain", "xm_transformer_600m-en_tr-multi_domain",
                  "xm_transformer_600m-en_fr-multi_domain", "xm_transformer_600m-en_vi-multi_domain"],
         type="value", default="xm_transformer_600m-es_en-multi_domain", label="model")],
    gr.outputs.Audio(label="Output"),
    article=article,
    title=title,
    examples=examples,
    description=description,
).launch(enable_queue=True, cache_examples=False)