import subprocess
import sys

# Pin the Gradio version before importing it (Spaces runtime workaround)
subprocess.check_call([sys.executable, "-m", "pip", "install", "gradio==3.40.1"])

import gradio as gr
import numpy as np
import torch
from datasets import load_dataset
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor, pipeline

device = "cuda:0" if torch.cuda.is_available() else "cpu"

# load speech translation checkpoint
asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device)

# load text-to-speech checkpoint and speaker embeddings
processor = SpeechT5Processor.from_pretrained("tsobolev/speecht5_finetuned_voxpopuli_fi")
model = SpeechT5ForTextToSpeech.from_pretrained("tsobolev/speecht5_finetuned_voxpopuli_fi").to(device)
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)

embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(embeddings_dataset[7000]["xvector"]).unsqueeze(0)

# load English -> Finnish text translation checkpoint
en2fi_pipeline = pipeline("translation", model="Helsinki-NLP/opus-mt-en-fi")

print("gradio version is", gr.__version__)


def translate(audio):
    # Whisper translates the source speech into English text
    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "translate"})
    # translate the English text into Finnish
    fi_translation = en2fi_pipeline(outputs["text"])
    text = fi_translation[0]["translation_text"]
    # the fine-tuned TTS checkpoint does not handle ä/ö, so substitute them
    replacements = [
        ("ä", "ae"),
        ("ö", "oe"),
    ]
    for src, dst in replacements:
        text = text.replace(src, dst)
    print(text)
    return text


def synthesise(text):
    inputs = processor(text=text, return_tensors="pt")
    speech = model.generate_speech(inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder)
    return speech.cpu()


def speech_to_speech_translation(audio):
    translated_text = translate(audio)
    synthesised_speech = synthesise(translated_text)
    # convert the float waveform to 16-bit PCM for Gradio's numpy audio output
    synthesised_speech = (synthesised_speech.numpy() * 32767).astype(np.int16)
    return 16000, synthesised_speech


title = "Cascaded STST"
description = """
Demo for cascaded speech-to-speech translation (STST), mapping from source speech in any language supported by Whisper to target speech in Finnish. The demo uses:

* OpenAI's [Whisper Base](https://huggingface.co/openai/whisper-base) model for speech translation
* The University of Helsinki Language Technology en-fi model [Helsinki-NLP](https://huggingface.co/Helsinki-NLP/opus-mt-en-fi) for text translation
* Microsoft's [SpeechT5 TTS](https://huggingface.co/microsoft/speecht5_tts) model fine-tuned on a subset of the [VoxPopuli](https://huggingface.co/datasets/facebook/voxpopuli) dataset for text-to-speech
* Character replacements: ä => ae, ö => oe

![Cascaded STST](https://huggingface.co/datasets/huggingface-course/audio-course-images/resolve/main/s2st_cascaded.png "Diagram of cascaded speech to speech translation")
"""

demo = gr.Blocks()

mic_translate = gr.Interface(
    fn=speech_to_speech_translation,
    inputs=gr.Audio(source="microphone", type="filepath"),
    outputs=gr.Audio(label="Generated Speech", type="numpy"),
    title=title,
    description=description,
)

file_translate = gr.Interface(
    fn=speech_to_speech_translation,
    inputs=gr.Audio(source="upload", type="filepath"),
    outputs=gr.Audio(label="Generated Speech", type="numpy"),
    examples=[["./example.wav"]],
    title=title,
    description=description,
)

with demo:
    gr.TabbedInterface([mic_translate, file_translate], ["Microphone", "Audio File"])

demo.launch()