"Задание 3"
import gradio as gr
import numpy as np
import torch

# Backend dependencies, not referenced directly below: espeakng/phonemizer back the
# phoneme-level ASR tokenizer, sentencepiece backs the Marian translation tokenizers.
import espeakng
import phonemizer
import sentencepiece

from datasets import load_dataset
from transformers import pipeline, MarianMTModel, MarianTokenizer, VitsModel, VitsTokenizer

device = "cuda:0" if torch.cuda.is_available() else "cpu"
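# Multilingual wav2vec2 checkpoint fine-tuned on Common Voice with phonetic (espeak) labels,
# used as the speech-recognition step of the cascade.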
model_wav2vec = 'facebook/wav2vec2-lv-60-espeak-cv-ft'
asr_pipe = pipeline("automatic-speech-recognition", model=model_wav2vec, device=device)
# speech to text
def translate_audio(audio):
    # wav2vec2 is a CTC model, so no generation arguments
    # (max_new_tokens, task="translate") are needed here
    outputs = asr_pipe(audio)
    return outputs["text"]
# translation
def translate_text(text, from_language, target_language):  # 'mul' -> 'en' for to-English, 'en' -> 'ru' for to-Russian
    model_name = f'Helsinki-NLP/opus-mt-{from_language}-{target_language}'
    tokenizer = MarianTokenizer.from_pretrained(model_name)
    model = MarianMTModel.from_pretrained(model_name)
    inputs = tokenizer.encode(text, return_tensors="pt")
    outputs = model.generate(inputs, num_beams=4, max_length=50, early_stopping=True)
    translated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return translated_text
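# In this app the function is called twice in a row (see synthesise below): any-language -> English
# with Helsinki-NLP/opus-mt-mul-en, then English -> Russian with Helsinki-NLP/opus-mt-en-ru.
# Note that the Marian model and tokenizer are reloaded on every call; caching them in
# module-level variables would make repeated requests noticeably faster.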
# load the text-to-speech checkpoint (VITS-based MMS model for Russian)
#model = pipeline("text-to-speech", model="voxxer/speecht5_finetuned_commonvoice_ru_translit")
model = VitsModel.from_pretrained("facebook/mms-tts-rus")
tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-rus")
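# MMS-TTS (VITS) generates waveforms at a 16 kHz sampling rate, which is the rate
# returned to Gradio by speech_to_speech_translation below.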
def synthesise(text):
    # two-hop translation: source language -> English -> Russian
    translated_text = translate_text(text, 'mul', 'en')
    translated_text = translate_text(translated_text, 'en', 'ru')
    inputs = tokenizer(translated_text, return_tensors="pt")
    input_ids = inputs["input_ids"]
    with torch.no_grad():
        outputs = model(input_ids)
    speech = outputs["waveform"]
    return speech.cpu()
def speech_to_speech_translation(audio):
    text_from_audio = translate_audio(audio)
    print(text_from_audio)
    synthesised_speech = synthesise(text_from_audio)
    # scale the float waveform to 16-bit PCM for Gradio's "numpy" audio output
    synthesised_speech = (synthesised_speech.numpy() * 32767).astype(np.int16)
    return 16000, synthesised_speech[0]
title = "Cascaded STST"
description = """
* В начале происходит распознавание речи с помощью модели facebook/wav2vec2-lv-60-espeak-cv-ft и на выходе получается текст на любом из 60 языков.
* Затем полученный текст переводится сначала на английский с помощью Helsinki-NLP/opus-mt-mul-en, а потом на русский с помощью Helsinki-NLP/opus-mt-en-ru
* На последнем шаге полученный текст озвучивается с помощью fine-tune-говой версии microsoft/speecht5_tts - voxxer/speecht5_finetuned_commonvoice_ru_translit
Demo for cascaded speech-to-speech translation (STST), mapping from source speech in any language to target speech in Russian. Demo uses facebook/mms-tts-rus model for text-to-speech:
![Cascaded STST](https://huggingface.co/datasets/huggingface-course/audio-course-images/resolve/main/s2st_cascaded.png "Diagram of cascaded speech to speech translation")
"""
demo = gr.Blocks()
mic_translate = gr.Interface(
fn=speech_to_speech_translation,
inputs=gr.Audio(source="microphone", type="filepath"),
outputs=gr.Audio(label="Generated Speech", type="numpy"),
title=title,
description=description,
)
file_translate = gr.Interface(
fn=speech_to_speech_translation,
inputs=gr.Audio(source="upload", type="filepath"),
outputs=gr.Audio(label="Generated Speech", type="numpy"),
examples=[["./example.wav"]],
title=title,
description=description,
)
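# The upload tab's example assumes an example.wav file is present in the Space repository.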
with demo:
    gr.TabbedInterface([mic_translate, file_translate], ["Microphone", "Audio File"])

demo.launch()