# app.py — Ukrainian text-to-speech Gradio demo (Hugging Face Space by Oysiyl).
import os
from typing import Text
import gradio as gr
import soundfile as sf
from transformers import pipeline
import numpy as np
import torch
import re
from speechbrain.pretrained import EncoderClassifier
def create_speaker_embedding(speaker_model, waveform: np.ndarray,
                             device=None, dtype=None) -> torch.Tensor:
    """Encode a reference waveform into a normalized speaker embedding.

    Args:
        speaker_model: model exposing ``encode_batch(tensor) -> tensor``
            (e.g. a SpeechBrain ``EncoderClassifier`` x-vector model).
        waveform: mono audio samples as a 1-D float array.
        device: target device for the returned tensor; defaults to CUDA when
            available, else CPU (same rule as the module-level ``device``).
        dtype: target dtype for the returned tensor; defaults to bf16/fp16 on
            CUDA and float32 on CPU (same rule as the module-level ``dtype``).

    Returns:
        A ``torch.Tensor`` of shape ``(1, embedding_dim)`` on ``device`` with
        ``dtype`` — suitable as SpeechT5 ``speaker_embeddings``.  (The
        original annotation claimed ``np.ndarray``; the value returned has
        always been a tensor.)
    """
    # Defaults replicate the module-level globals so existing call sites,
    # which pass neither argument, behave exactly as before.
    if device is None:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if dtype is None:
        if device.type == 'cuda':
            dtype = torch.bfloat16 if torch.cuda.get_device_capability()[0] == 8 else torch.float16
        else:
            dtype = torch.float32
    with torch.no_grad():
        embeddings = speaker_model.encode_batch(torch.tensor(waveform))
        # L2-normalize along the feature axis before collapsing batch dims.
        embeddings = torch.nn.functional.normalize(embeddings, dim=2)
        if device.type == 'cuda':
            embeddings = embeddings.squeeze().cpu().numpy()
        else:
            embeddings = embeddings.squeeze().numpy()
        # Rebuild on the target device/dtype with a leading batch dim of 1.
        embeddings = torch.tensor(embeddings, dtype=dtype).unsqueeze(0).to(device)
    return embeddings
def remove_special_characters_s(text: Text) -> Text:
    """Normalize Ukrainian text for TTS: strip punctuation noise, lowercase.

    Removes typographic punctuation the model was not trained on,
    canonicalizes apostrophe look-alikes to a plain ASCII ``'``, maps the
    non-Ukrainian ``ы`` to ``и``, and lowercases the result.
    """
    # Same character set as before, but with no backslash escapes: the
    # original used sequences like '\…' and '\–' inside a non-raw string,
    # which are invalid escapes and warn on modern Python.  '-' is listed
    # first so it needs no escaping inside the class; the class ends with
    # the combining acute accent U+0301.
    chars_to_remove_regex = "[-…–\"“%‘”�»«„`'́]"
    text = re.sub(chars_to_remove_regex, '', text)
    # Unify apostrophe look-alikes to ASCII (done after removal, so these
    # survive while a literal ASCII apostrophe in the input is stripped —
    # matching the original order of operations).
    text = re.sub("՚", "'", text)
    text = re.sub("’", "'", text)
    # 'ы' does not exist in Ukrainian; the closest grapheme is 'и'.
    text = re.sub(r'ы', 'и', text)
    text = text.lower()
    return text
def cyrillic_to_latin(text: Text) -> Text:
    """Transliterate lowercase Ukrainian Cyrillic letters to Latin.

    Only lowercase letters are mapped; any character without a mapping
    (uppercase, Latin, digits, punctuation) passes through unchanged.
    """
    # str.maketrans accepts multi-character replacement strings, so the
    # whole transliteration is one table-driven pass.  No replacement
    # output contains a Cyrillic source letter, so this is equivalent to
    # applying the substitutions sequentially.
    table = str.maketrans({
        'а': 'a', 'б': 'b', 'в': 'v', 'г': 'h', 'д': 'd', 'е': 'e',
        'ж': 'zh', 'з': 'z', 'и': 'y', 'й': 'j', 'к': 'k', 'л': 'l',
        'м': 'm', 'н': 'n', 'о': 'o', 'п': 'p', 'р': 'r', 'с': 's',
        'т': 't', 'у': 'u', 'ф': 'f', 'х': 'h', 'ц': 'ts', 'ч': 'ch',
        'ш': 'sh', 'щ': 'sch', 'ь': "'", 'ю': 'ju', 'я': 'ja',
        'є': 'je', 'і': 'i', 'ї': 'ji', 'ґ': 'g',
    })
    return text.translate(table)
# Select the compute device once at import time; these module-level globals
# are read by create_speaker_embedding() above.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if torch.cuda.is_available():
    # Compute capability 8.x has native bfloat16; older GPUs fall back to fp16.
    dtype = torch.bfloat16 if torch.cuda.get_device_capability()[0] == 8 else torch.float16
else:
    dtype = torch.float32
# SpeechBrain x-vector model used to derive the speaker embedding that
# conditions the SpeechT5 voice.  Cached under /tmp.
spk_model_name = "speechbrain/spkrec-xvect-voxceleb"
speaker_model = EncoderClassifier.from_hparams(
    source=spk_model_name,
    run_opts={"device": device},
    savedir=os.path.join("/tmp", spk_model_name)
)
# Reference recording of the target speaker; its embedding is computed once
# here and reused for every synthesis request.
waveform, samplerate = sf.read("files/speaker.wav")
speaker_embeddings = create_speaker_embedding(speaker_model, waveform)
# NOTE(review): this pipeline performs text-to-speech, despite the variable
# being named `transcriber`.
transcriber = pipeline("text-to-speech", model="Oysiyl/speecht5_tts_common_voice_uk")
def transcribe(text: Text) -> tuple[int, np.ndarray]:
    """Synthesize Ukrainian speech for *text*.

    The input is cleaned (typographic punctuation removed, lowercased) and
    transliterated to the Latin spelling the TTS model expects, then passed
    through the module-level SpeechT5 pipeline with the fixed speaker
    embedding computed at import time.

    Returns:
        ``(sampling_rate, audio)`` — the tuple format Gradio's audio
        output component accepts.
    """
    # Original annotation was ``tuple((int, np.ndarray))``, which builds a
    # tuple *instance* at def time rather than spelling a type; fixed to the
    # PEP 585 generic form.
    text = remove_special_characters_s(text)
    text = cyrillic_to_latin(text)
    # `transcriber` and `speaker_embeddings` are module-level globals.
    out = transcriber(text, forward_params={"speaker_embeddings": speaker_embeddings})
    audio, sr = out["audio"], out["sampling_rate"]
    return sr, audio
# Wire the synthesis function into a simple Gradio UI: one text box in,
# one audio player out, with two cached Ukrainian example sentences.
example_rows = [
    ["Держава-агресор Росія закуповує комунікаційне обладнання, зокрема супутникові інтернет-термінали Starlink, для використання у війні в арабських країнах"],
    ["Доброго вечора, ми з України!"],
]
demo = gr.Interface(
    fn=transcribe,
    inputs=gr.Textbox(),
    outputs="audio",
    title="Text to Speech for Ukrainian language demo",
    description="Click on the example below or type text!",
    examples=example_rows,
    cache_examples=True,
)
demo.launch()