|
import gradio as gr |
|
import torch |
|
import soundfile as sf |
|
import spaces |
|
import os |
|
import numpy as np |
|
import re |
|
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan |
|
from speechbrain.pretrained import EncoderClassifier |
|
from datasets import load_dataset |
|
|
|
# Run inference on GPU when available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
|
|
|
def load_models_and_data():
    """Load all inference components plus one reference dataset row.

    Returns:
        Tuple of (model, processor, vocoder, speaker_model, example):
        the fine-tuned SpeechT5 TTS model, its text processor, the
        HiFi-GAN vocoder, the x-vector speaker encoder, and a single
        UzTTS dataset row used to build the default speaker embedding.
    """
    tts_checkpoint = "microsoft/speecht5_tts"
    processor = SpeechT5Processor.from_pretrained(tts_checkpoint)
    model = SpeechT5ForTextToSpeech.from_pretrained(
        "Beehzod/speecht5_finetuned_uz_customData2"
    ).to(device)
    vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)

    # x-vector encoder: turns raw audio into a fixed-size speaker embedding.
    xvect_source = "speechbrain/spkrec-xvect-voxceleb"
    speaker_model = EncoderClassifier.from_hparams(
        source=xvect_source,
        run_opts={"device": device},
        savedir=os.path.join("/tmp", xvect_source),
    )

    # A fixed row (index 304) supplies the reference voice for the demo.
    dataset = load_dataset("Beehzod/UzTTS_data", split="train")
    example = dataset[304]

    return model, processor, vocoder, speaker_model, example
|
|
|
# Load everything once at import time so every Gradio request reuses the models.
model, processor, vocoder, speaker_model, default_example = load_models_and_data()
|
|
|
def create_speaker_embedding(waveform):
    """Encode a raw waveform into a normalized x-vector speaker embedding.

    Args:
        waveform: 1-D audio samples (array-like) for a single utterance.

    Returns:
        A squeezed, L2-normalized speaker embedding tensor.
    """
    with torch.no_grad():
        # Add a batch dimension before encoding, then normalize along
        # the embedding axis.
        batch = torch.tensor(waveform).unsqueeze(0).to(device)
        embedding = speaker_model.encode_batch(batch)
        embedding = torch.nn.functional.normalize(embedding, dim=2)
    return embedding.squeeze()
|
|
|
def prepare_default_embedding(example):
    """Build the default speaker embedding from a dataset row's audio array."""
    return create_speaker_embedding(example["audio"]["array"])
|
|
|
# Default reference-voice embedding, computed once from the dataset example.
default_embedding = prepare_default_embedding(default_example)
|
|
|
# Cyrillic -> Latin transliteration pairs for Uzbek text. Applied one pair
# at a time (each key is a single character) after lowercasing, so ordering
# does not matter. The original table covered only 16 letters; the missing
# letters of the Uzbek Cyrillic alphabet below previously passed through
# untransliterated.
replacements = [
    ("а", "a"), ("б", "b"), ("в", "v"), ("г", "g"),
    ("д", "d"), ("е", "e"), ("л", "l"), ("м", "m"),
    ("о", "o"), ("р", "r"), ("с", "s"), ("т", "t"),
    ("у", "u"), ("х", "x"), ("ю", "yu"), ("ё", "yo"),
    # Previously-missing letters (Uzbek 1995 Latin alphabet equivalents):
    ("ж", "j"), ("з", "z"), ("и", "i"), ("й", "y"),
    ("к", "k"), ("н", "n"), ("п", "p"), ("ф", "f"),
    ("ц", "ts"), ("ч", "ch"), ("ш", "sh"), ("щ", "sh"),
    ("ъ", "'"), ("ь", ""), ("э", "e"), ("я", "ya"),
    ("ғ", "g'"), ("қ", "q"), ("ҳ", "h"), ("ў", "o'"),
]
|
|
|
|
|
# Uzbek names for digits, teens, tens and scale words, used by
# number_to_words() below.
# Fixed two typos in the original table: 16 used a Turkish dotless "ı"
# ("o'n oltı" -> "o'n olti", consistent with 6 "olti") and 19 was missing
# its apostrophe ("o'n toqqiz" -> "o'n to'qqiz", consistent with 9).
number_words = {
    0: "nol", 1: "bir", 2: "ikki", 3: "uch", 4: "to'rt", 5: "besh", 6: "olti", 7: "yetti", 8: "sakkiz", 9: "to'qqiz",
    10: "o'n", 11: "o'n bir", 12: "o'n ikki", 13: "o'n uch", 14: "o'n to'rt", 15: "o'n besh", 16: "o'n olti", 17: "o'n yetti",
    18: "o'n sakkiz", 19: "o'n to'qqiz", 20: "yigirma", 30: "o'ttiz", 40: "qirq", 50: "ellik", 60: "oltmish", 70: "yetmish",
    80: "sakson", 90: "to'qson", 100: "yuz", 1000: "ming", 1000000: "million"
}

def number_to_words(number):
    """Spell out a non-negative integer in Uzbek words.

    Args:
        number: Non-negative int. Values of one trillion (1e12) or more
            are returned unchanged as their decimal string.

    Returns:
        The Uzbek wording, e.g. 45 -> "qirq besh", 1000 -> "ming".
    """
    if number < 20:
        return number_words[number]
    elif number < 100:
        # Tens plus an optional unit digit.
        tens, unit = divmod(number, 10)
        return number_words[tens * 10] + (" " + number_words[unit] if unit else "")
    elif number < 1000:
        # Uzbek drops "bir" before "yuz" for exactly one hundred.
        hundreds, remainder = divmod(number, 100)
        return (number_words[hundreds] + " yuz" if hundreds > 1 else "yuz") + (" " + number_to_words(remainder) if remainder else "")
    elif number < 1000000:
        # Likewise "ming" alone means one thousand.
        thousands, remainder = divmod(number, 1000)
        return (number_to_words(thousands) + " ming" if thousands > 1 else "ming") + (" " + number_to_words(remainder) if remainder else "")
    elif number < 1000000000:
        millions, remainder = divmod(number, 1000000)
        return number_to_words(millions) + " million" + (" " + number_to_words(remainder) if remainder else "")
    elif number < 1000000000000:
        billions, remainder = divmod(number, 1000000000)
        return number_to_words(billions) + " milliard" + (" " + number_to_words(remainder) if remainder else "")
    else:
        # Out of supported range: fall back to the plain decimal string.
        return str(number)
|
|
|
|
|
def replace_numbers_with_words(text):
    """Replace every standalone integer in *text* with its Uzbek spelling."""
    def _spell_out(match):
        # The \b\d+\b pattern guarantees the match is a pure digit run.
        return number_to_words(int(match.group()))

    return re.sub(r'\b\d+\b', _spell_out, text)
|
|
|
def normalize_text(text):
    """Normalize raw input text for the TTS front-end.

    Lowercases, spells out integers as Uzbek words, transliterates Cyrillic
    letters to Latin, and strips punctuation. Note the final step also
    removes the apostrophes produced by the spelled-out numbers.
    """
    text = replace_numbers_with_words(text.lower())

    # Cyrillic -> Latin, one single-character pair at a time.
    for cyr, lat in replacements:
        text = text.replace(cyr, lat)

    # Drop everything that is neither a word character nor whitespace.
    return re.sub(r'[^\w\s]', '', text)
|
|
|
@spaces.GPU(duration=60)
def text_to_speech(text, audio_file=None):
    """Synthesize speech for *text* using the default speaker embedding.

    Args:
        text: Uzbek text to synthesize.
        audio_file: Accepted but ignored; kept for interface compatibility.

    Returns:
        A (sample_rate, waveform) tuple consumable by ``gr.Audio``.
    """
    cleaned = normalize_text(text)
    inputs = processor(text=cleaned, return_tensors="pt").to(device)

    # NOTE(review): the embedding derived from the fixed dataset example is
    # always used; audio_file is never read.
    embedding = default_embedding

    with torch.no_grad():
        waveform = model.generate_speech(
            inputs["input_ids"], embedding.unsqueeze(0), vocoder=vocoder
        )

    # 16000 is the output sample rate reported to Gradio.
    return (16000, waveform.cpu().numpy())
|
|
|
# Wire the synthesis function into a minimal text-in / audio-out web UI.
iface = gr.Interface(
    fn=text_to_speech,
    inputs=[gr.Textbox(label="Enter Uzbek text to convert to speech")],
    outputs=[gr.Audio(label="Generated Speech", type="numpy")],
    title="Uzbek SpeechT5 Text-to-Speech Demo",
    description="Enter Uzbek text, and listen to the generated speech.",
)

iface.launch(share=True)