File size: 5,115 Bytes
be530f1
 
 
ad49b31
 
 
 
 
 
 
be530f1
ad49b31
be530f1
ad49b31
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
be530f1
ad49b31
be530f1
ad49b31
 
 
 
 
 
be530f1
ad49b31
 
 
be530f1
ad49b31
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
be530f1
ad49b31
 
 
 
 
 
 
 
be530f1
 
ad49b31
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
import gradio as gr
import torch
import soundfile as sf
import spaces
import os
import numpy as np
import re
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
from speechbrain.pretrained import EncoderClassifier
from datasets import load_dataset

# Run on GPU when available; every model loaded below is moved to this device.
device = "cuda" if torch.cuda.is_available() else "cpu"

def load_models_and_data():
    """Load every pretrained component plus one reference dataset row.

    Returns:
        (model, processor, vocoder, speaker_model, example) where
        `example` is a dataset row used to build the default speaker embedding.
    """
    tts_checkpoint = "microsoft/speecht5_tts"
    tts_processor = SpeechT5Processor.from_pretrained(tts_checkpoint)
    tts_model = SpeechT5ForTextToSpeech.from_pretrained(
        "Beehzod/speecht5_finetuned_uz_customData2"
    ).to(device)
    hifigan_vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)

    # x-vector speaker encoder used to derive speaker embeddings from audio.
    xvector_source = "speechbrain/spkrec-xvect-voxceleb"
    xvector_encoder = EncoderClassifier.from_hparams(
        source=xvector_source,
        run_opts={"device": device},
        savedir=os.path.join("/tmp", xvector_source),
    )

    # A fixed row of the training dataset supplies the default voice.
    uz_dataset = load_dataset("Beehzod/UzTTS_data", split="train")
    reference_row = uz_dataset[304]

    return tts_model, tts_processor, hifigan_vocoder, xvector_encoder, reference_row

# Load everything once at import time so the Gradio callback reuses the same objects.
model, processor, vocoder, speaker_model, default_example = load_models_and_data()

def create_speaker_embedding(waveform):
    """Encode a 1-D waveform array into an L2-normalized x-vector speaker embedding."""
    batch = torch.tensor(waveform).unsqueeze(0).to(device)
    with torch.no_grad():
        embedding = speaker_model.encode_batch(batch)
        embedding = torch.nn.functional.normalize(embedding, dim=2)
    return embedding.squeeze()

def prepare_default_embedding(example):
    """Build the default speaker embedding from one dataset row's audio samples."""
    return create_speaker_embedding(example["audio"]["array"])

# Precomputed once at startup; text_to_speech uses this embedding for every request.
default_embedding = prepare_default_embedding(default_example)

# Uzbek Cyrillic -> Latin transliteration pairs applied by normalize_text().
# The original list covered only 16 letters, so common letters such as
# "и", "к", "н", "п", "ш", "ч" passed through untransliterated; this covers
# the full Uzbek Cyrillic alphabet. NOTE(review): the apostrophes produced
# here (o', g', ') are later stripped by normalize_text's punctuation pass.
replacements = [
    ("а", "a"), ("б", "b"), ("в", "v"), ("г", "g"),
    ("д", "d"), ("е", "e"), ("ж", "j"), ("з", "z"),
    ("и", "i"), ("й", "y"), ("к", "k"), ("л", "l"),
    ("м", "m"), ("н", "n"), ("о", "o"), ("п", "p"),
    ("р", "r"), ("с", "s"), ("т", "t"), ("у", "u"),
    ("ф", "f"), ("х", "x"), ("ц", "ts"), ("ч", "ch"),
    ("ш", "sh"), ("э", "e"), ("ю", "yu"), ("ё", "yo"),
    ("я", "ya"), ("ў", "o'"), ("қ", "q"), ("ғ", "g'"),
    ("ҳ", "h"), ("ъ", "'"),
]


# Uzbek names for the digits, teens, tens, and round bases used by
# number_to_words(). Fixes two typos in the original table:
# 16 was "o'n oltı" (Turkish dotless ı) and 19 was "o'n toqqiz"
# (inconsistent with 9 = "to'qqiz").
number_words = {
    0: "nol", 1: "bir", 2: "ikki", 3: "uch", 4: "to'rt", 5: "besh", 6: "olti", 7: "yetti", 8: "sakkiz", 9: "to'qqiz",
    10: "o'n", 11: "o'n bir", 12: "o'n ikki", 13: "o'n uch", 14: "o'n to'rt", 15: "o'n besh", 16: "o'n olti", 17: "o'n yetti",
    18: "o'n sakkiz", 19: "o'n to'qqiz", 20: "yigirma", 30: "o'ttiz", 40: "qirq", 50: "ellik", 60: "oltmish", 70: "yetmish",
    80: "sakson", 90: "to'qson", 100: "yuz", 1000: "ming", 1000000: "million"
}
def number_to_words(number):
    """Spell out a non-negative integer in Uzbek words.

    Numbers of 10**12 or more are returned unchanged as a digit string.
    """
    if number < 20:
        return number_words[number]
    if number < 100:
        tens_digit, ones = divmod(number, 10)
        head = number_words[tens_digit * 10]
        return f"{head} {number_words[ones]}" if ones else head
    if number < 1000:
        hundreds, rest = divmod(number, 100)
        # A leading "bir" is dropped: 100 -> "yuz", not "bir yuz".
        head = f"{number_words[hundreds]} yuz" if hundreds > 1 else "yuz"
        return f"{head} {number_to_words(rest)}" if rest else head
    if number < 1000000:
        thousands, rest = divmod(number, 1000)
        # Same rule for thousands: 1000 -> "ming".
        head = f"{number_to_words(thousands)} ming" if thousands > 1 else "ming"
        return f"{head} {number_to_words(rest)}" if rest else head
    if number < 1000000000:
        millions, rest = divmod(number, 1000000)
        head = f"{number_to_words(millions)} million"
        return f"{head} {number_to_words(rest)}" if rest else head
    if number < 1000000000000:
        billions, rest = divmod(number, 1000000000)
        head = f"{number_to_words(billions)} milliard"
        return f"{head} {number_to_words(rest)}" if rest else head
    return str(number)


def replace_numbers_with_words(text):
    """Replace every standalone run of digits in *text* with its Uzbek spelling."""
    return re.sub(r'\b\d+\b', lambda m: number_to_words(int(m.group())), text)

def normalize_text(text):
    """Prepare raw input text for the TTS tokenizer.

    Pipeline: lowercase -> spell out digit runs -> transliterate Cyrillic
    letters to Latin -> strip punctuation.
    """
    normalized = replace_numbers_with_words(text.lower())

    for cyrillic, latin in replacements:
        normalized = normalized.replace(cyrillic, latin)

    # Drop everything that is not a word character or whitespace. NOTE: this
    # also removes apostrophes from the spelled-out numbers (e.g. "to'rt").
    return re.sub(r'[^\w\s]', '', normalized)

@spaces.GPU(duration=60)
def text_to_speech(text, audio_file=None):
    """Synthesize speech for *text* with the default speaker voice.

    audio_file is accepted for interface compatibility but is currently
    ignored — the precomputed default_embedding is used for every call.
    Returns a (sample_rate, samples) tuple consumable by gr.Audio;
    16000 Hz matches the SpeechT5/HiFi-GAN output rate.
    """
    cleaned = normalize_text(text)
    model_inputs = processor(text=cleaned, return_tensors="pt").to(device)

    with torch.no_grad():
        waveform = model.generate_speech(
            model_inputs["input_ids"],
            default_embedding.unsqueeze(0),
            vocoder=vocoder,
        )

    return (16000, waveform.cpu().numpy())
    
# Gradio UI: a single textbox in, synthesized 16 kHz audio out.
iface = gr.Interface(
    fn=text_to_speech,
    inputs=[
        gr.Textbox(label="Enter Uzbek text to convert to speech")
    ],
    outputs=[
        gr.Audio(label="Generated Speech", type="numpy")
    ],
    title="Uzbek SpeechT5 Text-to-Speech Demo",
    description="Enter Uzbek text, and listen to the generated speech."
)

# share=True publishes a temporary public URL in addition to the local server.
iface.launch(share=True)