import gradio as gr
import torch

from transformers import pipeline

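# Prefer CUDA when a GPU is visible; otherwise fall back to CPU.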
def _grab_best_device(use_gpu=True):
    if torch.cuda.device_count() > 0 and use_gpu:
        device = "cuda"
    else:
        device = "cpu"
    return device

device = _grab_best_device()

HUB_PATH = "ylacombe/vits_vctk_welsh_male"


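# Cache the loaded pipeline alongside its checkpoint name so the demo only
# reloads when the user selects a different model.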
pipe_dict = {
    "current_model": HUB_PATH,
    "pipe": pipeline("text-to-speech", model=HUB_PATH, device=device),
}

title = "# 🐶 VITS"

description = """
Explore [VITS](https://huggingface.co/docs/transformers/model_doc/vits) text-to-speech checkpoints fine-tuned on different accents and languages.
Pick a model, enter some text, and compare the generated audio across speakers.
"""

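# Gradio needs a fixed number of output components, so cap how many audio
# slots the demo renders regardless of the model's speaker count.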
max_speakers = 15


# Inference
def generate_audio(text, model_id):
    # Swap the cached pipeline only when the user picks a different model.
    if pipe_dict["current_model"] != model_id:
        gr.Warning("Model has changed - loading new model")
        pipe_dict["pipe"] = pipeline("text-to-speech", model=model_id, device=device)
        pipe_dict["current_model"] = model_id

    num_speakers = pipe_dict["pipe"].model.config.num_speakers

    out = []
    for i in range(min(num_speakers, max_speakers)):
        forward_params = {"speaker_id": i}
        output = pipe_dict["pipe"](text, forward_params=forward_params)

        audio = gr.Audio(
            value=(output["sampling_rate"], output["audio"].squeeze()),
            type="numpy",
            autoplay=False,
            label=f"Generated Audio - speaker {i}",
            show_label=True,
            visible=True,
        )
        out.append(audio)

    # Pad with hidden components so the returned list always matches the
    # number of gr.Audio slots; len(out) keeps the count correct even when
    # the model has more speakers than max_speakers.
    out.extend([gr.Audio(visible=False)] * (max_speakers - len(out)))
    return out


# Gradio Blocks demo
with gr.Blocks() as demo_blocks:
    gr.Markdown(title)
    gr.Markdown(description)
    with gr.Row():
        with gr.Column():
            inp_text = gr.Textbox(label="Input Text", info="What would you like the model to synthesise?")
            btn = gr.Button("Generate Audio!")
            model_id = gr.Dropdown(
                [
                    "ylacombe/vits_vctk_welsh_male",
                    "ylacombe/vits_vctk_welsh_female",
                    "ylacombe/vits_ljs_welsh_male",
                    "ylacombe/vits_ljs_welsh_female",
                    "ylacombe/vits_vctk_irish_male",
                    "ylacombe/vits_vctk_scottish_female",
                    "ylacombe/vits_ljs_irish_male",
                    "ylacombe/vits_ljs_scottish_female",
                    "ylacombe/mms-tam-finetuned-multispeaker",
                    "ylacombe/mms-spa-finetuned-chilean-multispeaker",
                ],
                value="ylacombe/vits_vctk_welsh_male",
                label="Model",
                info="Model you want to test",
            )
    
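        # Create every possible audio slot up front; generate_audio fills the
        # first few and hides the rest via the visible flag.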
        with gr.Column():
            outputs = []
            for i in range(max_speakers):
                out_audio = gr.Audio(type="numpy", autoplay=False, label=f"Generated Audio - speaker {i}", show_label=True, visible=False)
                outputs.append(out_audio)
    
    btn.click(generate_audio, [inp_text, model_id], outputs)
    

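# queue() turns on Gradio's request queue so concurrent generations are processed in order.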
demo_blocks.queue().launch()