Upload test version
- README.md +1 -1
- app.py +101 -0
- packages.txt +2 -0
- requirements.txt +1 -0
README.md
CHANGED
@@ -1,6 +1,6 @@
 ---
 title: CoquiTTS
-emoji:
+emoji: 🐸
 colorFrom: green
 colorTo: red
 sdk: gradio
app.py
ADDED
@@ -0,0 +1,101 @@
+import tempfile
+from typing import Optional
+from TTS.config import load_config
+import gradio as gr
+import numpy as np
+from TTS.utils.manage import ModelManager
+from TTS.utils.synthesizer import Synthesizer
+
+
+MODELS = {}
+SPEAKERS = {}
+MAX_TXT_LEN = 100
+
+
+manager = ModelManager()
+MODEL_NAMES = manager.list_tts_models()
+
+# filter out multi-speaker models and slow wavegrad vocoders
+filters = ["vctk", "your_tts", "ek1"]
+MODEL_NAMES = [model_name for model_name in MODEL_NAMES if not any(f in model_name for f in filters)]
+
+# reorder models: English first, with a preferred default at the top
+EN = [el for el in MODEL_NAMES if "/en/" in el]
+OTHER = [el for el in MODEL_NAMES if "/en/" not in el]
+EN[0], EN[5] = EN[5], EN[0]
+MODEL_NAMES = EN + OTHER
+
+print(MODEL_NAMES)
+
+
+def tts(text: str, model_name: str):
+    if len(text) > MAX_TXT_LEN:
+        text = text[:MAX_TXT_LEN]
+        print(f"Input text was cut off since it went over the {MAX_TXT_LEN} character limit.")
+    print(text, model_name)
+    # download model
+    model_path, config_path, model_item = manager.download_model(model_name)
+    vocoder_name: Optional[str] = model_item["default_vocoder"]
+    # download vocoder
+    vocoder_path = None
+    vocoder_config_path = None
+    if vocoder_name is not None:
+        vocoder_path, vocoder_config_path, _ = manager.download_model(vocoder_name)
+    # init synthesizer
+    synthesizer = Synthesizer(
+        model_path, config_path, None, None, vocoder_path, vocoder_config_path,
+    )
+    # synthesize
+    if synthesizer is None:
+        raise NameError("model not found")
+    wavs = synthesizer.tts(text, None)
+    # return output
+    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
+        synthesizer.save_wav(wavs, fp)
+        return fp.name
+
+
+title = """<h1 align="center">🐸💬 CoquiTTS Playground </h1>"""
+
+with gr.Blocks(analytics_enabled=False) as demo:
+    with gr.Row():
+        with gr.Column():
+            gr.Markdown(
+                """
+                ## <img src="https://huggingface.co/spaces/proxectonos/README/resolve/main/title-card.png" width="100%" style="border-radius: 0.75rem;">
+                """
+            )
+            gr.Markdown(
+                """
+                <br/>
+                💻 This space showcases some of the TTS models developed by the **[Proxecto Nós](https://huggingface.co/proxectonos)**.
+                <br/>
+                """
+            )
+
+    with gr.Row():
+        with gr.Column():
+            input_text = gr.inputs.Textbox(
+                label="Input Text",
+                default="This sentence has been generated by a speech synthesis system.",
+            )
+            model_select = gr.inputs.Dropdown(
+                label="Pick Model: tts_models/<language>/<dataset>/<model_name>",
+                choices=MODEL_NAMES,
+                default="tts_models/en/jenny/jenny"
+            )
+            tts_button = gr.Button("Send", elem_id="send-btn", visible=True)
+
+        with gr.Column():
+            output_audio = gr.outputs.Audio(label="Output", type="filepath")
+
+    tts_button.click(
+        tts,
+        inputs=[
+            input_text,
+            model_select,
+        ],
+        outputs=[output_audio],
+    )
+
+demo.queue(concurrency_count=16).launch(debug=True)
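For reference, the tts() function above can be exercised outside Gradio with the same Coqui TTS calls it uses (ModelManager.download_model, Synthesizer, Synthesizer.tts, Synthesizer.save_wav). The sketch below is not part of this commit; the model name and output path are illustrative assumptions.

# standalone_tts.py - minimal sketch of the synthesis path that app.py wires into Gradio
# assumes the TTS dev install from requirements.txt; model name and output path are illustrative
from TTS.utils.manage import ModelManager
from TTS.utils.synthesizer import Synthesizer

manager = ModelManager()

# download the acoustic model and, if it declares one, its default vocoder
model_path, config_path, model_item = manager.download_model("tts_models/en/ljspeech/tacotron2-DDC")
vocoder_path = vocoder_config_path = None
if model_item["default_vocoder"] is not None:
    vocoder_path, vocoder_config_path, _ = manager.download_model(model_item["default_vocoder"])

# build the synthesizer and render one sentence to a wav file
synthesizer = Synthesizer(model_path, config_path, None, None, vocoder_path, vocoder_config_path)
wav = synthesizer.tts("This sentence has been generated by a speech synthesis system.", None)
synthesizer.save_wav(wav, "output.wav")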
packages.txt
ADDED
@@ -0,0 +1,2 @@
+libsndfile1
+espeak-ng
requirements.txt
ADDED
@@ -0,0 +1 @@
+git+https://github.com/coqui-ai/TTS@dev#egg=TTS
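Assuming libsndfile1 and espeak-ng are installed at the system level and the TTS dev branch is installed from requirements.txt, a quick sanity check is to list the model catalogue that app.py filters and reorders, using the same call it makes at startup:

# prints the available model IDs, e.g. "tts_models/en/ljspeech/tacotron2-DDC"
from TTS.utils.manage import ModelManager
print(ModelManager().list_tts_models())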