ZoniaChatbot committed: Upload 6 files

Files changed:
- Dockerfile +56 -0
- README.md +5 -7
- app.py +219 -0
- gitattributes +35 -0
- packages.txt +3 -0
- requirements.txt +5 -0
Dockerfile
ADDED
@@ -0,0 +1,56 @@
+FROM ubuntu:22.04
+
+# Install Python and necessary packages
+RUN apt-get update && apt-get install -y python3.10 python3-pip
+
+# Create a user and necessary directories
+RUN useradd -m -u 1001 user && mkdir -p /home/user/app/cache
+
+RUN apt-get install -y git git-lfs ffmpeg libsm6 libxext6 cmake rsync libgl1-mesa-glx
+RUN git lfs install
+
+# Ensure the cache directory is writable by the user
+RUN chown -R user:user /home/user/app/cache && chmod -R 777 /home/user/app/cache
+
+# Install additional packages from packages.txt
+RUN --mount=target=/tmp/packages.txt,source=packages.txt apt-get update && \
+    xargs -r -a /tmp/packages.txt apt-get install -y && rm -rf /var/lib/apt/lists/*
+
+# Install Python requirements
+RUN --mount=target=/tmp/requirements.txt,source=requirements.txt pip install --no-cache-dir -r /tmp/requirements.txt
+
+WORKDIR /home/user/app
+
+# Install specific versions of pip and other packages
+RUN pip install --no-cache-dir pip==22.3.1 && \
+    pip install --no-cache-dir datasets "huggingface-hub>=0.19" "hf-transfer>=0.1.4" "protobuf<4" "click<8.1" "pydantic~=1.0"
+RUN pip freeze > /tmp/freeze.txt
+
+# Install Gradio and other dependencies
+RUN pip install --no-cache-dir gradio[oauth]==4.38.1 "uvicorn>=0.14.0" spaces
+
+# Copy application files and set correct ownership
+COPY --link --chown=1001:1001 ./ /home/user/app
+
+#EXPOSE 7860
+
+# Moved up to where the user is added
+#RUN mkdir cache
+
+# Set environment variables
+#ENV TRANSFORMERS_CACHE=/home/user/app/cache (deprecated)
+ENV HF_HOME=/home/user/app/cache
+ENV GRADIO_SERVER_NAME="0.0.0.0"
+ENV GRADIO_SERVER_PORT=7860
+
+# Switch to the user
+USER user
+
+# Copy the app file
+COPY app.py .
+
+# Debug: Check permissions of cache directory
+RUN ls -l /home/user/app && ls -l /home/user/app/cache
+
+# Run the app
+CMD ["python3", "app.py"]
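The cache steps above matter because the container runs as the non-root `user`, so Hub downloads must land somewhere that user can write; `ENV HF_HOME=/home/user/app/cache` points the Hugging Face cache there. A minimal sketch of checking that behaviour locally (the checkpoint name is just one of the models app.py loads, and `snapshot_download` is assumed available since the image installs `huggingface-hub>=0.19`):

```py
import os

# Assumption for a local check: mirror the cache location the Dockerfile sets via ENV HF_HOME.
# Set it before importing huggingface_hub, since the cache path is resolved at import time.
os.environ["HF_HOME"] = "/home/user/app/cache"

from huggingface_hub import snapshot_download

# Any checkpoint used by app.py works here; files land under $HF_HOME/hub.
local_path = snapshot_download("facebook/mms-tts-spa")
print(local_path)
```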
README.md
CHANGED
@@ -1,13 +1,11 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
-sdk:
-sdk_version: 5.13.1
+title: Explore MMS Finetuning
+emoji: 🌍
+colorFrom: blue
+colorTo: red
+sdk: docker
 app_file: app.py
 pinned: false
-license: cc-by-nc-4.0
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,219 @@
+import torch
+
+from transformers import pipeline
+
+import numpy as np
+import gradio as gr
+
+def _grab_best_device(use_gpu=True):
+    if torch.cuda.device_count() > 0 and use_gpu:
+        device = 0  # "cuda"
+    else:
+        device = -1  # "cpu"
+    # device = 0 if torch.cuda.is_available() else -1
+
+    return device
+
+device = _grab_best_device()
+
+default_model_per_language = {
+    "spanish": "facebook/mms-tts-spa",
+    "tamil": "facebook/mms-tts-tam",
+    "gujarati": "facebook/mms-tts-guj",
+    "marathi": "facebook/mms-tts-mar",
+    # "english": "kakao-enterprise/vits-ljs",
+    "english": "facebook/mms-tts-eng",
+}
+
+models_per_language = {
+    "english": [
+        "ylacombe/vits_ljs_midlands_male_monospeaker",
+    ],
+    "spanish": [
+        "ylacombe/mms-spa-finetuned-chilean-monospeaker",
+    ],
+    "tamil": [
+        "ylacombe/mms-tam-finetuned-monospeaker",
+    ],
+    "gujarati": ["ylacombe/mms-guj-finetuned-monospeaker"],
+    "marathi": ["ylacombe/mms-mar-finetuned-monospeaker"]
+}
+
+HUB_PATH = "ylacombe/vits_ljs_midlands_male_monospeaker"
+
+
+pipe_dict = {
+    "current_model": "ylacombe/vits_ljs_midlands_male_monospeaker",
+    "pipe": pipeline("text-to-speech", model=HUB_PATH, device=device),
+    "original_pipe": pipeline("text-to-speech", model=default_model_per_language["english"], device=device),
+    "language": "english",
+}
+
+title = """
+# Explore MMS finetuning
+## Or how to access truly multilingual TTS
+
+Massively Multilingual Speech (MMS) models are light-weight, low-latency TTS models based on the [VITS architecture](https://huggingface.co/docs/transformers/model_doc/vits).
+
+Meta's [MMS](https://arxiv.org/abs/2305.13516) project aims to provide speech technology across a diverse range of languages. You can find more details about the supported languages and their ISO 639-3 codes in the [MMS Language Coverage Overview](https://dl.fbaipublicfiles.com/mms/misc/language_coverage_mms.html),
+and see all MMS-TTS checkpoints on the Hugging Face Hub: [facebook/mms-tts](https://huggingface.co/models?sort=trending&search=facebook%2Fmms-tts).
+
+Coupled with the right data and the right training recipe, you can get an excellent finetuned version of every MMS checkpoint in **20 minutes** with as little as **80 to 150 samples**.
+
+Training recipe available in this [GitHub repository](https://github.com/ylacombe/finetune-hf-vits)!
+"""
+
+max_speakers = 15
+
+
+# Inference
+def generate_audio(text, model_id, language):
+
+    if pipe_dict["language"] != language:
+        gr.Warning(f"Language has changed - loading new default model: {default_model_per_language[language]}")
+        pipe_dict["language"] = language
+        pipe_dict["original_pipe"] = pipeline("text-to-speech", model=default_model_per_language[language], device=device)
+
+    if pipe_dict["current_model"] != model_id:
+        gr.Warning("Model has changed - loading new model")
+        pipe_dict["pipe"] = pipeline("text-to-speech", model=model_id, device=device)
+        pipe_dict["current_model"] = model_id
+
+    num_speakers = pipe_dict["pipe"].model.config.num_speakers
+
+    out = []
+    # first generate original model result
+    output = pipe_dict["original_pipe"](text)
+    output = gr.Audio(value=(output["sampling_rate"], output["audio"].squeeze()), type="numpy", autoplay=False, label=f"Non-finetuned model prediction {default_model_per_language[language]}", show_label=True,
+                      visible=True)
+    out.append(output)
+
+    if num_speakers > 1:
+        for i in range(min(num_speakers, max_speakers - 1)):
+            forward_params = {"speaker_id": i}
+            output = pipe_dict["pipe"](text, forward_params=forward_params)
+
+            output = gr.Audio(value=(output["sampling_rate"], output["audio"].squeeze()), type="numpy", autoplay=False, label=f"Generated Audio - speaker {i}", show_label=True,
+                              visible=True)
+            out.append(output)
+        out.extend([gr.Audio(visible=False)] * (max_speakers - num_speakers))
+    else:
+        output = pipe_dict["pipe"](text)
+        output = gr.Audio(value=(output["sampling_rate"], output["audio"].squeeze()), type="numpy", autoplay=False, label="Generated Audio - Mono speaker", show_label=True,
+                          visible=True)
+        out.append(output)
+        out.extend([gr.Audio(visible=False)] * (max_speakers - 2))
+    return out
+
+
+css = """
+#container{
+    margin: 0 auto;
+    max-width: 80rem;
+}
+#intro{
+    max-width: 100%;
+    text-align: center;
+    margin: 0 auto;
+}
+"""
+# Gradio blocks demo
+with gr.Blocks(css=css) as demo_blocks:
+    gr.Markdown(title, elem_id="intro")
+
+    with gr.Row():
+        with gr.Column():
+            inp_text = gr.Textbox(label="Input Text", info="What sentence would you like to synthesise?")
+            btn = gr.Button("Generate Audio!")
+            language = gr.Dropdown(
+                default_model_per_language.keys(),
+                value="spanish",
+                label="language",
+                info="Language that you want to test"
+            )
+
+            model_id = gr.Dropdown(
+                models_per_language["spanish"],
+                value="ylacombe/mms-spa-finetuned-chilean-monospeaker",
+                label="Model",
+                info="Model you want to test",
+            )
+
+        with gr.Column():
+            outputs = []
+            for i in range(max_speakers):
+                out_audio = gr.Audio(type="numpy", autoplay=False, label=f"Generated Audio - speaker {i}", show_label=True, visible=False)
+                outputs.append(out_audio)
+
+    with gr.Accordion("Datasets and models details", open=False):
+        gr.Markdown("""
+
+For each language, we used 100 to 150 samples of a single speaker to finetune the model.
+
+### Spanish
+
+* **Model**: [Spanish MMS TTS](https://huggingface.co/facebook/mms-tts-spa).
+* **Datasets**:
+    - [Chilean Spanish TTS dataset](https://huggingface.co/datasets/ylacombe/google-chilean-spanish).
+
+### Tamil
+
+* **Model**: [Tamil MMS TTS](https://huggingface.co/facebook/mms-tts-tam).
+* **Datasets**:
+    - [Tamil TTS dataset](https://huggingface.co/datasets/ylacombe/google-tamil).
+
+### Gujarati
+
+* **Model**: [Gujarati MMS TTS](https://huggingface.co/facebook/mms-tts-guj).
+* **Datasets**:
+    - [Gujarati TTS dataset](https://huggingface.co/datasets/ylacombe/google-gujarati).
+
+### Marathi
+
+* **Model**: [Marathi MMS TTS](https://huggingface.co/facebook/mms-tts-mar).
+* **Datasets**:
+    - [Marathi TTS dataset](https://huggingface.co/datasets/ylacombe/google-chilean-marathi).
+
+### English
+
+* **Model**: [VITS-ljs](https://huggingface.co/kakao-enterprise/vits-ljs)
+* **Dataset**: [British Isles Accent](https://huggingface.co/datasets/ylacombe/english_dialects). For each accent, we used 100 to 150 samples of a single speaker to finetune [VITS-ljs](https://huggingface.co/kakao-enterprise/vits-ljs).
+
+
+""")
+
+    with gr.Accordion("Run VITS and MMS with transformers", open=False):
+        gr.Markdown(
+            """
+```bash
+pip install transformers
+```
+```py
+from transformers import pipeline
+import scipy
+pipe = pipeline("text-to-speech", model="kakao-enterprise/vits-ljs", device=0)
+
+results = pipe("A cinematic shot of a baby racoon wearing an intricate italian priest robe")
+
+# write to a wav file
+scipy.io.wavfile.write("audio_vits.wav", rate=results["sampling_rate"], data=results["audio"].squeeze())
+```
+"""
+        )
+
+
+    language.change(lambda language: gr.Dropdown(
+        models_per_language[language],
+        value=models_per_language[language][0],
+        label="Model",
+        info="Model you want to test",
+    ),
+        language,
+        model_id
+    )
+
+    btn.click(generate_audio, [inp_text, model_id, language], outputs)
+
+
+demo_blocks.queue().launch()
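Outside the Gradio UI, the same fine-tuned checkpoints can be driven directly with the `transformers` text-to-speech pipeline, much as `generate_audio` does above. A minimal sketch using the Chilean Spanish checkpoint listed in `models_per_language` (the input sentence and output filename are arbitrary; `device=-1` assumes CPU):

```py
import scipy.io.wavfile
from transformers import pipeline

# One of the fine-tuned checkpoints listed in models_per_language in app.py.
pipe = pipeline("text-to-speech", model="ylacombe/mms-spa-finetuned-chilean-monospeaker", device=-1)

out = pipe("Hola, ¿cómo estás?")

# The pipeline returns a dict with an "audio" array and its "sampling_rate".
scipy.io.wavfile.write("finetuned_spanish.wav", rate=out["sampling_rate"], data=out["audio"].squeeze())
```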
gitattributes
ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
packages.txt
ADDED
@@ -0,0 +1,3 @@
+festival
+espeak-ng
+mbrola
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+transformers
+torch==2.0.1
+torchvision==0.15.2
+torchaudio==2.0.2
+phonemizer
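The system packages in packages.txt (festival, espeak-ng, mbrola) are backends for the `phonemizer` library pinned above; phoneme-based VITS checkpoints such as the English vits-ljs model referenced in app.py rely on grapheme-to-phoneme conversion of this kind. A minimal sketch exercising the espeak backend (the sample text and language code are arbitrary, and espeak-ng is assumed to be installed system-wide):

```py
from phonemizer import phonemize

# Requires the espeak-ng system package from packages.txt.
phones = phonemize("hola, ¿cómo estás?", language="es", backend="espeak")
print(phones)
```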