Update app.py
app.py
CHANGED
@@ -8,7 +8,7 @@ from transformers import AutoTokenizer, AutoFeatureExtractor, set_seed
 device = "cuda:0" if torch.cuda.is_available() else "cpu"
 
 
-repo_id = "parler-tts/
+repo_id = "parler-tts/parler_tts_mini_v0.1"
 
 model = ParlerTTSForConditionalGeneration.from_pretrained(repo_id).to(device)
 tokenizer = AutoTokenizer.from_pretrained(repo_id)
@@ -108,7 +108,7 @@ with gr.Blocks(css=css) as block:
     gr.HTML(
         f"""
         <p><a href="https://github.com/huggingface/parler-tts"> Parler-TTS</a> is a training and inference library for
-        high-fidelity text-to-speech (TTS) models. The model demonstrated here, <a href="https://huggingface.co/parler-tts/
+        high-fidelity text-to-speech (TTS) models. The model demonstrated here, <a href="https://huggingface.co/parler-tts/parler_tts_mini_v0.1"> Parler-TTS Mini v0.1</a>,
         is the first iteration model trained using 10k hours of narrated audiobooks. It generates high-quality speech
         with features that can be controlled using a simple text prompt (e.g. gender, background noise, speaking rate, pitch and reverberation).</p>
 
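For reference, a minimal standalone inference sketch against the checkpoint referenced by the updated repo_id, adapted from the Parler-TTS usage example. The prompt and description strings are illustrative, and unlike the Space's app.py (which also sets a seed and uses AutoFeatureExtractor for streaming through Gradio), this sketch simply writes the generated audio to a WAV file:

import torch
import soundfile as sf
from parler_tts import ParlerTTSForConditionalGeneration
from transformers import AutoTokenizer

device = "cuda:0" if torch.cuda.is_available() else "cpu"
repo_id = "parler-tts/parler_tts_mini_v0.1"

# Load the model and tokenizer from the updated repo_id.
model = ParlerTTSForConditionalGeneration.from_pretrained(repo_id).to(device)
tokenizer = AutoTokenizer.from_pretrained(repo_id)

# The text to speak, plus a free-form description controlling voice attributes
# (gender, background noise, speaking rate, pitch, reverberation, ...).
prompt = "Hey, how are you doing today?"
description = "A female speaker delivers her words expressively, with clear audio quality and little background noise."

input_ids = tokenizer(description, return_tensors="pt").input_ids.to(device)
prompt_input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

# Generate the waveform and save it at the model's native sampling rate.
generation = model.generate(input_ids=input_ids, prompt_input_ids=prompt_input_ids)
audio = generation.cpu().numpy().squeeze()
sf.write("parler_tts_out.wav", audio, model.config.sampling_rate)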