kevinwang676 committed
Commit
ac403c1
Parent: 46c1226

Update app.py

Files changed (1)
  1. app.py +381 -297
app.py CHANGED
@@ -1,313 +1,397 @@
- import os
-
- #os.system("pip install git+https://github.com/suno-ai/bark.git")
-
- from bark.generation import SUPPORTED_LANGS
- from bark import SAMPLE_RATE, generate_audio
- from scipy.io.wavfile import write as write_wav
- from datetime import datetime
-
- import shutil
  import gradio as gr
  import sys
-
- import string
- import time
- import argparse
- import json
-
  import numpy as np
-
  import torch
- from TTS.tts.utils.synthesis import synthesis
- from TTS.tts.utils.text.symbols import make_symbols, phonemes, symbols
- from TTS.utils.audio import AudioProcessor
-
- from TTS.tts.models import setup_model
- from TTS.config import load_config
- from TTS.tts.models.vits import *
-
- from TTS.tts.utils.speakers import SpeakerManager
- from pydub import AudioSegment
-
- import librosa
-
- from scipy.io.wavfile import write, read
-
- import subprocess
-
- '''
- from google.colab import drive
- drive.mount('/content/drive')
- src_path = os.path.join(os.path.join(os.path.join(os.path.join(os.getcwd(), 'drive'), 'MyDrive'), 'Colab Notebooks'), 'best_model_latest.pth.tar')
- dst_path = os.path.join(os.getcwd(), 'best_model.pth.tar')
- shutil.copy(src_path, dst_path)
- '''
-
- TTS_PATH = "TTS/"
-
- # add TTS to the import path in case it is not installed globally
- sys.path.append(TTS_PATH)
-
- # output path
- OUT_PATH = 'out/'
- os.makedirs(OUT_PATH, exist_ok=True)
-
- # model files
- MODEL_PATH = 'best_model.pth.tar'
- CONFIG_PATH = 'config.json'
- TTS_LANGUAGES = "language_ids.json"
- TTS_SPEAKERS = "speakers.json"
- USE_CUDA = torch.cuda.is_available()
-
- # load the config and the audio processor
- C = load_config(CONFIG_PATH)
- ap = AudioProcessor(**C.audio)
-
- speaker_embedding = None
-
- C.model_args['d_vector_file'] = TTS_SPEAKERS
- C.model_args['use_speaker_encoder_as_loss'] = False
-
- model = setup_model(C)
- model.language_manager.set_language_ids_from_file(TTS_LANGUAGES)
- cp = torch.load(MODEL_PATH, map_location=torch.device('cpu'))
-
- # drop the speaker-encoder weights before loading the checkpoint
- model_weights = cp['model'].copy()
- for key in list(model_weights.keys()):
-     if "speaker_encoder" in key:
-         del model_weights[key]
-
- model.load_state_dict(model_weights)
- model.eval()
-
- if USE_CUDA:
-     model = model.cuda()
-
- # synthesize voice
- use_griffin_lim = False
-
- # speaker encoder files
- CONFIG_SE_PATH = "config_se.json"
- CHECKPOINT_SE_PATH = "SE_checkpoint.pth.tar"
-
- # load the speaker encoder
- SE_speaker_manager = SpeakerManager(encoder_model_path=CHECKPOINT_SE_PATH, encoder_config_path=CONFIG_SE_PATH, use_cuda=USE_CUDA)
-
- # helper: compute the spectrogram of a reference clip
- def compute_spec(ref_file):
-     y, sr = librosa.load(ref_file, sr=ap.sample_rate)
-     spec = ap.spectrogram(y)
-     spec = torch.FloatTensor(spec).unsqueeze(0)
-     return spec
-
-
- def voice_conversion(ta, ra, da):
-     target_audio = 'target.wav'
-     reference_audio = 'reference.wav'
-     driving_audio = 'driving.wav'
-
-     write(target_audio, ta[0], ta[1])
-     write(reference_audio, ra[0], ra[1])
-     write(driving_audio, da[0], da[1])
-
-     # normalize loudness and resample to 16 kHz
-     files = [target_audio, reference_audio, driving_audio]
-     for file in files:
-         subprocess.run(["ffmpeg-normalize", file, "-nt", "rms", "-t=-27", "-o", file, "-ar", "16000", "-f"])
-
-     target_emb = SE_speaker_manager.compute_d_vector_from_clip([target_audio])
-     target_emb = torch.FloatTensor(target_emb).unsqueeze(0)
-
-     driving_emb = SE_speaker_manager.compute_d_vector_from_clip([reference_audio])
-     driving_emb = torch.FloatTensor(driving_emb).unsqueeze(0)
-
-     # convert the voice
-     driving_spec = compute_spec(driving_audio)
-     y_lengths = torch.tensor([driving_spec.size(-1)])
-     if USE_CUDA:
-         ref_wav_voc, _, _ = model.voice_conversion(driving_spec.cuda(), y_lengths.cuda(), driving_emb.cuda(), target_emb.cuda())
-         ref_wav_voc = ref_wav_voc.squeeze().cpu().detach().numpy()
-     else:
-         ref_wav_voc, _, _ = model.voice_conversion(driving_spec, y_lengths, driving_emb, target_emb)
-         ref_wav_voc = ref_wav_voc.squeeze().detach().numpy()
-
-     return (ap.sample_rate, ref_wav_voc)
-
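For reference, the removed `voice_conversion` helper took Gradio-style `(sample_rate, numpy_array)` tuples, which is also what `scipy.io.wavfile.read` returns. An illustrative call might have looked like the sketch below (file names are placeholders; the YourTTS model and speaker encoder loaded above must be in scope):

```python
from scipy.io.wavfile import read

# Gradio's numpy audio components pass (sample_rate, numpy_array) tuples,
# the same shape that scipy.io.wavfile.read returns.
target = read("target_voice.wav")   # voice to clone (illustrative path)
bark_out = read("bark_output.wav")  # audio generated by Bark (illustrative path)

# The UI wired the Bark output as both the reference and the driving audio.
sr, converted = voice_conversion(target, bark_out, bark_out)
```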
- def generate_text_to_speech(text_prompt, selected_speaker, text_temp, waveform_temp):
-     audio_array = generate_audio(text_prompt, selected_speaker, text_temp, waveform_temp)
-
-     now = datetime.now()
-     date_str = now.strftime("%m-%d-%Y")
-     time_str = now.strftime("%H-%M-%S")
-
-     outputs_folder = os.path.join(os.getcwd(), "outputs")
-     os.makedirs(outputs_folder, exist_ok=True)
-
-     sub_folder = os.path.join(outputs_folder, date_str)
-     os.makedirs(sub_folder, exist_ok=True)
-
-     file_name = f"audio_{time_str}.wav"
-     file_path = os.path.join(sub_folder, file_name)
-     write_wav(file_path, SAMPLE_RATE, audio_array)
-
-     return file_path
-
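An illustrative call of this removed function, using the Bark preset naming scheme built just below (e.g. `en_speaker_1`):

```python
# Sketch: generate one clip with a built-in Bark preset and both
# temperatures at their UI defaults; returns the saved .wav path.
wav_path = generate_text_to_speech(
    "Hello, this is a test.",  # text_prompt
    "en_speaker_1",            # selected_speaker (Bark preset)
    0.7,                       # text_temp
    0.7,                       # waveform_temp
)
print(wav_path)  # e.g. outputs/<MM-DD-YYYY>/audio_<HH-MM-SS>.wav
```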
- speakers_list = []
-
- for lang, code in SUPPORTED_LANGS:
-     for n in range(10):
-         speakers_list.append(f"{code}_speaker_{n}")
-
- examples1 = [["ref.wav", "Bark.wav", "Bark.wav"]]

- with gr.Blocks() as demo:
-     gr.Markdown(
-         f""" # <center>🐶🎶🥳 - Bark with Voice Cloning</center>
-
-         ### <center>🤗 - Powered by [Bark](https://huggingface.co/spaces/suno/bark) and [YourTTS](https://github.com/Edresson/YourTTS). Inspired by [bark-webui](https://github.com/makawy7/bark-webui).</center>
-         1. You can duplicate this space and run it on a GPU: <a href="https://huggingface.co/spaces/{os.getenv('SPACE_ID')}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a>
-         2. First use Bark to generate audio from text, then use YourTTS to re-render that audio in a custom voice. Easy to use!
-         3. For voice cloning, a longer reference clip (about 90 seconds) generally yields better cloned speech. Also make sure the audio generated by Bark is not too short.
-         """
      )

-     with gr.Row().style(equal_height=True):
-         inp1 = gr.Textbox(label="Input Text", lines=4, placeholder="Enter text here...")
-
-         inp3 = gr.Slider(
-             0.1,
-             1.0,
-             value=0.7,
-             label="Generation Temperature",
-             info="1.0 more diverse, 0.1 more conservative",
-         )
-
-         inp4 = gr.Slider(
-             0.1, 1.0, value=0.7, label="Waveform Temperature", info="1.0 more diverse, 0.1 more conservative"
-         )
-     with gr.Row().style(equal_height=True):
-         inp2 = gr.Dropdown(speakers_list, value=speakers_list[1], label="Acoustic Prompt")
-
-         button = gr.Button("Generate using Bark")
-
-     out1 = gr.Audio(label="Generated Audio")
-
-     button.click(generate_text_to_speech, [inp1, inp2, inp3, inp4], [out1])
-
-     with gr.Row().style(equal_height=True):
-         inp5 = gr.Audio(label="Upload Reference Audio for Voice Cloning Here")
-         inp6 = out1
-         inp7 = out1
-
-     btn = gr.Button("Generate using YourTTS")
-     out2 = gr.Audio(label="Generated Audio in a Custom Voice")
-
-     btn.click(voice_conversion, [inp5, inp6, inp7], [out2])
-
-     gr.Examples(examples=examples1, fn=voice_conversion, inputs=[inp5, inp6, inp7],
-                 outputs=[out2], cache_examples=True)
-
-     gr.Markdown(
-         """ ### <center>NOTE: Please do not generate any audio that could be harmful to any person or organization❗</center>
-         """
-     )
-     gr.Markdown(
-         """
-         ### <center>😄 - You may also apply [VoiceFixer](https://huggingface.co/spaces/Kevin676/VoiceFixer) to the generated audio to enhance the speech.</center>
-         ## 🌎 Foreign Language
-         Bark supports various languages out of the box and automatically determines the language from the input text. \
-         When prompted with code-switched text, Bark will even attempt to employ the native accent for the respective languages in the same voice.
-         Try the prompt:
-         ```
-         Buenos días Miguel. Tu colega piensa que tu alemán es extremadamente malo. But I suppose your english isn't terrible.
-         ```
-         ## 🤭 Non-Speech Sounds
-         Below is a list of some known non-speech sounds, but we are finding more every day. \
-         Please let us know on Discord if you find patterns that work particularly well!
-         * [laughter]
-         * [laughs]
-         * [sighs]
-         * [music]
-         * [gasps]
-         * [clears throat]
-         * — or ... for hesitations
-         * ♪ for song lyrics
-         * capitalization for emphasis of a word
-         * MAN/WOMAN: for bias towards a speaker
-         Try the prompt:
-         ```
-         " [clears throat] Hello, my name is Suno. And, uh — and I like pizza. [laughs] But I also have other interests such as... ♪ singing ♪."
-         ```
-         ## 🎶 Music
-         Bark can generate all types of audio and, in principle, doesn't see a difference between speech and music. \
-         Sometimes Bark chooses to generate text as music, but you can help it out by adding music notes around your lyrics.
-         Try the prompt:
-         ```
-         ♪ In the jungle, the mighty jungle, the lion barks tonight ♪
-         ```
-         ## 🧬 Voice Cloning
-         Bark can fully clone voices, including tone, pitch, emotion, and prosody. \
-         The model also attempts to preserve music, ambient noise, etc. from the input audio. \
-         However, to mitigate misuse of this technology, we limit the audio history prompts to a set of fully synthetic options provided by Suno.
-         ## 👥 Speaker Prompts
-         You can provide certain speaker prompts such as NARRATOR, MAN, WOMAN, etc. \
-         Please note that these are not always respected, especially if a conflicting audio history prompt is given.
-         Try the prompt:
-         ```
-         WOMAN: I would like an oatmilk latte please.
-         MAN: Wow, that's expensive!
-         ```
-         ## Details
-         Bark model by [Suno](https://suno.ai/), including the official [code](https://github.com/suno-ai/bark) and model weights. \
-         Gradio demo supported by 🤗 Hugging Face. Bark is licensed under a non-commercial license: CC BY-NC 4.0; see details on [GitHub](https://github.com/suno-ai/bark).
-         """
-     )
-
-     gr.HTML('''
-     <div class="footer">
-         <p>🎶🖼️🎡 - It’s the intersection of technology and liberal arts that makes our hearts sing — Steve Jobs
-         </p>
-     </div>
-     ''')
-
- demo.queue().launch(show_error=True)

  import gradio as gr
+ import os
  import sys
  import numpy as np
+ import logging
  import torch
+ import pytorch_seed
+ import time
+
+ from xml.sax import saxutils
+ from bark.api import generate_with_settings
+ from bark.api import save_as_prompt
+ from util.settings import Settings
+ #import nltk
+
+ from bark import SAMPLE_RATE
+ from cloning.clonevoice import clone_voice
+ from bark.generation import preload_models, _load_history_prompt, codec_decode
+ from scipy.io.wavfile import write as write_wav
+ from util.parseinput import split_and_recombine_text, build_ssml, is_ssml, create_clips_from_ssml
+ from datetime import datetime
+ from tqdm.auto import tqdm
+ from util.helper import create_filename, add_id3_tag
+ from swap_voice import swap_voice_from_audio
+ from training.training_prepare import prepare_semantics_from_text, prepare_wavs_from_semantics
+ from training.train import training_prepare_files, train
+
+ settings = Settings('config.yaml')
+
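The `Settings` class lives in `util/settings.py` and is not shown in this diff. A hypothetical minimal equivalent, inferred only from the attributes this file reads and writes, could look like the sketch below (field names match their use in app.py; defaults are assumptions):

```python
import yaml  # sketch only; the real implementation is util/settings.py

class SettingsSketch:
    def __init__(self, path):
        self.path = path
        try:
            with open(path) as f:
                cfg = yaml.safe_load(f) or {}
        except FileNotFoundError:
            cfg = {}
        # fields used elsewhere in app.py; default values are assumptions
        self.selected_theme = cfg.get("selected_theme", "Default")
        self.output_folder_path = cfg.get("output_folder_path", "outputs")
        self.input_text_desired_length = cfg.get("input_text_desired_length", 110)
        self.input_text_max_length = cfg.get("input_text_max_length", 170)
        self.silence_sentence = cfg.get("silence_sentence", 250)    # ms between sentences
        self.silence_speakers = cfg.get("silence_speakers", 500)    # ms between speakers
        self.server_name = cfg.get("server_name", "")
        self.server_port = cfg.get("server_port", 0)
        self.server_share = cfg.get("server_share", False)

    def save(self):
        data = {k: v for k, v in vars(self).items() if k != "path"}
        with open(self.path, "w") as f:
            yaml.safe_dump(data, f)
```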
+ def generate_text_to_speech(text, selected_speaker, text_temp, waveform_temp, eos_prob, quick_generation, complete_settings, seed, batchcount, progress=gr.Progress(track_tqdm=True)):
+     # Chunk the text into smaller pieces, then combine the generated audio.
+
+     # generation settings
+     if selected_speaker == 'None':
+         selected_speaker = None
+
+     voice_name = selected_speaker
+
+     if text is None or len(text) < 1:
+         if selected_speaker is None:
+             raise gr.Error('No text entered!')
+
+         # No text but a speaker is selected: extract the audio data from the speaker prompt
+         voicedata = _load_history_prompt(voice_name)
+         audio_arr = codec_decode(voicedata["fine_prompt"])
+         result = create_filename(settings.output_folder_path, "None", "extract", ".wav")
+         save_wav(audio_arr, result)
+         return result
+
+     if batchcount < 1:
+         batchcount = 1
+
+     # pause between sentences (settings.silence_sentence, in ms)
+     silenceshort = np.zeros(int((float(settings.silence_sentence) / 1000.0) * SAMPLE_RATE), dtype=np.int16)
+     # pause between speakers (settings.silence_speakers, in ms)
+     silencelong = np.zeros(int((float(settings.silence_speakers) / 1000.0) * SAMPLE_RATE), dtype=np.float32)
+     use_last_generation_as_history = "Use last generation as history" in complete_settings
+     save_last_generation = "Save generation as Voice" in complete_settings
+     for _ in range(batchcount):
+         currentseed = seed
+         if seed is not None and seed > 2**32 - 1:
+             logger.warning(f"Seed {seed} > 2**32 - 1 (max), setting to random")
+             currentseed = None
+         if currentseed is None or currentseed <= 0:
+             currentseed = np.random.default_rng().integers(1, 2**32 - 1)
+         assert 0 < currentseed < 2**32
+
+         progress(0, desc="Generating")
+
+         full_generation = None
+
+         all_parts = []
+         complete_text = ""
+         text = text.lstrip()
+         if is_ssml(text):
+             list_speak = create_clips_from_ssml(text)
+             prev_speaker = None
+             for i, clip in tqdm(enumerate(list_speak), total=len(list_speak)):
+                 selected_speaker = clip[0]
+                 # Add a pause break between speakers
+                 if i > 0 and selected_speaker != prev_speaker:
+                     all_parts += [silencelong.copy()]
+                 prev_speaker = selected_speaker
+                 text = clip[1]
+                 text = saxutils.unescape(text)
+                 if selected_speaker == "None":
+                     selected_speaker = None
+
+                 print(f"\nGenerating Text ({i+1}/{len(list_speak)}) -> {selected_speaker} (Seed {currentseed}):`{text}`")
+                 complete_text += text
+                 with pytorch_seed.SavedRNG(currentseed):
+                     audio_array = generate_with_settings(text_prompt=text, voice_name=selected_speaker, semantic_temp=text_temp, coarse_temp=waveform_temp, eos_p=eos_prob)
+                     currentseed = torch.random.initial_seed()
+                 if len(list_speak) > 1:
+                     filename = create_filename(settings.output_folder_path, currentseed, "audioclip", ".wav")
+                     save_wav(audio_array, filename)
+                     add_id3_tag(filename, text, selected_speaker, currentseed)
+
+                 all_parts += [audio_array]
+         else:
+             texts = split_and_recombine_text(text, settings.input_text_desired_length, settings.input_text_max_length)
+             for i, text in tqdm(enumerate(texts), total=len(texts)):
+                 print(f"\nGenerating Text ({i+1}/{len(texts)}) -> {selected_speaker} (Seed {currentseed}):`{text}`")
+                 complete_text += text
+                 if quick_generation:
+                     with pytorch_seed.SavedRNG(currentseed):
+                         audio_array = generate_with_settings(text_prompt=text, voice_name=selected_speaker, semantic_temp=text_temp, coarse_temp=waveform_temp, eos_p=eos_prob)
+                         currentseed = torch.random.initial_seed()
+                 else:
+                     full_output = use_last_generation_as_history or save_last_generation
+                     if full_output:
+                         full_generation, audio_array = generate_with_settings(text_prompt=text, voice_name=voice_name, semantic_temp=text_temp, coarse_temp=waveform_temp, eos_p=eos_prob, output_full=True)
+                     else:
+                         audio_array = generate_with_settings(text_prompt=text, voice_name=voice_name, semantic_temp=text_temp, coarse_temp=waveform_temp, eos_p=eos_prob)
+
+                 # Noticed this in the HF demo - convert to 16-bit int -32767/32767, the most common audio format
+                 # audio_array = (audio_array * 32767).astype(np.int16)
+
+                 if len(texts) > 1:
+                     filename = create_filename(settings.output_folder_path, currentseed, "audioclip", ".wav")
+                     save_wav(audio_array, filename)
+                     add_id3_tag(filename, text, selected_speaker, currentseed)
+
+                 if not quick_generation and (save_last_generation or use_last_generation_as_history):
+                     # save the full generation to npz so it can be reused as a voice prompt
+                     voice_name = create_filename(settings.output_folder_path, seed, "audioclip", ".npz")
+                     save_as_prompt(voice_name, full_generation)
+                     if use_last_generation_as_history:
+                         selected_speaker = voice_name
+
+                 all_parts += [audio_array]
+                 # Add a short pause between sentences
+                 if text[-1] in "!?.\n" and i > 1:
+                     all_parts += [silenceshort.copy()]
+
+         # save & play audio
+         result = create_filename(settings.output_folder_path, currentseed, "final", ".wav")
+         save_wav(np.concatenate(all_parts), result)
+         # write id3 tag with text truncated to 60 chars, as a precaution...
+         add_id3_tag(result, complete_text, selected_speaker, currentseed)
+
+     return result
+
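The seeding above relies on the `pytorch_seed` package's `SavedRNG` context manager which, as used here, seeds torch's RNG inside the block and restores the previous RNG state on exit. A minimal reproducibility sketch under that assumption:

```python
# Sketch: the same seed inside SavedRNG should reproduce the same draws,
# while leaving the surrounding RNG state untouched.
import torch
import pytorch_seed

def seeded_draw(seed):
    with pytorch_seed.SavedRNG(seed):
        return torch.randn(3)

a = seeded_draw(1234)
b = seeded_draw(1234)
print(torch.equal(a, b))  # expected: True
```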
+ def save_wav(audio_array, filename):
+     write_wav(filename, SAMPLE_RATE, audio_array)
+
+ def save_voice(filename, semantic_prompt, coarse_prompt, fine_prompt):
+     np.savez_compressed(
+         filename,
+         semantic_prompt=semantic_prompt,
+         coarse_prompt=coarse_prompt,
+         fine_prompt=fine_prompt
      )

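Since `save_voice` is a plain `np.savez_compressed` call, a saved voice prompt can be read back with `np.load`; the file name below is illustrative:

```python
import numpy as np

voice = np.load("myvoice.npz")  # illustrative path
print(sorted(voice.files))      # ['coarse_prompt', 'fine_prompt', 'semantic_prompt']
semantic = voice["semantic_prompt"]  # arrays come back under the keyword names above
```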
+ def on_quick_gen_changed(checkbox):
+     if not checkbox:
+         return gr.CheckboxGroup.update(visible=True)
+     return gr.CheckboxGroup.update(visible=False)
+
+ def delete_output_files(checkbox_state):
+     if checkbox_state:
+         outputs_folder = os.path.join(os.getcwd(), settings.output_folder_path)
+         if os.path.exists(outputs_folder):
+             purgedir(outputs_folder)
+     return False
+
+
+ # https://stackoverflow.com/a/54494779
+ def purgedir(parent):
+     for root, dirs, files in os.walk(parent):
+         for item in files:
+             # Delete subordinate files
+             filespec = os.path.join(root, item)
+             os.unlink(filespec)
+         for item in dirs:
+             # Recursively perform this operation for subordinate directories
+             purgedir(os.path.join(root, item))
+
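Note that `os.walk` already descends into every subdirectory, so the recursive call above revisits directories whose files are gone by then; it is harmless but redundant. A single pass suffices, as in this sketch:

```python
import os

def purge_tree(parent):
    # one os.walk pass visits every subdirectory, so no explicit recursion is needed
    for root, _dirs, files in os.walk(parent):
        for name in files:
            os.unlink(os.path.join(root, name))  # delete files, keep the directory tree
```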
+ def convert_text_to_ssml(text, selected_speaker):
+     return build_ssml(text, selected_speaker)
+
+
+ def training_prepare(selected_step, num_text_generations, progress=gr.Progress(track_tqdm=True)):
+     if selected_step == prepare_training_list[0]:
+         prepare_semantics_from_text()
+     else:
+         prepare_wavs_from_semantics()
+     return None
+
+
+ def start_training(save_model_epoch, max_epochs, progress=gr.Progress(track_tqdm=True)):
+     training_prepare_files("./training/data/", "./training/data/checkpoint/hubert_base_ls960.pt")
+     train("./training/data/", save_model_epoch, max_epochs)
+     return None
+
+
+ def apply_settings(themes, input_server_name, input_server_port, input_server_public, input_desired_len, input_max_len, input_silence_break, input_silence_speaker):
+     settings.selected_theme = themes
+     settings.server_name = input_server_name
+     settings.server_port = input_server_port
+     settings.server_share = input_server_public
+     settings.input_text_desired_length = input_desired_len
+     settings.input_text_max_length = input_max_len
+     settings.silence_sentence = input_silence_break
+     settings.silence_speakers = input_silence_speaker
+     settings.save()
+
+ def restart():
+     global restart_server
+     restart_server = True
+
+
+ def create_version_html():
+     python_version = ".".join([str(x) for x in sys.version_info[0:3]])
+     versions_html = f"""
+ python: <span title="{sys.version}">{python_version}</span>
+  • 
+ torch: {getattr(torch, '__long_version__', torch.__version__)}
+  • 
+ gradio: {gr.__version__}
+ """
+     return versions_html
+
+ logger = logging.getLogger(__name__)
+ APPTITLE = "Bark Voice Cloning UI"
+
+
+ autolaunch = False
+
+ if len(sys.argv) > 1:
+     autolaunch = "-autolaunch" in sys.argv
+
+
+ if not torch.cuda.is_available():
+     os.environ['BARK_FORCE_CPU'] = 'True'
+     logger.warning("No CUDA detected, falling back to CPU!")
+
+ print(f'smallmodels={os.environ.get("SUNO_USE_SMALL_MODELS", False)}')
+ print(f'enablemps={os.environ.get("SUNO_ENABLE_MPS", False)}')
+ print(f'offloadcpu={os.environ.get("SUNO_OFFLOAD_CPU", False)}')
+ print(f'forcecpu={os.environ.get("BARK_FORCE_CPU", False)}')
+ print(f'autolaunch={autolaunch}\n\n')
+
+ #print("Updating nltk\n")
+ #nltk.download('punkt')
+
+ print("Preloading Models\n")
+ preload_models()
+
+ available_themes = ["Default", "gradio/glass", "gradio/monochrome", "gradio/seafoam", "gradio/soft", "gstaff/xkcd", "freddyaboulton/dracula_revamped", "ysharma/steampunk"]
+ tokenizer_language_list = ["de", "en", "pl"]
+ prepare_training_list = ["Step 1: Semantics from Text", "Step 2: WAV from Semantics"]
+
+ seed = -1
+ server_name = settings.server_name
+ if len(server_name) < 1:
+     server_name = None
+ server_port = settings.server_port
+ if server_port <= 0:
+     server_port = None
+ global run_server
+ global restart_server
+
+ run_server = True
+
+ while run_server:
+     # Collect all existing speakers/voices from the prompts dir
+     speakers_list = []
+
+     for root, dirs, files in os.walk("./bark/assets/prompts"):
+         for file in files:
+             if file.endswith(".npz"):
+                 pathpart = root.replace("./bark/assets/prompts", "")
+                 name = os.path.join(pathpart, file[:-4])
+                 if name.startswith("/") or name.startswith("\\"):
+                     name = name[1:]
+                 speakers_list.append(name)
+
+     speakers_list = sorted(speakers_list, key=lambda x: x.lower())
+     speakers_list.insert(0, 'None')
+
+     print(f'Launching {APPTITLE} Server')
+
+     # Create Gradio Blocks
+
+     with gr.Blocks(title=f"{APPTITLE}", mode=f"{APPTITLE}", theme=settings.selected_theme) as barkgui:
+         gr.Markdown("# <center>🐶🎶⭐ - Bark Voice Cloning</center>")
+         gr.Markdown("### <center>🤗 - If you like this space, please star my [github repo](https://github.com/KevinWang676/Bark-Voice-Cloning)</center>")
+         gr.Markdown("### <center>🎡 - Based on [bark-gui](https://github.com/C0untFloyd/bark-gui)</center>")
+         gr.Markdown(f"""You can duplicate this space and use it with a GPU: <a href="https://huggingface.co/spaces/{os.getenv('SPACE_ID')}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a>
+         or open it in [Colab](https://colab.research.google.com/github/KevinWang676/Bark-Voice-Cloning/blob/main/Bark_Voice_Cloning_UI.ipynb) for a quick start 🌟
+         """)
+
+         with gr.Tab("🎙️ - Clone Voice"):
+             with gr.Row():
+                 input_audio_filename = gr.Audio(label="Input audio.wav", source="upload", type="filepath")
+                 #transcription_text = gr.Textbox(label="Transcription Text", lines=1, placeholder="Enter Text of your Audio Sample here...")
+             with gr.Row():
+                 with gr.Column():
+                     initialname = "/home/user/app/bark/assets/prompts/file"
+                     output_voice = gr.Textbox(label="Filename of trained Voice (do not change the initial name)", lines=1, placeholder=initialname, value=initialname, visible=False)
+                 with gr.Column():
+                     tokenizerlang = gr.Dropdown(tokenizer_language_list, label="Base Language Tokenizer", value=tokenizer_language_list[1], visible=False)
+             with gr.Row():
+                 clone_voice_button = gr.Button("Create Voice", variant="primary")
+             with gr.Row():
+                 dummy = gr.Text(label="Progress")
+                 npz_file = gr.File(label=".npz file")
+             speakers_list.insert(0, npz_file)  # put the created .npz prompt first in the voice dropdown
+
+         with gr.Tab("🎵 - TTS"):
+             with gr.Row():
+                 with gr.Column():
+                     placeholder = "Enter text here."
+                     input_text = gr.Textbox(label="Input Text", lines=4, placeholder=placeholder)
+                     convert_to_ssml_button = gr.Button("Convert Input Text to SSML")
+                 with gr.Column():
+                     seedcomponent = gr.Number(label="Seed (default -1 = Random)", precision=0, value=-1)
+                     batchcount = gr.Number(label="Batch count", precision=0, value=1)
+
+             with gr.Row():
+                 with gr.Column():
+                     gr.Markdown("[Voice Prompt Library](https://suno-ai.notion.site/8b8e8749ed514b0cbf3f699013548683?v=bc67cff786b04b50b3ceb756fd05f68c)")
+                     speaker = gr.Dropdown(speakers_list, value=speakers_list[0], label="Voice (choose “file” if you want to use the custom voice)")
+                 with gr.Column():
+                     text_temp = gr.Slider(0.1, 1.0, value=0.6, label="Generation Temperature", info="1.0 more diverse, 0.1 more conservative")
+                     waveform_temp = gr.Slider(0.1, 1.0, value=0.7, label="Waveform Temperature", info="1.0 more diverse, 0.1 more conservative")
+
+             with gr.Row():
+                 with gr.Column():
+                     quick_gen_checkbox = gr.Checkbox(label="Quick Generation", value=True)
+                     settings_checkboxes = ["Use last generation as history", "Save generation as Voice"]
+                     complete_settings = gr.CheckboxGroup(choices=settings_checkboxes, value=settings_checkboxes, label="Detailed Generation Settings", type="value", interactive=True, visible=False)
+                 with gr.Column():
+                     eos_prob = gr.Slider(0.0, 0.5, value=0.05, label="End-of-sentence probability")
+
+             with gr.Row():
+                 with gr.Column():
+                     tts_create_button = gr.Button("Generate", variant="primary")
+                 with gr.Column():
+                     hidden_checkbox = gr.Checkbox(visible=False)
+                     button_stop_generation = gr.Button("Stop generation")
+             with gr.Row():
+                 output_audio = gr.Audio(label="Generated Audio", type="filepath")
+
+         with gr.Tab("🔮 - Voice Conversion"):
+             with gr.Row():
+                 swap_audio_filename = gr.Audio(label="Input audio.wav to swap voice", source="upload", type="filepath")
+             with gr.Row():
+                 with gr.Column():
+                     swap_tokenizer_lang = gr.Dropdown(tokenizer_language_list, label="Base Language Tokenizer", value=tokenizer_language_list[1])
+                     swap_seed = gr.Number(label="Seed (default -1 = Random)", precision=0, value=-1)
+                 with gr.Column():
+                     speaker_swap = gr.Dropdown(speakers_list, value=speakers_list[0], label="Voice (choose “file” if you want to use the custom voice)")
+                     swap_batchcount = gr.Number(label="Batch count", precision=0, value=1)
+             with gr.Row():
+                 swap_voice_button = gr.Button("Generate", variant="primary")
+             with gr.Row():
+                 output_swap = gr.Audio(label="Generated Audio", type="filepath")
+
+         quick_gen_checkbox.change(fn=on_quick_gen_changed, inputs=quick_gen_checkbox, outputs=complete_settings)
+         convert_to_ssml_button.click(convert_text_to_ssml, inputs=[input_text, speaker], outputs=input_text)
+         gen_click = tts_create_button.click(generate_text_to_speech, inputs=[input_text, speaker, text_temp, waveform_temp, eos_prob, quick_gen_checkbox, complete_settings, seedcomponent, batchcount], outputs=output_audio)
+         button_stop_generation.click(fn=None, inputs=None, outputs=None, cancels=[gen_click])
+
+         swap_voice_button.click(swap_voice_from_audio, inputs=[swap_audio_filename, speaker_swap, swap_tokenizer_lang, swap_seed, swap_batchcount], outputs=output_swap)
+         clone_voice_button.click(clone_voice, inputs=[input_audio_filename, output_voice], outputs=[dummy, npz_file])
+
+     restart_server = False
+     try:
+         barkgui.queue().launch(show_error=True)
+     except:
+         restart_server = True
+         run_server = False
+     try:
+         while not restart_server:
+             time.sleep(1.0)
+     except (KeyboardInterrupt, OSError):
+         print("Keyboard interruption in main thread... closing server.")
+         run_server = False
+     barkgui.close()