ovieyra21 committed
Commit 3e9badc
1 Parent(s): cb5733f

Update app.py

Files changed (1)
  1. app.py +12 -118
app.py CHANGED
@@ -1,14 +1,11 @@
 import torch
-
 import gradio as gr
 import yt_dlp as youtube_dl
 import numpy as np
 from datasets import Dataset, Audio
 from scipy.io import wavfile
-
 from transformers import pipeline
 from transformers.pipelines.audio_utils import ffmpeg_read
-
 import tempfile
 import os
 import time
@@ -29,14 +26,13 @@ pipe = pipeline(
     device=device,
 )
 
-separator = demucs.api.Separator(model = DEMUCS_MODEL_NAME, )
+separator = demucs.api.Separator(model=DEMUCS_MODEL_NAME, )
 
 def separate_vocal(path):
     origin, separated = separator.separate_audio_file(path)
     demucs.api.save_audio(separated["vocals"], path, samplerate=separator.samplerate)
     return path
 
-
 def transcribe(inputs_path, task, use_demucs, dataset_name, oauth_token: gr.OAuthToken | None, progress=gr.Progress()):
     if inputs_path is None:
         raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
@@ -66,19 +62,15 @@ def transcribe(inputs_path, task, use_demucs, dataset_name, oauth_token: gr.OAut
     current_step += 1
     progress((current_step, total_step), desc="Create dataset.")
 
-
     transcripts = []
     audios = []
     with tempfile.TemporaryDirectory() as tmpdirname:
-        for i,chunk in enumerate(progress.tqdm(chunks, desc="Creating dataset (and clean audio if asked for)")):
-
-            # TODO: make sure 1D or 2D?
+        for i, chunk in enumerate(progress.tqdm(chunks, desc="Creating dataset (and clean audio if asked for)")):
             arr = chunk["audio"]
             path = os.path.join(tmpdirname, f"{i}.wav")
-            wavfile.write(path, sampling_rate, arr)
+            wavfile.write(path, sampling_rate, arr)
 
             if use_demucs == "separate-audio":
-                # use demucs tp separate vocals
                 print(f"Separating vocals #{i}")
                 path = separate_vocal(path)
 
@@ -93,7 +85,6 @@ def transcribe(inputs_path, task, use_demucs, dataset_name, oauth_token: gr.OAut
 
     return [[transcript] for transcript in transcripts], text
 
-
 def _return_yt_html_embed(yt_url):
     video_id = yt_url.split("?v=")[-1]
     HTML_str = (
@@ -133,10 +124,7 @@ def download_yt_audio(yt_url, filename):
     except youtube_dl.utils.ExtractorError as err:
         raise gr.Error(str(err))
 
-
-def yt_transcribe(yt_url, task, use_demucs, dataset_name, oauth_token: gr.OAuthToken | None, max_filesize=75.0, dataset_sampling_rate = 24000,
-                  progress=gr.Progress()):
-
+def yt_transcribe(yt_url, task, use_demucs, dataset_name, oauth_token: gr.OAuthToken | None, max_filesize=75.0, dataset_sampling_rate=24000, progress=gr.Progress()):
     if yt_url is None:
         raise gr.Error("No youtube link submitted! Please put a working link.")
     if dataset_name is None:
@@ -149,7 +137,7 @@ def yt_transcribe(yt_url, task, use_demucs, dataset_name, oauth_token: gr.OAuthT
 
     if oauth_token is None:
         gr.Warning("Make sure to click and login before using this demo.")
-        return html_embed_str, [["transcripts will appear here"]], ""
+        return html_embed_str, [["transcripts will appear here"]], ""
 
     current_step += 1
     progress((current_step, total_step), desc="Load video.")
@@ -182,15 +170,12 @@ def yt_transcribe(yt_url, task, use_demucs, dataset_name, oauth_token: gr.OAuthT
     transcripts = []
     audios = []
     with tempfile.TemporaryDirectory() as tmpdirname:
-        for i,chunk in enumerate(progress.tqdm(chunks, desc="Creating dataset (and clean audio if asked for).")):
-
-            # TODO: make sure 1D or 2D?
+        for i, chunk in enumerate(progress.tqdm(chunks, desc="Creating dataset (and clean audio if asked for).")):
             arr = chunk["audio"]
             path = os.path.join(tmpdirname, f"{i}.wav")
-            wavfile.write(path, dataset_sampling_rate, arr)
+            wavfile.write(path, dataset_sampling_rate, arr)
 
             if use_demucs == "separate-audio":
-                # use demucs tp separate vocals
                 print(f"Separating vocals #{i}")
                 path = separate_vocal(path)
 
@@ -203,110 +188,19 @@ def yt_transcribe(yt_url, task, use_demucs, dataset_name, oauth_token: gr.OAuthT
     progress((current_step, total_step), desc="Push dataset.")
     dataset.push_to_hub(dataset_name, token=oauth_token.token if oauth_token else oauth_token)
 
-
     return html_embed_str, [[transcript] for transcript in transcripts], text
 
-
-def naive_postprocess_whisper_chunks(chunks, audio_array, sampling_rate, stop_chars = ".!:;?", min_duration = 5):
-    # merge chunks as long as merged audio duration is lower than min_duration and that a stop character is not met
-    # return list of dictionnaries (text, audio)
-    # min duration is in seconds
+def naive_postprocess_whisper_chunks(chunks, audio_array, sampling_rate, stop_chars=".!:;?", min_duration=5):
     min_duration = int(min_duration * sampling_rate)
-
-
     new_chunks = []
     while chunks:
         current_chunk = chunks.pop(0)
-
         begin, end = current_chunk["timestamp"]
-        begin, end = int(begin*sampling_rate), int(end*sampling_rate)
-
-        current_dur = end-begin
-
+        begin, end = int(begin * sampling_rate), int(end * sampling_rate)
+        current_dur = end - begin
         text = current_chunk["text"]
-
-
         chunk_to_concat = [audio_array[begin:end]]
-        while chunks and (text[-1] not in stop_chars or (current_dur<min_duration)):
+        while chunks and (text[-1] not in stop_chars or (current_dur < min_duration)):
             ch = chunks.pop(0)
             begin, end = ch["timestamp"]
-            begin, end = int(begin*sampling_rate), int(end*sampling_rate)
-            current_dur += end-begin
-
-            text = "".join([text, ch["text"]])
-
-            # TODO: add silence ?
-            chunk_to_concat.append(audio_array[begin:end])
-
-
-        new_chunks.append({
-            "text": text.strip(),
-            "audio": np.concatenate(chunk_to_concat),
-        })
-        print(f"LENGTH CHUNK #{len(new_chunks)}: {current_dur/sampling_rate}s")
-
-    return new_chunks
-
-css = """
-#intro{
-    max-width: 100%;
-    text-align: center;
-    margin: 0 auto;
-}
-"""
-with gr.Blocks(css=css) as demo:
-    with gr.Row():
-        gr.LoginButton()
-        gr.LogoutButton()
-
-    with gr.Tab("YouTube"):
-        gr.Markdown("Create your own TTS dataset using Youtube", elem_id="intro")
-        gr.Markdown(
-            "This demo allows use to create a text-to-speech dataset from an input audio snippet and push it to hub to keep track of it."
-            f"Demo uses the checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to automatically transcribe audio files"
-            " of arbitrary length. It then merge chunks of audio and push it to the hub."
-        )
-        with gr.Row():
-            with gr.Column():
-                audio_youtube = gr.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL")
-                task_youtube = gr.Radio(["transcribe", "translate"], label="Task", value="transcribe")
-                cleaning_youtube = gr.Radio(["no-post-processing", "separate-audio"], label="Audio separation and cleaning (takes longer - use it if your samples are not cleaned (background noise and music))", value="separate-audio")
-                textbox_youtube = gr.Textbox(lines=1, placeholder="Place your new dataset name here. Should be in the format : <user>/<dataset_name> or <org>/<dataset_name>. Also accepts <dataset_name>, which will default to the namespace of the logged-in user.", label="Dataset name")
-
-                with gr.Row():
-                    clear_youtube = gr.ClearButton([audio_youtube, task_youtube, cleaning_youtube, textbox_youtube])
-                    submit_youtube = gr.Button("Submit")
-
-            with gr.Column():
-                html_youtube = gr.HTML()
-                dataset_youtube = gr.Dataset(label="Transcribed samples.",components=["text"], headers=["Transcripts"], samples=[["transcripts will appear here"]])
-                transcript_youtube = gr.Textbox(label="Transcription")
-
-    with gr.Tab("Microphone or Audio file"):
-        gr.Markdown("Create your own TTS dataset using your own recordings", elem_id="intro")
-        gr.Markdown(
-            "This demo allows use to create a text-to-speech dataset from an input audio snippet and push it to hub to keep track of it."
-            f"Demo uses the checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to automatically transcribe audio files"
-            " of arbitrary length. It then merge chunks of audio and push it to the hub."
-        )
-        with gr.Row():
-            with gr.Column():
-                audio_file = gr.Audio(type="filepath")
-                task_file = gr.Radio(["transcribe", "translate"], label="Task", value="transcribe")
-                cleaning_file = gr.Radio(["no-post-processing", "separate-audio"], label="Audio separation and cleaning (takes longer - use it if your samples are not cleaned (background noise and music))", value="separate-audio")
-                textbox_file = gr.Textbox(lines=1, placeholder="Place your new dataset name here. Should be in the format : <user>/<dataset_name> or <org>/<dataset_name>. Also accepts <dataset_name>, which will default to the namespace of the logged-in user.", label="Dataset name")
-
-                with gr.Row():
-                    clear_file = gr.ClearButton([audio_file, task_file, cleaning_file, textbox_file])
-                    submit_file = gr.Button("Submit")
-
-            with gr.Column():
-                dataset_file = gr.Dataset(label="Transcribed samples.", components=["text"], headers=["Transcripts"], samples=[["transcripts will appear here"]])
-                transcript_file = gr.Textbox(label="Transcription")
-
-
-    submit_file.click(transcribe, inputs=[audio_file, task_file, cleaning_file, textbox_file], outputs=[dataset_file, transcript_file])
-    submit_youtube.click(yt_transcribe, inputs=[audio_youtube, task_youtube, cleaning_youtube, textbox_youtube], outputs=[html_youtube, dataset_youtube, transcript_youtube])
-
-demo.launch(debug=True)