Marco-Cheung committed on
Commit 37cb8cc
Parent: 0a5f7db

Update app.py

Files changed (1)
  1. app.py +42 -38
app.py CHANGED
@@ -1,72 +1,76 @@
 import gradio as gr
 import numpy as np
 import torch
-from datasets import load_dataset
-
-from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor, pipeline
-
-
+from transformers import AutoProcessor, pipeline, BarkModel
+
+ASR_MODEL_NAME = "bofenghuang/whisper-large-v2-cv11-german"
+TTS_MODEL_NAME = "suno/bark-small"
+BATCH_SIZE = 8
+voices = {
+    "male": "v2/en_speaker_6",
+    "female": "v2/en_speaker_9"
+}
 device = "cuda:0" if torch.cuda.is_available() else "cpu"
 
 # load speech translation checkpoint
-asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device)
-
-# load text-to-speech checkpoint and speaker embeddings
-processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
-
-model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts").to(device)
-vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)
-
-embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
-speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
+asr_pipe = pipeline("automatic-speech-recognition", model=ASR_MODEL_NAME, chunk_length_s=10, device=device)
 
+# load text-to-speech checkpoint
+processor = AutoProcessor.from_pretrained("suno/bark-small")
+model = BarkModel.from_pretrained("suno/bark-small").to(device)
+sampling_rate = model.generation_config.sample_rate
 
 def translate(audio):
     outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "translate"})
     return outputs["text"]
-
-
-def synthesise(text):
-    inputs = processor(text=text, return_tensors="pt")
-    speech = model.generate_speech(inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder)
-    return speech.cpu()
-
-
-def speech_to_speech_translation(audio):
+
+def synthesise(text, voice_preset):
+    inputs = processor(text=text, return_tensors="pt", voice_preset=voice_preset)
+    speech = model.generate(**inputs.to(device))
+    return speech[0]
+
+def speech_to_speech_translation(audio, voice):
+    voice_preset = None
     translated_text = translate(audio)
-    synthesised_speech = synthesise(translated_text)
-    synthesised_speech = (synthesised_speech.numpy() * 32767).astype(np.int16)
-    return 16000, synthesised_speech
-
-
-title = "Cascaded STST"
+    print(translated_text)
+    if voice == "Female":
+        voice_preset = voices["female"]
+    else:
+        voice_preset = voices["male"]
+    synthesised_speech = synthesise(translated_text, voice_preset)
+    synthesised_speech = (synthesised_speech.cpu().numpy() * 32767).astype(np.int16)
+    return sampling_rate, synthesised_speech
+
+title = "Cascaded STST - Any language to German speech"
 description = """
-Demo for cascaded speech-to-speech translation (STST), mapping from source speech in any language to target speech in English. Demo uses OpenAI's [Whisper Base](https://huggingface.co/openai/whisper-base) model for speech translation, and Microsoft's
-[SpeechT5 TTS](https://huggingface.co/microsoft/speecht5_tts) model for text-to-speech:
-
+Demo for cascaded speech-to-speech translation (STST), mapping from source speech in any language to target speech in German. Demo uses a fine-tuned version of the openai/whisper-large-v2 model (https://huggingface.co/bofenghuang/whisper-large-v2-cv11-german) for speech translation, and Suno's
+[Bark-small](https://huggingface.co/suno/bark-small) model for text-to-speech:
 ![Cascaded STST](https://huggingface.co/datasets/huggingface-course/audio-course-images/resolve/main/s2st_cascaded.png "Diagram of cascaded speech to speech translation")
 """
-
 demo = gr.Blocks()
 
 mic_translate = gr.Interface(
     fn=speech_to_speech_translation,
-    inputs=gr.Audio(source="microphone", type="filepath"),
+    inputs=[gr.Audio(source="microphone", type="filepath"),
+            gr.inputs.Radio(["Male", "Female"], label="Voice", default="Male")],
     outputs=gr.Audio(label="Generated Speech", type="numpy"),
     title=title,
     description=description,
+    allow_flagging="never"
 )
 
 file_translate = gr.Interface(
     fn=speech_to_speech_translation,
-    inputs=gr.Audio(source="upload", type="filepath"),
+    inputs=[gr.Audio(source="upload", type="filepath"),
+            gr.inputs.Radio(["Male", "Female"], label="Voice", default="Male")],
     outputs=gr.Audio(label="Generated Speech", type="numpy"),
-    examples=[["./example.wav"]],
     title=title,
     description=description,
+    allow_flagging="never"
 )
 
 with demo:
     gr.TabbedInterface([mic_translate, file_translate], ["Microphone", "Audio File"])
 
-demo.launch()
+demo.queue(concurrency_count=2, max_size=10)
+demo.launch()
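
For reference, the commit switches the cascade from Whisper Base + SpeechT5 to a German-targeted Whisper fine-tune followed by Bark text-to-speech. The sketch below is not part of the commit; it is a minimal illustration of that same two-step flow outside the Gradio UI, assuming the two checkpoints named in the diff can be downloaded. The helper name speech_to_speech and the input path "sample.wav" are placeholders.

# Minimal sketch (illustration only): Whisper translation -> Bark synthesis,
# mirroring the functions in the committed app.py.
import numpy as np
import torch
from transformers import AutoProcessor, BarkModel, pipeline

device = "cuda:0" if torch.cuda.is_available() else "cpu"

# speech translation checkpoint used by the commit
asr_pipe = pipeline(
    "automatic-speech-recognition",
    model="bofenghuang/whisper-large-v2-cv11-german",
    chunk_length_s=10,
    device=device,
)

# text-to-speech checkpoint used by the commit
processor = AutoProcessor.from_pretrained("suno/bark-small")
model = BarkModel.from_pretrained("suno/bark-small").to(device)
sampling_rate = model.generation_config.sample_rate

def speech_to_speech(audio_path, voice_preset="v2/en_speaker_6"):
    # 1) translate the source speech to text with Whisper's "translate" task
    text = asr_pipe(audio_path, max_new_tokens=256, generate_kwargs={"task": "translate"})["text"]
    # 2) synthesise the translated text with Bark using the chosen speaker preset
    inputs = processor(text=text, return_tensors="pt", voice_preset=voice_preset)
    speech = model.generate(**inputs.to(device))[0]
    # 3) convert the float waveform to 16-bit PCM, as the app does before returning to Gradio
    return sampling_rate, (speech.cpu().numpy() * 32767).astype(np.int16)

rate, waveform = speech_to_speech("sample.wav")  # "sample.wav" is a hypothetical input file
print(rate, waveform.shape)

Returning a (sampling_rate, int16 array) tuple matches what gr.Audio(type="numpy") expects as output, which is why the commit replaces the hard-coded 16000 with model.generation_config.sample_rate.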