frogcho123 committed
Commit 726d965
1 Parent(s): da5250a

Update app.py

Files changed (1)
app.py +18 -16
app.py CHANGED
@@ -4,40 +4,42 @@ import whisper
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 from gtts import gTTS
 
+# Load models
+model_stt = whisper.load_model("base")
+model_translation = AutoModelForSeq2SeqLM.from_pretrained("alirezamsh/small100")
+tokenizer_translation = AutoTokenizer.from_pretrained("alirezamsh/small100")
+
 def speech_to_speech(input_audio, to_lang):
     # Save the uploaded audio file
     input_file = "input_audio" + os.path.splitext(input_audio.name)[1]
     input_audio.save(input_file)
-
+
     # Speech-to-Text (STT)
-    model = whisper.load_model("base")
     audio = whisper.load_audio(input_file)
     audio = whisper.pad_or_trim(audio)
-    mel = whisper.log_mel_spectrogram(audio).to(model.device)
-    _, probs = model.detect_language(mel)
+    mel = whisper.log_mel_spectrogram(audio).to(model_stt.device)
+    _, probs = model_stt.detect_language(mel)
     options = whisper.DecodingOptions()
-    result = whisper.decode(model, mel, options)
+    result = whisper.decode(model_stt, mel, options)
     text = result.text
     lang = max(probs, key=probs.get)
-
+
     # Translate
-    tokenizer = AutoTokenizer.from_pretrained("alirezamsh/small100")
-    model = AutoModelForSeq2SeqLM.from_pretrained("alirezamsh/small100")
-    tokenizer.src_lang = lang
-    tokenizer.tgt_lang = to_lang
-    encoded_bg = tokenizer(text, return_tensors="pt")
-    generated_tokens = model.generate(**encoded_bg)
-    translated_text = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
-
+    tokenizer_translation.src_lang = lang
+    tokenizer_translation.tgt_lang = to_lang
+    encoded_bg = tokenizer_translation(text, return_tensors="pt")
+    generated_tokens = model_translation.generate(**encoded_bg)
+    translated_text = tokenizer_translation.batch_decode(generated_tokens, skip_special_tokens=True)[0]
+
     # Text-to-Speech (TTS)
     tts = gTTS(text=translated_text, lang=to_lang)
     output_file = "output_audio.mp3"
     tts.save(output_file)
-
+
     return output_file
 
 languages = ["ru", "fr", "es", "de"]  # Example languages: Russian, French, Spanish, German
-file_input = gr.inputs.File(label="Upload Audio", type="audio")
+file_input = gr.inputs.File(label="Upload Audio", accept="audio/*")
 dropdown = gr.inputs.Dropdown(languages, label="Translation Language")
 audio_output = gr.outputs.Audio(type="file", label="Translated Voice")
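
The change hoists the three model loads (whisper.load_model, AutoModelForSeq2SeqLM.from_pretrained, AutoTokenizer.from_pretrained) out of speech_to_speech into module scope, so the weights are loaded once at app startup instead of on every request, and the name model is no longer reused for both the Whisper model and the translation model inside the function.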
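The hunk ends at the component definitions, so the interface wiring is not visible in this diff. Below is a minimal sketch of how these pieces would typically be connected under the pre-3.x gr.inputs/gr.outputs API used above; the gr.Interface call and its title are assumptions, not part of the commit:

import gradio as gr

# Hypothetical wiring (not shown in the diff): bind the function and the
# components defined above into an Interface and start the app.
iface = gr.Interface(
    fn=speech_to_speech,
    inputs=[file_input, dropdown],
    outputs=audio_output,
    title="Speech-to-Speech Translation",  # assumed title, not in the commit
)

if __name__ == "__main__":
    iface.launch()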