frogcho123 committed
Commit da5250a
Parent(s): 7ce46d5

Update app.py

Files changed (1): app.py (+25 -44)
app.py CHANGED
@@ -1,64 +1,45 @@
- import os
- import tempfile
  import gradio as gr
+ import os
  import whisper
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
  from gtts import gTTS
- import base64
-
- # Load the models and tokenizer
- whisper_model = whisper.load_model("base")
- tokenizer = AutoTokenizer.from_pretrained("alirezamsh/small100")
- model = AutoModelForSeq2SeqLM.from_pretrained("alirezamsh/small100")

- def translate_audio(input_file, to_lang):
-     # Load the audio file
+ def speech_to_speech(input_audio, to_lang):
+     # Save the uploaded audio file
+     input_file = "input_audio" + os.path.splitext(input_audio.name)[1]
+     input_audio.save(input_file)
+
+     # Speech-to-Text (STT)
+     model = whisper.load_model("base")
      audio = whisper.load_audio(input_file)
      audio = whisper.pad_or_trim(audio)
-     mel = whisper.log_mel_spectrogram(audio).to(whisper_model.device)
-
-     # Detect language using Whisper
-     _, probs = whisper_model.detect_language(mel)
-     lang = max(probs, key=probs.get)
-
-     # Convert audio to text
+     mel = whisper.log_mel_spectrogram(audio).to(model.device)
+     _, probs = model.detect_language(mel)
      options = whisper.DecodingOptions()
-     result = whisper.decode(whisper_model, mel, options)
+     result = whisper.decode(model, mel, options)
      text = result.text
+     lang = max(probs, key=probs.get)

-     # Translate the text
+     # Translate
+     tokenizer = AutoTokenizer.from_pretrained("alirezamsh/small100")
+     model = AutoModelForSeq2SeqLM.from_pretrained("alirezamsh/small100")
      tokenizer.src_lang = lang
+     tokenizer.tgt_lang = to_lang
      encoded_bg = tokenizer(text, return_tensors="pt")
      generated_tokens = model.generate(**encoded_bg)
      translated_text = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]

-     # Convert translated text to audio
+     # Text-to-Speech (TTS)
      tts = gTTS(text=translated_text, lang=to_lang)
-     temp_output_file = tempfile.NamedTemporaryFile(suffix=".mp3").name
-     tts.save(temp_output_file)
-
-     # Load audio data from file
-     audio_data = open(temp_output_file, "rb").read()
+     output_file = "output_audio.mp3"
+     tts.save(output_file)

-     # Convert audio data to Base64 string
-     audio_base64 = base64.b64encode(audio_data).decode("utf-8")
-
-     return audio_base64
-
- def translate_audio_interface(input_file, to_lang):
-     return translate_audio(input_file, to_lang)
+     return output_file

- iface = gr.Interface(
-     fn=translate_audio_interface,
-     inputs=["file", "text"],
-     outputs="text",
-     title="Audio Translation",
-     description="Uploadd an MP3 file and select the target language for translation.",
-     examples=[
-         ["audio_example.mp3", "en"],
-         ["speech_sample.mp3", "fr"],
-     ]
- )
+ languages = ["ru", "fr", "es", "de"]  # Example languages: Russian, French, Spanish, German
+ file_input = gr.inputs.File(label="Upload Audio", type="audio")
+ dropdown = gr.inputs.Dropdown(languages, label="Translation Language")
+ audio_output = gr.outputs.Audio(type="file", label="Translated Voice")

- iface.launch()
+ gr.Interface(fn=speech_to_speech, inputs=[file_input, dropdown], outputs=audio_output, title="Speech-to-Speech Translator", description="Upload an audio file (MP3, WAV, or FLAC) and choose the target language for translation.", theme="default").launch()
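
For a quick local check of the new speech_to_speech() function outside the Gradio UI, a minimal sketch like the following can be used. It assumes the function from the updated app.py is available in the current scope (importing app.py as-is would also launch the interface); _UploadStub, "sample.mp3", and the "fr" target language are hypothetical placeholders, not part of the commit. The stub only provides the two things the committed code touches on the upload object: .name and .save(path).

# Sketch: drive speech_to_speech() directly, bypassing the Gradio interface.
# Assumption: the speech_to_speech() function from the updated app.py is defined here.
# _UploadStub and "sample.mp3" are illustrative stand-ins, not part of the commit.
import shutil

class _UploadStub:
    """Mimics the uploaded-file object speech_to_speech() expects (.name and .save)."""
    def __init__(self, path):
        self.name = path                 # the extension of the saved copy comes from this name
    def save(self, dest):
        shutil.copy(self.name, dest)     # place the clip where the app will read it

if __name__ == "__main__":
    out = speech_to_speech(_UploadStub("sample.mp3"), to_lang="fr")  # committed code returns "output_audio.mp3"
    print("Translated audio written to:", out)

Note that the committed code calls .save() on the upload object, so any caller has to supply such a method; depending on the Gradio version, the file object passed by gr.inputs.File may not expose one.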