"""Speech-to-speech translation demo.

Pipeline: Whisper ASR (speech -> text) -> SMALL-100 (text -> target
language) -> gTTS (text -> speech).  Served through a Gradio interface.
"""

import gradio as gr
import whisper
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from gtts import gTTS

# Load the ASR model once at startup (downloads weights on first run).
asr_model = whisper.load_model("base")

# Load the many-to-many translation model (SMALL-100).
translation_tokenizer = AutoTokenizer.from_pretrained("alirezamsh/small100")
translation_model = AutoModelForSeq2SeqLM.from_pretrained("alirezamsh/small100")

# Target languages exposed in the UI, mapped to ISO-639-1 codes.
# NOTE: Greek is 'el' (not 'gr') — 'gr' is rejected by gTTS.
available_languages = {
    'Russian': 'ru',
    'Spanish': 'es',
    'English': 'en',
    'Greek': 'el',
}


def translate_audio(audio_file, target_language):
    """Transcribe an audio file, translate the text, and speak the result.

    Parameters
    ----------
    audio_file : str or file-like
        Path to the uploaded audio file (Gradio ``type="filepath"``), or a
        file-like object exposing ``.name`` for backward compatibility.
    target_language : str
        Display name of the target language; must be a key of
        ``available_languages``.

    Returns
    -------
    str
        Path to the generated MP3 with the translated speech.
    """
    to_lang = available_languages[target_language]

    # Accept either a plain filepath (modern Gradio) or a tempfile object.
    audio_path = audio_file.name if hasattr(audio_file, "name") else audio_file

    # --- Speech to text (ASR) ---
    audio = whisper.load_audio(audio_path)
    audio = whisper.pad_or_trim(audio)
    mel = whisper.log_mel_spectrogram(audio).to(asr_model.device)
    # Language detection result is unused for decoding but kept cheap;
    # DecodingOptions() lets Whisper auto-detect the source language.
    _, probs = asr_model.detect_language(mel)
    options = whisper.DecodingOptions()
    result = whisper.decode(asr_model, mel, options)
    text = result.text

    # --- Text translation ---
    # SMALL-100 selects the OUTPUT language via `tgt_lang` (the original
    # code set `src_lang`, which does not control translation direction).
    translation_tokenizer.tgt_lang = to_lang
    encoded = translation_tokenizer(text, return_tensors="pt")
    generated_tokens = translation_model.generate(**encoded)
    translated_text = translation_tokenizer.batch_decode(
        generated_tokens, skip_special_tokens=True
    )[0]

    # --- Text to speech (TTS) ---
    tts = gTTS(text=translated_text, lang=to_lang)
    output_file = "translated_audio.mp3"
    tts.save(output_file)

    return output_file


# Gradio interface (modern component API; gr.inputs/gr.outputs was removed).
# type="filepath" makes Gradio hand translate_audio a path string.
audio_input = gr.Audio(type="filepath", label="Upload audio file")
language_dropdown = gr.Dropdown(
    choices=list(available_languages.keys()), label="Select Target Language"
)
audio_output = gr.Audio(label="Translated audio file")

iface = gr.Interface(
    fn=translate_audio,
    inputs=[audio_input, language_dropdown],
    outputs=audio_output,
    title="Audio Translation Demo",
)

if __name__ == "__main__":
    iface.launch()