import os
import gradio as gr
import whisper
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from gtts import gTTS


def translate_voice(file, target_lang):
    """Transcribe an uploaded audio file, translate it, and synthesize speech.

    Pipeline: Whisper (speech-to-text) -> SMaLL-100 (translation) -> gTTS
    (text-to-speech).

    Args:
        file: Either a filepath string or a Gradio file object with a
            ``.name`` attribute pointing at the uploaded audio.
        target_lang: ISO 639-1 code of the language to translate into
            (must be supported by both SMaLL-100 and gTTS, e.g. 'en', 'ru').

    Returns:
        Tuple of (path to synthesized mp3, transcribed source text,
        translated text, target language code).
    """
    # --- Speech-to-text (Whisper) ---
    stt_model = whisper.load_model("base")
    # Gradio may hand us a plain filepath (type="filepath") or a tempfile
    # wrapper (legacy type="file") — accept both.
    audio_path = file if isinstance(file, str) else file.name
    audio = whisper.load_audio(audio_path)
    audio = whisper.pad_or_trim(audio)
    mel = whisper.log_mel_spectrogram(audio).to(stt_model.device).float()
    _, probs = stt_model.detect_language(mel)
    detected_lang = max(probs, key=probs.get)  # kept for logging/debugging
    # fp16 defaults to True and is unsupported on CPU — gate on the device.
    options = whisper.DecodingOptions(fp16=(stt_model.device.type == "cuda"))
    result = whisper.decode(stt_model, mel, options)
    text = result.text

    # --- Translation (SMaLL-100) ---
    tokenizer = AutoTokenizer.from_pretrained("alirezamsh/small100")
    translator = AutoModelForSeq2SeqLM.from_pretrained("alirezamsh/small100")
    # BUG FIX: SMaLL-100 is conditioned on the *target* language via
    # ``tgt_lang``. The original set ``src_lang = target_lang``, which the
    # small100 tokenizer does not use, so the translation direction was
    # never applied.
    tokenizer.tgt_lang = target_lang
    encoded = tokenizer(text, return_tensors="pt")
    generated_tokens = translator.generate(**encoded)
    translated_text = tokenizer.batch_decode(
        generated_tokens, skip_special_tokens=True
    )[0]

    # --- Text-to-speech (gTTS) ---
    tts = gTTS(text=translated_text, lang=target_lang)
    filename = "to_speech.mp3"
    tts.save(filename)

    return filename, text, translated_text, target_lang


iface = gr.Interface(
    fn=translate_voice,
    inputs=[
        # type="file" was removed in Gradio 4.x; "filepath" passes a plain
        # path string (translate_voice accepts both forms).
        gr.components.File(type="filepath", label="Your Audio"),
        gr.components.Dropdown(
            choices=["en", "ru", "de", "fr"], label="Target Language"
        ),
    ],
    outputs=[
        gr.components.Audio(type="filepath", label="Translated Audio"),
        gr.components.Textbox(label="Original Text"),
        gr.components.Textbox(label="Translated Text"),
        gr.components.Textbox(label="Target Language"),
    ],
)

# Only launch the server when run as a script, not on import.
if __name__ == "__main__":
    iface.launch()