import os

import gradio as gr
import librosa
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

# Model details: entries start as None and are loaded lazily on first use
models = {
    "m3hrdadfi/wav2vec2-large-xlsr-persian-v3": None,
    "jonatasgrosman/wav2vec2-large-xlsr-53-persian": None,
    "AlirezaSaei/wav2vec2-large-xlsr-persian-fine-tuned": None
}

# Load a model and its processor from the Hugging Face Hub
def load_model(model_name):
    model = Wav2Vec2ForCTC.from_pretrained(model_name)
    processor = Wav2Vec2Processor.from_pretrained(model_name)
    return model, processor

def transcribe(audio, model_name):
    # Guard against the empty case: Gradio passes None if no audio was given
    if audio is None:
        return "Please upload or record an audio clip first."
    # Lazy-load and cache the selected model
    if models[model_name] is None:
        models[model_name] = load_model(model_name)
    model, processor = models[model_name]
    # Resample to the 16 kHz rate the XLSR wav2vec2 models expect
    audio_data, _ = librosa.load(audio, sr=16000)
    input_values = processor(
        audio_data, sampling_rate=16000, return_tensors="pt", padding=True
    ).input_values
    with torch.no_grad():
        logits = model(input_values).logits
    # Greedy CTC decoding: take the most likely token at each frame
    predicted_ids = torch.argmax(logits, dim=-1)
    transcription = processor.batch_decode(predicted_ids)[0]
    return transcription

# Read HTML banner
if os.path.exists("banner.html"):
    with open("banner.html", "r", encoding="utf-8") as file:
        banner = file.read()
else:
    # Minimal fallback when banner.html is not present
    banner = "<h1>Persian STT</h1>"
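
# Quick standalone check: uncomment to verify one model end-to-end without the
# UI. It reuses the bundled "Test-Audio.ogg" clip that the interface below
# also offers; any audio file librosa can read would work here.
#
#     print(transcribe("Test-Audio.ogg", "m3hrdadfi/wav2vec2-large-xlsr-persian-v3"))
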
with gr.Blocks() as demo:
    gr.HTML(banner)
    gr.Markdown("""
    Test the best Persian STT models in one place!
""") with gr.Row(): with gr.Column(): # Audio input (upload or record) audio_input = gr.Audio( type="filepath", label="Upload or Record Audio" ) model_dropdown = gr.Dropdown( choices=list(models.keys()), label="Select Model", value="m3hrdadfi/wav2vec2-large-xlsr-persian-v3" ) # Add Test Audio Button def use_test_audio(): return "Test-Audio.ogg" test_audio_button = gr.Button("Use Test Audio") with gr.Column(): output_text = gr.Textbox( label="Transcription", lines=5, placeholder="The transcription will appear here..." ) transcribe_button = gr.Button("Transcribe") test_audio_button.click( fn=use_test_audio, inputs=[], outputs=[audio_input] ) transcribe_button.click( fn=transcribe, inputs=[audio_input, model_dropdown], outputs=output_text ) gr.Markdown(""" """) demo.launch()