"""Speech-to-text demo: transcribe microphone audio with OpenAI Whisper via Gradio."""

import subprocess
import sys

# Install Whisper at startup (Spaces-style deployment). Use the running
# interpreter via `python -m pip` instead of os.system with a shell string;
# check=False keeps the original best-effort behavior (os.system ignored
# failures too). NOTE(review): prefer pinning this in requirements.txt.
subprocess.run(
    [sys.executable, "-m", "pip", "install",
     "git+https://github.com/openai/whisper.git"],
    check=False,
)

import gradio as gr
import whisper

# Load the Whisper "small" checkpoint once at startup; reused for every request.
model = whisper.load_model("small")


def inference_audio(audio):
    """Transcribe an audio file with Whisper and return the decoded text.

    Parameters
    ----------
    audio : str
        Filesystem path to the recording (Gradio ``type="filepath"``).

    Returns
    -------
    str
        The transcription text.
    """
    waveform = whisper.load_audio(audio)
    # Whisper operates on fixed 30-second windows; pad or crop to fit.
    waveform = whisper.pad_or_trim(waveform)
    mel = whisper.log_mel_spectrogram(waveform).to(model.device)
    # No explicit language: whisper.decode auto-detects when
    # DecodingOptions.language is unset, so the separate (discarded)
    # detect_language call from the original was redundant and is removed.
    # fp16=False keeps decoding in float32 (safe on CPU).
    options = whisper.DecodingOptions(fp16=False)
    result = whisper.decode(model, mel, options)
    return result.text


audio_input = gr.Audio(
    label="Input Audio",
    show_label=False,
    source="microphone",
    type="filepath",
)

app = gr.Interface(
    title="Speech to text",
    fn=inference_audio,
    inputs=audio_input,
    outputs=["text"],
)

# Guard the blocking launch so the module can be imported without
# starting a server.
if __name__ == "__main__":
    app.launch(debug=True)