from src import infer, utils
import gradio as gr

# Pre-download and cache the Whisper model weights before the UI starts.
infer.model_preloader_downloader()

# Example rows for the interface; each row must provide one value per input
# component before `examples=` can be re-enabled below.
AUDIO_EXAMPLE = [
    [None, "assets/audio/male-indonesian.wav", None],
    [None, "assets/audio/female-indonesian.wav", None],
    [None, "assets/audio/male-english.wav", None],
    [None, "assets/audio/female-english.wav", None],
]

TITLE = "OpenAI Whisper"
DESCRIPTION = utils.parsing_text("assets/descriptions.md")
ARTICLE = utils.parsing_text("assets/articles.md")

# `infer.predict` receives one value per input component
# (model name, language, microphone recording path, uploaded audio path)
# and returns the transcribed text shown in the output text area.
demo = gr.Interface(
    fn=infer.predict,
    inputs=[
        gr.Dropdown(
            label="Model",
            choices=[
                "tiny", "base", "small", "medium",
                "large", "large-v1", "large-v2",
            ],
            value="base",
        ),
        gr.Radio(
            label="Language",
            choices=["indonesian", "english"],
            value="indonesian",
        ),
        gr.Audio(label="Speak", source="microphone", type="filepath"),
        gr.Audio(label="Upload Audio", source="upload", type="filepath"),
    ],
    outputs=[gr.TextArea(label="Output Text")],
    title=TITLE,
    description=DESCRIPTION,
    article=ARTICLE,
    # examples=AUDIO_EXAMPLE,
)

demo.launch()