import gradio
import torchaudio
from fastai.vision.all import *
from fastai.learner import load_learner
from torchvision.utils import save_image
from huggingface_hub import hf_hub_download

# Download the exported fastai learner from the Hugging Face Hub and load it.
model = load_learner(
    hf_hub_download("kurianbenoy/music_genre_classification_baseline", "model.pkl")
)

EXAMPLES_PATH = Path("./examples")

# Class labels come from the DataLoaders vocabulary stored inside the learner.
labels = model.dls.vocab

interface_options = {
    "title": "Music Genre Classification",
    "description": "A simple baseline model for classifying music genres with fast.ai on [Kaggle competition data](https://www.kaggle.com/competitions/kaggle-pog-series-s01e02/data)",
    "examples": [f"{EXAMPLES_PATH}/{f.name}" for f in EXAMPLES_PATH.iterdir()],
    "interpretation": "default",
    "layout": "horizontal",
    "theme": "default",
}


def predict(img):
    """Classify a spectrogram image and return a {genre: probability} mapping."""
    img = PILImage.create(img)
    _pred, _pred_w_idx, probs = model.predict(img)
    labels_probs = {labels[i]: float(probs[i]) for i, _ in enumerate(labels)}
    return labels_probs


demo = gradio.Interface(
    fn=predict,
    inputs=gradio.inputs.Image(shape=(512, 512)),
    outputs=gradio.outputs.Label(num_top_classes=5),
    **interface_options,
)

launch_options = {
    "enable_queue": True,
    "share": False,
}

demo.launch(**launch_options)