import gradio as gr
import matplotlib
from fastai.vision.all import *
from fastaudio.core.all import *

matplotlib.rcParams['figure.dpi'] = 300

# These helpers were used when the learner was trained and must be importable
# here so load_learner can unpickle it.
def get_x(df): return df.path
def get_y(df): return df.pattern

learn = load_learner('xresnet50_pitch3_removeSilence.pkl')
labels = learn.dls.vocab

def predict(Record, Upload):
    # Prefer an uploaded file; otherwise fall back to the microphone recording.
    path = Upload if Upload else Record
    spec, pred, pred_idx, probs = learn.predict(str(path), with_input=True)
    # Plot the spectrogram the model actually received as input.
    fig, ax = plt.subplots(figsize=(16, 10))
    show_image(spec, ax=ax)
    ax.invert_yaxis()
    return [{labels[i]: float(probs[i]) for i in range(len(labels))}, fig]

title = "Japanese Pitch Accent Pattern Detector"
description = "This model predicts the pitch accent pattern of a word from a recording of its pronunciation."
article = "How did I make this and what is it for?"
examples = [['代わる.mp3'], ['大丈夫な.mp3'], ['熱くない.mp3'], ['あめー雨.mp3'], ['あめー飴.mp3']]
enable_queue = True

gr.Interface(
    fn=predict,
    inputs=[
        gr.inputs.Audio(source='microphone', type='filepath', optional=True),
        gr.inputs.Audio(source='upload', type='filepath', optional=True),
    ],
    outputs=[
        gr.outputs.Label(num_top_classes=3),
        gr.outputs.Image(type="plot", label='Spectrogram'),
    ],
    title=title,
    description=description,
    article=article,
    examples=examples,
).launch(debug=True, enable_queue=enable_queue)