# Japanese_pitch / app.py  (Hugging Face Space by mizoru)
# commit: "reverting to prior to add version choice" (42d4ff2)
import gradio as gr
from fastai.vision.all import *
from fastaudio.core.all import *
# Render matplotlib figures (the output spectrogram) at high resolution.
# `matplotlib` is in scope via the star-imports above.
matplotlib.rcParams['figure.dpi'] = 300
def get_x(df):
    """Input getter for the DataBlock: the audio file path stored on the row.

    Must exist at module level so the pickled learner can be loaded.
    """
    audio_path = df.path
    return audio_path
def get_y(df):
    """Target getter for the DataBlock: the pitch-accent pattern label.

    Must exist at module level so the pickled learner can be loaded.
    """
    accent_label = df.pattern
    return accent_label
# Load the exported fastai learner from the Space's root directory.
# Unpickling needs get_x/get_y defined above at module level.
# Filename suggests an xresnet50 trained on spectrograms with silence removed.
learn = load_learner('xresnet50_pitch3_removeSilence.pkl')
# Class vocabulary (pitch-accent pattern names) captured at training time.
labels = learn.dls.vocab
def predict(Record, Upload):
    """Classify the pitch-accent pattern of a spoken Japanese word.

    Args:
        Record: filepath of audio captured from the microphone, or None.
        Upload: filepath of an uploaded audio file, or None. Takes
            priority over Record when both are provided.

    Returns:
        A two-element list: a {label: probability} dict over all classes,
        and a matplotlib figure showing the input spectrogram.

    Raises:
        ValueError: if neither a recording nor an upload was provided.
    """
    path = Upload if Upload else Record
    # Fix: previously str(None) became the literal path "None" and the model
    # crashed with a confusing file-not-found error when no audio was given.
    if path is None:
        raise ValueError("Please record or upload an audio clip first.")
    # with_input=True also returns the decoded input (the spectrogram tensor)
    # so we can display what the model actually saw.
    spec, pred, pred_idx, probs = learn.predict(str(path), with_input=True)
    fig, ax = plt.subplots(figsize=(16, 10))
    show_image(spec, ax=ax)
    # show_image draws the spectrogram upside down; flip so low
    # frequencies sit at the bottom.
    ax.invert_yaxis()
    confidences = {labels[i]: float(probs[i]) for i in range(len(labels))}
    return [confidences, fig]
# UI copy shown on the Gradio page.
title = "Japanese Pitch Accent Pattern Detector"
description = "This model will predict the pitch accent pattern of a word based on the recording of its pronunciation."
# Footer link to the author's write-up.
article="<p style='text-align: center'><a href='https://mizoru.github.io/blog/2021/12/25/Japanese-pitch.html' target='_blank'>How did I make this and what is it for?</a></p>"
# Sample clips bundled with the Space; the paired あめ examples differ only
# in pitch accent (雨 "rain" vs 飴 "candy").
examples = [['代わる.mp3'],['大丈夫な.mp3'],['熱くない.mp3'], ['あめー雨.mp3'], ['あめー飴.mp3']]
# Queue requests so long-running predictions don't hit the request timeout.
enable_queue=True
# Build and launch the app: two optional audio inputs (microphone recording
# or file upload, both passed as filepaths), outputting the top-3 class
# probabilities and the spectrogram figure.
# NOTE(review): gr.inputs/gr.outputs and `optional=`/`enable_queue=` are the
# legacy pre-Gradio-3 API — pinning an old gradio version is required.
gr.Interface(fn=predict,inputs=[gr.inputs.Audio(source='microphone', type='filepath', optional=True), gr.inputs.Audio(source='upload', type='filepath', optional=True)], outputs= [gr.outputs.Label(num_top_classes=3), gr.outputs.Image(type="plot", label='Spectrogram')], title=title,description=description,article=article,examples=examples).launch(debug=True, enable_queue=enable_queue)