import os
from speechbrain.pretrained.interfaces import foreign_class
import gradio as gr
import warnings
warnings.filterwarnings("ignore")
# Load the SpeechBrain emotion recognition model (wav2vec2 fine-tuned on IEMOCAP)
learner = foreign_class(
    source="speechbrain/emotion-recognition-wav2vec2-IEMOCAP",
    pymodule_file="custom_interface.py",
    classname="CustomEncoderWav2vec2Classifier"
)
# Mapping from the model's short class labels to human-readable emotion names
emotion_dict = {
    'sad': 'Sad',
    'hap': 'Happy',
    'ang': 'Anger',
    'fea': 'Fear',
    'sur': 'Surprised',
    'neu': 'Neutral'
}

# Prediction function for the Gradio interface
def predict_emotion(file_path):
    # The dropdown supplies a plain file path, so there is no `.name` attribute to access
    out_prob, score, index, text_lab = learner.classify_file(file_path)
    return emotion_dict[text_lab[0]]
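
# Optional sanity check (illustrative only): run the classifier on a single file
# before launching the UI. The path "prerecorded/example.wav" is a hypothetical
# placeholder and assumes such a file exists in the folder defined below.
# print(predict_emotion("prerecorded/example.wav"))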
# Folder containing the pre-recorded audio files; adjust the path if the
# 'prerecorded' folder is not in the current working directory
folder = "prerecorded"
audio_files = [os.path.join(folder, file) for file in os.listdir(folder) if file.endswith('.wav')]
# Build the Gradio interface with a dropdown for audio selection
# (gr.Dropdown replaces the deprecated gr.inputs.Dropdown from older Gradio versions)
inputs = gr.Dropdown(choices=audio_files, label="Select Audio File")
outputs = "text"
title = "Machine Learning Emotion Detection"
description = "Gradio demo for Emotion Detection. To use it, select an audio file from the dropdown and click 'Submit'."
gr.Interface(predict_emotion, inputs, outputs, title=title, description=description).launch()