pragnakalp committed
Commit · 339957b · Parent(s): 184a6db
Upload app.py

app.py ADDED
@@ -0,0 +1,183 @@
from __future__ import absolute_import, division, print_function, unicode_literals

from flask import Flask, make_response, render_template, request, jsonify, redirect, url_for, send_from_directory
from flask_cors import CORS
import sys
import os
import librosa
import librosa.display
import numpy as np
from datetime import date
import re
import json
import email
import csv
import datetime
import smtplib
import ssl
from email.mime.text import MIMEText
import time
import pytz
import requests
import pyaudio
import wave
import shutil
import warnings
import tensorflow as tf
import gradio as gr
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
from keras.layers import Flatten, Dropout, Activation
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import BatchNormalization
from sklearn.model_selection import train_test_split
from tqdm import tqdm

warnings.filterwarnings("ignore")

timestamp = datetime.datetime.now()
current_date = timestamp.strftime('%d-%m-%Y')
current_time = timestamp.strftime('%I:%M:%S')
IP = ''
cwd = os.getcwd()


# CNN used for speech-emotion classification; the architecture must match
# the one the pre-trained weights were saved from.
classLabels = ('Angry', 'Fear', 'Disgust', 'Happy', 'Sad', 'Surprised', 'Neutral')
numLabels = len(classLabels)
in_shape = (39, 216)  # 39 MFCC coefficients x 216 frames
model = Sequential()

model.add(Conv2D(8, (13, 13), input_shape=(in_shape[0], in_shape[1], 1)))
model.add(BatchNormalization(axis=-1))
model.add(Activation('relu'))
model.add(Conv2D(8, (13, 13)))
model.add(BatchNormalization(axis=-1))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 1)))
model.add(Conv2D(8, (3, 3)))
model.add(BatchNormalization(axis=-1))
model.add(Activation('relu'))
model.add(Conv2D(8, (1, 1)))
model.add(BatchNormalization(axis=-1))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 1)))
model.add(Flatten())
model.add(Dense(64))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.2))

model.add(Dense(numLabels, activation='softmax'))
model.compile(loss='binary_crossentropy', optimizer='adam',
              metrics=['accuracy'])
# print(model.summary(), file=sys.stderr)

model.load_weights('speech_emotion_detection_ravdess_savee.h5')

# app = Flask(__name__)

# app._static_folder = os.path.join("/home/ubuntu/Desktop/nlpdemos/server_demos/speech_emotion/static")


def selected_audio(audio):
    # Predict the emotion of one of the bundled pre-recorded clips.
    if audio and audio != 'Please select any of the following options':
        post_file_name = audio.lower() + '.wav'

        filepath = os.path.join("pre_recoreded", post_file_name)
        if os.path.exists(filepath):
            print("SELECT file name => ", filepath)
            result = predict_speech_emotion(filepath)
            print("result = ", result)

            return result


def recorded_audio(audio):
    # Predict the emotion of a clip recorded through the microphone widget.
    try:
        fileList = os.listdir('recorded_audio')
        new_wav_file = ""

        if fileList:
            filename_list = []

            for i in fileList:
                filename = i.split('.')[0]
                filename_list.append(int(filename))

            max_file = max(filename_list)
            new_wav_file = int(max_file) + 1
        else:
            new_wav_file = "1"

        new_wav_file = str(new_wav_file) + ".wav"

        # filepath = os.path.join('recorded_audio', new_wav_file)
        # shutil.move(recorded_audio, filepath)
        filepath = 'recorded_audio/22.wav'
        result = predict_speech_emotion(audio.name)
        return result
    except Exception as e:
        print(e)
        return "ERROR"


def predict_speech_emotion(filepath):
    # Extract MFCC features from the clip and run them through the CNN.
    if os.path.exists(filepath):
        print("last file name => ", filepath)
        X, sample_rate = librosa.load(filepath, res_type='kaiser_best', duration=2.5, sr=22050 * 2, offset=0.5)
        sample_rate = np.array(sample_rate)
        mfccs = librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=39)
        feature = mfccs
        feature = feature.reshape(39, 216, 1)
        # np_array = np.array([feature])
        np_array = np.array([feature])
        prediction = model.predict(np_array)
        np_argmax = np.argmax(prediction)
        result = classLabels[np_argmax]
        return result


# demo = gr.Interface(
#     fn=send_audio,
#     inputs=gr.Audio(source="microphone", type="filepath"),
#     outputs="text")

# demo.launch()

# selected_audio = gr.Dropdown(["Angry", "Happy", "Sad", "Disgust", "Fear", "Surprise", "Neutral"],
#                              label="Input Audio")
# audio_ui = gr.Audio()
# text = gr.Textbox()
# demo = gr.Interface(
#     fn=send_audio,
#     inputs=selected_audio,
#     outputs=[audio_ui, text])

# demo.launch()


def return_audio_clip(audio_text):
    # Map a dropdown label to the matching pre-recorded clip so it can be played back.
    post_file_name = audio_text.lower() + '.wav'
    filepath = os.path.join("pre_recoreded", post_file_name)
    return filepath


with gr.Blocks() as demo:
    gr.Markdown("Select audio or record audio")
    with gr.Row():
        with gr.Column():
            input_audio_text = gr.Dropdown(["Please select any of the following options", "Angry", "Happy", "Sad", "Disgust", "Fear", "Surprise", "Neutral"],
                                           label="Input Audio", interactive=True)
            audio_ui = gr.Audio()
            input_audio_text.change(return_audio_clip, input_audio_text, audio_ui)
            output_text = gr.Textbox(label="Predicted emotion")
            sub_btn = gr.Button("Submit")

        with gr.Column():
            audio = gr.Audio(source="microphone", type="file", label="Recorded audio")
            recorded_text = gr.Textbox(label="Predicted emotion")
            with gr.Column():
                sub_btn2 = gr.Button("Submit")

    sub_btn.click(selected_audio, inputs=input_audio_text, outputs=output_text)
    sub_btn2.click(recorded_audio, inputs=audio, outputs=recorded_text)

demo.launch()
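
For reference, a minimal sketch of where in_shape = (39, 216) comes from: with the load parameters used in predict_speech_emotion (2.5 s at 22050 * 2 = 44100 Hz) and librosa's default hop_length of 512, a clip yields 216 MFCC frames. The clip path below is a hypothetical example; any WAV of at least 3.0 s (0.5 s offset + 2.5 s duration) would behave the same.

import librosa

# Hypothetical clip path, used only to illustrate the feature shape.
clip = "pre_recoreded/happy.wav"

# Same parameters as predict_speech_emotion().
X, sr = librosa.load(clip, res_type='kaiser_best', duration=2.5, sr=22050 * 2, offset=0.5)

# 2.5 s * 44100 Hz = 110250 samples; with the default hop_length of 512,
# librosa produces 1 + 110250 // 512 = 216 frames.
mfccs = librosa.feature.mfcc(y=X, sr=sr, n_mfcc=39)
print(mfccs.shape)  # expected: (39, 216)

Shorter clips produce fewer frames, in which case the reshape to (39, 216, 1) in predict_speech_emotion would fail.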