Garvitj committed on
Commit dd436a1 · verified · 1 Parent(s): 6473dcc

Update app.py

Files changed (1): app.py (+276 -7)
app.py CHANGED
@@ -1,13 +1,282 @@
  import gradio as gr
  from huggingface_hub import InferenceClient
+ import gradio as gr
+ import numpy as np
+ import cv2
+ import librosa
+ import moviepy.editor as mp
+ import speech_recognition as sr
+ import tempfile
+ import wave
+ import os
+ import tensorflow as tf
+ from tensorflow.keras.preprocessing.text import tokenizer_from_json
+ from tensorflow.keras.models import load_model, model_from_json
+ from sklearn.preprocessing import StandardScaler
+ from tensorflow.keras.preprocessing.sequence import pad_sequences
+ import nltk
+ nltk.download('stopwords')
+ nltk.download('punkt')
+ nltk.download('punkt_tab')
+ nltk.download('wordnet')
+ from nltk.corpus import stopwords
+ from nltk.stem import WordNetLemmatizer
+ import pickle
+ import json
+ from tensorflow.keras.preprocessing.image import img_to_array, load_img
+ from collections import Counter
+ # Load the text model
+ with open('model_architecture_for_text_emotion_updated_json.json', 'r') as json_file:
+     model_json = json_file.read()
+ text_model = model_from_json(model_json)
+ text_model.load_weights("model_for_text_emotion_updated(1).keras")
+
+ # Load the encoder and scaler for audio
+ with open('encoder.pkl', 'rb') as file:
+     encoder = pickle.load(file)
+ with open('scaler.pkl', 'rb') as file:
+     scaler = pickle.load(file)
+
+ # Load the tokenizer for text
+ with open('tokenizer.json') as json_file:
+     tokenizer_json = json.load(json_file)
+ tokenizer = tokenizer_from_json(tokenizer_json)
+
+ # Load the audio model
+ audio_model = load_model('my_model.h5')
+
+ # Load the image model
+ image_model = load_model('model_emotion.h5')
+
+ # Initialize NLTK
+ lemmatizer = WordNetLemmatizer()
+ stop_words = set(stopwords.words('english'))
+
+ # Preprocess text function
+ def preprocess_text(text):
+     tokens = nltk.word_tokenize(text.lower())
+     tokens = [word for word in tokens if word.isalnum() and word not in stop_words]
+     lemmatized_tokens = [lemmatizer.lemmatize(word) for word in tokens]
+     return ' '.join(lemmatized_tokens)
+
+ # Extract features from audio
+ # Extract features from audio
+ def extract_features(data, sample_rate):
+     result = []
+
+     try:
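+         # Each feature below is averaged over time frames: ZCR, chroma, MFCC, RMS energy, and mel spectrogram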
+         zcr = np.mean(librosa.feature.zero_crossing_rate(y=data).T, axis=0)
+         result.append(zcr)
+
+         stft = np.abs(librosa.stft(data))
+         chroma_stft = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T, axis=0)
+         result.append(chroma_stft)
+
+         mfcc = np.mean(librosa.feature.mfcc(y=data, sr=sample_rate).T, axis=0)
+         result.append(mfcc)
+
+         rms = np.mean(librosa.feature.rms(y=data).T, axis=0)
+         result.append(rms)
+
+         mel = np.mean(librosa.feature.melspectrogram(y=data, sr=sample_rate).T, axis=0)
+         result.append(mel)
+
+         # Ensure all features are numpy arrays
+         result = [np.atleast_1d(feature) for feature in result]
+
+         # Stack features horizontally
+         return np.hstack(result)
+
+     except Exception as e:
+         print(f"Error extracting features: {e}")
+         return np.zeros(1)  # Return a default feature array if extraction fails

+ # Predict emotion from text
+ def find_emotion_using_text(sample_rate, audio_data, recognizer):
+     mapping = {0: "anger", 1: "disgust", 2: "fear", 3: "joy", 4: "neutral", 5: "sadness", 6: "surprise"}
+     with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_audio_file:
+         temp_audio_path = temp_audio_file.name
+
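+     # Write the raw PCM samples to the temporary WAV so SpeechRecognition can transcribe them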
+     with wave.open(temp_audio_path, 'w') as wf:
+         wf.setnchannels(1)
+         wf.setsampwidth(2)
+         wf.setframerate(sample_rate)
+         wf.writeframes(audio_data.tobytes())
+
+     with sr.AudioFile(temp_audio_path) as source:
+         audio_record = recognizer.record(source)
+         text = recognizer.recognize_google(audio_record)
+         pre_text = preprocess_text(text)
+         title_seq = tokenizer.texts_to_sequences([pre_text])
+         padded_title_seq = pad_sequences(title_seq, maxlen=35, padding='post', truncating='post')
+         inp1 = np.array(padded_title_seq)
+         text_prediction = text_model.predict(inp1)
+
+     os.remove(temp_audio_path)
+     max_index = text_prediction.argmax()
+     return mapping[max_index]
+
+ # Predict emotion from audio
+ def predict_emotion(audio_data):
+     sample_rate, data = audio_data
+     data = data.flatten()
+
+     if data.dtype != np.float32:
+         data = data.astype(np.float32)
+         data = data / np.max(np.abs(data))
+
+     features = extract_features(data, sample_rate)
+     features = np.expand_dims(features, axis=0)
+
+     if features.ndim == 3:
+         features = np.squeeze(features, axis=2)
+     elif features.ndim != 2:
+         raise ValueError("Features array has unexpected dimensions.")
+
+     scaled_features = scaler.transform(features)
+     scaled_features = np.expand_dims(scaled_features, axis=2)
+
+     prediction = audio_model.predict(scaled_features)
+     emotion_index = np.argmax(prediction)
+
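+     # One-hot encode the predicted index so the fitted encoder can invert it back to an emotion label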
+     num_classes = len(encoder.categories_[0])
+     emotion_array = np.zeros((1, num_classes))
+     emotion_array[0, emotion_index] = 1
+
+     emotion_label = encoder.inverse_transform(emotion_array)[0]
+     return emotion_label
+
+ # Preprocess image
+ def preprocess_image(image):
+     image = load_img(image, target_size=(48, 48), color_mode="grayscale")
+     image = img_to_array(image)
+     image = np.expand_dims(image, axis=0)
+     image = image / 255.0
+     return image
+
+ # Predict emotion from image
+ def predict_emotion_from_image(image):
+     preprocessed_image = preprocess_image(image)
+     prediction = image_model.predict(preprocessed_image)
+     emotion_index = np.argmax(prediction)
+
+     mapping = {0: "anger", 1: "disgust", 2: "fear", 3: "joy", 4: "neutral", 5: "sadness", 6: "surprise"}
+     return mapping[emotion_index]
+
+ # Main function to handle text, audio, and image emotion recognition
+ # Load the models and other necessary files (as before)
+
+ # Preprocess image (as before)
+
+ # Predict emotion from image (as before)
+
+ # Extract features from audio (as before)
+
+ # Predict emotion from text (as before)
+
+ # Predict emotion from audio (as before)
+
+ def process_video(video_path):
+     cap = cv2.VideoCapture(video_path)
+     frame_rate = cap.get(cv2.CAP_PROP_FPS)
+
+     frame_count = 0
+     predictions = []
+
+     while cap.isOpened():
+         ret, frame = cap.read()
+         if not ret:
+             break
+         # Process every nth frame (to speed up processing)
+         if frame_count % int(frame_rate) == 0:
+             # Convert frame to grayscale as required by your model
+             frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+             frame = cv2.resize(frame, (48, 48))  # Resize to match model input size
+             frame = img_to_array(frame)
+             frame = np.expand_dims(frame, axis=0) / 255.0
+
+             # Predict emotion
+             prediction = image_model.predict(frame)
+             predictions.append(np.argmax(prediction))
+
+         frame_count += 1
+
+     cap.release()
+     # cv2.destroyAllWindows()
+
+     # Find the most common prediction
+     most_common_emotion = Counter(predictions).most_common(1)[0][0]
+     mapping = {0: "anger", 1: "disgust", 2: "fear", 3: "joy", 4: "neutral", 5: "sadness", 6: "surprise"}
+     return mapping[most_common_emotion]
+
+ # Process audio from video and predict emotions
+ def process_audio_from_video(video_path):
+     video = mp.VideoFileClip(video_path)
+     audio = video.audio
+     with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_audio_file:
+         temp_audio_path = temp_audio_file.name
+         audio.write_audiofile(temp_audio_path)
+
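+     # Transcribe the extracted audio track and run the text-emotion model on the transcript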
+     recognizer = sr.Recognizer()
+     with sr.AudioFile(temp_audio_path) as source:
+         audio_record = recognizer.record(source)
+         text = recognizer.recognize_google(audio_record)
+         pre_text = preprocess_text(text)
+         title_seq = tokenizer.texts_to_sequences([pre_text])
+         padded_title_seq = pad_sequences(title_seq, maxlen=35, padding='post', truncating='post')
+         inp1 = np.array(padded_title_seq)
+         text_prediction = text_model.predict(inp1)
+
+     os.remove(temp_audio_path)
+
+     max_index = text_prediction.argmax()
+     text_emotion = {0: "anger", 1: "disgust", 2: "fear", 3: "joy", 4: "neutral", 5: "sadness", 6: "surprise"}[max_index]
+
+     audio_emotion = predict_emotion((audio.fps, np.array(audio.to_soundarray())))
+
+     return text_emotion, audio_emotion, text
+
+ # Main function to handle video emotion recognition
+ def transcribe_and_predict_video(video):
+     """
+     Process video for emotion detection (image, audio, text) and transcription.
+     (Replace process_video & process_audio_from_video with actual implementations)
+     """
+     image_emotion = process_video(video)  # Emotion from video frames
+     print("Image processing done.")
+
+     text_emotion, audio_emotion, extracted_text = process_audio_from_video(video)  # Speech-to-text + emotions
+     print("Audio processing done.")
+
+     return {
+         "text_emotion": text_emotion,
+         "audio_emotion": audio_emotion,
+         "image_emotion": image_emotion,
+         "extracted_text": extracted_text,
+     }
+
- # Using Zephyr-7B Beta
+ # Load Zephyr-7B Model
  MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"
  client = InferenceClient(MODEL_NAME)

- def respond(message, history, system_message, max_tokens, temperature, top_p):
-     messages = [{"role": "system", "content": system_message}]
+ # Chatbot response function
+ def respond(video, history, system_message, max_tokens, temperature, top_p):
+     video_path = video.name  # Get the uploaded video file path
+
+     # Process the video for emotions & text
+     result = transcribe_and_predict_video(video_path)

+     # Construct a system prompt with extracted emotions & text
+     system_prompt = (
+         f"{system_message}\n\n"
+         f"Detected Emotions:\n"
+         f"- Text Emotion: {result['text_emotion']}\n"
+         f"- Audio Emotion: {result['audio_emotion']}\n"
+         f"- Image Emotion: {result['image_emotion']}\n\n"
+         f"Extracted Speech: {result['extracted_text']}"
+     )
+
+     messages = [{"role": "system", "content": system_prompt}]

      for val in history:
          if val[0]:
@@ -15,7 +284,7 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
          if val[1]:
              messages.append({"role": "assistant", "content": val[1]})

-     messages.append({"role": "user", "content": message})
+     messages.append({"role": "user", "content": result['extracted_text']})

      response = ""

@@ -33,12 +302,12 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
      except Exception as e:
          yield f"Error: {str(e)}"

-
- # Gradio UI with adjustable settings
+ # Gradio UI for video chatbot
  demo = gr.ChatInterface(
      respond,
      additional_inputs=[
-         gr.Textbox(value="You are a friendly chatbot.", label="System message"),
+         gr.Video(label="Upload a Video"),  # Video input
+         gr.Textbox(value="You are a chatbot that analyzes emotions and responds accordingly.", label="System message"),
          gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max Tokens"),
          gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
          gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),