Upload 3 files
- Trained_Model.h5 +3 -0
- app.py +53 -0
- requirements.txt +6 -0
Trained_Model.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7550046da07e357f41c5fc35788c86a057d80db494bb44b4ad86cf1cdd1db0d
+size 63471736
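Trained_Model.h5 is tracked with Git LFS, so the commit stores only this three-line pointer (spec version, SHA-256 object ID, and size in bytes, roughly 63 MB) rather than the weights themselves. As an illustration of the pointer format only (the helper below is not part of the commit), the fields could be read back like this:

# Illustrative sketch: parse a Git LFS pointer file into its key/value fields.
def parse_lfs_pointer(path):
    fields = {}
    with open(path) as f:
        for line in f:
            if not line.strip():
                continue
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

# For this commit, parse_lfs_pointer("Trained_Model.h5") yields
# {'version': 'https://git-lfs.github.com/spec/v1',
#  'oid': 'sha256:f7550046da07e357f41c5fc35788c86a057d80db494bb44b4ad86cf1cdd1db0d',
#  'size': '63471736'}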
app.py
ADDED
@@ -0,0 +1,53 @@
+import gradio as gr
+import cv2
+import numpy as np
+import tensorflow as tf
+
+# Load the pre-trained emotion classification model.
+model = tf.keras.models.load_model('Trained_Model.h5')
+
+# Emotion labels, indexed by the model's output class.
+emotion_labels = {
+    0: 'Angry',
+    1: 'Disgust',
+    2: 'Fear',
+    3: 'Happy',
+    4: 'Neutral',
+    5: 'Sad',
+    6: 'Surprise'
+}
+
+# Predict the emotion in an uploaded image or video (first frame only).
+def predict_emotion(file):
+    # gr.File may hand the function a path string or a wrapper with .name.
+    path = file if isinstance(file, str) else file.name
+    # cv2.VideoCapture reads both videos and still images.
+    cap = cv2.VideoCapture(path)
+    if not cap.isOpened():
+        return "Could not open the file"
+    ret, frame = cap.read()
+    cap.release()
+    if not ret or frame is None:
+        return "No frames found in the file"
+    # Preprocess: grayscale, resize to the model's 48x48 input, scale to [0, 1].
+    img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+    img = cv2.resize(img, (48, 48))
+    img = img.astype('float32') / 255.0
+    img = img.reshape(1, 48, 48, 1)
+    # Predict the emotion and map the most likely class to its label.
+    prediction = model.predict(img)
+    return emotion_labels[int(np.argmax(prediction))]
+
+# Create the Gradio interface. gr.inputs.File and gr.outputs.Textbox are
+# deprecated; the top-level gr.File and gr.Textbox components replace them.
+iface = gr.Interface(
+    fn=predict_emotion,
+    inputs=gr.File(label="Upload an image or video to predict emotions"),
+    outputs=gr.Textbox(label="Predicted emotion"),
+    title="Emotion Detection",
+    description="Upload an image or video to predict the corresponding emotion",
+)
+
+if __name__ == '__main__':
+    iface.launch(inline=False)
+
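Because the Gradio launch sits behind the __main__ guard, predict_emotion can also be smoke-tested outside the UI. A minimal sketch, assuming Trained_Model.h5 has been pulled from LFS (git lfs pull) and that a sample file exists locally; the filename test_face.jpg is hypothetical:

# Illustrative smoke test; importing app loads the model but does not launch the UI.
from app import predict_emotion

print(predict_emotion("test_face.jpg"))  # prints one of the seven labels, e.g. 'Happy'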
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+tensorflow
+keras
+opencv-python
+numpy
+matplotlib
+gradio
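All six requirements are unpinned, so versions resolve to whatever pip finds at build time; note that keras ships inside tensorflow and matplotlib is never imported by app.py, so both are strictly optional here. A quick import check after pip install -r requirements.txt (a sketch; opencv-python imports under the name cv2):

# Illustrative environment check for the dependencies listed above.
import importlib

for module in ("tensorflow", "keras", "cv2", "numpy", "matplotlib", "gradio"):
    importlib.import_module(module)
    print(module, "OK")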