import gradio as gr
import tensorflow as tf
import cv2
import numpy as np

# Load the saved model (a 9-layer CNN, per the file name)
model = tf.keras.models.load_model('model/cnn_9_layer_model.h5')

# Define the face cascade and emotion labels
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
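
# NOTE: this label order matches the standard FER2013 class indices
# (0=Angry ... 6=Neutral). It is assumed, not verified, that the model
# was trained with the same ordering; if not, predictions will be mislabeled.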

# Detect faces in a frame, classify each one, and draw the results
def predict_emotion(frame):
    if frame is None:  # Gradio can send None before the webcam delivers a frame
        return frame
    # Gradio provides RGB numpy arrays, so convert from RGB (not BGR) to grayscale
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
    for (x, y, w, h) in faces:
        # Crop the face and reshape to the model's expected input: (1, 48, 48, 1)
        face = gray[y:y+h, x:x+w]
        face = cv2.resize(face, (48, 48))
        face = np.expand_dims(face, axis=-1)
        face = np.expand_dims(face, axis=0)
        # NOTE: if the model was trained on inputs scaled to [0, 1], divide by 255.0 here
        prediction = model.predict(face)
        emotion = emotions[np.argmax(prediction)]
        cv2.putText(frame, emotion, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
    return frame
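
# Quick offline sanity check (a minimal sketch; 'sample_face.jpg' is a hypothetical
# local file, and cv2.imread returns BGR, so convert to RGB to match the Gradio path):
# bgr = cv2.imread('sample_face.jpg')
# rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
# annotated = predict_emotion(rgb)
# cv2.imwrite('annotated.jpg', cv2.cvtColor(annotated, cv2.COLOR_RGB2BGR))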

# Local OpenCV capture loop (commented out: kept for desktop testing; it will not
# run on a headless Space, and cap.read() returns BGR frames, unlike the Gradio path)
# cap = cv2.VideoCapture(0)
# while True:
#     ret, frame = cap.read()
#     if ret:
#         frame = predict_emotion(frame)
#         cv2.imshow('Live Facial Emotion Detection', frame)
#     if cv2.waitKey(1) == ord('q'):
#         break
# cap.release()
# cv2.destroyAllWindows()

# Gradio interface: stream webcam frames through predict_emotion.
# The original `source=` keyword and the `gr.outputs` module were removed in
# Gradio 4; `sources=[...]` and plain `gr.Image` are the current API.
input_image = gr.Image(sources=["webcam"], streaming=True, label="Your Face")
# Alternative input (full video clips instead of a live stream):
# video = gr.Video(sources=["webcam"])
output_image = gr.Image(type="numpy", label="Detected Emotion")

iface = gr.Interface(
    fn=predict_emotion,
    inputs=input_image,
    outputs=output_image,
    title="Mood Detectives",
    description="Real-Time Emotion Detection Using Facial Expressions:\nCan our model detect whether you are angry, disgusted, fearful, happy, sad, surprised, or neutral?",
    live=True,
)

iface.launch()
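
# For a temporary public demo link when running locally, launch(share=True)
# could be used instead; on Hugging Face Spaces the plain launch() is sufficient.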