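"""Facial emotion recognition over images.

Detects faces with an OpenCV Haar cascade, classifies each face with a
mini-XCEPTION model trained on FER2013, and reports the most frequent
emotion across the processed images.
"""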
import cv2
import numpy as np
from keras.models import load_model

from utils.datasets import get_labels
from utils.inference import detect_faces, apply_offsets, load_detection_model, load_image
from utils.preprocessor import preprocess_input
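
# NOTE: the `utils` package above is assumed to be the helper module from the
# open-source face_classification project (oarriaga/face_classification),
# which provides the Haar-cascade wrapper and the preprocessing helpers.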


def most_frequent(items):
    """Return the most common element of `items`, or None if it is empty."""
    return max(set(items), key=items.count) if items else None


def get_most_frequent_emotion(results):
    """Flatten the per-frame, per-face results and return the most common emotion."""
    emotions = []
    for faces in results.values():
        for face in faces.values():
            emotions.append(face['emotion'])
    return most_frequent(emotions)
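

# For illustration, the nested dict built by process() below maps each image
# path to a per-face record, e.g. (made-up values):
# {'img1.jpg': {(112, 232, 64, 64): {'emotion': 'happy', 'score': 0.87}}}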
def process(imagePaths, output_filename='data/output/results.txt'):
    """Classify the emotion of every detected face in `imagePaths`.

    Writes a per-face summary to `output_filename` and returns the nested
    results dict together with the most frequent emotion overall.
    """
    detection_model_path = 'detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = 'emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    emotion_labels = get_labels('fer2013')
    emotion_offsets = (0, 0)
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    # Input size expected by the classifier, e.g. (64, 64) for mini-XCEPTION.
    emotion_target_size = emotion_classifier.input_shape[1:3]

    output = {}
    for image_path in imagePaths:
        # The Haar cascade and the classifier both operate on grayscale images.
        gray_image = load_image(image_path, grayscale=True)
        gray_image = np.squeeze(gray_image).astype('uint8')
        faces = detect_faces(face_detection, gray_image)

        tmp = {}
        for face_coordinates in faces:
            face_key = tuple(face_coordinates)
            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                gray_face = cv2.resize(gray_face, emotion_target_size)
            except cv2.error:
                # Skip faces whose crop is empty or otherwise unresizable.
                continue

            # Scale to [-1, 1] and add batch and channel axes: (1, H, W, 1).
            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)

            emotion_prediction = emotion_classifier.predict(gray_face)
            emotion_label_arg = np.argmax(emotion_prediction)
            emotion_text = emotion_labels[emotion_label_arg]
            tmp[face_key] = {'emotion': emotion_text,
                             'score': float(np.max(emotion_prediction))}
        output[image_path] = tmp

    # Save results to a text file: one line per image, then one per face.
    with open(output_filename, 'w') as file:
        for image_path, faces_info in output.items():
            file.write(f"{image_path}\n")
            for face_key, info in faces_info.items():
                file.write(f"  {face_key}: {info}\n")

    most_frequent_emotion = get_most_frequent_emotion(output)
    return output, most_frequent_emotion
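

# Illustrative snippet of the generated results file (values are made up):
#   data/input/frame_001.jpg
#     (112, 232, 64, 64): {'emotion': 'happy', 'score': 0.87}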


def process_single_image(image):
    """Process a single BGR image: box every face and label its emotion."""
    detection_model_path = 'detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = 'emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    emotion_labels = get_labels('fer2013')
    emotion_offsets = (0, 0)
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    emotion_target_size = emotion_classifier.input_shape[1:3]

    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_prediction = emotion_classifier.predict(gray_face)
        emotion_label_arg = np.argmax(emotion_prediction)
        emotion_text = emotion_labels[emotion_label_arg]

        # Draw a rectangle around the face, labelled with the predicted emotion.
        x, y, w, h = face_coordinates
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(image, emotion_text, (x, y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
    return image
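

# Example usage (hypothetical paths): annotate one frame and save the result.
#   frame = cv2.imread('data/input/frame.jpg')
#   annotated = process_single_image(frame)
#   cv2.imwrite('data/output/frame_annotated.jpg', annotated)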


if __name__ == "__main__":
    # Quick smoke test; replace the placeholder paths with real images.
    test_image_paths = ['path_to_test_image1.jpg', 'path_to_test_image2.jpg']
    output, most_frequent_emotion = process(test_image_paths)
    print(f"Most frequent emotion: {most_frequent_emotion}")
    for key, value in output.items():
        print(f"Image: {key}")
        print(value)