AhmedIbrahim007 committed on
Commit
8f412ba
1 Parent(s): b4cf5d6

Upload 36 files

.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ test-video.mp4 filter=lfs diff=lfs merge=lfs -text
+ test.png filter=lfs diff=lfs merge=lfs -text
.idea/.gitignore ADDED
@@ -0,0 +1,8 @@
+ # Default ignored files
+ /shelf/
+ /workspace.xml
+ # Editor-based HTTP Client requests
+ /httpRequests/
+ # Datasource local storage ignored files
+ /dataSources/
+ /dataSources.local.xml
.idea/inspectionProfiles/Project_Default.xml ADDED
@@ -0,0 +1,22 @@
+ <component name="InspectionProjectProfileManager">
+   <profile version="1.0">
+     <option name="myName" value="Project Default" />
+     <inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
+       <option name="ignoredPackages">
+         <value>
+           <list size="9">
+             <item index="0" class="java.lang.String" itemvalue="tqdm" />
+             <item index="1" class="java.lang.String" itemvalue="pandas" />
+             <item index="2" class="java.lang.String" itemvalue="tensorboard" />
+             <item index="3" class="java.lang.String" itemvalue="seaborn" />
+             <item index="4" class="java.lang.String" itemvalue="tensorboardX" />
+             <item index="5" class="java.lang.String" itemvalue="sklearn" />
+             <item index="6" class="java.lang.String" itemvalue="torch" />
+             <item index="7" class="java.lang.String" itemvalue="numpy" />
+             <item index="8" class="java.lang.String" itemvalue="torchvision" />
+           </list>
+         </value>
+       </option>
+     </inspection_tool>
+   </profile>
+ </component>
.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
+ <component name="InspectionProjectProfileManager">
+   <settings>
+     <option name="USE_PROJECT_PROFILE" value="false" />
+     <version value="1.0" />
+   </settings>
+ </component>
.idea/misc.xml ADDED
@@ -0,0 +1,7 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="Black">
+     <option name="sdkName" value="Python 3.10 (src)" />
+   </component>
+   <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.10 (src)" project-jdk-type="Python SDK" />
+ </project>
.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="ProjectModuleManager">
+     <modules>
+       <module fileurl="file://$PROJECT_DIR$/.idea/src.iml" filepath="$PROJECT_DIR$/.idea/src.iml" />
+     </modules>
+   </component>
+ </project>
.idea/src.iml ADDED
@@ -0,0 +1,14 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <module type="PYTHON_MODULE" version="4">
+   <component name="NewModuleRootManager">
+     <content url="file://$MODULE_DIR$">
+       <excludeFolder url="file://$MODULE_DIR$/venv" />
+     </content>
+     <orderEntry type="inheritedJdk" />
+     <orderEntry type="sourceFolder" forTests="false" />
+   </component>
+   <component name="PyDocumentationSettings">
+     <option name="format" value="PLAIN" />
+     <option name="myDocStringFormat" value="Plain" />
+   </component>
+ </module>
.idea/workspace.xml ADDED
@@ -0,0 +1,59 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="AutoImportSettings">
+     <option name="autoReloadType" value="SELECTIVE" />
+   </component>
+   <component name="ChangeListManager">
+     <list default="true" id="5d6e565a-1460-4a7f-8f74-e2b5df1c6b88" name="Changes" comment="" />
+     <option name="SHOW_DIALOG" value="false" />
+     <option name="HIGHLIGHT_CONFLICTS" value="true" />
+     <option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
+     <option name="LAST_RESOLUTION" value="IGNORE" />
+   </component>
+   <component name="ProjectColorInfo">{
+     &quot;associatedIndex&quot;: 1
+   }</component>
+   <component name="ProjectId" id="2lZgqQ9N8xKvJ5dtQsXYgjs4WBz" />
+   <component name="ProjectViewState">
+     <option name="hideEmptyMiddlePackages" value="true" />
+     <option name="showLibraryContents" value="true" />
+   </component>
+   <component name="PropertiesComponent"><![CDATA[{
+   "keyToString": {
+     "RunOnceActivity.ShowReadmeOnStart": "true",
+     "dart.analysis.tool.window.visible": "false",
+     "last_opened_file_path": "D:/collage/gp/ml/future-forge/emotion-detection/face_classification/src",
+     "node.js.detected.package.eslint": "true",
+     "node.js.detected.package.tslint": "true",
+     "node.js.selected.package.eslint": "(autodetect)",
+     "node.js.selected.package.tslint": "(autodetect)",
+     "nodejs_package_manager_path": "npm",
+     "settings.editor.selected.configurable": "com.jetbrains.python.configuration.PyActiveSdkModuleConfigurable",
+     "vue.rearranger.settings.migration": "true"
+   }
+ }]]></component>
+   <component name="SharedIndexes">
+     <attachedChunks>
+       <set>
+         <option value="bundled-js-predefined-d6986cc7102b-7c0b70fcd90d-JavaScript-PY-242.21829.153" />
+         <option value="bundled-python-sdk-464836ebc622-b74155a9e76b-com.jetbrains.pycharm.pro.sharedIndexes.bundled-PY-242.21829.153" />
+       </set>
+     </attachedChunks>
+   </component>
+   <component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="application-level" UseSingleDictionary="true" transferred="true" />
+   <component name="TaskManager">
+     <task active="true" id="Default" summary="Default task">
+       <changelist id="5d6e565a-1460-4a7f-8f74-e2b5df1c6b88" name="Changes" comment="" />
+       <created>1725393203470</created>
+       <option name="number" value="Default" />
+       <option name="presentableId" value="Default" />
+       <updated>1725393203470</updated>
+       <workItem from="1725393204716" duration="916000" />
+       <workItem from="1725394369496" duration="321000" />
+     </task>
+     <servers />
+   </component>
+   <component name="TypeScriptGeneratedFilesManager">
+     <option name="version" value="3" />
+   </component>
+ </project>
REQUIREMENTS.txt ADDED
@@ -0,0 +1,12 @@
+ keras==2.10.0
+ tensorflow
+ pandas==1.4.4
+ numpy==1.23.3
+ h5py==3.7.0
+ statistics==1.0.3.5
+ opencv-python==4.6.0.66
+ scipy==1.9.1
+ matplotlib==3.5.3
+ imageio==2.21.3
+ scikit-image==0.19.3
+ gradio==3.50.2
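A quick way to confirm that these pins resolved as intended after installation is a short import check. This is only a sketch and assumes the packages above installed cleanly; keras is pinned to 2.10.0 while tensorflow is left unpinned, so it is worth verifying that a compatible TensorFlow build was selected.

import cv2
import keras
import numpy as np
import tensorflow as tf

# Report the versions that actually got installed.
print("tensorflow:", tf.__version__)
print("keras:", keras.__version__)
print("numpy:", np.__version__)
print("opencv-python:", cv2.__version__)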
__pycache__/extract_frames.cpython-310.pyc ADDED
Binary file (897 Bytes). View file
 
__pycache__/extract_frames.cpython-311.pyc ADDED
Binary file (1.57 kB). View file
 
__pycache__/extract_frames.cpython-312.pyc ADDED
Binary file (1.37 kB). View file
 
__pycache__/get_every_fram_path.cpython-310.pyc ADDED
Binary file (497 Bytes). View file
 
__pycache__/get_every_fram_path.cpython-311.pyc ADDED
Binary file (799 Bytes). View file
 
__pycache__/get_every_fram_path.cpython-312.pyc ADDED
Binary file (729 Bytes). View file
 
__pycache__/grapher.cpython-310.pyc ADDED
Binary file (3.1 kB). View file
 
__pycache__/grapher.cpython-311.pyc ADDED
Binary file (6.59 kB). View file
 
__pycache__/main_emotion_classifier.cpython-310.pyc ADDED
Binary file (3.32 kB). View file
 
__pycache__/main_emotion_classifier.cpython-311.pyc ADDED
Binary file (6.5 kB). View file
 
app.py ADDED
@@ -0,0 +1,61 @@
+ import gradio as gr
+ import cv2
+ from extract_frames import ExtractFrames
+ from get_every_fram_path import getEveryFramPath
+ from main_emotion_classifier import process, process_single_image
+ from grapher import createGraph
+
+ def process_image(image):
+     # Process the image using your existing function
+     processed_image = process_single_image(image)
+     return processed_image
+
+ def process_video(video_path):
+     # Extract frames from the video and process them
+     output_dir = ExtractFrames(video_path)
+     frame_paths = getEveryFramPath(output_dir)
+     results, most_frequent_emotion = process(frame_paths)
+
+     # Create the emotion graphs from the results
+     createGraph('data/output/results.txt')
+
+     # Return paths to the three generated graphs
+     return [
+         'data/output/emotion_bar_plot.png',
+         'data/output/emotion_stem_plot.png',
+         'data/output/emotionAVG.png'
+     ]
+
+
+ def gradio_interface(file):
+     if file is None:
+         return None, None
+
+     file_type = file.name.split('.')[-1].lower()
+
+     if file_type in ['jpg', 'jpeg', 'png', 'bmp']:  # Image input
+         image = cv2.imread(file.name)
+         image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+         processed_image = process_image(image)
+         return processed_image, None
+     elif file_type in ['mp4', 'avi', 'mov', 'wmv']:  # Video input
+         graph_paths = process_video(file.name)
+         return None, graph_paths
+     else:
+         return None, None
+
+ # Set up the Gradio Interface
+ iface = gr.Interface(
+     fn=gradio_interface,
+     inputs=gr.File(label="Upload Image or Video"),
+     outputs=[
+         gr.Image(type="numpy", label="Processed Image (for image uploads)"),
+         gr.Gallery(label="Emotion Distribution Graphs (for video uploads)", columns=3)
+     ],
+     title="Face Emotion Recognition",
+     description="Upload an image or video to analyze emotions. For images, the result will show detected faces with emotions. For videos, it will provide graphs of emotion distribution."
+ )
+
+ # Launch the Gradio interface
+ if __name__ == "__main__":
+     iface.launch()
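For reference, the same pipeline can be exercised without the Gradio UI. This is a minimal sketch, assuming the repository modules are importable from the project root and the bundled detection and emotion model files are in place; the output filename is illustrative only.

import cv2
from main_emotion_classifier import process_single_image

# Mirror what gradio_interface does for image uploads: read with OpenCV (BGR),
# convert to RGB, then run face detection + emotion classification.
image = cv2.imread("test.png")
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
annotated = process_single_image(image)

# Convert back to BGR before writing, since cv2.imwrite expects BGR data.
cv2.imwrite("annotated.png", cv2.cvtColor(annotated, cv2.COLOR_RGB2BGR))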
detection_models/haarcascade_frontalface_default.xml ADDED
The diff for this file is too large to render. See raw diff
 
emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5 ADDED
Binary file (873 kB). View file
 
extract_frames.py ADDED
@@ -0,0 +1,49 @@
+ import cv2
+ import os
+
+ def ExtractFrames(videoPath):
+     # Path to the video file
+     video_path = videoPath
+
+     # Directory to save the frames
+     output_dir = 'data/output/extracted-frames'
+     os.makedirs(output_dir, exist_ok=True)
+
+     # Open the video file
+     video_capture = cv2.VideoCapture(video_path)
+
+     # Check if the video was opened successfully
+     if not video_capture.isOpened():
+         print(f"Error: Could not open video file {video_path}")
+     else:
+         print(f"Successfully opened video file {video_path}")
+
+     # Frame index
+     frame_index = 0
+
+     while True:
+         # Read the next frame from the video
+         success, frame = video_capture.read()
+
+         # If there are no more frames, break the loop
+         if not success:
+             print("No more frames to read or an error occurred.")
+             break
+
+         # Construct the filename for the frame
+         frame_filename = os.path.join(output_dir, f'frame_{frame_index:04d}.png')
+
+         # Save the current frame as an image file
+         cv2.imwrite(frame_filename, frame)
+
+         # Print the saved frame information
+         print(f'Saved {frame_filename}')
+
+         # Increment the frame index
+         frame_index += 1
+
+     # Release the video capture object
+     video_capture.release()
+     return output_dir
+
+     print('All frames extracted.')
get_every_fram_path.py ADDED
@@ -0,0 +1,15 @@
+ import os
+
+ def getEveryFramPath(directoryPath):
+     # Define the directory path
+     directory_path = directoryPath
+
+     # Get the list of all file paths
+     file_paths = []
+     for root, directories, files in os.walk(directory_path):
+         for filename in files:
+             file_paths.append(os.path.join(root, filename))
+
+     # Print the list of file paths
+     print(file_paths)
+     return file_paths
grapher.py ADDED
@@ -0,0 +1,107 @@
+ import matplotlib.pyplot as plt
+ import re
+ import numpy as np
+ from collections import defaultdict
+
+
+ def createGraph(resultPath):
+     # Initialize lists to store emotions sequentially
+     emotions_list = []
+     emotion_counts = defaultdict(int)
+     emotion_scores = defaultdict(float)
+
+     # Define a regular expression pattern to match the emotion and score
+     pattern = re.compile(r"'emotion': '(\w+)', 'score': ([\d\.]+)")
+
+     # Read and parse the file
+     with open(resultPath, 'r') as file:
+         for line in file:
+             match = pattern.search(line)
+             if match:
+                 emotion = match.group(1)
+                 score = float(match.group(2))
+                 emotions_list.append(emotion)
+                 emotion_counts[emotion] += 1
+                 emotion_scores[emotion] += score
+             else:
+                 print(f"Skipping malformed line: {line.strip()}")
+
+     # Group emotions into positive, neutral, and negative categories
+     emotion_group_map = {
+         'happy': 'Positive',
+         'surprise': 'Positive',  # Positive emotions
+         'neutral': 'Neutral',    # Neutral emotions
+         'sad': 'Negative',
+         'angry': 'Negative',
+         'fear': 'Negative',
+         'disgust': 'Negative'    # Negative emotions
+     }
+
+     # Aggregate counts for positive, neutral, and negative categories
+     grouped_counts = defaultdict(int)
+     for emotion in emotions_list:
+         grouped_counts[emotion_group_map[emotion]] += 1
+
+     # Define the categories and their counts
+     categories = ['Positive', 'Neutral', 'Negative']
+     counts = [grouped_counts[category] for category in categories]
+
+     # **Figure 1: Bar Chart for Positive, Neutral, and Negative Emotions**
+     plt.figure(figsize=(8, 6))
+     plt.bar(categories, counts, color=['green', 'grey', 'red'], alpha=0.7)
+     plt.xlabel('Emotion Group')
+     plt.ylabel('Count')
+     plt.title('Distribution of Positive, Neutral, and Negative Emotions')
+     plt.grid(True)
+
+     # Save the bar plot
+     plt.savefig('data/output/emotion_bar_plot.png')
+
+     # **Figure 2: Stem Plot (unchanged from previous version)**
+     emotion_values = [1 if emotion_group_map[e] == 'Positive' else
+                       0 if emotion_group_map[e] == 'Neutral' else
+                       -1 for e in emotions_list]
+
+     # Prepare x-axis values (line numbers)
+     line_numbers = np.arange(1, len(emotion_values) + 1)
+
+     plt.figure(figsize=(12, 6))
+     plt.plot(line_numbers, emotion_values, color='blue', linewidth=2, linestyle='-', marker='o', alpha=0.7)
+
+     for i, emotion in enumerate(emotions_list):
+         if emotion_group_map[emotion] == 'Positive':
+             plt.axvspan(i + 0.5, i + 1.5, color='green', alpha=0.3)
+         elif emotion_group_map[emotion] == 'Neutral':
+             plt.axvspan(i + 0.5, i + 1.5, color='grey', alpha=0.3)
+         elif emotion_group_map[emotion] == 'Negative':
+             plt.axvspan(i + 0.5, i + 1.5, color='red', alpha=0.3)
+
+     plt.xlabel('Line Number')
+     plt.ylabel('Emotion Group')
+     plt.title('Emotion Type Across the File (Stem Plot)')
+     plt.ylim([-2, 2])
+     plt.grid(True)
+     plt.yticks([-1, 0, 1], ['Negative', 'Neutral', 'Positive'])
+
+     # Save the stem plot
+     plt.savefig('data/output/emotion_stem_plot.png')
+
+     # **Figure 3: Combination Bar and Line Chart for Counts and Scores (unchanged)**
+     emotions = list(emotion_counts.keys())
+     counts = [emotion_counts[e] for e in emotions]
+     average_scores = [emotion_scores[e] / emotion_counts[e] for e in emotions]
+
+     fig, ax1 = plt.subplots()
+
+     ax1.bar(emotions, counts, color='b', alpha=0.7)
+     ax1.set_xlabel('Emotion')
+     ax1.set_ylabel('Count', color='b')
+     ax1.tick_params(axis='y', labelcolor='b')
+
+     ax2 = ax1.twinx()
+     ax2.plot(emotions, average_scores, color='r', marker='o')
+     ax2.set_ylabel('Average Score', color='r')
+     ax2.tick_params(axis='y', labelcolor='r')
+
+     plt.title('Emotion Distribution and Average Scores')
+     plt.savefig('data/output/emotionAVG.png')
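createGraph relies on the line format that process() in main_emotion_classifier.py writes to data/output/results.txt, where each face line embeds a dict repr such as {'emotion': 'happy', 'score': 0.93}. A minimal sketch of that contract follows; the coordinates and score are illustrative values, not taken from the repository.

import re

pattern = re.compile(r"'emotion': '(\w+)', 'score': ([\d\.]+)")

# One face line roughly as process() writes it: "<face tuple>: {'emotion': ..., 'score': ...}"
sample = "(102, 54, 96, 96): {'emotion': 'happy', 'score': 0.9312}"
match = pattern.search(sample)
print(match.group(1), float(match.group(2)))  # -> happy 0.9312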
main_emotion_classifier.py ADDED
@@ -0,0 +1,114 @@
+ import cv2
+ import numpy as np
+ from keras.models import load_model
+ from utils.datasets import get_labels
+ from utils.inference import detect_faces, apply_offsets, load_detection_model, load_image
+ from utils.preprocessor import preprocess_input
+
+ def most_frequent(List):
+     return max(set(List), key=List.count)
+
+ def get_most_frequent_emotion(dict_):
+     emotions = []
+     for frame_nmr in dict_.keys():
+         for face_nmr in dict_[frame_nmr].keys():
+             emotions.append(dict_[frame_nmr][face_nmr]['emotion'])
+     return most_frequent(emotions)
+
+ def process(imagePaths, output_filename='data/output/results.txt'):
+     detection_model_path = 'detection_models/haarcascade_frontalface_default.xml'
+     emotion_model_path = 'emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
+     emotion_labels = get_labels('fer2013')
+     emotion_offsets = (0, 0)
+
+     face_detection = load_detection_model(detection_model_path)
+     emotion_classifier = load_model(emotion_model_path, compile=False)
+     emotion_target_size = emotion_classifier.input_shape[1:3]
+
+     output = {}
+
+     for idx, image_path in enumerate(imagePaths):
+         gray_image = load_image(image_path, grayscale=True)
+         gray_image = np.squeeze(gray_image)
+         gray_image = gray_image.astype('uint8')
+
+         faces = detect_faces(face_detection, gray_image)
+
+         tmp = {}
+         for face_coordinates in faces:
+             face_key = tuple(face_coordinates)
+
+             x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
+             gray_face = gray_image[y1:y2, x1:x2]
+
+             try:
+                 gray_face = cv2.resize(gray_face, (emotion_target_size))
+             except:
+                 continue
+
+             gray_face = preprocess_input(gray_face, True)
+             gray_face = np.expand_dims(gray_face, 0)
+             gray_face = np.expand_dims(gray_face, -1)
+             emotion_prediction = emotion_classifier.predict(gray_face)
+             emotion_label_arg = np.argmax(emotion_prediction)
+             emotion_text = emotion_labels[emotion_label_arg]
+
+             tmp[face_key] = {'emotion': emotion_text, 'score': float(np.max(emotion_prediction))}
+
+         output[image_path] = tmp
+
+     # Save results to a text file
+     with open(output_filename, 'w') as file:
+         for image_path, faces_info in output.items():
+             file.write(f"{image_path}\n")
+             for face_key, info in faces_info.items():
+                 file.write(f"  {face_key}: {info}\n")
+
+     most_frequent_emotion = get_most_frequent_emotion(output)
+     return output, most_frequent_emotion
+
+ # This function can be used for processing a single image
+ def process_single_image(image):
+     detection_model_path = 'detection_models/haarcascade_frontalface_default.xml'
+     emotion_model_path = 'emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
+     emotion_labels = get_labels('fer2013')
+     emotion_offsets = (0, 0)
+
+     face_detection = load_detection_model(detection_model_path)
+     emotion_classifier = load_model(emotion_model_path, compile=False)
+     emotion_target_size = emotion_classifier.input_shape[1:3]
+
+     gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+     faces = detect_faces(face_detection, gray_image)
+
+     for face_coordinates in faces:
+         x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
+         gray_face = gray_image[y1:y2, x1:x2]
+
+         try:
+             gray_face = cv2.resize(gray_face, (emotion_target_size))
+         except:
+             continue
+
+         gray_face = preprocess_input(gray_face, True)
+         gray_face = np.expand_dims(gray_face, 0)
+         gray_face = np.expand_dims(gray_face, -1)
+         emotion_prediction = emotion_classifier.predict(gray_face)
+         emotion_label_arg = np.argmax(emotion_prediction)
+         emotion_text = emotion_labels[emotion_label_arg]
+
+         # Draw rectangle around face and label with predicted emotion
+         (x, y, w, h) = face_coordinates
+         cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
+         cv2.putText(image, emotion_text, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
+
+     return image
+
+ if __name__ == "__main__":
+     # This is just for testing purposes
+     test_image_paths = ['path_to_test_image1.jpg', 'path_to_test_image2.jpg']
+     output, most_frequent_emotion = process(test_image_paths)
+     print(f"Most frequent emotion: {most_frequent_emotion}")
+     for key in output.keys():
+         print(f"Image: {key}")
+         print(output[key])
test-video.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6ebb10ed6003e13a611f7a31d5095a58cc9491e996f4f7a416e10cc51567c740
+ size 3828330
test.png ADDED
Git LFS Details

  • SHA256: 1326591c0cb92092174dced655b769e02b77357fe92db921838a90aba0000470
  • Pointer size: 132 Bytes
  • Size of remote file: 1.06 MB
utils/__pycache__/datasets.cpython-310.pyc ADDED
Binary file (5.07 kB). View file
 
utils/__pycache__/datasets.cpython-311.pyc ADDED
Binary file (9.09 kB). View file
 
utils/__pycache__/inference.cpython-310.pyc ADDED
Binary file (1.9 kB). View file
 
utils/__pycache__/inference.cpython-311.pyc ADDED
Binary file (2.83 kB). View file
 
utils/__pycache__/preprocessor.cpython-310.pyc ADDED
Binary file (1.06 kB). View file
 
utils/__pycache__/preprocessor.cpython-311.pyc ADDED
Binary file (1.52 kB). View file
 
utils/datasets.py ADDED
@@ -0,0 +1,149 @@
+ from scipy.io import loadmat
+ import pandas as pd
+ import numpy as np
+ from random import shuffle
+ import os
+ import cv2
+
+
+ class DataManager(object):
+     """Class for loading fer2013 emotion classification dataset or
+     imdb gender classification dataset."""
+     def __init__(self, dataset_name='imdb',
+                  dataset_path=None, image_size=(48, 48)):
+
+         self.dataset_name = dataset_name
+         self.dataset_path = dataset_path
+         self.image_size = image_size
+         if self.dataset_path is not None:
+             self.dataset_path = dataset_path
+         elif self.dataset_name == 'imdb':
+             self.dataset_path = '../datasets/imdb_crop/imdb.mat'
+         elif self.dataset_name == 'fer2013':
+             self.dataset_path = '../datasets/fer2013/fer2013.csv'
+         elif self.dataset_name == 'KDEF':
+             self.dataset_path = '../datasets/KDEF/'
+         else:
+             raise Exception(
+                 'Incorrect dataset name, please input imdb or fer2013')
+
+     def get_data(self):
+         if self.dataset_name == 'imdb':
+             ground_truth_data = self._load_imdb()
+         elif self.dataset_name == 'fer2013':
+             ground_truth_data = self._load_fer2013()
+         elif self.dataset_name == 'KDEF':
+             ground_truth_data = self._load_KDEF()
+         return ground_truth_data
+
+     def _load_imdb(self):
+         face_score_treshold = 3
+         dataset = loadmat(self.dataset_path)
+         image_names_array = dataset['imdb']['full_path'][0, 0][0]
+         gender_classes = dataset['imdb']['gender'][0, 0][0]
+         face_score = dataset['imdb']['face_score'][0, 0][0]
+         second_face_score = dataset['imdb']['second_face_score'][0, 0][0]
+         face_score_mask = face_score > face_score_treshold
+         second_face_score_mask = np.isnan(second_face_score)
+         unknown_gender_mask = np.logical_not(np.isnan(gender_classes))
+         mask = np.logical_and(face_score_mask, second_face_score_mask)
+         mask = np.logical_and(mask, unknown_gender_mask)
+         image_names_array = image_names_array[mask]
+         gender_classes = gender_classes[mask].tolist()
+         image_names = []
+         for image_name_arg in range(image_names_array.shape[0]):
+             image_name = image_names_array[image_name_arg][0]
+             image_names.append(image_name)
+         return dict(zip(image_names, gender_classes))
+
+     def _load_fer2013(self):
+         data = pd.read_csv(self.dataset_path)
+         pixels = data['pixels'].tolist()
+         width, height = 48, 48
+         faces = []
+         for pixel_sequence in pixels:
+             face = [int(pixel) for pixel in pixel_sequence.split(' ')]
+             face = np.asarray(face).reshape(width, height)
+             face = cv2.resize(face.astype('uint8'), self.image_size)
+             faces.append(face.astype('float32'))
+         faces = np.asarray(faces)
+         faces = np.expand_dims(faces, -1)
+         emotions = pd.get_dummies(data['emotion']).as_matrix()
+         return faces, emotions
+
+     def _load_KDEF(self):
+         class_to_arg = get_class_to_arg(self.dataset_name)
+         num_classes = len(class_to_arg)
+
+         file_paths = []
+         for folder, subfolders, filenames in os.walk(self.dataset_path):
+             for filename in filenames:
+                 if filename.lower().endswith(('.jpg')):
+                     file_paths.append(os.path.join(folder, filename))
+
+         num_faces = len(file_paths)
+         y_size, x_size = self.image_size
+         faces = np.zeros(shape=(num_faces, y_size, x_size))
+         emotions = np.zeros(shape=(num_faces, num_classes))
+         for file_arg, file_path in enumerate(file_paths):
+             image_array = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE)
+             image_array = cv2.resize(image_array, (y_size, x_size))
+             faces[file_arg] = image_array
+             file_basename = os.path.basename(file_path)
+             file_emotion = file_basename[4:6]
+             # there are two file names in the dataset
+             # that don't match the given classes
+             try:
+                 emotion_arg = class_to_arg[file_emotion]
+             except:
+                 continue
+             emotions[file_arg, emotion_arg] = 1
+         faces = np.expand_dims(faces, -1)
+         return faces, emotions
+
+
+ def get_labels(dataset_name):
+     if dataset_name == 'fer2013':
+         return {0: 'angry', 1: 'disgust', 2: 'fear', 3: 'happy',
+                 4: 'sad', 5: 'surprise', 6: 'neutral'}
+     elif dataset_name == 'imdb':
+         return {0: 'woman', 1: 'man'}
+     elif dataset_name == 'KDEF':
+         return {0: 'AN', 1: 'DI', 2: 'AF', 3: 'HA', 4: 'SA', 5: 'SU', 6: 'NE'}
+     else:
+         raise Exception('Invalid dataset name')
+
+
+ def get_class_to_arg(dataset_name='fer2013'):
+     if dataset_name == 'fer2013':
+         return {'angry': 0, 'disgust': 1, 'fear': 2, 'happy': 3, 'sad': 4,
+                 'surprise': 5, 'neutral': 6}
+     elif dataset_name == 'imdb':
+         return {'woman': 0, 'man': 1}
+     elif dataset_name == 'KDEF':
+         return {'AN': 0, 'DI': 1, 'AF': 2, 'HA': 3, 'SA': 4, 'SU': 5, 'NE': 6}
+     else:
+         raise Exception('Invalid dataset name')
+
+
+ def split_imdb_data(ground_truth_data, validation_split=.2, do_shuffle=False):
+     ground_truth_keys = sorted(ground_truth_data.keys())
+     if do_shuffle is not False:
+         shuffle(ground_truth_keys)
+     training_split = 1 - validation_split
+     num_train = int(training_split * len(ground_truth_keys))
+     train_keys = ground_truth_keys[:num_train]
+     validation_keys = ground_truth_keys[num_train:]
+     return train_keys, validation_keys
+
+
+ def split_data(x, y, validation_split=.2):
+     num_samples = len(x)
+     num_train_samples = int((1 - validation_split)*num_samples)
+     train_x = x[:num_train_samples]
+     train_y = y[:num_train_samples]
+     val_x = x[num_train_samples:]
+     val_y = y[num_train_samples:]
+     train_data = (train_x, train_y)
+     val_data = (val_x, val_y)
+     return train_data, val_data
utils/inference.py ADDED
@@ -0,0 +1,37 @@
+ import cv2
+ import matplotlib.pyplot as plt
+ import numpy as np
+ from tensorflow.keras.utils import load_img, img_to_array
+
+ def load_image(image_path, grayscale=False, target_size=None):
+     color_mode = 'grayscale' if grayscale else 'rgb'
+     pil_image = load_img(image_path, color_mode=color_mode, target_size=target_size)
+     return img_to_array(pil_image)
+
+ def load_detection_model(model_path):
+     detection_model = cv2.CascadeClassifier(model_path)
+     return detection_model
+
+ def detect_faces(detection_model, gray_image_array):
+     return detection_model.detectMultiScale(gray_image_array, 1.3, 5)
+
+ def draw_bounding_box(face_coordinates, image_array, color):
+     x, y, w, h = face_coordinates
+     cv2.rectangle(image_array, (x, y), (x + w, y + h), color, 2)
+
+ def apply_offsets(face_coordinates, offsets):
+     x, y, width, height = face_coordinates
+     x_off, y_off = offsets
+     return (x - x_off, x + width + x_off, y - y_off, y + height + y_off)
+
+ def draw_text(coordinates, image_array, text, color, x_offset=0, y_offset=0,
+               font_scale=2, thickness=2):
+     x, y = coordinates[:2]
+     cv2.putText(image_array, text, (x + x_offset, y + y_offset),
+                 cv2.FONT_HERSHEY_SIMPLEX,
+                 font_scale, color, thickness, cv2.LINE_AA)
+
+ def get_colors(num_classes):
+     colors = plt.cm.hsv(np.linspace(0, 1, num_classes)).tolist()
+     colors = np.asarray(colors) * 255
+     return colors
utils/preprocessor.py ADDED
@@ -0,0 +1,27 @@
+ import numpy as np
+ from imageio import imread
+ from skimage.transform import resize as imresize
+
+ def preprocess_input(x, v2=True):
+     x = x.astype('float32')
+     x = x / 255.0
+     if v2:
+         x = x - 0.5
+         x = x * 2.0
+     return x
+
+
+ def _imread(image_name):
+     return imread(image_name)
+
+
+ def _imresize(image_array, size):
+     return imresize(image_array, size)
+
+
+ def to_categorical(integer_classes, num_classes=2):
+     integer_classes = np.asarray(integer_classes, dtype='int')
+     num_samples = integer_classes.shape[0]
+     categorical = np.zeros((num_samples, num_classes))
+     categorical[np.arange(num_samples), integer_classes] = 1
+     return categorical
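With v2=True, preprocess_input maps 8-bit pixel values from [0, 255] to [-1, 1] (divide by 255, subtract 0.5, multiply by 2), which is the form main_emotion_classifier.py feeds to the emotion model; with v2=False the values stay in [0, 1]. A quick check, shown only as plain arithmetic for illustration:

import numpy as np
from utils.preprocessor import preprocess_input

pixels = np.array([0, 128, 255], dtype='uint8')
print(preprocess_input(pixels, True))   # approximately [-1.0, 0.004, 1.0]
print(preprocess_input(pixels, False))  # approximately [0.0, 0.502, 1.0]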