Spaces: Runtime error
Shafeek Saleem committed • 0cba43f
Parent(s): 3767003
added template files
Files changed:
- .idea/.gitignore +3 -0
- .idea/ascii_demo.iml +8 -0
- .idea/inspectionProfiles/Project_Default.xml +27 -0
- .idea/inspectionProfiles/profiles_settings.xml +6 -0
- .idea/modules.xml +8 -0
- .idea/sonarlint/issuestore/0/0/000a6190576126fbf86ffdd96d384deeb366af79 +4 -0
- .idea/sonarlint/issuestore/2/6/261359fd9dbbe29d2e8fa924e82dca6f103aeb4b +0 -0
- .idea/sonarlint/issuestore/8/7/87caad19f17efd4c3e8264659f523744ffa94ed8 +2 -0
- .idea/sonarlint/issuestore/a/a/aa7ba3dbfc6be1a7bca20c1e791bc3e201c04b60 +0 -0
- .idea/sonarlint/issuestore/c/b/cb19cfbf0eac792d9353415a9a0072a9219f871e +0 -0
- .idea/sonarlint/issuestore/e/2/e274ab1fb18a0b24ca276995515f2380ef55255f +0 -0
- .idea/sonarlint/issuestore/index.pb +3 -0
- .idea/vcs.xml +6 -0
- 0_Introduction.py +32 -0
- app.py +0 -50
- assets/logo.png +0 -0
- pages/1_Technology Behind It.py +60 -0
- pages/2_Collecting Your Data.py +70 -0
- pages/3_Training the Model.py +93 -0
- pages/4_Trying It Out.py +81 -0
- pages/5_Congratulations.py +12 -0
- requirements.txt +0 -2
- utils/__init__.py +0 -0
- utils/__pycache__/__init__.cpython-310.pyc +0 -0
- utils/__pycache__/inference.cpython-310.pyc +0 -0
- utils/__pycache__/levels.cpython-310.pyc +0 -0
- utils/__pycache__/login.cpython-310.pyc +0 -0
- utils/inference.py +10 -0
- utils/levels.py +36 -0
- utils/login.py +29 -0
.idea/.gitignore
ADDED
@@ -0,0 +1,3 @@
+# Default ignored files
+/shelf/
+/workspace.xml
.idea/ascii_demo.iml
ADDED
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="PYTHON_MODULE" version="4">
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$" />
+    <orderEntry type="inheritedJdk" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+</module>
.idea/inspectionProfiles/Project_Default.xml
ADDED
@@ -0,0 +1,27 @@
+<component name="InspectionProjectProfileManager">
+  <profile version="1.0">
+    <option name="myName" value="Project Default" />
+    <inspection_tool class="PyPep8Inspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
+      <option name="ignoredErrors">
+        <list>
+          <option value="E402" />
+        </list>
+      </option>
+    </inspection_tool>
+    <inspection_tool class="PyPep8NamingInspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
+      <option name="ignoredErrors">
+        <list>
+          <option value="N806" />
+        </list>
+      </option>
+    </inspection_tool>
+    <inspection_tool class="PyUnresolvedReferencesInspection" enabled="true" level="WARNING" enabled_by_default="true">
+      <option name="ignoredIdentifiers">
+        <list>
+          <option value="trx_clus" />
+          <option value="matrix_demand_transfer" />
+        </list>
+      </option>
+    </inspection_tool>
+  </profile>
+</component>
.idea/inspectionProfiles/profiles_settings.xml
ADDED
@@ -0,0 +1,6 @@
+<component name="InspectionProjectProfileManager">
+  <settings>
+    <option name="USE_PROJECT_PROFILE" value="false" />
+    <version value="1.0" />
+  </settings>
+</component>
.idea/modules.xml
ADDED
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/ascii_demo.iml" filepath="$PROJECT_DIR$/.idea/ascii_demo.iml" />
+    </modules>
+  </component>
+</project>
.idea/sonarlint/issuestore/0/0/000a6190576126fbf86ffdd96d384deeb366af79
ADDED
@@ -0,0 +1,4 @@
+(binary SonarLint issue-store file; recoverable rule messages:)
+python:S1481: Remove the unused local variable "username".
+python:S1481: Remove the unused local variable "password".
+python:S1135: Complete the task associated to this "TODO" comment.
.idea/sonarlint/issuestore/2/6/261359fd9dbbe29d2e8fa924e82dca6f103aeb4b
ADDED
File without changes
.idea/sonarlint/issuestore/8/7/87caad19f17efd4c3e8264659f523744ffa94ed8
ADDED
@@ -0,0 +1,2 @@
+(binary SonarLint issue-store file; recoverable rule message:)
+python:S1066: Merge this if statement with the enclosing one.
.idea/sonarlint/issuestore/a/a/aa7ba3dbfc6be1a7bca20c1e791bc3e201c04b60
ADDED
File without changes
.idea/sonarlint/issuestore/c/b/cb19cfbf0eac792d9353415a9a0072a9219f871e
ADDED
File without changes
.idea/sonarlint/issuestore/e/2/e274ab1fb18a0b24ca276995515f2380ef55255f
ADDED
File without changes
.idea/sonarlint/issuestore/index.pb
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3cea10e0be6249b66ee773812cedaca53f00b10c64f0c5fc3741d5fad1cb3805
+size 412
.idea/vcs.xml
ADDED
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="$PROJECT_DIR$" vcs="Git" />
+  </component>
+</project>
0_Introduction.py
ADDED
@@ -0,0 +1,32 @@
+import streamlit as st
+from utils.levels import complete_level, initialize_level, render_page, get_level
+from utils.login import initialize_login
+
+initialize_login()
+initialize_level()
+
+LEVEL = 0
+
+
+def intro_page():
+    st.header("Face Recognition")
+    st.subheader("Introduction")
+
+    st.write(
+        """Welcome to the interactive tutorial on creating your very own Face Recognition Application! In this tutorial, you will learn how to build
+        a simple application that can detect and recognise faces from a given photo. Face recognition has revolutionized
+        various industries, including security, entertainment, and personal identification. Are you ready to dive into the exciting world of face recognition?"""
+    )
+
+    # st.image(
+    #     "https://miro.medium.com/v2/resize:fit:720/0*Ko6oX3rRb0aTgdUI.gif",
+    #     use_column_width=True,
+    # )
+
+    st.info("Click on the button below to start the tutorial!")  # plain string; it had no f-string placeholders
+
+    if st.button("I am Ready!"):
+        complete_level(LEVEL)
+
+
+render_page(intro_page, LEVEL)
app.py
DELETED
@@ -1,50 +0,0 @@
-import streamlit as st
-from PIL import Image
-import face_recognition
-import os
-import uuid
-import streamlit as st
-from PIL import Image
-import face_recognition
-
-st.header("Face Detection")
-st.write(
-    "Now it's time to collect the pictures we need to create our known-faces data base for our face recognition model. "
-    "But remember, we should always ask for permission before taking someone's picture. We can use a smartphone or a digital camera to capture pictures, and it's important to take pictures of different people. This will help our application to have a good known-faces database!"
-)
-
-img_dir = os.path.join(".sessions", "known_faces")
-os.makedirs(img_dir, exist_ok=True)
-
-picture = st.file_uploader("Upload a candidate image", type=['jpg','png','jpeg'], accept_multiple_files=False)
-if picture:
-    image = face_recognition.load_image_file(picture)
-    st.image(image)
-
-    # Find all the faces in the image using the default HOG-based model.
-    # This method is fairly accurate, but not as accurate as the CNN model and not GPU accelerated.
-    # See also: find_faces_in_picture_cnn.py
-    face_locations = face_recognition.face_locations(image)
-    st.write("Algorithm found {} face(s) in this photograph.".format(len(face_locations)))
-
-    cols = st.columns(len(face_locations))
-    for i in range(len(face_locations)):
-        col = cols[i]
-        face = face_locations[i]
-        # display faces
-        with col:
-            st.header("Face {}".format(i))
-            # Print the location of each face in this image
-            top, right, bottom, left = face
-            # You can access the actual face itself like this:
-            face_image = image[top:bottom, left:right]
-            pil_image = Image.fromarray(face_image)
-            st.image(pil_image)
-            face_name = st.text_input('Specify name', "This is a placeholder", key="text_"+str(i))
-            if st.button("Save", key="button_"+str(i)):
-                img_name = str(uuid.uuid4()) + f"{face_name}" + ".jpg"
-                img_path = os.path.join(img_dir, img_name)
-                im1 = pil_image.save(img_path)
-                st.success("Face added successfully!")
-else:
-    st.write("Please upload an image to proceed.")
assets/logo.png
ADDED
pages/1_Technology Behind It.py
ADDED
@@ -0,0 +1,60 @@
+import streamlit as st
+from utils.levels import complete_level, render_page, initialize_level
+from utils.login import initialize_login
+
+LEVEL = 1
+
+initialize_login()
+initialize_level()
+
+
+def step1_page():
+    st.header("How does it work?")
+    st.markdown(
+        """
+    ### How does it work?
+    In this tutorial, we will explore the fascinating world of face recognition technology. Through this interactive web application,
+    you will learn the fundamentals of face detection, feature encodings, and face recognition algorithms.
+
+    Throughout the tutorial, you will have the opportunity to upload images and experience the power of face recognition firsthand.
+    Step by step, we will guide you through the process, ensuring that you gain a deep understanding of each concept and its practical implementation.
+    Here's how it works:
+
+    1. **Face Detection**: In this first step, discover how face detection algorithms locate and identify faces within an image.
+    You will see how this crucial first step sets the foundation for subsequent face recognition tasks.
+
+    2. **Face Encodings**: Next, you will learn about face encodings, a technique that extracts unique
+    facial characteristics and represents them as numerical vectors. These features enable reliable comparisons between faces.
+    Here we will generate face encodings for the known faces to create a database.
+    """
+    )
+    st.image(
+        "https://media.istockphoto.com/id/1136827583/photo/futuristic-and-technological-scanning-of-face-for-facial-recognition.jpg?s=612x612&w=0&k=20&c=GsqBYxvE64TS8HY__OSn6qZU5HPBhIemnqjyf37TkQo=",
+        use_column_width=True,
+    )
+    st.markdown(
+        """
+    3. **Face Recognition**: Dive into the world of face recognition algorithms, where you will understand the mechanisms
+    behind comparing face encodings and making accurate matches. The encoding of an unknown face is compared against the encodings of the known faces to find matching features.
+    """
+    )
+    st.image(
+        "https://miro.medium.com/v2/resize:fit:1200/1*4rjT-RSOTdlPqp1UwcF3tg.jpeg",
+        use_column_width=True,
+    )
+    st.markdown(
+        """
+    So, our face recognition model is like a clever brain that looks at faces, notices their important features, and works out
+    who someone is based on those features. It's a way for computers to recognise people, just like we do as humans!
+    """
+    )
+
+    st.info("Click on the button below to continue!")
+
+    if st.button("Complete"):
+        complete_level(LEVEL)
+
+
+render_page(step1_page, LEVEL)
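Editor's aside: the three steps this page describes map directly onto the face_recognition library used throughout this commit. A minimal standalone sketch, with the image paths "known.jpg" and "unknown.jpg" as illustrative assumptions:

import face_recognition

# Step 1: load images; face detection happens inside face_encodings.
known_image = face_recognition.load_image_file("known.jpg")
unknown_image = face_recognition.load_image_file("unknown.jpg")

# Step 2: encode each face as a 128-dimensional vector.
known_encoding = face_recognition.face_encodings(known_image)[0]
unknown_encoding = face_recognition.face_encodings(unknown_image)[0]

# Step 3: compare the unknown encoding against the known ones.
results = face_recognition.compare_faces([known_encoding], unknown_encoding)
print("Match!" if results[0] else "No match.")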
pages/2_Collecting Your Data.py
ADDED
@@ -0,0 +1,70 @@
+import streamlit as st
+from PIL import Image
+import face_recognition
+from utils.levels import complete_level, render_page, initialize_level
+from utils.login import get_login
+import os
+import uuid
+from utils.login import initialize_login
+
+initialize_login()
+initialize_level()
+
+LEVEL = 2
+
+
+def step2_page():
+    st.header("Face Detection")
+    st.write(
+        "Now it's time to collect the pictures we need to create the known-faces database for our face recognition model. "
+        "But remember, we should always ask for permission before taking someone's picture. We can use a smartphone or a digital camera to capture pictures, and it's important to take pictures of different people. This will help our application to have a good known-faces database!"
+    )
+
+    img_dir = os.path.join(".sessions", get_login()["username"], "known_faces")
+    os.makedirs(img_dir, exist_ok=True)
+
+    picture = st.camera_input("Take a picture")
+    if picture:
+        image = face_recognition.load_image_file(picture)
+        st.image(image)
+
+        # Find all the faces in the image using the default HOG-based model.
+        # This method is fairly accurate, but not as accurate as the CNN model and not GPU accelerated.
+        # See also: find_faces_in_picture_cnn.py
+        face_locations = face_recognition.face_locations(image)
+        st.write("Algorithm found {} face(s) in this photograph.".format(len(face_locations)))
+
+        cols = st.columns(max(len(face_locations), 1))  # st.columns() requires a positive count
+        for i in range(len(face_locations)):
+            col = cols[i]
+            face = face_locations[i]
+            # display faces
+            with col:
+                st.header("Face {}".format(i))
+                # Print the location of each face in this image
+                top, right, bottom, left = face
+                # You can access the actual face itself like this:
+                face_image = image[top:bottom, left:right]
+                pil_image = Image.fromarray(face_image)
+                st.image(pil_image)
+                face_name = st.text_input('Specify name', "This is a placeholder", key="text_" + str(i))
+                if st.button("Save", key="button_" + str(i)):
+                    img_name = f"{face_name}_{uuid.uuid4()}.jpg"  # name first, so split("_")[0] recovers it later
+                    img_path = os.path.join(img_dir, img_name)
+                    # face_image is a NumPy array with no .getvalue(); save the PIL image instead
+                    pil_image.save(img_path)
+                    st.success("Face added successfully!")
+
+    images = os.listdir(img_dir)
+    if st.button("Clear All"):
+        for img in images:
+            os.remove(os.path.join(img_dir, img))
+        st.success("All images cleared!")
+        st.experimental_rerun()
+
+    st.info("If you are satisfied with your images, click on the button below to complete this level.")
+    if st.button("Complete"):
+        complete_level(LEVEL)
+
+
+render_page(step2_page, LEVEL)
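Editor's aside: the crop above works because face_locations returns boxes in (top, right, bottom, left) order, and a NumPy image indexes rows first, then columns. A tiny sketch of that convention, using a synthetic blank frame rather than a real photo:

import numpy as np

# face_locations-style box: (top, right, bottom, left) in pixel coordinates.
image = np.zeros((480, 640, 3), dtype=np.uint8)  # synthetic 640x480 RGB frame
top, right, bottom, left = 100, 400, 300, 200

face_crop = image[top:bottom, left:right]  # rows (y) first, then columns (x)
print(face_crop.shape)  # (200, 200, 3)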
pages/3_Training the Model.py
ADDED
@@ -0,0 +1,93 @@
+import streamlit as st
+from utils.levels import complete_level, render_page, initialize_level
+from utils.login import get_login, initialize_login
+from utils.inference import query
+import face_recognition  # needed below for face_encodings()
+import os
+import time
+import json
+
+initialize_login()
+initialize_level()
+
+LEVEL = 3
+
+
+def infer(image):
+    time.sleep(1)
+    output = query(image)
+    cols = st.columns(2)
+    cols[0].image(image, use_column_width=True)
+    with cols[1]:
+        for item in output:
+            st.progress(item["score"], text=item["label"])
+
+
+def step3_page():
+    st.header("Feature encoding")
+    st.markdown(
+        """
+    ### What are Face Encodings?
+    In face recognition, face encodings are numerical representations of facial features that are used to uniquely identify individuals.
+    These encodings are obtained by extracting relevant facial information from an input image or video frame.
+
+    Face encodings are typically computed using deep learning models, such as Convolutional Neural Networks (CNNs),
+    that are trained on large datasets of labeled faces. During the training process, these models learn to recognize patterns and extract discriminative features from facial images.
+    """
+    )
+    st.image(
+        "https://1.bp.blogspot.com/-Cehw4NbX2L8/X1-DJeQgtmI/AAAAAAAAKeE/fA55q9EXsgQLU0trbgX_vJkiFMwR927yQCLcBGAsYHQ/s853/final.gif",
+        use_column_width=True,
+    )
+    st.markdown(
+        """
+    The model learns to map pictures of the same person to nearby points in the encoding space, so two encodings of the same face end up close together while encodings of different faces stay far apart.
+    """
+    )
+    st.image(
+        "https://miro.medium.com/v2/resize:fit:1358/1*KoHwRNZGrVrhdbye3BDEew.png",
+        use_column_width=True,
+    )
+    st.markdown(
+        """
+    Once the face encodings are obtained, they can be stored in a database or used for face recognition tasks.
+    During face recognition, the encodings of input faces are compared to the stored encodings to determine if a match exists.
+    Various similarity metrics, such as Euclidean distance or cosine similarity, can be utilized to measure the similarity between
+    face encodings and determine potential matches.
+    """
+    )
+
+    st.info(
+        "Now it's your turn to create face encodings for the face database you created earlier!"
+    )
+
+    img_dir = os.path.join(".sessions", get_login()["username"], "known_faces")
+    images = os.listdir(img_dir)
+    if len(images) > 0:
+        st.subheader("Let's see the available images in your known-faces database.")
+        cols = st.columns(len(images))
+        for i, img in enumerate(images):
+            face_name = img.split("_")[0]
+            cols[i].image(os.path.join(img_dir, img), use_column_width=True)
+            cols[i].write(face_name)
+    st.subheader("Let's create face encodings for the known faces.")
+    face_encodings_dict = {}
+    if st.button("Create Face Encodings"):
+        my_bar = st.progress(0, text="Generating encodings...")
+        if len(images) > 0:
+            for i, img in enumerate(images):
+                # Load the saved image and compute its encoding (raw bytes from
+                # f.read() are not usable by face_encodings).
+                face_image = face_recognition.load_image_file(os.path.join(img_dir, img))
+                my_face_encoding = face_recognition.face_encodings(face_image)[0]
+                my_bar.progress(int((i + 1) / len(images) * 100), text="Generating encodings...")
+                face_encodings_dict[img] = my_face_encoding.tolist()  # NumPy arrays are not JSON-serializable
+            my_bar.progress(100, text="Successfully encoded all the known faces!")
+            st.success("Face encoding completed successfully!")
+            with open(os.path.join(".sessions", get_login()["username"], "face_encodings.json"), "w") as write_file:
+                json.dump(face_encodings_dict, write_file, indent=4)
+            complete_level(LEVEL)
+        else:
+            my_bar.empty()
+            st.error("You have not taken any images yet! Do the previous steps first!")
+
+
+render_page(step3_page, LEVEL)
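Editor's aside: the two similarity metrics this page names are one-liners once encodings exist as vectors (face_recognition's own face_distance helper wraps the Euclidean version). A sketch, with random 128-dimensional vectors standing in for real encodings:

import numpy as np

def euclidean_distance(a, b):
    # Lower is more similar; face_recognition commonly treats ~0.6 as a match threshold.
    return np.linalg.norm(a - b)

def cosine_similarity(a, b):
    # Higher is more similar; 1.0 means the vectors point the same way.
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

a, b = np.random.rand(128), np.random.rand(128)  # stand-ins for two face encodings
print(euclidean_distance(a, b), cosine_similarity(a, b))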
pages/4_Trying It Out.py
ADDED
@@ -0,0 +1,81 @@
+import streamlit as st
+from utils.levels import complete_level, render_page, initialize_level
+from utils.login import get_login, initialize_login
+from utils.inference import query
+import os
+import time
+
+initialize_login()
+initialize_level()
+
+LEVEL = 4
+
+
+def infer(image):
+    time.sleep(1)
+    output = query(image)
+    cols = st.columns(2)
+    cols[0].image(image, use_column_width=True)
+    with cols[1]:
+        for item in output:
+            st.progress(item["score"], text=item["label"])
+
+
+def step4_page():
+    st.header("Trying It Out")
+    st.markdown(
+        """
+    ### How Our Emotion Detection Application Works
+    Now that we have trained our emotion detection application, let's see how it works in action! Here's a simple explanation of how the application recognizes emotions:
+
+    1. **Looking at Faces**: When we use our emotion detection application, we can show it a picture of a face or use a camera to capture a real-time image. It's like giving our application a chance to see someone's expression.
+
+    2. **Observing the Features**: The application carefully looks at the face and pays attention to different parts, like the eyes, mouth, and eyebrows. It tries to understand the expressions by noticing how these parts look and how they are positioned. It's like the application is taking a close look at the face, just like we do when we try to understand someone's emotions.
+    """
+    )
+    st.image(
+        "https://camo.githubusercontent.com/3bb4e2eba7c8a91d71916496bc775e870222f19bb5098cb4bc514ed60078c1e7/68747470733a2f2f626c6f672e7161746573746c61622e636f6d2f77702d636f6e74656e742f75706c6f6164732f323032302f30312f4d4c5f6578616d706c652e6769663f7261773d74727565",
+        use_column_width=True,
+    )
+    st.markdown(
+        """
+    3. **Guessing the Emotion**: Based on what it observed, our application uses the knowledge it learned during training to make its best guess about the person's emotion. It remembers the patterns it saw before and tries to match them with the features it observed. It might think the person looks happy, sad, or maybe surprised!
+    """
+    )
+    st.image(
+        "https://miro.medium.com/v2/resize:fit:1358/1*KoHwRNZGrVrhdbye3BDEew.png",
+        use_column_width=True,
+    )
+    st.markdown(
+        """
+    4. **Providing a Result**: Finally, our emotion detection application tells us what emotion it thinks the person is feeling. It might say, "I think this person looks happy!" or "I think this person looks sad." It's like having a virtual friend who can give us their guess about someone's emotion.
+
+    By going through these steps, our emotion detection application can quickly analyze faces and give us an idea of how someone might be feeling. It's like having a special friend who can understand and guess emotions based on facial expressions!
+    """
+    )
+
+    st.info(
+        "Now that we know how our emotion detection application works, let's try it out!"
+    )
+
+    st.info("Select an image to analyze!")
+    input_type = st.radio("Select the Input Type", ["Image", "Camera"])
+
+    if input_type == "Camera":
+        image = st.camera_input("Take a picture")
+        byte_image = image.getvalue() if image else None
+    else:
+        image = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])
+        byte_image = image.read() if image else None
+    try_img = os.path.join(".sessions", get_login()["username"], "try.jpg")
+    if byte_image:
+        with open(try_img, "wb") as f:
+            f.write(byte_image)
+        infer(try_img)
+
+    st.info("Click on the button below to complete this level!")
+    if st.button("Complete Level"):
+        complete_level(LEVEL)
+
+
+render_page(step4_page, LEVEL)
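Editor's aside: infer() above assumes the hosted classifier returns a list of label/score pairs, the usual shape for Hugging Face image-classification endpoints; the concrete labels below are made-up placeholders showing how that list is consumed:

# Hypothetical response, shaped the way infer() iterates over it.
sample_output = [
    {"label": "happy", "score": 0.87},
    {"label": "neutral", "score": 0.09},
    {"label": "sad", "score": 0.04},
]
for item in sample_output:
    print(f'{item["label"]}: {item["score"]:.2f}')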
pages/5_Congratulations.py
ADDED
@@ -0,0 +1,12 @@
+import streamlit as st
+from utils.levels import render_page, initialize_level
+
+initialize_level()
+
+LEVEL = 5
+
+def complete_page():
+    st.header("Congratulations!")
+    st.subheader("You have completed the tutorial!")
+
+render_page(complete_page, LEVEL)
requirements.txt
CHANGED
@@ -1,3 +1 @@
-transformers
-torch
 face-recognition
utils/__init__.py
ADDED
File without changes
utils/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (165 Bytes).
utils/__pycache__/inference.cpython-310.pyc
ADDED
Binary file (617 Bytes).
utils/__pycache__/levels.cpython-310.pyc
ADDED
Binary file (1.6 kB).
utils/__pycache__/login.cpython-310.pyc
ADDED
Binary file (1.05 kB).
utils/inference.py
ADDED
@@ -0,0 +1,10 @@
+import requests
+
+API_URL = "https://api-inference.huggingface.co/models/CynthiaCR/emotions_classifier"
+headers = {"Authorization": "Bearer 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"}
+
+def query(filename):
+    with open(filename, "rb") as f:
+        data = f.read()
+    response = requests.post(API_URL, headers=headers, data=data)
+    return response.json()
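Editor's aside: a quick usage sketch for query(). The file name is an assumption, and a real call may return an error dict instead of a list (for example while the hosted model is still loading), so checking the shape is prudent:

result = query("try.jpg")  # hypothetical local image
if isinstance(result, list):
    for item in result:
        print(item["label"], item["score"])
else:
    print("API returned:", result)  # e.g. {"error": ..., "estimated_time": ...}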
utils/levels.py
ADDED
@@ -0,0 +1,36 @@
+import streamlit as st
+from utils.login import get_login
+import os
+
+def initialize_level():
+    if 'level' not in st.session_state:
+        if get_login()["status"]:
+            if not os.path.exists(f".sessions/{get_login()['username']}/level.txt"):
+                with open(f".sessions/{get_login()['username']}/level.txt", "w") as f:
+                    f.write("0")
+                st.session_state['level'] = 0
+            else:
+                with open(f".sessions/{get_login()['username']}/level.txt", "r") as f:
+                    st.session_state['level'] = int(f.read())
+
+def get_level():
+    return st.session_state['level']
+
+def render_page(page, level):
+    if get_login()["status"]:
+        if st.session_state['level'] < level:
+            st.error(f"You need to complete Level {st.session_state['level']} first!")
+        else:
+            page()
+    else:
+        st.error("You need to login first!")
+
+def complete_level(level):
+    if st.session_state['level'] > level:
+        st.info(f'You have already completed Level {level}!')
+    else:
+        st.session_state['level'] = level + 1
+        with open(f".sessions/{get_login()['username']}/level.txt", "w") as f:
+            f.write(str(st.session_state['level']))
+        st.balloons()
+        st.success(f'You have completed Level {level}! You can now move on to the next level.')
utils/login.py
ADDED
@@ -0,0 +1,29 @@
+import streamlit as st
+import os
+
+
+def initialize_login():
+    if "login" not in st.session_state:
+        st.columns(3)[1].image("assets/logo.png")
+        username = st.text_input("Username")
+        password = st.text_input("Password", type="password")
+        if st.button("Login"):
+            # TODO: replace with actual authorization check
+            authorized = {"status": True, "Name": "John Doe", "username": "johndoe"}
+            if authorized["status"]:
+                st.session_state["login"] = authorized
+                os.makedirs(
+                    os.path.join(".sessions", st.session_state["login"]["username"]),
+                    exist_ok=True,
+                )
+                st.success("Login Successful!")
+                st.experimental_rerun()
+            else:
+                st.error("Invalid username or password")
+    else:
+        st.sidebar.success(f'Hello, {st.session_state["login"]["Name"]}!')
+        st.sidebar.image("assets/logo.png", use_column_width=True)
+
+
+def get_login():
+    return st.session_state.get("login", {"status": False})