import streamlit as st
from utils.levels import complete_level, render_page, initialize_level
from utils.login import get_login, initialize_login
from utils.inference import query
import os
import time
import face_recognition
import cv2
import numpy as np
from PIL import Image

initialize_login()
initialize_level()

LEVEL = 4


def step4_page():
    st.header("Face Recognition: Trying It Out")
    st.write(
        """
        Once the face encodings are obtained, they can be stored in a database or used directly for face recognition.
        During recognition, the encodings of the input faces are compared against the stored encodings (our known-face database)
        to determine whether a match exists. Similarity metrics such as Euclidean distance or cosine similarity
        can be used to measure how close two face encodings are and to decide on potential matches.
        """
    )
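    # Under the hood, the comparison done further below boils down to Euclidean distance
    # between 128-dimensional encoding vectors; face_recognition.face_distance is essentially
    #     np.linalg.norm(np.asarray(known_face_encodings) - face_encoding, axis=1)
    # and a smaller distance means a more similar face.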
    st.info(
        "Now that we know how our face recognition application works, let's try it out!"
    )

    face_encodings_dir = os.path.join(".sessions", get_login()["username"], "face_encodings")
    face_encodings = os.listdir(face_encodings_dir)
    st.write(face_encodings)

    known_face_encodings = []
    known_face_names = []
    if len(face_encodings) > 0:
        # Load every stored encoding; the file name (without extension) is the person's name.
        for face_encoding_file in face_encodings:
            known_face_encoding = np.load(os.path.join(face_encodings_dir, face_encoding_file))
            # Each file is expected to hold a single 128-d encoding; if an array of
            # encodings was saved instead, keep only the first one.
            if known_face_encoding.ndim > 1:
                known_face_encoding = known_face_encoding[0]
            face_name = os.path.splitext(face_encoding_file)[0]
            known_face_encodings.append(known_face_encoding)
            known_face_names.append(face_name)
        st.write(known_face_encodings)
        st.write(known_face_names)

        st.info("Select an image to analyze!")
        input_type = st.radio("Select the Input Type", ["Image", "Camera"])

        if input_type == "Camera":
            picture = st.camera_input("Take a picture")
        else:
            picture = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])

        if picture:
            image = face_recognition.load_image_file(picture)
            face_locations = face_recognition.face_locations(image)
            face_encodings = face_recognition.face_encodings(image, face_locations)

            # One column per detected face (at least one, so st.columns never receives 0).
            cols = st.columns(max(len(face_encodings), 1))
            st.image(image)

            st.info(
                "Select the tolerance level you want for your model! This is the maximum "
                "distance between two face encodings that still counts as a match. "
                "Lower is stricter; 0.6 typically gives the best performance."
            )
            tolerance = st.slider("Select tolerance level", 0.0, 1.0, 0.3, 0.1)
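            # compare_faces treats the tolerance as a Euclidean-distance cutoff between
            # encodings (the library's default is 0.6); the slider value is passed below
            # so the user's choice actually affects the matches.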
            if tolerance:
                # Compare every face found in the picture against the known encodings.
                for i, ((top, right, bottom, left), face_encoding) in enumerate(
                    zip(face_locations, face_encodings)
                ):
                    # Boolean match per known face, using the selected tolerance.
                    matches = face_recognition.compare_faces(
                        known_face_encodings, face_encoding, tolerance=tolerance
                    )
                    name = "Unknown"
                    st.write(matches)

                    # Euclidean distance to every known encoding; the closest one wins.
                    face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
                    st.write(face_distances)
                    st.write(face_distances.shape)
                    best_match_index = np.argmin(face_distances)
                    st.write(best_match_index)
                    if matches[best_match_index]:
                        name = known_face_names[best_match_index]

                    # Crop the face out of the image and show it with the matched name.
                    face_image = image[top:bottom, left:right]
                    pil_image = Image.fromarray(face_image)
                    cols[i].image(pil_image, use_column_width=True)
                    cols[i].write("Person name: " + name)

    st.info("Click on the button below to complete this level!")
    if st.button("Complete Level"):
        complete_level(LEVEL)


render_page(step4_page, LEVEL)