import streamlit as st
from utils.levels import complete_level, render_page, initialize_level
from utils.login import get_login, initialize_login
from utils.inference import query
import os
import time
import face_recognition
import cv2
import numpy as np
from PIL import Image
initialize_login()
initialize_level()
LEVEL = 4

def step4_page():
    st.header("Face Recognition: Trying It Out")
    st.write(
        """
        Once face encodings have been obtained, they can be stored in a database and used for face recognition tasks.
        During recognition, the encoding of each input face is compared against the stored encodings (our known-face database)
        to determine whether a match exists. Similarity metrics such as Euclidean distance or cosine similarity
        can be used to measure how close two encodings are and decide whether they belong to the same person.
        """
    )
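    # Illustrative sketch (not part of the app flow): each face_recognition encoding is a
    # 128-dimensional NumPy vector, so the Euclidean-distance comparison described above
    # boils down to something like:
    #
    #     distance = np.linalg.norm(known_encoding - candidate_encoding)
    #     is_match = distance <= 0.6   # 0.6 is the library's default tolerance
    #
    # face_recognition.face_distance() and compare_faces() implement this same idea.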
    st.info(
        "Now that we know how our face recognition application works, let's try it out!"
    )

    # Load every face encoding previously saved for this user (one .npy file per person).
    face_encodings_dir = os.path.join(".sessions", get_login()["username"], "face_encodings")
    os.makedirs(face_encodings_dir, exist_ok=True)  # guard: the directory may not exist yet
    face_encodings = os.listdir(face_encodings_dir)
    st.write(face_encodings)

    known_face_encodings = []
    known_face_names = []
    if len(face_encodings) > 0:
        for i, face_encoding in enumerate(face_encodings):
            known_face_encoding = np.load(os.path.join(face_encodings_dir, face_encoding))
            face_name = face_encoding.split(".")[0]  # the file name (minus extension) is the person's name
            known_face_encodings.append(known_face_encoding)
            known_face_names.append(face_name)
    st.write(known_face_encodings)
    st.write(known_face_names)
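    # Note: the .npy files loaded above are assumed to have been written in an earlier level,
    # roughly as sketched below (hypothetical saving step, shown only for context):
    #
    #     encoding = face_recognition.face_encodings(image)[0]
    #     np.save(os.path.join(face_encodings_dir, f"{person_name}.npy"), encoding)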
st.info("Select an image to analyze!")
input_type = st.radio("Select the Input Type", ["Image", "Camera"])
if input_type == "Camera":
picture = st.camera_input("Take a picture")
else:
picture = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])
    if picture:
        image = face_recognition.load_image_file(picture)
        face_locations = face_recognition.face_locations(image)
        face_encodings = face_recognition.face_encodings(image, face_locations)

        # One column per detected face (at least one, since st.columns needs a positive count).
        cols = st.columns(max(len(face_encodings), 1))
        i = 0
        st.image(image)

        st.info(
            "Select the tolerance level you want for your model! Tolerance is the maximum distance "
            "between two encodings that still counts as a match: lower is stricter, and 0.6 usually "
            "gives the best performance."
        )
        tolerance = st.slider("Select tolerance level", 0.0, 1.0, 0.3, 0.1)
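        # For context: face_recognition.compare_faces(known, candidate, tolerance=...) simply checks
        # face_distance(known, candidate) <= tolerance for each known encoding, so the slider value
        # chosen above directly controls how strict the matching below is.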
        if tolerance:
            # Loop through each face found in the input image.
            for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
                # Compare this face against the known faces using the selected tolerance.
                matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=tolerance)
                name = "Unknown"

                # Pick the known face with the smallest distance and use its name if it is within tolerance.
                face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
                st.write(face_distances)
                if len(face_distances) > 0:
                    best_match_index = np.argmin(face_distances)
                    if matches[best_match_index]:
                        name = known_face_names[best_match_index]

                # Crop the detected face out of the image and display it with its predicted name.
                face_image = image[top:bottom, left:right]
                pil_image = Image.fromarray(face_image)
                cols[i].image(pil_image, use_column_width=True)
                cols[i].write("Person name: " + name)
                i += 1
st.info("Click on the button below to complete this level!")
if st.button("Complete Level"):
complete_level(LEVEL)
render_page(step4_page, LEVEL)