import streamlit as st
from PIL import Image
import face_recognition
from utils.levels import complete_level, render_page, initialize_level
from utils.login import get_login, initialize_login
import os
import uuid

initialize_login()
initialize_level()

LEVEL = 2


def step2_page():
    st.header("Face Detection and Creating your own known-faces database")
    st.write(
        """
        ### How does it work?
        Face detection is the process of identifying and locating human faces within an image or video frame.
        It is a fundamental step in many computer vision applications, including face recognition, facial expression analysis, and augmented reality.
        """
    )
    st.image(
        "https://user-images.githubusercontent.com/31125521/42756818-0a41edaa-88fe-11e8-9033-8cd141b0fa09.gif",
        use_column_width=True,
    )
    st.write(
        """
        In simple terms, here's how face detection works:
        1. **Image Preparation**: The input image is first preprocessed to improve the chances of detecting faces accurately. This may involve resizing, grayscale conversion, or contrast adjustments.
        2. **Feature Extraction**: The face detection algorithm analyzes the image to identify distinctive features commonly found in human faces, such as the eyes, nose, mouth, and other facial characteristics.
        3. **Classification or Regression**: Once the features are extracted, the algorithm uses a trained model to predict whether each region of the image contains a face. This model is typically trained with machine learning techniques on large datasets of labeled face and non-face samples.
        4. **Face Localization**: If a face is detected, the algorithm determines its position and size within the image. This information is usually represented as a bounding box that encloses the detected face region.
        5. **Post-processing**: After the initial detection, post-processing steps may refine the results, for example by filtering out false positives or adjusting the size and position of the detected face regions.
        """
    )
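    # A minimal sketch of steps 2-4 above, using the same face_recognition API
    # this page relies on ("hog" is the library's default detector; treating it
    # as the configuration used here is an assumption):
    #
    #   boxes = face_recognition.face_locations(image, model="hog")
    #   for top, right, bottom, left in boxes:
    #       face_crop = image[top:bottom, left:right]  # pixels of one detected face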
    img_dir = os.path.join(".sessions", get_login()["username"], "known_faces")
    os.makedirs(img_dir, exist_ok=True)
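    # img_dir is this user's personal known-faces database: one .jpg per saved
    # face, written by the "Save" button below as <uuid>_<name>_<index>.jpg.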
    st.write(
        """
        ### Creating your own known-faces database
        Now it's time to collect and name the identified faces to create our known-faces database for our face recognition model.
        But remember, we should always ask for permission before taking someone's picture. We can use a smartphone or a digital camera to capture pictures, and it's important to take pictures of different people. This will help our application build a good known-faces database!
        """
    )
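    # How this folder is typically consumed later (a hedged sketch, not code
    # from this level): every saved image is loaded and encoded once so that
    # newly detected faces can be compared against the known encodings.
    #
    #   known_encodings = []
    #   for fname in os.listdir(img_dir):
    #       known_img = face_recognition.load_image_file(os.path.join(img_dir, fname))
    #       encodings = face_recognition.face_encodings(known_img)
    #       if encodings:
    #           known_encodings.append(encodings[0])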
st.info("Select an image to continue!") | |
    input_type = st.radio("Select the Input Type", ["Image", "Camera"])
    if input_type == "Camera":
        picture = st.camera_input("Take a picture")
    else:
        picture = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])
    if picture:
        image = face_recognition.load_image_file(picture)
        st.image(image)
        # Find all the faces in the image using the default HOG-based model.
        # This method is fairly accurate, but not as accurate as the CNN model and not GPU accelerated.
        face_locations = face_recognition.face_locations(image)
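        # For higher accuracy at the cost of speed (ideally with a GPU), the CNN
        # detector can be requested instead:
        #   face_locations = face_recognition.face_locations(image, model="cnn")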
st.write("Algorithm found {} face(s) in this photograph.".format(len(face_locations))) | |
cols = st.columns(len(face_locations)) | |
        for i, (col, face) in enumerate(zip(cols, face_locations)):
            # Display each detected face in its own column.
            with col:
                st.header("Face {}".format(i))
                # face_locations gives (top, right, bottom, left) pixel coordinates,
                # so the face itself can be cropped straight out of the image array.
                top, right, bottom, left = face
                face_image = image[top:bottom, left:right]
                pil_image = Image.fromarray(face_image)
                col.image(pil_image, use_column_width=True)
                face_name = st.text_input(
                    "Specify a name to save it to the known-faces database",
                    placeholder="e.g. Alice", key="text_" + str(i),
                )
                if st.button("Save", key="button_" + str(i)):
                    if not face_name:
                        st.warning("Please enter a name before saving.")
                    else:
                        img_name = f"{uuid.uuid4()}_{face_name}_{i}.jpg"
                        img_path = os.path.join(img_dir, img_name)
                        pil_image.save(img_path)
                        st.success("Face added successfully!")
    images = os.listdir(img_dir)
    st.write(images)
    if st.button("Clear All"):
        for img in images:
            os.remove(os.path.join(img_dir, img))
        st.success("All images cleared!")
        st.experimental_rerun()
st.info("If you are satisfied with your images, click on the button below to complete this level.") | |
if st.button("Complete"): | |
complete_level(LEVEL) | |


render_page(step2_page, LEVEL)