# NOTE: scraped from a HuggingFace Spaces page whose status banner read
# "Runtime error" — the app was crashing at the time of capture.
import streamlit as st | |
import cv2 | |
import numpy as np | |
import datetime | |
import os | |
import time | |
import base64 | |
import re | |
import glob | |
from camera_input_live import camera_input_live | |
import face_recognition | |
# Use Streamlit's wide layout so the 2:3 column split below gets the full viewport width.
# Must be the first Streamlit call in the script.
st.set_page_config(layout="wide")
# Shared snapshot counter (created lazily on first use).
def get_image_count():
    """Return the shared image-counter dict, creating it on first call.

    Bug fix: the original returned a brand-new ``{'count': 0}`` dict on
    every call (despite the comment claiming it cached), so the counter
    embedded in snapshot filenames reset to 0 for every capture.
    Memoizing the dict on the function object keeps the count monotonic
    across calls while preserving the ``{'count': int}`` return shape
    that ``save_image`` mutates.

    Returns:
        dict: the one shared ``{'count': <int>}`` counter.
    """
    if not hasattr(get_image_count, "_counter"):
        get_image_count._counter = {'count': 0}  # created once per process
    return get_image_count._counter
# Function Definitions for Camera Feature
def save_image(image, image_count):
    """Decode one camera frame and persist it to disk as a PNG.

    Args:
        image: BytesIO-like object whose ``getvalue()`` yields encoded
            image bytes (as returned by ``camera_input_live``).
        image_count: dict with a ``'count'`` key; used to disambiguate
            filenames and incremented in place after each save.

    Returns:
        str: the filename the frame was written under.
    """
    stamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    out_name = f"captured_image_{stamp}_{image_count['count']}.png"
    image_count['count'] += 1
    # Decode the raw encoded bytes into a BGR frame before writing.
    raw = np.frombuffer(image.getvalue(), np.uint8)
    frame = cv2.imdecode(raw, cv2.IMREAD_COLOR)
    cv2.imwrite(out_name, frame)
    return out_name
def get_image_base64(image_path):
    """Return the contents of the file at *image_path* as a base64 ASCII string."""
    with open(image_path, "rb") as fh:
        raw = fh.read()
    return base64.b64encode(raw).decode()
# Function Definitions for Chord Sheet Feature
def process_line(line):
    """Replace chord names in *line* with ``<img>`` tags pointing at '<chord>.png'.

    A chord is a root letter A-G, an optional accidental ('#' or 'b'),
    and an optional minor 'm', standing alone (not embedded in a word).

    Bug fix: the original pattern ended with ``\\b``, which can never
    match after a trailing '#' ('#' is not a word character), so sharp
    chords like "C#" or "F#m" were mangled — only the root letter was
    replaced, leaving a stray '#'.  ``(?<!\\w)``/``(?!\\w)`` lookarounds
    behave identically to ``\\b`` for letter/digit neighbours but accept
    a chord that ends in '#'.

    Args:
        line: one line of a chord sheet.

    Returns:
        str: the line with each standalone chord replaced by an img tag.
    """
    chord = r"(?<!\w)([A-G][#b]?m?)(?!\w)"
    return re.sub(chord, r"<img src='\1.png' style='height:20px;'>", line)
def process_sheet(sheet):
    """Apply ``process_line`` to every line of *sheet* and join the results with ``<br>``.

    Args:
        sheet: full chord-sheet text, newline-separated.

    Returns:
        str: HTML fragment with per-line chord substitutions applied.
    """
    return '<br>'.join(process_line(raw) for raw in sheet.split('\n'))
# Load a sample image and learn how to recognize it.
# Runs at import time, before main().
# NOTE(review): this crashes the whole app at startup if "known_face.jpg" is
# missing (FileNotFoundError) or contains no detectable face (the [0] index
# on an empty encodings list raises IndexError) — likely the cause of the
# Space's "Runtime error" status; consider guarding with a try/except.
known_image = face_recognition.load_image_file("known_face.jpg")
known_encoding = face_recognition.face_encodings(known_image)[0]
# Main Function
def main():
    """Render the two-column UI.

    Left column: live camera stream with face recognition (known faces get a
    green box + label, unknown faces a red box) plus periodic snapshots shown
    in the sidebar.  Right column: chord-sheet viewer for local files.
    """
    # Layout Configuration: camera gets 2/5 of the width, sheet viewer 3/5.
    col1, col2 = st.columns([2, 3])

    # Camera Section
    with col1:
        st.markdown("✨ Magic Lens: Real-Time Camera Stream 🌈")
        snapshot_interval = st.slider("Snapshot Interval (seconds)", 1, 10, 5)
        image_placeholder = st.empty()

        # Per-session state: saved snapshot filenames and last capture time.
        if 'captured_images' not in st.session_state:
            st.session_state['captured_images'] = []
        if 'last_captured' not in st.session_state:
            st.session_state['last_captured'] = time.time()

        image = camera_input_live()
        if image is not None:
            # BUG FIX: camera_input_live() returns a BytesIO-like object of
            # *encoded* image bytes (save_image already relies on its
            # .getvalue()).  The original `np.array(image)` therefore produced
            # a 0-d object array and cv2.cvtColor raised at runtime.  Decode
            # the bytes into a BGR frame first, then convert to RGB, which is
            # the color order face_recognition expects.
            raw = np.frombuffer(image.getvalue(), np.uint8)
            frame = cv2.imdecode(raw, cv2.IMREAD_COLOR)
            rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            # Detect faces and compute their encodings in one pass.
            face_locations = face_recognition.face_locations(rgb_image)
            face_encodings = face_recognition.face_encodings(rgb_image, face_locations)

            # Compare every detected face against the single known encoding.
            for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
                matches = face_recognition.compare_faces([known_encoding], face_encoding)
                if True in matches:
                    # Match: green rectangle + label above the box.
                    cv2.rectangle(rgb_image, (left, top), (right, bottom), (0, 255, 0), 2)
                    cv2.putText(rgb_image, "Known Face", (left, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
                else:
                    # No match: red rectangle only.
                    cv2.rectangle(rgb_image, (left, top), (right, bottom), (0, 0, 255), 2)

            # Convert back to BGR for display (st.image told channels="BGR").
            bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
            image_placeholder.image(bgr_image, channels="BGR")

            # Periodic snapshot: save the raw frame once per interval.
            if time.time() - st.session_state['last_captured'] > snapshot_interval:
                image_count = get_image_count()
                filename = save_image(image, image_count)
                st.session_state['captured_images'].append(filename)
                st.session_state['last_captured'] = time.time()

        # Sidebar thumbnail strip of every snapshot taken this session.
        sidebar_html = "<div style='display:flex;flex-direction:column;'>"
        for img_file in st.session_state['captured_images']:
            image_base64 = get_image_base64(img_file)
            sidebar_html += f"<img src='data:image/png;base64,{image_base64}' style='width:100px;'><br>"
        sidebar_html += "</div>"
        st.sidebar.markdown("## Captured Images")
        st.sidebar.markdown(sidebar_html, unsafe_allow_html=True)

        # JavaScript Timer
        # NOTE(review): st.markdown sanitizes/does not execute <script> tags,
        # so this clock likely never ticks — confirm; st.components.v1.html
        # would be needed for live JS.
        st.markdown(f"<script>setInterval(function() {{ document.getElementById('timer').innerHTML = new Date().toLocaleTimeString(); }}, 1000);</script><div>Current Time: <span id='timer'></span></div>", unsafe_allow_html=True)

    # Chord Sheet Section
    with col2:
        st.markdown("## 🎬 Action! Real-Time Camera Stream Highlights 📽️")
        # NOTE(review): this lists *.png files containing ' by ' and then opens
        # them as UTF-8 text below — presumably these are text chord sheets
        # saved with a .png extension ("Song by Artist.png"); a real PNG here
        # would raise UnicodeDecodeError.  Confirm the naming convention.
        all_files = [f for f in glob.glob("*.png") if ' by ' in f]
        selected_file = st.selectbox("Choose a Dataset:", all_files)
        if selected_file:
            with open(selected_file, 'r', encoding='utf-8') as file:
                sheet = file.read()
            st.markdown(process_sheet(sheet), unsafe_allow_html=True)

    # Trigger a rerun only when the snapshot interval is reached, so the
    # camera loop keeps polling without rerunning on every script pass.
    # (`snapshot_interval` is function-scoped, so it is visible here.)
    if 'last_captured' in st.session_state and time.time() - st.session_state['last_captured'] > snapshot_interval:
        st.experimental_rerun()  # NOTE(review): deprecated alias of st.rerun in newer Streamlit

if __name__ == "__main__":
    main()