import os

# Skip macOS AVFoundation camera-permission prompt; must be set before cv2 is imported
os.environ['OPENCV_AVFOUNDATION_SKIP_AUTH'] = '1'

import time

import streamlit as st
import cv2
from transformers import pipeline
from PIL import Image

# Initialize the Hugging Face pipeline for facial emotion detection
emotion_pipeline = pipeline("image-classification", model="dima806/facial_emotions_image_detection")


# Function to analyze sentiment in a single video frame
def analyze_sentiment(frame):
    # Convert frame from OpenCV's BGR order to RGB
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # Convert the frame to a PIL image
    pil_image = Image.fromarray(rgb_frame)
    # Analyze sentiment using the Hugging Face pipeline
    results = emotion_pipeline(pil_image)
    # Get the dominant emotion (highest-scoring label)
    dominant_emotion = max(results, key=lambda x: x['score'])['label']
    return dominant_emotion


# Generator that yields frames captured from the webcam
def video_stream():
    video_capture = cv2.VideoCapture(0)
    if not video_capture.isOpened():
        st.error("Error: Could not open video capture device.")
        return
    while True:
        ret, frame = video_capture.read()
        if not ret:
            st.error("Error: Failed to read frame from video capture device.")
            break
        yield frame
    video_capture.release()


# Streamlit UI (custom CSS/HTML can be injected here)
st.markdown(
    """
    """,
    unsafe_allow_html=True,
)

st.title("Computer Vision Test Lab")
st.subheader("Facial Sentiment")

# Columns for input and output streams
col1, col2 = st.columns(2)

with col1:
    st.header("Input Stream")
    st.subheader("Webcam")
    video_placeholder = st.empty()

with col2:
    st.header("Output Stream")
    st.subheader("Analysis")
    output_placeholder = st.empty()
    sentiment_placeholder = st.empty()

# Start video stream
video_capture = cv2.VideoCapture(0)
if not video_capture.isOpened():
    st.error("Error: Could not open video capture device.")
else:
    while True:
        ret, frame = video_capture.read()
        if not ret:
            st.error("Error: Failed to read frame from video capture device.")
            break

        # Display the input stream
        video_placeholder.image(frame, channels="BGR")

        # Analyze sentiment
        sentiment = analyze_sentiment(frame)

        # Display the output stream (here it's the same as input, modify as needed)
        output_placeholder.image(frame, channels="BGR")

        # Add a short delay to control the frame rate
        # (cv2.waitKey only works with an OpenCV window, so sleep instead)
        time.sleep(0.03)

        # Display sentiment
        sentiment_placeholder.write(f"Sentiment: {sentiment}")

    video_capture.release()
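
# Usage sketch (the filename "app.py" and the exact package set are assumptions,
# not part of the source; transformers needs a backend such as torch installed):
#   pip install streamlit opencv-python transformers torch pillow
#   streamlit run app.py
# Stop the Streamlit server (Ctrl+C) to end the loop and release the webcam.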