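"""Streamlit app for facial emotion detection.

Supports real-time detection from a webcam stream (via streamlit-webrtc)
and emotion analysis for a single uploaded image, using a Keras model
loaded from model.json / model_weights.h5 and an OpenCV Haar cascade
for face detection.
"""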
import streamlit as st
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.applications.efficientnet_v2 import preprocess_input
from PIL import Image
import cv2
from streamlit_webrtc import webrtc_streamer, VideoProcessorBase
import av

# Load the emotion classification model (architecture from JSON, weights from HDF5).
# Cached so the model is built once per session instead of on every Streamlit rerun.
@st.cache_resource
def load_model(model_path, weights_path):
    st.write("Loading model...")
    with open(model_path, 'r') as json_file:
        model_json = json_file.read()
    loaded_model = tf.keras.models.model_from_json(model_json)
    loaded_model.load_weights(weights_path)
    st.write("Model loaded successfully")
    return loaded_model

# Preprocess a PIL image for the EfficientNetV2-based classifier.
def preprocess_image(img, target_size=(224, 224)):
    img = img.resize(target_size)
    img_array = img_to_array(img)
    img_array = np.expand_dims(img_array, axis=0)  # add the batch dimension
    img_array = preprocess_input(img_array)
    return img_array

# Predict the emotion class index for a single PIL image.
# No st.write calls here: this function is also called from VideoProcessor.recv,
# which runs on a background thread where Streamlit output does not render.
def predict_emotion_single_image(img, model):
    preprocessed_img = preprocess_image(img)
    predictions = model.predict(preprocessed_img, verbose=0)
    return int(np.argmax(predictions, axis=1)[0])

# Video processor for real-time emotion detection via streamlit-webrtc.
class VideoProcessor(VideoProcessorBase):
    def __init__(self, model, face_cascade):
        self.model = model
        self.face_cascade = face_cascade
        # Class order must match the label order used when the model was trained.
        self.class_names = ['Ahegao', 'Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']

    def recv(self, frame):
        img = frame.to_ndarray(format="bgr24")
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = self.face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
        # Running a full model.predict per detected face keeps the code simple,
        # but it limits the achievable frame rate.
        for (x, y, w, h) in faces:
            face_img = img[y:y + h, x:x + w]
            # OpenCV frames are BGR; convert to RGB before handing to PIL/Keras.
            predicted_class = predict_emotion_single_image(Image.fromarray(cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB)), self.model)
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            cv2.putText(img, self.class_names[predicted_class], (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36, 255, 12), 2)
        return av.VideoFrame.from_ndarray(img, format="bgr24")

# Main Streamlit entry point.
def main():
    st.title("Facial Emotion Detection")
    st.sidebar.title("Options")
    option = st.sidebar.selectbox("Choose an option", ["Real-time Facial Emotion Detection", "Emotion Analysis for a Single Image"])

    # Load the emotion classification model and the pre-trained Haar cascade face detector.
    model = load_model('model.json', 'model_weights.h5')
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

    if option == "Real-time Facial Emotion Detection":
        st.write("Real-time Facial Emotion Detection")
        webrtc_streamer(key="emotion-detection", video_processor_factory=lambda: VideoProcessor(model, face_cascade))
    elif option == "Emotion Analysis for a Single Image":
        st.write("Emotion Analysis for a Single Image")
        single_image = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
        if single_image is not None:
            # Convert to RGB so PNGs with an alpha channel don't break the 3-channel model input.
            img = Image.open(single_image).convert("RGB")
            predicted_emotion = predict_emotion_single_image(img, model)
            st.image(img, caption="Uploaded Image", use_column_width=True)
            class_names = ['Ahegao', 'Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
            st.write("Predicted Emotion for Single Image:", class_names[predicted_emotion])

if __name__ == "__main__":
    main()