#############################################################################################################################
# Filename : app.py
# Description: A Streamlit application to detect facial expressions from images and provide responses.
# Author : Lucas Yao
#
# Copyright © 2024 by Lucas Yao
#############################################################################################################################
# Import libraries.
import os                       # Read environment variables (e.g. the API key).
import streamlit as st          # Build the GUI of the application.
from PIL import Image           # Handle image operations.
from dotenv import load_dotenv  # Load environment variables from a .env file.
from fer import FER             # FER model for facial expression recognition.
import numpy as np              # NumPy for array handling.
import openai                   # OpenAI API for generating text responses.
#############################################################################################################################
# Load environment variable(s).
load_dotenv()
# Set up OpenAI API key.
openai.api_key = os.getenv('OPENAI_API_KEY')
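# Illustrative note (not part of the original code): load_dotenv() reads a local .env file,
# so a line such as
#   OPENAI_API_KEY=sk-...
# in .env lets os.getenv('OPENAI_API_KEY') pick up the key at runtime.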
#############################################################################################################################
# Function to query the facial expression recognition model using FER.
def query_emotion(image):
    # Convert the PIL Image to a NumPy array.
    image_np = np.array(image)
    detector = FER()
    emotions = detector.detect_emotions(image_np)
    if emotions:
        # Get the emotion with the highest score.
        dominant_emotion = max(emotions[0]['emotions'], key=emotions[0]['emotions'].get)
        return dominant_emotion
    else:
        st.error("Could not detect any emotion.")
        return None
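# Illustrative sketch (assumed, not from the original code): detect_emotions() typically returns
# a list with one dict per detected face, roughly of the form
#   [{'box': [x, y, w, h],
#     'emotions': {'angry': 0.02, 'disgust': 0.0, 'fear': 0.01, 'happy': 0.85,
#                  'sad': 0.03, 'surprise': 0.05, 'neutral': 0.04}}]
# so the max(..., key=...) call above picks the highest-scoring label for the first face.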
#############################################################################################################################
# Function to generate a response using OpenAI based on detected emotion.
def generate_text_based_on_mood(emotion, response_type):
    try:
        if response_type == "Joke":
            prompt = f"Generate a light-hearted joke for someone who is feeling {emotion}."
        elif response_type == "Motivational Message":
            prompt = f"Generate a motivational message for someone who is feeling {emotion}."
        elif response_type == "Compliment":
            prompt = f"Generate a heartfelt compliment for someone who is feeling {emotion}."
        elif response_type == "Song Recommendation":
            prompt = f"Recommend a song that would suit someone who is feeling {emotion}. Include the song title and artist."
        else:
            # Fallback so `prompt` is always defined, even for an unexpected response type.
            prompt = f"Write a short, kind message for someone who is feeling {emotion}."
        # Call OpenAI's API using GPT-4.
        response = openai.ChatCompletion.create(
            model="gpt-4",  # Specify the GPT-4 model.
            messages=[
                {"role": "user", "content": prompt}
            ]
        )
        # Extract the generated text.
        generated_text = response['choices'][0]['message']['content']
        return generated_text.strip()
    except Exception as e:
        st.error(f"Error generating text: {e}")
        return "Sorry, I couldn't come up with a message at this moment."
#############################################################################################################################
# Function to convert text to speech using gTTS.
def text_to_speech(text):
    from gtts import gTTS
    try:
        tts = gTTS(text, lang='en')
        audio_file = "output.mp3"
        tts.save(audio_file)  # Save the audio file.
        return audio_file
    except Exception as e:
        st.error(f"Error with TTS: {e}")
        return None
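# Usage sketch (illustrative): gTTS synthesizes speech through Google's online TTS service, so this
# call needs network access; the returned MP3 path can be passed directly to st.audio().
#   audio_path = text_to_speech("Hello there!")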
#############################################################################################################################
# Main function to create the Streamlit web application.
def main():
    st.title("Facial Expression Mood Detector")
    st.write("Upload an image of a face to detect mood and receive a response.")
    # Upload image.
    uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
    if uploaded_file is not None:
        # Load and display the image.
        image = Image.open(uploaded_file)
        st.image(image, caption='Uploaded Image', use_column_width=True)
        # Detect facial expression.
        emotion = query_emotion(image)
        if emotion:
            st.write(f"Detected emotion: {emotion}")
            # Dropdown for selecting response type.
            response_type = st.selectbox("Select the type of response:", ["Joke", "Motivational Message", "Compliment", "Song Recommendation"])
            # Generate text based on detected emotion and user preference.
            if st.button("Get Response"):
                message = generate_text_based_on_mood(emotion, response_type)
                # Display the generated message (song recommendation or other response).
                st.write("Here's your response:")
                st.write(message)
                # Convert the generated message to audio unless it's a song recommendation.
                if response_type != "Song Recommendation":
                    audio_file = text_to_speech(message)
                    if audio_file:
                        st.audio(audio_file)  # Streamlit will handle playback.
#############################################################################################################################
# Run the application.
if __name__ == "__main__":
    main()