Update app.py
app.py
CHANGED
@@ -6,14 +6,16 @@ from tensorflow.keras.applications.efficientnet_v2 import preprocess_input
 from PIL import Image
 import cv2
 from streamlit_webrtc import webrtc_streamer, VideoProcessorBase
-
+import av
 
 # Load your emotion classification model
 def load_model(model_path, weights_path):
+    st.write("Loading model...")
     with open(model_path, 'r') as json_file:
         model_json = json_file.read()
     loaded_model = tf.keras.models.model_from_json(model_json)
     loaded_model.load_weights(weights_path)
+    st.write("Model loaded successfully")
     return loaded_model
 
 # Function to preprocess image for prediction
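The st.write calls added here will re-execute, and the model will reload, on every Streamlit rerun if load_model is called at script top level. A minimal sketch of one way to avoid that, assuming Streamlit's st.cache_resource decorator (not part of this commit; the file paths are placeholders):

import streamlit as st
import tensorflow as tf

@st.cache_resource  # cache across reruns; reload only if the arguments change
def load_model_cached(model_path, weights_path):
    # Rebuild the architecture from its JSON description...
    with open(model_path, 'r') as json_file:
        model = tf.keras.models.model_from_json(json_file.read())
    # ...then restore the trained weights into that architecture.
    model.load_weights(weights_path)
    return model

model = load_model_cached("model.json", "model_weights.h5")  # hypothetical paths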
@@ -26,9 +28,11 @@ def preprocess_image(img, target_size=(224, 224)):
 
 # Function to predict emotion of the image
 def predict_emotion_single_image(img, model):
+    st.write("Predicting emotion...")
     preprocessed_img = preprocess_image(img)
     predictions = model.predict(preprocessed_img)
     predicted_class = np.argmax(predictions, axis=1)[0]
+    st.write(f"Prediction: {predicted_class}")
     return predicted_class
 
 # Video processor class for real-time emotion detection
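The body of preprocess_image sits outside this hunk; its signature in the hunk header and the preprocess_input import at the top of the file suggest roughly the following shape (a sketch of the likely helper, not the file's actual code):

import numpy as np
from tensorflow.keras.applications.efficientnet_v2 import preprocess_input

def preprocess_image(img, target_size=(224, 224)):
    # Force RGB and resize the PIL image to the model's input resolution.
    img = img.convert("RGB").resize(target_size)
    # Convert to an array and add a batch dimension: (1, 224, 224, 3).
    arr = np.expand_dims(np.array(img), axis=0)
    # Apply the EfficientNetV2 input scaling used at training time.
    return preprocess_input(arr)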
@@ -40,20 +44,13 @@ class VideoProcessor(VideoProcessorBase):
 
     def recv(self, frame):
         img = frame.to_ndarray(format="bgr24")
-
-        # Convert the frame to grayscale
         gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-
-        # Detect faces in the grayscale frame
         faces = self.face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
-
-        # Draw rectangles around the faces and predict emotions
         for (x, y, w, h) in faces:
             face_img = img[y:y + h, x:x + w]
             predicted_class = predict_emotion_single_image(Image.fromarray(cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB)), self.model)
             cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
             cv2.putText(img, self.class_names[predicted_class], (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36, 255, 12), 2)
-
         return av.VideoFrame.from_ndarray(img, format="bgr24")
 
 # Main Streamlit function
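recv references self.face_cascade, self.model, and self.class_names, so the constructor (not shown in this diff) presumably sets them up along these lines; the cascade file ships with OpenCV, and streamlit_webrtc's webrtc_streamer accepts the class as a factory. Note also that recv runs on streamlit-webrtc's worker thread, where the st.write calls added to predict_emotion_single_image above generally will not render. A sketch under those assumptions:

class VideoProcessor(VideoProcessorBase):
    def __init__(self):
        # Haar cascade bundled with the opencv-python package.
        self.face_cascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
        self.model = load_model("model.json", "model_weights.h5")  # hypothetical paths
        self.class_names = ['Ahegao', 'Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']

# Hook the processor into the page; "emotion-detect" is an arbitrary widget key.
webrtc_streamer(key="emotion-detect", video_processor_factory=VideoProcessor)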
@@ -78,7 +75,7 @@ def main():
         img = Image.open(single_image)
         predicted_emotion = predict_emotion_single_image(img, model)
         st.image(img, caption="Uploaded Image", use_column_width=True)
-        st.write("Predicted Emotion for Single Image:",
+        st.write("Predicted Emotion for Single Image:", ['Ahegao', 'Angry', 'Happy', 'Neutral', 'Sad', 'Surprise'][predicted_emotion])
 
 if __name__ == "__main__":
     main()
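This last hunk completes a previously truncated st.write call: predicted_emotion is the integer index returned by np.argmax, and indexing the hard-coded label list turns it into a readable name. A standalone illustration of the mapping:

class_names = ['Ahegao', 'Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
predicted_emotion = 2                    # e.g. the argmax of the softmax output
print(class_names[predicted_emotion])    # -> Happy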