import torch
import cv2
# requests and BytesIO are needed to load images from URLs in analyze_emotion_from_image.
import requests
from io import BytesIO
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from transformers import pipeline
import gradio as gr
from sklearn.cluster import KMeans
from colorsys import rgb_to_hsv
from huggingface_hub import InferenceClient

# Text emotion classifier: DistilRoBERTa fine-tuned on English emotion labels.
emotion_classifier = pipeline(
    "text-classification",
    model="j-hartmann/emotion-english-distilroberta-base",
    return_all_scores=True,
)

# Hugging Face Inference API client (not used by the functions below).
client = InferenceClient("nehapasricha94/LLaVA-image-analysis")

def analyze_colors(image):
    """Extract the dominant colors of an image via k-means clustering."""
    try:
        # Work in RGB so the pixel values are consistent.
        if image.mode != "RGB":
            image = image.convert("RGB")

        # Downscale to keep clustering fast.
        image = image.resize((150, 150))

        # Flatten the image into a (num_pixels, 3) array of RGB values.
        img_array = np.array(image)
        pixels = img_array.reshape((-1, 3))

        # Cluster pixels into 5 dominant colors.
        kmeans = KMeans(n_clusters=5, random_state=0)
        kmeans.fit(pixels)
        dominant_colors = kmeans.cluster_centers_

        # Show the dominant-color palette (useful when running interactively).
        plt.figure(figsize=(8, 6))
        plt.imshow([dominant_colors.astype(int)])
        plt.axis('off')
        plt.show()

        return dominant_colors

    except Exception as e:
        print(f"Error in analyze_colors: {e}")
        return None

def color_emotion_analysis(dominant_colors):
    """Map each dominant color to an emotion and stress label via a weighted HSV score."""
    try:
        emotions = []
        stress_levels = []

        # Weights for combining brightness (value), hue, and saturation into one score.
        brightness_weight = 0.5
        hue_weight = 0.3
        saturation_weight = 0.2

        for color in dominant_colors:
            # Normalize RGB to [0, 1] and convert to HSV.
            r, g, b = color / 255.0
            h, s, v = rgb_to_hsv(r, g, b)

            # Weighted score: brighter, more saturated colors score higher.
            weighted_brightness = v * brightness_weight
            weighted_hue = h * hue_weight
            weighted_saturation = s * saturation_weight
            score = weighted_brightness + weighted_hue + weighted_saturation

            # Bucket the score into an emotion and a stress level.
            if score < 0.3:
                emotions.append("Sadness")
                stress_levels.append("Moderate-High Stress")
            elif 0.3 <= score < 0.5:
                emotions.append("Neutral")
                stress_levels.append("Moderate Stress")
            elif 0.5 <= score < 0.7:
                emotions.append("Okay")
                stress_levels.append("Low Stress")
            elif 0.7 <= score < 0.85:
                emotions.append("Happiness")
                stress_levels.append("Very Low Stress")
            else:
                emotions.append("Very Happy")
                stress_levels.append("No Stress")

        return emotions, stress_levels

    except Exception as e:
        print(f"Error in color_emotion_analysis: {e}")
        return ["Error analyzing emotions"], ["Error analyzing stress levels"]

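# Illustration (derived from the weights above, not part of the original code):
# a mid-gray color (128, 128, 128) gives h = 0, s = 0, v of about 0.502, so
# score = 0.502*0.5 + 0*0.3 + 0*0.2 = 0.25 (approximately), which lands in the
# "Sadness" / "Moderate-High Stress" bucket.
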
def analyze_patterns(image):
    """Estimate visual 'chaos' from edge density using Canny edge detection."""
    try:
        # Convert to grayscale and detect edges.
        gray_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
        edges = cv2.Canny(gray_image, 100, 200)

        # Count edge pixels as a rough measure of visual complexity.
        num_edges = np.sum(edges > 0)

        if num_edges > 10000:
            return "Chaotic patterns - possibly distress", 0.8
        else:
            return "Orderly patterns - possibly calm", 0.2
    except Exception as e:
        print(f"Error in analyze_patterns: {e}")
        return "Error analyzing patterns", 0.5

def compute_overall_result(color_emotions, stress_levels, pattern_analysis, pattern_stress):
    """Combine color-based and pattern-based stress estimates into one summary string."""
    try:
        # Equal weighting between the color-derived and pattern-derived stress.
        color_emotion_weight = 0.5
        pattern_weight = 0.5

        # Most frequent emotion across the dominant colors.
        dominant_emotion = max(set(color_emotions), key=color_emotions.count)

        # Map stress labels to numeric values so they can be averaged.
        stress_mapping = {
            "No Stress": 0.1,
            "Very Low Stress": 0.3,
            "Low Stress": 0.5,
            "Moderate Stress": 0.7,
            "Moderate-High Stress": 0.9,
        }
        color_stress_numeric = [stress_mapping[stress] for stress in stress_levels if stress in stress_mapping]
        avg_color_stress = np.mean(color_stress_numeric) if color_stress_numeric else 0.5

        # Weighted combination of the two stress estimates.
        overall_stress = (avg_color_stress * color_emotion_weight) + (pattern_stress * pattern_weight)

        # Translate the numeric stress into a readable mood label.
        if overall_stress < 0.3:
            overall_emotion = "Calm and Relaxed"
        elif 0.3 <= overall_stress < 0.6:
            overall_emotion = "Neutral Mood"
        elif 0.6 <= overall_stress < 0.8:
            overall_emotion = "Slightly Stressed"
        else:
            overall_emotion = "Highly Stressed"

        return f"Overall emotion: {overall_emotion} (Dominant Color Emotion: {dominant_emotion}, Pattern Analysis: {pattern_analysis})"

    except Exception as e:
        return f"Error computing overall result: {str(e)}"

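# Illustration (derived from the mapping above, not part of the original code):
# if the color stress labels average to 0.7 ("Moderate Stress") and the pattern
# stress is 0.2 (orderly patterns), then overall_stress = 0.7*0.5 + 0.2*0.5 = 0.45,
# which is reported as "Neutral Mood".
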
def analyze_emotion_from_text(text):
    """Classify the emotion expressed in a piece of text."""
    try:
        # With return_all_scores=True the pipeline returns one list of label scores
        # per input, so take the first (and only) result for a single string.
        emotion_scores = emotion_classifier(text)[0]
        dominant_emotion = max(emotion_scores, key=lambda x: x['score'])
        return f"Detected emotion from text: {dominant_emotion['label']} with score: {dominant_emotion['score']:.2f}"
    except Exception as e:
        print(f"Error analyzing emotion from text: {e}")
        return "Error analyzing text emotion"

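# Note: analyze_emotion_from_text is not wired into the Gradio app below. One
# hypothetical way to expose it (an assumption, not in the original script) would
# be a second interface, for example:
#
#   text_iface = gr.Interface(fn=analyze_emotion_from_text, inputs="text", outputs="text")
#   gr.TabbedInterface([iface, text_iface], ["Image", "Text"]).launch()
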
def analyze_emotion_from_image(image):
    """Run the full color and pattern analysis pipeline on an image input."""
    try:
        print(f"Initial input type: {type(image)}")

        # Accept a URL, a local file path, a blob-style dict, or a NumPy array.
        if isinstance(image, str) and image.startswith('http'):
            print(f"Loading image from URL: {image}")
            response = requests.get(image, stream=True)
            response.raise_for_status()
            image = Image.open(BytesIO(response.content)).convert("RGB")
            print("Loaded image from URL.")
        elif isinstance(image, str):
            print(f"Loading image from file path: {image}")
            image = Image.open(image).convert("RGB")
            print("Loaded image from local file.")
        elif isinstance(image, dict) and "blob" in image:
            blob_data = image["blob"]
            image = Image.open(blob_data).convert("RGB")
            print("Loaded image from Blob data.")
        elif isinstance(image, np.ndarray):
            image = Image.fromarray(image).convert("RGB")
            print("Converted image from NumPy array.")

        print(f"Image size: {image.size}, mode: {image.mode}")

        # Step 1: dominant colors.
        dominant_colors = analyze_colors(image)
        if dominant_colors is None:
            return "Error analyzing colors"

        # Step 2: color-based emotions and stress levels.
        color_emotions, stress_levels = color_emotion_analysis(dominant_colors)
        print(f"Color emotions: {color_emotions}, Stress levels: {stress_levels}")

        # Step 3: pattern / edge analysis.
        pattern_analysis, pattern_stress = analyze_patterns(image)
        print(f"Pattern analysis: {pattern_analysis}, Pattern stress: {pattern_stress}")

        # Step 4: combine everything into a single summary.
        overall_result = compute_overall_result(color_emotions, stress_levels, pattern_analysis, pattern_stress)

        return overall_result
    except Exception as e:
        print(f"Error processing image: {str(e)}")
        return f"Error processing image: {str(e)}"

# Gradio interface: upload an image, get back the emotion/stress summary.
iface = gr.Interface(fn=analyze_emotion_from_image, inputs="image", outputs="text")


if __name__ == "__main__":
    iface.launch()
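# Usage sketch (assumptions: the script is saved as app.py and run locally):
#
#   $ python app.py    # starts the Gradio server and prints a local URL
#
# The analysis can also be called directly, e.g.:
#
#   print(analyze_emotion_from_image("path/to/photo.jpg"))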