Update app.py
app.py
CHANGED
@@ -1,123 +1,87 @@
 import streamlit as st
-import
-import
+from fastai.vision import open_image, load_learner, show_image
+import PIL.Image
 from PIL import Image
-import requests
 from io import BytesIO
+import requests
+import torch.nn as nn
+import os
+import tempfile
+import shutil
+
+# Define the FeatureLoss class
+class FeatureLoss(nn.Module):
+    def __init__(self, m_feat, layer_ids, layer_wgts):
+        super().__init__()
+        self.m_feat = m_feat
+        self.loss_features = [self.m_feat[i] for i in layer_ids]
+        self.hooks = hook_outputs(self.loss_features, detach=False)
+        self.wgts = layer_wgts
+        self.metric_names = ['pixel',] + [f'feat_{i}' for i in range(len(layer_ids))] + [f'gram_{i}' for i in range(len(layer_ids))]
+
+    def make_features(self, x, clone=False):
+        self.m_feat(x)
+        return [(o.clone() if clone else o) for o in self.hooks.stored]
+
+    def forward(self, input, target):
+        out_feat = self.make_features(target, clone=True)
+        in_feat = self.make_features(input)
+        self.feat_losses = [base_loss(input, target)]
+        self.feat_losses += [base_loss(f_in, f_out) * w for f_in, f_out, w in zip(in_feat, out_feat, self.wgts)]
+        self.feat_losses += [base_loss(gram_matrix(f_in), gram_matrix(f_out)) * w**2 * 5e3 for f_in, f_out, w in zip(in_feat, out_feat, self.wgts)]
+        self.metrics = dict(zip(self.metric_names, self.feat_losses))
+        return sum(self.feat_losses)
+
+    def __del__(self): self.hooks.remove()
+
+def add_margin(pil_img, top, right, bottom, left, color):
+    width, height = pil_img.size
+    new_width = width + right + left
+    new_height = height + top + bottom
+    result = Image.new(pil_img.mode, (new_width, new_height), color)
+    result.paste(pil_img, (left, top))
+    return result
+
+def inference(image_path_or_url, learn):
+    if image_path_or_url.startswith('http://') or image_path_or_url.startswith('https://'):
+        response = requests.get(image_path_or_url)
+        img = PIL.Image.open(BytesIO(response.content)).convert("RGB")
+    else:
+        img = PIL.Image.open(image_path_or_url).convert("RGB")
+
+    im_new = add_margin(img, 250, 250, 250, 250, (255, 255, 255))
+    im_new.save("test.jpg", quality=95)
+    img = open_image("test.jpg")
+    p, img_hr, b = learn.predict(img)
+    return img_hr
+
+# Streamlit application
+st.title("Image Inference with Fastai")

 # Download the model file from the Hugging Face repository
-model_url = "https://huggingface.co/Hammad712/
[old lines 10-33 removed; their content is not rendered in the diff view]
-st.markdown("""
-<style>
-.title {
-    font-size: 3em;
-    color: #4CAF50; /* Green */
-    text-align: center;
-}
-.description {
-    font-size: 1.2em;
-    color: #777777; /* Gray */
-    text-align: center;
-}
-.header {
-    font-size: 1.5em;
-    color: #ff6f61; /* Red */
-}
-.predicted-emotion {
-    font-size: 1.5em;
-    color: #1e90ff; /* Blue */
-}
-.spinner-text {
-    font-size: 1.2em;
-    color: #ffa500; /* Orange */
-}
-</style>
-""", unsafe_allow_html=True)
-
-# Title and description
-st.markdown('<div class="title">Emotion Detection</div>', unsafe_allow_html=True)
-st.markdown('<div class="description">Upload an image or provide an image URL to detect emotions</div>', unsafe_allow_html=True)
-
-# Input field for image URL
-st.markdown('<div class="header">Enter Image URL</div>', unsafe_allow_html=True)
-image_url = st.text_input("Image URL")
-
-# File uploader for images
-st.markdown('<div class="header">Or Upload Image</div>', unsafe_allow_html=True)
-image_file = st.file_uploader("Choose an image", type=["png", 'jpg'])
-
-# Sidebar to show previous predictions
-if 'predictions' not in st.session_state:
-    st.session_state.predictions = []
-
-st.sidebar.header("Previous Predictions")
-for pred in st.session_state.predictions:
-    st.sidebar.image(pred['image'], caption=pred['emotion'], use_column_width=True)
-
-# Function to load image from URL
-def load_image_from_url(url):
-    try:
-        response = requests.get(url)
-        img = Image.open(BytesIO(response.content))
-        return img
-    except Exception as e:
-        st.error(f"Error loading image: {e}")
-        return None
-
-# Display the image and predict emotions
-image = None
-if image_url:
-    image = load_image_from_url(image_url)
-elif image_file is not None:
-    image = Image.open(image_file)
-
-if image:
-    st.image(image, caption="Selected Image", use_column_width=True)
-
-    with st.spinner('Processing...'):
-        st.markdown('<div class="spinner-text">Processing...</div>', unsafe_allow_html=True)
-
-        # Preprocess the image
-        preprocessed_image = preprocess_image(image)
-
-        # Make predictions
-        predictions = model.predict(preprocessed_image)
-        predicted_label = emotion_labels[np.argmax(predictions)]
-
-        # Display the predicted emotion
-        st.markdown(f'<div class="predicted-emotion">Predicted Emotion: {predicted_label}</div>', unsafe_allow_html=True)
-
-        # Save the prediction and image to session state
-        image_bytes = BytesIO()
-        image.save(image_bytes, format='PNG')
-        st.session_state.predictions.append({
-            'image': image_bytes.getvalue(),
-            'emotion': predicted_label
-        })
-
-        # Update the sidebar with the new prediction
-        st.sidebar.image(image_bytes.getvalue(), caption=predicted_label, use_column_width=True)
+model_url = "https://huggingface.co/Hammad712/image2sketch/resolve/main/image2sketch.pkl"
+model_file_path = 'image2sketch.pkl'
+
+if not os.path.exists(model_file_path):
+    with st.spinner('Downloading model...'):
+        response = requests.get(model_url)
+        with open(model_file_path, 'wb') as f:
+            f.write(response.content)
+    st.success('Model downloaded successfully!')
+
+# Create a temporary directory for the model
+with tempfile.TemporaryDirectory() as tmpdirname:
+    shutil.move(model_file_path, os.path.join(tmpdirname, 'export.pkl'))
+    learn = load_learner(tmpdirname)
+
+# Input for image URL or path
+image_path_or_url = st.text_input("Enter image path or URL", "")
+
+# Run inference button
+if st.button("Run Inference"):
+    if image_path_or_url:
+        with st.spinner('Processing...'):
+            high_res_image = inference(image_path_or_url, learn)
+            st.image(high_res_image, caption='High Resolution Image', use_column_width=True)
+    else:
+        st.error("Please enter a valid image path or URL.")
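A note on the new inference path: load_learner(tmpdirname) is the fastai 1.x API, which looks for an export.pkl inside the directory it is given, which is why the downloaded .pkl is renamed via shutil.move; open_image and learn.predict follow the same version. Below is a minimal sketch of that load-and-predict flow outside Streamlit, under stated assumptions: fastai<2.0 is installed, the downloaded export.pkl already sits in a local models/ directory, and the FeatureLoss class from app.py is defined alongside it (re-declaring it in app.py is most likely there so the pickled learner can be deserialized). run_once and the file paths are illustrative names, not part of the commit.

# Minimal standalone sketch; assumes fastai<2.0 and ./models/export.pkl.
# run_once and the paths below are illustrative, not taken from the commit.
from fastai.vision import open_image, load_learner

# NOTE: the FeatureLoss class from app.py (and the helpers it references) is
# assumed to be defined in this module before loading, since the exported
# learner typically pickles its custom loss function.

def run_once(model_dir, image_path):
    learn = load_learner(model_dir)       # fastai v1: loads <model_dir>/export.pkl
    img = open_image(image_path)          # fastai v1 Image, pixel values in [0, 1]
    # app.py additionally pads the input with a white margin via add_margin()
    _, img_hr, _ = learn.predict(img)     # same unpacking as inference() in app.py
    return img_hr                         # predicted image tensor, passed to st.image in app.py

if __name__ == "__main__":
    out = run_once("models", "input.jpg")
    print(out.shape)                      # e.g. torch.Size([3, H, W])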