Modify image upstream
app.py CHANGED
@@ -1,19 +1,13 @@
-
-# gpu_id = torch.cuda.current_device()
-# print("GPU", gpu_id)
-# print(torch.__path__)
-# print(torch.cuda.is_available())
-# print(torch.cuda.get_arch_list())
-# print(torch.cuda.device_count())
-
+from PIL import Image, ExifTags
 import streamlit as st
 from transformers import pipeline
-
+
 import torch
 # Make sure models.py is correctly defined and on your PATH
 from models import vgg19
 from torchvision import transforms
 
+
 # Initialize the Hugging Face pipelines
 caption_pipeline = pipeline("image-to-text", model="Salesforce/blip-image-captioning-large")
 emotion_pipeline = pipeline("image-classification", model="RickyIG/emotion_face_image_classification_v3")
@@ -28,6 +22,7 @@ model.to(device)
 model.load_state_dict(torch.load(model_path, map_location=device))
 model.eval()
 
+
 def predict_count(image):
     # Preprocess the image
     trans = transforms.Compose([
@@ -75,10 +70,41 @@ def main():
     # Image upload
     uploaded_image = st.file_uploader("Choisissez une image...", type=["jpg", "jpeg", "png"])
     if uploaded_image is not None:
+
+
+
+
         image = Image.open(uploaded_image)
 
-        #
-
+        # Get and display the image size
+        width, height = image.size
+        st.write(f"Largeur: {width} pixels, Hauteur: {height} pixels")
+        if width > 1200 or height > 1200:
+            try:
+                exif = image._getexif()
+                if exif is not None:
+                    orientation_key = next((key for key, value in ExifTags.TAGS.items() if value == 'Orientation'), None)
+                    if orientation_key is not None and orientation_key in exif:
+                        orientation = exif[orientation_key]
+                        if orientation == 3:
+                            image = image.rotate(180, expand=True)
+                        elif orientation == 6:
+                            image = image.rotate(270, expand=True)
+                        elif orientation == 8:
+                            image = image.rotate(90, expand=True)
+                # Display the uploaded image
+                st.image(image, caption='Image Uploadée', use_column_width=True)
+            except Exception as e:
+                st.write(f"Erreur lors de la correction de l'orientation: {e}")
+                pass
+            image = image.resize((224, 224))
+        else:
+            # Display the uploaded image
+            st.image(image, caption='Image Uploadée', use_column_width=True)
+
+
+
+
 
     # Placeholder for the button or the loading message
     button_placeholder = st.empty()
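
Note on the added block: it reads the EXIF Orientation tag through Image._getexif(), a private Pillow API, handles only orientation values 3, 6, and 8, leaves a redundant pass after the error message, and the fixed resize((224, 224)) discards aspect ratio. A minimal sketch of the same upstream normalization with public Pillow APIs only, assuming Pillow >= 6.0 (where ImageOps.exif_transpose was introduced); the helper name normalize_upload and the reuse of the commit's 1200-pixel threshold are illustrative, not part of the commit:

# Minimal sketch, not the committed code: EXIF orientation fix plus
# size cap using only public Pillow APIs. Assumes Pillow >= 6.0;
# normalize_upload and MAX_SIDE are illustrative names.
from PIL import Image, ImageOps

MAX_SIDE = 1200  # mirrors the 1200-pixel threshold used in the commit

def normalize_upload(image: Image.Image) -> Image.Image:
    # exif_transpose applies the rotation/flip encoded in the EXIF
    # Orientation tag (all eight values, not just 3/6/8) and strips it,
    # so downstream code always sees upright pixels.
    image = ImageOps.exif_transpose(image)
    if max(image.size) > MAX_SIDE:
        # thumbnail() shrinks in place and preserves aspect ratio;
        # resize((224, 224)) would distort non-square photos.
        image.thumbnail((MAX_SIDE, MAX_SIDE))
    return image

Whether the downstream models require an exact 224x224 input is not visible in this diff; if they do, that fixed resize arguably belongs in the model-specific preprocessing (such as the transforms.Compose in predict_count) rather than in the upload handler.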