import streamlit as st
import tensorflow as tf
import os
import torch
import cv2
import numpy as np
#import requests
import joblib
import sklearn
from PIL import Image
from sklearn.decomposition import PCA
from tensorflow.keras.models import load_model
from transformers import pipeline
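# Hugging Face access token, expected as an environment variable (e.g. a Space secret)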
token = os.environ['token']
st.set_page_config(
page_title = 'Patacognition',
layout = 'wide',
menu_items = {
"About" : 'Proyecto ideado para la investigación de "Clasificación de imágenes de una sola clase con algortimos de Inteligencia Artificial".',
"Report a Bug" : 'mailto:contact@patacon.org'
}
)
st.sidebar.write("contact@patacon.org")
cnn, vit, zero_shot, classic_ml = st.tabs(["CNN", "ViT", "Zero-Shot", "Machine Learning Clásico"])
classic_ml_root = "/home/user/app/classicML"
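# Cache the fitted PCA transformer so the pickle is only loaded from disk once per session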
@st.cache_resource
def load_pca():
return joblib.load(os.path.join(classic_ml_root, "pca_model.pkl"))
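# Ensemble prediction: averages the "Patacon-True" probability over every selected model.
# With use_sklearn=True the image is flattened and projected with the cached PCA before
# predict_proba; otherwise each Keras model receives a normalized (1, 224, 224, 3) batch.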
def _predict(_model_list, _img, use_sklearn = False):
    y_gorrito = 0
    raw_img = cv2.cvtColor(_img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(_img, (IMAGE_WIDTH, IMAGE_HEIGHT))
    if use_sklearn: # classic ML models expect PCA-reduced, flattened features
        fl_img = [img.flatten()]
        data = pca.transform(fl_img)
        for model in _model_list:
            prediction = model.predict_proba(data)
            y_gorrito += prediction[0][Categories.index("Patacon-True")]
    else: # Keras models expect a normalized image batch
        for model in _model_list:
            y_gorrito += tf.cast(model(tf.expand_dims(img/255., 0)), dtype=tf.float32)
    return [y_gorrito / len(_model_list), raw_img]
#def _pca_predict(models, _img):
# y_gorrito = 0
# raw_img = cv2.cvtColor(_img, cv2.COLOR_BGR2RGB)
# img = cv2.resize(_img, (IMAGE_WIDTH, IMAGE_HEIGHT))
# fl_img =[img.flatten()]
# data = pca.transform(fl_img)
# for model in models:
# prediction = model.predict_proba(data)
# y_gorrito += prediction[0][Categories.index("Patacon-True")]
# return [y_gorrito / len(models), raw_img]
#def classic_ml_prediction(clfs, _img):
# y_gorrito = 0
# raw_img = cv2.cvtColor(_img, cv2.COLOR_BGR2RGB)
# img = cv2.resize(_img, (IMAGE_WIDTH, IMAGE_HEIGHT)).flatten()
# data = pca.transform(img.reshape(1, -1))
# for clf in clfs:
# y_gorrito += clf.predict(data)
# return [y_gorrito / len(clfs), raw_img]
def preprocess(file_uploader, module = 'cv2'): # decodes the uploaded file into a readable image object
    if module == 'cv2':
        buffer = np.frombuffer(file_uploader.read(), np.uint8)
        img = cv2.imdecode(buffer, cv2.IMREAD_COLOR)
    elif module == 'pil':
        file_uploader.seek(0) # make sure the stream is at the start before PIL reads it
        img = Image.open(file_uploader)
    return img
def multiclass_prediction(classifier, important_class): # parses hf zero-shot pipeline results
    score = max(result['score'] for result in classifier)
    labels = [result['label'] for result in classifier if result['score'] == score]
    class_score = 0 # probability assigned to important_class (0 if the label is absent)
    for clase in classifier:
        if clase['label'] == important_class:
            class_score = clase['score']
    return (labels[0] if len(labels) == 1 else labels, score, class_score)
#API_URL = "https://api-inference.huggingface.co/models"
#headers = {"Authorization": f"Bearer {st.secrets['token']}"}
#def query(data, models): #HF API
# response = requests.post(API_URL + "/" + model_name, headers=headers, data=data)
# if response.json()["error"] == "Internal Server Error":
# return -1
# while "error" in response.json():
# response = requests.post(API_URL + "/" + model_name, headers=headers, data=data)
# return response.json()[1]["score"] #.json
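# Cache the CLIP zero-shot pipeline; loading the checkpoint is slow, so it should
# only happen on the first zero-shot prediction of the session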
@st.cache_resource
def load_clip():
classifier = pipeline("zero-shot-image-classification", model = 'openai/clip-vit-large-patch14-336')
return classifier
with cnn:
    col_a, col_b = st.columns(2)
ultra_flag = None
with col_a:
st.title("Redes neuronales convolucionales")
st.caption("Los modelos no están en orden de eficacia, sino en orden de creación.")
current_dir = os.getcwd()
root_dir = os.path.dirname(current_dir)
# Join the path to the models folder
DIR = os.path.join(current_dir, "models")
models = os.listdir(DIR)
common_root = r"/home/user/app/models/ptctrn_v"
common_end = ".h5"
model_dict = dict()
for model in models: #preprocessing of strings so the name is readable in the multiselect bar
model_dir = os.path.join(DIR, model)
model_name = 'Patacotrón ' + model_dir.split(common_root)[-1].split(common_end)[0]
model_dict[model_name] = model_dir
#weight_list = []
# Create a dropdown menu to select the model
model_choice = st.multiselect("Seleccione uno o varios modelos de clasificación", model_dict.keys())
threshold = st.slider('¿Cuál va a ser el límite donde se considere patacón? (el valor recomendado es de 75%-80%)', 0, 100, 50, key = 'threshold_convnet')
selected_models = []
# Set the image dimensions
IMAGE_WIDTH = IMAGE_HEIGHT = 224
executed = False
with col_b:
uploaded_file = st.file_uploader(key = 'conv_upload', label = 'Sube la imagen a clasificar',type= ['jpg','png', 'jpeg', 'jfif', 'webp', 'heic'])
if st.button(key = 'convnet_button', label ='¿Hay un patacón en la imagen?'):
if len(model_choice) < 1:
st.write('Debe elegir como mínimo un modelo.')
elif uploaded_file is not None:
img = preprocess(uploaded_file)
with st.spinner('Cargando predicción...'):
                        selected_models = [load_model(model_dict[model_name]) for model_name in model_choice]
#final_weights = weight_list if len(weight_list) >= 1 else [1 for i in range(len(selected_models))]
y_gorrito, raw_img = _predict(selected_models, img)
if round(float(y_gorrito*100)) >= threshold:
st.success("¡Patacón Detectado!")
else:
st.error("No se considera que haya un patacón en la imagen")
st.caption(f'La probabilidad de que la imagen tenga un patacón es del: {round(float(y_gorrito * 100), 2)}%')
st.image(raw_img)
else:
st.write('Revisa haber seleccionado los modelos y la imagen correctamente.')
with vit:
col_a, col_b = st.columns(2)
with col_a:
st.title('Visual Transformers')
st.caption('One class is all you need!')
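        # Keys are the base checkpoints shown in the UI; values are the corresponding patacón
        # classifiers on the Hugging Face Hub (presumably fine-tuned from those checkpoints)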
model_dict = {
'google/vit-base-patch16-224-in21k' : 'frncscp/patacoptimus-prime',
'facebook/dinov2-base' : 'frncscp/dinotron',
'facebook/convnext-large-224' : 'frncscp/pataconxt',
'microsoft/focalnet-small' : 'frncscp/focalnet-small-patacon',
'microsoft/swin-tiny-patch4-window7-224' : 'frncscp/patacoswin'
}
model_choice = st.multiselect("Seleccione un modelo de clasificación", model_dict.keys(), key = 'ViT_multiselect')
uploaded_file = st.file_uploader(key = 'ViT_upload', label = 'Sube la imagen a clasificar',type= ['jpg','png', 'jpeg', 'jfif', 'webp', 'heic'])
flag = False
threshold = st.slider('¿Cuál va a ser el límite desde donde se considere patacón? (se recomienda por encima del 80%)', 0, 100, 80, key = 'threshold_vit')
with col_b:
if st.button(key = 'ViT_button', label ='¿Hay un patacón en la imagen?'):
if len(model_choice) < 1:
                print('Recuerda seleccionar al menos un modelo de clasificación')
                st.write('Recuerda seleccionar al menos un modelo de clasificación')
elif uploaded_file is not None:
with st.spinner('Cargando predicción...'):
classifiers = [pipeline("image-classification", model= model_dict[model_choice[i]], token = token) for i in range(len(model_choice))]
#classifier = pipeline("image-classification", model= model_dict[model_choice[0]])
img = preprocess(uploaded_file, module = 'pil')
models = [model_dict[model] for model in model_choice]
#st.write(models)
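                    # Ensemble helper: each selected pipeline classifies the image and the
                    # 'Patacon-True' scores are averaged into a single probability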
                    def vit_ensemble(classifier_list, img):
                        y_gorrito = 0
                        for classifier in classifier_list:
                            results = classifier(img)
                            for clase in results:
                                if clase['label'] == 'Patacon-True':
                                    y_gorrito += clase["score"]
                        return y_gorrito / len(classifier_list)
y_gorrito = vit_ensemble(classifiers, img)
if round(float(y_gorrito * 100)) >= threshold:
st.success("¡Patacón Detectado!")
else:
st.error("No se considera que haya un patacón en la imagen")
st.caption(f'La probabilidad de que la imagen tenga un patacón es del: {round(float(y_gorrito * 100), 2)}%')
st.image(img)
else:
st.write("Asegúrate de haber subido correctamente la imagen.")
with zero_shot:
col_a, col_b = st.columns(2)
zsloaded = []
with col_a:
st.title("Clasificación Zero-Shot")
st.caption("Usando Clip de OpenAI")
labels_for_classification = ["A yellow deep fried smashed plantain",
"A yellow corn dough",
"A stuffed fried dough",
"Fried food",
"Fruit",
"Anything"]
uploaded_file = st.file_uploader(key = 'ZS_upload', label = 'Sube la imagen a clasificar',type= ['jpg','png', 'jpeg', 'jfif', 'webp', 'heic'])
with col_b:
if st.button(key = 'ZS_button', label ='¿Hay un patacón en la imagen?'):
if uploaded_file is not None:
with st.spinner('Cargando el modelo (puede demorar hasta un minuto, pero después predice rápido)'):
classifier = load_clip()
with st.spinner('Cargando predicción...'):
img = preprocess(uploaded_file, module = 'pil')
zs_classifier = classifier(img,
candidate_labels = labels_for_classification)
label, _, y_gorrito = multiclass_prediction(zs_classifier, labels_for_classification[0])
if label == "A yellow deep fried smashed plantain":
st.success("¡Patacón Detectado!")
else:
st.error("No se considera que haya un patacón en la imagen")
st.caption(f'La probabilidad de que la imagen tenga un patacón es del: {round(float(y_gorrito * 100), 2)}%')
st.image(img)
else:
st.write("Asegúrate de haber subido correctamente la imagen.")
with classic_ml:
pca = load_pca()
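    # Assumed to match the label order used when the scikit-learn classifiers were trained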
    Categories = ['Patacon-True', 'Patacon-False']
col_a, col_b = st.columns(2)
with col_a:
st.title("Machine Learning Clásico")
st.caption("Usando análisis por componentes principales")
model_dict = {
'Máquina de vectores de soporte' : 'pca_svm.sav',
'K-Nearest Neighbors' : 'pca_knn.sav',
'Bosques Aleatorios' : 'pca_random_forest.sav',
}
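        # Each .sav file is a scikit-learn classifier trained on PCA-reduced pixels;
        # resolve the bare filenames to absolute paths inside the Space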
for model_name, filename in model_dict.items():
model_dict[model_name] = os.path.join(classic_ml_root, filename)
model_choice = st.multiselect("Seleccione un modelo de clasificación", model_dict.keys(), key = 'cML_multiselect')
uploaded_file = st.file_uploader(key = 'cML_upload', label = 'Sube la imagen a clasificar',type= ['jpg','png', 'jpeg', 'jfif', 'webp', 'heic'])
threshold = st.slider('¿Cuál va a ser el límite desde donde se considere patacón? (se recomienda por encima del 70%)', 0, 100, 70, key = 'threshold_cML')
with col_b:
if st.button(key = 'cML_button', label ='¿Hay un patacón en la imagen?'):
if len(model_choice) < 1:
                st.write('Recuerda seleccionar al menos un modelo de clasificación')
elif uploaded_file is not None:
with st.spinner('Cargando predicción...'):
img = preprocess(uploaded_file)
selected_models = [joblib.load(model_dict[model_name]) for model_name in model_choice]
                    y_gorrito, raw_img = _predict(selected_models, img, use_sklearn = True)
if round(float(y_gorrito*100)) >= threshold:
st.success("¡Patacón Detectado!")
else:
st.error("No se considera que haya un patacón en la imagen")
st.caption(f'La probabilidad de que la imagen tenga un patacón es del: {round(float(y_gorrito * 100), 2)}%')
st.image(raw_img)
else:
st.write('Revisa haber seleccionado los modelos y la imagen correctamente.') |