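"""Patacotrón: a Streamlit app for one-class image classification of patacones
(fried smashed plantains), using CNN ensembles, fine-tuned Transformers queried
through the Hugging Face Inference API, and zero-shot classification with CLIP.
"""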
import os
import time

import cv2
import numpy as np
import requests
import streamlit as st
import tensorflow as tf
from PIL import Image
from tensorflow.keras.models import load_model
from transformers import pipeline

st.set_page_config(
    page_title = 'Patacotrón',
    layout = 'wide',
    menu_items = {
        "About" : 'Proyecto ideado para la investigación de "Clasificación de imágenes de una sola clase con algortimos de Inteligencia Artificial".',
        "Report a Bug" : 'https://docs.google.com/forms/d/e/1FAIpQLScH0ZxAV8aSqs7TPYi86u0nkxvQG3iuHCStWNB-BoQnSW2V0g/viewform?usp=sf_link'
    }
)

st.sidebar.write("contact@patacotron.tech")

cnn, vit, zero_shot, autoencoder, svm, iforest, gan = st.tabs(["CNN", "ViT", "Zero-Shot", "Autoencoder", "OC-SVM", 'iForest', 'GAN'])

def predict(_model_list, _weights, _img): 
    y_gorrito = 0
    raw_img = cv2.cvtColor(_img, cv2.COLOR_BGR2RGB) # cv2 decodes to BGR; convert a copy for display
    img = cv2.resize(_img, (IMAGE_WIDTH, IMAGE_HEIGHT))
    for model, weight in zip(_model_list, _weights):
        y_gorrito += tf.cast(model(tf.expand_dims(img/255., 0)), dtype=tf.float32)*weight
    return [y_gorrito / sum(_weights), raw_img]
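# e.g. predict([model_a, model_b], [2, 1], img) returns [(2*p_a + 1*p_b) / 3, rgb_img]:
# a weighted soft vote over the ensemble, normalized by the sum of the weights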

def preprocess(file_uploader, module = 'cv2'): # makes the uploaded image readable
    file_uploader.seek(0) # rewind in case the buffer was already consumed
    if module == 'cv2':
        buffer = np.frombuffer(file_uploader.read(), np.uint8)
        img = cv2.imdecode(buffer, cv2.IMREAD_COLOR)
    elif module == 'pil':
        img = Image.open(file_uploader)
    return img

def multiclass_prediction(classifier, important_class): # made for hf zero-shot pipeline results
    score = max(result['score'] for result in classifier)
    labels = [result['label'] for result in classifier if result['score'] == score]
    class_score = 0 # score of important_class, 0 if the pipeline did not return it
    for result in classifier:
        if result['label'] == important_class:
            class_score = result['score']
    return (labels[0] if len(labels) == 1 else labels, score, class_score)
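# the zero-shot pipeline yields one {'label', 'score'} dict per candidate, e.g.
# [{'score': 0.91, 'label': 'A yellow deep fried smashed plantain'},
#  {'score': 0.05, 'label': 'Fried food'}, ...] (scores here are illustrative)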

API_URL = "https://api-inference.huggingface.co/models"
headers = {"Authorization": f"Bearer {st.secrets['token']}"}

def query(data, model_name): # HF Inference API
    response = requests.post(API_URL + "/" + model_name, headers=headers, data=data)
    while "error" in response.json(): # the API responds with an error while the model is still loading
        time.sleep(1) # back off briefly instead of hammering the endpoint
        response = requests.post(API_URL + "/" + model_name, headers=headers, data=data)
    # the API returns label/score pairs sorted by score; select the positive class
    # ('Patacon-True', the label these fine-tunes use) instead of relying on list position
    return next(pred["score"] for pred in response.json() if pred["label"] == "Patacon-True")

@st.cache_resource # cache the pipeline so the model is downloaded and loaded only once
def load_clip():
    classifier = pipeline("zero-shot-image-classification", model = 'openai/clip-vit-large-patch14-336')
    return classifier

with cnn:

    col_a, col_b = st.columns(2)
    with col_a:
        st.title("Redes neuronales convolucionales")
        st.caption("Los modelos no están en orden de eficacia, sino en orden de creación.")
        
        current_dir = os.getcwd()
        
        # Join the path to the models folder
        DIR = os.path.join(current_dir, "models")
        models = os.listdir(DIR)
        
        model_dict = dict()
        for model in models: # preprocessing of strings so the name is readable in the multiselect bar
            # checkpoints are saved as ptctrn_v<version>.h5
            model_name = 'Patacotrón ' + model.split("ptctrn_v")[-1].split(".h5")[0]
            model_dict[model_name] = os.path.join(DIR, model)
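        # e.g. 'ptctrn_v1.5.h5' -> {'Patacotrón 1.5': os.path.join(DIR, 'ptctrn_v1.5.h5')}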
        # (Disabled) Ultra-Patacotrón: a hand-weighted ensemble of specific versions;
        # when weight_list is empty, the prediction falls back to uniform weights below.
        weight_list = []
            
        # Create a dropdown menu to select the model
        model_choice = st.multiselect("Seleccione uno o varios modelos de clasificación", model_dict.keys())
        
        threshold = st.slider('¿Cuál va a ser el límite donde se considere patacón? (el valor recomendado es de 75%-80%)', 0, 100, 50, key = 'threshold_convnet')
        
        # Set the image dimensions
        IMAGE_WIDTH = IMAGE_HEIGHT = 224
    
        
    with col_b:
             
        uploaded_file = st.file_uploader(key = 'conv_upload', label = 'Sube la imagen a clasificar',type= ['jpg','png', 'jpeg', 'jfif', 'webp', 'heic'])
        
        if st.button(key = 'convnet_button', label ='¿Hay un patacón en la imagen?'):
            if len(model_choice) < 1:
                st.write('Debe elegir como mínimo un modelo.')
            
            elif uploaded_file is not None:
                img = preprocess(uploaded_file)
                with st.spinner('Cargando predicción...'):
                    selected_models = [load_model(model_dict[model]) for model in model_choice]
                    final_weights = weight_list if len(weight_list) >= 1 else [1 for i in range(len(selected_models))]
                    y_gorrito, raw_img = predict(selected_models, final_weights, img)

                if round(float(y_gorrito*100)) >= threshold:
                    st.success("¡Patacón Detectado!")
                
                else:
                    st.error("No se considera que haya un patacón en la imagen")
                
                st.caption(f'La probabilidad de que la imagen tenga un patacón es del: {round(float(y_gorrito * 100), 2)}%')  
                st.caption('Si los resultados no fueron los esperados, por favor, [haz click aquí](https://docs.google.com/forms/d/e/1FAIpQLScH0ZxAV8aSqs7TPYi86u0nkxvQG3iuHCStWNB-BoQnSW2V0g/viewform?usp=sf_link)')
                
                st.image(raw_img)
            
            else:
                st.write('Revisa haber seleccionado los modelos y la imagen correctamente.')

with vit:
      
    col_a, col_b = st.columns(2)

    with col_a:
        st.title('Visual Transformers')
        st.caption('One class is all you need!')

        model_dict = {
            'google/vit-base-patch16-224-in21k' : 'frncscp/patacoptimus-prime',
            'facebook/convnext-large-224' : 'frncscp/pataconxt'
        }
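        # each base checkpoint maps to its patacón fine-tune hosted on the Hugging Face Hub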
        
        model_choice = st.multiselect("Seleccione un modelo de clasificación", model_dict.keys(), key = 'ViT_multiselect')
        
        uploaded_file = st.file_uploader(key = 'ViT_upload', label = 'Sube la imagen a clasificar',type= ['jpg','png', 'jpeg', 'jfif', 'webp', 'heic'])
        threshold = st.slider('¿Cuál va a ser el límite desde donde se considere patacón? (se recomienda por encima del 80%)', 0, 100, 80, key = 'threshold_vit')

    with col_b:

        if st.button(key = 'ViT_button', label ='¿Hay un patacón en la imagen?'):
            if len(model_choice) < 1:
                st.write('Recuerda seleccionar al menos un modelo de clasificación.')
            elif uploaded_file is not None:
                with st.spinner('Cargando predicción...'):

                    img = preprocess(uploaded_file, module = 'pil')

                    # average the positive-class score over every selected model
                    uploaded_file.seek(0)
                    data = uploaded_file.read()
                    y_gorrito = 0
                    for model in model_choice:
                        y_gorrito += query(data, model_dict[model])
                    y_gorrito /= len(model_choice)

                    if round(float(y_gorrito * 100)) >= threshold:
                        st.success("¡Patacón Detectado!")
                    else:
                        st.error("No se considera que haya un patacón en la imagen")
                    st.caption(f'La probabilidad de que la imagen tenga un patacón es del: {round(float(y_gorrito * 100), 2)}%')  
                    st.image(img) 
            else:
                st.write("Asegúrate de haber subido correctamente la imagen.")


                

with zero_shot:
    
    col_a, col_b = st.columns(2)
        
    with col_a:
    
        st.title("Clasificación Zero-Shot")
        st.caption("Usando Clip de OpenAI")

        labels_for_classification = ["A yellow deep fried smashed plantain",
                                     "Fried food",
                                     "Fruit",
                                     "Anything"]
        
        uploaded_file = st.file_uploader(key = 'ZS_upload', label = 'Sube la imagen a clasificar',type= ['jpg','png', 'jpeg', 'jfif', 'webp', 'heic'])
        
    with col_b:
                
        if st.button(key = 'ZS_button', label ='¿Hay un patacón en la imagen?'):
            if uploaded_file is not None:     

                with st.spinner('Cargando el modelo (puede demorar hasta un minuto, pero después predice rápido)'):
                    classifier = load_clip()
                
                with st.spinner('Cargando predicción...'):
                    img = preprocess(uploaded_file, module = 'pil')
                    zs_classifier = classifier(img, candidate_labels = labels_for_classification)

                    label, _, y_gorrito = multiclass_prediction(zs_classifier, labels_for_classification[0])
                        
                    if label == "A yellow deep fried smashed plantain":
                        st.success("¡Patacón Detectado!")
                    else:
                        st.error("No se considera que haya un patacón en la imagen")
                        
                    st.caption(f'La probabilidad de que la imagen tenga un patacón es del: {round(float(y_gorrito * 100), 2)}%')  
                    st.image(img)
            else:
                st.write("Asegúrate de haber subido correctamente la imagen.")
        
with autoencoder:
    st.write('Próximamente')
with gan:
    st.write('Próximamente')  
with svm:
    st.write('Próximamente')
with iforest:
    st.write('Próximamente')