import gradio as gr
from gensim.models import Word2Vec
from gensim.models.callbacks import CallbackAny2Vec
import keras
from keras.utils.data_utils import pad_sequences
from keras import backend as K
import requests
import shutil
import json
from PIL import Image
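
# Custom batch-wise metrics (recall, precision, F1) implemented with the Keras
# backend. The saved CNN was compiled with them, so they must be defined here
# in order to reload the model (see custom_objects below).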

def recall_m(y_true, y_pred):
  true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
  possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
  recall = true_positives / (possible_positives + K.epsilon())
  return recall

def precision_m(y_true, y_pred):
  true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
  predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
  precision = true_positives / (predicted_positives + K.epsilon())
  return precision

def f1_m(y_true, y_pred):
  precision = precision_m(y_true, y_pred)
  recall = recall_m(y_true, y_pred)
  return 2*((precision*recall)/(precision+recall+K.epsilon()))

# Callback class used during Word2Vec training to report the loss
class callback(CallbackAny2Vec):
  """
  Report the training loss every 100 epochs.
  """
  def __init__(self):
    self.epoch = 0
    # gensim's loss is cumulative, so the previous value is kept to print the per-epoch delta
    self.loss_previous_step = 0

  def on_epoch_end(self, model):
    loss = model.get_latest_training_loss()
    if self.epoch % 100 == 0:
      print('Loss after epoch {}: {}'.format(self.epoch, loss - self.loss_previous_step))

    self.epoch += 1
    self.loss_previous_step = loss

# Reload the trained Word2Vec embeddings used to vectorise the input tokens
reloaded_w2v_model = Word2Vec.load('word2vec_xp8.model')

# Reload the trained CNN classifier; the custom metrics above are passed as
# custom_objects because the model was compiled with them
reconstructed_model_CNN = keras.models.load_model("best weights CNN.h5",
                                                   custom_objects={'f1_m': f1_m,
                                                                   'precision_m': precision_m,
                                                                   'recall_m': recall_m})


def classify(sentence):
  # The input is expected to be a list of pre-processed tokens, e.g.
  # "['word1', 'word2', ...]"; single quotes are swapped for double quotes
  # so the string can be parsed as JSON
  sentenceWords = json.loads(sentence.replace("'", '"'))

  # Look up the Word2Vec embedding of each token, skipping words that are
  # not in the trained vocabulary
  aux_vector = []
  for word in sentenceWords:
    if word in reloaded_w2v_model.wv:
      aux_vector.append(reloaded_w2v_model.wv[word])

  # Pad the sequence of embeddings to the fixed input length (2726) the CNN was trained with
  MCTIinput_vector = pad_sequences([aux_vector], maxlen=2726, padding='pre')

  # The model outputs a single probability; 0.5 is the eligibility threshold
  value = reconstructed_model_CNN.predict(MCTIinput_vector)[0]
  if value >= 0.5:
    return Image.open(r"elegivel.png")
  else:
    return Image.open(r"inelegivel.png")

# Gradio interface: a text box for the tokenised sentence in, the eligibility image out
iface = gr.Interface(fn=classify, inputs="text", outputs="image")
iface.launch()