LightFury9 committed on
Commit
32fd60b
1 Parent(s): 5f63a62

Upload app.py

Files changed (1)
  1. app.py +94 -0
app.py ADDED
@@ -0,0 +1,94 @@
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.cm as cm
import gradio as gr

# Grad-CAM heatmaps are computed with the EfficientNetB3 model; class
# probabilities come from the separate ConvNeXtTiny model
model = tf.keras.models.load_model('./EfficientNetB3')
pred_model = tf.keras.models.load_model('./ConvNeXtTiny')
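# A minimal sketch of how these model directories could have been produced,
# assuming they are standard Keras SavedModel exports placed next to app.py
# (the variable names below are hypothetical, only the paths come from the
# load_model calls above):
# trained_effnet.save('./EfficientNetB3')
# trained_convnext.save('./ConvNeXtTiny')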

def make_gradcam_heatmap(img_array, model, last_conv_layer_name, pred_index=None):
    # First, we create a model that maps the input image to the activations
    # of the last conv layer as well as the output predictions
    grad_model = keras.models.Model(
        model.inputs, [model.get_layer(last_conv_layer_name).output, model.output]
    )

    # Then, we compute the gradient of the top predicted class for our input image
    # with respect to the activations of the last conv layer
    with tf.GradientTape() as tape:
        last_conv_layer_output, preds = grad_model(img_array)
        if pred_index is None:
            pred_index = tf.argmax(preds[0])
        class_channel = preds[:, pred_index]

    # This is the gradient of the output neuron (top predicted or chosen)
    # with regard to the output feature map of the last conv layer
    grads = tape.gradient(class_channel, last_conv_layer_output)

    # This is a vector where each entry is the mean intensity of the gradient
    # over a specific feature map channel
    pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))

    # We multiply each channel in the feature map array
    # by "how important this channel is" with regard to the top predicted class,
    # then sum all the channels to obtain the heatmap class activation
    last_conv_layer_output = last_conv_layer_output[0]
    heatmap = last_conv_layer_output @ pooled_grads[..., tf.newaxis]
    heatmap = tf.squeeze(heatmap)

    # For visualization purposes, we also normalize the heatmap between 0 and 1
    heatmap = tf.maximum(heatmap, 0) / tf.math.reduce_max(heatmap)
    return heatmap.numpy()
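
# A quick, hypothetical shape check of make_gradcam_heatmap, assuming the
# EfficientNetB3 model loaded above accepts 300x300 RGB inputs and using the
# same layer name as the app below. Kept commented out so nothing extra runs
# when the app starts:
# dummy = np.random.rand(1, 300, 300, 3).astype("float32") * 255.0
# hm = make_gradcam_heatmap(dummy, model, "block7b_project_conv")
# print(hm.shape)  # 2-D array, one value per spatial location, scaled to [0, 1]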

def gradio_img_array(img):
    # `img` arrives from the Gradio image input as an RGB image
    # (a NumPy array of shape (H, W, 3) or a PIL image);
    # `array` is a float32 NumPy array of shape (H, W, 3)
    array = keras.utils.img_to_array(img)
    # We add a dimension to transform our array into a "batch"
    # of shape (1, H, W, 3)
    array = np.expand_dims(array, axis=0)
    return array

def gradio_display_gradcam(img, heatmap, cam_path="cam.jpg", alpha=0.4):
    # `img` is the original image passed in by Gradio; convert it to a float
    # array (`cam_path` is accepted for compatibility but unused: the overlay
    # is returned rather than written to disk)
    img = keras.utils.img_to_array(img)

    # Rescale heatmap to a range 0-255
    heatmap = np.uint8(255 * heatmap)

    # Use jet colormap to colorize heatmap
    # (on newer matplotlib, matplotlib.colormaps["jet"] replaces cm.get_cmap)
    jet = cm.get_cmap("jet")

    # Use RGB values of the colormap
    jet_colors = jet(np.arange(256))[:, :3]
    jet_heatmap = jet_colors[heatmap]

    # Create an image with the RGB colorized heatmap, resized to the original image
    jet_heatmap = keras.utils.array_to_img(jet_heatmap)
    jet_heatmap = jet_heatmap.resize((img.shape[1], img.shape[0]))
    jet_heatmap = keras.utils.img_to_array(jet_heatmap)

    # Superimpose the heatmap on the original image
    superimposed_img = jet_heatmap * alpha + img
    superimposed_img = keras.utils.array_to_img(superimposed_img)
    return superimposed_img

def test(img):
    # Prepare image: add a batch dimension and apply EfficientNet preprocessing
    # (a pass-through; EfficientNet normalizes inputs inside the model)
    img_array = tf.keras.applications.efficientnet.preprocess_input(gradio_img_array(img))
    # Grad-CAM heatmap from the EfficientNetB3 model
    heatmap = make_gradcam_heatmap(img_array, model, "block7b_project_conv")
    # Overlay the heatmap on the input image
    img_cam = gradio_display_gradcam(img, heatmap, cam_path="cam2.jpg")
    # Class probabilities from the ConvNeXtTiny model
    preds = pred_model.predict(img_array, verbose=0)[0]
    preds_dict = {"0": float(preds[0]), "1": float(preds[1]), "2": float(preds[2]), "3": float(preds[3]), "4": float(preds[4])}
    return img_cam, preds_dict


interf = gr.Interface(fn=test, inputs="image", outputs=["image", "label"])

interf.launch()
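
A minimal smoke-test sketch, assuming the saved models accept 300x300 RGB inputs (the size and the synthetic image are assumptions, not taken from the app); placed above `interf.launch()`, it would exercise `test` once before the blocking Gradio call:

# Hypothetical local check; the 300x300 input size is an assumption
dummy_img = (np.random.rand(300, 300, 3) * 255).astype("uint8")
overlay, scores = test(dummy_img)
print(scores)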