yxmauw committed on
Commit
33053e0
1 Parent(s): 64c7ef4
Files changed (6)
  1. ENet_model.tflite +3 -0
  2. LICENSE +21 -0
  3. README.md +10 -3
  4. app.py +61 -0
  5. model_methods.py +131 -0
  6. requirements.txt +5 -0
ENet_model.tflite ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:443f7b83554e76a67661d836f7201d201320f6a0b6288e3eb91294504745c2d3
+ size 81882888
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2022 Belle
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
README.md CHANGED
@@ -1,3 +1,10 @@
- ---
- license: mit
- ---
+ # Eye Disease Classifier app
+
+ This app accompanies the [eye-disease-classification project](https://github.com/yxmauw/eye-disease-classification).
+
+ It is built with the [Streamlit](https://streamlit.io/) library.
+
+ Launched on [Huggingface spaces](https://huggingface.co/spaces): [![Generic badge](https://img.shields.io/badge/🤗-Open%20In%20Spaces-blue.svg)](https://huggingface.co/spaces/yxmauw/eye-disease-clf-app). Enjoy!
+
+ _Notes:_
+ _The app repeatedly exceeded Streamlit Cloud resource limits, so an alternative cloud hosting space was used._
app.py ADDED
@@ -0,0 +1,61 @@
+ import numpy as np
+ import streamlit as st
+ import tensorflow as tf
+ from PIL import Image
+ import io
+ from model_methods import predict, orig_img, plot_gradient_maps, gradCAM
+
+ # configuration of the page
+ st.set_page_config(
+     layout='wide',
+     page_icon='👁️',
+     page_title='Eye Disease Classifier',
+     initial_sidebar_state='auto'
+ )
+
+ st.title('👁️ Eye Disease Classifier')
+ st.info('Only classifies **Cataract**, **Diabetic retinopathy**, **Glaucoma** or **Normal**. \n\n Model is restricted to giving **1** class at a time')
+
+ new_img = st.file_uploader('Please upload your retinal image in .png or .jpeg/.jpg format')
+
+ def predict_upload():
+     result = predict(new_img) # result is a probabilities array
+     classes = ['cataract', 'diabetic retinopathy', 'glaucoma', 'normal']
+     max_result = np.max(result, axis=-1) * 100 # max probability
+     pred_prob = np.format_float_positional(max_result, precision=2) # format probability
+     pred_class = classes[np.argmax(result, axis=-1)[0]] # string
+     st.write('### There is a')
+     st.success(f'# {pred_prob}% probability')
+     st.write('### that this retinal image shows')
+     st.success(f'# {pred_class}')
+
+ # instantiate submit button
+ if st.button('Classify'):
+     if new_img is not None:
+         with st.sidebar:
+             predict_upload()
+
+         col1, col2, col3 = st.columns(3)
+         with col1:
+             st.image(new_img)
+             st.caption('Original')
+
+         with col2:
+             input_im = orig_img(new_img) # output tensor
+             plot_gradient_maps(input_im)
+             st.caption('Saliency map')
+
+         with col3:
+             gradCAM(new_img, intensity=0.5, res=250)
+             st.caption('Activation heatmap')
+
+     if new_img is None:
+         with st.sidebar:
+             st.warning('''
+             Unable to detect image.
+             Please upload retinal image for classification.
+             \n\n Thank you 🙏
+             ''')
+
+ st.write('##') # create space
+ st.write('DISCLAIMER: THIS WEBSITE DOES NOT REPLACE MEDICAL ADVICE \n\n The information, including but not limited to, text, graphics, images and other material contained on this website are for informational purposes only. No material on this site is intended to be a substitute for professional medical advice, diagnosis or treatment. Always seek the advice of a physician or other qualified health care provider with any questions you may have regarding a medical condition or treatment and before undertaking a new health care regimen, and never disregard professional medical advice or delay in seeking it because of something you have read on this website.')
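For reference, a small worked example (not part of the commit) of how `predict_upload()` turns the probability array returned by `predict()` into the displayed percentage and class label; the `result` values below are made up for illustration.

```python
import numpy as np

# Hypothetical model output for one image: probabilities over the 4 classes,
# in the same order used by app.py.
result = np.array([[0.0625, 0.8125, 0.09375, 0.03125]])  # shape (1, 4)
classes = ['cataract', 'diabetic retinopathy', 'glaucoma', 'normal']

max_result = float(np.max(result, axis=-1)[0]) * 100             # 81.25
pred_prob = np.format_float_positional(max_result, precision=2)  # '81.25'
pred_class = classes[np.argmax(result, axis=-1)[0]]              # 'diabetic retinopathy'

print(f'{pred_prob}% probability of {pred_class}')
```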
model_methods.py ADDED
@@ -0,0 +1,131 @@
+ # reference https://stackoverflow.com/questions/69134379/how-to-make-prediction-based-on-model-tensorflow-lite
+ import numpy as np
+ import tensorflow as tf
+ import streamlit as st
+ from PIL import Image
+ import io
+ import matplotlib.pyplot as plt
+ import keras.backend as K # F1 score metric custom object
+ import cv2 # Activation heatmap
+
+ def predict(image): # predict from the raw uploaded image
+     interpreter = tf.lite.Interpreter('ENet_model.tflite')
+     interpreter.allocate_tensors()
+     # get input and output tensor details
+     input_details = interpreter.get_input_details()
+     output_details = interpreter.get_output_details()
+
+     # Read the image and decode to a tensor
+     img = Image.open(io.BytesIO(image.read()))
+     img = img.convert('RGB')
+     # Resize the image to the desired size
+     img = img.resize((160, 160))
+     img = tf.keras.preprocessing.image.img_to_array(img)
+
+     # Preprocess the image: add the batch dimension and cast to float32
+     # input_shape = input_details[0]['shape']
+     input_tensor = np.array(np.expand_dims(img, 0), dtype=np.float32)
+     input_tensor = tf.keras.applications.efficientnet_v2.preprocess_input(input_tensor)
+     # (preprocess_input matches the preprocessing used at training time)
+
+     # Point the interpreter's input tensor at the data to be inferred
+     interpreter.set_tensor(input_details[0]['index'], input_tensor)
+
+     # Run the inference
+     interpreter.invoke()
+     output_data = interpreter.get_tensor(output_details[0]['index'])
+     return output_data
+
+ def orig_img(image):
+     img = Image.open(io.BytesIO(image.read()))
+     img = img.convert('RGB')
+     # Resize the image to the desired size
+     img = img.resize((160, 160))
+     img = tf.keras.preprocessing.image.img_to_array(img)
+
+     # Preprocess the image: add the batch dimension and cast to float32
+     # input_shape = input_details[0]['shape']
+     input_array = np.array(np.expand_dims(img, 0), dtype=np.float32)
+     input_array = tf.keras.applications.efficientnet_v2.preprocess_input(input_array)
+
+     input_tensor = tf.convert_to_tensor(input_array) # convert array to tensor
+     return input_tensor # tensor-format image
+
+ def normalize_image(img): # normalise image
+     grads_norm = img[:, :, 0] + img[:, :, 1] + img[:, :, 2]
+     grads_norm = (grads_norm - tf.reduce_min(grads_norm)) / (tf.reduce_max(grads_norm) - tf.reduce_min(grads_norm))
+     return grads_norm
+
+ # see this for cmap options: https://matplotlib.org/stable/tutorials/colors/colormaps.html
+ def plot_maps(img1, img2, vmin=0.3, vmax=0.7, mix_val=2): # saliency map
+     fig, ax = plt.subplots(figsize=(3.3, 3.3))
+     ax.imshow(img1 * mix_val + img2 / mix_val, cmap="terrain")
+     plt.axis("off")
+     fig.savefig("temp_fig.png", transparent=True, bbox_inches='tight', pad_inches=0)
+     image = Image.open('temp_fig.png')
+     st.image(image)
+     # st.pyplot(fig)
+
+ def f1_score(y_true, y_pred): # taken from old Keras source code
+     true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
+     possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
+     predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
+     precision = true_positives / (predicted_positives + K.epsilon())
+     recall = true_positives / (possible_positives + K.epsilon())
+     f1_val = 2 * (precision * recall) / (precision + recall + K.epsilon())
+     return f1_val
+
+ # Load the full SavedModel for the saliency and activation maps; the TF Lite model cannot be used for these unless that was specified when the model was built
+ model = tf.keras.models.load_model("ENet_ep20_val0.311",
+                                    custom_objects={'f1_score': f1_score})
+
+ def plot_gradient_maps(input_im): # input-gradient saliency; calls plot_maps()
+     with tf.GradientTape() as tape:
+         tape.watch(input_im)
+         result_img = model(input_im)
+         max_idx = tf.argmax(result_img, axis=1)
+         max_score = tf.math.reduce_max(result_img[0, max_idx[0]]) # tensor max probability
+     grads = tape.gradient(max_score, input_im)
+     plot_maps(normalize_image(grads[0]), normalize_image(input_im[0]))
+
+ # Activation heatmap
+ def gradCAM(orig, intensity=0.5, res=270): # Grad-CAM overlay
+     img = Image.open(io.BytesIO(orig.getvalue()))
+     img = img.convert('RGB')
+     # Resize the image to the desired size
+     img = img.resize((160, 160))
+     x = tf.keras.preprocessing.image.img_to_array(img)
+
+     x = np.expand_dims(x, axis=0)
+     x = tf.keras.applications.efficientnet_v2.preprocess_input(x) # shape (1,160,160,3)
+
+     with tf.GradientTape() as tape: # Grad-CAM process
+         last_conv_layer = model.get_layer('top_conv')
+         iterate = tf.keras.models.Model([model.inputs], [model.output, last_conv_layer.output]) # mini model returning the class output and the last conv feature map
+         model_out, last_conv_layer = iterate(x) # model_out shape (1,4)
+         class_out = model_out[:, np.argmax(model_out[0])]
+     grads = tape.gradient(class_out, last_conv_layer)
+     pooled_grads = K.mean(grads, axis=(0, 1, 2))
+
+     heatmap = tf.reduce_mean(tf.multiply(pooled_grads, last_conv_layer), axis=-1)
+     heatmap = np.maximum(heatmap, 0)
+     heatmap /= np.max(heatmap) # min-max scale pixel values to (0,1)
+     heatmap = heatmap.reshape((5, 5)) # reshape to 5x5 array
+
+     # img = cv2.imread(orig) # numpy array
+     img = Image.open(io.BytesIO(orig.getvalue()))
+     img = img.convert('RGB')
+     # Resize the image to the desired size
+     img = img.resize((160, 160))
+     img = tf.keras.preprocessing.image.img_to_array(img)
+
+     heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
+
+     heatmap = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET) # scale to 0-255 before applying the colour map
+
+     img = heatmap * intensity + img
+
+     img1 = cv2.resize(img, (res, res)) # visualise heatmap overlay
+     cv2.imwrite('temporary.jpg', img1) # store as a temporary file for st.image to read; st.image(img1) cannot load the array directly
+     st.image('temporary.jpg')
+
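For quick local testing outside Streamlit, the same TFLite inference pattern used in `predict()` can be exercised from a plain script. This is a minimal sketch, not part of the commit; the image path `retina.jpg` is a made-up example, and the class order mirrors `app.py`.

```python
import numpy as np
import tensorflow as tf
from PIL import Image

# Load the committed TFLite model (same file as ENet_model.tflite above)
interpreter = tf.lite.Interpreter('ENet_model.tflite')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Hypothetical local image path; preprocessing mirrors model_methods.predict()
img = Image.open('retina.jpg').convert('RGB').resize((160, 160))
x = np.expand_dims(tf.keras.preprocessing.image.img_to_array(img), 0).astype(np.float32)
x = tf.keras.applications.efficientnet_v2.preprocess_input(x)

# Run inference and read back the class probabilities
interpreter.set_tensor(input_details[0]['index'], x)
interpreter.invoke()
probs = interpreter.get_tensor(output_details[0]['index'])[0]

classes = ['cataract', 'diabetic retinopathy', 'glaucoma', 'normal']
print(classes[int(np.argmax(probs))], float(np.max(probs)))
```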
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ streamlit==1.13.0
+ numpy==1.21.5
+ tensorflow==2.8.2
+ matplotlib==3.5.1
+ opencv-python-headless==4.6.0.66