Commit · 5440a34
Parent(s): 164e46f

Upload 2 files

- Ambrosia.py +296 -0
- app.py +78 -0
Ambrosia.py
ADDED
@@ -0,0 +1,296 @@
# CLASS:
#   pre_process_image
# METHODS:
#   __init__
#     INPUT:
#       image_dir = (str) a full path to an image with multiple beetles and possibly a size reference circle
#       manual_thresh_buffer = (float) {optional} a manual way to control the binarizing threshold.
#           use this when beetles are broken up into multiple images
#           inputs should range from -1 to 1; higher values include lighter colors in the blobs and lower values reduce blob size
#     OUTPUT (ATTRIBUTES):
#       image_dir = (str) the same directory as is given as input for the image that is being processed
#       image = (np.array) the original compound image
#       grey_image = (np.array) the original compound image in greyscale
#       bw_image = (np.array) the original image in binary black and white
#       inv_bw_image = (np.array) the original image in inverted black and white binary
#       clear_inv_bw_image = (np.array) the inverted black and white binary original image with all components touching the border removed
#   segment
#     INPUT:
#       cluster_num = (int) {default=2} the number of clusters used for kmeans, to pick only the cluster with the largest blobs
#       image_edge_buffer = (int) {default=50} number of pixels to add to box borders
#     OUTPUT (ATTRIBUTES):
#       cluster_num = (int) the same as the input
#       image_edge_buffer = (int) the same as the input
#       labeled_image = (np.array) the original compound image, labelled
#       max_kmeans_label = (int) the label of the cluster with the largest object/blob
#       image_selected_df = (pd.DataFrame) a dataframe with columns describing each segmented image:
#           'centroid' = centre of the image
#           'bbox-0' = border 0
#           'bbox-1' = border 1
#           'bbox-2' = border 2
#           'bbox-3' = border 3
#           'orientation' = angle of the image segment
#           'axis_major_length'
#           'axis_minor_length'
#           'area'
#           'area_filled'
#       image_properties_df = (pd.DataFrame) similar to image_selected_df, but includes all the artefacts that are picked up
#       col_image_lst = (list) a list with all the segmented images in color
#       inv_bw_image_lst = (list) a list with all the segmented images in inverted binary black and white
#       image_segment_count = (int) number of segmented images extracted from the compound image
#   detect_outlier
#     INPUT:
#       None
#     OUTPUT (ATTRIBUTES):
#       image_array = (np.array) an array of the list of color segmented images (number of images, (R,G,B))
#       r_ar_lst = (list) a list of arrays with flattened image red values
#       g_ar_lst = (list) a list of arrays with flattened image green values
#       b_ar_lst = (list) a list of arrays with flattened image blue values
#       all_ar_lst = (list) a list of arrays with flattened image red, green, and blue values
#       px_dens_dist = (np.array) frequency distribution at 0-255 of all the values for each pixel
#       corr_coef = (np.array) a square array of length equal to the number of segmented images showing the spearman correlation between images
#       corr_pval = (np.array) the p-values associated with each correlation
#       corr_coef_sum = (np.array) the sum of the correlations of each image compared to all others
#       outlier_idx = (int) the index of the image with the lowest spearman correlation sum
#       outlier_val = (float) the lowest sum correlation value
#       outlier_col_image = (np.array) the color image of what is detected as the outlier
#       outlier_inv_bw_image = (np.array) the inverted black on white image of the outlier segmented image
#       outlier_bw_image = (np.array) the white on black image of the outlier segmented image
#       image_selected_df = (pd.DataFrame) an updated dataframe that contains the circle identification data
#   estimate_size
#     INPUT:
#       known_radius = (int) {default=1} the radius of the reference circle (should be approximately the same size as the specimens to work best)
#       canny_sigma = (int) {default=5} describes how strict the cleaning border is for identifying the circle to place over the reference circle
#       outlier_idx = (int) {default should be self.outlier_idx} change this when the circle is falsely detected
#     OUTPUT (ATTRIBUTES):
#       outlier_bw_image = (np.array) an updated version of the outlier image with a clean circle clear of artifacts
#       outlier_idx = (int) same as the input
#       clean_inv_bw_image_lst = (list) a list of cleaned white on black images with no blobs touching the border
#       image_selected_df = (pd.DataFrame) an update to the dataframe of metadata containing pixel counts and relative area in mm^2 of all segmented images
# *black and white is white on black

# import requirements
import os
os.environ["OMP_NUM_THREADS"] = '1' # use this line on windows machines to avoid memory leaks
import numpy as np
import pandas as pd
from math import ceil
from skimage import io
from skimage.filters import threshold_otsu
from skimage.color import rgb2gray
from skimage.segmentation import clear_border
from skimage.measure import label, regionprops_table
from skimage.transform import hough_circle, hough_circle_peaks
from skimage.feature import canny
from skimage.draw import disk
from sklearn.cluster import KMeans
from scipy.stats import spearmanr

class pre_process_image:
    # initialize image to be segmented from path
    def __init__(self, image=None, image_dir=None, manual_thresh_buffer=0):
        if image_dir is not None:
            self.image_dir = image_dir.replace('\\','/') # full directory path to image
            self.image = io.imread(image_dir) # read image from directory
        elif image is not None:
            self.image = image
        else:
            print("No image given to function")
        self.grey_image = rgb2gray(self.image) # convert image to greyscale
        self.bw_image = self.grey_image > threshold_otsu(self.grey_image) + manual_thresh_buffer # binarize image to black & white
        self.inv_bw_image = np.invert(self.bw_image) # invert black and white image
        self.clear_inv_bw_image = clear_border(self.inv_bw_image) # remove anything touching the image border

    # segment the image into smaller images
    def segment(self, cluster_num=2, image_edge_buffer=50):
        self.cluster_num = cluster_num
        self.image_edge_buffer = image_edge_buffer
        self.labeled_image = label(self.clear_inv_bw_image) # label image
        image_properties_df = pd.DataFrame( # get the properties of each region used to segment blobs in the image
            regionprops_table(
                self.labeled_image,
                properties=('centroid',
                            'bbox',
                            'orientation',
                            'axis_major_length',
                            'axis_minor_length',
                            'area',
                            'area_filled')
            )
        )
        # cluster boxes of blobs by size
        kmean_result = KMeans(n_clusters=cluster_num, n_init='auto').fit(
            np.array(
                image_properties_df[['axis_major_length', 'axis_minor_length']]
            )
        )
        image_properties_df['kmeans_label'] = kmean_result.labels_
        # keep only the largest cluster (the ball bearing needs to be a similar size to the beetles)
        self.max_kmeans_label = int(image_properties_df.kmeans_label[image_properties_df['area'] == image_properties_df['area'].max()])
        image_selected_df = image_properties_df[image_properties_df['kmeans_label']==self.max_kmeans_label]
        self.image_properties_df = image_properties_df
        # enlarge the boxes around blobs with the buffer
        coord_df = image_selected_df.loc[:,['bbox-0','bbox-1','bbox-2','bbox-3']].copy()
        coord_df = coord_df.reset_index(drop=True)
        image_selected_df = image_selected_df.reset_index(drop=True)
        coord_df.loc[:,['bbox-0','bbox-1']] = coord_df.loc[:,['bbox-0','bbox-1']]-self.image_edge_buffer
        coord_df.loc[:,['bbox-2','bbox-3']] = coord_df.loc[:,['bbox-2','bbox-3']]+self.image_edge_buffer
        image_selected_df.loc[:,['bbox-0','bbox-1','bbox-2','bbox-3']] = coord_df.loc[:,['bbox-0','bbox-1','bbox-2','bbox-3']]
        # limit boundaries to the initial image size; without this, the image size bugs out when the boundaries are negative and the image is removed
        mask = image_selected_df[['bbox-0','bbox-1','bbox-2','bbox-3']]>=0
        image_selected_df[['bbox-0','bbox-1','bbox-2','bbox-3']] = image_selected_df[['bbox-0','bbox-1','bbox-2','bbox-3']].where(mask, other=0)
        self.image_selected_df = image_selected_df
        # crop blobs from the image based on box sizes and add them to lists
        col_image_lst = []
        inv_bw_image_lst = []
        for i in range(len(image_selected_df)):
            coord_i = image_selected_df.iloc[i]
            # color images
            crop_img = self.image[int(coord_i['bbox-0']):int(coord_i['bbox-2']), int(coord_i['bbox-1']):int(coord_i['bbox-3'])]
            col_image_lst.append(crop_img)
            # inverted black and white images
            crop_bw_img = self.inv_bw_image[int(coord_i['bbox-0']):int(coord_i['bbox-2']), int(coord_i['bbox-1']):int(coord_i['bbox-3'])]
            inv_bw_image_lst.append(crop_bw_img)

        # clear all images that are empty
        # col_image_lst = [x for x in col_image_lst if x.shape[0] != 0]
        # inv_bw_image_lst = [x for x in inv_bw_image_lst if x.shape[0] != 0]

        self.col_image_lst = col_image_lst
        self.inv_bw_image_lst = inv_bw_image_lst
        self.image_segment_count = len(col_image_lst)

    def detect_outlier(self):
        # convert list to numpy array
        self.image_array = np.copy(np.array(self.col_image_lst, dtype='object'))
        # initialize lists to store data in
        r_ar_lst = []
        g_ar_lst = []
        b_ar_lst = []
        all_ar_lst = []
        for l in range(self.image_segment_count):
            # flatten arrays
            img_var = self.image_array[l]
            r_ar = img_var[:,:,0].flatten() # red
            g_ar = img_var[:,:,1].flatten() # green
            b_ar = img_var[:,:,2].flatten() # blue
            all_ar = img_var.flatten() # all
            # collect data in lists
            r_ar_lst.append(r_ar)
            g_ar_lst.append(g_ar)
            b_ar_lst.append(b_ar)
            all_ar_lst.append(all_ar)
        self.r_ar_lst = r_ar_lst
        self.g_ar_lst = g_ar_lst
        self.b_ar_lst = b_ar_lst
        self.all_ar_lst = all_ar_lst
        # get the frequency of values at each rgb value (0-255)
        values_array = all_ar_lst # use all, but any single color can be used
        temp_dist_ar = np.zeros(shape=(255, self.image_segment_count))
        for i in range(self.image_segment_count):
            unique, counts = np.unique(values_array[i], return_counts=True)
            temp_dict = dict(zip(unique, counts))
            for j in temp_dict.keys():
                temp_dist_ar[j-1][i] = temp_dict[j]
        self.px_dens_dist = temp_dist_ar
        # calculate the spearman correlation of the distributions between images
        # use spearman because it is a non-parametric measure
        # use the sum of the correlation coefficients to identify the outlier image
        corr_ar = np.array(spearmanr(temp_dist_ar, axis=0))
        corr_coef_ar = corr_ar[0,:,:]
        corr_pval_ar = corr_ar[1,:,:]
        corr_sum_ar = corr_coef_ar.sum(axis=0)
        self.corr_coef = corr_coef_ar
        self.corr_pval = corr_pval_ar
        self.corr_coef_sum = corr_sum_ar
        self.outlier_idx = corr_sum_ar.argmin()
        self.outlier_val = corr_sum_ar.min()
        self.outlier_col_image = self.col_image_lst[self.outlier_idx]
        self.outlier_inv_bw_image = self.inv_bw_image_lst[self.outlier_idx]
        self.outlier_bw_image = np.invert(self.outlier_inv_bw_image)
        # update metadata dataframe
        self.image_selected_df['circle_class'] = 'non_circle'
        self.image_selected_df.loc[self.outlier_idx, 'circle_class'] = 'circle'

    def estimate_size(self, outlier_idx, known_radius=1, canny_sigma=5):
        for i in range(len(self.corr_coef_sum)):
            # add appropriate data to the dataframe when no circle is detected at all
            if i == (len(self.corr_coef_sum)-1):
                self.outlier_idx = None
                self.outlier_val = None
                self.outlier_col_image = None
                self.outlier_inv_bw_image = None
                self.outlier_bw_image = None
                # update metadata dataframe
                self.image_selected_df['circle_class'] = 'non_circle'
                self.image_selected_df['real_area'] = 0
                clean_inv_bw_image_lst = []
                for inv_bw_image in self.inv_bw_image_lst:
                    # bw_image = np.invert(inv_bw_image)
                    clean_inv_bw_image = clear_border(inv_bw_image)
                    clean_inv_bw_image_lst.append(clean_inv_bw_image)
                px_count_lst = []
                for bw_img in clean_inv_bw_image_lst:
                    unique_px_count = np.unique(bw_img, return_counts=True)
                    px_dict = dict(zip(list(unique_px_count[0]), list(unique_px_count[1])))
                    if len(px_dict) == 1:
                        px_count = 0
                    else:
                        px_count = px_dict[True]
                    px_count_lst.append(px_count)
                self.image_selected_df['pixel_count'] = px_count_lst
                print("Circle could not be found: "+str(self.image_dir))
            else:
                try:
                    self.outlier_idx = np.argsort(self.corr_coef_sum)[i]
                    self.outlier_val = np.sort(self.corr_coef_sum)[i]
                    self.outlier_col_image = self.col_image_lst[self.outlier_idx]
                    self.outlier_inv_bw_image = self.inv_bw_image_lst[self.outlier_idx]
                    self.outlier_bw_image = np.invert(self.outlier_inv_bw_image)
                    # update metadata dataframe
                    self.image_selected_df['circle_class'] = 'non_circle'
                    self.image_selected_df.loc[self.outlier_idx, 'circle_class'] = 'circle'
                    outlier_inv_bw_image = np.invert(self.outlier_bw_image)
                    # remove the border-touching blobs of all b&w images
                    clean_inv_bw_image_lst = []
                    for inv_bw_image in self.inv_bw_image_lst:
                        # bw_image = np.invert(inv_bw_image)
                        clean_inv_bw_image = clear_border(inv_bw_image)
                        clean_inv_bw_image_lst.append(clean_inv_bw_image)
                    # the default is the image detected with detect_outlier
                    # change outlier_bw_image if this is not the ball bearing
                    edges = canny(self.outlier_bw_image, sigma=canny_sigma)
                    # detect the radius
                    max_r = int((max(outlier_inv_bw_image.shape)/2) + (self.image_edge_buffer/2)) # max radius
                    min_r = int((max_r-self.image_edge_buffer) - (self.image_edge_buffer/2)) # min radius
                    hough_radii = np.arange(min_r, max_r, 10)
                    hough_res = hough_circle(edges, hough_radii)
                    # select the most prominent circle
                    accums, cx, cy, radii = hough_circle_peaks(hough_res, hough_radii, total_num_peaks=1)
                    circy, circx = disk((cy[0], cx[0]), radii[0])
                    # change the outlier image to fill in the circle
                    outlier_inv_bw_image[circy, circx] = True # an index error occurs here when the outlier object circle does not fit into the image

                    self.outlier_inv_bw_image = clear_border(outlier_inv_bw_image)
                    clean_inv_bw_image_lst[self.outlier_idx] = self.outlier_inv_bw_image
                    self.clean_inv_bw_image_lst = clean_inv_bw_image_lst
                    # get the area of the ball bearing based on the known radius
                    circle_area = np.pi*(known_radius**2)
                    px_count_lst = []
                    for bw_img in clean_inv_bw_image_lst:
                        px_count = np.unique(bw_img, return_counts=True)[1][1] # an index error occurs here when the outlier object touches the edge of the image (forces recalculation of the outlier)
                        px_count_lst.append(px_count)
                    self.image_selected_df['pixel_count'] = px_count_lst
                    circle_px_count = px_count_lst[self.outlier_idx]
                    area_ar = (np.array(px_count_lst)/circle_px_count)*circle_area
                    self.image_selected_df['real_area'] = area_ar

                    break

                except IndexError:
                    print('Updating circle classification for image: '+ str(self.image_dir))

        else:
            print("No circle was found to estimate beetle size")

# add a section at line 219 that labels all area as 0 and all circle_class as non_circle when the least outlying object is considered.
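The intended calling order is __init__ → segment → detect_outlier → estimate_size. A minimal usage sketch follows; it is illustrative only, "beetles.jpg" and known_radius=2 are hypothetical placeholder values, and the area comment mirrors the scaling done at the end of estimate_size:

# usage sketch (illustrative, not part of the commit); "beetles.jpg" and
# known_radius=2 are hypothetical placeholder values
from Ambrosia import pre_process_image

pp = pre_process_image(image_dir="beetles.jpg", manual_thresh_buffer=0.15)
pp.segment(cluster_num=2, image_edge_buffer=50)    # one cropped image per blob
pp.detect_outlier()                                # least-correlated segment is taken as the reference circle
pp.estimate_size(pp.outlier_idx, known_radius=2)   # real_area = (pixel_count/circle_pixel_count) * pi * known_radius**2
print(pp.image_selected_df[['pixel_count', 'real_area', 'circle_class']])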
app.py
ADDED
@@ -0,0 +1,78 @@
import os
import dill
import timm
import numpy as np
from fastai.tabular.all import *
from fastai.vision.all import *
from fastai.vision.utils import get_image_files
from Ambrosia import pre_process_image
from huggingface_hub import from_pretrained_fastai, push_to_hub_fastai
import gradio as gr

# this function only describes how much a single value in a list stands out.
# if all values in the list are high or low this is 1
# the smaller the proportion of dissimilar values to other more similar values, the lower this number
# the larger the gap between the dissimilar numbers and the similar numbers, the smaller this number
# only able to interpret probabilities or values between 0 and 1
# this function outputs an estimate of the inverse of the classification confidence based on the probabilities of all the classes.
# the wedge threshold splits the data on a threshold with a magnitude of a positive int to force a ledge/peak in the data
def unkown_prob_calc(probs, wedge_threshold, wedge_magnitude=1, wedge='strict'):
    if wedge == 'strict':
        increase_var = (1/(wedge_magnitude))
        decrease_var = (wedge_magnitude)
    elif wedge == 'dynamic': # this allows points that are further from the threshold to be moved less and points closer to be moved more
        increase_var = (1/(wedge_magnitude*((1-np.abs(probs-wedge_threshold)))))
        decrease_var = (wedge_magnitude*((1-np.abs(probs-wedge_threshold))))
    else:
        print("Error: use 'strict' (default) or 'dynamic' as options for the wedge parameter!")
    probs = np.where(probs>=wedge_threshold, probs**increase_var, probs)
    probs = np.where(probs<=wedge_threshold, probs**decrease_var, probs)
    diff_matrix = np.abs(probs[:, np.newaxis] - probs)
    diff_matrix_sum = np.sum(diff_matrix)
    probs_sum = np.sum(probs)
    class_val = (diff_matrix_sum/probs_sum)
    max_class_val = ((len(probs)-1)*2)
    kown_prob = class_val/max_class_val
    unknown_prob = 1-kown_prob
    return(unknown_prob)

# load model
# learn = load_learner("E:\\GIT_REPOS\\Beetle_classifier\\Models\\beetle_classifier.pkl", cpu=False)
learn = load_learner(r"beetle_classifier.pkl", cpu=False)
# get class names
labels = np.append(np.array(learn.dls.vocab), "Unknown")

def predict(img):
    # segment the image into smaller images
    pre_process = pre_process_image(manual_thresh_buffer=0.15, image=img) # use image_dir if a directory of the image is used
    pre_process.segment(cluster_num=2,
                        image_edge_buffer=50)
    # get predictions for all segments
    conf_dict_lst = []
    output_lst = []
    img_cnt = len(pre_process.col_image_lst)
    for i in range(0, img_cnt):
        prob_ar = np.array(learn.predict(pre_process.col_image_lst[i])[2])
        unkown_prob = unkown_prob_calc(probs=prob_ar, wedge_threshold=0.85, wedge_magnitude=5, wedge='dynamic')
        prob_ar = np.append(prob_ar, unkown_prob)
        prob_ar = np.around(prob_ar*100, decimals=1)

        conf_dict = {labels[i]: float(prob_ar[i]) for i in range(len(prob_ar))}
        conf_dict = dict(sorted(conf_dict.items(), key=lambda item: item[1], reverse=True))
        conf_dict_lst.append(str(conf_dict))
    result = list(zip(pre_process.col_image_lst, conf_dict_lst))

    return(result)

with gr.Blocks() as demo:
    with gr.Column(variant="panel"):
        with gr.Row(variant="compact"):
            inputs = gr.Image()
            btn = gr.Button("Classify").style(full_width=False)

        gallery = gr.Gallery(
            label="Show images", show_label=True, elem_id="gallery"
        ).style(grid=[8], height="auto")

        btn.click(predict, inputs, gallery)
demo.launch(share=True)
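For intuition about unkown_prob_calc: a confidently classified segment should receive a low "Unknown" score and an ambiguous one a high score. A small sanity check, illustrative only; the two probability vectors are made up and the printed values are approximate:

# sanity check for unkown_prob_calc (hypothetical inputs, approximate outputs)
import numpy as np
confident = np.array([0.90, 0.05, 0.05])   # one class dominates
ambiguous = np.array([0.34, 0.33, 0.33])   # nearly uniform probabilities
print(unkown_prob_calc(confident, wedge_threshold=0.85, wedge_magnitude=5, wedge='dynamic'))  # ~0.14
print(unkown_prob_calc(ambiguous, wedge_threshold=0.85, wedge_magnitude=5, wedge='dynamic'))  # ~0.99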