ibvhim committed on
Commit
c04f178
1 Parent(s): 4d30e8f

Create onnx_guide/app.py

Files changed (1)
  1. onnx_guide/app.py +81 -0
onnx_guide/app.py ADDED
@@ -0,0 +1,81 @@
+ import json
+
+ import cv2
+ import gradio as gr
+ import numpy as np
+ import onnxruntime as rt
+ from huggingface_hub import hf_hub_download
+
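+ # download the pretrained EfficientNet-Lite4 ONNX model from the Hugging Face Hub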
+ model_path = hf_hub_download(repo_id="onnx/EfficientNet-Lite4", filename="efficientnet-lite4-11.onnx")
+ # load the labels map (JSON mapping class index -> human-readable label)
+ with open("onnx_guide/labels_map.txt", "r") as f:
+     labels = json.load(f)
+
+
+ # resize and center-crop the image to the 224x224 input size the model expects
+ def pre_process_edgetpu(img, dims):
+     output_height, output_width, _ = dims
+     img = resize_with_aspectratio(img, output_height, output_width, inter_pol=cv2.INTER_LINEAR)
+     img = center_crop(img, output_height, output_width)
+     img = np.asarray(img, dtype='float32')
+     # convert pixel values from [0, 255] to floats in [-1.0, 1.0]
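+     # e.g. 255 -> (255 - 127) / 128 = 1.0 and 0 -> (0 - 127) / 128 ~= -0.99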
+     img -= [127.0, 127.0, 127.0]
+     img /= [128.0, 128.0, 128.0]
+     return img
+
+
+ # resize the image, preserving its aspect ratio
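+ # with the default scale=87.5 and a 224x224 target, the short side is resized to
+ # int(100 * 224 / 87.5) = 256 px before the crop (the standard ImageNet eval recipe)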
+ def resize_with_aspectratio(img, out_height, out_width, scale=87.5, inter_pol=cv2.INTER_LINEAR):
+     height, width, _ = img.shape
+     new_height = int(100.0 * out_height / scale)
+     new_width = int(100.0 * out_width / scale)
+     if height > width:
+         w = new_width
+         h = int(new_height * height / width)
+     else:
+         h = new_height
+         w = int(new_width * width / height)
+     img = cv2.resize(img, (w, h), interpolation=inter_pol)
+     return img
+
+
+ # crop the image around the center based on given height and width
+ def center_crop(img, out_height, out_width):
+     height, width, _ = img.shape
+     left = int((width - out_width) / 2)
+     right = int((width + out_width) / 2)
+     top = int((height - out_height) / 2)
+     bottom = int((height + out_height) / 2)
+     img = img[top:bottom, left:right]
+     return img
+
+
+ sess = rt.InferenceSession(model_path)
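+ # the tensor names used in sess.run() below ("images:0", "Softmax:0") can be
+ # verified with sess.get_inputs()[0].name and sess.get_outputs()[0].name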
+
+
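+ # read an image from a file path, preprocess it, and return the top-5 labels with probabilities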
+ def inference(img):
+     img = cv2.imread(img)
+     img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+
+     img = pre_process_edgetpu(img, (224, 224, 3))
+
+     img_batch = np.expand_dims(img, axis=0)
+
+     results = sess.run(["Softmax:0"], {"images:0": img_batch})[0]
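+     # results[0] is the softmax vector over the ImageNet classes; take the five
+     # highest-scoring indices, ordered from most to least likely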
+     result = reversed(results[0].argsort()[-5:])
+     resultdic = {}
+     for r in result:
+         resultdic[labels[str(r)]] = float(results[0][r])
+     return resultdic
+
+
+ title = "EfficientNet-Lite4"
+ description = "EfficientNet-Lite4 is the largest and most accurate of the EfficientNet-Lite models. It is an integer-only quantized model, achieving 80.4% ImageNet top-1 accuracy while still running in real time (about 30 ms/image) on a Pixel 4 CPU."
+ examples = [[hf_hub_download('nateraw/gradio-guides-files', 'catonnx.jpg', repo_type='dataset', force_filename='catonnx.jpg')]]
+
+ interface = gr.Interface(
+     inference, gr.Image(type="filepath"), "label", title=title, description=description, examples=examples
+ )
+
+ if __name__ == '__main__':
+     interface.launch(debug=True)
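
For a quick sanity check outside the Gradio UI, a minimal sketch along these lines should work (assuming onnx_guide is importable from the repo root, so the module-level model and label downloads run on import):

    from huggingface_hub import hf_hub_download
    from onnx_guide.app import inference

    # reuse the same example image the demo ships with
    img_path = hf_hub_download('nateraw/gradio-guides-files', 'catonnx.jpg', repo_type='dataset')
    # prints a dict mapping the top-5 ImageNet labels to their probabilities
    print(inference(img_path))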