Victorano committed on
Commit 9d20d64 · 1 Parent(s): d2709ad

All functionalities working fine

Files changed (5)
  1. app.py +29 -0
  2. detect.py +10 -0
  3. eff_quantized.onnx +3 -0
  4. examples/test.jpg +0 -0
  5. onnx_inference.py +27 -0
app.py ADDED
@@ -0,0 +1,29 @@
+ import gradio as gr
+ import os
+ import numpy as np
+ from onnx_inference import emotions_detector
+
+ class_names = ['angry', 'happy', 'sad']
+
+
+ def predict(img):
+     img = np.array(img)
+     onnx_pred, time_taken = emotions_detector(img)
+
+     pred_labels_and_probs = {class_names[i]: float(
+         onnx_pred[0][0][i]) for i in range(len(class_names))}
+
+     return pred_labels_and_probs, time_taken
+
+
+ title = "Human Emotion Detection 😭🤣🥹"
+ description = "An EfficientNet feature-extractor model, quantized and exported to ONNX, that classifies an image by the emotion of the person in it. (The uploaded image should contain a single person.)"
+ article = "The full source code, written from scratch, can be found in the Hugging Face Space...."
+
+ # Create the examples list from the "examples/" directory
+ example_list = [["examples/" + example] for example in os.listdir("examples")]
+
+ demo = gr.Interface(fn=predict, inputs=gr.Image(type='pil'), outputs=[gr.Label(num_top_classes=3, label='Predictions'), gr.Number(
+     label="Prediction time (s)")], examples=example_list, title=title, description=description, article=article)
+
+ demo.launch()
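
gr.Label treats the dict values as confidences, so they should ideally sum to 1. If the dense_2 head of the exported model emits raw logits rather than softmax probabilities (an assumption this diff does not settle), the scores could be normalized inside predict(); a minimal sketch:

import numpy as np

def softmax(logits):
    # Shift by the max for numerical stability before exponentiating.
    exps = np.exp(logits - np.max(logits))
    return exps / exps.sum()

# Hypothetical drop-in inside predict(), replacing the raw scores:
# probs = softmax(onnx_pred[0][0])
# pred_labels_and_probs = {class_names[i]: float(probs[i])
#                          for i in range(len(class_names))}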
detect.py ADDED
@@ -0,0 +1,10 @@
+ from PIL import Image
+ from io import BytesIO
+ import numpy as np
+
+
+ def emo_router(im):
+     print(f"the Image: {im}")
+     image = np.array(im)
+
+     return image
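
emo_router simply logs the incoming PIL image and returns it as a NumPy array. A minimal usage sketch, assuming the example image added in this commit:

from PIL import Image
from detect import emo_router

img = Image.open("examples/test.jpg")  # example image shipped in this commit
arr = emo_router(img)                  # prints the PIL object, returns an ndarray
print(arr.shape, arr.dtype)            # e.g. (H, W, 3) uint8 for an RGB JPEG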
eff_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4c278206e78d48fc8ab5151bc22e2636faad7bb41323ac5f0b6bde72079ebf72
+ size 63147650
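
The quantized model is about 63 MB, so it is tracked with Git LFS: the commit stores only this small pointer (spec version, SHA-256 object id, and byte size) while the binary itself lives in LFS storage. After cloning the Space, running git lfs install followed by git lfs pull swaps the pointer for the real eff_quantized.onnx.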
examples/test.jpg ADDED
onnx_inference.py ADDED
@@ -0,0 +1,27 @@
+ import onnxruntime as rt
+ import cv2
+ import numpy as np
+ import time
+
+ providers = ['CPUExecutionProvider']
+ m_q = rt.InferenceSession(
+     "eff_quantized.onnx", providers=providers)
+
+
+ def emotions_detector(img_array):
+     time_init = time.time()
+
+     # If the image is grayscale, convert it to RGB
+     if len(img_array.shape) == 2:
+         img_array = cv2.cvtColor(img_array, cv2.COLOR_GRAY2RGB)
+
+     # Resize to the model's 256x256 input and add a batch dimension
+     test_image = cv2.resize(img_array, (256, 256))
+     im = np.float32(test_image)
+     img_array = np.expand_dims(im, axis=0)
+
+     onnx_pred = m_q.run(['dense_2'], {"input_1": img_array})
+
+     time_elapsed = time.time() - time_init
+
+     return onnx_pred, time_elapsed
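
For reference, a minimal standalone sketch of calling the detector outside Gradio, assuming the example image exists and that dense_2 yields a (1, 3) array matching class_names in app.py:

import cv2
from onnx_inference import emotions_detector

# cv2.imread returns BGR while the Gradio path feeds RGB; for a quick
# smoke test this sketch ignores the channel-order difference.
img = cv2.imread("examples/test.jpg")
pred, secs = emotions_detector(img)
print(pred[0][0], f"({secs:.3f}s)")  # raw scores for ['angry', 'happy', 'sad']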