SeyedAli committed
Commit ebbab91
1 Parent(s): 294db6f

Update app.py

Files changed (1)
  1. app.py +84 -1
app.py CHANGED
@@ -1,3 +1,86 @@
+from transformers import AutoFeatureExtractor, YolosForObjectDetection
 import gradio as gr
+from PIL import Image
+import torch
+import matplotlib.pyplot as plt
+import io
+import numpy as np
 
-gr.Interface.load("models/facebook/detr-resnet-50").launch()
+
+COLORS = [[0.000, 0.447, 0.741], [0.850, 0.325, 0.098], [0.929, 0.694, 0.125],
+          [0.494, 0.184, 0.556], [0.466, 0.674, 0.188], [0.301, 0.745, 0.933]]
+
+
+def process_class_list(classes_string: str):
+    if classes_string == "":
+        return []
+    classes_list = classes_string.split(",")
+    classes_list = [x.strip() for x in classes_list]
+    return classes_list
+
+def model_inference(img, model_name: str, prob_threshold: float, classes_to_show: str = ""):
+    feature_extractor = AutoFeatureExtractor.from_pretrained(f"hustvl/{model_name}")
+    model = YolosForObjectDetection.from_pretrained(f"hustvl/{model_name}")
+
+    img = Image.fromarray(img)
+
+    pixel_values = feature_extractor(img, return_tensors="pt").pixel_values
+
+    with torch.no_grad():
+        outputs = model(pixel_values, output_attentions=True)
+
+    probas = outputs.logits.softmax(-1)[0, :, :-1]
+    keep = probas.max(-1).values > prob_threshold
+
+    target_sizes = torch.tensor(img.size[::-1]).unsqueeze(0)
+    postprocessed_outputs = feature_extractor.post_process(outputs, target_sizes)
+    bboxes_scaled = postprocessed_outputs[0]['boxes']
+
+    classes_list = process_class_list(classes_to_show)
+    res_img = plot_results(img, probas[keep], bboxes_scaled[keep], model, classes_list)
+
+    return res_img
+
44
+ def plot_results(pil_img, prob, boxes, model, classes_list):
45
+ plt.figure(figsize=(16,10))
46
+ plt.imshow(pil_img)
47
+ ax = plt.gca()
48
+ colors = COLORS * 100
49
+ for p, (xmin, ymin, xmax, ymax), c in zip(prob, boxes.tolist(), colors):
50
+ cl = p.argmax()
51
+ object_class = model.config.id2label[cl.item()]
52
+
53
+ if len(classes_list) > 0 :
54
+ if object_class not in classes_list:
55
+ continue
56
+
57
+ ax.add_patch(plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin,
58
+ fill=False, color=c, linewidth=3))
59
+ text = f'{object_class}: {p[cl]:0.2f}'
60
+ ax.text(xmin, ymin, text, fontsize=15,
61
+ bbox=dict(facecolor='yellow', alpha=0.5))
62
+ plt.axis('off')
63
+ return fig2img(plt.gcf())
64
+
65
+ def fig2img(fig):
66
+ buf = io.BytesIO()
67
+ fig.savefig(buf)
68
+ buf.seek(0)
69
+ img = Image.open(buf)
70
+ return img
71
+
72
+description = """YOLOS - Object Detection"""
+
+image_in = gr.components.Image()
+image_out = gr.components.Image()
+model_choice = gr.components.Dropdown(["yolos-tiny", "yolos-small", "yolos-base", "yolos-small-300", "yolos-small-dwr"], value="yolos-small", label="YOLOS Model")
+prob_threshold_slider = gr.components.Slider(minimum=0, maximum=1.0, step=0.01, value=0.9, label="Probability Threshold")
+classes_to_show = gr.components.Textbox(placeholder="e.g. person, truck", label="Classes to show (leave empty to detect all classes)")
+
+Iface = gr.Interface(
+    fn=model_inference,
+    inputs=[image_in, model_choice, prob_threshold_slider, classes_to_show],
+    outputs=image_out,
+    title="YOLOS - Object Detection",
+    description=description,
+).launch()