SkalskiP committed on
Commit
06541ee
•
1 Parent(s): af87f8a

initial commit

Files changed (4)
  1. .gitignore +2 -0
  2. README.md +1 -1
  3. app.py +177 -0
  4. requirements.txt +3 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
+ venv/
+ .idea/
README.md CHANGED
@@ -1,6 +1,6 @@
  ---
  title: YOLO ARENA
- emoji: 🏃
+ emoji: 🏟️
  colorFrom: pink
  colorTo: green
  sdk: gradio
app.py ADDED
@@ -0,0 +1,177 @@
+ from typing import Tuple
+
+ import gradio as gr
+ import numpy as np
+ import supervision as sv
+ from inference import get_model
+
+ MARKDOWN = """
+ # YOLO-ARENA 🏟️
+
+ Powered by Roboflow [Inference](https://github.com/roboflow/inference) and
+ [Supervision](https://github.com/roboflow/supervision).
+ """
+
+ IMAGE_EXAMPLES = [
+     ['https://media.roboflow.com/dog.jpeg', 0.3]
+ ]
+
+ YOLO_V8_MODEL = get_model(model_id="yolov8s-640")
+ YOLO_NAS_MODEL = get_model(model_id="coco/14")
+ YOLO_V9_MODEL = get_model(model_id="coco/17")
+
+ LABEL_ANNOTATORS = sv.LabelAnnotator(text_color=sv.Color.black())
+ BOUNDING_BOX_ANNOTATORS = sv.BoundingBoxAnnotator()
+
+
+ def process_image(
+     input_image: np.ndarray,
+     confidence_threshold: float,
+     iou_threshold: float
+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
+     yolo_v8_result = YOLO_V8_MODEL.infer(
+         input_image,
+         confidence=confidence_threshold,
+         iou_threshold=iou_threshold
+     )[0]
+     yolo_v8_detections = sv.Detections.from_inference(yolo_v8_result)
+
+     labels = [
+         f"{class_name} {confidence:.2f}"
+         for class_name, confidence
+         in zip(yolo_v8_detections["class_name"], yolo_v8_detections.confidence)
+     ]
+
+     yolo_v8_annotated_image = input_image.copy()
+     yolo_v8_annotated_image = BOUNDING_BOX_ANNOTATORS.annotate(
+         scene=yolo_v8_annotated_image, detections=yolo_v8_detections)
+     yolo_v8_annotated_image = LABEL_ANNOTATORS.annotate(
+         scene=yolo_v8_annotated_image, detections=yolo_v8_detections, labels=labels)
+
+     yolo_nas_result = YOLO_NAS_MODEL.infer(
+         input_image,
+         confidence=confidence_threshold,
+         iou_threshold=iou_threshold
+     )[0]
+     yolo_nas_detections = sv.Detections.from_inference(yolo_nas_result)
+
+     labels = [
+         f"{class_name} {confidence:.2f}"
+         for class_name, confidence
+         in zip(yolo_nas_detections["class_name"], yolo_nas_detections.confidence)
+     ]
+
+     yolo_nas_annotated_image = input_image.copy()
+     yolo_nas_annotated_image = BOUNDING_BOX_ANNOTATORS.annotate(
+         scene=yolo_nas_annotated_image, detections=yolo_nas_detections)
+     yolo_nas_annotated_image = LABEL_ANNOTATORS.annotate(
+         scene=yolo_nas_annotated_image, detections=yolo_nas_detections, labels=labels)
+
+     yolo_v9_result = YOLO_V9_MODEL.infer(
+         input_image,
+         confidence=confidence_threshold,
+         iou_threshold=iou_threshold
+     )[0]
+     yolo_v9_detections = sv.Detections.from_inference(yolo_v9_result)
+
+     labels = [
+         f"{class_name} {confidence:.2f}"
+         for class_name, confidence
+         in zip(yolo_v9_detections["class_name"], yolo_v9_detections.confidence)
+     ]
+
+     yolo_v9_annotated_image = input_image.copy()
+     yolo_v9_annotated_image = BOUNDING_BOX_ANNOTATORS.annotate(
+         scene=yolo_v9_annotated_image, detections=yolo_v9_detections)
+     yolo_v9_annotated_image = LABEL_ANNOTATORS.annotate(
+         scene=yolo_v9_annotated_image, detections=yolo_v9_detections, labels=labels)
+
+     return yolo_v8_annotated_image, yolo_nas_annotated_image, yolo_v9_annotated_image
+
+
+ confidence_threshold_component = gr.Slider(
+     minimum=0,
+     maximum=1.0,
+     value=0.3,
+     step=0.01,
+     label="Confidence Threshold",
+     info=(
+         "The confidence threshold for the YOLO model. Lower the threshold to "
+         "reduce false negatives, enhancing the model's sensitivity to detect "
+         "sought-after objects. Conversely, increase the threshold to minimize false "
+         "positives, preventing the model from identifying objects it shouldn't."
+     ))
+
+ iou_threshold_component = gr.Slider(
+     minimum=0,
+     maximum=1.0,
+     value=0.5,
+     step=0.01,
+     label="IoU Threshold",
+     info=(
+         "The Intersection over Union (IoU) threshold for non-maximum suppression. "
+         "Decrease the value to lessen the occurrence of overlapping bounding boxes, "
+         "making the detection process stricter. On the other hand, increase the value "
+         "to allow more overlapping bounding boxes, accommodating a broader range of "
+         "detections."
+     ))
+
+
+ with gr.Blocks() as demo:
+     gr.Markdown(MARKDOWN)
+     with gr.Accordion("Configuration", open=False):
+         confidence_threshold_component.render()
+         iou_threshold_component.render()
+     with gr.Row():
+         input_image_component = gr.Image(
+             type='numpy',
+             label='Input Image'
+         )
+         yolo_v8_output_image_component = gr.Image(
+             type='numpy',
+             label='YOLOv8 Output'
+         )
+     with gr.Row():
+         yolo_nas_output_image_component = gr.Image(
+             type='numpy',
+             label='YOLO-NAS Output'
+         )
+         yolo_v9_output_image_component = gr.Image(
+             type='numpy',
+             label='YOLOv9 Output'
+         )
+     submit_button_component = gr.Button(
+         value='Submit',
+         scale=1,
+         variant='primary'
+     )
+     gr.Examples(
+         fn=process_image,
+         examples=IMAGE_EXAMPLES,
+         inputs=[
+             input_image_component,
+             confidence_threshold_component,
+             iou_threshold_component
+         ],
+         outputs=[
+             yolo_v8_output_image_component,
+             yolo_nas_output_image_component,
+             yolo_v9_output_image_component
+         ]
+     )
+
+     submit_button_component.click(
+         fn=process_image,
+         inputs=[
+             input_image_component,
+             confidence_threshold_component,
+             iou_threshold_component
+         ],
+         outputs=[
+             yolo_v8_output_image_component,
+             yolo_nas_output_image_component,
+             yolo_v9_output_image_component
+         ]
+     )
+
+ demo.launch(debug=False, show_error=True, max_threads=1)
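
Note: the three per-model blocks in process_image are copies of the same infer → Detections → labels → annotate pipeline, differing only in the model object. A minimal refactor sketch (detect_and_annotate is a hypothetical helper, not part of this commit; it assumes the models and annotators defined above):

def detect_and_annotate(
    model,
    input_image: np.ndarray,
    confidence_threshold: float,
    iou_threshold: float
) -> np.ndarray:
    # Run inference and convert the raw result to a supervision Detections object.
    result = model.infer(
        input_image,
        confidence=confidence_threshold,
        iou_threshold=iou_threshold
    )[0]
    detections = sv.Detections.from_inference(result)

    # Build "<class> <confidence>" labels, mirroring the blocks in the diff above.
    labels = [
        f"{class_name} {confidence:.2f}"
        for class_name, confidence
        in zip(detections["class_name"], detections.confidence)
    ]

    # Draw boxes first, then labels, on a copy so the input image stays untouched.
    annotated_image = input_image.copy()
    annotated_image = BOUNDING_BOX_ANNOTATORS.annotate(
        scene=annotated_image, detections=detections)
    return LABEL_ANNOTATORS.annotate(
        scene=annotated_image, detections=detections, labels=labels)

process_image would then reduce to three calls, e.g. return tuple(detect_and_annotate(model, input_image, confidence_threshold, iou_threshold) for model in (YOLO_V8_MODEL, YOLO_NAS_MODEL, YOLO_V9_MODEL)).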
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ gradio==4.19.2
+ inference==0.9.15
+ supervision==0.18.0
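
With the definitions from app.py in scope, process_image can be smoke-tested outside the Gradio UI. A sketch, not part of the commit (it assumes cv2 and requests are importable, which they typically are as dependencies of the pinned inference package; note that importing app.py as a module would also call demo.launch, since launch is at module level):

import cv2
import numpy as np
import requests

# Fetch the example image referenced in IMAGE_EXAMPLES and decode it to a numpy array.
data = requests.get('https://media.roboflow.com/dog.jpeg').content
image = cv2.imdecode(np.frombuffer(data, dtype=np.uint8), cv2.IMREAD_COLOR)

# Run all three models with the app's default slider values.
v8_image, nas_image, v9_image = process_image(image, 0.3, 0.5)
print(v8_image.shape, nas_image.shape, v9_image.shape)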