BoukamchaSmartVisions committed
Commit 211c827
Parent: acef008

Update app.py: drop video inference and pass yolov10_inference its arguments directly instead of as a packed inputs tuple

Files changed (1): app.py (+11 -82)
app.py CHANGED
@@ -3,8 +3,7 @@ from ultralytics import YOLOv10
 import supervision as sv
 import spaces
 from huggingface_hub import hf_hub_download
-import cv2
-import tempfile
+
 
 def download_models(model_id):
     hf_hub_download("BoukamchaSmartVisions/Yolov10", filename=f"{model_id}", local_dir=f"./")
@@ -30,9 +29,9 @@ category_dict = {
     77: 'teddy bear', 78: 'hair drier', 79: 'toothbrush'
 }
 
+
 @spaces.GPU(duration=200)
-def yolov10_inference(inputs):
-    image, model_id, image_size, conf_threshold, iou_threshold = inputs[1], inputs[2], inputs[3], inputs[4], inputs[5]
+def yolov10_inference(image, model_id, image_size, conf_threshold, iou_threshold):
     model_path = download_models(model_id)
     model = YOLOv10(model_path)
     results = model(source=image, imgsz=image_size, iou=iou_threshold, conf=conf_threshold, verbose=False)[0]
@@ -46,56 +45,12 @@ def yolov10_inference(inputs):
 
     return annotated_image
 
-def yolov10_video_inference(inputs):
-    video, model_id, image_size, conf_threshold, iou_threshold = inputs[2], inputs[3], inputs[4], inputs[5], inputs[6]
-    model_path = download_models(model_id)
-    model = YOLOv10(model_path)
-
-    cap = cv2.VideoCapture(video)
-    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
-    out = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
-    out_path = out.name
-
-    ret, frame = cap.read()
-    height, width, _ = frame.shape
-    writer = cv2.VideoWriter(out_path, fourcc, 30, (width, height))
-
-    while ret:
-        results = model(source=frame, imgsz=image_size, iou=iou_threshold, conf=conf_threshold, verbose=False)[0]
-        detections = sv.Detections.from_ultralytics(results)
-
-        labels = [
-            f"{category_dict[class_id]} {confidence:.2f}"
-            for class_id, confidence in zip(detections.class_id, detections.confidence)
-        ]
-        annotated_frame = box_annotator.annotate(frame, detections=detections, labels=labels)
-
-        writer.write(annotated_frame)
-        ret, frame = cap.read()
-
-    cap.release()
-    writer.release()
-
-    return out_path
-
 def app():
     with gr.Blocks():
         with gr.Row():
             with gr.Column():
-                image_or_video = gr.Radio(
-                    label="Input Type",
-                    choices=["Image", "Video"],
-                    value="Image",
-                )
-                image = gr.Image(type="numpy", label="Image", visible=True)
-                video = gr.Video(label="Video", visible=False)
-
-                image_or_video.change(
-                    lambda x: (gr.update(visible=x=="Image"), gr.update(visible=x=="Video")),
-                    inputs=[image_or_video],
-                    outputs=[image, video],
-                )
-
+                image = gr.Image(type="numpy", label="Image")
+
                 model_id = gr.Dropdown(
                     label="Model",
                     choices=[
@@ -132,79 +87,53 @@ def app():
                 yolov10_infer = gr.Button(value="Detect Objects")
 
             with gr.Column():
-                output_image = gr.Image(type="numpy", label="Annotated Image", visible=True)
-                output_video = gr.Video(label="Annotated Video", visible=False)
-
-        def process_inputs(inputs):
-            if inputs[0] == "Image":
-                return yolov10_inference(inputs)
-            else:
-                return yolov10_video_inference(inputs)
+                output_image = gr.Image(type="numpy", label="Annotated Image")
 
         yolov10_infer.click(
-            fn=process_inputs,
+            fn=yolov10_inference,
            inputs=[
-                image_or_video,
                 image,
-                video,
                 model_id,
                 image_size,
                 conf_threshold,
                 iou_threshold,
             ],
-            outputs=[output_image, output_video],
+            outputs=[output_image],
         )
 
         gr.Examples(
             examples=[
                 [
-                    "Image",
                     "Animals_persones.jpg",
-                    None,
                     "yolov10x.pt",
                     640,
                     0.25,
                     0.45,
                 ],
                 [
-                    "Image",
                     "collage-horses-other-pets-white.jpg",
-                    None,
                     "yolov10m.pt",
                     640,
                     0.25,
                     0.45,
                 ],
                 [
-                    "Image",
                     "Ville.png",
-                    None,
                     "yolov10b.pt",
                     640,
                     0.25,
                     0.45,
                 ],
-                [
-                    "Video",
-                    None,
-                    "sample_video.mp4",
-                    "yolov10m.pt",
-                    640,
-                    0.25,
-                    0.45,
-                ],
             ],
-            fn=process_inputs,
+            fn=yolov10_inference,
             inputs=[
-                image_or_video,
                 image,
-                video,
                 model_id,
                 image_size,
                 conf_threshold,
                 iou_threshold,
             ],
-            outputs=[output_image, output_video],
+            outputs=[output_image],
             cache_examples=True,
         )
@@ -227,4 +156,4 @@ with gradio_app:
         with gr.Column():
             app()
 
-gradio_app.launch(debug=True)
+gradio_app.launch(debug=True)
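
With the tuple unpacking gone, yolov10_inference is a plain five-argument function and can be smoke-tested outside the Gradio UI. A minimal sketch, not part of the commit: it uses the first entry from the Examples block above, assumes the trailing gradio_app.launch(debug=True) in app.py is guarded or commented out first (importing the module would otherwise start the server), and the output file name annotated.jpg is illustrative only. The spaces.GPU decorator should behave as a no-op outside a Space, so the function runs as plain Python locally.

import cv2

from app import yolov10_inference  # assumes the launch() call is guarded

# gr.Image(type="numpy") hands the function an RGB array, while cv2.imread
# returns BGR, so convert before calling it.
image = cv2.cvtColor(cv2.imread("Animals_persones.jpg"), cv2.COLOR_BGR2RGB)

annotated = yolov10_inference(
    image,          # numpy image, passed directly instead of inputs[1]
    "yolov10x.pt",  # checkpoint fetched via download_models()
    640,            # inference image size
    0.25,           # confidence threshold
    0.45,           # IoU threshold
)

# Convert back to BGR for OpenCV's writer.
cv2.imwrite("annotated.jpg", cv2.cvtColor(annotated, cv2.COLOR_RGB2BGR))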