xiang-wuu committed on
Commit
d11a245
1 Parent(s): e0768d6

MOT tracking component added to gradio app

Files changed (2)
  1. app.py +3 -2
  2. tracker_utils.py +69 -0
app.py CHANGED
@@ -1,10 +1,11 @@
 import gradio as gr
 from classify_utils import classify_tab
 from detect_utils import detect_tab
+from tracker_utils import tracker_tab
 
 
 if __name__ == '__main__':
     gr.TabbedInterface(
-        [detect_tab, classify_tab],
-        tab_names=['Image Detection', 'Image Classification']
+        [detect_tab, classify_tab, tracker_tab],
+        tab_names=['Image Detection', 'Image Classification', "Multi Object Tracker"]
     ).queue().launch()
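For context on the app.py change: gr.TabbedInterface takes a list of pre-built gr.Interface objects together with matching tab_names, so wiring in the new tracker only needs the extra import plus one entry in each list. Below is a minimal, self-contained sketch of that pattern (the lambda stubs stand in for the real detect/classify/track functions and are not part of this commit):

import gradio as gr

# Stand-ins for the interfaces that detect_utils, classify_utils and
# tracker_utils export in the real app; each tab is just a gr.Interface.
detect_tab = gr.Interface(fn=lambda img: img, inputs=gr.Image(), outputs=gr.Image())
classify_tab = gr.Interface(fn=lambda img: "placeholder", inputs=gr.Image(), outputs=gr.Label())
tracker_tab = gr.Interface(fn=lambda vid: vid, inputs=gr.Video(), outputs=gr.Video())

if __name__ == '__main__':
    # One queue()/launch() call serves all three tabs.
    gr.TabbedInterface(
        [detect_tab, classify_tab, tracker_tab],
        tab_names=['Image Detection', 'Image Classification', 'Multi Object Tracker']
    ).queue().launch()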
tracker_utils.py ADDED
@@ -0,0 +1,69 @@
+import os
+from ultralytics import YOLO
+import supervision as sv
+import gradio as gr
+
+os.system("wget https://raw.githubusercontent.com/spmallick/learnopencv/master/Understanding-Multiple-Object-Tracking-using-DeepSORT/yolov5/football-video.mp4")
+os.system("wget https://raw.githubusercontent.com/spmallick/learnopencv/master/MultiObjectTracker/videos/run.mp4")
+
+
+def process_video(
+        source_video_path: str,
+        source_weights_path: str,
+        confidence_threshold: float = 0.3,
+        iou_threshold: float = 0.7,):
+
+    model = YOLO(source_weights_path + '.pt')
+    tracker = sv.ByteTrack()
+    box_annotator = sv.BoxAnnotator()
+    frame_generator = sv.get_video_frames_generator(
+        source_path=source_video_path)
+    confidence_threshold = confidence_threshold / 100
+    iou_threshold = iou_threshold / 100
+    # video_info = sv.VideoInfo.from_video_path(video_path=source_video_path)
+
+    # with sv.VideoSink(target_path=target_video_path, video_info=video_info) as sink:
+    for frame in frame_generator:
+        results = model(
+            frame, verbose=False, conf=confidence_threshold, iou=iou_threshold
+        )[0]
+        detections = sv.Detections.from_ultralytics(results)
+        detections = tracker.update_with_detections(detections)
+
+        labels = [
+            f"#{tracker_id} {model.model.names[class_id]}"
+            for _, _, _, class_id, tracker_id in detections
+        ]
+
+        annotated_frame = box_annotator.annotate(
+            scene=frame.copy(), detections=detections, labels=labels
+        )
+
+        yield annotated_frame
+        # sink.write_frame(frame=annotated_frame)
+
+
+inputs_thresh = [
+    gr.components.Video(type="filepath", label="Input Video"),
+    gr.inputs.Radio(label="Detection Methods",
+                    choices=[
+                        "yolov5s", "yolov8s"
+                    ]),
+    gr.components.Slider(label="Class Probability Value",
+                         value=30, minimum=1, maximum=100, step=1),
+    gr.components.Slider(label="IOU threshold Value",
+                         value=50, minimum=1, maximum=100, step=1),
+]
+
+outputs_thresh = [
+    gr.components.Image(type="numpy", label="Output")
+]
+
+tracker_tab = gr.Interface(
+    process_video,
+    inputs=inputs_thresh,
+    outputs=outputs_thresh,
+    title="supervision",
+    examples=[["run.mp4", "yolov5s"], ["football-video.mp4", "yolov8s"]],
+    description="Gradio based demo for <a href='https://github.com/roboflow/supervision' style='text-decoration: underline' target='_blank'>roboflow/supervision</a>, We write your reusable computer vision tools."
+)
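The commented-out VideoSink lines in process_video point at an offline variant of the same supervision pipeline that writes the annotated video to disk instead of streaming frames through the Gradio UI. A minimal sketch of that path, using the same supervision/ultralytics calls the commit already relies on (the output path and the fixed yolov8s weights are illustrative assumptions, not part of this commit):

from ultralytics import YOLO
import supervision as sv

SOURCE = "football-video.mp4"    # one of the clips downloaded by tracker_utils.py
TARGET = "football-tracked.mp4"  # assumed output path, not part of this commit

model = YOLO("yolov8s.pt")
tracker = sv.ByteTrack()
box_annotator = sv.BoxAnnotator()
video_info = sv.VideoInfo.from_video_path(video_path=SOURCE)

# Same detect -> track -> annotate loop as process_video, but frames go
# to a VideoSink on disk rather than being yielded to Gradio.
with sv.VideoSink(target_path=TARGET, video_info=video_info) as sink:
    for frame in sv.get_video_frames_generator(source_path=SOURCE):
        results = model(frame, verbose=False, conf=0.3, iou=0.7)[0]
        detections = tracker.update_with_detections(
            sv.Detections.from_ultralytics(results))
        labels = [
            f"#{tracker_id} {model.model.names[class_id]}"
            for _, _, _, class_id, tracker_id in detections
        ]
        sink.write_frame(frame=box_annotator.annotate(
            scene=frame.copy(), detections=detections, labels=labels))

Note that the Gradio sliders in tracker_utils.py pass whole percentages (30, 50), which process_video rescales by dividing by 100 before calling the model; the sketch above passes the fractional thresholds directly.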