File size: 2,602 Bytes
d11a245
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
import os
from ultralytics import YOLO
import supervision as sv
import gradio as gr

# Fetch the sample videos referenced by the Gradio examples below.
# Skip files that already exist so repeated app restarts don't re-download them.
_SAMPLE_VIDEO_URLS = [
    "https://raw.githubusercontent.com/spmallick/learnopencv/master/Understanding-Multiple-Object-Tracking-using-DeepSORT/yolov5/football-video.mp4",
    "https://raw.githubusercontent.com/spmallick/learnopencv/master/MultiObjectTracker/videos/run.mp4",
]
for _url in _SAMPLE_VIDEO_URLS:
    # URLs are hard-coded constants, so passing them through a shell here is safe.
    if not os.path.exists(os.path.basename(_url)):
        os.system(f"wget {_url}")


def process_video(
        source_video_path: str,
        source_weights_path: str,
        confidence_threshold: float = 30,
        iou_threshold: float = 70,):
    """Run YOLO detection plus ByteTrack tracking over a video, yielding annotated frames.

    Args:
        source_video_path: Path to the input video file.
        source_weights_path: Model name without the ".pt" suffix
            (e.g. "yolov8s"); the weights are resolved/downloaded by ultralytics.
        confidence_threshold: Detection confidence threshold as a percentage
            (1-100), as supplied by the Gradio slider. Converted to 0-1 below.
        iou_threshold: NMS IOU threshold as a percentage (1-100).

    Yields:
        Annotated frames (numpy arrays) with boxes and "#<track_id> <class>" labels,
        streamed one at a time so Gradio can display them progressively.
    """
    model = YOLO(source_weights_path + '.pt')
    tracker = sv.ByteTrack()
    box_annotator = sv.BoxAnnotator()
    frame_generator = sv.get_video_frames_generator(
        source_path=source_video_path)
    # The UI sliders report percentages; YOLO expects thresholds in [0, 1].
    # Defaults above are therefore on the percent scale too (30 -> 0.3, 70 -> 0.7).
    confidence_threshold = confidence_threshold / 100
    iou_threshold = iou_threshold / 100

    for frame in frame_generator:
        results = model(
            frame, verbose=False, conf=confidence_threshold, iou=iou_threshold
        )[0]
        detections = sv.Detections.from_ultralytics(results)
        detections = tracker.update_with_detections(detections)

        # Build "#<track_id> <class name>" labels. Iterate the class_id /
        # tracker_id arrays directly instead of tuple-unpacking Detections,
        # whose per-row tuple layout varies between supervision versions.
        labels = [
            f"#{tracker_id} {model.model.names[class_id]}"
            for class_id, tracker_id in zip(
                detections.class_id, detections.tracker_id
            )
        ]

        annotated_frame = box_annotator.annotate(
            scene=frame.copy(), detections=detections, labels=labels
        )

        yield annotated_frame


# Gradio input widgets. Both sliders report percentages (1-100), which
# process_video converts to the 0-1 range YOLO expects.
inputs_thresh = [
    gr.components.Video(type="filepath", label="Input Video"),
    # gr.components.Radio, not the legacy gr.inputs.Radio namespace (removed
    # in modern Gradio) — keeps this list consistent with the other widgets.
    gr.components.Radio(label="Detection Methods",
                        choices=[
                            "yolov5s", "yolov8s"
                        ]),
    gr.components.Slider(label="Class Probability Value",
                         value=30, minimum=1, maximum=100, step=1),
    gr.components.Slider(label="IOU threshold Value",
                         value=50, minimum=1, maximum=100, step=1),
]

# Single image output; process_video is a generator, so Gradio streams
# successive annotated frames into this component.
outputs_thresh = [
    gr.components.Image(type="numpy", label="Output")
]

tracker_tab = gr.Interface(
    process_video,
    inputs=inputs_thresh,
    outputs=outputs_thresh,
    title="supervision",
    # Example rows supply only the first two inputs; sliders fall back to
    # their default values. The files are downloaded at module import above.
    examples=[["run.mp4", "yolov5s"], ["football-video.mp4", "yolov8s"]],
    description="Gradio based demo for <a href='https://github.com/roboflow/supervision' style='text-decoration: underline' target='_blank'>roboflow/supervision</a>, We write your reusable computer vision tools."
)