tree3po commited on
Commit
747fbed
·
verified ·
1 Parent(s): 14122ff

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -15
app.py CHANGED
@@ -12,17 +12,20 @@ import spaces
12
  ver=[0,0,0,0,0,0,6,7,8,9,10,11]
13
  ltr=["n","s","m","1","x"]
14
  tsk=["","-seg","-pose","-obb","-cls"]
15
- #yolov8s.pt
16
- modin=f"yolov{ver[9]}{ltr[1]}{tsk[0]}.pt"
17
- model = YOLO(modin)
18
  annotators = ["Box","RoundBox","BoxCorner","Color",
19
  "Circle","Dot","Triangle","Elipse","Halo",
20
  "PercentageBar","Mask","Polygon","Label",
21
  "RichLabel","Icon","Crop","Blur","Pixelate","HeatMap"]
 
 
 
 
 
 
22
 
23
  @spaces.GPU
24
- def stream_object_detection(video):
25
- SUBSAMPLE=1
26
  cap = cv2.VideoCapture(video)
27
  # This means we will output mp4 videos
28
  video_codec = cv2.VideoWriter_fourcc(*"mp4v") # type: ignore
@@ -40,9 +43,10 @@ def stream_object_detection(video):
40
  frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
41
  result = model(Image.fromarray(frame))[0]
42
  detections = sv.Detections.from_ultralytics(result)
43
- print(detections)
44
 
45
- box_annotator = eval(f'sv.{annotators[0]}Annotator()')
 
46
 
47
  outp = box_annotator.annotate(
48
  scene=frame.copy(),
@@ -60,16 +64,27 @@ def stream_object_detection(video):
60
  output_video = cv2.VideoWriter(output_video_name, video_codec, desired_fps, (width, height)) # type: ignore
61
  iterating, frame = cap.read()
62
  n_frames += 1
63
-
64
- with gr.Blocks() as app:
65
- gr.HTML("<div style='font-size: 50px;font-weight: 800;'>Supervision</div><div style='font-size: 30px;'>Video Object Detection</div><div>Github:<a href='https://github.com/roboflow/supervision' target='_blank'>https://github.com/roboflow/supervision</a></div>")
66
  #inp = gr.Image(type="filepath")
67
  with gr.Row():
68
  with gr.Column():
69
- inp = gr.Video()
70
  btn = gr.Button()
71
- outp_v = gr.Video(label="Processed Video", streaming=True, autoplay=True)
72
- outp_j = gr.JSON()
 
 
 
 
 
 
 
73
 
74
- btn.click(stream_object_detection,inp,[outp_v,outp_j])
75
- app.queue().launch()
 
 
 
 
 
12
# Building blocks for the YOLO checkpoint filename f"yolov{ver}{ltr}{tsk}.pt",
# e.g. yolov8s.pt. Indices 6..11 of `ver` are the real version numbers offered
# in the UI (ver[6:]); the leading zeros keep ver[N] == N for easy defaults.
ver=[0,0,0,0,0,0,6,7,8,9,10,11]
# Model size letters: nano/small/medium/large/xlarge.
# Fixed: "1" was a typo for "l" — the large checkpoint is yolov8l.pt, not yolov81.pt.
ltr=["n","s","m","l","x"]
# Task suffixes: detect (empty), segment, pose, oriented-bbox, classify.
tsk=["","-seg","-pose","-obb","-cls"]
# Names of supervision annotator classes; the selected name is expanded to
# f"sv.{name}Annotator()" elsewhere, so each entry must match a real class.
# Fixed: "Elipse" -> "Ellipse" (sv.EllipseAnnotator is the actual class;
# the misspelling made that menu choice raise AttributeError at runtime).
annotators = ["Box","RoundBox","BoxCorner","Color",
              "Circle","Dot","Triangle","Ellipse","Halo",
              "PercentageBar","Mask","Polygon","Label",
              "RichLabel","Icon","Crop","Blur","Pixelate","HeatMap"]
19
def model_select(v,l,t):
    """Load the YOLO checkpoint named by version `v`, size letter `l`, and task suffix `t`.

    The weight filename is assembled as f"yolov{v}{l}{t}.pt" (e.g. yolov8s.pt)
    and the loaded network is stored in the module-level `model`, which the
    detection routine reads. Called from the UI whenever a dropdown changes.
    """
    global model
    # Assemble the checkpoint name, echo it for the Space logs, then load it.
    weights = f"yolov{v}{l}{t}.pt"
    print(weights)
    model = YOLO(weights)
24
+
25
 
26
  @spaces.GPU
27
+ def stream_object_detection(video,anno):
28
+ SUBSAMPLE=2
29
  cap = cv2.VideoCapture(video)
30
  # This means we will output mp4 videos
31
  video_codec = cv2.VideoWriter_fourcc(*"mp4v") # type: ignore
 
43
  frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
44
  result = model(Image.fromarray(frame))[0]
45
  detections = sv.Detections.from_ultralytics(result)
46
+ #print(detections)
47
 
48
+ box_annotator = eval(f'sv.{anno}Annotator()')
49
+ #box_annotator = eval(f'sv.{annotators[0]}Annotator()')
50
 
51
  outp = box_annotator.annotate(
52
  scene=frame.copy(),
 
64
  output_video = cv2.VideoWriter(output_video_name, video_codec, desired_fps, (width, height)) # type: ignore
65
  iterating, frame = cap.read()
66
  n_frames += 1
67
# Top-level Gradio UI: video in, annotated video + JSON out, with model controls.
#css="body{background:aliceblue;}"
with gr.Blocks(theme="Nymbo/Nymbo_Theme_5") as app:
    # Page header with a link to the supervision library.
    gr.HTML("<div style='font-size: 50px;font-weight: 800;'>SuperVision</div><div style='font-size: 30px;'>Video Object Detection</div><div>Github:<a href='https://github.com/roboflow/supervision' target='_blank'>https://github.com/roboflow/supervision</a></div>")
    #inp = gr.Image(type="filepath")
    with gr.Row():
        with gr.Column():
            # Input video and the button that kicks off detection.
            inp = gr.Video(height=300)
            btn = gr.Button()
            # Collapsed controls: pick the YOLO checkpoint (version/size/task)
            # and which supervision annotator draws the detections.
            with gr.Accordion("Controls",open=False):
                with gr.Group():
                    # allow_custom_value lets users type checkpoint parts not in the list.
                    dd1=gr.Dropdown(label="Version",choices=ver[6:],value=ver[9],allow_custom_value=True)
                    dd2=gr.Dropdown(label="Ltr", choices=ltr,value=ltr[1],allow_custom_value=True)
                    dd3=gr.Dropdown(label="Task",choices=tsk,value=tsk[0],allow_custom_value=True)
                    dd4=gr.Dropdown(label="Annotator",choices=annotators,value="Box")
        with gr.Column():
            # Streaming output: annotated video plus the raw detection data as JSON.
            outp_v = gr.Video(label="Processed Video", streaming=True, autoplay=True,height=300)
            outp_j = gr.JSON()

    # Run detection with the chosen annotator; yields to both outputs.
    btn.click(stream_object_detection,[inp,dd4],[outp_v,outp_j])
    # Load a model at page load, and reload whenever any checkpoint part changes
    # (model_select stores the result in a module-level global; no UI output).
    app.load(model_select,[dd1,dd2,dd3],None)
    dd1.change(model_select,[dd1,dd2,dd3],None)
    dd2.change(model_select,[dd1,dd2,dd3],None)
    dd3.change(model_select,[dd1,dd2,dd3],None)
# Queue is required for streaming/generator outputs on Spaces.
app.queue().launch()