AlshimaaGamalAlsaied committed
Commit 86567ab · 1 Parent(s): b2eb80d
Files changed (1):
  1. app.py +9 -7
app.py CHANGED
@@ -1,6 +1,6 @@
 import gradio as gr
 #import torch
-import yolov5
+import yolov7
 import subprocess
 import tempfile
 import time
@@ -12,8 +12,10 @@ import gradio as gr
 
 
 # # Images
-# #torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg', 'zidane.jpg')
+# #torch.hub.download_url_t
+# o_file('https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg', 'zidane.jpg')
 # #torch.hub.download_url_to_file('https://raw.githubusercontent.com/obss/sahi/main/tests/data/small-vehicles1.jpeg', 'small-vehicles1.jpeg')
+
 
 def image_fn(
     image: gr.inputs.Image = None,
@@ -23,7 +25,7 @@ def image_fn(
     iou_threshold: gr.inputs.Slider = 0.45,
 ):
     """
-    YOLOv5 inference function
+    YOLOv7 inference function
     Args:
         image: Input image
         model_path: Path to the model
@@ -34,7 +36,7 @@ def image_fn(
         Rendered image
     """
 
-    model = yolov5.load(model_path, device="cpu", hf_model=True, trace=False)
+    model = yolov7.load(model_path, device="cpu", hf_model=True, trace=False)
     model.conf = conf_threshold
     model.iou = iou_threshold
     results = model([image], size=image_size)
@@ -76,7 +78,7 @@ demo_app.launch(debug=True, enable_queue=True)
 # iou_threshold: gr.inputs.Slider = 0.45,
 # ):
 # """
-# YOLOv5 inference function
+# YOLOv7 inference function
 # Args:
 # image: Input image
 # model_path: Path to the model
@@ -87,7 +89,7 @@ demo_app.launch(debug=True, enable_queue=True)
 # Rendered image
 # """
 
-# model = yolov5.load(model_path, device="cpu", hf_model=True, trace=False)
+# model = yolov7.load(model_path, device="cpu", hf_model=True, trace=False)
 # model.conf = conf_threshold
 # model.iou = iou_threshold
 # results = model([image], size=image_size)
@@ -96,7 +98,7 @@ demo_app.launch(debug=True, enable_queue=True)
 
 
 # def video_fn(model_path, video_file, conf_thres, iou_thres, start_sec, duration):
-# model = yolov5.load(model_path, device="cpu", hf_model=True, trace=False)
+# model = yolov7.load(model_path, device="cpu", hf_model=True, trace=False)
 # start_timestamp = time.strftime("%H:%M:%S", time.gmtime(start_sec))
 # end_timestamp = time.strftime("%H:%M:%S", time.gmtime(start_sec + duration))
 
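For orientation, below is a minimal sketch of the image-inference path as it stands after this commit, assembled only from the hunks shown above. The model_path, image_size, and conf_threshold defaults are not visible in the diff and are placeholders, and the final rendering/return step falls outside the shown hunks; only the yolov7.load(...) call, the model.conf / model.iou assignments, and model([image], size=image_size) are taken directly from the change.

import gradio as gr
import yolov7


def image_fn(
    image: gr.inputs.Image = None,
    model_path: str = "path/to/yolov7-model",  # placeholder; default not shown in the diff
    image_size: int = 640,                     # assumed default; not shown in the diff
    conf_threshold: float = 0.25,              # assumed default; not shown in the diff
    iou_threshold: gr.inputs.Slider = 0.45,
):
    """
    YOLOv7 inference function
    """
    # Load the YOLOv7 model on CPU, as in the committed line.
    model = yolov7.load(model_path, device="cpu", hf_model=True, trace=False)
    model.conf = conf_threshold  # confidence threshold
    model.iou = iou_threshold    # IoU threshold
    results = model([image], size=image_size)
    # The docstring promises a rendered image, but the rendering step is
    # outside the hunks shown above, so it is not reproduced here.
    return results

The commented-out video_fn stub at the end of the diff follows the same loading pattern and builds "%H:%M:%S" start/end timestamps with time.strftime and time.gmtime, presumably for the subprocess-based video clipping that the file imports but does not show in these hunks.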