Sa-m committed on
Commit 3938920
1 Parent(s): 4233cde

Update app.py

Files changed (1): app.py +90 -4
app.py CHANGED
@@ -5,17 +5,103 @@ os.system('git clone https://github.com/WongKinYiu/yolov7.git')
 def detect(inp):
-    os.system('python ./yolov7/detect.py --weights ./yolov7/runs/train/yolov7/weights/best.pt --conf 0.25 --img-size 640 --source f{inp} "--project","./yolov7/runs/detect ')
+    os.system('python ./yolov7/detect.py --weights best.pt --conf 0.25 --img-size 640 --source f{inp} "--project","./yolov7/runs/detect ')
     otp=inp.split('/')[2]
-    return f"./yolov7/runs/detect/exp2/*"
+    return f"./yolov7/runs/detect/exp/*"
 
     #f"./yolov7/runs/detect/exp/{otp}"
 
 
+opt = {
+    "weights": "best.pt",          # path to the trained weights file
+    "yaml": "custom.yaml",
+    "img-size": 640,               # default image size
+    "conf-thres": 0.25,            # confidence threshold for inference
+    "iou-thres": 0.45,             # NMS IoU threshold for inference
+    "device": '0',                 # device to run on, i.e. 0 or 0,1,2,3 or cpu
+    "classes": classes_to_filter   # list of classes to filter on, or None
+}
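opt refers to classes_to_filter, which is defined nowhere in this diff; unless it exists earlier in app.py, the module dies with a NameError on import, before the interface ever launches. A hedged sketch of the missing definition (the class name is a guess for this single-class pothole model):

    # Hypothetical: None keeps every class; a list of names restricts detections.
    classes_to_filter = None   # e.g. ['pothole']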
+def detect2(inp):
+    with torch.no_grad():
+        weights, imgsz = opt['weights'], opt['img-size']
+        set_logging()
+        device = select_device(opt['device'])
+        half = device.type != 'cpu'
+        model = attempt_load(weights, map_location=device)  # load FP32 model
+        stride = int(model.stride.max())  # model stride
+        imgsz = check_img_size(imgsz, s=stride)  # check img_size
+        if half:
+            model.half()
+
+        names = model.module.names if hasattr(model, 'module') else model.names
+        colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
+        if device.type != 'cpu':
+            model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))
+
+        img0 = cv2.imread(inp)
+        img = letterbox(img0, imgsz, stride=stride)[0]
+        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3 x imgsz x imgsz
+        img = np.ascontiguousarray(img)
+        img = torch.from_numpy(img).to(device)
+        img = img.half() if half else img.float()  # uint8 to fp16/32
+        img /= 255.0  # 0 - 255 to 0.0 - 1.0
+        if img.ndimension() == 3:
+            img = img.unsqueeze(0)
+
+        # Inference
+        t1 = time_synchronized()
+        pred = model(img, augment=False)[0]
+
+        # Apply NMS
+        classes = None
+        if opt['classes']:
+            classes = []
+            for class_name in opt['classes']:
+                classes.append(names.index(class_name))
+        if classes:
+            classes = [i for i in range(len(names)) if i not in classes]
+
+        pred = non_max_suppression(pred, opt['conf-thres'], opt['iou-thres'], classes=[17], agnostic=False)
+        t2 = time_synchronized()
+        for i, det in enumerate(pred):
+            s = ''
+            s += '%gx%g ' % img.shape[2:]  # print string
+            gn = torch.tensor(img0.shape)[[1, 0, 1, 0]]
+            if len(det):
+                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()
+
+                for c in det[:, -1].unique():
+                    n = (det[:, -1] == c).sum()  # detections per class
+                    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
+
+                for *xyxy, conf, cls in reversed(det):
+                    label = f'{names[int(cls)]} {conf:.2f}'
+                    plot_one_box(xyxy, img0, label=label, color=colors[int(cls)], line_thickness=3)
+        return img0
 
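The class-filtering logic in detect2 never takes effect: a classes list is built from opt['classes'] and inverted into its complement, but the non_max_suppression call then ignores it and hardcodes classes=[17], an index that cannot exist in a single-class pothole model. The inversion also looks backwards, since in the YOLOv7 codebase the classes argument of non_max_suppression selects the classes to keep, not to drop. A sketch of the likely intent, under those assumptions:

    # Map configured class names to indices and hand them to NMS, which keeps only these.
    classes = None
    if opt['classes']:
        classes = [names.index(name) for name in opt['classes']]
    pred = non_max_suppression(pred, opt['conf-thres'], opt['iou-thres'],
                               classes=classes, agnostic=False)

t1, t2 and the per-image summary string s are also computed but never used; either print them or drop them.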
 inp = gr.inputs.Image(type="filepath", label="Input")
-output = gr.outputs.Image(type="filepath", label="Output")
+outputs=gr.outputs.Image(type="pil", label="Output Image")
+#output = gr.outputs.Image(type="filepath", label="Output")
 #.outputs.Textbox()
 
-io=gr.Interface(fn=detect, inputs=inp, outputs=output, title='Pot Hole Detection With Custom YOLOv7 ',examples=[["Examples/img-300_jpg.rf.6b7b035dff1cda092ce3dc22be8d0135.jpg"]])
+io=gr.Interface(fn=detect2, inputs=inp, outputs=output, title='Pot Hole Detection With Custom YOLOv7 ',examples=[["Examples/img-300_jpg.rf.6b7b035dff1cda092ce3dc22be8d0135.jpg"]])
 #,examples=["Examples/img-300_jpg.rf.6b7b035dff1cda092ce3dc22be8d0135.jpg"]
 io.launch(debug=True,share=False)
107