Sa-m committed
Commit 77664ae
1 Parent(s): fb1a2c6

Update app.py

Files changed (1)
  1. app.py +2 -129
app.py CHANGED
@@ -13,136 +13,9 @@ def detect(inp):
 
 
 
-
- import argparse
- from pathlib import Path
- import cv2
- import torch
- import numpy as np
- from numpy import random
- from . import models
- from models.experimental import attempt_load
- from utils.datasets import LoadStreams, LoadImages
- from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier,scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
- from utils.plots import plot_one_box
- from utils.torch_utils import select_device, time_synchronized
-
-
- def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
-     # Resize and pad image while meeting stride-multiple constraints
-     shape = img.shape[:2]  # current shape [height, width]
-     if isinstance(new_shape, int):
-         new_shape = (new_shape, new_shape)
-
-     # Scale ratio (new / old)
-     r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
-     if not scaleup:  # only scale down, do not scale up (for better test mAP)
-         r = min(r, 1.0)
-
-     # Compute padding
-     ratio = r, r  # width, height ratios
-     new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
-     dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
-     if auto:  # minimum rectangle
-         dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
-     elif scaleFill:  # stretch
-         dw, dh = 0.0, 0.0
-         new_unpad = (new_shape[1], new_shape[0])
-         ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios
-
-     dw /= 2  # divide padding into 2 sides
-     dh /= 2
-
-     if shape[::-1] != new_unpad:  # resize
-         img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
-     top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
-     left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
-     img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
-     return img, ratio, (dw, dh)
-
-
- opt = {
-
-     "weights": "best.pt",  # Path to weights file default weights are for nano model
-     "yaml" : "custom.yaml",
-     "img-size": 640,  # default image size
-     "conf-thres": 0.25,  # confidence threshold for inference.
-     "iou-thres" : 0.45,  # NMS IoU threshold for inference.
-     "device" : '0',  # device to run our model i.e. 0 or 0,1,2,3 or cpu
-     "classes" : classes_to_filter  # list of classes to filter or None
-
- }
-
- def detect2(inp):
-     with torch.no_grad():
-         weights, imgsz = opt['weights'], opt['img-size']
-         set_logging()
-         device = select_device(opt['device'])
-         half = device.type != 'cpu'
-         model = attempt_load(weights, map_location=device)  # load FP32 model
-         stride = int(model.stride.max())  # model stride
-         imgsz = check_img_size(imgsz, s=stride)  # check img_size
-         if half:
-             model.half()
-
-         names = model.module.names if hasattr(model, 'module') else model.names
-         colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
-         if device.type != 'cpu':
-             model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))
-
-         img0 = cv2.imread(inp)
-         img = letterbox(img0, imgsz, stride=stride)[0]
-         img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
-         img = np.ascontiguousarray(img)
-         img = torch.from_numpy(img).to(device)
-         img = img.half() if half else img.float()  # uint8 to fp16/32
-         img /= 255.0  # 0 - 255 to 0.0 - 1.0
-         if img.ndimension() == 3:
-             img = img.unsqueeze(0)
-
-         # Inference
-         t1 = time_synchronized()
-         pred = model(img, augment= False)[0]
-
-         # Apply NMS
-         classes = None
-         if opt['classes']:
-             classes = []
-             for class_name in opt['classes']:
-                 classes.append(names.index(class_name))
-
-         if classes:
-             classes = [i for i in range(len(names)) if i not in classes]
-
-         pred = non_max_suppression(pred, opt['conf-thres'], opt['iou-thres'], classes= [17], agnostic= False)
-         t2 = time_synchronized()
-         for i, det in enumerate(pred):
-             s = ''
-             s += '%gx%g ' % img.shape[2:]  # print string
-             gn = torch.tensor(img0.shape)[[1, 0, 1, 0]]
-             if len(det):
-                 det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()
-
-                 for c in det[:, -1].unique():
-                     n = (det[:, -1] == c).sum()  # detections per class
-                     s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
-
-                 for *xyxy, conf, cls in reversed(det):
-                     label = f'{names[int(cls)]} {conf:.2f}'
-                     plot_one_box(xyxy, img0, label=label, color=colors[int(cls)], line_thickness=3)
-         return img0
-
 
 
-
-
-
-
-
+
 
 
 inp = gr.inputs.Image(type="filepath", label="Input")
@@ -150,7 +23,7 @@ outputs=gr.outputs.Image(type="pil", label="Output Image")
 #output = gr.outputs.Image(type="filepath", label="Output")
 #.outputs.Textbox()
 
- io=gr.Interface(fn=detect2, inputs=inp, outputs=output, title='Pot Hole Detection With Custom YOLOv7 ',examples=[["Examples/img-300_jpg.rf.6b7b035dff1cda092ce3dc22be8d0135.jpg"]])
+ io=gr.Interface(fn=detect, inputs=inp, outputs=output, title='Pot Hole Detection With Custom YOLOv7 ',examples=[["Examples/img-300_jpg.rf.6b7b035dff1cda092ce3dc22be8d0135.jpg"]])
 #,examples=["Examples/img-300_jpg.rf.6b7b035dff1cda092ce3dc22be8d0135.jpg"]
 io.launch(debug=True,share=False)
 
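
For context, the commit strips the inlined YOLOv7 inference code (letterbox, the opt dict, and detect2) out of app.py and points the Gradio interface at the detect function defined earlier in the file (only the hunk header reveals def detect(inp):, its body is not part of this diff). The sketch below shows how the remaining Gradio wiring presumably fits together after the change; the detect stub here is purely a placeholder that echoes the input image, and the output variable name is an assumption taken from the outputs=output argument (the hunk context suggests the app assigns the component to a variable named outputs, so the exact name may differ).

import gradio as gr
from PIL import Image

def detect(inp):
    # Stand-in only: the real detect() lives earlier in app.py and is not shown
    # in this diff. It takes an image file path and returns an image with
    # pothole detections drawn on it; here we simply return the input unchanged.
    return Image.open(inp)

inp = gr.inputs.Image(type="filepath", label="Input")
output = gr.outputs.Image(type="pil", label="Output Image")  # variable name assumed from outputs=output below

io = gr.Interface(fn=detect, inputs=inp, outputs=output,
                  title='Pot Hole Detection With Custom YOLOv7',
                  examples=[["Examples/img-300_jpg.rf.6b7b035dff1cda092ce3dc22be8d0135.jpg"]])
io.launch(debug=True, share=False)

The sketch keeps the legacy gr.inputs / gr.outputs component API that the app itself uses, so it assumes a pre-3.x (or early 3.x) Gradio release rather than the current gr.Image components.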