Sa-m committed

Commit b55544c
1 Parent(s): 9ecef09

Update app.py

Files changed (1)
app.py +153 -35
app.py CHANGED
@@ -1,6 +1,13 @@
 import gradio as gr
 import os
 import torch
 
 os.system('git clone https://github.com/WongKinYiu/yolov7')
 
@@ -10,50 +17,161 @@ def detect(inp):
     otp=inp.split('/')[2]
     return f"./yolov7/runs/detect/exp/*"
 
-    #f"./yolov7/runs/detect/exp/{otp}"
-
-
-def custom(path_or_model='path/to/model.pt', autoshape=True):
-    """custom mode
-    Arguments (3 options):
-        path_or_model (str): 'path/to/model.pt'
-        path_or_model (dict): torch.load('path/to/model.pt')
-        path_or_model (nn.Module): torch.load('path/to/model.pt')['model']
-    Returns:
-        pytorch model
-    """
-    model = torch.load(path_or_model) if isinstance(path_or_model, str) else path_or_model  # load checkpoint
-    if isinstance(model, dict):
-        model = model['ema' if model.get('ema') else 'model']  # load model
-
-    hub_model = Model(model.yaml).to(next(model.parameters()).device)  # create
-    hub_model.load_state_dict(model.float().state_dict())  # load state_dict
-    hub_model.names = model.names  # class names
-    if autoshape:
-        hub_model = hub_model.autoshape()  # for file/URI/PIL/cv2/np inputs and NMS
-    device = select_device('0' if torch.cuda.is_available() else 'cpu')  # default to GPU if available
-    return hub_model.to(device)
 
-model = custom(path_or_model='best.pt')
 
-def detect1(inp):
-    #g = (size / max(inp.size)) #gain
-    #im = im.resize((int(x * g) for x in im.size), Image.ANTIALIAS) # resize
-    results = model(inp,size=640) # inference
-    results.render()  # updates results.imgs with boxes and labels
-    return Image.fromarray(results.imgs[0])
 
-inp = gr.inputs.Image(type="filepath", label="Input")
 #output=gr.outputs.Image(type="pil", label="Output Image")
-output = gr.outputs.Image(type="filepath", label="Output")
 #.outputs.Textbox()
 
-io=gr.Interface(fn=detect1, inputs=inp, outputs=output, title='Pot Hole Detection With Custom YOLOv7 ',
-                #examples=[["Examples/img-300_jpg.rf.6b7b035dff1cda092ce3dc22be8d0135.jpg"]]
 )
-#,examples=["Examples/img-300_jpg.rf.6b7b035dff1cda092ce3dc22be8d0135.jpg"]
 io.launch(debug=True,share=False)
 
 import gradio as gr
 import os
 import torch
+import torch.backends.cudnn as cudnn
+from models.experimental import attempt_load
+
+# used by Custom_detect below
+import argparse
+import random
+import time
+import cv2
+from pathlib import Path
+from PIL import Image
+
+from utils.datasets import LoadStreams, LoadImages
+from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
+from utils.plots import plot_one_box
+from utils.torch_utils import select_device, load_classifier, time_synchronized, TracedModel
 
 os.system('git clone https://github.com/WongKinYiu/yolov7')
 
     otp=inp.split('/')[2]
     return f"./yolov7/runs/detect/exp/*"
 
+
+os.system("wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt")
+os.system("wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-e6e.pt")
+os.system("wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-e6.pt")
+
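+# NOTE: the stock YOLOv7 checkpoints fetched above are not referenced again;
+# the inference code below loads the custom 'best.pt' weights instead, so these
+# downloads appear to be unused leftovers.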
+def Custom_detect(img, model='best'):
+    #if model =='Yolo_v7_Custom_model':
+    model='best'
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--weights', nargs='+', type=str, default=model+".pt", help='model.pt path(s)')
+    parser.add_argument('--source', type=str, default='Temp_file/', help='source')
+    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
+    parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
+    parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
+    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+    parser.add_argument('--view-img', action='store_true', help='display results')
+    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
+    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
+    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
+    parser.add_argument('--classes', default=1, nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
+    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
+    parser.add_argument('--augment', action='store_true', help='augmented inference')
+    parser.add_argument('--update', action='store_true', help='update all models')
+    parser.add_argument('--project', default='runs/detect', help='save results to project/name')
+    parser.add_argument('--name', default='exp', help='save results to project/name')
+    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+    parser.add_argument('--trace', action='store_true', help='trace model')
+    opt = parser.parse_args()
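+    # argparse serves here only as a container of default options: when the
+    # Space is launched as `python app.py`, sys.argv carries no extra flags,
+    # so every value falls back to the defaults above.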
+    os.makedirs("Temp_file", exist_ok=True)  # source directory must exist before saving
+    img.save("Temp_file/test.jpg")
+    source, weights, view_img, save_txt, imgsz, trace = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size, opt.trace
+    save_img = True
+    webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
+        ('rtsp://', 'rtmp://', 'http://', 'https://'))
+    save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))
+    (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)
+    set_logging()
+    device = select_device(opt.device)
+    half = device.type != 'cpu'
+    model = attempt_load(weights, map_location=device)
+    stride = int(model.stride.max())
+    imgsz = check_img_size(imgsz, s=stride)
+    if trace:
+        model = TracedModel(model, device, opt.img_size)
+    if half:
+        model.half()
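+    # FP16 inference is only enabled on CUDA devices; on CPU the model stays in FP32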
+
+    classify = False
+    if classify:
+        modelc = load_classifier(name='resnet101', n=2)  # initialize
+        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])
+        modelc.to(device).eval()
+    vid_path, vid_writer = None, None
+    if webcam:
+        view_img = check_imshow()
+        cudnn.benchmark = True
+        dataset = LoadStreams(source, img_size=imgsz, stride=stride)
+    else:
+        dataset = LoadImages(source, img_size=imgsz, stride=stride)
+    names = model.module.names if hasattr(model, 'module') else model.names
+    colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
+    if device.type != 'cpu':
+        model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))
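+        # one throwaway forward pass on a zero tensor warms up the CUDA kernels,
+        # so the first real inference is not slowed by initialisation cost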
+    t0 = time.time()
+    for path, img, im0s, vid_cap in dataset:
+        img = torch.from_numpy(img).to(device)
+        img = img.half() if half else img.float()
+        img /= 255.0
+        if img.ndimension() == 3:
+            img = img.unsqueeze(0)
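+        # uint8 pixels are rescaled from 0-255 to 0.0-1.0 and a leading batch
+        # dimension is added so the tensor matches the model's NCHW input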
+
+        # Inference
+        t1 = time_synchronized()
+        pred = model(img, augment=opt.augment)[0]
+
+        pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
+        t2 = time_synchronized()
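+        # NMS keeps detections above conf-thres (0.25) and suppresses overlapping
+        # boxes whose IoU exceeds iou-thres (0.45)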
+
+        # Apply Classifier
+        if classify:
+            pred = apply_classifier(pred, modelc, img, im0s)
+
+        for i, det in enumerate(pred):
+            if webcam:
+                p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
+            else:
+                p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
+
+            p = Path(p)
+            save_path = str(save_dir / p.name)
+            txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # img.txt
+            s += '%gx%g ' % img.shape[2:]
+            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]
+            if len(det):
+                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
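+                # predictions were made on the letterboxed 640px tensor; scale_coords
+                # maps the boxes back to the original image resolution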
+
+                for c in det[:, -1].unique():
+                    n = (det[:, -1] == c).sum()
+                    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "
+
+                for *xyxy, conf, cls in reversed(det):
+                    if save_txt:
+                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()
+                        line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh)
+                        with open(txt_path + '.txt', 'a') as f:
+                            f.write(('%g ' * len(line)).rstrip() % line + '\n')
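+                        # labels are written in YOLO format: class followed by xywh
+                        # coordinates normalised by the gn (w, h, w, h) tensor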
+
+                    if save_img or view_img:
+                        label = f'{names[int(cls)]} {conf:.2f}'
+                        plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)
+            if view_img:
+                cv2.imshow(str(p), im0)
+                cv2.waitKey(1)
+
+            if save_img:
+                if dataset.mode == 'image':
+                    cv2.imwrite(save_path, im0)
+                else:
+                    if vid_path != save_path:
+                        vid_path = save_path
+                        if isinstance(vid_writer, cv2.VideoWriter):
+                            vid_writer.release()
+                        if vid_cap:
+                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
+                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+                        else:
+                            fps, w, h = 30, im0.shape[1], im0.shape[0]
+                        save_path += '.mp4'
+                        vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+                    vid_writer.write(im0)
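+                    # a fresh VideoWriter is opened lazily whenever the output
+                    # path changes, releasing the previous writer first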
+
+    if save_txt or save_img:
+        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
+
+    print(f'Done. ({time.time() - t0:.3f}s)')
+
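+    # im0 is a BGR array (OpenCV convention); [:,:,::-1] reverses the channel
+    # order to RGB before converting to a PIL image for Gradio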
+    return Image.fromarray(im0[:,:,::-1])
+inp = gr.Image(type="pil")
+#gr.inputs.Image(type="filepath", label="Input")
 #output=gr.outputs.Image(type="pil", label="Output Image")
+output = gr.Image(type="pil")
+#gr.outputs.Image(type="filepath", label="Output")
 #.outputs.Textbox()
 
+examples=[["Examples/img-300_jpg.rf.6b7b035dff1cda092ce3dc22be8d0135.jpg"]]
+
+io=gr.Interface(fn=Custom_detect, inputs=inp, outputs=output, title='Pot Hole Detection With Custom YOLOv7',
+                examples=examples
 )
 io.launch(debug=True,share=False)