wangfangyuan committed
Commit 8a60c6f
1 Parent(s): d857ef1

Upload 10 files

Files changed (6)
  1. coco.yaml +19 -1
  2. general_json2yolo.py +8 -3
  3. onnx_eval.py +45 -10
  4. onnx_inference.py +17 -5
  5. utils.py +349 -66
  6. yolov5s_qat.onnx +2 -2
coco.yaml CHANGED
@@ -25,4 +25,22 @@ names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 't
         'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
         'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
         'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-        'hair drier', 'toothbrush']  # class names
+        'hair drier', 'toothbrush']  # class names
+
+
+# Download script/URL (optional)
+download: |
+  from utils.general import download, Path
+
+  # Download labels
+  segments = False  # segment or box labels
+  dir = Path(yaml['path'])  # dataset root dir
+  url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
+  urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')]  # labels
+  download(urls, dir=dir.parent)
+
+  # Download data
+  urls = ['http://images.cocodataset.org/zips/train2017.zip',  # 19G, 118k images
+          'http://images.cocodataset.org/zips/val2017.zip',  # 1G, 5k images
+          'http://images.cocodataset.org/zips/test2017.zip']  # 7G, 41k images (optional)
+  download(urls, dir=dir / 'images', threads=3)
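A note on the new `download:` block: as the utils.py hunks further down show, check_dataset() consumes this field by exec()-ing the script with the parsed YAML dict bound to the name `yaml`, which is why the script can read yaml['path'] directly. A minimal sketch of that mechanism (running the real script additionally needs the YOLOv5 repo on sys.path so `utils.general.download` resolves):

    import yaml as yaml_loader

    with open('coco.yaml') as f:
        data = yaml_loader.safe_load(f)

    script = data.get('download')
    if isinstance(script, str) and not script.startswith(('http', 'bash ')):
        exec(script, {'yaml': data})  # the 'python script' branch of check_dataset()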
general_json2yolo.py CHANGED
@@ -1,9 +1,12 @@
+import contextlib
 import json
+
+import cv2
+import pandas as pd
+from PIL import Image
 from collections import defaultdict
 import sys
 import pathlib
-import numpy as np
-from tqdm import tqdm
 CURRENT_DIR = pathlib.Path(__file__).parent
 sys.path.append(str(CURRENT_DIR))
 from utils import *
@@ -15,7 +18,7 @@ def convert_coco_json(json_dir='../coco/annotations/', use_segments=False, cls91
 
     # Import json
     for json_file in sorted(Path(json_dir).resolve().glob('*.json')):
-        if not str(json_file).endswith("instances_val2017.json"):
+        if str(json_file).split("/")[-1] != "instances_val2017.json":
             continue
         fn = Path(save_dir) / 'labels' / json_file.stem.replace('instances_', '')  # folder name
         fn.mkdir()
@@ -139,3 +142,5 @@ if __name__ == '__main__':
     convert_coco_json('./datasets/coco/annotations',  # directory with *.json
                       use_segments=True,
                       cls91to80=True)
+    # zip results
+    # os.system('zip -r ../coco.zip ../coco')
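The filter change here is substantive: `endswith("instances_val2017.json")` would also match a file like `x_instances_val2017.json`, while the exact basename comparison converts only the val2017 instance annotations. The `split("/")` does assume POSIX separators, though; a pathlib-based check would be OS-agnostic. A suggestion, not part of the commit:

    from pathlib import Path

    def is_val2017_annotations(json_file) -> bool:
        # Path.name is the basename on any OS; split('/') breaks on Windows paths
        return Path(json_file).name == "instances_val2017.json"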
onnx_eval.py CHANGED
@@ -3,9 +3,11 @@ import json
 import os
 import sys
 from pathlib import Path
+from threading import Thread
+from functools import partial
+import torch
 import onnxruntime
 import numpy as np
-import torch
 from tqdm import tqdm
 from pycocotools.coco import COCO
 from pycocotools.cocoeval import COCOeval
@@ -20,7 +22,7 @@ import pathlib
 CURRENT_DIR = pathlib.Path(__file__).parent
 sys.path.append(str(CURRENT_DIR))
 from utils import create_dataloader, coco80_to_coco91_class, check_dataset, box_iou, non_max_suppression, post_process, scale_coords, xyxy2xywh, xywh2xyxy, \
-    increment_path, colorstr, ap_per_class
+    increment_path, colorstr, ap_per_class, ConfusionMatrix, output_to_target, plot_val_study, check_yaml
 
 
 def save_one_txt(predn, save_conf, shape, file):
@@ -75,8 +77,11 @@ def run(data,
         imgsz=640,  # inference size (pixels)
         conf_thres=0.001,  # confidence threshold
         iou_thres=0.6,  # NMS IoU threshold
-        task='val',  # val, test
+        task='val',  # train, val, test, speed or study
+        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
         single_cls=False,  # treat as single-class dataset
+        augment=False,  # augmented inference
+        verbose=False,  # verbose output
         save_txt=False,  # save results to *.txt
         save_hybrid=False,  # save label+prediction hybrid results to *.txt
         save_conf=False,  # save confidences in --save-txt labels
@@ -85,7 +90,21 @@ def run(data,
         name='exp',  # save to project/name
         exist_ok=False,  # existing project/name ok, do not increment
         half=True,  # use FP16 half-precision inference
+        nndct_quant=False,
+        nndct_bitwidth=8,
+        model=None,
+        dataloader=None,
+        save_dir=Path(''),
         plots=False,
+        callbacks=None,
+        compute_loss=None,
+        quant_mode='calib',
+        dump_xmodel=False,
+        dump_onnx=False,
+        dump_torch_script=False,
+        nndct_stat=0,
+        with_postprocess=False,
+        onnx_runtime=True,
         onnx_weights="./yolov5s_qat.onnx",
         ipu=False,
         provider_config='',
@@ -118,7 +137,7 @@
 
     # Dataloader
     pad = 0.0 if task == 'speed' else 0.5
-    task = 'val'  # path to val/test images
+    task = task if task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
     dataloader = create_dataloader(data[task], imgsz, batch_size, gs, single_cls, pad=pad, rect=False,
                                    prefix=colorstr(f'{task}: '), workers=8)[0]
 
@@ -144,9 +163,10 @@
         img /= 255.0  # 0 - 255 to 0.0 - 1.0
         targets = targets.to(device)
         nb, _, height, width = img.shape  # batch size, channels, height, width
-
-        outputs = onnx_model.run(None, {onnx_model.get_inputs()[0].name: img.cpu().numpy()})
-        outputs = [torch.tensor(item).to(device) for item in outputs]
+        # outputs = onnx_model.run(None, {onnx_model.get_inputs()[0].name: img.cpu().numpy()})
+        outputs = onnx_model.run(None, {onnx_model.get_inputs()[0].name: img.permute(0, 2, 3, 1).cpu().numpy()})
+        # outputs = [torch.tensor(item).to(device) for item in outputs]
+        outputs = [torch.tensor(item).permute(0, 3, 1, 2).to(device) for item in outputs]
         outputs = post_process(outputs)
         out, train_out = outputs[0], outputs[1]
 
@@ -204,6 +224,11 @@
     pf = '%20s' + '%11i' * 2 + '%11.3g' * 4  # print format
     print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
 
+    # Print results per class
+    if (verbose or (nc < 50)) and nc > 1 and len(stats):
+        for i, c in enumerate(ap_class):
+            print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
+
     # Save JSON
     if save_json and len(jdict):
         w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ''  # weights
@@ -236,14 +261,17 @@
 
 def parse_opt():
     parser = argparse.ArgumentParser()
-    parser.add_argument('--data', type=str, default='./coco.yaml', help='path to your dataset.yaml')
+    parser.add_argument('--data', type=str, default='./coco.yaml', help='dataset.yaml path')
     parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)')
     parser.add_argument('--batch-size', type=int, default=1, help='batch size')
     parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
     parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
     parser.add_argument('--iou-thres', type=float, default=0.65, help='NMS IoU threshold')
-    parser.add_argument('--task', default='val', help='val, test')
+    parser.add_argument('--task', default='val', help='train, val, test, speed or study')
+    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
     parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
+    parser.add_argument('--augment', action='store_true', help='augmented inference')
+    parser.add_argument('--verbose', action='store_true', help='report mAP by class')
     parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
     parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
@@ -252,7 +280,14 @@ def parse_opt():
     parser.add_argument('--name', default='exp', help='save to project/name')
     parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
     parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
-    parser.add_argument('-m', '--onnx_weights', default='./yolov5s_qat.onnx', nargs='+', type=str, help='path to your onnx_weights')
+    parser.add_argument('--quant_mode', default='calib', help='nndct quant mode')
+    parser.add_argument('--nndct_quant', action='store_true', help='use nndct quant model for inference')
+    parser.add_argument('--dump_xmodel', action='store_true', help='dump nndct xmodel')
+    parser.add_argument('--dump_onnx', action='store_true', help='dump nndct onnx xmodel')
+    parser.add_argument('--with_postprocess', action='store_true', help='nndct model with postprocess')
+    parser.add_argument('--onnx_runtime', default=True, action='store_true', help='onnx_runtime')
+    parser.add_argument('-m', '--onnx_weights', default='./yolov5s_qat.onnx', nargs='+', type=str, help='onnx_weights')
+    parser.add_argument('--nndct_stat', type=int, required=False, default=0)
     parser.add_argument('--ipu', action='store_true', help='flag for ryzen ai')
     parser.add_argument('--provider_config', default='', type=str, help='provider config for ryzen ai')
    opt = parser.parse_args()
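The functional change buried in these hunks is the tensor layout: the updated yolov5s_qat.onnx evidently takes NHWC input and returns NHWC feature maps (the layout IPU/Ryzen AI flows prefer), so the evaluator now permutes before session.run and permutes back before post_process(). A minimal sketch of that round-trip, assuming an NHWC-exported model:

    import torch

    def run_nhwc(session, img_nchw):
        # img_nchw is a torch tensor; NCHW -> NHWC for the ONNX Runtime session
        nhwc = img_nchw.permute(0, 2, 3, 1).cpu().numpy()
        outs = session.run(None, {session.get_inputs()[0].name: nhwc})
        # NHWC -> NCHW so post_process() sees the layout it expects
        return [torch.tensor(o).permute(0, 3, 1, 2) for o in outs]

This is presumably also why yolov5s_qat.onnx itself changes in this commit (new oid and size below): the old NCHW model would shape-check against the permuted input. On Ryzen AI, the session would be created with --ipu and --provider_config pointing at the Vitis AI execution-provider config; the exact config file is deployment-specific.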
onnx_inference.py CHANGED
@@ -1,15 +1,26 @@
-import onnxruntime
 import numpy as np
+import onnx
+import copy
 import cv2
+from pathlib import Path
+import matplotlib.pyplot as plt
 import torch
+import onnxruntime
+import time
+import torchvision
+import re
 import sys
 import pathlib
 CURRENT_DIR = pathlib.Path(__file__).parent
 sys.path.append(str(CURRENT_DIR))
 import argparse
 from utils import (
+    is_ascii,
+    is_chinese,
     letterbox,
+    xywh2xyxy,
     non_max_suppression,
+    clip_coords,
     scale_coords,
     Annotator,
     Colors,
@@ -55,21 +66,21 @@ def make_parser():
         "--model",
         type=str,
         default="./yolov5s_qat.onnx",
-        help="input your onnx model.",
+        help="Input your onnx model.",
     )
     parser.add_argument(
         "-i",
         "--image_path",
         type=str,
         default='./demo.jpg',
-        help="path to your input image.",
+        help="Path to your input image.",
     )
     parser.add_argument(
         "-o",
         "--output_path",
         type=str,
         default='./demo_infer.jpg',
-        help="path to your output directory.",
+        help="Path to your output directory.",
     )
     parser.add_argument(
         '--ipu',
@@ -113,8 +124,9 @@
 
     img0 = cv2.imread(path)
     img = pre_process(img0)
-    onnx_input = {onnx_model.get_inputs()[0].name: img}
+    onnx_input = {onnx_model.get_inputs()[0].name: img.transpose(0, 2, 3, 1)}
     onnx_output = onnx_model.run(None, onnx_input)
+    onnx_output = [torch.tensor(item).permute(0, 3, 1, 2) for item in onnx_output]
     onnx_output = post_process(onnx_output)
     pred = non_max_suppression(
         onnx_output[0], conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det
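Same layout change as in onnx_eval.py, but note the mixed types: pre_process() here evidently returns a numpy array, so the input uses ndarray.transpose, while the outputs are wrapped in torch tensors and permuted. The two spellings perform the same NCHW -> NHWC reorder; a quick sketch under the assumption that pre_process() yields a (1, 3, 640, 640) float32 array:

    import numpy as np
    import torch

    x = np.zeros((1, 3, 640, 640), dtype=np.float32)  # assumed pre_process() output shape
    assert x.transpose(0, 2, 3, 1).shape == (1, 640, 640, 3)  # numpy: transpose takes axes
    assert tuple(torch.from_numpy(x).permute(0, 2, 3, 1).shape) == (1, 640, 640, 3)  # torch: permute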
utils.py CHANGED
@@ -1,11 +1,16 @@
+import onnxruntime
 import numpy as np
+import onnx
+import copy
 import cv2
 from pathlib import Path
+import matplotlib.pyplot as plt
 import torch
 import time
 import torchvision
 import re
 import glob
+from contextlib import contextmanager
 from torch.utils.data import Dataset
 import yaml
 import os
@@ -15,6 +20,7 @@ from itertools import repeat
 import logging
 from PIL import Image, ExifTags
 import hashlib
+import shutil
 import sys
 import pathlib
 CURRENT_DIR = pathlib.Path(__file__).parent
@@ -277,40 +283,75 @@ class Annotator:
             im.data.contiguous
         ), "Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images."
         self.pil = pil or not is_ascii(example) or is_chinese(example)
-        self.im = im
+        if self.pil:  # use PIL
+            self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
+            self.draw = ImageDraw.Draw(self.im)
+            self.font = check_font(
+                font="Arial.Unicode.ttf" if is_chinese(example) else font,
+                size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12),
+            )
+        else:  # use cv2
+            self.im = im
         self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2)  # line width
 
     def box_label(
         self, box, label="", color=(128, 128, 128), txt_color=(255, 255, 255)
     ):
         # Add one xyxy box to image with label
-        p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
-        cv2.rectangle(
-            self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA
-        )
-        if label:
-            tf = max(self.lw - 1, 1)  # font thickness
-            w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[
-                0
-            ]  # text width, height
-            outside = p1[1] - h - 3 >= 0  # label fits outside box
-            p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3
-            cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA)  # filled
-            cv2.putText(
-                self.im,
-                label,
-                (p1[0], p1[1] - 2 if outside else p1[1] + h + 2),
-                0,
-                self.lw / 3,
-                txt_color,
-                thickness=tf,
-                lineType=cv2.LINE_AA,
-            )
+        if self.pil or not is_ascii(label):
+            self.draw.rectangle(box, width=self.lw, outline=color)  # box
+            if label:
+                w, h = self.font.getsize(label)  # text width, height
+                outside = box[1] - h >= 0  # label fits outside box
+                self.draw.rectangle(
+                    [
+                        box[0],
+                        box[1] - h if outside else box[1],
+                        box[0] + w + 1,
+                        box[1] + 1 if outside else box[1] + h + 1,
+                    ],
+                    fill=color,
+                )
+                # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls')  # for PIL>8.0
+                self.draw.text(
+                    (box[0], box[1] - h if outside else box[1]),
+                    label,
+                    fill=txt_color,
+                    font=self.font,
+                )
+        else:  # cv2
+            p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
+            cv2.rectangle(
+                self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA
+            )
+            if label:
+                tf = max(self.lw - 1, 1)  # font thickness
+                w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[
+                    0
+                ]  # text width, height
+                outside = p1[1] - h - 3 >= 0  # label fits outside box
+                p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3
+                cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA)  # filled
+                cv2.putText(
+                    self.im,
+                    label,
+                    (p1[0], p1[1] - 2 if outside else p1[1] + h + 2),
+                    0,
+                    self.lw / 3,
+                    txt_color,
+                    thickness=tf,
+                    lineType=cv2.LINE_AA,
+                )
 
     def rectangle(self, xy, fill=None, outline=None, width=1):
         # Add rectangle to image (PIL-only)
         self.draw.rectangle(xy, fill, outline, width)
 
+    def text(self, xy, text, txt_color=(255, 255, 255)):
+        # Add text to image (PIL-only)
+        w, h = self.font.getsize(text)  # text width, height
+        self.draw.text((xy[0], xy[1] - h + 1), text, fill=txt_color, font=self.font)
+
     def result(self):
         # Return annotated image as array
         return np.asarray(self.im)
@@ -354,19 +395,32 @@ class Colors:
         return tuple(int(h[1 + i : 1 + i + 2], 16) for i in (0, 2, 4))
 
 
+@contextmanager
+def torch_distributed_zero_first(local_rank: int):
+    """
+    Decorator to make all processes in distributed training wait for each local_master to do something.
+    """
+    if local_rank not in [-1, 0]:
+        dist.barrier(device_ids=[local_rank])
+    yield
+    if local_rank == 0:
+        dist.barrier(device_ids=[0])
+
+
 def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0,
                       rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix=''):
-
-    dataset = LoadImagesAndLabels(path, imgsz, batch_size,
-                                  augment=augment,  # augment images
-                                  hyp=hyp,  # augmentation hyperparameters
-                                  rect=rect,  # rectangular training
-                                  cache_images=cache,
-                                  single_cls=single_cls,
-                                  stride=int(stride),
-                                  pad=pad,
-                                  image_weights=image_weights,
-                                  prefix=prefix)
+    # Make sure only the first process in DDP process the dataset first, and the following others can use the cache
+    with torch_distributed_zero_first(rank):
+        dataset = LoadImagesAndLabels(path, imgsz, batch_size,
+                                      augment=augment,  # augment images
+                                      hyp=hyp,  # augmentation hyperparameters
+                                      rect=rect,  # rectangular training
+                                      cache_images=cache,
+                                      single_cls=single_cls,
+                                      stride=int(stride),
+                                      pad=pad,
+                                      image_weights=image_weights,
+                                      prefix=prefix)
 
     batch_size = min(batch_size, len(dataset))
     nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, workers])  # number of workers
@@ -378,7 +432,7 @@ def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=Non
                                              num_workers=nw,
                                              sampler=sampler,
                                              pin_memory=True,
-                                             collate_fn=LoadImagesAndLabels.collate_fn)
+                                             collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
     return dataloader, dataset
 
 
@@ -393,29 +447,32 @@ class LoadImagesAndLabels(Dataset):
         self.hyp = hyp
         self.image_weights = image_weights
         self.rect = False if image_weights else rect
-        self.mosaic = False  # load 4 images at a time into a mosaic (only during training)
+        self.mosaic = self.augment and not self.rect  # load 4 images at a time into a mosaic (only during training)
         self.mosaic_border = [-img_size // 2, -img_size // 2]
         self.stride = stride
         self.path = path
-        self.albumentations = None
-
-        f = []  # image files
-        for p in path if isinstance(path, list) else [path]:
-            p = Path(p)  # os-agnostic
-            if p.is_dir():  # dir
-                f += glob.glob(str(p / '**' / '*.*'), recursive=True)
-                # f = list(p.rglob('**/*.*'))  # pathlib
-            elif p.is_file():  # file
-                with open(p, 'r') as t:
-                    t = t.read().strip().splitlines()
-                    parent = str(p.parent) + os.sep
-                    f += [x.replace('./', parent) if x.startswith('./') else x for x in t]  # local to global path
-                    # f += [p.parent / x.lstrip(os.sep) for x in t]  # local to global path (pathlib)
-            else:
-                raise Exception(f'{prefix}{p} does not exist')
-        self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS])
-        # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats])  # pathlib
-        assert self.img_files, f'{prefix}No images found'
+        self.albumentations = Albumentations() if augment else None
+
+        try:
+            f = []  # image files
+            for p in path if isinstance(path, list) else [path]:
+                p = Path(p)  # os-agnostic
+                if p.is_dir():  # dir
+                    f += glob.glob(str(p / '**' / '*.*'), recursive=True)
+                    # f = list(p.rglob('**/*.*'))  # pathlib
+                elif p.is_file():  # file
+                    with open(p, 'r') as t:
+                        t = t.read().strip().splitlines()
+                        parent = str(p.parent) + os.sep
+                        f += [x.replace('./', parent) if x.startswith('./') else x for x in t]  # local to global path
+                        # f += [p.parent / x.lstrip(os.sep) for x in t]  # local to global path (pathlib)
+                else:
+                    raise Exception(f'{prefix}{p} does not exist')
+            self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS])
+            # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats])  # pathlib
+            assert self.img_files, f'{prefix}No images found'
+        except Exception as e:
+            raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}')
 
         # Check cache
         self.label_files = img2label_paths(self.img_files)  # labels
@@ -434,6 +491,7 @@
             tqdm(None, desc=prefix + d, total=n, initial=n)  # display cache results
             if cache['msgs']:
                 logging.info('\n'.join(cache['msgs']))  # display warnings
+        assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}'
 
         # Read cache
         [cache.pop(k) for k in ('hash', 'version', 'msgs')]  # remove items
@@ -520,6 +578,8 @@
         pbar.close()
         if msgs:
             logging.info('\n'.join(msgs))
+        if nf == 0:
+            logging.info(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}')
         x['hash'] = get_hash(self.label_files + self.img_files)
         x['results'] = nf, nm, ne, nc, len(self.img_files)
         x['msgs'] = msgs  # warnings
@@ -545,24 +605,64 @@
         index = self.indices[index]  # linear, shuffled, or image_weights
 
         hyp = self.hyp
-        mosaic = self.mosaic
+        mosaic = self.mosaic and random.random() < hyp['mosaic']
+        if mosaic:
+            # Load mosaic
+            img, labels = load_mosaic(self, index)
+            shapes = None
 
-        # Load image
-        img, (h0, w0), (h, w) = load_image(self, index)
+            # MixUp augmentation
+            if random.random() < hyp['mixup']:
+                img, labels = mixup(img, labels, *load_mosaic(self, random.randint(0, self.n - 1)))
 
-        # Letterbox
-        shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
-        img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
-        shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling
-
-        labels = self.labels[index].copy()
-        if labels.size:  # normalized xywh to pixel xyxy format
-            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
+        else:
+            # Load image
+            img, (h0, w0), (h, w) = load_image(self, index)
+
+            # Letterbox
+            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
+            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
+            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling
+
+            labels = self.labels[index].copy()
+            if labels.size:  # normalized xywh to pixel xyxy format
+                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
+
+            if self.augment:
+                img, labels = random_perspective(img, labels,
+                                                 degrees=hyp['degrees'],
+                                                 translate=hyp['translate'],
+                                                 scale=hyp['scale'],
+                                                 shear=hyp['shear'],
+                                                 perspective=hyp['perspective'])
 
         nl = len(labels)  # number of labels
         if nl:
             labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3)
 
+        if self.augment:
+            # Albumentations
+            img, labels = self.albumentations(img, labels)
+            nl = len(labels)  # update after albumentations
+
+            # HSV color-space
+            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
+
+            # Flip up-down
+            if random.random() < hyp['flipud']:
+                img = np.flipud(img)
+                if nl:
+                    labels[:, 2] = 1 - labels[:, 2]
+
+            # Flip left-right
+            if random.random() < hyp['fliplr']:
+                img = np.fliplr(img)
+                if nl:
+                    labels[:, 1] = 1 - labels[:, 1]
+
+            # Cutouts
+            # labels = cutout(img, labels, p=0.5)
+
         labels_out = torch.zeros((nl, 6))
         if nl:
             labels_out[:, 1:] = torch.from_numpy(labels)
@@ -580,6 +680,32 @@
             l[:, 0] = i  # add target image index for build_targets()
         return torch.stack(img, 0), torch.cat(label, 0), path, shapes
 
+    @staticmethod
+    def collate_fn4(batch):
+        img, label, path, shapes = zip(*batch)  # transposed
+        n = len(shapes) // 4
+        img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
+
+        ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
+        wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
+        s = torch.tensor([[1, 1, .5, .5, .5, .5]])  # scale
+        for i in range(n):  # zidane torch.zeros(16,3,720,1280)  # BCHW
+            i *= 4
+            if random.random() < 0.5:
+                im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
+                    0].type(img[i].type())
+                l = label[i]
+            else:
+                im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
+                l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
+            img4.append(im)
+            label4.append(l)
+
+        for i, l in enumerate(label4):
+            l[:, 0] = i  # add target image index for build_targets()
+
+        return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
+
 
 def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)
     # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
@@ -599,6 +725,10 @@ def check_dataset(data, autodownload=True):
 
     # Download (optional)
     extract_dir = ''
+    if isinstance(data, (str, Path)) and str(data).endswith('.zip'):  # i.e. gs://bucket/dir/coco128.zip
+        download(data, dir='../datasets', unzip=True, delete=False, curl=False, threads=1)
+        data = next((Path('../datasets') / Path(data).stem).rglob('*.yaml'))
+        extract_dir, autodownload = data.parent, False
 
     # Read yaml (optional)
     if isinstance(data, (str, Path)):
@@ -619,6 +749,24 @@
         val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])]  # val path
         if not all(x.exists() for x in val):
             print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
+            if s and autodownload:  # download script
+                root = path.parent if 'path' in data else '..'  # unzip directory i.e. '../'
+                if s.startswith('http') and s.endswith('.zip'):  # URL
+                    f = Path(s).name  # filename
+                    print(f'Downloading {s} to {f}...')
+                    torch.hub.download_url_to_file(s, f)
+                    Path(root).mkdir(parents=True, exist_ok=True)  # create root
+                    ZipFile(f).extractall(path=root)  # unzip
+                    Path(f).unlink()  # remove zip
+                    r = None  # success
+                elif s.startswith('bash '):  # bash script
+                    print(f'Running {s} ...')
+                    r = os.system(s)
+                else:  # python script
+                    r = exec(s, {'yaml': data})  # return None
+                print(f"Dataset autodownload {f'success, saved to {root}' if r in (0, None) else 'failure'}\n")
+            else:
+                raise Exception('Dataset not found.')
 
     return data  # dictionary
 
@@ -743,6 +891,11 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names
 
     # Compute F1 (harmonic mean of precision and recall)
    f1 = 2 * p * r / (p + r + 1e-16)
+    if plot:
+        plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names)
+        plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1')
+        plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision')
+        plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall')
 
     i = f1.mean(0).argmax()  # max F1 index
     return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32')
@@ -776,6 +929,84 @@
     return ap, mpre, mrec
 
 
+class ConfusionMatrix:
+    # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix
+    def __init__(self, nc, conf=0.25, iou_thres=0.45):
+        self.matrix = np.zeros((nc + 1, nc + 1))
+        self.nc = nc  # number of classes
+        self.conf = conf
+        self.iou_thres = iou_thres
+
+    def process_batch(self, detections, labels):
+        """
+        Return intersection-over-union (Jaccard index) of boxes.
+        Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
+        Arguments:
+            detections (Array[N, 6]), x1, y1, x2, y2, conf, class
+            labels (Array[M, 5]), class, x1, y1, x2, y2
+        Returns:
+            None, updates confusion matrix accordingly
+        """
+        detections = detections[detections[:, 4] > self.conf]
+        gt_classes = labels[:, 0].int()
+        detection_classes = detections[:, 5].int()
+        iou = box_iou(labels[:, 1:], detections[:, :4])
+
+        x = torch.where(iou > self.iou_thres)
+        if x[0].shape[0]:
+            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()
+            if x[0].shape[0] > 1:
+                matches = matches[matches[:, 2].argsort()[::-1]]
+                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
+                matches = matches[matches[:, 2].argsort()[::-1]]
+                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
+        else:
+            matches = np.zeros((0, 3))
+
+        n = matches.shape[0] > 0
+        m0, m1, _ = matches.transpose().astype(np.int16)
+        for i, gc in enumerate(gt_classes):
+            j = m0 == i
+            if n and sum(j) == 1:
+                self.matrix[detection_classes[m1[j]], gc] += 1  # correct
+            else:
+                self.matrix[self.nc, gc] += 1  # background FP
+
+        if n:
+            for i, dc in enumerate(detection_classes):
+                if not any(m1 == i):
+                    self.matrix[dc, self.nc] += 1  # background FN
+
+    def matrix(self):
+        return self.matrix
+
+    def plot(self, normalize=True, save_dir='', names=()):
+        try:
+            import seaborn as sn
+
+            array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-6) if normalize else 1)  # normalize columns
+            array[array < 0.005] = np.nan  # don't annotate (would appear as 0.00)
+
+            fig = plt.figure(figsize=(12, 9), tight_layout=True)
+            sn.set(font_scale=1.0 if self.nc < 50 else 0.8)  # for label size
+            labels = (0 < len(names) < 99) and len(names) == self.nc  # apply names to ticklabels
+            with warnings.catch_warnings():
+                warnings.simplefilter('ignore')  # suppress empty matrix RuntimeWarning: All-NaN slice encountered
+                sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True,
+                           xticklabels=names + ['background FP'] if labels else "auto",
+                           yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1))
+            fig.axes[0].set_xlabel('True')
+            fig.axes[0].set_ylabel('Predicted')
+            fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)
+            plt.close()
+        except Exception as e:
+            print(f'WARNING: ConfusionMatrix plot failure: {e}')
+
+    def print(self):
+        for i in range(self.nc + 1):
+            print(' '.join(map(str, self.matrix[i])))
+
+
 def output_to_target(output):
     # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
     targets = []
@@ -785,6 +1016,43 @@
     return np.array(targets)
 
 
+def plot_val_study(file='', dir='', x=None):  # from utils.plots import *; plot_val_study()
+    # Plot file=study.txt generated by val.py (or plot all study*.txt in dir)
+    save_dir = Path(file).parent if file else Path(dir)
+    plot2 = False  # plot additional results
+    if plot2:
+        ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel()
+
+    fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
+    # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:
+    for f in sorted(save_dir.glob('study*.txt')):
+        y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
+        x = np.arange(y.shape[1]) if x is None else np.array(x)
+        if plot2:
+            s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)']
+            for i in range(7):
+                ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
+                ax[i].set_title(s[i])
+
+        j = y[3].argmax() + 1
+        ax2.plot(y[5, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8,
+                 label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))
+
+    ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
+             'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
+
+    ax2.grid(alpha=0.2)
+    ax2.set_yticks(np.arange(20, 60, 5))
+    ax2.set_xlim(0, 57)
+    ax2.set_ylim(25, 55)
+    ax2.set_xlabel('GPU Speed (ms/img)')
+    ax2.set_ylabel('COCO AP val')
+    ax2.legend(loc='lower right')
+    f = save_dir / 'study.png'
+    print(f'Saving {f}...')
+    plt.savefig(f, dpi=300)
+
+
 def check_yaml(file, suffix=('.yaml', '.yml')):
     # Search/download YAML file (if necessary) and return path, checking suffix
     return check_file(file, suffix)
@@ -794,7 +1062,22 @@
     # Search/download file (if necessary) and return path
     check_suffix(file, suffix)  # optional
     file = str(file)  # convert to str()
-    return file
+    if Path(file).is_file() or file == '':  # exists
+        return file
+    elif file.startswith(('http:/', 'https:/')):  # download
+        url = str(Path(file)).replace(':/', '://')  # Pathlib turns :// -> :/
+        file = Path(urllib.parse.unquote(file).split('?')[0]).name  # '%2F' to '/', split https://url.com/file.txt?auth
+        print(f'Downloading {url} to {file}...')
+        torch.hub.download_url_to_file(url, file)
+        assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}'  # check
+        return file
+    else:  # search
+        files = []
+        for d in 'data', 'models', 'utils':  # search directories
+            files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True))  # find file
+        assert len(files), f'File not found: {file}'  # assert file was found
+        assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}"  # assert unique
+        return files[0]  # return file
 
 
 def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):
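Of the helpers restored here, ConfusionMatrix is the one onnx_eval.py now imports; a hypothetical usage sketch (box layouts follow the docstring: detections are Nx6 xyxy+conf+class, labels are Mx5 class+xyxy; the numbers are made up):

    import torch
    from utils import ConfusionMatrix

    cm = ConfusionMatrix(nc=80)  # 80 COCO classes
    detections = torch.tensor([[10., 10., 50., 50., 0.90, 0.]])  # x1, y1, x2, y2, conf, class
    labels = torch.tensor([[0., 12., 12., 48., 48.]])            # class, x1, y1, x2, y2
    cm.process_batch(detections, labels)  # IoU = 1296/1600 = 0.81 > 0.45: counted as a correct class-0 hit
    cm.print()  # 81x81 matrix; rows are predicted classes, columns are true classes

One caveat worth flagging: the restored code references names that do not appear in the import hunks shown here (e.g. dist, random, F, Albumentations, ImageDraw, ZipFile, HELP_URL). They must either be defined in the parts of utils.py this diff does not touch, or the new code paths will fail at runtime with NameError.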
yolov5s_qat.onnx CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5ba00d5f170eab6130610bb543c1f4b1e8354f4944c127e61c28beb99beddf26
-size 29141657
+oid sha256:5f05e2860614a4d10757405f5e4ad2849d380631e16915f91aa0f69597d10575
+size 29142007