ibrim committed on
Commit
7f9efce
1 Parent(s): 842b180

Upload 9 files

Browse files
Files changed (9) hide show
  1. benchmarks.py +142 -0
  2. export.py +606 -0
  3. hubconf.py +107 -0
  4. train.py +634 -0
  5. train_dual.py +644 -0
  6. train_triple.py +636 -0
  7. val.py +389 -0
  8. val_dual.py +393 -0
  9. val_triple.py +391 -0
benchmarks.py ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import platform
3
+ import sys
4
+ import time
5
+ from pathlib import Path
6
+
7
+ import pandas as pd
8
+
9
FILE = Path(__file__).resolve()  # absolute path of this script
ROOT = FILE.parents[0]  # YOLO root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH so sibling modules (export, val, models, utils) can be imported
# ROOT = ROOT.relative_to(Path.cwd())  # relative
14
+
15
+ import export
16
+ from models.experimental import attempt_load
17
+ from models.yolo import SegmentationModel
18
+ from segment.val import run as val_seg
19
+ from utils import notebook_init
20
+ from utils.general import LOGGER, check_yaml, file_size, print_args
21
+ from utils.torch_utils import select_device
22
+ from val import run as val_det
23
+
24
+
25
def run(
        weights=ROOT / 'yolo.pt',  # weights path
        imgsz=640,  # inference size (pixels)
        batch_size=1,  # batch size
        data=ROOT / 'data/coco.yaml',  # dataset.yaml path
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        half=False,  # use FP16 half-precision inference
        test=False,  # test exports only (unused here; handled by test() via main())
        pt_only=False,  # test PyTorch only
        hard_fail=False,  # throw error on benchmark failure
):
    """Benchmark every supported export format: export `weights` to each format,
    then validate accuracy and speed of the exported model.

    Returns a pandas DataFrame with one row per format:
    [Format, Size (MB), mAP50-95, Inference time (ms)]; failed formats hold None.
    Raises AssertionError when `hard_fail` is truthy and a benchmark fails, or when
    `hard_fail` is a string metric floor that any mAP50-95 falls below.
    """
    y, t = [], time.time()
    device = select_device(device)
    model_type = type(attempt_load(weights, fuse=False))  # DetectionModel, SegmentationModel, etc.
    for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows():  # index, (name, file, suffix, CPU, GPU)
        try:
            assert i not in (9, 10), 'inference not supported'  # Edge TPU and TF.js are unsupported
            assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13'  # CoreML
            if 'cpu' in device.type:
                assert cpu, 'inference not supported on CPU'
            if 'cuda' in device.type:
                assert gpu, 'inference not supported on GPU'

            # Export
            if f == '-':
                w = weights  # PyTorch format needs no export
            else:
                w = export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1]  # all others
            assert suffix in str(w), 'export failed'

            # Validate
            if model_type == SegmentationModel:
                result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half)
                metric = result[0][7]  # (box(p, r, map50, map), mask(p, r, map50, map), *loss(box, obj, cls))
            else:  # DetectionModel
                result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half)
                metric = result[0][3]  # (p, r, map50, map, *loss(box, obj, cls))
            speed = result[2][1]  # times (preprocess, inference, postprocess)
            y.append([name, round(file_size(w), 1), round(metric, 4), round(speed, 2)])  # MB, mAP, t_inference
        except Exception as e:
            if hard_fail:
                assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}'
            LOGGER.warning(f'WARNING ⚠️ Benchmark failure for {name}: {e}')
            y.append([name, None, None, None])  # mAP, t_inference
        if pt_only and i == 0:
            break  # break after PyTorch

    # Print results
    LOGGER.info('\n')
    parse_opt()
    notebook_init()  # print system info
    # FIX: previous code tested the builtin `map` (always truthy) to pick the column set,
    # leaving the export-only columns as dead code; run() always produces metrics, so the
    # metrics columns are used unconditionally (identical observable behavior).
    c = ['Format', 'Size (MB)', 'mAP50-95', 'Inference time (ms)']
    py = pd.DataFrame(y, columns=c)
    LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)')
    LOGGER.info(str(py))
    if hard_fail and isinstance(hard_fail, str):
        metrics = py['mAP50-95'].array  # values to compare to floor
        # SECURITY NOTE: eval() of a CLI-supplied string — only pass trusted values for --hard-fail
        floor = eval(hard_fail)  # minimum metric floor to pass
        assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: mAP50-95 < floor {floor}'
    return py
85
+
86
+
87
def test(
        weights=ROOT / 'yolo.pt',  # weights path
        imgsz=640,  # inference size (pixels)
        batch_size=1,  # batch size
        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        half=False,  # use FP16 half-precision inference
        test=False,  # test exports only
        pt_only=False,  # test PyTorch only
        hard_fail=False,  # throw error on benchmark failure
):
    """Export-only smoke test: attempt to export `weights` to every format and
    record pass/fail, without running any inference.

    Returns a pandas DataFrame with columns [Format, Export (bool)].
    """
    y, t = [], time.time()
    device = select_device(device)
    # FIX: export_formats() rows have 5 columns (name, file, suffix, CPU, GPU);
    # the old 4-name unpacking raised ValueError on the first iteration.
    for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows():
        try:
            w = weights if f == '-' else \
                export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1]  # weights
            assert suffix in str(w), 'export failed'
            y.append([name, True])
        except Exception:
            y.append([name, False])  # export failed

    # Print results
    LOGGER.info('\n')
    parse_opt()
    notebook_init()  # print system info
    py = pd.DataFrame(y, columns=['Format', 'Export'])
    LOGGER.info(f'\nExports complete ({time.time() - t:.2f}s)')
    LOGGER.info(str(py))
    return py
117
+
118
+
119
def parse_opt():
    """Build the benchmarks CLI, parse argv, validate the dataset YAML, and log the options."""
    p = argparse.ArgumentParser()
    p.add_argument('--weights', type=str, default=ROOT / 'yolo.pt', help='weights path')
    p.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
    p.add_argument('--batch-size', type=int, default=1, help='batch size')
    p.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
    p.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    p.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    p.add_argument('--test', action='store_true', help='test exports only')
    p.add_argument('--pt-only', action='store_true', help='test PyTorch only')
    p.add_argument('--hard-fail', nargs='?', const=True, default=False, help='Exception on error or < min metric')
    opt = p.parse_args()
    opt.data = check_yaml(opt.data)  # resolve and validate the dataset YAML path
    print_args(vars(opt))  # log parsed options
    return opt
134
+
135
+
136
def main(opt):
    """Entry point: run the export-only smoke test when --test is given, else full benchmarks."""
    if opt.test:
        test(**vars(opt))
    else:
        run(**vars(opt))
138
+
139
+
140
+ if __name__ == "__main__":
141
+ opt = parse_opt()
142
+ main(opt)
export.py ADDED
@@ -0,0 +1,606 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import contextlib
3
+ import json
4
+ import os
5
+ import platform
6
+ import re
7
+ import subprocess
8
+ import sys
9
+ import time
10
+ import warnings
11
+ from pathlib import Path
12
+
13
+ import pandas as pd
14
+ import torch
15
+ from torch.utils.mobile_optimizer import optimize_for_mobile
16
+
17
FILE = Path(__file__).resolve()  # absolute path of this script
ROOT = FILE.parents[0]  # YOLO root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH so local packages (models, utils) resolve
if platform.system() != 'Windows':
    ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative (skipped on Windows — presumably to avoid cross-drive relpath errors; TODO confirm)
23
+
24
+ from models.experimental import attempt_load
25
+ from models.yolo import ClassificationModel, Detect, DetectionModel, SegmentationModel
26
+ from utils.dataloaders import LoadImages
27
+ from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version,
28
+ check_yaml, colorstr, file_size, get_default_args, print_args, url2file, yaml_save)
29
+ from utils.torch_utils import select_device, smart_inference_mode
30
+
31
MACOS = platform.system() == 'Darwin'  # macOS environment; gates CoreML quantization and the tensorflow-macos package choice below
32
+
33
+
34
def export_formats():
    """Return a DataFrame describing the supported YOLO export formats.

    Columns: Format (display name), Argument (--include key), Suffix (output
    file/dir suffix), CPU / GPU (inference-support flags).
    """
    columns = ['Format', 'Argument', 'Suffix', 'CPU', 'GPU']
    rows = (
        ('PyTorch', '-', '.pt', True, True),
        ('TorchScript', 'torchscript', '.torchscript', True, True),
        ('ONNX', 'onnx', '.onnx', True, True),
        ('OpenVINO', 'openvino', '_openvino_model', True, False),
        ('TensorRT', 'engine', '.engine', False, True),
        ('CoreML', 'coreml', '.mlmodel', True, False),
        ('TensorFlow SavedModel', 'saved_model', '_saved_model', True, True),
        ('TensorFlow GraphDef', 'pb', '.pb', True, True),
        ('TensorFlow Lite', 'tflite', '.tflite', True, False),
        ('TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False),
        ('TensorFlow.js', 'tfjs', '_web_model', False, False),
        ('PaddlePaddle', 'paddle', '_paddle_model', True, True),
    )
    return pd.DataFrame([list(row) for row in rows], columns=columns)
50
+
51
+
52
def try_export(inner_func):
    """Decorator for YOLO export functions: times the wrapped call, logs success or
    failure, and converts any exception into a (None, None) return instead of raising.

    The wrapped function must accept a `prefix` keyword default (used for log tags)
    and return a (path, model) pair.
    """
    defaults = get_default_args(inner_func)  # default kwargs of the wrapped exporter

    def wrapper(*args, **kwargs):
        prefix = defaults['prefix']
        try:
            with Profile() as dt:
                result = inner_func(*args, **kwargs)
            f, model = result
            LOGGER.info(f'{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)')
            return f, model
        except Exception as e:
            LOGGER.info(f'{prefix} export failure ❌ {dt.t:.1f}s: {e}')
            return None, None

    return wrapper
68
+
69
+
70
@try_export
def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')):
    """Serialize `model` to TorchScript by tracing with example input `im`;
    `optimize=True` additionally runs the mobile-interpreter optimizer."""
    LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...')
    f = file.with_suffix('.torchscript')

    traced = torch.jit.trace(model, im, strict=False)
    meta = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names}
    extra_files = {'config.txt': json.dumps(meta)}  # torch._C.ExtraFilesMap()
    if optimize:  # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
        optimize_for_mobile(traced)._save_for_lite_interpreter(str(f), _extra_files=extra_files)
    else:
        traced.save(str(f), _extra_files=extra_files)
    return f, None
84
+
85
+
86
@try_export
def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')):
    # YOLO ONNX export
    """Export `model` to ONNX at `file`.with_suffix('.onnx').

    `dynamic` marks batch/height/width (and output) axes as variable and forces the
    trace onto CPU; `simplify` runs onnx-simplifier as a best-effort post-pass.
    Returns (path, loaded onnx ModelProto).
    """
    check_requirements('onnx')
    import onnx

    LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
    f = file.with_suffix('.onnx')

    # Segmentation models produce a second output (mask prototypes)
    output_names = ['output0', 'output1'] if isinstance(model, SegmentationModel) else ['output0']
    if dynamic:
        # reuse `dynamic` as the dynamic_axes dict passed to torch.onnx.export
        dynamic = {'images': {0: 'batch', 2: 'height', 3: 'width'}}  # shape(1,3,640,640)
        if isinstance(model, SegmentationModel):
            dynamic['output0'] = {0: 'batch', 1: 'anchors'}  # shape(1,25200,85)
            dynamic['output1'] = {0: 'batch', 2: 'mask_height', 3: 'mask_width'}  # shape(1,32,160,160)
        elif isinstance(model, DetectionModel):
            dynamic['output0'] = {0: 'batch', 1: 'anchors'}  # shape(1,25200,85)

    torch.onnx.export(
        model.cpu() if dynamic else model,  # --dynamic only compatible with cpu
        im.cpu() if dynamic else im,
        f,
        verbose=False,
        opset_version=opset,
        do_constant_folding=True,
        input_names=['images'],
        output_names=output_names,
        dynamic_axes=dynamic or None)

    # Checks
    model_onnx = onnx.load(f)  # load onnx model
    onnx.checker.check_model(model_onnx)  # check onnx model

    # Metadata: stride and class names stored as ONNX metadata_props for downstream loaders
    d = {'stride': int(max(model.stride)), 'names': model.names}
    for k, v in d.items():
        meta = model_onnx.metadata_props.add()
        meta.key, meta.value = k, str(v)
    onnx.save(model_onnx, f)

    # Simplify (best effort: failures are logged, never raised)
    if simplify:
        try:
            cuda = torch.cuda.is_available()
            check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1'))
            import onnxsim

            LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
            model_onnx, check = onnxsim.simplify(model_onnx)
            assert check, 'assert check failed'
            onnx.save(model_onnx, f)
        except Exception as e:
            LOGGER.info(f'{prefix} simplifier failure: {e}')
    return f, model_onnx
140
+
141
+
142
@try_export
def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')):
    """Convert the ONNX model next to `file` into an OpenVINO IR directory via the
    `mo` Model Optimizer, then drop a metadata.yaml alongside it."""
    check_requirements('openvino-dev')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
    import openvino.inference_engine as ie

    LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
    out_dir = str(file).replace('.pt', f'_openvino_model{os.sep}')

    precision = 'FP16' if half else 'FP32'
    cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {out_dir} --data_type {precision}"
    subprocess.run(cmd.split(), check=True, env=os.environ)  # run Model Optimizer
    yaml_save(Path(out_dir) / file.with_suffix('.yaml').name, metadata)  # add metadata.yaml
    return out_dir, None
155
+
156
+
157
@try_export
def export_paddle(model, im, file, metadata, prefix=colorstr('PaddlePaddle:')):
    """Export `model` to PaddlePaddle via X2Paddle trace conversion, writing a
    metadata.yaml into the output directory."""
    check_requirements(('paddlepaddle', 'x2paddle'))
    import x2paddle
    from x2paddle.convert import pytorch2paddle

    LOGGER.info(f'\n{prefix} starting export with X2Paddle {x2paddle.__version__}...')
    save_dir = str(file).replace('.pt', f'_paddle_model{os.sep}')

    pytorch2paddle(module=model, save_dir=save_dir, jit_type='trace', input_examples=[im])  # trace-based conversion
    yaml_save(Path(save_dir) / file.with_suffix('.yaml').name, metadata)  # add metadata.yaml
    return save_dir, None
170
+
171
+
172
@try_export
def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')):
    """Export `model` to an Apple CoreML *.mlmodel via a TorchScript trace, with
    optional INT8/FP16 weight quantization (macOS only)."""
    check_requirements('coremltools')
    import coremltools as ct

    LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...')
    f = file.with_suffix('.mlmodel')

    traced = torch.jit.trace(model, im, strict=False)  # TorchScript model
    ct_model = ct.convert(traced, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])])
    if int8:
        bits, mode = 8, 'kmeans_lut'
    elif half:
        bits, mode = 16, 'linear'
    else:
        bits, mode = 32, None
    if bits < 32:
        if MACOS:  # quantization only supported on macOS
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=DeprecationWarning)  # suppress numpy==1.20 float warning
                ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
        else:
            print(f'{prefix} quantization only supported on macOS, skipping...')
    ct_model.save(f)
    return f, ct_model
193
+
194
+
195
@try_export
def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')):
    # YOLO TensorRT export https://developer.nvidia.com/tensorrt
    """Build a TensorRT engine (*.engine) for `model`.

    First exports an ONNX intermediate (opset 12), then parses it with trt.OnnxParser
    and serializes the built engine. Requires a CUDA device. `workspace` is the
    builder workspace size in GiB. Returns (engine path, None).
    """
    assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`'
    try:
        import tensorrt as trt
    except Exception:
        if platform.system() == 'Linux':
            check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com')
        import tensorrt as trt

    if trt.__version__[0] == '7':  # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012
        # TRT7 workaround: shrink anchor_grid during ONNX export, restore afterwards
        grid = model.model[-1].anchor_grid
        model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid]
        export_onnx(model, im, file, 12, dynamic, simplify)  # opset 12
        model.model[-1].anchor_grid = grid
    else:  # TensorRT >= 8
        check_version(trt.__version__, '8.0.0', hard=True)  # require tensorrt>=8.0.0
        export_onnx(model, im, file, 12, dynamic, simplify)  # opset 12
    onnx = file.with_suffix('.onnx')  # NOTE: local name shadows any `onnx` module import

    LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...')
    assert onnx.exists(), f'failed to export ONNX file: {onnx}'
    f = file.with_suffix('.engine')  # TensorRT engine file
    logger = trt.Logger(trt.Logger.INFO)
    if verbose:
        logger.min_severity = trt.Logger.Severity.VERBOSE

    builder = trt.Builder(logger)
    config = builder.create_builder_config()
    # `*` binds tighter than `<<`, so this is workspace GiB (workspace << 30 bytes)
    config.max_workspace_size = workspace * 1 << 30
    # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30)  # fix TRT 8.4 deprecation notice

    flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    network = builder.create_network(flag)
    parser = trt.OnnxParser(network, logger)
    if not parser.parse_from_file(str(onnx)):
        raise RuntimeError(f'failed to load ONNX file: {onnx}')

    inputs = [network.get_input(i) for i in range(network.num_inputs)]
    outputs = [network.get_output(i) for i in range(network.num_outputs)]
    for inp in inputs:
        LOGGER.info(f'{prefix} input "{inp.name}" with shape{inp.shape} {inp.dtype}')
    for out in outputs:
        LOGGER.info(f'{prefix} output "{out.name}" with shape{out.shape} {out.dtype}')

    if dynamic:
        if im.shape[0] <= 1:
            LOGGER.warning(f"{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument")
        # optimization profile: min batch 1, opt batch //2, max batch = im.shape[0]
        profile = builder.create_optimization_profile()
        for inp in inputs:
            profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape)
        config.add_optimization_profile(profile)

    LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine as {f}')
    if builder.platform_has_fast_fp16 and half:
        config.set_flag(trt.BuilderFlag.FP16)
    with builder.build_engine(network, config) as engine, open(f, 'wb') as t:
        t.write(engine.serialize())
    return f, None
255
+
256
+
257
@try_export
def export_saved_model(model,
                       im,
                       file,
                       dynamic,
                       tf_nms=False,
                       agnostic_nms=False,
                       topk_per_class=100,
                       topk_all=100,
                       iou_thres=0.45,
                       conf_thres=0.25,
                       keras=False,
                       prefix=colorstr('TensorFlow SavedModel:')):
    # YOLO TensorFlow SavedModel export
    """Export `model` to a TensorFlow SavedModel directory.

    Rebuilds the network as a Keras model (models.tf.TFModel) in BHWC layout, then
    either saves the Keras model directly (`keras=True`) or wraps a frozen concrete
    function in a tf.Module and saves that. Returns (dir path, keras model).
    """
    try:
        import tensorflow as tf
    except Exception:
        check_requirements(f"tensorflow{'' if torch.cuda.is_available() else '-macos' if MACOS else '-cpu'}")
        import tensorflow as tf
    from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2

    from models.tf import TFModel

    LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
    f = str(file).replace('.pt', '_saved_model')
    batch_size, ch, *imgsz = list(im.shape)  # BCHW

    tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
    im = tf.zeros((batch_size, *imgsz, ch))  # BHWC order for TensorFlow
    _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)  # warm-up trace
    inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size)
    outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
    keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
    keras_model.trainable = False
    keras_model.summary()
    if keras:
        keras_model.save(f, save_format='tf')
    else:
        spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)
        m = tf.function(lambda x: keras_model(x))  # full model
        m = m.get_concrete_function(spec)
        frozen_func = convert_variables_to_constants_v2(m)
        tfm = tf.Module()
        # with baked-in NMS keep only the first 4 outputs of the frozen graph
        tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x), [spec])
        tfm.__call__(im)
        tf.saved_model.save(tfm,
                            f,
                            options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) if check_version(
                                tf.__version__, '2.6') else tf.saved_model.SaveOptions())
    return f, keras_model
307
+
308
+
309
@try_export
def export_pb(keras_model, file, prefix=colorstr('TensorFlow GraphDef:')):
    """Freeze `keras_model` into a single-file TensorFlow GraphDef (*.pb).

    https://github.com/leimao/Frozen_Graph_TensorFlow
    """
    import tensorflow as tf
    from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2

    LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
    f = file.with_suffix('.pb')

    spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)
    fn = tf.function(lambda x: keras_model(x))  # wrap the full model
    concrete = fn.get_concrete_function(spec)
    frozen_func = convert_variables_to_constants_v2(concrete)
    frozen_func.graph.as_graph_def()
    tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False)
    return f, None
324
+
325
+
326
@try_export
def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')):
    """Export `keras_model` to TensorFlow Lite.

    Default output is an FP16 '-fp16.tflite'; with `int8` a calibrated
    '-int8.tflite' is produced instead (uses `data` for calibration images).
    NMS variants add SELECT_TF_OPS. Returns (path, None).
    """
    import tensorflow as tf

    LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
    batch_size, ch, *imgsz = list(im.shape)  # BCHW; imgsz drives the INT8 calibration loader
    f = str(file).replace('.pt', '-fp16.tflite')

    converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
    converter.target_spec.supported_types = [tf.float16]
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    if int8:
        from models.tf import representative_dataset_gen
        dataset = LoadImages(check_dataset(check_yaml(data))['train'], img_size=imgsz, auto=False)
        converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100)  # calibration set
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.target_spec.supported_types = []
        converter.inference_input_type = tf.uint8  # or tf.int8
        converter.inference_output_type = tf.uint8  # or tf.int8
        converter.experimental_new_quantizer = True
        f = str(file).replace('.pt', '-int8.tflite')
    if nms or agnostic_nms:
        converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS)

    tflite_model = converter.convert()
    # FIX: close the output file deterministically (was an unclosed open(f, "wb").write(...))
    with open(f, "wb") as fo:
        fo.write(tflite_model)
    return f, None
355
+
356
+
357
@try_export
def export_edgetpu(file, prefix=colorstr('Edge TPU:')):
    # YOLO Edge TPU export https://coral.ai/docs/edgetpu/models-intro/
    """Compile the '-int8.tflite' model next to `file` for Google Edge TPU.

    Linux-only; if `edgetpu_compiler` is missing, attempts an apt-based install
    (dropping 'sudo ' from commands when sudo is unavailable). Expects the INT8
    TFLite model to already exist (see export_tflite). Returns (path, None).
    """
    cmd = 'edgetpu_compiler --version'
    help_url = 'https://coral.ai/docs/edgetpu/compiler/'
    assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}'
    if subprocess.run(f'{cmd} >/dev/null', shell=True).returncode != 0:  # compiler not installed
        LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}')
        sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0  # sudo installed on system
        for c in (
                'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -',
                'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list',
                'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'):
            subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True)
    ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]

    LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...')
    f = str(file).replace('.pt', '-int8_edgetpu.tflite')  # Edge TPU model (output)
    f_tfl = str(file).replace('.pt', '-int8.tflite')  # TFLite model (input)

    cmd = f"edgetpu_compiler -s -d -k 10 --out_dir {file.parent} {f_tfl}"
    subprocess.run(cmd.split(), check=True)
    return f, None
380
+
381
+
382
@try_export
def export_tfjs(file, prefix=colorstr('TensorFlow.js:')):
    """Convert the frozen *.pb graph next to `file` into a TensorFlow.js web-model
    directory, then rewrite model.json so Identity_* outputs are in ascending order."""
    check_requirements('tensorflowjs')
    import tensorflowjs as tfjs

    LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...')
    f = str(file).replace('.pt', '_web_model')  # js dir
    f_pb = file.with_suffix('.pb')  # *.pb path
    f_json = f'{f}/model.json'  # *.json path

    cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \
          f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}'
    subprocess.run(cmd.split())

    json_text = Path(f_json).read_text()  # renamed local (was `json`, shadowing the json module)
    subst = re.sub(
        r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
        r'"Identity.?.?": {"name": "Identity.?.?"}, '
        r'"Identity.?.?": {"name": "Identity.?.?"}, '
        r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, '
        r'"Identity_1": {"name": "Identity_1"}, '
        r'"Identity_2": {"name": "Identity_2"}, '
        r'"Identity_3": {"name": "Identity_3"}}}', json_text)
    with open(f_json, 'w') as jf:  # sort JSON Identity_* in ascending order
        jf.write(subst)
    return f, None
409
+
410
+
411
def add_tflite_metadata(file, metadata, num_outputs):
    """Attach YOLO metadata to a *.tflite model file.

    Writes `metadata` (stringified) as an associated file and populates input/output
    tensor metadata stubs, per https://www.tensorflow.org/lite/models/convert/metadata.
    Silently a no-op when `tflite_support` is not installed (ImportError suppressed).
    """
    with contextlib.suppress(ImportError):
        # check_requirements('tflite_support')
        from tflite_support import flatbuffers
        from tflite_support import metadata as _metadata
        from tflite_support import metadata_schema_py_generated as _metadata_fb

        import tempfile  # local import: only needed when tflite_support is present

        # FIX: use the platform temp dir instead of hard-coded '/tmp' (fails on Windows)
        tmp_file = Path(tempfile.gettempdir()) / 'meta.txt'
        with open(tmp_file, 'w') as meta_f:
            meta_f.write(str(metadata))

        model_meta = _metadata_fb.ModelMetadataT()
        label_file = _metadata_fb.AssociatedFileT()
        label_file.name = tmp_file.name
        model_meta.associatedFiles = [label_file]

        subgraph = _metadata_fb.SubGraphMetadataT()
        subgraph.inputTensorMetadata = [_metadata_fb.TensorMetadataT()]
        # FIX: create distinct TensorMetadataT objects ([obj] * n aliases one object)
        subgraph.outputTensorMetadata = [_metadata_fb.TensorMetadataT() for _ in range(num_outputs)]
        model_meta.subgraphMetadata = [subgraph]

        b = flatbuffers.Builder(0)
        b.Finish(model_meta.Pack(b), _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
        metadata_buf = b.Output()

        populator = _metadata.MetadataPopulator.with_model_file(file)
        populator.load_metadata_buffer(metadata_buf)
        populator.load_associated_files([str(tmp_file)])
        populator.populate()
        tmp_file.unlink()
442
+
443
+
444
@smart_inference_mode()
def run(
        data=ROOT / 'data/coco.yaml',  # 'dataset.yaml path'
        weights=ROOT / 'yolo.pt',  # weights path
        imgsz=(640, 640),  # image (height, width)
        batch_size=1,  # batch size
        device='cpu',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        include=('torchscript', 'onnx'),  # include formats
        half=False,  # FP16 half-precision export
        inplace=False,  # set YOLO Detect() inplace=True
        keras=False,  # use Keras
        optimize=False,  # TorchScript: optimize for mobile
        int8=False,  # CoreML/TF INT8 quantization
        dynamic=False,  # ONNX/TF/TensorRT: dynamic axes
        simplify=False,  # ONNX: simplify model
        opset=12,  # ONNX: opset version
        verbose=False,  # TensorRT: verbose log
        workspace=4,  # TensorRT: workspace size (GB)
        nms=False,  # TF: add NMS to model
        agnostic_nms=False,  # TF: add agnostic NMS to model
        topk_per_class=100,  # TF.js NMS: topk per class to keep
        topk_all=100,  # TF.js NMS: topk for all classes to keep
        iou_thres=0.45,  # TF.js NMS: IoU threshold
        conf_thres=0.25,  # TF.js NMS: confidence threshold
):
    """Export a YOLO PyTorch model to every format named in `include`.

    Loads `weights`, dry-runs inference to fix the output shape, then calls the
    per-format export_* helpers in dependency order (ONNX before OpenVINO,
    SavedModel before pb/tflite/edgetpu/tfjs). Returns the list of exported
    file/dir paths with failed exports dropped.
    """
    t = time.time()
    include = [x.lower() for x in include]  # to lowercase
    fmts = tuple(export_formats()['Argument'][1:])  # --include arguments (skips the 'PyTorch' row)
    flags = [x in include for x in fmts]
    assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {fmts}'
    jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = flags  # export booleans
    file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights)  # PyTorch weights

    # Load PyTorch model
    device = select_device(device)
    if half:
        assert device.type != 'cpu' or coreml, '--half only compatible with GPU export, i.e. use --device 0'
        assert not dynamic, '--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both'
    model = attempt_load(weights, device=device, inplace=True, fuse=True)  # load FP32 model

    # Checks
    imgsz *= 2 if len(imgsz) == 1 else 1  # expand a single size to (h, w)
    if optimize:
        assert device.type == 'cpu', '--optimize not compatible with cuda devices, i.e. use --device cpu'

    # Input
    gs = int(max(model.stride))  # grid size (max stride)
    imgsz = [check_img_size(x, gs) for x in imgsz]  # verify img_size are gs-multiples
    im = torch.zeros(batch_size, 3, *imgsz).to(device)  # image size(1,3,320,192) BCHW iDetection

    # Update model
    model.eval()
    for k, m in model.named_modules():
        # NOTE(review): V6Detect is not in the `from models.yolo import ...` list at the
        # top of this file — this isinstance raises NameError at runtime unless V6Detect
        # is provided elsewhere; confirm the import.
        if isinstance(m, (Detect, V6Detect)):
            m.inplace = inplace
            m.dynamic = dynamic
            m.export = True

    for _ in range(2):
        y = model(im)  # dry runs
    if half and not coreml:
        im, model = im.half(), model.half()  # to FP16
    shape = tuple((y[0] if isinstance(y, tuple) else y).shape)  # model output shape
    metadata = {'stride': int(max(model.stride)), 'names': model.names}  # model metadata
    LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)")

    # Exports — f collects one output path per format ('' when skipped)
    f = [''] * len(fmts)  # exported filenames
    warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning)  # suppress TracerWarning
    if jit:  # TorchScript
        f[0], _ = export_torchscript(model, im, file, optimize)
    if engine:  # TensorRT required before ONNX
        f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose)
    if onnx or xml:  # OpenVINO requires ONNX
        f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify)
    if xml:  # OpenVINO
        f[3], _ = export_openvino(file, metadata, half)
    if coreml:  # CoreML
        f[4], _ = export_coreml(model, im, file, int8, half)
    if any((saved_model, pb, tflite, edgetpu, tfjs)):  # TensorFlow formats
        assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.'
        assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.'
        f[5], s_model = export_saved_model(model.cpu(),
                                           im,
                                           file,
                                           dynamic,
                                           tf_nms=nms or agnostic_nms or tfjs,
                                           agnostic_nms=agnostic_nms or tfjs,
                                           topk_per_class=topk_per_class,
                                           topk_all=topk_all,
                                           iou_thres=iou_thres,
                                           conf_thres=conf_thres,
                                           keras=keras)
        if pb or tfjs:  # pb prerequisite to tfjs
            f[6], _ = export_pb(s_model, file)
        if tflite or edgetpu:
            f[7], _ = export_tflite(s_model, im, file, int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms)
            if edgetpu:
                f[8], _ = export_edgetpu(file)
            add_tflite_metadata(f[8] or f[7], metadata, num_outputs=len(s_model.outputs))
        if tfjs:
            f[9], _ = export_tfjs(file)
    if paddle:  # PaddlePaddle
        f[10], _ = export_paddle(model, im, file, metadata)

    # Finish
    f = [str(x) for x in f if x]  # filter out '' and None
    if any(f):
        cls, det, seg = (isinstance(model, x) for x in (ClassificationModel, DetectionModel, SegmentationModel))  # type
        dir = Path('segment' if seg else 'classify' if cls else '')
        h = '--half' if half else ''  # --half FP16 inference arg
        s = "# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference" if cls else \
            "# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference" if seg else ''
        LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)'
                    f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
                    f"\nDetect: python {dir / ('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}"
                    f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}"
                    f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}') {s}"
                    f"\nVisualize: https://netron.app")
    return f  # return list of exported files/dirs
564
+
565
+
566
def parse_opt():
    """Parse command-line options for model export and return the populated namespace."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument  # local alias to keep the option table compact
    add('--data', type=str, default=ROOT / 'data/coco.yaml', help='dataset.yaml path')
    add('--weights', nargs='+', type=str, default=ROOT / 'yolo.pt', help='model.pt path(s)')
    add('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640, 640], help='image (h, w)')
    add('--batch-size', type=int, default=1, help='batch size')
    add('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    add('--half', action='store_true', help='FP16 half-precision export')
    add('--inplace', action='store_true', help='set YOLO Detect() inplace=True')
    add('--keras', action='store_true', help='TF: use Keras')
    add('--optimize', action='store_true', help='TorchScript: optimize for mobile')
    add('--int8', action='store_true', help='CoreML/TF INT8 quantization')
    add('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes')
    add('--simplify', action='store_true', help='ONNX: simplify model')
    add('--opset', type=int, default=12, help='ONNX: opset version')
    add('--verbose', action='store_true', help='TensorRT: verbose log')
    add('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)')
    add('--nms', action='store_true', help='TF: add NMS to model')
    add('--agnostic-nms', action='store_true', help='TF: add agnostic NMS to model')
    add('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep')
    add('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep')
    add('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold')
    add('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold')
    add('--include',
        nargs='+',
        default=['torchscript'],
        help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle')
    opt = parser.parse_args()
    print_args(vars(opt))
    return opt
597
+
598
+
599
def main(opt):
    """Run export once per weights file; opt.weights may be a single path or a list of paths."""
    weights_list = opt.weights if isinstance(opt.weights, list) else [opt.weights]
    for w in weights_list:
        opt.weights = w  # run() reads the active weights path from opt
        run(**vars(opt))
602
+
603
+
604
+ if __name__ == "__main__":
605
+ opt = parse_opt()
606
+ main(opt)
hubconf.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+
4
def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
    """Creates or loads a YOLO model

    Arguments:
        name (str): model name 'yolov3' or path 'path/to/best.pt'
        pretrained (bool): load pretrained weights into the model
        channels (int): number of input channels
        classes (int): number of model classes
        autoshape (bool): apply YOLO .autoshape() wrapper to model
        verbose (bool): print all information to screen
        device (str, torch.device, None): device to use for model parameters

    Returns:
        YOLO model

    Raises:
        Exception: re-raised (chained) with a cache-refresh hint if loading/creation fails.
    """
    # Imports are local so that `torch.hub.load` only pulls them in when the entry point runs.
    from pathlib import Path

    from models.common import AutoShape, DetectMultiBackend
    from models.experimental import attempt_load
    from models.yolo import ClassificationModel, DetectionModel, SegmentationModel
    from utils.downloads import attempt_download
    from utils.general import LOGGER, check_requirements, intersect_dicts, logging
    from utils.torch_utils import select_device

    if not verbose:
        LOGGER.setLevel(logging.WARNING)  # silence info-level logs for quiet mode
    check_requirements(exclude=('opencv-python', 'tensorboard', 'thop'))
    name = Path(name)
    path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name  # checkpoint path
    try:
        device = select_device(device)
        if pretrained and channels == 3 and classes == 80:
            # Default configuration: load the checkpoint directly as an inference backend.
            try:
                model = DetectMultiBackend(path, device=device, fuse=autoshape)  # detection model
                if autoshape:
                    if model.pt and isinstance(model.model, ClassificationModel):
                        LOGGER.warning('WARNING ⚠️ YOLO ClassificationModel is not yet AutoShape compatible. '
                                       'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).')
                    elif model.pt and isinstance(model.model, SegmentationModel):
                        LOGGER.warning('WARNING ⚠️ YOLO SegmentationModel is not yet AutoShape compatible. '
                                       'You will not be able to run inference with this model.')
                    else:
                        model = AutoShape(model)  # for file/URI/PIL/cv2/np inputs and NMS
            except Exception:
                # Fall back to a raw load for checkpoints DetectMultiBackend cannot handle.
                model = attempt_load(path, device=device, fuse=False)  # arbitrary model
        else:
            # Custom channels/classes: build the model from its YAML config, then optionally
            # transplant matching pretrained weights.
            cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0]  # model.yaml path
            model = DetectionModel(cfg, channels, classes)  # create model
            if pretrained:
                ckpt = torch.load(attempt_download(path), map_location=device)  # load
                csd = ckpt['model'].float().state_dict()  # checkpoint state_dict as FP32
                csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors'])  # intersect
                model.load_state_dict(csd, strict=False)  # load
                if len(ckpt['model'].names) == classes:
                    model.names = ckpt['model'].names  # set class names attribute
        if not verbose:
            LOGGER.setLevel(logging.INFO)  # reset to default
        return model.to(device)

    except Exception as e:
        help_url = 'https://github.com/ultralytics/yolov5/issues/36'
        s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.'
        raise Exception(s) from e
67
+
68
+
69
def custom(path='path/to/model.pt', autoshape=True, _verbose=True, device=None):
    """Load a custom or locally trained YOLO model from *path* (thin wrapper around _create)."""
    return _create(name=path, autoshape=autoshape, verbose=_verbose, device=device)
72
+
73
+
74
# Smoke-test entry point: build a model and run batched inference over a mix of
# supported input types (filename, Path, URI, OpenCV array, PIL image, numpy array).
if __name__ == '__main__':
    import argparse
    from pathlib import Path

    import numpy as np
    from PIL import Image

    from utils.general import cv2, print_args

    # Argparser
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, default='yolo', help='model name')
    opt = parser.parse_args()
    print_args(vars(opt))

    # Model
    model = _create(name=opt.model, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True)
    # model = custom(path='path/to/model.pt')  # custom

    # Images
    imgs = [
        'data/images/zidane.jpg',  # filename
        Path('data/images/zidane.jpg'),  # Path
        'https://ultralytics.com/images/zidane.jpg',  # URI
        cv2.imread('data/images/bus.jpg')[:, :, ::-1],  # OpenCV (BGR -> RGB via reversed channel slice)
        Image.open('data/images/bus.jpg'),  # PIL
        np.zeros((320, 640, 3))]  # numpy

    # Inference
    results = model(imgs, size=320)  # batched inference

    # Results
    results.print()
    results.save()
train.py ADDED
@@ -0,0 +1,634 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import math
3
+ import os
4
+ import random
5
+ import sys
6
+ import time
7
+ from copy import deepcopy
8
+ from datetime import datetime
9
+ from pathlib import Path
10
+
11
+ import numpy as np
12
+ import torch
13
+ import torch.distributed as dist
14
+ import torch.nn as nn
15
+ import yaml
16
+ from torch.optim import lr_scheduler
17
+ from tqdm import tqdm
18
+
19
+ FILE = Path(__file__).resolve()
20
+ ROOT = FILE.parents[0] # root directory
21
+ if str(ROOT) not in sys.path:
22
+ sys.path.append(str(ROOT)) # add ROOT to PATH
23
+ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
24
+
25
+ import val as validate # for end-of-epoch mAP
26
+ from models.experimental import attempt_load
27
+ from models.yolo import Model
28
+ from utils.autoanchor import check_anchors
29
+ from utils.autobatch import check_train_batch_size
30
+ from utils.callbacks import Callbacks
31
+ from utils.dataloaders import create_dataloader
32
+ from utils.downloads import attempt_download, is_url
33
+ from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_img_size,
34
+ check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds,
35
+ intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods,
36
+ one_cycle, one_flat_cycle, print_args, print_mutation, strip_optimizer, yaml_save)
37
+ from utils.loggers import Loggers
38
+ from utils.loggers.comet.comet_utils import check_comet_resume
39
+ from utils.loss_tal import ComputeLoss
40
+ from utils.metrics import fitness
41
+ from utils.plots import plot_evolve
42
+ from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP,
43
+ smart_optimizer, smart_resume, torch_distributed_zero_first)
44
+
45
+ LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html
46
+ RANK = int(os.getenv('RANK', -1))
47
+ WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
48
+ GIT_INFO = None
49
+
50
+
51
+ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary
52
+ save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \
53
+ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \
54
+ opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze
55
+ callbacks.run('on_pretrain_routine_start')
56
+
57
+ # Directories
58
+ w = save_dir / 'weights' # weights dir
59
+ (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir
60
+ last, best = w / 'last.pt', w / 'best.pt'
61
+ last_striped, best_striped = w / 'last_striped.pt', w / 'best_striped.pt'
62
+
63
+ # Hyperparameters
64
+ if isinstance(hyp, str):
65
+ with open(hyp, errors='ignore') as f:
66
+ hyp = yaml.safe_load(f) # load hyps dict
67
+ LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
68
+ hyp['anchor_t'] = 5.0
69
+ opt.hyp = hyp.copy() # for saving hyps to checkpoints
70
+
71
+ # Save run settings
72
+ if not evolve:
73
+ yaml_save(save_dir / 'hyp.yaml', hyp)
74
+ yaml_save(save_dir / 'opt.yaml', vars(opt))
75
+
76
+ # Loggers
77
+ data_dict = None
78
+ if RANK in {-1, 0}:
79
+ loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance
80
+
81
+ # Register actions
82
+ for k in methods(loggers):
83
+ callbacks.register_action(k, callback=getattr(loggers, k))
84
+
85
+ # Process custom dataset artifact link
86
+ data_dict = loggers.remote_dataset
87
+ if resume: # If resuming runs from remote artifact
88
+ weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size
89
+
90
+ # Config
91
+ plots = not evolve and not opt.noplots # create plots
92
+ cuda = device.type != 'cpu'
93
+ init_seeds(opt.seed + 1 + RANK, deterministic=True)
94
+ with torch_distributed_zero_first(LOCAL_RANK):
95
+ data_dict = data_dict or check_dataset(data) # check if None
96
+ train_path, val_path = data_dict['train'], data_dict['val']
97
+ nc = 1 if single_cls else int(data_dict['nc']) # number of classes
98
+ names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
99
+ #is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset
100
+ is_coco = isinstance(val_path, str) and val_path.endswith('val2017.txt') # COCO dataset
101
+
102
+ # Model
103
+ check_suffix(weights, '.pt') # check weights
104
+ pretrained = weights.endswith('.pt')
105
+ if pretrained:
106
+ with torch_distributed_zero_first(LOCAL_RANK):
107
+ weights = attempt_download(weights) # download if not found locally
108
+ ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak
109
+ model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
110
+ exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys
111
+ csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32
112
+ csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect
113
+ model.load_state_dict(csd, strict=False) # load
114
+ LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report
115
+ else:
116
+ model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
117
+ amp = check_amp(model) # check AMP
118
+
119
+ # Freeze
120
+ freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze
121
+ for k, v in model.named_parameters():
122
+ # v.requires_grad = True # train all layers TODO: uncomment this line as in master
123
+ # v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results)
124
+ if any(x in k for x in freeze):
125
+ LOGGER.info(f'freezing {k}')
126
+ v.requires_grad = False
127
+
128
+ # Image size
129
+ gs = max(int(model.stride.max()), 32) # grid size (max stride)
130
+ imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple
131
+
132
+ # Batch size
133
+ if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size
134
+ batch_size = check_train_batch_size(model, imgsz, amp)
135
+ loggers.on_params_update({"batch_size": batch_size})
136
+
137
+ # Optimizer
138
+ nbs = 64 # nominal batch size
139
+ accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing
140
+ hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay
141
+ optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay'])
142
+
143
+ # Scheduler
144
+ if opt.cos_lr:
145
+ lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf']
146
+ elif opt.flat_cos_lr:
147
+ lf = one_flat_cycle(1, hyp['lrf'], epochs) # flat cosine 1->hyp['lrf']
148
+ elif opt.fixed_lr:
149
+ lf = lambda x: 1.0
150
+ else:
151
+ lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear
152
+
153
+ scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
154
+ # from utils.plots import plot_lr_scheduler; plot_lr_scheduler(optimizer, scheduler, epochs)
155
+
156
+ # EMA
157
+ ema = ModelEMA(model) if RANK in {-1, 0} else None
158
+
159
+ # Resume
160
+ best_fitness, start_epoch = 0.0, 0
161
+ if pretrained:
162
+ if resume:
163
+ best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume)
164
+ del ckpt, csd
165
+
166
+ # DP mode
167
+ if cuda and RANK == -1 and torch.cuda.device_count() > 1:
168
+ LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.')
169
+ model = torch.nn.DataParallel(model)
170
+
171
+ # SyncBatchNorm
172
+ if opt.sync_bn and cuda and RANK != -1:
173
+ model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
174
+ LOGGER.info('Using SyncBatchNorm()')
175
+
176
+ # Trainloader
177
+ train_loader, dataset = create_dataloader(train_path,
178
+ imgsz,
179
+ batch_size // WORLD_SIZE,
180
+ gs,
181
+ single_cls,
182
+ hyp=hyp,
183
+ augment=True,
184
+ cache=None if opt.cache == 'val' else opt.cache,
185
+ rect=opt.rect,
186
+ rank=LOCAL_RANK,
187
+ workers=workers,
188
+ image_weights=opt.image_weights,
189
+ close_mosaic=opt.close_mosaic != 0,
190
+ quad=opt.quad,
191
+ prefix=colorstr('train: '),
192
+ shuffle=True,
193
+ min_items=opt.min_items)
194
+ labels = np.concatenate(dataset.labels, 0)
195
+ mlc = int(labels[:, 0].max()) # max label class
196
+ assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}'
197
+
198
+ # Process 0
199
+ if RANK in {-1, 0}:
200
+ val_loader = create_dataloader(val_path,
201
+ imgsz,
202
+ batch_size // WORLD_SIZE * 2,
203
+ gs,
204
+ single_cls,
205
+ hyp=hyp,
206
+ cache=None if noval else opt.cache,
207
+ rect=True,
208
+ rank=-1,
209
+ workers=workers * 2,
210
+ pad=0.5,
211
+ prefix=colorstr('val: '))[0]
212
+
213
+ if not resume:
214
+ # if not opt.noautoanchor:
215
+ # check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # run AutoAnchor
216
+ model.half().float() # pre-reduce anchor precision
217
+
218
+ callbacks.run('on_pretrain_routine_end', labels, names)
219
+
220
+ # DDP mode
221
+ if cuda and RANK != -1:
222
+ model = smart_DDP(model)
223
+
224
+ # Model attributes
225
+ nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps)
226
+ #hyp['box'] *= 3 / nl # scale to layers
227
+ #hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers
228
+ #hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers
229
+ hyp['label_smoothing'] = opt.label_smoothing
230
+ model.nc = nc # attach number of classes to model
231
+ model.hyp = hyp # attach hyperparameters to model
232
+ model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights
233
+ model.names = names
234
+
235
+ # Start training
236
+ t0 = time.time()
237
+ nb = len(train_loader) # number of batches
238
+ nw = max(round(hyp['warmup_epochs'] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations)
239
+ # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
240
+ last_opt_step = -1
241
+ maps = np.zeros(nc) # mAP per class
242
+ results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
243
+ scheduler.last_epoch = start_epoch - 1 # do not move
244
+ scaler = torch.cuda.amp.GradScaler(enabled=amp)
245
+ stopper, stop = EarlyStopping(patience=opt.patience), False
246
+ compute_loss = ComputeLoss(model) # init loss class
247
+ callbacks.run('on_train_start')
248
+ LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
249
+ f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
250
+ f"Logging results to {colorstr('bold', save_dir)}\n"
251
+ f'Starting training for {epochs} epochs...')
252
+ for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
253
+ callbacks.run('on_train_epoch_start')
254
+ model.train()
255
+
256
+ # Update image weights (optional, single-GPU only)
257
+ if opt.image_weights:
258
+ cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights
259
+ iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights
260
+ dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx
261
+ if epoch == (epochs - opt.close_mosaic):
262
+ LOGGER.info("Closing dataloader mosaic")
263
+ dataset.mosaic = False
264
+
265
+ # Update mosaic border (optional)
266
+ # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
267
+ # dataset.mosaic_border = [b - imgsz, -b] # height, width borders
268
+
269
+ mloss = torch.zeros(3, device=device) # mean losses
270
+ if RANK != -1:
271
+ train_loader.sampler.set_epoch(epoch)
272
+ pbar = enumerate(train_loader)
273
+ LOGGER.info(('\n' + '%11s' * 7) % ('Epoch', 'GPU_mem', 'box_loss', 'cls_loss', 'dfl_loss', 'Instances', 'Size'))
274
+ if RANK in {-1, 0}:
275
+ pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar
276
+ optimizer.zero_grad()
277
+ for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
278
+ callbacks.run('on_train_batch_start')
279
+ ni = i + nb * epoch # number integrated batches (since train start)
280
+ imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0
281
+
282
+ # Warmup
283
+ if ni <= nw:
284
+ xi = [0, nw] # x interp
285
+ # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
286
+ accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
287
+ for j, x in enumerate(optimizer.param_groups):
288
+ # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
289
+ x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)])
290
+ if 'momentum' in x:
291
+ x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
292
+
293
+ # Multi-scale
294
+ if opt.multi_scale:
295
+ sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size
296
+ sf = sz / max(imgs.shape[2:]) # scale factor
297
+ if sf != 1:
298
+ ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
299
+ imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
300
+
301
+ # Forward
302
+ with torch.cuda.amp.autocast(amp):
303
+ pred = model(imgs) # forward
304
+ loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size
305
+ if RANK != -1:
306
+ loss *= WORLD_SIZE # gradient averaged between devices in DDP mode
307
+ if opt.quad:
308
+ loss *= 4.
309
+
310
+ # Backward
311
+ scaler.scale(loss).backward()
312
+
313
+ # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html
314
+ if ni - last_opt_step >= accumulate:
315
+ scaler.unscale_(optimizer) # unscale gradients
316
+ torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients
317
+ scaler.step(optimizer) # optimizer.step
318
+ scaler.update()
319
+ optimizer.zero_grad()
320
+ if ema:
321
+ ema.update(model)
322
+ last_opt_step = ni
323
+
324
+ # Log
325
+ if RANK in {-1, 0}:
326
+ mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
327
+ mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB)
328
+ pbar.set_description(('%11s' * 2 + '%11.4g' * 5) %
329
+ (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]))
330
+ callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths, list(mloss))
331
+ if callbacks.stop_training:
332
+ return
333
+ # end batch ------------------------------------------------------------------------------------------------
334
+
335
+ # Scheduler
336
+ lr = [x['lr'] for x in optimizer.param_groups] # for loggers
337
+ scheduler.step()
338
+
339
+ if RANK in {-1, 0}:
340
+ # mAP
341
+ callbacks.run('on_train_epoch_end', epoch=epoch)
342
+ ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights'])
343
+ final_epoch = (epoch + 1 == epochs) or stopper.possible_stop
344
+ if not noval or final_epoch: # Calculate mAP
345
+ results, maps, _ = validate.run(data_dict,
346
+ batch_size=batch_size // WORLD_SIZE * 2,
347
+ imgsz=imgsz,
348
+ half=amp,
349
+ model=ema.ema,
350
+ single_cls=single_cls,
351
+ dataloader=val_loader,
352
+ save_dir=save_dir,
353
+ plots=False,
354
+ callbacks=callbacks,
355
+ compute_loss=compute_loss)
356
+
357
+ # Update best mAP
358
+ fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
359
+ stop = stopper(epoch=epoch, fitness=fi) # early stop check
360
+ if fi > best_fitness:
361
+ best_fitness = fi
362
+ log_vals = list(mloss) + list(results) + lr
363
+ callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi)
364
+
365
+ # Save model
366
+ if (not nosave) or (final_epoch and not evolve): # if save
367
+ ckpt = {
368
+ 'epoch': epoch,
369
+ 'best_fitness': best_fitness,
370
+ 'model': deepcopy(de_parallel(model)).half(),
371
+ 'ema': deepcopy(ema.ema).half(),
372
+ 'updates': ema.updates,
373
+ 'optimizer': optimizer.state_dict(),
374
+ 'opt': vars(opt),
375
+ 'git': GIT_INFO, # {remote, branch, commit} if a git repo
376
+ 'date': datetime.now().isoformat()}
377
+
378
+ # Save last, best and delete
379
+ torch.save(ckpt, last)
380
+ if best_fitness == fi:
381
+ torch.save(ckpt, best)
382
+ if opt.save_period > 0 and epoch % opt.save_period == 0:
383
+ torch.save(ckpt, w / f'epoch{epoch}.pt')
384
+ del ckpt
385
+ callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi)
386
+
387
+ # EarlyStopping
388
+ if RANK != -1: # if DDP training
389
+ broadcast_list = [stop if RANK == 0 else None]
390
+ dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks
391
+ if RANK != 0:
392
+ stop = broadcast_list[0]
393
+ if stop:
394
+ break # must break all DDP ranks
395
+
396
+ # end epoch ----------------------------------------------------------------------------------------------------
397
+ # end training -----------------------------------------------------------------------------------------------------
398
+ if RANK in {-1, 0}:
399
+ LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.')
400
+ for f in last, best:
401
+ if f.exists():
402
+ if f is last:
403
+ strip_optimizer(f, last_striped) # strip optimizers
404
+ else:
405
+ strip_optimizer(f, best_striped) # strip optimizers
406
+ if f is best:
407
+ LOGGER.info(f'\nValidating {f}...')
408
+ results, _, _ = validate.run(
409
+ data_dict,
410
+ batch_size=batch_size // WORLD_SIZE * 2,
411
+ imgsz=imgsz,
412
+ model=attempt_load(f, device).half(),
413
+ single_cls=single_cls,
414
+ dataloader=val_loader,
415
+ save_dir=save_dir,
416
+ save_json=is_coco,
417
+ verbose=True,
418
+ plots=plots,
419
+ callbacks=callbacks,
420
+ compute_loss=compute_loss) # val best model with plots
421
+ if is_coco:
422
+ callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi)
423
+
424
+ callbacks.run('on_train_end', last, best, epoch, results)
425
+
426
+ torch.cuda.empty_cache()
427
+ return results
428
+
429
+
430
def parse_opt(known=False):
    """Parse training command-line options.

    Arguments:
        known (bool): when True, ignore unrecognized arguments (parse_known_args)

    Returns:
        argparse.Namespace: the parsed options
    """
    parser = argparse.ArgumentParser()
    add = parser.add_argument  # local alias to keep the option table compact
    # parser.add_argument('--weights', type=str, default=ROOT / 'yolo.pt', help='initial weights path')
    # parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
    add('--weights', type=str, default='', help='initial weights path')
    add('--cfg', type=str, default='yolo.yaml', help='model.yaml path')
    add('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
    add('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path')
    add('--epochs', type=int, default=100, help='total training epochs')
    add('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch')
    add('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
    add('--rect', action='store_true', help='rectangular training')
    add('--resume', nargs='?', const=True, default=False, help='resume most recent training')
    add('--nosave', action='store_true', help='only save final checkpoint')
    add('--noval', action='store_true', help='only validate final epoch')
    add('--noautoanchor', action='store_true', help='disable AutoAnchor')
    add('--noplots', action='store_true', help='save no plot files')
    add('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
    add('--bucket', type=str, default='', help='gsutil bucket')
    add('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk')
    add('--image-weights', action='store_true', help='use weighted image selection for training')
    add('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    add('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
    add('--single-cls', action='store_true', help='train multi-class data as single-class')
    add('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW', 'LION'], default='SGD', help='optimizer')
    add('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
    add('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
    add('--project', default=ROOT / 'runs/train', help='save to project/name')
    add('--name', default='exp', help='save to project/name')
    add('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    add('--quad', action='store_true', help='quad dataloader')
    add('--cos-lr', action='store_true', help='cosine LR scheduler')
    add('--flat-cos-lr', action='store_true', help='flat cosine LR scheduler')
    add('--fixed-lr', action='store_true', help='fixed LR scheduler')
    add('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
    add('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
    add('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')
    add('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
    add('--seed', type=int, default=0, help='Global training seed')
    add('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify')
    add('--min-items', type=int, default=0, help='Experimental')
    add('--close-mosaic', type=int, default=0, help='Experimental')

    # Logger arguments
    add('--entity', default=None, help='Entity')
    add('--upload_dataset', nargs='?', const=True, default=False, help='Upload data, "val" option')
    add('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval')
    add('--artifact_alias', type=str, default='latest', help='Version of dataset artifact to use')

    return parser.parse_known_args()[0] if known else parser.parse_args()
480
+
481
+
482
def main(opt, callbacks=Callbacks()):
    """Entry point after argument parsing.

    Validates/normalizes `opt`, handles checkpoint resume, sets up DDP if
    launched under torch.distributed, then either trains once or runs a
    genetic-algorithm hyperparameter evolution loop.

    Args:
        opt: argparse.Namespace produced by parse_opt().
        callbacks: Callbacks instance used to hook loggers into training.
    """
    # Checks
    if RANK in {-1, 0}:
        print_args(vars(opt))

    # Resume (from specified or most recent last.pt)
    if opt.resume and not check_comet_resume(opt) and not opt.evolve:
        last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run())
        opt_yaml = last.parent.parent / 'opt.yaml'  # train options yaml saved next to weights/
        opt_data = opt.data  # original dataset
        if opt_yaml.is_file():
            with open(opt_yaml, errors='ignore') as f:
                d = yaml.safe_load(f)
        else:
            d = torch.load(last, map_location='cpu')['opt']
        opt = argparse.Namespace(**d)  # replace opt with the saved run's options
        opt.cfg, opt.weights, opt.resume = '', str(last), True  # reinstate
        if is_url(opt_data):
            opt.data = check_file(opt_data)  # avoid HUB resume auth timeout
    else:
        opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \
            check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project)  # checks
        assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
        if opt.evolve:
            if opt.project == str(ROOT / 'runs/train'):  # if default project name, rename to runs/evolve
                opt.project = str(ROOT / 'runs/evolve')
            opt.exist_ok, opt.resume = opt.resume, False  # pass resume to exist_ok and disable resume
        if opt.name == 'cfg':
            opt.name = Path(opt.cfg).stem  # use model.yaml as run name
        opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))

    # DDP mode
    device = select_device(opt.device, batch_size=opt.batch_size)
    if LOCAL_RANK != -1:
        msg = 'is not compatible with YOLO Multi-GPU DDP training'
        assert not opt.image_weights, f'--image-weights {msg}'
        assert not opt.evolve, f'--evolve {msg}'
        assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size'
        assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'
        assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
        torch.cuda.set_device(LOCAL_RANK)
        device = torch.device('cuda', LOCAL_RANK)
        dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo")

    # Train
    if not opt.evolve:
        train(opt.hyp, opt, device, callbacks)

    # Evolve hyperparameters (optional)
    else:
        # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
        meta = {
            'lr0': (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
            'lrf': (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
            'momentum': (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1
            'weight_decay': (1, 0.0, 0.001),  # optimizer weight decay
            'warmup_epochs': (1, 0.0, 5.0),  # warmup epochs (fractions ok)
            'warmup_momentum': (1, 0.0, 0.95),  # warmup initial momentum
            'warmup_bias_lr': (1, 0.0, 0.2),  # warmup initial bias lr
            'box': (1, 0.02, 0.2),  # box loss gain
            'cls': (1, 0.2, 4.0),  # cls loss gain
            'cls_pw': (1, 0.5, 2.0),  # cls BCELoss positive_weight
            'obj': (1, 0.2, 4.0),  # obj loss gain (scale with pixels)
            'obj_pw': (1, 0.5, 2.0),  # obj BCELoss positive_weight
            'iou_t': (0, 0.1, 0.7),  # IoU training threshold
            'anchor_t': (1, 2.0, 8.0),  # anchor-multiple threshold
            'anchors': (2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
            'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
            'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
            'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
            'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
            'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
            'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
            'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
            'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
            'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
            'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
            'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
            'mosaic': (1, 0.0, 1.0),  # image mosaic (probability)
            'mixup': (1, 0.0, 1.0),  # image mixup (probability)
            'copy_paste': (1, 0.0, 1.0)}  # segment copy-paste (probability)

        with open(opt.hyp, errors='ignore') as f:
            hyp = yaml.safe_load(f)  # load hyps dict
            if 'anchors' not in hyp:  # anchors commented in hyp.yaml
                hyp['anchors'] = 3
        if opt.noautoanchor:
            del hyp['anchors'], meta['anchors']
        opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir)  # only val/save final epoch
        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
        evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv'
        if opt.bucket:
            os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}')  # download evolve.csv if exists

        for _ in range(opt.evolve):  # generations to evolve
            if evolve_csv.exists():  # if evolve.csv exists: select best hyps and mutate
                # Select parent(s)
                parent = 'single'  # parent selection method: 'single' or 'weighted'
                x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1)
                n = min(5, len(x))  # number of previous results to consider
                x = x[np.argsort(-fitness(x))][:n]  # top n mutations
                w = fitness(x) - fitness(x).min() + 1E-6  # weights (sum > 0)
                if parent == 'single' or len(x) == 1:
                    # x = x[random.randint(0, n - 1)]  # random selection
                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
                elif parent == 'weighted':
                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination

                # Mutate
                mp, s = 0.8, 0.2  # mutation probability, sigma
                npr = np.random
                npr.seed(int(time.time()))
                g = np.array([meta[k][0] for k in hyp.keys()])  # gains 0-1
                ng = len(meta)
                v = np.ones(ng)
                while all(v == 1):  # mutate until a change occurs (prevent duplicates)
                    v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
                for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300)
                    hyp[k] = float(x[i + 7] * v[i])  # mutate; first 7 CSV columns are result metrics

                # Constrain to limits
                for k, v in meta.items():
                    hyp[k] = max(hyp[k], v[1])  # lower limit
                    hyp[k] = min(hyp[k], v[2])  # upper limit
                    hyp[k] = round(hyp[k], 5)  # significant digits

            # Train mutation
            results = train(hyp.copy(), opt, device, callbacks)
            callbacks = Callbacks()
            # Write mutation results
            keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'val/box_loss',
                    'val/obj_loss', 'val/cls_loss')
            print_mutation(keys, results, hyp.copy(), save_dir, opt.bucket)

        # Plot results
        plot_evolve(evolve_csv)
        LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n'
                    f"Results saved to {colorstr('bold', save_dir)}\n"
                    f'Usage example: $ python train.py --hyp {evolve_yaml}')
621
+
622
+
623
def run(**kwargs):
    """Programmatic entry point mirroring the CLI.

    Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolo.pt')

    Builds default options via parse_opt(known=True), overrides them with the
    given keyword arguments, runs main(), and returns the final Namespace.
    """
    opt = parse_opt(True)
    for k, v in kwargs.items():
        setattr(opt, k, v)  # override CLI defaults with caller-supplied values
    main(opt)
    return opt
630
+
631
+
632
if __name__ == "__main__":
    # Script entry point: parse CLI arguments and run training/evolution.
    opt = parse_opt()
    main(opt)
train_dual.py ADDED
@@ -0,0 +1,644 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import math
3
+ import os
4
+ import random
5
+ import sys
6
+ import time
7
+ from copy import deepcopy
8
+ from datetime import datetime
9
+ from pathlib import Path
10
+
11
+ import numpy as np
12
+ import torch
13
+ import torch.distributed as dist
14
+ import torch.nn as nn
15
+ import yaml
16
+ from torch.optim import lr_scheduler
17
+ from tqdm import tqdm
18
+
19
+ FILE = Path(__file__).resolve()
20
+ ROOT = FILE.parents[0] # YOLO root directory
21
+ if str(ROOT) not in sys.path:
22
+ sys.path.append(str(ROOT)) # add ROOT to PATH
23
+ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
24
+
25
+ import val_dual as validate # for end-of-epoch mAP
26
+ from models.experimental import attempt_load
27
+ from models.yolo import Model
28
+ from utils.autoanchor import check_anchors
29
+ from utils.autobatch import check_train_batch_size
30
+ from utils.callbacks import Callbacks
31
+ from utils.dataloaders import create_dataloader
32
+ from utils.downloads import attempt_download, is_url
33
+ from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info,
34
+ check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr,
35
+ get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights,
36
+ labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer,
37
+ yaml_save, one_flat_cycle)
38
+ from utils.loggers import Loggers
39
+ from utils.loggers.comet.comet_utils import check_comet_resume
40
+ from utils.loss_tal_dual import ComputeLoss
41
+ #from utils.loss_tal_dual import ComputeLossLH as ComputeLoss
42
+ #from utils.loss_tal_dual import ComputeLossLHCF as ComputeLoss
43
+ from utils.metrics import fitness
44
+ from utils.plots import plot_evolve
45
+ from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer,
46
+ smart_resume, torch_distributed_zero_first)
47
+
48
+ LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html
49
+ RANK = int(os.getenv('RANK', -1))
50
+ WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
51
+ GIT_INFO = None#check_git_info()
52
+
53
+
54
+ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary
55
+ save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \
56
+ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \
57
+ opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze
58
+ callbacks.run('on_pretrain_routine_start')
59
+
60
+ # Directories
61
+ w = save_dir / 'weights' # weights dir
62
+ (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir
63
+ last, best = w / 'last.pt', w / 'best.pt'
64
+
65
+ # Hyperparameters
66
+ if isinstance(hyp, str):
67
+ with open(hyp, errors='ignore') as f:
68
+ hyp = yaml.safe_load(f) # load hyps dict
69
+ LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
70
+ hyp['anchor_t'] = 5.0
71
+ opt.hyp = hyp.copy() # for saving hyps to checkpoints
72
+
73
+ # Save run settings
74
+ if not evolve:
75
+ yaml_save(save_dir / 'hyp.yaml', hyp)
76
+ yaml_save(save_dir / 'opt.yaml', vars(opt))
77
+
78
+ # Loggers
79
+ data_dict = None
80
+ if RANK in {-1, 0}:
81
+ loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance
82
+
83
+ # Register actions
84
+ for k in methods(loggers):
85
+ callbacks.register_action(k, callback=getattr(loggers, k))
86
+
87
+ # Process custom dataset artifact link
88
+ data_dict = loggers.remote_dataset
89
+ if resume: # If resuming runs from remote artifact
90
+ weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size
91
+
92
+ # Config
93
+ plots = not evolve and not opt.noplots # create plots
94
+ cuda = device.type != 'cpu'
95
+ init_seeds(opt.seed + 1 + RANK, deterministic=True)
96
+ with torch_distributed_zero_first(LOCAL_RANK):
97
+ data_dict = data_dict or check_dataset(data) # check if None
98
+ train_path, val_path = data_dict['train'], data_dict['val']
99
+ nc = 1 if single_cls else int(data_dict['nc']) # number of classes
100
+ names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
101
+ #is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset
102
+ is_coco = isinstance(val_path, str) and val_path.endswith('val2017.txt') # COCO dataset
103
+
104
+ # Model
105
+ check_suffix(weights, '.pt') # check weights
106
+ pretrained = weights.endswith('.pt')
107
+ if pretrained:
108
+ with torch_distributed_zero_first(LOCAL_RANK):
109
+ weights = attempt_download(weights) # download if not found locally
110
+ ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak
111
+ model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
112
+ exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys
113
+ csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32
114
+ csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect
115
+ model.load_state_dict(csd, strict=False) # load
116
+ LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report
117
+ else:
118
+ model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
119
+ amp = check_amp(model) # check AMP
120
+
121
+ # Freeze
122
+ freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze
123
+ for k, v in model.named_parameters():
124
+ # v.requires_grad = True # train all layers TODO: uncomment this line as in master
125
+ # v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results)
126
+ if any(x in k for x in freeze):
127
+ LOGGER.info(f'freezing {k}')
128
+ v.requires_grad = False
129
+
130
+ # Image size
131
+ gs = max(int(model.stride.max()), 32) # grid size (max stride)
132
+ imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple
133
+
134
+ # Batch size
135
+ if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size
136
+ batch_size = check_train_batch_size(model, imgsz, amp)
137
+ loggers.on_params_update({"batch_size": batch_size})
138
+
139
+ # Optimizer
140
+ nbs = 64 # nominal batch size
141
+ accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing
142
+ hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay
143
+ optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay'])
144
+
145
+ # Scheduler
146
+ if opt.cos_lr:
147
+ lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf']
148
+ elif opt.flat_cos_lr:
149
+ lf = one_flat_cycle(1, hyp['lrf'], epochs) # flat cosine 1->hyp['lrf']
150
+ elif opt.fixed_lr:
151
+ lf = lambda x: 1.0
152
+ else:
153
+ lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear
154
+
155
+ # def lf(x): # saw
156
+ # return (1 - (x % 30) / 30) * (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf']
157
+ #
158
+ # def lf(x): # triangle start at min
159
+ # return 2 * abs(x / 30 - math.floor(x / 30 + 1 / 2)) * (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf']
160
+ #
161
+ # def lf(x): # triangle start at max
162
+ # return 2 * abs(x / 32 + .5 - math.floor(x / 32 + 1)) * (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf']
163
+
164
+ scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
165
+ # from utils.plots import plot_lr_scheduler; plot_lr_scheduler(optimizer, scheduler, epochs)
166
+
167
+ # EMA
168
+ ema = ModelEMA(model) if RANK in {-1, 0} else None
169
+
170
+ # Resume
171
+ best_fitness, start_epoch = 0.0, 0
172
+ if pretrained:
173
+ if resume:
174
+ best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume)
175
+ del ckpt, csd
176
+
177
+ # DP mode
178
+ if cuda and RANK == -1 and torch.cuda.device_count() > 1:
179
+ LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.')
180
+ model = torch.nn.DataParallel(model)
181
+
182
+ # SyncBatchNorm
183
+ if opt.sync_bn and cuda and RANK != -1:
184
+ model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
185
+ LOGGER.info('Using SyncBatchNorm()')
186
+
187
+ # Trainloader
188
+ train_loader, dataset = create_dataloader(train_path,
189
+ imgsz,
190
+ batch_size // WORLD_SIZE,
191
+ gs,
192
+ single_cls,
193
+ hyp=hyp,
194
+ augment=True,
195
+ cache=None if opt.cache == 'val' else opt.cache,
196
+ rect=opt.rect,
197
+ rank=LOCAL_RANK,
198
+ workers=workers,
199
+ image_weights=opt.image_weights,
200
+ close_mosaic=opt.close_mosaic != 0,
201
+ quad=opt.quad,
202
+ prefix=colorstr('train: '),
203
+ shuffle=True,
204
+ min_items=opt.min_items)
205
+ labels = np.concatenate(dataset.labels, 0)
206
+ mlc = int(labels[:, 0].max()) # max label class
207
+ assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}'
208
+
209
+ # Process 0
210
+ if RANK in {-1, 0}:
211
+ val_loader = create_dataloader(val_path,
212
+ imgsz,
213
+ batch_size // WORLD_SIZE * 2,
214
+ gs,
215
+ single_cls,
216
+ hyp=hyp,
217
+ cache=None if noval else opt.cache,
218
+ rect=True,
219
+ rank=-1,
220
+ workers=workers * 2,
221
+ pad=0.5,
222
+ prefix=colorstr('val: '))[0]
223
+
224
+ if not resume:
225
+ # if not opt.noautoanchor:
226
+ # check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # run AutoAnchor
227
+ model.half().float() # pre-reduce anchor precision
228
+
229
+ callbacks.run('on_pretrain_routine_end', labels, names)
230
+
231
+ # DDP mode
232
+ if cuda and RANK != -1:
233
+ model = smart_DDP(model)
234
+
235
+ # Model attributes
236
+ nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps)
237
+ #hyp['box'] *= 3 / nl # scale to layers
238
+ #hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers
239
+ #hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers
240
+ hyp['label_smoothing'] = opt.label_smoothing
241
+ model.nc = nc # attach number of classes to model
242
+ model.hyp = hyp # attach hyperparameters to model
243
+ model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights
244
+ model.names = names
245
+
246
+ # Start training
247
+ t0 = time.time()
248
+ nb = len(train_loader) # number of batches
249
+ nw = max(round(hyp['warmup_epochs'] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations)
250
+ # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
251
+ last_opt_step = -1
252
+ maps = np.zeros(nc) # mAP per class
253
+ results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
254
+ scheduler.last_epoch = start_epoch - 1 # do not move
255
+ scaler = torch.cuda.amp.GradScaler(enabled=amp)
256
+ stopper, stop = EarlyStopping(patience=opt.patience), False
257
+ compute_loss = ComputeLoss(model) # init loss class
258
+ callbacks.run('on_train_start')
259
+ LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
260
+ f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
261
+ f"Logging results to {colorstr('bold', save_dir)}\n"
262
+ f'Starting training for {epochs} epochs...')
263
+ for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
264
+ callbacks.run('on_train_epoch_start')
265
+ model.train()
266
+
267
+ # Update image weights (optional, single-GPU only)
268
+ if opt.image_weights:
269
+ cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights
270
+ iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights
271
+ dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx
272
+ if epoch == (epochs - opt.close_mosaic):
273
+ LOGGER.info("Closing dataloader mosaic")
274
+ dataset.mosaic = False
275
+
276
+ # Update mosaic border (optional)
277
+ # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
278
+ # dataset.mosaic_border = [b - imgsz, -b] # height, width borders
279
+
280
+ mloss = torch.zeros(3, device=device) # mean losses
281
+ if RANK != -1:
282
+ train_loader.sampler.set_epoch(epoch)
283
+ pbar = enumerate(train_loader)
284
+ LOGGER.info(('\n' + '%11s' * 7) % ('Epoch', 'GPU_mem', 'box_loss', 'cls_loss', 'dfl_loss', 'Instances', 'Size'))
285
+ if RANK in {-1, 0}:
286
+ pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar
287
+ optimizer.zero_grad()
288
+ for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
289
+ callbacks.run('on_train_batch_start')
290
+ ni = i + nb * epoch # number integrated batches (since train start)
291
+ imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0
292
+
293
+ # Warmup
294
+ if ni <= nw:
295
+ xi = [0, nw] # x interp
296
+ # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
297
+ accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
298
+ for j, x in enumerate(optimizer.param_groups):
299
+ # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
300
+ x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)])
301
+ if 'momentum' in x:
302
+ x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
303
+
304
+ # Multi-scale
305
+ if opt.multi_scale:
306
+ sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size
307
+ sf = sz / max(imgs.shape[2:]) # scale factor
308
+ if sf != 1:
309
+ ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
310
+ imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
311
+
312
+ # Forward
313
+ with torch.cuda.amp.autocast(amp):
314
+ pred = model(imgs) # forward
315
+ loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size
316
+ if RANK != -1:
317
+ loss *= WORLD_SIZE # gradient averaged between devices in DDP mode
318
+ if opt.quad:
319
+ loss *= 4.
320
+
321
+ # Backward
322
+ scaler.scale(loss).backward()
323
+
324
+ # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html
325
+ if ni - last_opt_step >= accumulate:
326
+ scaler.unscale_(optimizer) # unscale gradients
327
+ torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients
328
+ scaler.step(optimizer) # optimizer.step
329
+ scaler.update()
330
+ optimizer.zero_grad()
331
+ if ema:
332
+ ema.update(model)
333
+ last_opt_step = ni
334
+
335
+ # Log
336
+ if RANK in {-1, 0}:
337
+ mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
338
+ mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB)
339
+ pbar.set_description(('%11s' * 2 + '%11.4g' * 5) %
340
+ (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]))
341
+ callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths, list(mloss))
342
+ if callbacks.stop_training:
343
+ return
344
+ # end batch ------------------------------------------------------------------------------------------------
345
+
346
+ # Scheduler
347
+ lr = [x['lr'] for x in optimizer.param_groups] # for loggers
348
+ scheduler.step()
349
+
350
+ if RANK in {-1, 0}:
351
+ # mAP
352
+ callbacks.run('on_train_epoch_end', epoch=epoch)
353
+ ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights'])
354
+ final_epoch = (epoch + 1 == epochs) or stopper.possible_stop
355
+ if not noval or final_epoch: # Calculate mAP
356
+ results, maps, _ = validate.run(data_dict,
357
+ batch_size=batch_size // WORLD_SIZE * 2,
358
+ imgsz=imgsz,
359
+ half=amp,
360
+ model=ema.ema,
361
+ single_cls=single_cls,
362
+ dataloader=val_loader,
363
+ save_dir=save_dir,
364
+ plots=False,
365
+ callbacks=callbacks,
366
+ compute_loss=compute_loss)
367
+
368
+ # Update best mAP
369
+ fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
370
+ stop = stopper(epoch=epoch, fitness=fi) # early stop check
371
+ if fi > best_fitness:
372
+ best_fitness = fi
373
+ log_vals = list(mloss) + list(results) + lr
374
+ callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi)
375
+
376
+ # Save model
377
+ if (not nosave) or (final_epoch and not evolve): # if save
378
+ ckpt = {
379
+ 'epoch': epoch,
380
+ 'best_fitness': best_fitness,
381
+ 'model': deepcopy(de_parallel(model)).half(),
382
+ 'ema': deepcopy(ema.ema).half(),
383
+ 'updates': ema.updates,
384
+ 'optimizer': optimizer.state_dict(),
385
+ 'opt': vars(opt),
386
+ 'git': GIT_INFO, # {remote, branch, commit} if a git repo
387
+ 'date': datetime.now().isoformat()}
388
+
389
+ # Save last, best and delete
390
+ torch.save(ckpt, last)
391
+ if best_fitness == fi:
392
+ torch.save(ckpt, best)
393
+ if opt.save_period > 0 and epoch % opt.save_period == 0:
394
+ torch.save(ckpt, w / f'epoch{epoch}.pt')
395
+ del ckpt
396
+ callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi)
397
+
398
+ # EarlyStopping
399
+ if RANK != -1: # if DDP training
400
+ broadcast_list = [stop if RANK == 0 else None]
401
+ dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks
402
+ if RANK != 0:
403
+ stop = broadcast_list[0]
404
+ if stop:
405
+ break # must break all DDP ranks
406
+
407
+ # end epoch ----------------------------------------------------------------------------------------------------
408
+ # end training -----------------------------------------------------------------------------------------------------
409
+ if RANK in {-1, 0}:
410
+ LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.')
411
+ for f in last, best:
412
+ if f.exists():
413
+ strip_optimizer(f) # strip optimizers
414
+ if f is best:
415
+ LOGGER.info(f'\nValidating {f}...')
416
+ results, _, _ = validate.run(
417
+ data_dict,
418
+ batch_size=batch_size // WORLD_SIZE * 2,
419
+ imgsz=imgsz,
420
+ model=attempt_load(f, device).half(),
421
+ single_cls=single_cls,
422
+ dataloader=val_loader,
423
+ save_dir=save_dir,
424
+ save_json=is_coco,
425
+ verbose=True,
426
+ plots=plots,
427
+ callbacks=callbacks,
428
+ compute_loss=compute_loss) # val best model with plots
429
+ if is_coco:
430
+ callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi)
431
+
432
+ callbacks.run('on_train_end', last, best, epoch, results)
433
+
434
+ torch.cuda.empty_cache()
435
+ return results
436
+
437
+
438
+ def parse_opt(known=False):
439
+ parser = argparse.ArgumentParser()
440
+ # parser.add_argument('--weights', type=str, default=ROOT / 'yolo.pt', help='initial weights path')
441
+ # parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
442
+ parser.add_argument('--weights', type=str, default='', help='initial weights path')
443
+ parser.add_argument('--cfg', type=str, default='yolo.yaml', help='model.yaml path')
444
+ parser.add_argument('--data', type=str, default=ROOT / 'data/coco.yaml', help='dataset.yaml path')
445
+ parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-high.yaml', help='hyperparameters path')
446
+ parser.add_argument('--epochs', type=int, default=100, help='total training epochs')
447
+ parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch')
448
+ parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
449
+ parser.add_argument('--rect', action='store_true', help='rectangular training')
450
+ parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
451
+ parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
452
+ parser.add_argument('--noval', action='store_true', help='only validate final epoch')
453
+ parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor')
454
+ parser.add_argument('--noplots', action='store_true', help='save no plot files')
455
+ parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
456
+ parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
457
+ parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk')
458
+ parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
459
+ parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
460
+ parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
461
+ parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
462
+ parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW', 'LION'], default='SGD', help='optimizer')
463
+ parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
464
+ parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
465
+ parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name')
466
+ parser.add_argument('--name', default='exp', help='save to project/name')
467
+ parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
468
+ parser.add_argument('--quad', action='store_true', help='quad dataloader')
469
+ parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler')
470
+ parser.add_argument('--flat-cos-lr', action='store_true', help='flat cosine LR scheduler')
471
+ parser.add_argument('--fixed-lr', action='store_true', help='fixed LR scheduler')
472
+ parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
473
+ parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
474
+ parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')
475
+ parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
476
+ parser.add_argument('--seed', type=int, default=0, help='Global training seed')
477
+ parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify')
478
+ parser.add_argument('--min-items', type=int, default=0, help='Experimental')
479
+ parser.add_argument('--close-mosaic', type=int, default=0, help='Experimental')
480
+
481
+ # Logger arguments
482
+ parser.add_argument('--entity', default=None, help='Entity')
483
+ parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='Upload data, "val" option')
484
+ parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval')
485
+ parser.add_argument('--artifact_alias', type=str, default='latest', help='Version of dataset artifact to use')
486
+
487
+ return parser.parse_known_args()[0] if known else parser.parse_args()
488
+
489
+
490
def main(opt, callbacks=None):
    """Entry point for training: resolve/resume options, set up (DDP) device, then train or evolve.

    Args:
        opt: argparse.Namespace produced by parse_opt() (or an equivalent object).
        callbacks: optional Callbacks instance for training hooks; a fresh one is
            created when omitted. (Previously this was a module-level default
            instance shared across calls — a mutable-default bug, now fixed.)
    """
    if callbacks is None:
        callbacks = Callbacks()

    # Checks
    if RANK in {-1, 0}:
        print_args(vars(opt))
        #check_git_status()
        #check_requirements()

    # Resume (from specified or most recent last.pt)
    if opt.resume and not check_comet_resume(opt) and not opt.evolve:
        last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run())
        opt_yaml = last.parent.parent / 'opt.yaml'  # train options yaml
        opt_data = opt.data  # original dataset
        if opt_yaml.is_file():
            with open(opt_yaml, errors='ignore') as f:
                d = yaml.safe_load(f)
        else:
            # fall back to the options stored inside the checkpoint itself
            d = torch.load(last, map_location='cpu')['opt']
        opt = argparse.Namespace(**d)  # replace
        opt.cfg, opt.weights, opt.resume = '', str(last), True  # reinstate
        if is_url(opt_data):
            opt.data = check_file(opt_data)  # avoid HUB resume auth timeout
    else:
        opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \
            check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project)  # checks
        assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
        if opt.evolve:
            if opt.project == str(ROOT / 'runs/train'):  # if default project name, rename to runs/evolve
                opt.project = str(ROOT / 'runs/evolve')
            opt.exist_ok, opt.resume = opt.resume, False  # pass resume to exist_ok and disable resume
        if opt.name == 'cfg':
            opt.name = Path(opt.cfg).stem  # use model.yaml as name
        opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))

    # DDP mode
    device = select_device(opt.device, batch_size=opt.batch_size)
    if LOCAL_RANK != -1:
        msg = 'is not compatible with YOLO Multi-GPU DDP training'
        assert not opt.image_weights, f'--image-weights {msg}'
        assert not opt.evolve, f'--evolve {msg}'
        assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size'
        assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'
        assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
        torch.cuda.set_device(LOCAL_RANK)
        device = torch.device('cuda', LOCAL_RANK)
        dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo")

    # Train
    if not opt.evolve:
        train(opt.hyp, opt, device, callbacks)

    # Evolve hyperparameters (optional)
    else:
        # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
        meta = {
            'lr0': (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
            'lrf': (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
            'momentum': (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1
            'weight_decay': (1, 0.0, 0.001),  # optimizer weight decay
            'warmup_epochs': (1, 0.0, 5.0),  # warmup epochs (fractions ok)
            'warmup_momentum': (1, 0.0, 0.95),  # warmup initial momentum
            'warmup_bias_lr': (1, 0.0, 0.2),  # warmup initial bias lr
            'box': (1, 0.02, 0.2),  # box loss gain
            'cls': (1, 0.2, 4.0),  # cls loss gain
            'cls_pw': (1, 0.5, 2.0),  # cls BCELoss positive_weight
            'obj': (1, 0.2, 4.0),  # obj loss gain (scale with pixels)
            'obj_pw': (1, 0.5, 2.0),  # obj BCELoss positive_weight
            'iou_t': (0, 0.1, 0.7),  # IoU training threshold
            'anchor_t': (1, 2.0, 8.0),  # anchor-multiple threshold
            'anchors': (2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
            'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
            'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
            'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
            'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
            'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
            'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
            'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
            'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
            'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
            'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
            'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
            'mosaic': (1, 0.0, 1.0),  # image mosaic (probability)
            'mixup': (1, 0.0, 1.0),  # image mixup (probability)
            'copy_paste': (1, 0.0, 1.0)}  # segment copy-paste (probability)

        with open(opt.hyp, errors='ignore') as f:
            hyp = yaml.safe_load(f)  # load hyps dict
        if 'anchors' not in hyp:  # anchors commented in hyp.yaml
            hyp['anchors'] = 3
        if opt.noautoanchor:
            del hyp['anchors'], meta['anchors']
        opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir)  # only val/save final epoch
        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
        evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv'
        if opt.bucket:
            os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}')  # download evolve.csv if exists

        for _ in range(opt.evolve):  # generations to evolve
            if evolve_csv.exists():  # if evolve.csv exists: select best hyps and mutate
                # Select parent(s)
                parent = 'single'  # parent selection method: 'single' or 'weighted'
                x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1)
                n = min(5, len(x))  # number of previous results to consider
                x = x[np.argsort(-fitness(x))][:n]  # top n mutations
                w = fitness(x) - fitness(x).min() + 1E-6  # weights (sum > 0)
                if parent == 'single' or len(x) == 1:
                    # x = x[random.randint(0, n - 1)]  # random selection
                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
                elif parent == 'weighted':
                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination

                # Mutate
                mp, s = 0.8, 0.2  # mutation probability, sigma
                npr = np.random
                npr.seed(int(time.time()))
                g = np.array([meta[k][0] for k in hyp.keys()])  # gains 0-1
                ng = len(meta)
                v = np.ones(ng)
                while all(v == 1):  # mutate until a change occurs (prevent duplicates)
                    v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
                for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300)
                    hyp[k] = float(x[i + 7] * v[i])  # mutate (first 7 CSV columns are result keys)

                # Constrain to limits (use a distinct name so the mutation vector `v` is not shadowed)
                for k, lim in meta.items():
                    hyp[k] = max(hyp[k], lim[1])  # lower limit
                    hyp[k] = min(hyp[k], lim[2])  # upper limit
                    hyp[k] = round(hyp[k], 5)  # significant digits

            # Train mutation
            results = train(hyp.copy(), opt, device, callbacks)
            callbacks = Callbacks()  # fresh callbacks per generation
            # Write mutation results
            keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'val/box_loss',
                    'val/obj_loss', 'val/cls_loss')
            print_mutation(keys, results, hyp.copy(), save_dir, opt.bucket)

        # Plot results
        plot_evolve(evolve_csv)
        LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n'
                    f"Results saved to {colorstr('bold', save_dir)}\n"
                    f'Usage example: $ python train.py --hyp {evolve_yaml}')
631
+
632
+
633
def run(**kwargs):
    """Programmatic training entry point.

    Parses default CLI options, overrides them with any keyword arguments,
    runs main(), and returns the resulting options namespace.

    Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolo.pt')
    """
    opt = parse_opt(True)
    for name, value in kwargs.items():
        setattr(opt, name, value)
    main(opt)
    return opt
640
+
641
+
642
if __name__ == "__main__":
    # Script entry point: parse CLI arguments and launch training/evolution.
    opt = parse_opt()
    main(opt)
train_triple.py ADDED
@@ -0,0 +1,636 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import math
3
+ import os
4
+ import random
5
+ import sys
6
+ import time
7
+ from copy import deepcopy
8
+ from datetime import datetime
9
+ from pathlib import Path
10
+
11
+ import numpy as np
12
+ import torch
13
+ import torch.distributed as dist
14
+ import torch.nn as nn
15
+ import yaml
16
+ from torch.optim import lr_scheduler
17
+ from tqdm import tqdm
18
+
19
+ FILE = Path(__file__).resolve()
20
+ ROOT = FILE.parents[0] # YOLO root directory
21
+ if str(ROOT) not in sys.path:
22
+ sys.path.append(str(ROOT)) # add ROOT to PATH
23
+ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
24
+
25
+ import val_triple as validate # for end-of-epoch mAP
26
+ from models.experimental import attempt_load
27
+ from models.yolo import Model
28
+ from utils.autoanchor import check_anchors
29
+ from utils.autobatch import check_train_batch_size
30
+ from utils.callbacks import Callbacks
31
+ from utils.dataloaders import create_dataloader
32
+ from utils.downloads import attempt_download, is_url
33
+ from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info,
34
+ check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr,
35
+ get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights,
36
+ labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer,
37
+ yaml_save)
38
+ from utils.loggers import Loggers
39
+ from utils.loggers.comet.comet_utils import check_comet_resume
40
+ from utils.loss_tal_triple import ComputeLoss
41
+ from utils.metrics import fitness
42
+ from utils.plots import plot_evolve
43
+ from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer,
44
+ smart_resume, torch_distributed_zero_first)
45
+
46
# Distributed-training environment (set by torchrun/torch.distributed.run; -1/1 defaults mean single-process)
LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
RANK = int(os.getenv('RANK', -1))  # global process rank (-1 = not running under DDP)
WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))  # total number of DDP processes
GIT_INFO = None#check_git_info()  # git metadata lookup disabled; kept as None placeholder for checkpoints
50
+
51
+
52
def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictionary
    """Run one full training job and return the final validation results tuple.

    Builds the model (optionally from pretrained weights), dataloaders, optimizer,
    LR scheduler, EMA and AMP scaler, then runs the epoch/batch loop with warmup,
    optional multi-scale, gradient accumulation, per-epoch validation (rank 0),
    checkpointing and early stopping. Supports single-GPU, DP and DDP execution.

    Args:
        hyp: path to a hyperparameter YAML file, or an already-loaded hyp dict.
        opt: argparse.Namespace of training options (see parse_opt).
        device: torch.device selected by the caller.
        callbacks: Callbacks instance whose hooks are fired throughout training.

    Returns:
        results tuple from the final validation run (P, R, mAPs and val losses).
    """
    save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \
        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \
        opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze
    callbacks.run('on_pretrain_routine_start')

    # Directories
    w = save_dir / 'weights'  # weights dir
    (w.parent if evolve else w).mkdir(parents=True, exist_ok=True)  # make dir
    last, best = w / 'last.pt', w / 'best.pt'

    # Hyperparameters
    if isinstance(hyp, str):
        with open(hyp, errors='ignore') as f:
            hyp = yaml.safe_load(f)  # load hyps dict
    LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
    hyp['anchor_t'] = 5.0  # NOTE: hard-coded override of any anchor_t value from the hyp file
    opt.hyp = hyp.copy()  # for saving hyps to checkpoints

    # Save run settings
    if not evolve:
        yaml_save(save_dir / 'hyp.yaml', hyp)
        yaml_save(save_dir / 'opt.yaml', vars(opt))

    # Loggers
    data_dict = None
    if RANK in {-1, 0}:
        loggers = Loggers(save_dir, weights, opt, hyp, LOGGER)  # loggers instance

        # Register actions
        for k in methods(loggers):
            callbacks.register_action(k, callback=getattr(loggers, k))

        # Process custom dataset artifact link
        data_dict = loggers.remote_dataset
        if resume:  # If resuming runs from remote artifact
            weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size

    # Config
    plots = not evolve and not opt.noplots  # create plots
    cuda = device.type != 'cpu'
    init_seeds(opt.seed + 1 + RANK, deterministic=True)
    with torch_distributed_zero_first(LOCAL_RANK):
        data_dict = data_dict or check_dataset(data)  # check if None
    train_path, val_path = data_dict['train'], data_dict['val']
    nc = 1 if single_cls else int(data_dict['nc'])  # number of classes
    names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
    #is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset
    is_coco = isinstance(val_path, str) and val_path.endswith('val2017.txt')  # COCO dataset

    # Model
    check_suffix(weights, '.pt')  # check weights
    pretrained = weights.endswith('.pt')
    if pretrained:
        with torch_distributed_zero_first(LOCAL_RANK):
            weights = attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location='cpu')  # load checkpoint to CPU to avoid CUDA memory leak
        model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
        exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else []  # exclude keys
        csd = ckpt['model'].float().state_dict()  # checkpoint state_dict as FP32
        csd = intersect_dicts(csd, model.state_dict(), exclude=exclude)  # intersect
        model.load_state_dict(csd, strict=False)  # load
        LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}')  # report
    else:
        model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
    amp = check_amp(model)  # check AMP

    # Freeze
    freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))]  # layers to freeze
    for k, v in model.named_parameters():
        # v.requires_grad = True # train all layers TODO: uncomment this line as in master
        # v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results)
        if any(x in k for x in freeze):
            LOGGER.info(f'freezing {k}')
            v.requires_grad = False

    # Image size
    gs = max(int(model.stride.max()), 32)  # grid size (max stride)
    imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2)  # verify imgsz is gs-multiple

    # Batch size
    if RANK == -1 and batch_size == -1:  # single-GPU only, estimate best batch size
        batch_size = check_train_batch_size(model, imgsz, amp)
        loggers.on_params_update({"batch_size": batch_size})

    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= batch_size * accumulate / nbs  # scale weight_decay
    optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay'])

    # Scheduler
    if opt.cos_lr:
        lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
    else:
        lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf']  # linear

    # def lf(x):  # saw
    #     return (1 - (x % 30) / 30) * (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf']
    #
    # def lf(x):  # triangle start at min
    #     return 2 * abs(x / 30 - math.floor(x / 30 + 1 / 2)) * (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf']
    #
    # def lf(x):  # triangle start at max
    #     return 2 * abs(x / 32 + .5 - math.floor(x / 32 + 1)) * (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf']

    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # from utils.plots import plot_lr_scheduler; plot_lr_scheduler(optimizer, scheduler, epochs)

    # EMA (exponential moving average of weights, rank 0 only)
    ema = ModelEMA(model) if RANK in {-1, 0} else None

    # Resume
    best_fitness, start_epoch = 0.0, 0
    if pretrained:
        if resume:
            best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume)
        del ckpt, csd

    # DP mode
    if cuda and RANK == -1 and torch.cuda.device_count() > 1:
        LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.')
        model = torch.nn.DataParallel(model)

    # SyncBatchNorm
    if opt.sync_bn and cuda and RANK != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        LOGGER.info('Using SyncBatchNorm()')

    # Trainloader
    train_loader, dataset = create_dataloader(train_path,
                                              imgsz,
                                              batch_size // WORLD_SIZE,
                                              gs,
                                              single_cls,
                                              hyp=hyp,
                                              augment=True,
                                              cache=None if opt.cache == 'val' else opt.cache,
                                              rect=opt.rect,
                                              rank=LOCAL_RANK,
                                              workers=workers,
                                              image_weights=opt.image_weights,
                                              close_mosaic=opt.close_mosaic != 0,
                                              quad=opt.quad,
                                              prefix=colorstr('train: '),
                                              shuffle=True,
                                              min_items=opt.min_items)
    labels = np.concatenate(dataset.labels, 0)
    mlc = int(labels[:, 0].max())  # max label class
    assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}'

    # Process 0 (validation loader only needed on the rank that validates)
    if RANK in {-1, 0}:
        val_loader = create_dataloader(val_path,
                                       imgsz,
                                       batch_size // WORLD_SIZE * 2,
                                       gs,
                                       single_cls,
                                       hyp=hyp,
                                       cache=None if noval else opt.cache,
                                       rect=True,
                                       rank=-1,
                                       workers=workers * 2,
                                       pad=0.5,
                                       prefix=colorstr('val: '))[0]

        if not resume:
            # if not opt.noautoanchor:
            #     check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)  # run AutoAnchor
            model.half().float()  # pre-reduce anchor precision

        callbacks.run('on_pretrain_routine_end', labels, names)

    # DDP mode
    if cuda and RANK != -1:
        model = smart_DDP(model)

    # Model attributes
    nl = de_parallel(model).model[-1].nl  # number of detection layers (to scale hyps)
    #hyp['box'] *= 3 / nl  # scale to layers
    #hyp['cls'] *= nc / 80 * 3 / nl  # scale to classes and layers
    #hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl  # scale to image size and layers
    hyp['label_smoothing'] = opt.label_smoothing
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights
    model.names = names

    # Start training
    t0 = time.time()
    nb = len(train_loader)  # number of batches
    nw = max(round(hyp['warmup_epochs'] * nb), 100)  # number of warmup iterations, max(3 epochs, 100 iterations)
    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
    last_opt_step = -1
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
    scheduler.last_epoch = start_epoch - 1  # do not move
    scaler = torch.cuda.amp.GradScaler(enabled=amp)
    stopper, stop = EarlyStopping(patience=opt.patience), False
    compute_loss = ComputeLoss(model)  # init loss class
    callbacks.run('on_train_start')
    LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
                f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
                f"Logging results to {colorstr('bold', save_dir)}\n"
                f'Starting training for {epochs} epochs...')
    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        callbacks.run('on_train_epoch_start')
        model.train()

        # Update image weights (optional, single-GPU only)
        if opt.image_weights:
            cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights
            iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
            dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx
        if epoch == (epochs - opt.close_mosaic):
            LOGGER.info("Closing dataloader mosaic")
            dataset.mosaic = False

        # Update mosaic border (optional)
        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders

        mloss = torch.zeros(3, device=device)  # mean losses
        if RANK != -1:
            train_loader.sampler.set_epoch(epoch)
        pbar = enumerate(train_loader)
        LOGGER.info(('\n' + '%11s' * 7) % ('Epoch', 'GPU_mem', 'box_loss', 'cls_loss', 'dfl_loss', 'Instances', 'Size'))
        if RANK in {-1, 0}:
            pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT)  # progress bar
        optimizer.zero_grad()
        for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
            callbacks.run('on_train_batch_start')
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255  # uint8 to float32, 0-255 to 0.0-1.0

            # Warmup
            if ni <= nw:
                xi = [0, nw]  # x interp
                # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
                accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])

            # Multi-scale
            if opt.multi_scale:
                sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs  # size
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                    imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

            # Forward
            with torch.cuda.amp.autocast(amp):
                pred = model(imgs)  # forward
                loss, loss_items = compute_loss(pred, targets.to(device))  # loss scaled by batch_size
                if RANK != -1:
                    loss *= WORLD_SIZE  # gradient averaged between devices in DDP mode
                if opt.quad:
                    loss *= 4.

            # Backward
            scaler.scale(loss).backward()

            # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html
            if ni - last_opt_step >= accumulate:
                scaler.unscale_(optimizer)  # unscale gradients
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0)  # clip gradients
                scaler.step(optimizer)  # optimizer.step
                scaler.update()
                optimizer.zero_grad()
                if ema:
                    ema.update(model)
                last_opt_step = ni

            # Log
            if RANK in {-1, 0}:
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G'  # (GB)
                pbar.set_description(('%11s' * 2 + '%11.4g' * 5) %
                                     (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]))
                callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths, list(mloss))
                if callbacks.stop_training:
                    return
            # end batch ------------------------------------------------------------------------------------------------

        # Scheduler
        lr = [x['lr'] for x in optimizer.param_groups]  # for loggers
        scheduler.step()

        if RANK in {-1, 0}:
            # mAP
            callbacks.run('on_train_epoch_end', epoch=epoch)
            ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights'])
            final_epoch = (epoch + 1 == epochs) or stopper.possible_stop
            if not noval or final_epoch:  # Calculate mAP
                results, maps, _ = validate.run(data_dict,
                                                batch_size=batch_size // WORLD_SIZE * 2,
                                                imgsz=imgsz,
                                                half=amp,
                                                model=ema.ema,
                                                single_cls=single_cls,
                                                dataloader=val_loader,
                                                save_dir=save_dir,
                                                plots=False,
                                                callbacks=callbacks,
                                                compute_loss=compute_loss)

            # Update best mAP
            fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
            stop = stopper(epoch=epoch, fitness=fi)  # early stop check
            if fi > best_fitness:
                best_fitness = fi
            log_vals = list(mloss) + list(results) + lr
            callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi)

            # Save model
            if (not nosave) or (final_epoch and not evolve):  # if save
                ckpt = {
                    'epoch': epoch,
                    'best_fitness': best_fitness,
                    'model': deepcopy(de_parallel(model)).half(),
                    'ema': deepcopy(ema.ema).half(),
                    'updates': ema.updates,
                    'optimizer': optimizer.state_dict(),
                    'opt': vars(opt),
                    'git': GIT_INFO,  # {remote, branch, commit} if a git repo
                    'date': datetime.now().isoformat()}

                # Save last, best and delete
                torch.save(ckpt, last)
                if best_fitness == fi:
                    torch.save(ckpt, best)
                if opt.save_period > 0 and epoch % opt.save_period == 0:
                    torch.save(ckpt, w / f'epoch{epoch}.pt')
                del ckpt
                callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi)

        # EarlyStopping
        if RANK != -1:  # if DDP training
            broadcast_list = [stop if RANK == 0 else None]
            dist.broadcast_object_list(broadcast_list, 0)  # broadcast 'stop' to all ranks
            if RANK != 0:
                stop = broadcast_list[0]
        if stop:
            break  # must break all DDP ranks

        # end epoch ----------------------------------------------------------------------------------------------------
    # end training -----------------------------------------------------------------------------------------------------
    if RANK in {-1, 0}:
        LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.')
        for f in last, best:
            if f.exists():
                strip_optimizer(f)  # strip optimizers
                if f is best:
                    LOGGER.info(f'\nValidating {f}...')
                    results, _, _ = validate.run(
                        data_dict,
                        batch_size=batch_size // WORLD_SIZE * 2,
                        imgsz=imgsz,
                        model=attempt_load(f, device).half(),
                        single_cls=single_cls,
                        dataloader=val_loader,
                        save_dir=save_dir,
                        save_json=is_coco,
                        verbose=True,
                        plots=plots,
                        callbacks=callbacks,
                        compute_loss=compute_loss)  # val best model with plots
                    if is_coco:
                        callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi)

        callbacks.run('on_train_end', last, best, epoch, results)

    torch.cuda.empty_cache()
    return results
430
+
431
+
432
+ def parse_opt(known=False):
433
+ parser = argparse.ArgumentParser()
434
+ # parser.add_argument('--weights', type=str, default=ROOT / 'yolo.pt', help='initial weights path')
435
+ # parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
436
+ parser.add_argument('--weights', type=str, default='', help='initial weights path')
437
+ parser.add_argument('--cfg', type=str, default='yolo.yaml', help='model.yaml path')
438
+ parser.add_argument('--data', type=str, default=ROOT / 'data/coco.yaml', help='dataset.yaml path')
439
+ parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-high.yaml', help='hyperparameters path')
440
+ parser.add_argument('--epochs', type=int, default=100, help='total training epochs')
441
+ parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch')
442
+ parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
443
+ parser.add_argument('--rect', action='store_true', help='rectangular training')
444
+ parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
445
+ parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
446
+ parser.add_argument('--noval', action='store_true', help='only validate final epoch')
447
+ parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor')
448
+ parser.add_argument('--noplots', action='store_true', help='save no plot files')
449
+ parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
450
+ parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
451
+ parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk')
452
+ parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
453
+ parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
454
+ parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
455
+ parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
456
+ parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW', 'LION'], default='SGD', help='optimizer')
457
+ parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
458
+ parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
459
+ parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name')
460
+ parser.add_argument('--name', default='exp', help='save to project/name')
461
+ parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
462
+ parser.add_argument('--quad', action='store_true', help='quad dataloader')
463
+ parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler')
464
+ parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
465
+ parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
466
+ parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')
467
+ parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
468
+ parser.add_argument('--seed', type=int, default=0, help='Global training seed')
469
+ parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify')
470
+ parser.add_argument('--min-items', type=int, default=0, help='Experimental')
471
+ parser.add_argument('--close-mosaic', type=int, default=0, help='Experimental')
472
+
473
+ # Logger arguments
474
+ parser.add_argument('--entity', default=None, help='Entity')
475
+ parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='Upload data, "val" option')
476
+ parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval')
477
+ parser.add_argument('--artifact_alias', type=str, default='latest', help='Version of dataset artifact to use')
478
+
479
+ return parser.parse_known_args()[0] if known else parser.parse_args()
480
+
481
+
482
def main(opt, callbacks=Callbacks()):
    """Entry point after CLI parsing: resolve or resume options, set up the (DDP) device,
    then either train once or evolve hyperparameters for opt.evolve generations.

    Arguments:
        opt: argparse.Namespace produced by parse_opt().
        callbacks: utils.callbacks.Callbacks hook container forwarded to train().
    """
    # Checks (RANK -1 = single-GPU/CPU, 0 = DDP master process)
    if RANK in {-1, 0}:
        print_args(vars(opt))
        #check_git_status()
        #check_requirements()

    # Resume (from specified or most recent last.pt)
    if opt.resume and not check_comet_resume(opt) and not opt.evolve:
        last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run())
        opt_yaml = last.parent.parent / 'opt.yaml'  # train options yaml saved next to the run
        opt_data = opt.data  # original dataset
        if opt_yaml.is_file():
            with open(opt_yaml, errors='ignore') as f:
                d = yaml.safe_load(f)
        else:
            # No opt.yaml on disk: fall back to the options stored inside the checkpoint
            d = torch.load(last, map_location='cpu')['opt']
        opt = argparse.Namespace(**d)  # replace the CLI namespace with the saved one
        opt.cfg, opt.weights, opt.resume = '', str(last), True  # reinstate
        if is_url(opt_data):
            opt.data = check_file(opt_data)  # avoid HUB resume auth timeout
    else:
        opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \
            check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project)  # checks
        assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
        if opt.evolve:
            if opt.project == str(ROOT / 'runs/train'):  # if default project name, rename to runs/evolve
                opt.project = str(ROOT / 'runs/evolve')
            opt.exist_ok, opt.resume = opt.resume, False  # pass resume to exist_ok and disable resume
        if opt.name == 'cfg':
            opt.name = Path(opt.cfg).stem  # use model.yaml as name
        opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))

    # DDP mode
    device = select_device(opt.device, batch_size=opt.batch_size)
    if LOCAL_RANK != -1:
        msg = 'is not compatible with YOLO Multi-GPU DDP training'
        assert not opt.image_weights, f'--image-weights {msg}'
        assert not opt.evolve, f'--evolve {msg}'
        assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size'
        assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'
        assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
        torch.cuda.set_device(LOCAL_RANK)
        device = torch.device('cuda', LOCAL_RANK)
        dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo")

    # Train
    if not opt.evolve:
        train(opt.hyp, opt, device, callbacks)

    # Evolve hyperparameters (optional)
    else:
        # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
        meta = {
            'lr0': (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
            'lrf': (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
            'momentum': (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1
            'weight_decay': (1, 0.0, 0.001),  # optimizer weight decay
            'warmup_epochs': (1, 0.0, 5.0),  # warmup epochs (fractions ok)
            'warmup_momentum': (1, 0.0, 0.95),  # warmup initial momentum
            'warmup_bias_lr': (1, 0.0, 0.2),  # warmup initial bias lr
            'box': (1, 0.02, 0.2),  # box loss gain
            'cls': (1, 0.2, 4.0),  # cls loss gain
            'cls_pw': (1, 0.5, 2.0),  # cls BCELoss positive_weight
            'obj': (1, 0.2, 4.0),  # obj loss gain (scale with pixels)
            'obj_pw': (1, 0.5, 2.0),  # obj BCELoss positive_weight
            'iou_t': (0, 0.1, 0.7),  # IoU training threshold
            'anchor_t': (1, 2.0, 8.0),  # anchor-multiple threshold
            'anchors': (2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
            'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
            'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
            'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
            'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
            'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
            'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
            'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
            'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
            'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
            'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
            'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
            'mosaic': (1, 0.0, 1.0),  # image mosaic (probability)
            'mixup': (1, 0.0, 1.0),  # image mixup (probability)
            'copy_paste': (1, 0.0, 1.0)}  # segment copy-paste (probability)

        with open(opt.hyp, errors='ignore') as f:
            hyp = yaml.safe_load(f)  # load hyps dict
            if 'anchors' not in hyp:  # anchors commented in hyp.yaml
                hyp['anchors'] = 3
        if opt.noautoanchor:
            del hyp['anchors'], meta['anchors']
        opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir)  # only val/save final epoch
        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
        evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv'
        if opt.bucket:
            os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}')  # download evolve.csv if exists

        for _ in range(opt.evolve):  # generations to evolve
            if evolve_csv.exists():  # if evolve.csv exists: select best hyps and mutate
                # Select parent(s)
                parent = 'single'  # parent selection method: 'single' or 'weighted'
                x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1)
                n = min(5, len(x))  # number of previous results to consider
                x = x[np.argsort(-fitness(x))][:n]  # top n mutations
                w = fitness(x) - fitness(x).min() + 1E-6  # weights (sum > 0)
                if parent == 'single' or len(x) == 1:
                    # x = x[random.randint(0, n - 1)]  # random selection
                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
                elif parent == 'weighted':
                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination

                # Mutate
                mp, s = 0.8, 0.2  # mutation probability, sigma
                npr = np.random
                npr.seed(int(time.time()))
                g = np.array([meta[k][0] for k in hyp.keys()])  # gains 0-1
                ng = len(meta)
                v = np.ones(ng)
                while all(v == 1):  # mutate until a change occurs (prevent duplicates)
                    v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
                for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300)
                    # offset 7 skips the result columns written before the hyps in evolve.csv
                    hyp[k] = float(x[i + 7] * v[i])  # mutate

            # Constrain to limits
            for k, v in meta.items():
                hyp[k] = max(hyp[k], v[1])  # lower limit
                hyp[k] = min(hyp[k], v[2])  # upper limit
                hyp[k] = round(hyp[k], 5)  # significant digits

            # Train mutation
            results = train(hyp.copy(), opt, device, callbacks)
            callbacks = Callbacks()  # fresh hooks for each generation
            # Write mutation results
            keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'val/box_loss',
                    'val/obj_loss', 'val/cls_loss')
            print_mutation(keys, results, hyp.copy(), save_dir, opt.bucket)

        # Plot results
        plot_evolve(evolve_csv)
        LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n'
                    f"Results saved to {colorstr('bold', save_dir)}\n"
                    f'Usage example: $ python train.py --hyp {evolve_yaml}')
623
+
624
+
625
def run(**kwargs):
    """Programmatic entry point.

    Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolo.pt')
    Returns the fully-resolved argparse.Namespace used for the run.
    """
    opt = parse_opt(True)  # known-args parse so defaults never error
    vars(opt).update(kwargs)  # overlay caller overrides onto the defaults
    main(opt)
    return opt
632
+
633
+
634
+ if __name__ == "__main__":
635
+ opt = parse_opt()
636
+ main(opt)
val.py ADDED
@@ -0,0 +1,389 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import json
3
+ import os
4
+ import sys
5
+ from pathlib import Path
6
+
7
+ import numpy as np
8
+ import torch
9
+ from tqdm import tqdm
10
+
11
+ FILE = Path(__file__).resolve()
12
+ ROOT = FILE.parents[0] # YOLO root directory
13
+ if str(ROOT) not in sys.path:
14
+ sys.path.append(str(ROOT)) # add ROOT to PATH
15
+ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
16
+
17
+ from models.common import DetectMultiBackend
18
+ from utils.callbacks import Callbacks
19
+ from utils.dataloaders import create_dataloader
20
+ from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, check_requirements,
21
+ check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression,
22
+ print_args, scale_boxes, xywh2xyxy, xyxy2xywh)
23
+ from utils.metrics import ConfusionMatrix, ap_per_class, box_iou
24
+ from utils.plots import output_to_target, plot_images, plot_val_study
25
+ from utils.torch_utils import select_device, smart_inference_mode
26
+
27
+
28
def save_one_txt(predn, save_conf, shape, file):
    """Append one image's detections to a YOLO-format *.txt label file.

    Arguments:
        predn: tensor[N, 6] of detections (x1, y1, x2, y2, conf, cls) in native image space.
        save_conf: append the confidence as a trailing column when True.
        shape: (height, width) of the original image, used to normalize coordinates.
        file: destination path; one line per detection is appended.
    """
    gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh
    # Open the file once instead of re-opening it for every detection
    with open(file, 'a') as f:
        for *xyxy, conf, cls in predn.tolist():
            xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
            line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
            f.write(('%g ' * len(line)).rstrip() % line + '\n')
36
+
37
+
38
def save_one_json(predn, jdict, path, class_map):
    """Append one image's detections to jdict in COCO-JSON form:
    {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
    """
    stem = path.stem
    image_id = int(stem) if stem.isnumeric() else stem
    box = xyxy2xywh(predn[:, :4])  # xyxy -> center-based xywh
    box[:, :2] -= box[:, 2:] / 2  # shift centers to top-left corners (COCO convention)
    for pred, bbox in zip(predn.tolist(), box.tolist()):
        entry = {
            'image_id': image_id,
            'category_id': class_map[int(pred[5])],
            'bbox': [round(coord, 3) for coord in bbox],
            'score': round(pred[4], 5),
        }
        jdict.append(entry)
49
+
50
+
51
def process_batch(detections, labels, iouv):
    """
    Return correct prediction matrix
    Arguments:
        detections (array[N, 6]), x1, y1, x2, y2, conf, class
        labels (array[M, 5]), class, x1, y1, x2, y2
        iouv (tensor), IoU thresholds (10 levels, 0.5:0.95)
    Returns:
        correct (array[N, 10]), True where detection n is a correct match at IoU level i
    """
    correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)
    iou = box_iou(labels[:, 1:], detections[:, :4])  # M x N pairwise IoU (labels x detections)
    correct_class = labels[:, 0:1] == detections[:, 5]  # M x N class-agreement mask (broadcast)
    for i in range(len(iouv)):
        x = torch.where((iou >= iouv[i]) & correct_class)  # IoU > threshold and classes match
        if x[0].shape[0]:
            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()  # [label, detect, iou]
            if x[0].shape[0] > 1:
                # Greedy one-to-one assignment: sort candidate pairs by IoU descending,
                # then keep each detection at most once, then each label at most once
                # (np.unique's return_index keeps the first occurrence of each value).
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
                # matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
            correct[matches[:, 1].astype(int), i] = True
    return torch.tensor(correct, dtype=torch.bool, device=iouv.device)
74
+
75
+
76
@smart_inference_mode()
def run(
        data,
        weights=None,  # model.pt path(s)
        batch_size=32,  # batch size
        imgsz=640,  # inference size (pixels)
        conf_thres=0.001,  # confidence threshold
        iou_thres=0.7,  # NMS IoU threshold
        max_det=300,  # maximum detections per image
        task='val',  # train, val, test, speed or study
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        workers=8,  # max dataloader workers (per RANK in DDP mode)
        single_cls=False,  # treat as single-class dataset
        augment=False,  # augmented inference
        verbose=False,  # verbose output
        save_txt=False,  # save results to *.txt
        save_hybrid=False,  # save label+prediction hybrid results to *.txt
        save_conf=False,  # save confidences in --save-txt labels
        save_json=False,  # save a COCO-JSON results file
        project=ROOT / 'runs/val',  # save to project/name
        name='exp',  # save to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        half=True,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        min_items=0,  # Experimental
        model=None,  # preloaded model (set when called by train.py)
        dataloader=None,  # preloaded dataloader (set when called by train.py)
        save_dir=Path(''),
        plots=True,
        callbacks=Callbacks(),
        compute_loss=None,
):
    """Validate a detection model on a dataset.

    Returns:
        ((mp, mr, map50, map, *val_losses), maps, t): overall metrics tuple,
        per-class mAP array of length nc, and per-image (pre-process, inference, NMS) times in ms.
    """
    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py with a live model
        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
        half &= device.type != 'cpu'  # half precision only supported on CUDA
        model.half() if half else model.float()
    else:  # called directly
        device = select_device(device, batch_size=batch_size)

        # Directories
        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
        imgsz = check_img_size(imgsz, s=stride)  # check image size
        half = model.fp16  # FP16 supported on limited backends with CUDA
        if engine:
            batch_size = model.batch_size
        else:
            device = model.device
            if not (pt or jit):
                batch_size = 1  # export.py models default to batch-size 1
                LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')

        # Data
        data = check_dataset(data)  # check

    # Configure
    model.eval()
    cuda = device.type != 'cpu'
    is_coco = isinstance(data.get('val'), str) and data['val'].endswith('val2017.txt')  # COCO dataset
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10, device=device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    # Dataloader
    if not training:
        if pt and not single_cls:  # check --weights are trained on --data
            ncm = model.model.nc
            assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \
                              f'classes). Pass correct combination of --weights and --data that are trained together.'
        model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz))  # warmup
        pad, rect = (0.0, False) if task == 'speed' else (0.5, pt)  # square inference for benchmarks
        task = task if task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
        dataloader = create_dataloader(data[task],
                                       imgsz,
                                       batch_size,
                                       stride,
                                       single_cls,
                                       pad=pad,
                                       rect=rect,
                                       workers=workers,
                                       min_items=min_items,  # FIX: was `opt.min_items`, a NameError when run() is imported (e.g. benchmarks.py)
                                       prefix=colorstr(f'{task}: '))[0]

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    names = model.names if hasattr(model, 'names') else model.module.names  # get class names
    if isinstance(names, (list, tuple)):  # old format
        names = dict(enumerate(names))
    class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
    s = ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'P', 'R', 'mAP50', 'mAP50-95')
    tp, fp, p, r, f1, mp, mr, map50, ap50, map = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    dt = Profile(), Profile(), Profile()  # profiling times: pre-process, inference, NMS
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class = [], [], [], []
    callbacks.run('on_val_start')
    pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT)  # progress bar
    for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
        callbacks.run('on_val_batch_start')
        with dt[0]:
            if cuda:
                im = im.to(device, non_blocking=True)
                targets = targets.to(device)
            im = im.half() if half else im.float()  # uint8 to fp16/32
            im /= 255  # 0 - 255 to 0.0 - 1.0
            nb, _, height, width = im.shape  # batch size, channels, height, width

        # Inference
        with dt[1]:
            preds, train_out = model(im) if compute_loss else (model(im, augment=augment), None)

        # Loss
        if compute_loss:
            loss += compute_loss(train_out, targets)[1]  # box, obj, cls

        # NMS
        targets[:, 2:] *= torch.tensor((width, height, width, height), device=device)  # to pixels
        lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
        with dt[2]:
            preds = non_max_suppression(preds,
                                        conf_thres,
                                        iou_thres,
                                        labels=lb,
                                        multi_label=True,
                                        agnostic=single_cls,
                                        max_det=max_det)

        # Metrics
        for si, pred in enumerate(preds):
            labels = targets[targets[:, 0] == si, 1:]
            nl, npr = labels.shape[0], pred.shape[0]  # number of labels, predictions
            path, shape = Path(paths[si]), shapes[si][0]
            correct = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
            seen += 1

            if npr == 0:
                if nl:
                    stats.append((correct, *torch.zeros((2, 0), device=device), labels[:, 0]))
                    if plots:
                        confusion_matrix.process_batch(detections=None, labels=labels[:, 0])
                continue

            # Predictions
            if single_cls:
                pred[:, 5] = 0
            predn = pred.clone()
            scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1])  # native-space pred

            # Evaluate
            if nl:
                tbox = xywh2xyxy(labels[:, 1:5])  # target boxes
                scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1])  # native-space labels
                labelsn = torch.cat((labels[:, 0:1], tbox), 1)  # native-space labels
                correct = process_batch(predn, labelsn, iouv)
                if plots:
                    confusion_matrix.process_batch(predn, labelsn)
            stats.append((correct, pred[:, 4], pred[:, 5], labels[:, 0]))  # (correct, conf, pcls, tcls)

            # Save/log
            if save_txt:
                save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')
            if save_json:
                save_one_json(predn, jdict, path, class_map)  # append to COCO-JSON dictionary
            callbacks.run('on_val_image_end', pred, predn, path, names, im[si])

        # Plot images
        if plots and batch_i < 3:
            plot_images(im, targets, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names)  # labels
            plot_images(im, output_to_target(preds), paths, save_dir / f'val_batch{batch_i}_pred.jpg', names)  # pred

        callbacks.run('on_val_batch_end', batch_i, im, targets, paths, shapes, preds)

    # Compute metrics
    stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)]  # to numpy
    if len(stats) and stats[0].any():
        tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
        ap50, ap = ap[:, 0], ap.mean(1)  # AP@0.5, AP@0.5:0.95
        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
    nt = np.bincount(stats[3].astype(int), minlength=nc)  # number of targets per class

    # Print results
    pf = '%22s' + '%11i' * 2 + '%11.3g' * 4  # print format
    LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
    if nt.sum() == 0:
        LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels')

    # Print results per class
    if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))

    # Print speeds
    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
    if not training:
        shape = (batch_size, 3, imgsz, imgsz)
        LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)

    # Plots
    if plots:
        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
        callbacks.run('on_val_end', nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix)

    # Save JSON
    if save_json and len(jdict):
        w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ''  # weights
        anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json')  # annotations json
        pred_json = str(save_dir / f"{w}_predictions.json")  # predictions json
        LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
        with open(pred_json, 'w') as f:
            json.dump(jdict, f)

        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            check_requirements('pycocotools')
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            anno = COCO(anno_json)  # init annotations api
            pred = anno.loadRes(pred_json)  # init predictions api
            eval = COCOeval(anno, pred, 'bbox')
            if is_coco:
                eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files]  # image IDs to evaluate
            eval.evaluate()
            eval.accumulate()
            eval.summarize()
            map, map50 = eval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
        except Exception as e:
            LOGGER.info(f'pycocotools unable to run: {e}')

    # Return results
    model.float()  # for training
    if not training:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
319
+
320
+
321
def parse_opt():
    """Build and parse the CLI for standalone validation; returns the resolved namespace."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument  # local alias; the CLI surface below is unchanged
    add('--data', type=str, default=ROOT / 'data/coco.yaml', help='dataset.yaml path')
    add('--weights', nargs='+', type=str, default=ROOT / 'yolo.pt', help='model path(s)')
    add('--batch-size', type=int, default=32, help='batch size')
    add('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
    add('--conf-thres', type=float, default=0.001, help='confidence threshold')
    add('--iou-thres', type=float, default=0.7, help='NMS IoU threshold')
    add('--max-det', type=int, default=300, help='maximum detections per image')
    add('--task', default='val', help='train, val, test, speed or study')
    add('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    add('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
    add('--single-cls', action='store_true', help='treat as single-class dataset')
    add('--augment', action='store_true', help='augmented inference')
    add('--verbose', action='store_true', help='report mAP by class')
    add('--save-txt', action='store_true', help='save results to *.txt')
    add('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
    add('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    add('--save-json', action='store_true', help='save a COCO-JSON results file')
    add('--project', default=ROOT / 'runs/val', help='save to project/name')
    add('--name', default='exp', help='save to project/name')
    add('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    add('--half', action='store_true', help='use FP16 half-precision inference')
    add('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    add('--min-items', type=int, default=0, help='Experimental')
    opt = parser.parse_args()
    opt.data = check_yaml(opt.data)  # validate/resolve the dataset YAML
    opt.save_json |= opt.data.endswith('coco.yaml')  # COCO runs always export JSON
    opt.save_txt |= opt.save_hybrid  # hybrid labels are written as txt
    print_args(vars(opt))
    return opt
352
+
353
+
354
def main(opt):
    """Dispatch validation by --task: normal val/test, speed benchmark, or speed-vs-mAP study."""
    #check_requirements(exclude=('tensorboard', 'thop'))

    if opt.task in ('train', 'val', 'test'):  # run normally
        if opt.conf_thres > 0.001:  # https://github.com/ultralytics/yolov5/issues/1466
            LOGGER.info(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results')
        if opt.save_hybrid:
            LOGGER.info('WARNING ⚠️ --save-hybrid will return high mAP from hybrid labels, not from predictions alone')
        run(**vars(opt))

    else:
        weights = opt.weights if isinstance(opt.weights, list) else [opt.weights]
        opt.half = torch.cuda.is_available() and opt.device != 'cpu'  # FP16 for fastest results
        if opt.task == 'speed':  # speed benchmarks
            # python val.py --task speed --data coco.yaml --batch 1 --weights yolo.pt...
            opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False
            # NOTE: the loop target is opt.weights itself so run(**vars(opt)) sees each weight file
            for opt.weights in weights:
                run(**vars(opt), plots=False)

        elif opt.task == 'study':  # speed vs mAP benchmarks
            # python val.py --task study --data coco.yaml --iou 0.7 --weights yolo.pt...
            for opt.weights in weights:  # mutates opt in place, same as the speed branch
                f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt'  # filename to save to
                x, y = list(range(256, 1536 + 128, 128)), []  # x axis (image sizes), y axis
                for opt.imgsz in x:  # img-size
                    LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...')
                    r, _, t = run(**vars(opt), plots=False)
                    y.append(r + t)  # results and times
                np.savetxt(f, y, fmt='%10.4g')  # save
            os.system('zip -r study.zip study_*.txt')
            plot_val_study(x=x)  # plot
385
+
386
+
387
+ if __name__ == "__main__":
388
+ opt = parse_opt()
389
+ main(opt)
val_dual.py ADDED
@@ -0,0 +1,393 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import json
3
+ import os
4
+ import sys
5
+ from pathlib import Path
6
+
7
+ import numpy as np
8
+ import torch
9
+ from tqdm import tqdm
10
+
11
+ FILE = Path(__file__).resolve()
12
+ ROOT = FILE.parents[0] # YOLO root directory
13
+ if str(ROOT) not in sys.path:
14
+ sys.path.append(str(ROOT)) # add ROOT to PATH
15
+ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
16
+
17
+ from models.common import DetectMultiBackend
18
+ from utils.callbacks import Callbacks
19
+ from utils.dataloaders import create_dataloader
20
+ from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, check_requirements,
21
+ check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression,
22
+ print_args, scale_boxes, xywh2xyxy, xyxy2xywh)
23
+ from utils.metrics import ConfusionMatrix, ap_per_class, box_iou
24
+ from utils.plots import output_to_target, plot_images, plot_val_study
25
+ from utils.torch_utils import select_device, smart_inference_mode
26
+
27
+
28
def save_one_txt(predn, save_conf, shape, file):
    """Append one image's detections to a YOLO-format *.txt label file.

    Arguments:
        predn: tensor[N, 6] of detections (x1, y1, x2, y2, conf, cls) in native image space.
        save_conf: append the confidence as a trailing column when True.
        shape: (height, width) of the original image, used to normalize coordinates.
        file: destination path; one line per detection is appended.
    """
    gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh
    # Open the file once instead of re-opening it for every detection
    with open(file, 'a') as f:
        for *xyxy, conf, cls in predn.tolist():
            xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
            line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
            f.write(('%g ' * len(line)).rstrip() % line + '\n')
36
+
37
+
38
def save_one_json(predn, jdict, path, class_map):
    """Append one image's detections to jdict in COCO-JSON form:
    {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
    """
    stem = path.stem
    image_id = int(stem) if stem.isnumeric() else stem
    box = xyxy2xywh(predn[:, :4])  # xyxy -> center-based xywh
    box[:, :2] -= box[:, 2:] / 2  # shift centers to top-left corners (COCO convention)
    for pred, bbox in zip(predn.tolist(), box.tolist()):
        entry = {
            'image_id': image_id,
            'category_id': class_map[int(pred[5])],
            'bbox': [round(coord, 3) for coord in bbox],
            'score': round(pred[4], 5),
        }
        jdict.append(entry)
49
+
50
+
51
def process_batch(detections, labels, iouv):
    """
    Return correct prediction matrix
    Arguments:
        detections (array[N, 6]), x1, y1, x2, y2, conf, class
        labels (array[M, 5]), class, x1, y1, x2, y2
        iouv (tensor), IoU thresholds (10 levels, 0.5:0.95)
    Returns:
        correct (array[N, 10]), True where detection n is a correct match at IoU level i
    """
    correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)
    iou = box_iou(labels[:, 1:], detections[:, :4])  # M x N pairwise IoU (labels x detections)
    correct_class = labels[:, 0:1] == detections[:, 5]  # M x N class-agreement mask (broadcast)
    for i in range(len(iouv)):
        x = torch.where((iou >= iouv[i]) & correct_class)  # IoU > threshold and classes match
        if x[0].shape[0]:
            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()  # [label, detect, iou]
            if x[0].shape[0] > 1:
                # Greedy one-to-one assignment: sort candidate pairs by IoU descending,
                # then keep each detection at most once, then each label at most once
                # (np.unique's return_index keeps the first occurrence of each value).
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
                # matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
            correct[matches[:, 1].astype(int), i] = True
    return torch.tensor(correct, dtype=torch.bool, device=iouv.device)
74
+
75
+
76
@smart_inference_mode()
def run(
        data,
        weights=None,  # model.pt path(s)
        batch_size=32,  # batch size
        imgsz=640,  # inference size (pixels)
        conf_thres=0.001,  # confidence threshold
        iou_thres=0.7,  # NMS IoU threshold
        max_det=300,  # maximum detections per image
        task='val',  # train, val, test, speed or study
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        workers=8,  # max dataloader workers (per RANK in DDP mode)
        single_cls=False,  # treat as single-class dataset
        augment=False,  # augmented inference
        verbose=False,  # verbose output
        save_txt=False,  # save results to *.txt
        save_hybrid=False,  # save label+prediction hybrid results to *.txt
        save_conf=False,  # save confidences in --save-txt labels
        save_json=False,  # save a COCO-JSON results file
        project=ROOT / 'runs/val',  # save to project/name
        name='exp',  # save to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        half=True,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        min_items=0,  # Experimental
        model=None,
        dataloader=None,
        save_dir=Path(''),
        plots=True,
        callbacks=Callbacks(),  # NOTE(review): mutable default is shared across calls — callers should pass their own
        compute_loss=None,
):
    """Validate a detection model on a dataset.

    Called directly (loads weights via DetectMultiBackend) or from train.py
    (``model``/``dataloader``/``compute_loss`` supplied).

    Returns:
        (mp, mr, map50, map, *val_losses), maps, t — mean precision/recall/mAP,
        per-class mAP array of length nc, and per-image (pre, inference, NMS) times in ms.
    """
    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
        half &= device.type != 'cpu'  # half precision only supported on CUDA
        model.half() if half else model.float()
    else:  # called directly
        device = select_device(device, batch_size=batch_size)

        # Directories
        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
        imgsz = check_img_size(imgsz, s=stride)  # check image size
        half = model.fp16  # FP16 supported on limited backends with CUDA
        if engine:
            batch_size = model.batch_size
        else:
            device = model.device
            if not (pt or jit):
                batch_size = 1  # export.py models default to batch-size 1
                LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')

        # Data
        data = check_dataset(data)  # check

    # Configure
    model.eval()
    cuda = device.type != 'cpu'
    #is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset
    is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'val2017.txt')  # COCO dataset
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10, device=device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    # Dataloader
    if not training:
        if pt and not single_cls:  # check --weights are trained on --data
            ncm = model.model.nc
            assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \
                              f'classes). Pass correct combination of --weights and --data that are trained together.'
        model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz))  # warmup
        pad, rect = (0.0, False) if task == 'speed' else (0.5, pt)  # square inference for benchmarks
        task = task if task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
        dataloader = create_dataloader(data[task],
                                       imgsz,
                                       batch_size,
                                       stride,
                                       single_cls,
                                       pad=pad,
                                       rect=rect,
                                       workers=workers,
                                       min_items=min_items,  # FIX: was `opt.min_items` — NameError when run() is called programmatically
                                       prefix=colorstr(f'{task}: '))[0]

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    names = model.names if hasattr(model, 'names') else model.module.names  # get class names
    if isinstance(names, (list, tuple)):  # old format
        names = dict(enumerate(names))
    class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
    s = ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'P', 'R', 'mAP50', 'mAP50-95')
    tp, fp, p, r, f1, mp, mr, map50, ap50, map = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    dt = Profile(), Profile(), Profile()  # profiling times (pre-process, inference, NMS)
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class = [], [], [], []
    callbacks.run('on_val_start')
    pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT)  # progress bar
    for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
        callbacks.run('on_val_batch_start')
        with dt[0]:
            if cuda:
                im = im.to(device, non_blocking=True)
                targets = targets.to(device)
            im = im.half() if half else im.float()  # uint8 to fp16/32
            im /= 255  # 0 - 255 to 0.0 - 1.0
            nb, _, height, width = im.shape  # batch size, channels, height, width

        # Inference
        with dt[1]:
            preds, train_out = model(im) if compute_loss else (model(im, augment=augment), None)

        # Loss
        if compute_loss:
            preds = preds[1]  # select the auxiliary/dual head output used for validation
            #train_out = train_out[1]
            #loss += compute_loss(train_out, targets)[1]  # box, obj, cls
        else:
            preds = preds[0][1]

        # NMS
        targets[:, 2:] *= torch.tensor((width, height, width, height), device=device)  # to pixels
        lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
        with dt[2]:
            preds = non_max_suppression(preds,
                                        conf_thres,
                                        iou_thres,
                                        labels=lb,
                                        multi_label=True,
                                        agnostic=single_cls,
                                        max_det=max_det)

        # Metrics
        for si, pred in enumerate(preds):
            labels = targets[targets[:, 0] == si, 1:]
            nl, npr = labels.shape[0], pred.shape[0]  # number of labels, predictions
            path, shape = Path(paths[si]), shapes[si][0]
            correct = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
            seen += 1

            if npr == 0:
                if nl:
                    stats.append((correct, *torch.zeros((2, 0), device=device), labels[:, 0]))
                    if plots:
                        confusion_matrix.process_batch(detections=None, labels=labels[:, 0])
                continue

            # Predictions
            if single_cls:
                pred[:, 5] = 0
            predn = pred.clone()
            scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1])  # native-space pred

            # Evaluate
            if nl:
                tbox = xywh2xyxy(labels[:, 1:5])  # target boxes
                scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1])  # native-space labels
                labelsn = torch.cat((labels[:, 0:1], tbox), 1)  # native-space labels
                correct = process_batch(predn, labelsn, iouv)
                if plots:
                    confusion_matrix.process_batch(predn, labelsn)
            stats.append((correct, pred[:, 4], pred[:, 5], labels[:, 0]))  # (correct, conf, pcls, tcls)

            # Save/log
            if save_txt:
                save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')
            if save_json:
                save_one_json(predn, jdict, path, class_map)  # append to COCO-JSON dictionary
            callbacks.run('on_val_image_end', pred, predn, path, names, im[si])

        # Plot images
        if plots and batch_i < 3:
            plot_images(im, targets, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names)  # labels
            plot_images(im, output_to_target(preds), paths, save_dir / f'val_batch{batch_i}_pred.jpg', names)  # pred

        callbacks.run('on_val_batch_end', batch_i, im, targets, paths, shapes, preds)

    # Compute metrics
    stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)]  # to numpy
    if len(stats) and stats[0].any():
        tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
        ap50, ap = ap[:, 0], ap.mean(1)  # AP@0.5, AP@0.5:0.95
        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
    nt = np.bincount(stats[3].astype(int), minlength=nc)  # number of targets per class

    # Print results
    pf = '%22s' + '%11i' * 2 + '%11.3g' * 4  # print format
    LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
    if nt.sum() == 0:
        LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels')

    # Print results per class
    if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))

    # Print speeds
    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
    if not training:
        shape = (batch_size, 3, imgsz, imgsz)
        LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)

    # Plots
    if plots:
        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
        callbacks.run('on_val_end', nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix)

    # Save JSON
    if save_json and len(jdict):
        w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ''  # weights
        anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json')  # annotations json
        pred_json = str(save_dir / f"{w}_predictions.json")  # predictions json
        LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
        with open(pred_json, 'w') as f:
            json.dump(jdict, f)

        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            check_requirements('pycocotools')
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            anno = COCO(anno_json)  # init annotations api
            pred = anno.loadRes(pred_json)  # init predictions api
            eval = COCOeval(anno, pred, 'bbox')
            if is_coco:
                eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files]  # image IDs to evaluate
            eval.evaluate()
            eval.accumulate()
            eval.summarize()
            map, map50 = eval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
        except Exception as e:
            LOGGER.info(f'pycocotools unable to run: {e}')

    # Return results
    model.float()  # for training
    if not training:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
323
+
324
+
325
def parse_opt():
    """Parse command-line options for validation and return the populated namespace."""
    p = argparse.ArgumentParser()
    p.add_argument('--data', type=str, default=ROOT / 'data/coco.yaml', help='dataset.yaml path')
    p.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolo.pt', help='model path(s)')
    p.add_argument('--batch-size', type=int, default=32, help='batch size')
    p.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
    p.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
    p.add_argument('--iou-thres', type=float, default=0.7, help='NMS IoU threshold')
    p.add_argument('--max-det', type=int, default=300, help='maximum detections per image')
    p.add_argument('--task', default='val', help='train, val, test, speed or study')
    p.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    p.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
    p.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
    p.add_argument('--augment', action='store_true', help='augmented inference')
    p.add_argument('--verbose', action='store_true', help='report mAP by class')
    p.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    p.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
    p.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    p.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
    p.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name')
    p.add_argument('--name', default='exp', help='save to project/name')
    p.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    p.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    p.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    p.add_argument('--min-items', type=int, default=0, help='Experimental')
    opt = p.parse_args()
    opt.data = check_yaml(opt.data)  # check YAML
    opt.save_json = opt.save_json or opt.data.endswith('coco.yaml')  # COCO data implies JSON output
    opt.save_txt = opt.save_txt or opt.save_hybrid  # hybrid labels require txt output
    print_args(vars(opt))
    return opt
356
+
357
+
358
def main(opt):
    """Dispatch by opt.task: normal evaluation, a speed benchmark, or a speed-vs-mAP study sweep."""
    #check_requirements(exclude=('tensorboard', 'thop'))

    if opt.task in ('train', 'val', 'test'):  # run normally
        if opt.conf_thres > 0.001:  # https://github.com/ultralytics/yolov5/issues/1466
            LOGGER.info(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results')
        if opt.save_hybrid:
            LOGGER.info('WARNING ⚠️ --save-hybrid will return high mAP from hybrid labels, not from predictions alone')
        run(**vars(opt))
        return

    weight_list = opt.weights if isinstance(opt.weights, list) else [opt.weights]
    opt.half = torch.cuda.is_available() and opt.device != 'cpu'  # FP16 for fastest results
    if opt.task == 'speed':  # speed benchmarks
        # python val.py --task speed --data coco.yaml --batch 1 --weights yolo.pt...
        opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False
        for opt.weights in weight_list:
            run(**vars(opt), plots=False)

    elif opt.task == 'study':  # speed vs mAP benchmarks
        # python val.py --task study --data coco.yaml --iou 0.7 --weights yolo.pt...
        for opt.weights in weight_list:
            out_file = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt'  # filename to save to
            sizes, results = list(range(256, 1536 + 128, 128)), []  # x axis (image sizes), y axis
            for opt.imgsz in sizes:  # img-size
                LOGGER.info(f'\nRunning {out_file} --imgsz {opt.imgsz}...')
                r, _, t = run(**vars(opt), plots=False)
                results.append(r + t)  # results and times
            np.savetxt(out_file, results, fmt='%10.4g')  # save
        os.system('zip -r study.zip study_*.txt')
        plot_val_study(x=sizes)  # plot
389
+
390
+
391
if __name__ == "__main__":
    # CLI entry point: parse arguments, then run validation per the chosen task
    opt = parse_opt()
    main(opt)
val_triple.py ADDED
@@ -0,0 +1,391 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import json
3
+ import os
4
+ import sys
5
+ from pathlib import Path
6
+
7
+ import numpy as np
8
+ import torch
9
+ from tqdm import tqdm
10
+
11
+ FILE = Path(__file__).resolve()
12
+ ROOT = FILE.parents[0] # YOLO root directory
13
+ if str(ROOT) not in sys.path:
14
+ sys.path.append(str(ROOT)) # add ROOT to PATH
15
+ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
16
+
17
+ from models.common import DetectMultiBackend
18
+ from utils.callbacks import Callbacks
19
+ from utils.dataloaders import create_dataloader
20
+ from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, check_requirements,
21
+ check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression,
22
+ print_args, scale_boxes, xywh2xyxy, xyxy2xywh)
23
+ from utils.metrics import ConfusionMatrix, ap_per_class, box_iou
24
+ from utils.plots import output_to_target, plot_images, plot_val_study
25
+ from utils.torch_utils import select_device, smart_inference_mode
26
+
27
+
28
def save_one_txt(predn, save_conf, shape, file):
    """Append one image's detections to a YOLO-format *.txt label file (class, xywh[, conf] per line)."""
    gain = torch.tensor(shape)[[1, 0, 1, 0]]  # whwh gain to normalize boxes by image size
    for *xyxy, conf, cls in predn.tolist():
        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gain).view(-1).tolist()  # normalized xywh
        row = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
        with open(file, 'a') as fh:
            fh.write(('%g ' * len(row)).rstrip() % row + '\n')
36
+
37
+
38
def save_one_json(predn, jdict, path, class_map):
    """Append one image's detections to *jdict* as COCO-JSON records.

    Record shape: {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
    """
    image_id = int(path.stem) if path.stem.isnumeric() else path.stem  # numeric stems become int ids (COCO)
    boxes = xyxy2xywh(predn[:, :4])  # xywh
    boxes[:, :2] -= boxes[:, 2:] / 2  # xy center to top-left corner (COCO bbox convention)
    for det, bb in zip(predn.tolist(), boxes.tolist()):
        jdict.append({
            'image_id': image_id,
            'category_id': class_map[int(det[5])],
            'bbox': [round(v, 3) for v in bb],
            'score': round(det[4], 5)})
49
+
50
+
51
def process_batch(detections, labels, iouv):
    """
    Return correct prediction matrix
    Arguments:
        detections (array[N, 6]), x1, y1, x2, y2, conf, class
        labels (array[M, 5]), class, x1, y1, x2, y2
    Returns:
        correct (array[N, 10]), for 10 IoU levels
    """
    correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)
    iou = box_iou(labels[:, 1:], detections[:, :4])  # MxN pairwise IoU between labels and detections
    correct_class = labels[:, 0:1] == detections[:, 5]  # MxN class-agreement mask (broadcast compare)
    for i in range(len(iouv)):  # one pass per IoU threshold
        x = torch.where((iou >= iouv[i]) & correct_class)  # IoU > threshold and classes match
        if x[0].shape[0]:
            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()  # [label, detect, iou]
            if x[0].shape[0] > 1:
                # Greedy one-to-one assignment: sort candidate pairs by IoU descending, then
                # keep only the first (highest-IoU) occurrence of each detection and each label
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
                # matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
            correct[matches[:, 1].astype(int), i] = True  # mark matched detections as TP at this threshold
    return torch.tensor(correct, dtype=torch.bool, device=iouv.device)
74
+
75
+
76
@smart_inference_mode()
def run(
        data,
        weights=None,  # model.pt path(s)
        batch_size=32,  # batch size
        imgsz=640,  # inference size (pixels)
        conf_thres=0.001,  # confidence threshold
        iou_thres=0.7,  # NMS IoU threshold
        max_det=300,  # maximum detections per image
        task='val',  # train, val, test, speed or study
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        workers=8,  # max dataloader workers (per RANK in DDP mode)
        single_cls=False,  # treat as single-class dataset
        augment=False,  # augmented inference
        verbose=False,  # verbose output
        save_txt=False,  # save results to *.txt
        save_hybrid=False,  # save label+prediction hybrid results to *.txt
        save_conf=False,  # save confidences in --save-txt labels
        save_json=False,  # save a COCO-JSON results file
        project=ROOT / 'runs/val',  # save to project/name
        name='exp',  # save to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        half=True,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        min_items=0,  # Experimental
        model=None,
        dataloader=None,
        save_dir=Path(''),
        plots=True,
        callbacks=Callbacks(),  # NOTE(review): mutable default is shared across calls — callers should pass their own
        compute_loss=None,
):
    """Validate a triple-head detection model on a dataset.

    Called directly (loads weights via DetectMultiBackend) or from train.py
    (``model``/``dataloader``/``compute_loss`` supplied). Uses the third output
    head (index 2) of the model for evaluation.

    Returns:
        (mp, mr, map50, map, *val_losses), maps, t — mean precision/recall/mAP,
        per-class mAP array of length nc, and per-image (pre, inference, NMS) times in ms.
    """
    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
        half &= device.type != 'cpu'  # half precision only supported on CUDA
        model.half() if half else model.float()
    else:  # called directly
        device = select_device(device, batch_size=batch_size)

        # Directories
        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
        imgsz = check_img_size(imgsz, s=stride)  # check image size
        half = model.fp16  # FP16 supported on limited backends with CUDA
        if engine:
            batch_size = model.batch_size
        else:
            device = model.device
            if not (pt or jit):
                batch_size = 1  # export.py models default to batch-size 1
                LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')

        # Data
        data = check_dataset(data)  # check

    # Configure
    model.eval()
    cuda = device.type != 'cpu'
    #is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset
    is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'val2017.txt')  # COCO dataset
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10, device=device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    # Dataloader
    if not training:
        if pt and not single_cls:  # check --weights are trained on --data
            ncm = model.model.nc
            assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \
                              f'classes). Pass correct combination of --weights and --data that are trained together.'
        model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz))  # warmup
        pad, rect = (0.0, False) if task == 'speed' else (0.5, pt)  # square inference for benchmarks
        task = task if task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
        dataloader = create_dataloader(data[task],
                                       imgsz,
                                       batch_size,
                                       stride,
                                       single_cls,
                                       pad=pad,
                                       rect=rect,
                                       workers=workers,
                                       min_items=min_items,  # FIX: was `opt.min_items` — NameError when run() is called programmatically
                                       prefix=colorstr(f'{task}: '))[0]

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    names = model.names if hasattr(model, 'names') else model.module.names  # get class names
    if isinstance(names, (list, tuple)):  # old format
        names = dict(enumerate(names))
    class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
    s = ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'P', 'R', 'mAP50', 'mAP50-95')
    tp, fp, p, r, f1, mp, mr, map50, ap50, map = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    dt = Profile(), Profile(), Profile()  # profiling times (pre-process, inference, NMS)
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class = [], [], [], []
    callbacks.run('on_val_start')
    pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT)  # progress bar
    for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
        callbacks.run('on_val_batch_start')
        with dt[0]:
            if cuda:
                im = im.to(device, non_blocking=True)
                targets = targets.to(device)
            im = im.half() if half else im.float()  # uint8 to fp16/32
            im /= 255  # 0 - 255 to 0.0 - 1.0
            nb, _, height, width = im.shape  # batch size, channels, height, width

        # Inference
        with dt[1]:
            preds, train_out = model(im) if compute_loss else (model(im, augment=augment), None)
            preds = preds[2]  # third (triple) output head — assumes model forward returns an indexable tuple; TODO confirm for export backends
            if compute_loss:  # FIX: guard — train_out is None when compute_loss is not supplied, so indexing it crashed
                train_out = train_out[2]

        # Loss
        #if compute_loss:
        #    loss += compute_loss(train_out, targets)[2]  # box, obj, cls

        # NMS
        targets[:, 2:] *= torch.tensor((width, height, width, height), device=device)  # to pixels
        lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
        with dt[2]:
            preds = non_max_suppression(preds,
                                        conf_thres,
                                        iou_thres,
                                        labels=lb,
                                        multi_label=True,
                                        agnostic=single_cls,
                                        max_det=max_det)

        # Metrics
        for si, pred in enumerate(preds):
            labels = targets[targets[:, 0] == si, 1:]
            nl, npr = labels.shape[0], pred.shape[0]  # number of labels, predictions
            path, shape = Path(paths[si]), shapes[si][0]
            correct = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
            seen += 1

            if npr == 0:
                if nl:
                    stats.append((correct, *torch.zeros((2, 0), device=device), labels[:, 0]))
                    if plots:
                        confusion_matrix.process_batch(detections=None, labels=labels[:, 0])
                continue

            # Predictions
            if single_cls:
                pred[:, 5] = 0
            predn = pred.clone()
            scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1])  # native-space pred

            # Evaluate
            if nl:
                tbox = xywh2xyxy(labels[:, 1:5])  # target boxes
                scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1])  # native-space labels
                labelsn = torch.cat((labels[:, 0:1], tbox), 1)  # native-space labels
                correct = process_batch(predn, labelsn, iouv)
                if plots:
                    confusion_matrix.process_batch(predn, labelsn)
            stats.append((correct, pred[:, 4], pred[:, 5], labels[:, 0]))  # (correct, conf, pcls, tcls)

            # Save/log
            if save_txt:
                save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')
            if save_json:
                save_one_json(predn, jdict, path, class_map)  # append to COCO-JSON dictionary
            callbacks.run('on_val_image_end', pred, predn, path, names, im[si])

        # Plot images
        if plots and batch_i < 3:
            plot_images(im, targets, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names)  # labels
            plot_images(im, output_to_target(preds), paths, save_dir / f'val_batch{batch_i}_pred.jpg', names)  # pred

        callbacks.run('on_val_batch_end', batch_i, im, targets, paths, shapes, preds)

    # Compute metrics
    stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)]  # to numpy
    if len(stats) and stats[0].any():
        tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
        ap50, ap = ap[:, 0], ap.mean(1)  # AP@0.5, AP@0.5:0.95
        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
    nt = np.bincount(stats[3].astype(int), minlength=nc)  # number of targets per class

    # Print results
    pf = '%22s' + '%11i' * 2 + '%11.3g' * 4  # print format
    LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
    if nt.sum() == 0:
        LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels')

    # Print results per class
    if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))

    # Print speeds
    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
    if not training:
        shape = (batch_size, 3, imgsz, imgsz)
        LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)

    # Plots
    if plots:
        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
        callbacks.run('on_val_end', nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix)

    # Save JSON
    if save_json and len(jdict):
        w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ''  # weights
        anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json')  # annotations json
        pred_json = str(save_dir / f"{w}_predictions.json")  # predictions json
        LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
        with open(pred_json, 'w') as f:
            json.dump(jdict, f)

        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            check_requirements('pycocotools')
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            anno = COCO(anno_json)  # init annotations api
            pred = anno.loadRes(pred_json)  # init predictions api
            eval = COCOeval(anno, pred, 'bbox')
            if is_coco:
                eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files]  # image IDs to evaluate
            eval.evaluate()
            eval.accumulate()
            eval.summarize()
            map, map50 = eval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
        except Exception as e:
            LOGGER.info(f'pycocotools unable to run: {e}')

    # Return results
    model.float()  # for training
    if not training:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
321
+
322
+
323
def parse_opt():
    """Parse command-line options for validation and return the populated namespace."""
    p = argparse.ArgumentParser()
    p.add_argument('--data', type=str, default=ROOT / 'data/coco.yaml', help='dataset.yaml path')
    p.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolo.pt', help='model path(s)')
    p.add_argument('--batch-size', type=int, default=32, help='batch size')
    p.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
    p.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
    p.add_argument('--iou-thres', type=float, default=0.7, help='NMS IoU threshold')
    p.add_argument('--max-det', type=int, default=300, help='maximum detections per image')
    p.add_argument('--task', default='val', help='train, val, test, speed or study')
    p.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    p.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
    p.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
    p.add_argument('--augment', action='store_true', help='augmented inference')
    p.add_argument('--verbose', action='store_true', help='report mAP by class')
    p.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    p.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
    p.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    p.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
    p.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name')
    p.add_argument('--name', default='exp', help='save to project/name')
    p.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    p.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    p.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    p.add_argument('--min-items', type=int, default=0, help='Experimental')
    opt = p.parse_args()
    opt.data = check_yaml(opt.data)  # check YAML
    opt.save_json = opt.save_json or opt.data.endswith('coco.yaml')  # COCO data implies JSON output
    opt.save_txt = opt.save_txt or opt.save_hybrid  # hybrid labels require txt output
    print_args(vars(opt))
    return opt
354
+
355
+
356
def main(opt):
    """Dispatch validation according to ``opt.task``.

    'train'/'val'/'test' run a single evaluation; 'speed' benchmarks each
    weights file; 'study' sweeps image sizes and saves/plots mAP-vs-speed
    results. ``opt`` fields are mutated in place for the benchmark loops.
    """
    #check_requirements(exclude=('tensorboard', 'thop'))

    if opt.task in ('train', 'val', 'test'):  # run normally
        # High confidence thresholds bias mAP; warn the user up front.
        if opt.conf_thres > 0.001:  # https://github.com/ultralytics/yolov5/issues/1466
            LOGGER.info(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results')
        if opt.save_hybrid:
            LOGGER.info('WARNING ⚠️ --save-hybrid will return high mAP from hybrid labels, not from predictions alone')
        run(**vars(opt))
        return

    weight_list = opt.weights if isinstance(opt.weights, list) else [opt.weights]
    opt.half = torch.cuda.is_available() and opt.device != 'cpu'  # FP16 for fastest results

    if opt.task == 'speed':  # speed benchmarks
        # python val.py --task speed --data coco.yaml --batch 1 --weights yolo.pt...
        opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False
        for opt.weights in weight_list:
            run(**vars(opt), plots=False)

    elif opt.task == 'study':  # speed vs mAP benchmarks
        # python val.py --task study --data coco.yaml --iou 0.7 --weights yolo.pt...
        for opt.weights in weight_list:
            f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt'  # filename to save to
            sizes = list(range(256, 1536 + 128, 128))  # x axis (image sizes)
            results = []  # y axis
            for opt.imgsz in sizes:  # img-size
                LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...')
                r, _, t = run(**vars(opt), plots=False)
                results.append(r + t)  # results and times
            np.savetxt(f, results, fmt='%10.4g')  # save
        os.system('zip -r study.zip study_*.txt')
        plot_val_study(x=sizes)  # plot
387
+
388
+
389
if __name__ == "__main__":
    # Script entry point: parse CLI options, then dispatch on the task.
    main(parse_opt())