PKaushik committed on
Commit
e31779b
1 Parent(s): cb1de97
Files changed (1)
  1. yolov6/core/evaler.py +256 -0
yolov6/core/evaler.py ADDED
@@ -0,0 +1,256 @@
+ #!/usr/bin/env python3
+ # -*- coding:utf-8 -*-
+ import os
+ from tqdm import tqdm
+ import numpy as np
+ import json
+ import torch
+ import yaml
+ from pathlib import Path
+
+ from pycocotools.coco import COCO
+ from pycocotools.cocoeval import COCOeval
+
+ from yolov6.data.data_load import create_dataloader
+ from yolov6.utils.events import LOGGER, NCOLS
+ from yolov6.utils.nms import non_max_suppression
+ from yolov6.utils.checkpoint import load_checkpoint
+ from yolov6.utils.torch_utils import time_sync, get_model_info
+
+ '''
+ python tools/eval.py --task 'train'/'val'/'speed'
+ '''
+
+
+ class Evaler:
+     def __init__(self,
+                  data,
+                  batch_size=32,
+                  img_size=640,
+                  conf_thres=0.001,
+                  iou_thres=0.65,
+                  device='',
+                  half=True,
+                  save_dir=''):
+         self.data = data
+         self.batch_size = batch_size
+         self.img_size = img_size
+         self.conf_thres = conf_thres
+         self.iou_thres = iou_thres
+         self.device = device
+         self.half = half
+         self.save_dir = save_dir
+
+     def init_model(self, model, weights, task):
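+         '''Load the checkpoint for the val/speed tasks, switch any RepVGGBlock
+         to deploy mode, and cast the model to half precision when half is True.
+         '''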
+         if task != 'train':
+             model = load_checkpoint(weights, map_location=self.device)
+             self.stride = int(model.stride.max())
+             if self.device.type != 'cpu':
+                 # warm up the model with one dummy forward pass before timing
+                 model(torch.zeros(1, 3, self.img_size, self.img_size).to(self.device).type_as(next(model.parameters())))
+             # switch to deploy
+             from yolov6.layers.common import RepVGGBlock
+             for layer in model.modules():
+                 if isinstance(layer, RepVGGBlock):
+                     layer.switch_to_deploy()
+             LOGGER.info("Switch model to deploy mode.")
+             LOGGER.info("Model Summary: {}".format(get_model_info(model, self.img_size)))
+         model.half() if self.half else model.float()
+         return model
+
+     def init_data(self, dataloader, task):
+         '''Initialize dataloader.
+         Returns a dataloader for the val or speed task.
+         '''
+         self.is_coco = self.data.get("is_coco", False)
+         self.ids = self.coco80_to_coco91_class() if self.is_coco else list(range(1000))
+         if task != 'train':
+             pad = 0.0 if task == 'speed' else 0.5
+             dataloader = create_dataloader(self.data[task if task in ('train', 'val', 'test') else 'val'],
+                                            self.img_size, self.batch_size, self.stride, check_labels=True, pad=pad, rect=True,
+                                            data_dict=self.data, task=task)[0]
+         return dataloader
+
+     def predict_model(self, model, dataloader, task):
+         '''Model prediction
+         Predicts the whole dataset and gets the predicted results and inference time.
+         '''
+         self.speed_result = torch.zeros(4, device=self.device)
+         pred_results = []
+         pbar = tqdm(dataloader, desc="Inferencing model on val dataset.", ncols=NCOLS)
+         for imgs, targets, paths, shapes in pbar:
+             # pre-process
+             t1 = time_sync()
+             imgs = imgs.to(self.device, non_blocking=True)
+             imgs = imgs.half() if self.half else imgs.float()
+             imgs /= 255
+             self.speed_result[1] += time_sync() - t1  # pre-process time
+
+             # Inference
+             t2 = time_sync()
+             outputs = model(imgs)
+             self.speed_result[2] += time_sync() - t2  # inference time
+
+             # post-process
+             t3 = time_sync()
+             outputs = non_max_suppression(outputs, self.conf_thres, self.iou_thres, multi_label=True)
+             self.speed_result[3] += time_sync() - t3  # post-process time
+             self.speed_result[0] += len(outputs)
+
+             # save result
+             pred_results.extend(self.convert_to_coco_format(outputs, imgs, paths, shapes, self.ids))
+         return pred_results
+
+     def eval_model(self, pred_results, model, dataloader, task):
+         '''Evaluate model
+         For the speed task, this function only evaluates the model's speed and outputs the inference time.
+         For the val task, it evaluates both speed and mAP via pycocotools, and returns
+         the inference time and mAP values.
+         '''
+         LOGGER.info('\nEvaluating speed.')
+         self.eval_speed(task)
+
+         LOGGER.info('\nEvaluating mAP by pycocotools.')
+         if task != 'speed' and len(pred_results):
+             if 'anno_path' in self.data:
+                 anno_json = self.data['anno_path']
+             else:
+                 # use the COCO-format labels generated during dataset initialization
+                 dataset_root = os.path.dirname(os.path.dirname(self.data['val']))
+                 base_name = os.path.basename(self.data['val'])
+                 anno_json = os.path.join(dataset_root, 'annotations', f'instances_{base_name}.json')
+             pred_json = os.path.join(self.save_dir, "predictions.json")
+             LOGGER.info(f'Saving {pred_json}...')
+             with open(pred_json, 'w') as f:
+                 json.dump(pred_results, f)
+
+             anno = COCO(anno_json)
+             pred = anno.loadRes(pred_json)
+             cocoEval = COCOeval(anno, pred, 'bbox')
+             if self.is_coco:
+                 imgIds = [int(os.path.basename(x).split(".")[0])
+                           for x in dataloader.dataset.img_paths]
+                 cocoEval.params.imgIds = imgIds
+             cocoEval.evaluate()
+             cocoEval.accumulate()
+             cocoEval.summarize()
+             map, map50 = cocoEval.stats[:2]  # (mAP@0.5:0.95, mAP@0.5)
+             # Return results
+             model.float()  # for training
+             if task != 'train':
+                 LOGGER.info(f"Results saved to {self.save_dir}")
+             return (map50, map)
+         return (0.0, 0.0)
+
+     def eval_speed(self, task):
+         '''Evaluate model inference speed.'''
+         if task != 'train':
+             n_samples = self.speed_result[0].item()
+             pre_time, inf_time, nms_time = 1000 * self.speed_result[1:].cpu().numpy() / n_samples
+             for n, v in zip(["pre-process", "inference", "NMS"], [pre_time, inf_time, nms_time]):
+                 LOGGER.info("Average {} time: {:.2f} ms".format(n, v))
+
+     def box_convert(self, x):
+         # Convert boxes with shape [n, 4] from [x1, y1, x2, y2] to [x, y, w, h] where x1y1=top-left, x2y2=bottom-right
+         y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
+         y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
+         y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
+         y[:, 2] = x[:, 2] - x[:, 0]  # width
+         y[:, 3] = x[:, 3] - x[:, 1]  # height
+         return y
+
+     def scale_coords(self, img1_shape, coords, img0_shape, ratio_pad=None):
+         # Rescale coords (xyxy) from img1_shape to img0_shape
+         if ratio_pad is None:  # calculate from img0_shape
+             gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
+             pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
+         else:
+             gain = ratio_pad[0][0]
+             pad = ratio_pad[1]
+
+         coords[:, [0, 2]] -= pad[0]  # x padding
+         coords[:, [1, 3]] -= pad[1]  # y padding
+         coords[:, :4] /= gain
+         if isinstance(coords, torch.Tensor):  # faster individually
+             coords[:, 0].clamp_(0, img0_shape[1])  # x1
+             coords[:, 1].clamp_(0, img0_shape[0])  # y1
+             coords[:, 2].clamp_(0, img0_shape[1])  # x2
+             coords[:, 3].clamp_(0, img0_shape[0])  # y2
+         else:  # np.array (faster grouped)
+             coords[:, [0, 2]] = coords[:, [0, 2]].clip(0, img0_shape[1])  # x1, x2
+             coords[:, [1, 3]] = coords[:, [1, 3]].clip(0, img0_shape[0])  # y1, y2
+         return coords
+
+     def convert_to_coco_format(self, outputs, imgs, paths, shapes, ids):
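+         '''Convert one batch of NMS outputs to COCO JSON detection records:
+         boxes are rescaled to the original image shape and converted to
+         top-left [x, y, w, h], with mapped category ids and rounded scores.
+         '''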
+         pred_results = []
+         for i, pred in enumerate(outputs):
+             if len(pred) == 0:
+                 continue
+             path, shape = Path(paths[i]), shapes[i][0]
+             self.scale_coords(imgs[i].shape[1:], pred[:, :4], shape, shapes[i][1])
+             image_id = int(path.stem) if path.stem.isnumeric() else path.stem
+             bboxes = self.box_convert(pred[:, 0:4])
+             bboxes[:, :2] -= bboxes[:, 2:] / 2  # xywh center -> top-left corner
+             cls = pred[:, 5]
+             scores = pred[:, 4]
+             for ind in range(pred.shape[0]):
+                 category_id = ids[int(cls[ind])]
+                 bbox = [round(x, 3) for x in bboxes[ind].tolist()]
+                 score = round(scores[ind].item(), 5)
+                 pred_data = {
+                     "image_id": image_id,
+                     "category_id": category_id,
+                     "bbox": bbox,
+                     "score": score
+                 }
+                 pred_results.append(pred_data)
+         return pred_results
+
+     @staticmethod
+     def check_task(task):
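+         '''Check that the task is one of 'train', 'val' or 'speed'.'''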
+         if task not in ['train', 'val', 'speed']:
+             raise Exception("task argument error: only 'train' / 'val' / 'speed' tasks are supported.")
+
+     @staticmethod
+     def reload_thres(conf_thres, iou_thres, task):
+         '''Set the conf and iou thresholds for the val/speed tasks.'''
+         if task != 'train':
+             if task == 'val':
+                 conf_thres = 0.001
+             if task == 'speed':
+                 conf_thres = 0.25
+                 iou_thres = 0.45
+         return conf_thres, iou_thres
+
+     @staticmethod
+     def reload_device(device, model, task):
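+         '''Resolve the torch.device to evaluate on: taken from the model's
+         parameters for the train task, otherwise derived from the device string.
+         '''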
+         # device = 'cpu' or '0' or '0,1,2,3'
+         if task == 'train':
+             device = next(model.parameters()).device
+         else:
+             if device == 'cpu':
+                 os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
+             elif device:
+                 os.environ['CUDA_VISIBLE_DEVICES'] = device
+                 assert torch.cuda.is_available()
+             cuda = device != 'cpu' and torch.cuda.is_available()
+             device = torch.device('cuda:0' if cuda else 'cpu')
+         return device
+
+     @staticmethod
+     def reload_dataset(data):
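+         '''Load the dataset yaml file and check that the val images path exists.'''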
+         with open(data, errors='ignore') as yaml_file:
+             data = yaml.safe_load(yaml_file)
+         val = data.get('val')
+         if not os.path.exists(val):
+             raise Exception('Dataset not found.')
+         return data
+
+     @staticmethod
+     def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)
+         # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
+         x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20,
+              21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+              41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
+              59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79,
+              80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
+         return x
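
For reference, a minimal sketch of how this class is typically driven end to end, mirroring the `tools/eval.py` entry point named in the module docstring; the dataset yaml, weights, and save paths below are placeholders:

    import os
    import torch
    from yolov6.core.evaler import Evaler

    task = 'val'                                              # or 'speed'
    Evaler.check_task(task)
    conf_thres, iou_thres = Evaler.reload_thres(0.001, 0.65, task)
    device = Evaler.reload_device('0', None, task)            # GPU 0; the model arg is only used for 'train'
    data = Evaler.reload_dataset('./data/coco.yaml')          # placeholder dataset yaml

    save_dir = 'runs/val/exp'                                 # predictions.json is written here
    os.makedirs(save_dir, exist_ok=True)
    val = Evaler(data, batch_size=32, img_size=640,
                 conf_thres=conf_thres, iou_thres=iou_thres,
                 device=device, half=True, save_dir=save_dir)
    model = val.init_model(None, './weights/yolov6s.pt', task)  # placeholder weights path
    dataloader = val.init_data(None, task)

    model.eval()
    with torch.no_grad():
        pred_results = val.predict_model(model, dataloader, task)
        map50, map50_95 = val.eval_model(pred_results, model, dataloader, task)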