hank1996 committed
Commit 78e8355 · 1 Parent(s): 409b809

Delete models/yolo.py

Files changed (1)
  1. models/yolo.py +0 -552
models/yolo.py DELETED
@@ -1,552 +0,0 @@
-
- import argparse
- import logging
- import sys
- from copy import deepcopy
-
- sys.path.append('./')  # to run '$ python *.py' files in subdirectories
- logger = logging.getLogger(__name__)
-
- from models.common import *
- from models.experimental import *
- from utils.autoanchor import check_anchor_order
- from utils.general import make_divisible, check_file, set_logging
- from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
-     select_device, copy_attr
- from utils.loss import SigmoidBin
-
- try:
-     import thop  # for FLOPS computation
- except ImportError:
-     thop = None
-
-
- class Detect(nn.Module):
-     stride = None  # strides computed during build
-     export = False  # onnx export
-
-     def __init__(self, nc=80, anchors=(), ch=()):  # detection layer
-         super(Detect, self).__init__()
-         self.nc = nc  # number of classes
-         self.no = nc + 5  # number of outputs per anchor
-         self.nl = len(anchors)  # number of detection layers
-         self.na = len(anchors[0]) // 2  # number of anchors
-         self.grid = [torch.zeros(1)] * self.nl  # init grid
-         a = torch.tensor(anchors).float().view(self.nl, -1, 2)
-         self.register_buffer('anchors', a)  # shape(nl,na,2)
-         self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2))  # shape(nl,1,na,1,1,2)
-         self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv
-
-     def forward(self, x):
-         # x = x.copy()  # for profiling
-         z = []  # inference output
-         self.training |= self.export
-         for i in range(self.nl):
-             x[i] = self.m[i](x[i])  # conv
-             bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
-             x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
-
-             if not self.training:  # inference
-                 if self.grid[i].shape[2:4] != x[i].shape[2:4]:
-                     self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
-
-                 y = x[i].sigmoid()
-                 y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i]  # xy
-                 y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
-                 z.append(y.view(bs, -1, self.no))
-
-         return x if self.training else (torch.cat(z, 1), x)
-
-     @staticmethod
-     def _make_grid(nx=20, ny=20):
-         yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
-         return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
-
-
- class IDetect(nn.Module):
-     stride = None  # strides computed during build
-     export = False  # onnx export
-
-     def __init__(self, nc=80, anchors=(), ch=()):  # detection layer
-         super(IDetect, self).__init__()
-         self.nc = nc  # number of classes
-         self.no = nc + 5  # number of outputs per anchor
-         self.nl = len(anchors)  # number of detection layers
-         self.na = len(anchors[0]) // 2  # number of anchors
-         self.grid = [torch.zeros(1)] * self.nl  # init grid
-         a = torch.tensor(anchors).float().view(self.nl, -1, 2)
-         self.register_buffer('anchors', a)  # shape(nl,na,2)
-         self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2))  # shape(nl,1,na,1,1,2)
-         self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv
-
-         self.ia = nn.ModuleList(ImplicitA(x) for x in ch)
-         self.im = nn.ModuleList(ImplicitM(self.no * self.na) for _ in ch)
-
-     def forward(self, x):
-         # x = x.copy()  # for profiling
-         z = []  # inference output
-         self.training |= self.export
-         for i in range(self.nl):
-             x[i] = self.m[i](self.ia[i](x[i]))  # conv
-             x[i] = self.im[i](x[i])
-             bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
-             x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
-
-             if not self.training:  # inference
-                 if self.grid[i].shape[2:4] != x[i].shape[2:4]:
-                     self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
-
-                 y = x[i].sigmoid()
-                 y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i]  # xy
-                 y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
-                 z.append(y.view(bs, -1, self.no))
-
-         return x if self.training else (torch.cat(z, 1), x)
-
-     @staticmethod
-     def _make_grid(nx=20, ny=20):
-         yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
-         return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
-
-
- class IAuxDetect(nn.Module):
-     stride = None  # strides computed during build
-     export = False  # onnx export
-
-     def __init__(self, nc=80, anchors=(), ch=()):  # detection layer
-         super(IAuxDetect, self).__init__()
-         self.nc = nc  # number of classes
-         self.no = nc + 5  # number of outputs per anchor
-         self.nl = len(anchors)  # number of detection layers
-         self.na = len(anchors[0]) // 2  # number of anchors
-         self.grid = [torch.zeros(1)] * self.nl  # init grid
-         a = torch.tensor(anchors).float().view(self.nl, -1, 2)
-         self.register_buffer('anchors', a)  # shape(nl,na,2)
-         self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2))  # shape(nl,1,na,1,1,2)
-         self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch[:self.nl])  # output conv
-         self.m2 = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch[self.nl:])  # output conv
-
-         self.ia = nn.ModuleList(ImplicitA(x) for x in ch[:self.nl])
-         self.im = nn.ModuleList(ImplicitM(self.no * self.na) for _ in ch[:self.nl])
-
-     def forward(self, x):
-         # x = x.copy()  # for profiling
-         z = []  # inference output
-         self.training |= self.export
-         for i in range(self.nl):
-             x[i] = self.m[i](self.ia[i](x[i]))  # conv
-             x[i] = self.im[i](x[i])
-             bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
-             x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
-
-             x[i+self.nl] = self.m2[i](x[i+self.nl])
-             x[i+self.nl] = x[i+self.nl].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
-
-             if not self.training:  # inference
-                 if self.grid[i].shape[2:4] != x[i].shape[2:4]:
-                     self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
-
-                 y = x[i].sigmoid()
-                 y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i]  # xy
-                 y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
-                 z.append(y.view(bs, -1, self.no))
-
-         return x if self.training else (torch.cat(z, 1), x[:self.nl])
-
-     @staticmethod
-     def _make_grid(nx=20, ny=20):
-         yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
-         return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
-
-
- class IBin(nn.Module):
-     stride = None  # strides computed during build
-     export = False  # onnx export
-
-     def __init__(self, nc=80, anchors=(), ch=(), bin_count=21):  # detection layer
-         super(IBin, self).__init__()
-         self.nc = nc  # number of classes
-         self.bin_count = bin_count
-
-         self.w_bin_sigmoid = SigmoidBin(bin_count=self.bin_count, min=0.0, max=4.0)
-         self.h_bin_sigmoid = SigmoidBin(bin_count=self.bin_count, min=0.0, max=4.0)
-         # classes, x,y,obj
-         self.no = nc + 3 + \
-             self.w_bin_sigmoid.get_length() + self.h_bin_sigmoid.get_length()  # w-bce, h-bce
-         # + self.x_bin_sigmoid.get_length() + self.y_bin_sigmoid.get_length()
-
-         self.nl = len(anchors)  # number of detection layers
-         self.na = len(anchors[0]) // 2  # number of anchors
-         self.grid = [torch.zeros(1)] * self.nl  # init grid
-         a = torch.tensor(anchors).float().view(self.nl, -1, 2)
-         self.register_buffer('anchors', a)  # shape(nl,na,2)
-         self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2))  # shape(nl,1,na,1,1,2)
-         self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv
-
-         self.ia = nn.ModuleList(ImplicitA(x) for x in ch)
-         self.im = nn.ModuleList(ImplicitM(self.no * self.na) for _ in ch)
-
-     def forward(self, x):
-
-         #self.x_bin_sigmoid.use_fw_regression = True
-         #self.y_bin_sigmoid.use_fw_regression = True
-         self.w_bin_sigmoid.use_fw_regression = True
-         self.h_bin_sigmoid.use_fw_regression = True
-
-         # x = x.copy()  # for profiling
-         z = []  # inference output
-         self.training |= self.export
-         for i in range(self.nl):
-             x[i] = self.m[i](self.ia[i](x[i]))  # conv
-             x[i] = self.im[i](x[i])
-             bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
-             x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
-
-             if not self.training:  # inference
-                 if self.grid[i].shape[2:4] != x[i].shape[2:4]:
-                     self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
-
-                 y = x[i].sigmoid()
-                 y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i]  # xy
-                 #y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
-
-
-                 #px = (self.x_bin_sigmoid.forward(y[..., 0:12]) + self.grid[i][..., 0]) * self.stride[i]
-                 #py = (self.y_bin_sigmoid.forward(y[..., 12:24]) + self.grid[i][..., 1]) * self.stride[i]
-
-                 pw = self.w_bin_sigmoid.forward(y[..., 2:24]) * self.anchor_grid[i][..., 0]
-                 ph = self.h_bin_sigmoid.forward(y[..., 24:46]) * self.anchor_grid[i][..., 1]
-
-                 #y[..., 0] = px
-                 #y[..., 1] = py
-                 y[..., 2] = pw
-                 y[..., 3] = ph
-
-                 y = torch.cat((y[..., 0:4], y[..., 46:]), dim=-1)
-
-                 z.append(y.view(bs, -1, y.shape[-1]))
-
-         return x if self.training else (torch.cat(z, 1), x)
-
-     @staticmethod
-     def _make_grid(nx=20, ny=20):
-         yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
-         return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
-
-
- class Model(nn.Module):
-     def __init__(self, cfg='yolor-csp-c.yaml', ch=3, nc=None, anchors=None):  # model, input channels, number of classes
-         super(Model, self).__init__()
-         self.traced = False
-         if isinstance(cfg, dict):
-             self.yaml = cfg  # model dict
-         else:  # is *.yaml
-             import yaml  # for torch hub
-             self.yaml_file = Path(cfg).name
-             with open(cfg) as f:
-                 self.yaml = yaml.load(f, Loader=yaml.SafeLoader)  # model dict
-
-         # Define model
-         ch = self.yaml['ch'] = self.yaml.get('ch', ch)  # input channels
-         if nc and nc != self.yaml['nc']:
-             logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
-             self.yaml['nc'] = nc  # override yaml value
-         if anchors:
-             logger.info(f'Overriding model.yaml anchors with anchors={anchors}')
-             self.yaml['anchors'] = round(anchors)  # override yaml value
-         self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist
-         self.names = [str(i) for i in range(self.yaml['nc'])]  # default names
-         # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])
-
-         # Build strides, anchors
-         m = self.model[-1]  # Detect()
-         if isinstance(m, Detect):
-             s = 256  # 2x min stride
-             m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))])  # forward
-             m.anchors /= m.stride.view(-1, 1, 1)
-             check_anchor_order(m)
-             self.stride = m.stride
-             self._initialize_biases()  # only run once
-             # print('Strides: %s' % m.stride.tolist())
-         if isinstance(m, IDetect):
-             s = 256  # 2x min stride
-             m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))])  # forward
-             m.anchors /= m.stride.view(-1, 1, 1)
-             check_anchor_order(m)
-             self.stride = m.stride
-             self._initialize_biases()  # only run once
-             # print('Strides: %s' % m.stride.tolist())
-         if isinstance(m, IAuxDetect):
-             s = 256  # 2x min stride
-             m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))[:4]])  # forward
-             #print(m.stride)
-             m.anchors /= m.stride.view(-1, 1, 1)
-             check_anchor_order(m)
-             self.stride = m.stride
-             self._initialize_aux_biases()  # only run once
-             # print('Strides: %s' % m.stride.tolist())
-         if isinstance(m, IBin):
-             s = 256  # 2x min stride
-             m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))])  # forward
-             m.anchors /= m.stride.view(-1, 1, 1)
-             check_anchor_order(m)
-             self.stride = m.stride
-             self._initialize_biases_bin()  # only run once
-             # print('Strides: %s' % m.stride.tolist())
-
-         # Init weights, biases
-         initialize_weights(self)
-         self.info()
-         logger.info('')
-
-     def forward(self, x, augment=False, profile=False):
-         if augment:
-             img_size = x.shape[-2:]  # height, width
-             s = [1, 0.83, 0.67]  # scales
-             f = [None, 3, None]  # flips (2-ud, 3-lr)
-             y = []  # outputs
-             for si, fi in zip(s, f):
-                 xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
-                 yi = self.forward_once(xi)[0]  # forward
-                 # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1])  # save
-                 yi[..., :4] /= si  # de-scale
-                 if fi == 2:
-                     yi[..., 1] = img_size[0] - yi[..., 1]  # de-flip ud
-                 elif fi == 3:
-                     yi[..., 0] = img_size[1] - yi[..., 0]  # de-flip lr
-                 y.append(yi)
-             return torch.cat(y, 1), None  # augmented inference, train
-         else:
-             return self.forward_once(x, profile)  # single-scale inference, train
-
-     def forward_once(self, x, profile=False):
-         y, dt = [], []  # outputs
-         for m in self.model:
-             if m.f != -1:  # if not from previous layer
-                 x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers
-
-             if not hasattr(self, 'traced'):
-                 self.traced = False
-
-             if self.traced:
-                 if isinstance(m, Detect) or isinstance(m, IDetect) or isinstance(m, IAuxDetect):
-                     break
-
-             if profile:
-                 c = isinstance(m, (Detect, IDetect, IAuxDetect, IBin))
-                 o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0  # FLOPS
-                 for _ in range(10):
-                     m(x.copy() if c else x)
-                 t = time_synchronized()
-                 for _ in range(10):
-                     m(x.copy() if c else x)
-                 dt.append((time_synchronized() - t) * 100)
-                 print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))
-
-             x = m(x)  # run
-
-             y.append(x if m.i in self.save else None)  # save output
-
-         if profile:
-             print('%.1fms total' % sum(dt))
-         return x
-
-     def _initialize_biases(self, cf=None):  # initialize biases into Detect(), cf is class frequency
-         # https://arxiv.org/abs/1708.02002 section 3.3
-         # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
-         m = self.model[-1]  # Detect() module
-         for mi, s in zip(m.m, m.stride):  # from
-             b = mi.bias.view(m.na, -1)  # conv.bias(255) to (3,85)
-             b.data[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)
-             b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum())  # cls
-             mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-
-     def _initialize_aux_biases(self, cf=None):  # initialize biases into Detect(), cf is class frequency
-         # https://arxiv.org/abs/1708.02002 section 3.3
-         # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
-         m = self.model[-1]  # Detect() module
-         for mi, mi2, s in zip(m.m, m.m2, m.stride):  # from
-             b = mi.bias.view(m.na, -1)  # conv.bias(255) to (3,85)
-             b.data[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)
-             b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum())  # cls
-             mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-             b2 = mi2.bias.view(m.na, -1)  # conv.bias(255) to (3,85)
-             b2.data[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)
-             b2.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum())  # cls
-             mi2.bias = torch.nn.Parameter(b2.view(-1), requires_grad=True)
-
-     def _initialize_biases_bin(self, cf=None):  # initialize biases into Detect(), cf is class frequency
-         # https://arxiv.org/abs/1708.02002 section 3.3
-         # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
-         m = self.model[-1]  # Bin() module
-         bc = m.bin_count
-         for mi, s in zip(m.m, m.stride):  # from
-             b = mi.bias.view(m.na, -1)  # conv.bias(255) to (3,85)
-             old = b[:, (0, 1, 2, bc + 3)].data
-             obj_idx = 2 * bc + 4
-             b[:, :obj_idx].data += math.log(0.6 / (bc + 1 - 0.99))
-             b[:, obj_idx].data += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)
-             b[:, (obj_idx + 1):].data += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum())  # cls
-             b[:, (0, 1, 2, bc + 3)].data = old
-             mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-
-     def _print_biases(self):
-         m = self.model[-1]  # Detect() module
-         for mi in m.m:  # from
-             b = mi.bias.detach().view(m.na, -1).T  # conv.bias(255) to (3,85)
-             print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))
-
-     # def _print_weights(self):
-     #     for m in self.model.modules():
-     #         if type(m) is Bottleneck:
-     #             print('%10.3g' % (m.w.detach().sigmoid() * 2))  # shortcut weights
-
-     def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
-         print('Fusing layers... ')
-         for m in self.model.modules():
-             if isinstance(m, RepConv):
-                 #print(f" fuse_repvgg_block")
-                 m.fuse_repvgg_block()
-             elif isinstance(m, RepConv_OREPA):
-                 #print(f" switch_to_deploy")
-                 m.switch_to_deploy()
-             elif type(m) is Conv and hasattr(m, 'bn'):
-                 m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv
-                 delattr(m, 'bn')  # remove batchnorm
-                 m.forward = m.fuseforward  # update forward
-         self.info()
-         return self
-
-     def nms(self, mode=True):  # add or remove NMS module
-         present = type(self.model[-1]) is NMS  # last layer is NMS
-         if mode and not present:
-             print('Adding NMS... ')
-             m = NMS()  # module
-             m.f = -1  # from
-             m.i = self.model[-1].i + 1  # index
-             self.model.add_module(name='%s' % m.i, module=m)  # add
-             self.eval()
-         elif not mode and present:
-             print('Removing NMS... ')
-             self.model = self.model[:-1]  # remove
-         return self
-
-     def autoshape(self):  # add autoShape module
-         print('Adding autoShape... ')
-         m = autoShape(self)  # wrap model
-         copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=())  # copy attributes
-         return m
-
-     def info(self, verbose=False, img_size=640):  # print model information
-         model_info(self, verbose, img_size)
-
-
- def parse_model(d, ch):  # model_dict, input_channels(3)
-     logger.info('\n%3s%18s%3s%10s  %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
-     anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
-     na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors
-     no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)
-
-     layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
-     for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
-         m = eval(m) if isinstance(m, str) else m  # eval strings
-         for j, a in enumerate(args):
-             try:
-                 args[j] = eval(a) if isinstance(a, str) else a  # eval strings
-             except:
-                 pass
-
-         n = max(round(n * gd), 1) if n > 1 else n  # depth gain
-         if m in [nn.Conv2d, Conv, RobustConv, RobustConv2, DWConv, GhostConv, RepConv, RepConv_OREPA, DownC,
-                  SPP, SPPF, SPPCSPC, GhostSPPCSPC, MixConv2d, Focus, Stem, GhostStem, CrossConv,
-                  Bottleneck, BottleneckCSPA, BottleneckCSPB, BottleneckCSPC,
-                  RepBottleneck, RepBottleneckCSPA, RepBottleneckCSPB, RepBottleneckCSPC,
-                  Res, ResCSPA, ResCSPB, ResCSPC,
-                  RepRes, RepResCSPA, RepResCSPB, RepResCSPC,
-                  ResX, ResXCSPA, ResXCSPB, ResXCSPC,
-                  RepResX, RepResXCSPA, RepResXCSPB, RepResXCSPC,
-                  Ghost, GhostCSPA, GhostCSPB, GhostCSPC,
-                  SwinTransformerBlock, STCSPA, STCSPB, STCSPC,
-                  SwinTransformer2Block, ST2CSPA, ST2CSPB, ST2CSPC]:
-             c1, c2 = ch[f], args[0]
-             if c2 != no:  # if not output
-                 c2 = make_divisible(c2 * gw, 8)
-
-             args = [c1, c2, *args[1:]]
-             if m in [DownC, SPPCSPC, GhostSPPCSPC,
-                      BottleneckCSPA, BottleneckCSPB, BottleneckCSPC,
-                      RepBottleneckCSPA, RepBottleneckCSPB, RepBottleneckCSPC,
-                      ResCSPA, ResCSPB, ResCSPC,
-                      RepResCSPA, RepResCSPB, RepResCSPC,
-                      ResXCSPA, ResXCSPB, ResXCSPC,
-                      RepResXCSPA, RepResXCSPB, RepResXCSPC,
-                      GhostCSPA, GhostCSPB, GhostCSPC,
-                      STCSPA, STCSPB, STCSPC,
-                      ST2CSPA, ST2CSPB, ST2CSPC]:
-                 args.insert(2, n)  # number of repeats
-                 n = 1
-         elif m is nn.BatchNorm2d:
-             args = [ch[f]]
-         elif m is Concat:
-             c2 = sum([ch[x] for x in f])
-         elif m is Chuncat:
-             c2 = sum([ch[x] for x in f])
-         elif m is Shortcut:
-             c2 = ch[f[0]]
-         elif m is Foldcut:
-             c2 = ch[f] // 2
-         elif m in [Detect, IDetect, IAuxDetect, IBin]:
-             args.append([ch[x] for x in f])
-             if isinstance(args[1], int):  # number of anchors
-                 args[1] = [list(range(args[1] * 2))] * len(f)
-         elif m is ReOrg:
-             c2 = ch[f] * 4
-         elif m is Contract:
-             c2 = ch[f] * args[0] ** 2
-         elif m is Expand:
-             c2 = ch[f] // args[0] ** 2
-         else:
-             c2 = ch[f]
-
-         m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args)  # module
-         t = str(m)[8:-2].replace('__main__.', '')  # module type
-         np = sum([x.numel() for x in m_.parameters()])  # number params
-         m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params
-         logger.info('%3s%18s%3s%10.0f  %-40s%-30s' % (i, f, n, np, t, args))  # print
-         save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
-         layers.append(m_)
-         if i == 0:
-             ch = []
-         ch.append(c2)
-     return nn.Sequential(*layers), sorted(save)
-
-
- if __name__ == '__main__':
-     parser = argparse.ArgumentParser()
-     parser.add_argument('--cfg', type=str, default='yolor-csp-c.yaml', help='model.yaml')
-     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
-     parser.add_argument('--profile', action='store_true', help='profile model speed')
-     opt = parser.parse_args()
-     opt.cfg = check_file(opt.cfg)  # check file
-     set_logging()
-     device = select_device(opt.device)
-
-     # Create model
-     model = Model(opt.cfg).to(device)
-     model.train()
-
-     if opt.profile:
-         img = torch.rand(1, 3, 640, 640).to(device)
-         y = model(img, profile=True)
-
-     # Profile
-     # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device)
-     # y = model(img, profile=True)
-
-     # Tensorboard
-     # from torch.utils.tensorboard import SummaryWriter
-     # tb_writer = SummaryWriter()
-     # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/")
-     # tb_writer.add_graph(model.model, img)  # add model to tensorboard
-     # tb_writer.add_image('test', img[0], dataformats='CWH')  # add model to tensorboard
-