Sa-m committed on
Commit
a217b32
1 Parent(s): f76a0af

Upload loss.py

Files changed (1)
  1. utils/loss.py +1697 -0
utils/loss.py ADDED
@@ -0,0 +1,1697 @@
+ # Loss functions
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ from utils.general import bbox_iou, bbox_alpha_iou, box_iou, box_giou, box_diou, box_ciou, xywh2xyxy
+ from utils.torch_utils import is_parallel
+
+
+ def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
+     # return positive, negative label smoothing BCE targets
+     return 1.0 - 0.5 * eps, 0.5 * eps
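+ # e.g. smooth_BCE(eps=0.1) returns (0.95, 0.05): positive targets are pulled
+ # down from 1.0 and negatives up from 0.0 before the BCE terms below use them.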
+
+
+ class BCEBlurWithLogitsLoss(nn.Module):
+     # nn.BCEWithLogitsLoss() with reduced missing-label effects.
+     def __init__(self, alpha=0.05):
+         super(BCEBlurWithLogitsLoss, self).__init__()
+         self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none')  # must be nn.BCEWithLogitsLoss()
+         self.alpha = alpha
+
+     def forward(self, pred, true):
+         loss = self.loss_fcn(pred, true)
+         pred = torch.sigmoid(pred)  # prob from logits
+         dx = pred - true  # reduce only missing label effects
+         # dx = (pred - true).abs()  # reduce missing label and false label effects
+         alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
+         loss *= alpha_factor
+         return loss.mean()
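+ # When a true object is unlabelled, pred - true approaches 1 and alpha_factor
+ # approaches 0, so confident predictions on possibly-missing labels are
+ # down-weighted instead of penalized at full strength.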
+
+
+ class SigmoidBin(nn.Module):
+     stride = None  # strides computed during build
+     export = False  # onnx export
+
+     def __init__(self, bin_count=10, min=0.0, max=1.0, reg_scale=2.0, use_loss_regression=True, use_fw_regression=True, BCE_weight=1.0, smooth_eps=0.0):
+         super(SigmoidBin, self).__init__()
+
+         self.bin_count = bin_count
+         self.length = bin_count + 1
+         self.min = min
+         self.max = max
+         self.scale = float(max - min)
+         self.shift = self.scale / 2.0
+
+         self.use_loss_regression = use_loss_regression
+         self.use_fw_regression = use_fw_regression
+         self.reg_scale = reg_scale
+         self.BCE_weight = BCE_weight
+
+         start = min + (self.scale / 2.0) / self.bin_count
+         end = max - (self.scale / 2.0) / self.bin_count
+         step = self.scale / self.bin_count
+         self.step = step
+         # print(f" start = {start}, end = {end}, step = {step} ")
+
+         bins = torch.arange(start, end + 0.0001, step).float()  # torch.range is deprecated; arange with end + 0.0001 keeps the endpoint
+         self.register_buffer('bins', bins)
+
+         self.cp = 1.0 - 0.5 * smooth_eps
+         self.cn = 0.5 * smooth_eps
+
+         self.BCEbins = nn.BCEWithLogitsLoss(pos_weight=torch.Tensor([BCE_weight]))
+         self.MSELoss = nn.MSELoss()
+
+     def get_length(self):
+         return self.length
+
+     def forward(self, pred):
+         assert pred.shape[-1] == self.length, 'pred.shape[-1]=%d is not equal to self.length=%d' % (pred.shape[-1], self.length)
+
+         pred_reg = (pred[..., 0] * self.reg_scale - self.reg_scale / 2.0) * self.step
+         pred_bin = pred[..., 1:(1 + self.bin_count)]
+
+         _, bin_idx = torch.max(pred_bin, dim=-1)
+         bin_bias = self.bins[bin_idx]
+
+         if self.use_fw_regression:
+             result = pred_reg + bin_bias
+         else:
+             result = bin_bias
+         result = result.clamp(min=self.min, max=self.max)
+
+         return result
+
+     def training_loss(self, pred, target):
+         assert pred.shape[-1] == self.length, 'pred.shape[-1]=%d is not equal to self.length=%d' % (pred.shape[-1], self.length)
+         assert pred.shape[0] == target.shape[0], 'pred.shape=%d is not equal to the target.shape=%d' % (pred.shape[0], target.shape[0])
+         device = pred.device
+
+         pred_reg = (pred[..., 0].sigmoid() * self.reg_scale - self.reg_scale / 2.0) * self.step
+         pred_bin = pred[..., 1:(1 + self.bin_count)]
+
+         diff_bin_target = torch.abs(target[..., None] - self.bins)
+         _, bin_idx = torch.min(diff_bin_target, dim=-1)
+
+         bin_bias = self.bins[bin_idx]
+         bin_bias.requires_grad = False
+         result = pred_reg + bin_bias
+
+         target_bins = torch.full_like(pred_bin, self.cn, device=device)  # targets
+         n = pred.shape[0]
+         target_bins[range(n), bin_idx] = self.cp
+
+         loss_bin = self.BCEbins(pred_bin, target_bins)  # BCE
+
+         if self.use_loss_regression:
+             loss_regression = self.MSELoss(result, target)  # MSE
+             loss = loss_bin + loss_regression
+         else:
+             loss = loss_bin
+
+         out_result = result.clamp(min=self.min, max=self.max)
+
+         return loss, out_result
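+ # SigmoidBin encodes a scalar in [min, max] as a coarse classification over
+ # bin_count bins plus a fine regression offset (channel 0), so each value
+ # takes bin_count + 1 channels. A decoding sketch (values illustrative only):
+ #   wh_bin = SigmoidBin(bin_count=10, min=0.0, max=4.0, use_loss_regression=False)
+ #   out = wh_bin(torch.rand(8, wh_bin.get_length()))  # -> shape (8,), clamped to [0, 4]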
+
+
+ class FocalLoss(nn.Module):
+     # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
+     def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
+         super(FocalLoss, self).__init__()
+         self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
+         self.gamma = gamma
+         self.alpha = alpha
+         self.reduction = loss_fcn.reduction
+         self.loss_fcn.reduction = 'none'  # required to apply FL to each element
+
+     def forward(self, pred, true):
+         loss = self.loss_fcn(pred, true)
+         # p_t = torch.exp(-loss)
+         # loss *= self.alpha * (1.000001 - p_t) ** self.gamma  # non-zero power for gradient stability
+
+         # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
+         pred_prob = torch.sigmoid(pred)  # prob from logits
+         p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
+         alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
+         modulating_factor = (1.0 - p_t) ** self.gamma
+         loss *= alpha_factor * modulating_factor
+
+         if self.reduction == 'mean':
+             return loss.mean()
+         elif self.reduction == 'sum':
+             return loss.sum()
+         else:  # 'none'
+             return loss
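+ # This reproduces FL(p_t) = -alpha_t * (1 - p_t) ** gamma * log(p_t) from
+ # Lin et al. (2017): the raw BCE term supplies -log(p_t), and the two factors
+ # above supply alpha_t and (1 - p_t) ** gamma, damping easy examples.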
+
+
+ class QFocalLoss(nn.Module):
+     # Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = QFocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
+     def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
+         super(QFocalLoss, self).__init__()
+         self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
+         self.gamma = gamma
+         self.alpha = alpha
+         self.reduction = loss_fcn.reduction
+         self.loss_fcn.reduction = 'none'  # required to apply FL to each element
+
+     def forward(self, pred, true):
+         loss = self.loss_fcn(pred, true)
+
+         pred_prob = torch.sigmoid(pred)  # prob from logits
+         alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
+         modulating_factor = torch.abs(true - pred_prob) ** self.gamma
+         loss *= alpha_factor * modulating_factor
+
+         if self.reduction == 'mean':
+             return loss.mean()
+         elif self.reduction == 'sum':
+             return loss.sum()
+         else:  # 'none'
+             return loss
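+ # Quality Focal Loss (Li et al., "Generalized Focal Loss", 2020): the factor
+ # |true - sigmoid(pred)| ** gamma also accepts soft targets such as IoU
+ # scores, not just {0, 1} labels, unlike the hard focal loss above.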
+
+ class RankSort(torch.autograd.Function):
+     @staticmethod
+     def forward(ctx, logits, targets, delta_RS=0.50, eps=1e-10):
+         classification_grads = torch.zeros(logits.shape).cuda()
+
+         # Filter fg logits
+         fg_labels = (targets > 0.)
+         fg_logits = logits[fg_labels]
+         fg_targets = targets[fg_labels]
+         fg_num = len(fg_logits)
+
+         # Do not use bg with scores less than minimum fg logit
+         # since changing its score does not have an effect on precision
+         threshold_logit = torch.min(fg_logits) - delta_RS
+         relevant_bg_labels = ((targets == 0) & (logits >= threshold_logit))
+
+         relevant_bg_logits = logits[relevant_bg_labels]
+         relevant_bg_grad = torch.zeros(len(relevant_bg_logits)).cuda()
+         sorting_error = torch.zeros(fg_num).cuda()
+         ranking_error = torch.zeros(fg_num).cuda()
+         fg_grad = torch.zeros(fg_num).cuda()
+
+         # sort the fg logits
+         order = torch.argsort(fg_logits)
+         # Loop over each positive following the order
+         for ii in order:
+             # Difference transforms (x_ij)
+             fg_relations = fg_logits - fg_logits[ii]
+             bg_relations = relevant_bg_logits - fg_logits[ii]
+
+             if delta_RS > 0:
+                 fg_relations = torch.clamp(fg_relations / (2 * delta_RS) + 0.5, min=0, max=1)
+                 bg_relations = torch.clamp(bg_relations / (2 * delta_RS) + 0.5, min=0, max=1)
+             else:
+                 fg_relations = (fg_relations >= 0).float()
+                 bg_relations = (bg_relations >= 0).float()
+
+             # Rank of ii among pos and false positive number (bg with larger scores)
+             rank_pos = torch.sum(fg_relations)
+             FP_num = torch.sum(bg_relations)
+
+             # Rank of ii among all examples
+             rank = rank_pos + FP_num
+
+             # Ranking error of example ii. target_ranking_error is always 0. (Eq. 7)
+             ranking_error[ii] = FP_num / rank
+
+             # Current sorting error of example ii. (Eq. 7)
+             current_sorting_error = torch.sum(fg_relations * (1 - fg_targets)) / rank_pos
+
+             # Find examples in the target sorted order for example ii
+             iou_relations = (fg_targets >= fg_targets[ii])
+             target_sorted_order = iou_relations * fg_relations
+
+             # The rank of ii among positives in sorted order
+             rank_pos_target = torch.sum(target_sorted_order)
+
+             # Compute target sorting error. (Eq. 8)
+             # Since target ranking error is 0, this is also total target error
+             target_sorting_error = torch.sum(target_sorted_order * (1 - fg_targets)) / rank_pos_target
+
+             # Compute sorting error on example ii
+             sorting_error[ii] = current_sorting_error - target_sorting_error
+
+             # Identity update for ranking error
+             if FP_num > eps:
+                 # For ii the update is the ranking error
+                 fg_grad[ii] -= ranking_error[ii]
+                 # For negatives, distribute error via ranking pmf (i.e. bg_relations/FP_num)
+                 relevant_bg_grad += (bg_relations * (ranking_error[ii] / FP_num))
+
+             # Find the positives that are misranked (the cause of the error)
+             # These are the ones with smaller IoU but larger logits
+             missorted_examples = (~iou_relations) * fg_relations
+
+             # Denominator of sorting pmf
+             sorting_pmf_denom = torch.sum(missorted_examples)
+
+             # Identity update for sorting error
+             if sorting_pmf_denom > eps:
+                 # For ii the update is the sorting error
+                 fg_grad[ii] -= sorting_error[ii]
+                 # For positives, distribute error via sorting pmf (i.e. missorted_examples/sorting_pmf_denom)
+                 fg_grad += (missorted_examples * (sorting_error[ii] / sorting_pmf_denom))
+
+         # Normalize gradients by number of positives
+         classification_grads[fg_labels] = (fg_grad / fg_num)
+         classification_grads[relevant_bg_labels] = (relevant_bg_grad / fg_num)
+
+         ctx.save_for_backward(classification_grads)
+
+         return ranking_error.mean(), sorting_error.mean()
+
+     @staticmethod
+     def backward(ctx, out_grad1, out_grad2):
+         g1, = ctx.saved_tensors
+         return g1 * out_grad1, None, None, None
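+ # Hand-derived gradients are cached in forward() and replayed in backward(),
+ # the standard torch.autograd.Function pattern. A call sketch with hypothetical
+ # flat score/IoU-target tensors:
+ #   rank_loss, sort_loss = RankSort.apply(logits, iou_targets)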
+
+ class aLRPLoss(torch.autograd.Function):
+     @staticmethod
+     def forward(ctx, logits, targets, regression_losses, delta=1., eps=1e-5):
+         classification_grads = torch.zeros(logits.shape).cuda()
+
+         # Filter fg logits
+         fg_labels = (targets == 1)
+         fg_logits = logits[fg_labels]
+         fg_num = len(fg_logits)
+
+         # Do not use bg with scores less than minimum fg logit
+         # since changing its score does not have an effect on precision
+         threshold_logit = torch.min(fg_logits) - delta
+
+         # Get valid bg logits
+         relevant_bg_labels = ((targets == 0) & (logits >= threshold_logit))
+         relevant_bg_logits = logits[relevant_bg_labels]
+         relevant_bg_grad = torch.zeros(len(relevant_bg_logits)).cuda()
+         rank = torch.zeros(fg_num).cuda()
+         prec = torch.zeros(fg_num).cuda()
+         fg_grad = torch.zeros(fg_num).cuda()
+
+         max_prec = 0
+         # sort the fg logits
+         order = torch.argsort(fg_logits)
+         # Loop over each positive following the order
+         for ii in order:
+             # x_ij s as score differences with fgs
+             fg_relations = fg_logits - fg_logits[ii]
+             # Apply piecewise linear function and determine relations with fgs
+             fg_relations = torch.clamp(fg_relations / (2 * delta) + 0.5, min=0, max=1)
+             # Discard i=j in the summation in rank_pos
+             fg_relations[ii] = 0
+
+             # x_ij s as score differences with bgs
+             bg_relations = relevant_bg_logits - fg_logits[ii]
+             # Apply piecewise linear function and determine relations with bgs
+             bg_relations = torch.clamp(bg_relations / (2 * delta) + 0.5, min=0, max=1)
+
+             # Compute the rank of the example within fgs and number of bgs with larger scores
+             rank_pos = 1 + torch.sum(fg_relations)
+             FP_num = torch.sum(bg_relations)
+             # Store the total since it is normalizer also for aLRP Regression error
+             rank[ii] = rank_pos + FP_num
+
+             # Compute precision for this example to compute classification loss
+             prec[ii] = rank_pos / rank[ii]
+             # For stability, set eps to an infinitesimally small value (e.g. 1e-6), then compute grads
+             if FP_num > eps:
+                 fg_grad[ii] = -(torch.sum(fg_relations * regression_losses) + FP_num) / rank[ii]
+                 relevant_bg_grad += (bg_relations * (-fg_grad[ii] / FP_num))
+
+         # aLRP with grad formulation fg gradient
+         classification_grads[fg_labels] = fg_grad
+         # aLRP with grad formulation bg gradient
+         classification_grads[relevant_bg_labels] = relevant_bg_grad
+
+         classification_grads /= (fg_num)
+
+         cls_loss = 1 - prec.mean()
+         ctx.save_for_backward(classification_grads)
+
+         return cls_loss, rank, order
+
+     @staticmethod
+     def backward(ctx, out_grad1, out_grad2, out_grad3):
+         g1, = ctx.saved_tensors
+         return g1 * out_grad1, None, None, None, None
+
+
+ class APLoss(torch.autograd.Function):
+     @staticmethod
+     def forward(ctx, logits, targets, delta=1.):
+         classification_grads = torch.zeros(logits.shape).cuda()
+
+         # Filter fg logits
+         fg_labels = (targets == 1)
+         fg_logits = logits[fg_labels]
+         fg_num = len(fg_logits)
+
+         # Do not use bg with scores less than minimum fg logit
+         # since changing its score does not have an effect on precision
+         threshold_logit = torch.min(fg_logits) - delta
+
+         # Get valid bg logits
+         relevant_bg_labels = ((targets == 0) & (logits >= threshold_logit))
+         relevant_bg_logits = logits[relevant_bg_labels]
+         relevant_bg_grad = torch.zeros(len(relevant_bg_logits)).cuda()
+         rank = torch.zeros(fg_num).cuda()
+         prec = torch.zeros(fg_num).cuda()
+         fg_grad = torch.zeros(fg_num).cuda()
+
+         max_prec = 0
+         # sort the fg logits
+         order = torch.argsort(fg_logits)
+         # Loop over each positive following the order
+         for ii in order:
+             # x_ij s as score differences with fgs
+             fg_relations = fg_logits - fg_logits[ii]
+             # Apply piecewise linear function and determine relations with fgs
+             fg_relations = torch.clamp(fg_relations / (2 * delta) + 0.5, min=0, max=1)
+             # Discard i=j in the summation in rank_pos
+             fg_relations[ii] = 0
+
+             # x_ij s as score differences with bgs
+             bg_relations = relevant_bg_logits - fg_logits[ii]
+             # Apply piecewise linear function and determine relations with bgs
+             bg_relations = torch.clamp(bg_relations / (2 * delta) + 0.5, min=0, max=1)
+
+             # Compute the rank of the example within fgs and number of bgs with larger scores
+             rank_pos = 1 + torch.sum(fg_relations)
+             FP_num = torch.sum(bg_relations)
+             # Store the total since it is normalizer also for aLRP Regression error
+             rank[ii] = rank_pos + FP_num
+
+             # Compute precision for this example
+             current_prec = rank_pos / rank[ii]
+
+             # Compute interpolated AP and store gradients for relevant bg examples
+             if (max_prec <= current_prec):
+                 max_prec = current_prec
+                 relevant_bg_grad += (bg_relations / rank[ii])
+             else:
+                 relevant_bg_grad += (bg_relations / rank[ii]) * (((1 - max_prec) / (1 - current_prec)))
+
+             # Store fg gradients
+             fg_grad[ii] = -(1 - max_prec)
+             prec[ii] = max_prec
+
+         # AP with grad formulation fg gradient
+         classification_grads[fg_labels] = fg_grad
+         # AP with grad formulation bg gradient
+         classification_grads[relevant_bg_labels] = relevant_bg_grad
+
+         classification_grads /= fg_num
+
+         cls_loss = 1 - prec.mean()
+         ctx.save_for_backward(classification_grads)
+
+         return cls_loss
+
+     @staticmethod
+     def backward(ctx, out_grad1):
+         g1, = ctx.saved_tensors
+         return g1 * out_grad1, None, None
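+ # RankSort, aLRPLoss and APLoss all appear to follow the same error-driven
+ # update scheme of the aLRP Loss / Rank & Sort Loss papers (Oksuz et al.):
+ # forward() computes a metric-based loss and caches per-logit gradients,
+ # which backward() simply replays; each is invoked via ClassName.apply(...).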
+
+
+ class ComputeLoss:
+     # Compute losses
+     def __init__(self, model, autobalance=False):
+         super(ComputeLoss, self).__init__()
+         device = next(model.parameters()).device  # get model device
+         h = model.hyp  # hyperparameters
+
+         # Define criteria
+         BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
+         BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
+
+         # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
+         self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0))  # positive, negative BCE targets
+
+         # Focal loss
+         g = h['fl_gamma']  # focal loss gamma
+         if g > 0:
+             BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
+
+         det = model.module.model[-1] if is_parallel(model) else model.model[-1]  # Detect() module
+         self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02])  # P3-P7
+         # self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.1, .05])  # P3-P7
+         # self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.5, 0.4, .1])  # P3-P7
+         self.ssi = list(det.stride).index(16) if autobalance else 0  # stride 16 index
+         self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance
+         for k in 'na', 'nc', 'nl', 'anchors':
+             setattr(self, k, getattr(det, k))
+
+     def __call__(self, p, targets):  # predictions, targets, model
+         device = targets.device
+         lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
+         tcls, tbox, indices, anchors = self.build_targets(p, targets)  # targets
+
+         # Losses
+         for i, pi in enumerate(p):  # layer index, layer predictions
+             b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
+             tobj = torch.zeros_like(pi[..., 0], device=device)  # target obj
+
+             n = b.shape[0]  # number of targets
+             if n:
+                 ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets
+
+                 # Regression
+                 pxy = ps[:, :2].sigmoid() * 2. - 0.5
+                 pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
+                 pbox = torch.cat((pxy, pwh), 1)  # predicted box
+                 iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True)  # iou(prediction, target)
+                 lbox += (1.0 - iou).mean()  # iou loss
+
+                 # Objectness
+                 tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype)  # iou ratio
+
+                 # Classification
+                 if self.nc > 1:  # cls loss (only if multiple classes)
+                     t = torch.full_like(ps[:, 5:], self.cn, device=device)  # targets
+                     t[range(n), tcls[i]] = self.cp
+                     # t[t == self.cp] = iou.detach().clamp(0).type(t.dtype)
+                     lcls += self.BCEcls(ps[:, 5:], t)  # BCE
+
+                 # Append targets to text file
+                 # with open('targets.txt', 'a') as file:
+                 #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]
+
+             obji = self.BCEobj(pi[..., 4], tobj)
+             lobj += obji * self.balance[i]  # obj loss
+             if self.autobalance:
+                 self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()
+
+         if self.autobalance:
+             self.balance = [x / self.balance[self.ssi] for x in self.balance]
+         lbox *= self.hyp['box']
+         lobj *= self.hyp['obj']
+         lcls *= self.hyp['cls']
+         bs = tobj.shape[0]  # batch size
+
+         loss = lbox + lobj + lcls
+         return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()
+
+     def build_targets(self, p, targets):
+         # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
+         na, nt = self.na, targets.shape[0]  # number of anchors, targets
+         tcls, tbox, indices, anch = [], [], [], []
+         gain = torch.ones(7, device=targets.device).long()  # normalized to gridspace gain
+         ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
+         targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)  # append anchor indices
+
+         g = 0.5  # bias
+         off = torch.tensor([[0, 0],
+                             [1, 0], [0, 1], [-1, 0], [0, -1],  # j,k,l,m
+                             # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
+                             ], device=targets.device).float() * g  # offsets
+
+         for i in range(self.nl):
+             anchors = self.anchors[i]
+             gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain
+
+             # Match targets to anchors
+             t = targets * gain
+             if nt:
+                 # Matches
+                 r = t[:, :, 4:6] / anchors[:, None]  # wh ratio
+                 j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t']  # compare
+                 # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
+                 t = t[j]  # filter
+
+                 # Offsets
+                 gxy = t[:, 2:4]  # grid xy
+                 gxi = gain[[2, 3]] - gxy  # inverse
+                 j, k = ((gxy % 1. < g) & (gxy > 1.)).T
+                 l, m = ((gxi % 1. < g) & (gxi > 1.)).T
+                 j = torch.stack((torch.ones_like(j), j, k, l, m))
+                 t = t.repeat((5, 1, 1))[j]
+                 offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
+             else:
+                 t = targets[0]
+                 offsets = 0
+
+             # Define
+             b, c = t[:, :2].long().T  # image, class
+             gxy = t[:, 2:4]  # grid xy
+             gwh = t[:, 4:6]  # grid wh
+             gij = (gxy - offsets).long()
+             gi, gj = gij.T  # grid xy indices
+
+             # Append
+             a = t[:, 6].long()  # anchor indices
+             indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))  # image, anchor, grid indices
+             tbox.append(torch.cat((gxy - gij, gwh), 1))  # box
+             anch.append(anchors[a])  # anchors
+             tcls.append(c)  # class
+
+         return tcls, tbox, indices, anch
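+ # Typical training-loop usage (a sketch, assuming a YOLO-style `model` with
+ # .hyp/.gr attributes and a Detect() head):
+ #   compute_loss = ComputeLoss(model)
+ #   loss, loss_items = compute_loss(pred, targets)  # loss_items = (box, obj, cls, total)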
+
+
+ class ComputeLossOTA:
+     # Compute losses
+     def __init__(self, model, autobalance=False):
+         super(ComputeLossOTA, self).__init__()
+         device = next(model.parameters()).device  # get model device
+         h = model.hyp  # hyperparameters
+
+         # Define criteria
+         BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
+         BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
+
+         # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
+         self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0))  # positive, negative BCE targets
+
+         # Focal loss
+         g = h['fl_gamma']  # focal loss gamma
+         if g > 0:
+             BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
+
+         det = model.module.model[-1] if is_parallel(model) else model.model[-1]  # Detect() module
+         self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02])  # P3-P7
+         self.ssi = list(det.stride).index(16) if autobalance else 0  # stride 16 index
+         self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance
+         for k in 'na', 'nc', 'nl', 'anchors', 'stride':
+             setattr(self, k, getattr(det, k))
+
+     def __call__(self, p, targets, imgs):  # predictions, targets, model
+         device = targets.device
+         lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
+         bs, as_, gjs, gis, targets, anchors = self.build_targets(p, targets, imgs)
+         pre_gen_gains = [torch.tensor(pp.shape, device=device)[[3, 2, 3, 2]] for pp in p]
+
+         # Losses
+         for i, pi in enumerate(p):  # layer index, layer predictions
+             b, a, gj, gi = bs[i], as_[i], gjs[i], gis[i]  # image, anchor, gridy, gridx
+             tobj = torch.zeros_like(pi[..., 0], device=device)  # target obj
+
+             n = b.shape[0]  # number of targets
+             if n:
+                 ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets
+
+                 # Regression
+                 grid = torch.stack([gi, gj], dim=1)
+                 pxy = ps[:, :2].sigmoid() * 2. - 0.5
+                 # pxy = ps[:, :2].sigmoid() * 3. - 1.
+                 pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
+                 pbox = torch.cat((pxy, pwh), 1)  # predicted box
+                 selected_tbox = targets[i][:, 2:6] * pre_gen_gains[i]
+                 selected_tbox[:, :2] -= grid
+                 iou = bbox_iou(pbox.T, selected_tbox, x1y1x2y2=False, CIoU=True)  # iou(prediction, target)
+                 lbox += (1.0 - iou).mean()  # iou loss
+
+                 # Objectness
+                 tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype)  # iou ratio
+
+                 # Classification
+                 selected_tcls = targets[i][:, 1].long()
+                 if self.nc > 1:  # cls loss (only if multiple classes)
+                     t = torch.full_like(ps[:, 5:], self.cn, device=device)  # targets
+                     t[range(n), selected_tcls] = self.cp
+                     lcls += self.BCEcls(ps[:, 5:], t)  # BCE
+
+                 # Append targets to text file
+                 # with open('targets.txt', 'a') as file:
+                 #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]
+
+             obji = self.BCEobj(pi[..., 4], tobj)
+             lobj += obji * self.balance[i]  # obj loss
+             if self.autobalance:
+                 self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()
+
+         if self.autobalance:
+             self.balance = [x / self.balance[self.ssi] for x in self.balance]
+         lbox *= self.hyp['box']
+         lobj *= self.hyp['obj']
+         lcls *= self.hyp['cls']
+         bs = tobj.shape[0]  # batch size
+
+         loss = lbox + lobj + lcls
+         return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()
+
+     def build_targets(self, p, targets, imgs):
+
+         # indices, anch = self.find_positive(p, targets)
+         indices, anch = self.find_3_positive(p, targets)
+         # indices, anch = self.find_4_positive(p, targets)
+         # indices, anch = self.find_5_positive(p, targets)
+         # indices, anch = self.find_9_positive(p, targets)
+
+         matching_bs = [[] for pp in p]
+         matching_as = [[] for pp in p]
+         matching_gjs = [[] for pp in p]
+         matching_gis = [[] for pp in p]
+         matching_targets = [[] for pp in p]
+         matching_anchs = [[] for pp in p]
+
+         nl = len(p)
+
+         for batch_idx in range(p[0].shape[0]):
+
+             b_idx = targets[:, 0] == batch_idx
+             this_target = targets[b_idx]
+             if this_target.shape[0] == 0:
+                 continue
+
+             txywh = this_target[:, 2:6] * imgs[batch_idx].shape[1]
+             txyxy = xywh2xyxy(txywh)
+
+             pxyxys = []
+             p_cls = []
+             p_obj = []
+             from_which_layer = []
+             all_b = []
+             all_a = []
+             all_gj = []
+             all_gi = []
+             all_anch = []
+
+             for i, pi in enumerate(p):
+
+                 b, a, gj, gi = indices[i]
+                 idx = (b == batch_idx)
+                 b, a, gj, gi = b[idx], a[idx], gj[idx], gi[idx]
+                 all_b.append(b)
+                 all_a.append(a)
+                 all_gj.append(gj)
+                 all_gi.append(gi)
+                 all_anch.append(anch[i][idx])
+                 from_which_layer.append(torch.ones(size=(len(b),)) * i)
+
+                 fg_pred = pi[b, a, gj, gi]
+                 p_obj.append(fg_pred[:, 4:5])
+                 p_cls.append(fg_pred[:, 5:])
+
+                 grid = torch.stack([gi, gj], dim=1)
+                 pxy = (fg_pred[:, :2].sigmoid() * 2. - 0.5 + grid) * self.stride[i]  # / 8.
+                 # pxy = (fg_pred[:, :2].sigmoid() * 3. - 1. + grid) * self.stride[i]
+                 pwh = (fg_pred[:, 2:4].sigmoid() * 2) ** 2 * anch[i][idx] * self.stride[i]  # / 8.
+                 pxywh = torch.cat([pxy, pwh], dim=-1)
+                 pxyxy = xywh2xyxy(pxywh)
+                 pxyxys.append(pxyxy)
+
+             pxyxys = torch.cat(pxyxys, dim=0)
+             if pxyxys.shape[0] == 0:
+                 continue
+             p_obj = torch.cat(p_obj, dim=0)
+             p_cls = torch.cat(p_cls, dim=0)
+             from_which_layer = torch.cat(from_which_layer, dim=0)
+             all_b = torch.cat(all_b, dim=0)
+             all_a = torch.cat(all_a, dim=0)
+             all_gj = torch.cat(all_gj, dim=0)
+             all_gi = torch.cat(all_gi, dim=0)
+             all_anch = torch.cat(all_anch, dim=0)
+
+             pair_wise_iou = box_iou(txyxy, pxyxys)
+
+             pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8)
+
+             top_k, _ = torch.topk(pair_wise_iou, min(10, pair_wise_iou.shape[1]), dim=1)
+             dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1)
+
+             gt_cls_per_image = (
+                 F.one_hot(this_target[:, 1].to(torch.int64), self.nc)
+                 .float()
+                 .unsqueeze(1)
+                 .repeat(1, pxyxys.shape[0], 1)
+             )
+
+             num_gt = this_target.shape[0]
+             cls_preds_ = (
+                 p_cls.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
+                 * p_obj.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
+             )
+
+             y = cls_preds_.sqrt_()
+             pair_wise_cls_loss = F.binary_cross_entropy_with_logits(
+                 torch.log(y / (1 - y)), gt_cls_per_image, reduction="none"
+             ).sum(-1)
+             del cls_preds_
+
+             cost = (
+                 pair_wise_cls_loss
+                 + 3.0 * pair_wise_iou_loss
+             )
+
+             matching_matrix = torch.zeros_like(cost)
+
+             for gt_idx in range(num_gt):
+                 _, pos_idx = torch.topk(
+                     cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False
+                 )
+                 matching_matrix[gt_idx][pos_idx] = 1.0
+
+             del top_k, dynamic_ks
+             anchor_matching_gt = matching_matrix.sum(0)
+             if (anchor_matching_gt > 1).sum() > 0:
+                 _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)
+                 matching_matrix[:, anchor_matching_gt > 1] *= 0.0
+                 matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0
+             fg_mask_inboxes = matching_matrix.sum(0) > 0.0
+             matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)
+
+             from_which_layer = from_which_layer[fg_mask_inboxes]
+             all_b = all_b[fg_mask_inboxes]
+             all_a = all_a[fg_mask_inboxes]
+             all_gj = all_gj[fg_mask_inboxes]
+             all_gi = all_gi[fg_mask_inboxes]
+             all_anch = all_anch[fg_mask_inboxes]
+
+             this_target = this_target[matched_gt_inds]
+
+             for i in range(nl):
+                 layer_idx = from_which_layer == i
+                 matching_bs[i].append(all_b[layer_idx])
+                 matching_as[i].append(all_a[layer_idx])
+                 matching_gjs[i].append(all_gj[layer_idx])
+                 matching_gis[i].append(all_gi[layer_idx])
+                 matching_targets[i].append(this_target[layer_idx])
+                 matching_anchs[i].append(all_anch[layer_idx])
+
+         for i in range(nl):
+             if matching_targets[i] != []:
+                 matching_bs[i] = torch.cat(matching_bs[i], dim=0)
+                 matching_as[i] = torch.cat(matching_as[i], dim=0)
+                 matching_gjs[i] = torch.cat(matching_gjs[i], dim=0)
+                 matching_gis[i] = torch.cat(matching_gis[i], dim=0)
+                 matching_targets[i] = torch.cat(matching_targets[i], dim=0)
+                 matching_anchs[i] = torch.cat(matching_anchs[i], dim=0)
+             else:
+                 # use the targets' device rather than a hard-coded 'cuda:0' so CPU runs also work
+                 matching_bs[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
+                 matching_as[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
+                 matching_gjs[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
+                 matching_gis[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
+                 matching_targets[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
+                 matching_anchs[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
+
+         return matching_bs, matching_as, matching_gjs, matching_gis, matching_targets, matching_anchs
+
+     def find_3_positive(self, p, targets):
+         # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
+         na, nt = self.na, targets.shape[0]  # number of anchors, targets
+         indices, anch = [], []
+         gain = torch.ones(7, device=targets.device).long()  # normalized to gridspace gain
+         ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
+         targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)  # append anchor indices
+
+         g = 0.5  # bias
+         off = torch.tensor([[0, 0],
+                             [1, 0], [0, 1], [-1, 0], [0, -1],  # j,k,l,m
+                             # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
+                             ], device=targets.device).float() * g  # offsets
+
+         for i in range(self.nl):
+             anchors = self.anchors[i]
+             gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain
+
+             # Match targets to anchors
+             t = targets * gain
+             if nt:
+                 # Matches
+                 r = t[:, :, 4:6] / anchors[:, None]  # wh ratio
+                 j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t']  # compare
+                 # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
+                 t = t[j]  # filter
+
+                 # Offsets
+                 gxy = t[:, 2:4]  # grid xy
+                 gxi = gain[[2, 3]] - gxy  # inverse
+                 j, k = ((gxy % 1. < g) & (gxy > 1.)).T
+                 l, m = ((gxi % 1. < g) & (gxi > 1.)).T
+                 j = torch.stack((torch.ones_like(j), j, k, l, m))
+                 t = t.repeat((5, 1, 1))[j]
+                 offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
+             else:
+                 t = targets[0]
+                 offsets = 0
+
+             # Define
+             b, c = t[:, :2].long().T  # image, class
+             gxy = t[:, 2:4]  # grid xy
+             gwh = t[:, 4:6]  # grid wh
+             gij = (gxy - offsets).long()
+             gi, gj = gij.T  # grid xy indices
+
+             # Append
+             a = t[:, 6].long()  # anchor indices
+             indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))  # image, anchor, grid indices
+             anch.append(anchors[a])  # anchors
+
+         return indices, anch
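+ # build_targets() implements a SimOTA-style assignment (as in YOLOX): candidate
+ # cells from find_3_positive() are scored with cost = cls_loss + 3 * iou_loss,
+ # each GT keeps its dynamic_k cheapest candidates (k = clamped sum of its top-10
+ # IoUs), and a candidate claimed by several GTs goes to the cheapest one.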
+
+
+ class ComputeLossBinOTA:
+     # Compute losses
+     def __init__(self, model, autobalance=False):
+         super(ComputeLossBinOTA, self).__init__()
+         device = next(model.parameters()).device  # get model device
+         h = model.hyp  # hyperparameters
+
+         # Define criteria
+         BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
+         BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
+         # MSEangle = nn.MSELoss().to(device)
+
+         # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
+         self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0))  # positive, negative BCE targets
+
+         # Focal loss
+         g = h['fl_gamma']  # focal loss gamma
+         if g > 0:
+             BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
+
+         det = model.module.model[-1] if is_parallel(model) else model.model[-1]  # Detect() module
+         self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02])  # P3-P7
+         self.ssi = list(det.stride).index(16) if autobalance else 0  # stride 16 index
+         self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance
+         for k in 'na', 'nc', 'nl', 'anchors', 'stride', 'bin_count':
+             setattr(self, k, getattr(det, k))
+
+         # xy_bin_sigmoid = SigmoidBin(bin_count=11, min=-0.5, max=1.5, use_loss_regression=False).to(device)
+         wh_bin_sigmoid = SigmoidBin(bin_count=self.bin_count, min=0.0, max=4.0, use_loss_regression=False).to(device)
+         # angle_bin_sigmoid = SigmoidBin(bin_count=31, min=-1.1, max=1.1, use_loss_regression=False).to(device)
+         self.wh_bin_sigmoid = wh_bin_sigmoid
+
+     def __call__(self, p, targets, imgs):  # predictions, targets, model
+         device = targets.device
+         lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
+         bs, as_, gjs, gis, targets, anchors = self.build_targets(p, targets, imgs)
+         pre_gen_gains = [torch.tensor(pp.shape, device=device)[[3, 2, 3, 2]] for pp in p]
+
+         # Losses
+         for i, pi in enumerate(p):  # layer index, layer predictions
+             b, a, gj, gi = bs[i], as_[i], gjs[i], gis[i]  # image, anchor, gridy, gridx
+             tobj = torch.zeros_like(pi[..., 0], device=device)  # target obj
+
+             obj_idx = self.wh_bin_sigmoid.get_length() * 2 + 2  # x,y, w-bce, h-bce     # xy_bin_sigmoid.get_length()*2
+
+             n = b.shape[0]  # number of targets
+             if n:
+                 ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets
+
+                 # Regression
+                 grid = torch.stack([gi, gj], dim=1)
+                 selected_tbox = targets[i][:, 2:6] * pre_gen_gains[i]
+                 selected_tbox[:, :2] -= grid
+
+                 # pxy = ps[:, :2].sigmoid() * 2. - 0.5
+                 # pxy = ps[:, :2].sigmoid() * 3. - 1.
+                 # pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
+                 # pbox = torch.cat((pxy, pwh), 1)  # predicted box
+
+                 # x_loss, px = xy_bin_sigmoid.training_loss(ps[..., 0:12], tbox[i][..., 0])
+                 # y_loss, py = xy_bin_sigmoid.training_loss(ps[..., 12:24], tbox[i][..., 1])
+                 w_loss, pw = self.wh_bin_sigmoid.training_loss(ps[..., 2:(3 + self.bin_count)], selected_tbox[..., 2] / anchors[i][..., 0])
+                 h_loss, ph = self.wh_bin_sigmoid.training_loss(ps[..., (3 + self.bin_count):obj_idx], selected_tbox[..., 3] / anchors[i][..., 1])
+
+                 pw *= anchors[i][..., 0]
+                 ph *= anchors[i][..., 1]
+
+                 px = ps[:, 0].sigmoid() * 2. - 0.5
+                 py = ps[:, 1].sigmoid() * 2. - 0.5
+
+                 lbox += w_loss + h_loss  # + x_loss + y_loss
+
+                 # print(f"\n px = {px.shape}, py = {py.shape}, pw = {pw.shape}, ph = {ph.shape} \n")
+
+                 pbox = torch.cat((px.unsqueeze(1), py.unsqueeze(1), pw.unsqueeze(1), ph.unsqueeze(1)), 1).to(device)  # predicted box
+
+                 iou = bbox_iou(pbox.T, selected_tbox, x1y1x2y2=False, CIoU=True)  # iou(prediction, target)
+                 lbox += (1.0 - iou).mean()  # iou loss
+
+                 # Objectness
+                 tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype)  # iou ratio
+
+                 # Classification
+                 selected_tcls = targets[i][:, 1].long()
+                 if self.nc > 1:  # cls loss (only if multiple classes)
+                     t = torch.full_like(ps[:, (1 + obj_idx):], self.cn, device=device)  # targets
+                     t[range(n), selected_tcls] = self.cp
+                     lcls += self.BCEcls(ps[:, (1 + obj_idx):], t)  # BCE
+
+                 # Append targets to text file
+                 # with open('targets.txt', 'a') as file:
+                 #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]
+
+             obji = self.BCEobj(pi[..., obj_idx], tobj)
+             lobj += obji * self.balance[i]  # obj loss
+             if self.autobalance:
+                 self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()
+
+         if self.autobalance:
+             self.balance = [x / self.balance[self.ssi] for x in self.balance]
+         lbox *= self.hyp['box']
+         lobj *= self.hyp['obj']
+         lcls *= self.hyp['cls']
+         bs = tobj.shape[0]  # batch size
+
+         loss = lbox + lobj + lcls
+         return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()
+
+     def build_targets(self, p, targets, imgs):
+
+         # indices, anch = self.find_positive(p, targets)
+         indices, anch = self.find_3_positive(p, targets)
+         # indices, anch = self.find_4_positive(p, targets)
+         # indices, anch = self.find_5_positive(p, targets)
+         # indices, anch = self.find_9_positive(p, targets)
+
+         matching_bs = [[] for pp in p]
+         matching_as = [[] for pp in p]
+         matching_gjs = [[] for pp in p]
+         matching_gis = [[] for pp in p]
+         matching_targets = [[] for pp in p]
+         matching_anchs = [[] for pp in p]
+
+         nl = len(p)
+
+         for batch_idx in range(p[0].shape[0]):
+
+             b_idx = targets[:, 0] == batch_idx
+             this_target = targets[b_idx]
+             if this_target.shape[0] == 0:
+                 continue
+
+             txywh = this_target[:, 2:6] * imgs[batch_idx].shape[1]
+             txyxy = xywh2xyxy(txywh)
+
+             pxyxys = []
+             p_cls = []
+             p_obj = []
+             from_which_layer = []
+             all_b = []
+             all_a = []
+             all_gj = []
+             all_gi = []
+             all_anch = []
+
+             for i, pi in enumerate(p):
+
+                 obj_idx = self.wh_bin_sigmoid.get_length() * 2 + 2
+
+                 b, a, gj, gi = indices[i]
+                 idx = (b == batch_idx)
+                 b, a, gj, gi = b[idx], a[idx], gj[idx], gi[idx]
+                 all_b.append(b)
+                 all_a.append(a)
+                 all_gj.append(gj)
+                 all_gi.append(gi)
+                 all_anch.append(anch[i][idx])
+                 from_which_layer.append(torch.ones(size=(len(b),)) * i)
+
+                 fg_pred = pi[b, a, gj, gi]
+                 p_obj.append(fg_pred[:, obj_idx:(obj_idx + 1)])
+                 p_cls.append(fg_pred[:, (obj_idx + 1):])
+
+                 grid = torch.stack([gi, gj], dim=1)
+                 pxy = (fg_pred[:, :2].sigmoid() * 2. - 0.5 + grid) * self.stride[i]  # / 8.
+                 # pwh = (fg_pred[:, 2:4].sigmoid() * 2) ** 2 * anch[i][idx] * self.stride[i]  # / 8.
+                 pw = self.wh_bin_sigmoid.forward(fg_pred[..., 2:(3 + self.bin_count)].sigmoid()) * anch[i][idx][:, 0] * self.stride[i]
+                 ph = self.wh_bin_sigmoid.forward(fg_pred[..., (3 + self.bin_count):obj_idx].sigmoid()) * anch[i][idx][:, 1] * self.stride[i]
+
+                 pxywh = torch.cat([pxy, pw.unsqueeze(1), ph.unsqueeze(1)], dim=-1)
+                 pxyxy = xywh2xyxy(pxywh)
+                 pxyxys.append(pxyxy)
+
+             pxyxys = torch.cat(pxyxys, dim=0)
+             if pxyxys.shape[0] == 0:
+                 continue
+             p_obj = torch.cat(p_obj, dim=0)
+             p_cls = torch.cat(p_cls, dim=0)
+             from_which_layer = torch.cat(from_which_layer, dim=0)
+             all_b = torch.cat(all_b, dim=0)
+             all_a = torch.cat(all_a, dim=0)
+             all_gj = torch.cat(all_gj, dim=0)
+             all_gi = torch.cat(all_gi, dim=0)
+             all_anch = torch.cat(all_anch, dim=0)
+
+             pair_wise_iou = box_iou(txyxy, pxyxys)
+
+             pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8)
+
+             top_k, _ = torch.topk(pair_wise_iou, min(10, pair_wise_iou.shape[1]), dim=1)
+             dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1)
+
+             gt_cls_per_image = (
+                 F.one_hot(this_target[:, 1].to(torch.int64), self.nc)
+                 .float()
+                 .unsqueeze(1)
+                 .repeat(1, pxyxys.shape[0], 1)
+             )
+
+             num_gt = this_target.shape[0]
+             cls_preds_ = (
+                 p_cls.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
+                 * p_obj.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
+             )
+
+             y = cls_preds_.sqrt_()
+             pair_wise_cls_loss = F.binary_cross_entropy_with_logits(
+                 torch.log(y / (1 - y)), gt_cls_per_image, reduction="none"
+             ).sum(-1)
+             del cls_preds_
+
+             cost = (
+                 pair_wise_cls_loss
+                 + 3.0 * pair_wise_iou_loss
+             )
+
+             matching_matrix = torch.zeros_like(cost)
+
+             for gt_idx in range(num_gt):
+                 _, pos_idx = torch.topk(
+                     cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False
+                 )
+                 matching_matrix[gt_idx][pos_idx] = 1.0
+
+             del top_k, dynamic_ks
+             anchor_matching_gt = matching_matrix.sum(0)
+             if (anchor_matching_gt > 1).sum() > 0:
+                 _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)
+                 matching_matrix[:, anchor_matching_gt > 1] *= 0.0
+                 matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0
+             fg_mask_inboxes = matching_matrix.sum(0) > 0.0
+             matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)
+
+             from_which_layer = from_which_layer[fg_mask_inboxes]
+             all_b = all_b[fg_mask_inboxes]
+             all_a = all_a[fg_mask_inboxes]
+             all_gj = all_gj[fg_mask_inboxes]
+             all_gi = all_gi[fg_mask_inboxes]
+             all_anch = all_anch[fg_mask_inboxes]
+
+             this_target = this_target[matched_gt_inds]
+
+             for i in range(nl):
+                 layer_idx = from_which_layer == i
+                 matching_bs[i].append(all_b[layer_idx])
+                 matching_as[i].append(all_a[layer_idx])
+                 matching_gjs[i].append(all_gj[layer_idx])
+                 matching_gis[i].append(all_gi[layer_idx])
+                 matching_targets[i].append(this_target[layer_idx])
+                 matching_anchs[i].append(all_anch[layer_idx])
+
+         for i in range(nl):
+             if matching_targets[i] != []:
+                 matching_bs[i] = torch.cat(matching_bs[i], dim=0)
+                 matching_as[i] = torch.cat(matching_as[i], dim=0)
+                 matching_gjs[i] = torch.cat(matching_gjs[i], dim=0)
+                 matching_gis[i] = torch.cat(matching_gis[i], dim=0)
+                 matching_targets[i] = torch.cat(matching_targets[i], dim=0)
+                 matching_anchs[i] = torch.cat(matching_anchs[i], dim=0)
+             else:
+                 # use the targets' device rather than a hard-coded 'cuda:0' so CPU runs also work
+                 matching_bs[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
+                 matching_as[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
+                 matching_gjs[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
+                 matching_gis[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
+                 matching_targets[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
+                 matching_anchs[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
+
+         return matching_bs, matching_as, matching_gjs, matching_gis, matching_targets, matching_anchs
+
+     def find_3_positive(self, p, targets):
+         # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
+         na, nt = self.na, targets.shape[0]  # number of anchors, targets
+         indices, anch = [], []
+         gain = torch.ones(7, device=targets.device).long()  # normalized to gridspace gain
+         ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
+         targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)  # append anchor indices
+
+         g = 0.5  # bias
+         off = torch.tensor([[0, 0],
+                             [1, 0], [0, 1], [-1, 0], [0, -1],  # j,k,l,m
+                             # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
+                             ], device=targets.device).float() * g  # offsets
+
+         for i in range(self.nl):
+             anchors = self.anchors[i]
+             gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain
+
+             # Match targets to anchors
+             t = targets * gain
+             if nt:
+                 # Matches
+                 r = t[:, :, 4:6] / anchors[:, None]  # wh ratio
+                 j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t']  # compare
+                 # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
+                 t = t[j]  # filter
+
+                 # Offsets
+                 gxy = t[:, 2:4]  # grid xy
+                 gxi = gain[[2, 3]] - gxy  # inverse
+                 j, k = ((gxy % 1. < g) & (gxy > 1.)).T
+                 l, m = ((gxi % 1. < g) & (gxi > 1.)).T
+                 j = torch.stack((torch.ones_like(j), j, k, l, m))
+                 t = t.repeat((5, 1, 1))[j]
+                 offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
+             else:
+                 t = targets[0]
+                 offsets = 0
+
+             # Define
+             b, c = t[:, :2].long().T  # image, class
+             gxy = t[:, 2:4]  # grid xy
+             gwh = t[:, 4:6]  # grid wh
+             gij = (gxy - offsets).long()
+             gi, gj = gij.T  # grid xy indices
+
+             # Append
+             a = t[:, 6].long()  # anchor indices
+             indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))  # image, anchor, grid indices
+             anch.append(anchors[a])  # anchors
+
+         return indices, anch
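+ # ComputeLossBinOTA keeps the OTA assignment but swaps the direct wh regression
+ # for the SigmoidBin encoding: channels [0:2] are xy, two bin blocks of length
+ # get_length() cover w and h, obj sits at obj_idx and the class scores follow,
+ # which is what the index arithmetic above encodes.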
1174
+
1175
+
1176
+ class ComputeLossAuxOTA:
1177
+ # Compute losses
1178
+ def __init__(self, model, autobalance=False):
1179
+ super(ComputeLossAuxOTA, self).__init__()
1180
+ device = next(model.parameters()).device # get model device
1181
+ h = model.hyp # hyperparameters
1182
+
1183
+ # Define criteria
1184
+ BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
1185
+ BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
1186
+
1187
+ # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
1188
+ self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets
1189
+
1190
+ # Focal loss
1191
+ g = h['fl_gamma'] # focal loss gamma
1192
+ if g > 0:
1193
+ BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
1194
+
1195
+ det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module
1196
+ self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7
1197
+ self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index
1198
+ self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance
1199
+ for k in 'na', 'nc', 'nl', 'anchors', 'stride':
1200
+ setattr(self, k, getattr(det, k))
1201
+
1202
+ def __call__(self, p, targets, imgs): # predictions, targets, model
1203
+ device = targets.device
1204
+ lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
1205
+ bs_aux, as_aux_, gjs_aux, gis_aux, targets_aux, anchors_aux = self.build_targets2(p[:self.nl], targets, imgs)
1206
+ bs, as_, gjs, gis, targets, anchors = self.build_targets(p[:self.nl], targets, imgs)
1207
+ pre_gen_gains_aux = [torch.tensor(pp.shape, device=device)[[3, 2, 3, 2]] for pp in p[:self.nl]]
1208
+ pre_gen_gains = [torch.tensor(pp.shape, device=device)[[3, 2, 3, 2]] for pp in p[:self.nl]]
1209
+
1210
+
1211
+ # Losses
1212
+ for i in range(self.nl): # layer index, layer predictions
1213
+ pi = p[i]
1214
+ pi_aux = p[i+self.nl]
1215
+ b, a, gj, gi = bs[i], as_[i], gjs[i], gis[i] # image, anchor, gridy, gridx
1216
+ b_aux, a_aux, gj_aux, gi_aux = bs_aux[i], as_aux_[i], gjs_aux[i], gis_aux[i] # image, anchor, gridy, gridx
1217
+ tobj = torch.zeros_like(pi[..., 0], device=device) # target obj
1218
+ tobj_aux = torch.zeros_like(pi_aux[..., 0], device=device) # target obj
1219
+
1220
+ n = b.shape[0] # number of targets
1221
+ if n:
1222
+ ps = pi[b, a, gj, gi] # prediction subset corresponding to targets
1223
+
1224
+ # Regression
1225
+ grid = torch.stack([gi, gj], dim=1)
1226
+ pxy = ps[:, :2].sigmoid() * 2. - 0.5
1227
+ pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
1228
+ pbox = torch.cat((pxy, pwh), 1) # predicted box
1229
+ selected_tbox = targets[i][:, 2:6] * pre_gen_gains[i]
1230
+ selected_tbox[:, :2] -= grid
1231
+ iou = bbox_iou(pbox.T, selected_tbox, x1y1x2y2=False, CIoU=True) # iou(prediction, target)
1232
+ lbox += (1.0 - iou).mean() # iou loss
1233
+
1234
+ # Objectness
1235
+ tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio
1236
+
1237
+ # Classification
1238
+ selected_tcls = targets[i][:, 1].long()
1239
+ if self.nc > 1: # cls loss (only if multiple classes)
1240
+ t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets
1241
+ t[range(n), selected_tcls] = self.cp
1242
+ lcls += self.BCEcls(ps[:, 5:], t) # BCE
1243
+
1244
+ # Append targets to text file
1245
+ # with open('targets.txt', 'a') as file:
1246
+ # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]
1247
+
1248
+ n_aux = b_aux.shape[0] # number of targets
1249
+ if n_aux:
1250
+ ps_aux = pi_aux[b_aux, a_aux, gj_aux, gi_aux] # prediction subset corresponding to targets
1251
+ grid_aux = torch.stack([gi_aux, gj_aux], dim=1)
1252
+ pxy_aux = ps_aux[:, :2].sigmoid() * 2. - 0.5
1253
+ #pxy_aux = ps_aux[:, :2].sigmoid() * 3. - 1.
1254
+ pwh_aux = (ps_aux[:, 2:4].sigmoid() * 2) ** 2 * anchors_aux[i]
1255
+ pbox_aux = torch.cat((pxy_aux, pwh_aux), 1) # predicted box
1256
+ selected_tbox_aux = targets_aux[i][:, 2:6] * pre_gen_gains_aux[i]
1257
+ selected_tbox_aux[:, :2] -= grid_aux
1258
+ iou_aux = bbox_iou(pbox_aux.T, selected_tbox_aux, x1y1x2y2=False, CIoU=True) # iou(prediction, target)
1259
+ lbox += 0.25 * (1.0 - iou_aux).mean() # iou loss
1260
+
1261
+ # Objectness
1262
+ tobj_aux[b_aux, a_aux, gj_aux, gi_aux] = (1.0 - self.gr) + self.gr * iou_aux.detach().clamp(0).type(tobj_aux.dtype) # iou ratio
1263
+
1264
+ # Classification
1265
+ selected_tcls_aux = targets_aux[i][:, 1].long()
1266
+ if self.nc > 1: # cls loss (only if multiple classes)
1267
+ t_aux = torch.full_like(ps_aux[:, 5:], self.cn, device=device) # targets
1268
+ t_aux[range(n_aux), selected_tcls_aux] = self.cp
1269
+ lcls += 0.25 * self.BCEcls(ps_aux[:, 5:], t_aux) # BCE
1270
+
1271
+ obji = self.BCEobj(pi[..., 4], tobj)
1272
+ obji_aux = self.BCEobj(pi_aux[..., 4], tobj_aux)
1273
+ lobj += obji * self.balance[i] + 0.25 * obji_aux * self.balance[i] # obj loss
1274
+ if self.autobalance:
1275
+ self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()
1276
+
1277
+ if self.autobalance:
1278
+ self.balance = [x / self.balance[self.ssi] for x in self.balance]
1279
+ lbox *= self.hyp['box']
1280
+ lobj *= self.hyp['obj']
1281
+ lcls *= self.hyp['cls']
1282
+ bs = tobj.shape[0] # batch size
1283
+
1284
+ loss = lbox + lobj + lcls
1285
+ return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()
1286
+
1287
+ def build_targets(self, p, targets, imgs):
1288
+
1289
+ indices, anch = self.find_3_positive(p, targets)
1290
+
1291
+ matching_bs = [[] for pp in p]
1292
+ matching_as = [[] for pp in p]
1293
+ matching_gjs = [[] for pp in p]
1294
+ matching_gis = [[] for pp in p]
1295
+ matching_targets = [[] for pp in p]
1296
+ matching_anchs = [[] for pp in p]
1297
+
1298
+ nl = len(p)
1299
+
1300
+ for batch_idx in range(p[0].shape[0]):
1301
+
1302
+ b_idx = targets[:, 0]==batch_idx
1303
+ this_target = targets[b_idx]
1304
+ if this_target.shape[0] == 0:
1305
+ continue
1306
+
1307
+ txywh = this_target[:, 2:6] * imgs[batch_idx].shape[1]
1308
+ txyxy = xywh2xyxy(txywh)
1309
+
1310
+ pxyxys = []
1311
+ p_cls = []
1312
+ p_obj = []
1313
+ from_which_layer = []
1314
+ all_b = []
1315
+ all_a = []
1316
+ all_gj = []
1317
+ all_gi = []
1318
+ all_anch = []
1319
+
1320
+            for i, pi in enumerate(p):
+
+                b, a, gj, gi = indices[i]
+                idx = (b == batch_idx)
+                b, a, gj, gi = b[idx], a[idx], gj[idx], gi[idx]
+                all_b.append(b)
+                all_a.append(a)
+                all_gj.append(gj)
+                all_gi.append(gi)
+                all_anch.append(anch[i][idx])
+                from_which_layer.append(torch.ones(size=(len(b),), device=targets.device) * i)  # keep on the targets' device so the boolean indexing below cannot mix devices
+
+                fg_pred = pi[b, a, gj, gi]
+                p_obj.append(fg_pred[:, 4:5])
+                p_cls.append(fg_pred[:, 5:])
+
+                grid = torch.stack([gi, gj], dim=1)
+                pxy = (fg_pred[:, :2].sigmoid() * 2. - 0.5 + grid) * self.stride[i]  # / 8.
+                # pxy = (fg_pred[:, :2].sigmoid() * 3. - 1. + grid) * self.stride[i]
+                pwh = (fg_pred[:, 2:4].sigmoid() * 2) ** 2 * anch[i][idx] * self.stride[i]  # / 8.
+                pxywh = torch.cat([pxy, pwh], dim=-1)
+                pxyxy = xywh2xyxy(pxywh)
+                pxyxys.append(pxyxy)
+
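+            # Decode refresher: sigmoid(tx) * 2 - 0.5 lies in (-0.5, 1.5), letting
+            # a cell claim centres just outside its own bounds, and
+            # (sigmoid(tw) * 2) ** 2 lies in (0, 4), capping width/height at 4x the
+            # anchor; tw = 0 maps to sigmoid 0.5 and so reproduces the anchor size
+            # exactly.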
+            pxyxys = torch.cat(pxyxys, dim=0)
+            if pxyxys.shape[0] == 0:
+                continue
+            p_obj = torch.cat(p_obj, dim=0)
+            p_cls = torch.cat(p_cls, dim=0)
+            from_which_layer = torch.cat(from_which_layer, dim=0)
+            all_b = torch.cat(all_b, dim=0)
+            all_a = torch.cat(all_a, dim=0)
+            all_gj = torch.cat(all_gj, dim=0)
+            all_gi = torch.cat(all_gi, dim=0)
+            all_anch = torch.cat(all_anch, dim=0)
+
+            pair_wise_iou = box_iou(txyxy, pxyxys)
+
+            pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8)
+
+            top_k, _ = torch.topk(pair_wise_iou, min(20, pair_wise_iou.shape[1]), dim=1)
+            dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1)
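+            # Dynamic-k estimation: each ground truth gets
+            # k = clamp(int(sum of its top-20 candidate IoUs), min=1). A large,
+            # well-covered object whose best IoUs are, say, [0.8, 0.7, 0.6, 0.5, ...]
+            # summing to 3.4 receives k = 3 positives, while a tiny object whose
+            # top IoUs sum to 0.9 still receives the minimum k = 1.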
+
+            gt_cls_per_image = (
+                F.one_hot(this_target[:, 1].to(torch.int64), self.nc)
+                .float()
+                .unsqueeze(1)
+                .repeat(1, pxyxys.shape[0], 1)
+            )
+
+            num_gt = this_target.shape[0]
+            cls_preds_ = (
+                p_cls.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
+                * p_obj.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
+            )
+
+            y = cls_preds_.sqrt_()
+            pair_wise_cls_loss = F.binary_cross_entropy_with_logits(
+                torch.log(y / (1 - y)), gt_cls_per_image, reduction="none"  # logit(y) undoes the sigmoid inside BCEWithLogits
+            ).sum(-1)
+            del cls_preds_
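+            # The sqrt takes the geometric mean of cls and obj confidence, and
+            # logit(y) = log(y / (1 - y)) inverts the sigmoid so that
+            # BCEWithLogits(logit(y), t) evaluates plain BCE on y itself while
+            # using the with-logits form, which (unlike plain BCE) is safe under
+            # mixed-precision autocast. E.g. y = 0.9, t = 1 gives
+            # logit(y) ~ 2.197 and a loss of -log(0.9) ~ 0.105.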
+
+            cost = (
+                pair_wise_cls_loss
+                + 3.0 * pair_wise_iou_loss
+            )
+
+            matching_matrix = torch.zeros_like(cost)
+
+            for gt_idx in range(num_gt):
+                _, pos_idx = torch.topk(
+                    cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False
+                )
+                matching_matrix[gt_idx][pos_idx] = 1.0
+
+            del top_k, dynamic_ks
+            anchor_matching_gt = matching_matrix.sum(0)
+            if (anchor_matching_gt > 1).sum() > 0:
+                _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)
+                matching_matrix[:, anchor_matching_gt > 1] *= 0.0
+                matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0
+            fg_mask_inboxes = matching_matrix.sum(0) > 0.0
+            matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)
+
+            from_which_layer = from_which_layer[fg_mask_inboxes]
+            all_b = all_b[fg_mask_inboxes]
+            all_a = all_a[fg_mask_inboxes]
+            all_gj = all_gj[fg_mask_inboxes]
+            all_gi = all_gi[fg_mask_inboxes]
+            all_anch = all_anch[fg_mask_inboxes]
+
+            this_target = this_target[matched_gt_inds]
+
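+            # Conflict resolution above: if one candidate cell was claimed by two
+            # ground truths (column costs of, say, 1.2 and 0.4), its column is
+            # zeroed and only the cheaper match (0.4) is restored, so every
+            # prediction ends up assigned to at most one ground truth.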
+            for i in range(nl):
+                layer_idx = from_which_layer == i
+                matching_bs[i].append(all_b[layer_idx])
+                matching_as[i].append(all_a[layer_idx])
+                matching_gjs[i].append(all_gj[layer_idx])
+                matching_gis[i].append(all_gi[layer_idx])
+                matching_targets[i].append(this_target[layer_idx])
+                matching_anchs[i].append(all_anch[layer_idx])
+
+        for i in range(nl):
+            if matching_targets[i] != []:
+                matching_bs[i] = torch.cat(matching_bs[i], dim=0)
+                matching_as[i] = torch.cat(matching_as[i], dim=0)
+                matching_gjs[i] = torch.cat(matching_gjs[i], dim=0)
+                matching_gis[i] = torch.cat(matching_gis[i], dim=0)
+                matching_targets[i] = torch.cat(matching_targets[i], dim=0)
+                matching_anchs[i] = torch.cat(matching_anchs[i], dim=0)
+            else:
+                matching_bs[i] = torch.tensor([], device=targets.device, dtype=torch.int64)  # empty placeholders on the targets' device (hard-coding 'cuda:0' breaks CPU runs)
+                matching_as[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
+                matching_gjs[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
+                matching_gis[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
+                matching_targets[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
+                matching_anchs[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
+
+        return matching_bs, matching_as, matching_gjs, matching_gis, matching_targets, matching_anchs
+
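+    # build_targets2() below mirrors build_targets() line for line; the only
+    # difference is that it seeds the assignment with find_5_positive(), whose
+    # wider offset bias (g = 1.0 instead of 0.5) yields up to five candidate
+    # cells per target for the coarser auxiliary head.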
+    def build_targets2(self, p, targets, imgs):
+
+        indices, anch = self.find_5_positive(p, targets)
+
+        matching_bs = [[] for _ in p]
+        matching_as = [[] for _ in p]
+        matching_gjs = [[] for _ in p]
+        matching_gis = [[] for _ in p]
+        matching_targets = [[] for _ in p]
+        matching_anchs = [[] for _ in p]
+
+        nl = len(p)
+
+        for batch_idx in range(p[0].shape[0]):
+
+            b_idx = targets[:, 0] == batch_idx
+            this_target = targets[b_idx]
+            if this_target.shape[0] == 0:
+                continue
+
+            txywh = this_target[:, 2:6] * imgs[batch_idx].shape[1]  # normalized xywh -> pixels (assumes square images)
+            txyxy = xywh2xyxy(txywh)
+
+            pxyxys = []
+            p_cls = []
+            p_obj = []
+            from_which_layer = []
+            all_b = []
+            all_a = []
+            all_gj = []
+            all_gi = []
+            all_anch = []
+
+            for i, pi in enumerate(p):
+
+                b, a, gj, gi = indices[i]
+                idx = (b == batch_idx)
+                b, a, gj, gi = b[idx], a[idx], gj[idx], gi[idx]
+                all_b.append(b)
+                all_a.append(a)
+                all_gj.append(gj)
+                all_gi.append(gi)
+                all_anch.append(anch[i][idx])
+                from_which_layer.append(torch.ones(size=(len(b),), device=targets.device) * i)  # keep on the targets' device
+
+                fg_pred = pi[b, a, gj, gi]
+                p_obj.append(fg_pred[:, 4:5])
+                p_cls.append(fg_pred[:, 5:])
+
+                grid = torch.stack([gi, gj], dim=1)
+                pxy = (fg_pred[:, :2].sigmoid() * 2. - 0.5 + grid) * self.stride[i]  # / 8.
+                # pxy = (fg_pred[:, :2].sigmoid() * 3. - 1. + grid) * self.stride[i]
+                pwh = (fg_pred[:, 2:4].sigmoid() * 2) ** 2 * anch[i][idx] * self.stride[i]  # / 8.
+                pxywh = torch.cat([pxy, pwh], dim=-1)
+                pxyxy = xywh2xyxy(pxywh)
+                pxyxys.append(pxyxy)
+
+            pxyxys = torch.cat(pxyxys, dim=0)
+            if pxyxys.shape[0] == 0:
+                continue
+            p_obj = torch.cat(p_obj, dim=0)
+            p_cls = torch.cat(p_cls, dim=0)
+            from_which_layer = torch.cat(from_which_layer, dim=0)
+            all_b = torch.cat(all_b, dim=0)
+            all_a = torch.cat(all_a, dim=0)
+            all_gj = torch.cat(all_gj, dim=0)
+            all_gi = torch.cat(all_gi, dim=0)
+            all_anch = torch.cat(all_anch, dim=0)
+
+            pair_wise_iou = box_iou(txyxy, pxyxys)
+
+            pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8)
+
+            top_k, _ = torch.topk(pair_wise_iou, min(20, pair_wise_iou.shape[1]), dim=1)
+            dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1)
+
+            gt_cls_per_image = (
+                F.one_hot(this_target[:, 1].to(torch.int64), self.nc)
+                .float()
+                .unsqueeze(1)
+                .repeat(1, pxyxys.shape[0], 1)
+            )
+
+            num_gt = this_target.shape[0]
+            cls_preds_ = (
+                p_cls.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
+                * p_obj.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
+            )
+
+            y = cls_preds_.sqrt_()
+            pair_wise_cls_loss = F.binary_cross_entropy_with_logits(
+                torch.log(y / (1 - y)), gt_cls_per_image, reduction="none"  # logit(y) undoes the sigmoid inside BCEWithLogits
+            ).sum(-1)
+            del cls_preds_
+
+            cost = (
+                pair_wise_cls_loss
+                + 3.0 * pair_wise_iou_loss
+            )
+
+            matching_matrix = torch.zeros_like(cost)
+
+            for gt_idx in range(num_gt):
+                _, pos_idx = torch.topk(
+                    cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False
+                )
+                matching_matrix[gt_idx][pos_idx] = 1.0
+
+            del top_k, dynamic_ks
+            anchor_matching_gt = matching_matrix.sum(0)
+            if (anchor_matching_gt > 1).sum() > 0:
+                _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)
+                matching_matrix[:, anchor_matching_gt > 1] *= 0.0
+                matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0
+            fg_mask_inboxes = matching_matrix.sum(0) > 0.0
+            matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)
+
+            from_which_layer = from_which_layer[fg_mask_inboxes]
+            all_b = all_b[fg_mask_inboxes]
+            all_a = all_a[fg_mask_inboxes]
+            all_gj = all_gj[fg_mask_inboxes]
+            all_gi = all_gi[fg_mask_inboxes]
+            all_anch = all_anch[fg_mask_inboxes]
+
+            this_target = this_target[matched_gt_inds]
+
+            for i in range(nl):
+                layer_idx = from_which_layer == i
+                matching_bs[i].append(all_b[layer_idx])
+                matching_as[i].append(all_a[layer_idx])
+                matching_gjs[i].append(all_gj[layer_idx])
+                matching_gis[i].append(all_gi[layer_idx])
+                matching_targets[i].append(this_target[layer_idx])
+                matching_anchs[i].append(all_anch[layer_idx])
+
+        for i in range(nl):
+            if matching_targets[i] != []:
+                matching_bs[i] = torch.cat(matching_bs[i], dim=0)
+                matching_as[i] = torch.cat(matching_as[i], dim=0)
+                matching_gjs[i] = torch.cat(matching_gjs[i], dim=0)
+                matching_gis[i] = torch.cat(matching_gis[i], dim=0)
+                matching_targets[i] = torch.cat(matching_targets[i], dim=0)
+                matching_anchs[i] = torch.cat(matching_anchs[i], dim=0)
+            else:
+                matching_bs[i] = torch.tensor([], device=targets.device, dtype=torch.int64)  # empty placeholders on the targets' device (hard-coding 'cuda:0' breaks CPU runs)
+                matching_as[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
+                matching_gjs[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
+                matching_gis[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
+                matching_targets[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
+                matching_anchs[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
+
+        return matching_bs, matching_as, matching_gjs, matching_gis, matching_targets, matching_anchs
+
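+    # find_5_positive() and find_3_positive() differ only in the bias g. With
+    # g = 0.5, a target at gxy = (12.3, 45.7) activates its own cell (12, 45),
+    # the left neighbour (x fraction 0.3 < 0.5) and the cell below (y fraction
+    # 0.7, i.e. inverse fraction 0.3 < 0.5) -- three cells. With g = 1.0 the
+    # fraction test passes on both sides, so all four side neighbours join,
+    # giving five cells per target per layer.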
+    def find_5_positive(self, p, targets):
+        # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
+        na, nt = self.na, targets.shape[0]  # number of anchors, targets
+        indices, anch = [], []
+        gain = torch.ones(7, device=targets.device).long()  # normalized to gridspace gain
+        ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
+        targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)  # append anchor indices
+
+        g = 1.0  # bias
+        off = torch.tensor([[0, 0],
+                            [1, 0], [0, 1], [-1, 0], [0, -1],  # j,k,l,m
+                            # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
+                            ], device=targets.device).float() * g  # offsets
+
+        for i in range(self.nl):
+            anchors = self.anchors[i]
+            gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain
+
+            # Match targets to anchors
+            t = targets * gain
+            if nt:
+                # Matches
+                r = t[:, :, 4:6] / anchors[:, None]  # wh ratio
+                j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t']  # compare
+                # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
+                t = t[j]  # filter
+
+                # Offsets
+                gxy = t[:, 2:4]  # grid xy
+                gxi = gain[[2, 3]] - gxy  # inverse
+                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
+                l, m = ((gxi % 1. < g) & (gxi > 1.)).T
+                j = torch.stack((torch.ones_like(j), j, k, l, m))
+                t = t.repeat((5, 1, 1))[j]
+                offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
+            else:
+                t = targets[0]
+                offsets = 0
+
+            # Define
+            b, c = t[:, :2].long().T  # image, class
+            gxy = t[:, 2:4]  # grid xy
+            gwh = t[:, 4:6]  # grid wh
+            gij = (gxy - offsets).long()
+            gi, gj = gij.T  # grid xy indices
+
+            # Append
+            a = t[:, 6].long()  # anchor indices
+            indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))  # image, anchor, grid indices
+            anch.append(anchors[a])  # anchors
+
+        return indices, anch
+
+    def find_3_positive(self, p, targets):
+        # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
+        na, nt = self.na, targets.shape[0]  # number of anchors, targets
+        indices, anch = [], []
+        gain = torch.ones(7, device=targets.device).long()  # normalized to gridspace gain
+        ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
+        targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)  # append anchor indices
+
+        g = 0.5  # bias
+        off = torch.tensor([[0, 0],
+                            [1, 0], [0, 1], [-1, 0], [0, -1],  # j,k,l,m
+                            # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
+                            ], device=targets.device).float() * g  # offsets
+
+        for i in range(self.nl):
+            anchors = self.anchors[i]
+            gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain
+
+            # Match targets to anchors
+            t = targets * gain
+            if nt:
+                # Matches
+                r = t[:, :, 4:6] / anchors[:, None]  # wh ratio
+                j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t']  # compare
+                # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
+                t = t[j]  # filter
+
+                # Offsets
+                gxy = t[:, 2:4]  # grid xy
+                gxi = gain[[2, 3]] - gxy  # inverse
+                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
+                l, m = ((gxi % 1. < g) & (gxi > 1.)).T
+                j = torch.stack((torch.ones_like(j), j, k, l, m))
+                t = t.repeat((5, 1, 1))[j]
+                offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
+            else:
+                t = targets[0]
+                offsets = 0
+
+            # Define
+            b, c = t[:, :2].long().T  # image, class
+            gxy = t[:, 2:4]  # grid xy
+            gwh = t[:, 4:6]  # grid wh
+            gij = (gxy - offsets).long()
+            gi, gj = gij.T  # grid xy indices
+
+            # Append
+            a = t[:, 6].long()  # anchor indices
+            indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))  # image, anchor, grid indices
+            anch.append(anchors[a])  # anchors
+
+        return indices, anch
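+    # Anchor-ratio filter, worked through: with anchor_t = 4.0 (the usual
+    # hyperparameter), a target of grid size (3.0, 1.5) against an anchor of
+    # (2.0, 6.0) gives r = (1.5, 0.25), max(r, 1/r) = (1.5, 4.0), and the row
+    # maximum 4.0 is not < 4.0, so the pair is rejected; against an anchor of
+    # (2.0, 2.0) the row maximum is 1.5 and the pair is kept.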