File size: 6,976 Bytes
c2ca15f |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
from mmdet.models.losses.utils import weighted_loss
from torch import Tensor
from torch import nn as nn
from mmdet3d.registry import MODELS
@weighted_loss
def uncertain_smooth_l1_loss(pred: Tensor,
                             target: Tensor,
                             sigma: Tensor,
                             alpha: float = 1.0,
                             beta: float = 1.0) -> Tensor:
    """Smooth L1 loss modulated by a learned uncertainty term.

    The plain smooth L1 residual is attenuated by ``exp(-sigma)`` and a
    regularization term ``alpha * sigma`` is added so that the network
    cannot drive the uncertainty arbitrarily high.

    Args:
        pred (Tensor): The prediction.
        target (Tensor): The learning target of the prediction.
        sigma (Tensor): The sigma for uncertainty.
        alpha (float): The coefficient of log(sigma).
            Defaults to 1.0.
        beta (float): The threshold in the piecewise function.
            Defaults to 1.0.

    Returns:
        Tensor: Calculated loss
    """
    assert beta > 0
    assert target.numel() > 0
    assert pred.size() == target.size() == sigma.size(), 'The size of pred ' \
        f'{pred.size()}, target {target.size()}, and sigma {sigma.size()} ' \
        'are inconsistent.'
    # Standard smooth L1: quadratic near zero, linear beyond ``beta``.
    abs_err = (pred - target).abs()
    quadratic = 0.5 * abs_err * abs_err / beta
    linear = abs_err - 0.5 * beta
    base_loss = torch.where(abs_err < beta, quadratic, linear)
    # Uncertainty weighting: down-weight noisy targets, penalize sigma.
    return torch.exp(-sigma) * base_loss + alpha * sigma
@weighted_loss
def uncertain_l1_loss(pred: Tensor,
                      target: Tensor,
                      sigma: Tensor,
                      alpha: float = 1.0) -> Tensor:
    """L1 loss modulated by a learned uncertainty term.

    The absolute error is attenuated by ``exp(-sigma)`` and a
    regularization term ``alpha * sigma`` is added.

    Args:
        pred (Tensor): The prediction.
        target (Tensor): The learning target of the prediction.
        sigma (Tensor): The sigma for uncertainty.
        alpha (float): The coefficient of log(sigma).
            Defaults to 1.0.

    Returns:
        Tensor: Calculated loss
    """
    assert target.numel() > 0
    assert pred.size() == target.size() == sigma.size(), 'The size of pred ' \
        f'{pred.size()}, target {target.size()}, and sigma {sigma.size()} ' \
        'are inconsistent.'
    abs_err = (pred - target).abs()
    # Uncertainty weighting: down-weight noisy targets, penalize sigma.
    return torch.exp(-sigma) * abs_err + alpha * sigma
@MODELS.register_module()
class UncertainSmoothL1Loss(nn.Module):
    r"""Smooth L1 loss with uncertainty.

    Please refer to `PGD <https://arxiv.org/abs/2107.14160>`_ and
    `Multi-Task Learning Using Uncertainty to Weigh Losses for Scene Geometry
    and Semantics <https://arxiv.org/abs/1705.07115>`_ for more details.

    Args:
        alpha (float): The coefficient of log(sigma).
            Defaults to 1.0.
        beta (float): The threshold in the piecewise function.
            Defaults to 1.0.
        reduction (str): The method to reduce the loss.
            Options are 'none', 'mean' and 'sum'. Defaults to 'mean'.
        loss_weight (float): The weight of loss. Defaults to 1.0
    """

    def __init__(self,
                 alpha: float = 1.0,
                 beta: float = 1.0,
                 reduction: str = 'mean',
                 loss_weight: float = 1.0) -> None:
        super().__init__()
        assert reduction in ('none', 'sum', 'mean')
        self.alpha = alpha
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred: Tensor,
                target: Tensor,
                sigma: Tensor,
                weight: Optional[Tensor] = None,
                avg_factor: Optional[float] = None,
                reduction_override: Optional[str] = None,
                **kwargs) -> Tensor:
        """Forward function.

        Args:
            pred (Tensor): The prediction.
            target (Tensor): The learning target of the prediction.
            sigma (Tensor): The sigma for uncertainty.
            weight (Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (float, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.

        Returns:
            Tensor: Calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        # A non-None override takes precedence over the configured default.
        reduction = reduction_override or self.reduction
        return self.loss_weight * uncertain_smooth_l1_loss(
            pred,
            target,
            weight,
            sigma=sigma,
            alpha=self.alpha,
            beta=self.beta,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
@MODELS.register_module()
class UncertainL1Loss(nn.Module):
    """L1 loss with uncertainty.

    Args:
        alpha (float): The coefficient of log(sigma).
            Defaults to 1.0.
        reduction (str): The method to reduce the loss.
            Options are 'none', 'mean' and 'sum'. Defaults to 'mean'.
        loss_weight (float): The weight of loss. Defaults to 1.0.
    """

    def __init__(self,
                 alpha: float = 1.0,
                 reduction: str = 'mean',
                 loss_weight: float = 1.0) -> None:
        super().__init__()
        assert reduction in ('none', 'sum', 'mean')
        self.alpha = alpha
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred: Tensor,
                target: Tensor,
                sigma: Tensor,
                weight: Optional[Tensor] = None,
                avg_factor: Optional[float] = None,
                reduction_override: Optional[str] = None) -> Tensor:
        """Forward function.

        Args:
            pred (Tensor): The prediction.
            target (Tensor): The learning target of the prediction.
            sigma (Tensor): The sigma for uncertainty.
            weight (Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (float, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.

        Returns:
            Tensor: Calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        # A non-None override takes precedence over the configured default.
        reduction = reduction_override or self.reduction
        return self.loss_weight * uncertain_l1_loss(
            pred,
            target,
            weight,
            sigma=sigma,
            alpha=self.alpha,
            reduction=reduction,
            avg_factor=avg_factor)
|