# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Tuple

import torch.nn.functional as F
from torch import Tensor

from mmdet.registry import MODELS
from .fpn import FPN


class FPN_DropBlock(FPN):
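    """FPN variant that applies an optional DropBlock plugin to the merged
    lateral features during the top-down pass.

    Args:
        plugin (dict, optional): Config of the plugin, built via
            ``MODELS.build``. Defaults to a DropBlock with
            ``drop_prob=0.3`` and ``block_size=3``; set to ``None`` to
            disable it.
    """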
    def __init__(self,
                 *args,
                 plugin: Optional[dict] = dict(
                     type='DropBlock',
                     drop_prob=0.3,
                     block_size=3,
                     warmup_iters=0),
                 **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.plugin = None
        if plugin is not None:
            self.plugin = MODELS.build(plugin)
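
    # Note: passing ``plugin=None`` disables DropBlock entirely. Since the
    # plugin is constructed through ``MODELS.build``, any other registered
    # module config could in principle be supplied here instead of the
    # default DropBlock settings.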
    def forward(self, inputs: Tuple[Tensor]) -> tuple:
        """Forward function.

        Args:
            inputs (tuple[Tensor]): Features from the upstream network, each
                is a 4D-tensor.

        Returns:
            tuple: Feature maps, each is a 4D-tensor.
        """
        assert len(inputs) == len(self.in_channels)

        # build laterals
        laterals = [
            lateral_conv(inputs[i + self.start_level])
            for i, lateral_conv in enumerate(self.lateral_convs)
        ]

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            # In some cases, fixing `scale_factor` (e.g. 2) is preferred, but
            # it cannot co-exist with `size` in `F.interpolate`.
            if 'scale_factor' in self.upsample_cfg:
                # fix runtime error of "+=" inplace operation in PyTorch 1.10
                laterals[i - 1] = laterals[i - 1] + F.interpolate(
                    laterals[i], **self.upsample_cfg)
            else:
                prev_shape = laterals[i - 1].shape[2:]
                laterals[i - 1] = laterals[i - 1] + F.interpolate(
                    laterals[i], size=prev_shape, **self.upsample_cfg)

            if self.plugin is not None:
                laterals[i - 1] = self.plugin(laterals[i - 1])

        # build outputs
        # part 1: from original levels
        outs = [
            self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
        ]

        # part 2: add extra levels
        if self.num_outs > len(outs):
            # use max pool to get more levels on top of outputs
            # (e.g., Faster R-CNN, Mask R-CNN)
            if not self.add_extra_convs:
                for i in range(self.num_outs - used_backbone_levels):
                    outs.append(F.max_pool2d(outs[-1], 1, stride=2))
            # add conv layers on top of original feature maps (RetinaNet)
            else:
                if self.add_extra_convs == 'on_input':
                    extra_source = inputs[self.backbone_end_level - 1]
                elif self.add_extra_convs == 'on_lateral':
                    extra_source = laterals[-1]
                elif self.add_extra_convs == 'on_output':
                    extra_source = outs[-1]
                else:
                    raise NotImplementedError
                outs.append(self.fpn_convs[used_backbone_levels](extra_source))
                for i in range(used_backbone_levels + 1, self.num_outs):
                    if self.relu_before_extra_convs:
                        outs.append(self.fpn_convs[i](F.relu(outs[-1])))
                    else:
                        outs.append(self.fpn_convs[i](outs[-1]))
        return tuple(outs)
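

# A minimal usage sketch with illustrative values, not part of the original
# file: assuming ``FPN_DropBlock`` is registered with ``MODELS`` and the
# channel counts below match the backbone in use, an MMDetection config
# could select this neck roughly like so.
#
#   neck = dict(
#       type='FPN_DropBlock',
#       in_channels=[256, 512, 1024, 2048],
#       out_channels=256,
#       num_outs=5,
#       plugin=dict(
#           type='DropBlock',
#           drop_prob=0.3,
#           block_size=3,
#           warmup_iters=0))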