import math

import torch.nn as nn
import torch.nn.functional as F

__all__ = ['MobileNetV3', 'mobilenet_v3']


def conv_bn(inp, oup, stride, conv_layer=nn.Conv2d, norm_layer=nn.BatchNorm2d, nlin_layer=nn.ReLU):
    # 3x3 convolution -> batch norm -> nonlinearity
    return nn.Sequential(
        conv_layer(inp, oup, 3, stride, 1, bias=False),
        norm_layer(oup),
        nlin_layer(inplace=True)
    )


def conv_1x1_bn(inp, oup, conv_layer=nn.Conv2d, norm_layer=nn.BatchNorm2d, nlin_layer=nn.ReLU):
    # 1x1 (pointwise) convolution -> batch norm -> nonlinearity
    return nn.Sequential(
        conv_layer(inp, oup, 1, 1, 0, bias=False),
        norm_layer(oup),
        nlin_layer(inplace=True)
    )


class Hswish(nn.Module):
    # h-swish(x) = x * ReLU6(x + 3) / 6 (equivalent to nn.Hardswish)
    def __init__(self, inplace=True):
        super(Hswish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return x * F.relu6(x + 3., inplace=self.inplace) / 6.


class Hsigmoid(nn.Module):
    # h-sigmoid(x) = ReLU6(x + 3) / 6 (equivalent to nn.Hardsigmoid)
    def __init__(self, inplace=True):
        super(Hsigmoid, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return F.relu6(x + 3., inplace=self.inplace) / 6.


class SEModule(nn.Module):
    # Squeeze-and-Excitation: global average pool, bottleneck MLP, channel-wise rescale
    def __init__(self, channel, reduction=4):
        super(SEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            Hsigmoid()
        )

    def forward(self, x):
        b, c, _, _ = x.size()
        y = self.avg_pool(x).view(b, c)
        y = self.fc(y).view(b, c, 1, 1)
        return x * y.expand_as(x)


class Identity(nn.Module):
    # No-op module; stands in for SEModule when a block has no SE layer
    def __init__(self, channel):
        super(Identity, self).__init__()

    def forward(self, x):
        return x


def make_divisible(x, divisible_by=8):
    # Round channel counts up to the nearest multiple of `divisible_by`,
    # e.g. make_divisible(28.8) -> 32
    return int(math.ceil(x / divisible_by)) * divisible_by


class MobileBottleneck(nn.Module):
    def __init__(self, inp, oup, kernel, stride, exp, se=False, nl='RE'):
        super(MobileBottleneck, self).__init__()
        assert stride in [1, 2]
        assert kernel in [3, 5]
        padding = (kernel - 1) // 2
        # Residual connection only when the block preserves resolution and width
        self.use_res_connect = stride == 1 and inp == oup

        conv_layer = nn.Conv2d
        norm_layer = nn.BatchNorm2d
        if nl == 'RE':
            nlin_layer = nn.ReLU
        elif nl == 'HS':
            nlin_layer = Hswish
        else:
            raise NotImplementedError
        if se:
            SELayer = SEModule
        else:
            SELayer = Identity

        self.conv = nn.Sequential(
            # pointwise expansion
            conv_layer(inp, exp, 1, 1, 0, bias=False),
            norm_layer(exp),
            nlin_layer(inplace=True),
            # depthwise convolution, with optional squeeze-and-excitation
            conv_layer(exp, exp, kernel, stride, padding, groups=exp, bias=False),
            norm_layer(exp),
            SELayer(exp),
            nlin_layer(inplace=True),
            # pointwise linear projection
            conv_layer(exp, oup, 1, 1, 0, bias=False),
            norm_layer(oup),
        )

    def forward(self, x):
        if self.use_res_connect:
            return x + self.conv(x)
        else:
            return self.conv(x)
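
# Example (widen_factor=1.0): the 'small'-mode row [5, 240, 40, True, 'HS', 1] below
# expands to MobileBottleneck(40, 40, kernel=5, stride=1, exp=240, se=True, nl='HS'),
# which keeps its residual shortcut because stride == 1 and inp == oup.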


class MobileNetV3(nn.Module):
    def __init__(self, widen_factor=1.0, num_classes=141, num_landmarks=136, input_size=128, mode='small'):
        # num_landmarks is accepted for interface compatibility but unused by this classification head
        super(MobileNetV3, self).__init__()
        input_channel = 16
        last_channel = 1280
        if mode == 'large':
            # kernel, expansion, out channels, SE, nonlinearity, stride
            mobile_setting = [
                [3, 16,  16,  False, 'RE', 1],
                [3, 64,  24,  False, 'RE', 2],
                [3, 72,  24,  False, 'RE', 1],
                [5, 72,  40,  True,  'RE', 2],
                [5, 120, 40,  True,  'RE', 1],
                [5, 120, 40,  True,  'RE', 1],
                [3, 240, 80,  False, 'HS', 2],
                [3, 200, 80,  False, 'HS', 1],
                [3, 184, 80,  False, 'HS', 1],
                [3, 184, 80,  False, 'HS', 1],
                [3, 480, 112, True,  'HS', 1],
                [3, 672, 112, True,  'HS', 1],
                [5, 672, 160, True,  'HS', 2],
                [5, 960, 160, True,  'HS', 1],
                [5, 960, 160, True,  'HS', 1],
            ]
        elif mode == 'small':
            # kernel, expansion, out channels, SE, nonlinearity, stride
            mobile_setting = [
                [3, 16,  16,  True,  'RE', 2],
                [3, 72,  24,  False, 'RE', 2],
                [3, 88,  24,  False, 'RE', 1],
                [5, 96,  40,  True,  'HS', 2],
                [5, 240, 40,  True,  'HS', 1],
                [5, 240, 40,  True,  'HS', 1],
                [5, 120, 48,  True,  'HS', 1],
                [5, 144, 48,  True,  'HS', 1],
                [5, 288, 96,  True,  'HS', 2],
                [5, 576, 96,  True,  'HS', 1],
                [5, 576, 96,  True,  'HS', 1],
            ]
        else:
            raise NotImplementedError

        assert input_size % 32 == 0
        last_channel = make_divisible(last_channel * widen_factor) if widen_factor > 1.0 else last_channel

        # stem: 3x3 stride-2 convolution with h-swish
        self.features = [conv_bn(3, input_channel, 2, nlin_layer=Hswish)]

        # stack of inverted-residual bottleneck blocks
        for k, exp, c, se, nl, s in mobile_setting:
            output_channel = make_divisible(c * widen_factor)
            exp_channel = make_divisible(exp * widen_factor)
            self.features.append(MobileBottleneck(input_channel, output_channel, k, s, exp_channel, se, nl))
            input_channel = output_channel

        # head: 1x1 convolution, global pooling, then a 1x1 convolution up to last_channel
        if mode == 'large':
            last_conv = make_divisible(960 * widen_factor)
        else:
            last_conv = make_divisible(576 * widen_factor)
        self.features.append(conv_1x1_bn(input_channel, last_conv, nlin_layer=Hswish))
        self.features.append(nn.AdaptiveAvgPool2d(1))
        self.features.append(nn.Conv2d(last_conv, last_channel, 1, 1, 0))
        self.features.append(Hswish(inplace=True))

        self.features = nn.Sequential(*self.features)

        self.fc = nn.Linear(last_channel, num_classes)

        self._initialize_weights()

    def forward(self, x):
        x = self.features(x)
        # features already ends in global pooling, so the means just flatten (B, C, 1, 1) to (B, C)
        x_share = x.mean(3).mean(2)
        xp = self.fc(x_share)
        return xp

    def _initialize_weights(self):
        # Kaiming init for convolutions, standard BN init, small normal init for linear layers
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)


def mobilenet_v3(**kwargs):
    model = MobileNetV3(
        widen_factor=kwargs.get('widen_factor', 1.0),
        num_classes=kwargs.get('num_classes', 62),
        num_landmarks=kwargs.get('num_landmarks', 136),
        input_size=kwargs.get('size', 128),
        mode=kwargs.get('mode', 'small')
    )
    return model
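

if __name__ == '__main__':
    # Minimal smoke test -- a sketch, not part of the original interface. It assumes
    # torch is installed and checks the output shape of the default 'small' model.
    import torch

    net = mobilenet_v3(mode='small', num_classes=62, size=128)
    net.eval()
    with torch.no_grad():
        out = net(torch.randn(2, 3, 128, 128))
    # the classification head yields one logit per class
    assert out.shape == (2, 62)
    print('mobilenet_v3 (small) output:', tuple(out.shape))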