Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- pytorch-image-models/timm/layers/__pycache__/activations_me.cpython-39.pyc +0 -0
- pytorch-image-models/timm/layers/__pycache__/attention_pool2d.cpython-39.pyc +0 -0
- pytorch-image-models/timm/layers/__pycache__/blur_pool.cpython-39.pyc +0 -0
- pytorch-image-models/timm/layers/__pycache__/conv_bn_act.cpython-39.pyc +0 -0
- pytorch-image-models/timm/layers/__pycache__/create_attn.cpython-39.pyc +0 -0
- pytorch-image-models/timm/layers/__pycache__/drop.cpython-39.pyc +0 -0
- pytorch-image-models/timm/layers/__pycache__/eca.cpython-39.pyc +0 -0
- pytorch-image-models/timm/layers/__pycache__/gather_excite.cpython-39.pyc +0 -0
- pytorch-image-models/timm/layers/__pycache__/grid.cpython-39.pyc +0 -0
- pytorch-image-models/timm/layers/__pycache__/halo_attn.cpython-39.pyc +0 -0
- pytorch-image-models/timm/layers/__pycache__/inplace_abn.cpython-39.pyc +0 -0
- pytorch-image-models/timm/layers/__pycache__/lambda_layer.cpython-39.pyc +0 -0
- pytorch-image-models/timm/layers/__pycache__/layer_scale.cpython-39.pyc +0 -0
- pytorch-image-models/timm/layers/__pycache__/non_local_attn.cpython-39.pyc +0 -0
- pytorch-image-models/timm/layers/__pycache__/pool2d_same.cpython-39.pyc +0 -0
- pytorch-image-models/timm/layers/__pycache__/pos_embed_sincos.cpython-39.pyc +0 -0
- pytorch-image-models/timm/layers/__pycache__/selective_kernel.cpython-39.pyc +0 -0
- pytorch-image-models/timm/layers/__pycache__/split_attn.cpython-39.pyc +0 -0
- pytorch-image-models/timm/layers/__pycache__/squeeze_excite.cpython-39.pyc +0 -0
- pytorch-image-models/timm/layers/__pycache__/trace_utils.cpython-39.pyc +0 -0
- pytorch-image-models/timm/layers/__pycache__/typing.cpython-39.pyc +0 -0
- pytorch-image-models/timm/layers/__pycache__/weight_init.cpython-39.pyc +0 -0
- pytorch-image-models/timm/layers/create_norm.py +58 -0
- pytorch-image-models/timm/layers/create_norm_act.py +95 -0
- pytorch-image-models/timm/layers/drop.py +182 -0
- pytorch-image-models/timm/layers/eca.py +145 -0
- pytorch-image-models/timm/layers/grid.py +49 -0
- pytorch-image-models/timm/layers/grn.py +39 -0
- pytorch-image-models/timm/layers/helpers.py +43 -0
- pytorch-image-models/timm/layers/interpolate.py +68 -0
- pytorch-image-models/timm/layers/linear.py +19 -0
- pytorch-image-models/timm/layers/median_pool.py +49 -0
- pytorch-image-models/timm/loss/__init__.py +4 -0
- pytorch-image-models/timm/loss/__pycache__/__init__.cpython-39.pyc +0 -0
- pytorch-image-models/timm/loss/__pycache__/asymmetric_loss.cpython-39.pyc +0 -0
- pytorch-image-models/timm/loss/__pycache__/binary_cross_entropy.cpython-39.pyc +0 -0
- pytorch-image-models/timm/loss/__pycache__/cross_entropy.cpython-39.pyc +0 -0
- pytorch-image-models/timm/loss/__pycache__/jsd.cpython-39.pyc +0 -0
- pytorch-image-models/timm/loss/binary_cross_entropy.py +65 -0
- pytorch-image-models/timm/loss/cross_entropy.py +36 -0
- pytorch-image-models/timm/loss/jsd.py +39 -0
- pytorch-image-models/timm/models/__init__.py +100 -0
- pytorch-image-models/timm/models/_builder.py +482 -0
- pytorch-image-models/timm/models/_efficientnet_blocks.py +702 -0
- pytorch-image-models/timm/models/_factory.py +137 -0
- pytorch-image-models/timm/models/_features.py +484 -0
- pytorch-image-models/timm/models/_features_fx.py +179 -0
- pytorch-image-models/timm/models/_hub.py +465 -0
- pytorch-image-models/timm/models/_registry.py +352 -0
- pytorch-image-models/timm/models/beit.py +716 -0
pytorch-image-models/timm/layers/__pycache__/activations_me.cpython-39.pyc
ADDED
Binary file (8.6 kB)
pytorch-image-models/timm/layers/__pycache__/attention_pool2d.cpython-39.pyc
ADDED
Binary file (8.67 kB)
pytorch-image-models/timm/layers/__pycache__/blur_pool.cpython-39.pyc
ADDED
Binary file (3.09 kB)
pytorch-image-models/timm/layers/__pycache__/conv_bn_act.cpython-39.pyc
ADDED
Binary file (2.39 kB)
pytorch-image-models/timm/layers/__pycache__/create_attn.cpython-39.pyc
ADDED
Binary file (1.94 kB)
pytorch-image-models/timm/layers/__pycache__/drop.cpython-39.pyc
ADDED
Binary file (5.98 kB)
pytorch-image-models/timm/layers/__pycache__/eca.cpython-39.pyc
ADDED
Binary file (6.07 kB)
pytorch-image-models/timm/layers/__pycache__/gather_excite.cpython-39.pyc
ADDED
Binary file (3.02 kB)
pytorch-image-models/timm/layers/__pycache__/grid.cpython-39.pyc
ADDED
Binary file (1.56 kB)
pytorch-image-models/timm/layers/__pycache__/halo_attn.cpython-39.pyc
ADDED
Binary file (7.44 kB)
pytorch-image-models/timm/layers/__pycache__/inplace_abn.cpython-39.pyc
ADDED
Binary file (3.11 kB)
pytorch-image-models/timm/layers/__pycache__/lambda_layer.cpython-39.pyc
ADDED
Binary file (5.45 kB)
pytorch-image-models/timm/layers/__pycache__/layer_scale.cpython-39.pyc
ADDED
Binary file (1.63 kB)
pytorch-image-models/timm/layers/__pycache__/non_local_attn.cpython-39.pyc
ADDED
Binary file (5.63 kB)
pytorch-image-models/timm/layers/__pycache__/pool2d_same.cpython-39.pyc
ADDED
Binary file (3.05 kB)
pytorch-image-models/timm/layers/__pycache__/pos_embed_sincos.cpython-39.pyc
ADDED
Binary file (11.3 kB)
pytorch-image-models/timm/layers/__pycache__/selective_kernel.cpython-39.pyc
ADDED
Binary file (5.53 kB)
pytorch-image-models/timm/layers/__pycache__/split_attn.cpython-39.pyc
ADDED
Binary file (2.96 kB)
pytorch-image-models/timm/layers/__pycache__/squeeze_excite.cpython-39.pyc
ADDED
Binary file (3.9 kB)
pytorch-image-models/timm/layers/__pycache__/trace_utils.cpython-39.pyc
ADDED
Binary file (648 Bytes)
pytorch-image-models/timm/layers/__pycache__/typing.cpython-39.pyc
ADDED
Binary file (331 Bytes)
pytorch-image-models/timm/layers/__pycache__/weight_init.cpython-39.pyc
ADDED
Binary file (5.27 kB)
pytorch-image-models/timm/layers/create_norm.py
ADDED
@@ -0,0 +1,58 @@
""" Norm Layer Factory

Create norm modules by string (to mirror create_act and create_norm_act fns)

Copyright 2022 Ross Wightman
"""
import functools
import types
from typing import Type

import torch.nn as nn

from .norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d, RmsNorm, RmsNorm2d
from torchvision.ops.misc import FrozenBatchNorm2d

_NORM_MAP = dict(
    batchnorm=nn.BatchNorm2d,
    batchnorm2d=nn.BatchNorm2d,
    batchnorm1d=nn.BatchNorm1d,
    groupnorm=GroupNorm,
    groupnorm1=GroupNorm1,
    layernorm=LayerNorm,
    layernorm2d=LayerNorm2d,
    rmsnorm=RmsNorm,
    rmsnorm2d=RmsNorm2d,
    frozenbatchnorm2d=FrozenBatchNorm2d,
)
_NORM_TYPES = {m for n, m in _NORM_MAP.items()}


def create_norm_layer(layer_name, num_features, **kwargs):
    layer = get_norm_layer(layer_name)
    layer_instance = layer(num_features, **kwargs)
    return layer_instance


def get_norm_layer(norm_layer):
    if norm_layer is None:
        return None
    assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial))
    norm_kwargs = {}

    # unbind partial fn, so args can be rebound later
    if isinstance(norm_layer, functools.partial):
        norm_kwargs.update(norm_layer.keywords)
        norm_layer = norm_layer.func

    if isinstance(norm_layer, str):
        if not norm_layer:
            return None
        layer_name = norm_layer.replace('_', '').lower()
        norm_layer = _NORM_MAP[layer_name]
    else:
        norm_layer = norm_layer

    if norm_kwargs:
        norm_layer = functools.partial(norm_layer, **norm_kwargs)  # bind/rebind args
    return norm_layer
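
A minimal usage sketch, assuming the factories above are exported via `timm.layers`: a string name or a partially-bound type resolves to the same norm class, with partial kwargs rebound.

import functools
import torch.nn as nn
from timm.layers import create_norm_layer, get_norm_layer

bn = create_norm_layer('batchnorm2d', 64)                   # resolves to nn.BatchNorm2d(64)
gn_cls = get_norm_layer(functools.partial(nn.GroupNorm, eps=1e-5))
gn = gn_cls(8, 64)                                          # eps kwarg rebound onto nn.GroupNorm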
pytorch-image-models/timm/layers/create_norm_act.py
ADDED
@@ -0,0 +1,95 @@
""" NormAct (Normalization + Activation Layer) Factory

Create norm + act combo modules that attempt to be backwards compatible with separate norm + act
instances in models. Where these are used it will be possible to swap separate BN + act layers with
combined modules like IABN or EvoNorms.

Hacked together by / Copyright 2020 Ross Wightman
"""
import types
import functools

import torch  # needed for torch.jit.script in create_norm_act_layer

from .evo_norm import *
from .filter_response_norm import FilterResponseNormAct2d, FilterResponseNormTlu2d
from .norm_act import BatchNormAct2d, GroupNormAct, LayerNormAct, LayerNormAct2d
from .inplace_abn import InplaceAbn

_NORM_ACT_MAP = dict(
    batchnorm=BatchNormAct2d,
    batchnorm2d=BatchNormAct2d,
    groupnorm=GroupNormAct,
    groupnorm1=functools.partial(GroupNormAct, num_groups=1),
    layernorm=LayerNormAct,
    layernorm2d=LayerNormAct2d,
    evonormb0=EvoNorm2dB0,
    evonormb1=EvoNorm2dB1,
    evonormb2=EvoNorm2dB2,
    evonorms0=EvoNorm2dS0,
    evonorms0a=EvoNorm2dS0a,
    evonorms1=EvoNorm2dS1,
    evonorms1a=EvoNorm2dS1a,
    evonorms2=EvoNorm2dS2,
    evonorms2a=EvoNorm2dS2a,
    frn=FilterResponseNormAct2d,
    frntlu=FilterResponseNormTlu2d,
    inplaceabn=InplaceAbn,
    iabn=InplaceAbn,
)
_NORM_ACT_TYPES = {m for n, m in _NORM_ACT_MAP.items()}
# has act_layer arg to define act type
_NORM_ACT_REQUIRES_ARG = {
    BatchNormAct2d, GroupNormAct, LayerNormAct, LayerNormAct2d, FilterResponseNormAct2d, InplaceAbn}


def create_norm_act_layer(layer_name, num_features, act_layer=None, apply_act=True, jit=False, **kwargs):
    layer = get_norm_act_layer(layer_name, act_layer=act_layer)
    layer_instance = layer(num_features, apply_act=apply_act, **kwargs)
    if jit:
        layer_instance = torch.jit.script(layer_instance)
    return layer_instance


def get_norm_act_layer(norm_layer, act_layer=None):
    if norm_layer is None:
        return None
    assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial))
    assert act_layer is None or isinstance(act_layer, (type, str, types.FunctionType, functools.partial))
    norm_act_kwargs = {}

    # unbind partial fn, so args can be rebound later
    if isinstance(norm_layer, functools.partial):
        norm_act_kwargs.update(norm_layer.keywords)
        norm_layer = norm_layer.func

    if isinstance(norm_layer, str):
        if not norm_layer:
            return None
        layer_name = norm_layer.replace('_', '').lower().split('-')[0]
        norm_act_layer = _NORM_ACT_MAP[layer_name]
    elif norm_layer in _NORM_ACT_TYPES:
        norm_act_layer = norm_layer
    elif isinstance(norm_layer, types.FunctionType):
        # if function type, must be a lambda/fn that creates a norm_act layer
        norm_act_layer = norm_layer
    else:
        type_name = norm_layer.__name__.lower()
        if type_name.startswith('batchnorm'):
            norm_act_layer = BatchNormAct2d
        elif type_name.startswith('groupnorm1'):
            # check the more specific 'groupnorm1' prefix before 'groupnorm' so this branch is reachable
            norm_act_layer = functools.partial(GroupNormAct, num_groups=1)
        elif type_name.startswith('groupnorm'):
            norm_act_layer = GroupNormAct
        elif type_name.startswith('layernorm2d'):
            norm_act_layer = LayerNormAct2d
        elif type_name.startswith('layernorm'):
            norm_act_layer = LayerNormAct
        else:
            assert False, f"No equivalent norm_act layer for {type_name}"

    if norm_act_layer in _NORM_ACT_REQUIRES_ARG:
        # pass `act_layer` through for backwards compat where `act_layer=None` implies no activation.
        # In the future, may force use of `apply_act` with `act_layer` arg bound to relevant NormAct types
        norm_act_kwargs.setdefault('act_layer', act_layer)
    if norm_act_kwargs:
        norm_act_layer = functools.partial(norm_act_layer, **norm_act_kwargs)  # bind/rebind args
    return norm_act_layer
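
A minimal sketch of resolving a combined norm + act layer, assuming `get_norm_act_layer` is exported via `timm.layers`:

import torch
import torch.nn as nn
from timm.layers import get_norm_act_layer

norm_act_cls = get_norm_act_layer('batchnorm2d', act_layer=nn.ReLU)  # partial of BatchNormAct2d
layer = norm_act_cls(32)                       # BN over 32 channels followed by ReLU
y = layer(torch.randn(2, 32, 8, 8))            # output shape unchanged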
pytorch-image-models/timm/layers/drop.py
ADDED
@@ -0,0 +1,182 @@
""" DropBlock, DropPath

PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers.

Papers:
DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890)

Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382)

Code:
DropBlock impl inspired by two Tensorflow impl that I liked:
 - https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74
 - https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py

Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn
import torch.nn.functional as F

from .grid import ndgrid


def drop_block_2d(
        x,
        drop_prob: float = 0.1,
        block_size: int = 7,
        gamma_scale: float = 1.0,
        with_noise: bool = False,
        inplace: bool = False,
        batchwise: bool = False
):
    """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf

    DropBlock with an experimental gaussian noise option. This layer has been tested on a few training
    runs with success, but needs further validation and possibly optimization for lower runtime impact.
    """
    B, C, H, W = x.shape
    total_size = W * H
    clipped_block_size = min(block_size, min(W, H))
    # seed_drop_rate, the gamma parameter
    gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
        (W - block_size + 1) * (H - block_size + 1))

    # Forces the block to be inside the feature map.
    w_i, h_i = ndgrid(torch.arange(W, device=x.device), torch.arange(H, device=x.device))
    valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & \
                  ((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2))
    valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype)

    if batchwise:
        # one mask for whole batch, quite a bit faster
        uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device)
    else:
        uniform_noise = torch.rand_like(x)
    block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype)
    block_mask = -F.max_pool2d(
        -block_mask,
        kernel_size=clipped_block_size,  # block_size,
        stride=1,
        padding=clipped_block_size // 2)

    if with_noise:
        normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x)
        if inplace:
            x.mul_(block_mask).add_(normal_noise * (1 - block_mask))
        else:
            x = x * block_mask + normal_noise * (1 - block_mask)
    else:
        normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(x.dtype)
        if inplace:
            x.mul_(block_mask * normalize_scale)
        else:
            x = x * block_mask * normalize_scale
    return x


def drop_block_fast_2d(
        x: torch.Tensor,
        drop_prob: float = 0.1,
        block_size: int = 7,
        gamma_scale: float = 1.0,
        with_noise: bool = False,
        inplace: bool = False,
):
    """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf

    DropBlock with an experimental gaussian noise option. Simplified from above without concern for valid
    block mask at edges.
    """
    B, C, H, W = x.shape
    total_size = W * H
    clipped_block_size = min(block_size, min(W, H))
    gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
        (W - block_size + 1) * (H - block_size + 1))

    block_mask = torch.empty_like(x).bernoulli_(gamma)
    block_mask = F.max_pool2d(
        block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2)

    if with_noise:
        normal_noise = torch.empty_like(x).normal_()
        if inplace:
            x.mul_(1. - block_mask).add_(normal_noise * block_mask)
        else:
            x = x * (1. - block_mask) + normal_noise * block_mask
    else:
        block_mask = 1 - block_mask
        normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-6)).to(dtype=x.dtype)
        if inplace:
            x.mul_(block_mask * normalize_scale)
        else:
            x = x * block_mask * normalize_scale
    return x


class DropBlock2d(nn.Module):
    """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
    """

    def __init__(
            self,
            drop_prob: float = 0.1,
            block_size: int = 7,
            gamma_scale: float = 1.0,
            with_noise: bool = False,
            inplace: bool = False,
            batchwise: bool = False,
            fast: bool = True):
        super(DropBlock2d, self).__init__()
        self.drop_prob = drop_prob
        self.gamma_scale = gamma_scale
        self.block_size = block_size
        self.with_noise = with_noise
        self.inplace = inplace
        self.batchwise = batchwise
        self.fast = fast  # FIXME finish comparisons of fast vs not

    def forward(self, x):
        if not self.training or not self.drop_prob:
            return x
        if self.fast:
            return drop_block_fast_2d(
                x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace)
        else:
            return drop_block_2d(
                x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)


def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
    the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
    changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
    'survival rate' as the argument.
    """
    if drop_prob == 0. or not training:
        return x
    keep_prob = 1 - drop_prob
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
    if keep_prob > 0.0 and scale_by_keep:
        random_tensor.div_(keep_prob)
    return x * random_tensor


class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    """
    def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob
        self.scale_by_keep = scale_by_keep

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)

    def extra_repr(self):
        return f'drop_prob={round(self.drop_prob,3):0.3f}'
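
A minimal sketch of the typical `DropPath` pattern (per-sample stochastic depth on the residual branch), assuming `DropPath` is exported via `timm.layers`:

import torch
import torch.nn as nn
from timm.layers import DropPath

class Block(nn.Module):
    def __init__(self, dim, drop_path_rate=0.1):
        super().__init__()
        self.mlp = nn.Linear(dim, dim)
        # the identity path is always kept; only the residual branch is stochastically dropped
        self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()

    def forward(self, x):
        return x + self.drop_path(self.mlp(x))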
pytorch-image-models/timm/layers/eca.py
ADDED
@@ -0,0 +1,145 @@
"""
ECA module from ECAnet

paper: ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks
https://arxiv.org/abs/1910.03151

Original ECA model borrowed from https://github.com/BangguWu/ECANet

Modified circular ECA implementation and adaptation for use in timm package
by Chris Ha https://github.com/VRandme

Original License:

MIT License

Copyright (c) 2019 BangguWu, Qilong Wang

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import math
from torch import nn
import torch.nn.functional as F


from .create_act import create_act_layer
from .helpers import make_divisible


class EcaModule(nn.Module):
    """Constructs an ECA module.

    Args:
        channels: Number of channels of the input feature map, used for adaptive kernel size
            calculation according to channel count.
        gamma, beta: parameters of the kernel-size mapping function used when channels is given,
            refer to original paper https://arxiv.org/pdf/1910.03151.pdf
            (default=None. if channel size not given, use the given kernel_size.)
        kernel_size: Adaptive selection of kernel size (default=3)
        gamma: used in kernel_size calc, see above
        beta: used in kernel_size calc, see above
        act_layer: optional non-linearity after conv, enables conv bias, this is an experiment
        gate_layer: gating non-linearity to use
    """
    def __init__(
            self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid',
            rd_ratio=1/8, rd_channels=None, rd_divisor=8, use_mlp=False):
        super(EcaModule, self).__init__()
        if channels is not None:
            t = int(abs(math.log(channels, 2) + beta) / gamma)
            kernel_size = max(t if t % 2 else t + 1, 3)
        assert kernel_size % 2 == 1
        padding = (kernel_size - 1) // 2
        if use_mlp:
            # NOTE 'mlp' mode is a timm experiment, not in paper
            assert channels is not None
            if rd_channels is None:
                rd_channels = make_divisible(channels * rd_ratio, divisor=rd_divisor)
            act_layer = act_layer or nn.ReLU
            self.conv = nn.Conv1d(1, rd_channels, kernel_size=1, padding=0, bias=True)
            self.act = create_act_layer(act_layer)
            self.conv2 = nn.Conv1d(rd_channels, 1, kernel_size=kernel_size, padding=padding, bias=True)
        else:
            self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=padding, bias=False)
            self.act = None
            self.conv2 = None
        self.gate = create_act_layer(gate_layer)

    def forward(self, x):
        y = x.mean((2, 3)).view(x.shape[0], 1, -1)  # view for 1d conv
        y = self.conv(y)
        if self.conv2 is not None:
            y = self.act(y)
            y = self.conv2(y)
        y = self.gate(y).view(x.shape[0], -1, 1, 1)
        return x * y.expand_as(x)


EfficientChannelAttn = EcaModule  # alias


class CecaModule(nn.Module):
    """Constructs a circular ECA module.

    ECA module where the conv uses circular padding rather than zero padding.
    Unlike the spatial dimension, the channels do not have inherent ordering nor
    locality. Although this module, in essence, applies such an assumption, it is unnecessary
    to limit the channels on either "edge" from being circularly adapted to each other.
    This will fundamentally increase connectivity and possibly increase performance metrics
    (accuracy, robustness), without significantly impacting resource metrics
    (parameter size, throughput, latency, etc.)

    Args:
        channels: Number of channels of the input feature map, used for adaptive kernel size
            calculation according to channel count.
        gamma, beta: parameters of the kernel-size mapping function used when channels is given,
            refer to original paper https://arxiv.org/pdf/1910.03151.pdf
            (default=None. if channel size not given, use the given kernel_size.)
        kernel_size: Adaptive selection of kernel size (default=3)
        gamma: used in kernel_size calc, see above
        beta: used in kernel_size calc, see above
        act_layer: optional non-linearity after conv, enables conv bias, this is an experiment
        gate_layer: gating non-linearity to use
    """

    def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid'):
        super(CecaModule, self).__init__()
        if channels is not None:
            t = int(abs(math.log(channels, 2) + beta) / gamma)
            kernel_size = max(t if t % 2 else t + 1, 3)
        has_act = act_layer is not None
        assert kernel_size % 2 == 1

        # PyTorch circular padding mode is buggy as of pytorch 1.4
        # see https://github.com/pytorch/pytorch/pull/17240
        # implement manual circular padding
        self.padding = (kernel_size - 1) // 2
        self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=0, bias=has_act)
        self.gate = create_act_layer(gate_layer)

    def forward(self, x):
        y = x.mean((2, 3)).view(x.shape[0], 1, -1)
        # Manually implement circular padding, F.pad does not seem to be bugged
        y = F.pad(y, (self.padding, self.padding), mode='circular')
        y = self.conv(y)
        y = self.gate(y).view(x.shape[0], -1, 1, 1)
        return x * y.expand_as(x)


CircularEfficientChannelAttn = CecaModule
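
To make the adaptive kernel size concrete: for channels=256, t = int(|log2(256) + 1| / 2) = int(4.5) = 4, which is even, so the kernel becomes 5. A minimal sketch, assuming `EcaModule` is exported via `timm.layers`:

import torch
from timm.layers import EcaModule

eca = EcaModule(channels=256)       # adaptive kernel_size resolves to 5 (see the math above)
x = torch.randn(2, 256, 14, 14)
y = eca(x)                          # same shape, channels reweighted by the sigmoid gate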
pytorch-image-models/timm/layers/grid.py
ADDED
@@ -0,0 +1,49 @@
from typing import Tuple

import torch


def ndgrid(*tensors) -> Tuple[torch.Tensor, ...]:
    """generate N-D grid in dimension order.

    The ndgrid function is like meshgrid except that the order of the first two input arguments is switched.

    That is, the statement
    [X1,X2,X3] = ndgrid(x1,x2,x3)

    produces the same result as

    [X2,X1,X3] = meshgrid(x2,x1,x3)

    This naming is based on MATLAB, the purpose is to avoid confusion due to torch's change to make
    torch.meshgrid behaviour move from matching ndgrid ('ij') indexing to numpy meshgrid defaults of ('xy').
    """
    try:
        return torch.meshgrid(*tensors, indexing='ij')
    except TypeError:
        # old PyTorch < 1.10 will follow this path as it does not have indexing arg,
        # the old behaviour of meshgrid was 'ij'
        return torch.meshgrid(*tensors)


def meshgrid(*tensors) -> Tuple[torch.Tensor, ...]:
    """generate N-D grid in spatial dim order.

    The meshgrid function is similar to ndgrid except that the order of the
    first two input and output arguments is switched.

    That is, the statement

    [X,Y,Z] = meshgrid(x,y,z)

    produces the same result as

    [Y,X,Z] = ndgrid(y,x,z)

    Because of this, meshgrid is better suited to problems in two- or three-dimensional Cartesian space,
    while ndgrid is better suited to multidimensional problems that aren't spatially based.
    """
    # NOTE: this will throw in PyTorch < 1.10 as meshgrid did not support indexing arg or have
    # capability of generating grid in xy order before then.
    return torch.meshgrid(*tensors, indexing='xy')
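
A minimal sketch of the two indexing conventions:

import torch
from timm.layers.grid import ndgrid, meshgrid

a, b = torch.arange(3), torch.arange(4)
ii, jj = ndgrid(a, b)        # 'ij' indexing: both grids have shape (3, 4)
xx, yy = meshgrid(a, b)      # 'xy' indexing: both grids have shape (4, 3)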
pytorch-image-models/timm/layers/grn.py
ADDED
@@ -0,0 +1,39 @@
""" Global Response Normalization Module

Based on the GRN layer presented in
`ConvNeXt-V2 - Co-designing and Scaling ConvNets with Masked Autoencoders` - https://arxiv.org/abs/2301.00808

This implementation
* works for both NCHW and NHWC tensor layouts
* uses affine param names matching existing torch norm layers
* slightly improves eager mode performance via fused addcmul

Hacked together by / Copyright 2023 Ross Wightman
"""

import torch
from torch import nn as nn


class GlobalResponseNorm(nn.Module):
    """ Global Response Normalization layer
    """
    def __init__(self, dim, eps=1e-6, channels_last=True):
        super().__init__()
        self.eps = eps
        if channels_last:
            self.spatial_dim = (1, 2)
            self.channel_dim = -1
            self.wb_shape = (1, 1, 1, -1)
        else:
            self.spatial_dim = (2, 3)
            self.channel_dim = 1
            self.wb_shape = (1, -1, 1, 1)

        self.weight = nn.Parameter(torch.zeros(dim))
        self.bias = nn.Parameter(torch.zeros(dim))

    def forward(self, x):
        x_g = x.norm(p=2, dim=self.spatial_dim, keepdim=True)
        x_n = x_g / (x_g.mean(dim=self.channel_dim, keepdim=True) + self.eps)
        return x + torch.addcmul(self.bias.view(self.wb_shape), self.weight.view(self.wb_shape), x * x_n)
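
A minimal sketch, assuming the module path `timm.layers.grn`: because both affine params are zero-initialized, the layer is an identity at init.

import torch
from timm.layers.grn import GlobalResponseNorm

grn = GlobalResponseNorm(64, channels_last=True)   # expects NHWC input
x = torch.randn(2, 7, 7, 64)
y = grn(x)          # equals x at init; weight/bias learn the response scaling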
pytorch-image-models/timm/layers/helpers.py
ADDED
@@ -0,0 +1,43 @@
""" Layer/Module Helpers

Hacked together by / Copyright 2020 Ross Wightman
"""
from itertools import repeat
import collections.abc


# From PyTorch internals
def _ntuple(n):
    def parse(x):
        if isinstance(x, collections.abc.Iterable) and not isinstance(x, str):
            return tuple(x)
        return tuple(repeat(x, n))
    return parse


to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple


def make_divisible(v, divisor=8, min_value=None, round_limit=.9):
    min_value = min_value or divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Make sure that round down does not go down by more than 10%.
    if new_v < round_limit * v:
        new_v += divisor
    return new_v


def extend_tuple(x, n):
    # pads a tuple to specified n by padding with last value
    if not isinstance(x, (tuple, list)):
        x = (x,)
    else:
        x = tuple(x)
    pad_n = n - len(x)
    if pad_n <= 0:
        return x[:n]
    return x + (x[-1],) * pad_n
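
A worked example of the rounding behavior in `make_divisible`: for v=10 the divisible round-down to 8 loses more than 10% (8 < 0.9 * 10), so a full divisor is added back.

from timm.layers.helpers import make_divisible, to_2tuple

to_2tuple(3)            # -> (3, 3)
make_divisible(37)      # -> 40: int(37 + 4) // 8 * 8 = 40, and 40 >= 0.9 * 37
make_divisible(10)      # -> 16: rounds down to 8, but 8 < 0.9 * 10, so bump by divisor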
pytorch-image-models/timm/layers/interpolate.py
ADDED
@@ -0,0 +1,68 @@
""" Interpolation helpers for timm layers

RegularGridInterpolator from https://github.com/sbarratt/torch_interpolations
Copyright Shane Barratt, Apache 2.0 license
"""
import torch
from itertools import product


class RegularGridInterpolator:
    """ Interpolate data defined on a rectilinear grid with even or uneven spacing.
    Produces similar results to scipy RegularGridInterpolator or interp2d
    in 'linear' mode.

    Taken from https://github.com/sbarratt/torch_interpolations
    """

    def __init__(self, points, values):
        self.points = points
        self.values = values

        assert isinstance(self.points, tuple) or isinstance(self.points, list)
        assert isinstance(self.values, torch.Tensor)

        self.ms = list(self.values.shape)
        self.n = len(self.points)

        assert len(self.ms) == self.n

        for i, p in enumerate(self.points):
            assert isinstance(p, torch.Tensor)
            assert p.shape[0] == self.values.shape[i]

    def __call__(self, points_to_interp):
        assert self.points is not None
        assert self.values is not None

        assert len(points_to_interp) == len(self.points)
        K = points_to_interp[0].shape[0]
        for x in points_to_interp:
            assert x.shape[0] == K

        idxs = []
        dists = []
        overalls = []
        for p, x in zip(self.points, points_to_interp):
            idx_right = torch.bucketize(x, p)
            idx_right[idx_right >= p.shape[0]] = p.shape[0] - 1
            idx_left = (idx_right - 1).clamp(0, p.shape[0] - 1)
            dist_left = x - p[idx_left]
            dist_right = p[idx_right] - x
            dist_left[dist_left < 0] = 0.
            dist_right[dist_right < 0] = 0.
            both_zero = (dist_left == 0) & (dist_right == 0)
            dist_left[both_zero] = dist_right[both_zero] = 1.

            idxs.append((idx_left, idx_right))
            dists.append((dist_left, dist_right))
            overalls.append(dist_left + dist_right)

        numerator = 0.
        for indexer in product([0, 1], repeat=self.n):
            as_s = [idx[onoff] for onoff, idx in zip(indexer, idxs)]
            bs_s = [dist[1 - onoff] for onoff, dist in zip(indexer, dists)]
            numerator += self.values[as_s] * \
                torch.prod(torch.stack(bs_s), dim=0)
        denominator = torch.prod(torch.stack(overalls), dim=0)
        return numerator / denominator
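
A minimal 1-D sketch, assuming the module path `timm.layers.interpolate`: values of y = x^2 on grid points {0, 1, 2, 3}, linearly interpolated at 0.5 and 2.5.

import torch
from timm.layers.interpolate import RegularGridInterpolator

points = (torch.tensor([0., 1., 2., 3.]),)
values = torch.tensor([0., 1., 4., 9.])
interp = RegularGridInterpolator(points, values)
interp((torch.tensor([0.5, 2.5]),))   # -> tensor([0.5000, 6.5000])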
pytorch-image-models/timm/layers/linear.py
ADDED
@@ -0,0 +1,19 @@
""" Linear layer (alternate definition)
"""
import torch
import torch.nn.functional as F
from torch import nn as nn


class Linear(nn.Linear):
    r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`

    Wraps torch.nn.Linear to support AMP + torchscript usage by manually casting
    weight & bias to input.dtype to work around an issue w/ torch.addmm in this use case.
    """
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        if torch.jit.is_scripting():
            bias = self.bias.to(dtype=input.dtype) if self.bias is not None else None
            return F.linear(input, self.weight.to(dtype=input.dtype), bias=bias)
        else:
            return F.linear(input, self.weight, self.bias)
pytorch-image-models/timm/layers/median_pool.py
ADDED
@@ -0,0 +1,49 @@
""" Median Pool
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch.nn as nn
import torch.nn.functional as F
from .helpers import to_2tuple, to_4tuple


class MedianPool2d(nn.Module):
    """ Median pool (usable as median filter when stride=1) module.

    Args:
        kernel_size: size of pooling kernel, int or 2-tuple
        stride: pool stride, int or 2-tuple
        padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad
        same: override padding and enforce same padding, boolean
    """
    def __init__(self, kernel_size=3, stride=1, padding=0, same=False):
        super(MedianPool2d, self).__init__()
        self.k = to_2tuple(kernel_size)
        self.stride = to_2tuple(stride)
        self.padding = to_4tuple(padding)  # convert to l, r, t, b
        self.same = same

    def _padding(self, x):
        if self.same:
            ih, iw = x.size()[2:]
            if ih % self.stride[0] == 0:
                ph = max(self.k[0] - self.stride[0], 0)
            else:
                ph = max(self.k[0] - (ih % self.stride[0]), 0)
            if iw % self.stride[1] == 0:
                pw = max(self.k[1] - self.stride[1], 0)
            else:
                pw = max(self.k[1] - (iw % self.stride[1]), 0)
            pl = pw // 2
            pr = pw - pl
            pt = ph // 2
            pb = ph - pt
            padding = (pl, pr, pt, pb)
        else:
            padding = self.padding
        return padding

    def forward(self, x):
        x = F.pad(x, self._padding(x), mode='reflect')
        x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1], self.stride[1])
        x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0]
        return x
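
A minimal sketch, assuming the module path `timm.layers.median_pool`: with same=True and stride 1 the module acts as a shape-preserving median filter.

import torch
from timm.layers.median_pool import MedianPool2d

filt = MedianPool2d(kernel_size=3, stride=1, same=True)
x = torch.randn(1, 3, 8, 8)
y = filt(x)     # shape (1, 3, 8, 8); robust to outlier pixels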
pytorch-image-models/timm/loss/__init__.py
ADDED
@@ -0,0 +1,4 @@
from .asymmetric_loss import AsymmetricLossMultiLabel, AsymmetricLossSingleLabel
from .binary_cross_entropy import BinaryCrossEntropy
from .cross_entropy import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from .jsd import JsdCrossEntropy
pytorch-image-models/timm/loss/__pycache__/__init__.cpython-39.pyc
ADDED
Binary file (441 Bytes)
pytorch-image-models/timm/loss/__pycache__/asymmetric_loss.cpython-39.pyc
ADDED
Binary file (2.79 kB)
pytorch-image-models/timm/loss/__pycache__/binary_cross_entropy.cpython-39.pyc
ADDED
Binary file (2.08 kB)
pytorch-image-models/timm/loss/__pycache__/cross_entropy.cpython-39.pyc
ADDED
Binary file (1.85 kB)
pytorch-image-models/timm/loss/__pycache__/jsd.cpython-39.pyc
ADDED
Binary file (2 kB)
pytorch-image-models/timm/loss/binary_cross_entropy.py
ADDED
@@ -0,0 +1,65 @@
""" Binary Cross Entropy w/ a few extras

Hacked together by / Copyright 2021 Ross Wightman
"""
from typing import Optional, Union

import torch
import torch.nn as nn
import torch.nn.functional as F


class BinaryCrossEntropy(nn.Module):
    """ BCE with optional one-hot from dense targets, label smoothing, thresholding
    NOTE for experiments comparing CE to BCE /w label smoothing, may remove
    """
    def __init__(
            self,
            smoothing=0.1,
            target_threshold: Optional[float] = None,
            weight: Optional[torch.Tensor] = None,
            reduction: str = 'mean',
            sum_classes: bool = False,
            pos_weight: Optional[Union[torch.Tensor, float]] = None,
    ):
        super(BinaryCrossEntropy, self).__init__()
        assert 0. <= smoothing < 1.0
        if pos_weight is not None:
            if not isinstance(pos_weight, torch.Tensor):
                pos_weight = torch.tensor(pos_weight)
        self.smoothing = smoothing
        self.target_threshold = target_threshold
        self.reduction = 'none' if sum_classes else reduction
        self.sum_classes = sum_classes
        self.register_buffer('weight', weight)
        self.register_buffer('pos_weight', pos_weight)

    def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        batch_size = x.shape[0]
        assert batch_size == target.shape[0]

        if target.shape != x.shape:
            # NOTE currently assume smoothing or other label softening is applied upstream if targets are already sparse
            num_classes = x.shape[-1]
            # FIXME should off/on be different for smoothing w/ BCE? Other impl out there differ
            off_value = self.smoothing / num_classes
            on_value = 1. - self.smoothing + off_value
            target = target.long().view(-1, 1)
            target = torch.full(
                (batch_size, num_classes),
                off_value,
                device=x.device, dtype=x.dtype).scatter_(1, target, on_value)

        if self.target_threshold is not None:
            # Make target 0, or 1 if threshold set
            target = target.gt(self.target_threshold).to(dtype=target.dtype)

        loss = F.binary_cross_entropy_with_logits(
            x, target,
            self.weight,
            pos_weight=self.pos_weight,
            reduction=self.reduction,
        )
        if self.sum_classes:
            loss = loss.sum(-1).mean()
        return loss
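
A minimal sketch, assuming `BinaryCrossEntropy` is exported via `timm.loss`: dense class indices are one-hot encoded with smoothing internally, then optionally binarized by the threshold.

import torch
from timm.loss import BinaryCrossEntropy

criterion = BinaryCrossEntropy(smoothing=0.1, target_threshold=0.2)
logits = torch.randn(4, 10)
target = torch.randint(0, 10, (4,))   # dense indices; shape differs from logits -> one-hot path
loss = criterion(logits, target)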
pytorch-image-models/timm/loss/cross_entropy.py
ADDED
@@ -0,0 +1,36 @@
""" Cross Entropy w/ smoothing or soft targets

Hacked together by / Copyright 2021 Ross Wightman
"""

import torch
import torch.nn as nn
import torch.nn.functional as F


class LabelSmoothingCrossEntropy(nn.Module):
    """ NLL loss with label smoothing.
    """
    def __init__(self, smoothing=0.1):
        super(LabelSmoothingCrossEntropy, self).__init__()
        assert smoothing < 1.0
        self.smoothing = smoothing
        self.confidence = 1. - smoothing

    def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        logprobs = F.log_softmax(x, dim=-1)
        nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
        nll_loss = nll_loss.squeeze(1)
        smooth_loss = -logprobs.mean(dim=-1)
        loss = self.confidence * nll_loss + self.smoothing * smooth_loss
        return loss.mean()


class SoftTargetCrossEntropy(nn.Module):

    def __init__(self):
        super(SoftTargetCrossEntropy, self).__init__()

    def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        loss = torch.sum(-target * F.log_softmax(x, dim=-1), dim=-1)
        return loss.mean()
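
With smoothing ε, the loss above is (1 - ε) * NLL(target) + ε * mean(-log p), i.e. the remaining ε mass is spread uniformly over classes. A minimal sketch, assuming both classes are exported via `timm.loss`:

import torch
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy

x = torch.randn(4, 10)
hard = torch.randint(0, 10, (4,))
soft = torch.softmax(torch.randn(4, 10), dim=-1)     # e.g. mixup/cutmix targets

loss_ls = LabelSmoothingCrossEntropy(0.1)(x, hard)   # 0.9 * NLL + 0.1 * uniform term
loss_st = SoftTargetCrossEntropy()(x, soft)          # mean of sum(-target * log_softmax)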
pytorch-image-models/timm/loss/jsd.py
ADDED
@@ -0,0 +1,39 @@
import torch
import torch.nn as nn
import torch.nn.functional as F

from .cross_entropy import LabelSmoothingCrossEntropy


class JsdCrossEntropy(nn.Module):
    """ Jensen-Shannon Divergence + Cross-Entropy Loss

    Based on impl here: https://github.com/google-research/augmix/blob/master/imagenet.py
    From paper: 'AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty' -
    https://arxiv.org/abs/1912.02781

    Hacked together by / Copyright 2020 Ross Wightman
    """
    def __init__(self, num_splits=3, alpha=12, smoothing=0.1):
        super().__init__()
        self.num_splits = num_splits
        self.alpha = alpha
        if smoothing is not None and smoothing > 0:
            self.cross_entropy_loss = LabelSmoothingCrossEntropy(smoothing)
        else:
            self.cross_entropy_loss = torch.nn.CrossEntropyLoss()

    def __call__(self, output, target):
        split_size = output.shape[0] // self.num_splits
        assert split_size * self.num_splits == output.shape[0]
        logits_split = torch.split(output, split_size)

        # Cross-entropy is only computed on clean images
        loss = self.cross_entropy_loss(logits_split[0], target[:split_size])
        probs = [F.softmax(logits, dim=1) for logits in logits_split]

        # Clamp mixture distribution to avoid exploding KL divergence
        logp_mixture = torch.clamp(torch.stack(probs).mean(axis=0), 1e-7, 1).log()
        loss += self.alpha * sum([F.kl_div(
            logp_mixture, p_split, reduction='batchmean') for p_split in probs]) / len(probs)
        return loss
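
A minimal sketch, assuming `JsdCrossEntropy` is exported via `timm.loss`: the batch is laid out as [clean | aug1 | aug2] and only the clean split's targets enter the cross-entropy term.

import torch
from timm.loss import JsdCrossEntropy

criterion = JsdCrossEntropy(num_splits=3, alpha=12)
logits = torch.randn(24, 10)            # 3 splits of 8: clean, aug1, aug2
target = torch.randint(0, 10, (24,))    # only target[:8] is used for the CE term
loss = criterion(logits, target)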
pytorch-image-models/timm/models/__init__.py
ADDED
@@ -0,0 +1,100 @@
from .beit import *
from .byoanet import *
from .byobnet import *
from .cait import *
from .coat import *
from .convit import *
from .convmixer import *
from .convnext import *
from .crossvit import *
from .cspnet import *
from .davit import *
from .deit import *
from .densenet import *
from .dla import *
from .dpn import *
from .edgenext import *
from .efficientformer import *
from .efficientformer_v2 import *
from .efficientnet import *
from .efficientvit_mit import *
from .efficientvit_msra import *
from .eva import *
from .fastvit import *
from .focalnet import *
from .gcvit import *
from .ghostnet import *
from .hardcorenas import *
from .hgnet import *
from .hiera import *
from .hieradet_sam2 import *
from .hrnet import *
from .inception_next import *
from .inception_resnet_v2 import *
from .inception_v3 import *
from .inception_v4 import *
from .levit import *
from .maxxvit import *
from .mambaout import *
from .metaformer import *
from .mlp_mixer import *
from .mobilenetv3 import *
from .mobilevit import *
from .mvitv2 import *
from .nasnet import *
from .nest import *
from .nextvit import *
from .nfnet import *
from .pit import *
from .pnasnet import *
from .pvt_v2 import *
from .rdnet import *
from .regnet import *
from .repghost import *
from .repvit import *
from .res2net import *
from .resnest import *
from .resnet import *
from .resnetv2 import *
from .rexnet import *
from .selecsls import *
from .senet import *
from .sequencer import *
from .sknet import *
from .swin_transformer import *
from .swin_transformer_v2 import *
from .swin_transformer_v2_cr import *
from .tiny_vit import *
from .tnt import *
from .tresnet import *
from .twins import *
from .vgg import *
from .visformer import *
from .vision_transformer import *
from .vision_transformer_hybrid import *
from .vision_transformer_relpos import *
from .vision_transformer_sam import *
from .vitamin import *
from .volo import *
from .vovnet import *
from .xception import *
from .xception_aligned import *
from .xcit import *

from ._builder import build_model_with_cfg, load_pretrained, load_custom_pretrained, resolve_pretrained_cfg, \
    set_pretrained_download_progress, set_pretrained_check_hash
from ._factory import create_model, parse_model_name, safe_model_name
from ._features import FeatureInfo, FeatureHooks, FeatureHookNet, FeatureListNet, FeatureDictNet
from ._features_fx import FeatureGraphNet, GraphExtractNet, create_feature_extractor, get_graph_node_names, \
    register_notrace_module, is_notrace_module, get_notrace_modules, \
    register_notrace_function, is_notrace_function, get_notrace_functions
from ._helpers import clean_state_dict, load_state_dict, load_checkpoint, remap_state_dict, resume_checkpoint
from ._hub import load_model_config_from_hf, load_state_dict_from_hf, push_to_hf_hub
from ._manipulate import model_parameters, named_apply, named_modules, named_modules_with_params, \
    group_modules, group_parameters, checkpoint_seq, adapt_input_conv
from ._pretrained import PretrainedCfg, DefaultCfg, filter_pretrained_cfg
from ._prune import adapt_model_from_string
from ._registry import split_model_name_tag, get_arch_name, generate_default_cfgs, register_model, \
    register_model_deprecations, model_entrypoint, list_models, list_pretrained, get_deprecated_models, \
    is_model, list_modules, is_model_in_modules, is_model_pretrained, get_pretrained_cfg, get_pretrained_cfg_value, \
    get_arch_pretrained_cfgs
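
A minimal sketch of the public API this package re-exports (assumes a timm install):

from timm.models import create_model, list_models

list_models('convnext*')                                   # registered architecture names
model = create_model('resnet50', pretrained=False, num_classes=10)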
pytorch-image-models/timm/models/_builder.py
ADDED
@@ -0,0 +1,482 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
+import dataclasses
+import logging
+import os
+from copy import deepcopy
+from pathlib import Path
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+from torch import nn as nn
+from torch.hub import load_state_dict_from_url
+
+from timm.models._features import FeatureListNet, FeatureDictNet, FeatureHookNet, FeatureGetterNet
+from timm.models._features_fx import FeatureGraphNet
+from timm.models._helpers import load_state_dict
+from timm.models._hub import has_hf_hub, download_cached_file, check_cached_file, load_state_dict_from_hf,\
+    load_custom_from_hf
+from timm.models._manipulate import adapt_input_conv
+from timm.models._pretrained import PretrainedCfg
+from timm.models._prune import adapt_model_from_file
+from timm.models._registry import get_pretrained_cfg
+
+_logger = logging.getLogger(__name__)
+
+# Global variables for rarely used pretrained checkpoint download progress and hash check.
+# Use set_pretrained_download_progress / set_pretrained_check_hash functions to toggle.
+_DOWNLOAD_PROGRESS = False
+_CHECK_HASH = False
+_USE_OLD_CACHE = int(os.environ.get('TIMM_USE_OLD_CACHE', 0)) > 0
+
+__all__ = ['set_pretrained_download_progress', 'set_pretrained_check_hash', 'load_custom_pretrained', 'load_pretrained',
+           'pretrained_cfg_for_features', 'resolve_pretrained_cfg', 'build_model_with_cfg']
+
+
+def _resolve_pretrained_source(pretrained_cfg):
+    cfg_source = pretrained_cfg.get('source', '')
+    pretrained_url = pretrained_cfg.get('url', None)
+    pretrained_file = pretrained_cfg.get('file', None)
+    pretrained_sd = pretrained_cfg.get('state_dict', None)
+    hf_hub_id = pretrained_cfg.get('hf_hub_id', None)
+
+    # resolve where to load pretrained weights from
+    load_from = ''
+    pretrained_loc = ''
+    if cfg_source == 'hf-hub' and has_hf_hub(necessary=True):
+        # hf-hub specified as source via model identifier
+        load_from = 'hf-hub'
+        assert hf_hub_id
+        pretrained_loc = hf_hub_id
+    else:
+        # default source == timm or unspecified
+        if pretrained_sd:
+            # direct state_dict pass through is the highest priority
+            load_from = 'state_dict'
+            pretrained_loc = pretrained_sd
+            assert isinstance(pretrained_loc, dict)
+        elif pretrained_file:
+            # file load override is the second-highest priority if set
+            load_from = 'file'
+            pretrained_loc = pretrained_file
+        else:
+            old_cache_valid = False
+            if _USE_OLD_CACHE:
+                # prioritize old cached weights if they exist and the env var is enabled
+                old_cache_valid = check_cached_file(pretrained_url) if pretrained_url else False
+            if not old_cache_valid and hf_hub_id and has_hf_hub(necessary=True):
+                # hf-hub available as alternate weight source in default_cfg
+                load_from = 'hf-hub'
+                pretrained_loc = hf_hub_id
+            elif pretrained_url:
+                load_from = 'url'
+                pretrained_loc = pretrained_url
+
+    if load_from == 'hf-hub' and pretrained_cfg.get('hf_hub_filename', None):
+        # if a filename override is set, return tuple for location w/ (hub_id, filename)
+        pretrained_loc = pretrained_loc, pretrained_cfg['hf_hub_filename']
+    return load_from, pretrained_loc
+
+
+def set_pretrained_download_progress(enable=True):
+    """ Set download progress for pretrained weights on/off (globally). """
+    global _DOWNLOAD_PROGRESS
+    _DOWNLOAD_PROGRESS = enable
+
+
+def set_pretrained_check_hash(enable=True):
+    """ Set hash checking for pretrained weights on/off (globally). """
+    global _CHECK_HASH
+    _CHECK_HASH = enable
+
+
+def load_custom_pretrained(
+        model: nn.Module,
+        pretrained_cfg: Optional[Dict] = None,
+        load_fn: Optional[Callable] = None,
+        cache_dir: Optional[Union[str, Path]] = None,
+):
+    r"""Loads a custom (read non .pth) weight file
+
+    Downloads checkpoint file into cache-dir like torch.hub based loaders, but calls
+    a passed in custom load fn, or the `load_pretrained` model member fn.
+
+    If the object is already present in `model_dir`, it's deserialized and returned.
+    The default value of `model_dir` is ``<hub_dir>/checkpoints`` where
+    `hub_dir` is the directory returned by :func:`~torch.hub.get_dir`.
+
+    Args:
+        model: The instantiated model to load weights into
+        pretrained_cfg: Default pretrained model cfg
+        load_fn: An external standalone fn that loads weights into provided model, otherwise a fn named
+            'load_pretrained' on the model will be called if it exists
+        cache_dir: Override model checkpoint cache dir for this load
+    """
+    pretrained_cfg = pretrained_cfg or getattr(model, 'pretrained_cfg', None)
+    if not pretrained_cfg:
+        _logger.warning("Invalid pretrained config, cannot load weights.")
+        return
+
+    load_from, pretrained_loc = _resolve_pretrained_source(pretrained_cfg)
+    if not load_from:
+        _logger.warning("No pretrained weights exist for this model. Using random initialization.")
+        return
+    if load_from == 'hf-hub':
+        _logger.warning("Hugging Face hub not currently supported for custom load pretrained models.")
+    elif load_from == 'url':
+        pretrained_loc = download_cached_file(
+            pretrained_loc,
+            check_hash=_CHECK_HASH,
+            progress=_DOWNLOAD_PROGRESS,
+            cache_dir=cache_dir,
+        )
+
+    if load_fn is not None:
+        load_fn(model, pretrained_loc)
+    elif hasattr(model, 'load_pretrained'):
+        model.load_pretrained(pretrained_loc)
+    else:
+        _logger.warning("Valid function to load pretrained weights is not available, using random initialization.")
+
+
+def load_pretrained(
+        model: nn.Module,
+        pretrained_cfg: Optional[Dict] = None,
+        num_classes: int = 1000,
+        in_chans: int = 3,
+        filter_fn: Optional[Callable] = None,
+        strict: bool = True,
+        cache_dir: Optional[Union[str, Path]] = None,
+):
+    """ Load pretrained checkpoint
+
+    Args:
+        model: PyTorch module
+        pretrained_cfg: Configuration for pretrained weights / target dataset
+        num_classes: Number of classes for target model. Will adapt pretrained if different.
+        in_chans: Number of input chans for target model. Will adapt pretrained if different.
+        filter_fn: state_dict filter fn for load (takes state_dict, model as args)
+        strict: Strict load of checkpoint
+        cache_dir: Override model checkpoint cache dir for this load
+    """
+    pretrained_cfg = pretrained_cfg or getattr(model, 'pretrained_cfg', None)
+    if not pretrained_cfg:
+        raise RuntimeError("Invalid pretrained config, cannot load weights. Use `pretrained=False` for random init.")
+
+    load_from, pretrained_loc = _resolve_pretrained_source(pretrained_cfg)
+    if load_from == 'state_dict':
+        _logger.info('Loading pretrained weights from state dict')
+        state_dict = pretrained_loc  # pretrained_loc is the actual state dict for this override
+    elif load_from == 'file':
+        _logger.info(f'Loading pretrained weights from file ({pretrained_loc})')
+        if pretrained_cfg.get('custom_load', False):
+            model.load_pretrained(pretrained_loc)
+            return
+        else:
+            state_dict = load_state_dict(pretrained_loc)
+    elif load_from == 'url':
+        _logger.info(f'Loading pretrained weights from url ({pretrained_loc})')
+        if pretrained_cfg.get('custom_load', False):
+            pretrained_loc = download_cached_file(
+                pretrained_loc,
+                progress=_DOWNLOAD_PROGRESS,
+                check_hash=_CHECK_HASH,
+                cache_dir=cache_dir,
+            )
+            model.load_pretrained(pretrained_loc)
+            return
+        else:
+            try:
+                state_dict = load_state_dict_from_url(
+                    pretrained_loc,
+                    map_location='cpu',
+                    progress=_DOWNLOAD_PROGRESS,
+                    check_hash=_CHECK_HASH,
+                    weights_only=True,
+                    model_dir=cache_dir,
+                )
+            except TypeError:
+                state_dict = load_state_dict_from_url(
+                    pretrained_loc,
+                    map_location='cpu',
+                    progress=_DOWNLOAD_PROGRESS,
+                    check_hash=_CHECK_HASH,
+                    model_dir=cache_dir,
+                )
+    elif load_from == 'hf-hub':
+        _logger.info(f'Loading pretrained weights from Hugging Face hub ({pretrained_loc})')
+        if isinstance(pretrained_loc, (list, tuple)):
+            custom_load = pretrained_cfg.get('custom_load', False)
+            if isinstance(custom_load, str) and custom_load == 'hf':
+                load_custom_from_hf(*pretrained_loc, model, cache_dir=cache_dir)
+                return
+            else:
+                state_dict = load_state_dict_from_hf(*pretrained_loc, cache_dir=cache_dir)
+        else:
+            state_dict = load_state_dict_from_hf(pretrained_loc, weights_only=True, cache_dir=cache_dir)
+    else:
+        model_name = pretrained_cfg.get('architecture', 'this model')
+        raise RuntimeError(f"No pretrained weights exist for {model_name}. Use `pretrained=False` for random init.")
+
+    if filter_fn is not None:
+        try:
+            state_dict = filter_fn(state_dict, model)
+        except TypeError as e:
+            # for backwards compat with filter fns that take one arg
+            state_dict = filter_fn(state_dict)
+
+    input_convs = pretrained_cfg.get('first_conv', None)
+    if input_convs is not None and in_chans != 3:
+        if isinstance(input_convs, str):
+            input_convs = (input_convs,)
+        for input_conv_name in input_convs:
+            weight_name = input_conv_name + '.weight'
+            try:
+                state_dict[weight_name] = adapt_input_conv(in_chans, state_dict[weight_name])
+                _logger.info(
+                    f'Converted input conv {input_conv_name} pretrained weights from 3 to {in_chans} channel(s)')
+            except NotImplementedError as e:
+                del state_dict[weight_name]
+                strict = False
+                _logger.warning(
+                    f'Unable to convert pretrained {input_conv_name} weights, using random init for this layer.')
+
+    classifiers = pretrained_cfg.get('classifier', None)
+    label_offset = pretrained_cfg.get('label_offset', 0)
+    if classifiers is not None:
+        if isinstance(classifiers, str):
+            classifiers = (classifiers,)
+        if num_classes != pretrained_cfg['num_classes']:
+            for classifier_name in classifiers:
+                # completely discard fully connected if model num_classes doesn't match pretrained weights
+                state_dict.pop(classifier_name + '.weight', None)
+                state_dict.pop(classifier_name + '.bias', None)
+                strict = False
+        elif label_offset > 0:
+            for classifier_name in classifiers:
+                # special case for pretrained weights with an extra background class in pretrained weights
+                classifier_weight = state_dict[classifier_name + '.weight']
+                state_dict[classifier_name + '.weight'] = classifier_weight[label_offset:]
+                classifier_bias = state_dict[classifier_name + '.bias']
+                state_dict[classifier_name + '.bias'] = classifier_bias[label_offset:]
+
+    load_result = model.load_state_dict(state_dict, strict=strict)
+    if load_result.missing_keys:
+        _logger.info(
+            f'Missing keys ({", ".join(load_result.missing_keys)}) discovered while loading pretrained weights.'
+            f' This is expected if model is being adapted.')
+    if load_result.unexpected_keys:
+        _logger.warning(
+            f'Unexpected keys ({", ".join(load_result.unexpected_keys)}) found while loading pretrained weights.'
+            f' This may be expected if model is being adapted.')
+
+
+def pretrained_cfg_for_features(pretrained_cfg):
+    pretrained_cfg = deepcopy(pretrained_cfg)
+    # remove default pretrained cfg fields that don't have much relevance for feature backbone
+    to_remove = ('num_classes', 'classifier', 'global_pool')  # add default final pool size?
+    for tr in to_remove:
+        pretrained_cfg.pop(tr, None)
+    return pretrained_cfg
+
+
+def _filter_kwargs(kwargs, names):
+    if not kwargs or not names:
+        return
+    for n in names:
+        kwargs.pop(n, None)
+
+
+def _update_default_model_kwargs(pretrained_cfg, kwargs, kwargs_filter):
+    """ Update the default_cfg and kwargs before passing to model
+
+    Args:
+        pretrained_cfg: input pretrained cfg (updated in-place)
+        kwargs: keyword args passed to model build fn (updated in-place)
+        kwargs_filter: keyword arg keys that must be removed before model __init__
+    """
+    # Set model __init__ args that can be determined by default_cfg (if not already passed as kwargs)
+    default_kwarg_names = ('num_classes', 'global_pool', 'in_chans')
+    if pretrained_cfg.get('fixed_input_size', False):
+        # if fixed_input_size exists and is True, model takes an img_size arg that fixes its input size
+        default_kwarg_names += ('img_size',)
+
+    for n in default_kwarg_names:
+        # for legacy reasons, model __init__ args use img_size + in_chans as separate args while
+        # pretrained_cfg has one input_size=(C, H, W) entry
+        if n == 'img_size':
+            input_size = pretrained_cfg.get('input_size', None)
+            if input_size is not None:
+                assert len(input_size) == 3
+                kwargs.setdefault(n, input_size[-2:])
+        elif n == 'in_chans':
+            input_size = pretrained_cfg.get('input_size', None)
+            if input_size is not None:
+                assert len(input_size) == 3
+                kwargs.setdefault(n, input_size[0])
+        elif n == 'num_classes':
+            default_val = pretrained_cfg.get(n, None)
+            # if default is < 0, don't pass through to model
+            if default_val is not None and default_val >= 0:
+                kwargs.setdefault(n, pretrained_cfg[n])
+        else:
+            default_val = pretrained_cfg.get(n, None)
+            if default_val is not None:
+                kwargs.setdefault(n, pretrained_cfg[n])
+
+    # Filter keyword args for task specific model variants (some 'features only' models, etc.)
+    _filter_kwargs(kwargs, names=kwargs_filter)
+
+
+def resolve_pretrained_cfg(
+        variant: str,
+        pretrained_cfg: Optional[Union[str, Dict[str, Any]]] = None,
+        pretrained_cfg_overlay: Optional[Dict[str, Any]] = None,
+) -> PretrainedCfg:
+    model_with_tag = variant
+    pretrained_tag = None
+    if pretrained_cfg:
+        if isinstance(pretrained_cfg, dict):
+            # pretrained_cfg dict passed as arg, validate by converting to PretrainedCfg
+            pretrained_cfg = PretrainedCfg(**pretrained_cfg)
+        elif isinstance(pretrained_cfg, str):
+            pretrained_tag = pretrained_cfg
+            pretrained_cfg = None
+
+    # fallback to looking up pretrained cfg in model registry by variant identifier
+    if not pretrained_cfg:
+        if pretrained_tag:
+            model_with_tag = '.'.join([variant, pretrained_tag])
+        pretrained_cfg = get_pretrained_cfg(model_with_tag)
+
+    if not pretrained_cfg:
+        _logger.warning(
+            f"No pretrained configuration specified for {model_with_tag} model. Using a default."
+            f" Please add a config to the model pretrained_cfg registry or pass explicitly.")
+        pretrained_cfg = PretrainedCfg()  # instance with defaults
+
+    pretrained_cfg_overlay = pretrained_cfg_overlay or {}
+    if not pretrained_cfg.architecture:
+        pretrained_cfg_overlay.setdefault('architecture', variant)
+    pretrained_cfg = dataclasses.replace(pretrained_cfg, **pretrained_cfg_overlay)
+
+    return pretrained_cfg
+
+
+def build_model_with_cfg(
+        model_cls: Callable,
+        variant: str,
+        pretrained: bool,
+        pretrained_cfg: Optional[Dict] = None,
+        pretrained_cfg_overlay: Optional[Dict] = None,
+        model_cfg: Optional[Any] = None,
+        feature_cfg: Optional[Dict] = None,
+        pretrained_strict: bool = True,
+        pretrained_filter_fn: Optional[Callable] = None,
+        cache_dir: Optional[Union[str, Path]] = None,
+        kwargs_filter: Optional[Tuple[str]] = None,
+        **kwargs,
+):
+    """ Build model with specified default_cfg and optional model_cfg
+
+    This helper fn aids in the construction of a model including:
+      * handling default_cfg and associated pretrained weight loading
+      * passing through optional model_cfg for models with config based arch spec
+      * features_only model adaptation
+      * pruning config / model adaptation
+
+    Args:
+        model_cls: Model class
+        variant: Model variant name
+        pretrained: Load the pretrained weights
+        pretrained_cfg: Model's pretrained weight/task config
+        pretrained_cfg_overlay: Entries that will override those in pretrained_cfg
+        model_cfg: Model's architecture config
+        feature_cfg: Feature extraction adapter config
+        pretrained_strict: Load pretrained weights strictly
+        pretrained_filter_fn: Filter callable for pretrained weights
+        cache_dir: Override model cache dir for Hugging Face Hub and Torch checkpoints
+        kwargs_filter: Kwargs keys to filter (remove) before passing to model
+        **kwargs: Model args passed through to model __init__
+    """
+    pruned = kwargs.pop('pruned', False)
+    features = False
+    feature_cfg = feature_cfg or {}
+
+    # resolve and update model pretrained config and model kwargs
+    pretrained_cfg = resolve_pretrained_cfg(
+        variant,
+        pretrained_cfg=pretrained_cfg,
+        pretrained_cfg_overlay=pretrained_cfg_overlay,
+    )
+    pretrained_cfg = pretrained_cfg.to_dict()
+
+    _update_default_model_kwargs(pretrained_cfg, kwargs, kwargs_filter)
+
+    # Setup for feature extraction wrapper done at end of this fn
+    if kwargs.pop('features_only', False):
+        features = True
+        feature_cfg.setdefault('out_indices', (0, 1, 2, 3, 4))
+        if 'out_indices' in kwargs:
+            feature_cfg['out_indices'] = kwargs.pop('out_indices')
+        if 'feature_cls' in kwargs:
+            feature_cfg['feature_cls'] = kwargs.pop('feature_cls')
+
+    # Instantiate the model
+    if model_cfg is None:
+        model = model_cls(**kwargs)
+    else:
+        model = model_cls(cfg=model_cfg, **kwargs)
+    model.pretrained_cfg = pretrained_cfg
+    model.default_cfg = model.pretrained_cfg  # alias for backwards compat
+
+    if pruned:
+        model = adapt_model_from_file(model, variant)
+
+    # For classification models, check class attr, then kwargs, then default to 1k, otherwise 0 for feats
+    num_classes_pretrained = 0 if features else getattr(model, 'num_classes', kwargs.get('num_classes', 1000))
+    if pretrained:
+        load_pretrained(
+            model,
+            pretrained_cfg=pretrained_cfg,
+            num_classes=num_classes_pretrained,
+            in_chans=kwargs.get('in_chans', 3),
+            filter_fn=pretrained_filter_fn,
+            strict=pretrained_strict,
+            cache_dir=cache_dir,
+        )
+
+    # Wrap the model in a feature extraction module if enabled
+    if features:
+        use_getter = False
+        if 'feature_cls' in feature_cfg:
+            feature_cls = feature_cfg.pop('feature_cls')
+            if isinstance(feature_cls, str):
+                feature_cls = feature_cls.lower()
+
+                # flatten_sequential only valid for some feature extractors
+                if feature_cls not in ('dict', 'list', 'hook'):
+                    feature_cfg.pop('flatten_sequential', None)
+
+                if 'hook' in feature_cls:
+                    feature_cls = FeatureHookNet
+                elif feature_cls == 'list':
+                    feature_cls = FeatureListNet
+                elif feature_cls == 'dict':
+                    feature_cls = FeatureDictNet
+                elif feature_cls == 'fx':
+                    feature_cls = FeatureGraphNet
+                elif feature_cls == 'getter':
+                    use_getter = True
+                    feature_cls = FeatureGetterNet
+                else:
+                    assert False, f'Unknown feature class {feature_cls}'
+        else:
+            feature_cls = FeatureListNet
+
+        output_fmt = getattr(model, 'output_fmt', None)
+        if output_fmt is not None and not use_getter:  # don't set default for intermediate feat getter
+            feature_cfg.setdefault('output_fmt', output_fmt)
+
+        model = feature_cls(model, **feature_cfg)
+        model.pretrained_cfg = pretrained_cfg_for_features(pretrained_cfg)  # add back pretrained cfg
+        model.default_cfg = model.pretrained_cfg  # alias for rename backwards compat (default_cfg -> pretrained_cfg)
+
+    return model
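For orientation, here is a minimal sketch (not part of the diff above) of how a model entrypoint typically drives `build_model_with_cfg`. `TinyNet` and `tinynet_example` are hypothetical names invented for illustration; the sketch assumes a timm installation whose `_builder.py` matches this diff, and that the default `PretrainedCfg` only contributes `num_classes`/`in_chans` style kwargs here.

import torch
import torch.nn as nn
from timm.models import build_model_with_cfg


class TinyNet(nn.Module):
    """Hypothetical toy architecture standing in for a real timm model class."""
    def __init__(self, num_classes: int = 1000, in_chans: int = 3, **kwargs):
        super().__init__()
        self.num_classes = num_classes
        self.stem = nn.Conv2d(in_chans, 8, 3, stride=2, padding=1)
        self.head = nn.Linear(8, num_classes)

    def forward(self, x):
        x = self.stem(x).mean((2, 3))  # global average pool
        return self.head(x)


def tinynet_example(pretrained: bool = False, **kwargs):
    # No weights are registered for this variant, so pretrained stays False; the
    # helper still resolves a default PretrainedCfg, overlays
    # architecture='tinynet_example', and attaches the dict to the model.
    return build_model_with_cfg(TinyNet, 'tinynet_example', pretrained, **kwargs)


model = tinynet_example(num_classes=10)
print(model.pretrained_cfg['architecture'])    # tinynet_example
print(model(torch.randn(1, 3, 32, 32)).shape)  # torch.Size([1, 10])

Registered entrypoints follow the same path internally, with `pretrained=True` additionally routing through `load_pretrained` and the source resolution in `_resolve_pretrained_source`.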
pytorch-image-models/timm/models/_efficientnet_blocks.py
ADDED
@@ -0,0 +1,702 @@
+""" EfficientNet, MobileNetV3, etc Blocks
+
+Hacked together by / Copyright 2019, Ross Wightman
+"""
+from typing import Callable, Dict, Optional, Type
+
+import torch
+import torch.nn as nn
+from torch.nn import functional as F
+
+from timm.layers import create_conv2d, DropPath, make_divisible, create_act_layer, create_aa, to_2tuple, LayerType,\
+    ConvNormAct, get_norm_act_layer, MultiQueryAttention2d, Attention2d
+
+__all__ = [
+    'SqueezeExcite', 'ConvBnAct', 'DepthwiseSeparableConv', 'InvertedResidual', 'CondConvResidual', 'EdgeResidual',
+    'UniversalInvertedResidual', 'MobileAttention'
+]
+
+ModuleType = Type[nn.Module]
+
+
+def num_groups(group_size: Optional[int], channels: int):
+    if not group_size:  # 0 or None
+        return 1  # normal conv with 1 group
+    else:
+        # NOTE group_size == 1 -> depthwise conv
+        assert channels % group_size == 0
+        return channels // group_size
+
+
+class SqueezeExcite(nn.Module):
+    """ Squeeze-and-Excitation w/ specific features for EfficientNet/MobileNet family
+
+    Args:
+        in_chs (int): input channels to layer
+        rd_ratio (float): ratio of squeeze reduction
+        act_layer (nn.Module): activation layer of containing block
+        gate_layer (Callable): attention gate function
+        force_act_layer (nn.Module): override block's activation fn if this is set/bound
+        rd_round_fn (Callable): specify a fn to calculate rounding of reduced chs
+    """
+
+    def __init__(
+            self,
+            in_chs: int,
+            rd_ratio: float = 0.25,
+            rd_channels: Optional[int] = None,
+            act_layer: LayerType = nn.ReLU,
+            gate_layer: LayerType = nn.Sigmoid,
+            force_act_layer: Optional[LayerType] = None,
+            rd_round_fn: Optional[Callable] = None,
+    ):
+        super(SqueezeExcite, self).__init__()
+        if rd_channels is None:
+            rd_round_fn = rd_round_fn or round
+            rd_channels = rd_round_fn(in_chs * rd_ratio)
+        act_layer = force_act_layer or act_layer
+        self.conv_reduce = nn.Conv2d(in_chs, rd_channels, 1, bias=True)
+        self.act1 = create_act_layer(act_layer, inplace=True)
+        self.conv_expand = nn.Conv2d(rd_channels, in_chs, 1, bias=True)
+        self.gate = create_act_layer(gate_layer)
+
+    def forward(self, x):
+        x_se = x.mean((2, 3), keepdim=True)
+        x_se = self.conv_reduce(x_se)
+        x_se = self.act1(x_se)
+        x_se = self.conv_expand(x_se)
+        return x * self.gate(x_se)
+
+
+class ConvBnAct(nn.Module):
+    """ Conv + Norm Layer + Activation w/ optional skip connection
+    """
+    def __init__(
+            self,
+            in_chs: int,
+            out_chs: int,
+            kernel_size: int,
+            stride: int = 1,
+            dilation: int = 1,
+            group_size: int = 0,
+            pad_type: str = '',
+            skip: bool = False,
+            act_layer: LayerType = nn.ReLU,
+            norm_layer: LayerType = nn.BatchNorm2d,
+            aa_layer: Optional[LayerType] = None,
+            drop_path_rate: float = 0.,
+    ):
+        super(ConvBnAct, self).__init__()
+        norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
+        groups = num_groups(group_size, in_chs)
+        self.has_skip = skip and stride == 1 and in_chs == out_chs
+        use_aa = aa_layer is not None and stride > 1  # FIXME handle dilation
+
+        self.conv = create_conv2d(
+            in_chs, out_chs, kernel_size,
+            stride=1 if use_aa else stride,
+            dilation=dilation, groups=groups, padding=pad_type)
+        self.bn1 = norm_act_layer(out_chs, inplace=True)
+        self.aa = create_aa(aa_layer, channels=out_chs, stride=stride, enable=use_aa)
+        self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity()
+
+    def feature_info(self, location):
+        if location == 'expansion':  # output of conv after act, same as block output
+            return dict(module='bn1', hook_type='forward', num_chs=self.conv.out_channels)
+        else:  # location == 'bottleneck', block output
+            return dict(module='', num_chs=self.conv.out_channels)
+
+    def forward(self, x):
+        shortcut = x
+        x = self.conv(x)
+        x = self.bn1(x)
+        x = self.aa(x)
+        if self.has_skip:
+            x = self.drop_path(x) + shortcut
+        return x
+
+
+class DepthwiseSeparableConv(nn.Module):
+    """ Depthwise-separable block
+    Used for DS convs in MobileNet-V1 and in the place of IR blocks that have no expansion
+    (factor of 1.0). This is an alternative to having an IR with an optional first pw conv.
+    """
+    def __init__(
+            self,
+            in_chs: int,
+            out_chs: int,
+            dw_kernel_size: int = 3,
+            stride: int = 1,
+            dilation: int = 1,
+            group_size: int = 1,
+            pad_type: str = '',
+            noskip: bool = False,
+            pw_kernel_size: int = 1,
+            pw_act: bool = False,
+            s2d: int = 0,
+            act_layer: LayerType = nn.ReLU,
+            norm_layer: LayerType = nn.BatchNorm2d,
+            aa_layer: Optional[LayerType] = None,
+            se_layer: Optional[ModuleType] = None,
+            drop_path_rate: float = 0.,
+    ):
+        super(DepthwiseSeparableConv, self).__init__()
+        norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
+        self.has_skip = (stride == 1 and in_chs == out_chs) and not noskip
+        self.has_pw_act = pw_act  # activation after point-wise conv
+        use_aa = aa_layer is not None and stride > 1  # FIXME handle dilation
+
+        # Space to depth
+        if s2d == 1:
+            sd_chs = int(in_chs * 4)
+            self.conv_s2d = create_conv2d(in_chs, sd_chs, kernel_size=2, stride=2, padding='same')
+            self.bn_s2d = norm_act_layer(sd_chs, sd_chs)
+            dw_kernel_size = (dw_kernel_size + 1) // 2
+            dw_pad_type = 'same' if dw_kernel_size == 2 else pad_type
+            in_chs = sd_chs
+            use_aa = False  # disable AA
+        else:
+            self.conv_s2d = None
+            self.bn_s2d = None
+            dw_pad_type = pad_type
+
+        groups = num_groups(group_size, in_chs)
+
+        self.conv_dw = create_conv2d(
+            in_chs, in_chs, dw_kernel_size,
+            stride=1 if use_aa else stride,
+            dilation=dilation, padding=dw_pad_type, groups=groups)
+        self.bn1 = norm_act_layer(in_chs, inplace=True)
+        self.aa = create_aa(aa_layer, channels=out_chs, stride=stride, enable=use_aa)
+
+        # Squeeze-and-excitation
+        self.se = se_layer(in_chs, act_layer=act_layer) if se_layer else nn.Identity()
+
+        self.conv_pw = create_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type)
+        self.bn2 = norm_act_layer(out_chs, inplace=True, apply_act=self.has_pw_act)
+        self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity()
+
+    def feature_info(self, location):
+        if location == 'expansion':  # after SE, input to PW
+            return dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels)
+        else:  # location == 'bottleneck', block output
+            return dict(module='', num_chs=self.conv_pw.out_channels)
+
+    def forward(self, x):
+        shortcut = x
+        if self.conv_s2d is not None:
+            x = self.conv_s2d(x)
+            x = self.bn_s2d(x)
+        x = self.conv_dw(x)
+        x = self.bn1(x)
+        x = self.aa(x)
+        x = self.se(x)
+        x = self.conv_pw(x)
+        x = self.bn2(x)
+        if self.has_skip:
+            x = self.drop_path(x) + shortcut
+        return x
+
+
+class InvertedResidual(nn.Module):
+    """ Inverted residual block w/ optional SE
+
+    Originally used in MobileNet-V2 - https://arxiv.org/abs/1801.04381v4, this layer is often
+    referred to as 'MBConv' (Mobile inverted bottleneck conv) and is also used in
+      * MNasNet - https://arxiv.org/abs/1807.11626
+      * EfficientNet - https://arxiv.org/abs/1905.11946
+      * MobileNet-V3 - https://arxiv.org/abs/1905.02244
+    """
+
+    def __init__(
+            self,
+            in_chs: int,
+            out_chs: int,
+            dw_kernel_size: int = 3,
+            stride: int = 1,
+            dilation: int = 1,
+            group_size: int = 1,
+            pad_type: str = '',
+            noskip: bool = False,
+            exp_ratio: float = 1.0,
+            exp_kernel_size: int = 1,
+            pw_kernel_size: int = 1,
+            s2d: int = 0,
+            act_layer: LayerType = nn.ReLU,
+            norm_layer: LayerType = nn.BatchNorm2d,
+            aa_layer: Optional[LayerType] = None,
+            se_layer: Optional[ModuleType] = None,
+            conv_kwargs: Optional[Dict] = None,
+            drop_path_rate: float = 0.,
+    ):
+        super(InvertedResidual, self).__init__()
+        norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
+        conv_kwargs = conv_kwargs or {}
+        self.has_skip = (in_chs == out_chs and stride == 1) and not noskip
+        use_aa = aa_layer is not None and stride > 1  # FIXME handle dilation
+
+        # Space to depth
+        if s2d == 1:
+            sd_chs = int(in_chs * 4)
+            self.conv_s2d = create_conv2d(in_chs, sd_chs, kernel_size=2, stride=2, padding='same')
+            self.bn_s2d = norm_act_layer(sd_chs, sd_chs)
+            dw_kernel_size = (dw_kernel_size + 1) // 2
+            dw_pad_type = 'same' if dw_kernel_size == 2 else pad_type
+            in_chs = sd_chs
+            use_aa = False  # disable AA
+        else:
+            self.conv_s2d = None
+            self.bn_s2d = None
+            dw_pad_type = pad_type
+
+        mid_chs = make_divisible(in_chs * exp_ratio)
+        groups = num_groups(group_size, mid_chs)
+
+        # Point-wise expansion
+        self.conv_pw = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs)
+        self.bn1 = norm_act_layer(mid_chs, inplace=True)
+
+        # Depth-wise convolution
+        self.conv_dw = create_conv2d(
+            mid_chs, mid_chs, dw_kernel_size,
+            stride=1 if use_aa else stride,
+            dilation=dilation, groups=groups, padding=dw_pad_type, **conv_kwargs)
+        self.bn2 = norm_act_layer(mid_chs, inplace=True)
+        self.aa = create_aa(aa_layer, channels=mid_chs, stride=stride, enable=use_aa)
+
+        # Squeeze-and-excitation
+        self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity()
+
+        # Point-wise linear projection
+        self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs)
+        self.bn3 = norm_act_layer(out_chs, apply_act=False)
+        self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity()
+
+    def feature_info(self, location):
+        if location == 'expansion':  # after SE, input to PWL
+            return dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels)
+        else:  # location == 'bottleneck', block output
+            return dict(module='', num_chs=self.conv_pwl.out_channels)
+
+    def forward(self, x):
+        shortcut = x
+        if self.conv_s2d is not None:
+            x = self.conv_s2d(x)
+            x = self.bn_s2d(x)
+        x = self.conv_pw(x)
+        x = self.bn1(x)
+        x = self.conv_dw(x)
+        x = self.bn2(x)
+        x = self.aa(x)
+        x = self.se(x)
+        x = self.conv_pwl(x)
+        x = self.bn3(x)
+        if self.has_skip:
+            x = self.drop_path(x) + shortcut
+        return x
+
+
+class LayerScale2d(nn.Module):
+    def __init__(self, dim: int, init_values: float = 1e-5, inplace: bool = False):
+        super().__init__()
+        self.inplace = inplace
+        self.gamma = nn.Parameter(init_values * torch.ones(dim))
+
+    def forward(self, x):
+        gamma = self.gamma.view(1, -1, 1, 1)
+        return x.mul_(gamma) if self.inplace else x * gamma
+
+
+class UniversalInvertedResidual(nn.Module):
+    """ Universal Inverted Residual Block (aka Universal Inverted Bottleneck, UIB)
+
+    For MobileNetV4 - https://arxiv.org/abs/, referenced from
+    https://github.com/tensorflow/models/blob/d93c7e932de27522b2fa3b115f58d06d6f640537/official/vision/modeling/layers/nn_blocks.py#L778
+    """
+
+    def __init__(
+            self,
+            in_chs: int,
+            out_chs: int,
+            dw_kernel_size_start: int = 0,
+            dw_kernel_size_mid: int = 3,
+            dw_kernel_size_end: int = 0,
+            stride: int = 1,
+            dilation: int = 1,
+            group_size: int = 1,
+            pad_type: str = '',
+            noskip: bool = False,
+            exp_ratio: float = 1.0,
+            act_layer: LayerType = nn.ReLU,
+            norm_layer: LayerType = nn.BatchNorm2d,
+            aa_layer: Optional[LayerType] = None,
+            se_layer: Optional[ModuleType] = None,
+            conv_kwargs: Optional[Dict] = None,
+            drop_path_rate: float = 0.,
+            layer_scale_init_value: Optional[float] = 1e-5,
+    ):
+        super(UniversalInvertedResidual, self).__init__()
+        conv_kwargs = conv_kwargs or {}
+        self.has_skip = (in_chs == out_chs and stride == 1) and not noskip
+        if stride > 1:
+            assert dw_kernel_size_start or dw_kernel_size_mid or dw_kernel_size_end
+
+        # FIXME dilation isn't right w/ extra ks > 1 convs
+        if dw_kernel_size_start:
+            dw_start_stride = stride if not dw_kernel_size_mid else 1
+            dw_start_groups = num_groups(group_size, in_chs)
+            self.dw_start = ConvNormAct(
+                in_chs, in_chs, dw_kernel_size_start,
+                stride=dw_start_stride,
+                dilation=dilation,  # FIXME
+                groups=dw_start_groups,
+                padding=pad_type,
+                apply_act=False,
+                act_layer=act_layer,
+                norm_layer=norm_layer,
+                aa_layer=aa_layer,
+                **conv_kwargs,
+            )
+        else:
+            self.dw_start = nn.Identity()
+
+        # Point-wise expansion
+        mid_chs = make_divisible(in_chs * exp_ratio)
+        self.pw_exp = ConvNormAct(
+            in_chs, mid_chs, 1,
+            padding=pad_type,
+            act_layer=act_layer,
+            norm_layer=norm_layer,
+            **conv_kwargs,
+        )
+
+        # Middle depth-wise convolution
+        if dw_kernel_size_mid:
+            groups = num_groups(group_size, mid_chs)
+            self.dw_mid = ConvNormAct(
+                mid_chs, mid_chs, dw_kernel_size_mid,
+                stride=stride,
+                dilation=dilation,  # FIXME
+                groups=groups,
+                padding=pad_type,
+                act_layer=act_layer,
+                norm_layer=norm_layer,
+                aa_layer=aa_layer,
+                **conv_kwargs,
+            )
+        else:
+            # keeping mid as identity so it can be hooked more easily for features
+            self.dw_mid = nn.Identity()
+
+        # Squeeze-and-excitation
+        self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity()
+
+        # Point-wise linear projection
+        self.pw_proj = ConvNormAct(
+            mid_chs, out_chs, 1,
+            padding=pad_type,
+            apply_act=False,
+            act_layer=act_layer,
+            norm_layer=norm_layer,
+            **conv_kwargs,
+        )
+
+        if dw_kernel_size_end:
+            dw_end_stride = stride if not dw_kernel_size_start and not dw_kernel_size_mid else 1
+            dw_end_groups = num_groups(group_size, out_chs)
+            if dw_end_stride > 1:
+                assert not aa_layer
+            self.dw_end = ConvNormAct(
+                out_chs, out_chs, dw_kernel_size_end,
+                stride=dw_end_stride,
+                dilation=dilation,
+                groups=dw_end_groups,
+                padding=pad_type,
+                apply_act=False,
+                act_layer=act_layer,
+                norm_layer=norm_layer,
+                **conv_kwargs,
+            )
+        else:
+            self.dw_end = nn.Identity()
+
+        if layer_scale_init_value is not None:
+            self.layer_scale = LayerScale2d(out_chs, layer_scale_init_value)
+        else:
+            self.layer_scale = nn.Identity()
+        self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity()
+
+    def feature_info(self, location):
+        if location == 'expansion':  # after SE, input to PWL
+            return dict(module='pw_proj.conv', hook_type='forward_pre', num_chs=self.pw_proj.conv.in_channels)
+        else:  # location == 'bottleneck', block output
+            return dict(module='', num_chs=self.pw_proj.conv.out_channels)
+
+    def forward(self, x):
+        shortcut = x
+        x = self.dw_start(x)
+        x = self.pw_exp(x)
+        x = self.dw_mid(x)
+        x = self.se(x)
+        x = self.pw_proj(x)
+        x = self.dw_end(x)
+        x = self.layer_scale(x)
+        if self.has_skip:
+            x = self.drop_path(x) + shortcut
+        return x
+
+
+class MobileAttention(nn.Module):
+    """ Mobile Attention Block
+
+    For MobileNetV4 - https://arxiv.org/abs/, referenced from
+    https://github.com/tensorflow/models/blob/d93c7e932de27522b2fa3b115f58d06d6f640537/official/vision/modeling/layers/nn_blocks.py#L1504
+    """
+    def __init__(
+            self,
+            in_chs: int,
+            out_chs: int,
+            stride: int = 1,
+            dw_kernel_size: int = 3,
+            dilation: int = 1,
+            group_size: int = 1,
+            pad_type: str = '',
+            num_heads: int = 8,
+            key_dim: int = 64,
+            value_dim: int = 64,
+            use_multi_query: bool = False,
+            query_strides: int = (1, 1),
+            kv_stride: int = 1,
+            cpe_dw_kernel_size: int = 3,
+            noskip: bool = False,
+            act_layer: LayerType = nn.ReLU,
+            norm_layer: LayerType = nn.BatchNorm2d,
+            aa_layer: Optional[LayerType] = None,
+            drop_path_rate: float = 0.,
+            attn_drop: float = 0.0,
+            proj_drop: float = 0.0,
+            layer_scale_init_value: Optional[float] = 1e-5,
+            use_bias: bool = False,
+            use_cpe: bool = False,
+    ):
+        super(MobileAttention, self).__init__()
+        norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
+        self.has_skip = (stride == 1 and in_chs == out_chs) and not noskip
+        self.query_strides = to_2tuple(query_strides)
+        self.kv_stride = kv_stride
+        self.has_query_stride = any([s > 1 for s in self.query_strides])
+
+        # This CPE is different than the one suggested in the original paper.
+        # https://arxiv.org/abs/2102.10882
+        # 1. Rather than adding one CPE before the attention blocks, we add a CPE
+        #    into every attention block.
+        # 2. We replace the expensive Conv2D by a Separable DW Conv.
+        if use_cpe:
+            self.conv_cpe_dw = create_conv2d(
+                in_chs, in_chs,
+                kernel_size=cpe_dw_kernel_size,
+                dilation=dilation,
+                depthwise=True,
+                bias=True,
+            )
+        else:
+            self.conv_cpe_dw = None
+
+        self.norm = norm_act_layer(in_chs, apply_act=False)
+
+        if num_heads is None:
+            assert in_chs % key_dim == 0
+            num_heads = in_chs // key_dim
+
+        if use_multi_query:
+            self.attn = MultiQueryAttention2d(
+                in_chs,
+                dim_out=out_chs,
+                num_heads=num_heads,
+                key_dim=key_dim,
+                value_dim=value_dim,
+                query_strides=query_strides,
+                kv_stride=kv_stride,
+                dilation=dilation,
+                padding=pad_type,
+                dw_kernel_size=dw_kernel_size,
+                attn_drop=attn_drop,
+                proj_drop=proj_drop,
+                #bias=use_bias, # why not here if used w/ mhsa?
+            )
+        else:
+            self.attn = Attention2d(
+                in_chs,
+                dim_out=out_chs,
+                num_heads=num_heads,
+                attn_drop=attn_drop,
+                proj_drop=proj_drop,
+                bias=use_bias,
+            )
+
+        if layer_scale_init_value is not None:
+            self.layer_scale = LayerScale2d(out_chs, layer_scale_init_value)
+        else:
+            self.layer_scale = nn.Identity()
+
+        self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity()
+
+    def feature_info(self, location):
+        if location == 'expansion':  # after SE, input to PW
+            return dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels)
+        else:  # location == 'bottleneck', block output
+            return dict(module='', num_chs=self.conv_pw.out_channels)
+
+    def forward(self, x):
+        if self.conv_cpe_dw is not None:
+            x_cpe = self.conv_cpe_dw(x)
+            x = x + x_cpe
+
+        shortcut = x
+        x = self.norm(x)
+        x = self.attn(x)
+        x = self.layer_scale(x)
+        if self.has_skip:
+            x = self.drop_path(x) + shortcut
+
+        return x
+
+
+class CondConvResidual(InvertedResidual):
+    """ Inverted residual block w/ CondConv routing"""
+
+    def __init__(
+            self,
+            in_chs: int,
+            out_chs: int,
+            dw_kernel_size: int = 3,
+            stride: int = 1,
+            dilation: int = 1,
+            group_size: int = 1,
+            pad_type: str = '',
+            noskip: bool = False,
+            exp_ratio: float = 1.0,
+            exp_kernel_size: int = 1,
+            pw_kernel_size: int = 1,
+            act_layer: LayerType = nn.ReLU,
+            norm_layer: LayerType = nn.BatchNorm2d,
+            aa_layer: Optional[LayerType] = None,
+            se_layer: Optional[ModuleType] = None,
+            num_experts: int = 0,
+            drop_path_rate: float = 0.,
+    ):
+
+        self.num_experts = num_experts
+        conv_kwargs = dict(num_experts=self.num_experts)
+        super(CondConvResidual, self).__init__(
+            in_chs,
+            out_chs,
+            dw_kernel_size=dw_kernel_size,
+            stride=stride,
+            dilation=dilation,
+            group_size=group_size,
+            pad_type=pad_type,
+            noskip=noskip,
+            exp_ratio=exp_ratio,
+            exp_kernel_size=exp_kernel_size,
+            pw_kernel_size=pw_kernel_size,
+            act_layer=act_layer,
+            norm_layer=norm_layer,
+            aa_layer=aa_layer,
+            se_layer=se_layer,
+            conv_kwargs=conv_kwargs,
+            drop_path_rate=drop_path_rate,
+        )
+        self.routing_fn = nn.Linear(in_chs, self.num_experts)
+
+    def forward(self, x):
+        shortcut = x
+        pooled_inputs = F.adaptive_avg_pool2d(x, 1).flatten(1)  # CondConv routing
+        routing_weights = torch.sigmoid(self.routing_fn(pooled_inputs))
+        x = self.conv_pw(x, routing_weights)
+        x = self.bn1(x)
+        x = self.conv_dw(x, routing_weights)
+        x = self.bn2(x)
+        x = self.se(x)
+        x = self.conv_pwl(x, routing_weights)
+        x = self.bn3(x)
+        if self.has_skip:
+            x = self.drop_path(x) + shortcut
+        return x
+
+
+class EdgeResidual(nn.Module):
+    """ Residual block with expansion convolution followed by pointwise-linear w/ stride
+
+    Originally introduced in `EfficientNet-EdgeTPU: Creating Accelerator-Optimized Neural Networks with AutoML`
+    - https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html
+
+    This layer is also called FusedMBConv in the MobileDet, EfficientNet-X, and EfficientNet-V2 papers
+      * MobileDet - https://arxiv.org/abs/2004.14525
+      * EfficientNet-X - https://arxiv.org/abs/2102.05610
+      * EfficientNet-V2 - https://arxiv.org/abs/2104.00298
+    """
+
+    def __init__(
+            self,
+            in_chs: int,
+            out_chs: int,
+            exp_kernel_size: int = 3,
+            stride: int = 1,
+            dilation: int = 1,
+            group_size: int = 0,
+            pad_type: str = '',
+            force_in_chs: int = 0,
+            noskip: bool = False,
+            exp_ratio: float = 1.0,
+            pw_kernel_size: int = 1,
+            act_layer: LayerType = nn.ReLU,
+            norm_layer: LayerType = nn.BatchNorm2d,
+            aa_layer: Optional[LayerType] = None,
+            se_layer: Optional[ModuleType] = None,
+            drop_path_rate: float = 0.,
+    ):
+        super(EdgeResidual, self).__init__()
+        norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
+        if force_in_chs > 0:
+            mid_chs = make_divisible(force_in_chs * exp_ratio)
+        else:
+            mid_chs = make_divisible(in_chs * exp_ratio)
+        groups = num_groups(group_size, mid_chs)  # NOTE: Using out_chs of conv_exp for groups calc
+        self.has_skip = (in_chs == out_chs and stride == 1) and not noskip
+        use_aa = aa_layer is not None and stride > 1  # FIXME handle dilation
+
+        # Expansion convolution
+        self.conv_exp = create_conv2d(
+            in_chs, mid_chs, exp_kernel_size,
+            stride=1 if use_aa else stride,
+            dilation=dilation, groups=groups, padding=pad_type)
+        self.bn1 = norm_act_layer(mid_chs, inplace=True)
+
+        self.aa = create_aa(aa_layer, channels=mid_chs, stride=stride, enable=use_aa)
+
+        # Squeeze-and-excitation
+        self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity()
+
+        # Point-wise linear projection
+        self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type)
+        self.bn2 = norm_act_layer(out_chs, apply_act=False)
+        self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity()
+
+    def feature_info(self, location):
+        if location == 'expansion':  # after SE, before PWL
+            return dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels)
+        else:  # location == 'bottleneck', block output
+            return dict(module='', num_chs=self.conv_pwl.out_channels)
+
+    def forward(self, x):
+        shortcut = x
+        x = self.conv_exp(x)
+        x = self.bn1(x)
+        x = self.aa(x)
+        x = self.se(x)
+        x = self.conv_pwl(x)
+        x = self.bn2(x)
+        if self.has_skip:
+            x = self.drop_path(x) + shortcut
+        return x
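A quick smoke test (not part of the diff) for two of the blocks defined above, assuming a timm installation whose `_efficientnet_blocks.py` matches this diff. Shapes are chosen so both the active and inactive skip-path cases are exercised.

import torch
from timm.models._efficientnet_blocks import InvertedResidual, UniversalInvertedResidual

x = torch.randn(2, 32, 28, 28)

# MBConv-style block: stride 1 and matching channels keep the residual skip active
ir = InvertedResidual(in_chs=32, out_chs=32, dw_kernel_size=3, stride=1, exp_ratio=4.0)
print(ir.has_skip, ir(x).shape)  # True torch.Size([2, 32, 28, 28])

# UIB variant with an extra leading depthwise conv (MobileNetV4 style)
uib = UniversalInvertedResidual(
    in_chs=32, out_chs=32,
    dw_kernel_size_start=3, dw_kernel_size_mid=3,
    stride=1, exp_ratio=4.0,
)
print(uib(x).shape)  # torch.Size([2, 32, 28, 28])

# stride 2 or a channel change disables the skip connection
ir_down = InvertedResidual(in_chs=32, out_chs=64, stride=2, exp_ratio=4.0)
print(ir_down.has_skip, ir_down(x).shape)  # False torch.Size([2, 64, 14, 14])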
pytorch-image-models/timm/models/_factory.py
ADDED
@@ -0,0 +1,137 @@
1 |
+
import os
|
2 |
+
from pathlib import Path
|
3 |
+
from typing import Any, Dict, Optional, Union
|
4 |
+
from urllib.parse import urlsplit
|
5 |
+
|
6 |
+
from timm.layers import set_layer_config
|
7 |
+
from ._helpers import load_checkpoint
|
8 |
+
from ._hub import load_model_config_from_hf
|
9 |
+
from ._pretrained import PretrainedCfg
|
10 |
+
from ._registry import is_model, model_entrypoint, split_model_name_tag
|
11 |
+
|
12 |
+
|
13 |
+
__all__ = ['parse_model_name', 'safe_model_name', 'create_model']
|
14 |
+
|
15 |
+
|
16 |
+
def parse_model_name(model_name: str):
|
17 |
+
if model_name.startswith('hf_hub'):
|
18 |
+
# NOTE for backwards compat, deprecate hf_hub use
|
19 |
+
model_name = model_name.replace('hf_hub', 'hf-hub')
|
20 |
+
parsed = urlsplit(model_name)
|
21 |
+
assert parsed.scheme in ('', 'timm', 'hf-hub')
|
22 |
+
if parsed.scheme == 'hf-hub':
|
23 |
+
# FIXME may use fragment as revision, currently `@` in URI path
|
24 |
+
return parsed.scheme, parsed.path
|
25 |
+
else:
|
26 |
+
model_name = os.path.split(parsed.path)[-1]
|
27 |
+
return 'timm', model_name
|
28 |
+
|
29 |
+
|
30 |
+
def safe_model_name(model_name: str, remove_source: bool = True):
|
31 |
+
# return a filename / path safe model name
|
32 |
+
def make_safe(name):
|
33 |
+
return ''.join(c if c.isalnum() else '_' for c in name).rstrip('_')
|
34 |
+
if remove_source:
|
35 |
+
model_name = parse_model_name(model_name)[-1]
|
36 |
+
return make_safe(model_name)
|
37 |
+
|
38 |
+
|
39 |
+
def create_model(
|
40 |
+
model_name: str,
|
41 |
+
pretrained: bool = False,
|
42 |
+
pretrained_cfg: Optional[Union[str, Dict[str, Any], PretrainedCfg]] = None,
|
43 |
+
pretrained_cfg_overlay: Optional[Dict[str, Any]] = None,
|
44 |
+
checkpoint_path: Optional[Union[str, Path]] = None,
|
45 |
+
cache_dir: Optional[Union[str, Path]] = None,
|
46 |
+
scriptable: Optional[bool] = None,
|
47 |
+
exportable: Optional[bool] = None,
|
48 |
+
no_jit: Optional[bool] = None,
|
49 |
+
**kwargs,
|
50 |
+
):
|
51 |
+
"""Create a model.
|
52 |
+
|
53 |
+
Lookup model's entrypoint function and pass relevant args to create a new model.
|
54 |
+
|
55 |
+
Tip:
|
56 |
+
**kwargs will be passed through entrypoint fn to ``timm.models.build_model_with_cfg()``
|
57 |
+
and then the model class __init__(). kwargs values set to None are pruned before passing.
|
58 |
+
|
59 |
+
Args:
|
60 |
+
model_name: Name of model to instantiate.
|
61 |
+
pretrained: If set to `True`, load pretrained ImageNet-1k weights.
|
62 |
+
pretrained_cfg: Pass in an external pretrained_cfg for model.
|
63 |
+
pretrained_cfg_overlay: Replace key-values in base pretrained_cfg with these.
|
64 |
+
checkpoint_path: Path of checkpoint to load _after_ the model is initialized.
|
65 |
+
        cache_dir: Override model cache dir for Hugging Face Hub and Torch checkpoints.
        scriptable: Set layer config so that model is jit scriptable (not working for all models yet).
        exportable: Set layer config so that model is traceable / ONNX exportable (not fully impl/obeyed yet).
        no_jit: Set layer config so that model doesn't utilize jit scripted layers (so far activations only).

    Keyword Args:
        drop_rate (float): Classifier dropout rate for training.
        drop_path_rate (float): Stochastic depth drop rate for training.
        global_pool (str): Classifier global pooling type.

    Example:

    ```py
    >>> from timm import create_model

    >>> # Create a MobileNetV3-Large model with no pretrained weights.
    >>> model = create_model('mobilenetv3_large_100')

    >>> # Create a MobileNetV3-Large model with pretrained weights.
    >>> model = create_model('mobilenetv3_large_100', pretrained=True)
    >>> model.num_classes
    1000

    >>> # Create a MobileNetV3-Large model with pretrained weights and a new head with 10 classes.
    >>> model = create_model('mobilenetv3_large_100', pretrained=True, num_classes=10)
    >>> model.num_classes
    10

    >>> # Create a Dinov2 small model with pretrained weights and save weights in a custom directory.
    >>> model = create_model('vit_small_patch14_dinov2.lvd142m', pretrained=True, cache_dir="/data/my-models")
    >>> # Data will be stored at `/data/my-models/models--timm--vit_small_patch14_dinov2.lvd142m/`
    ```
    """
    # Parameters that aren't supported by all models or are intended to only override model defaults if set
    # should default to None in command line args/cfg. Remove them if they are present and not set so that
    # non-supporting models don't break and default args remain in effect.
    kwargs = {k: v for k, v in kwargs.items() if v is not None}

    model_source, model_name = parse_model_name(model_name)
    if model_source == 'hf-hub':
        assert not pretrained_cfg, 'pretrained_cfg should not be set when sourcing model from Hugging Face Hub.'
        # For model names specified in the form `hf-hub:path/architecture_name@revision`,
        # load model weights + pretrained_cfg from Hugging Face hub.
        pretrained_cfg, model_name, model_args = load_model_config_from_hf(
            model_name,
            cache_dir=cache_dir,
        )
        if model_args:
            for k, v in model_args.items():
                kwargs.setdefault(k, v)
    else:
        model_name, pretrained_tag = split_model_name_tag(model_name)
        if pretrained_tag and not pretrained_cfg:
            # a valid pretrained_cfg argument takes priority over tag in model name
            pretrained_cfg = pretrained_tag

    if not is_model(model_name):
        raise RuntimeError('Unknown model (%s)' % model_name)

    create_fn = model_entrypoint(model_name)
    with set_layer_config(scriptable=scriptable, exportable=exportable, no_jit=no_jit):
        model = create_fn(
            pretrained=pretrained,
            pretrained_cfg=pretrained_cfg,
            pretrained_cfg_overlay=pretrained_cfg_overlay,
            cache_dir=cache_dir,
            **kwargs,
        )

    if checkpoint_path:
        load_checkpoint(model, checkpoint_path)

    return model
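The `hf-hub` branch above lets `create_model` pull both the `pretrained_cfg` and weights straight from the Hub. A minimal sketch (the repo id is illustrative; any `hf-hub:org/name[@revision]` id takes the same path):

```py
>>> from timm import create_model
>>> # '@' pins a revision, parsed by hf_split() in timm/models/_hub.py
>>> model = create_model('hf-hub:timm/mobilenetv3_large_100.ra_in1k', pretrained=True)
```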
pytorch-image-models/timm/models/_features.py
ADDED
@@ -0,0 +1,484 @@
""" PyTorch Feature Extraction Helpers

A collection of classes, functions, modules to help extract features from models
and provide a common interface for describing them.

The return_layers, module re-writing idea inspired by torchvision IntermediateLayerGetter
https://github.com/pytorch/vision/blob/d88d8961ae51507d0cb680329d985b1488b1b76b/torchvision/models/_utils.py

Hacked together by / Copyright 2020 Ross Wightman
"""
from collections import OrderedDict, defaultdict
from copy import deepcopy
from functools import partial
from typing import Dict, List, Optional, Sequence, Tuple, Union

import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint

from timm.layers import Format, _assert


__all__ = [
    'FeatureInfo', 'FeatureHooks', 'FeatureDictNet', 'FeatureListNet', 'FeatureHookNet', 'FeatureGetterNet',
    'feature_take_indices'
]


def feature_take_indices(
        num_features: int,
        indices: Optional[Union[int, List[int]]] = None,
        as_set: bool = False,
) -> Tuple[List[int], int]:
    """ Determine the absolute feature indices to 'take' from.

    Note: This function can be called in forward() so must be torchscript compatible,
    which requires some incomplete typing and workaround hacks.

    Args:
        num_features: total number of features to select from
        indices: indices to select,
            None -> select all
            int -> select last n
            list/tuple of int -> return specified (-ve indices specify from end)
        as_set: return as a set

    Returns:
        List (or set) of absolute (from beginning) indices, Maximum index
    """
    if indices is None:
        indices = num_features  # all features if None

    if isinstance(indices, int):
        # convert int -> last n indices
        _assert(0 < indices <= num_features, f'last-n ({indices}) is out of range (1 to {num_features})')
        take_indices = [num_features - indices + i for i in range(indices)]
    else:
        take_indices: List[int] = []
        for i in indices:
            idx = num_features + i if i < 0 else i
            _assert(0 <= idx < num_features, f'feature index {idx} is out of range (0 to {num_features - 1})')
            take_indices.append(idx)

    if not torch.jit.is_scripting() and as_set:
        return set(take_indices), max(take_indices)

    return take_indices, max(take_indices)
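
# Illustrative examples for feature_take_indices(), assuming a model with 5 feature maps:
#   feature_take_indices(5)          -> ([0, 1, 2, 3, 4], 4)  # None selects all
#   feature_take_indices(5, 2)       -> ([3, 4], 4)           # int n selects the last n
#   feature_take_indices(5, (0, -1)) -> ([0, 4], 4)           # negative indices resolve from the end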

def _out_indices_as_tuple(x: Union[int, Tuple[int, ...]]) -> Tuple[int, ...]:
    if isinstance(x, int):
        # if indices is an int, take last N features
        return tuple(range(-x, 0))
    return tuple(x)


OutIndicesT = Union[int, Tuple[int, ...]]


class FeatureInfo:

    def __init__(
            self,
            feature_info: List[Dict],
            out_indices: OutIndicesT,
    ):
        out_indices = _out_indices_as_tuple(out_indices)
        prev_reduction = 1
        for i, fi in enumerate(feature_info):
            # sanity check the mandatory fields, there may be additional fields depending on the model
            assert 'num_chs' in fi and fi['num_chs'] > 0
            assert 'reduction' in fi and fi['reduction'] >= prev_reduction
            prev_reduction = fi['reduction']
            assert 'module' in fi
            fi.setdefault('index', i)
        self.out_indices = out_indices
        self.info = feature_info

    def from_other(self, out_indices: OutIndicesT):
        out_indices = _out_indices_as_tuple(out_indices)
        return FeatureInfo(deepcopy(self.info), out_indices)

    def get(self, key: str, idx: Optional[Union[int, List[int]]] = None):
        """ Get value by key at specified index (indices)
        if idx == None, returns value for key at each output index
        if idx is an integer, return value for that feature module index (ignoring output indices)
        if idx is a list/tuple, return value for each module index (ignoring output indices)
        """
        if idx is None:
            return [self.info[i][key] for i in self.out_indices]
        if isinstance(idx, (tuple, list)):
            return [self.info[i][key] for i in idx]
        else:
            return self.info[idx][key]

    def get_dicts(self, keys: Optional[List[str]] = None, idx: Optional[Union[int, List[int]]] = None):
        """ return info dicts for specified keys (or all if None) at specified indices (or out_indices if None)
        """
        if idx is None:
            if keys is None:
                return [self.info[i] for i in self.out_indices]
            else:
                return [{k: self.info[i][k] for k in keys} for i in self.out_indices]
        if isinstance(idx, (tuple, list)):
            return [self.info[i] if keys is None else {k: self.info[i][k] for k in keys} for i in idx]
        else:
            return self.info[idx] if keys is None else {k: self.info[idx][k] for k in keys}

    def channels(self, idx: Optional[Union[int, List[int]]] = None):
        """ feature channels accessor
        """
        return self.get('num_chs', idx)

    def reduction(self, idx: Optional[Union[int, List[int]]] = None):
        """ feature reduction (output stride) accessor
        """
        return self.get('reduction', idx)

    def module_name(self, idx: Optional[Union[int, List[int]]] = None):
        """ feature module name accessor
        """
        return self.get('module', idx)

    def __getitem__(self, item):
        return self.info[item]

    def __len__(self):
        return len(self.info)
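
# Illustrative FeatureInfo usage; the per-stage dicts below are made-up placeholders:
#   info = FeatureInfo([
#       dict(num_chs=64, reduction=4, module='layer1'),
#       dict(num_chs=128, reduction=8, module='layer2'),
#   ], out_indices=(0, 1))
#   info.channels()   -> [64, 128]
#   info.reduction()  -> [4, 8]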

class FeatureHooks:
    """ Feature Hook Helper

    This module helps with the setup and extraction of hooks for extracting features from
    internal nodes in a model by node name.

    FIXME This works well in eager Python but needs redesign for torchscript.
    """

    def __init__(
            self,
            hooks: Sequence[Union[str, Dict]],
            named_modules: dict,
            out_map: Sequence[Union[int, str]] = None,
            default_hook_type: str = 'forward',
    ):
        # setup feature hooks
        self._feature_outputs = defaultdict(OrderedDict)
        self._handles = []
        modules = {k: v for k, v in named_modules}
        for i, h in enumerate(hooks):
            hook_name = h if isinstance(h, str) else h['module']
            m = modules[hook_name]
            hook_id = out_map[i] if out_map else hook_name
            hook_fn = partial(self._collect_output_hook, hook_id)
            hook_type = default_hook_type
            if isinstance(h, dict):
                hook_type = h.get('hook_type', default_hook_type)
            if hook_type == 'forward_pre':
                handle = m.register_forward_pre_hook(hook_fn)
            elif hook_type == 'forward':
                handle = m.register_forward_hook(hook_fn)
            else:
                assert False, "Unsupported hook type"
            self._handles.append(handle)

    def _collect_output_hook(self, hook_id, *args):
        x = args[-1]  # tensor we want is last argument, output for fwd, input for fwd_pre
        if isinstance(x, tuple):
            x = x[0]  # unwrap input tuple
        self._feature_outputs[x.device][hook_id] = x

    def get_output(self, device) -> Dict[str, torch.Tensor]:
        output = self._feature_outputs[device]
        self._feature_outputs[device] = OrderedDict()  # clear after reading
        return output


def _module_list(module, flatten_sequential=False):
    # a yield/iter would be better for this but wouldn't be compatible with torchscript
    ml = []
    for name, module in module.named_children():
        if flatten_sequential and isinstance(module, nn.Sequential):
            # first level of Sequential containers is flattened into containing model
            for child_name, child_module in module.named_children():
                combined = [name, child_name]
                ml.append(('_'.join(combined), '.'.join(combined), child_module))
        else:
            ml.append((name, name, module))
    return ml


def _get_feature_info(net, out_indices: OutIndicesT):
    feature_info = getattr(net, 'feature_info')
    if isinstance(feature_info, FeatureInfo):
        return feature_info.from_other(out_indices)
    elif isinstance(feature_info, (list, tuple)):
        return FeatureInfo(net.feature_info, out_indices)
    else:
        assert False, "Provided feature_info is not valid"


def _get_return_layers(feature_info, out_map):
    module_names = feature_info.module_name()
    return_layers = {}
    for i, name in enumerate(module_names):
        return_layers[name] = out_map[i] if out_map is not None else feature_info.out_indices[i]
    return return_layers


class FeatureDictNet(nn.ModuleDict):
    """ Feature extractor with OrderedDict return

    Wrap a model and extract features as specified by the out indices, the network is
    partially re-built from contained modules.

    There is a strong assumption that the modules have been registered into the model in the same
    order as they are used. There should be no reuse of the same nn.Module more than once, including
    trivial modules like `self.relu = nn.ReLU`.

    Only submodules that are directly assigned to the model class (`model.feature1`) or at most
    one Sequential container deep (`model.features.1`, with flatten_sequential=True) can be captured.
    All Sequential containers that are directly assigned to the original model will have their
    modules assigned to this module with the name `model.features.1` being changed to `model.features_1`
    """
    def __init__(
            self,
            model: nn.Module,
            out_indices: OutIndicesT = (0, 1, 2, 3, 4),
            out_map: Sequence[Union[int, str]] = None,
            output_fmt: str = 'NCHW',
            feature_concat: bool = False,
            flatten_sequential: bool = False,
    ):
        """
        Args:
            model: Model from which to extract features.
            out_indices: Output indices of the model features to extract.
            out_map: Return id mapping for each output index, otherwise str(index) is used.
            feature_concat: Concatenate intermediate features that are lists or tuples instead of selecting
                first element e.g. `x[0]`
            flatten_sequential: Flatten first two-levels of sequential modules in model (re-writes model modules)
        """
        super(FeatureDictNet, self).__init__()
        self.feature_info = _get_feature_info(model, out_indices)
        self.output_fmt = Format(output_fmt)
        self.concat = feature_concat
        self.grad_checkpointing = False
        self.return_layers = {}

        return_layers = _get_return_layers(self.feature_info, out_map)
        modules = _module_list(model, flatten_sequential=flatten_sequential)
        remaining = set(return_layers.keys())
        layers = OrderedDict()
        for new_name, old_name, module in modules:
            layers[new_name] = module
            if old_name in remaining:
                # return id has to be consistently str type for torchscript
                self.return_layers[new_name] = str(return_layers[old_name])
                remaining.remove(old_name)
            if not remaining:
                break
        assert not remaining and len(self.return_layers) == len(return_layers), \
            f'Return layers ({remaining}) are not present in model'
        self.update(layers)

    def set_grad_checkpointing(self, enable: bool = True):
        self.grad_checkpointing = enable

    def _collect(self, x) -> Dict[str, torch.Tensor]:
        out = OrderedDict()
        for i, (name, module) in enumerate(self.items()):
            if self.grad_checkpointing and not torch.jit.is_scripting():
                # Skipping checkpoint of first module because need a gradient at input
                # Skipping last because networks with in-place ops might fail w/ checkpointing enabled
                # NOTE: first_or_last module could be static, but recalc in is_scripting guard to avoid jit issues
                first_or_last_module = i == 0 or i == max(len(self) - 1, 0)
                x = module(x) if first_or_last_module else checkpoint(module, x)
            else:
                x = module(x)

            if name in self.return_layers:
                out_id = self.return_layers[name]
                if isinstance(x, (tuple, list)):
                    # If model tap is a tuple or list, concat or select first element
                    # FIXME this may need to be more generic / flexible for some nets
                    out[out_id] = torch.cat(x, 1) if self.concat else x[0]
                else:
                    out[out_id] = x
        return out

    def forward(self, x) -> Dict[str, torch.Tensor]:
        return self._collect(x)


class FeatureListNet(FeatureDictNet):
    """ Feature extractor with list return

    A specialization of FeatureDictNet that always returns features as a list (values() of dict).
    """
    def __init__(
            self,
            model: nn.Module,
            out_indices: OutIndicesT = (0, 1, 2, 3, 4),
            output_fmt: str = 'NCHW',
            feature_concat: bool = False,
            flatten_sequential: bool = False,
    ):
        """
        Args:
            model: Model from which to extract features.
            out_indices: Output indices of the model features to extract.
            feature_concat: Concatenate intermediate features that are lists or tuples instead of selecting
                first element e.g. `x[0]`
            flatten_sequential: Flatten first two-levels of sequential modules in model (re-writes model modules)
        """
        super().__init__(
            model,
            out_indices=out_indices,
            output_fmt=output_fmt,
            feature_concat=feature_concat,
            flatten_sequential=flatten_sequential,
        )

    def forward(self, x) -> List[torch.Tensor]:
        return list(self._collect(x).values())
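
# Usage sketch: `features_only=True` in timm.create_model() wraps most convnets in
# FeatureListNet under the hood, e.g.
#   net = timm.create_model('resnet50', features_only=True, out_indices=(1, 2, 3))
#   feats = net(torch.randn(1, 3, 224, 224))  # list of 3 tensors, one per out index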

class FeatureHookNet(nn.ModuleDict):
    """ FeatureHookNet

    Wrap a model and extract features specified by the out indices using forward/forward-pre hooks.

    If `no_rewrite` is True, features are extracted via hooks without modifying the underlying
    network in any way.

    If `no_rewrite` is False, the model will be re-written as in the
    FeatureList/FeatureDict case by folding first to second (Sequential only) level modules into this one.

    FIXME this does not currently work with Torchscript, see FeatureHooks class
    """
    def __init__(
            self,
            model: nn.Module,
            out_indices: OutIndicesT = (0, 1, 2, 3, 4),
            out_map: Optional[Sequence[Union[int, str]]] = None,
            return_dict: bool = False,
            output_fmt: str = 'NCHW',
            no_rewrite: Optional[bool] = None,
            flatten_sequential: bool = False,
            default_hook_type: str = 'forward',
    ):
        """
        Args:
            model: Model from which to extract features.
            out_indices: Output indices of the model features to extract.
            out_map: Return id mapping for each output index, otherwise str(index) is used.
            return_dict: Output features as a dict.
            no_rewrite: Enforce that model is not re-written if True, ie no modules are removed / changed.
                flatten_sequential arg must also be False if this is set True.
            flatten_sequential: Re-write modules by flattening first two levels of nn.Sequential containers.
            default_hook_type: The default hook type to use if not specified in model.feature_info.
        """
        super().__init__()
        assert not torch.jit.is_scripting()
        self.feature_info = _get_feature_info(model, out_indices)
        self.return_dict = return_dict
        self.output_fmt = Format(output_fmt)
        self.grad_checkpointing = False
        if no_rewrite is None:
            no_rewrite = not flatten_sequential
        layers = OrderedDict()
        hooks = []
        if no_rewrite:
            assert not flatten_sequential
            if hasattr(model, 'reset_classifier'):  # make sure classifier is removed?
                model.reset_classifier(0)
            layers['body'] = model
            hooks.extend(self.feature_info.get_dicts())
        else:
            modules = _module_list(model, flatten_sequential=flatten_sequential)
            remaining = {
                f['module']: f['hook_type'] if 'hook_type' in f else default_hook_type
                for f in self.feature_info.get_dicts()
            }
            for new_name, old_name, module in modules:
                layers[new_name] = module
                for fn, fm in module.named_modules(prefix=old_name):
                    if fn in remaining:
                        hooks.append(dict(module=fn, hook_type=remaining[fn]))
                        del remaining[fn]
                if not remaining:
                    break
            assert not remaining, f'Return layers ({remaining}) are not present in model'
        self.update(layers)
        self.hooks = FeatureHooks(hooks, model.named_modules(), out_map=out_map)

    def set_grad_checkpointing(self, enable: bool = True):
        self.grad_checkpointing = enable

    def forward(self, x):
        for i, (name, module) in enumerate(self.items()):
            if self.grad_checkpointing and not torch.jit.is_scripting():
                # Skipping checkpoint of first module because need a gradient at input
                # Skipping last because networks with in-place ops might fail w/ checkpointing enabled
                # NOTE: first_or_last module could be static, but recalc in is_scripting guard to avoid jit issues
                first_or_last_module = i == 0 or i == max(len(self) - 1, 0)
                x = module(x) if first_or_last_module else checkpoint(module, x)
            else:
                x = module(x)
        out = self.hooks.get_output(x.device)
        return out if self.return_dict else list(out.values())


class FeatureGetterNet(nn.ModuleDict):
    """ FeatureGetterNet

    Wrap models with a feature getter method, like 'get_intermediate_layers'
    """
    def __init__(
            self,
            model: nn.Module,
            out_indices: OutIndicesT = 4,
            out_map: Optional[Sequence[Union[int, str]]] = None,
            return_dict: bool = False,
            output_fmt: str = 'NCHW',
            norm: bool = False,
            prune: bool = True,
    ):
        """
        Args:
            model: Model to wrap.
            out_indices: Indices of features to extract.
            out_map: Remap feature names for dict output (WIP, not supported).
            return_dict: Return features as dictionary instead of list (WIP, not supported).
            norm: Apply final model norm to all output features (if possible).
            prune: Prune model layers beyond the deepest requested index (if the model supports it).
        """
        super().__init__()
        if prune and hasattr(model, 'prune_intermediate_layers'):
            # replace out_indices after they've been normalized, -ve indices will be invalid after prune
            out_indices = model.prune_intermediate_layers(
                out_indices,
                prune_norm=not norm,
            )
        self.feature_info = _get_feature_info(model, out_indices)
        self.model = model
        self.out_indices = out_indices
        self.out_map = out_map
        self.return_dict = return_dict
        self.output_fmt = Format(output_fmt)
        self.norm = norm

    def forward(self, x):
        features = self.model.forward_intermediates(
            x,
            indices=self.out_indices,
            norm=self.norm,
            output_fmt=self.output_fmt,
            intermediates_only=True,
        )
        return features
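`FeatureGetterNet` above defers entirely to the wrapped model's `forward_intermediates()`, so it only applies to architectures implementing that method (in recent timm versions, most ViT-style models). A hedged sketch:

```py
>>> import timm, torch
>>> # ViT-style models route through FeatureGetterNet rather than module re-writing
>>> vit = timm.create_model('vit_base_patch16_224', features_only=True, out_indices=4)
>>> feats = vit(torch.randn(1, 3, 224, 224))  # last 4 intermediate feature maps
```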
pytorch-image-models/timm/models/_features_fx.py
ADDED
@@ -0,0 +1,179 @@
""" PyTorch FX Based Feature Extraction Helpers
Using https://pytorch.org/vision/stable/feature_extraction.html
"""
from typing import Callable, Dict, List, Optional, Union, Tuple, Type

import torch
from torch import nn

from ._features import _get_feature_info, _get_return_layers

try:
    # NOTE we wrap torchvision fns to use timm leaf / no trace definitions
    from torchvision.models.feature_extraction import create_feature_extractor as _create_feature_extractor
    from torchvision.models.feature_extraction import get_graph_node_names as _get_graph_node_names
    has_fx_feature_extraction = True
except ImportError:
    has_fx_feature_extraction = False

# Layers we want to treat as leaf modules
from timm.layers import Conv2dSame, ScaledStdConv2dSame, CondConv2d, StdConv2dSame, Format
from timm.layers import resample_abs_pos_embed, resample_abs_pos_embed_nhwc
from timm.layers.non_local_attn import BilinearAttnTransform
from timm.layers.pool2d_same import MaxPool2dSame, AvgPool2dSame
from timm.layers.norm_act import (
    BatchNormAct2d,
    SyncBatchNormAct,
    FrozenBatchNormAct2d,
    GroupNormAct,
    GroupNorm1Act,
    LayerNormAct,
    LayerNormAct2d
)

__all__ = ['register_notrace_module', 'is_notrace_module', 'get_notrace_modules',
           'register_notrace_function', 'is_notrace_function', 'get_notrace_functions',
           'create_feature_extractor', 'get_graph_node_names', 'FeatureGraphNet', 'GraphExtractNet']


# NOTE: By default, any modules from timm.models.layers that we want to treat as leaf modules go here
# BUT modules from timm.models should use the registration mechanism below
_leaf_modules = {
    BilinearAttnTransform,  # reason: flow control t <= 1
    # Reason: get_same_padding has a max which raises a control flow error
    Conv2dSame, MaxPool2dSame, ScaledStdConv2dSame, StdConv2dSame, AvgPool2dSame,
    CondConv2d,  # reason: TypeError: F.conv2d received Proxy in groups=self.groups * B (because B = x.shape[0]),
    BatchNormAct2d,
    SyncBatchNormAct,
    FrozenBatchNormAct2d,
    GroupNormAct,
    GroupNorm1Act,
    LayerNormAct,
    LayerNormAct2d,
}

try:
    from timm.layers import InplaceAbn
    _leaf_modules.add(InplaceAbn)
except ImportError:
    pass


def register_notrace_module(module: Type[nn.Module]):
    """
    Any module not under timm.models.layers should get this decorator if we don't want to trace through it.
    """
    _leaf_modules.add(module)
    return module


def is_notrace_module(module: Type[nn.Module]):
    return module in _leaf_modules


def get_notrace_modules():
    return list(_leaf_modules)


# Functions we want to autowrap (treat them as leaves)
_autowrap_functions = {
    resample_abs_pos_embed,
    resample_abs_pos_embed_nhwc,
}


def register_notrace_function(func: Callable):
    """
    Decorator for functions which ought not to be traced through
    """
    _autowrap_functions.add(func)
    return func


def is_notrace_function(func: Callable):
    return func in _autowrap_functions


def get_notrace_functions():
    return list(_autowrap_functions)
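
# Usage sketch: mark custom layers/functions as FX leaves so symbolic tracing doesn't descend
# into data-dependent control flow (the names below are placeholders):
#
#   @register_notrace_module
#   class MyDynamicBlock(nn.Module):
#       ...
#
#   @register_notrace_function
#   def my_dynamic_fn(x):
#       ...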

def get_graph_node_names(model: nn.Module) -> Tuple[List[str], List[str]]:
    return _get_graph_node_names(
        model,
        tracer_kwargs={'leaf_modules': list(_leaf_modules), 'autowrap_functions': list(_autowrap_functions)}
    )


def create_feature_extractor(model: nn.Module, return_nodes: Union[Dict[str, str], List[str]]):
    assert has_fx_feature_extraction, 'Please update to PyTorch 1.10+, torchvision 0.11+ for FX feature extraction'
    return _create_feature_extractor(
        model, return_nodes,
        tracer_kwargs={'leaf_modules': list(_leaf_modules), 'autowrap_functions': list(_autowrap_functions)}
    )


class FeatureGraphNet(nn.Module):
    """ A FX Graph based feature extractor that works with the model feature_info metadata
    """
    return_dict: torch.jit.Final[bool]

    def __init__(
            self,
            model: nn.Module,
            out_indices: Tuple[int, ...],
            out_map: Optional[Dict] = None,
            output_fmt: str = 'NCHW',
            return_dict: bool = False,
    ):
        super().__init__()
        assert has_fx_feature_extraction, 'Please update to PyTorch 1.10+, torchvision 0.11+ for FX feature extraction'
        self.feature_info = _get_feature_info(model, out_indices)
        if out_map is not None:
            assert len(out_map) == len(out_indices)
        self.output_fmt = Format(output_fmt)
        return_nodes = _get_return_layers(self.feature_info, out_map)
        self.graph_module = create_feature_extractor(model, return_nodes)
        self.return_dict = return_dict

    def forward(self, x):
        out = self.graph_module(x)
        if self.return_dict:
            return out
        return list(out.values())


class GraphExtractNet(nn.Module):
    """ A standalone feature extraction wrapper that maps dict -> list or single tensor
    NOTE:
      * one can use feature_extractor directly if dictionary output is desired
      * unlike FeatureGraphNet, this is intended to be used standalone and not with model feature_info
        metadata for builtin feature extraction mode
      * create_feature_extractor can be used directly if dictionary output is acceptable

    Args:
        model: model to extract features from
        return_nodes: node names to return features from (dict or list)
        squeeze_out: if only one output, and output in list format, flatten to single tensor
        return_dict: return as dictionary from extractor with node names as keys, ignores squeeze_out arg
    """
    return_dict: torch.jit.Final[bool]

    def __init__(
            self,
            model: nn.Module,
            return_nodes: Union[Dict[str, str], List[str]],
            squeeze_out: bool = True,
            return_dict: bool = False,
    ):
        super().__init__()
        self.squeeze_out = squeeze_out
        self.graph_module = create_feature_extractor(model, return_nodes)
        self.return_dict = return_dict

    def forward(self, x) -> Union[List[torch.Tensor], torch.Tensor]:
        out = self.graph_module(x)
        if self.return_dict:
            return out
        out = list(out.values())
        return out[0] if self.squeeze_out and len(out) == 1 else out
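`GraphExtractNet` pairs naturally with `get_graph_node_names` to discover valid node names first. A minimal sketch (the node name is typical for resnet-style timm models but should be confirmed against the printed names):

```py
>>> import timm, torch
>>> model = timm.create_model('resnet50')
>>> train_nodes, eval_nodes = get_graph_node_names(model)
>>> net = GraphExtractNet(model, return_nodes=['layer3'])
>>> out = net(torch.randn(1, 3, 224, 224))  # single tensor since squeeze_out=True by default
```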
pytorch-image-models/timm/models/_hub.py
ADDED
@@ -0,0 +1,465 @@
import hashlib
import json
import logging
import os
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Iterable, List, Optional, Tuple, Union

import torch
from torch.hub import HASH_REGEX, download_url_to_file, urlparse

try:
    from torch.hub import get_dir
except ImportError:
    from torch.hub import _get_torch_home as get_dir

try:
    import safetensors.torch
    _has_safetensors = True
except ImportError:
    _has_safetensors = False

try:
    from typing import Literal
except ImportError:
    from typing_extensions import Literal

from timm import __version__
from timm.models._pretrained import filter_pretrained_cfg

try:
    from huggingface_hub import (
        create_repo, get_hf_file_metadata,
        hf_hub_download, hf_hub_url,
        repo_type_and_id_from_hf_id, upload_folder)
    from huggingface_hub.utils import EntryNotFoundError
    hf_hub_download = partial(hf_hub_download, library_name="timm", library_version=__version__)
    _has_hf_hub = True
except ImportError:
    hf_hub_download = None
    _has_hf_hub = False

_logger = logging.getLogger(__name__)

__all__ = ['get_cache_dir', 'download_cached_file', 'has_hf_hub', 'hf_split', 'load_model_config_from_hf',
           'load_state_dict_from_hf', 'save_for_hf', 'push_to_hf_hub']

# Default name for a weights file hosted on the Hugging Face Hub.
HF_WEIGHTS_NAME = "pytorch_model.bin"  # default pytorch pkl
HF_SAFE_WEIGHTS_NAME = "model.safetensors"  # safetensors version
HF_OPEN_CLIP_WEIGHTS_NAME = "open_clip_pytorch_model.bin"  # default pytorch pkl
HF_OPEN_CLIP_SAFE_WEIGHTS_NAME = "open_clip_model.safetensors"  # safetensors version


def get_cache_dir(child_dir: str = ''):
    """
    Returns the location of the directory where models are cached (and creates it if necessary).
    """
    # Issue warning to move data if old env is set
    if os.getenv('TORCH_MODEL_ZOO'):
        _logger.warning('TORCH_MODEL_ZOO is deprecated, please use env TORCH_HOME instead')

    hub_dir = get_dir()
    child_dir = () if not child_dir else (child_dir,)
    model_dir = os.path.join(hub_dir, 'checkpoints', *child_dir)
    os.makedirs(model_dir, exist_ok=True)
    return model_dir


def download_cached_file(
        url: Union[str, List[str], Tuple[str, str]],
        check_hash: bool = True,
        progress: bool = False,
        cache_dir: Optional[Union[str, Path]] = None,
):
    if isinstance(url, (list, tuple)):
        url, filename = url
    else:
        parts = urlparse(url)
        filename = os.path.basename(parts.path)
    if cache_dir:
        os.makedirs(cache_dir, exist_ok=True)
    else:
        cache_dir = get_cache_dir()
    cached_file = os.path.join(cache_dir, filename)
    if not os.path.exists(cached_file):
        _logger.info('Downloading: "{}" to {}\n'.format(url, cached_file))
        hash_prefix = None
        if check_hash:
            r = HASH_REGEX.search(filename)  # r is Optional[Match[str]]
            hash_prefix = r.group(1) if r else None
        download_url_to_file(url, cached_file, hash_prefix, progress=progress)
    return cached_file


def check_cached_file(
        url: Union[str, List[str], Tuple[str, str]],
        check_hash: bool = True,
        cache_dir: Optional[Union[str, Path]] = None,
):
    if isinstance(url, (list, tuple)):
        url, filename = url
    else:
        parts = urlparse(url)
        filename = os.path.basename(parts.path)
    if not cache_dir:
        cache_dir = get_cache_dir()
    cached_file = os.path.join(cache_dir, filename)
    if os.path.exists(cached_file):
        if check_hash:
            r = HASH_REGEX.search(filename)  # r is Optional[Match[str]]
            hash_prefix = r.group(1) if r else None
            if hash_prefix:
                with open(cached_file, 'rb') as f:
                    hd = hashlib.sha256(f.read()).hexdigest()
                if hd[:len(hash_prefix)] != hash_prefix:
                    return False
        return True
    return False


def has_hf_hub(necessary: bool = False):
    if not _has_hf_hub and necessary:
        # if no HF Hub module installed, and it is necessary to continue, raise error
        raise RuntimeError(
            'Hugging Face hub model specified but package not installed. Run `pip install huggingface_hub`.')
    return _has_hf_hub


def hf_split(hf_id: str):
    # FIXME I may change @ -> # and be parsed as fragment in a URI model name scheme
    rev_split = hf_id.split('@')
    assert 0 < len(rev_split) <= 2, 'hf_hub id should only contain one @ character to identify revision.'
    hf_model_id = rev_split[0]
    hf_revision = rev_split[-1] if len(rev_split) > 1 else None
    return hf_model_id, hf_revision
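
# Examples:
#   hf_split('timm/resnet50.a1_in1k')      -> ('timm/resnet50.a1_in1k', None)
#   hf_split('timm/resnet50.a1_in1k@main') -> ('timm/resnet50.a1_in1k', 'main')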

def load_cfg_from_json(json_file: Union[str, Path]):
    with open(json_file, "r", encoding="utf-8") as reader:
        text = reader.read()
    return json.loads(text)


def download_from_hf(
        model_id: str,
        filename: str,
        cache_dir: Optional[Union[str, Path]] = None,
):
    hf_model_id, hf_revision = hf_split(model_id)
    return hf_hub_download(
        hf_model_id,
        filename,
        revision=hf_revision,
        cache_dir=cache_dir,
    )


def load_model_config_from_hf(
        model_id: str,
        cache_dir: Optional[Union[str, Path]] = None,
):
    assert has_hf_hub(True)
    cached_file = download_from_hf(model_id, 'config.json', cache_dir=cache_dir)

    hf_config = load_cfg_from_json(cached_file)
    if 'pretrained_cfg' not in hf_config:
        # old form, pull pretrain_cfg out of the base dict
        pretrained_cfg = hf_config
        hf_config = {}
        hf_config['architecture'] = pretrained_cfg.pop('architecture')
        hf_config['num_features'] = pretrained_cfg.pop('num_features', None)
        if 'labels' in pretrained_cfg:  # deprecated name for 'label_names'
            pretrained_cfg['label_names'] = pretrained_cfg.pop('labels')
        hf_config['pretrained_cfg'] = pretrained_cfg

    # NOTE currently discarding parent config as only arch name and pretrained_cfg used in timm right now
    pretrained_cfg = hf_config['pretrained_cfg']
    pretrained_cfg['hf_hub_id'] = model_id  # insert hf_hub id for pretrained weight load during model creation
    pretrained_cfg['source'] = 'hf-hub'

    # model should be created with base config num_classes if it exists
    if 'num_classes' in hf_config:
        pretrained_cfg['num_classes'] = hf_config['num_classes']

    # label meta-data in base config overrides saved pretrained_cfg on load
    if 'label_names' in hf_config:
        pretrained_cfg['label_names'] = hf_config.pop('label_names')
    if 'label_descriptions' in hf_config:
        pretrained_cfg['label_descriptions'] = hf_config.pop('label_descriptions')

    model_args = hf_config.get('model_args', {})
    model_name = hf_config['architecture']
    return pretrained_cfg, model_name, model_args
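
# Sketch: the returned triple feeds create_model()'s hf-hub branch, e.g.
#   pretrained_cfg, model_name, model_args = load_model_config_from_hf('timm/resnet50.a1_in1k')
#   # model_name -> architecture string such as 'resnet50'
#   # model_args -> optional kwargs stored in the hub config (may be empty)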

def load_state_dict_from_hf(
        model_id: str,
        filename: str = HF_WEIGHTS_NAME,
        weights_only: bool = False,
        cache_dir: Optional[Union[str, Path]] = None,
):
    assert has_hf_hub(True)
    hf_model_id, hf_revision = hf_split(model_id)

    # Look for .safetensors alternatives and load from it if it exists
    if _has_safetensors:
        for safe_filename in _get_safe_alternatives(filename):
            try:
                cached_safe_file = hf_hub_download(
                    repo_id=hf_model_id,
                    filename=safe_filename,
                    revision=hf_revision,
                    cache_dir=cache_dir,
                )
                _logger.info(
                    f"[{model_id}] Safe alternative available for '{filename}' "
                    f"(as '{safe_filename}'). Loading weights using safetensors.")
                return safetensors.torch.load_file(cached_safe_file, device="cpu")
            except EntryNotFoundError:
                pass

    # Otherwise, load using pytorch.load
    cached_file = hf_hub_download(
        hf_model_id,
        filename=filename,
        revision=hf_revision,
        cache_dir=cache_dir,
    )
    _logger.debug(f"[{model_id}] Safe alternative not found for '{filename}'. Loading weights using default pytorch.")
    try:
        state_dict = torch.load(cached_file, map_location='cpu', weights_only=weights_only)
    except TypeError:
        state_dict = torch.load(cached_file, map_location='cpu')
    return state_dict


def load_custom_from_hf(
        model_id: str,
        filename: str,
        model: torch.nn.Module,
        cache_dir: Optional[Union[str, Path]] = None,
):
    assert has_hf_hub(True)
    hf_model_id, hf_revision = hf_split(model_id)
    cached_file = hf_hub_download(
        hf_model_id,
        filename=filename,
        revision=hf_revision,
        cache_dir=cache_dir,
    )
    return model.load_pretrained(cached_file)


def save_config_for_hf(
        model: torch.nn.Module,
        config_path: Path,
        model_config: Optional[dict] = None,
        model_args: Optional[dict] = None
):
    model_config = model_config or {}
    hf_config = {}
    pretrained_cfg = filter_pretrained_cfg(model.pretrained_cfg, remove_source=True, remove_null=True)
    # set some values at root config level
    hf_config['architecture'] = pretrained_cfg.pop('architecture')
    hf_config['num_classes'] = model_config.pop('num_classes', model.num_classes)

    # NOTE these attr saved for informational purposes, do not impact model build
    hf_config['num_features'] = model_config.pop('num_features', model.num_features)
    global_pool_type = model_config.pop('global_pool', getattr(model, 'global_pool', None))
    if isinstance(global_pool_type, str) and global_pool_type:
        hf_config['global_pool'] = global_pool_type

    # Save class label info
    if 'labels' in model_config:
        _logger.warning(
            "'labels' as a config field is deprecated. Please use 'label_names' and 'label_descriptions'."
            " Renaming provided 'labels' field to 'label_names'.")
        model_config.setdefault('label_names', model_config.pop('labels'))

    label_names = model_config.pop('label_names', None)
    if label_names:
        assert isinstance(label_names, (dict, list, tuple))
        # map label id (classifier index) -> unique label name (ie synset for ImageNet, MID for OpenImages)
        # can be a dict id: name if there are id gaps, or tuple/list if no gaps.
        hf_config['label_names'] = label_names

    label_descriptions = model_config.pop('label_descriptions', None)
    if label_descriptions:
        assert isinstance(label_descriptions, dict)
        # maps label names -> descriptions
        hf_config['label_descriptions'] = label_descriptions

    if model_args:
        hf_config['model_args'] = model_args

    hf_config['pretrained_cfg'] = pretrained_cfg
    hf_config.update(model_config)

    with config_path.open('w') as f:
        json.dump(hf_config, f, indent=2)


def save_for_hf(
        model: torch.nn.Module,
        save_directory: str,
        model_config: Optional[dict] = None,
        model_args: Optional[dict] = None,
        safe_serialization: Union[bool, Literal["both"]] = False,
):
    assert has_hf_hub(True)
    save_directory = Path(save_directory)
    save_directory.mkdir(exist_ok=True, parents=True)

    # Save model weights, either safely (using safetensors), or using legacy pytorch approach or both.
    tensors = model.state_dict()
    if safe_serialization is True or safe_serialization == "both":
        assert _has_safetensors, "`pip install safetensors` to use .safetensors"
        safetensors.torch.save_file(tensors, save_directory / HF_SAFE_WEIGHTS_NAME)
    if safe_serialization is False or safe_serialization == "both":
        torch.save(tensors, save_directory / HF_WEIGHTS_NAME)

    config_path = save_directory / 'config.json'
    save_config_for_hf(
        model,
        config_path,
        model_config=model_config,
        model_args=model_args,
    )


def push_to_hf_hub(
        model: torch.nn.Module,
        repo_id: str,
        commit_message: str = 'Add model',
        token: Optional[str] = None,
        revision: Optional[str] = None,
        private: bool = False,
        create_pr: bool = False,
        model_config: Optional[dict] = None,
        model_card: Optional[dict] = None,
        model_args: Optional[dict] = None,
        safe_serialization: Union[bool, Literal["both"]] = 'both',
):
    """
    Arguments:
        (...)
        safe_serialization (`bool` or `"both"`, *optional*, defaults to `False`):
            Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
            Can be set to `"both"` in order to push both safe and unsafe weights.
    """
    # Create repo if it doesn't exist yet
    repo_url = create_repo(repo_id, token=token, private=private, exist_ok=True)

    # Infer complete repo_id from repo_url
    # Can be different from the input `repo_id` if repo_owner was implicit
    _, repo_owner, repo_name = repo_type_and_id_from_hf_id(repo_url)
    repo_id = f"{repo_owner}/{repo_name}"

    # Check if README file already exist in repo
    try:
        get_hf_file_metadata(hf_hub_url(repo_id=repo_id, filename="README.md", revision=revision))
        has_readme = True
    except EntryNotFoundError:
        has_readme = False

    # Dump model and push to Hub
    with TemporaryDirectory() as tmpdir:
        # Save model weights and config.
        save_for_hf(
            model,
            tmpdir,
            model_config=model_config,
            model_args=model_args,
            safe_serialization=safe_serialization,
        )

        # Add readme if it does not exist
        if not has_readme:
            model_card = model_card or {}
            model_name = repo_id.split('/')[-1]
            readme_path = Path(tmpdir) / "README.md"
            readme_text = generate_readme(model_card, model_name)
            readme_path.write_text(readme_text)

        # Upload model and return
        return upload_folder(
            repo_id=repo_id,
            folder_path=tmpdir,
            revision=revision,
            create_pr=create_pr,
            commit_message=commit_message,
        )
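
# Usage sketch (requires huggingface_hub auth; the repo id is illustrative):
#   model = timm.create_model('resnet50', pretrained=True)
#   push_to_hf_hub(model, 'my-user/my-resnet50', model_card={'license': 'apache-2.0'})
#   # pushes both safetensors and pickle weights by default (safe_serialization='both')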

def generate_readme(model_card: dict, model_name: str):
    readme_text = "---\n"
    readme_text += "tags:\n- image-classification\n- timm\n"
    readme_text += "library_name: timm\n"
    readme_text += f"license: {model_card.get('license', 'apache-2.0')}\n"
    if 'details' in model_card and 'Dataset' in model_card['details']:
        readme_text += 'datasets:\n'
        if isinstance(model_card['details']['Dataset'], (tuple, list)):
            for d in model_card['details']['Dataset']:
                readme_text += f"- {d.lower()}\n"
        else:
            readme_text += f"- {model_card['details']['Dataset'].lower()}\n"
        if 'Pretrain Dataset' in model_card['details']:
            if isinstance(model_card['details']['Pretrain Dataset'], (tuple, list)):
                for d in model_card['details']['Pretrain Dataset']:
                    readme_text += f"- {d.lower()}\n"
            else:
                readme_text += f"- {model_card['details']['Pretrain Dataset'].lower()}\n"
    readme_text += "---\n"
    readme_text += f"# Model card for {model_name}\n"
    if 'description' in model_card:
        readme_text += f"\n{model_card['description']}\n"
    if 'details' in model_card:
        readme_text += f"\n## Model Details\n"
        for k, v in model_card['details'].items():
            if isinstance(v, (list, tuple)):
                readme_text += f"- **{k}:**\n"
                for vi in v:
                    readme_text += f"  - {vi}\n"
            elif isinstance(v, dict):
                readme_text += f"- **{k}:**\n"
                for ki, vi in v.items():
                    readme_text += f"  - {ki}: {vi}\n"
            else:
                readme_text += f"- **{k}:** {v}\n"
    if 'usage' in model_card:
        readme_text += f"\n## Model Usage\n"
        readme_text += model_card['usage']
        readme_text += '\n'

    if 'comparison' in model_card:
        readme_text += f"\n## Model Comparison\n"
        readme_text += model_card['comparison']
        readme_text += '\n'

    if 'citation' in model_card:
        readme_text += f"\n## Citation\n"
        if not isinstance(model_card['citation'], (list, tuple)):
            citations = [model_card['citation']]
        else:
            citations = model_card['citation']
        for c in citations:
            readme_text += f"```bibtex\n{c}\n```\n"
    return readme_text


def _get_safe_alternatives(filename: str) -> Iterable[str]:
    """Returns potential safetensors alternatives for a given filename.

    Use case:
        When downloading a model from the Hugging Face Hub, we first look if a .safetensors file exists and if yes, we use it.
        Main use case is filename "pytorch_model.bin" => check for "model.safetensors" or "pytorch_model.safetensors".
    """
    if filename == HF_WEIGHTS_NAME:
        yield HF_SAFE_WEIGHTS_NAME
    if filename == HF_OPEN_CLIP_WEIGHTS_NAME:
        yield HF_OPEN_CLIP_SAFE_WEIGHTS_NAME
    if filename not in (HF_WEIGHTS_NAME, HF_OPEN_CLIP_WEIGHTS_NAME) and filename.endswith(".bin"):
        yield filename[:-4] + ".safetensors"
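`_get_safe_alternatives` is a generator; for the common cases it maps a pickle weights name to its safetensors twin:

```py
>>> list(_get_safe_alternatives('pytorch_model.bin'))
['model.safetensors']
>>> list(_get_safe_alternatives('custom_weights.bin'))
['custom_weights.safetensors']
```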
pytorch-image-models/timm/models/_registry.py
ADDED
@@ -0,0 +1,352 @@
""" Model Registry
Hacked together by / Copyright 2020 Ross Wightman
"""

import fnmatch
import re
import sys
import warnings
from collections import defaultdict, deque
from copy import deepcopy
from dataclasses import replace
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Sequence, Union, Tuple

from ._pretrained import PretrainedCfg, DefaultCfg

__all__ = [
    'split_model_name_tag', 'get_arch_name', 'register_model', 'generate_default_cfgs',
    'list_models', 'list_pretrained', 'is_model', 'model_entrypoint', 'list_modules', 'is_model_in_modules',
    'get_pretrained_cfg_value', 'is_model_pretrained', 'get_arch_pretrained_cfgs'
]

_module_to_models: Dict[str, Set[str]] = defaultdict(set)  # dict of sets to check membership of model in module
_model_to_module: Dict[str, str] = {}  # mapping of model names to module names
_model_entrypoints: Dict[str, Callable[..., Any]] = {}  # mapping of model names to architecture entrypoint fns
_model_has_pretrained: Set[str] = set()  # set of model names that have pretrained weight url present
_model_default_cfgs: Dict[str, PretrainedCfg] = {}  # central repo for model arch -> default cfg objects
_model_pretrained_cfgs: Dict[str, PretrainedCfg] = {}  # central repo for model arch.tag -> pretrained cfgs
_model_with_tags: Dict[str, List[str]] = defaultdict(list)  # shortcut to map each model arch to all model + tag names
_module_to_deprecated_models: Dict[str, Dict[str, Optional[str]]] = defaultdict(dict)
_deprecated_models: Dict[str, Optional[str]] = {}


def split_model_name_tag(model_name: str, no_tag: str = '') -> Tuple[str, str]:
    model_name, *tag_list = model_name.split('.', 1)
    tag = tag_list[0] if tag_list else no_tag
    return model_name, tag


def get_arch_name(model_name: str) -> str:
    return split_model_name_tag(model_name)[0]
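
# Examples:
#   split_model_name_tag('resnet50.a1_in1k') -> ('resnet50', 'a1_in1k')
#   split_model_name_tag('resnet50')         -> ('resnet50', '')
#   get_arch_name('resnet50.a1_in1k')        -> 'resnet50'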

def generate_default_cfgs(cfgs: Dict[str, Union[Dict[str, Any], PretrainedCfg]]):
    out = defaultdict(DefaultCfg)
    default_set = set()  # no tag and tags ending with * are prioritized as default

    for k, v in cfgs.items():
        if isinstance(v, dict):
            v = PretrainedCfg(**v)
        has_weights = v.has_weights

        model, tag = split_model_name_tag(k)
        is_default_set = model in default_set
        priority = (has_weights and not tag) or (tag.endswith('*') and not is_default_set)
        tag = tag.strip('*')

        default_cfg = out[model]

        if priority:
            default_cfg.tags.appendleft(tag)
            default_set.add(model)
        elif has_weights and not default_cfg.is_pretrained:
            default_cfg.tags.appendleft(tag)
        else:
            default_cfg.tags.append(tag)

        if has_weights:
            default_cfg.is_pretrained = True

        default_cfg.cfgs[tag] = v

    return out
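
# Sketch of tag priority (cfg contents elided; names are placeholders): keys are 'arch.tag';
# an untagged key with weights, or a tag ending in '*', becomes the default tag for that arch:
#   default_cfgs = generate_default_cfgs({
#       'mymodel.tag_a': dict(...),
#       'mymodel.tag_b*': dict(...),  # '*' promotes tag_b to the default
#   })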

def register_model(fn: Callable[..., Any]) -> Callable[..., Any]:
    # lookup containing module
    mod = sys.modules[fn.__module__]
    module_name_split = fn.__module__.split('.')
    module_name = module_name_split[-1] if len(module_name_split) else ''

    # add model to __all__ in module
    model_name = fn.__name__
    if hasattr(mod, '__all__'):
        mod.__all__.append(model_name)
    else:
        mod.__all__ = [model_name]  # type: ignore

    # add entries to registry dict/sets
    if model_name in _model_entrypoints:
        warnings.warn(
            f'Overwriting {model_name} in registry with {fn.__module__}.{model_name}. This is because the name being '
            'registered conflicts with an existing name. Please check if this is not expected.',
            stacklevel=2,
        )
    _model_entrypoints[model_name] = fn
    _model_to_module[model_name] = module_name
    _module_to_models[module_name].add(model_name)
    if hasattr(mod, 'default_cfgs') and model_name in mod.default_cfgs:
        # this will catch all models that have entrypoint matching cfg key, but miss any aliasing
        # entrypoints or non-matching combos
        default_cfg = mod.default_cfgs[model_name]
        if not isinstance(default_cfg, DefaultCfg):
            # new style default cfg dataclass w/ multiple entries per model-arch
            assert isinstance(default_cfg, dict)
            # old style cfg dict per model-arch
            pretrained_cfg = PretrainedCfg(**default_cfg)
            default_cfg = DefaultCfg(tags=deque(['']), cfgs={'': pretrained_cfg})

        for tag_idx, tag in enumerate(default_cfg.tags):
            is_default = tag_idx == 0
            pretrained_cfg = default_cfg.cfgs[tag]
            model_name_tag = '.'.join([model_name, tag]) if tag else model_name
            replace_items = dict(architecture=model_name, tag=tag if tag else None)
            if pretrained_cfg.hf_hub_id and pretrained_cfg.hf_hub_id == 'timm/':
                # auto-complete hub name w/ architecture.tag
                replace_items['hf_hub_id'] = pretrained_cfg.hf_hub_id + model_name_tag
            pretrained_cfg = replace(pretrained_cfg, **replace_items)

            if is_default:
                _model_pretrained_cfgs[model_name] = pretrained_cfg
                if pretrained_cfg.has_weights:
                    # add tagless entry if it's default and has weights
                    _model_has_pretrained.add(model_name)

            if tag:
                _model_pretrained_cfgs[model_name_tag] = pretrained_cfg
                if pretrained_cfg.has_weights:
                    # add model w/ tag if tag is valid
                    _model_has_pretrained.add(model_name_tag)
                _model_with_tags[model_name].append(model_name_tag)
            else:
                _model_with_tags[model_name].append(model_name)  # has empty tag (to slowly remove these instances)

        _model_default_cfgs[model_name] = default_cfg

    return fn
|
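
# Typical decorator usage (illustrative sketch; 'mynet_tiny' and its builder
# `_create_mynet` are hypothetical names):
#   @register_model
#   def mynet_tiny(pretrained=False, **kwargs):
#       return _create_mynet('mynet_tiny', pretrained=pretrained, **kwargs)
# Registration wires the fn into _model_entrypoints and, when the defining
# module exposes a matching `default_cfgs` entry, its pretrained cfgs as well.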


def _deprecated_model_shim(deprecated_name: str, current_fn: Optional[Callable] = None, current_tag: str = ''):
    def _fn(pretrained=False, **kwargs):
        assert current_fn is not None, f'Model {deprecated_name} has been removed with no replacement.'
        current_name = '.'.join([current_fn.__name__, current_tag]) if current_tag else current_fn.__name__
        warnings.warn(f'Mapping deprecated model name {deprecated_name} to current {current_name}.', stacklevel=2)
        pretrained_cfg = kwargs.pop('pretrained_cfg', None)
        return current_fn(pretrained=pretrained, pretrained_cfg=pretrained_cfg or current_tag, **kwargs)
    return _fn


def register_model_deprecations(module_name: str, deprecation_map: Dict[str, Optional[str]]):
    mod = sys.modules[module_name]
    module_name_split = module_name.split('.')
    module_name = module_name_split[-1] if len(module_name_split) else ''

    for deprecated, current in deprecation_map.items():
        if hasattr(mod, '__all__'):
            mod.__all__.append(deprecated)
        current_fn = None
        current_tag = ''
        if current:
            current_name, current_tag = split_model_name_tag(current)
            current_fn = getattr(mod, current_name)
        deprecated_entrypoint_fn = _deprecated_model_shim(deprecated, current_fn, current_tag)
        setattr(mod, deprecated, deprecated_entrypoint_fn)
        _model_entrypoints[deprecated] = deprecated_entrypoint_fn
        _model_to_module[deprecated] = module_name
        _module_to_models[module_name].add(deprecated)
        _deprecated_models[deprecated] = current
        _module_to_deprecated_models[module_name][deprecated] = current


def _natural_key(string_: str) -> List[Union[int, str]]:
    """See https://blog.codinghorror.com/sorting-for-humans-natural-sort-order/"""
    return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]


def _expand_filter(filter: str):
    """ expand a 'base_filter' to 'base_filter.*' if no tag portion"""
    filter_base, filter_tag = split_model_name_tag(filter)
    if not filter_tag:
        return ['.'.join([filter_base, '*']), filter]
    else:
        return [filter]
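
# Illustrative behaviour of the two helpers above:
#   >>> sorted(['resnet10', 'resnet2'], key=_natural_key)
#   ['resnet2', 'resnet10']
#   >>> _expand_filter('beit*')  # no tag portion -> also match 'arch.tag' names
#   ['beit*.*', 'beit*']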


def list_models(
        filter: Union[str, List[str]] = '',
        module: Union[str, List[str]] = '',
        pretrained: bool = False,
        exclude_filters: Union[str, List[str]] = '',
        name_matches_cfg: bool = False,
        include_tags: Optional[bool] = None,
) -> List[str]:
    """ Return list of available model names, sorted alphabetically

    Args:
        filter - Wildcard filter string that works with fnmatch
        module - Limit model selection to a specific submodule (ie 'vision_transformer')
        pretrained - Include only models with valid pretrained weights if True
        exclude_filters - Wildcard filters to exclude models after including them with filter
        name_matches_cfg - Include only models w/ model_name matching default_cfg name (excludes some aliases)
        include_tags - Include pretrained tags in model names (model.tag). If None, defaults to True
            when pretrained=True, else False (default: None)

    Returns:
        models - The sorted list of models

    Example:
        list_models('gluon_resnet*') -- returns all models starting with 'gluon_resnet'
        list_models('*resnext*', 'resnet') -- returns all models with 'resnext' in 'resnet' module
    """
    if filter:
        include_filters = filter if isinstance(filter, (tuple, list)) else [filter]
    else:
        include_filters = []

    if include_tags is None:
        # FIXME should this be default behaviour? or default to include_tags=True?
        include_tags = pretrained

    if not module:
        all_models: Set[str] = set(_model_entrypoints.keys())
    else:
        if isinstance(module, str):
            all_models: Set[str] = _module_to_models[module]
        else:
            assert isinstance(module, Sequence)
            all_models: Set[str] = set()
            for m in module:
                all_models.update(_module_to_models[m])
    all_models = all_models - _deprecated_models.keys()  # remove deprecated models from listings

    if include_tags:
        # expand model names to include names w/ pretrained tags
        models_with_tags: Set[str] = set()
        for m in all_models:
            models_with_tags.update(_model_with_tags[m])
        all_models = models_with_tags
        # expand include and exclude filters to include a '.*' for proper match if no tags in filter
        include_filters = [ef for f in include_filters for ef in _expand_filter(f)]
        exclude_filters = [ef for f in exclude_filters for ef in _expand_filter(f)]

    if include_filters:
        models: Set[str] = set()
        for f in include_filters:
            include_models = fnmatch.filter(all_models, f)  # include these models
            if len(include_models):
                models = models.union(include_models)
    else:
        models = all_models

    if exclude_filters:
        if not isinstance(exclude_filters, (tuple, list)):
            exclude_filters = [exclude_filters]
        for xf in exclude_filters:
            exclude_models = fnmatch.filter(models, xf)  # exclude these models
            if len(exclude_models):
                models = models.difference(exclude_models)

    if pretrained:
        models = _model_has_pretrained.intersection(models)

    if name_matches_cfg:
        models = set(_model_pretrained_cfgs).intersection(models)

    return sorted(models, key=_natural_key)
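
# Illustrative calls (results depend on which model modules have been imported):
#   >>> list_models('beit*')                   # architecture names only
#   >>> list_models('beit*', pretrained=True)  # 'arch.tag' names with weights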


def list_pretrained(
        filter: Union[str, List[str]] = '',
        exclude_filters: str = '',
) -> List[str]:
    return list_models(
        filter=filter,
        pretrained=True,
        exclude_filters=exclude_filters,
        include_tags=True,
    )


def get_deprecated_models(module: str = '') -> Dict[str, Optional[str]]:
    all_deprecated = _module_to_deprecated_models[module] if module else _deprecated_models
    return deepcopy(all_deprecated)


def is_model(model_name: str) -> bool:
    """ Check if a model name exists
    """
    arch_name = get_arch_name(model_name)
    return arch_name in _model_entrypoints


def model_entrypoint(model_name: str, module_filter: Optional[str] = None) -> Callable[..., Any]:
    """Fetch a model entrypoint for specified model name
    """
    arch_name = get_arch_name(model_name)
    if module_filter and arch_name not in _module_to_models.get(module_filter, {}):
        raise RuntimeError(f'Model ({model_name}) not found in module {module_filter}.')
    return _model_entrypoints[arch_name]
299 |
+
|
300 |
+
|
301 |
+
def list_modules() -> List[str]:
|
302 |
+
""" Return list of module names that contain models / model entrypoints
|
303 |
+
"""
|
304 |
+
modules = _module_to_models.keys()
|
305 |
+
return sorted(modules)
|
306 |
+
|
307 |
+
|
308 |
+
def is_model_in_modules(
|
309 |
+
model_name: str, module_names: Union[Tuple[str, ...], List[str], Set[str]]
|
310 |
+
) -> bool:
|
311 |
+
"""Check if a model exists within a subset of modules
|
312 |
+
|
313 |
+
Args:
|
314 |
+
model_name - name of model to check
|
315 |
+
module_names - names of modules to search in
|
316 |
+
"""
|
317 |
+
arch_name = get_arch_name(model_name)
|
318 |
+
assert isinstance(module_names, (tuple, list, set))
|
319 |
+
return any(arch_name in _module_to_models[n] for n in module_names)
|
320 |
+
|
321 |
+
|
322 |
+
def is_model_pretrained(model_name: str) -> bool:
|
323 |
+
return model_name in _model_has_pretrained
|
324 |
+
|
325 |
+
|
326 |
+
def get_pretrained_cfg(model_name: str, allow_unregistered: bool = True) -> Optional[PretrainedCfg]:
|
327 |
+
if model_name in _model_pretrained_cfgs:
|
328 |
+
return deepcopy(_model_pretrained_cfgs[model_name])
|
329 |
+
arch_name, tag = split_model_name_tag(model_name)
|
330 |
+
if arch_name in _model_default_cfgs:
|
331 |
+
# if model arch exists, but the tag is wrong, error out
|
332 |
+
raise RuntimeError(f'Invalid pretrained tag ({tag}) for {arch_name}.')
|
333 |
+
if allow_unregistered:
|
334 |
+
# if model arch doesn't exist, it has no pretrained_cfg registered, allow a default to be created
|
335 |
+
return None
|
336 |
+
raise RuntimeError(f'Model architecture ({arch_name}) has no pretrained cfg registered.')
|
337 |
+
|
338 |
+
|
339 |
+
def get_pretrained_cfg_value(model_name: str, cfg_key: str) -> Optional[Any]:
|
340 |
+
""" Get a specific model default_cfg value by key. None if key doesn't exist.
|
341 |
+
"""
|
342 |
+
cfg = get_pretrained_cfg(model_name, allow_unregistered=False)
|
343 |
+
return getattr(cfg, cfg_key, None)
|
344 |
+
|
345 |
+
|
346 |
+
def get_arch_pretrained_cfgs(model_name: str) -> Dict[str, PretrainedCfg]:
|
347 |
+
""" Get all pretrained cfgs for a given architecture.
|
348 |
+
"""
|
349 |
+
arch_name, _ = split_model_name_tag(model_name)
|
350 |
+
model_names = _model_with_tags[arch_name]
|
351 |
+
cfgs = {m: _model_pretrained_cfgs[m] for m in model_names}
|
352 |
+
return cfgs
|
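
# The lookups above compose (hedged sketch; assumes the beit module below has
# been imported so its entrypoints and cfgs are registered):
#   >>> fn = model_entrypoint('beit_base_patch16_224')
#   >>> get_pretrained_cfg_value('beit_base_patch16_224.in22k_ft_in22k', 'num_classes')
#   21841
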
pytorch-image-models/timm/models/beit.py
ADDED
@@ -0,0 +1,716 @@
""" BEiT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)

Model from official source: https://github.com/microsoft/unilm/tree/master/beit

@inproceedings{beit,
title={{BEiT}: {BERT} Pre-Training of Image Transformers},
author={Hangbo Bao and Li Dong and Songhao Piao and Furu Wei},
booktitle={International Conference on Learning Representations},
year={2022},
url={https://openreview.net/forum?id=p-BhZSz59o4}
}

BEiT-v2 from https://github.com/microsoft/unilm/tree/master/beit2

@article{beitv2,
title={{BEiT v2}: Masked Image Modeling with Vector-Quantized Visual Tokenizers},
author={Zhiliang Peng and Li Dong and Hangbo Bao and Qixiang Ye and Furu Wei},
year={2022},
eprint={2208.06366},
archivePrefix={arXiv},
primaryClass={cs.CV}
}

At this point only the 1k fine-tuned classification weights and model configs have been added,
see original source above for pre-training models and procedure.

Modifications by / Copyright 2021 Ross Wightman, original copyrights below
"""
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit/
# https://github.com/facebookresearch/dino
# --------------------------------------------------------

import math
from typing import Callable, List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint

from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import PatchEmbed, Mlp, SwiGLU, LayerNorm, DropPath, trunc_normal_, use_fused_attn
from timm.layers import resample_patch_embed, resample_abs_pos_embed, resize_rel_pos_bias_table, ndgrid

from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._registry import generate_default_cfgs, register_model

__all__ = ['Beit']


def gen_relative_position_index(window_size: Tuple[int, int]) -> torch.Tensor:
    # the extra 3 table entries cover cls-to-token, token-to-cls & cls-to-cls
    num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
    # get pair-wise relative position index for each token inside the window
    window_area = window_size[0] * window_size[1]
    coords = torch.stack(ndgrid(torch.arange(window_size[0]), torch.arange(window_size[1])))  # 2, Wh, Ww
    coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
    relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
    relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
    relative_coords[:, :, 0] += window_size[0] - 1  # shift to start from 0
    relative_coords[:, :, 1] += window_size[1] - 1
    relative_coords[:, :, 0] *= 2 * window_size[1] - 1
    relative_position_index = torch.zeros(size=(window_area + 1,) * 2, dtype=relative_coords.dtype)
    relative_position_index[1:, 1:] = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
    relative_position_index[0, 0:] = num_relative_distance - 3
    relative_position_index[0:, 0] = num_relative_distance - 2
    relative_position_index[0, 0] = num_relative_distance - 1
    return relative_position_index
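
# Worked size check (illustrative): for window_size=(2, 2) the bias table needs
# (2*2-1)*(2*2-1) + 3 = 12 entries and the returned index is (5, 5); values
# 0..8 cover the 9 possible token-to-token offsets, 9 marks cls->token,
# 10 token->cls, and 11 cls->cls.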


class Attention(nn.Module):
    fused_attn: torch.jit.Final[bool]

    def __init__(
            self,
            dim: int,
            num_heads: int = 8,
            qkv_bias: bool = False,
            qkv_bias_separate: bool = False,
            attn_drop: float = 0.,
            proj_drop: float = 0.,
            window_size: Optional[Tuple[int, int]] = None,
            attn_head_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        if attn_head_dim is not None:
            head_dim = attn_head_dim
        all_head_dim = head_dim * self.num_heads
        self.scale = head_dim ** -0.5
        self.fused_attn = use_fused_attn()
        self.qkv_bias_separate = qkv_bias_separate

        self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
        if qkv_bias:
            self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
            self.register_buffer('k_bias', torch.zeros(all_head_dim), persistent=False)
            self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
        else:
            self.q_bias = None
            self.k_bias = None
            self.v_bias = None

        if window_size:
            self.window_size = window_size
            self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
            self.relative_position_bias_table = nn.Parameter(
                torch.zeros(self.num_relative_distance, num_heads))  # 2*Wh-1 * 2*Ww-1, nH
            self.register_buffer("relative_position_index", gen_relative_position_index(window_size), persistent=False)
        else:
            self.window_size = None
            self.relative_position_bias_table = None
            self.relative_position_index = None

        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(all_head_dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def _get_rel_pos_bias(self):
        relative_position_bias = self.relative_position_bias_table[
            self.relative_position_index.view(-1)].view(
                self.window_size[0] * self.window_size[1] + 1,
                self.window_size[0] * self.window_size[1] + 1, -1)  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        return relative_position_bias.unsqueeze(0)

    def forward(self, x, shared_rel_pos_bias: Optional[torch.Tensor] = None):
        B, N, C = x.shape

        if self.q_bias is None:
            qkv = self.qkv(x)
        else:
            qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias))
            if self.qkv_bias_separate:
                qkv = self.qkv(x)
                qkv += qkv_bias
            else:
                qkv = F.linear(x, weight=self.qkv.weight, bias=qkv_bias)
        qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)  # B, num_heads, N, head_dim

        if self.fused_attn:
            rel_pos_bias = None
            if self.relative_position_bias_table is not None:
                rel_pos_bias = self._get_rel_pos_bias()
                if shared_rel_pos_bias is not None:
                    rel_pos_bias = rel_pos_bias + shared_rel_pos_bias
            elif shared_rel_pos_bias is not None:
                rel_pos_bias = shared_rel_pos_bias

            x = F.scaled_dot_product_attention(
                q, k, v,
                attn_mask=rel_pos_bias,
                dropout_p=self.attn_drop.p if self.training else 0.,
            )
        else:
            q = q * self.scale
            attn = (q @ k.transpose(-2, -1))

            if self.relative_position_bias_table is not None:
                attn = attn + self._get_rel_pos_bias()
            if shared_rel_pos_bias is not None:
                attn = attn + shared_rel_pos_bias

            attn = attn.softmax(dim=-1)
            attn = self.attn_drop(attn)
            x = attn @ v

        x = x.transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
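
# Standalone shape check (hedged sketch; inside Beit this module is driven by
# Block with window/grid sizes that match the token count):
#   >>> attn = Attention(dim=768, num_heads=12, qkv_bias=True, window_size=(14, 14))
#   >>> attn(torch.randn(2, 14 * 14 + 1, 768)).shape
#   torch.Size([2, 197, 768])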


class Block(nn.Module):

    def __init__(
            self,
            dim: int,
            num_heads: int,
            qkv_bias: bool = False,
            mlp_ratio: float = 4.,
            scale_mlp: bool = False,
            swiglu_mlp: bool = False,
            proj_drop: float = 0.,
            attn_drop: float = 0.,
            drop_path: float = 0.,
            init_values: Optional[float] = None,
            act_layer: Callable = nn.GELU,
            norm_layer: Callable = LayerNorm,
            window_size: Optional[Tuple[int, int]] = None,
            attn_head_dim: Optional[int] = None,
    ):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            attn_drop=attn_drop,
            proj_drop=proj_drop,
            window_size=window_size,
            attn_head_dim=attn_head_dim,
        )
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

        self.norm2 = norm_layer(dim)
        if swiglu_mlp:
            self.mlp = SwiGLU(
                in_features=dim,
                hidden_features=int(dim * mlp_ratio),
                norm_layer=norm_layer if scale_mlp else None,
                drop=proj_drop,
            )
        else:
            self.mlp = Mlp(
                in_features=dim,
                hidden_features=int(dim * mlp_ratio),
                act_layer=act_layer,
                norm_layer=norm_layer if scale_mlp else None,
                drop=proj_drop,
            )
        self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

        if init_values:
            self.gamma_1 = nn.Parameter(init_values * torch.ones(dim))
            self.gamma_2 = nn.Parameter(init_values * torch.ones(dim))
        else:
            self.gamma_1, self.gamma_2 = None, None

    def forward(self, x, shared_rel_pos_bias: Optional[torch.Tensor] = None):
        if self.gamma_1 is None:
            x = x + self.drop_path1(self.attn(self.norm1(x), shared_rel_pos_bias=shared_rel_pos_bias))
            x = x + self.drop_path2(self.mlp(self.norm2(x)))
        else:
            x = x + self.drop_path1(self.gamma_1 * self.attn(self.norm1(x), shared_rel_pos_bias=shared_rel_pos_bias))
            x = x + self.drop_path2(self.gamma_2 * self.mlp(self.norm2(x)))
        return x


class RelativePositionBias(nn.Module):

    def __init__(self, window_size, num_heads):
        super().__init__()
        self.window_size = window_size
        self.window_area = window_size[0] * window_size[1]
        num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
        self.relative_position_bias_table = nn.Parameter(torch.zeros(num_relative_distance, num_heads))
        # trunc_normal_(self.relative_position_bias_table, std=.02)
        self.register_buffer("relative_position_index", gen_relative_position_index(window_size))

    def forward(self):
        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_area + 1, self.window_area + 1, -1)  # Wh*Ww,Wh*Ww,nH
        return relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww


class Beit(nn.Module):
    """ Vision Transformer with support for patch or hybrid CNN input stage
    """

    def __init__(
            self,
            img_size: Union[int, Tuple[int, int]] = 224,
            patch_size: Union[int, Tuple[int, int]] = 16,
            in_chans: int = 3,
            num_classes: int = 1000,
            global_pool: str = 'avg',
            embed_dim: int = 768,
            depth: int = 12,
            num_heads: int = 12,
            qkv_bias: bool = True,
            mlp_ratio: float = 4.,
            swiglu_mlp: bool = False,
            scale_mlp: bool = False,
            drop_rate: float = 0.,
            pos_drop_rate: float = 0.,
            proj_drop_rate: float = 0.,
            attn_drop_rate: float = 0.,
            drop_path_rate: float = 0.,
            norm_layer: Callable = LayerNorm,
            init_values: Optional[float] = None,
            use_abs_pos_emb: bool = True,
            use_rel_pos_bias: bool = False,
            use_shared_rel_pos_bias: bool = False,
            head_init_scale: float = 0.001,
    ):
        super().__init__()
        self.num_classes = num_classes
        self.global_pool = global_pool
        self.num_features = self.head_hidden_size = self.embed_dim = embed_dim  # for consistency with other models
        self.num_prefix_tokens = 1
        self.grad_checkpointing = False

        self.patch_embed = PatchEmbed(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
        )
        num_patches = self.patch_embed.num_patches
        r = self.patch_embed.feat_ratio() if hasattr(self.patch_embed, 'feat_ratio') else patch_size

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) if use_abs_pos_emb else None
        self.pos_drop = nn.Dropout(p=pos_drop_rate)

        if use_shared_rel_pos_bias:
            self.rel_pos_bias = RelativePositionBias(
                window_size=self.patch_embed.grid_size,
                num_heads=num_heads,
            )
        else:
            self.rel_pos_bias = None

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim,
                num_heads=num_heads,
                qkv_bias=qkv_bias,
                mlp_ratio=mlp_ratio,
                scale_mlp=scale_mlp,
                swiglu_mlp=swiglu_mlp,
                proj_drop=proj_drop_rate,
                attn_drop=attn_drop_rate,
                drop_path=dpr[i],
                norm_layer=norm_layer,
                init_values=init_values,
                window_size=self.patch_embed.grid_size if use_rel_pos_bias else None,
            )
            for i in range(depth)])
        self.feature_info = [
            dict(module=f'blocks.{i}', num_chs=embed_dim, reduction=r) for i in range(depth)]

        use_fc_norm = self.global_pool == 'avg'
        self.norm = nn.Identity() if use_fc_norm else norm_layer(embed_dim)
        self.fc_norm = norm_layer(embed_dim) if use_fc_norm else nn.Identity()
        self.head_drop = nn.Dropout(drop_rate)
        self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()

        self.apply(self._init_weights)
        if self.pos_embed is not None:
            trunc_normal_(self.pos_embed, std=.02)
        trunc_normal_(self.cls_token, std=.02)

        self.fix_init_weight()
        if isinstance(self.head, nn.Linear):
            trunc_normal_(self.head.weight, std=.02)
            self.head.weight.data.mul_(head_init_scale)
            self.head.bias.data.mul_(head_init_scale)

    def fix_init_weight(self):
        def rescale(param, layer_id):
            param.div_(math.sqrt(2.0 * layer_id))

        for layer_id, layer in enumerate(self.blocks):
            rescale(layer.attn.proj.weight.data, layer_id + 1)
            rescale(layer.mlp.fc2.weight.data, layer_id + 1)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        nwd = {'pos_embed', 'cls_token'}
        for n, _ in self.named_parameters():
            if 'relative_position_bias_table' in n:
                nwd.add(n)
        return nwd

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        self.grad_checkpointing = enable

    @torch.jit.ignore
    def group_matcher(self, coarse=False):
        matcher = dict(
            stem=r'^cls_token|pos_embed|patch_embed|rel_pos_bias',  # stem and embed
            blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))],
        )
        return matcher

    @torch.jit.ignore
    def get_classifier(self) -> nn.Module:
        return self.head

    def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
        self.num_classes = num_classes
        if global_pool is not None:
            self.global_pool = global_pool
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()

    def forward_intermediates(
            self,
            x: torch.Tensor,
            indices: Optional[Union[int, List[int]]] = None,
            return_prefix_tokens: bool = False,
            norm: bool = False,
            stop_early: bool = False,
            output_fmt: str = 'NCHW',
            intermediates_only: bool = False,
    ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
        """ Forward features that returns intermediates.

        Args:
            x: Input image tensor
            indices: Take last n blocks if an int, if a sequence, select blocks by matching indices
            return_prefix_tokens: Return both prefix and spatial intermediate tokens
            norm: Apply norm layer to all intermediates
            stop_early: Stop iterating over blocks when last desired intermediate hit
            output_fmt: Shape of intermediate feature outputs
            intermediates_only: Only return intermediate features
        Returns:
            List of intermediate features if intermediates_only, otherwise a tuple of
            (final features, intermediates)
        """
        assert output_fmt in ('NCHW', 'NLC'), 'Output format must be one of NCHW or NLC.'
        reshape = output_fmt == 'NCHW'
        intermediates = []
        take_indices, max_index = feature_take_indices(len(self.blocks), indices)

        # forward pass
        B, _, height, width = x.shape
        x = self.patch_embed(x)
        x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
        if self.pos_embed is not None:
            x = x + self.pos_embed
        x = self.pos_drop(x)

        rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
        if torch.jit.is_scripting() or not stop_early:  # can't slice blocks in torchscript
            blocks = self.blocks
        else:
            blocks = self.blocks[:max_index + 1]
        for i, blk in enumerate(blocks):
            x = blk(x, shared_rel_pos_bias=rel_pos_bias)
            if i in take_indices:
                # normalize intermediates with final norm layer if enabled
                intermediates.append(self.norm(x) if norm else x)

        # process intermediates
        if self.num_prefix_tokens:
            # split prefix (e.g. class, distill) and spatial feature tokens
            prefix_tokens = [y[:, 0:self.num_prefix_tokens] for y in intermediates]
            intermediates = [y[:, self.num_prefix_tokens:] for y in intermediates]
        if reshape:
            # reshape to BCHW output format
            H, W = self.patch_embed.dynamic_feat_size((height, width))
            intermediates = [y.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates]
        if not torch.jit.is_scripting() and return_prefix_tokens:
            # return_prefix not supported in torchscript due to poor type handling
            intermediates = list(zip(intermediates, prefix_tokens))

        if intermediates_only:
            return intermediates

        x = self.norm(x)

        return x, intermediates

    def prune_intermediate_layers(
            self,
            indices: Union[int, List[int]] = 1,
            prune_norm: bool = False,
            prune_head: bool = True,
    ):
        """ Prune layers not required for specified intermediates.
        """
        take_indices, max_index = feature_take_indices(len(self.blocks), indices)
        self.blocks = self.blocks[:max_index + 1]  # truncate blocks
        if prune_norm:
            self.norm = nn.Identity()
        if prune_head:
            self.fc_norm = nn.Identity()
            self.reset_classifier(0, '')
        return take_indices

    def forward_features(self, x):
        x = self.patch_embed(x)
        x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
        if self.pos_embed is not None:
            x = x + self.pos_embed
        x = self.pos_drop(x)

        rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
        for blk in self.blocks:
            if self.grad_checkpointing and not torch.jit.is_scripting():
                x = checkpoint(blk, x, shared_rel_pos_bias=rel_pos_bias)
            else:
                x = blk(x, shared_rel_pos_bias=rel_pos_bias)
        x = self.norm(x)
        return x

    def forward_head(self, x, pre_logits: bool = False):
        if self.global_pool:
            x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0]
        x = self.fc_norm(x)
        x = self.head_drop(x)
        return x if pre_logits else self.head(x)

    def forward(self, x):
        x = self.forward_features(x)
        x = self.forward_head(x)
        return x
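
# End-to-end sanity sketch (hedged; random weights, hyper-params chosen small
# purely for illustration):
#   >>> tiny = Beit(img_size=224, patch_size=16, embed_dim=192, depth=2, num_heads=3)
#   >>> tiny(torch.randn(1, 3, 224, 224)).shape
#   torch.Size([1, 1000])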


def _cfg(url='', **kwargs):
    return {
        'url': url,
        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
        'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
        'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
        'first_conv': 'patch_embed.proj', 'classifier': 'head',
        **kwargs
    }


default_cfgs = generate_default_cfgs({
    'beit_base_patch16_224.in22k_ft_in22k_in1k': _cfg(
        #url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22kto1k.pth',
        hf_hub_id='timm/'),
    'beit_base_patch16_384.in22k_ft_in22k_in1k': _cfg(
        #url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_384_pt22k_ft22kto1k.pth',
        hf_hub_id='timm/',
        input_size=(3, 384, 384), crop_pct=1.0,
    ),
    'beit_base_patch16_224.in22k_ft_in22k': _cfg(
        #url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22k.pth',
        hf_hub_id='timm/',
        num_classes=21841,
    ),
    'beit_large_patch16_224.in22k_ft_in22k_in1k': _cfg(
        #url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_224_pt22k_ft22kto1k.pth',
        hf_hub_id='timm/'),
    'beit_large_patch16_384.in22k_ft_in22k_in1k': _cfg(
        #url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_384_pt22k_ft22kto1k.pth',
        hf_hub_id='timm/',
        input_size=(3, 384, 384), crop_pct=1.0,
    ),
    'beit_large_patch16_512.in22k_ft_in22k_in1k': _cfg(
        #url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_512_pt22k_ft22kto1k.pth',
        hf_hub_id='timm/',
        input_size=(3, 512, 512), crop_pct=1.0,
    ),
    'beit_large_patch16_224.in22k_ft_in22k': _cfg(
        #url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_224_pt22k_ft22k.pth',
        hf_hub_id='timm/',
        num_classes=21841,
    ),

    'beitv2_base_patch16_224.in1k_ft_in22k_in1k': _cfg(
        #url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_base_patch16_224_pt1k_ft21kto1k.pth',
        hf_hub_id='timm/',
        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
    ),
    'beitv2_base_patch16_224.in1k_ft_in1k': _cfg(
        #url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_base_patch16_224_pt1k_ft1k.pth',
        hf_hub_id='timm/',
        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
    ),
    'beitv2_base_patch16_224.in1k_ft_in22k': _cfg(
        #url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_base_patch16_224_pt1k_ft21k.pth',
        hf_hub_id='timm/',
        num_classes=21841, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
    ),
    'beitv2_large_patch16_224.in1k_ft_in22k_in1k': _cfg(
        #url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_large_patch16_224_pt1k_ft21kto1k.pth',
        hf_hub_id='timm/',
        crop_pct=0.95, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
    ),
    'beitv2_large_patch16_224.in1k_ft_in1k': _cfg(
        #url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_large_patch16_224_pt1k_ft1k.pth',
        hf_hub_id='timm/',
        crop_pct=0.95, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
    ),
    'beitv2_large_patch16_224.in1k_ft_in22k': _cfg(
        #url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_large_patch16_224_pt1k_ft21k.pth',
        hf_hub_id='timm/',
        num_classes=21841, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
    ),
})


def checkpoint_filter_fn(state_dict, model, interpolation='bicubic', antialias=True):
    state_dict = state_dict.get('model', state_dict)
    state_dict = state_dict.get('module', state_dict)  # beit v2 didn't strip module

    out_dict = {}
    for k, v in state_dict.items():
        if 'relative_position_index' in k:
            continue
        if 'patch_embed.proj.weight' in k:
            O, I, H, W = model.patch_embed.proj.weight.shape
            if v.shape[-1] != W or v.shape[-2] != H:
                v = resample_patch_embed(
                    v,
                    (H, W),
                    interpolation=interpolation,
                    antialias=antialias,
                    verbose=True,
                )
        elif k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]:
            # To resize pos embedding when using model at different size from pretrained weights
            num_prefix_tokens = 1
            v = resample_abs_pos_embed(
                v,
                new_size=model.patch_embed.grid_size,
                num_prefix_tokens=num_prefix_tokens,
                interpolation=interpolation,
                antialias=antialias,
                verbose=True,
            )
        elif k.endswith('relative_position_bias_table'):
            m = model.get_submodule(k[:-29])
            if v.shape != m.relative_position_bias_table.shape or m.window_size[0] != m.window_size[1]:
                v = resize_rel_pos_bias_table(
                    v,
                    new_window_size=m.window_size,
                    new_bias_shape=m.relative_position_bias_table.shape,
                )
        out_dict[k] = v
    return out_dict


def _create_beit(variant, pretrained=False, **kwargs):
    out_indices = kwargs.pop('out_indices', 3)
    model = build_model_with_cfg(
        Beit, variant, pretrained,
        pretrained_filter_fn=checkpoint_filter_fn,
        feature_cfg=dict(out_indices=out_indices, feature_cls='getter'),
        **kwargs,
    )
    return model
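
# Typical factory usage (hedged; pretrained=True needs network access to the
# Hugging Face Hub for the weights referenced in default_cfgs above):
#   >>> import timm
#   >>> model = timm.create_model('beit_base_patch16_224.in22k_ft_in22k_in1k', pretrained=True)
#   >>> feats = timm.create_model('beit_base_patch16_224', features_only=True, out_indices=(9, 10, 11))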


@register_model
def beit_base_patch16_224(pretrained=False, **kwargs) -> Beit:
    model_args = dict(
        patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
        use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1)
    model = _create_beit('beit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def beit_base_patch16_384(pretrained=False, **kwargs) -> Beit:
    model_args = dict(
        img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12,
        use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1)
    model = _create_beit('beit_base_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def beit_large_patch16_224(pretrained=False, **kwargs) -> Beit:
    model_args = dict(
        patch_size=16, embed_dim=1024, depth=24, num_heads=16,
        use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5)
    model = _create_beit('beit_large_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def beit_large_patch16_384(pretrained=False, **kwargs) -> Beit:
    model_args = dict(
        img_size=384, patch_size=16, embed_dim=1024, depth=24, num_heads=16,
        use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5)
    model = _create_beit('beit_large_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def beit_large_patch16_512(pretrained=False, **kwargs) -> Beit:
    model_args = dict(
        img_size=512, patch_size=16, embed_dim=1024, depth=24, num_heads=16,
        use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5)
    model = _create_beit('beit_large_patch16_512', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def beitv2_base_patch16_224(pretrained=False, **kwargs) -> Beit:
    model_args = dict(
        patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
        use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5)
    model = _create_beit('beitv2_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def beitv2_large_patch16_224(pretrained=False, **kwargs) -> Beit:
    model_args = dict(
        patch_size=16, embed_dim=1024, depth=24, num_heads=16,
        use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5)
    model = _create_beit('beitv2_large_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model
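
# Once this module is imported, the registry helpers in _registry.py see these
# entrypoints, e.g. (illustrative) list_pretrained('beit_large*') yields
# 'arch.tag' names such as 'beit_large_patch16_512.in22k_ft_in22k_in1k'.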