Commit 257c098
Parent(s): 85a7f9b
Upload model
Files changed:
- config.json (+26, -0)
- configuration_resnet.py (+79, -0)
- modeling_resnet.py (+75, -0)
- pytorch_model.bin (+3, -0)
config.json
ADDED
@@ -0,0 +1,26 @@
{
  "architectures": [
    "ResnetModelForImageClassification"
  ],
  "auto_map": {
    "AutoConfig": "configuration_resnet.ResnetConfig",
    "AutoModelForImageClassification": "modeling_resnet.ResnetModelForImageClassification"
  },
  "avg_down": true,
  "base_width": 64,
  "block_type": "bottleneck",
  "cardinality": 1,
  "input_channels": 3,
  "layers": [
    3,
    4,
    6,
    3
  ],
  "model_type": "resnet",
  "num_classes": 1000,
  "stem_type": "deep",
  "stem_width": 32,
  "torch_dtype": "float32",
  "transformers_version": "4.26.1"
}
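The auto_map block above points the Auto classes at the custom code shipped in this repo (configuration_resnet.py and modeling_resnet.py). As a rough sketch of how a downstream user would load this checkpoint (the repo id below is a placeholder, not taken from this commit):

from transformers import AutoModelForImageClassification

# trust_remote_code=True is required because the model class lives in this repo,
# not in the transformers library; "your-namespace/custom-resnet50d" is illustrative.
model = AutoModelForImageClassification.from_pretrained(
    "your-namespace/custom-resnet50d", trust_remote_code=True
)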
configuration_resnet.py
ADDED
@@ -0,0 +1,79 @@
from typing import List

from transformers import PretrainedConfig

"""
The configuration of a model is an object that
will contain all the necessary information to build the model.

The three important things to remember when writing your own configuration are the following:

- you have to inherit from PretrainedConfig,
- the __init__ of your PretrainedConfig must accept any kwargs,
- those kwargs need to be passed to the superclass __init__.
"""


class ResnetConfig(PretrainedConfig):
    """
    Defining a model_type for your configuration (here model_type="resnet") is not mandatory,
    unless you want to register your model with the auto classes (see last section).
    """

    model_type = "resnet"

    def __init__(
        self,
        block_type="bottleneck",
        layers: List[int] = [3, 4, 6, 3],
        num_classes: int = 1000,
        input_channels: int = 3,
        cardinality: int = 1,
        base_width: int = 64,
        stem_width: int = 64,
        stem_type: str = "",
        avg_down: bool = False,
        **kwargs,
    ):
        if block_type not in ["basic", "bottleneck"]:
            raise ValueError(
                f"`block_type` must be 'basic' or 'bottleneck', got {block_type}."
            )
        if stem_type not in ["", "deep", "deep-tiered"]:
            raise ValueError(
                f"`stem_type` must be '', 'deep' or 'deep-tiered', got {stem_type}."
            )

        self.block_type = block_type
        self.layers = layers
        self.num_classes = num_classes
        self.input_channels = input_channels
        self.cardinality = cardinality
        self.base_width = base_width
        self.stem_width = stem_width
        self.stem_type = stem_type
        self.avg_down = avg_down
        super().__init__(**kwargs)


if __name__ == "__main__":
    """
    With this done, you can easily create and save your configuration like
    you would do with any other model config of the library.
    Here is how we can create a resnet50d config and save it:
    """
    resnet50d_config = ResnetConfig(
        block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True
    )
    resnet50d_config.save_pretrained("custom-resnet")

    """
    This will save a file named config.json inside the folder custom-resnet.
    You can then reload your config with the from_pretrained method:
    """
    resnet50d_config = ResnetConfig.from_pretrained("custom-resnet")

    """
    You can also use any other method of the PretrainedConfig class,
    like push_to_hub() to directly upload your config to the Hub.
    """
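As the closing docstring notes, any other PretrainedConfig method works the same way. A minimal sketch of pushing the config straight to the Hub (assumes you are already authenticated, e.g. via huggingface-cli login; the repo name is illustrative):

# Upload config.json to a Hub repo named "custom-resnet50d" (hypothetical name).
resnet50d_config.push_to_hub("custom-resnet50d")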
modeling_resnet.py
ADDED
@@ -0,0 +1,75 @@
from typing import Dict

import timm
from timm.models.resnet import BasicBlock, Bottleneck, ResNet
from torch import Tensor, nn
from transformers import PreTrainedModel

try:
    # Relative import used when this file is loaded as part of the Hub repo / package.
    from .configuration_resnet import ResnetConfig
except ImportError:
    # Fallback so the __main__ block below can also be run as a standalone script.
    from configuration_resnet import ResnetConfig

BLOCK_MAPPING = {"basic": BasicBlock, "bottleneck": Bottleneck}


class ResnetModel(PreTrainedModel):
    """
    The line that sets the config_class is not mandatory,
    unless you want to register your model with the auto classes.
    """

    config_class = ResnetConfig

    def __init__(self, config: ResnetConfig):
        super().__init__(config)
        block_layer = BLOCK_MAPPING[config.block_type]
        self.model = ResNet(
            block_layer,
            config.layers,
            num_classes=config.num_classes,
            in_chans=config.input_channels,
            cardinality=config.cardinality,
            base_width=config.base_width,
            stem_width=config.stem_width,
            stem_type=config.stem_type,
            avg_down=config.avg_down,
        )

    def forward(self, tensor: Tensor) -> Tensor:
        return self.model.forward_features(tensor)


class ResnetModelForImageClassification(PreTrainedModel):
    """
    The line that sets the config_class is not mandatory,
    unless you want to register your model with the auto classes.
    """

    config_class = ResnetConfig

    def __init__(self, config: ResnetConfig):
        super().__init__(config)
        block_layer = BLOCK_MAPPING[config.block_type]
        # Build the timm ResNet directly (instead of wrapping ResnetModel) so that
        # self.model(tensor) returns classification logits and the timm state_dict
        # loaded in the __main__ block below matches the parameter names.
        self.model = ResNet(
            block_layer,
            config.layers,
            num_classes=config.num_classes,
            in_chans=config.input_channels,
            cardinality=config.cardinality,
            base_width=config.base_width,
            stem_width=config.stem_width,
            stem_type=config.stem_type,
            avg_down=config.avg_down,
        )

    """
    You can have your model return anything you want,
    but returning a dictionary like we did for ResnetModelForImageClassification,
    with the loss included when labels are passed,
    will make your model directly usable inside the Trainer class.
    Using another output format is fine as long as you are planning on
    using your own training loop or another library for training.
    """

    def forward(self, tensor: Tensor, labels=None) -> Dict[str, Tensor]:
        logits = self.model(tensor)
        if labels is not None:
            loss = nn.functional.cross_entropy(logits, labels)
            return {"loss": loss, "logits": logits}
        return {"logits": logits}


if __name__ == "__main__":
    resnet50d_config = ResnetConfig.from_pretrained("custom-resnet")
    resnet50d = ResnetModelForImageClassification(resnet50d_config)

    # Load pretrained weights from timm
    pretrained_model: nn.Module = timm.create_model("resnet50d", pretrained=True)
    resnet50d.model.load_state_dict(pretrained_model.state_dict())
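Continuing the __main__ block above, a quick smoke test of the dictionary output described in the docstring (dummy data, purely illustrative): because forward returns a dict containing "loss" when labels are passed, the model can be dropped into the Trainer class without extra glue.

    import torch

    # Illustrative dummy batch: two RGB images at 224x224 plus integer class labels.
    images = torch.randn(2, 3, 224, 224)
    labels = torch.tensor([0, 1])
    outputs = resnet50d(images, labels=labels)
    print(outputs["loss"], outputs["logits"].shape)  # scalar loss, logits of shape (2, 1000)

For context, a commit like this one (config.json with its auto_map, the two .py modules, and the weights) is what push_to_hub produces once the custom classes have been registered for the auto API. A minimal sketch, again with a placeholder repo name:

    # Record auto_map entries in config.json so the Auto classes can resolve the custom
    # code, then upload the code files and weights together (repo name is illustrative).
    ResnetConfig.register_for_auto_class()
    ResnetModelForImageClassification.register_for_auto_class("AutoModelForImageClassification")
    resnet50d.push_to_hub("custom-resnet50d")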
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c2e8b04c4def7ce6efa549445f1e9c07235aea7042a62cb213538b9eb3aae3f9
size 102622141