Add model

- README.md +141 -0
- config.json +32 -0
- pytorch_model.bin +3 -0
README.md
ADDED
@@ -0,0 +1,141 @@
---
tags:
- image-classification
- timm
library_tag: timm
license: apache-2.0
datasets:
- imagenet-1k
---

# Model card for efficientformerv2_l.snap_dist_in1k

An EfficientFormer-V2 image classification model. Pretrained with distillation on ImageNet-1k.

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 26.3
  - GMACs: 2.6
  - Activations (M): 18.5
  - Image size: 224 x 224
- **Original:** https://github.com/snap-research/EfficientFormer
- **Papers:**
  - Rethinking Vision Transformers for MobileNet Size and Speed: https://arxiv.org/abs/2212.08059
- **Dataset:** ImageNet-1k
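As a quick sanity check on the stats above, the parameter count can be reproduced directly. A minimal sketch (the ~26.3M figure comes from the stats listed above):

```python
import timm

# instantiate without downloading weights, just to count parameters
model = timm.create_model('efficientformerv2_l', pretrained=False)
n_params = sum(p.numel() for p in model.parameters())
print(f'{n_params / 1e6:.1f}M params')  # expected ~26.3M per the stats above
```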
## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import timm
import torch

img = Image.open(
    urlopen('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'))

model = timm.create_model('efficientformerv2_l.snap_dist_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```
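The top-5 indices are ImageNet-1k class ids; mapping them to human-readable labels requires a class-index file that is not bundled here. A minimal sketch that just prints the ids and softmax percentages from the variables above:

```python
# print raw class indices alongside their softmax percentages
for prob, idx in zip(top5_probabilities[0], top5_class_indices[0]):
    print(f'class {idx.item()}: {prob.item():.2f}%')
```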
### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(
    urlopen('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'))

model = timm.create_model(
    'efficientformerv2_l.snap_dist_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (batch_size, num_features, H, W) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (batch_size, num_features) shaped tensor
```
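A common use of these pooled embeddings is image-to-image similarity. A minimal sketch, assuming `emb_a` and `emb_b` are two hypothetical `(1, num_features)` outputs produced as above:

```python
import torch.nn.functional as F

# cosine similarity between two pooled embeddings; 1.0 means identical direction
similarity = F.cosine_similarity(emb_a, emb_b)
print(similarity.item())
```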
### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(
    urlopen('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'))

model = timm.create_model(
    'efficientformerv2_l.snap_dist_in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g. for efficientformerv2_l:
    #  torch.Size([1, 40, 56, 56])
    #  torch.Size([1, 80, 28, 28])
    #  torch.Size([1, 192, 14, 14])
    #  torch.Size([1, 384, 7, 7])
    print(o.shape)
```
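If only some stages are needed, timm's `features_only` models also accept an `out_indices` argument to select which feature maps are returned. A minimal sketch (stage indices follow the four shapes printed above):

```python
import timm

# keep only the stride-16 and stride-32 maps (stages 2 and 3 of the four above)
model = timm.create_model(
    'efficientformerv2_l.snap_dist_in1k',
    pretrained=True,
    features_only=True,
    out_indices=(2, 3),
)
print(model.feature_info.channels())  # e.g. [192, 384]
```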
## Model Comparison

|model                               |top1  |top5  |param_count|img_size|
|------------------------------------|------|------|-----------|--------|
|efficientformerv2_l.snap_dist_in1k  |83.628|96.54 |26.32      |224     |
|efficientformer_l7.snap_dist_in1k   |83.368|96.534|82.23      |224     |
|efficientformer_l3.snap_dist_in1k   |82.572|96.24 |31.41      |224     |
|efficientformerv2_s2.snap_dist_in1k |82.128|95.902|12.71      |224     |
|efficientformer_l1.snap_dist_in1k   |80.496|94.984|12.29      |224     |
|efficientformerv2_s1.snap_dist_in1k |79.698|94.698|6.19       |224     |
|efficientformerv2_s0.snap_dist_in1k |76.026|92.77 |3.6        |224     |
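The table compares this checkpoint against the other distilled EfficientFormer checkpoints in timm. A quick way to enumerate what is available locally (exact names may vary by timm version):

```python
import timm

# list EfficientFormer model names that have pretrained weights
print(timm.list_models('efficientformer*', pretrained=True))
```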
## Citation
```bibtex
@article{li2022rethinking,
  title={Rethinking Vision Transformers for MobileNet Size and Speed},
  author={Li, Yanyu and Hu, Ju and Wen, Yang and Evangelidis, Georgios and Salahi, Kamyar and Wang, Yanzhi and Tulyakov, Sergey and Ren, Jian},
  journal={arXiv preprint arXiv:2212.08059},
  year={2022}
}
```
```bibtex
@misc{rw2019timm,
  author = {Ross Wightman},
  title = {PyTorch Image Models},
  year = {2019},
  publisher = {GitHub},
  journal = {GitHub repository},
  doi = {10.5281/zenodo.4414861},
  howpublished = {\url{https://github.com/rwightman/pytorch-image-models}}
}
```
config.json
ADDED
@@ -0,0 +1,32 @@
{
    "architecture": "efficientformerv2_l",
    "num_classes": 1000,
    "num_features": 384,
    "pretrained_cfg": {
        "tag": "snap_dist_in1k",
        "custom_load": false,
        "input_size": [
            3,
            224,
            224
        ],
        "fixed_input_size": true,
        "interpolation": "bicubic",
        "crop_pct": 0.95,
        "crop_mode": "center",
        "mean": [
            0.485,
            0.456,
            0.406
        ],
        "std": [
            0.229,
            0.224,
            0.225
        ],
        "num_classes": 1000,
        "pool_size": null,
        "first_conv": null,
        "classifier": "head"
    }
}
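The `pretrained_cfg` block drives preprocessing: `timm.data.create_transform` consumes these values directly, but the eval pipeline it implies can also be written out by hand. A minimal torchvision sketch (an illustration, not timm's exact implementation):

```python
from torchvision import transforms

# eval transform implied by pretrained_cfg: resize by 1/crop_pct, center crop, normalize
eval_tf = transforms.Compose([
    transforms.Resize(int(224 / 0.95), interpolation=transforms.InterpolationMode.BICUBIC),  # crop_pct = 0.95
    transforms.CenterCrop(224),  # crop_mode = "center"
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
```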
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0226f247bcca663154d5baeef57d8f3b0d2fe72a7cf0b4d30042090746e5959d
size 106571127
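This file is a Git LFS pointer, not the weights themselves; the `oid` line gives the SHA-256 of the actual 106,571,127-byte file. A minimal sketch for verifying a downloaded `pytorch_model.bin` against it:

```python
import hashlib

# hash the downloaded weights file in 1 MiB chunks
h = hashlib.sha256()
with open('pytorch_model.bin', 'rb') as f:
    for chunk in iter(lambda: f.read(1 << 20), b''):
        h.update(chunk)

expected = '0226f247bcca663154d5baeef57d8f3b0d2fe72a7cf0b4d30042090746e5959d'
print('OK' if h.hexdigest() == expected else 'MISMATCH')
```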