meg (HF staff) committed on
Commit
abee7a4
1 Parent(s): 847619e

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. pytorch-image-models/hfdocs/source/models/ensemble-adversarial.mdx +165 -0
  2. pytorch-image-models/hfdocs/source/models/ese-vovnet.mdx +159 -0
  3. pytorch-image-models/hfdocs/source/models/gloun-resnext.mdx +209 -0
  4. pytorch-image-models/hfdocs/source/models/gloun-seresnext.mdx +203 -0
  5. pytorch-image-models/hfdocs/source/models/gloun-xception.mdx +133 -0
  6. pytorch-image-models/hfdocs/source/models/hrnet.mdx +425 -0
  7. pytorch-image-models/hfdocs/source/models/ig-resnext.mdx +276 -0
  8. pytorch-image-models/hfdocs/source/models/inception-resnet-v2.mdx +139 -0
  9. pytorch-image-models/hfdocs/source/models/inception-v3.mdx +152 -0
  10. pytorch-image-models/hfdocs/source/models/inception-v4.mdx +138 -0
  11. pytorch-image-models/hfdocs/source/models/legacy-se-resnet.mdx +324 -0
  12. pytorch-image-models/hfdocs/source/models/legacy-se-resnext.mdx +234 -0
  13. pytorch-image-models/hfdocs/source/models/legacy-senet.mdx +141 -0
  14. pytorch-image-models/hfdocs/source/models/mixnet.mdx +231 -0
  15. pytorch-image-models/hfdocs/source/models/mnasnet.mdx +176 -0
  16. pytorch-image-models/hfdocs/source/models/mobilenet-v2.mdx +277 -0
  17. pytorch-image-models/hfdocs/source/models/mobilenet-v3.mdx +205 -0
  18. pytorch-image-models/hfdocs/source/models/nasnet.mdx +137 -0
  19. pytorch-image-models/hfdocs/source/models/noisy-student.mdx +577 -0
  20. pytorch-image-models/hfdocs/source/models/pnasnet.mdx +138 -0
  21. pytorch-image-models/hfdocs/source/models/regnetx.mdx +559 -0
  22. pytorch-image-models/hfdocs/source/models/regnety.mdx +573 -0
  23. pytorch-image-models/hfdocs/source/models/res2net.mdx +327 -0
  24. pytorch-image-models/hfdocs/source/models/res2next.mdx +142 -0
  25. pytorch-image-models/hfdocs/source/models/resnet.mdx +445 -0
  26. pytorch-image-models/hfdocs/source/models/resnext.mdx +250 -0
  27. pytorch-image-models/hfdocs/source/models/se-resnet.mdx +189 -0
  28. pytorch-image-models/hfdocs/source/models/selecsls.mdx +203 -0
  29. pytorch-image-models/hfdocs/source/models/skresnet.mdx +179 -0
  30. pytorch-image-models/hfdocs/source/models/skresnext.mdx +137 -0
  31. pytorch-image-models/hfdocs/source/models/spnasnet.mdx +129 -0
  32. pytorch-image-models/hfdocs/source/models/ssl-resnet.mdx +198 -0
  33. pytorch-image-models/hfdocs/source/models/swsl-resnet.mdx +198 -0
  34. pytorch-image-models/hfdocs/source/models/swsl-resnext.mdx +284 -0
  35. pytorch-image-models/hfdocs/source/models/tf-efficientnet-lite.mdx +262 -0
  36. pytorch-image-models/hfdocs/source/models/tf-efficientnet.mdx +669 -0
  37. pytorch-image-models/hfdocs/source/models/tf-mixnet.mdx +200 -0
  38. pytorch-image-models/hfdocs/source/models/tf-mobilenet-v3.mdx +387 -0
  39. pytorch-image-models/hfdocs/source/models/tresnet.mdx +358 -0
  40. pytorch-image-models/hfdocs/source/models/xception.mdx +230 -0
  41. pytorch-image-models/hfdocs/source/reference/data.mdx +9 -0
  42. pytorch-image-models/hfdocs/source/reference/optimizers.mdx +33 -0
  43. pytorch-image-models/hfdocs/source/reference/schedulers.mdx +19 -0
  44. pytorch-image-models/results/benchmark-infer-amp-nchw-pt113-cu117-rtx3090.csv +933 -0
  45. pytorch-image-models/results/benchmark-infer-amp-nchw-pt210-cu121-rtx3090.csv +1294 -0
  46. pytorch-image-models/results/benchmark-infer-amp-nchw-pt240-cu124-rtx3090.csv +1444 -0
  47. pytorch-image-models/results/benchmark-infer-amp-nchw-pt240-cu124-rtx4090-dynamo.csv +1444 -0
  48. pytorch-image-models/results/benchmark-infer-amp-nchw-pt240-cu124-rtx4090.csv +1445 -0
  49. pytorch-image-models/results/benchmark-infer-amp-nhwc-pt113-cu117-rtx3090.csv +930 -0
  50. pytorch-image-models/results/benchmark-infer-amp-nhwc-pt210-cu121-rtx3090.csv +1205 -0
pytorch-image-models/hfdocs/source/models/ensemble-adversarial.mdx ADDED
@@ -0,0 +1,165 @@
1
+ # Ensemble Adversarial Inception ResNet v2
2
+
3
+ **Inception-ResNet-v2** is a convolutional neural architecture that builds on the Inception family of architectures but incorporates [residual connections](https://paperswithcode.com/method/residual-connection) (replacing the filter concatenation stage of the Inception architecture).
4
+
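+ As a quick illustration, here is a minimal sketch (made-up branch sizes, not the actual timm block) contrasting the Inception filter-concatenation stage with the residual form used in Inception-ResNet, where branch outputs are projected back to the input width and added to the block input:
+
+ ```py
+ import torch
+ import torch.nn as nn
+
+ x = torch.randn(1, 64, 17, 17)
+ branch_a = nn.Conv2d(64, 32, kernel_size=1)
+ branch_b = nn.Sequential(nn.Conv2d(64, 32, kernel_size=1),
+                          nn.Conv2d(32, 32, kernel_size=3, padding=1))
+
+ branches = torch.cat([branch_a(x), branch_b(x)], dim=1)  # Inception: concatenate filters
+ project = nn.Conv2d(64, 64, kernel_size=1)               # match the input channel count
+ residual = x + project(branches)                         # Inception-ResNet: add to the input
+ print(branches.shape, residual.shape)
+ ```
+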
5
+ This particular model was trained for the study of adversarial examples (adversarial training).
6
+
7
+ The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models).
8
+
9
+ ## How do I use this model on an image?
10
+
11
+ To load a pretrained model:
12
+
13
+ ```py
14
+ >>> import timm
15
+ >>> model = timm.create_model('ens_adv_inception_resnet_v2', pretrained=True)
16
+ >>> model.eval()
17
+ ```
18
+
19
+ To load and preprocess the image:
20
+
21
+ ```py
22
+ >>> import urllib
23
+ >>> from PIL import Image
24
+ >>> from timm.data import resolve_data_config
25
+ >>> from timm.data.transforms_factory import create_transform
26
+
27
+ >>> config = resolve_data_config({}, model=model)
28
+ >>> transform = create_transform(**config)
29
+
30
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
31
+ >>> urllib.request.urlretrieve(url, filename)
32
+ >>> img = Image.open(filename).convert('RGB')
33
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
34
+ ```
35
+
36
+ To get the model predictions:
37
+
38
+ ```py
39
+ >>> import torch
40
+ >>> with torch.no_grad():
41
+ ... out = model(tensor)
42
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
43
+ >>> print(probabilities.shape)
44
+ >>> # prints: torch.Size([1000])
45
+ ```
46
+
47
+ To get the top-5 prediction class names:
48
+
49
+ ```py
50
+ >>> # Get imagenet class mappings
51
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
52
+ >>> urllib.request.urlretrieve(url, filename)
53
+ >>> with open("imagenet_classes.txt", "r") as f:
54
+ ... categories = [s.strip() for s in f.readlines()]
55
+
56
+ >>> # Print top categories per image
57
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
58
+ >>> for i in range(top5_prob.size(0)):
59
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
60
+ >>> # prints class names and probabilities like:
61
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
62
+ ```
63
+
64
+ Replace the model name with the variant you want to use, e.g. `ens_adv_inception_resnet_v2`. You can find the IDs in the model summaries at the top of this page.
65
+
66
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
67
+
68
+ ## How do I finetune this model?
69
+
70
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
71
+
72
+ ```py
73
+ >>> model = timm.create_model('ens_adv_inception_resnet_v2', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
74
+ ```
75
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
76
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
77
+
78
+ ## How do I train this model?
79
+
80
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
81
+
82
+ ## Citation
83
+
84
+ ```BibTeX
85
+ @article{DBLP:journals/corr/abs-1804-00097,
86
+ author = {Alexey Kurakin and
87
+ Ian J. Goodfellow and
88
+ Samy Bengio and
89
+ Yinpeng Dong and
90
+ Fangzhou Liao and
91
+ Ming Liang and
92
+ Tianyu Pang and
93
+ Jun Zhu and
94
+ Xiaolin Hu and
95
+ Cihang Xie and
96
+ Jianyu Wang and
97
+ Zhishuai Zhang and
98
+ Zhou Ren and
99
+ Alan L. Yuille and
100
+ Sangxia Huang and
101
+ Yao Zhao and
102
+ Yuzhe Zhao and
103
+ Zhonglin Han and
104
+ Junjiajia Long and
105
+ Yerkebulan Berdibekov and
106
+ Takuya Akiba and
107
+ Seiya Tokui and
108
+ Motoki Abe},
109
+ title = {Adversarial Attacks and Defences Competition},
110
+ journal = {CoRR},
111
+ volume = {abs/1804.00097},
112
+ year = {2018},
113
+ url = {http://arxiv.org/abs/1804.00097},
114
+ archivePrefix = {arXiv},
115
+ eprint = {1804.00097},
116
+ timestamp = {Thu, 31 Oct 2019 16:31:22 +0100},
117
+ biburl = {https://dblp.org/rec/journals/corr/abs-1804-00097.bib},
118
+ bibsource = {dblp computer science bibliography, https://dblp.org}
119
+ }
120
+ ```
121
+
122
+ <!--
123
+ Type: model-index
124
+ Collections:
125
+ - Name: Ensemble Adversarial
126
+ Paper:
127
+ Title: Adversarial Attacks and Defences Competition
128
+ URL: https://paperswithcode.com/paper/adversarial-attacks-and-defences-competition
129
+ Models:
130
+ - Name: ens_adv_inception_resnet_v2
131
+ In Collection: Ensemble Adversarial
132
+ Metadata:
133
+ FLOPs: 16959133120
134
+ Parameters: 55850000
135
+ File Size: 223774238
136
+ Architecture:
137
+ - 1x1 Convolution
138
+ - Auxiliary Classifier
139
+ - Average Pooling
140
+ - Average Pooling
141
+ - Batch Normalization
142
+ - Convolution
143
+ - Dense Connections
144
+ - Dropout
145
+ - Inception-v3 Module
146
+ - Max Pooling
147
+ - ReLU
148
+ - Softmax
149
+ Tasks:
150
+ - Image Classification
151
+ Training Data:
152
+ - ImageNet
153
+ ID: ens_adv_inception_resnet_v2
154
+ Crop Pct: '0.897'
155
+ Image Size: '299'
156
+ Interpolation: bicubic
157
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/inception_resnet_v2.py#L351
158
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ens_adv_inception_resnet_v2-2592a550.pth
159
+ Results:
160
+ - Task: Image Classification
161
+ Dataset: ImageNet
162
+ Metrics:
163
+ Top 1 Accuracy: 1.0%
164
+ Top 5 Accuracy: 17.32%
165
+ -->
pytorch-image-models/hfdocs/source/models/ese-vovnet.mdx ADDED
@@ -0,0 +1,159 @@
1
+ # ESE-VoVNet
2
+
3
+ **VoVNet** is a convolutional neural network that seeks to make [DenseNet](https://paperswithcode.com/method/densenet) more efficient by concatenating all features only once, in the last feature map, which keeps the input size of each layer constant and allows the output channels to be enlarged.
4
+
5
+ Read about [one-shot aggregation here](https://paperswithcode.com/method/one-shot-aggregation).
6
+
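+ As a rough sketch of one-shot aggregation (assumed channel sizes, not timm's actual VovNet code): each convolution only sees the output of the previous one, and all intermediate outputs are concatenated a single time at the end of the block.
+
+ ```py
+ import torch
+ import torch.nn as nn
+
+ convs = nn.ModuleList([nn.Conv2d(64, 32, 3, padding=1)] +
+                       [nn.Conv2d(32, 32, 3, padding=1) for _ in range(2)])
+ x = torch.randn(1, 64, 56, 56)
+ outputs = [x]
+ for conv in convs:
+     x = torch.relu(conv(x))
+     outputs.append(x)
+ aggregated = torch.cat(outputs, dim=1)  # single concatenation: 64 + 3 * 32 = 160 channels
+ print(aggregated.shape)
+ ```
+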
7
+ ## How do I use this model on an image?
8
+
9
+ To load a pretrained model:
10
+
11
+ ```py
12
+ >>> import timm
13
+ >>> model = timm.create_model('ese_vovnet19b_dw', pretrained=True)
14
+ >>> model.eval()
15
+ ```
16
+
17
+ To load and preprocess the image:
18
+
19
+ ```py
20
+ >>> import urllib
21
+ >>> from PIL import Image
22
+ >>> from timm.data import resolve_data_config
23
+ >>> from timm.data.transforms_factory import create_transform
24
+
25
+ >>> config = resolve_data_config({}, model=model)
26
+ >>> transform = create_transform(**config)
27
+
28
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
29
+ >>> urllib.request.urlretrieve(url, filename)
30
+ >>> img = Image.open(filename).convert('RGB')
31
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
32
+ ```
33
+
34
+ To get the model predictions:
35
+
36
+ ```py
37
+ >>> import torch
38
+ >>> with torch.no_grad():
39
+ ... out = model(tensor)
40
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
41
+ >>> print(probabilities.shape)
42
+ >>> # prints: torch.Size([1000])
43
+ ```
44
+
45
+ To get the top-5 prediction class names:
46
+
47
+ ```py
48
+ >>> # Get imagenet class mappings
49
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
50
+ >>> urllib.request.urlretrieve(url, filename)
51
+ >>> with open("imagenet_classes.txt", "r") as f:
52
+ ... categories = [s.strip() for s in f.readlines()]
53
+
54
+ >>> # Print top categories per image
55
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
56
+ >>> for i in range(top5_prob.size(0)):
57
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
58
+ >>> # prints class names and probabilities like:
59
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
60
+ ```
61
+
62
+ Replace the model name with the variant you want to use, e.g. `ese_vovnet19b_dw`. You can find the IDs in the model summaries at the top of this page.
63
+
64
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
65
+
66
+ ## How do I finetune this model?
67
+
68
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
69
+
70
+ ```py
71
+ >>> model = timm.create_model('ese_vovnet19b_dw', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
72
+ ```
73
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
74
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
75
+
76
+ ## How do I train this model?
77
+
78
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
79
+
80
+ ## Citation
81
+
82
+ ```BibTeX
83
+ @misc{lee2019energy,
84
+ title={An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection},
85
+ author={Youngwan Lee and Joong-won Hwang and Sangrok Lee and Yuseok Bae and Jongyoul Park},
86
+ year={2019},
87
+ eprint={1904.09730},
88
+ archivePrefix={arXiv},
89
+ primaryClass={cs.CV}
90
+ }
91
+ ```
92
+
93
+ <!--
94
+ Type: model-index
95
+ Collections:
96
+ - Name: ESE VovNet
97
+ Paper:
98
+ Title: 'CenterMask : Real-Time Anchor-Free Instance Segmentation'
99
+ URL: https://paperswithcode.com/paper/centermask-real-time-anchor-free-instance-1
100
+ Models:
101
+ - Name: ese_vovnet19b_dw
102
+ In Collection: ESE VovNet
103
+ Metadata:
104
+ FLOPs: 1711959904
105
+ Parameters: 6540000
106
+ File Size: 26243175
107
+ Architecture:
108
+ - Batch Normalization
109
+ - Convolution
110
+ - Max Pooling
111
+ - One-Shot Aggregation
112
+ - ReLU
113
+ Tasks:
114
+ - Image Classification
115
+ Training Data:
116
+ - ImageNet
117
+ ID: ese_vovnet19b_dw
118
+ Layers: 19
119
+ Crop Pct: '0.875'
120
+ Image Size: '224'
121
+ Interpolation: bicubic
122
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/vovnet.py#L361
123
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ese_vovnet19b_dw-a8741004.pth
124
+ Results:
125
+ - Task: Image Classification
126
+ Dataset: ImageNet
127
+ Metrics:
128
+ Top 1 Accuracy: 76.82%
129
+ Top 5 Accuracy: 93.28%
130
+ - Name: ese_vovnet39b
131
+ In Collection: ESE VovNet
132
+ Metadata:
133
+ FLOPs: 9089259008
134
+ Parameters: 24570000
135
+ File Size: 98397138
136
+ Architecture:
137
+ - Batch Normalization
138
+ - Convolution
139
+ - Max Pooling
140
+ - One-Shot Aggregation
141
+ - ReLU
142
+ Tasks:
143
+ - Image Classification
144
+ Training Data:
145
+ - ImageNet
146
+ ID: ese_vovnet39b
147
+ Layers: 39
148
+ Crop Pct: '0.875'
149
+ Image Size: '224'
150
+ Interpolation: bicubic
151
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/vovnet.py#L371
152
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ese_vovnet39b-f912fe73.pth
153
+ Results:
154
+ - Task: Image Classification
155
+ Dataset: ImageNet
156
+ Metrics:
157
+ Top 1 Accuracy: 79.31%
158
+ Top 5 Accuracy: 94.72%
159
+ -->
pytorch-image-models/hfdocs/source/models/gloun-resnext.mdx ADDED
@@ -0,0 +1,209 @@
1
+ # (Gluon) ResNeXt
2
+
3
+ A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) \\( C \\), as an essential factor in addition to the dimensions of depth and width.
4
+
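+ As a rough illustration (assumed channel sizes, not timm's actual block), the \\( C \\) parallel transformations of a ResNeXt block are in practice implemented as a single grouped convolution with `groups` equal to the cardinality:
+
+ ```py
+ import torch.nn as nn
+
+ # C = 32, as in the resnext*_32x4d variants
+ grouped = nn.Conv2d(128, 128, kernel_size=3, padding=1, groups=32)
+ plain = nn.Conv2d(128, 128, kernel_size=3, padding=1)
+ count = lambda m: sum(p.numel() for p in m.parameters())
+ print(count(grouped), count(plain))  # the grouped conv uses roughly 1/32 of the 3x3 weights
+ ```
+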
5
+ The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html).
6
+
7
+ ## How do I use this model on an image?
8
+
9
+ To load a pretrained model:
10
+
11
+ ```py
12
+ >>> import timm
13
+ >>> model = timm.create_model('gluon_resnext101_32x4d', pretrained=True)
14
+ >>> model.eval()
15
+ ```
16
+
17
+ To load and preprocess the image:
18
+
19
+ ```py
20
+ >>> import urllib
21
+ >>> from PIL import Image
22
+ >>> from timm.data import resolve_data_config
23
+ >>> from timm.data.transforms_factory import create_transform
24
+
25
+ >>> config = resolve_data_config({}, model=model)
26
+ >>> transform = create_transform(**config)
27
+
28
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
29
+ >>> urllib.request.urlretrieve(url, filename)
30
+ >>> img = Image.open(filename).convert('RGB')
31
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
32
+ ```
33
+
34
+ To get the model predictions:
35
+
36
+ ```py
37
+ >>> import torch
38
+ >>> with torch.no_grad():
39
+ ... out = model(tensor)
40
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
41
+ >>> print(probabilities.shape)
42
+ >>> # prints: torch.Size([1000])
43
+ ```
44
+
45
+ To get the top-5 prediction class names:
46
+
47
+ ```py
48
+ >>> # Get imagenet class mappings
49
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
50
+ >>> urllib.request.urlretrieve(url, filename)
51
+ >>> with open("imagenet_classes.txt", "r") as f:
52
+ ... categories = [s.strip() for s in f.readlines()]
53
+
54
+ >>> # Print top categories per image
55
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
56
+ >>> for i in range(top5_prob.size(0)):
57
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
58
+ >>> # prints class names and probabilities like:
59
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
60
+ ```
61
+
62
+ Replace the model name with the variant you want to use, e.g. `gluon_resnext101_32x4d`. You can find the IDs in the model summaries at the top of this page.
63
+
64
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
65
+
66
+ ## How do I finetune this model?
67
+
68
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
69
+
70
+ ```py
71
+ >>> model = timm.create_model('gluon_resnext101_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
72
+ ```
73
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
74
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
75
+
76
+ ## How do I train this model?
77
+
78
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
79
+
80
+ ## Citation
81
+
82
+ ```BibTeX
83
+ @article{DBLP:journals/corr/XieGDTH16,
84
+ author = {Saining Xie and
85
+ Ross B. Girshick and
86
+ Piotr Doll{\'{a}}r and
87
+ Zhuowen Tu and
88
+ Kaiming He},
89
+ title = {Aggregated Residual Transformations for Deep Neural Networks},
90
+ journal = {CoRR},
91
+ volume = {abs/1611.05431},
92
+ year = {2016},
93
+ url = {http://arxiv.org/abs/1611.05431},
94
+ archivePrefix = {arXiv},
95
+ eprint = {1611.05431},
96
+ timestamp = {Mon, 13 Aug 2018 16:45:58 +0200},
97
+ biburl = {https://dblp.org/rec/journals/corr/XieGDTH16.bib},
98
+ bibsource = {dblp computer science bibliography, https://dblp.org}
99
+ }
100
+ ```
101
+
102
+ <!--
103
+ Type: model-index
104
+ Collections:
105
+ - Name: Gloun ResNeXt
106
+ Paper:
107
+ Title: Aggregated Residual Transformations for Deep Neural Networks
108
+ URL: https://paperswithcode.com/paper/aggregated-residual-transformations-for-deep
109
+ Models:
110
+ - Name: gluon_resnext101_32x4d
111
+ In Collection: Gloun ResNeXt
112
+ Metadata:
113
+ FLOPs: 10298145792
114
+ Parameters: 44180000
115
+ File Size: 177367414
116
+ Architecture:
117
+ - 1x1 Convolution
118
+ - Batch Normalization
119
+ - Convolution
120
+ - Global Average Pooling
121
+ - Grouped Convolution
122
+ - Max Pooling
123
+ - ReLU
124
+ - ResNeXt Block
125
+ - Residual Connection
126
+ - Softmax
127
+ Tasks:
128
+ - Image Classification
129
+ Training Data:
130
+ - ImageNet
131
+ ID: gluon_resnext101_32x4d
132
+ Crop Pct: '0.875'
133
+ Image Size: '224'
134
+ Interpolation: bicubic
135
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L193
136
+ Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_32x4d-b253c8c4.pth
137
+ Results:
138
+ - Task: Image Classification
139
+ Dataset: ImageNet
140
+ Metrics:
141
+ Top 1 Accuracy: 80.33%
142
+ Top 5 Accuracy: 94.91%
143
+ - Name: gluon_resnext101_64x4d
144
+ In Collection: Gloun ResNeXt
145
+ Metadata:
146
+ FLOPs: 19954172928
147
+ Parameters: 83460000
148
+ File Size: 334737852
149
+ Architecture:
150
+ - 1x1 Convolution
151
+ - Batch Normalization
152
+ - Convolution
153
+ - Global Average Pooling
154
+ - Grouped Convolution
155
+ - Max Pooling
156
+ - ReLU
157
+ - ResNeXt Block
158
+ - Residual Connection
159
+ - Softmax
160
+ Tasks:
161
+ - Image Classification
162
+ Training Data:
163
+ - ImageNet
164
+ ID: gluon_resnext101_64x4d
165
+ Crop Pct: '0.875'
166
+ Image Size: '224'
167
+ Interpolation: bicubic
168
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L201
169
+ Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_64x4d-f9a8e184.pth
170
+ Results:
171
+ - Task: Image Classification
172
+ Dataset: ImageNet
173
+ Metrics:
174
+ Top 1 Accuracy: 80.63%
175
+ Top 5 Accuracy: 95.0%
176
+ - Name: gluon_resnext50_32x4d
177
+ In Collection: Gloun ResNeXt
178
+ Metadata:
179
+ FLOPs: 5472648192
180
+ Parameters: 25030000
181
+ File Size: 100441719
182
+ Architecture:
183
+ - 1x1 Convolution
184
+ - Batch Normalization
185
+ - Convolution
186
+ - Global Average Pooling
187
+ - Grouped Convolution
188
+ - Max Pooling
189
+ - ReLU
190
+ - ResNeXt Block
191
+ - Residual Connection
192
+ - Softmax
193
+ Tasks:
194
+ - Image Classification
195
+ Training Data:
196
+ - ImageNet
197
+ ID: gluon_resnext50_32x4d
198
+ Crop Pct: '0.875'
199
+ Image Size: '224'
200
+ Interpolation: bicubic
201
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L185
202
+ Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext50_32x4d-e6a097c1.pth
203
+ Results:
204
+ - Task: Image Classification
205
+ Dataset: ImageNet
206
+ Metrics:
207
+ Top 1 Accuracy: 79.35%
208
+ Top 5 Accuracy: 94.42%
209
+ -->
pytorch-image-models/hfdocs/source/models/gloun-seresnext.mdx ADDED
@@ -0,0 +1,203 @@
1
+ # (Gluon) SE-ResNeXt
2
+
3
+ **SE ResNeXt** is a variant of a [ResNext](https://www.paperswithcode.com/method/resnext) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration.
4
+
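+ As a rough sketch of a squeeze-and-excitation block (assumed channel count and reduction ratio, not timm's exact module): the feature map is globally average-pooled ("squeeze"), passed through a small bottleneck MLP with a sigmoid ("excitation"), and the resulting per-channel weights rescale the input.
+
+ ```py
+ import torch
+ import torch.nn as nn
+
+ x = torch.randn(1, 256, 14, 14)
+ fc1 = nn.Linear(256, 256 // 16)   # reduction ratio 16
+ fc2 = nn.Linear(256 // 16, 256)
+
+ scale = x.mean(dim=(2, 3))                           # squeeze: global average pool -> (N, C)
+ scale = torch.sigmoid(fc2(torch.relu(fc1(scale))))   # excitation: per-channel gates in (0, 1)
+ out = x * scale[:, :, None, None]                    # recalibrate each channel of the input
+ print(out.shape)
+ ```
+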
5
+ The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html).
6
+
7
+ ## How do I use this model on an image?
8
+
9
+ To load a pretrained model:
10
+
11
+ ```py
12
+ >>> import timm
13
+ >>> model = timm.create_model('gluon_seresnext101_32x4d', pretrained=True)
14
+ >>> model.eval()
15
+ ```
16
+
17
+ To load and preprocess the image:
18
+
19
+ ```py
20
+ >>> import urllib
21
+ >>> from PIL import Image
22
+ >>> from timm.data import resolve_data_config
23
+ >>> from timm.data.transforms_factory import create_transform
24
+
25
+ >>> config = resolve_data_config({}, model=model)
26
+ >>> transform = create_transform(**config)
27
+
28
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
29
+ >>> urllib.request.urlretrieve(url, filename)
30
+ >>> img = Image.open(filename).convert('RGB')
31
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
32
+ ```
33
+
34
+ To get the model predictions:
35
+
36
+ ```py
37
+ >>> import torch
38
+ >>> with torch.no_grad():
39
+ ... out = model(tensor)
40
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
41
+ >>> print(probabilities.shape)
42
+ >>> # prints: torch.Size([1000])
43
+ ```
44
+
45
+ To get the top-5 prediction class names:
46
+
47
+ ```py
48
+ >>> # Get imagenet class mappings
49
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
50
+ >>> urllib.request.urlretrieve(url, filename)
51
+ >>> with open("imagenet_classes.txt", "r") as f:
52
+ ... categories = [s.strip() for s in f.readlines()]
53
+
54
+ >>> # Print top categories per image
55
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
56
+ >>> for i in range(top5_prob.size(0)):
57
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
58
+ >>> # prints class names and probabilities like:
59
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
60
+ ```
61
+
62
+ Replace the model name with the variant you want to use, e.g. `gluon_seresnext101_32x4d`. You can find the IDs in the model summaries at the top of this page.
63
+
64
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
65
+
66
+ ## How do I finetune this model?
67
+
68
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
69
+
70
+ ```py
71
+ >>> model = timm.create_model('gluon_seresnext101_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
72
+ ```
73
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
74
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
75
+
76
+ ## How do I train this model?
77
+
78
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
79
+
80
+ ## Citation
81
+
82
+ ```BibTeX
83
+ @misc{hu2019squeezeandexcitation,
84
+ title={Squeeze-and-Excitation Networks},
85
+ author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu},
86
+ year={2019},
87
+ eprint={1709.01507},
88
+ archivePrefix={arXiv},
89
+ primaryClass={cs.CV}
90
+ }
91
+ ```
92
+
93
+ <!--
94
+ Type: model-index
95
+ Collections:
96
+ - Name: Gloun SEResNeXt
97
+ Paper:
98
+ Title: Squeeze-and-Excitation Networks
99
+ URL: https://paperswithcode.com/paper/squeeze-and-excitation-networks
100
+ Models:
101
+ - Name: gluon_seresnext101_32x4d
102
+ In Collection: Gloun SEResNeXt
103
+ Metadata:
104
+ FLOPs: 10302923504
105
+ Parameters: 48960000
106
+ File Size: 196505510
107
+ Architecture:
108
+ - 1x1 Convolution
109
+ - Batch Normalization
110
+ - Convolution
111
+ - Global Average Pooling
112
+ - Grouped Convolution
113
+ - Max Pooling
114
+ - ReLU
115
+ - ResNeXt Block
116
+ - Residual Connection
117
+ - Softmax
118
+ - Squeeze-and-Excitation Block
119
+ Tasks:
120
+ - Image Classification
121
+ Training Data:
122
+ - ImageNet
123
+ ID: gluon_seresnext101_32x4d
124
+ Crop Pct: '0.875'
125
+ Image Size: '224'
126
+ Interpolation: bicubic
127
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L219
128
+ Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_32x4d-cf52900d.pth
129
+ Results:
130
+ - Task: Image Classification
131
+ Dataset: ImageNet
132
+ Metrics:
133
+ Top 1 Accuracy: 80.87%
134
+ Top 5 Accuracy: 95.29%
135
+ - Name: gluon_seresnext101_64x4d
136
+ In Collection: Gloun SEResNeXt
137
+ Metadata:
138
+ FLOPs: 19958950640
139
+ Parameters: 88230000
140
+ File Size: 353875948
141
+ Architecture:
142
+ - 1x1 Convolution
143
+ - Batch Normalization
144
+ - Convolution
145
+ - Global Average Pooling
146
+ - Grouped Convolution
147
+ - Max Pooling
148
+ - ReLU
149
+ - ResNeXt Block
150
+ - Residual Connection
151
+ - Softmax
152
+ - Squeeze-and-Excitation Block
153
+ Tasks:
154
+ - Image Classification
155
+ Training Data:
156
+ - ImageNet
157
+ ID: gluon_seresnext101_64x4d
158
+ Crop Pct: '0.875'
159
+ Image Size: '224'
160
+ Interpolation: bicubic
161
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L229
162
+ Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_64x4d-f9926f93.pth
163
+ Results:
164
+ - Task: Image Classification
165
+ Dataset: ImageNet
166
+ Metrics:
167
+ Top 1 Accuracy: 80.88%
168
+ Top 5 Accuracy: 95.31%
169
+ - Name: gluon_seresnext50_32x4d
170
+ In Collection: Gloun SEResNeXt
171
+ Metadata:
172
+ FLOPs: 5475179184
173
+ Parameters: 27560000
174
+ File Size: 110578827
175
+ Architecture:
176
+ - 1x1 Convolution
177
+ - Batch Normalization
178
+ - Convolution
179
+ - Global Average Pooling
180
+ - Grouped Convolution
181
+ - Max Pooling
182
+ - ReLU
183
+ - ResNeXt Block
184
+ - Residual Connection
185
+ - Softmax
186
+ - Squeeze-and-Excitation Block
187
+ Tasks:
188
+ - Image Classification
189
+ Training Data:
190
+ - ImageNet
191
+ ID: gluon_seresnext50_32x4d
192
+ Crop Pct: '0.875'
193
+ Image Size: '224'
194
+ Interpolation: bicubic
195
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L209
196
+ Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext50_32x4d-90cf2d6e.pth
197
+ Results:
198
+ - Task: Image Classification
199
+ Dataset: ImageNet
200
+ Metrics:
201
+ Top 1 Accuracy: 79.92%
202
+ Top 5 Accuracy: 94.82%
203
+ -->
pytorch-image-models/hfdocs/source/models/gloun-xception.mdx ADDED
@@ -0,0 +1,133 @@
1
+ # (Gluon) Xception
2
+
3
+ **Xception** is a convolutional neural network architecture that relies solely on [depthwise separable convolution](https://paperswithcode.com/method/depthwise-separable-convolution) layers.
4
+
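+ As a rough sketch (assumed channel sizes, not timm's actual layers), a depthwise separable convolution factorises a dense 3x3 convolution into a per-channel 3x3 convolution followed by a 1x1 pointwise convolution that mixes channels:
+
+ ```py
+ import torch.nn as nn
+
+ depthwise_separable = nn.Sequential(
+     nn.Conv2d(128, 128, kernel_size=3, padding=1, groups=128),  # depthwise: one filter per channel
+     nn.Conv2d(128, 256, kernel_size=1),                         # pointwise: mix channels
+ )
+ dense = nn.Conv2d(128, 256, kernel_size=3, padding=1)
+ count = lambda m: sum(p.numel() for p in m.parameters())
+ print(count(depthwise_separable), count(dense))  # far fewer parameters than the dense conv
+ ```
+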
5
+ The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html).
6
+
7
+ ## How do I use this model on an image?
8
+
9
+ To load a pretrained model:
10
+
11
+ ```py
12
+ >>> import timm
13
+ >>> model = timm.create_model('gluon_xception65', pretrained=True)
14
+ >>> model.eval()
15
+ ```
16
+
17
+ To load and preprocess the image:
18
+
19
+ ```py
20
+ >>> import urllib
21
+ >>> from PIL import Image
22
+ >>> from timm.data import resolve_data_config
23
+ >>> from timm.data.transforms_factory import create_transform
24
+
25
+ >>> config = resolve_data_config({}, model=model)
26
+ >>> transform = create_transform(**config)
27
+
28
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
29
+ >>> urllib.request.urlretrieve(url, filename)
30
+ >>> img = Image.open(filename).convert('RGB')
31
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
32
+ ```
33
+
34
+ To get the model predictions:
35
+
36
+ ```py
37
+ >>> import torch
38
+ >>> with torch.no_grad():
39
+ ... out = model(tensor)
40
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
41
+ >>> print(probabilities.shape)
42
+ >>> # prints: torch.Size([1000])
43
+ ```
44
+
45
+ To get the top-5 prediction class names:
46
+
47
+ ```py
48
+ >>> # Get imagenet class mappings
49
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
50
+ >>> urllib.request.urlretrieve(url, filename)
51
+ >>> with open("imagenet_classes.txt", "r") as f:
52
+ ... categories = [s.strip() for s in f.readlines()]
53
+
54
+ >>> # Print top categories per image
55
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
56
+ >>> for i in range(top5_prob.size(0)):
57
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
58
+ >>> # prints class names and probabilities like:
59
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
60
+ ```
61
+
62
+ Replace the model name with the variant you want to use, e.g. `gluon_xception65`. You can find the IDs in the model summaries at the top of this page.
63
+
64
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
65
+
66
+ ## How do I finetune this model?
67
+
68
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
69
+
70
+ ```py
71
+ >>> model = timm.create_model('gluon_xception65', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
72
+ ```
73
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
74
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
75
+
76
+ ## How do I train this model?
77
+
78
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
79
+
80
+ ## Citation
81
+
82
+ ```BibTeX
83
+ @misc{chollet2017xception,
84
+ title={Xception: Deep Learning with Depthwise Separable Convolutions},
85
+ author={François Chollet},
86
+ year={2017},
87
+ eprint={1610.02357},
88
+ archivePrefix={arXiv},
89
+ primaryClass={cs.CV}
90
+ }
91
+ ```
92
+
93
+ <!--
94
+ Type: model-index
95
+ Collections:
96
+ - Name: Gloun Xception
97
+ Paper:
98
+ Title: 'Xception: Deep Learning with Depthwise Separable Convolutions'
99
+ URL: https://paperswithcode.com/paper/xception-deep-learning-with-depthwise
100
+ Models:
101
+ - Name: gluon_xception65
102
+ In Collection: Gloun Xception
103
+ Metadata:
104
+ FLOPs: 17594889728
105
+ Parameters: 39920000
106
+ File Size: 160551306
107
+ Architecture:
108
+ - 1x1 Convolution
109
+ - Convolution
110
+ - Dense Connections
111
+ - Depthwise Separable Convolution
112
+ - Global Average Pooling
113
+ - Max Pooling
114
+ - ReLU
115
+ - Residual Connection
116
+ - Softmax
117
+ Tasks:
118
+ - Image Classification
119
+ Training Data:
120
+ - ImageNet
121
+ ID: gluon_xception65
122
+ Crop Pct: '0.903'
123
+ Image Size: '299'
124
+ Interpolation: bicubic
125
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_xception.py#L241
126
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_xception-7015a15c.pth
127
+ Results:
128
+ - Task: Image Classification
129
+ Dataset: ImageNet
130
+ Metrics:
131
+ Top 1 Accuracy: 79.7%
132
+ Top 5 Accuracy: 94.87%
133
+ -->
pytorch-image-models/hfdocs/source/models/hrnet.mdx ADDED
@@ -0,0 +1,425 @@
1
+ # HRNet
2
+
3
+ **HRNet**, or **High-Resolution Net**, is a general-purpose convolutional neural network for tasks like semantic segmentation, object detection and image classification. It maintains high-resolution representations through the whole process. The network starts from a high-resolution convolution stream, gradually adds high-to-low resolution convolution streams one by one, and connects the multi-resolution streams in parallel. The resulting network consists of several (\\( 4 \\) in the paper) stages, and the \\( n \\)th stage contains \\( n \\) streams corresponding to \\( n \\) resolutions. The authors conduct repeated multi-resolution fusions by exchanging information across the parallel streams over and over.
4
+
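+ As a toy sketch of a single multi-resolution fusion between two parallel streams (made-up shapes; the real HRNet keeps up to four streams and repeats such exchanges at every stage):
+
+ ```py
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ high = torch.randn(1, 32, 56, 56)  # high-resolution stream
+ low = torch.randn(1, 64, 28, 28)   # low-resolution stream
+
+ to_high = nn.Conv2d(64, 32, kernel_size=1)                      # low -> high: 1x1 conv + upsample
+ to_low = nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1)  # high -> low: strided conv
+
+ fused_high = high + F.interpolate(to_high(low), size=high.shape[-2:], mode='nearest')
+ fused_low = low + to_low(high)
+ print(fused_high.shape, fused_low.shape)
+ ```
+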
5
+ ## How do I use this model on an image?
6
+
7
+ To load a pretrained model:
8
+
9
+ ```py
10
+ >>> import timm
11
+ >>> model = timm.create_model('hrnet_w18', pretrained=True)
12
+ >>> model.eval()
13
+ ```
14
+
15
+ To load and preprocess the image:
16
+
17
+ ```py
18
+ >>> import urllib
19
+ >>> from PIL import Image
20
+ >>> from timm.data import resolve_data_config
21
+ >>> from timm.data.transforms_factory import create_transform
22
+
23
+ >>> config = resolve_data_config({}, model=model)
24
+ >>> transform = create_transform(**config)
25
+
26
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
27
+ >>> urllib.request.urlretrieve(url, filename)
28
+ >>> img = Image.open(filename).convert('RGB')
29
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
30
+ ```
31
+
32
+ To get the model predictions:
33
+
34
+ ```py
35
+ >>> import torch
36
+ >>> with torch.no_grad():
37
+ ... out = model(tensor)
38
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
39
+ >>> print(probabilities.shape)
40
+ >>> # prints: torch.Size([1000])
41
+ ```
42
+
43
+ To get the top-5 prediction class names:
44
+
45
+ ```py
46
+ >>> # Get imagenet class mappings
47
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
48
+ >>> urllib.request.urlretrieve(url, filename)
49
+ >>> with open("imagenet_classes.txt", "r") as f:
50
+ ... categories = [s.strip() for s in f.readlines()]
51
+
52
+ >>> # Print top categories per image
53
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
54
+ >>> for i in range(top5_prob.size(0)):
55
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
56
+ >>> # prints class names and probabilities like:
57
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
58
+ ```
59
+
60
+ Replace the model name with the variant you want to use, e.g. `hrnet_w18`. You can find the IDs in the model summaries at the top of this page.
61
+
62
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
63
+
64
+ ## How do I finetune this model?
65
+
66
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
67
+
68
+ ```py
69
+ >>> model = timm.create_model('hrnet_w18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
70
+ ```
71
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
72
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
73
+
74
+ ## How do I train this model?
75
+
76
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
77
+
78
+ ## Citation
79
+
80
+ ```BibTeX
81
+ @misc{sun2019highresolution,
82
+ title={High-Resolution Representations for Labeling Pixels and Regions},
83
+ author={Ke Sun and Yang Zhao and Borui Jiang and Tianheng Cheng and Bin Xiao and Dong Liu and Yadong Mu and Xinggang Wang and Wenyu Liu and Jingdong Wang},
84
+ year={2019},
85
+ eprint={1904.04514},
86
+ archivePrefix={arXiv},
87
+ primaryClass={cs.CV}
88
+ }
89
+ ```
90
+
91
+ <!--
92
+ Type: model-index
93
+ Collections:
94
+ - Name: HRNet
95
+ Paper:
96
+ Title: Deep High-Resolution Representation Learning for Visual Recognition
97
+ URL: https://paperswithcode.com/paper/190807919
98
+ Models:
99
+ - Name: hrnet_w18
100
+ In Collection: HRNet
101
+ Metadata:
102
+ FLOPs: 5547205500
103
+ Parameters: 21300000
104
+ File Size: 85718883
105
+ Architecture:
106
+ - Batch Normalization
107
+ - Convolution
108
+ - ReLU
109
+ - Residual Connection
110
+ Tasks:
111
+ - Image Classification
112
+ Training Techniques:
113
+ - Nesterov Accelerated Gradient
114
+ - Weight Decay
115
+ Training Data:
116
+ - ImageNet
117
+ Training Resources: 4x NVIDIA V100 GPUs
118
+ ID: hrnet_w18
119
+ Epochs: 100
120
+ Layers: 18
121
+ Crop Pct: '0.875'
122
+ Momentum: 0.9
123
+ Batch Size: 256
124
+ Image Size: '224'
125
+ Weight Decay: 0.001
126
+ Interpolation: bilinear
127
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L800
128
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w18-8cb57bb9.pth
129
+ Results:
130
+ - Task: Image Classification
131
+ Dataset: ImageNet
132
+ Metrics:
133
+ Top 1 Accuracy: 76.76%
134
+ Top 5 Accuracy: 93.44%
135
+ - Name: hrnet_w18_small
136
+ In Collection: HRNet
137
+ Metadata:
138
+ FLOPs: 2071651488
139
+ Parameters: 13190000
140
+ File Size: 52934302
141
+ Architecture:
142
+ - Batch Normalization
143
+ - Convolution
144
+ - ReLU
145
+ - Residual Connection
146
+ Tasks:
147
+ - Image Classification
148
+ Training Techniques:
149
+ - Nesterov Accelerated Gradient
150
+ - Weight Decay
151
+ Training Data:
152
+ - ImageNet
153
+ Training Resources: 4x NVIDIA V100 GPUs
154
+ ID: hrnet_w18_small
155
+ Epochs: 100
156
+ Layers: 18
157
+ Crop Pct: '0.875'
158
+ Momentum: 0.9
159
+ Batch Size: 256
160
+ Image Size: '224'
161
+ Weight Decay: 0.001
162
+ Interpolation: bilinear
163
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L790
164
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnet_w18_small_v1-f460c6bc.pth
165
+ Results:
166
+ - Task: Image Classification
167
+ Dataset: ImageNet
168
+ Metrics:
169
+ Top 1 Accuracy: 72.34%
170
+ Top 5 Accuracy: 90.68%
171
+ - Name: hrnet_w18_small_v2
172
+ In Collection: HRNet
173
+ Metadata:
174
+ FLOPs: 3360023160
175
+ Parameters: 15600000
176
+ File Size: 62682879
177
+ Architecture:
178
+ - Batch Normalization
179
+ - Convolution
180
+ - ReLU
181
+ - Residual Connection
182
+ Tasks:
183
+ - Image Classification
184
+ Training Techniques:
185
+ - Nesterov Accelerated Gradient
186
+ - Weight Decay
187
+ Training Data:
188
+ - ImageNet
189
+ Training Resources: 4x NVIDIA V100 GPUs
190
+ ID: hrnet_w18_small_v2
191
+ Epochs: 100
192
+ Layers: 18
193
+ Crop Pct: '0.875'
194
+ Momentum: 0.9
195
+ Batch Size: 256
196
+ Image Size: '224'
197
+ Weight Decay: 0.001
198
+ Interpolation: bilinear
199
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L795
200
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnet_w18_small_v2-4c50a8cb.pth
201
+ Results:
202
+ - Task: Image Classification
203
+ Dataset: ImageNet
204
+ Metrics:
205
+ Top 1 Accuracy: 75.11%
206
+ Top 5 Accuracy: 92.41%
207
+ - Name: hrnet_w30
208
+ In Collection: HRNet
209
+ Metadata:
210
+ FLOPs: 10474119492
211
+ Parameters: 37710000
212
+ File Size: 151452218
213
+ Architecture:
214
+ - Batch Normalization
215
+ - Convolution
216
+ - ReLU
217
+ - Residual Connection
218
+ Tasks:
219
+ - Image Classification
220
+ Training Techniques:
221
+ - Nesterov Accelerated Gradient
222
+ - Weight Decay
223
+ Training Data:
224
+ - ImageNet
225
+ Training Resources: 4x NVIDIA V100 GPUs
226
+ ID: hrnet_w30
227
+ Epochs: 100
228
+ Layers: 30
229
+ Crop Pct: '0.875'
230
+ Momentum: 0.9
231
+ Batch Size: 256
232
+ Image Size: '224'
233
+ Weight Decay: 0.001
234
+ Interpolation: bilinear
235
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L805
236
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w30-8d7f8dab.pth
237
+ Results:
238
+ - Task: Image Classification
239
+ Dataset: ImageNet
240
+ Metrics:
241
+ Top 1 Accuracy: 78.21%
242
+ Top 5 Accuracy: 94.22%
243
+ - Name: hrnet_w32
244
+ In Collection: HRNet
245
+ Metadata:
246
+ FLOPs: 11524528320
247
+ Parameters: 41230000
248
+ File Size: 165547812
249
+ Architecture:
250
+ - Batch Normalization
251
+ - Convolution
252
+ - ReLU
253
+ - Residual Connection
254
+ Tasks:
255
+ - Image Classification
256
+ Training Techniques:
257
+ - Nesterov Accelerated Gradient
258
+ - Weight Decay
259
+ Training Data:
260
+ - ImageNet
261
+ Training Resources: 4x NVIDIA V100 GPUs
262
+ Training Time: 60 hours
263
+ ID: hrnet_w32
264
+ Epochs: 100
265
+ Layers: 32
266
+ Crop Pct: '0.875'
267
+ Momentum: 0.9
268
+ Batch Size: 256
269
+ Image Size: '224'
270
+ Weight Decay: 0.001
271
+ Interpolation: bilinear
272
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L810
273
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w32-90d8c5fb.pth
274
+ Results:
275
+ - Task: Image Classification
276
+ Dataset: ImageNet
277
+ Metrics:
278
+ Top 1 Accuracy: 78.45%
279
+ Top 5 Accuracy: 94.19%
280
+ - Name: hrnet_w40
281
+ In Collection: HRNet
282
+ Metadata:
283
+ FLOPs: 16381182192
284
+ Parameters: 57560000
285
+ File Size: 230899236
286
+ Architecture:
287
+ - Batch Normalization
288
+ - Convolution
289
+ - ReLU
290
+ - Residual Connection
291
+ Tasks:
292
+ - Image Classification
293
+ Training Techniques:
294
+ - Nesterov Accelerated Gradient
295
+ - Weight Decay
296
+ Training Data:
297
+ - ImageNet
298
+ Training Resources: 4x NVIDIA V100 GPUs
299
+ ID: hrnet_w40
300
+ Epochs: 100
301
+ Layers: 40
302
+ Crop Pct: '0.875'
303
+ Momentum: 0.9
304
+ Batch Size: 256
305
+ Image Size: '224'
306
+ Weight Decay: 0.001
307
+ Interpolation: bilinear
308
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L815
309
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w40-7cd397a4.pth
310
+ Results:
311
+ - Task: Image Classification
312
+ Dataset: ImageNet
313
+ Metrics:
314
+ Top 1 Accuracy: 78.93%
315
+ Top 5 Accuracy: 94.48%
316
+ - Name: hrnet_w44
317
+ In Collection: HRNet
318
+ Metadata:
319
+ FLOPs: 19202520264
320
+ Parameters: 67060000
321
+ File Size: 268957432
322
+ Architecture:
323
+ - Batch Normalization
324
+ - Convolution
325
+ - ReLU
326
+ - Residual Connection
327
+ Tasks:
328
+ - Image Classification
329
+ Training Techniques:
330
+ - Nesterov Accelerated Gradient
331
+ - Weight Decay
332
+ Training Data:
333
+ - ImageNet
334
+ Training Resources: 4x NVIDIA V100 GPUs
335
+ ID: hrnet_w44
336
+ Epochs: 100
337
+ Layers: 44
338
+ Crop Pct: '0.875'
339
+ Momentum: 0.9
340
+ Batch Size: 256
341
+ Image Size: '224'
342
+ Weight Decay: 0.001
343
+ Interpolation: bilinear
344
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L820
345
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w44-c9ac8c18.pth
346
+ Results:
347
+ - Task: Image Classification
348
+ Dataset: ImageNet
349
+ Metrics:
350
+ Top 1 Accuracy: 78.89%
351
+ Top 5 Accuracy: 94.37%
352
+ - Name: hrnet_w48
353
+ In Collection: HRNet
354
+ Metadata:
355
+ FLOPs: 22285865760
356
+ Parameters: 77470000
357
+ File Size: 310603710
358
+ Architecture:
359
+ - Batch Normalization
360
+ - Convolution
361
+ - ReLU
362
+ - Residual Connection
363
+ Tasks:
364
+ - Image Classification
365
+ Training Techniques:
366
+ - Nesterov Accelerated Gradient
367
+ - Weight Decay
368
+ Training Data:
369
+ - ImageNet
370
+ Training Resources: 4x NVIDIA V100 GPUs
371
+ Training Time: 80 hours
372
+ ID: hrnet_w48
373
+ Epochs: 100
374
+ Layers: 48
375
+ Crop Pct: '0.875'
376
+ Momentum: 0.9
377
+ Batch Size: 256
378
+ Image Size: '224'
379
+ Weight Decay: 0.001
380
+ Interpolation: bilinear
381
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L825
382
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w48-abd2e6ab.pth
383
+ Results:
384
+ - Task: Image Classification
385
+ Dataset: ImageNet
386
+ Metrics:
387
+ Top 1 Accuracy: 79.32%
388
+ Top 5 Accuracy: 94.51%
389
+ - Name: hrnet_w64
390
+ In Collection: HRNet
391
+ Metadata:
392
+ FLOPs: 37239321984
393
+ Parameters: 128060000
394
+ File Size: 513071818
395
+ Architecture:
396
+ - Batch Normalization
397
+ - Convolution
398
+ - ReLU
399
+ - Residual Connection
400
+ Tasks:
401
+ - Image Classification
402
+ Training Techniques:
403
+ - Nesterov Accelerated Gradient
404
+ - Weight Decay
405
+ Training Data:
406
+ - ImageNet
407
+ Training Resources: 4x NVIDIA V100 GPUs
408
+ ID: hrnet_w64
409
+ Epochs: 100
410
+ Layers: 64
411
+ Crop Pct: '0.875'
412
+ Momentum: 0.9
413
+ Batch Size: 256
414
+ Image Size: '224'
415
+ Weight Decay: 0.001
416
+ Interpolation: bilinear
417
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L830
418
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w64-b47cc881.pth
419
+ Results:
420
+ - Task: Image Classification
421
+ Dataset: ImageNet
422
+ Metrics:
423
+ Top 1 Accuracy: 79.46%
424
+ Top 5 Accuracy: 94.65%
425
+ -->
pytorch-image-models/hfdocs/source/models/ig-resnext.mdx ADDED
@@ -0,0 +1,276 @@
1
+ # Instagram ResNeXt WSL
2
+
3
+ A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) \\( C \\), as an essential factor in addition to the dimensions of depth and width.
4
+
5
+ This model was trained on billions of Instagram images, using thousands of distinct hashtags as labels, and exhibits excellent transfer learning performance.
6
+
7
+ Please note the CC-BY-NC 4.0 license on these weights: they may be used for non-commercial purposes only.
8
+
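+ The `32x16d`, `32x32d` and `32x48d` suffixes refer to the cardinality and the bottleneck width per group. To see which Instagram-pretrained variants your installed `timm` version provides, you can list them by wildcard:
+
+ ```py
+ >>> import timm
+ >>> timm.list_models('ig_resnext*', pretrained=True)
+ >>> # e.g. ['ig_resnext101_32x8d', 'ig_resnext101_32x16d', 'ig_resnext101_32x32d', 'ig_resnext101_32x48d']
+ ```
+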
9
+ ## How do I use this model on an image?
10
+
11
+ To load a pretrained model:
12
+
13
+ ```py
14
+ >>> import timm
15
+ >>> model = timm.create_model('ig_resnext101_32x16d', pretrained=True)
16
+ >>> model.eval()
17
+ ```
18
+
19
+ To load and preprocess the image:
20
+
21
+ ```py
22
+ >>> import urllib
23
+ >>> from PIL import Image
24
+ >>> from timm.data import resolve_data_config
25
+ >>> from timm.data.transforms_factory import create_transform
26
+
27
+ >>> config = resolve_data_config({}, model=model)
28
+ >>> transform = create_transform(**config)
29
+
30
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
31
+ >>> urllib.request.urlretrieve(url, filename)
32
+ >>> img = Image.open(filename).convert('RGB')
33
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
34
+ ```
35
+
36
+ To get the model predictions:
37
+
38
+ ```py
39
+ >>> import torch
40
+ >>> with torch.no_grad():
41
+ ... out = model(tensor)
42
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
43
+ >>> print(probabilities.shape)
44
+ >>> # prints: torch.Size([1000])
45
+ ```
46
+
47
+ To get the top-5 prediction class names:
48
+
49
+ ```py
50
+ >>> # Get imagenet class mappings
51
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
52
+ >>> urllib.request.urlretrieve(url, filename)
53
+ >>> with open("imagenet_classes.txt", "r") as f:
54
+ ... categories = [s.strip() for s in f.readlines()]
55
+
56
+ >>> # Print top categories per image
57
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
58
+ >>> for i in range(top5_prob.size(0)):
59
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
60
+ >>> # prints class names and probabilities like:
61
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
62
+ ```
63
+
64
+ Replace the model name with the variant you want to use, e.g. `ig_resnext101_32x16d`. You can find the IDs in the model summaries at the top of this page.
65
+
66
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
67
+
68
+ ## How do I finetune this model?
69
+
70
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
71
+
72
+ ```py
73
+ >>> model = timm.create_model('ig_resnext101_32x16d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
74
+ ```
75
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
76
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
77
+
78
+ ## How do I train this model?
79
+
80
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
81
+
82
+ ## Citation
83
+
84
+ ```BibTeX
85
+ @misc{mahajan2018exploring,
86
+ title={Exploring the Limits of Weakly Supervised Pretraining},
87
+ author={Dhruv Mahajan and Ross Girshick and Vignesh Ramanathan and Kaiming He and Manohar Paluri and Yixuan Li and Ashwin Bharambe and Laurens van der Maaten},
88
+ year={2018},
89
+ eprint={1805.00932},
90
+ archivePrefix={arXiv},
91
+ primaryClass={cs.CV}
92
+ }
93
+ ```
94
+
95
+ <!--
96
+ Type: model-index
97
+ Collections:
98
+ - Name: IG ResNeXt
99
+ Paper:
100
+ Title: Exploring the Limits of Weakly Supervised Pretraining
101
+ URL: https://paperswithcode.com/paper/exploring-the-limits-of-weakly-supervised
102
+ Models:
103
+ - Name: ig_resnext101_32x16d
104
+ In Collection: IG ResNeXt
105
+ Metadata:
106
+ FLOPs: 46623691776
107
+ Parameters: 194030000
108
+ File Size: 777518664
109
+ Architecture:
110
+ - 1x1 Convolution
111
+ - Batch Normalization
112
+ - Convolution
113
+ - Global Average Pooling
114
+ - Grouped Convolution
115
+ - Max Pooling
116
+ - ReLU
117
+ - ResNeXt Block
118
+ - Residual Connection
119
+ - Softmax
120
+ Tasks:
121
+ - Image Classification
122
+ Training Techniques:
123
+ - Nesterov Accelerated Gradient
124
+ - Weight Decay
125
+ Training Data:
126
+ - IG-3.5B-17k
127
+ - ImageNet
128
+ Training Resources: 336x GPUs
129
+ ID: ig_resnext101_32x16d
130
+ Epochs: 100
131
+ Layers: 101
132
+ Crop Pct: '0.875'
133
+ Momentum: 0.9
134
+ Batch Size: 8064
135
+ Image Size: '224'
136
+ Weight Decay: 0.001
137
+ Interpolation: bilinear
138
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L874
139
+ Weights: https://download.pytorch.org/models/ig_resnext101_32x16-c6f796b0.pth
140
+ Results:
141
+ - Task: Image Classification
142
+ Dataset: ImageNet
143
+ Metrics:
144
+ Top 1 Accuracy: 84.16%
145
+ Top 5 Accuracy: 97.19%
146
+ - Name: ig_resnext101_32x32d
147
+ In Collection: IG ResNeXt
148
+ Metadata:
149
+ FLOPs: 112225170432
150
+ Parameters: 468530000
151
+ File Size: 1876573776
152
+ Architecture:
153
+ - 1x1 Convolution
154
+ - Batch Normalization
155
+ - Convolution
156
+ - Global Average Pooling
157
+ - Grouped Convolution
158
+ - Max Pooling
159
+ - ReLU
160
+ - ResNeXt Block
161
+ - Residual Connection
162
+ - Softmax
163
+ Tasks:
164
+ - Image Classification
165
+ Training Techniques:
166
+ - Nesterov Accelerated Gradient
167
+ - Weight Decay
168
+ Training Data:
169
+ - IG-3.5B-17k
170
+ - ImageNet
171
+ Training Resources: 336x GPUs
172
+ ID: ig_resnext101_32x32d
173
+ Epochs: 100
174
+ Layers: 101
175
+ Crop Pct: '0.875'
176
+ Momentum: 0.9
177
+ Batch Size: 8064
178
+ Image Size: '224'
179
+ Weight Decay: 0.001
180
+ Interpolation: bilinear
181
+ Minibatch Size: 8064
182
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L885
183
+ Weights: https://download.pytorch.org/models/ig_resnext101_32x32-e4b90b00.pth
184
+ Results:
185
+ - Task: Image Classification
186
+ Dataset: ImageNet
187
+ Metrics:
188
+ Top 1 Accuracy: 85.09%
189
+ Top 5 Accuracy: 97.44%
190
+ - Name: ig_resnext101_32x48d
191
+ In Collection: IG ResNeXt
192
+ Metadata:
193
+ FLOPs: 197446554624
194
+ Parameters: 828410000
195
+ File Size: 3317136976
196
+ Architecture:
197
+ - 1x1 Convolution
198
+ - Batch Normalization
199
+ - Convolution
200
+ - Global Average Pooling
201
+ - Grouped Convolution
202
+ - Max Pooling
203
+ - ReLU
204
+ - ResNeXt Block
205
+ - Residual Connection
206
+ - Softmax
207
+ Tasks:
208
+ - Image Classification
209
+ Training Techniques:
210
+ - Nesterov Accelerated Gradient
211
+ - Weight Decay
212
+ Training Data:
213
+ - IG-3.5B-17k
214
+ - ImageNet
215
+ Training Resources: 336x GPUs
216
+ ID: ig_resnext101_32x48d
217
+ Epochs: 100
218
+ Layers: 101
219
+ Crop Pct: '0.875'
220
+ Momentum: 0.9
221
+ Batch Size: 8064
222
+ Image Size: '224'
223
+ Weight Decay: 0.001
224
+ Interpolation: bilinear
225
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L896
226
+ Weights: https://download.pytorch.org/models/ig_resnext101_32x48-3e41cc8a.pth
227
+ Results:
228
+ - Task: Image Classification
229
+ Dataset: ImageNet
230
+ Metrics:
231
+ Top 1 Accuracy: 85.42%
232
+ Top 5 Accuracy: 97.58%
233
+ - Name: ig_resnext101_32x8d
234
+ In Collection: IG ResNeXt
235
+ Metadata:
236
+ FLOPs: 21180417024
237
+ Parameters: 88790000
238
+ File Size: 356056638
239
+ Architecture:
240
+ - 1x1 Convolution
241
+ - Batch Normalization
242
+ - Convolution
243
+ - Global Average Pooling
244
+ - Grouped Convolution
245
+ - Max Pooling
246
+ - ReLU
247
+ - ResNeXt Block
248
+ - Residual Connection
249
+ - Softmax
250
+ Tasks:
251
+ - Image Classification
252
+ Training Techniques:
253
+ - Nesterov Accelerated Gradient
254
+ - Weight Decay
255
+ Training Data:
256
+ - IG-3.5B-17k
257
+ - ImageNet
258
+ Training Resources: 336x GPUs
259
+ ID: ig_resnext101_32x8d
260
+ Epochs: 100
261
+ Layers: 101
262
+ Crop Pct: '0.875'
263
+ Momentum: 0.9
264
+ Batch Size: 8064
265
+ Image Size: '224'
266
+ Weight Decay: 0.001
267
+ Interpolation: bilinear
268
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L863
269
+ Weights: https://download.pytorch.org/models/ig_resnext101_32x8-c38310e5.pth
270
+ Results:
271
+ - Task: Image Classification
272
+ Dataset: ImageNet
273
+ Metrics:
274
+ Top 1 Accuracy: 82.7%
275
+ Top 5 Accuracy: 96.64%
276
+ -->
pytorch-image-models/hfdocs/source/models/inception-resnet-v2.mdx ADDED
@@ -0,0 +1,139 @@
 
1
+ # Inception ResNet v2
2
+
3
+ **Inception-ResNet-v2** is a convolutional neural network architecture that builds on the Inception family of architectures but incorporates [residual connections](https://paperswithcode.com/method/residual-connection) (replacing the filter concatenation stage of the Inception architecture).
4
+
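+ As a schematic illustration of that change (an illustrative block, not the timm implementation): the parallel branch outputs are still concatenated, but a 1x1 projection returns them to the input width so the result can be added to the input rather than passed on as a concatenation:
+
+ ```py
+ import torch
+ import torch.nn as nn
+
+ # Schematic Inception-ResNet style block; branch shapes and the residual scale are illustrative.
+ class InceptionResidualBlock(nn.Module):
+     def __init__(self, channels, scale=0.2):
+         super().__init__()
+         self.branch1 = nn.Conv2d(channels, 32, 1)
+         self.branch2 = nn.Sequential(
+             nn.Conv2d(channels, 32, 1),
+             nn.Conv2d(32, 32, 3, padding=1),
+         )
+         self.project = nn.Conv2d(64, channels, 1)  # back to the input channel count
+         self.scale = scale
+         self.act = nn.ReLU(inplace=True)
+
+     def forward(self, x):
+         branches = torch.cat([self.branch1(x), self.branch2(x)], dim=1)
+         return self.act(x + self.scale * self.project(branches))  # residual add, not concat
+ ```
+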
5
+ ## How do I use this model on an image?
6
+
7
+ To load a pretrained model:
8
+
9
+ ```py
10
+ >>> import timm
11
+ >>> model = timm.create_model('inception_resnet_v2', pretrained=True)
12
+ >>> model.eval()
13
+ ```
14
+
15
+ To load and preprocess the image:
16
+
17
+ ```py
18
+ >>> import urllib
19
+ >>> from PIL import Image
20
+ >>> from timm.data import resolve_data_config
21
+ >>> from timm.data.transforms_factory import create_transform
22
+
23
+ >>> config = resolve_data_config({}, model=model)
24
+ >>> transform = create_transform(**config)
25
+
26
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
27
+ >>> urllib.request.urlretrieve(url, filename)
28
+ >>> img = Image.open(filename).convert('RGB')
29
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
30
+ ```
31
+
32
+ To get the model predictions:
33
+
34
+ ```py
35
+ >>> import torch
36
+ >>> with torch.no_grad():
37
+ ... out = model(tensor)
38
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
39
+ >>> print(probabilities.shape)
40
+ >>> # prints: torch.Size([1000])
41
+ ```
42
+
43
+ To get the class names of the top-5 predictions:
44
+
45
+ ```py
46
+ >>> # Get imagenet class mappings
47
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
48
+ >>> urllib.request.urlretrieve(url, filename)
49
+ >>> with open("imagenet_classes.txt", "r") as f:
50
+ ... categories = [s.strip() for s in f.readlines()]
51
+
52
+ >>> # Print top categories per image
53
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
54
+ >>> for i in range(top5_prob.size(0)):
55
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
56
+ >>> # prints class names and probabilities like:
57
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
58
+ ```
59
+
60
+ Replace the model name with the variant you want to use, e.g. `inception_resnet_v2`. You can find the IDs in the model summaries at the top of this page.
61
+
62
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
63
+
64
+ ## How do I finetune this model?
65
+
66
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
67
+
68
+ ```py
69
+ >>> model = timm.create_model('inception_resnet_v2', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
70
+ ```
71
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
72
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
73
+
74
+ ## How do I train this model?
75
+
76
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
77
+
78
+ ## Citation
79
+
80
+ ```BibTeX
81
+ @misc{szegedy2016inceptionv4,
82
+ title={Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning},
83
+ author={Christian Szegedy and Sergey Ioffe and Vincent Vanhoucke and Alex Alemi},
84
+ year={2016},
85
+ eprint={1602.07261},
86
+ archivePrefix={arXiv},
87
+ primaryClass={cs.CV}
88
+ }
89
+ ```
90
+
91
+ <!--
92
+ Type: model-index
93
+ Collections:
94
+ - Name: Inception ResNet v2
95
+ Paper:
96
+ Title: Inception-v4, Inception-ResNet and the Impact of Residual Connections on
97
+ Learning
98
+ URL: https://paperswithcode.com/paper/inception-v4-inception-resnet-and-the-impact
99
+ Models:
100
+ - Name: inception_resnet_v2
101
+ In Collection: Inception ResNet v2
102
+ Metadata:
103
+ FLOPs: 16959133120
104
+ Parameters: 55850000
105
+ File Size: 223774238
106
+ Architecture:
107
+ - Average Pooling
108
+ - Dropout
109
+ - Inception-ResNet-v2 Reduction-B
110
+ - Inception-ResNet-v2-A
111
+ - Inception-ResNet-v2-B
112
+ - Inception-ResNet-v2-C
113
+ - Reduction-A
114
+ - Softmax
115
+ Tasks:
116
+ - Image Classification
117
+ Training Techniques:
118
+ - Label Smoothing
119
+ - RMSProp
120
+ - Weight Decay
121
+ Training Data:
122
+ - ImageNet
123
+ Training Resources: 20x NVIDIA Kepler GPUs
124
+ ID: inception_resnet_v2
125
+ LR: 0.045
126
+ Dropout: 0.2
127
+ Crop Pct: '0.897'
128
+ Momentum: 0.9
129
+ Image Size: '299'
130
+ Interpolation: bicubic
131
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/inception_resnet_v2.py#L343
132
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/inception_resnet_v2-940b1cd6.pth
133
+ Results:
134
+ - Task: Image Classification
135
+ Dataset: ImageNet
136
+ Metrics:
137
+ Top 1 Accuracy: 0.95%
138
+ Top 5 Accuracy: 17.29%
139
+ -->
pytorch-image-models/hfdocs/source/models/inception-v3.mdx ADDED
@@ -0,0 +1,152 @@
 
1
+ # Inception v3
2
+
3
+ **Inception v3** is a convolutional neural network architecture from the Inception family that makes several improvements, including the use of [Label Smoothing](https://paperswithcode.com/method/label-smoothing), factorized 7 x 7 convolutions, and an [auxiliary classifier](https://paperswithcode.com/method/auxiliary-classifier) to propagate label information lower down the network (along with the use of batch normalization for layers in the side head). The key building block is an [Inception Module](https://paperswithcode.com/method/inception-v3-module).
4
+
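+ As an illustration of the label-smoothing ingredient: the target distribution puts \\( 1 - \epsilon \\) on the true class and spreads \\( \epsilon \\) uniformly over all classes. Recent PyTorch releases expose this directly on the cross-entropy loss (the smoothing value below is illustrative):
+
+ ```py
+ import torch
+
+ # Cross-entropy with label smoothing, as used in Inception-style training recipes.
+ criterion = torch.nn.CrossEntropyLoss(label_smoothing=0.1)
+
+ logits = torch.randn(8, 1000)            # e.g. a batch of model outputs
+ targets = torch.randint(0, 1000, (8,))
+ loss = criterion(logits, targets)
+ ```
+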
5
+ ## How do I use this model on an image?
6
+
7
+ To load a pretrained model:
8
+
9
+ ```py
10
+ >>> import timm
11
+ >>> model = timm.create_model('inception_v3', pretrained=True)
12
+ >>> model.eval()
13
+ ```
14
+
15
+ To load and preprocess the image:
16
+
17
+ ```py
18
+ >>> import urllib
19
+ >>> from PIL import Image
20
+ >>> from timm.data import resolve_data_config
21
+ >>> from timm.data.transforms_factory import create_transform
22
+
23
+ >>> config = resolve_data_config({}, model=model)
24
+ >>> transform = create_transform(**config)
25
+
26
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
27
+ >>> urllib.request.urlretrieve(url, filename)
28
+ >>> img = Image.open(filename).convert('RGB')
29
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
30
+ ```
31
+
32
+ To get the model predictions:
33
+
34
+ ```py
35
+ >>> import torch
36
+ >>> with torch.no_grad():
37
+ ... out = model(tensor)
38
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
39
+ >>> print(probabilities.shape)
40
+ >>> # prints: torch.Size([1000])
41
+ ```
42
+
43
+ To get the class names of the top-5 predictions:
44
+
45
+ ```py
46
+ >>> # Get imagenet class mappings
47
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
48
+ >>> urllib.request.urlretrieve(url, filename)
49
+ >>> with open("imagenet_classes.txt", "r") as f:
50
+ ... categories = [s.strip() for s in f.readlines()]
51
+
52
+ >>> # Print top categories per image
53
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
54
+ >>> for i in range(top5_prob.size(0)):
55
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
56
+ >>> # prints class names and probabilities like:
57
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
58
+ ```
59
+
60
+ Replace the model name with the variant you want to use, e.g. `inception_v3`. You can find the IDs in the model summaries at the top of this page.
61
+
62
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
63
+
64
+ ## How do I finetune this model?
65
+
66
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
67
+
68
+ ```py
69
+ >>> model = timm.create_model('inception_v3', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
70
+ ```
71
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
72
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
73
+
74
+ ## How do I train this model?
75
+
76
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
77
+
78
+ ## Citation
79
+
80
+ ```BibTeX
81
+ @article{DBLP:journals/corr/SzegedyVISW15,
82
+ author = {Christian Szegedy and
83
+ Vincent Vanhoucke and
84
+ Sergey Ioffe and
85
+ Jonathon Shlens and
86
+ Zbigniew Wojna},
87
+ title = {Rethinking the Inception Architecture for Computer Vision},
88
+ journal = {CoRR},
89
+ volume = {abs/1512.00567},
90
+ year = {2015},
91
+ url = {http://arxiv.org/abs/1512.00567},
92
+ archivePrefix = {arXiv},
93
+ eprint = {1512.00567},
94
+ timestamp = {Mon, 13 Aug 2018 16:49:07 +0200},
95
+ biburl = {https://dblp.org/rec/journals/corr/SzegedyVISW15.bib},
96
+ bibsource = {dblp computer science bibliography, https://dblp.org}
97
+ }
98
+ ```
99
+
100
+ <!--
101
+ Type: model-index
102
+ Collections:
103
+ - Name: Inception v3
104
+ Paper:
105
+ Title: Rethinking the Inception Architecture for Computer Vision
106
+ URL: https://paperswithcode.com/paper/rethinking-the-inception-architecture-for
107
+ Models:
108
+ - Name: inception_v3
109
+ In Collection: Inception v3
110
+ Metadata:
111
+ FLOPs: 7352418880
112
+ Parameters: 23830000
113
+ File Size: 108857766
114
+ Architecture:
115
+ - 1x1 Convolution
116
+ - Auxiliary Classifier
117
+ - Average Pooling
118
+ - Average Pooling
119
+ - Batch Normalization
120
+ - Convolution
121
+ - Dense Connections
122
+ - Dropout
123
+ - Inception-v3 Module
124
+ - Max Pooling
125
+ - ReLU
126
+ - Softmax
127
+ Tasks:
128
+ - Image Classification
129
+ Training Techniques:
130
+ - Gradient Clipping
131
+ - Label Smoothing
132
+ - RMSProp
133
+ - Weight Decay
134
+ Training Data:
135
+ - ImageNet
136
+ Training Resources: 50x NVIDIA Kepler GPUs
137
+ ID: inception_v3
138
+ LR: 0.045
139
+ Dropout: 0.2
140
+ Crop Pct: '0.875'
141
+ Momentum: 0.9
142
+ Image Size: '299'
143
+ Interpolation: bicubic
144
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/inception_v3.py#L442
145
+ Weights: https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth
146
+ Results:
147
+ - Task: Image Classification
148
+ Dataset: ImageNet
149
+ Metrics:
150
+ Top 1 Accuracy: 77.46%
151
+ Top 5 Accuracy: 93.48%
152
+ -->
pytorch-image-models/hfdocs/source/models/inception-v4.mdx ADDED
@@ -0,0 +1,138 @@
 
1
+ # Inception v4
2
+
3
+ **Inception-v4** is a convolutional neural network architecture that builds on previous iterations of the Inception family by simplifying the architecture and using more inception modules than [Inception-v3](https://paperswithcode.com/method/inception-v3).
4
+ ## How do I use this model on an image?
5
+
6
+ To load a pretrained model:
7
+
8
+ ```py
9
+ >>> import timm
10
+ >>> model = timm.create_model('inception_v4', pretrained=True)
11
+ >>> model.eval()
12
+ ```
13
+
14
+ To load and preprocess the image:
15
+
16
+ ```py
17
+ >>> import urllib
18
+ >>> from PIL import Image
19
+ >>> from timm.data import resolve_data_config
20
+ >>> from timm.data.transforms_factory import create_transform
21
+
22
+ >>> config = resolve_data_config({}, model=model)
23
+ >>> transform = create_transform(**config)
24
+
25
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
26
+ >>> urllib.request.urlretrieve(url, filename)
27
+ >>> img = Image.open(filename).convert('RGB')
28
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
29
+ ```
30
+
31
+ To get the model predictions:
32
+
33
+ ```py
34
+ >>> import torch
35
+ >>> with torch.no_grad():
36
+ ... out = model(tensor)
37
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
38
+ >>> print(probabilities.shape)
39
+ >>> # prints: torch.Size([1000])
40
+ ```
41
+
42
+ To get the class names of the top-5 predictions:
43
+
44
+ ```py
45
+ >>> # Get imagenet class mappings
46
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
47
+ >>> urllib.request.urlretrieve(url, filename)
48
+ >>> with open("imagenet_classes.txt", "r") as f:
49
+ ... categories = [s.strip() for s in f.readlines()]
50
+
51
+ >>> # Print top categories per image
52
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
53
+ >>> for i in range(top5_prob.size(0)):
54
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
55
+ >>> # prints class names and probabilities like:
56
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
57
+ ```
58
+
59
+ Replace the model name with the variant you want to use, e.g. `inception_v4`. You can find the IDs in the model summaries at the top of this page.
60
+
61
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
62
+
63
+ ## How do I finetune this model?
64
+
65
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
66
+
67
+ ```py
68
+ >>> model = timm.create_model('inception_v4', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
69
+ ```
70
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
71
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
72
+
73
+ ## How do I train this model?
74
+
75
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
76
+
77
+ ## Citation
78
+
79
+ ```BibTeX
80
+ @misc{szegedy2016inceptionv4,
81
+ title={Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning},
82
+ author={Christian Szegedy and Sergey Ioffe and Vincent Vanhoucke and Alex Alemi},
83
+ year={2016},
84
+ eprint={1602.07261},
85
+ archivePrefix={arXiv},
86
+ primaryClass={cs.CV}
87
+ }
88
+ ```
89
+
90
+ <!--
91
+ Type: model-index
92
+ Collections:
93
+ - Name: Inception v4
94
+ Paper:
95
+ Title: Inception-v4, Inception-ResNet and the Impact of Residual Connections on
96
+ Learning
97
+ URL: https://paperswithcode.com/paper/inception-v4-inception-resnet-and-the-impact
98
+ Models:
99
+ - Name: inception_v4
100
+ In Collection: Inception v4
101
+ Metadata:
102
+ FLOPs: 15806527936
103
+ Parameters: 42680000
104
+ File Size: 171082495
105
+ Architecture:
106
+ - Average Pooling
107
+ - Dropout
108
+ - Inception-A
109
+ - Inception-B
110
+ - Inception-C
111
+ - Reduction-A
112
+ - Reduction-B
113
+ - Softmax
114
+ Tasks:
115
+ - Image Classification
116
+ Training Techniques:
117
+ - Label Smoothing
118
+ - RMSProp
119
+ - Weight Decay
120
+ Training Data:
121
+ - ImageNet
122
+ Training Resources: 20x NVIDIA Kepler GPUs
123
+ ID: inception_v4
124
+ LR: 0.045
125
+ Dropout: 0.2
126
+ Crop Pct: '0.875'
127
+ Momentum: 0.9
128
+ Image Size: '299'
129
+ Interpolation: bicubic
130
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/inception_v4.py#L313
131
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/inceptionv4-8e4777a0.pth
132
+ Results:
133
+ - Task: Image Classification
134
+ Dataset: ImageNet
135
+ Metrics:
136
+ Top 1 Accuracy: 1.01%
137
+ Top 5 Accuracy: 16.85%
138
+ -->
pytorch-image-models/hfdocs/source/models/legacy-se-resnet.mdx ADDED
@@ -0,0 +1,324 @@
 
1
+ # (Legacy) SE-ResNet
2
+
3
+ **SE ResNet** is a variant of a [ResNet](https://www.paperswithcode.com/method/resnet) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration.
4
+
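+ As a sketch of the idea (a minimal, illustrative block, not timm's implementation):
+
+ ```py
+ import torch.nn as nn
+
+ # Minimal squeeze-and-excitation block; the reduction ratio of 16 follows the paper's default.
+ class SEBlock(nn.Module):
+     def __init__(self, channels, reduction=16):
+         super().__init__()
+         self.pool = nn.AdaptiveAvgPool2d(1)  # squeeze: global spatial average per channel
+         self.fc = nn.Sequential(
+             nn.Linear(channels, channels // reduction),
+             nn.ReLU(inplace=True),
+             nn.Linear(channels // reduction, channels),
+             nn.Sigmoid(),                    # excitation: per-channel gates in (0, 1)
+         )
+
+     def forward(self, x):
+         b, c, _, _ = x.shape
+         weights = self.fc(self.pool(x).flatten(1)).view(b, c, 1, 1)
+         return x * weights                   # channel-wise recalibration
+ ```
+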
5
+ ## How do I use this model on an image?
6
+
7
+ To load a pretrained model:
8
+
9
+ ```py
10
+ >>> import timm
11
+ >>> model = timm.create_model('legacy_seresnet101', pretrained=True)
12
+ >>> model.eval()
13
+ ```
14
+
15
+ To load and preprocess the image:
16
+
17
+ ```py
18
+ >>> import urllib
19
+ >>> from PIL import Image
20
+ >>> from timm.data import resolve_data_config
21
+ >>> from timm.data.transforms_factory import create_transform
22
+
23
+ >>> config = resolve_data_config({}, model=model)
24
+ >>> transform = create_transform(**config)
25
+
26
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
27
+ >>> urllib.request.urlretrieve(url, filename)
28
+ >>> img = Image.open(filename).convert('RGB')
29
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
30
+ ```
31
+
32
+ To get the model predictions:
33
+
34
+ ```py
35
+ >>> import torch
36
+ >>> with torch.no_grad():
37
+ ... out = model(tensor)
38
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
39
+ >>> print(probabilities.shape)
40
+ >>> # prints: torch.Size([1000])
41
+ ```
42
+
43
+ To get the class names of the top-5 predictions:
44
+
45
+ ```py
46
+ >>> # Get imagenet class mappings
47
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
48
+ >>> urllib.request.urlretrieve(url, filename)
49
+ >>> with open("imagenet_classes.txt", "r") as f:
50
+ ... categories = [s.strip() for s in f.readlines()]
51
+
52
+ >>> # Print top categories per image
53
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
54
+ >>> for i in range(top5_prob.size(0)):
55
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
56
+ >>> # prints class names and probabilities like:
57
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
58
+ ```
59
+
60
+ Replace the model name with the variant you want to use, e.g. `legacy_seresnet101`. You can find the IDs in the model summaries at the top of this page.
61
+
62
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
63
+
64
+ ## How do I finetune this model?
65
+
66
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
67
+
68
+ ```py
69
+ >>> model = timm.create_model('legacy_seresnet101', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
70
+ ```
71
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
72
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
73
+
74
+ ## How do I train this model?
75
+
76
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
77
+
78
+ ## Citation
79
+
80
+ ```BibTeX
81
+ @misc{hu2019squeezeandexcitation,
82
+ title={Squeeze-and-Excitation Networks},
83
+ author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu},
84
+ year={2019},
85
+ eprint={1709.01507},
86
+ archivePrefix={arXiv},
87
+ primaryClass={cs.CV}
88
+ }
89
+ ```
90
+
91
+ <!--
92
+ Type: model-index
93
+ Collections:
94
+ - Name: Legacy SE ResNet
95
+ Paper:
96
+ Title: Squeeze-and-Excitation Networks
97
+ URL: https://paperswithcode.com/paper/squeeze-and-excitation-networks
98
+ Models:
99
+ - Name: legacy_seresnet101
100
+ In Collection: Legacy SE ResNet
101
+ Metadata:
102
+ FLOPs: 9762614000
103
+ Parameters: 49330000
104
+ File Size: 197822624
105
+ Architecture:
106
+ - 1x1 Convolution
107
+ - Batch Normalization
108
+ - Bottleneck Residual Block
109
+ - Convolution
110
+ - Global Average Pooling
111
+ - Max Pooling
112
+ - ReLU
113
+ - Residual Block
114
+ - Residual Connection
115
+ - Softmax
116
+ - Squeeze-and-Excitation Block
117
+ Tasks:
118
+ - Image Classification
119
+ Training Techniques:
120
+ - Label Smoothing
121
+ - SGD with Momentum
122
+ - Weight Decay
123
+ Training Data:
124
+ - ImageNet
125
+ Training Resources: 8x NVIDIA Titan X GPUs
126
+ ID: legacy_seresnet101
127
+ LR: 0.6
128
+ Epochs: 100
129
+ Layers: 101
130
+ Dropout: 0.2
131
+ Crop Pct: '0.875'
132
+ Momentum: 0.9
133
+ Batch Size: 1024
134
+ Image Size: '224'
135
+ Interpolation: bilinear
136
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L426
137
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet101-7e38fcc6.pth
138
+ Results:
139
+ - Task: Image Classification
140
+ Dataset: ImageNet
141
+ Metrics:
142
+ Top 1 Accuracy: 78.38%
143
+ Top 5 Accuracy: 94.26%
144
+ - Name: legacy_seresnet152
145
+ In Collection: Legacy SE ResNet
146
+ Metadata:
147
+ FLOPs: 14553578160
148
+ Parameters: 66819999
149
+ File Size: 268033864
150
+ Architecture:
151
+ - 1x1 Convolution
152
+ - Batch Normalization
153
+ - Bottleneck Residual Block
154
+ - Convolution
155
+ - Global Average Pooling
156
+ - Max Pooling
157
+ - ReLU
158
+ - Residual Block
159
+ - Residual Connection
160
+ - Softmax
161
+ - Squeeze-and-Excitation Block
162
+ Tasks:
163
+ - Image Classification
164
+ Training Techniques:
165
+ - Label Smoothing
166
+ - SGD with Momentum
167
+ - Weight Decay
168
+ Training Data:
169
+ - ImageNet
170
+ Training Resources: 8x NVIDIA Titan X GPUs
171
+ ID: legacy_seresnet152
172
+ LR: 0.6
173
+ Epochs: 100
174
+ Layers: 152
175
+ Dropout: 0.2
176
+ Crop Pct: '0.875'
177
+ Momentum: 0.9
178
+ Batch Size: 1024
179
+ Image Size: '224'
180
+ Interpolation: bilinear
181
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L433
182
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet152-d17c99b7.pth
183
+ Results:
184
+ - Task: Image Classification
185
+ Dataset: ImageNet
186
+ Metrics:
187
+ Top 1 Accuracy: 78.67%
188
+ Top 5 Accuracy: 94.38%
189
+ - Name: legacy_seresnet18
190
+ In Collection: Legacy SE ResNet
191
+ Metadata:
192
+ FLOPs: 2328876024
193
+ Parameters: 11780000
194
+ File Size: 47175663
195
+ Architecture:
196
+ - 1x1 Convolution
197
+ - Batch Normalization
198
+ - Bottleneck Residual Block
199
+ - Convolution
200
+ - Global Average Pooling
201
+ - Max Pooling
202
+ - ReLU
203
+ - Residual Block
204
+ - Residual Connection
205
+ - Softmax
206
+ - Squeeze-and-Excitation Block
207
+ Tasks:
208
+ - Image Classification
209
+ Training Techniques:
210
+ - Label Smoothing
211
+ - SGD with Momentum
212
+ - Weight Decay
213
+ Training Data:
214
+ - ImageNet
215
+ Training Resources: 8x NVIDIA Titan X GPUs
216
+ ID: legacy_seresnet18
217
+ LR: 0.6
218
+ Epochs: 100
219
+ Layers: 18
220
+ Dropout: 0.2
221
+ Crop Pct: '0.875'
222
+ Momentum: 0.9
223
+ Batch Size: 1024
224
+ Image Size: '224'
225
+ Interpolation: bicubic
226
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L405
227
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet18-4bb0ce65.pth
228
+ Results:
229
+ - Task: Image Classification
230
+ Dataset: ImageNet
231
+ Metrics:
232
+ Top 1 Accuracy: 71.74%
233
+ Top 5 Accuracy: 90.34%
234
+ - Name: legacy_seresnet34
235
+ In Collection: Legacy SE ResNet
236
+ Metadata:
237
+ FLOPs: 4706201004
238
+ Parameters: 21960000
239
+ File Size: 87958697
240
+ Architecture:
241
+ - 1x1 Convolution
242
+ - Batch Normalization
243
+ - Bottleneck Residual Block
244
+ - Convolution
245
+ - Global Average Pooling
246
+ - Max Pooling
247
+ - ReLU
248
+ - Residual Block
249
+ - Residual Connection
250
+ - Softmax
251
+ - Squeeze-and-Excitation Block
252
+ Tasks:
253
+ - Image Classification
254
+ Training Techniques:
255
+ - Label Smoothing
256
+ - SGD with Momentum
257
+ - Weight Decay
258
+ Training Data:
259
+ - ImageNet
260
+ Training Resources: 8x NVIDIA Titan X GPUs
261
+ ID: legacy_seresnet34
262
+ LR: 0.6
263
+ Epochs: 100
264
+ Layers: 34
265
+ Dropout: 0.2
266
+ Crop Pct: '0.875'
267
+ Momentum: 0.9
268
+ Batch Size: 1024
269
+ Image Size: '224'
270
+ Interpolation: bilinear
271
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L412
272
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet34-a4004e63.pth
273
+ Results:
274
+ - Task: Image Classification
275
+ Dataset: ImageNet
276
+ Metrics:
277
+ Top 1 Accuracy: 74.79%
278
+ Top 5 Accuracy: 92.13%
279
+ - Name: legacy_seresnet50
280
+ In Collection: Legacy SE ResNet
281
+ Metadata:
282
+ FLOPs: 4974351024
283
+ Parameters: 28090000
284
+ File Size: 112611220
285
+ Architecture:
286
+ - 1x1 Convolution
287
+ - Batch Normalization
288
+ - Bottleneck Residual Block
289
+ - Convolution
290
+ - Global Average Pooling
291
+ - Max Pooling
292
+ - ReLU
293
+ - Residual Block
294
+ - Residual Connection
295
+ - Softmax
296
+ - Squeeze-and-Excitation Block
297
+ Tasks:
298
+ - Image Classification
299
+ Training Techniques:
300
+ - Label Smoothing
301
+ - SGD with Momentum
302
+ - Weight Decay
303
+ Training Data:
304
+ - ImageNet
305
+ Training Resources: 8x NVIDIA Titan X GPUs
306
+ ID: legacy_seresnet50
307
+ LR: 0.6
308
+ Epochs: 100
309
+ Layers: 50
310
+ Dropout: 0.2
311
+ Crop Pct: '0.875'
312
+ Momentum: 0.9
313
+ Image Size: '224'
314
+ Interpolation: bilinear
315
+ Minibatch Size: 1024
316
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L419
317
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet50-ce0d4300.pth
318
+ Results:
319
+ - Task: Image Classification
320
+ Dataset: ImageNet
321
+ Metrics:
322
+ Top 1 Accuracy: 77.64%
323
+ Top 5 Accuracy: 93.74%
324
+ -->
pytorch-image-models/hfdocs/source/models/legacy-se-resnext.mdx ADDED
@@ -0,0 +1,234 @@
 
1
+ # (Legacy) SE-ResNeXt
2
+
3
+ **SE ResNeXt** is a variant of a [ResNeXt](https://www.paperswithcode.com/method/resnext) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration.
4
+
5
+ ## How do I use this model on an image?
6
+
7
+ To load a pretrained model:
8
+
9
+ ```py
10
+ >>> import timm
11
+ >>> model = timm.create_model('legacy_seresnext101_32x4d', pretrained=True)
12
+ >>> model.eval()
13
+ ```
14
+
15
+ To load and preprocess the image:
16
+
17
+ ```py
18
+ >>> import urllib
19
+ >>> from PIL import Image
20
+ >>> from timm.data import resolve_data_config
21
+ >>> from timm.data.transforms_factory import create_transform
22
+
23
+ >>> config = resolve_data_config({}, model=model)
24
+ >>> transform = create_transform(**config)
25
+
26
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
27
+ >>> urllib.request.urlretrieve(url, filename)
28
+ >>> img = Image.open(filename).convert('RGB')
29
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
30
+ ```
31
+
32
+ To get the model predictions:
33
+
34
+ ```py
35
+ >>> import torch
36
+ >>> with torch.no_grad():
37
+ ... out = model(tensor)
38
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
39
+ >>> print(probabilities.shape)
40
+ >>> # prints: torch.Size([1000])
41
+ ```
42
+
43
+ To get the class names of the top-5 predictions:
44
+
45
+ ```py
46
+ >>> # Get imagenet class mappings
47
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
48
+ >>> urllib.request.urlretrieve(url, filename)
49
+ >>> with open("imagenet_classes.txt", "r") as f:
50
+ ... categories = [s.strip() for s in f.readlines()]
51
+
52
+ >>> # Print top categories per image
53
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
54
+ >>> for i in range(top5_prob.size(0)):
55
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
56
+ >>> # prints class names and probabilities like:
57
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
58
+ ```
59
+
60
+ Replace the model name with the variant you want to use, e.g. `legacy_seresnext101_32x4d`. You can find the IDs in the model summaries at the top of this page.
61
+
62
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
63
+
64
+ ## How do I finetune this model?
65
+
66
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
67
+
68
+ ```py
69
+ >>> model = timm.create_model('legacy_seresnext101_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
70
+ ```
71
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
72
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
73
+
74
+ ## How do I train this model?
75
+
76
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
77
+
78
+ ## Citation
79
+
80
+ ```BibTeX
81
+ @misc{hu2019squeezeandexcitation,
82
+ title={Squeeze-and-Excitation Networks},
83
+ author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu},
84
+ year={2019},
85
+ eprint={1709.01507},
86
+ archivePrefix={arXiv},
87
+ primaryClass={cs.CV}
88
+ }
89
+ ```
90
+
91
+ <!--
92
+ Type: model-index
93
+ Collections:
94
+ - Name: Legacy SE ResNeXt
95
+ Paper:
96
+ Title: Squeeze-and-Excitation Networks
97
+ URL: https://paperswithcode.com/paper/squeeze-and-excitation-networks
98
+ Models:
99
+ - Name: legacy_seresnext101_32x4d
100
+ In Collection: Legacy SE ResNeXt
101
+ Metadata:
102
+ FLOPs: 10287698672
103
+ Parameters: 48960000
104
+ File Size: 196466866
105
+ Architecture:
106
+ - 1x1 Convolution
107
+ - Batch Normalization
108
+ - Convolution
109
+ - Global Average Pooling
110
+ - Grouped Convolution
111
+ - Max Pooling
112
+ - ReLU
113
+ - ResNeXt Block
114
+ - Residual Connection
115
+ - Softmax
116
+ - Squeeze-and-Excitation Block
117
+ Tasks:
118
+ - Image Classification
119
+ Training Techniques:
120
+ - Label Smoothing
121
+ - SGD with Momentum
122
+ - Weight Decay
123
+ Training Data:
124
+ - ImageNet
125
+ Training Resources: 8x NVIDIA Titan X GPUs
126
+ ID: legacy_seresnext101_32x4d
127
+ LR: 0.6
128
+ Epochs: 100
129
+ Layers: 101
130
+ Dropout: 0.2
131
+ Crop Pct: '0.875'
132
+ Momentum: 0.9
133
+ Batch Size: 1024
134
+ Image Size: '224'
135
+ Interpolation: bilinear
136
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L462
137
+ Weights: http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth
138
+ Results:
139
+ - Task: Image Classification
140
+ Dataset: ImageNet
141
+ Metrics:
142
+ Top 1 Accuracy: 80.23%
143
+ Top 5 Accuracy: 95.02%
144
+ - Name: legacy_seresnext26_32x4d
145
+ In Collection: Legacy SE ResNeXt
146
+ Metadata:
147
+ FLOPs: 3187342304
148
+ Parameters: 16790000
149
+ File Size: 67346327
150
+ Architecture:
151
+ - 1x1 Convolution
152
+ - Batch Normalization
153
+ - Convolution
154
+ - Global Average Pooling
155
+ - Grouped Convolution
156
+ - Max Pooling
157
+ - ReLU
158
+ - ResNeXt Block
159
+ - Residual Connection
160
+ - Softmax
161
+ - Squeeze-and-Excitation Block
162
+ Tasks:
163
+ - Image Classification
164
+ Training Techniques:
165
+ - Label Smoothing
166
+ - SGD with Momentum
167
+ - Weight Decay
168
+ Training Data:
169
+ - ImageNet
170
+ Training Resources: 8x NVIDIA Titan X GPUs
171
+ ID: legacy_seresnext26_32x4d
172
+ LR: 0.6
173
+ Epochs: 100
174
+ Layers: 26
175
+ Dropout: 0.2
176
+ Crop Pct: '0.875'
177
+ Momentum: 0.9
178
+ Batch Size: 1024
179
+ Image Size: '224'
180
+ Interpolation: bicubic
181
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L448
182
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26_32x4d-65ebdb501.pth
183
+ Results:
184
+ - Task: Image Classification
185
+ Dataset: ImageNet
186
+ Metrics:
187
+ Top 1 Accuracy: 77.11%
188
+ Top 5 Accuracy: 93.31%
189
+ - Name: legacy_seresnext50_32x4d
190
+ In Collection: Legacy SE ResNeXt
191
+ Metadata:
192
+ FLOPs: 5459954352
193
+ Parameters: 27560000
194
+ File Size: 110559176
195
+ Architecture:
196
+ - 1x1 Convolution
197
+ - Batch Normalization
198
+ - Convolution
199
+ - Global Average Pooling
200
+ - Grouped Convolution
201
+ - Max Pooling
202
+ - ReLU
203
+ - ResNeXt Block
204
+ - Residual Connection
205
+ - Softmax
206
+ - Squeeze-and-Excitation Block
207
+ Tasks:
208
+ - Image Classification
209
+ Training Techniques:
210
+ - Label Smoothing
211
+ - SGD with Momentum
212
+ - Weight Decay
213
+ Training Data:
214
+ - ImageNet
215
+ Training Resources: 8x NVIDIA Titan X GPUs
216
+ ID: legacy_seresnext50_32x4d
217
+ LR: 0.6
218
+ Epochs: 100
219
+ Layers: 50
220
+ Dropout: 0.2
221
+ Crop Pct: '0.875'
222
+ Momentum: 0.9
223
+ Batch Size: 1024
224
+ Image Size: '224'
225
+ Interpolation: bilinear
226
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L455
227
+ Weights: http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth
228
+ Results:
229
+ - Task: Image Classification
230
+ Dataset: ImageNet
231
+ Metrics:
232
+ Top 1 Accuracy: 79.08%
233
+ Top 5 Accuracy: 94.43%
234
+ -->
pytorch-image-models/hfdocs/source/models/legacy-senet.mdx ADDED
@@ -0,0 +1,141 @@
 
1
+ # (Legacy) SENet
2
+
3
+ A **SENet** is a convolutional neural network architecture that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration.
4
+
5
+ The weights from this model were ported from Gluon.
6
+
7
+ ## How do I use this model on an image?
8
+
9
+ To load a pretrained model:
10
+
11
+ ```py
12
+ >>> import timm
13
+ >>> model = timm.create_model('legacy_senet154', pretrained=True)
14
+ >>> model.eval()
15
+ ```
16
+
17
+ To load and preprocess the image:
18
+
19
+ ```py
20
+ >>> import urllib
21
+ >>> from PIL import Image
22
+ >>> from timm.data import resolve_data_config
23
+ >>> from timm.data.transforms_factory import create_transform
24
+
25
+ >>> config = resolve_data_config({}, model=model)
26
+ >>> transform = create_transform(**config)
27
+
28
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
29
+ >>> urllib.request.urlretrieve(url, filename)
30
+ >>> img = Image.open(filename).convert('RGB')
31
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
32
+ ```
33
+
34
+ To get the model predictions:
35
+
36
+ ```py
37
+ >>> import torch
38
+ >>> with torch.no_grad():
39
+ ... out = model(tensor)
40
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
41
+ >>> print(probabilities.shape)
42
+ >>> # prints: torch.Size([1000])
43
+ ```
44
+
45
+ To get the class names of the top-5 predictions:
46
+
47
+ ```py
48
+ >>> # Get imagenet class mappings
49
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
50
+ >>> urllib.request.urlretrieve(url, filename)
51
+ >>> with open("imagenet_classes.txt", "r") as f:
52
+ ... categories = [s.strip() for s in f.readlines()]
53
+
54
+ >>> # Print top categories per image
55
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
56
+ >>> for i in range(top5_prob.size(0)):
57
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
58
+ >>> # prints class names and probabilities like:
59
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
60
+ ```
61
+
62
+ Replace the model name with the variant you want to use, e.g. `legacy_senet154`. You can find the IDs in the model summaries at the top of this page.
63
+
64
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
65
+
66
+ ## How do I finetune this model?
67
+
68
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
69
+
70
+ ```py
71
+ >>> model = timm.create_model('legacy_senet154', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
72
+ ```
73
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
74
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
75
+
76
+ ## How do I train this model?
77
+
78
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
79
+
80
+ ## Citation
81
+
82
+ ```BibTeX
83
+ @misc{hu2019squeezeandexcitation,
84
+ title={Squeeze-and-Excitation Networks},
85
+ author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu},
86
+ year={2019},
87
+ eprint={1709.01507},
88
+ archivePrefix={arXiv},
89
+ primaryClass={cs.CV}
90
+ }
91
+ ```
92
+
93
+ <!--
94
+ Type: model-index
95
+ Collections:
96
+ - Name: Legacy SENet
97
+ Paper:
98
+ Title: Squeeze-and-Excitation Networks
99
+ URL: https://paperswithcode.com/paper/squeeze-and-excitation-networks
100
+ Models:
101
+ - Name: legacy_senet154
102
+ In Collection: Legacy SENet
103
+ Metadata:
104
+ FLOPs: 26659556016
105
+ Parameters: 115090000
106
+ File Size: 461488402
107
+ Architecture:
108
+ - Convolution
109
+ - Dense Connections
110
+ - Global Average Pooling
111
+ - Max Pooling
112
+ - Softmax
113
+ - Squeeze-and-Excitation Block
114
+ Tasks:
115
+ - Image Classification
116
+ Training Techniques:
117
+ - Label Smoothing
118
+ - SGD with Momentum
119
+ - Weight Decay
120
+ Training Data:
121
+ - ImageNet
122
+ Training Resources: 8x NVIDIA Titan X GPUs
123
+ ID: legacy_senet154
124
+ LR: 0.6
125
+ Epochs: 100
126
+ Layers: 154
127
+ Dropout: 0.2
128
+ Crop Pct: '0.875'
129
+ Momentum: 0.9
130
+ Batch Size: 1024
131
+ Image Size: '224'
132
+ Interpolation: bilinear
133
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L440
134
+ Weights: http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth
135
+ Results:
136
+ - Task: Image Classification
137
+ Dataset: ImageNet
138
+ Metrics:
139
+ Top 1 Accuracy: 81.33%
140
+ Top 5 Accuracy: 95.51%
141
+ -->
pytorch-image-models/hfdocs/source/models/mixnet.mdx ADDED
@@ -0,0 +1,231 @@
 
1
+ # MixNet
2
+
3
+ **MixNet** is a type of convolutional neural network discovered via AutoML that utilises [MixConvs](https://paperswithcode.com/method/mixconv) instead of regular [depthwise convolutions](https://paperswithcode.com/method/depthwise-convolution).
4
+
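+ A minimal sketch of the MixConv idea (kernel sizes and the channel-splitting rule are illustrative, not the exact MixNet configuration):
+
+ ```py
+ import torch
+ import torch.nn as nn
+
+ # MixConv: split the channels into groups and give each group a depthwise convolution
+ # with a different kernel size.
+ class MixConv(nn.Module):
+     def __init__(self, channels, kernel_sizes=(3, 5, 7)):
+         super().__init__()
+         splits = [channels // len(kernel_sizes)] * len(kernel_sizes)
+         splits[0] += channels - sum(splits)  # absorb any remainder in the first group
+         self.splits = splits
+         self.convs = nn.ModuleList(
+             nn.Conv2d(c, c, k, padding=k // 2, groups=c)  # depthwise per group
+             for c, k in zip(splits, kernel_sizes)
+         )
+
+     def forward(self, x):
+         parts = torch.split(x, self.splits, dim=1)
+         return torch.cat([conv(part) for conv, part in zip(self.convs, parts)], dim=1)
+ ```
+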
5
+ ## How do I use this model on an image?
6
+
7
+ To load a pretrained model:
8
+
9
+ ```py
10
+ >>> import timm
11
+ >>> model = timm.create_model('mixnet_l', pretrained=True)
12
+ >>> model.eval()
13
+ ```
14
+
15
+ To load and preprocess the image:
16
+
17
+ ```py
18
+ >>> import urllib
19
+ >>> from PIL import Image
20
+ >>> from timm.data import resolve_data_config
21
+ >>> from timm.data.transforms_factory import create_transform
22
+
23
+ >>> config = resolve_data_config({}, model=model)
24
+ >>> transform = create_transform(**config)
25
+
26
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
27
+ >>> urllib.request.urlretrieve(url, filename)
28
+ >>> img = Image.open(filename).convert('RGB')
29
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
30
+ ```
31
+
32
+ To get the model predictions:
33
+
34
+ ```py
35
+ >>> import torch
36
+ >>> with torch.no_grad():
37
+ ... out = model(tensor)
38
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
39
+ >>> print(probabilities.shape)
40
+ >>> # prints: torch.Size([1000])
41
+ ```
42
+
43
+ To get the class names of the top-5 predictions:
44
+
45
+ ```py
46
+ >>> # Get imagenet class mappings
47
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
48
+ >>> urllib.request.urlretrieve(url, filename)
49
+ >>> with open("imagenet_classes.txt", "r") as f:
50
+ ... categories = [s.strip() for s in f.readlines()]
51
+
52
+ >>> # Print top categories per image
53
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
54
+ >>> for i in range(top5_prob.size(0)):
55
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
56
+ >>> # prints class names and probabilities like:
57
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
58
+ ```
59
+
60
+ Replace the model name with the variant you want to use, e.g. `mixnet_l`. You can find the IDs in the model summaries at the top of this page.
61
+
62
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
63
+
64
+ ## How do I finetune this model?
65
+
66
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
67
+
68
+ ```py
69
+ >>> model = timm.create_model('mixnet_l', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
70
+ ```
71
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
72
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
73
+
74
+ ## How do I train this model?
75
+
76
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
77
+
78
+ ## Citation
79
+
80
+ ```BibTeX
81
+ @misc{tan2019mixconv,
82
+ title={MixConv: Mixed Depthwise Convolutional Kernels},
83
+ author={Mingxing Tan and Quoc V. Le},
84
+ year={2019},
85
+ eprint={1907.09595},
86
+ archivePrefix={arXiv},
87
+ primaryClass={cs.CV}
88
+ }
89
+ ```
90
+
91
+ <!--
92
+ Type: model-index
93
+ Collections:
94
+ - Name: MixNet
95
+ Paper:
96
+ Title: 'MixConv: Mixed Depthwise Convolutional Kernels'
97
+ URL: https://paperswithcode.com/paper/mixnet-mixed-depthwise-convolutional-kernels
98
+ Models:
99
+ - Name: mixnet_l
100
+ In Collection: MixNet
101
+ Metadata:
102
+ FLOPs: 738671316
103
+ Parameters: 7330000
104
+ File Size: 29608232
105
+ Architecture:
106
+ - Batch Normalization
107
+ - Dense Connections
108
+ - Dropout
109
+ - Global Average Pooling
110
+ - Grouped Convolution
111
+ - MixConv
112
+ - Squeeze-and-Excitation Block
113
+ - Swish
114
+ Tasks:
115
+ - Image Classification
116
+ Training Techniques:
117
+ - MNAS
118
+ Training Data:
119
+ - ImageNet
120
+ ID: mixnet_l
121
+ Crop Pct: '0.875'
122
+ Image Size: '224'
123
+ Interpolation: bicubic
124
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1669
125
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_l-5a9a2ed8.pth
126
+ Results:
127
+ - Task: Image Classification
128
+ Dataset: ImageNet
129
+ Metrics:
130
+ Top 1 Accuracy: 78.98%
131
+ Top 5 Accuracy: 94.18%
132
+ - Name: mixnet_m
133
+ In Collection: MixNet
134
+ Metadata:
135
+ FLOPs: 454543374
136
+ Parameters: 5010000
137
+ File Size: 20298347
138
+ Architecture:
139
+ - Batch Normalization
140
+ - Dense Connections
141
+ - Dropout
142
+ - Global Average Pooling
143
+ - Grouped Convolution
144
+ - MixConv
145
+ - Squeeze-and-Excitation Block
146
+ - Swish
147
+ Tasks:
148
+ - Image Classification
149
+ Training Techniques:
150
+ - MNAS
151
+ Training Data:
152
+ - ImageNet
153
+ ID: mixnet_m
154
+ Crop Pct: '0.875'
155
+ Image Size: '224'
156
+ Interpolation: bicubic
157
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1660
158
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_m-4647fc68.pth
159
+ Results:
160
+ - Task: Image Classification
161
+ Dataset: ImageNet
162
+ Metrics:
163
+ Top 1 Accuracy: 77.27%
164
+ Top 5 Accuracy: 93.42%
165
+ - Name: mixnet_s
166
+ In Collection: MixNet
167
+ Metadata:
168
+ FLOPs: 321264910
169
+ Parameters: 4130000
170
+ File Size: 16727982
171
+ Architecture:
172
+ - Batch Normalization
173
+ - Dense Connections
174
+ - Dropout
175
+ - Global Average Pooling
176
+ - Grouped Convolution
177
+ - MixConv
178
+ - Squeeze-and-Excitation Block
179
+ - Swish
180
+ Tasks:
181
+ - Image Classification
182
+ Training Techniques:
183
+ - MNAS
184
+ Training Data:
185
+ - ImageNet
186
+ ID: mixnet_s
187
+ Crop Pct: '0.875'
188
+ Image Size: '224'
189
+ Interpolation: bicubic
190
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1651
191
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_s-a907afbc.pth
192
+ Results:
193
+ - Task: Image Classification
194
+ Dataset: ImageNet
195
+ Metrics:
196
+ Top 1 Accuracy: 75.99%
197
+ Top 5 Accuracy: 92.79%
198
+ - Name: mixnet_xl
199
+ In Collection: MixNet
200
+ Metadata:
201
+ FLOPs: 1195880424
202
+ Parameters: 11900000
203
+ File Size: 48001170
204
+ Architecture:
205
+ - Batch Normalization
206
+ - Dense Connections
207
+ - Dropout
208
+ - Global Average Pooling
209
+ - Grouped Convolution
210
+ - MixConv
211
+ - Squeeze-and-Excitation Block
212
+ - Swish
213
+ Tasks:
214
+ - Image Classification
215
+ Training Techniques:
216
+ - MNAS
217
+ Training Data:
218
+ - ImageNet
219
+ ID: mixnet_xl
220
+ Crop Pct: '0.875'
221
+ Image Size: '224'
222
+ Interpolation: bicubic
223
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1678
224
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_xl_ra-aac3c00c.pth
225
+ Results:
226
+ - Task: Image Classification
227
+ Dataset: ImageNet
228
+ Metrics:
229
+ Top 1 Accuracy: 80.47%
230
+ Top 5 Accuracy: 94.93%
231
+ -->
pytorch-image-models/hfdocs/source/models/mnasnet.mdx ADDED
@@ -0,0 +1,176 @@
 
1
+ # MnasNet
2
+
3
+ **MnasNet** is a type of convolutional neural network optimized for mobile devices, discovered through mobile neural architecture search, which explicitly incorporates model latency into the main objective so that the search can identify a model that achieves a good trade-off between accuracy and latency. The main building block is an [inverted residual block](https://paperswithcode.com/method/inverted-residual-block) (from [MobileNetV2](https://paperswithcode.com/method/mobilenetv2)).
4
+
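+ A minimal sketch of such an inverted residual block (the expansion factor and the stride-1, equal-width setting are illustrative, not timm's implementation):
+
+ ```py
+ import torch.nn as nn
+
+ # Inverted residual (MBConv-style) block: expand with a 1x1 convolution, filter with a
+ # depthwise 3x3 convolution, project back down with a linear 1x1 convolution, and add
+ # a skip connection when the input and output shapes match.
+ class InvertedResidual(nn.Module):
+     def __init__(self, channels, expansion=6):
+         super().__init__()
+         hidden = channels * expansion
+         self.block = nn.Sequential(
+             nn.Conv2d(channels, hidden, 1, bias=False),
+             nn.BatchNorm2d(hidden),
+             nn.ReLU6(inplace=True),
+             nn.Conv2d(hidden, hidden, 3, padding=1, groups=hidden, bias=False),
+             nn.BatchNorm2d(hidden),
+             nn.ReLU6(inplace=True),
+             nn.Conv2d(hidden, channels, 1, bias=False),
+             nn.BatchNorm2d(channels),  # linear bottleneck: no activation after projection
+         )
+
+     def forward(self, x):
+         return x + self.block(x)
+ ```
+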
5
+ ## How do I use this model on an image?
6
+
7
+ To load a pretrained model:
8
+
9
+ ```py
10
+ >>> import timm
11
+ >>> model = timm.create_model('mnasnet_100', pretrained=True)
12
+ >>> model.eval()
13
+ ```
14
+
15
+ To load and preprocess the image:
16
+
17
+ ```py
18
+ >>> import urllib
19
+ >>> from PIL import Image
20
+ >>> from timm.data import resolve_data_config
21
+ >>> from timm.data.transforms_factory import create_transform
22
+
23
+ >>> config = resolve_data_config({}, model=model)
24
+ >>> transform = create_transform(**config)
25
+
26
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
27
+ >>> urllib.request.urlretrieve(url, filename)
28
+ >>> img = Image.open(filename).convert('RGB')
29
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
30
+ ```
31
+
32
+ To get the model predictions:
33
+
34
+ ```py
35
+ >>> import torch
36
+ >>> with torch.no_grad():
37
+ ... out = model(tensor)
38
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
39
+ >>> print(probabilities.shape)
40
+ >>> # prints: torch.Size([1000])
41
+ ```
42
+
43
+ To get the top-5 predictions class names:
44
+
45
+ ```py
46
+ >>> # Get imagenet class mappings
47
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
48
+ >>> urllib.request.urlretrieve(url, filename)
49
+ >>> with open("imagenet_classes.txt", "r") as f:
50
+ ... categories = [s.strip() for s in f.readlines()]
51
+
52
+ >>> # Print top categories per image
53
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
54
+ >>> for i in range(top5_prob.size(0)):
55
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
56
+ >>> # prints class names and probabilities like:
57
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
58
+ ```
59
+
60
+ Replace the model name with the variant you want to use, e.g. `mnasnet_100`. You can find the IDs in the model summaries at the top of this page.
61
+
62
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
63
+
64
+ ## How do I finetune this model?
65
+
66
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
67
+
68
+ ```py
69
+ >>> model = timm.create_model('mnasnet_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
70
+ ```
71
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
72
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
73
+
74
+ ## How do I train this model?
75
+
76
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
77
+
78
+ ## Citation
79
+
80
+ ```BibTeX
81
+ @misc{tan2019mnasnet,
82
+ title={MnasNet: Platform-Aware Neural Architecture Search for Mobile},
83
+ author={Mingxing Tan and Bo Chen and Ruoming Pang and Vijay Vasudevan and Mark Sandler and Andrew Howard and Quoc V. Le},
84
+ year={2019},
85
+ eprint={1807.11626},
86
+ archivePrefix={arXiv},
87
+ primaryClass={cs.CV}
88
+ }
89
+ ```
90
+
91
+ <!--
92
+ Type: model-index
93
+ Collections:
94
+ - Name: MNASNet
95
+ Paper:
96
+ Title: 'MnasNet: Platform-Aware Neural Architecture Search for Mobile'
97
+ URL: https://paperswithcode.com/paper/mnasnet-platform-aware-neural-architecture
98
+ Models:
99
+ - Name: mnasnet_100
100
+ In Collection: MNASNet
101
+ Metadata:
102
+ FLOPs: 416415488
103
+ Parameters: 4380000
104
+ File Size: 17731774
105
+ Architecture:
106
+ - 1x1 Convolution
107
+ - Batch Normalization
108
+ - Convolution
109
+ - Depthwise Separable Convolution
110
+ - Dropout
111
+ - Global Average Pooling
112
+ - Inverted Residual Block
113
+ - Max Pooling
114
+ - ReLU
115
+ - Residual Connection
116
+ - Softmax
117
+ Tasks:
118
+ - Image Classification
119
+ Training Techniques:
120
+ - RMSProp
121
+ - Weight Decay
122
+ Training Data:
123
+ - ImageNet
124
+ ID: mnasnet_100
125
+ Layers: 100
126
+ Dropout: 0.2
127
+ Crop Pct: '0.875'
128
+ Momentum: 0.9
129
+ Batch Size: 4000
130
+ Image Size: '224'
131
+ Interpolation: bicubic
132
+ RMSProp Decay: 0.9
133
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L894
134
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_b1-74cb7081.pth
135
+ Results:
136
+ - Task: Image Classification
137
+ Dataset: ImageNet
138
+ Metrics:
139
+ Top 1 Accuracy: 74.67%
140
+ Top 5 Accuracy: 92.1%
141
+ - Name: semnasnet_100
142
+ In Collection: MNASNet
143
+ Metadata:
144
+ FLOPs: 414570766
145
+ Parameters: 3890000
146
+ File Size: 15731489
147
+ Architecture:
148
+ - 1x1 Convolution
149
+ - Batch Normalization
150
+ - Convolution
151
+ - Depthwise Separable Convolution
152
+ - Dropout
153
+ - Global Average Pooling
154
+ - Inverted Residual Block
155
+ - Max Pooling
156
+ - ReLU
157
+ - Residual Connection
158
+ - Softmax
159
+ - Squeeze-and-Excitation Block
160
+ Tasks:
161
+ - Image Classification
162
+ Training Data:
163
+ - ImageNet
164
+ ID: semnasnet_100
165
+ Crop Pct: '0.875'
166
+ Image Size: '224'
167
+ Interpolation: bicubic
168
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L928
169
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_a1-d9418771.pth
170
+ Results:
171
+ - Task: Image Classification
172
+ Dataset: ImageNet
173
+ Metrics:
174
+ Top 1 Accuracy: 75.45%
175
+ Top 5 Accuracy: 92.61%
176
+ -->
pytorch-image-models/hfdocs/source/models/mobilenet-v2.mdx ADDED
@@ -0,0 +1,277 @@
1
+ # MobileNet v2
2
+
3
+ **MobileNetV2** is a convolutional neural network architecture that seeks to perform well on mobile devices. It is based on an [inverted residual structure](https://paperswithcode.com/method/inverted-residual-block) where the residual connections are between the bottleneck layers. The intermediate expansion layer uses lightweight depthwise convolutions to filter features as a source of non-linearity. As a whole, the architecture of MobileNetV2 contains the initial fully convolutional layer with 32 filters, followed by 19 residual bottleneck layers.
4
+
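+ To make the block structure concrete, here is a minimal PyTorch sketch of an inverted residual block (1x1 expansion, 3x3 depthwise convolution, linear 1x1 projection, with a residual connection when shapes match). It is illustrative only, not timm's actual implementation, and the channel counts are arbitrary.
+
+ ```py
+ >>> # A minimal sketch of an inverted residual block (1x1 expand -> 3x3 depthwise
+ >>> # -> linear 1x1 projection); illustrative only, not timm's implementation.
+ >>> import torch
+ >>> import torch.nn as nn
+
+ >>> class InvertedResidual(nn.Module):
+ ...     def __init__(self, in_ch, out_ch, stride=1, expand_ratio=6):
+ ...         super().__init__()
+ ...         hidden = in_ch * expand_ratio
+ ...         self.use_residual = stride == 1 and in_ch == out_ch
+ ...         self.block = nn.Sequential(
+ ...             nn.Conv2d(in_ch, hidden, 1, bias=False),                             # 1x1 expansion
+ ...             nn.BatchNorm2d(hidden), nn.ReLU6(inplace=True),
+ ...             nn.Conv2d(hidden, hidden, 3, stride, 1, groups=hidden, bias=False),  # 3x3 depthwise
+ ...             nn.BatchNorm2d(hidden), nn.ReLU6(inplace=True),
+ ...             nn.Conv2d(hidden, out_ch, 1, bias=False),                            # linear 1x1 projection
+ ...             nn.BatchNorm2d(out_ch),
+ ...         )
+ ...     def forward(self, x):
+ ...         out = self.block(x)
+ ...         return x + out if self.use_residual else out
+
+ >>> InvertedResidual(32, 32)(torch.randn(1, 32, 56, 56)).shape  # torch.Size([1, 32, 56, 56])
+ ```
+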
5
+ ## How do I use this model on an image?
6
+
7
+ To load a pretrained model:
8
+
9
+ ```py
10
+ >>> import timm
11
+ >>> model = timm.create_model('mobilenetv2_100', pretrained=True)
12
+ >>> model.eval()
13
+ ```
14
+
15
+ To load and preprocess the image:
16
+
17
+ ```py
18
+ >>> import urllib
19
+ >>> from PIL import Image
20
+ >>> from timm.data import resolve_data_config
21
+ >>> from timm.data.transforms_factory import create_transform
22
+
23
+ >>> config = resolve_data_config({}, model=model)
24
+ >>> transform = create_transform(**config)
25
+
26
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
27
+ >>> urllib.request.urlretrieve(url, filename)
28
+ >>> img = Image.open(filename).convert('RGB')
29
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
30
+ ```
31
+
32
+ To get the model predictions:
33
+
34
+ ```py
35
+ >>> import torch
36
+ >>> with torch.no_grad():
37
+ ... out = model(tensor)
38
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
39
+ >>> print(probabilities.shape)
40
+ >>> # prints: torch.Size([1000])
41
+ ```
42
+
43
+ To get the top-5 predictions class names:
44
+
45
+ ```py
46
+ >>> # Get imagenet class mappings
47
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
48
+ >>> urllib.request.urlretrieve(url, filename)
49
+ >>> with open("imagenet_classes.txt", "r") as f:
50
+ ... categories = [s.strip() for s in f.readlines()]
51
+
52
+ >>> # Print top categories per image
53
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
54
+ >>> for i in range(top5_prob.size(0)):
55
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
56
+ >>> # prints class names and probabilities like:
57
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
58
+ ```
59
+
60
+ Replace the model name with the variant you want to use, e.g. `mobilenetv2_100`. You can find the IDs in the model summaries at the top of this page.
61
+
62
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
63
+
64
+ ## How do I finetune this model?
65
+
66
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
67
+
68
+ ```py
69
+ >>> model = timm.create_model('mobilenetv2_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
70
+ ```
71
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
72
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
73
+
74
+ ## How do I train this model?
75
+
76
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
77
+
78
+ ## Citation
79
+
80
+ ```BibTeX
81
+ @article{DBLP:journals/corr/abs-1801-04381,
82
+ author = {Mark Sandler and
83
+ Andrew G. Howard and
84
+ Menglong Zhu and
85
+ Andrey Zhmoginov and
86
+ Liang{-}Chieh Chen},
87
+ title = {Inverted Residuals and Linear Bottlenecks: Mobile Networks for Classification,
88
+ Detection and Segmentation},
89
+ journal = {CoRR},
90
+ volume = {abs/1801.04381},
91
+ year = {2018},
92
+ url = {http://arxiv.org/abs/1801.04381},
93
+ archivePrefix = {arXiv},
94
+ eprint = {1801.04381},
95
+ timestamp = {Tue, 12 Jan 2021 15:30:06 +0100},
96
+ biburl = {https://dblp.org/rec/journals/corr/abs-1801-04381.bib},
97
+ bibsource = {dblp computer science bibliography, https://dblp.org}
98
+ }
99
+ ```
100
+
101
+ <!--
102
+ Type: model-index
103
+ Collections:
104
+ - Name: MobileNet V2
105
+ Paper:
106
+ Title: 'MobileNetV2: Inverted Residuals and Linear Bottlenecks'
107
+ URL: https://paperswithcode.com/paper/mobilenetv2-inverted-residuals-and-linear
108
+ Models:
109
+ - Name: mobilenetv2_100
110
+ In Collection: MobileNet V2
111
+ Metadata:
112
+ FLOPs: 401920448
113
+ Parameters: 3500000
114
+ File Size: 14202571
115
+ Architecture:
116
+ - 1x1 Convolution
117
+ - Batch Normalization
118
+ - Convolution
119
+ - Depthwise Separable Convolution
120
+ - Dropout
121
+ - Inverted Residual Block
122
+ - Max Pooling
123
+ - ReLU6
124
+ - Residual Connection
125
+ - Softmax
126
+ Tasks:
127
+ - Image Classification
128
+ Training Techniques:
129
+ - RMSProp
130
+ - Weight Decay
131
+ Training Data:
132
+ - ImageNet
133
+ Training Resources: 16x GPUs
134
+ ID: mobilenetv2_100
135
+ LR: 0.045
136
+ Crop Pct: '0.875'
137
+ Momentum: 0.9
138
+ Batch Size: 1536
139
+ Image Size: '224'
140
+ Weight Decay: 4.0e-05
141
+ Interpolation: bicubic
142
+ RMSProp Decay: 0.9
143
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L955
144
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_100_ra-b33bc2c4.pth
145
+ Results:
146
+ - Task: Image Classification
147
+ Dataset: ImageNet
148
+ Metrics:
149
+ Top 1 Accuracy: 72.95%
150
+ Top 5 Accuracy: 91.0%
151
+ - Name: mobilenetv2_110d
152
+ In Collection: MobileNet V2
153
+ Metadata:
154
+ FLOPs: 573958832
155
+ Parameters: 4520000
156
+ File Size: 18316431
157
+ Architecture:
158
+ - 1x1 Convolution
159
+ - Batch Normalization
160
+ - Convolution
161
+ - Depthwise Separable Convolution
162
+ - Dropout
163
+ - Inverted Residual Block
164
+ - Max Pooling
165
+ - ReLU6
166
+ - Residual Connection
167
+ - Softmax
168
+ Tasks:
169
+ - Image Classification
170
+ Training Techniques:
171
+ - RMSProp
172
+ - Weight Decay
173
+ Training Data:
174
+ - ImageNet
175
+ Training Resources: 16x GPUs
176
+ ID: mobilenetv2_110d
177
+ LR: 0.045
178
+ Crop Pct: '0.875'
179
+ Momentum: 0.9
180
+ Batch Size: 1536
181
+ Image Size: '224'
182
+ Weight Decay: 4.0e-05
183
+ Interpolation: bicubic
184
+ RMSProp Decay: 0.9
185
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L969
186
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_110d_ra-77090ade.pth
187
+ Results:
188
+ - Task: Image Classification
189
+ Dataset: ImageNet
190
+ Metrics:
191
+ Top 1 Accuracy: 75.05%
192
+ Top 5 Accuracy: 92.19%
193
+ - Name: mobilenetv2_120d
194
+ In Collection: MobileNet V2
195
+ Metadata:
196
+ FLOPs: 888510048
197
+ Parameters: 5830000
198
+ File Size: 23651121
199
+ Architecture:
200
+ - 1x1 Convolution
201
+ - Batch Normalization
202
+ - Convolution
203
+ - Depthwise Separable Convolution
204
+ - Dropout
205
+ - Inverted Residual Block
206
+ - Max Pooling
207
+ - ReLU6
208
+ - Residual Connection
209
+ - Softmax
210
+ Tasks:
211
+ - Image Classification
212
+ Training Techniques:
213
+ - RMSProp
214
+ - Weight Decay
215
+ Training Data:
216
+ - ImageNet
217
+ Training Resources: 16x GPUs
218
+ ID: mobilenetv2_120d
219
+ LR: 0.045
220
+ Crop Pct: '0.875'
221
+ Momentum: 0.9
222
+ Batch Size: 1536
223
+ Image Size: '224'
224
+ Weight Decay: 4.0e-05
225
+ Interpolation: bicubic
226
+ RMSProp Decay: 0.9
227
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L977
228
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_120d_ra-5987e2ed.pth
229
+ Results:
230
+ - Task: Image Classification
231
+ Dataset: ImageNet
232
+ Metrics:
233
+ Top 1 Accuracy: 77.28%
234
+ Top 5 Accuracy: 93.51%
235
+ - Name: mobilenetv2_140
236
+ In Collection: MobileNet V2
237
+ Metadata:
238
+ FLOPs: 770196784
239
+ Parameters: 6110000
240
+ File Size: 24673555
241
+ Architecture:
242
+ - 1x1 Convolution
243
+ - Batch Normalization
244
+ - Convolution
245
+ - Depthwise Separable Convolution
246
+ - Dropout
247
+ - Inverted Residual Block
248
+ - Max Pooling
249
+ - ReLU6
250
+ - Residual Connection
251
+ - Softmax
252
+ Tasks:
253
+ - Image Classification
254
+ Training Techniques:
255
+ - RMSProp
256
+ - Weight Decay
257
+ Training Data:
258
+ - ImageNet
259
+ Training Resources: 16x GPUs
260
+ ID: mobilenetv2_140
261
+ LR: 0.045
262
+ Crop Pct: '0.875'
263
+ Momentum: 0.9
264
+ Batch Size: 1536
265
+ Image Size: '224'
266
+ Weight Decay: 4.0e-05
267
+ Interpolation: bicubic
268
+ RMSProp Decay: 0.9
269
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L962
270
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_140_ra-21a4e913.pth
271
+ Results:
272
+ - Task: Image Classification
273
+ Dataset: ImageNet
274
+ Metrics:
275
+ Top 1 Accuracy: 76.51%
276
+ Top 5 Accuracy: 93.0%
277
+ -->
pytorch-image-models/hfdocs/source/models/mobilenet-v3.mdx ADDED
@@ -0,0 +1,205 @@
1
+ # MobileNet v3
2
+
3
+ **MobileNetV3** is a convolutional neural network that is designed for mobile phone CPUs. The network design includes the use of a [hard swish activation](https://paperswithcode.com/method/hard-swish) and [squeeze-and-excitation](https://paperswithcode.com/method/squeeze-and-excitation-block) modules in the [MBConv blocks](https://paperswithcode.com/method/inverted-residual-block).
4
+
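+ As a minimal sketch of those two components, the snippet below implements a hard-swish activation and a squeeze-and-excitation module with a hard-sigmoid gate; it is illustrative only, not timm's implementation, and the channel sizes are arbitrary.
+
+ ```py
+ >>> # Illustrative sketch of hard swish and a squeeze-and-excitation module
+ >>> # (hard-sigmoid gate); not timm's actual implementation.
+ >>> import torch
+ >>> import torch.nn as nn
+ >>> import torch.nn.functional as F
+
+ >>> def hard_swish(x):
+ ...     return x * F.relu6(x + 3.0) / 6.0
+
+ >>> class SqueezeExcite(nn.Module):
+ ...     def __init__(self, channels, reduction=4):
+ ...         super().__init__()
+ ...         self.fc1 = nn.Conv2d(channels, channels // reduction, 1)
+ ...         self.fc2 = nn.Conv2d(channels // reduction, channels, 1)
+ ...     def forward(self, x):
+ ...         s = F.adaptive_avg_pool2d(x, 1)       # squeeze: global average pool
+ ...         s = self.fc2(F.relu(self.fc1(s)))     # excite: bottleneck of two 1x1 convs
+ ...         return x * (F.relu6(s + 3.0) / 6.0)   # hard-sigmoid channel gate
+
+ >>> x = torch.randn(1, 16, 32, 32)
+ >>> SqueezeExcite(16)(hard_swish(x)).shape  # torch.Size([1, 16, 32, 32])
+ ```
+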
5
+ ## How do I use this model on an image?
6
+
7
+ To load a pretrained model:
8
+
9
+ ```py
10
+ >>> import timm
11
+ >>> model = timm.create_model('mobilenetv3_large_100', pretrained=True)
12
+ >>> model.eval()
13
+ ```
14
+
15
+ To load and preprocess the image:
16
+
17
+ ```py
18
+ >>> import urllib
19
+ >>> from PIL import Image
20
+ >>> from timm.data import resolve_data_config
21
+ >>> from timm.data.transforms_factory import create_transform
22
+
23
+ >>> config = resolve_data_config({}, model=model)
24
+ >>> transform = create_transform(**config)
25
+
26
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
27
+ >>> urllib.request.urlretrieve(url, filename)
28
+ >>> img = Image.open(filename).convert('RGB')
29
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
30
+ ```
31
+
32
+ To get the model predictions:
33
+
34
+ ```py
35
+ >>> import torch
36
+ >>> with torch.no_grad():
37
+ ... out = model(tensor)
38
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
39
+ >>> print(probabilities.shape)
40
+ >>> # prints: torch.Size([1000])
41
+ ```
42
+
43
+ To get the top-5 predictions class names:
44
+
45
+ ```py
46
+ >>> # Get imagenet class mappings
47
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
48
+ >>> urllib.request.urlretrieve(url, filename)
49
+ >>> with open("imagenet_classes.txt", "r") as f:
50
+ ... categories = [s.strip() for s in f.readlines()]
51
+
52
+ >>> # Print top categories per image
53
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
54
+ >>> for i in range(top5_prob.size(0)):
55
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
56
+ >>> # prints class names and probabilities like:
57
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
58
+ ```
59
+
60
+ Replace the model name with the variant you want to use, e.g. `mobilenetv3_large_100`. You can find the IDs in the model summaries at the top of this page.
61
+
62
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
63
+
64
+ ## How do I finetune this model?
65
+
66
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
67
+
68
+ ```py
69
+ >>> model = timm.create_model('mobilenetv3_large_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
70
+ ```
71
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
72
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
73
+
74
+ ## How do I train this model?
75
+
76
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
77
+
78
+ ## Citation
79
+
80
+ ```BibTeX
81
+ @article{DBLP:journals/corr/abs-1905-02244,
82
+ author = {Andrew Howard and
83
+ Mark Sandler and
84
+ Grace Chu and
85
+ Liang{-}Chieh Chen and
86
+ Bo Chen and
87
+ Mingxing Tan and
88
+ Weijun Wang and
89
+ Yukun Zhu and
90
+ Ruoming Pang and
91
+ Vijay Vasudevan and
92
+ Quoc V. Le and
93
+ Hartwig Adam},
94
+ title = {Searching for MobileNetV3},
95
+ journal = {CoRR},
96
+ volume = {abs/1905.02244},
97
+ year = {2019},
98
+ url = {http://arxiv.org/abs/1905.02244},
99
+ archivePrefix = {arXiv},
100
+ eprint = {1905.02244},
101
+ timestamp = {Tue, 12 Jan 2021 15:30:06 +0100},
102
+ biburl = {https://dblp.org/rec/journals/corr/abs-1905-02244.bib},
103
+ bibsource = {dblp computer science bibliography, https://dblp.org}
104
+ }
105
+ ```
106
+
107
+ <!--
108
+ Type: model-index
109
+ Collections:
110
+ - Name: MobileNet V3
111
+ Paper:
112
+ Title: Searching for MobileNetV3
113
+ URL: https://paperswithcode.com/paper/searching-for-mobilenetv3
114
+ Models:
115
+ - Name: mobilenetv3_large_100
116
+ In Collection: MobileNet V3
117
+ Metadata:
118
+ FLOPs: 287193752
119
+ Parameters: 5480000
120
+ File Size: 22076443
121
+ Architecture:
122
+ - 1x1 Convolution
123
+ - Batch Normalization
124
+ - Convolution
125
+ - Dense Connections
126
+ - Depthwise Separable Convolution
127
+ - Dropout
128
+ - Global Average Pooling
129
+ - Hard Swish
130
+ - Inverted Residual Block
131
+ - ReLU
132
+ - Residual Connection
133
+ - Softmax
134
+ - Squeeze-and-Excitation Block
135
+ Tasks:
136
+ - Image Classification
137
+ Training Techniques:
138
+ - RMSProp
139
+ - Weight Decay
140
+ Training Data:
141
+ - ImageNet
142
+ Training Resources: 4x4 TPU Pod
143
+ ID: mobilenetv3_large_100
144
+ LR: 0.1
145
+ Dropout: 0.8
146
+ Crop Pct: '0.875'
147
+ Momentum: 0.9
148
+ Batch Size: 4096
149
+ Image Size: '224'
150
+ Weight Decay: 1.0e-05
151
+ Interpolation: bicubic
152
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L363
153
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_large_100_ra-f55367f5.pth
154
+ Results:
155
+ - Task: Image Classification
156
+ Dataset: ImageNet
157
+ Metrics:
158
+ Top 1 Accuracy: 75.77%
159
+ Top 5 Accuracy: 92.54%
160
+ - Name: mobilenetv3_rw
161
+ In Collection: MobileNet V3
162
+ Metadata:
163
+ FLOPs: 287190638
164
+ Parameters: 5480000
165
+ File Size: 22064048
166
+ Architecture:
167
+ - 1x1 Convolution
168
+ - Batch Normalization
169
+ - Convolution
170
+ - Dense Connections
171
+ - Depthwise Separable Convolution
172
+ - Dropout
173
+ - Global Average Pooling
174
+ - Hard Swish
175
+ - Inverted Residual Block
176
+ - ReLU
177
+ - Residual Connection
178
+ - Softmax
179
+ - Squeeze-and-Excitation Block
180
+ Tasks:
181
+ - Image Classification
182
+ Training Techniques:
183
+ - RMSProp
184
+ - Weight Decay
185
+ Training Data:
186
+ - ImageNet
187
+ Training Resources: 4x4 TPU Pod
188
+ ID: mobilenetv3_rw
189
+ LR: 0.1
190
+ Dropout: 0.8
191
+ Crop Pct: '0.875'
192
+ Momentum: 0.9
193
+ Batch Size: 4096
194
+ Image Size: '224'
195
+ Weight Decay: 1.0e-05
196
+ Interpolation: bicubic
197
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L384
198
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_100-35495452.pth
199
+ Results:
200
+ - Task: Image Classification
201
+ Dataset: ImageNet
202
+ Metrics:
203
+ Top 1 Accuracy: 75.62%
204
+ Top 5 Accuracy: 92.71%
205
+ -->
pytorch-image-models/hfdocs/source/models/nasnet.mdx ADDED
@@ -0,0 +1,137 @@
1
+ # NASNet
2
+
3
+ **NASNet** is a type of convolutional neural network discovered through neural architecture search. The building blocks consist of normal and reduction cells.
4
+
5
+ ## How do I use this model on an image?
6
+
7
+ To load a pretrained model:
8
+
9
+ ```py
10
+ >>> import timm
11
+ >>> model = timm.create_model('nasnetalarge', pretrained=True)
12
+ >>> model.eval()
13
+ ```
14
+
15
+ To load and preprocess the image:
16
+
17
+ ```py
18
+ >>> import urllib
19
+ >>> from PIL import Image
20
+ >>> from timm.data import resolve_data_config
21
+ >>> from timm.data.transforms_factory import create_transform
22
+
23
+ >>> config = resolve_data_config({}, model=model)
24
+ >>> transform = create_transform(**config)
25
+
26
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
27
+ >>> urllib.request.urlretrieve(url, filename)
28
+ >>> img = Image.open(filename).convert('RGB')
29
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
30
+ ```
31
+
32
+ To get the model predictions:
33
+
34
+ ```py
35
+ >>> import torch
36
+ >>> with torch.no_grad():
37
+ ... out = model(tensor)
38
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
39
+ >>> print(probabilities.shape)
40
+ >>> # prints: torch.Size([1000])
41
+ ```
42
+
43
+ To get the top-5 predictions class names:
44
+
45
+ ```py
46
+ >>> # Get imagenet class mappings
47
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
48
+ >>> urllib.request.urlretrieve(url, filename)
49
+ >>> with open("imagenet_classes.txt", "r") as f:
50
+ ... categories = [s.strip() for s in f.readlines()]
51
+
52
+ >>> # Print top categories per image
53
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
54
+ >>> for i in range(top5_prob.size(0)):
55
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
56
+ >>> # prints class names and probabilities like:
57
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
58
+ ```
59
+
60
+ Replace the model name with the variant you want to use, e.g. `nasnetalarge`. You can find the IDs in the model summaries at the top of this page.
61
+
62
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
63
+
64
+ ## How do I finetune this model?
65
+
66
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
67
+
68
+ ```py
69
+ >>> model = timm.create_model('nasnetalarge', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
70
+ ```
71
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
72
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
73
+
74
+ ## How do I train this model?
75
+
76
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
77
+
78
+ ## Citation
79
+
80
+ ```BibTeX
81
+ @misc{zoph2018learning,
82
+ title={Learning Transferable Architectures for Scalable Image Recognition},
83
+ author={Barret Zoph and Vijay Vasudevan and Jonathon Shlens and Quoc V. Le},
84
+ year={2018},
85
+ eprint={1707.07012},
86
+ archivePrefix={arXiv},
87
+ primaryClass={cs.CV}
88
+ }
89
+ ```
90
+
91
+ <!--
92
+ Type: model-index
93
+ Collections:
94
+ - Name: NASNet
95
+ Paper:
96
+ Title: Learning Transferable Architectures for Scalable Image Recognition
97
+ URL: https://paperswithcode.com/paper/learning-transferable-architectures-for
98
+ Models:
99
+ - Name: nasnetalarge
100
+ In Collection: NASNet
101
+ Metadata:
102
+ FLOPs: 30242402862
103
+ Parameters: 88750000
104
+ File Size: 356056626
105
+ Architecture:
106
+ - Average Pooling
107
+ - Batch Normalization
108
+ - Convolution
109
+ - Depthwise Separable Convolution
110
+ - Dropout
111
+ - ReLU
112
+ Tasks:
113
+ - Image Classification
114
+ Training Techniques:
115
+ - Label Smoothing
116
+ - RMSProp
117
+ - Weight Decay
118
+ Training Data:
119
+ - ImageNet
120
+ Training Resources: 50x Tesla K40 GPUs
121
+ ID: nasnetalarge
122
+ Dropout: 0.5
123
+ Crop Pct: '0.911'
124
+ Momentum: 0.9
125
+ Image Size: '331'
126
+ Interpolation: bicubic
127
+ Label Smoothing: 0.1
128
+ RMSProp \\( \epsilon \\): 1.0
129
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/nasnet.py#L562
130
+ Weights: http://data.lip6.fr/cadene/pretrainedmodels/nasnetalarge-a1897284.pth
131
+ Results:
132
+ - Task: Image Classification
133
+ Dataset: ImageNet
134
+ Metrics:
135
+ Top 1 Accuracy: 82.63%
136
+ Top 5 Accuracy: 96.05%
137
+ -->
pytorch-image-models/hfdocs/source/models/noisy-student.mdx ADDED
@@ -0,0 +1,577 @@
1
+ # Noisy Student (EfficientNet)
2
+
3
+ **Noisy Student Training** is a semi-supervised learning approach. It extends the idea of self-training
4
+ and distillation with the use of equal-or-larger student models and noise added to the student during learning. It has three main steps:
5
+
6
+ 1. train a teacher model on labeled images
7
+ 2. use the teacher to generate pseudo labels on unlabeled images
8
+ 3. train a student model on the combination of labeled images and pseudo labeled images.
9
+
10
+ The algorithm is iterated a few times by treating the student as a teacher to relabel the unlabeled data and training a new student.
11
+
12
+ Noisy Student Training seeks to improve on self-training and distillation in two ways. First, it makes the student larger than, or at least equal to, the teacher so the student can better learn from a larger dataset. Second, it adds noise to the student so the noised student is forced to learn harder from the pseudo labels. To noise the student, it uses input noise such as RandAugment data augmentation, and model noise such as dropout and stochastic depth during training.
13
+
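+ The loop below is a schematic sketch of these steps; `train_fn`, `make_student` and the initial `teacher` are hypothetical callables standing in for real training code rather than timm or paper APIs.
+
+ ```py
+ >>> # Schematic sketch of the Noisy Student loop. train_fn, make_student and the
+ >>> # initial teacher are hypothetical callables, not real APIs; the teacher is
+ >>> # assumed to have already been trained on the labeled images (step 1).
+ >>> def noisy_student_loop(train_fn, make_student, teacher, labeled, unlabeled, iterations=3):
+ ...     for _ in range(iterations):
+ ...         pseudo = [(x, teacher(x)) for x in unlabeled]     # step 2: pseudo-label unlabeled images
+ ...         student = make_student()                          # equal-or-larger student model
+ ...         teacher = train_fn(student, labeled + pseudo,     # step 3: train on labeled + pseudo-labeled data
+ ...                            noise=True)                    # RandAugment, dropout, stochastic depth
+ ...     return teacher
+ ```
+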
14
+ ## How do I use this model on an image?
15
+
16
+ To load a pretrained model:
17
+
18
+ ```py
19
+ >>> import timm
20
+ >>> model = timm.create_model('tf_efficientnet_b0_ns', pretrained=True)
21
+ >>> model.eval()
22
+ ```
23
+
24
+ To load and preprocess the image:
25
+
26
+ ```py
27
+ >>> import urllib
28
+ >>> from PIL import Image
29
+ >>> from timm.data import resolve_data_config
30
+ >>> from timm.data.transforms_factory import create_transform
31
+
32
+ >>> config = resolve_data_config({}, model=model)
33
+ >>> transform = create_transform(**config)
34
+
35
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
36
+ >>> urllib.request.urlretrieve(url, filename)
37
+ >>> img = Image.open(filename).convert('RGB')
38
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
39
+ ```
40
+
41
+ To get the model predictions:
42
+
43
+ ```py
44
+ >>> import torch
45
+ >>> with torch.no_grad():
46
+ ... out = model(tensor)
47
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
48
+ >>> print(probabilities.shape)
49
+ >>> # prints: torch.Size([1000])
50
+ ```
51
+
52
+ To get the top-5 predictions class names:
53
+
54
+ ```py
55
+ >>> # Get imagenet class mappings
56
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
57
+ >>> urllib.request.urlretrieve(url, filename)
58
+ >>> with open("imagenet_classes.txt", "r") as f:
59
+ ... categories = [s.strip() for s in f.readlines()]
60
+
61
+ >>> # Print top categories per image
62
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
63
+ >>> for i in range(top5_prob.size(0)):
64
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
65
+ >>> # prints class names and probabilities like:
66
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
67
+ ```
68
+
69
+ Replace the model name with the variant you want to use, e.g. `tf_efficientnet_b0_ns`. You can find the IDs in the model summaries at the top of this page.
70
+
71
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
72
+
73
+ ## How do I finetune this model?
74
+
75
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
76
+
77
+ ```py
78
+ >>> model = timm.create_model('tf_efficientnet_b0_ns', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
79
+ ```
80
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
81
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
82
+
83
+ ## How do I train this model?
84
+
85
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
86
+
87
+ ## Citation
88
+
89
+ ```BibTeX
90
+ @misc{xie2020selftraining,
91
+ title={Self-training with Noisy Student improves ImageNet classification},
92
+ author={Qizhe Xie and Minh-Thang Luong and Eduard Hovy and Quoc V. Le},
93
+ year={2020},
94
+ eprint={1911.04252},
95
+ archivePrefix={arXiv},
96
+ primaryClass={cs.LG}
97
+ }
98
+ ```
99
+
100
+ <!--
101
+ Type: model-index
102
+ Collections:
103
+ - Name: Noisy Student
104
+ Paper:
105
+ Title: Self-training with Noisy Student improves ImageNet classification
106
+ URL: https://paperswithcode.com/paper/self-training-with-noisy-student-improves
107
+ Models:
108
+ - Name: tf_efficientnet_b0_ns
109
+ In Collection: Noisy Student
110
+ Metadata:
111
+ FLOPs: 488688572
112
+ Parameters: 5290000
113
+ File Size: 21386709
114
+ Architecture:
115
+ - 1x1 Convolution
116
+ - Average Pooling
117
+ - Batch Normalization
118
+ - Convolution
119
+ - Dense Connections
120
+ - Dropout
121
+ - Inverted Residual Block
122
+ - Squeeze-and-Excitation Block
123
+ - Swish
124
+ Tasks:
125
+ - Image Classification
126
+ Training Techniques:
127
+ - AutoAugment
128
+ - FixRes
129
+ - Label Smoothing
130
+ - Noisy Student
131
+ - RMSProp
132
+ - RandAugment
133
+ - Weight Decay
134
+ Training Data:
135
+ - ImageNet
136
+ - JFT-300M
137
+ Training Resources: Cloud TPU v3 Pod
138
+ ID: tf_efficientnet_b0_ns
139
+ LR: 0.128
140
+ Epochs: 700
141
+ Dropout: 0.5
142
+ Crop Pct: '0.875'
143
+ Momentum: 0.9
144
+ Batch Size: 2048
145
+ Image Size: '224'
146
+ Weight Decay: 1.0e-05
147
+ Interpolation: bicubic
148
+ RMSProp Decay: 0.9
149
+ Label Smoothing: 0.1
150
+ BatchNorm Momentum: 0.99
151
+ Stochastic Depth Survival: 0.8
152
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1427
153
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ns-c0e6a31c.pth
154
+ Results:
155
+ - Task: Image Classification
156
+ Dataset: ImageNet
157
+ Metrics:
158
+ Top 1 Accuracy: 78.66%
159
+ Top 5 Accuracy: 94.37%
160
+ - Name: tf_efficientnet_b1_ns
161
+ In Collection: Noisy Student
162
+ Metadata:
163
+ FLOPs: 883633200
164
+ Parameters: 7790000
165
+ File Size: 31516408
166
+ Architecture:
167
+ - 1x1 Convolution
168
+ - Average Pooling
169
+ - Batch Normalization
170
+ - Convolution
171
+ - Dense Connections
172
+ - Dropout
173
+ - Inverted Residual Block
174
+ - Squeeze-and-Excitation Block
175
+ - Swish
176
+ Tasks:
177
+ - Image Classification
178
+ Training Techniques:
179
+ - AutoAugment
180
+ - FixRes
181
+ - Label Smoothing
182
+ - Noisy Student
183
+ - RMSProp
184
+ - RandAugment
185
+ - Weight Decay
186
+ Training Data:
187
+ - ImageNet
188
+ - JFT-300M
189
+ Training Resources: Cloud TPU v3 Pod
190
+ ID: tf_efficientnet_b1_ns
191
+ LR: 0.128
192
+ Epochs: 700
193
+ Dropout: 0.5
194
+ Crop Pct: '0.882'
195
+ Momentum: 0.9
196
+ Batch Size: 2048
197
+ Image Size: '240'
198
+ Weight Decay: 1.0e-05
199
+ Interpolation: bicubic
200
+ RMSProp Decay: 0.9
201
+ Label Smoothing: 0.1
202
+ BatchNorm Momentum: 0.99
203
+ Stochastic Depth Survival: 0.8
204
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1437
205
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ns-99dd0c41.pth
206
+ Results:
207
+ - Task: Image Classification
208
+ Dataset: ImageNet
209
+ Metrics:
210
+ Top 1 Accuracy: 81.39%
211
+ Top 5 Accuracy: 95.74%
212
+ - Name: tf_efficientnet_b2_ns
213
+ In Collection: Noisy Student
214
+ Metadata:
215
+ FLOPs: 1234321170
216
+ Parameters: 9110000
217
+ File Size: 36801803
218
+ Architecture:
219
+ - 1x1 Convolution
220
+ - Average Pooling
221
+ - Batch Normalization
222
+ - Convolution
223
+ - Dense Connections
224
+ - Dropout
225
+ - Inverted Residual Block
226
+ - Squeeze-and-Excitation Block
227
+ - Swish
228
+ Tasks:
229
+ - Image Classification
230
+ Training Techniques:
231
+ - AutoAugment
232
+ - FixRes
233
+ - Label Smoothing
234
+ - Noisy Student
235
+ - RMSProp
236
+ - RandAugment
237
+ - Weight Decay
238
+ Training Data:
239
+ - ImageNet
240
+ - JFT-300M
241
+ Training Resources: Cloud TPU v3 Pod
242
+ ID: tf_efficientnet_b2_ns
243
+ LR: 0.128
244
+ Epochs: 700
245
+ Dropout: 0.5
246
+ Crop Pct: '0.89'
247
+ Momentum: 0.9
248
+ Batch Size: 2048
249
+ Image Size: '260'
250
+ Weight Decay: 1.0e-05
251
+ Interpolation: bicubic
252
+ RMSProp Decay: 0.9
253
+ Label Smoothing: 0.1
254
+ BatchNorm Momentum: 0.99
255
+ Stochastic Depth Survival: 0.8
256
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1447
257
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ns-00306e48.pth
258
+ Results:
259
+ - Task: Image Classification
260
+ Dataset: ImageNet
261
+ Metrics:
262
+ Top 1 Accuracy: 82.39%
263
+ Top 5 Accuracy: 96.24%
264
+ - Name: tf_efficientnet_b3_ns
265
+ In Collection: Noisy Student
266
+ Metadata:
267
+ FLOPs: 2275247568
268
+ Parameters: 12230000
269
+ File Size: 49385734
270
+ Architecture:
271
+ - 1x1 Convolution
272
+ - Average Pooling
273
+ - Batch Normalization
274
+ - Convolution
275
+ - Dense Connections
276
+ - Dropout
277
+ - Inverted Residual Block
278
+ - Squeeze-and-Excitation Block
279
+ - Swish
280
+ Tasks:
281
+ - Image Classification
282
+ Training Techniques:
283
+ - AutoAugment
284
+ - FixRes
285
+ - Label Smoothing
286
+ - Noisy Student
287
+ - RMSProp
288
+ - RandAugment
289
+ - Weight Decay
290
+ Training Data:
291
+ - ImageNet
292
+ - JFT-300M
293
+ Training Resources: Cloud TPU v3 Pod
294
+ ID: tf_efficientnet_b3_ns
295
+ LR: 0.128
296
+ Epochs: 700
297
+ Dropout: 0.5
298
+ Crop Pct: '0.904'
299
+ Momentum: 0.9
300
+ Batch Size: 2048
301
+ Image Size: '300'
302
+ Weight Decay: 1.0e-05
303
+ Interpolation: bicubic
304
+ RMSProp Decay: 0.9
305
+ Label Smoothing: 0.1
306
+ BatchNorm Momentum: 0.99
307
+ Stochastic Depth Survival: 0.8
308
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1457
309
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ns-9d44bf68.pth
310
+ Results:
311
+ - Task: Image Classification
312
+ Dataset: ImageNet
313
+ Metrics:
314
+ Top 1 Accuracy: 84.04%
315
+ Top 5 Accuracy: 96.91%
316
+ - Name: tf_efficientnet_b4_ns
317
+ In Collection: Noisy Student
318
+ Metadata:
319
+ FLOPs: 5749638672
320
+ Parameters: 19340000
321
+ File Size: 77995057
322
+ Architecture:
323
+ - 1x1 Convolution
324
+ - Average Pooling
325
+ - Batch Normalization
326
+ - Convolution
327
+ - Dense Connections
328
+ - Dropout
329
+ - Inverted Residual Block
330
+ - Squeeze-and-Excitation Block
331
+ - Swish
332
+ Tasks:
333
+ - Image Classification
334
+ Training Techniques:
335
+ - AutoAugment
336
+ - FixRes
337
+ - Label Smoothing
338
+ - Noisy Student
339
+ - RMSProp
340
+ - RandAugment
341
+ - Weight Decay
342
+ Training Data:
343
+ - ImageNet
344
+ - JFT-300M
345
+ Training Resources: Cloud TPU v3 Pod
346
+ ID: tf_efficientnet_b4_ns
347
+ LR: 0.128
348
+ Epochs: 700
349
+ Dropout: 0.5
350
+ Crop Pct: '0.922'
351
+ Momentum: 0.9
352
+ Batch Size: 2048
353
+ Image Size: '380'
354
+ Weight Decay: 1.0e-05
355
+ Interpolation: bicubic
356
+ RMSProp Decay: 0.9
357
+ Label Smoothing: 0.1
358
+ BatchNorm Momentum: 0.99
359
+ Stochastic Depth Survival: 0.8
360
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1467
361
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ns-d6313a46.pth
362
+ Results:
363
+ - Task: Image Classification
364
+ Dataset: ImageNet
365
+ Metrics:
366
+ Top 1 Accuracy: 85.15%
367
+ Top 5 Accuracy: 97.47%
368
+ - Name: tf_efficientnet_b5_ns
369
+ In Collection: Noisy Student
370
+ Metadata:
371
+ FLOPs: 13176501888
372
+ Parameters: 30390000
373
+ File Size: 122404944
374
+ Architecture:
375
+ - 1x1 Convolution
376
+ - Average Pooling
377
+ - Batch Normalization
378
+ - Convolution
379
+ - Dense Connections
380
+ - Dropout
381
+ - Inverted Residual Block
382
+ - Squeeze-and-Excitation Block
383
+ - Swish
384
+ Tasks:
385
+ - Image Classification
386
+ Training Techniques:
387
+ - AutoAugment
388
+ - FixRes
389
+ - Label Smoothing
390
+ - Noisy Student
391
+ - RMSProp
392
+ - RandAugment
393
+ - Weight Decay
394
+ Training Data:
395
+ - ImageNet
396
+ - JFT-300M
397
+ Training Resources: Cloud TPU v3 Pod
398
+ ID: tf_efficientnet_b5_ns
399
+ LR: 0.128
400
+ Epochs: 350
401
+ Dropout: 0.5
402
+ Crop Pct: '0.934'
403
+ Momentum: 0.9
404
+ Batch Size: 2048
405
+ Image Size: '456'
406
+ Weight Decay: 1.0e-05
407
+ Interpolation: bicubic
408
+ RMSProp Decay: 0.9
409
+ Label Smoothing: 0.1
410
+ BatchNorm Momentum: 0.99
411
+ Stochastic Depth Survival: 0.8
412
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1477
413
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ns-6f26d0cf.pth
414
+ Results:
415
+ - Task: Image Classification
416
+ Dataset: ImageNet
417
+ Metrics:
418
+ Top 1 Accuracy: 86.08%
419
+ Top 5 Accuracy: 97.75%
420
+ - Name: tf_efficientnet_b6_ns
421
+ In Collection: Noisy Student
422
+ Metadata:
423
+ FLOPs: 24180518488
424
+ Parameters: 43040000
425
+ File Size: 173239537
426
+ Architecture:
427
+ - 1x1 Convolution
428
+ - Average Pooling
429
+ - Batch Normalization
430
+ - Convolution
431
+ - Dense Connections
432
+ - Dropout
433
+ - Inverted Residual Block
434
+ - Squeeze-and-Excitation Block
435
+ - Swish
436
+ Tasks:
437
+ - Image Classification
438
+ Training Techniques:
439
+ - AutoAugment
440
+ - FixRes
441
+ - Label Smoothing
442
+ - Noisy Student
443
+ - RMSProp
444
+ - RandAugment
445
+ - Weight Decay
446
+ Training Data:
447
+ - ImageNet
448
+ - JFT-300M
449
+ Training Resources: Cloud TPU v3 Pod
450
+ ID: tf_efficientnet_b6_ns
451
+ LR: 0.128
452
+ Epochs: 350
453
+ Dropout: 0.5
454
+ Crop Pct: '0.942'
455
+ Momentum: 0.9
456
+ Batch Size: 2048
457
+ Image Size: '528'
458
+ Weight Decay: 1.0e-05
459
+ Interpolation: bicubic
460
+ RMSProp Decay: 0.9
461
+ Label Smoothing: 0.1
462
+ BatchNorm Momentum: 0.99
463
+ Stochastic Depth Survival: 0.8
464
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1487
465
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ns-51548356.pth
466
+ Results:
467
+ - Task: Image Classification
468
+ Dataset: ImageNet
469
+ Metrics:
470
+ Top 1 Accuracy: 86.45%
471
+ Top 5 Accuracy: 97.88%
472
+ - Name: tf_efficientnet_b7_ns
473
+ In Collection: Noisy Student
474
+ Metadata:
475
+ FLOPs: 48205304880
476
+ Parameters: 66349999
477
+ File Size: 266853140
478
+ Architecture:
479
+ - 1x1 Convolution
480
+ - Average Pooling
481
+ - Batch Normalization
482
+ - Convolution
483
+ - Dense Connections
484
+ - Dropout
485
+ - Inverted Residual Block
486
+ - Squeeze-and-Excitation Block
487
+ - Swish
488
+ Tasks:
489
+ - Image Classification
490
+ Training Techniques:
491
+ - AutoAugment
492
+ - FixRes
493
+ - Label Smoothing
494
+ - Noisy Student
495
+ - RMSProp
496
+ - RandAugment
497
+ - Weight Decay
498
+ Training Data:
499
+ - ImageNet
500
+ - JFT-300M
501
+ Training Resources: Cloud TPU v3 Pod
502
+ ID: tf_efficientnet_b7_ns
503
+ LR: 0.128
504
+ Epochs: 350
505
+ Dropout: 0.5
506
+ Crop Pct: '0.949'
507
+ Momentum: 0.9
508
+ Batch Size: 2048
509
+ Image Size: '600'
510
+ Weight Decay: 1.0e-05
511
+ Interpolation: bicubic
512
+ RMSProp Decay: 0.9
513
+ Label Smoothing: 0.1
514
+ BatchNorm Momentum: 0.99
515
+ Stochastic Depth Survival: 0.8
516
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1498
517
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ns-1dbc32de.pth
518
+ Results:
519
+ - Task: Image Classification
520
+ Dataset: ImageNet
521
+ Metrics:
522
+ Top 1 Accuracy: 86.83%
523
+ Top 5 Accuracy: 98.08%
524
+ - Name: tf_efficientnet_l2_ns
525
+ In Collection: Noisy Student
526
+ Metadata:
527
+ FLOPs: 611646113804
528
+ Parameters: 480310000
529
+ File Size: 1925950424
530
+ Architecture:
531
+ - 1x1 Convolution
532
+ - Average Pooling
533
+ - Batch Normalization
534
+ - Convolution
535
+ - Dense Connections
536
+ - Dropout
537
+ - Inverted Residual Block
538
+ - Squeeze-and-Excitation Block
539
+ - Swish
540
+ Tasks:
541
+ - Image Classification
542
+ Training Techniques:
543
+ - AutoAugment
544
+ - FixRes
545
+ - Label Smoothing
546
+ - Noisy Student
547
+ - RMSProp
548
+ - RandAugment
549
+ - Weight Decay
550
+ Training Data:
551
+ - ImageNet
552
+ - JFT-300M
553
+ Training Resources: Cloud TPU v3 Pod
554
+ Training Time: 6 days
555
+ ID: tf_efficientnet_l2_ns
556
+ LR: 0.128
557
+ Epochs: 350
558
+ Dropout: 0.5
559
+ Crop Pct: '0.96'
560
+ Momentum: 0.9
561
+ Batch Size: 2048
562
+ Image Size: '800'
563
+ Weight Decay: 1.0e-05
564
+ Interpolation: bicubic
565
+ RMSProp Decay: 0.9
566
+ Label Smoothing: 0.1
567
+ BatchNorm Momentum: 0.99
568
+ Stochastic Depth Survival: 0.8
569
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1520
570
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns-df73bb44.pth
571
+ Results:
572
+ - Task: Image Classification
573
+ Dataset: ImageNet
574
+ Metrics:
575
+ Top 1 Accuracy: 88.35%
576
+ Top 5 Accuracy: 98.66%
577
+ -->
pytorch-image-models/hfdocs/source/models/pnasnet.mdx ADDED
@@ -0,0 +1,138 @@
1
+ # PNASNet
2
+
3
+ **Progressive Neural Architecture Search**, or **PNAS**, is a method for learning the structure of convolutional neural networks (CNNs). It uses a sequential model-based optimization (SMBO) strategy, where we search the space of cell structures, starting with simple (shallow) models and progressing to complex ones, pruning out unpromising structures as we go.
4
+
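+ The sketch below captures the flavour of that search loop: candidate cells are expanded one block at a time, ranked by a learned surrogate predictor, and pruned to the top-K. The `expand` and `predictor` callables are hypothetical placeholders, and the retraining of the predictor on measured accuracies is omitted.
+
+ ```py
+ >>> # Rough sketch of the SMBO-style progressive search; expand and predictor are
+ >>> # hypothetical callables, and retraining the predictor is omitted.
+ >>> def progressive_search(initial_cells, expand, predictor, max_blocks, top_k):
+ ...     candidates = list(initial_cells)
+ ...     for _ in range(max_blocks - 1):
+ ...         expanded = [bigger for cell in candidates for bigger in expand(cell)]  # grow each cell by one block
+ ...         ranked = sorted(expanded, key=predictor, reverse=True)                 # surrogate accuracy ranking
+ ...         candidates = ranked[:top_k]                                            # prune unpromising structures
+ ...     return candidates
+ ```
+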
5
+ ## How do I use this model on an image?
6
+
7
+ To load a pretrained model:
8
+
9
+ ```py
10
+ >>> import timm
11
+ >>> model = timm.create_model('pnasnet5large', pretrained=True)
12
+ >>> model.eval()
13
+ ```
14
+
15
+ To load and preprocess the image:
16
+
17
+ ```py
18
+ >>> import urllib
19
+ >>> from PIL import Image
20
+ >>> from timm.data import resolve_data_config
21
+ >>> from timm.data.transforms_factory import create_transform
22
+
23
+ >>> config = resolve_data_config({}, model=model)
24
+ >>> transform = create_transform(**config)
25
+
26
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
27
+ >>> urllib.request.urlretrieve(url, filename)
28
+ >>> img = Image.open(filename).convert('RGB')
29
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
30
+ ```
31
+
32
+ To get the model predictions:
33
+
34
+ ```py
35
+ >>> import torch
36
+ >>> with torch.no_grad():
37
+ ... out = model(tensor)
38
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
39
+ >>> print(probabilities.shape)
40
+ >>> # prints: torch.Size([1000])
41
+ ```
42
+
43
+ To get the top-5 predictions class names:
44
+
45
+ ```py
46
+ >>> # Get imagenet class mappings
47
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
48
+ >>> urllib.request.urlretrieve(url, filename)
49
+ >>> with open("imagenet_classes.txt", "r") as f:
50
+ ... categories = [s.strip() for s in f.readlines()]
51
+
52
+ >>> # Print top categories per image
53
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
54
+ >>> for i in range(top5_prob.size(0)):
55
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
56
+ >>> # prints class names and probabilities like:
57
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
58
+ ```
59
+
60
+ Replace the model name with the variant you want to use, e.g. `pnasnet5large`. You can find the IDs in the model summaries at the top of this page.
61
+
62
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
63
+
64
+ ## How do I finetune this model?
65
+
66
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
67
+
68
+ ```py
69
+ >>> model = timm.create_model('pnasnet5large', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
70
+ ```
71
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
72
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
73
+
74
+ ## How do I train this model?
75
+
76
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
77
+
78
+ ## Citation
79
+
80
+ ```BibTeX
81
+ @misc{liu2018progressive,
82
+ title={Progressive Neural Architecture Search},
83
+ author={Chenxi Liu and Barret Zoph and Maxim Neumann and Jonathon Shlens and Wei Hua and Li-Jia Li and Li Fei-Fei and Alan Yuille and Jonathan Huang and Kevin Murphy},
84
+ year={2018},
85
+ eprint={1712.00559},
86
+ archivePrefix={arXiv},
87
+ primaryClass={cs.CV}
88
+ }
89
+ ```
90
+
91
+ <!--
92
+ Type: model-index
93
+ Collections:
94
+ - Name: PNASNet
95
+ Paper:
96
+ Title: Progressive Neural Architecture Search
97
+ URL: https://paperswithcode.com/paper/progressive-neural-architecture-search
98
+ Models:
99
+ - Name: pnasnet5large
100
+ In Collection: PNASNet
101
+ Metadata:
102
+ FLOPs: 31458865950
103
+ Parameters: 86060000
104
+ File Size: 345153926
105
+ Architecture:
106
+ - Average Pooling
107
+ - Batch Normalization
108
+ - Convolution
109
+ - Depthwise Separable Convolution
110
+ - Dropout
111
+ - ReLU
112
+ Tasks:
113
+ - Image Classification
114
+ Training Techniques:
115
+ - Label Smoothing
116
+ - RMSProp
117
+ - Weight Decay
118
+ Training Data:
119
+ - ImageNet
120
+ Training Resources: 100x NVIDIA P100 GPUs
121
+ ID: pnasnet5large
122
+ LR: 0.015
123
+ Dropout: 0.5
124
+ Crop Pct: '0.911'
125
+ Momentum: 0.9
126
+ Batch Size: 1600
127
+ Image Size: '331'
128
+ Interpolation: bicubic
129
+ Label Smoothing: 0.1
130
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/pnasnet.py#L343
131
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/pnasnet5large-bf079911.pth
132
+ Results:
133
+ - Task: Image Classification
134
+ Dataset: ImageNet
135
+ Metrics:
136
+ Top 1 Accuracy: 0.98%
137
+ Top 5 Accuracy: 18.58%
138
+ -->
pytorch-image-models/hfdocs/source/models/regnetx.mdx ADDED
@@ -0,0 +1,559 @@
1
+ # RegNetX
2
+
3
+ **RegNetX** is a convolutional network design space of simple, regular models parameterised by a depth \\( d \\), an initial width \\( w\_{0} > 0 \\), and a slope \\( w\_{a} > 0 \\); it generates a different block width \\( u\_{j} \\) for each block \\( j < d \\). The key restriction for the RegNet types of model is that there is a linear parameterisation of block widths (the design space only contains models with this linear structure):
4
+
5
+ \\( u\_{j} = w\_{0} + w\_{a}\cdot{j} \\)
6
+
7
+ For **RegNetX** we have additional restrictions: we set \\( b = 1 \\) (the bottleneck ratio), \\( 12 \leq d \leq 28 \\), and \\( w\_{m} \geq 2 \\) (the width multiplier).
8
+
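+ The snippet below sketches how per-block widths follow from this parameterisation, including the quantisation step from the paper in which widths are snapped to powers of \\( w\_{m} \\) and rounded to a multiple of 8 channels; the parameter values are illustrative rather than those of a specific RegNetX variant.
+
+ ```py
+ >>> # Sketch of the linear width rule and the quantisation step from the paper;
+ >>> # the parameter values below are illustrative, not a specific RegNetX variant.
+ >>> import numpy as np
+
+ >>> def regnet_widths(depth, w_0, w_a, w_m, q=8):
+ ...     u = w_0 + w_a * np.arange(depth)               # u_j = w_0 + w_a * j
+ ...     s = np.round(np.log(u / w_0) / np.log(w_m))    # nearest integer power of w_m
+ ...     w = w_0 * np.power(w_m, s)                     # quantised per-block widths
+ ...     return (np.round(w / q) * q).astype(int)       # round widths to a multiple of q channels
+
+ >>> regnet_widths(depth=13, w_0=24, w_a=36.44, w_m=2.49)
+ ```
+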
9
+ ## How do I use this model on an image?
10
+
11
+ To load a pretrained model:
12
+
13
+ ```py
14
+ >>> import timm
15
+ >>> model = timm.create_model('regnetx_002', pretrained=True)
16
+ >>> model.eval()
17
+ ```
18
+
19
+ To load and preprocess the image:
20
+
21
+ ```py
22
+ >>> import urllib
23
+ >>> from PIL import Image
24
+ >>> from timm.data import resolve_data_config
25
+ >>> from timm.data.transforms_factory import create_transform
26
+
27
+ >>> config = resolve_data_config({}, model=model)
28
+ >>> transform = create_transform(**config)
29
+
30
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
31
+ >>> urllib.request.urlretrieve(url, filename)
32
+ >>> img = Image.open(filename).convert('RGB')
33
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
34
+ ```
35
+
36
+ To get the model predictions:
37
+
38
+ ```py
39
+ >>> import torch
40
+ >>> with torch.no_grad():
41
+ ... out = model(tensor)
42
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
43
+ >>> print(probabilities.shape)
44
+ >>> # prints: torch.Size([1000])
45
+ ```
46
+
47
+ To get the top-5 predictions class names:
48
+
49
+ ```py
50
+ >>> # Get imagenet class mappings
51
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
52
+ >>> urllib.request.urlretrieve(url, filename)
53
+ >>> with open("imagenet_classes.txt", "r") as f:
54
+ ... categories = [s.strip() for s in f.readlines()]
55
+
56
+ >>> # Print top categories per image
57
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
58
+ >>> for i in range(top5_prob.size(0)):
59
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
60
+ >>> # prints class names and probabilities like:
61
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
62
+ ```
63
+
64
+ Replace the model name with the variant you want to use, e.g. `regnetx_002`. You can find the IDs in the model summaries at the top of this page.
65
+
66
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
67
+
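+ As a minimal sketch (assuming the backbone supports `timm`'s `features_only` mode, which RegNet models do), features can also be extracted directly:
+
+ ```py
+ >>> import timm
+ >>> import torch
+ >>> # Same backbone, but returning intermediate feature maps instead of logits.
+ >>> feature_model = timm.create_model('regnetx_002', pretrained=True, features_only=True)
+ >>> feature_model.eval()
+ >>> with torch.no_grad():
+ ...     features = feature_model(torch.randn(1, 3, 224, 224))
+ >>> [f.shape for f in features]  # one tensor per feature stage
+ ```
+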
68
+ ## How do I finetune this model?
69
+
70
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
71
+
72
+ ```py
73
+ >>> model = timm.create_model('regnetx_002', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
74
+ ```
75
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
76
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
77
+
78
+ ## How do I train this model?
79
+
80
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
81
+
82
+ ## Citation
83
+
84
+ ```BibTeX
85
+ @misc{radosavovic2020designing,
86
+ title={Designing Network Design Spaces},
87
+ author={Ilija Radosavovic and Raj Prateek Kosaraju and Ross Girshick and Kaiming He and Piotr Dollár},
88
+ year={2020},
89
+ eprint={2003.13678},
90
+ archivePrefix={arXiv},
91
+ primaryClass={cs.CV}
92
+ }
93
+ ```
94
+
95
+ <!--
96
+ Type: model-index
97
+ Collections:
98
+ - Name: RegNetX
99
+ Paper:
100
+ Title: Designing Network Design Spaces
101
+ URL: https://paperswithcode.com/paper/designing-network-design-spaces
102
+ Models:
103
+ - Name: regnetx_002
104
+ In Collection: RegNetX
105
+ Metadata:
106
+ FLOPs: 255276032
107
+ Parameters: 2680000
108
+ File Size: 10862199
109
+ Architecture:
110
+ - 1x1 Convolution
111
+ - Batch Normalization
112
+ - Convolution
113
+ - Dense Connections
114
+ - Global Average Pooling
115
+ - Grouped Convolution
116
+ - ReLU
117
+ Tasks:
118
+ - Image Classification
119
+ Training Techniques:
120
+ - SGD with Momentum
121
+ - Weight Decay
122
+ Training Data:
123
+ - ImageNet
124
+ Training Resources: 8x NVIDIA V100 GPUs
125
+ ID: regnetx_002
126
+ Epochs: 100
127
+ Crop Pct: '0.875'
128
+ Momentum: 0.9
129
+ Batch Size: 1024
130
+ Image Size: '224'
131
+ Weight Decay: 5.0e-05
132
+ Interpolation: bicubic
133
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L337
134
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_002-e7e85e5c.pth
135
+ Results:
136
+ - Task: Image Classification
137
+ Dataset: ImageNet
138
+ Metrics:
139
+ Top 1 Accuracy: 68.75%
140
+ Top 5 Accuracy: 88.56%
141
+ - Name: regnetx_004
142
+ In Collection: RegNetX
143
+ Metadata:
144
+ FLOPs: 510619136
145
+ Parameters: 5160000
146
+ File Size: 20841309
147
+ Architecture:
148
+ - 1x1 Convolution
149
+ - Batch Normalization
150
+ - Convolution
151
+ - Dense Connections
152
+ - Global Average Pooling
153
+ - Grouped Convolution
154
+ - ReLU
155
+ Tasks:
156
+ - Image Classification
157
+ Training Techniques:
158
+ - SGD with Momentum
159
+ - Weight Decay
160
+ Training Data:
161
+ - ImageNet
162
+ Training Resources: 8x NVIDIA V100 GPUs
163
+ ID: regnetx_004
164
+ Epochs: 100
165
+ Crop Pct: '0.875'
166
+ Momentum: 0.9
167
+ Batch Size: 1024
168
+ Image Size: '224'
169
+ Weight Decay: 5.0e-05
170
+ Interpolation: bicubic
171
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L343
172
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_004-7d0e9424.pth
173
+ Results:
174
+ - Task: Image Classification
175
+ Dataset: ImageNet
176
+ Metrics:
177
+ Top 1 Accuracy: 72.39%
178
+ Top 5 Accuracy: 90.82%
179
+ - Name: regnetx_006
180
+ In Collection: RegNetX
181
+ Metadata:
182
+ FLOPs: 771659136
183
+ Parameters: 6200000
184
+ File Size: 24965172
185
+ Architecture:
186
+ - 1x1 Convolution
187
+ - Batch Normalization
188
+ - Convolution
189
+ - Dense Connections
190
+ - Global Average Pooling
191
+ - Grouped Convolution
192
+ - ReLU
193
+ Tasks:
194
+ - Image Classification
195
+ Training Techniques:
196
+ - SGD with Momentum
197
+ - Weight Decay
198
+ Training Data:
199
+ - ImageNet
200
+ Training Resources: 8x NVIDIA V100 GPUs
201
+ ID: regnetx_006
202
+ Epochs: 100
203
+ Crop Pct: '0.875'
204
+ Momentum: 0.9
205
+ Batch Size: 1024
206
+ Image Size: '224'
207
+ Weight Decay: 5.0e-05
208
+ Interpolation: bicubic
209
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L349
210
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_006-85ec1baa.pth
211
+ Results:
212
+ - Task: Image Classification
213
+ Dataset: ImageNet
214
+ Metrics:
215
+ Top 1 Accuracy: 73.84%
216
+ Top 5 Accuracy: 91.68%
217
+ - Name: regnetx_008
218
+ In Collection: RegNetX
219
+ Metadata:
220
+ FLOPs: 1027038208
221
+ Parameters: 7260000
222
+ File Size: 29235944
223
+ Architecture:
224
+ - 1x1 Convolution
225
+ - Batch Normalization
226
+ - Convolution
227
+ - Dense Connections
228
+ - Global Average Pooling
229
+ - Grouped Convolution
230
+ - ReLU
231
+ Tasks:
232
+ - Image Classification
233
+ Training Techniques:
234
+ - SGD with Momentum
235
+ - Weight Decay
236
+ Training Data:
237
+ - ImageNet
238
+ Training Resources: 8x NVIDIA V100 GPUs
239
+ ID: regnetx_008
240
+ Epochs: 100
241
+ Crop Pct: '0.875'
242
+ Momentum: 0.9
243
+ Batch Size: 1024
244
+ Image Size: '224'
245
+ Weight Decay: 5.0e-05
246
+ Interpolation: bicubic
247
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L355
248
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_008-d8b470eb.pth
249
+ Results:
250
+ - Task: Image Classification
251
+ Dataset: ImageNet
252
+ Metrics:
253
+ Top 1 Accuracy: 75.05%
254
+ Top 5 Accuracy: 92.34%
255
+ - Name: regnetx_016
256
+ In Collection: RegNetX
257
+ Metadata:
258
+ FLOPs: 2059337856
259
+ Parameters: 9190000
260
+ File Size: 36988158
261
+ Architecture:
262
+ - 1x1 Convolution
263
+ - Batch Normalization
264
+ - Convolution
265
+ - Dense Connections
266
+ - Global Average Pooling
267
+ - Grouped Convolution
268
+ - ReLU
269
+ Tasks:
270
+ - Image Classification
271
+ Training Techniques:
272
+ - SGD with Momentum
273
+ - Weight Decay
274
+ Training Data:
275
+ - ImageNet
276
+ Training Resources: 8x NVIDIA V100 GPUs
277
+ ID: regnetx_016
278
+ Epochs: 100
279
+ Crop Pct: '0.875'
280
+ Momentum: 0.9
281
+ Batch Size: 1024
282
+ Image Size: '224'
283
+ Weight Decay: 5.0e-05
284
+ Interpolation: bicubic
285
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L361
286
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_016-65ca972a.pth
287
+ Results:
288
+ - Task: Image Classification
289
+ Dataset: ImageNet
290
+ Metrics:
291
+ Top 1 Accuracy: 76.95%
292
+ Top 5 Accuracy: 93.43%
293
+ - Name: regnetx_032
294
+ In Collection: RegNetX
295
+ Metadata:
296
+ FLOPs: 4082555904
297
+ Parameters: 15300000
298
+ File Size: 61509573
299
+ Architecture:
300
+ - 1x1 Convolution
301
+ - Batch Normalization
302
+ - Convolution
303
+ - Dense Connections
304
+ - Global Average Pooling
305
+ - Grouped Convolution
306
+ - ReLU
307
+ Tasks:
308
+ - Image Classification
309
+ Training Techniques:
310
+ - SGD with Momentum
311
+ - Weight Decay
312
+ Training Data:
313
+ - ImageNet
314
+ Training Resources: 8x NVIDIA V100 GPUs
315
+ ID: regnetx_032
316
+ Epochs: 100
317
+ Crop Pct: '0.875'
318
+ Momentum: 0.9
319
+ Batch Size: 512
320
+ Image Size: '224'
321
+ Weight Decay: 5.0e-05
322
+ Interpolation: bicubic
323
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L367
324
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_032-ed0c7f7e.pth
325
+ Results:
326
+ - Task: Image Classification
327
+ Dataset: ImageNet
328
+ Metrics:
329
+ Top 1 Accuracy: 78.15%
330
+ Top 5 Accuracy: 94.09%
331
+ - Name: regnetx_040
332
+ In Collection: RegNetX
333
+ Metadata:
334
+ FLOPs: 5095167744
335
+ Parameters: 22120000
336
+ File Size: 88844824
337
+ Architecture:
338
+ - 1x1 Convolution
339
+ - Batch Normalization
340
+ - Convolution
341
+ - Dense Connections
342
+ - Global Average Pooling
343
+ - Grouped Convolution
344
+ - ReLU
345
+ Tasks:
346
+ - Image Classification
347
+ Training Techniques:
348
+ - SGD with Momentum
349
+ - Weight Decay
350
+ Training Data:
351
+ - ImageNet
352
+ Training Resources: 8x NVIDIA V100 GPUs
353
+ ID: regnetx_040
354
+ Epochs: 100
355
+ Crop Pct: '0.875'
356
+ Momentum: 0.9
357
+ Batch Size: 512
358
+ Image Size: '224'
359
+ Weight Decay: 5.0e-05
360
+ Interpolation: bicubic
361
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L373
362
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_040-73c2a654.pth
363
+ Results:
364
+ - Task: Image Classification
365
+ Dataset: ImageNet
366
+ Metrics:
367
+ Top 1 Accuracy: 78.48%
368
+ Top 5 Accuracy: 94.25%
369
+ - Name: regnetx_064
370
+ In Collection: RegNetX
371
+ Metadata:
372
+ FLOPs: 8303405824
373
+ Parameters: 26210000
374
+ File Size: 105184854
375
+ Architecture:
376
+ - 1x1 Convolution
377
+ - Batch Normalization
378
+ - Convolution
379
+ - Dense Connections
380
+ - Global Average Pooling
381
+ - Grouped Convolution
382
+ - ReLU
383
+ Tasks:
384
+ - Image Classification
385
+ Training Techniques:
386
+ - SGD with Momentum
387
+ - Weight Decay
388
+ Training Data:
389
+ - ImageNet
390
+ Training Resources: 8x NVIDIA V100 GPUs
391
+ ID: regnetx_064
392
+ Epochs: 100
393
+ Crop Pct: '0.875'
394
+ Momentum: 0.9
395
+ Batch Size: 512
396
+ Image Size: '224'
397
+ Weight Decay: 5.0e-05
398
+ Interpolation: bicubic
399
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L379
400
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_064-29278baa.pth
401
+ Results:
402
+ - Task: Image Classification
403
+ Dataset: ImageNet
404
+ Metrics:
405
+ Top 1 Accuracy: 79.06%
406
+ Top 5 Accuracy: 94.47%
407
+ - Name: regnetx_080
408
+ In Collection: RegNetX
409
+ Metadata:
410
+ FLOPs: 10276726784
411
+ Parameters: 39570000
412
+ File Size: 158720042
413
+ Architecture:
414
+ - 1x1 Convolution
415
+ - Batch Normalization
416
+ - Convolution
417
+ - Dense Connections
418
+ - Global Average Pooling
419
+ - Grouped Convolution
420
+ - ReLU
421
+ Tasks:
422
+ - Image Classification
423
+ Training Techniques:
424
+ - SGD with Momentum
425
+ - Weight Decay
426
+ Training Data:
427
+ - ImageNet
428
+ Training Resources: 8x NVIDIA V100 GPUs
429
+ ID: regnetx_080
430
+ Epochs: 100
431
+ Crop Pct: '0.875'
432
+ Momentum: 0.9
433
+ Batch Size: 512
434
+ Image Size: '224'
435
+ Weight Decay: 5.0e-05
436
+ Interpolation: bicubic
437
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L385
438
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_080-7c7fcab1.pth
439
+ Results:
440
+ - Task: Image Classification
441
+ Dataset: ImageNet
442
+ Metrics:
443
+ Top 1 Accuracy: 79.21%
444
+ Top 5 Accuracy: 94.55%
445
+ - Name: regnetx_120
446
+ In Collection: RegNetX
447
+ Metadata:
448
+ FLOPs: 15536378368
449
+ Parameters: 46110000
450
+ File Size: 184866342
451
+ Architecture:
452
+ - 1x1 Convolution
453
+ - Batch Normalization
454
+ - Convolution
455
+ - Dense Connections
456
+ - Global Average Pooling
457
+ - Grouped Convolution
458
+ - ReLU
459
+ Tasks:
460
+ - Image Classification
461
+ Training Techniques:
462
+ - SGD with Momentum
463
+ - Weight Decay
464
+ Training Data:
465
+ - ImageNet
466
+ Training Resources: 8x NVIDIA V100 GPUs
467
+ ID: regnetx_120
468
+ Epochs: 100
469
+ Crop Pct: '0.875'
470
+ Momentum: 0.9
471
+ Batch Size: 512
472
+ Image Size: '224'
473
+ Weight Decay: 5.0e-05
474
+ Interpolation: bicubic
475
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L391
476
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_120-65d5521e.pth
477
+ Results:
478
+ - Task: Image Classification
479
+ Dataset: ImageNet
480
+ Metrics:
481
+ Top 1 Accuracy: 79.61%
482
+ Top 5 Accuracy: 94.73%
483
+ - Name: regnetx_160
484
+ In Collection: RegNetX
485
+ Metadata:
486
+ FLOPs: 20491740672
487
+ Parameters: 54280000
488
+ File Size: 217623862
489
+ Architecture:
490
+ - 1x1 Convolution
491
+ - Batch Normalization
492
+ - Convolution
493
+ - Dense Connections
494
+ - Global Average Pooling
495
+ - Grouped Convolution
496
+ - ReLU
497
+ Tasks:
498
+ - Image Classification
499
+ Training Techniques:
500
+ - SGD with Momentum
501
+ - Weight Decay
502
+ Training Data:
503
+ - ImageNet
504
+ Training Resources: 8x NVIDIA V100 GPUs
505
+ ID: regnetx_160
506
+ Epochs: 100
507
+ Crop Pct: '0.875'
508
+ Momentum: 0.9
509
+ Batch Size: 512
510
+ Image Size: '224'
511
+ Weight Decay: 5.0e-05
512
+ Interpolation: bicubic
513
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L397
514
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_160-c98c4112.pth
515
+ Results:
516
+ - Task: Image Classification
517
+ Dataset: ImageNet
518
+ Metrics:
519
+ Top 1 Accuracy: 79.84%
520
+ Top 5 Accuracy: 94.82%
521
+ - Name: regnetx_320
522
+ In Collection: RegNetX
523
+ Metadata:
524
+ FLOPs: 40798958592
525
+ Parameters: 107810000
526
+ File Size: 431962133
527
+ Architecture:
528
+ - 1x1 Convolution
529
+ - Batch Normalization
530
+ - Convolution
531
+ - Dense Connections
532
+ - Global Average Pooling
533
+ - Grouped Convolution
534
+ - ReLU
535
+ Tasks:
536
+ - Image Classification
537
+ Training Techniques:
538
+ - SGD with Momentum
539
+ - Weight Decay
540
+ Training Data:
541
+ - ImageNet
542
+ Training Resources: 8x NVIDIA V100 GPUs
543
+ ID: regnetx_320
544
+ Epochs: 100
545
+ Crop Pct: '0.875'
546
+ Momentum: 0.9
547
+ Batch Size: 256
548
+ Image Size: '224'
549
+ Weight Decay: 5.0e-05
550
+ Interpolation: bicubic
551
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L403
552
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_320-8ea38b93.pth
553
+ Results:
554
+ - Task: Image Classification
555
+ Dataset: ImageNet
556
+ Metrics:
557
+ Top 1 Accuracy: 80.25%
558
+ Top 5 Accuracy: 95.03%
559
+ -->
pytorch-image-models/hfdocs/source/models/regnety.mdx ADDED
@@ -0,0 +1,573 @@
1
+ # RegNetY
2
+
3
+ **RegNetY** is a convolutional network design space of simple, regular models parameterised by depth \\( d \\), initial width \\( w\_{0} > 0 \\), and slope \\( w\_{a} > 0 \\); each block \\( j < d \\) is assigned a block width \\( u\_{j} \\). The key restriction for RegNet models is that block widths are parameterised linearly (the design space only contains models with this linear structure):
4
+
5
+ \\( u\_{j} = w\_{0} + w\_{a}\cdot{j} \\)
6
+
7
+ As for **RegNetX**, the authors impose additional restrictions: \\( b = 1 \\) (the bottleneck ratio), \\( 12 \leq d \leq 28 \\) (the depth), and \\( w\_{m} \geq 2 \\) (the width multiplier).
8
+
9
+ For **RegNetY** the authors make one change, which is the inclusion of [Squeeze-and-Excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block).
10
+
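+ For intuition, below is a minimal, generic Squeeze-and-Excitation block in PyTorch. It is a simplified sketch of the mechanism (squeeze via global pooling, excite via a small bottleneck, then channel-wise rescaling), not the exact module used in `timm`'s RegNetY implementation:
+
+ ```py
+ >>> import torch
+ >>> import torch.nn as nn
+
+ >>> class SqueezeExcite(nn.Module):
+ ...     """Simplified SE block: squeeze (global pool), excite (bottleneck MLP), rescale."""
+ ...     def __init__(self, channels, reduction=4):
+ ...         super().__init__()
+ ...         self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1)
+ ...         self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1)
+ ...     def forward(self, x):
+ ...         scale = x.mean(dim=(2, 3), keepdim=True)                      # squeeze: B x C x 1 x 1
+ ...         scale = torch.sigmoid(self.fc2(torch.relu(self.fc1(scale))))  # excite
+ ...         return x * scale                                              # channel-wise rescale
+
+ >>> se = SqueezeExcite(64)
+ >>> se(torch.randn(1, 64, 56, 56)).shape
+ torch.Size([1, 64, 56, 56])
+ ```
+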
11
+ ## How do I use this model on an image?
12
+
13
+ To load a pretrained model:
14
+
15
+ ```py
16
+ >>> import timm
17
+ >>> model = timm.create_model('regnety_002', pretrained=True)
18
+ >>> model.eval()
19
+ ```
20
+
21
+ To load and preprocess the image:
22
+
23
+ ```py
24
+ >>> import urllib
25
+ >>> from PIL import Image
26
+ >>> from timm.data import resolve_data_config
27
+ >>> from timm.data.transforms_factory import create_transform
28
+
29
+ >>> config = resolve_data_config({}, model=model)
30
+ >>> transform = create_transform(**config)
31
+
32
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
33
+ >>> urllib.request.urlretrieve(url, filename)
34
+ >>> img = Image.open(filename).convert('RGB')
35
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
36
+ ```
37
+
38
+ To get the model predictions:
39
+
40
+ ```py
41
+ >>> import torch
42
+ >>> with torch.no_grad():
43
+ ... out = model(tensor)
44
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
45
+ >>> print(probabilities.shape)
46
+ >>> # prints: torch.Size([1000])
47
+ ```
48
+
49
+ To get the top-5 predictions class names:
50
+
51
+ ```py
52
+ >>> # Get imagenet class mappings
53
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
54
+ >>> urllib.request.urlretrieve(url, filename)
55
+ >>> with open("imagenet_classes.txt", "r") as f:
56
+ ... categories = [s.strip() for s in f.readlines()]
57
+
58
+ >>> # Print top categories per image
59
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
60
+ >>> for i in range(top5_prob.size(0)):
61
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
62
+ >>> # prints class names and probabilities like:
63
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
64
+ ```
65
+
66
+ Replace the model name with the variant you want to use, e.g. `regnety_002`. You can find the IDs in the model summaries at the top of this page.
67
+
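+ If you are unsure which RegNetY variants your installed version of `timm` provides, you can also list them programmatically (the exact output depends on the `timm` version):
+
+ ```py
+ >>> import timm
+ >>> # List RegNetY model names; pretrained=True keeps only variants with pretrained weights.
+ >>> timm.list_models('regnety_*', pretrained=True)
+ ```
+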
68
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
69
+
70
+ ## How do I finetune this model?
71
+
72
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
73
+
74
+ ```py
75
+ >>> model = timm.create_model('regnety_002', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
76
+ ```
77
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
78
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
79
+
80
+ ## How do I train this model?
81
+
82
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
83
+
84
+ ## Citation
85
+
86
+ ```BibTeX
87
+ @misc{radosavovic2020designing,
88
+ title={Designing Network Design Spaces},
89
+ author={Ilija Radosavovic and Raj Prateek Kosaraju and Ross Girshick and Kaiming He and Piotr Dollár},
90
+ year={2020},
91
+ eprint={2003.13678},
92
+ archivePrefix={arXiv},
93
+ primaryClass={cs.CV}
94
+ }
95
+ ```
96
+
97
+ <!--
98
+ Type: model-index
99
+ Collections:
100
+ - Name: RegNetY
101
+ Paper:
102
+ Title: Designing Network Design Spaces
103
+ URL: https://paperswithcode.com/paper/designing-network-design-spaces
104
+ Models:
105
+ - Name: regnety_002
106
+ In Collection: RegNetY
107
+ Metadata:
108
+ FLOPs: 255754236
109
+ Parameters: 3160000
110
+ File Size: 12782926
111
+ Architecture:
112
+ - 1x1 Convolution
113
+ - Batch Normalization
114
+ - Convolution
115
+ - Dense Connections
116
+ - Global Average Pooling
117
+ - Grouped Convolution
118
+ - ReLU
119
+ - Squeeze-and-Excitation Block
120
+ Tasks:
121
+ - Image Classification
122
+ Training Techniques:
123
+ - SGD with Momentum
124
+ - Weight Decay
125
+ Training Data:
126
+ - ImageNet
127
+ Training Resources: 8x NVIDIA V100 GPUs
128
+ ID: regnety_002
129
+ Epochs: 100
130
+ Crop Pct: '0.875'
131
+ Momentum: 0.9
132
+ Batch Size: 1024
133
+ Image Size: '224'
134
+ Weight Decay: 5.0e-05
135
+ Interpolation: bicubic
136
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L409
137
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_002-e68ca334.pth
138
+ Results:
139
+ - Task: Image Classification
140
+ Dataset: ImageNet
141
+ Metrics:
142
+ Top 1 Accuracy: 70.28%
143
+ Top 5 Accuracy: 89.55%
144
+ - Name: regnety_004
145
+ In Collection: RegNetY
146
+ Metadata:
147
+ FLOPs: 515664568
148
+ Parameters: 4340000
149
+ File Size: 17542753
150
+ Architecture:
151
+ - 1x1 Convolution
152
+ - Batch Normalization
153
+ - Convolution
154
+ - Dense Connections
155
+ - Global Average Pooling
156
+ - Grouped Convolution
157
+ - ReLU
158
+ - Squeeze-and-Excitation Block
159
+ Tasks:
160
+ - Image Classification
161
+ Training Techniques:
162
+ - SGD with Momentum
163
+ - Weight Decay
164
+ Training Data:
165
+ - ImageNet
166
+ Training Resources: 8x NVIDIA V100 GPUs
167
+ ID: regnety_004
168
+ Epochs: 100
169
+ Crop Pct: '0.875'
170
+ Momentum: 0.9
171
+ Batch Size: 1024
172
+ Image Size: '224'
173
+ Weight Decay: 5.0e-05
174
+ Interpolation: bicubic
175
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L415
176
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_004-0db870e6.pth
177
+ Results:
178
+ - Task: Image Classification
179
+ Dataset: ImageNet
180
+ Metrics:
181
+ Top 1 Accuracy: 74.02%
182
+ Top 5 Accuracy: 91.76%
183
+ - Name: regnety_006
184
+ In Collection: RegNetY
185
+ Metadata:
186
+ FLOPs: 771746928
187
+ Parameters: 6060000
188
+ File Size: 24394127
189
+ Architecture:
190
+ - 1x1 Convolution
191
+ - Batch Normalization
192
+ - Convolution
193
+ - Dense Connections
194
+ - Global Average Pooling
195
+ - Grouped Convolution
196
+ - ReLU
197
+ - Squeeze-and-Excitation Block
198
+ Tasks:
199
+ - Image Classification
200
+ Training Techniques:
201
+ - SGD with Momentum
202
+ - Weight Decay
203
+ Training Data:
204
+ - ImageNet
205
+ Training Resources: 8x NVIDIA V100 GPUs
206
+ ID: regnety_006
207
+ Epochs: 100
208
+ Crop Pct: '0.875'
209
+ Momentum: 0.9
210
+ Batch Size: 1024
211
+ Image Size: '224'
212
+ Weight Decay: 5.0e-05
213
+ Interpolation: bicubic
214
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L421
215
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_006-c67e57ec.pth
216
+ Results:
217
+ - Task: Image Classification
218
+ Dataset: ImageNet
219
+ Metrics:
220
+ Top 1 Accuracy: 75.27%
221
+ Top 5 Accuracy: 92.53%
222
+ - Name: regnety_008
223
+ In Collection: RegNetY
224
+ Metadata:
225
+ FLOPs: 1023448952
226
+ Parameters: 6260000
227
+ File Size: 25223268
228
+ Architecture:
229
+ - 1x1 Convolution
230
+ - Batch Normalization
231
+ - Convolution
232
+ - Dense Connections
233
+ - Global Average Pooling
234
+ - Grouped Convolution
235
+ - ReLU
236
+ - Squeeze-and-Excitation Block
237
+ Tasks:
238
+ - Image Classification
239
+ Training Techniques:
240
+ - SGD with Momentum
241
+ - Weight Decay
242
+ Training Data:
243
+ - ImageNet
244
+ Training Resources: 8x NVIDIA V100 GPUs
245
+ ID: regnety_008
246
+ Epochs: 100
247
+ Crop Pct: '0.875'
248
+ Momentum: 0.9
249
+ Batch Size: 1024
250
+ Image Size: '224'
251
+ Weight Decay: 5.0e-05
252
+ Interpolation: bicubic
253
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L427
254
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_008-dc900dbe.pth
255
+ Results:
256
+ - Task: Image Classification
257
+ Dataset: ImageNet
258
+ Metrics:
259
+ Top 1 Accuracy: 76.32%
260
+ Top 5 Accuracy: 93.07%
261
+ - Name: regnety_016
262
+ In Collection: RegNetY
263
+ Metadata:
264
+ FLOPs: 2070895094
265
+ Parameters: 11200000
266
+ File Size: 45115589
267
+ Architecture:
268
+ - 1x1 Convolution
269
+ - Batch Normalization
270
+ - Convolution
271
+ - Dense Connections
272
+ - Global Average Pooling
273
+ - Grouped Convolution
274
+ - ReLU
275
+ - Squeeze-and-Excitation Block
276
+ Tasks:
277
+ - Image Classification
278
+ Training Techniques:
279
+ - SGD with Momentum
280
+ - Weight Decay
281
+ Training Data:
282
+ - ImageNet
283
+ Training Resources: 8x NVIDIA V100 GPUs
284
+ ID: regnety_016
285
+ Epochs: 100
286
+ Crop Pct: '0.875'
287
+ Momentum: 0.9
288
+ Batch Size: 1024
289
+ Image Size: '224'
290
+ Weight Decay: 5.0e-05
291
+ Interpolation: bicubic
292
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L433
293
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_016-54367f74.pth
294
+ Results:
295
+ - Task: Image Classification
296
+ Dataset: ImageNet
297
+ Metrics:
298
+ Top 1 Accuracy: 77.87%
299
+ Top 5 Accuracy: 93.73%
300
+ - Name: regnety_032
301
+ In Collection: RegNetY
302
+ Metadata:
303
+ FLOPs: 4081118714
304
+ Parameters: 19440000
305
+ File Size: 78084523
306
+ Architecture:
307
+ - 1x1 Convolution
308
+ - Batch Normalization
309
+ - Convolution
310
+ - Dense Connections
311
+ - Global Average Pooling
312
+ - Grouped Convolution
313
+ - ReLU
314
+ - Squeeze-and-Excitation Block
315
+ Tasks:
316
+ - Image Classification
317
+ Training Techniques:
318
+ - SGD with Momentum
319
+ - Weight Decay
320
+ Training Data:
321
+ - ImageNet
322
+ Training Resources: 8x NVIDIA V100 GPUs
323
+ ID: regnety_032
324
+ Epochs: 100
325
+ Crop Pct: '0.875'
326
+ Momentum: 0.9
327
+ Batch Size: 512
328
+ Image Size: '224'
329
+ Weight Decay: 5.0e-05
330
+ Interpolation: bicubic
331
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L439
332
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/regnety_032_ra-7f2439f9.pth
333
+ Results:
334
+ - Task: Image Classification
335
+ Dataset: ImageNet
336
+ Metrics:
337
+ Top 1 Accuracy: 82.01%
338
+ Top 5 Accuracy: 95.91%
339
+ - Name: regnety_040
340
+ In Collection: RegNetY
341
+ Metadata:
342
+ FLOPs: 5105933432
343
+ Parameters: 20650000
344
+ File Size: 82913909
345
+ Architecture:
346
+ - 1x1 Convolution
347
+ - Batch Normalization
348
+ - Convolution
349
+ - Dense Connections
350
+ - Global Average Pooling
351
+ - Grouped Convolution
352
+ - ReLU
353
+ - Squeeze-and-Excitation Block
354
+ Tasks:
355
+ - Image Classification
356
+ Training Techniques:
357
+ - SGD with Momentum
358
+ - Weight Decay
359
+ Training Data:
360
+ - ImageNet
361
+ Training Resources: 8x NVIDIA V100 GPUs
362
+ ID: regnety_040
363
+ Epochs: 100
364
+ Crop Pct: '0.875'
365
+ Momentum: 0.9
366
+ Batch Size: 512
367
+ Image Size: '224'
368
+ Weight Decay: 5.0e-05
369
+ Interpolation: bicubic
370
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L445
371
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_040-f0d569f9.pth
372
+ Results:
373
+ - Task: Image Classification
374
+ Dataset: ImageNet
375
+ Metrics:
376
+ Top 1 Accuracy: 79.23%
377
+ Top 5 Accuracy: 94.64%
378
+ - Name: regnety_064
379
+ In Collection: RegNetY
380
+ Metadata:
381
+ FLOPs: 8167730444
382
+ Parameters: 30580000
383
+ File Size: 122751416
384
+ Architecture:
385
+ - 1x1 Convolution
386
+ - Batch Normalization
387
+ - Convolution
388
+ - Dense Connections
389
+ - Global Average Pooling
390
+ - Grouped Convolution
391
+ - ReLU
392
+ - Squeeze-and-Excitation Block
393
+ Tasks:
394
+ - Image Classification
395
+ Training Techniques:
396
+ - SGD with Momentum
397
+ - Weight Decay
398
+ Training Data:
399
+ - ImageNet
400
+ Training Resources: 8x NVIDIA V100 GPUs
401
+ ID: regnety_064
402
+ Epochs: 100
403
+ Crop Pct: '0.875'
404
+ Momentum: 0.9
405
+ Batch Size: 512
406
+ Image Size: '224'
407
+ Weight Decay: 5.0e-05
408
+ Interpolation: bicubic
409
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L451
410
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_064-0a48325c.pth
411
+ Results:
412
+ - Task: Image Classification
413
+ Dataset: ImageNet
414
+ Metrics:
415
+ Top 1 Accuracy: 79.73%
416
+ Top 5 Accuracy: 94.76%
417
+ - Name: regnety_080
418
+ In Collection: RegNetY
419
+ Metadata:
420
+ FLOPs: 10233621420
421
+ Parameters: 39180000
422
+ File Size: 157124671
423
+ Architecture:
424
+ - 1x1 Convolution
425
+ - Batch Normalization
426
+ - Convolution
427
+ - Dense Connections
428
+ - Global Average Pooling
429
+ - Grouped Convolution
430
+ - ReLU
431
+ - Squeeze-and-Excitation Block
432
+ Tasks:
433
+ - Image Classification
434
+ Training Techniques:
435
+ - SGD with Momentum
436
+ - Weight Decay
437
+ Training Data:
438
+ - ImageNet
439
+ Training Resources: 8x NVIDIA V100 GPUs
440
+ ID: regnety_080
441
+ Epochs: 100
442
+ Crop Pct: '0.875'
443
+ Momentum: 0.9
444
+ Batch Size: 512
445
+ Image Size: '224'
446
+ Weight Decay: 5.0e-05
447
+ Interpolation: bicubic
448
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L457
449
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_080-e7f3eb93.pth
450
+ Results:
451
+ - Task: Image Classification
452
+ Dataset: ImageNet
453
+ Metrics:
454
+ Top 1 Accuracy: 79.87%
455
+ Top 5 Accuracy: 94.83%
456
+ - Name: regnety_120
457
+ In Collection: RegNetY
458
+ Metadata:
459
+ FLOPs: 15542094856
460
+ Parameters: 51820000
461
+ File Size: 207743949
462
+ Architecture:
463
+ - 1x1 Convolution
464
+ - Batch Normalization
465
+ - Convolution
466
+ - Dense Connections
467
+ - Global Average Pooling
468
+ - Grouped Convolution
469
+ - ReLU
470
+ - Squeeze-and-Excitation Block
471
+ Tasks:
472
+ - Image Classification
473
+ Training Techniques:
474
+ - SGD with Momentum
475
+ - Weight Decay
476
+ Training Data:
477
+ - ImageNet
478
+ Training Resources: 8x NVIDIA V100 GPUs
479
+ ID: regnety_120
480
+ Epochs: 100
481
+ Crop Pct: '0.875'
482
+ Momentum: 0.9
483
+ Batch Size: 512
484
+ Image Size: '224'
485
+ Weight Decay: 5.0e-05
486
+ Interpolation: bicubic
487
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L463
488
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_120-721ba79a.pth
489
+ Results:
490
+ - Task: Image Classification
491
+ Dataset: ImageNet
492
+ Metrics:
493
+ Top 1 Accuracy: 80.38%
494
+ Top 5 Accuracy: 95.12%
495
+ - Name: regnety_160
496
+ In Collection: RegNetY
497
+ Metadata:
498
+ FLOPs: 20450196852
499
+ Parameters: 83590000
500
+ File Size: 334916722
501
+ Architecture:
502
+ - 1x1 Convolution
503
+ - Batch Normalization
504
+ - Convolution
505
+ - Dense Connections
506
+ - Global Average Pooling
507
+ - Grouped Convolution
508
+ - ReLU
509
+ - Squeeze-and-Excitation Block
510
+ Tasks:
511
+ - Image Classification
512
+ Training Techniques:
513
+ - SGD with Momentum
514
+ - Weight Decay
515
+ Training Data:
516
+ - ImageNet
517
+ Training Resources: 8x NVIDIA V100 GPUs
518
+ ID: regnety_160
519
+ Epochs: 100
520
+ Crop Pct: '0.875'
521
+ Momentum: 0.9
522
+ Batch Size: 512
523
+ Image Size: '224'
524
+ Weight Decay: 5.0e-05
525
+ Interpolation: bicubic
526
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L469
527
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_160-d64013cd.pth
528
+ Results:
529
+ - Task: Image Classification
530
+ Dataset: ImageNet
531
+ Metrics:
532
+ Top 1 Accuracy: 80.28%
533
+ Top 5 Accuracy: 94.97%
534
+ - Name: regnety_320
535
+ In Collection: RegNetY
536
+ Metadata:
537
+ FLOPs: 41492618394
538
+ Parameters: 145050000
539
+ File Size: 580891965
540
+ Architecture:
541
+ - 1x1 Convolution
542
+ - Batch Normalization
543
+ - Convolution
544
+ - Dense Connections
545
+ - Global Average Pooling
546
+ - Grouped Convolution
547
+ - ReLU
548
+ - Squeeze-and-Excitation Block
549
+ Tasks:
550
+ - Image Classification
551
+ Training Techniques:
552
+ - SGD with Momentum
553
+ - Weight Decay
554
+ Training Data:
555
+ - ImageNet
556
+ Training Resources: 8x NVIDIA V100 GPUs
557
+ ID: regnety_320
558
+ Epochs: 100
559
+ Crop Pct: '0.875'
560
+ Momentum: 0.9
561
+ Batch Size: 256
562
+ Image Size: '224'
563
+ Weight Decay: 5.0e-05
564
+ Interpolation: bicubic
565
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L475
566
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_320-ba464b29.pth
567
+ Results:
568
+ - Task: Image Classification
569
+ Dataset: ImageNet
570
+ Metrics:
571
+ Top 1 Accuracy: 80.8%
572
+ Top 5 Accuracy: 95.25%
573
+ -->
pytorch-image-models/hfdocs/source/models/res2net.mdx ADDED
@@ -0,0 +1,327 @@
1
+ # Res2Net
2
+
3
+ **Res2Net** is an image model that employs a variation on bottleneck residual blocks, [Res2Net Blocks](https://paperswithcode.com/method/res2net-block). The motivation is to represent features at multiple scales. This is achieved through a novel building block for CNNs that constructs hierarchical residual-like connections within a single residual block. The block represents multi-scale features at a granular level and increases the range of receptive fields for each network layer.
4
+
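+ As a rough illustration (a simplified sketch of the idea, not the actual `timm` implementation), the channels inside a block are split into groups, and each group after the first is processed by a 3x3 convolution whose input also includes the previous group's output, giving progressively larger receptive fields within a single block:
+
+ ```py
+ >>> import torch
+ >>> import torch.nn as nn
+
+ >>> class ToyRes2NetSplit(nn.Module):
+ ...     """Simplified multi-scale split: hierarchical 3x3 convs across channel groups."""
+ ...     def __init__(self, channels, scales=4):
+ ...         super().__init__()
+ ...         assert channels % scales == 0
+ ...         self.scales = scales
+ ...         width = channels // scales
+ ...         # one 3x3 conv per group except the first, which is passed through unchanged
+ ...         self.convs = nn.ModuleList(
+ ...             [nn.Conv2d(width, width, kernel_size=3, padding=1) for _ in range(scales - 1)]
+ ...         )
+ ...     def forward(self, x):
+ ...         xs = torch.chunk(x, self.scales, dim=1)
+ ...         ys = [xs[0]]
+ ...         for i, conv in enumerate(self.convs):
+ ...             inp = xs[i + 1] if i == 0 else xs[i + 1] + ys[-1]
+ ...             ys.append(torch.relu(conv(inp)))
+ ...         return torch.cat(ys, dim=1)
+
+ >>> block = ToyRes2NetSplit(64, scales=4)
+ >>> block(torch.randn(1, 64, 56, 56)).shape
+ torch.Size([1, 64, 56, 56])
+ ```
+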
5
+ ## How do I use this model on an image?
6
+
7
+ To load a pretrained model:
8
+
9
+ ```py
10
+ >>> import timm
11
+ >>> model = timm.create_model('res2net101_26w_4s', pretrained=True)
12
+ >>> model.eval()
13
+ ```
14
+
15
+ To load and preprocess the image:
16
+
17
+ ```py
18
+ >>> import urllib
19
+ >>> from PIL import Image
20
+ >>> from timm.data import resolve_data_config
21
+ >>> from timm.data.transforms_factory import create_transform
22
+
23
+ >>> config = resolve_data_config({}, model=model)
24
+ >>> transform = create_transform(**config)
25
+
26
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
27
+ >>> urllib.request.urlretrieve(url, filename)
28
+ >>> img = Image.open(filename).convert('RGB')
29
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
30
+ ```
31
+
32
+ To get the model predictions:
33
+
34
+ ```py
35
+ >>> import torch
36
+ >>> with torch.no_grad():
37
+ ... out = model(tensor)
38
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
39
+ >>> print(probabilities.shape)
40
+ >>> # prints: torch.Size([1000])
41
+ ```
42
+
43
+ To get the top-5 predictions class names:
44
+
45
+ ```py
46
+ >>> # Get imagenet class mappings
47
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
48
+ >>> urllib.request.urlretrieve(url, filename)
49
+ >>> with open("imagenet_classes.txt", "r") as f:
50
+ ... categories = [s.strip() for s in f.readlines()]
51
+
52
+ >>> # Print top categories per image
53
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
54
+ >>> for i in range(top5_prob.size(0)):
55
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
56
+ >>> # prints class names and probabilities like:
57
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
58
+ ```
59
+
60
+ Replace the model name with the variant you want to use, e.g. `res2net101_26w_4s`. You can find the IDs in the model summaries at the top of this page.
61
+
62
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
63
+
64
+ ## How do I finetune this model?
65
+
66
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
67
+
68
+ ```py
69
+ >>> model = timm.create_model('res2net101_26w_4s', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
70
+ ```
71
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
72
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
73
+
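+ As a minimal sketch of such a loop (the dataloader `train_loader`, `NUM_FINETUNE_CLASSES`, and the hyperparameters are placeholders to replace with your own):
+
+ ```py
+ >>> import timm
+ >>> import torch
+ >>> import torch.nn as nn
+
+ >>> # Hypothetical setup: `train_loader` yields (images, labels) batches for your dataset.
+ >>> model = timm.create_model('res2net101_26w_4s', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
+ >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)
+ >>> criterion = nn.CrossEntropyLoss()
+
+ >>> model.train()
+ >>> for images, labels in train_loader:
+ ...     optimizer.zero_grad()
+ ...     loss = criterion(model(images), labels)
+ ...     loss.backward()
+ ...     optimizer.step()
+ ```
+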
74
+ ## How do I train this model?
75
+
76
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
77
+
78
+ ## Citation
79
+
80
+ ```BibTeX
81
+ @article{Gao_2021,
82
+ title={Res2Net: A New Multi-Scale Backbone Architecture},
83
+ volume={43},
84
+ ISSN={1939-3539},
85
+ url={http://dx.doi.org/10.1109/TPAMI.2019.2938758},
86
+ DOI={10.1109/tpami.2019.2938758},
87
+ number={2},
88
+ journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
89
+ publisher={Institute of Electrical and Electronics Engineers (IEEE)},
90
+ author={Gao, Shang-Hua and Cheng, Ming-Ming and Zhao, Kai and Zhang, Xin-Yu and Yang, Ming-Hsuan and Torr, Philip},
91
+ year={2021},
92
+ month={Feb},
93
+ pages={652–662}
94
+ }
95
+ ```
96
+
97
+ <!--
98
+ Type: model-index
99
+ Collections:
100
+ - Name: Res2Net
101
+ Paper:
102
+ Title: 'Res2Net: A New Multi-scale Backbone Architecture'
103
+ URL: https://paperswithcode.com/paper/res2net-a-new-multi-scale-backbone
104
+ Models:
105
+ - Name: res2net101_26w_4s
106
+ In Collection: Res2Net
107
+ Metadata:
108
+ FLOPs: 10415881200
109
+ Parameters: 45210000
110
+ File Size: 181456059
111
+ Architecture:
112
+ - Batch Normalization
113
+ - Convolution
114
+ - Global Average Pooling
115
+ - ReLU
116
+ - Res2Net Block
117
+ Tasks:
118
+ - Image Classification
119
+ Training Techniques:
120
+ - SGD with Momentum
121
+ - Weight Decay
122
+ Training Data:
123
+ - ImageNet
124
+ Training Resources: 4x Titan Xp GPUs
125
+ ID: res2net101_26w_4s
126
+ LR: 0.1
127
+ Epochs: 100
128
+ Crop Pct: '0.875'
129
+ Momentum: 0.9
130
+ Batch Size: 256
131
+ Image Size: '224'
132
+ Weight Decay: 0.0001
133
+ Interpolation: bilinear
134
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/res2net.py#L152
135
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net101_26w_4s-02a759a1.pth
136
+ Results:
137
+ - Task: Image Classification
138
+ Dataset: ImageNet
139
+ Metrics:
140
+ Top 1 Accuracy: 79.19%
141
+ Top 5 Accuracy: 94.43%
142
+ - Name: res2net50_14w_8s
143
+ In Collection: Res2Net
144
+ Metadata:
145
+ FLOPs: 5403546768
146
+ Parameters: 25060000
147
+ File Size: 100638543
148
+ Architecture:
149
+ - Batch Normalization
150
+ - Convolution
151
+ - Global Average Pooling
152
+ - ReLU
153
+ - Res2Net Block
154
+ Tasks:
155
+ - Image Classification
156
+ Training Techniques:
157
+ - SGD with Momentum
158
+ - Weight Decay
159
+ Training Data:
160
+ - ImageNet
161
+ Training Resources: 4x Titan Xp GPUs
162
+ ID: res2net50_14w_8s
163
+ LR: 0.1
164
+ Epochs: 100
165
+ Crop Pct: '0.875'
166
+ Momentum: 0.9
167
+ Batch Size: 256
168
+ Image Size: '224'
169
+ Weight Decay: 0.0001
170
+ Interpolation: bilinear
171
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/res2net.py#L196
172
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_14w_8s-6527dddc.pth
173
+ Results:
174
+ - Task: Image Classification
175
+ Dataset: ImageNet
176
+ Metrics:
177
+ Top 1 Accuracy: 78.14%
178
+ Top 5 Accuracy: 93.86%
179
+ - Name: res2net50_26w_4s
180
+ In Collection: Res2Net
181
+ Metadata:
182
+ FLOPs: 5499974064
183
+ Parameters: 25700000
184
+ File Size: 103110087
185
+ Architecture:
186
+ - Batch Normalization
187
+ - Convolution
188
+ - Global Average Pooling
189
+ - ReLU
190
+ - Res2Net Block
191
+ Tasks:
192
+ - Image Classification
193
+ Training Techniques:
194
+ - SGD with Momentum
195
+ - Weight Decay
196
+ Training Data:
197
+ - ImageNet
198
+ Training Resources: 4x Titan Xp GPUs
199
+ ID: res2net50_26w_4s
200
+ LR: 0.1
201
+ Epochs: 100
202
+ Crop Pct: '0.875'
203
+ Momentum: 0.9
204
+ Batch Size: 256
205
+ Image Size: '224'
206
+ Weight Decay: 0.0001
207
+ Interpolation: bilinear
208
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/res2net.py#L141
209
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_4s-06e79181.pth
210
+ Results:
211
+ - Task: Image Classification
212
+ Dataset: ImageNet
213
+ Metrics:
214
+ Top 1 Accuracy: 77.99%
215
+ Top 5 Accuracy: 93.85%
216
+ - Name: res2net50_26w_6s
217
+ In Collection: Res2Net
218
+ Metadata:
219
+ FLOPs: 8130156528
220
+ Parameters: 37050000
221
+ File Size: 148603239
222
+ Architecture:
223
+ - Batch Normalization
224
+ - Convolution
225
+ - Global Average Pooling
226
+ - ReLU
227
+ - Res2Net Block
228
+ Tasks:
229
+ - Image Classification
230
+ Training Techniques:
231
+ - SGD with Momentum
232
+ - Weight Decay
233
+ Training Data:
234
+ - ImageNet
235
+ Training Resources: 4x Titan Xp GPUs
236
+ ID: res2net50_26w_6s
237
+ LR: 0.1
238
+ Epochs: 100
239
+ Crop Pct: '0.875'
240
+ Momentum: 0.9
241
+ Batch Size: 256
242
+ Image Size: '224'
243
+ Weight Decay: 0.0001
244
+ Interpolation: bilinear
245
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/res2net.py#L163
246
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_6s-19041792.pth
247
+ Results:
248
+ - Task: Image Classification
249
+ Dataset: ImageNet
250
+ Metrics:
251
+ Top 1 Accuracy: 78.57%
252
+ Top 5 Accuracy: 94.12%
253
+ - Name: res2net50_26w_8s
254
+ In Collection: Res2Net
255
+ Metadata:
256
+ FLOPs: 10760338992
257
+ Parameters: 48400000
258
+ File Size: 194085165
259
+ Architecture:
260
+ - Batch Normalization
261
+ - Convolution
262
+ - Global Average Pooling
263
+ - ReLU
264
+ - Res2Net Block
265
+ Tasks:
266
+ - Image Classification
267
+ Training Techniques:
268
+ - SGD with Momentum
269
+ - Weight Decay
270
+ Training Data:
271
+ - ImageNet
272
+ Training Resources: 4x Titan Xp GPUs
273
+ ID: res2net50_26w_8s
274
+ LR: 0.1
275
+ Epochs: 100
276
+ Crop Pct: '0.875'
277
+ Momentum: 0.9
278
+ Batch Size: 256
279
+ Image Size: '224'
280
+ Weight Decay: 0.0001
281
+ Interpolation: bilinear
282
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/res2net.py#L174
283
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_8s-2c7c9f12.pth
284
+ Results:
285
+ - Task: Image Classification
286
+ Dataset: ImageNet
287
+ Metrics:
288
+ Top 1 Accuracy: 79.19%
289
+ Top 5 Accuracy: 94.37%
290
+ - Name: res2net50_48w_2s
291
+ In Collection: Res2Net
292
+ Metadata:
293
+ FLOPs: 5375291520
294
+ Parameters: 25290000
295
+ File Size: 101421406
296
+ Architecture:
297
+ - Batch Normalization
298
+ - Convolution
299
+ - Global Average Pooling
300
+ - ReLU
301
+ - Res2Net Block
302
+ Tasks:
303
+ - Image Classification
304
+ Training Techniques:
305
+ - SGD with Momentum
306
+ - Weight Decay
307
+ Training Data:
308
+ - ImageNet
309
+ Training Resources: 4x Titan Xp GPUs
310
+ ID: res2net50_48w_2s
311
+ LR: 0.1
312
+ Epochs: 100
313
+ Crop Pct: '0.875'
314
+ Momentum: 0.9
315
+ Batch Size: 256
316
+ Image Size: '224'
317
+ Weight Decay: 0.0001
318
+ Interpolation: bilinear
319
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/res2net.py#L185
320
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_48w_2s-afed724a.pth
321
+ Results:
322
+ - Task: Image Classification
323
+ Dataset: ImageNet
324
+ Metrics:
325
+ Top 1 Accuracy: 77.53%
326
+ Top 5 Accuracy: 93.56%
327
+ -->
pytorch-image-models/hfdocs/source/models/res2next.mdx ADDED
@@ -0,0 +1,142 @@
1
+ # Res2NeXt
2
+
3
+ **Res2NeXt** is an image model that employs a variation on [ResNeXt](https://paperswithcode.com/method/resnext) bottleneck residual blocks. The motivation is to represent features at multiple scales. This is achieved through a novel building block for CNNs that constructs hierarchical residual-like connections within a single residual block. The block represents multi-scale features at a granular level and increases the range of receptive fields for each network layer.
4
+
5
+ ## How do I use this model on an image?
6
+
7
+ To load a pretrained model:
8
+
9
+ ```py
10
+ >>> import timm
11
+ >>> model = timm.create_model('res2next50', pretrained=True)
12
+ >>> model.eval()
13
+ ```
14
+
15
+ To load and preprocess the image:
16
+
17
+ ```py
18
+ >>> import urllib
19
+ >>> from PIL import Image
20
+ >>> from timm.data import resolve_data_config
21
+ >>> from timm.data.transforms_factory import create_transform
22
+
23
+ >>> config = resolve_data_config({}, model=model)
24
+ >>> transform = create_transform(**config)
25
+
26
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
27
+ >>> urllib.request.urlretrieve(url, filename)
28
+ >>> img = Image.open(filename).convert('RGB')
29
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
30
+ ```
31
+
32
+ To get the model predictions:
33
+
34
+ ```py
35
+ >>> import torch
36
+ >>> with torch.no_grad():
37
+ ... out = model(tensor)
38
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
39
+ >>> print(probabilities.shape)
40
+ >>> # prints: torch.Size([1000])
41
+ ```
42
+
43
+ To get the top-5 predictions class names:
44
+
45
+ ```py
46
+ >>> # Get imagenet class mappings
47
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
48
+ >>> urllib.request.urlretrieve(url, filename)
49
+ >>> with open("imagenet_classes.txt", "r") as f:
50
+ ... categories = [s.strip() for s in f.readlines()]
51
+
52
+ >>> # Print top categories per image
53
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
54
+ >>> for i in range(top5_prob.size(0)):
55
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
56
+ >>> # prints class names and probabilities like:
57
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
58
+ ```
59
+
60
+ Replace the model name with the variant you want to use, e.g. `res2next50`. You can find the IDs in the model summaries at the top of this page.
61
+
62
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
63
+
64
+ ## How do I finetune this model?
65
+
66
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
67
+
68
+ ```py
69
+ >>> model = timm.create_model('res2next50', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
70
+ ```
71
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
72
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
73
+
74
+ ## How do I train this model?
75
+
76
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
77
+
78
+ ## Citation
79
+
80
+ ```BibTeX
81
+ @article{Gao_2021,
82
+ title={Res2Net: A New Multi-Scale Backbone Architecture},
83
+ volume={43},
84
+ ISSN={1939-3539},
85
+ url={http://dx.doi.org/10.1109/TPAMI.2019.2938758},
86
+ DOI={10.1109/tpami.2019.2938758},
87
+ number={2},
88
+ journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
89
+ publisher={Institute of Electrical and Electronics Engineers (IEEE)},
90
+ author={Gao, Shang-Hua and Cheng, Ming-Ming and Zhao, Kai and Zhang, Xin-Yu and Yang, Ming-Hsuan and Torr, Philip},
91
+ year={2021},
92
+ month={Feb},
93
+ pages={652–662}
94
+ }
95
+ ```
96
+
97
+ <!--
98
+ Type: model-index
99
+ Collections:
100
+ - Name: Res2NeXt
101
+ Paper:
102
+ Title: 'Res2Net: A New Multi-scale Backbone Architecture'
103
+ URL: https://paperswithcode.com/paper/res2net-a-new-multi-scale-backbone
104
+ Models:
105
+ - Name: res2next50
106
+ In Collection: Res2NeXt
107
+ Metadata:
108
+ FLOPs: 5396798208
109
+ Parameters: 24670000
110
+ File Size: 99019592
111
+ Architecture:
112
+ - Batch Normalization
113
+ - Convolution
114
+ - Global Average Pooling
115
+ - ReLU
116
+ - Res2NeXt Block
117
+ Tasks:
118
+ - Image Classification
119
+ Training Techniques:
120
+ - SGD with Momentum
121
+ - Weight Decay
122
+ Training Data:
123
+ - ImageNet
124
+ Training Resources: 4x Titan Xp GPUs
125
+ ID: res2next50
126
+ LR: 0.1
127
+ Epochs: 100
128
+ Crop Pct: '0.875'
129
+ Momentum: 0.9
130
+ Batch Size: 256
131
+ Image Size: '224'
132
+ Weight Decay: 0.0001
133
+ Interpolation: bilinear
134
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/res2net.py#L207
135
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next50_4s-6ef7e7bf.pth
136
+ Results:
137
+ - Task: Image Classification
138
+ Dataset: ImageNet
139
+ Metrics:
140
+ Top 1 Accuracy: 78.24%
141
+ Top 5 Accuracy: 93.91%
142
+ -->
pytorch-image-models/hfdocs/source/models/resnet.mdx ADDED
@@ -0,0 +1,445 @@
1
+ # ResNet
2
+
3
+ **Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. [Residual blocks](https://paperswithcode.com/method/residual-block) are stacked on top of each other to form the network: e.g. a ResNet-50 has fifty layers using these blocks.
4
+
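+ To make the residual mapping concrete, here is a minimal, generic basic residual block in PyTorch (an illustration of `output = F(x) + x`, not the exact module used in `timm`):
+
+ ```py
+ >>> import torch
+ >>> import torch.nn as nn
+
+ >>> class BasicResidualBlock(nn.Module):
+ ...     """Two 3x3 convs learn a residual F(x); the input x is added back via a skip connection."""
+ ...     def __init__(self, channels):
+ ...         super().__init__()
+ ...         self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False)
+ ...         self.bn1 = nn.BatchNorm2d(channels)
+ ...         self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False)
+ ...         self.bn2 = nn.BatchNorm2d(channels)
+ ...     def forward(self, x):
+ ...         residual = self.bn2(self.conv2(torch.relu(self.bn1(self.conv1(x)))))
+ ...         return torch.relu(x + residual)  # skip connection: output = F(x) + x
+
+ >>> block = BasicResidualBlock(64)
+ >>> block(torch.randn(1, 64, 56, 56)).shape
+ torch.Size([1, 64, 56, 56])
+ ```
+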
5
+ ## How do I use this model on an image?
6
+
7
+ To load a pretrained model:
8
+
9
+ ```py
10
+ >>> import timm
11
+ >>> model = timm.create_model('resnet18', pretrained=True)
12
+ >>> model.eval()
13
+ ```
14
+
15
+ To load and preprocess the image:
16
+
17
+ ```py
18
+ >>> import urllib
19
+ >>> from PIL import Image
20
+ >>> from timm.data import resolve_data_config
21
+ >>> from timm.data.transforms_factory import create_transform
22
+
23
+ >>> config = resolve_data_config({}, model=model)
24
+ >>> transform = create_transform(**config)
25
+
26
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
27
+ >>> urllib.request.urlretrieve(url, filename)
28
+ >>> img = Image.open(filename).convert('RGB')
29
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
30
+ ```
31
+
32
+ To get the model predictions:
33
+
34
+ ```py
35
+ >>> import torch
36
+ >>> with torch.no_grad():
37
+ ... out = model(tensor)
38
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
39
+ >>> print(probabilities.shape)
40
+ >>> # prints: torch.Size([1000])
41
+ ```
42
+
43
+ To get the top-5 predictions class names:
44
+
45
+ ```py
46
+ >>> # Get imagenet class mappings
47
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
48
+ >>> urllib.request.urlretrieve(url, filename)
49
+ >>> with open("imagenet_classes.txt", "r") as f:
50
+ ... categories = [s.strip() for s in f.readlines()]
51
+
52
+ >>> # Print top categories per image
53
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
54
+ >>> for i in range(top5_prob.size(0)):
55
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
56
+ >>> # prints class names and probabilities like:
57
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
58
+ ```
59
+
60
+ Replace the model name with the variant you want to use, e.g. `resnet18`. You can find the IDs in the model summaries at the top of this page.
61
+
62
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
63
+
64
+ ## How do I finetune this model?
65
+
66
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
67
+
68
+ ```py
69
+ >>> model = timm.create_model('resnet18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
70
+ ```
71
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
72
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
73
+
74
+ ## How do I train this model?
75
+
76
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
77
+
78
+ ## Citation
79
+
80
+ ```BibTeX
81
+ @article{DBLP:journals/corr/HeZRS15,
82
+ author = {Kaiming He and
83
+ Xiangyu Zhang and
84
+ Shaoqing Ren and
85
+ Jian Sun},
86
+ title = {Deep Residual Learning for Image Recognition},
87
+ journal = {CoRR},
88
+ volume = {abs/1512.03385},
89
+ year = {2015},
90
+ url = {http://arxiv.org/abs/1512.03385},
91
+ archivePrefix = {arXiv},
92
+ eprint = {1512.03385},
93
+ timestamp = {Wed, 17 Apr 2019 17:23:45 +0200},
94
+ biburl = {https://dblp.org/rec/journals/corr/HeZRS15.bib},
95
+ bibsource = {dblp computer science bibliography, https://dblp.org}
96
+ }
97
+ ```
98
+
99
+ <!--
100
+ Type: model-index
101
+ Collections:
102
+ - Name: ResNet
103
+ Paper:
104
+ Title: Deep Residual Learning for Image Recognition
105
+ URL: https://paperswithcode.com/paper/deep-residual-learning-for-image-recognition
106
+ Models:
107
+ - Name: resnet18
108
+ In Collection: ResNet
109
+ Metadata:
110
+ FLOPs: 2337073152
111
+ Parameters: 11690000
112
+ File Size: 46827520
113
+ Architecture:
114
+ - 1x1 Convolution
115
+ - Batch Normalization
116
+ - Bottleneck Residual Block
117
+ - Convolution
118
+ - Global Average Pooling
119
+ - Max Pooling
120
+ - ReLU
121
+ - Residual Block
122
+ - Residual Connection
123
+ - Softmax
124
+ Tasks:
125
+ - Image Classification
126
+ Training Data:
127
+ - ImageNet
128
+ ID: resnet18
129
+ Crop Pct: '0.875'
130
+ Image Size: '224'
131
+ Interpolation: bilinear
132
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L641
133
+ Weights: https://download.pytorch.org/models/resnet18-5c106cde.pth
134
+ Results:
135
+ - Task: Image Classification
136
+ Dataset: ImageNet
137
+ Metrics:
138
+ Top 1 Accuracy: 69.74%
139
+ Top 5 Accuracy: 89.09%
140
+ - Name: resnet26
141
+ In Collection: ResNet
142
+ Metadata:
143
+ FLOPs: 3026804736
144
+ Parameters: 16000000
145
+ File Size: 64129972
146
+ Architecture:
147
+ - 1x1 Convolution
148
+ - Batch Normalization
149
+ - Bottleneck Residual Block
150
+ - Convolution
151
+ - Global Average Pooling
152
+ - Max Pooling
153
+ - ReLU
154
+ - Residual Block
155
+ - Residual Connection
156
+ - Softmax
157
+ Tasks:
158
+ - Image Classification
159
+ Training Data:
160
+ - ImageNet
161
+ ID: resnet26
162
+ Crop Pct: '0.875'
163
+ Image Size: '224'
164
+ Interpolation: bicubic
165
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L675
166
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26-9aa10e23.pth
167
+ Results:
168
+ - Task: Image Classification
169
+ Dataset: ImageNet
170
+ Metrics:
171
+ Top 1 Accuracy: 75.29%
172
+ Top 5 Accuracy: 92.57%
173
+ - Name: resnet34
174
+ In Collection: ResNet
175
+ Metadata:
176
+ FLOPs: 4718469120
177
+ Parameters: 21800000
178
+ File Size: 87290831
179
+ Architecture:
180
+ - 1x1 Convolution
181
+ - Batch Normalization
182
+ - Bottleneck Residual Block
183
+ - Convolution
184
+ - Global Average Pooling
185
+ - Max Pooling
186
+ - ReLU
187
+ - Residual Block
188
+ - Residual Connection
189
+ - Softmax
190
+ Tasks:
191
+ - Image Classification
192
+ Training Data:
193
+ - ImageNet
194
+ ID: resnet34
195
+ Crop Pct: '0.875'
196
+ Image Size: '224'
197
+ Interpolation: bilinear
198
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L658
199
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34-43635321.pth
200
+ Results:
201
+ - Task: Image Classification
202
+ Dataset: ImageNet
203
+ Metrics:
204
+ Top 1 Accuracy: 75.11%
205
+ Top 5 Accuracy: 92.28%
206
+ - Name: resnet50
207
+ In Collection: ResNet
208
+ Metadata:
209
+ FLOPs: 5282531328
210
+ Parameters: 25560000
211
+ File Size: 102488165
212
+ Architecture:
213
+ - 1x1 Convolution
214
+ - Batch Normalization
215
+ - Bottleneck Residual Block
216
+ - Convolution
217
+ - Global Average Pooling
218
+ - Max Pooling
219
+ - ReLU
220
+ - Residual Block
221
+ - Residual Connection
222
+ - Softmax
223
+ Tasks:
224
+ - Image Classification
225
+ Training Data:
226
+ - ImageNet
227
+ ID: resnet50
228
+ Crop Pct: '0.875'
229
+ Image Size: '224'
230
+ Interpolation: bicubic
231
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L691
232
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50_ram-a26f946b.pth
233
+ Results:
234
+ - Task: Image Classification
235
+ Dataset: ImageNet
236
+ Metrics:
237
+ Top 1 Accuracy: 79.04%
238
+ Top 5 Accuracy: 94.39%
239
+ - Name: resnetblur50
240
+ In Collection: ResNet
241
+ Metadata:
242
+ FLOPs: 6621606912
243
+ Parameters: 25560000
244
+ File Size: 102488165
245
+ Architecture:
246
+ - 1x1 Convolution
247
+ - Batch Normalization
248
+ - Blur Pooling
249
+ - Bottleneck Residual Block
250
+ - Convolution
251
+ - Global Average Pooling
252
+ - Max Pooling
253
+ - ReLU
254
+ - Residual Block
255
+ - Residual Connection
256
+ - Softmax
257
+ Tasks:
258
+ - Image Classification
259
+ Training Data:
260
+ - ImageNet
261
+ ID: resnetblur50
262
+ Crop Pct: '0.875'
263
+ Image Size: '224'
264
+ Interpolation: bicubic
265
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L1160
266
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnetblur50-84f4748f.pth
267
+ Results:
268
+ - Task: Image Classification
269
+ Dataset: ImageNet
270
+ Metrics:
271
+ Top 1 Accuracy: 79.29%
272
+ Top 5 Accuracy: 94.64%
273
+ - Name: tv_resnet101
274
+ In Collection: ResNet
275
+ Metadata:
276
+ FLOPs: 10068547584
277
+ Parameters: 44550000
278
+ File Size: 178728960
279
+ Architecture:
280
+ - 1x1 Convolution
281
+ - Batch Normalization
282
+ - Bottleneck Residual Block
283
+ - Convolution
284
+ - Global Average Pooling
285
+ - Max Pooling
286
+ - ReLU
287
+ - Residual Block
288
+ - Residual Connection
289
+ - Softmax
290
+ Tasks:
291
+ - Image Classification
292
+ Training Techniques:
293
+ - SGD with Momentum
294
+ - Weight Decay
295
+ Training Data:
296
+ - ImageNet
297
+ ID: tv_resnet101
298
+ LR: 0.1
299
+ Epochs: 90
300
+ Crop Pct: '0.875'
301
+ LR Gamma: 0.1
302
+ Momentum: 0.9
303
+ Batch Size: 32
304
+ Image Size: '224'
305
+ LR Step Size: 30
306
+ Weight Decay: 0.0001
307
+ Interpolation: bilinear
308
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L761
309
+ Weights: https://download.pytorch.org/models/resnet101-5d3b4d8f.pth
310
+ Results:
311
+ - Task: Image Classification
312
+ Dataset: ImageNet
313
+ Metrics:
314
+ Top 1 Accuracy: 77.37%
315
+ Top 5 Accuracy: 93.56%
316
+ - Name: tv_resnet152
317
+ In Collection: ResNet
318
+ Metadata:
319
+ FLOPs: 14857660416
320
+ Parameters: 60190000
321
+ File Size: 241530880
322
+ Architecture:
323
+ - 1x1 Convolution
324
+ - Batch Normalization
325
+ - Bottleneck Residual Block
326
+ - Convolution
327
+ - Global Average Pooling
328
+ - Max Pooling
329
+ - ReLU
330
+ - Residual Block
331
+ - Residual Connection
332
+ - Softmax
333
+ Tasks:
334
+ - Image Classification
335
+ Training Techniques:
336
+ - SGD with Momentum
337
+ - Weight Decay
338
+ Training Data:
339
+ - ImageNet
340
+ ID: tv_resnet152
341
+ LR: 0.1
342
+ Epochs: 90
343
+ Crop Pct: '0.875'
344
+ LR Gamma: 0.1
345
+ Momentum: 0.9
346
+ Batch Size: 32
347
+ Image Size: '224'
348
+ LR Step Size: 30
349
+ Weight Decay: 0.0001
350
+ Interpolation: bilinear
351
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L769
352
+ Weights: https://download.pytorch.org/models/resnet152-b121ed2d.pth
353
+ Results:
354
+ - Task: Image Classification
355
+ Dataset: ImageNet
356
+ Metrics:
357
+ Top 1 Accuracy: 78.32%
358
+ Top 5 Accuracy: 94.05%
359
+ - Name: tv_resnet34
360
+ In Collection: ResNet
361
+ Metadata:
362
+ FLOPs: 4718469120
363
+ Parameters: 21800000
364
+ File Size: 87306240
365
+ Architecture:
366
+ - 1x1 Convolution
367
+ - Batch Normalization
368
+ - Bottleneck Residual Block
369
+ - Convolution
370
+ - Global Average Pooling
371
+ - Max Pooling
372
+ - ReLU
373
+ - Residual Block
374
+ - Residual Connection
375
+ - Softmax
376
+ Tasks:
377
+ - Image Classification
378
+ Training Techniques:
379
+ - SGD with Momentum
380
+ - Weight Decay
381
+ Training Data:
382
+ - ImageNet
383
+ ID: tv_resnet34
384
+ LR: 0.1
385
+ Epochs: 90
386
+ Crop Pct: '0.875'
387
+ LR Gamma: 0.1
388
+ Momentum: 0.9
389
+ Batch Size: 32
390
+ Image Size: '224'
391
+ LR Step Size: 30
392
+ Weight Decay: 0.0001
393
+ Interpolation: bilinear
394
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L745
395
+ Weights: https://download.pytorch.org/models/resnet34-333f7ec4.pth
396
+ Results:
397
+ - Task: Image Classification
398
+ Dataset: ImageNet
399
+ Metrics:
400
+ Top 1 Accuracy: 73.3%
401
+ Top 5 Accuracy: 91.42%
402
+ - Name: tv_resnet50
403
+ In Collection: ResNet
404
+ Metadata:
405
+ FLOPs: 5282531328
406
+ Parameters: 25560000
407
+ File Size: 102502400
408
+ Architecture:
409
+ - 1x1 Convolution
410
+ - Batch Normalization
411
+ - Bottleneck Residual Block
412
+ - Convolution
413
+ - Global Average Pooling
414
+ - Max Pooling
415
+ - ReLU
416
+ - Residual Block
417
+ - Residual Connection
418
+ - Softmax
419
+ Tasks:
420
+ - Image Classification
421
+ Training Techniques:
422
+ - SGD with Momentum
423
+ - Weight Decay
424
+ Training Data:
425
+ - ImageNet
426
+ ID: tv_resnet50
427
+ LR: 0.1
428
+ Epochs: 90
429
+ Crop Pct: '0.875'
430
+ LR Gamma: 0.1
431
+ Momentum: 0.9
432
+ Batch Size: 32
433
+ Image Size: '224'
434
+ LR Step Size: 30
435
+ Weight Decay: 0.0001
436
+ Interpolation: bilinear
437
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L753
438
+ Weights: https://download.pytorch.org/models/resnet50-19c8e357.pth
439
+ Results:
440
+ - Task: Image Classification
441
+ Dataset: ImageNet
442
+ Metrics:
443
+ Top 1 Accuracy: 76.16%
444
+ Top 5 Accuracy: 92.88%
445
+ -->
pytorch-image-models/hfdocs/source/models/resnext.mdx ADDED
@@ -0,0 +1,250 @@
1
+ # ResNeXt
2
+
3
+ A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) \\( C \\), as an essential factor in addition to the dimensions of depth and width.
4
+
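+ In practice, aggregating \\( C \\) parallel transformations with the same topology is equivalent to a bottleneck whose 3x3 convolution is grouped with `groups=C`; a `32x4d` block uses cardinality 32 with 4 channels per group. A rough sketch of the idea (batch norm omitted for brevity, and not timm's exact implementation):
+
+ ```py
+ import torch
+ import torch.nn as nn
+
+ class ResNeXtBlockSketch(nn.Module):
+     """Rough sketch: 1x1 reduce -> grouped 3x3 (groups = cardinality) -> 1x1 expand, plus shortcut."""
+     def __init__(self, channels=256, cardinality=32, group_width=4):
+         super().__init__()
+         width = cardinality * group_width  # 32 * 4 = 128 for a "32x4d" block
+         self.reduce = nn.Conv2d(channels, width, 1, bias=False)
+         self.grouped = nn.Conv2d(width, width, 3, padding=1, groups=cardinality, bias=False)
+         self.expand = nn.Conv2d(width, channels, 1, bias=False)
+         self.relu = nn.ReLU(inplace=True)
+
+     def forward(self, x):
+         out = self.relu(self.reduce(x))
+         out = self.relu(self.grouped(out))       # C transformations with identical topology
+         return self.relu(self.expand(out) + x)   # residual connection
+
+ x = torch.randn(1, 256, 56, 56)
+ print(ResNeXtBlockSketch()(x).shape)  # torch.Size([1, 256, 56, 56])
+ ```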
5
+ ## How do I use this model on an image?
6
+
7
+ To load a pretrained model:
8
+
9
+ ```py
10
+ >>> import timm
11
+ >>> model = timm.create_model('resnext101_32x8d', pretrained=True)
12
+ >>> model.eval()
13
+ ```
14
+
15
+ To load and preprocess the image:
16
+
17
+ ```py
18
+ >>> import urllib
19
+ >>> from PIL import Image
20
+ >>> from timm.data import resolve_data_config
21
+ >>> from timm.data.transforms_factory import create_transform
22
+
23
+ >>> config = resolve_data_config({}, model=model)
24
+ >>> transform = create_transform(**config)
25
+
26
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
27
+ >>> urllib.request.urlretrieve(url, filename)
28
+ >>> img = Image.open(filename).convert('RGB')
29
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
30
+ ```
31
+
32
+ To get the model predictions:
33
+
34
+ ```py
35
+ >>> import torch
36
+ >>> with torch.no_grad():
37
+ ... out = model(tensor)
38
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
39
+ >>> print(probabilities.shape)
40
+ >>> # prints: torch.Size([1000])
41
+ ```
42
+
43
+ To get the top-5 predictions class names:
44
+
45
+ ```py
46
+ >>> # Get imagenet class mappings
47
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
48
+ >>> urllib.request.urlretrieve(url, filename)
49
+ >>> with open("imagenet_classes.txt", "r") as f:
50
+ ... categories = [s.strip() for s in f.readlines()]
51
+
52
+ >>> # Print top categories per image
53
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
54
+ >>> for i in range(top5_prob.size(0)):
55
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
56
+ >>> # prints class names and probabilities like:
57
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
58
+ ```
59
+
60
+ Replace the model name with the variant you want to use, e.g. `resnext101_32x8d`. You can find the IDs in the model summaries at the top of this page.
61
+
62
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
63
+
64
+ ## How do I finetune this model?
65
+
66
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
67
+
68
+ ```py
69
+ >>> model = timm.create_model('resnext101_32x8d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
70
+ ```
71
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
72
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
73
+
74
+ ## How do I train this model?
75
+
76
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
77
+
78
+ ## Citation
79
+
80
+ ```BibTeX
81
+ @article{DBLP:journals/corr/XieGDTH16,
82
+ author = {Saining Xie and
83
+ Ross B. Girshick and
84
+ Piotr Doll{\'{a}}r and
85
+ Zhuowen Tu and
86
+ Kaiming He},
87
+ title = {Aggregated Residual Transformations for Deep Neural Networks},
88
+ journal = {CoRR},
89
+ volume = {abs/1611.05431},
90
+ year = {2016},
91
+ url = {http://arxiv.org/abs/1611.05431},
92
+ archivePrefix = {arXiv},
93
+ eprint = {1611.05431},
94
+ timestamp = {Mon, 13 Aug 2018 16:45:58 +0200},
95
+ biburl = {https://dblp.org/rec/journals/corr/XieGDTH16.bib},
96
+ bibsource = {dblp computer science bibliography, https://dblp.org}
97
+ }
98
+ ```
99
+
100
+ <!--
101
+ Type: model-index
102
+ Collections:
103
+ - Name: ResNeXt
104
+ Paper:
105
+ Title: Aggregated Residual Transformations for Deep Neural Networks
106
+ URL: https://paperswithcode.com/paper/aggregated-residual-transformations-for-deep
107
+ Models:
108
+ - Name: resnext101_32x8d
109
+ In Collection: ResNeXt
110
+ Metadata:
111
+ FLOPs: 21180417024
112
+ Parameters: 88790000
113
+ File Size: 356082095
114
+ Architecture:
115
+ - 1x1 Convolution
116
+ - Batch Normalization
117
+ - Convolution
118
+ - Global Average Pooling
119
+ - Grouped Convolution
120
+ - Max Pooling
121
+ - ReLU
122
+ - ResNeXt Block
123
+ - Residual Connection
124
+ - Softmax
125
+ Tasks:
126
+ - Image Classification
127
+ Training Data:
128
+ - ImageNet
129
+ ID: resnext101_32x8d
130
+ Crop Pct: '0.875'
131
+ Image Size: '224'
132
+ Interpolation: bilinear
133
+ Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnet.py#L877
134
+ Weights: https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth
135
+ Results:
136
+ - Task: Image Classification
137
+ Dataset: ImageNet
138
+ Metrics:
139
+ Top 1 Accuracy: 79.3%
140
+ Top 5 Accuracy: 94.53%
141
+ - Name: resnext50_32x4d
142
+ In Collection: ResNeXt
143
+ Metadata:
144
+ FLOPs: 5472648192
145
+ Parameters: 25030000
146
+ File Size: 100435887
147
+ Architecture:
148
+ - 1x1 Convolution
149
+ - Batch Normalization
150
+ - Convolution
151
+ - Global Average Pooling
152
+ - Grouped Convolution
153
+ - Max Pooling
154
+ - ReLU
155
+ - ResNeXt Block
156
+ - Residual Connection
157
+ - Softmax
158
+ Tasks:
159
+ - Image Classification
160
+ Training Data:
161
+ - ImageNet
162
+ ID: resnext50_32x4d
163
+ Crop Pct: '0.875'
164
+ Image Size: '224'
165
+ Interpolation: bicubic
166
+ Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnet.py#L851
167
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50_32x4d_ra-d733960d.pth
168
+ Results:
169
+ - Task: Image Classification
170
+ Dataset: ImageNet
171
+ Metrics:
172
+ Top 1 Accuracy: 79.79%
173
+ Top 5 Accuracy: 94.61%
174
+ - Name: resnext50d_32x4d
175
+ In Collection: ResNeXt
176
+ Metadata:
177
+ FLOPs: 5781119488
178
+ Parameters: 25050000
179
+ File Size: 100515304
180
+ Architecture:
181
+ - 1x1 Convolution
182
+ - Batch Normalization
183
+ - Convolution
184
+ - Global Average Pooling
185
+ - Grouped Convolution
186
+ - Max Pooling
187
+ - ReLU
188
+ - ResNeXt Block
189
+ - Residual Connection
190
+ - Softmax
191
+ Tasks:
192
+ - Image Classification
193
+ Training Data:
194
+ - ImageNet
195
+ ID: resnext50d_32x4d
196
+ Crop Pct: '0.875'
197
+ Image Size: '224'
198
+ Interpolation: bicubic
199
+ Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnet.py#L869
200
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50d_32x4d-103e99f8.pth
201
+ Results:
202
+ - Task: Image Classification
203
+ Dataset: ImageNet
204
+ Metrics:
205
+ Top 1 Accuracy: 79.67%
206
+ Top 5 Accuracy: 94.87%
207
+ - Name: tv_resnext50_32x4d
208
+ In Collection: ResNeXt
209
+ Metadata:
210
+ FLOPs: 5472648192
211
+ Parameters: 25030000
212
+ File Size: 100441675
213
+ Architecture:
214
+ - 1x1 Convolution
215
+ - Batch Normalization
216
+ - Convolution
217
+ - Global Average Pooling
218
+ - Grouped Convolution
219
+ - Max Pooling
220
+ - ReLU
221
+ - ResNeXt Block
222
+ - Residual Connection
223
+ - Softmax
224
+ Tasks:
225
+ - Image Classification
226
+ Training Techniques:
227
+ - SGD with Momentum
228
+ - Weight Decay
229
+ Training Data:
230
+ - ImageNet
231
+ ID: tv_resnext50_32x4d
232
+ LR: 0.1
233
+ Epochs: 90
234
+ Crop Pct: '0.875'
235
+ LR Gamma: 0.1
236
+ Momentum: 0.9
237
+ Batch Size: 32
238
+ Image Size: '224'
239
+ LR Step Size: 30
240
+ Weight Decay: 0.0001
241
+ Interpolation: bilinear
242
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L842
243
+ Weights: https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth
244
+ Results:
245
+ - Task: Image Classification
246
+ Dataset: ImageNet
247
+ Metrics:
248
+ Top 1 Accuracy: 77.61%
249
+ Top 5 Accuracy: 93.68%
250
+ -->
pytorch-image-models/hfdocs/source/models/se-resnet.mdx ADDED
@@ -0,0 +1,189 @@
1
+ # SE-ResNet
2
+
3
+ **SE ResNet** is a variant of a [ResNet](https://www.paperswithcode.com/method/resnet) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration.
4
+
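+ The recalibration itself is simple: each channel is squeezed to a single descriptor by global average pooling, a small bottleneck MLP produces a gate in \\( (0, 1) \\) per channel, and the feature map is rescaled by those gates. A simplified sketch of the mechanism (not the exact SE module behind these weights):
+
+ ```py
+ import torch
+ import torch.nn as nn
+
+ class SEBlockSketch(nn.Module):
+     """Simplified squeeze-and-excitation: global pool -> bottleneck MLP -> sigmoid channel gates."""
+     def __init__(self, channels, reduction=16):
+         super().__init__()
+         self.fc1 = nn.Linear(channels, channels // reduction)
+         self.fc2 = nn.Linear(channels // reduction, channels)
+
+     def forward(self, x):
+         b, c, _, _ = x.shape
+         s = x.mean(dim=(2, 3))                                     # squeeze: (B, C) channel descriptors
+         gates = torch.sigmoid(self.fc2(torch.relu(self.fc1(s))))   # excitation: per-channel weights
+         return x * gates.view(b, c, 1, 1)                          # channel-wise recalibration
+
+ x = torch.randn(1, 64, 56, 56)
+ print(SEBlockSketch(64)(x).shape)  # torch.Size([1, 64, 56, 56])
+ ```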
5
+ ## How do I use this model on an image?
6
+
7
+ To load a pretrained model:
8
+
9
+ ```py
10
+ >>> import timm
11
+ >>> model = timm.create_model('seresnet152d', pretrained=True)
12
+ >>> model.eval()
13
+ ```
14
+
15
+ To load and preprocess the image:
16
+
17
+ ```py
18
+ >>> import urllib
19
+ >>> from PIL import Image
20
+ >>> from timm.data import resolve_data_config
21
+ >>> from timm.data.transforms_factory import create_transform
22
+
23
+ >>> config = resolve_data_config({}, model=model)
24
+ >>> transform = create_transform(**config)
25
+
26
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
27
+ >>> urllib.request.urlretrieve(url, filename)
28
+ >>> img = Image.open(filename).convert('RGB')
29
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
30
+ ```
31
+
32
+ To get the model predictions:
33
+
34
+ ```py
35
+ >>> import torch
36
+ >>> with torch.no_grad():
37
+ ... out = model(tensor)
38
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
39
+ >>> print(probabilities.shape)
40
+ >>> # prints: torch.Size([1000])
41
+ ```
42
+
43
+ To get the top-5 predictions class names:
44
+
45
+ ```py
46
+ >>> # Get imagenet class mappings
47
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
48
+ >>> urllib.request.urlretrieve(url, filename)
49
+ >>> with open("imagenet_classes.txt", "r") as f:
50
+ ... categories = [s.strip() for s in f.readlines()]
51
+
52
+ >>> # Print top categories per image
53
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
54
+ >>> for i in range(top5_prob.size(0)):
55
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
56
+ >>> # prints class names and probabilities like:
57
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
58
+ ```
59
+
60
+ Replace the model name with the variant you want to use, e.g. `seresnet152d`. You can find the IDs in the model summaries at the top of this page.
61
+
62
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
63
+
64
+ ## How do I finetune this model?
65
+
66
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
67
+
68
+ ```py
69
+ >>> model = timm.create_model('seresnet152d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
70
+ ```
71
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
72
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
73
+
74
+ ## How do I train this model?
75
+
76
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
77
+
78
+ ## Citation
79
+
80
+ ```BibTeX
81
+ @misc{hu2019squeezeandexcitation,
82
+ title={Squeeze-and-Excitation Networks},
83
+ author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu},
84
+ year={2019},
85
+ eprint={1709.01507},
86
+ archivePrefix={arXiv},
87
+ primaryClass={cs.CV}
88
+ }
89
+ ```
90
+
91
+ <!--
92
+ Type: model-index
93
+ Collections:
94
+ - Name: SE ResNet
95
+ Paper:
96
+ Title: Squeeze-and-Excitation Networks
97
+ URL: https://paperswithcode.com/paper/squeeze-and-excitation-networks
98
+ Models:
99
+ - Name: seresnet152d
100
+ In Collection: SE ResNet
101
+ Metadata:
102
+ FLOPs: 20161904304
103
+ Parameters: 66840000
104
+ File Size: 268144497
105
+ Architecture:
106
+ - 1x1 Convolution
107
+ - Batch Normalization
108
+ - Bottleneck Residual Block
109
+ - Convolution
110
+ - Global Average Pooling
111
+ - Max Pooling
112
+ - ReLU
113
+ - Residual Block
114
+ - Residual Connection
115
+ - Softmax
116
+ - Squeeze-and-Excitation Block
117
+ Tasks:
118
+ - Image Classification
119
+ Training Techniques:
120
+ - Label Smoothing
121
+ - SGD with Momentum
122
+ - Weight Decay
123
+ Training Data:
124
+ - ImageNet
125
+ Training Resources: 8x NVIDIA Titan X GPUs
126
+ ID: seresnet152d
127
+ LR: 0.6
128
+ Epochs: 100
129
+ Layers: 152
130
+ Dropout: 0.2
131
+ Crop Pct: '0.94'
132
+ Momentum: 0.9
133
+ Batch Size: 1024
134
+ Image Size: '256'
135
+ Interpolation: bicubic
136
+ Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1206
137
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet152d_ra2-04464dd2.pth
138
+ Results:
139
+ - Task: Image Classification
140
+ Dataset: ImageNet
141
+ Metrics:
142
+ Top 1 Accuracy: 83.74%
143
+ Top 5 Accuracy: 96.77%
144
+ - Name: seresnet50
145
+ In Collection: SE ResNet
146
+ Metadata:
147
+ FLOPs: 5285062320
148
+ Parameters: 28090000
149
+ File Size: 112621903
150
+ Architecture:
151
+ - 1x1 Convolution
152
+ - Batch Normalization
153
+ - Bottleneck Residual Block
154
+ - Convolution
155
+ - Global Average Pooling
156
+ - Max Pooling
157
+ - ReLU
158
+ - Residual Block
159
+ - Residual Connection
160
+ - Softmax
161
+ - Squeeze-and-Excitation Block
162
+ Tasks:
163
+ - Image Classification
164
+ Training Techniques:
165
+ - Label Smoothing
166
+ - SGD with Momentum
167
+ - Weight Decay
168
+ Training Data:
169
+ - ImageNet
170
+ Training Resources: 8x NVIDIA Titan X GPUs
171
+ ID: seresnet50
172
+ LR: 0.6
173
+ Epochs: 100
174
+ Layers: 50
175
+ Dropout: 0.2
176
+ Crop Pct: '0.875'
177
+ Momentum: 0.9
178
+ Batch Size: 1024
179
+ Image Size: '224'
180
+ Interpolation: bicubic
181
+ Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1180
182
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet50_ra_224-8efdb4bb.pth
183
+ Results:
184
+ - Task: Image Classification
185
+ Dataset: ImageNet
186
+ Metrics:
187
+ Top 1 Accuracy: 80.26%
188
+ Top 5 Accuracy: 95.07%
189
+ -->
pytorch-image-models/hfdocs/source/models/selecsls.mdx ADDED
@@ -0,0 +1,203 @@
1
+ # SelecSLS
2
+
3
+ **SelecSLS** uses novel selective long and short range skip connections to improve the information flow, allowing for a drastically faster network without compromising accuracy.
4
+
5
+ ## How do I use this model on an image?
6
+
7
+ To load a pretrained model:
8
+
9
+ ```py
10
+ >>> import timm
11
+ >>> model = timm.create_model('selecsls42b', pretrained=True)
12
+ >>> model.eval()
13
+ ```
14
+
15
+ To load and preprocess the image:
16
+
17
+ ```py
18
+ >>> import urllib
19
+ >>> from PIL import Image
20
+ >>> from timm.data import resolve_data_config
21
+ >>> from timm.data.transforms_factory import create_transform
22
+
23
+ >>> config = resolve_data_config({}, model=model)
24
+ >>> transform = create_transform(**config)
25
+
26
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
27
+ >>> urllib.request.urlretrieve(url, filename)
28
+ >>> img = Image.open(filename).convert('RGB')
29
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
30
+ ```
31
+
32
+ To get the model predictions:
33
+
34
+ ```py
35
+ >>> import torch
36
+ >>> with torch.no_grad():
37
+ ... out = model(tensor)
38
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
39
+ >>> print(probabilities.shape)
40
+ >>> # prints: torch.Size([1000])
41
+ ```
42
+
43
+ To get the top-5 predictions class names:
44
+
45
+ ```py
46
+ >>> # Get imagenet class mappings
47
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
48
+ >>> urllib.request.urlretrieve(url, filename)
49
+ >>> with open("imagenet_classes.txt", "r") as f:
50
+ ... categories = [s.strip() for s in f.readlines()]
51
+
52
+ >>> # Print top categories per image
53
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
54
+ >>> for i in range(top5_prob.size(0)):
55
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
56
+ >>> # prints class names and probabilities like:
57
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
58
+ ```
59
+
60
+ Replace the model name with the variant you want to use, e.g. `selecsls42b`. You can find the IDs in the model summaries at the top of this page.
61
+
62
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
63
+
64
+ ## How do I finetune this model?
65
+
66
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
67
+
68
+ ```py
69
+ >>> model = timm.create_model('selecsls42b', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
70
+ ```
71
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
72
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
73
+
74
+ ## How do I train this model?
75
+
76
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
77
+
78
+ ## Citation
79
+
80
+ ```BibTeX
81
+ @article{Mehta_2020,
82
+ title={XNect},
83
+ volume={39},
84
+ ISSN={1557-7368},
85
+ url={http://dx.doi.org/10.1145/3386569.3392410},
86
+ DOI={10.1145/3386569.3392410},
87
+ number={4},
88
+ journal={ACM Transactions on Graphics},
89
+ publisher={Association for Computing Machinery (ACM)},
90
+ author={Mehta, Dushyant and Sotnychenko, Oleksandr and Mueller, Franziska and Xu, Weipeng and Elgharib, Mohamed and Fua, Pascal and Seidel, Hans-Peter and Rhodin, Helge and Pons-Moll, Gerard and Theobalt, Christian},
91
+ year={2020},
92
+ month={Jul}
93
+ }
94
+ ```
95
+
96
+ <!--
97
+ Type: model-index
98
+ Collections:
99
+ - Name: SelecSLS
100
+ Paper:
101
+ Title: 'XNect: Real-time Multi-Person 3D Motion Capture with a Single RGB Camera'
102
+ URL: https://paperswithcode.com/paper/xnect-real-time-multi-person-3d-human-pose
103
+ Models:
104
+ - Name: selecsls42b
105
+ In Collection: SelecSLS
106
+ Metadata:
107
+ FLOPs: 3824022528
108
+ Parameters: 32460000
109
+ File Size: 129948954
110
+ Architecture:
111
+ - Batch Normalization
112
+ - Convolution
113
+ - Dense Connections
114
+ - Dropout
115
+ - Global Average Pooling
116
+ - ReLU
117
+ - SelecSLS Block
118
+ Tasks:
119
+ - Image Classification
120
+ Training Techniques:
121
+ - Cosine Annealing
122
+ - Random Erasing
123
+ Training Data:
124
+ - ImageNet
125
+ ID: selecsls42b
126
+ Crop Pct: '0.875'
127
+ Image Size: '224'
128
+ Interpolation: bicubic
129
+ Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/selecsls.py#L335
130
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls42b-8af30141.pth
131
+ Results:
132
+ - Task: Image Classification
133
+ Dataset: ImageNet
134
+ Metrics:
135
+ Top 1 Accuracy: 77.18%
136
+ Top 5 Accuracy: 93.39%
137
+ - Name: selecsls60
138
+ In Collection: SelecSLS
139
+ Metadata:
140
+ FLOPs: 4610472600
141
+ Parameters: 30670000
142
+ File Size: 122839714
143
+ Architecture:
144
+ - Batch Normalization
145
+ - Convolution
146
+ - Dense Connections
147
+ - Dropout
148
+ - Global Average Pooling
149
+ - ReLU
150
+ - SelecSLS Block
151
+ Tasks:
152
+ - Image Classification
153
+ Training Techniques:
154
+ - Cosine Annealing
155
+ - Random Erasing
156
+ Training Data:
157
+ - ImageNet
158
+ ID: selecsls60
159
+ Crop Pct: '0.875'
160
+ Image Size: '224'
161
+ Interpolation: bicubic
162
+ Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/selecsls.py#L342
163
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60-bbf87526.pth
164
+ Results:
165
+ - Task: Image Classification
166
+ Dataset: ImageNet
167
+ Metrics:
168
+ Top 1 Accuracy: 77.99%
169
+ Top 5 Accuracy: 93.83%
170
+ - Name: selecsls60b
171
+ In Collection: SelecSLS
172
+ Metadata:
173
+ FLOPs: 4657653144
174
+ Parameters: 32770000
175
+ File Size: 131252898
176
+ Architecture:
177
+ - Batch Normalization
178
+ - Convolution
179
+ - Dense Connections
180
+ - Dropout
181
+ - Global Average Pooling
182
+ - ReLU
183
+ - SelecSLS Block
184
+ Tasks:
185
+ - Image Classification
186
+ Training Techniques:
187
+ - Cosine Annealing
188
+ - Random Erasing
189
+ Training Data:
190
+ - ImageNet
191
+ ID: selecsls60b
192
+ Crop Pct: '0.875'
193
+ Image Size: '224'
194
+ Interpolation: bicubic
195
+ Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/selecsls.py#L349
196
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60b-94e619b5.pth
197
+ Results:
198
+ - Task: Image Classification
199
+ Dataset: ImageNet
200
+ Metrics:
201
+ Top 1 Accuracy: 78.41%
202
+ Top 5 Accuracy: 94.18%
203
+ -->
pytorch-image-models/hfdocs/source/models/skresnet.mdx ADDED
@@ -0,0 +1,179 @@
1
+ # SK-ResNet
2
+
3
+ **SK ResNet** is a variant of a [ResNet](https://www.paperswithcode.com/method/resnet) that employs a [Selective Kernel](https://paperswithcode.com/method/selective-kernel) unit. In general, all the large kernel convolutions in the original bottleneck blocks in ResNet are replaced by the proposed [SK convolutions](https://paperswithcode.com/method/selective-kernel-convolution), enabling the network to choose appropriate receptive field sizes in an adaptive manner.
4
+
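+ A selective kernel unit runs the input through branches with different receptive fields, fuses them, and applies a softmax attention over the branches (per channel) to decide how much of each to keep. The following heavily simplified sketch only illustrates that selection mechanism; the SK units behind these weights are more elaborate:
+
+ ```py
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ class SelectiveKernelSketch(nn.Module):
+     """Simplified selective kernel: two branches, softmax selection over branches per channel."""
+     def __init__(self, channels, reduction=16):
+         super().__init__()
+         self.branch3 = nn.Conv2d(channels, channels, 3, padding=1, bias=False)
+         self.branch5 = nn.Conv2d(channels, channels, 3, padding=2, dilation=2, bias=False)  # 5x5-like field
+         self.fc_reduce = nn.Linear(channels, channels // reduction)
+         self.fc_select = nn.Linear(channels // reduction, channels * 2)  # one score per (branch, channel)
+
+     def forward(self, x):
+         b, c, _, _ = x.shape
+         u3, u5 = self.branch3(x), self.branch5(x)
+         s = (u3 + u5).mean(dim=(2, 3))                           # fuse branches, squeeze to (B, C)
+         z = F.relu(self.fc_reduce(s))
+         attn = self.fc_select(z).view(b, 2, c).softmax(dim=1)    # softmax across the two branches
+         a3, a5 = attn[:, 0].view(b, c, 1, 1), attn[:, 1].view(b, c, 1, 1)
+         return a3 * u3 + a5 * u5                                 # adaptive receptive field selection
+
+ x = torch.randn(1, 64, 56, 56)
+ print(SelectiveKernelSketch(64)(x).shape)  # torch.Size([1, 64, 56, 56])
+ ```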
5
+ ## How do I use this model on an image?
6
+
7
+ To load a pretrained model:
8
+
9
+ ```py
10
+ >>> import timm
11
+ >>> model = timm.create_model('skresnet18', pretrained=True)
12
+ >>> model.eval()
13
+ ```
14
+
15
+ To load and preprocess the image:
16
+
17
+ ```py
18
+ >>> import urllib
19
+ >>> from PIL import Image
20
+ >>> from timm.data import resolve_data_config
21
+ >>> from timm.data.transforms_factory import create_transform
22
+
23
+ >>> config = resolve_data_config({}, model=model)
24
+ >>> transform = create_transform(**config)
25
+
26
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
27
+ >>> urllib.request.urlretrieve(url, filename)
28
+ >>> img = Image.open(filename).convert('RGB')
29
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
30
+ ```
31
+
32
+ To get the model predictions:
33
+
34
+ ```py
35
+ >>> import torch
36
+ >>> with torch.no_grad():
37
+ ... out = model(tensor)
38
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
39
+ >>> print(probabilities.shape)
40
+ >>> # prints: torch.Size([1000])
41
+ ```
42
+
43
+ To get the top-5 predictions class names:
44
+
45
+ ```py
46
+ >>> # Get imagenet class mappings
47
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
48
+ >>> urllib.request.urlretrieve(url, filename)
49
+ >>> with open("imagenet_classes.txt", "r") as f:
50
+ ... categories = [s.strip() for s in f.readlines()]
51
+
52
+ >>> # Print top categories per image
53
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
54
+ >>> for i in range(top5_prob.size(0)):
55
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
56
+ >>> # prints class names and probabilities like:
57
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
58
+ ```
59
+
60
+ Replace the model name with the variant you want to use, e.g. `skresnet18`. You can find the IDs in the model summaries at the top of this page.
61
+
62
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
63
+
64
+ ## How do I finetune this model?
65
+
66
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
67
+
68
+ ```py
69
+ >>> model = timm.create_model('skresnet18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
70
+ ```
71
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
72
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
73
+
74
+ ## How do I train this model?
75
+
76
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
77
+
78
+ ## Citation
79
+
80
+ ```BibTeX
81
+ @misc{li2019selective,
82
+ title={Selective Kernel Networks},
83
+ author={Xiang Li and Wenhai Wang and Xiaolin Hu and Jian Yang},
84
+ year={2019},
85
+ eprint={1903.06586},
86
+ archivePrefix={arXiv},
87
+ primaryClass={cs.CV}
88
+ }
89
+ ```
90
+
91
+ <!--
92
+ Type: model-index
93
+ Collections:
94
+ - Name: SKResNet
95
+ Paper:
96
+ Title: Selective Kernel Networks
97
+ URL: https://paperswithcode.com/paper/selective-kernel-networks
98
+ Models:
99
+ - Name: skresnet18
100
+ In Collection: SKResNet
101
+ Metadata:
102
+ FLOPs: 2333467136
103
+ Parameters: 11960000
104
+ File Size: 47923238
105
+ Architecture:
106
+ - Convolution
107
+ - Dense Connections
108
+ - Global Average Pooling
109
+ - Max Pooling
110
+ - Residual Connection
111
+ - Selective Kernel
112
+ - Softmax
113
+ Tasks:
114
+ - Image Classification
115
+ Training Techniques:
116
+ - SGD with Momentum
117
+ - Weight Decay
118
+ Training Data:
119
+ - ImageNet
120
+ Training Resources: 8x GPUs
121
+ ID: skresnet18
122
+ LR: 0.1
123
+ Epochs: 100
124
+ Layers: 18
125
+ Crop Pct: '0.875'
126
+ Momentum: 0.9
127
+ Batch Size: 256
128
+ Image Size: '224'
129
+ Weight Decay: 4.0e-05
130
+ Interpolation: bicubic
131
+ Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/sknet.py#L148
132
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet18_ra-4eec2804.pth
133
+ Results:
134
+ - Task: Image Classification
135
+ Dataset: ImageNet
136
+ Metrics:
137
+ Top 1 Accuracy: 73.03%
138
+ Top 5 Accuracy: 91.17%
139
+ - Name: skresnet34
140
+ In Collection: SKResNet
141
+ Metadata:
142
+ FLOPs: 4711849952
143
+ Parameters: 22280000
144
+ File Size: 89299314
145
+ Architecture:
146
+ - Convolution
147
+ - Dense Connections
148
+ - Global Average Pooling
149
+ - Max Pooling
150
+ - Residual Connection
151
+ - Selective Kernel
152
+ - Softmax
153
+ Tasks:
154
+ - Image Classification
155
+ Training Techniques:
156
+ - SGD with Momentum
157
+ - Weight Decay
158
+ Training Data:
159
+ - ImageNet
160
+ Training Resources: 8x GPUs
161
+ ID: skresnet34
162
+ LR: 0.1
163
+ Epochs: 100
164
+ Layers: 34
165
+ Crop Pct: '0.875'
166
+ Momentum: 0.9
167
+ Batch Size: 256
168
+ Image Size: '224'
169
+ Weight Decay: 4.0e-05
170
+ Interpolation: bicubic
171
+ Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/sknet.py#L165
172
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet34_ra-bdc0ccde.pth
173
+ Results:
174
+ - Task: Image Classification
175
+ Dataset: ImageNet
176
+ Metrics:
177
+ Top 1 Accuracy: 76.93%
178
+ Top 5 Accuracy: 93.32%
179
+ -->
pytorch-image-models/hfdocs/source/models/skresnext.mdx ADDED
@@ -0,0 +1,137 @@
1
+ # SK-ResNeXt
2
+
3
+ **SK ResNeXt** is a variant of a [ResNeXt](https://www.paperswithcode.com/method/resnext) that employs a [Selective Kernel](https://paperswithcode.com/method/selective-kernel) unit. In general, all the large kernel convolutions in the original bottleneck blocks in ResNeXt are replaced by the proposed [SK convolutions](https://paperswithcode.com/method/selective-kernel-convolution), enabling the network to choose appropriate receptive field sizes in an adaptive manner.
4
+
5
+ ## How do I use this model on an image?
6
+
7
+ To load a pretrained model:
8
+
9
+ ```py
10
+ >>> import timm
11
+ >>> model = timm.create_model('skresnext50_32x4d', pretrained=True)
12
+ >>> model.eval()
13
+ ```
14
+
15
+ To load and preprocess the image:
16
+
17
+ ```py
18
+ >>> import urllib
19
+ >>> from PIL import Image
20
+ >>> from timm.data import resolve_data_config
21
+ >>> from timm.data.transforms_factory import create_transform
22
+
23
+ >>> config = resolve_data_config({}, model=model)
24
+ >>> transform = create_transform(**config)
25
+
26
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
27
+ >>> urllib.request.urlretrieve(url, filename)
28
+ >>> img = Image.open(filename).convert('RGB')
29
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
30
+ ```
31
+
32
+ To get the model predictions:
33
+
34
+ ```py
35
+ >>> import torch
36
+ >>> with torch.no_grad():
37
+ ... out = model(tensor)
38
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
39
+ >>> print(probabilities.shape)
40
+ >>> # prints: torch.Size([1000])
41
+ ```
42
+
43
+ To get the top-5 predictions class names:
44
+
45
+ ```py
46
+ >>> # Get imagenet class mappings
47
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
48
+ >>> urllib.request.urlretrieve(url, filename)
49
+ >>> with open("imagenet_classes.txt", "r") as f:
50
+ ... categories = [s.strip() for s in f.readlines()]
51
+
52
+ >>> # Print top categories per image
53
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
54
+ >>> for i in range(top5_prob.size(0)):
55
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
56
+ >>> # prints class names and probabilities like:
57
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
58
+ ```
59
+
60
+ Replace the model name with the variant you want to use, e.g. `skresnext50_32x4d`. You can find the IDs in the model summaries at the top of this page.
61
+
62
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
63
+
64
+ ## How do I finetune this model?
65
+
66
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
67
+
68
+ ```py
69
+ >>> model = timm.create_model('skresnext50_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
70
+ ```
71
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
72
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
73
+
74
+ ## How do I train this model?
75
+
76
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
77
+
78
+ ## Citation
79
+
80
+ ```BibTeX
81
+ @misc{li2019selective,
82
+ title={Selective Kernel Networks},
83
+ author={Xiang Li and Wenhai Wang and Xiaolin Hu and Jian Yang},
84
+ year={2019},
85
+ eprint={1903.06586},
86
+ archivePrefix={arXiv},
87
+ primaryClass={cs.CV}
88
+ }
89
+ ```
90
+
91
+ <!--
92
+ Type: model-index
93
+ Collections:
94
+ - Name: SKResNeXt
95
+ Paper:
96
+ Title: Selective Kernel Networks
97
+ URL: https://paperswithcode.com/paper/selective-kernel-networks
98
+ Models:
99
+ - Name: skresnext50_32x4d
100
+ In Collection: SKResNeXt
101
+ Metadata:
102
+ FLOPs: 5739845824
103
+ Parameters: 27480000
104
+ File Size: 110340975
105
+ Architecture:
106
+ - Convolution
107
+ - Dense Connections
108
+ - Global Average Pooling
109
+ - Grouped Convolution
110
+ - Max Pooling
111
+ - Residual Connection
112
+ - Selective Kernel
113
+ - Softmax
114
+ Tasks:
115
+ - Image Classification
116
+ Training Data:
117
+ - ImageNet
118
+ Training Resources: 8x GPUs
119
+ ID: skresnext50_32x4d
120
+ LR: 0.1
121
+ Epochs: 100
122
+ Layers: 50
123
+ Crop Pct: '0.875'
124
+ Momentum: 0.9
125
+ Batch Size: 256
126
+ Image Size: '224'
127
+ Weight Decay: 0.0001
128
+ Interpolation: bicubic
129
+ Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/sknet.py#L210
130
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnext50_ra-f40e40bf.pth
131
+ Results:
132
+ - Task: Image Classification
133
+ Dataset: ImageNet
134
+ Metrics:
135
+ Top 1 Accuracy: 80.15%
136
+ Top 5 Accuracy: 94.64%
137
+ -->
pytorch-image-models/hfdocs/source/models/spnasnet.mdx ADDED
@@ -0,0 +1,129 @@
1
+ # SPNASNet
2
+
3
+ **Single-Path NAS** is a novel differentiable NAS method for designing hardware-efficient ConvNets in less than 4 hours.
4
+
5
+ ## How do I use this model on an image?
6
+
7
+ To load a pretrained model:
8
+
9
+ ```py
10
+ >>> import timm
11
+ >>> model = timm.create_model('spnasnet_100', pretrained=True)
12
+ >>> model.eval()
13
+ ```
14
+
15
+ To load and preprocess the image:
16
+
17
+ ```py
18
+ >>> import urllib
19
+ >>> from PIL import Image
20
+ >>> from timm.data import resolve_data_config
21
+ >>> from timm.data.transforms_factory import create_transform
22
+
23
+ >>> config = resolve_data_config({}, model=model)
24
+ >>> transform = create_transform(**config)
25
+
26
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
27
+ >>> urllib.request.urlretrieve(url, filename)
28
+ >>> img = Image.open(filename).convert('RGB')
29
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
30
+ ```
31
+
32
+ To get the model predictions:
33
+
34
+ ```py
35
+ >>> import torch
36
+ >>> with torch.no_grad():
37
+ ... out = model(tensor)
38
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
39
+ >>> print(probabilities.shape)
40
+ >>> # prints: torch.Size([1000])
41
+ ```
42
+
43
+ To get the top-5 predictions class names:
44
+
45
+ ```py
46
+ >>> # Get imagenet class mappings
47
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
48
+ >>> urllib.request.urlretrieve(url, filename)
49
+ >>> with open("imagenet_classes.txt", "r") as f:
50
+ ... categories = [s.strip() for s in f.readlines()]
51
+
52
+ >>> # Print top categories per image
53
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
54
+ >>> for i in range(top5_prob.size(0)):
55
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
56
+ >>> # prints class names and probabilities like:
57
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
58
+ ```
59
+
60
+ Replace the model name with the variant you want to use, e.g. `spnasnet_100`. You can find the IDs in the model summaries at the top of this page.
61
+
62
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
63
+
64
+ ## How do I finetune this model?
65
+
66
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
67
+
68
+ ```py
69
+ >>> model = timm.create_model('spnasnet_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
70
+ ```
71
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
72
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
73
+
74
+ ## How do I train this model?
75
+
76
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
77
+
78
+ ## Citation
79
+
80
+ ```BibTeX
81
+ @misc{stamoulis2019singlepath,
82
+ title={Single-Path NAS: Designing Hardware-Efficient ConvNets in less than 4 Hours},
83
+ author={Dimitrios Stamoulis and Ruizhou Ding and Di Wang and Dimitrios Lymberopoulos and Bodhi Priyantha and Jie Liu and Diana Marculescu},
84
+ year={2019},
85
+ eprint={1904.02877},
86
+ archivePrefix={arXiv},
87
+ primaryClass={cs.LG}
88
+ }
89
+ ```
90
+
91
+ <!--
92
+ Type: model-index
93
+ Collections:
94
+ - Name: SPNASNet
95
+ Paper:
96
+ Title: 'Single-Path NAS: Designing Hardware-Efficient ConvNets in less than 4
97
+ Hours'
98
+ URL: https://paperswithcode.com/paper/single-path-nas-designing-hardware-efficient
99
+ Models:
100
+ - Name: spnasnet_100
101
+ In Collection: SPNASNet
102
+ Metadata:
103
+ FLOPs: 442385600
104
+ Parameters: 4420000
105
+ File Size: 17902337
106
+ Architecture:
107
+ - Average Pooling
108
+ - Batch Normalization
109
+ - Convolution
110
+ - Depthwise Separable Convolution
111
+ - Dropout
112
+ - ReLU
113
+ Tasks:
114
+ - Image Classification
115
+ Training Data:
116
+ - ImageNet
117
+ ID: spnasnet_100
118
+ Crop Pct: '0.875'
119
+ Image Size: '224'
120
+ Interpolation: bilinear
121
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L995
122
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/spnasnet_100-048bc3f4.pth
123
+ Results:
124
+ - Task: Image Classification
125
+ Dataset: ImageNet
126
+ Metrics:
127
+ Top 1 Accuracy: 74.08%
128
+ Top 5 Accuracy: 91.82%
129
+ -->
pytorch-image-models/hfdocs/source/models/ssl-resnet.mdx ADDED
@@ -0,0 +1,198 @@
1
+ # SSL ResNet
2
+
3
+ **Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://paperswithcode.com/method/residual-block) on top of each other to form the network: e.g. a ResNet-50 has fifty layers using these blocks.
4
+
5
+ The models in this collection utilise semi-supervised learning to improve their performance. The approach brings important gains to standard architectures for image, video and fine-grained classification.
6
+
7
+ Please note the CC-BY-NC 4.0 license on these weights; they are for non-commercial use only.
8
+
9
+ ## How do I use this model on an image?
10
+
11
+ To load a pretrained model:
12
+
13
+ ```py
14
+ >>> import timm
15
+ >>> model = timm.create_model('ssl_resnet18', pretrained=True)
16
+ >>> model.eval()
17
+ ```
18
+
19
+ To load and preprocess the image:
20
+
21
+ ```py
22
+ >>> import urllib
23
+ >>> from PIL import Image
24
+ >>> from timm.data import resolve_data_config
25
+ >>> from timm.data.transforms_factory import create_transform
26
+
27
+ >>> config = resolve_data_config({}, model=model)
28
+ >>> transform = create_transform(**config)
29
+
30
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
31
+ >>> urllib.request.urlretrieve(url, filename)
32
+ >>> img = Image.open(filename).convert('RGB')
33
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
34
+ ```
35
+
36
+ To get the model predictions:
37
+
38
+ ```py
39
+ >>> import torch
40
+ >>> with torch.no_grad():
41
+ ... out = model(tensor)
42
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
43
+ >>> print(probabilities.shape)
44
+ >>> # prints: torch.Size([1000])
45
+ ```
46
+
47
+ To get the top-5 predictions class names:
48
+
49
+ ```py
50
+ >>> # Get imagenet class mappings
51
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
52
+ >>> urllib.request.urlretrieve(url, filename)
53
+ >>> with open("imagenet_classes.txt", "r") as f:
54
+ ... categories = [s.strip() for s in f.readlines()]
55
+
56
+ >>> # Print top categories per image
57
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
58
+ >>> for i in range(top5_prob.size(0)):
59
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
60
+ >>> # prints class names and probabilities like:
61
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
62
+ ```
63
+
64
+ Replace the model name with the variant you want to use, e.g. `ssl_resnet18`. You can find the IDs in the model summaries at the top of this page.
65
+
66
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
67
+
68
+ ## How do I finetune this model?
69
+
70
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
71
+
72
+ ```py
73
+ >>> model = timm.create_model('ssl_resnet18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
74
+ ```
75
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
76
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
77
+
78
+ ## How do I train this model?
79
+
80
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
81
+
82
+ ## Citation
83
+
84
+ ```BibTeX
85
+ @article{DBLP:journals/corr/abs-1905-00546,
86
+ author = {I. Zeki Yalniz and
87
+ Herv{\'{e}} J{\'{e}}gou and
88
+ Kan Chen and
89
+ Manohar Paluri and
90
+ Dhruv Mahajan},
91
+ title = {Billion-scale semi-supervised learning for image classification},
92
+ journal = {CoRR},
93
+ volume = {abs/1905.00546},
94
+ year = {2019},
95
+ url = {http://arxiv.org/abs/1905.00546},
96
+ archivePrefix = {arXiv},
97
+ eprint = {1905.00546},
98
+ timestamp = {Mon, 28 Sep 2020 08:19:37 +0200},
99
+ biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib},
100
+ bibsource = {dblp computer science bibliography, https://dblp.org}
101
+ }
102
+ ```
103
+
104
+ <!--
105
+ Type: model-index
106
+ Collections:
107
+ - Name: SSL ResNet
108
+ Paper:
109
+ Title: Billion-scale semi-supervised learning for image classification
110
+ URL: https://paperswithcode.com/paper/billion-scale-semi-supervised-learning-for
111
+ Models:
112
+ - Name: ssl_resnet18
113
+ In Collection: SSL ResNet
114
+ Metadata:
115
+ FLOPs: 2337073152
116
+ Parameters: 11690000
117
+ File Size: 46811375
118
+ Architecture:
119
+ - 1x1 Convolution
120
+ - Batch Normalization
121
+ - Bottleneck Residual Block
122
+ - Convolution
123
+ - Global Average Pooling
124
+ - Max Pooling
125
+ - ReLU
126
+ - Residual Block
127
+ - Residual Connection
128
+ - Softmax
129
+ Tasks:
130
+ - Image Classification
131
+ Training Techniques:
132
+ - SGD with Momentum
133
+ - Weight Decay
134
+ Training Data:
135
+ - ImageNet
136
+ - YFCC-100M
137
+ Training Resources: 64x GPUs
138
+ ID: ssl_resnet18
139
+ LR: 0.0015
140
+ Epochs: 30
141
+ Layers: 18
142
+ Crop Pct: '0.875'
143
+ Batch Size: 1536
144
+ Image Size: '224'
145
+ Weight Decay: 0.0001
146
+ Interpolation: bilinear
147
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L894
148
+ Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet18-d92f0530.pth
149
+ Results:
150
+ - Task: Image Classification
151
+ Dataset: ImageNet
152
+ Metrics:
153
+ Top 1 Accuracy: 72.62%
154
+ Top 5 Accuracy: 91.42%
155
+ - Name: ssl_resnet50
156
+ In Collection: SSL ResNet
157
+ Metadata:
158
+ FLOPs: 5282531328
159
+ Parameters: 25560000
160
+ File Size: 102480594
161
+ Architecture:
162
+ - 1x1 Convolution
163
+ - Batch Normalization
164
+ - Bottleneck Residual Block
165
+ - Convolution
166
+ - Global Average Pooling
167
+ - Max Pooling
168
+ - ReLU
169
+ - Residual Block
170
+ - Residual Connection
171
+ - Softmax
172
+ Tasks:
173
+ - Image Classification
174
+ Training Techniques:
175
+ - SGD with Momentum
176
+ - Weight Decay
177
+ Training Data:
178
+ - ImageNet
179
+ - YFCC-100M
180
+ Training Resources: 64x GPUs
181
+ ID: ssl_resnet50
182
+ LR: 0.0015
183
+ Epochs: 30
184
+ Layers: 50
185
+ Crop Pct: '0.875'
186
+ Batch Size: 1536
187
+ Image Size: '224'
188
+ Weight Decay: 0.0001
189
+ Interpolation: bilinear
190
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L904
191
+ Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet50-08389792.pth
192
+ Results:
193
+ - Task: Image Classification
194
+ Dataset: ImageNet
195
+ Metrics:
196
+ Top 1 Accuracy: 79.24%
197
+ Top 5 Accuracy: 94.83%
198
+ -->
pytorch-image-models/hfdocs/source/models/swsl-resnet.mdx ADDED
@@ -0,0 +1,198 @@
1
+ # SWSL ResNet
2
+
3
+ **Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://paperswithcode.com/method/residual-block) on top of each other to form a network: e.g. a ResNet-50 has fifty layers using these blocks.
4
+
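+ The idea can be summarised in a few lines of PyTorch. The block below is a simplified sketch of a basic residual block, not the exact `timm` implementation:
+
+ ```py
+ >>> import torch
+ >>> import torch.nn as nn
+
+ >>> class BasicResidualBlock(nn.Module):
+ ...     """Simplified basic block: two 3x3 convs plus an identity shortcut."""
+ ...     def __init__(self, channels):
+ ...         super().__init__()
+ ...         self.conv1 = nn.Conv2d(channels, channels, 3, padding=1, bias=False)
+ ...         self.bn1 = nn.BatchNorm2d(channels)
+ ...         self.conv2 = nn.Conv2d(channels, channels, 3, padding=1, bias=False)
+ ...         self.bn2 = nn.BatchNorm2d(channels)
+ ...         self.act = nn.ReLU(inplace=True)
+ ...     def forward(self, x):
+ ...         out = self.act(self.bn1(self.conv1(x)))
+ ...         out = self.bn2(self.conv2(out))
+ ...         return self.act(out + x)  # the stacked layers fit F(x); the block outputs F(x) + x
+
+ >>> block = BasicResidualBlock(64)
+ >>> print(block(torch.randn(1, 64, 56, 56)).shape)
+ >>> # prints: torch.Size([1, 64, 56, 56])
+ ```
+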
5
+ The models in this collection utilise semi-weakly supervised learning to improve model performance. The approach brings important gains to standard architectures for image, video and fine-grained classification.
6
+
7
+ Please note the CC-BY-NC 4.0 license on these weights: they are for non-commercial use only.
8
+
9
+ ## How do I use this model on an image?
10
+
11
+ To load a pretrained model:
12
+
13
+ ```py
14
+ >>> import timm
15
+ >>> model = timm.create_model('swsl_resnet18', pretrained=True)
16
+ >>> model.eval()
17
+ ```
18
+
19
+ To load and preprocess the image:
20
+
21
+ ```py
22
+ >>> import urllib.request
23
+ >>> from PIL import Image
24
+ >>> from timm.data import resolve_data_config
25
+ >>> from timm.data.transforms_factory import create_transform
26
+
27
+ >>> config = resolve_data_config({}, model=model)
28
+ >>> transform = create_transform(**config)
29
+
30
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
31
+ >>> urllib.request.urlretrieve(url, filename)
32
+ >>> img = Image.open(filename).convert('RGB')
33
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
34
+ ```
35
+
36
+ To get the model predictions:
37
+
38
+ ```py
39
+ >>> import torch
40
+ >>> with torch.no_grad():
41
+ ... out = model(tensor)
42
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
43
+ >>> print(probabilities.shape)
44
+ >>> # prints: torch.Size([1000])
45
+ ```
46
+
47
+ To get the top-5 predictions class names:
48
+
49
+ ```py
50
+ >>> # Get imagenet class mappings
51
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
52
+ >>> urllib.request.urlretrieve(url, filename)
53
+ >>> with open("imagenet_classes.txt", "r") as f:
54
+ ... categories = [s.strip() for s in f.readlines()]
55
+
56
+ >>> # Print top categories per image
57
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
58
+ >>> for i in range(top5_prob.size(0)):
59
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
60
+ >>> # prints class names and probabilities like:
61
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
62
+ ```
63
+
64
+ Replace the model name with the variant you want to use, e.g. `swsl_resnet18`. You can find the IDs in the model summaries at the top of this page.
65
+
66
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
67
+
68
+ ## How do I finetune this model?
69
+
70
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
71
+
72
+ ```py
73
+ >>> model = timm.create_model('swsl_resnet18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
74
+ ```
75
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
76
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
77
+
78
+ ## How do I train this model?
79
+
80
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
81
+
82
+ ## Citation
83
+
84
+ ```BibTeX
85
+ @article{DBLP:journals/corr/abs-1905-00546,
86
+ author = {I. Zeki Yalniz and
87
+ Herv{\'{e}} J{\'{e}}gou and
88
+ Kan Chen and
89
+ Manohar Paluri and
90
+ Dhruv Mahajan},
91
+ title = {Billion-scale semi-supervised learning for image classification},
92
+ journal = {CoRR},
93
+ volume = {abs/1905.00546},
94
+ year = {2019},
95
+ url = {http://arxiv.org/abs/1905.00546},
96
+ archivePrefix = {arXiv},
97
+ eprint = {1905.00546},
98
+ timestamp = {Mon, 28 Sep 2020 08:19:37 +0200},
99
+ biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib},
100
+ bibsource = {dblp computer science bibliography, https://dblp.org}
101
+ }
102
+ ```
103
+
104
+ <!--
105
+ Type: model-index
106
+ Collections:
107
+ - Name: SWSL ResNet
108
+ Paper:
109
+ Title: Billion-scale semi-supervised learning for image classification
110
+ URL: https://paperswithcode.com/paper/billion-scale-semi-supervised-learning-for
111
+ Models:
112
+ - Name: swsl_resnet18
113
+ In Collection: SWSL ResNet
114
+ Metadata:
115
+ FLOPs: 2337073152
116
+ Parameters: 11690000
117
+ File Size: 46811375
118
+ Architecture:
119
+ - 1x1 Convolution
120
+ - Batch Normalization
121
+ - Bottleneck Residual Block
122
+ - Convolution
123
+ - Global Average Pooling
124
+ - Max Pooling
125
+ - ReLU
126
+ - Residual Block
127
+ - Residual Connection
128
+ - Softmax
129
+ Tasks:
130
+ - Image Classification
131
+ Training Techniques:
132
+ - SGD with Momentum
133
+ - Weight Decay
134
+ Training Data:
135
+ - IG-1B-Targeted
136
+ - ImageNet
137
+ Training Resources: 64x GPUs
138
+ ID: swsl_resnet18
139
+ LR: 0.0015
140
+ Epochs: 30
141
+ Layers: 18
142
+ Crop Pct: '0.875'
143
+ Batch Size: 1536
144
+ Image Size: '224'
145
+ Weight Decay: 0.0001
146
+ Interpolation: bilinear
147
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L954
148
+ Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet18-118f1556.pth
149
+ Results:
150
+ - Task: Image Classification
151
+ Dataset: ImageNet
152
+ Metrics:
153
+ Top 1 Accuracy: 73.28%
154
+ Top 5 Accuracy: 91.76%
155
+ - Name: swsl_resnet50
156
+ In Collection: SWSL ResNet
157
+ Metadata:
158
+ FLOPs: 5282531328
159
+ Parameters: 25560000
160
+ File Size: 102480594
161
+ Architecture:
162
+ - 1x1 Convolution
163
+ - Batch Normalization
164
+ - Bottleneck Residual Block
165
+ - Convolution
166
+ - Global Average Pooling
167
+ - Max Pooling
168
+ - ReLU
169
+ - Residual Block
170
+ - Residual Connection
171
+ - Softmax
172
+ Tasks:
173
+ - Image Classification
174
+ Training Techniques:
175
+ - SGD with Momentum
176
+ - Weight Decay
177
+ Training Data:
178
+ - IG-1B-Targeted
179
+ - ImageNet
180
+ Training Resources: 64x GPUs
181
+ ID: swsl_resnet50
182
+ LR: 0.0015
183
+ Epochs: 30
184
+ Layers: 50
185
+ Crop Pct: '0.875'
186
+ Batch Size: 1536
187
+ Image Size: '224'
188
+ Weight Decay: 0.0001
189
+ Interpolation: bilinear
190
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L965
191
+ Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet50-16a12f1b.pth
192
+ Results:
193
+ - Task: Image Classification
194
+ Dataset: ImageNet
195
+ Metrics:
196
+ Top 1 Accuracy: 81.14%
197
+ Top 5 Accuracy: 95.97%
198
+ -->
pytorch-image-models/hfdocs/source/models/swsl-resnext.mdx ADDED
@@ -0,0 +1,284 @@
1
+ # SWSL ResNeXt
2
+
3
+ A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) \\( C \\), as an essential factor in addition to the dimensions of depth and width.
4
+
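+ In practice the aggregated transformations are typically implemented as a grouped convolution whose number of groups equals the cardinality \\( C \\). A rough sketch (illustrative only, not the exact `timm` implementation):
+
+ ```py
+ >>> import torch
+ >>> import torch.nn as nn
+
+ >>> # 3x3 grouped convolution with cardinality C=32: the 128 input channels are split into
+ >>> # 32 groups of 4 channels, each transformed independently and then concatenated
+ >>> grouped_conv = nn.Conv2d(128, 128, kernel_size=3, padding=1, groups=32, bias=False)
+ >>> print(grouped_conv(torch.randn(1, 128, 56, 56)).shape)
+ >>> # prints: torch.Size([1, 128, 56, 56])
+ ```
+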
5
+ The models in this collection utilise semi-weakly supervised learning to improve model performance. The approach brings important gains to standard architectures for image, video and fine-grained classification.
6
+
7
+ Please note the CC-BY-NC 4.0 license on these weights: they are for non-commercial use only.
8
+
9
+ ## How do I use this model on an image?
10
+
11
+ To load a pretrained model:
12
+
13
+ ```py
14
+ >>> import timm
15
+ >>> model = timm.create_model('swsl_resnext101_32x16d', pretrained=True)
16
+ >>> model.eval()
17
+ ```
18
+
19
+ To load and preprocess the image:
20
+
21
+ ```py
22
+ >>> import urllib.request
23
+ >>> from PIL import Image
24
+ >>> from timm.data import resolve_data_config
25
+ >>> from timm.data.transforms_factory import create_transform
26
+
27
+ >>> config = resolve_data_config({}, model=model)
28
+ >>> transform = create_transform(**config)
29
+
30
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
31
+ >>> urllib.request.urlretrieve(url, filename)
32
+ >>> img = Image.open(filename).convert('RGB')
33
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
34
+ ```
35
+
36
+ To get the model predictions:
37
+
38
+ ```py
39
+ >>> import torch
40
+ >>> with torch.no_grad():
41
+ ... out = model(tensor)
42
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
43
+ >>> print(probabilities.shape)
44
+ >>> # prints: torch.Size([1000])
45
+ ```
46
+
47
+ To get the top-5 predictions class names:
48
+
49
+ ```py
50
+ >>> # Get imagenet class mappings
51
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
52
+ >>> urllib.request.urlretrieve(url, filename)
53
+ >>> with open("imagenet_classes.txt", "r") as f:
54
+ ... categories = [s.strip() for s in f.readlines()]
55
+
56
+ >>> # Print top categories per image
57
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
58
+ >>> for i in range(top5_prob.size(0)):
59
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
60
+ >>> # prints class names and probabilities like:
61
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
62
+ ```
63
+
64
+ Replace the model name with the variant you want to use, e.g. `swsl_resnext101_32x16d`. You can find the IDs in the model summaries at the top of this page.
65
+
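+ To see which semi-weakly supervised variants are bundled with your installed version of timm, you can filter the model registry (the exact list depends on the timm version):
+
+ ```py
+ >>> import timm
+ >>> print(timm.list_models('swsl_*'))
+ >>> # prints something like: ['swsl_resnet18', 'swsl_resnet50', 'swsl_resnext50_32x4d', ...]
+ ```
+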
66
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
67
+
68
+ ## How do I finetune this model?
69
+
70
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
71
+
72
+ ```py
73
+ >>> model = timm.create_model('swsl_resnext101_32x16d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
74
+ ```
75
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
76
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
77
+
78
+ ## How do I train this model?
79
+
80
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
81
+
82
+ ## Citation
83
+
84
+ ```BibTeX
85
+ @article{DBLP:journals/corr/abs-1905-00546,
86
+ author = {I. Zeki Yalniz and
87
+ Herv{\'{e}} J{\'{e}}gou and
88
+ Kan Chen and
89
+ Manohar Paluri and
90
+ Dhruv Mahajan},
91
+ title = {Billion-scale semi-supervised learning for image classification},
92
+ journal = {CoRR},
93
+ volume = {abs/1905.00546},
94
+ year = {2019},
95
+ url = {http://arxiv.org/abs/1905.00546},
96
+ archivePrefix = {arXiv},
97
+ eprint = {1905.00546},
98
+ timestamp = {Mon, 28 Sep 2020 08:19:37 +0200},
99
+ biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib},
100
+ bibsource = {dblp computer science bibliography, https://dblp.org}
101
+ }
102
+ ```
103
+
104
+ <!--
105
+ Type: model-index
106
+ Collections:
107
+ - Name: SWSL ResNext
108
+ Paper:
109
+ Title: Billion-scale semi-supervised learning for image classification
110
+ URL: https://paperswithcode.com/paper/billion-scale-semi-supervised-learning-for
111
+ Models:
112
+ - Name: swsl_resnext101_32x16d
113
+ In Collection: SWSL ResNext
114
+ Metadata:
115
+ FLOPs: 46623691776
116
+ Parameters: 194030000
117
+ File Size: 777518664
118
+ Architecture:
119
+ - 1x1 Convolution
120
+ - Batch Normalization
121
+ - Convolution
122
+ - Global Average Pooling
123
+ - Grouped Convolution
124
+ - Max Pooling
125
+ - ReLU
126
+ - ResNeXt Block
127
+ - Residual Connection
128
+ - Softmax
129
+ Tasks:
130
+ - Image Classification
131
+ Training Techniques:
132
+ - SGD with Momentum
133
+ - Weight Decay
134
+ Training Data:
135
+ - IG-1B-Targeted
136
+ - ImageNet
137
+ Training Resources: 64x GPUs
138
+ ID: swsl_resnext101_32x16d
139
+ LR: 0.0015
140
+ Epochs: 30
141
+ Layers: 101
142
+ Crop Pct: '0.875'
143
+ Batch Size: 1536
144
+ Image Size: '224'
145
+ Weight Decay: 0.0001
146
+ Interpolation: bilinear
147
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L1009
148
+ Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x16-f3559a9c.pth
149
+ Results:
150
+ - Task: Image Classification
151
+ Dataset: ImageNet
152
+ Metrics:
153
+ Top 1 Accuracy: 83.34%
154
+ Top 5 Accuracy: 96.84%
155
+ - Name: swsl_resnext101_32x4d
156
+ In Collection: SWSL ResNext
157
+ Metadata:
158
+ FLOPs: 10298145792
159
+ Parameters: 44180000
160
+ File Size: 177341913
161
+ Architecture:
162
+ - 1x1 Convolution
163
+ - Batch Normalization
164
+ - Convolution
165
+ - Global Average Pooling
166
+ - Grouped Convolution
167
+ - Max Pooling
168
+ - ReLU
169
+ - ResNeXt Block
170
+ - Residual Connection
171
+ - Softmax
172
+ Tasks:
173
+ - Image Classification
174
+ Training Techniques:
175
+ - SGD with Momentum
176
+ - Weight Decay
177
+ Training Data:
178
+ - IG-1B-Targeted
179
+ - ImageNet
180
+ Training Resources: 64x GPUs
181
+ ID: swsl_resnext101_32x4d
182
+ LR: 0.0015
183
+ Epochs: 30
184
+ Layers: 101
185
+ Crop Pct: '0.875'
186
+ Batch Size: 1536
187
+ Image Size: '224'
188
+ Weight Decay: 0.0001
189
+ Interpolation: bilinear
190
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L987
191
+ Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x4-3f87e46b.pth
192
+ Results:
193
+ - Task: Image Classification
194
+ Dataset: ImageNet
195
+ Metrics:
196
+ Top 1 Accuracy: 83.22%
197
+ Top 5 Accuracy: 96.77%
198
+ - Name: swsl_resnext101_32x8d
199
+ In Collection: SWSL ResNext
200
+ Metadata:
201
+ FLOPs: 21180417024
202
+ Parameters: 88790000
203
+ File Size: 356056638
204
+ Architecture:
205
+ - 1x1 Convolution
206
+ - Batch Normalization
207
+ - Convolution
208
+ - Global Average Pooling
209
+ - Grouped Convolution
210
+ - Max Pooling
211
+ - ReLU
212
+ - ResNeXt Block
213
+ - Residual Connection
214
+ - Softmax
215
+ Tasks:
216
+ - Image Classification
217
+ Training Techniques:
218
+ - SGD with Momentum
219
+ - Weight Decay
220
+ Training Data:
221
+ - IG-1B-Targeted
222
+ - ImageNet
223
+ Training Resources: 64x GPUs
224
+ ID: swsl_resnext101_32x8d
225
+ LR: 0.0015
226
+ Epochs: 30
227
+ Layers: 101
228
+ Crop Pct: '0.875'
229
+ Batch Size: 1536
230
+ Image Size: '224'
231
+ Weight Decay: 0.0001
232
+ Interpolation: bilinear
233
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L998
234
+ Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x8-b4712904.pth
235
+ Results:
236
+ - Task: Image Classification
237
+ Dataset: ImageNet
238
+ Metrics:
239
+ Top 1 Accuracy: 84.27%
240
+ Top 5 Accuracy: 97.17%
241
+ - Name: swsl_resnext50_32x4d
242
+ In Collection: SWSL ResNext
243
+ Metadata:
244
+ FLOPs: 5472648192
245
+ Parameters: 25030000
246
+ File Size: 100428550
247
+ Architecture:
248
+ - 1x1 Convolution
249
+ - Batch Normalization
250
+ - Convolution
251
+ - Global Average Pooling
252
+ - Grouped Convolution
253
+ - Max Pooling
254
+ - ReLU
255
+ - ResNeXt Block
256
+ - Residual Connection
257
+ - Softmax
258
+ Tasks:
259
+ - Image Classification
260
+ Training Techniques:
261
+ - SGD with Momentum
262
+ - Weight Decay
263
+ Training Data:
264
+ - IG-1B-Targeted
265
+ - ImageNet
266
+ Training Resources: 64x GPUs
267
+ ID: swsl_resnext50_32x4d
268
+ LR: 0.0015
269
+ Epochs: 30
270
+ Layers: 50
271
+ Crop Pct: '0.875'
272
+ Batch Size: 1536
273
+ Image Size: '224'
274
+ Weight Decay: 0.0001
275
+ Interpolation: bilinear
276
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L976
277
+ Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext50_32x4-72679e44.pth
278
+ Results:
279
+ - Task: Image Classification
280
+ Dataset: ImageNet
281
+ Metrics:
282
+ Top 1 Accuracy: 82.17%
283
+ Top 5 Accuracy: 96.23%
284
+ -->
pytorch-image-models/hfdocs/source/models/tf-efficientnet-lite.mdx ADDED
@@ -0,0 +1,262 @@
1
+ # (Tensorflow) EfficientNet Lite
2
+
3
+ **EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrarily scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use \\( 2^N \\) times more computational resources, then we can simply increase the network depth by \\( \alpha ^ N \\), width by \\( \beta ^ N \\), and image size by \\( \gamma ^ N \\), where \\( \alpha, \beta, \gamma \\) are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient \\( \phi \\) to uniformly scale network width, depth, and resolution in a principled way.
4
+
5
+ The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image.
6
+
7
+ The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2).
8
+
9
+ EfficientNet-Lite makes EfficientNet more suitable for mobile devices by introducing [ReLU6](https://paperswithcode.com/method/relu6) activation functions and removing [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation).
10
+
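+ ReLU6 is simply a ReLU clamped at 6; the bounded output range keeps activations in a fixed interval, which tends to play well with the quantised, mobile-oriented deployments these models target. A quick illustration:
+
+ ```py
+ >>> import torch
+ >>> x = torch.tensor([-2.0, 3.0, 8.0])
+ >>> print(torch.nn.functional.relu6(x))  # equivalent to clamping relu(x) at 6
+ >>> # prints: tensor([0., 3., 6.])
+ ```
+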
11
+ The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu).
12
+
13
+ ## How do I use this model on an image?
14
+
15
+ To load a pretrained model:
16
+
17
+ ```py
18
+ >>> import timm
19
+ >>> model = timm.create_model('tf_efficientnet_lite0', pretrained=True)
20
+ >>> model.eval()
21
+ ```
22
+
23
+ To load and preprocess the image:
24
+
25
+ ```py
26
+ >>> import urllib.request
27
+ >>> from PIL import Image
28
+ >>> from timm.data import resolve_data_config
29
+ >>> from timm.data.transforms_factory import create_transform
30
+
31
+ >>> config = resolve_data_config({}, model=model)
32
+ >>> transform = create_transform(**config)
33
+
34
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
35
+ >>> urllib.request.urlretrieve(url, filename)
36
+ >>> img = Image.open(filename).convert('RGB')
37
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
38
+ ```
39
+
40
+ To get the model predictions:
41
+
42
+ ```py
43
+ >>> import torch
44
+ >>> with torch.no_grad():
45
+ ... out = model(tensor)
46
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
47
+ >>> print(probabilities.shape)
48
+ >>> # prints: torch.Size([1000])
49
+ ```
50
+
51
+ To get the top-5 predictions class names:
52
+
53
+ ```py
54
+ >>> # Get imagenet class mappings
55
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
56
+ >>> urllib.request.urlretrieve(url, filename)
57
+ >>> with open("imagenet_classes.txt", "r") as f:
58
+ ... categories = [s.strip() for s in f.readlines()]
59
+
60
+ >>> # Print top categories per image
61
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
62
+ >>> for i in range(top5_prob.size(0)):
63
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
64
+ >>> # prints class names and probabilities like:
65
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
66
+ ```
67
+
68
+ Replace the model name with the variant you want to use, e.g. `tf_efficientnet_lite0`. You can find the IDs in the model summaries at the top of this page.
69
+
70
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
71
+
72
+ ## How do I finetune this model?
73
+
74
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
75
+
76
+ ```py
77
+ >>> model = timm.create_model('tf_efficientnet_lite0', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
78
+ ```
79
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
80
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
81
+
82
+ ## How do I train this model?
83
+
84
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
85
+
86
+ ## Citation
87
+
88
+ ```BibTeX
89
+ @misc{tan2020efficientnet,
90
+ title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks},
91
+ author={Mingxing Tan and Quoc V. Le},
92
+ year={2020},
93
+ eprint={1905.11946},
94
+ archivePrefix={arXiv},
95
+ primaryClass={cs.LG}
96
+ }
97
+ ```
98
+
99
+ <!--
100
+ Type: model-index
101
+ Collections:
102
+ - Name: TF EfficientNet Lite
103
+ Paper:
104
+ Title: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks'
105
+ URL: https://paperswithcode.com/paper/efficientnet-rethinking-model-scaling-for
106
+ Models:
107
+ - Name: tf_efficientnet_lite0
108
+ In Collection: TF EfficientNet Lite
109
+ Metadata:
110
+ FLOPs: 488052032
111
+ Parameters: 4650000
112
+ File Size: 18820223
113
+ Architecture:
114
+ - 1x1 Convolution
115
+ - Average Pooling
116
+ - Batch Normalization
117
+ - Convolution
118
+ - Dense Connections
119
+ - Dropout
120
+ - Inverted Residual Block
121
+ - RELU6
122
+ Tasks:
123
+ - Image Classification
124
+ Training Data:
125
+ - ImageNet
126
+ ID: tf_efficientnet_lite0
127
+ Crop Pct: '0.875'
128
+ Image Size: '224'
129
+ Interpolation: bicubic
130
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1596
131
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite0-0aa007d2.pth
132
+ Results:
133
+ - Task: Image Classification
134
+ Dataset: ImageNet
135
+ Metrics:
136
+ Top 1 Accuracy: 74.83%
137
+ Top 5 Accuracy: 92.17%
138
+ - Name: tf_efficientnet_lite1
139
+ In Collection: TF EfficientNet Lite
140
+ Metadata:
141
+ FLOPs: 773639520
142
+ Parameters: 5420000
143
+ File Size: 21939331
144
+ Architecture:
145
+ - 1x1 Convolution
146
+ - Average Pooling
147
+ - Batch Normalization
148
+ - Convolution
149
+ - Dense Connections
150
+ - Dropout
151
+ - Inverted Residual Block
152
+ - RELU6
153
+ Tasks:
154
+ - Image Classification
155
+ Training Data:
156
+ - ImageNet
157
+ ID: tf_efficientnet_lite1
158
+ Crop Pct: '0.882'
159
+ Image Size: '240'
160
+ Interpolation: bicubic
161
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1607
162
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite1-bde8b488.pth
163
+ Results:
164
+ - Task: Image Classification
165
+ Dataset: ImageNet
166
+ Metrics:
167
+ Top 1 Accuracy: 76.67%
168
+ Top 5 Accuracy: 93.24%
169
+ - Name: tf_efficientnet_lite2
170
+ In Collection: TF EfficientNet Lite
171
+ Metadata:
172
+ FLOPs: 1068494432
173
+ Parameters: 6090000
174
+ File Size: 24658687
175
+ Architecture:
176
+ - 1x1 Convolution
177
+ - Average Pooling
178
+ - Batch Normalization
179
+ - Convolution
180
+ - Dense Connections
181
+ - Dropout
182
+ - Inverted Residual Block
183
+ - RELU6
184
+ Tasks:
185
+ - Image Classification
186
+ Training Data:
187
+ - ImageNet
188
+ ID: tf_efficientnet_lite2
189
+ Crop Pct: '0.89'
190
+ Image Size: '260'
191
+ Interpolation: bicubic
192
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1618
193
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite2-dcccb7df.pth
194
+ Results:
195
+ - Task: Image Classification
196
+ Dataset: ImageNet
197
+ Metrics:
198
+ Top 1 Accuracy: 77.48%
199
+ Top 5 Accuracy: 93.75%
200
+ - Name: tf_efficientnet_lite3
201
+ In Collection: TF EfficientNet Lite
202
+ Metadata:
203
+ FLOPs: 2011534304
204
+ Parameters: 8199999
205
+ File Size: 33161413
206
+ Architecture:
207
+ - 1x1 Convolution
208
+ - Average Pooling
209
+ - Batch Normalization
210
+ - Convolution
211
+ - Dense Connections
212
+ - Dropout
213
+ - Inverted Residual Block
214
+ - RELU6
215
+ Tasks:
216
+ - Image Classification
217
+ Training Data:
218
+ - ImageNet
219
+ ID: tf_efficientnet_lite3
220
+ Crop Pct: '0.904'
221
+ Image Size: '300'
222
+ Interpolation: bilinear
223
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1629
224
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite3-b733e338.pth
225
+ Results:
226
+ - Task: Image Classification
227
+ Dataset: ImageNet
228
+ Metrics:
229
+ Top 1 Accuracy: 79.83%
230
+ Top 5 Accuracy: 94.91%
231
+ - Name: tf_efficientnet_lite4
232
+ In Collection: TF EfficientNet Lite
233
+ Metadata:
234
+ FLOPs: 5164802912
235
+ Parameters: 13010000
236
+ File Size: 52558819
237
+ Architecture:
238
+ - 1x1 Convolution
239
+ - Average Pooling
240
+ - Batch Normalization
241
+ - Convolution
242
+ - Dense Connections
243
+ - Dropout
244
+ - Inverted Residual Block
245
+ - RELU6
246
+ Tasks:
247
+ - Image Classification
248
+ Training Data:
249
+ - ImageNet
250
+ ID: tf_efficientnet_lite4
251
+ Crop Pct: '0.92'
252
+ Image Size: '380'
253
+ Interpolation: bilinear
254
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1640
255
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite4-741542c3.pth
256
+ Results:
257
+ - Task: Image Classification
258
+ Dataset: ImageNet
259
+ Metrics:
260
+ Top 1 Accuracy: 81.54%
261
+ Top 5 Accuracy: 95.66%
262
+ -->
pytorch-image-models/hfdocs/source/models/tf-efficientnet.mdx ADDED
@@ -0,0 +1,669 @@
1
+ # (Tensorflow) EfficientNet
2
+
3
+ **EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrarily scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use \\( 2^N \\) times more computational resources, then we can simply increase the network depth by \\( \alpha ^ N \\), width by \\( \beta ^ N \\), and image size by \\( \gamma ^ N \\), where \\( \alpha, \beta, \gamma \\) are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient \\( \phi \\) to uniformly scale network width, depth, and resolution in a principled way.
4
+
5
+ The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image.
6
+
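+ As a concrete illustration of compound scaling (a sketch using the coefficients reported in the paper, \\( \alpha = 1.2 \\), \\( \beta = 1.1 \\), \\( \gamma = 1.15 \\)):
+
+ ```py
+ >>> # depth, width and resolution multipliers for a given compound coefficient phi
+ >>> alpha, beta, gamma = 1.2, 1.1, 1.15
+ >>> for phi in range(1, 4):
+ ...     d, w, r = alpha ** phi, beta ** phi, gamma ** phi
+ ...     print(f"phi={phi}: depth x{d:.2f}, width x{w:.2f}, resolution x{r:.2f}")
+ >>> # each increment of phi roughly doubles the FLOPs, since alpha * beta**2 * gamma**2 is close to 2
+ ```
+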
7
+ The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2), in addition to [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block).
8
+
9
+ The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu).
10
+
11
+ ## How do I use this model on an image?
12
+
13
+ To load a pretrained model:
14
+
15
+ ```py
16
+ >>> import timm
17
+ >>> model = timm.create_model('tf_efficientnet_b0', pretrained=True)
18
+ >>> model.eval()
19
+ ```
20
+
21
+ To load and preprocess the image:
22
+
23
+ ```py
24
+ >>> import urllib.request
25
+ >>> from PIL import Image
26
+ >>> from timm.data import resolve_data_config
27
+ >>> from timm.data.transforms_factory import create_transform
28
+
29
+ >>> config = resolve_data_config({}, model=model)
30
+ >>> transform = create_transform(**config)
31
+
32
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
33
+ >>> urllib.request.urlretrieve(url, filename)
34
+ >>> img = Image.open(filename).convert('RGB')
35
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
36
+ ```
37
+
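+ Each EfficientNet variant is trained and evaluated at its own resolution (compare the `Image Size` and `Crop Pct` entries in the model summaries on this page), so it is worth inspecting the resolved preprocessing config rather than hard-coding values; the contents shown here are indicative only:
+
+ ```py
+ >>> print(config)
+ >>> # prints something like: {'input_size': (3, 224, 224), 'interpolation': 'bicubic', 'crop_pct': 0.875, ...}
+ ```
+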
38
+ To get the model predictions:
39
+
40
+ ```py
41
+ >>> import torch
42
+ >>> with torch.no_grad():
43
+ ... out = model(tensor)
44
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
45
+ >>> print(probabilities.shape)
46
+ >>> # prints: torch.Size([1000])
47
+ ```
48
+
49
+ To get the top-5 predictions class names:
50
+
51
+ ```py
52
+ >>> # Get imagenet class mappings
53
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
54
+ >>> urllib.request.urlretrieve(url, filename)
55
+ >>> with open("imagenet_classes.txt", "r") as f:
56
+ ... categories = [s.strip() for s in f.readlines()]
57
+
58
+ >>> # Print top categories per image
59
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
60
+ >>> for i in range(top5_prob.size(0)):
61
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
62
+ >>> # prints class names and probabilities like:
63
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
64
+ ```
65
+
66
+ Replace the model name with the variant you want to use, e.g. `tf_efficientnet_b0`. You can find the IDs in the model summaries at the top of this page.
67
+
68
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
69
+
70
+ ## How do I finetune this model?
71
+
72
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
73
+
74
+ ```py
75
+ >>> model = timm.create_model('tf_efficientnet_b0', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
76
+ ```
77
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
78
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
79
+
80
+ ## How do I train this model?
81
+
82
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
83
+
84
+ ## Citation
85
+
86
+ ```BibTeX
87
+ @misc{tan2020efficientnet,
88
+ title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks},
89
+ author={Mingxing Tan and Quoc V. Le},
90
+ year={2020},
91
+ eprint={1905.11946},
92
+ archivePrefix={arXiv},
93
+ primaryClass={cs.LG}
94
+ }
95
+ ```
96
+
97
+ <!--
98
+ Type: model-index
99
+ Collections:
100
+ - Name: TF EfficientNet
101
+ Paper:
102
+ Title: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks'
103
+ URL: https://paperswithcode.com/paper/efficientnet-rethinking-model-scaling-for
104
+ Models:
105
+ - Name: tf_efficientnet_b0
106
+ In Collection: TF EfficientNet
107
+ Metadata:
108
+ FLOPs: 488688572
109
+ Parameters: 5290000
110
+ File Size: 21383997
111
+ Architecture:
112
+ - 1x1 Convolution
113
+ - Average Pooling
114
+ - Batch Normalization
115
+ - Convolution
116
+ - Dense Connections
117
+ - Dropout
118
+ - Inverted Residual Block
119
+ - Squeeze-and-Excitation Block
120
+ - Swish
121
+ Tasks:
122
+ - Image Classification
123
+ Training Techniques:
124
+ - AutoAugment
125
+ - Label Smoothing
126
+ - RMSProp
127
+ - Stochastic Depth
128
+ - Weight Decay
129
+ Training Data:
130
+ - ImageNet
131
+ Training Resources: TPUv3 Cloud TPU
132
+ ID: tf_efficientnet_b0
133
+ LR: 0.256
134
+ Epochs: 350
135
+ Crop Pct: '0.875'
136
+ Momentum: 0.9
137
+ Batch Size: 2048
138
+ Image Size: '224'
139
+ Weight Decay: 1.0e-05
140
+ Interpolation: bicubic
141
+ RMSProp Decay: 0.9
142
+ Label Smoothing: 0.1
143
+ BatchNorm Momentum: 0.99
144
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1241
145
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_aa-827b6e33.pth
146
+ Results:
147
+ - Task: Image Classification
148
+ Dataset: ImageNet
149
+ Metrics:
150
+ Top 1 Accuracy: 76.85%
151
+ Top 5 Accuracy: 93.23%
152
+ - Name: tf_efficientnet_b1
153
+ In Collection: TF EfficientNet
154
+ Metadata:
155
+ FLOPs: 883633200
156
+ Parameters: 7790000
157
+ File Size: 31512534
158
+ Architecture:
159
+ - 1x1 Convolution
160
+ - Average Pooling
161
+ - Batch Normalization
162
+ - Convolution
163
+ - Dense Connections
164
+ - Dropout
165
+ - Inverted Residual Block
166
+ - Squeeze-and-Excitation Block
167
+ - Swish
168
+ Tasks:
169
+ - Image Classification
170
+ Training Techniques:
171
+ - AutoAugment
172
+ - Label Smoothing
173
+ - RMSProp
174
+ - Stochastic Depth
175
+ - Weight Decay
176
+ Training Data:
177
+ - ImageNet
178
+ ID: tf_efficientnet_b1
179
+ LR: 0.256
180
+ Epochs: 350
181
+ Crop Pct: '0.882'
182
+ Momentum: 0.9
183
+ Batch Size: 2048
184
+ Image Size: '240'
185
+ Weight Decay: 1.0e-05
186
+ Interpolation: bicubic
187
+ RMSProp Decay: 0.9
188
+ Label Smoothing: 0.1
189
+ BatchNorm Momentum: 0.99
190
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1251
191
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_aa-ea7a6ee0.pth
192
+ Results:
193
+ - Task: Image Classification
194
+ Dataset: ImageNet
195
+ Metrics:
196
+ Top 1 Accuracy: 78.84%
197
+ Top 5 Accuracy: 94.2%
198
+ - Name: tf_efficientnet_b2
199
+ In Collection: TF EfficientNet
200
+ Metadata:
201
+ FLOPs: 1234321170
202
+ Parameters: 9110000
203
+ File Size: 36797929
204
+ Architecture:
205
+ - 1x1 Convolution
206
+ - Average Pooling
207
+ - Batch Normalization
208
+ - Convolution
209
+ - Dense Connections
210
+ - Dropout
211
+ - Inverted Residual Block
212
+ - Squeeze-and-Excitation Block
213
+ - Swish
214
+ Tasks:
215
+ - Image Classification
216
+ Training Techniques:
217
+ - AutoAugment
218
+ - Label Smoothing
219
+ - RMSProp
220
+ - Stochastic Depth
221
+ - Weight Decay
222
+ Training Data:
223
+ - ImageNet
224
+ ID: tf_efficientnet_b2
225
+ LR: 0.256
226
+ Epochs: 350
227
+ Crop Pct: '0.89'
228
+ Momentum: 0.9
229
+ Batch Size: 2048
230
+ Image Size: '260'
231
+ Weight Decay: 1.0e-05
232
+ Interpolation: bicubic
233
+ RMSProp Decay: 0.9
234
+ Label Smoothing: 0.1
235
+ BatchNorm Momentum: 0.99
236
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1261
237
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_aa-60c94f97.pth
238
+ Results:
239
+ - Task: Image Classification
240
+ Dataset: ImageNet
241
+ Metrics:
242
+ Top 1 Accuracy: 80.07%
243
+ Top 5 Accuracy: 94.9%
244
+ - Name: tf_efficientnet_b3
245
+ In Collection: TF EfficientNet
246
+ Metadata:
247
+ FLOPs: 2275247568
248
+ Parameters: 12230000
249
+ File Size: 49381362
250
+ Architecture:
251
+ - 1x1 Convolution
252
+ - Average Pooling
253
+ - Batch Normalization
254
+ - Convolution
255
+ - Dense Connections
256
+ - Dropout
257
+ - Inverted Residual Block
258
+ - Squeeze-and-Excitation Block
259
+ - Swish
260
+ Tasks:
261
+ - Image Classification
262
+ Training Techniques:
263
+ - AutoAugment
264
+ - Label Smoothing
265
+ - RMSProp
266
+ - Stochastic Depth
267
+ - Weight Decay
268
+ Training Data:
269
+ - ImageNet
270
+ ID: tf_efficientnet_b3
271
+ LR: 0.256
272
+ Epochs: 350
273
+ Crop Pct: '0.904'
274
+ Momentum: 0.9
275
+ Batch Size: 2048
276
+ Image Size: '300'
277
+ Weight Decay: 1.0e-05
278
+ Interpolation: bicubic
279
+ RMSProp Decay: 0.9
280
+ Label Smoothing: 0.1
281
+ BatchNorm Momentum: 0.99
282
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1271
283
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_aa-84b4657e.pth
284
+ Results:
285
+ - Task: Image Classification
286
+ Dataset: ImageNet
287
+ Metrics:
288
+ Top 1 Accuracy: 81.65%
289
+ Top 5 Accuracy: 95.72%
290
+ - Name: tf_efficientnet_b4
291
+ In Collection: TF EfficientNet
292
+ Metadata:
293
+ FLOPs: 5749638672
294
+ Parameters: 19340000
295
+ File Size: 77989689
296
+ Architecture:
297
+ - 1x1 Convolution
298
+ - Average Pooling
299
+ - Batch Normalization
300
+ - Convolution
301
+ - Dense Connections
302
+ - Dropout
303
+ - Inverted Residual Block
304
+ - Squeeze-and-Excitation Block
305
+ - Swish
306
+ Tasks:
307
+ - Image Classification
308
+ Training Techniques:
309
+ - AutoAugment
310
+ - Label Smoothing
311
+ - RMSProp
312
+ - Stochastic Depth
313
+ - Weight Decay
314
+ Training Data:
315
+ - ImageNet
316
+ Training Resources: TPUv3 Cloud TPU
317
+ ID: tf_efficientnet_b4
318
+ LR: 0.256
319
+ Epochs: 350
320
+ Crop Pct: '0.922'
321
+ Momentum: 0.9
322
+ Batch Size: 2048
323
+ Image Size: '380'
324
+ Weight Decay: 1.0e-05
325
+ Interpolation: bicubic
326
+ RMSProp Decay: 0.9
327
+ Label Smoothing: 0.1
328
+ BatchNorm Momentum: 0.99
329
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1281
330
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_aa-818f208c.pth
331
+ Results:
332
+ - Task: Image Classification
333
+ Dataset: ImageNet
334
+ Metrics:
335
+ Top 1 Accuracy: 83.03%
336
+ Top 5 Accuracy: 96.3%
337
+ - Name: tf_efficientnet_b5
338
+ In Collection: TF EfficientNet
339
+ Metadata:
340
+ FLOPs: 13176501888
341
+ Parameters: 30390000
342
+ File Size: 122403150
343
+ Architecture:
344
+ - 1x1 Convolution
345
+ - Average Pooling
346
+ - Batch Normalization
347
+ - Convolution
348
+ - Dense Connections
349
+ - Dropout
350
+ - Inverted Residual Block
351
+ - Squeeze-and-Excitation Block
352
+ - Swish
353
+ Tasks:
354
+ - Image Classification
355
+ Training Techniques:
356
+ - AutoAugment
357
+ - Label Smoothing
358
+ - RMSProp
359
+ - Stochastic Depth
360
+ - Weight Decay
361
+ Training Data:
362
+ - ImageNet
363
+ ID: tf_efficientnet_b5
364
+ LR: 0.256
365
+ Epochs: 350
366
+ Crop Pct: '0.934'
367
+ Momentum: 0.9
368
+ Batch Size: 2048
369
+ Image Size: '456'
370
+ Weight Decay: 1.0e-05
371
+ Interpolation: bicubic
372
+ RMSProp Decay: 0.9
373
+ Label Smoothing: 0.1
374
+ BatchNorm Momentum: 0.99
375
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1291
376
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ra-9a3e5369.pth
377
+ Results:
378
+ - Task: Image Classification
379
+ Dataset: ImageNet
380
+ Metrics:
381
+ Top 1 Accuracy: 83.81%
382
+ Top 5 Accuracy: 96.75%
383
+ - Name: tf_efficientnet_b6
384
+ In Collection: TF EfficientNet
385
+ Metadata:
386
+ FLOPs: 24180518488
387
+ Parameters: 43040000
388
+ File Size: 173232007
389
+ Architecture:
390
+ - 1x1 Convolution
391
+ - Average Pooling
392
+ - Batch Normalization
393
+ - Convolution
394
+ - Dense Connections
395
+ - Dropout
396
+ - Inverted Residual Block
397
+ - Squeeze-and-Excitation Block
398
+ - Swish
399
+ Tasks:
400
+ - Image Classification
401
+ Training Techniques:
402
+ - AutoAugment
403
+ - Label Smoothing
404
+ - RMSProp
405
+ - Stochastic Depth
406
+ - Weight Decay
407
+ Training Data:
408
+ - ImageNet
409
+ ID: tf_efficientnet_b6
410
+ LR: 0.256
411
+ Epochs: 350
412
+ Crop Pct: '0.942'
413
+ Momentum: 0.9
414
+ Batch Size: 2048
415
+ Image Size: '528'
416
+ Weight Decay: 1.0e-05
417
+ Interpolation: bicubic
418
+ RMSProp Decay: 0.9
419
+ Label Smoothing: 0.1
420
+ BatchNorm Momentum: 0.99
421
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1301
422
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_aa-80ba17e4.pth
423
+ Results:
424
+ - Task: Image Classification
425
+ Dataset: ImageNet
426
+ Metrics:
427
+ Top 1 Accuracy: 84.11%
428
+ Top 5 Accuracy: 96.89%
429
+ - Name: tf_efficientnet_b7
430
+ In Collection: TF EfficientNet
431
+ Metadata:
432
+ FLOPs: 48205304880
433
+ Parameters: 66349999
434
+ File Size: 266850607
435
+ Architecture:
436
+ - 1x1 Convolution
437
+ - Average Pooling
438
+ - Batch Normalization
439
+ - Convolution
440
+ - Dense Connections
441
+ - Dropout
442
+ - Inverted Residual Block
443
+ - Squeeze-and-Excitation Block
444
+ - Swish
445
+ Tasks:
446
+ - Image Classification
447
+ Training Techniques:
448
+ - AutoAugment
449
+ - Label Smoothing
450
+ - RMSProp
451
+ - Stochastic Depth
452
+ - Weight Decay
453
+ Training Data:
454
+ - ImageNet
455
+ ID: tf_efficientnet_b7
456
+ LR: 0.256
457
+ Epochs: 350
458
+ Crop Pct: '0.949'
459
+ Momentum: 0.9
460
+ Batch Size: 2048
461
+ Image Size: '600'
462
+ Weight Decay: 1.0e-05
463
+ Interpolation: bicubic
464
+ RMSProp Decay: 0.9
465
+ Label Smoothing: 0.1
466
+ BatchNorm Momentum: 0.99
467
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1312
468
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ra-6c08e654.pth
469
+ Results:
470
+ - Task: Image Classification
471
+ Dataset: ImageNet
472
+ Metrics:
473
+ Top 1 Accuracy: 84.93%
474
+ Top 5 Accuracy: 97.2%
475
+ - Name: tf_efficientnet_b8
476
+ In Collection: TF EfficientNet
477
+ Metadata:
478
+ FLOPs: 80962956270
479
+ Parameters: 87410000
480
+ File Size: 351379853
481
+ Architecture:
482
+ - 1x1 Convolution
483
+ - Average Pooling
484
+ - Batch Normalization
485
+ - Convolution
486
+ - Dense Connections
487
+ - Dropout
488
+ - Inverted Residual Block
489
+ - Squeeze-and-Excitation Block
490
+ - Swish
491
+ Tasks:
492
+ - Image Classification
493
+ Training Techniques:
494
+ - AutoAugment
495
+ - Label Smoothing
496
+ - RMSProp
497
+ - Stochastic Depth
498
+ - Weight Decay
499
+ Training Data:
500
+ - ImageNet
501
+ ID: tf_efficientnet_b8
502
+ LR: 0.256
503
+ Epochs: 350
504
+ Crop Pct: '0.954'
505
+ Momentum: 0.9
506
+ Batch Size: 2048
507
+ Image Size: '672'
508
+ Weight Decay: 1.0e-05
509
+ Interpolation: bicubic
510
+ RMSProp Decay: 0.9
511
+ Label Smoothing: 0.1
512
+ BatchNorm Momentum: 0.99
513
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1323
514
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ra-572d5dd9.pth
515
+ Results:
516
+ - Task: Image Classification
517
+ Dataset: ImageNet
518
+ Metrics:
519
+ Top 1 Accuracy: 85.35%
520
+ Top 5 Accuracy: 97.39%
521
+ - Name: tf_efficientnet_el
522
+ In Collection: TF EfficientNet
523
+ Metadata:
524
+ FLOPs: 9356616096
525
+ Parameters: 10590000
526
+ File Size: 42800271
527
+ Architecture:
528
+ - 1x1 Convolution
529
+ - Average Pooling
530
+ - Batch Normalization
531
+ - Convolution
532
+ - Dense Connections
533
+ - Dropout
534
+ - Inverted Residual Block
535
+ - Squeeze-and-Excitation Block
536
+ - Swish
537
+ Tasks:
538
+ - Image Classification
539
+ Training Data:
540
+ - ImageNet
541
+ ID: tf_efficientnet_el
542
+ Crop Pct: '0.904'
543
+ Image Size: '300'
544
+ Interpolation: bicubic
545
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1551
546
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_el-5143854e.pth
547
+ Results:
548
+ - Task: Image Classification
549
+ Dataset: ImageNet
550
+ Metrics:
551
+ Top 1 Accuracy: 80.45%
552
+ Top 5 Accuracy: 95.17%
553
+ - Name: tf_efficientnet_em
554
+ In Collection: TF EfficientNet
555
+ Metadata:
556
+ FLOPs: 3636607040
557
+ Parameters: 6900000
558
+ File Size: 27933644
559
+ Architecture:
560
+ - 1x1 Convolution
561
+ - Average Pooling
562
+ - Batch Normalization
563
+ - Convolution
564
+ - Dense Connections
565
+ - Dropout
566
+ - Inverted Residual Block
567
+ - Squeeze-and-Excitation Block
568
+ - Swish
569
+ Tasks:
570
+ - Image Classification
571
+ Training Data:
572
+ - ImageNet
573
+ ID: tf_efficientnet_em
574
+ Crop Pct: '0.882'
575
+ Image Size: '240'
576
+ Interpolation: bicubic
577
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1541
578
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_em-e78cfe58.pth
579
+ Results:
580
+ - Task: Image Classification
581
+ Dataset: ImageNet
582
+ Metrics:
583
+ Top 1 Accuracy: 78.71%
584
+ Top 5 Accuracy: 94.33%
585
+ - Name: tf_efficientnet_es
586
+ In Collection: TF EfficientNet
587
+ Metadata:
588
+ FLOPs: 2057577472
589
+ Parameters: 5440000
590
+ File Size: 22008479
591
+ Architecture:
592
+ - 1x1 Convolution
593
+ - Average Pooling
594
+ - Batch Normalization
595
+ - Convolution
596
+ - Dense Connections
597
+ - Dropout
598
+ - Inverted Residual Block
599
+ - Squeeze-and-Excitation Block
600
+ - Swish
601
+ Tasks:
602
+ - Image Classification
603
+ Training Data:
604
+ - ImageNet
605
+ ID: tf_efficientnet_es
606
+ Crop Pct: '0.875'
607
+ Image Size: '224'
608
+ Interpolation: bicubic
609
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1531
610
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_es-ca1afbfe.pth
611
+ Results:
612
+ - Task: Image Classification
613
+ Dataset: ImageNet
614
+ Metrics:
615
+ Top 1 Accuracy: 77.28%
616
+ Top 5 Accuracy: 93.6%
617
+ - Name: tf_efficientnet_l2_ns_475
618
+ In Collection: TF EfficientNet
619
+ Metadata:
620
+ FLOPs: 217795669644
621
+ Parameters: 480310000
622
+ File Size: 1925950424
623
+ Architecture:
624
+ - 1x1 Convolution
625
+ - Average Pooling
626
+ - Batch Normalization
627
+ - Convolution
628
+ - Dense Connections
629
+ - Dropout
630
+ - Inverted Residual Block
631
+ - Squeeze-and-Excitation Block
632
+ - Swish
633
+ Tasks:
634
+ - Image Classification
635
+ Training Techniques:
636
+ - AutoAugment
637
+ - FixRes
638
+ - Label Smoothing
639
+ - Noisy Student
640
+ - RMSProp
641
+ - RandAugment
642
+ - Weight Decay
643
+ Training Data:
644
+ - ImageNet
645
+ - JFT-300M
646
+ Training Resources: TPUv3 Cloud TPU
647
+ ID: tf_efficientnet_l2_ns_475
648
+ LR: 0.128
649
+ Epochs: 350
650
+ Dropout: 0.5
651
+ Crop Pct: '0.936'
652
+ Momentum: 0.9
653
+ Batch Size: 2048
654
+ Image Size: '475'
655
+ Weight Decay: 1.0e-05
656
+ Interpolation: bicubic
657
+ RMSProp Decay: 0.9
658
+ Label Smoothing: 0.1
659
+ BatchNorm Momentum: 0.99
660
+ Stochastic Depth Survival: 0.8
661
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1509
662
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns_475-bebbd00a.pth
663
+ Results:
664
+ - Task: Image Classification
665
+ Dataset: ImageNet
666
+ Metrics:
667
+ Top 1 Accuracy: 88.24%
668
+ Top 5 Accuracy: 98.55%
669
+ -->
pytorch-image-models/hfdocs/source/models/tf-mixnet.mdx ADDED
@@ -0,0 +1,200 @@
1
+ # (Tensorflow) MixNet
2
+
3
+ **MixNet** is a type of convolutional neural network discovered via AutoML that utilises [MixConvs](https://paperswithcode.com/method/mixconv) instead of regular [depthwise convolutions](https://paperswithcode.com/method/depthwise-convolution).
4
+
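+ A MixConv splits the input channels into groups and applies a depthwise convolution with a different kernel size to each group. A simplified sketch (illustrative only, not the exact `timm` implementation):
+
+ ```py
+ >>> import torch
+ >>> import torch.nn as nn
+
+ >>> class MixConv(nn.Module):
+ ...     """Simplified MixConv: each chunk of channels gets its own depthwise kernel size."""
+ ...     def __init__(self, channels, kernel_sizes=(3, 5, 7)):
+ ...         super().__init__()
+ ...         splits = [channels // len(kernel_sizes)] * len(kernel_sizes)
+ ...         splits[0] += channels - sum(splits)  # absorb any remainder in the first group
+ ...         self.splits = splits
+ ...         self.convs = nn.ModuleList([
+ ...             nn.Conv2d(c, c, k, padding=k // 2, groups=c, bias=False)
+ ...             for c, k in zip(splits, kernel_sizes)
+ ...         ])
+ ...     def forward(self, x):
+ ...         chunks = torch.split(x, self.splits, dim=1)
+ ...         return torch.cat([conv(c) for conv, c in zip(self.convs, chunks)], dim=1)
+
+ >>> out = MixConv(48)(torch.randn(1, 48, 32, 32))
+ >>> print(out.shape)
+ >>> # prints: torch.Size([1, 48, 32, 32])
+ ```
+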
5
+ The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu).
6
+
7
+ ## How do I use this model on an image?
8
+
9
+ To load a pretrained model:
10
+
11
+ ```py
12
+ >>> import timm
13
+ >>> model = timm.create_model('tf_mixnet_l', pretrained=True)
14
+ >>> model.eval()
15
+ ```
16
+
17
+ To load and preprocess the image:
18
+
19
+ ```py
20
+ >>> import urllib
21
+ >>> from PIL import Image
22
+ >>> from timm.data import resolve_data_config
23
+ >>> from timm.data.transforms_factory import create_transform
24
+
25
+ >>> config = resolve_data_config({}, model=model)
26
+ >>> transform = create_transform(**config)
27
+
28
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
29
+ >>> urllib.request.urlretrieve(url, filename)
30
+ >>> img = Image.open(filename).convert('RGB')
31
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
32
+ ```
33
+
34
+ To get the model predictions:
35
+
36
+ ```py
37
+ >>> import torch
38
+ >>> with torch.no_grad():
39
+ ... out = model(tensor)
40
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
41
+ >>> print(probabilities.shape)
42
+ >>> # prints: torch.Size([1000])
43
+ ```
44
+
45
+ To get the top-5 predictions class names:
46
+
47
+ ```py
48
+ >>> # Get imagenet class mappings
49
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
50
+ >>> urllib.request.urlretrieve(url, filename)
51
+ >>> with open("imagenet_classes.txt", "r") as f:
52
+ ... categories = [s.strip() for s in f.readlines()]
53
+
54
+ >>> # Print top categories per image
55
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
56
+ >>> for i in range(top5_prob.size(0)):
57
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
58
+ >>> # prints class names and probabilities like:
59
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
60
+ ```
61
+
62
+ Replace the model name with the variant you want to use, e.g. `tf_mixnet_l`. You can find the IDs in the model summaries at the top of this page.
63
+
64
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
65
+
66
+ ## How do I finetune this model?
67
+
68
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
69
+
70
+ ```py
71
+ >>> model = timm.create_model('tf_mixnet_l', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
72
+ ```
73
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
74
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
75
+
76
+ ## How do I train this model?
77
+
78
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
79
+
80
+ ## Citation
81
+
82
+ ```BibTeX
83
+ @misc{tan2019mixconv,
84
+ title={MixConv: Mixed Depthwise Convolutional Kernels},
85
+ author={Mingxing Tan and Quoc V. Le},
86
+ year={2019},
87
+ eprint={1907.09595},
88
+ archivePrefix={arXiv},
89
+ primaryClass={cs.CV}
90
+ }
91
+ ```
92
+
93
+ <!--
94
+ Type: model-index
95
+ Collections:
96
+ - Name: TF MixNet
97
+ Paper:
98
+ Title: 'MixConv: Mixed Depthwise Convolutional Kernels'
99
+ URL: https://paperswithcode.com/paper/mixnet-mixed-depthwise-convolutional-kernels
100
+ Models:
101
+ - Name: tf_mixnet_l
102
+ In Collection: TF MixNet
103
+ Metadata:
104
+ FLOPs: 688674516
105
+ Parameters: 7330000
106
+ File Size: 29620756
107
+ Architecture:
108
+ - Batch Normalization
109
+ - Dense Connections
110
+ - Dropout
111
+ - Global Average Pooling
112
+ - Grouped Convolution
113
+ - MixConv
114
+ - Squeeze-and-Excitation Block
115
+ - Swish
116
+ Tasks:
117
+ - Image Classification
118
+ Training Techniques:
119
+ - MNAS
120
+ Training Data:
121
+ - ImageNet
122
+ ID: tf_mixnet_l
123
+ Crop Pct: '0.875'
124
+ Image Size: '224'
125
+ Interpolation: bicubic
126
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1720
127
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_l-6c92e0c8.pth
128
+ Results:
129
+ - Task: Image Classification
130
+ Dataset: ImageNet
131
+ Metrics:
132
+ Top 1 Accuracy: 78.78%
133
+ Top 5 Accuracy: 94.0%
134
+ - Name: tf_mixnet_m
135
+ In Collection: TF MixNet
136
+ Metadata:
137
+ FLOPs: 416633502
138
+ Parameters: 5010000
139
+ File Size: 20310871
140
+ Architecture:
141
+ - Batch Normalization
142
+ - Dense Connections
143
+ - Dropout
144
+ - Global Average Pooling
145
+ - Grouped Convolution
146
+ - MixConv
147
+ - Squeeze-and-Excitation Block
148
+ - Swish
149
+ Tasks:
150
+ - Image Classification
151
+ Training Techniques:
152
+ - MNAS
153
+ Training Data:
154
+ - ImageNet
155
+ ID: tf_mixnet_m
156
+ Crop Pct: '0.875'
157
+ Image Size: '224'
158
+ Interpolation: bicubic
159
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1709
160
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_m-0f4d8805.pth
161
+ Results:
162
+ - Task: Image Classification
163
+ Dataset: ImageNet
164
+ Metrics:
165
+ Top 1 Accuracy: 76.96%
166
+ Top 5 Accuracy: 93.16%
167
+ - Name: tf_mixnet_s
168
+ In Collection: TF MixNet
169
+ Metadata:
170
+ FLOPs: 302587678
171
+ Parameters: 4130000
172
+ File Size: 16738218
173
+ Architecture:
174
+ - Batch Normalization
175
+ - Dense Connections
176
+ - Dropout
177
+ - Global Average Pooling
178
+ - Grouped Convolution
179
+ - MixConv
180
+ - Squeeze-and-Excitation Block
181
+ - Swish
182
+ Tasks:
183
+ - Image Classification
184
+ Training Techniques:
185
+ - MNAS
186
+ Training Data:
187
+ - ImageNet
188
+ ID: tf_mixnet_s
189
+ Crop Pct: '0.875'
190
+ Image Size: '224'
191
+ Interpolation: bicubic
192
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1698
193
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_s-89d3354b.pth
194
+ Results:
195
+ - Task: Image Classification
196
+ Dataset: ImageNet
197
+ Metrics:
198
+ Top 1 Accuracy: 75.68%
199
+ Top 5 Accuracy: 92.64%
200
+ -->
pytorch-image-models/hfdocs/source/models/tf-mobilenet-v3.mdx ADDED
@@ -0,0 +1,387 @@
1
+ # (Tensorflow) MobileNet v3
2
+
3
+ **MobileNetV3** is a convolutional neural network that is designed for mobile phone CPUs. The network design includes the use of a [hard swish activation](https://paperswithcode.com/method/hard-swish) and [squeeze-and-excitation](https://paperswithcode.com/method/squeeze-and-excitation-block) modules in the [MBConv blocks](https://paperswithcode.com/method/inverted-residual-block).
4
+
5
+ The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models).
6
+
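+ As a rough sketch of those two ingredients (illustrative only, not the `timm` code; the reduction ratio and the plain sigmoid gate are simplifying assumptions):
+
+ ```py
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ def hard_swish(x):
+     # x * ReLU6(x + 3) / 6, a cheap piecewise-linear stand-in for swish
+     return x * F.relu6(x + 3.0) / 6.0
+
+ class SqueezeExcite(nn.Module):
+     """Channel attention of the kind used inside MBConv blocks (simplified)."""
+     def __init__(self, channels, reduction=4):
+         super().__init__()
+         self.fc1 = nn.Conv2d(channels, channels // reduction, 1)
+         self.fc2 = nn.Conv2d(channels // reduction, channels, 1)
+
+     def forward(self, x):
+         s = x.mean((2, 3), keepdim=True)   # global average pool
+         s = F.relu(self.fc1(s))
+         s = torch.sigmoid(self.fc2(s))     # MobileNetV3 uses a hard sigmoid gate here
+         return x * s
+
+ x = torch.randn(1, 16, 56, 56)
+ print(SqueezeExcite(16)(hard_swish(x)).shape)  # torch.Size([1, 16, 56, 56])
+ ```
+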
7
+ ## How do I use this model on an image?
8
+
9
+ To load a pretrained model:
10
+
11
+ ```py
12
+ >>> import timm
13
+ >>> model = timm.create_model('tf_mobilenetv3_large_075', pretrained=True)
14
+ >>> model.eval()
15
+ ```
16
+
17
+ To load and preprocess the image:
18
+
19
+ ```py
20
+ >>> import urllib
21
+ >>> from PIL import Image
22
+ >>> from timm.data import resolve_data_config
23
+ >>> from timm.data.transforms_factory import create_transform
24
+
25
+ >>> config = resolve_data_config({}, model=model)
26
+ >>> transform = create_transform(**config)
27
+
28
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
29
+ >>> urllib.request.urlretrieve(url, filename)
30
+ >>> img = Image.open(filename).convert('RGB')
31
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
32
+ ```
33
+
34
+ To get the model predictions:
35
+
36
+ ```py
37
+ >>> import torch
38
+ >>> with torch.no_grad():
39
+ ... out = model(tensor)
40
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
41
+ >>> print(probabilities.shape)
42
+ >>> # prints: torch.Size([1000])
43
+ ```
44
+
45
+ To get the top-5 prediction class names:
46
+
47
+ ```py
48
+ >>> # Get imagenet class mappings
49
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
50
+ >>> urllib.request.urlretrieve(url, filename)
51
+ >>> with open("imagenet_classes.txt", "r") as f:
52
+ ... categories = [s.strip() for s in f.readlines()]
53
+
54
+ >>> # Print top categories per image
55
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
56
+ >>> for i in range(top5_prob.size(0)):
57
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
58
+ >>> # prints class names and probabilities like:
59
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
60
+ ```
61
+
62
+ Replace the model name with the variant you want to use, e.g. `tf_mobilenetv3_large_075`. You can find the IDs in the model summaries at the top of this page.
63
+
64
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
65
+
66
+ ## How do I finetune this model?
67
+
68
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
69
+
70
+ ```py
71
+ >>> model = timm.create_model('tf_mobilenetv3_large_075', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
72
+ ```
73
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
74
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
75
+
76
+ ## How do I train this model?
77
+
78
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
79
+
80
+ ## Citation
81
+
82
+ ```BibTeX
83
+ @article{DBLP:journals/corr/abs-1905-02244,
84
+ author = {Andrew Howard and
85
+ Mark Sandler and
86
+ Grace Chu and
87
+ Liang{-}Chieh Chen and
88
+ Bo Chen and
89
+ Mingxing Tan and
90
+ Weijun Wang and
91
+ Yukun Zhu and
92
+ Ruoming Pang and
93
+ Vijay Vasudevan and
94
+ Quoc V. Le and
95
+ Hartwig Adam},
96
+ title = {Searching for MobileNetV3},
97
+ journal = {CoRR},
98
+ volume = {abs/1905.02244},
99
+ year = {2019},
100
+ url = {http://arxiv.org/abs/1905.02244},
101
+ archivePrefix = {arXiv},
102
+ eprint = {1905.02244},
103
+ timestamp = {Tue, 12 Jan 2021 15:30:06 +0100},
104
+ biburl = {https://dblp.org/rec/journals/corr/abs-1905-02244.bib},
105
+ bibsource = {dblp computer science bibliography, https://dblp.org}
106
+ }
107
+ ```
108
+
109
+ <!--
110
+ Type: model-index
111
+ Collections:
112
+ - Name: TF MobileNet V3
113
+ Paper:
114
+ Title: Searching for MobileNetV3
115
+ URL: https://paperswithcode.com/paper/searching-for-mobilenetv3
116
+ Models:
117
+ - Name: tf_mobilenetv3_large_075
118
+ In Collection: TF MobileNet V3
119
+ Metadata:
120
+ FLOPs: 194323712
121
+ Parameters: 3990000
122
+ File Size: 16097377
123
+ Architecture:
124
+ - 1x1 Convolution
125
+ - Batch Normalization
126
+ - Convolution
127
+ - Dense Connections
128
+ - Depthwise Separable Convolution
129
+ - Dropout
130
+ - Global Average Pooling
131
+ - Hard Swish
132
+ - Inverted Residual Block
133
+ - ReLU
134
+ - Residual Connection
135
+ - Softmax
136
+ - Squeeze-and-Excitation Block
137
+ Tasks:
138
+ - Image Classification
139
+ Training Techniques:
140
+ - RMSProp
141
+ - Weight Decay
142
+ Training Data:
143
+ - ImageNet
144
+ Training Resources: 4x4 TPU Pod
145
+ ID: tf_mobilenetv3_large_075
146
+ LR: 0.1
147
+ Dropout: 0.8
148
+ Crop Pct: '0.875'
149
+ Momentum: 0.9
150
+ Batch Size: 4096
151
+ Image Size: '224'
152
+ Weight Decay: 1.0e-05
153
+ Interpolation: bilinear
154
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L394
155
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth
156
+ Results:
157
+ - Task: Image Classification
158
+ Dataset: ImageNet
159
+ Metrics:
160
+ Top 1 Accuracy: 73.45%
161
+ Top 5 Accuracy: 91.34%
162
+ - Name: tf_mobilenetv3_large_100
163
+ In Collection: TF MobileNet V3
164
+ Metadata:
165
+ FLOPs: 274535288
166
+ Parameters: 5480000
167
+ File Size: 22076649
168
+ Architecture:
169
+ - 1x1 Convolution
170
+ - Batch Normalization
171
+ - Convolution
172
+ - Dense Connections
173
+ - Depthwise Separable Convolution
174
+ - Dropout
175
+ - Global Average Pooling
176
+ - Hard Swish
177
+ - Inverted Residual Block
178
+ - ReLU
179
+ - Residual Connection
180
+ - Softmax
181
+ - Squeeze-and-Excitation Block
182
+ Tasks:
183
+ - Image Classification
184
+ Training Techniques:
185
+ - RMSProp
186
+ - Weight Decay
187
+ Training Data:
188
+ - ImageNet
189
+ Training Resources: 4x4 TPU Pod
190
+ ID: tf_mobilenetv3_large_100
191
+ LR: 0.1
192
+ Dropout: 0.8
193
+ Crop Pct: '0.875'
194
+ Momentum: 0.9
195
+ Batch Size: 4096
196
+ Image Size: '224'
197
+ Weight Decay: 1.0e-05
198
+ Interpolation: bilinear
199
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L403
200
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth
201
+ Results:
202
+ - Task: Image Classification
203
+ Dataset: ImageNet
204
+ Metrics:
205
+ Top 1 Accuracy: 75.51%
206
+ Top 5 Accuracy: 92.61%
207
+ - Name: tf_mobilenetv3_large_minimal_100
208
+ In Collection: TF MobileNet V3
209
+ Metadata:
210
+ FLOPs: 267216928
211
+ Parameters: 3920000
212
+ File Size: 15836368
213
+ Architecture:
214
+ - 1x1 Convolution
215
+ - Batch Normalization
216
+ - Convolution
217
+ - Dense Connections
218
+ - Depthwise Separable Convolution
219
+ - Dropout
220
+ - Global Average Pooling
221
+ - Hard Swish
222
+ - Inverted Residual Block
223
+ - ReLU
224
+ - Residual Connection
225
+ - Softmax
226
+ - Squeeze-and-Excitation Block
227
+ Tasks:
228
+ - Image Classification
229
+ Training Techniques:
230
+ - RMSProp
231
+ - Weight Decay
232
+ Training Data:
233
+ - ImageNet
234
+ Training Resources: 4x4 TPU Pod
235
+ ID: tf_mobilenetv3_large_minimal_100
236
+ LR: 0.1
237
+ Dropout: 0.8
238
+ Crop Pct: '0.875'
239
+ Momentum: 0.9
240
+ Batch Size: 4096
241
+ Image Size: '224'
242
+ Weight Decay: 1.0e-05
243
+ Interpolation: bilinear
244
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L412
245
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth
246
+ Results:
247
+ - Task: Image Classification
248
+ Dataset: ImageNet
249
+ Metrics:
250
+ Top 1 Accuracy: 72.24%
251
+ Top 5 Accuracy: 90.64%
252
+ - Name: tf_mobilenetv3_small_075
253
+ In Collection: TF MobileNet V3
254
+ Metadata:
255
+ FLOPs: 48457664
256
+ Parameters: 2040000
257
+ File Size: 8242701
258
+ Architecture:
259
+ - 1x1 Convolution
260
+ - Batch Normalization
261
+ - Convolution
262
+ - Dense Connections
263
+ - Depthwise Separable Convolution
264
+ - Dropout
265
+ - Global Average Pooling
266
+ - Hard Swish
267
+ - Inverted Residual Block
268
+ - ReLU
269
+ - Residual Connection
270
+ - Softmax
271
+ - Squeeze-and-Excitation Block
272
+ Tasks:
273
+ - Image Classification
274
+ Training Techniques:
275
+ - RMSProp
276
+ - Weight Decay
277
+ Training Data:
278
+ - ImageNet
279
+ Training Resources: 16x GPUs
280
+ ID: tf_mobilenetv3_small_075
281
+ LR: 0.045
282
+ Crop Pct: '0.875'
283
+ Momentum: 0.9
284
+ Batch Size: 4096
285
+ Image Size: '224'
286
+ Weight Decay: 4.0e-05
287
+ Interpolation: bilinear
288
+ RMSProp Decay: 0.9
289
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L421
290
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth
291
+ Results:
292
+ - Task: Image Classification
293
+ Dataset: ImageNet
294
+ Metrics:
295
+ Top 1 Accuracy: 65.72%
296
+ Top 5 Accuracy: 86.13%
297
+ - Name: tf_mobilenetv3_small_100
298
+ In Collection: TF MobileNet V3
299
+ Metadata:
300
+ FLOPs: 65450600
301
+ Parameters: 2540000
302
+ File Size: 10256398
303
+ Architecture:
304
+ - 1x1 Convolution
305
+ - Batch Normalization
306
+ - Convolution
307
+ - Dense Connections
308
+ - Depthwise Separable Convolution
309
+ - Dropout
310
+ - Global Average Pooling
311
+ - Hard Swish
312
+ - Inverted Residual Block
313
+ - ReLU
314
+ - Residual Connection
315
+ - Softmax
316
+ - Squeeze-and-Excitation Block
317
+ Tasks:
318
+ - Image Classification
319
+ Training Techniques:
320
+ - RMSProp
321
+ - Weight Decay
322
+ Training Data:
323
+ - ImageNet
324
+ Training Resources: 16x GPUs
325
+ ID: tf_mobilenetv3_small_100
326
+ LR: 0.045
327
+ Crop Pct: '0.875'
328
+ Momentum: 0.9
329
+ Batch Size: 4096
330
+ Image Size: '224'
331
+ Weight Decay: 4.0e-05
332
+ Interpolation: bilinear
333
+ RMSProp Decay: 0.9
334
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L430
335
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth
336
+ Results:
337
+ - Task: Image Classification
338
+ Dataset: ImageNet
339
+ Metrics:
340
+ Top 1 Accuracy: 67.92%
341
+ Top 5 Accuracy: 87.68%
342
+ - Name: tf_mobilenetv3_small_minimal_100
343
+ In Collection: TF MobileNet V3
344
+ Metadata:
345
+ FLOPs: 60827936
346
+ Parameters: 2040000
347
+ File Size: 8258083
348
+ Architecture:
349
+ - 1x1 Convolution
350
+ - Batch Normalization
351
+ - Convolution
352
+ - Dense Connections
353
+ - Depthwise Separable Convolution
354
+ - Dropout
355
+ - Global Average Pooling
356
+ - Hard Swish
357
+ - Inverted Residual Block
358
+ - ReLU
359
+ - Residual Connection
360
+ - Softmax
361
+ - Squeeze-and-Excitation Block
362
+ Tasks:
363
+ - Image Classification
364
+ Training Techniques:
365
+ - RMSProp
366
+ - Weight Decay
367
+ Training Data:
368
+ - ImageNet
369
+ Training Resources: 16x GPUs
370
+ ID: tf_mobilenetv3_small_minimal_100
371
+ LR: 0.045
372
+ Crop Pct: '0.875'
373
+ Momentum: 0.9
374
+ Batch Size: 4096
375
+ Image Size: '224'
376
+ Weight Decay: 4.0e-05
377
+ Interpolation: bilinear
378
+ RMSProp Decay: 0.9
379
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L439
380
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth
381
+ Results:
382
+ - Task: Image Classification
383
+ Dataset: ImageNet
384
+ Metrics:
385
+ Top 1 Accuracy: 62.91%
386
+ Top 5 Accuracy: 84.24%
387
+ -->
pytorch-image-models/hfdocs/source/models/tresnet.mdx ADDED
@@ -0,0 +1,358 @@
1
+ # TResNet
2
+
3
+ A **TResNet** is a variant of a [ResNet](https://paperswithcode.com/method/resnet) that aims to boost accuracy while maintaining GPU training and inference efficiency. It combines several design tricks, including a SpaceToDepth stem, [Anti-Alias downsampling](https://paperswithcode.com/method/anti-alias-downsampling), In-Place Activated BatchNorm, block selection, and [squeeze-and-excitation layers](https://paperswithcode.com/method/squeeze-and-excitation-block).
4
+
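+ As a flavour of the SpaceToDepth stem, the sketch below folds each 4x4 spatial block into channels before a single convolution (an illustration only, not the `timm` implementation; the block size and the convolution shape are assumptions):
+
+ ```py
+ import torch
+ import torch.nn as nn
+
+ class SpaceToDepthStem(nn.Module):
+     """Fold 4x4 spatial patches into channels, then apply one convolution."""
+     def __init__(self, in_chs=3, out_chs=64, block=4):
+         super().__init__()
+         self.block = block
+         self.conv = nn.Conv2d(in_chs * block * block, out_chs, kernel_size=3, padding=1)
+
+     def forward(self, x):
+         b, c, h, w = x.shape
+         s = self.block
+         x = x.view(b, c, h // s, s, w // s, s)                        # split H and W into blocks
+         x = x.permute(0, 1, 3, 5, 2, 4).reshape(b, c * s * s, h // s, w // s)
+         return self.conv(x)
+
+ x = torch.randn(1, 3, 224, 224)
+ print(SpaceToDepthStem()(x).shape)  # torch.Size([1, 64, 56, 56])
+ ```
+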
5
+ ## How do I use this model on an image?
6
+
7
+ To load a pretrained model:
8
+
9
+ ```py
10
+ >>> import timm
11
+ >>> model = timm.create_model('tresnet_l', pretrained=True)
12
+ >>> model.eval()
13
+ ```
14
+
15
+ To load and preprocess the image:
16
+
17
+ ```py
18
+ >>> import urllib
19
+ >>> from PIL import Image
20
+ >>> from timm.data import resolve_data_config
21
+ >>> from timm.data.transforms_factory import create_transform
22
+
23
+ >>> config = resolve_data_config({}, model=model)
24
+ >>> transform = create_transform(**config)
25
+
26
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
27
+ >>> urllib.request.urlretrieve(url, filename)
28
+ >>> img = Image.open(filename).convert('RGB')
29
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
30
+ ```
31
+
32
+ To get the model predictions:
33
+
34
+ ```py
35
+ >>> import torch
36
+ >>> with torch.no_grad():
37
+ ... out = model(tensor)
38
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
39
+ >>> print(probabilities.shape)
40
+ >>> # prints: torch.Size([1000])
41
+ ```
42
+
43
+ To get the top-5 prediction class names:
44
+
45
+ ```py
46
+ >>> # Get imagenet class mappings
47
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
48
+ >>> urllib.request.urlretrieve(url, filename)
49
+ >>> with open("imagenet_classes.txt", "r") as f:
50
+ ... categories = [s.strip() for s in f.readlines()]
51
+
52
+ >>> # Print top categories per image
53
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
54
+ >>> for i in range(top5_prob.size(0)):
55
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
56
+ >>> # prints class names and probabilities like:
57
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
58
+ ```
59
+
60
+ Replace the model name with the variant you want to use, e.g. `tresnet_l`. You can find the IDs in the model summaries at the top of this page.
61
+
62
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
63
+
64
+ ## How do I finetune this model?
65
+
66
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
67
+
68
+ ```py
69
+ >>> model = timm.create_model('tresnet_l', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
70
+ ```
71
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
72
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
73
+
74
+ ## How do I train this model?
75
+
76
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
77
+
78
+ ## Citation
79
+
80
+ ```BibTeX
81
+ @misc{ridnik2020tresnet,
82
+ title={TResNet: High Performance GPU-Dedicated Architecture},
83
+ author={Tal Ridnik and Hussam Lawen and Asaf Noy and Emanuel Ben Baruch and Gilad Sharir and Itamar Friedman},
84
+ year={2020},
85
+ eprint={2003.13630},
86
+ archivePrefix={arXiv},
87
+ primaryClass={cs.CV}
88
+ }
89
+ ```
90
+
91
+ <!--
92
+ Type: model-index
93
+ Collections:
94
+ - Name: TResNet
95
+ Paper:
96
+ Title: 'TResNet: High Performance GPU-Dedicated Architecture'
97
+ URL: https://paperswithcode.com/paper/tresnet-high-performance-gpu-dedicated
98
+ Models:
99
+ - Name: tresnet_l
100
+ In Collection: TResNet
101
+ Metadata:
102
+ FLOPs: 10873416792
103
+ Parameters: 53456696
104
+ File Size: 224440219
105
+ Architecture:
106
+ - 1x1 Convolution
107
+ - Anti-Alias Downsampling
108
+ - Convolution
109
+ - Global Average Pooling
110
+ - InPlace-ABN
111
+ - Leaky ReLU
112
+ - ReLU
113
+ - Residual Connection
114
+ - Squeeze-and-Excitation Block
115
+ Tasks:
116
+ - Image Classification
117
+ Training Techniques:
118
+ - AutoAugment
119
+ - Cutout
120
+ - Label Smoothing
121
+ - SGD with Momentum
122
+ - Weight Decay
123
+ Training Data:
124
+ - ImageNet
125
+ Training Resources: 8x NVIDIA 100 GPUs
126
+ ID: tresnet_l
127
+ LR: 0.01
128
+ Epochs: 300
129
+ Crop Pct: '0.875'
130
+ Momentum: 0.9
131
+ Image Size: '224'
132
+ Weight Decay: 0.0001
133
+ Interpolation: bilinear
134
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L267
135
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_81_5-235b486c.pth
136
+ Results:
137
+ - Task: Image Classification
138
+ Dataset: ImageNet
139
+ Metrics:
140
+ Top 1 Accuracy: 81.49%
141
+ Top 5 Accuracy: 95.62%
142
+ - Name: tresnet_l_448
143
+ In Collection: TResNet
144
+ Metadata:
145
+ FLOPs: 43488238584
146
+ Parameters: 53456696
147
+ File Size: 224440219
148
+ Architecture:
149
+ - 1x1 Convolution
150
+ - Anti-Alias Downsampling
151
+ - Convolution
152
+ - Global Average Pooling
153
+ - InPlace-ABN
154
+ - Leaky ReLU
155
+ - ReLU
156
+ - Residual Connection
157
+ - Squeeze-and-Excitation Block
158
+ Tasks:
159
+ - Image Classification
160
+ Training Techniques:
161
+ - AutoAugment
162
+ - Cutout
163
+ - Label Smoothing
164
+ - SGD with Momentum
165
+ - Weight Decay
166
+ Training Data:
167
+ - ImageNet
168
+ Training Resources: 8x NVIDIA 100 GPUs
169
+ ID: tresnet_l_448
170
+ LR: 0.01
171
+ Epochs: 300
172
+ Crop Pct: '0.875'
173
+ Momentum: 0.9
174
+ Image Size: '448'
175
+ Weight Decay: 0.0001
176
+ Interpolation: bilinear
177
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L285
178
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_448-940d0cd1.pth
179
+ Results:
180
+ - Task: Image Classification
181
+ Dataset: ImageNet
182
+ Metrics:
183
+ Top 1 Accuracy: 82.26%
184
+ Top 5 Accuracy: 95.98%
185
+ - Name: tresnet_m
186
+ In Collection: TResNet
187
+ Metadata:
188
+ FLOPs: 5733048064
189
+ Parameters: 41282200
190
+ File Size: 125861314
191
+ Architecture:
192
+ - 1x1 Convolution
193
+ - Anti-Alias Downsampling
194
+ - Convolution
195
+ - Global Average Pooling
196
+ - InPlace-ABN
197
+ - Leaky ReLU
198
+ - ReLU
199
+ - Residual Connection
200
+ - Squeeze-and-Excitation Block
201
+ Tasks:
202
+ - Image Classification
203
+ Training Techniques:
204
+ - AutoAugment
205
+ - Cutout
206
+ - Label Smoothing
207
+ - SGD with Momentum
208
+ - Weight Decay
209
+ Training Data:
210
+ - ImageNet
211
+ Training Resources: 8x NVIDIA 100 GPUs
212
+ Training Time: < 24 hours
213
+ ID: tresnet_m
214
+ LR: 0.01
215
+ Epochs: 300
216
+ Crop Pct: '0.875'
217
+ Momentum: 0.9
218
+ Image Size: '224'
219
+ Weight Decay: 0.0001
220
+ Interpolation: bilinear
221
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L261
222
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_m_80_8-dbc13962.pth
223
+ Results:
224
+ - Task: Image Classification
225
+ Dataset: ImageNet
226
+ Metrics:
227
+ Top 1 Accuracy: 80.8%
228
+ Top 5 Accuracy: 94.86%
229
+ - Name: tresnet_m_448
230
+ In Collection: TResNet
231
+ Metadata:
232
+ FLOPs: 22929743104
233
+ Parameters: 29278464
234
+ File Size: 125861314
235
+ Architecture:
236
+ - 1x1 Convolution
237
+ - Anti-Alias Downsampling
238
+ - Convolution
239
+ - Global Average Pooling
240
+ - InPlace-ABN
241
+ - Leaky ReLU
242
+ - ReLU
243
+ - Residual Connection
244
+ - Squeeze-and-Excitation Block
245
+ Tasks:
246
+ - Image Classification
247
+ Training Techniques:
248
+ - AutoAugment
249
+ - Cutout
250
+ - Label Smoothing
251
+ - SGD with Momentum
252
+ - Weight Decay
253
+ Training Data:
254
+ - ImageNet
255
+ Training Resources: 8x NVIDIA 100 GPUs
256
+ ID: tresnet_m_448
257
+ LR: 0.01
258
+ Epochs: 300
259
+ Crop Pct: '0.875'
260
+ Momentum: 0.9
261
+ Image Size: '448'
262
+ Weight Decay: 0.0001
263
+ Interpolation: bilinear
264
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L279
265
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_m_448-bc359d10.pth
266
+ Results:
267
+ - Task: Image Classification
268
+ Dataset: ImageNet
269
+ Metrics:
270
+ Top 1 Accuracy: 81.72%
271
+ Top 5 Accuracy: 95.57%
272
+ - Name: tresnet_xl
273
+ In Collection: TResNet
274
+ Metadata:
275
+ FLOPs: 15162534034
276
+ Parameters: 75646610
277
+ File Size: 314378965
278
+ Architecture:
279
+ - 1x1 Convolution
280
+ - Anti-Alias Downsampling
281
+ - Convolution
282
+ - Global Average Pooling
283
+ - InPlace-ABN
284
+ - Leaky ReLU
285
+ - ReLU
286
+ - Residual Connection
287
+ - Squeeze-and-Excitation Block
288
+ Tasks:
289
+ - Image Classification
290
+ Training Techniques:
291
+ - AutoAugment
292
+ - Cutout
293
+ - Label Smoothing
294
+ - SGD with Momentum
295
+ - Weight Decay
296
+ Training Data:
297
+ - ImageNet
298
+ Training Resources: 8x NVIDIA 100 GPUs
299
+ ID: tresnet_xl
300
+ LR: 0.01
301
+ Epochs: 300
302
+ Crop Pct: '0.875'
303
+ Momentum: 0.9
304
+ Image Size: '224'
305
+ Weight Decay: 0.0001
306
+ Interpolation: bilinear
307
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L273
308
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_xl_82_0-a2d51b00.pth
309
+ Results:
310
+ - Task: Image Classification
311
+ Dataset: ImageNet
312
+ Metrics:
313
+ Top 1 Accuracy: 82.05%
314
+ Top 5 Accuracy: 95.93%
315
+ - Name: tresnet_xl_448
316
+ In Collection: TResNet
317
+ Metadata:
318
+ FLOPs: 60641712730
319
+ Parameters: 75646610
320
+ File Size: 224440219
321
+ Architecture:
322
+ - 1x1 Convolution
323
+ - Anti-Alias Downsampling
324
+ - Convolution
325
+ - Global Average Pooling
326
+ - InPlace-ABN
327
+ - Leaky ReLU
328
+ - ReLU
329
+ - Residual Connection
330
+ - Squeeze-and-Excitation Block
331
+ Tasks:
332
+ - Image Classification
333
+ Training Techniques:
334
+ - AutoAugment
335
+ - Cutout
336
+ - Label Smoothing
337
+ - SGD with Momentum
338
+ - Weight Decay
339
+ Training Data:
340
+ - ImageNet
341
+ Training Resources: 8x NVIDIA 100 GPUs
342
+ ID: tresnet_xl_448
343
+ LR: 0.01
344
+ Epochs: 300
345
+ Crop Pct: '0.875'
346
+ Momentum: 0.9
347
+ Image Size: '448'
348
+ Weight Decay: 0.0001
349
+ Interpolation: bilinear
350
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L291
351
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_448-940d0cd1.pth
352
+ Results:
353
+ - Task: Image Classification
354
+ Dataset: ImageNet
355
+ Metrics:
356
+ Top 1 Accuracy: 83.06%
357
+ Top 5 Accuracy: 96.19%
358
+ -->
pytorch-image-models/hfdocs/source/models/xception.mdx ADDED
@@ -0,0 +1,230 @@
1
+ # Xception
2
+
3
+ **Xception** is a convolutional neural network architecture that relies solely on [depthwise separable convolution layers](https://paperswithcode.com/method/depthwise-separable-convolution).
4
+
5
+ The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models).
6
+
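+ The building block is straightforward to sketch: a depthwise convolution (one filter per input channel) followed by a 1x1 pointwise convolution. The snippet below is an illustration only, not the `timm` implementation; the kernel size and bias settings are assumptions:
+
+ ```py
+ import torch
+ import torch.nn as nn
+
+ class SeparableConv2d(nn.Module):
+     """Depthwise convolution followed by a 1x1 pointwise convolution."""
+     def __init__(self, in_chs, out_chs, kernel_size=3):
+         super().__init__()
+         self.depthwise = nn.Conv2d(in_chs, in_chs, kernel_size,
+                                    padding=kernel_size // 2, groups=in_chs, bias=False)
+         self.pointwise = nn.Conv2d(in_chs, out_chs, 1, bias=False)
+
+     def forward(self, x):
+         return self.pointwise(self.depthwise(x))
+
+ x = torch.randn(1, 32, 64, 64)
+ print(SeparableConv2d(32, 64)(x).shape)  # torch.Size([1, 64, 64, 64])
+ ```
+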
7
+ ## How do I use this model on an image?
8
+
9
+ To load a pretrained model:
10
+
11
+ ```py
12
+ >>> import timm
13
+ >>> model = timm.create_model('xception', pretrained=True)
14
+ >>> model.eval()
15
+ ```
16
+
17
+ To load and preprocess the image:
18
+
19
+ ```py
20
+ >>> import urllib
21
+ >>> from PIL import Image
22
+ >>> from timm.data import resolve_data_config
23
+ >>> from timm.data.transforms_factory import create_transform
24
+
25
+ >>> config = resolve_data_config({}, model=model)
26
+ >>> transform = create_transform(**config)
27
+
28
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
29
+ >>> urllib.request.urlretrieve(url, filename)
30
+ >>> img = Image.open(filename).convert('RGB')
31
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
32
+ ```
33
+
34
+ To get the model predictions:
35
+
36
+ ```py
37
+ >>> import torch
38
+ >>> with torch.no_grad():
39
+ ... out = model(tensor)
40
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
41
+ >>> print(probabilities.shape)
42
+ >>> # prints: torch.Size([1000])
43
+ ```
44
+
45
+ To get the top-5 prediction class names:
46
+
47
+ ```py
48
+ >>> # Get imagenet class mappings
49
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
50
+ >>> urllib.request.urlretrieve(url, filename)
51
+ >>> with open("imagenet_classes.txt", "r") as f:
52
+ ... categories = [s.strip() for s in f.readlines()]
53
+
54
+ >>> # Print top categories per image
55
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
56
+ >>> for i in range(top5_prob.size(0)):
57
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
58
+ >>> # prints class names and probabilities like:
59
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
60
+ ```
61
+
62
+ Replace the model name with the variant you want to use, e.g. `xception`. You can find the IDs in the model summaries at the top of this page.
63
+
64
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
65
+
66
+ ## How do I finetune this model?
67
+
68
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
69
+
70
+ ```py
71
+ >>> model = timm.create_model('xception', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
72
+ ```
73
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
74
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
75
+
76
+ ## How do I train this model?
77
+
78
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
79
+
80
+ ## Citation
81
+
82
+ ```BibTeX
83
84
+ @misc{chollet2017xception,
85
+ title={Xception: Deep Learning with Depthwise Separable Convolutions},
86
+ author={François Chollet},
87
+ year={2017},
88
+ eprint={1610.02357},
89
+ archivePrefix={arXiv},
90
+ primaryClass={cs.CV}
91
+ }
92
+ ```
93
+
94
+ <!--
95
+ Type: model-index
96
+ Collections:
97
+ - Name: Xception
98
+ Paper:
99
+ Title: 'Xception: Deep Learning with Depthwise Separable Convolutions'
100
+ URL: https://paperswithcode.com/paper/xception-deep-learning-with-depthwise
101
+ Models:
102
+ - Name: xception
103
+ In Collection: Xception
104
+ Metadata:
105
+ FLOPs: 10600506792
106
+ Parameters: 22860000
107
+ File Size: 91675053
108
+ Architecture:
109
+ - 1x1 Convolution
110
+ - Convolution
111
+ - Dense Connections
112
+ - Depthwise Separable Convolution
113
+ - Global Average Pooling
114
+ - Max Pooling
115
+ - ReLU
116
+ - Residual Connection
117
+ - Softmax
118
+ Tasks:
119
+ - Image Classification
120
+ Training Data:
121
+ - ImageNet
122
+ ID: xception
123
+ Crop Pct: '0.897'
124
+ Image Size: '299'
125
+ Interpolation: bicubic
126
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/xception.py#L229
127
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/xception-43020ad28.pth
128
+ Results:
129
+ - Task: Image Classification
130
+ Dataset: ImageNet
131
+ Metrics:
132
+ Top 1 Accuracy: 79.05%
133
+ Top 5 Accuracy: 94.4%
134
+ - Name: xception41
135
+ In Collection: Xception
136
+ Metadata:
137
+ FLOPs: 11681983232
138
+ Parameters: 26970000
139
+ File Size: 108422028
140
+ Architecture:
141
+ - 1x1 Convolution
142
+ - Convolution
143
+ - Dense Connections
144
+ - Depthwise Separable Convolution
145
+ - Global Average Pooling
146
+ - Max Pooling
147
+ - ReLU
148
+ - Residual Connection
149
+ - Softmax
150
+ Tasks:
151
+ - Image Classification
152
+ Training Data:
153
+ - ImageNet
154
+ ID: xception41
155
+ Crop Pct: '0.903'
156
+ Image Size: '299'
157
+ Interpolation: bicubic
158
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/xception_aligned.py#L181
159
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_41-e6439c97.pth
160
+ Results:
161
+ - Task: Image Classification
162
+ Dataset: ImageNet
163
+ Metrics:
164
+ Top 1 Accuracy: 78.54%
165
+ Top 5 Accuracy: 94.28%
166
+ - Name: xception65
167
+ In Collection: Xception
168
+ Metadata:
169
+ FLOPs: 17585702144
170
+ Parameters: 39920000
171
+ File Size: 160536780
172
+ Architecture:
173
+ - 1x1 Convolution
174
+ - Convolution
175
+ - Dense Connections
176
+ - Depthwise Separable Convolution
177
+ - Global Average Pooling
178
+ - Max Pooling
179
+ - ReLU
180
+ - Residual Connection
181
+ - Softmax
182
+ Tasks:
183
+ - Image Classification
184
+ Training Data:
185
+ - ImageNet
186
+ ID: xception65
187
+ Crop Pct: '0.903'
188
+ Image Size: '299'
189
+ Interpolation: bicubic
190
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/xception_aligned.py#L200
191
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_65-c9ae96e8.pth
192
+ Results:
193
+ - Task: Image Classification
194
+ Dataset: ImageNet
195
+ Metrics:
196
+ Top 1 Accuracy: 79.55%
197
+ Top 5 Accuracy: 94.66%
198
+ - Name: xception71
199
+ In Collection: Xception
200
+ Metadata:
201
+ FLOPs: 22817346560
202
+ Parameters: 42340000
203
+ File Size: 170295556
204
+ Architecture:
205
+ - 1x1 Convolution
206
+ - Convolution
207
+ - Dense Connections
208
+ - Depthwise Separable Convolution
209
+ - Global Average Pooling
210
+ - Max Pooling
211
+ - ReLU
212
+ - Residual Connection
213
+ - Softmax
214
+ Tasks:
215
+ - Image Classification
216
+ Training Data:
217
+ - ImageNet
218
+ ID: xception71
219
+ Crop Pct: '0.903'
220
+ Image Size: '299'
221
+ Interpolation: bicubic
222
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/xception_aligned.py#L219
223
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_71-8eec7df1.pth
224
+ Results:
225
+ - Task: Image Classification
226
+ Dataset: ImageNet
227
+ Metrics:
228
+ Top 1 Accuracy: 79.88%
229
+ Top 5 Accuracy: 94.93%
230
+ -->
pytorch-image-models/hfdocs/source/reference/data.mdx ADDED
@@ -0,0 +1,9 @@
1
+ # Data
2
+
3
+ [[autodoc]] timm.data.create_dataset
4
+
5
+ [[autodoc]] timm.data.create_loader
6
+
7
+ [[autodoc]] timm.data.create_transform
8
+
9
+ [[autodoc]] timm.data.resolve_data_config
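+
+ A minimal usage sketch tying these helpers together (the dataset path is hypothetical and the argument values are illustrative, not canonical):
+
+ ```py
+ >>> import timm
+ >>> from timm.data import create_dataset, create_loader, resolve_data_config
+
+ >>> model = timm.create_model('resnet50', pretrained=True)
+ >>> config = resolve_data_config({}, model=model)
+
+ >>> # '' selects the default folder / ImageFolder-style reader
+ >>> dataset = create_dataset('', root='path/to/imagenet/val', split='validation')
+ >>> loader = create_loader(
+ ...     dataset,
+ ...     input_size=config['input_size'],
+ ...     batch_size=32,
+ ...     use_prefetcher=False,
+ ...     interpolation=config['interpolation'],
+ ...     mean=config['mean'],
+ ...     std=config['std'],
+ ... )
+ ```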
pytorch-image-models/hfdocs/source/reference/optimizers.mdx ADDED
@@ -0,0 +1,33 @@
1
+ # Optimization
2
+
3
+ This page contains the API reference documentation for learning rate optimizers included in `timm`.
4
+
5
+ ## Optimizers
6
+
7
+ ### Factory functions
8
+
9
+ [[autodoc]] timm.optim.create_optimizer_v2
10
+ [[autodoc]] timm.optim.list_optimizers
11
+ [[autodoc]] timm.optim.get_optimizer_class
12
+
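+ A minimal usage sketch of the factory interface (the model choice, learning rate and weight decay are illustrative values, not recommendations; `list_optimizers` returns the available `opt` names):
+
+ ```py
+ >>> import timm
+ >>> from timm.optim import create_optimizer_v2
+
+ >>> model = timm.create_model('resnet50')
+ >>> optimizer = create_optimizer_v2(model, opt='adamw', lr=1e-3, weight_decay=0.05)
+ >>> # the result is a regular torch.optim.Optimizer, so the usual
+ >>> # zero_grad() / backward() / step() training loop applies
+ ```
+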
13
+ ### Optimizer Classes
14
+
15
+ [[autodoc]] timm.optim.adabelief.AdaBelief
16
+ [[autodoc]] timm.optim.adafactor.Adafactor
17
+ [[autodoc]] timm.optim.adafactor_bv.AdafactorBigVision
18
+ [[autodoc]] timm.optim.adahessian.Adahessian
19
+ [[autodoc]] timm.optim.adamp.AdamP
20
+ [[autodoc]] timm.optim.adan.Adan
21
+ [[autodoc]] timm.optim.adopt.Adopt
22
+ [[autodoc]] timm.optim.lamb.Lamb
23
+ [[autodoc]] timm.optim.laprop.LaProp
24
+ [[autodoc]] timm.optim.lars.Lars
25
+ [[autodoc]] timm.optim.lion.Lion
26
+ [[autodoc]] timm.optim.lookahead.Lookahead
27
+ [[autodoc]] timm.optim.madgrad.MADGRAD
28
+ [[autodoc]] timm.optim.mars.Mars
29
+ [[autodoc]] timm.optim.nadamw.NAdamW
30
+ [[autodoc]] timm.optim.nvnovograd.NvNovoGrad
31
+ [[autodoc]] timm.optim.rmsprop_tf.RMSpropTF
32
+ [[autodoc]] timm.optim.sgdp.SGDP
33
+ [[autodoc]] timm.optim.sgdw.SGDW
pytorch-image-models/hfdocs/source/reference/schedulers.mdx ADDED
@@ -0,0 +1,19 @@
1
+ # Learning Rate Schedulers
2
+
3
+ This page contains the API reference documentation for learning rate schedulers included in `timm`.
4
+
5
+ ## Schedulers
6
+
7
+ ### Factory functions
8
+
9
+ [[autodoc]] timm.scheduler.scheduler_factory.create_scheduler
10
+ [[autodoc]] timm.scheduler.scheduler_factory.create_scheduler_v2
11
+
12
+ ### Scheduler Classes
13
+
14
+ [[autodoc]] timm.scheduler.cosine_lr.CosineLRScheduler
15
+ [[autodoc]] timm.scheduler.multistep_lr.MultiStepLRScheduler
16
+ [[autodoc]] timm.scheduler.plateau_lr.PlateauLRScheduler
17
+ [[autodoc]] timm.scheduler.poly_lr.PolyLRScheduler
18
+ [[autodoc]] timm.scheduler.step_lr.StepLRScheduler
19
+ [[autodoc]] timm.scheduler.tanh_lr.TanhLRScheduler
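+
+ A minimal usage sketch with the cosine scheduler (the values are illustrative; note that, unlike `torch.optim` schedulers, `timm` schedulers are stepped with an epoch index):
+
+ ```py
+ >>> import timm
+ >>> from timm.optim import create_optimizer_v2
+ >>> from timm.scheduler import CosineLRScheduler
+
+ >>> model = timm.create_model('resnet50')
+ >>> optimizer = create_optimizer_v2(model, opt='sgd', lr=0.1, momentum=0.9)
+ >>> scheduler = CosineLRScheduler(optimizer, t_initial=100, lr_min=1e-5,
+ ...                               warmup_t=5, warmup_lr_init=1e-4)
+
+ >>> for epoch in range(100):
+ ...     # ... train for one epoch ...
+ ...     scheduler.step(epoch + 1)
+ ```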
pytorch-image-models/results/benchmark-infer-amp-nchw-pt113-cu117-rtx3090.csv ADDED
@@ -0,0 +1,933 @@
1
+ model,infer_samples_per_sec,infer_step_time,infer_batch_size,infer_img_size,infer_gmacs,infer_macts,param_count
2
+ tinynet_e,49277.65,20.77,1024,106,0.03,0.69,2.04
3
+ mobilenetv3_small_050,45562.75,22.464,1024,224,0.03,0.92,1.59
4
+ lcnet_035,41026.68,24.949,1024,224,0.03,1.04,1.64
5
+ lcnet_050,37575.13,27.242,1024,224,0.05,1.26,1.88
6
+ mobilenetv3_small_075,33062.39,30.961,1024,224,0.05,1.3,2.04
7
+ mobilenetv3_small_100,30012.26,34.109,1024,224,0.06,1.42,2.54
8
+ tf_mobilenetv3_small_minimal_100,28698.14,35.672,1024,224,0.06,1.41,2.04
9
+ tf_mobilenetv3_small_075,27407.51,37.352,1024,224,0.05,1.3,2.04
10
+ tinynet_d,27236.47,37.585,1024,152,0.05,1.42,2.34
11
+ tf_mobilenetv3_small_100,25103.65,40.781,1024,224,0.06,1.42,2.54
12
+ lcnet_075,24140.95,42.406,1024,224,0.1,1.99,2.36
13
+ mnasnet_small,20706.43,49.443,1024,224,0.07,2.16,2.03
14
+ levit_128s,20595.72,49.709,1024,224,0.31,1.88,7.78
15
+ lcnet_100,19684.75,52.01,1024,224,0.16,2.52,2.95
16
+ mobilenetv2_035,18358.82,55.767,1024,224,0.07,2.86,1.68
17
+ regnetx_002,18244.04,56.117,1024,224,0.2,2.16,2.68
18
+ ghostnet_050,17564.96,58.287,1024,224,0.05,1.77,2.59
19
+ regnety_002,17006.07,60.202,1024,224,0.2,2.17,3.16
20
+ mnasnet_050,15925.32,64.29,1024,224,0.11,3.07,2.22
21
+ vit_tiny_r_s16_p8_224,15068.38,67.946,1024,224,0.44,2.06,6.34
22
+ mobilenetv2_050,14843.74,68.974,1024,224,0.1,3.64,1.97
23
+ tinynet_c,14634.69,69.959,1024,184,0.11,2.87,2.46
24
+ semnasnet_050,14248.78,71.855,1024,224,0.11,3.44,2.08
25
+ levit_128,14164.26,72.284,1024,224,0.41,2.71,9.21
26
+ vit_small_patch32_224,13811.36,74.131,1024,224,1.15,2.5,22.88
27
+ mixer_s32_224,13352.85,76.677,1024,224,1.0,2.28,19.1
28
+ cs3darknet_focus_s,12798.44,79.999,1024,256,0.69,2.7,3.27
29
+ lcnet_150,12783.12,80.094,1024,224,0.34,3.79,4.5
30
+ cs3darknet_s,12395.11,82.602,1024,256,0.72,2.97,3.28
31
+ regnetx_004,12366.39,82.791,1024,224,0.4,3.14,5.16
32
+ mobilenetv3_large_075,12001.32,85.313,1024,224,0.16,4.0,3.99
33
+ levit_192,11882.81,86.163,1024,224,0.66,3.2,10.95
34
+ resnet10t,11615.84,88.145,1024,224,1.1,2.43,5.44
35
+ ese_vovnet19b_slim_dw,11539.4,88.729,1024,224,0.4,5.28,1.9
36
+ gernet_s,11496.77,89.058,1024,224,0.75,2.65,8.17
37
+ mobilenetv3_rw,10873.77,94.16,1024,224,0.23,4.41,5.48
38
+ mobilenetv3_large_100,10705.06,95.645,1024,224,0.23,4.41,5.48
39
+ hardcorenas_a,10554.34,97.012,1024,224,0.23,4.38,5.26
40
+ tf_mobilenetv3_large_075,10511.12,97.41,1024,224,0.16,4.0,3.99
41
+ tf_mobilenetv3_large_minimal_100,10371.16,98.725,1024,224,0.22,4.4,3.92
42
+ mnasnet_075,10345.17,98.972,1024,224,0.23,4.77,3.17
43
+ hardcorenas_b,9695.74,105.601,1024,224,0.26,5.09,5.18
44
+ regnety_004,9655.22,106.046,1024,224,0.41,3.89,4.34
45
+ ghostnet_100,9483.99,107.96,1024,224,0.15,3.55,5.18
46
+ hardcorenas_c,9481.05,107.994,1024,224,0.28,5.01,5.52
47
+ tf_mobilenetv3_large_100,9456.79,108.271,1024,224,0.23,4.41,5.48
48
+ regnetx_006,9408.22,108.83,1024,224,0.61,3.98,6.2
49
+ mobilenetv2_075,9313.88,109.932,1024,224,0.22,5.86,2.64
50
+ tinynet_b,9291.99,110.191,1024,188,0.21,4.44,3.73
51
+ mnasnet_b1,9286.4,110.258,1024,224,0.33,5.46,4.38
52
+ mnasnet_100,9263.52,110.53,1024,224,0.33,5.46,4.38
53
+ gluon_resnet18_v1b,9078.31,112.785,1024,224,1.82,2.48,11.69
54
+ semnasnet_075,9069.42,112.895,1024,224,0.23,5.54,2.91
55
+ resnet18,9045.63,113.192,1024,224,1.82,2.48,11.69
56
+ ssl_resnet18,9045.4,113.196,1024,224,1.82,2.48,11.69
57
+ swsl_resnet18,9040.4,113.258,1024,224,1.82,2.48,11.69
58
+ levit_256,8921.47,114.768,1024,224,1.13,4.23,18.89
59
+ hardcorenas_d,8879.46,115.311,1024,224,0.3,4.93,7.5
60
+ regnety_006,8666.48,118.144,1024,224,0.61,4.33,6.06
61
+ seresnet18,8542.99,119.851,1024,224,1.82,2.49,11.78
62
+ mobilenetv2_100,8507.29,120.356,1024,224,0.31,6.68,3.5
63
+ spnasnet_100,8342.04,122.741,1024,224,0.35,6.03,4.42
64
+ legacy_seresnet18,8310.8,123.202,1024,224,1.82,2.49,11.78
65
+ semnasnet_100,8284.16,123.599,1024,224,0.32,6.23,3.89
66
+ mnasnet_a1,8283.57,123.607,1024,224,0.32,6.23,3.89
67
+ regnetx_008,7852.75,130.39,1024,224,0.81,5.15,7.26
68
+ hardcorenas_f,7809.07,131.117,1024,224,0.35,5.57,8.2
69
+ hardcorenas_e,7730.97,132.444,1024,224,0.35,5.65,8.07
70
+ efficientnet_lite0,7722.75,132.584,1024,224,0.4,6.74,4.65
71
+ levit_256d,7689.03,133.165,1024,224,1.4,4.93,26.21
72
+ xcit_nano_12_p16_224_dist,7674.8,133.413,1024,224,0.56,4.17,3.05
73
+ xcit_nano_12_p16_224,7670.11,133.492,1024,224,0.56,4.17,3.05
74
+ resnet18d,7636.48,134.082,1024,224,2.06,3.29,11.71
75
+ ghostnet_130,7625.58,134.274,1024,224,0.24,4.6,7.36
76
+ tf_efficientnetv2_b0,7614.25,134.473,1024,224,0.73,4.77,7.14
77
+ ese_vovnet19b_slim,7588.4,134.932,1024,224,1.69,3.52,3.17
78
+ deit_tiny_distilled_patch16_224,7449.3,137.451,1024,224,1.27,6.01,5.91
79
+ deit_tiny_patch16_224,7398.73,138.391,1024,224,1.26,5.97,5.72
80
+ vit_tiny_patch16_224,7390.78,138.538,1024,224,1.26,5.97,5.72
81
+ regnety_008,7366.88,138.989,1024,224,0.81,5.25,6.26
82
+ tinynet_a,7358.6,139.145,1024,192,0.35,5.41,6.19
83
+ dla46_c,7311.64,140.038,1024,224,0.58,4.5,1.3
84
+ fbnetc_100,7303.94,140.187,1024,224,0.4,6.51,5.57
85
+ mobilevitv2_050,7248.37,141.262,1024,256,0.48,8.04,1.37
86
+ tf_efficientnet_lite0,6816.26,150.218,1024,224,0.4,6.74,4.65
87
+ pit_ti_distilled_224,6788.49,150.832,1024,224,0.71,6.23,5.1
88
+ pit_ti_224,6762.99,151.401,1024,224,0.7,6.19,4.85
89
+ efficientnet_b0,6687.26,153.115,1024,224,0.4,6.75,5.29
90
+ visformer_tiny,6618.81,154.698,1024,224,1.27,5.72,10.32
91
+ rexnet_100,6608.65,154.937,1024,224,0.41,7.44,4.8
92
+ mnasnet_140,6580.58,155.597,1024,224,0.6,7.71,7.12
93
+ efficientnet_b1_pruned,6513.48,157.201,1024,240,0.4,6.21,6.33
94
+ rexnetr_100,6491.35,157.737,1024,224,0.43,7.72,4.88
95
+ mobilenetv2_110d,6395.98,160.089,1024,224,0.45,8.71,4.52
96
+ resnet14t,6341.58,161.462,1024,224,1.69,5.8,10.08
97
+ regnetz_005,6208.75,164.916,1024,224,0.52,5.86,7.12
98
+ dla46x_c,6145.64,166.61,1024,224,0.54,5.66,1.07
99
+ nf_regnet_b0,6055.0,169.104,1024,256,0.64,5.58,8.76
100
+ tf_efficientnet_b0,5992.76,170.862,1024,224,0.4,6.75,5.29
101
+ hrnet_w18_small,5908.15,173.308,1024,224,1.61,5.72,13.19
102
+ edgenext_xx_small,5886.07,173.957,1024,288,0.33,4.21,1.33
103
+ semnasnet_140,5856.63,174.833,1024,224,0.6,8.87,6.11
104
+ resnetblur18,5839.81,175.336,1024,224,2.34,3.39,11.69
105
+ ese_vovnet19b_dw,5825.11,175.779,1024,224,1.34,8.25,6.54
106
+ dla60x_c,5790.89,176.817,1024,224,0.59,6.01,1.32
107
+ mobilenetv2_140,5780.41,177.139,1024,224,0.6,9.57,6.11
108
+ skresnet18,5648.81,181.265,1024,224,1.82,3.24,11.96
109
+ mobilevit_xxs,5528.18,185.22,1024,256,0.42,8.34,1.27
110
+ efficientnet_b0_gn,5401.88,189.551,1024,224,0.42,6.75,5.29
111
+ convnext_atto,5364.13,190.886,1024,288,0.91,6.3,3.7
112
+ gluon_resnet34_v1b,5344.34,191.593,1024,224,3.67,3.74,21.8
113
+ resnet34,5335.05,191.926,1024,224,3.67,3.74,21.8
114
+ efficientnet_lite1,5334.12,191.959,1024,240,0.62,10.14,5.42
115
+ tv_resnet34,5332.7,192.011,1024,224,3.67,3.74,21.8
116
+ vit_base_patch32_224,5287.0,193.67,1024,224,4.41,5.01,88.22
117
+ vit_base_patch32_clip_224,5281.4,193.877,1024,224,4.41,5.01,88.22
118
+ levit_384,5276.74,194.047,1024,224,2.36,6.26,39.13
119
+ pit_xs_distilled_224,5241.4,195.357,1024,224,1.41,7.76,11.0
120
+ pit_xs_224,5237.09,195.517,1024,224,1.4,7.71,10.62
121
+ selecsls42,5225.99,195.932,1024,224,2.94,4.62,30.35
122
+ selecsls42b,5201.55,196.853,1024,224,2.98,4.62,32.46
123
+ gernet_m,5124.67,199.807,1024,224,3.02,5.24,21.14
124
+ pvt_v2_b0,5122.72,199.882,1024,224,0.57,7.99,3.67
125
+ tf_efficientnetv2_b1,5122.21,199.903,1024,240,1.21,7.34,8.14
126
+ mixnet_s,5079.84,201.57,1024,224,0.25,6.25,4.13
127
+ convnext_atto_ols,5062.64,202.255,1024,288,0.96,6.8,3.7
128
+ seresnet34,5028.88,203.611,1024,224,3.67,3.74,21.96
129
+ rexnetr_130,5003.96,204.626,1024,224,0.68,9.81,7.61
130
+ fbnetv3_b,5003.0,204.666,1024,256,0.55,9.1,8.6
131
+ mixer_b32_224,4982.51,205.508,1024,224,3.24,6.29,60.29
132
+ xcit_tiny_12_p16_224_dist,4879.26,209.853,1024,224,1.24,6.29,6.72
133
+ legacy_seresnet34,4875.12,210.034,1024,224,3.67,3.74,21.96
134
+ xcit_tiny_12_p16_224,4870.16,210.244,1024,224,1.24,6.29,6.72
135
+ resnet34d,4834.78,211.786,1024,224,3.91,4.54,21.82
136
+ tf_efficientnet_lite1,4822.03,212.348,1024,240,0.62,10.14,5.42
137
+ resnet26,4794.98,213.545,1024,224,2.36,7.35,16.0
138
+ mobilenetv2_120d,4786.27,213.934,1024,224,0.69,11.97,5.83
139
+ rexnet_130,4770.1,214.659,1024,224,0.68,9.71,7.56
140
+ efficientnet_b0_g16_evos,4743.69,215.854,1024,224,1.01,7.42,8.11
141
+ efficientnet_es,4736.89,216.163,1024,224,1.81,8.73,5.44
142
+ efficientnet_es_pruned,4735.25,216.239,1024,224,1.81,8.73,5.44
143
+ tf_mixnet_s,4735.17,216.242,1024,224,0.25,6.25,4.13
144
+ gmlp_ti16_224,4709.0,217.445,1024,224,1.34,7.55,5.87
145
+ convnext_femto,4672.08,219.162,1024,288,1.3,7.56,5.22
146
+ mobilevitv2_075,4638.17,220.764,1024,256,1.05,12.06,2.87
147
+ resmlp_12_224,4601.92,222.504,1024,224,3.01,5.5,15.35
148
+ resmlp_12_distilled_224,4597.97,222.695,1024,224,3.01,5.5,15.35
149
+ gmixer_12_224,4543.02,225.388,1024,224,2.67,7.26,12.7
150
+ fbnetv3_d,4532.2,225.927,1024,256,0.68,11.1,10.31
151
+ tf_efficientnet_es,4518.93,226.591,1024,224,1.81,8.73,5.44
152
+ selecsls60,4510.1,227.034,1024,224,3.59,5.52,30.67
153
+ mixer_s16_224,4509.29,227.075,1024,224,3.79,5.97,18.53
154
+ regnetx_016,4507.02,227.189,1024,224,1.62,7.93,9.19
155
+ selecsls60b,4490.35,228.033,1024,224,3.63,5.52,32.77
156
+ cs3darknet_focus_m,4487.64,228.171,1024,288,2.51,6.19,9.3
157
+ dla34,4481.03,228.505,1024,224,3.07,5.02,15.74
158
+ crossvit_tiny_240,4476.83,228.722,1024,240,1.57,9.08,7.01
159
+ convnext_femto_ols,4473.25,228.904,1024,288,1.35,8.06,5.23
160
+ vit_tiny_r_s16_p8_384,4463.13,229.423,1024,384,1.34,6.49,6.36
161
+ cs3darknet_m,4452.94,229.949,1024,288,2.63,6.69,9.31
162
+ repvgg_b0,4433.11,230.978,1024,224,3.41,6.15,15.82
163
+ resnet26d,4354.59,235.143,1024,224,2.6,8.15,16.01
164
+ rexnetr_150,4349.97,235.392,1024,224,0.89,11.13,9.78
165
+ resnetaa34d,4309.77,237.588,1024,224,4.43,5.07,21.82
166
+ efficientnet_b2_pruned,4309.58,237.598,1024,260,0.73,9.13,8.31
167
+ darknet17,4296.61,238.316,1024,256,3.26,7.18,14.3
168
+ vit_small_patch32_384,4250.58,240.897,1024,384,3.45,8.25,22.92
169
+ crossvit_9_240,4201.98,243.683,1024,240,1.85,9.52,8.55
170
+ nf_resnet26,4197.39,243.949,1024,224,2.41,7.35,16.0
171
+ efficientnet_b0_g8_gn,4190.39,244.357,1024,224,0.66,6.75,6.56
172
+ rexnet_150,4186.31,244.594,1024,224,0.9,11.21,9.73
173
+ ecaresnet50d_pruned,4182.62,244.81,1024,224,2.53,6.43,19.94
174
+ efficientformer_l1,4075.83,251.225,1024,224,1.3,5.53,12.29
175
+ poolformer_s12,4050.19,252.815,1024,224,1.82,5.53,11.92
176
+ regnety_016,4035.9,253.712,1024,224,1.63,8.04,11.2
177
+ efficientnet_lite2,4013.48,255.128,1024,260,0.89,12.9,6.09
178
+ crossvit_9_dagger_240,3992.98,256.437,1024,240,1.99,9.97,8.78
179
+ efficientnet_cc_b0_8e,3929.29,260.595,1024,224,0.42,9.42,24.01
180
+ efficientnet_cc_b0_4e,3918.01,261.346,1024,224,0.41,9.42,13.31
181
+ darknet21,3914.26,261.596,1024,256,3.93,7.47,20.86
182
+ efficientnet_b1,3876.9,264.116,1024,256,0.77,12.22,7.79
183
+ tf_efficientnet_b1,3834.3,267.052,1024,240,0.71,10.88,7.79
184
+ resnest14d,3793.21,269.944,1024,224,2.76,7.33,10.61
185
+ sedarknet21,3784.73,270.549,1024,256,3.93,7.47,20.95
186
+ resnext26ts,3775.5,271.211,1024,256,2.43,10.52,10.3
187
+ tf_efficientnetv2_b2,3727.06,274.735,1024,260,1.72,9.84,10.1
188
+ convnext_pico,3702.78,276.537,1024,288,2.27,10.08,9.05
189
+ edgenext_x_small,3692.42,277.311,1024,288,0.68,7.5,2.34
190
+ tf_efficientnet_cc_b0_8e,3691.33,277.395,1024,224,0.42,9.42,24.01
191
+ dpn48b,3689.99,277.494,1024,224,1.69,8.92,9.13
192
+ eca_resnext26ts,3675.59,278.583,1024,256,2.43,10.52,10.3
193
+ seresnext26ts,3670.33,278.98,1024,256,2.43,10.52,10.39
194
+ tf_efficientnet_cc_b0_4e,3665.41,279.357,1024,224,0.41,9.42,13.31
195
+ tf_efficientnet_lite2,3662.0,279.618,1024,260,0.89,12.9,6.09
196
+ nf_ecaresnet26,3619.99,282.862,1024,224,2.41,7.36,16.0
197
+ nf_seresnet26,3618.8,282.955,1024,224,2.41,7.36,17.4
198
+ gcresnext26ts,3594.7,284.852,1024,256,2.43,10.53,10.48
199
+ mobilevitv2_100,3589.19,213.964,768,256,1.84,16.08,4.9
200
+ gernet_l,3556.24,287.933,1024,256,4.57,8.0,31.08
201
+ legacy_seresnext26_32x4d,3545.88,288.774,1024,224,2.49,9.39,16.79
202
+ convnext_pico_ols,3532.27,289.886,1024,288,2.37,10.74,9.06
203
+ resnet26t,3503.33,292.28,1024,256,3.35,10.52,16.01
204
+ repvgg_a2,3454.82,296.386,1024,224,5.7,6.26,28.21
205
+ mixnet_m,3418.52,299.526,1024,224,0.36,8.19,5.01
206
+ efficientnet_b3_pruned,3356.7,305.049,1024,300,1.04,11.86,9.86
207
+ nf_regnet_b1,3352.23,305.456,1024,288,1.02,9.2,10.22
208
+ ecaresnext50t_32x4d,3339.2,306.649,1024,224,2.7,10.09,15.41
209
+ ecaresnext26t_32x4d,3337.18,306.833,1024,224,2.7,10.09,15.41
210
+ seresnext26tn_32x4d,3327.66,307.711,1024,224,2.7,10.09,16.81
211
+ seresnext26t_32x4d,3327.23,307.751,1024,224,2.7,10.09,16.81
212
+ seresnext26d_32x4d,3303.57,309.954,1024,224,2.73,10.19,16.81
213
+ tf_mixnet_m,3301.19,310.17,1024,224,0.36,8.19,5.01
214
+ convit_tiny,3286.62,311.554,1024,224,1.26,7.94,5.71
215
+ mobilevit_xs,3278.19,234.265,768,256,1.05,16.33,2.32
216
+ pit_s_224,3268.88,313.245,1024,224,2.88,11.56,23.46
217
+ pit_s_distilled_224,3266.72,313.452,1024,224,2.9,11.64,24.04
218
+ skresnet34,3242.45,315.8,1024,224,3.67,5.13,22.28
219
+ eca_botnext26ts_256,3224.24,317.583,1024,256,2.46,11.6,10.59
220
+ ecaresnet101d_pruned,3223.88,317.616,1024,224,3.48,7.69,24.88
221
+ deit_small_distilled_patch16_224,3220.79,317.922,1024,224,4.63,12.02,22.44
222
+ ecaresnetlight,3215.57,318.439,1024,224,4.11,8.42,30.16
223
+ deit_small_patch16_224,3209.05,319.085,1024,224,4.61,11.95,22.05
224
+ vit_small_patch16_224,3199.98,319.99,1024,224,4.61,11.95,22.05
225
+ eca_halonext26ts,3173.71,322.639,1024,256,2.44,11.46,10.76
226
+ convnextv2_atto,3162.98,323.733,1024,288,0.91,6.3,3.71
227
+ resnetv2_50,3158.28,324.214,1024,224,4.11,11.11,25.55
228
+ nf_regnet_b2,3133.63,326.765,1024,272,1.22,9.27,14.31
229
+ rexnetr_200,3133.12,245.111,768,224,1.59,15.11,16.52
230
+ botnet26t_256,3123.98,327.772,1024,256,3.32,11.98,12.49
231
+ coat_lite_tiny,3113.54,328.874,1024,224,1.6,11.65,5.72
232
+ vit_small_r26_s32_224,3112.34,329.001,1024,224,3.56,9.85,36.43
233
+ bat_resnext26ts,3103.95,329.89,1024,256,2.53,12.51,10.73
234
+ halonet26t,3103.39,329.95,1024,256,3.19,11.69,12.48
235
+ pvt_v2_b1,3095.14,330.828,1024,224,2.12,15.39,14.01
236
+ cspresnet50,3063.22,334.278,1024,256,4.54,11.5,21.62
237
+ resnet32ts,3055.79,335.09,1024,256,4.63,11.58,17.96
238
+ rexnet_200,3051.5,251.668,768,224,1.56,14.91,16.37
239
+ lambda_resnet26t,3046.2,336.144,1024,256,3.02,11.87,10.96
240
+ ssl_resnet50,3030.48,337.887,1024,224,4.11,11.11,25.56
241
+ gluon_resnet50_v1b,3027.43,338.23,1024,224,4.11,11.11,25.56
242
+ tv_resnet50,3027.39,338.232,1024,224,4.11,11.11,25.56
243
+ swsl_resnet50,3027.07,338.268,1024,224,4.11,11.11,25.56
244
+ resnet50,3025.4,338.455,1024,224,4.11,11.11,25.56
245
+ deit3_small_patch16_224_in21ft1k,3023.02,338.721,1024,224,4.61,11.95,22.06
246
+ deit3_small_patch16_224,3017.77,339.312,1024,224,4.61,11.95,22.06
247
+ tresnet_m,3006.54,340.578,1024,224,5.74,7.31,31.39
248
+ resnet33ts,3005.78,340.665,1024,256,4.76,11.66,19.68
249
+ vit_small_resnet26d_224,2994.08,341.995,1024,224,5.07,11.12,63.61
250
+ resnetv2_50t,2989.06,342.569,1024,224,4.32,11.82,25.57
251
+ regnetx_032,2988.15,342.675,1024,224,3.2,11.37,15.3
252
+ dpn68b,2981.13,343.481,1024,224,2.35,10.47,12.61
253
+ hrnet_w18_small_v2,2978.67,343.765,1024,224,2.62,9.65,15.6
254
+ dpn68,2975.29,344.155,1024,224,2.35,10.47,12.61
255
+ resnetv2_50d,2971.15,344.633,1024,224,4.35,11.92,25.57
256
+ efficientnet_em,2938.12,348.51,1024,240,3.04,14.34,6.9
257
+ vit_base_patch32_plus_256,2934.64,348.925,1024,256,7.79,7.76,119.48
258
+ coat_lite_mini,2921.75,350.462,1024,224,2.0,12.25,11.01
259
+ tf_efficientnet_b2,2919.63,350.718,1024,260,1.02,13.83,9.11
260
+ seresnet33ts,2919.51,350.732,1024,256,4.76,11.66,19.78
261
+ eca_resnet33ts,2917.21,351.008,1024,256,4.76,11.66,19.68
262
+ haloregnetz_b,2890.29,354.276,1024,224,1.97,11.94,11.68
263
+ coatnet_pico_rw_224,2884.58,354.98,1024,224,2.05,14.62,10.85
264
+ dla60,2883.99,355.049,1024,224,4.26,10.16,22.04
265
+ gluon_resnet50_v1c,2872.58,356.463,1024,224,4.35,11.92,25.58
266
+ resnet50t,2869.49,356.844,1024,224,4.32,11.82,25.57
267
+ gcresnet33ts,2863.36,357.609,1024,256,4.76,11.68,19.88
268
+ gluon_resnet50_v1d,2853.24,358.879,1024,224,4.35,11.92,25.58
269
+ cspresnet50d,2852.98,358.911,1024,256,4.86,12.55,21.64
270
+ resnet50d,2850.55,359.218,1024,224,4.35,11.92,25.58
271
+ vovnet39a,2845.31,359.878,1024,224,7.09,6.73,22.6
272
+ cspresnet50w,2835.31,361.148,1024,256,5.04,12.19,28.12
273
+ vgg11,2827.53,362.143,1024,224,7.61,7.44,132.86
274
+ tf_efficientnet_em,2826.28,362.303,1024,240,3.04,14.34,6.9
275
+ visformer_small,2818.88,363.251,1024,224,4.88,11.43,40.22
276
+ vit_relpos_small_patch16_224,2792.87,366.637,1024,224,4.59,13.05,21.98
277
+ vit_relpos_base_patch32_plus_rpn_256,2784.26,367.771,1024,256,7.68,8.01,119.42
278
+ vit_srelpos_small_patch16_224,2781.72,368.106,1024,224,4.59,12.16,21.97
279
+ resnest26d,2772.97,369.267,1024,224,3.64,9.97,17.07
280
+ cs3darknet_focus_l,2770.5,369.596,1024,288,5.9,10.16,21.15
281
+ efficientnet_b2a,2767.64,369.979,1024,288,1.12,16.2,9.11
282
+ efficientnet_b2,2766.98,370.065,1024,288,1.12,16.2,9.11
283
+ ese_vovnet39b,2760.12,370.986,1024,224,7.09,6.74,24.57
284
+ legacy_seresnet50,2753.49,371.881,1024,224,3.88,10.6,28.09
285
+ densenet121,2749.79,372.378,1024,224,2.87,6.9,7.98
286
+ tv_densenet121,2747.16,372.735,1024,224,2.87,6.9,7.98
287
+ eca_vovnet39b,2736.53,374.185,1024,224,7.09,6.74,22.6
288
+ coatnet_nano_cc_224,2716.19,376.986,1024,224,2.24,15.02,13.76
289
+ convnextv2_femto,2710.95,377.714,1024,288,1.3,7.56,5.23
290
+ resnetv2_50x1_bit_distilled,2704.93,378.554,1024,224,4.23,11.11,25.55
291
+ selecsls84,2697.2,379.64,1024,224,5.9,7.57,50.95
292
+ flexivit_small,2693.55,380.153,1024,240,5.35,14.18,22.06
293
+ twins_svt_small,2691.25,380.48,1024,224,2.94,13.75,24.06
294
+ mixnet_l,2678.25,382.327,1024,224,0.58,10.84,7.33
295
+ seresnet50,2674.61,382.848,1024,224,4.11,11.13,28.09
296
+ xcit_nano_12_p16_384_dist,2668.39,383.74,1024,384,1.64,12.15,3.05
297
+ cs3darknet_l,2649.93,386.412,1024,288,6.16,10.83,21.16
298
+ coatnet_nano_rw_224,2633.36,388.844,1024,224,2.41,15.41,15.14
299
+ coatnext_nano_rw_224,2627.24,389.75,1024,224,2.47,12.8,14.7
300
+ xcit_tiny_24_p16_224_dist,2617.14,391.253,1024,224,2.34,11.82,12.12
301
+ densenet121d,2616.98,391.278,1024,224,3.11,7.7,8.0
302
+ xcit_tiny_24_p16_224,2614.91,391.584,1024,224,2.34,11.82,12.12
303
+ resnet50_gn,2599.07,393.975,1024,224,4.14,11.11,25.56
304
+ vit_relpos_small_patch16_rpn_224,2596.73,394.33,1024,224,4.59,13.05,21.97
305
+ res2net50_48w_2s,2593.21,394.865,1024,224,4.18,11.72,25.29
306
+ mobilevit_s,2587.93,296.749,768,256,2.03,19.94,5.58
307
+ convnext_nano,2579.36,396.983,1024,288,4.06,13.84,15.59
308
+ tf_mixnet_l,2577.4,397.288,1024,224,0.58,10.84,7.33
309
+ resnetaa50d,2573.35,397.912,1024,224,5.39,12.44,25.58
310
+ vgg11_bn,2556.04,400.607,1024,224,7.62,7.44,132.87
311
+ seresnet50t,2550.33,401.504,1024,224,4.32,11.83,28.1
312
+ ecaresnet50d,2544.16,402.478,1024,224,4.35,11.93,25.58
313
+ gcvit_xxtiny,2518.13,406.639,1024,224,2.14,15.36,12.0
314
+ cs3sedarknet_l,2502.51,409.176,1024,288,6.16,10.83,21.91
315
+ resnetrs50,2497.73,409.96,1024,224,4.48,12.14,35.69
316
+ mobilevitv2_125,2489.87,308.438,768,256,2.86,20.1,7.48
317
+ resnetblur50,2484.87,412.08,1024,224,5.16,12.02,25.56
318
+ cspresnext50,2483.24,412.352,1024,256,4.05,15.86,20.57
319
+ gluon_resnet50_v1s,2459.02,416.413,1024,224,5.47,13.52,25.68
320
+ efficientnet_cc_b1_8e,2458.85,416.443,1024,240,0.75,15.44,39.72
321
+ vit_base_resnet26d_224,2458.01,416.584,1024,224,6.97,13.16,101.4
322
+ densenetblur121d,2444.58,418.873,1024,224,3.11,7.9,8.0
323
+ tv_resnext50_32x4d,2431.41,421.143,1024,224,4.26,14.4,25.03
324
+ ssl_resnext50_32x4d,2431.35,421.155,1024,224,4.26,14.4,25.03
325
+ swsl_resnext50_32x4d,2430.87,421.236,1024,224,4.26,14.4,25.03
326
+ resnext50_32x4d,2429.56,421.462,1024,224,4.26,14.4,25.03
327
+ gluon_resnext50_32x4d,2428.35,421.674,1024,224,4.26,14.4,25.03
328
+ dla60x,2414.82,424.035,1024,224,3.54,13.8,17.35
329
+ efficientnet_lite3,2407.43,212.664,512,300,1.65,21.85,8.2
330
+ regnetx_040,2406.98,425.416,1024,224,3.99,12.2,22.12
331
+ semobilevit_s,2404.63,319.371,768,256,2.03,19.95,5.74
332
+ gcresnext50ts,2402.57,426.196,1024,256,3.75,15.46,15.67
333
+ regnety_040s_gn,2385.11,429.317,1024,224,4.03,12.29,20.65
334
+ resnetblur50d,2367.52,432.507,1024,224,5.4,12.82,25.58
335
+ vovnet57a,2360.79,433.737,1024,224,8.95,7.52,36.64
336
+ tf_efficientnet_cc_b1_8e,2357.71,434.307,1024,240,0.75,15.44,39.72
337
+ resmlp_24_distilled_224,2351.85,435.39,1024,224,5.96,10.91,30.02
338
+ resmlp_24_224,2345.81,436.509,1024,224,5.96,10.91,30.02
339
+ res2net50_14w_8s,2341.48,437.317,1024,224,4.21,13.28,25.06
340
+ coatnet_rmlp_nano_rw_224,2340.53,437.494,1024,224,2.62,20.34,15.15
341
+ sehalonet33ts,2339.44,328.271,768,256,3.55,14.7,13.69
342
+ res2net50_26w_4s,2338.49,437.876,1024,224,4.28,12.61,25.7
343
+ convnext_nano_ols,2328.37,439.779,1024,288,4.38,15.5,15.65
344
+ lambda_resnet26rpt_256,2324.88,165.158,384,256,3.16,11.87,10.99
345
+ gmixer_24_224,2324.82,440.451,1024,224,5.28,14.45,24.72
346
+ gcresnet50t,2321.78,441.028,1024,256,5.42,14.67,25.9
347
+ resnext50d_32x4d,2317.05,441.929,1024,224,4.5,15.2,25.05
348
+ resnest50d_1s4x24d,2309.9,443.296,1024,224,4.43,13.57,25.68
349
+ seresnetaa50d,2309.78,443.319,1024,224,5.4,12.46,28.11
350
+ dla60_res2net,2301.91,444.834,1024,224,4.15,12.34,20.85
351
+ vit_base_r26_s32_224,2301.77,444.864,1024,224,6.81,12.36,101.38
352
+ twins_pcpvt_small,2290.09,447.132,1024,224,3.83,18.08,24.11
353
+ regnetz_b16,2286.62,447.81,1024,288,2.39,16.43,9.72
354
+ ese_vovnet57b,2267.23,451.64,1024,224,8.95,7.52,38.61
355
+ gluon_inception_v3,2265.31,452.024,1024,299,5.73,8.97,23.83
356
+ inception_v3,2260.97,452.888,1024,299,5.73,8.97,23.83
357
+ adv_inception_v3,2258.89,453.305,1024,299,5.73,8.97,23.83
358
+ tf_inception_v3,2255.73,453.943,1024,299,5.73,8.97,23.83
359
+ densenet169,2232.91,458.582,1024,224,3.4,7.3,14.15
360
+ tf_efficientnetv2_b3,2223.64,460.493,1024,300,3.04,15.74,14.36
361
+ nf_ecaresnet50,2211.52,463.019,1024,224,4.21,11.13,25.56
362
+ nf_seresnet50,2207.21,463.921,1024,224,4.21,11.13,28.09
363
+ skresnet50,2206.75,464.017,1024,224,4.11,12.5,25.8
364
+ edgenext_small,2206.31,464.109,1024,320,1.97,14.16,5.59
365
+ seresnext50_32x4d,2197.09,466.058,1024,224,4.26,14.42,27.56
366
+ gluon_seresnext50_32x4d,2196.94,466.091,1024,224,4.26,14.42,27.56
367
+ xcit_small_12_p16_224_dist,2195.81,466.33,1024,224,4.82,12.58,26.25
368
+ legacy_seresnext50_32x4d,2193.34,466.856,1024,224,4.26,14.42,27.56
369
+ xcit_small_12_p16_224,2190.16,467.534,1024,224,4.82,12.58,26.25
370
+ repvgg_b1g4,2188.83,467.817,1024,224,8.15,10.64,39.97
371
+ tf_efficientnet_lite3,2188.37,233.953,512,300,1.65,21.85,8.2
372
+ efficientnetv2_rw_t,2170.03,471.87,1024,288,3.19,16.42,13.65
373
+ gmlp_s16_224,2164.56,473.061,1024,224,4.42,15.1,19.42
374
+ dla60_res2next,2126.26,481.583,1024,224,3.49,13.17,17.03
375
+ gc_efficientnetv2_rw_t,2126.09,481.621,1024,288,3.2,16.45,13.68
376
+ skresnet50d,2112.57,484.703,1024,224,4.36,13.31,25.82
377
+ mobilevitv2_150,2105.0,243.219,512,256,4.09,24.11,10.59
378
+ mobilevitv2_150_in22ft1k,2104.51,243.274,512,256,4.09,24.11,10.59
379
+ convnextv2_pico,2092.16,489.434,1024,288,2.27,10.08,9.07
380
+ poolformer_s24,2090.38,489.851,1024,224,3.41,10.68,21.39
381
+ cs3sedarknet_xdw,2090.04,489.929,1024,256,5.97,17.18,21.6
382
+ res2next50,2085.23,491.055,1024,224,4.2,13.71,24.67
383
+ cspdarknet53,2084.51,491.231,1024,256,6.57,16.81,27.64
384
+ fbnetv3_g,2084.48,491.238,1024,288,1.77,21.09,16.62
385
+ crossvit_small_240,2074.04,493.709,1024,240,5.63,18.17,26.86
386
+ deit3_medium_patch16_224_in21ft1k,2064.27,496.046,1024,224,8.0,15.93,38.85
387
+ deit3_medium_patch16_224,2063.34,496.268,1024,224,8.0,15.93,38.85
388
+ xcit_nano_12_p8_224_dist,2049.01,499.742,1024,224,2.16,15.71,3.05
389
+ xcit_nano_12_p8_224,2044.48,500.848,1024,224,2.16,15.71,3.05
390
+ nf_regnet_b3,2035.39,503.085,1024,320,2.05,14.61,18.59
391
+ cs3darknet_focus_x,2017.73,507.488,1024,256,8.03,10.69,35.02
392
+ vit_relpos_medium_patch16_cls_224,2000.38,511.89,1024,224,8.03,18.24,38.76
393
+ lambda_resnet50ts,1991.21,514.246,1024,256,5.07,17.48,21.54
394
+ swin_tiny_patch4_window7_224,1978.72,517.495,1024,224,4.51,17.06,28.29
395
+ sebotnet33ts_256,1959.75,195.932,384,256,3.89,17.46,13.7
396
+ coatnet_0_rw_224,1957.32,523.148,1024,224,4.43,18.73,27.44
397
+ ecaresnet26t,1953.32,524.224,1024,320,5.24,16.44,16.01
398
+ regnetx_080,1942.5,527.144,1024,224,8.02,14.06,39.57
399
+ gcvit_xtiny,1941.57,527.393,1024,224,2.93,20.26,19.98
400
+ resnetv2_101,1925.46,531.806,1024,224,7.83,16.23,44.54
401
+ regnetx_064,1920.06,533.303,1024,224,6.49,16.37,26.21
402
+ mixnet_xl,1918.85,533.64,1024,224,0.93,14.57,11.9
403
+ edgenext_small_rw,1912.9,535.3,1024,320,2.46,14.85,7.83
404
+ vit_relpos_medium_patch16_224,1907.96,536.687,1024,224,7.97,17.02,38.75
405
+ vit_srelpos_medium_patch16_224,1900.57,538.773,1024,224,7.96,16.21,38.74
406
+ resnest50d,1896.74,539.858,1024,224,5.4,14.36,27.48
407
+ crossvit_15_240,1894.86,540.397,1024,240,5.81,19.77,27.53
408
+ vit_base_resnet50d_224,1892.78,540.989,1024,224,8.73,16.92,110.97
409
+ gluon_resnet101_v1b,1879.26,544.883,1024,224,7.83,16.23,44.55
410
+ tv_resnet101,1878.26,545.172,1024,224,7.83,16.23,44.55
411
+ resnet101,1875.25,546.047,1024,224,7.83,16.23,44.55
412
+ dla102,1873.79,546.472,1024,224,7.19,14.18,33.27
413
+ efficientformer_l3,1868.08,548.142,1024,224,3.93,12.01,31.41
414
+ maxvit_rmlp_pico_rw_256,1866.73,411.402,768,256,1.85,24.86,7.52
415
+ resnetv2_101d,1855.94,551.727,1024,224,8.07,17.04,44.56
416
+ pvt_v2_b2,1835.92,557.745,1024,224,4.05,27.53,25.36
417
+ maxvit_pico_rw_256,1829.44,419.787,768,256,1.83,22.3,7.46
418
+ vgg13,1820.36,562.512,1024,224,11.31,12.25,133.05
419
+ lamhalobotnet50ts_256,1818.57,563.067,1024,256,5.02,18.44,22.57
420
+ crossvit_15_dagger_240,1817.96,563.255,1024,240,6.13,20.43,28.21
421
+ gluon_resnet101_v1c,1816.14,563.82,1024,224,8.08,17.04,44.57
422
+ res2net50_26w_6s,1811.81,565.168,1024,224,6.33,15.28,37.05
423
+ gluon_resnet101_v1d,1808.21,566.295,1024,224,8.08,17.04,44.57
424
+ swin_s3_tiny_224,1803.67,567.72,1024,224,4.64,19.13,28.33
425
+ coatnet_rmlp_0_rw_224,1803.63,567.733,1024,224,4.72,24.89,27.45
426
+ vit_relpos_medium_patch16_rpn_224,1770.72,578.284,1024,224,7.97,17.02,38.73
427
+ halonet50ts,1765.73,579.917,1024,256,5.3,19.2,22.73
428
+ repvgg_b1,1760.92,581.5,1024,224,13.16,10.64,57.42
429
+ coatnet_bn_0_rw_224,1753.99,583.799,1024,224,4.67,22.04,27.44
430
+ wide_resnet50_2,1747.87,585.844,1024,224,11.43,14.4,68.88
431
+ efficientnet_b3,1741.21,294.036,512,320,2.01,26.52,12.23
432
+ efficientnet_b3a,1740.84,294.1,512,320,2.01,26.52,12.23
433
+ densenet201,1738.22,589.096,1024,224,4.34,7.85,20.01
434
+ coatnet_0_224,1727.45,296.376,512,224,4.58,24.01,25.04
435
+ darknetaa53,1721.33,594.876,1024,288,10.08,15.68,36.02
436
+ tf_efficientnet_b3,1720.61,297.558,512,300,1.87,23.83,12.23
437
+ cait_xxs24_224,1720.1,595.301,1024,224,2.53,20.29,11.96
438
+ vit_large_patch32_224,1718.53,595.845,1024,224,15.41,13.32,327.9
439
+ mobilevitv2_175,1697.71,301.572,512,256,5.54,28.13,14.25
440
+ mobilevitv2_175_in22ft1k,1697.51,301.606,512,256,5.54,28.13,14.25
441
+ xcit_tiny_12_p16_384_dist,1694.92,604.145,1024,384,3.64,18.26,6.72
442
+ pvt_v2_b2_li,1694.45,604.311,1024,224,3.91,27.6,22.55
443
+ coat_lite_small,1694.41,604.328,1024,224,3.96,22.09,19.84
444
+ resnetaa101d,1692.59,604.976,1024,224,9.12,17.56,44.57
445
+ legacy_seresnet101,1686.93,607.005,1024,224,7.61,15.74,49.33
446
+ tresnet_v2_l,1685.52,607.515,1024,224,8.81,16.34,46.17
447
+ hrnet_w18,1679.12,609.832,1024,224,4.32,16.31,21.3
448
+ vit_medium_patch16_gap_240,1667.0,614.264,1024,240,9.22,18.81,44.4
449
+ vit_tiny_patch16_384,1660.88,616.528,1024,384,4.7,25.39,5.79
450
+ regnetv_040,1659.81,616.926,1024,288,6.6,20.3,20.64
451
+ convnext_tiny_hnf,1659.73,616.951,1024,288,7.39,22.21,28.59
452
+ seresnet101,1655.13,618.666,1024,224,7.84,16.27,49.33
453
+ vit_base_patch32_384,1651.29,620.109,1024,384,13.06,16.5,88.3
454
+ vit_base_patch32_clip_384,1649.72,620.7,1024,384,13.06,16.5,88.3
455
+ regnety_040,1647.66,621.47,1024,288,6.61,20.3,20.65
456
+ regnety_032,1645.25,622.383,1024,288,5.29,18.61,19.44
457
+ gluon_resnet101_v1s,1642.29,623.505,1024,224,9.19,18.64,44.67
458
+ vgg13_bn,1634.19,626.596,1024,224,11.33,12.25,133.05
459
+ resnetaa50,1631.05,627.803,1024,288,8.52,19.24,25.56
460
+ mixer_b16_224_miil,1628.71,628.706,1024,224,12.62,14.53,59.88
461
+ mixer_b16_224,1627.79,629.061,1024,224,12.62,14.53,59.88
462
+ convnext_tiny,1626.95,629.384,1024,288,7.39,22.21,28.59
463
+ nf_resnet101,1620.77,631.785,1024,224,8.01,16.23,44.55
464
+ swinv2_cr_tiny_224,1618.15,632.807,1024,224,4.66,28.45,28.33
465
+ ecaresnet101d,1609.33,636.276,1024,224,8.08,17.07,44.57
466
+ twins_pcpvt_base,1605.41,637.831,1024,224,6.68,25.25,43.83
467
+ dla102x,1601.78,639.274,1024,224,5.89,19.42,26.31
468
+ ese_vovnet39b_evos,1601.47,639.4,1024,224,7.07,6.74,24.58
469
+ darknet53,1597.03,641.177,1024,288,11.78,15.68,41.61
470
+ resnetblur101d,1596.24,641.494,1024,224,9.12,17.94,44.57
471
+ resnet51q,1592.08,643.172,1024,288,8.07,20.94,35.7
472
+ swinv2_cr_tiny_ns_224,1591.39,643.448,1024,224,4.66,28.45,28.33
473
+ mixer_l32_224,1583.03,646.85,1024,224,11.27,19.86,206.94
474
+ resmlp_36_distilled_224,1577.86,648.967,1024,224,8.91,16.33,44.69
475
+ resmlp_36_224,1577.4,649.158,1024,224,8.91,16.33,44.69
476
+ resnetv2_50d_gn,1561.87,655.61,1024,288,7.24,19.7,25.57
477
+ botnet50ts_256,1556.81,246.643,384,256,5.54,22.23,22.74
478
+ nf_resnet50,1548.83,661.132,1024,288,6.88,18.37,25.56
479
+ resnetv2_50d_frn,1547.35,661.764,1024,224,4.33,11.92,25.59
480
+ halo2botnet50ts_256,1546.64,496.545,768,256,5.02,21.78,22.64
481
+ mvitv2_tiny,1534.63,667.247,1024,224,4.7,21.16,24.17
482
+ gluon_resnext101_32x4d,1505.04,680.366,1024,224,8.01,21.23,44.18
483
+ swsl_resnext101_32x4d,1504.46,680.63,1024,224,8.01,21.23,44.18
484
+ cs3darknet_x,1504.38,680.665,1024,288,10.6,14.36,35.05
485
+ ssl_resnext101_32x4d,1503.93,680.869,1024,224,8.01,21.23,44.18
486
+ resnext101_32x4d,1503.63,681.005,1024,224,8.01,21.23,44.18
487
+ resnest50d_4s2x40d,1497.58,683.755,1024,224,4.4,17.94,30.42
488
+ convnextv2_nano,1488.75,515.858,768,288,4.06,13.84,15.62
489
+ skresnext50_32x4d,1478.83,692.427,1024,224,4.5,17.18,27.48
490
+ mobilevitv2_200,1478.44,519.454,768,256,7.22,32.15,18.45
491
+ tresnet_l,1477.44,693.076,1024,224,10.88,11.9,55.99
492
+ mobilevitv2_200_in22ft1k,1477.37,519.83,768,256,7.22,32.15,18.45
493
+ vgg16,1475.59,693.946,1024,224,15.47,13.56,138.36
494
+ regnetz_c16,1475.58,693.953,1024,320,3.92,25.88,13.46
495
+ resnetv2_50d_evob,1468.61,697.244,1024,224,4.33,11.92,25.59
496
+ vit_medium_patch16_gap_256,1467.03,697.996,1024,256,10.59,22.15,38.86
497
+ res2net50_26w_8s,1466.52,698.239,1024,224,8.37,17.95,48.4
498
+ sequencer2d_s,1465.84,698.562,1024,224,4.96,11.31,27.65
499
+ eca_nfnet_l0,1461.61,700.586,1024,288,7.12,17.29,24.14
500
+ nfnet_l0,1460.27,701.228,1024,288,7.13,17.29,35.07
501
+ cs3sedarknet_x,1435.72,713.217,1024,288,10.6,14.37,35.4
502
+ resnet61q,1434.01,714.068,1024,288,9.87,21.52,36.85
503
+ res2net101_26w_4s,1424.71,718.728,1024,224,8.1,18.45,45.21
504
+ repvgg_b2g4,1415.15,723.581,1024,224,12.63,12.9,61.76
505
+ nest_tiny,1413.2,543.434,768,224,5.83,25.48,17.06
506
+ poolformer_s36,1408.65,726.922,1024,224,5.0,15.82,30.86
507
+ maxvit_rmlp_nano_rw_256,1404.06,546.971,768,256,4.47,31.92,15.5
508
+ convit_small,1397.72,732.608,1024,224,5.76,17.87,27.78
509
+ jx_nest_tiny,1387.89,553.347,768,224,5.83,25.48,17.06
510
+ maxvit_nano_rw_256,1378.18,557.246,768,256,4.46,30.28,15.45
511
+ nf_ecaresnet101,1373.28,745.649,1024,224,8.01,16.27,44.55
512
+ nf_seresnet101,1369.04,747.958,1024,224,8.02,16.27,49.33
513
+ gluon_seresnext101_32x4d,1358.35,753.84,1024,224,8.02,21.26,48.96
514
+ legacy_seresnext101_32x4d,1357.27,754.442,1024,224,8.02,21.26,48.96
515
+ efficientnet_b3_gn,1357.0,282.964,384,320,2.14,28.83,11.73
516
+ nfnet_f0,1356.65,754.786,1024,256,12.62,18.05,71.49
517
+ seresnext101_32x4d,1356.0,755.148,1024,224,8.02,21.26,48.96
518
+ resnetv2_152,1353.28,756.668,1024,224,11.55,22.56,60.19
519
+ xception,1353.17,567.542,768,299,8.4,35.83,22.86
520
+ twins_svt_base,1350.54,758.199,1024,224,8.59,26.33,56.07
521
+ crossvit_18_240,1343.82,761.996,1024,240,9.05,26.26,43.27
522
+ ese_vovnet99b_iabn,1343.72,762.049,1024,224,16.49,11.27,63.2
523
+ maxxvit_rmlp_nano_rw_256,1341.45,763.341,1024,256,4.37,26.05,16.78
524
+ regnetx_120,1339.05,764.708,1024,224,12.13,21.37,46.11
525
+ vgg16_bn,1336.79,765.998,1024,224,15.5,13.56,138.37
526
+ dpn92,1330.6,769.562,1024,224,6.54,18.21,37.67
527
+ tv_resnet152,1329.75,770.054,1024,224,11.56,22.56,60.19
528
+ gcvit_tiny,1328.61,770.718,1024,224,4.79,29.82,28.22
529
+ gluon_resnet152_v1b,1328.2,770.954,1024,224,11.56,22.56,60.19
530
+ resnet152,1327.13,771.578,1024,224,11.56,22.56,60.19
531
+ ese_vovnet99b,1316.93,777.554,1024,224,16.51,11.27,63.2
532
+ pvt_v2_b3,1316.31,777.917,1024,224,6.92,37.7,45.24
533
+ xcit_tiny_12_p8_224_dist,1300.55,787.348,1024,224,4.81,23.6,6.71
534
+ xcit_tiny_12_p8_224,1299.96,787.704,1024,224,4.81,23.6,6.71
535
+ crossvit_18_dagger_240,1298.96,788.312,1024,240,9.5,27.03,44.27
536
+ hrnet_w32,1297.82,789.002,1024,224,8.97,22.02,41.23
537
+ gluon_resnet152_v1c,1296.47,789.825,1024,224,11.8,23.36,60.21
538
+ resnetv2_152d,1296.37,789.881,1024,224,11.8,23.36,60.2
539
+ gluon_resnet152_v1d,1293.21,791.811,1024,224,11.8,23.36,60.21
540
+ vit_small_resnet50d_s16_224,1288.35,794.801,1024,224,13.48,24.82,57.53
541
+ cs3edgenet_x,1281.15,799.266,1024,288,14.59,16.36,47.82
542
+ edgenext_base,1272.74,804.548,1024,320,6.01,24.32,18.51
543
+ regnety_120,1268.38,807.318,1024,224,12.14,21.38,51.82
544
+ dla169,1258.34,813.753,1024,224,11.6,20.2,53.39
545
+ hrnet_w30,1252.2,817.74,1024,224,8.15,21.21,37.71
546
+ xception41p,1249.06,409.896,512,299,9.25,39.86,26.91
547
+ maxxvitv2_nano_rw_256,1248.81,819.967,1024,256,6.26,23.05,23.7
548
+ ecaresnet50t,1243.91,823.198,1024,320,8.82,24.13,25.57
549
+ vgg19,1237.03,827.774,1024,224,19.63,14.86,143.67
550
+ swin_small_patch4_window7_224,1228.67,833.406,1024,224,8.77,27.47,49.61
551
+ efficientnet_el_pruned,1220.93,838.69,1024,300,8.0,30.7,10.59
552
+ densenet161,1220.41,839.05,1024,224,7.79,11.06,28.68
553
+ efficientnet_el,1218.76,840.187,1024,300,8.0,30.7,10.59
554
+ deit_base_distilled_patch16_224,1211.4,845.292,1024,224,17.68,24.05,87.34
555
+ vit_base_patch16_224,1209.0,846.969,1024,224,17.58,23.9,86.57
556
+ vit_base_patch16_224_miil,1208.72,847.163,1024,224,17.59,23.91,94.4
557
+ deit_base_patch16_224,1208.56,847.275,1024,224,17.58,23.9,86.57
558
+ vit_base_patch16_clip_224,1205.77,849.236,1024,224,17.58,23.9,86.57
559
+ gluon_resnet152_v1s,1205.41,849.488,1024,224,12.92,24.96,60.32
560
+ coatnet_rmlp_1_rw_224,1201.89,851.979,1024,224,7.85,35.47,41.69
561
+ maxvit_tiny_rw_224,1200.3,853.107,1024,224,5.11,33.11,29.06
562
+ mixnet_xxl,1193.04,643.721,768,224,2.04,23.43,23.96
563
+ tf_efficientnet_el,1192.11,858.967,1024,300,8.0,30.7,10.59
564
+ swinv2_tiny_window8_256,1191.01,859.761,1024,256,5.96,24.57,28.35
565
+ volo_d1_224,1190.57,860.079,1024,224,6.94,24.43,26.63
566
+ repvgg_b2,1183.91,864.916,1024,224,20.45,12.9,89.02
567
+ legacy_seresnet152,1181.09,866.978,1024,224,11.33,22.08,66.82
568
+ xcit_small_24_p16_224_dist,1175.31,871.245,1024,224,9.1,23.64,47.67
569
+ xcit_small_24_p16_224,1174.76,871.656,1024,224,9.1,23.64,47.67
570
+ inception_v4,1168.76,876.127,1024,299,12.28,15.09,42.68
571
+ seresnet152,1166.02,878.19,1024,224,11.57,22.61,66.82
572
+ twins_pcpvt_large,1163.18,880.331,1024,224,9.84,35.82,60.99
573
+ deit3_base_patch16_224,1159.4,883.201,1024,224,17.58,23.9,86.59
574
+ deit3_base_patch16_224_in21ft1k,1159.14,883.404,1024,224,17.58,23.9,86.59
575
+ cait_xxs36_224,1156.4,885.493,1024,224,3.77,30.34,17.3
576
+ vit_base_patch32_clip_448,1154.9,886.645,1024,448,17.93,23.9,88.34
577
+ regnetx_160,1153.07,888.048,1024,224,15.99,25.52,54.28
578
+ dm_nfnet_f0,1152.75,888.293,1024,256,12.62,18.05,71.49
579
+ sequencer2d_m,1147.71,892.201,1024,224,6.55,14.26,38.31
580
+ repvgg_b3g4,1145.87,893.631,1024,224,17.89,15.1,83.83
581
+ mvitv2_small_cls,1144.7,894.542,1024,224,7.04,28.17,34.87
582
+ mvitv2_small,1143.83,895.224,1024,224,7.0,28.08,34.87
583
+ efficientnet_lite4,1139.64,336.935,384,380,4.04,45.66,13.01
584
+ tnt_s_patch16_224,1135.12,902.091,1024,224,5.24,24.37,23.76
585
+ convmixer_1024_20_ks9_p14,1130.85,905.497,1024,224,5.55,5.51,24.38
586
+ vgg19_bn,1127.16,908.464,1024,224,19.66,14.86,143.68
587
+ vit_relpos_base_patch16_clsgap_224,1124.58,910.547,1024,224,17.6,25.12,86.43
588
+ vit_relpos_base_patch16_cls_224,1122.76,912.026,1024,224,17.6,25.12,86.43
589
+ coatnet_rmlp_1_rw2_224,1119.61,914.591,1024,224,8.11,40.13,41.72
590
+ beit_base_patch16_224,1109.32,923.073,1024,224,17.58,23.9,86.53
591
+ xception41,1107.6,462.251,512,299,9.28,39.86,26.97
592
+ tresnet_xl,1106.51,925.423,1024,224,15.17,15.34,78.44
593
+ beitv2_base_patch16_224,1106.05,925.798,1024,224,17.58,23.9,86.53
594
+ coat_tiny,1099.16,931.604,1024,224,4.35,27.2,5.5
595
+ vit_base_patch16_gap_224,1085.51,943.323,1024,224,17.49,25.59,86.57
596
+ maxvit_tiny_tf_224,1081.57,710.062,768,224,5.6,35.78,30.92
597
+ vit_relpos_base_patch16_224,1078.21,949.713,1024,224,17.51,24.97,86.43
598
+ nf_regnet_b4,1075.82,951.823,1024,384,4.7,28.61,30.21
599
+ coatnet_1_rw_224,1074.48,953.005,1024,224,8.04,34.6,41.72
600
+ dla102x2,1070.83,956.252,1024,224,9.34,29.91,41.28
601
+ pit_b_224,1066.8,479.928,512,224,12.42,32.94,73.76
602
+ pit_b_distilled_224,1063.31,481.504,512,224,12.5,33.07,74.79
603
+ tf_efficientnet_lite4,1058.68,362.703,384,380,4.04,45.66,13.01
604
+ efficientnetv2_s,1057.28,968.508,1024,384,8.44,35.77,21.46
605
+ vit_large_r50_s32_224,1034.79,989.556,1024,224,19.58,24.41,328.99
606
+ vit_small_patch16_36x1_224,1032.1,992.142,1024,224,13.71,35.69,64.67
607
+ efficientnet_b3_g8_gn,1031.26,496.465,512,320,3.2,28.83,14.25
608
+ tf_efficientnetv2_s,1029.13,995.002,1024,384,8.44,35.77,21.46
609
+ flexivit_base,1028.55,995.558,1024,240,20.29,28.36,86.59
610
+ vit_base_patch16_rpn_224,1016.66,1007.208,1024,224,17.49,23.75,86.54
611
+ vit_small_r26_s32_384,1011.11,1012.73,1024,384,10.43,29.85,36.47
612
+ vit_small_patch16_18x2_224,1005.34,1018.547,1024,224,13.71,35.69,64.67
613
+ swinv2_cr_small_224,1000.71,1023.259,1024,224,9.07,50.27,49.7
614
+ efficientnetv2_rw_s,995.91,1028.19,1024,384,8.72,38.03,23.94
615
+ wide_resnet101_2,995.32,1028.801,1024,224,22.8,21.23,126.89
616
+ swinv2_cr_small_ns_224,989.25,1035.114,1024,224,9.08,50.27,49.7
617
+ vit_relpos_base_patch16_rpn_224,986.84,1037.641,1024,224,17.51,24.97,86.41
618
+ coatnet_1_224,984.69,519.944,512,224,8.7,39.0,42.23
619
+ resnet200,983.36,1041.314,1024,224,15.07,32.19,64.67
620
+ dpn98,982.09,1042.657,1024,224,11.73,25.2,61.57
621
+ convnext_small,981.97,1042.782,1024,288,14.39,35.65,50.22
622
+ cs3se_edgenet_x,975.89,1049.279,1024,320,18.01,20.21,50.72
623
+ regnety_080,969.67,1056.01,1024,288,13.22,29.69,39.18
624
+ poolformer_m36,966.97,1058.965,1024,224,8.8,22.02,56.17
625
+ resnest101e,963.69,1062.57,1024,256,13.38,28.66,48.28
626
+ regnetz_b16_evos,955.65,803.632,768,288,2.36,16.43,9.74
627
+ twins_svt_large,954.95,1072.291,1024,224,15.15,35.1,99.27
628
+ pvt_v2_b4,952.02,1075.594,1024,224,10.14,53.74,62.56
629
+ gluon_resnext101_64x4d,944.48,1084.183,1024,224,15.52,31.21,83.46
630
+ regnetv_064,944.32,1084.367,1024,288,10.55,27.11,30.58
631
+ regnety_064,944.18,1084.526,1024,288,10.56,27.11,30.58
632
+ maxvit_rmlp_tiny_rw_256,941.64,815.588,768,256,6.77,46.92,29.15
633
+ regnetz_d8,936.16,1093.814,1024,320,6.19,37.08,23.37
634
+ resnetrs101,936.12,1093.858,1024,288,13.56,28.53,63.62
635
+ regnetz_d32,933.58,1096.833,1024,320,9.33,37.08,27.58
636
+ ig_resnext101_32x8d,930.9,1099.997,1024,224,16.48,31.21,88.79
637
+ swsl_resnext101_32x8d,930.28,1100.725,1024,224,16.48,31.21,88.79
638
+ resnext101_32x8d,929.98,1101.084,1024,224,16.48,31.21,88.79
639
+ ssl_resnext101_32x8d,929.0,1102.24,1024,224,16.48,31.21,88.79
640
+ convnextv2_tiny,925.13,553.423,512,288,7.39,22.21,28.64
641
+ convnextv2_small,924.53,1107.57,1024,224,8.71,21.56,50.32
642
+ maxvit_tiny_rw_256,921.72,833.209,768,256,6.74,44.35,29.07
643
+ inception_resnet_v2,917.69,1115.834,1024,299,13.18,25.06,55.84
644
+ ens_adv_inception_resnet_v2,917.66,1115.871,1024,299,13.18,25.06,55.84
645
+ maxxvit_rmlp_tiny_rw_256,914.74,1119.428,1024,256,6.66,39.76,29.64
646
+ xcit_tiny_24_p16_384_dist,912.61,1122.045,1024,384,6.87,34.29,12.12
647
+ cait_s24_224,908.65,1126.929,1024,224,9.35,40.58,46.92
648
+ pvt_v2_b5,904.89,1131.615,1024,224,11.76,50.92,81.96
649
+ nest_small,902.63,850.834,768,224,10.35,40.04,38.35
650
+ repvgg_b3,901.73,1135.583,1024,224,29.16,15.1,123.09
651
+ maxvit_tiny_pm_256,896.67,1141.994,1024,256,6.61,47.9,30.09
652
+ xception65p,896.53,571.079,512,299,13.91,52.48,39.82
653
+ swin_s3_small_224,896.35,856.792,768,224,9.43,37.84,49.74
654
+ jx_nest_small,892.32,860.663,768,224,10.35,40.04,38.35
655
+ efficientnet_b4,890.89,431.018,384,384,4.51,50.04,19.34
656
+ gmlp_b16_224,885.75,1156.072,1024,224,15.78,30.21,73.08
657
+ gluon_seresnext101_64x4d,885.23,1156.747,1024,224,15.53,31.25,88.23
658
+ hrnet_w40,881.9,1161.12,1024,224,12.75,25.29,57.56
659
+ efficientformer_l7,877.43,1167.027,1024,224,10.17,24.45,82.23
660
+ coat_mini,874.29,1171.227,1024,224,6.82,33.68,10.34
661
+ resnet101d,871.81,1174.559,1024,320,16.48,34.77,44.57
662
+ swin_base_patch4_window7_224,870.1,1176.867,1024,224,15.47,36.63,87.77
663
+ regnetz_040,868.17,884.605,768,320,6.35,37.78,27.12
664
+ regnetz_040h,862.76,890.151,768,320,6.43,37.94,28.94
665
+ mobilevitv2_150_384_in22ft1k,848.7,301.627,256,384,9.2,54.25,10.59
666
+ resnetv2_50d_evos,844.34,909.573,768,288,7.15,19.7,25.59
667
+ tf_efficientnet_b4,838.16,458.136,384,380,4.49,49.49,19.34
668
+ crossvit_base_240,835.31,919.411,768,240,21.22,36.33,105.03
669
+ vit_base_r50_s16_224,821.15,1247.01,1024,224,21.67,35.31,114.69
670
+ xcit_medium_24_p16_224_dist,819.59,1249.397,1024,224,16.13,31.71,84.4
671
+ xcit_medium_24_p16_224,818.73,1250.697,1024,224,16.13,31.71,84.4
672
+ gcvit_small,807.46,1268.151,1024,224,8.57,41.61,51.09
673
+ gluon_xception65,806.21,635.055,512,299,13.96,52.48,39.92
674
+ xception65,800.01,639.983,512,299,13.96,52.48,39.92
675
+ mvitv2_base,799.31,1281.092,1024,224,10.16,40.5,51.47
676
+ hrnet_w44,789.29,1297.348,1024,224,14.94,26.92,67.06
677
+ vit_base_patch16_plus_240,780.68,1311.665,1024,240,27.41,33.08,117.56
678
+ hrnet_w48,780.39,1312.147,1024,224,17.34,28.56,77.47
679
+ swinv2_tiny_window16_256,778.19,657.926,512,256,6.68,39.02,28.35
680
+ tresnet_m_448,775.99,1319.596,1024,448,22.94,29.21,31.39
681
+ xcit_small_12_p16_384_dist,760.88,1345.804,1024,384,14.14,36.51,26.25
682
+ vit_small_patch16_384,750.95,1022.685,768,384,15.52,50.78,22.2
683
+ maxvit_rmlp_small_rw_224,745.49,1373.585,1024,224,10.75,49.3,64.9
684
+ sequencer2d_l,742.48,1379.149,1024,224,9.74,22.12,54.3
685
+ swinv2_small_window8_256,738.39,1386.788,1024,256,11.58,40.14,49.73
686
+ swin_s3_base_224,730.45,1401.854,1024,224,13.69,48.26,71.13
687
+ poolformer_m48,729.44,1403.808,1024,224,11.59,29.17,73.47
688
+ densenet264d_iabn,727.43,1407.671,1024,224,13.47,14.0,72.74
689
+ vit_relpos_base_patch16_plus_240,723.43,1415.468,1024,240,27.3,34.33,117.38
690
+ dpn131,722.72,1416.854,1024,224,16.09,32.97,79.25
691
+ tnt_b_patch16_224,722.12,1418.026,1024,224,14.09,39.01,65.41
692
+ deit3_small_patch16_384,717.36,1070.572,768,384,15.52,50.78,22.21
693
+ deit3_small_patch16_384_in21ft1k,716.76,1071.477,768,384,15.52,50.78,22.21
694
+ swinv2_cr_base_224,715.64,1430.874,1024,224,15.86,59.66,87.88
695
+ eca_nfnet_l1,713.15,1435.867,1024,320,14.92,34.42,41.41
696
+ coatnet_2_rw_224,709.88,721.237,512,224,15.09,49.22,73.87
697
+ swinv2_cr_base_ns_224,709.69,1442.871,1024,224,15.86,59.66,87.88
698
+ coatnet_rmlp_2_rw_224,708.85,722.285,512,224,15.18,54.78,73.88
699
+ convit_base,706.65,1449.076,1024,224,17.52,31.77,86.54
700
+ mobilevitv2_175_384_in22ft1k,703.41,363.928,256,384,12.47,63.29,14.25
701
+ maxvit_small_tf_224,701.58,729.767,512,224,11.66,53.17,68.93
702
+ densenet264,701.03,1460.686,1024,224,12.95,12.8,72.69
703
+ ecaresnet200d,694.19,1475.094,1024,256,20.0,43.15,64.69
704
+ resnetv2_50x1_bitm,691.29,740.624,512,448,16.62,44.46,25.55
705
+ seresnet200d,691.25,1481.355,1024,256,20.01,43.15,71.86
706
+ xcit_tiny_24_p8_224,684.73,1495.467,1024,224,9.21,45.39,12.11
707
+ xcit_tiny_24_p8_224_dist,684.22,1496.573,1024,224,9.21,45.39,12.11
708
+ convnext_base,682.42,1500.518,1024,288,25.43,47.53,88.59
709
+ volo_d2_224,663.51,1543.3,1024,224,14.34,41.34,58.68
710
+ coatnet_2_224,660.84,581.062,384,224,16.5,52.67,74.68
711
+ legacy_senet154,654.15,1565.387,1024,224,20.77,38.69,115.09
712
+ gluon_senet154,654.04,1565.641,1024,224,20.77,38.69,115.09
713
+ senet154,653.94,1565.866,1024,224,20.77,38.69,115.09
714
+ xcit_nano_12_p8_384_dist,646.53,1583.823,1024,384,6.34,46.08,3.05
715
+ dpn107,646.38,1584.202,1024,224,18.38,33.46,86.92
716
+ nest_base,640.55,799.298,512,224,17.96,53.39,67.72
717
+ jx_nest_base,633.53,808.151,512,224,17.96,53.39,67.72
718
+ mobilevitv2_200_384_in22ft1k,626.31,408.731,256,384,16.24,72.34,18.45
719
+ xception71,619.72,826.163,512,299,18.09,69.92,42.34
720
+ hrnet_w64,618.15,1656.539,1024,224,28.97,35.09,128.06
721
+ resnet152d,618.09,1656.699,1024,320,24.08,47.67,60.21
722
+ regnetz_c16_evos,604.19,847.399,512,320,3.86,25.88,13.49
723
+ gcvit_base,594.61,1722.135,1024,224,14.87,55.48,90.32
724
+ regnety_160,594.3,1292.258,768,288,26.37,38.07,83.59
725
+ maxxvit_rmlp_small_rw_256,588.15,1741.023,1024,256,14.67,58.38,66.01
726
+ xcit_small_12_p8_224,582.04,1759.324,1024,224,18.69,47.21,26.21
727
+ xcit_small_12_p8_224_dist,581.74,1760.224,1024,224,18.69,47.21,26.21
728
+ maxvit_rmlp_small_rw_256,575.72,1333.976,768,256,14.15,66.09,64.9
729
+ regnetx_320,551.07,1393.631,768,224,31.81,36.3,107.81
730
+ seresnet152d,547.51,1870.27,1024,320,24.09,47.72,66.84
731
+ resnetrs152,544.33,1881.196,1024,320,24.34,48.14,86.62
732
+ vit_large_patch32_384,543.23,1884.997,1024,384,45.31,43.86,306.63
733
+ halonet_h1,540.47,473.65,256,256,3.0,51.17,8.1
734
+ seresnet269d,540.42,1894.818,1024,256,26.59,53.6,113.67
735
+ swinv2_base_window8_256,529.22,1451.182,768,256,20.37,52.59,87.92
736
+ maxxvitv2_rmlp_base_rw_224,523.43,1956.308,1024,224,24.2,62.77,116.09
737
+ resnext101_64x4d,521.77,1962.525,1024,288,25.66,51.59,83.46
738
+ regnetz_e8,521.5,1472.647,768,320,15.46,63.94,57.7
739
+ mixer_l16_224,518.26,1975.807,1024,224,44.6,41.69,208.2
740
+ vit_medium_patch16_gap_384,508.63,1006.611,512,384,26.08,67.54,39.03
741
+ swin_large_patch4_window7_224,501.11,1532.586,768,224,34.53,54.94,196.53
742
+ regnety_320,490.98,2085.591,1024,224,32.34,30.26,145.05
743
+ swinv2_small_window16_256,487.64,1049.932,512,256,12.82,66.29,49.73
744
+ seresnext101_32x8d,483.23,2119.074,1024,288,27.24,51.63,93.57
745
+ vit_small_patch8_224,478.05,1071.009,512,224,22.44,80.84,21.67
746
+ ig_resnext101_32x16d,477.64,2143.862,1024,224,36.27,51.18,194.03
747
+ swsl_resnext101_32x16d,476.69,2148.145,1024,224,36.27,51.18,194.03
748
+ ssl_resnext101_32x16d,476.06,2150.954,1024,224,36.27,51.18,194.03
749
+ seresnext101d_32x8d,475.05,2155.547,1024,288,27.64,52.95,93.59
750
+ nf_regnet_b5,470.14,1089.029,512,456,11.7,61.95,49.74
751
+ xcit_large_24_p16_224_dist,468.86,2184.017,1024,224,35.86,47.27,189.1
752
+ xcit_large_24_p16_224,468.75,2184.529,1024,224,35.86,47.27,189.1
753
+ volo_d3_224,463.72,2208.199,1024,224,20.78,60.09,86.33
754
+ nfnet_f1,463.52,2209.163,1024,320,35.97,46.77,132.63
755
+ efficientnet_b5,460.91,555.412,256,448,9.59,93.56,30.39
756
+ resnet200d,453.15,2259.739,1024,320,31.25,67.33,64.69
757
+ efficientnetv2_m,451.89,2266.018,1024,416,18.6,67.5,54.14
758
+ seresnextaa101d_32x8d,447.26,2289.498,1024,288,28.51,56.44,93.59
759
+ efficientnetv2_rw_m,437.1,1757.005,768,416,21.49,79.62,53.24
760
+ swinv2_cr_large_224,422.08,1819.551,768,224,35.1,78.42,196.68
761
+ coatnet_rmlp_3_rw_224,421.87,910.226,384,224,33.56,79.47,165.15
762
+ xcit_tiny_12_p8_384_dist,421.04,2432.044,1024,384,14.13,69.14,6.71
763
+ swinv2_cr_tiny_384,419.77,609.847,256,384,15.34,161.01,28.33
764
+ maxvit_rmlp_base_rw_224,419.03,1832.808,768,224,23.15,92.64,116.14
765
+ resnetv2_152x2_bit_teacher,418.89,2444.553,1024,224,46.95,45.11,236.34
766
+ resnetv2_101x1_bitm,418.36,1223.813,512,448,31.65,64.93,44.54
767
+ dm_nfnet_f1,409.02,1877.643,768,320,35.97,46.77,132.63
768
+ xcit_small_24_p16_384_dist,407.47,2513.062,1024,384,26.72,68.58,47.67
769
+ coatnet_3_rw_224,404.39,633.033,256,224,33.44,73.83,181.81
770
+ tf_efficientnet_b5,403.59,634.298,256,456,10.46,98.86,30.39
771
+ convnextv2_base,402.92,1270.715,512,288,25.43,47.53,88.72
772
+ resnetrs200,396.11,2585.123,1024,320,31.51,67.81,93.21
773
+ tresnet_l_448,395.6,2588.481,1024,448,43.5,47.56,55.99
774
+ eva_large_patch14_196,391.22,2617.408,1024,196,61.57,63.52,304.14
775
+ vit_large_patch16_224,389.92,2626.132,1024,224,61.6,63.52,304.33
776
+ regnetz_d8_evos,389.86,1969.937,768,320,7.03,38.92,23.46
777
+ maxvit_base_tf_224,387.71,1320.545,512,224,24.04,95.01,119.47
778
+ coatnet_3_224,387.35,660.882,256,224,36.56,79.01,166.97
779
+ crossvit_15_dagger_408,386.57,662.227,256,408,21.45,95.05,28.5
780
+ vit_base_patch16_18x2_224,384.3,2664.545,1024,224,52.51,71.38,256.73
781
+ deit3_large_patch16_224,376.93,2716.643,1024,224,61.6,63.52,304.37
782
+ deit3_large_patch16_224_in21ft1k,376.54,2719.504,1024,224,61.6,63.52,304.37
783
+ tf_efficientnetv2_m,374.38,2051.373,768,480,24.76,89.84,54.14
784
+ convnext_large,371.39,1378.579,512,288,56.87,71.29,197.77
785
+ beitv2_large_patch16_224,360.12,2843.465,1024,224,61.6,63.52,304.43
786
+ beit_large_patch16_224,359.86,2845.558,1024,224,61.6,63.52,304.43
787
+ swinv2_base_window12to16_192to256_22kft1k,359.31,1068.705,384,256,22.02,84.71,87.92
788
+ swinv2_base_window16_256,359.09,1069.342,384,256,22.02,84.71,87.92
789
+ eca_nfnet_l2,347.1,2212.621,768,384,30.05,68.28,56.72
790
+ flexivit_large,333.31,3072.173,1024,240,70.99,75.39,304.36
791
+ vit_large_r50_s32_384,332.86,3076.333,1024,384,57.43,76.52,329.09
792
+ maxxvitv2_rmlp_large_rw_224,330.79,3095.576,1024,224,44.14,87.15,215.42
793
+ resnest200e,317.25,3227.754,1024,320,35.69,82.78,70.2
794
+ maxvit_tiny_tf_384,317.22,807.002,256,384,17.53,123.42,30.98
795
+ convmixer_768_32,309.28,3310.892,1024,224,19.55,25.95,21.11
796
+ deit_base_patch16_384,306.13,1254.335,384,384,55.54,101.56,86.86
797
+ vit_base_patch16_384,306.13,1254.349,384,384,55.54,101.56,86.86
798
+ vit_base_patch16_clip_384,305.56,1256.673,384,384,55.54,101.56,86.86
799
+ xcit_small_24_p8_224_dist,305.18,3355.41,1024,224,35.81,90.78,47.63
800
+ deit_base_distilled_patch16_384,304.96,1259.16,384,384,55.65,101.82,87.63
801
+ xcit_small_24_p8_224,304.86,3358.887,1024,224,35.81,90.78,47.63
802
+ nasnetalarge,300.31,1278.679,384,331,23.89,90.56,88.75
803
+ volo_d1_384,299.05,1712.072,512,384,22.75,108.55,26.78
804
+ volo_d4_224,295.86,3461.069,1024,224,44.34,80.22,192.96
805
+ deit3_base_patch16_384,294.03,1305.985,384,384,55.54,101.56,86.88
806
+ deit3_base_patch16_384_in21ft1k,293.78,1307.085,384,384,55.54,101.56,86.88
807
+ tresnet_xl_448,292.43,2626.294,768,448,60.65,61.31,78.44
808
+ pnasnet5large,285.95,1342.894,384,331,25.04,92.89,86.06
809
+ vit_large_patch14_224,285.66,3584.705,1024,224,81.08,88.79,304.2
810
+ vit_large_patch14_clip_224,285.43,3587.599,1024,224,81.08,88.79,304.2
811
+ crossvit_18_dagger_408,283.82,901.967,256,408,32.47,124.87,44.61
812
+ xcit_medium_24_p16_384_dist,282.22,3628.317,1024,384,47.39,91.64,84.4
813
+ cait_xxs24_384,275.38,3718.492,1024,384,9.63,122.66,12.03
814
+ regnety_640,271.79,2825.663,768,224,64.16,42.5,281.38
815
+ maxvit_large_tf_224,268.97,1427.67,384,224,43.68,127.35,211.79
816
+ nfnet_f2,263.0,3893.59,1024,352,63.22,79.06,193.78
817
+ beit_base_patch16_384,260.66,1473.146,384,384,55.54,101.56,86.74
818
+ swinv2_cr_small_384,258.79,989.214,256,384,29.7,298.03,49.7
819
+ ecaresnet269d,257.79,3972.16,1024,352,50.25,101.25,102.09
820
+ resnetrs270,249.11,4110.633,1024,352,51.13,105.48,129.86
821
+ mvitv2_large,248.64,2059.181,512,224,43.87,112.02,217.99
822
+ efficientnet_b6,246.42,519.432,128,528,19.4,167.39,43.04
823
+ convnext_xlarge,241.35,2121.412,512,288,100.8,95.05,350.2
824
+ convnextv2_large,238.64,1072.708,256,288,56.87,71.29,197.96
825
+ tf_efficientnet_b6,236.4,541.434,128,528,19.4,167.39,43.04
826
+ swin_base_patch4_window12_384,235.04,816.885,192,384,47.19,134.78,87.9
827
+ dm_nfnet_f2,234.34,3277.279,768,352,63.22,79.06,193.78
828
+ coatnet_4_224,228.52,1120.23,256,224,62.48,129.26,275.43
829
+ vit_base_r50_s16_384,227.31,1689.303,384,384,67.43,135.03,98.95
830
+ efficientnetv2_l,221.97,2306.653,512,480,56.4,157.99,118.52
831
+ xcit_tiny_24_p8_384_dist,221.23,4628.611,1024,384,27.05,132.95,12.11
832
+ ig_resnext101_32x32d,220.61,2320.857,512,224,87.29,91.12,468.53
833
+ swinv2_large_window12to16_192to256_22kft1k,219.46,1166.485,256,256,47.81,121.53,196.74
834
+ tf_efficientnetv2_l,219.35,2334.183,512,480,56.4,157.99,118.52
835
+ resmlp_big_24_224,214.31,4778.166,1024,224,100.23,87.31,129.14
836
+ resmlp_big_24_224_in22ft1k,214.13,4782.043,1024,224,100.23,87.31,129.14
837
+ resmlp_big_24_distilled_224,214.04,4784.169,1024,224,100.23,87.31,129.14
838
+ xcit_medium_24_p8_224_dist,210.1,4873.763,1024,224,63.53,121.23,84.32
839
+ xcit_medium_24_p8_224,210.01,4875.864,1024,224,63.53,121.23,84.32
840
+ maxvit_small_tf_384,208.79,919.556,192,384,35.87,183.65,69.02
841
+ vit_base_patch8_224,199.59,1282.637,256,224,78.22,161.69,86.58
842
+ eca_nfnet_l3,199.58,2565.434,512,448,52.55,118.4,72.04
843
+ volo_d5_224,196.25,5217.924,1024,224,72.4,118.11,295.46
844
+ xcit_small_12_p8_384_dist,194.27,2635.521,512,384,54.92,138.29,26.21
845
+ cait_xs24_384,192.73,3984.863,768,384,19.28,183.98,26.67
846
+ swinv2_cr_base_384,184.92,1384.392,256,384,50.57,333.68,87.88
847
+ cait_xxs36_384,184.35,5554.56,1024,384,14.35,183.7,17.37
848
+ swinv2_cr_huge_224,183.61,2091.395,384,224,115.97,121.08,657.83
849
+ convnext_xxlarge,183.01,2098.268,384,224,151.66,95.29,846.47
850
+ coatnet_rmlp_2_rw_384,178.88,715.532,128,384,47.69,209.43,73.88
851
+ convmixer_1536_20,173.51,5901.752,1024,224,48.68,33.03,51.63
852
+ volo_d2_384,168.46,1519.603,256,384,46.17,184.51,58.87
853
+ resnetrs350,168.28,6085.136,1024,384,77.59,154.74,163.96
854
+ xcit_large_24_p16_384_dist,160.71,4778.847,768,384,105.35,137.17,189.1
855
+ resnetv2_152x2_bit_teacher_384,159.55,1604.488,256,384,136.16,132.56,236.34
856
+ maxvit_xlarge_tf_224,155.79,1643.178,256,224,97.49,191.02,474.95
857
+ maxvit_tiny_tf_512,155.64,822.373,128,512,33.49,257.59,31.05
858
+ regnety_1280,155.18,2474.502,384,224,127.66,71.58,644.81
859
+ vit_huge_patch14_224,154.03,6647.897,1024,224,167.43,139.43,658.75
860
+ vit_huge_patch14_clip_224,153.92,6652.944,1024,224,167.4,139.41,632.05
861
+ maxxvitv2_rmlp_base_rw_384,153.34,1669.502,256,384,72.98,213.74,116.09
862
+ efficientnetv2_xl,152.49,3357.61,512,512,93.85,247.32,208.12
863
+ tf_efficientnetv2_xl,151.4,2536.254,384,512,93.85,247.32,208.12
864
+ deit3_huge_patch14_224_in21ft1k,149.08,6868.834,1024,224,167.4,139.41,632.13
865
+ deit3_huge_patch14_224,149.01,6871.974,1024,224,167.4,139.41,632.13
866
+ cait_s24_384,148.46,3448.684,512,384,32.17,245.31,47.06
867
+ resnest269e,147.61,3468.584,512,416,77.69,171.98,110.93
868
+ nfnet_f3,147.43,3472.717,512,416,115.58,141.78,254.92
869
+ efficientnet_b7,142.41,674.084,96,600,38.33,289.94,66.35
870
+ resnetv2_50x3_bitm,138.27,1388.564,192,448,145.7,133.37,217.32
871
+ tf_efficientnet_b7,137.89,696.181,96,600,38.33,289.94,66.35
872
+ swin_large_patch4_window12_384,137.6,930.229,128,384,104.08,202.16,196.74
873
+ ig_resnext101_32x48d,132.29,2902.628,384,224,153.57,131.06,828.41
874
+ dm_nfnet_f3,127.59,4012.898,512,416,115.58,141.78,254.92
875
+ coatnet_5_224,125.18,1022.512,128,224,145.49,194.24,687.47
876
+ maxvit_rmlp_base_rw_384,121.26,2111.079,256,384,70.97,318.95,116.14
877
+ xcit_large_24_p8_224,119.97,6401.598,768,224,141.23,181.56,188.93
878
+ xcit_large_24_p8_224_dist,119.94,6403.17,768,224,141.23,181.56,188.93
879
+ resnetrs420,119.93,6403.598,768,416,108.45,213.79,191.89
880
+ resnetv2_152x2_bitm,117.33,2181.801,256,448,184.99,180.43,236.34
881
+ maxvit_base_tf_384,113.69,1688.826,192,384,73.8,332.9,119.65
882
+ swinv2_cr_large_384,113.07,1132.03,128,384,108.95,404.96,196.68
883
+ eva_large_patch14_336,102.65,2493.904,256,336,191.1,270.24,304.53
884
+ vit_large_patch14_clip_336,102.47,2498.286,256,336,191.11,270.24,304.53
885
+ vit_large_patch16_384,102.37,2500.639,256,384,191.21,270.24,304.72
886
+ xcit_small_24_p8_384_dist,102.36,5001.728,512,384,105.24,265.91,47.63
887
+ eva_giant_patch14_224,101.75,10063.521,1024,224,267.18,192.64,1012.56
888
+ vit_giant_patch14_224,100.42,7648.057,768,224,267.18,192.64,1012.61
889
+ vit_giant_patch14_clip_224,100.32,7655.265,768,224,267.18,192.64,1012.65
890
+ cait_s36_384,99.37,5152.338,512,384,47.99,367.4,68.37
891
+ deit3_large_patch16_384,99.34,2577.037,256,384,191.21,270.24,304.76
892
+ deit3_large_patch16_384_in21ft1k,99.27,2578.907,256,384,191.21,270.24,304.76
893
+ regnety_2560,97.99,2612.623,256,224,257.07,87.48,826.14
894
+ maxvit_small_tf_512,97.85,981.11,96,512,67.26,383.77,69.13
895
+ swinv2_base_window12to24_192to384_22kft1k,95.95,666.98,64,384,55.25,280.36,87.92
896
+ efficientnet_b8,95.3,1007.298,96,672,63.48,442.89,87.41
897
+ tf_efficientnet_b8,92.65,1036.1,96,672,63.48,442.89,87.41
898
+ beit_large_patch16_384,88.55,2890.891,256,384,191.21,270.24,305.0
899
+ resnetv2_101x3_bitm,83.1,2310.491,192,448,280.33,194.78,387.93
900
+ maxvit_large_tf_384,80.34,1593.284,128,384,132.55,445.84,212.03
901
+ nfnet_f4,79.54,4827.723,384,512,216.26,262.26,316.07
902
+ volo_d3_448,73.5,2612.274,192,448,96.33,446.83,86.63
903
+ dm_nfnet_f4,71.41,3584.699,256,512,216.26,262.26,316.07
904
+ xcit_medium_24_p8_384_dist,70.91,5415.294,384,384,186.67,354.73,84.32
905
+ swinv2_large_window12to24_192to384_22kft1k,60.84,788.97,48,384,116.15,407.83,196.74
906
+ vit_gigantic_patch14_clip_224,60.15,8511.823,512,224,483.96,275.37,1844.91
907
+ vit_gigantic_patch14_224,60.11,8517.291,512,224,483.95,275.37,1844.44
908
+ nfnet_f5,58.02,4412.387,256,544,290.97,349.71,377.21
909
+ vit_huge_patch14_clip_336,57.29,4468.831,256,336,390.97,407.54,632.46
910
+ convnextv2_huge,56.06,1712.576,96,384,337.96,232.35,660.29
911
+ volo_d4_448,54.47,2349.801,128,448,197.13,527.35,193.41
912
+ tf_efficientnet_l2,54.12,1182.593,64,475,172.11,609.89,480.31
913
+ maxvit_base_tf_512,52.65,1823.292,96,512,138.02,703.99,119.88
914
+ swinv2_cr_giant_224,52.12,2455.882,128,224,483.85,309.15,2598.76
915
+ dm_nfnet_f5,50.7,5049.339,256,544,290.97,349.71,377.21
916
+ swinv2_cr_huge_384,48.86,1309.971,64,384,352.04,583.18,657.94
917
+ maxvit_xlarge_tf_384,46.24,2076.289,96,384,292.78,668.76,475.32
918
+ nfnet_f6,44.3,5778.548,256,576,378.69,452.2,438.36
919
+ xcit_large_24_p8_384_dist,40.2,6368.127,256,384,415.0,531.82,188.93
920
+ eva_giant_patch14_336,39.77,6436.237,256,336,620.64,550.67,1013.01
921
+ dm_nfnet_f6,39.62,6461.626,256,576,378.69,452.2,438.36
922
+ maxvit_large_tf_512,38.67,1654.908,64,512,244.75,942.15,212.33
923
+ volo_d5_448,37.56,3408.043,128,448,315.06,737.92,295.91
924
+ beit_large_patch16_512,35.36,2715.28,96,512,362.24,656.39,305.67
925
+ nfnet_f7,34.74,7370.0,256,608,480.39,570.85,499.5
926
+ cait_m36_384,32.36,7912.123,256,384,173.11,734.81,271.22
927
+ resnetv2_152x4_bitm,30.0,4266.89,128,480,844.84,414.26,936.53
928
+ volo_d5_512,26.35,4857.602,128,512,425.09,1105.37,296.09
929
+ maxvit_xlarge_tf_512,23.12,2076.455,48,512,534.14,1413.22,475.77
930
+ efficientnet_l2,21.26,1505.032,32,800,479.12,1707.39,480.31
931
+ swinv2_cr_giant_384,15.03,2129.6,32,384,1450.71,1394.86,2598.76
932
+ cait_m48_448,13.69,9353.048,128,448,329.41,1708.23,356.46
933
+ eva_giant_patch14_560,10.36,4631.037,48,560,1906.76,2577.17,1014.45
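The rows above and below are raw benchmark result CSVs rather than prose, so there is nothing to annotate inline; for readers who want to work with these files, a minimal sketch follows showing how one of the results CSVs added in this commit could be loaded and ranked by inference throughput. This is not part of the upstream repository: pandas is an assumed dependency, the local path simply mirrors the path used in the commit, and the column names are taken from the header row visible at the start of the pt210 CSV below.

```python
# Minimal sketch (assumption: pandas is installed and the CSV from this commit
# has been downloaded locally under the same relative path).
import pandas as pd

CSV_PATH = "pytorch-image-models/results/benchmark-infer-amp-nchw-pt210-cu121-rtx3090.csv"

df = pd.read_csv(CSV_PATH)

# Columns, per the CSV header row:
# model, infer_img_size, infer_batch_size, infer_samples_per_sec,
# infer_step_time, infer_gmacs, infer_macts, param_count
top = (
    df.sort_values("infer_samples_per_sec", ascending=False)
      .loc[:, ["model", "infer_img_size", "infer_samples_per_sec", "param_count"]]
      .head(10)
)
print(top.to_string(index=False))
```

The same pattern applies to the other benchmark CSVs in the results folder; only the file name changes.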
pytorch-image-models/results/benchmark-infer-amp-nchw-pt210-cu121-rtx3090.csv ADDED
@@ -0,0 +1,1294 @@
1
+ model,infer_img_size,infer_batch_size,infer_samples_per_sec,infer_step_time,infer_gmacs,infer_macts,param_count
2
+ tinynet_e,106,1024.0,50604.03,20.225,0.03,0.69,2.04
3
+ mobilenetv3_small_050,224,1024.0,46069.42,22.217,0.03,0.92,1.59
4
+ lcnet_035,224,1024.0,41190.64,24.85,0.03,1.04,1.64
5
+ lcnet_050,224,1024.0,37663.82,27.178,0.05,1.26,1.88
6
+ mobilenetv3_small_075,224,1024.0,33398.64,30.649,0.05,1.3,2.04
7
+ efficientvit_m0,224,1024.0,32179.13,31.812,0.08,0.91,2.35
8
+ mobilenetv3_small_100,224,1024.0,29653.41,34.522,0.06,1.42,2.54
9
+ tf_mobilenetv3_small_minimal_100,224,1024.0,28352.57,36.106,0.06,1.41,2.04
10
+ tinynet_d,152,1024.0,27612.87,37.074,0.05,1.42,2.34
11
+ tf_mobilenetv3_small_075,224,1024.0,27505.95,37.218,0.05,1.3,2.04
12
+ tf_mobilenetv3_small_100,224,1024.0,24859.95,41.18,0.06,1.42,2.54
13
+ efficientvit_m1,224,1024.0,24836.87,41.219,0.17,1.33,2.98
14
+ lcnet_075,224,1024.0,24184.78,42.33,0.1,1.99,2.36
15
+ efficientvit_m2,224,1024.0,21907.95,46.731,0.2,1.47,4.19
16
+ mnasnet_small,224,1024.0,20764.95,49.303,0.07,2.16,2.03
17
+ levit_128s,224,1024.0,20669.44,49.531,0.31,1.88,7.78
18
+ lcnet_100,224,1024.0,19774.93,51.772,0.16,2.52,2.95
19
+ regnetx_002,224,1024.0,18945.55,54.04,0.2,2.16,2.68
20
+ resnet10t,176,1024.0,18840.28,54.342,0.7,1.51,5.44
21
+ efficientvit_m3,224,1024.0,18627.14,54.963,0.27,1.62,6.9
22
+ mobilenetv2_035,224,1024.0,18464.78,55.447,0.07,2.86,1.68
23
+ ghostnet_050,224,1024.0,17741.46,57.707,0.05,1.77,2.59
24
+ resnet18,160,1024.0,17592.15,58.198,0.93,1.27,11.69
25
+ regnety_002,224,1024.0,17571.32,58.267,0.2,2.17,3.16
26
+ levit_conv_128s,224,1024.0,17529.9,58.404,0.31,1.88,7.78
27
+ efficientvit_m4,224,1024.0,17446.52,58.683,0.3,1.7,8.8
28
+ repghostnet_050,224,1024.0,17090.91,59.904,0.05,2.02,2.31
29
+ efficientvit_b0,224,1024.0,16784.26,60.999,0.1,2.87,3.41
30
+ vit_tiny_r_s16_p8_224,224,1024.0,16479.31,62.128,0.43,1.85,6.34
31
+ vit_small_patch32_224,224,1024.0,15974.78,64.091,1.12,2.09,22.88
32
+ mnasnet_050,224,1024.0,15859.35,64.557,0.11,3.07,2.22
33
+ mobilenetv2_050,224,1024.0,14885.11,68.783,0.1,3.64,1.97
34
+ tinynet_c,184,1024.0,14726.2,69.525,0.11,2.87,2.46
35
+ pit_ti_224,224,1024.0,14628.51,69.989,0.5,2.75,4.85
36
+ pit_ti_distilled_224,224,1024.0,14546.3,70.385,0.51,2.77,5.1
37
+ semnasnet_050,224,1024.0,14351.42,71.341,0.11,3.44,2.08
38
+ levit_128,224,1024.0,14192.78,72.139,0.41,2.71,9.21
39
+ repghostnet_058,224,1024.0,13482.93,75.937,0.07,2.59,2.55
40
+ mixer_s32_224,224,1024.0,13082.53,78.262,1.0,2.28,19.1
41
+ cs3darknet_focus_s,256,1024.0,12838.86,79.748,0.69,2.7,3.27
42
+ regnetx_004,224,1024.0,12620.59,81.127,0.4,3.14,5.16
43
+ levit_conv_128,224,1024.0,12584.5,81.359,0.41,2.71,9.21
44
+ cs3darknet_s,256,1024.0,12531.56,81.703,0.72,2.97,3.28
45
+ lcnet_150,224,1024.0,12510.06,81.844,0.34,3.79,4.5
46
+ regnetx_004_tv,224,1024.0,12294.91,83.276,0.42,3.17,5.5
47
+ efficientvit_m5,224,1024.0,12067.16,84.847,0.53,2.41,12.47
48
+ mobilenetv3_large_075,224,1024.0,12041.45,85.029,0.16,4.0,3.99
49
+ levit_192,224,1024.0,11986.94,85.416,0.66,3.2,10.95
50
+ resnet10t,224,1024.0,11963.05,85.587,1.1,2.43,5.44
51
+ gernet_s,224,1024.0,11809.29,86.701,0.75,2.65,8.17
52
+ ese_vovnet19b_slim_dw,224,1024.0,11618.32,88.126,0.4,5.28,1.9
53
+ vit_tiny_patch16_224,224,1024.0,11270.42,90.846,1.08,4.12,5.72
54
+ deit_tiny_patch16_224,224,1024.0,11259.37,90.936,1.08,4.12,5.72
55
+ deit_tiny_distilled_patch16_224,224,1024.0,11217.54,91.275,1.09,4.15,5.91
56
+ repghostnet_080,224,1024.0,11079.58,92.412,0.1,3.22,3.28
57
+ mobilenetv3_rw,224,1024.0,10908.78,93.859,0.23,4.41,5.48
58
+ levit_conv_192,224,1024.0,10768.96,95.077,0.66,3.2,10.95
59
+ mobilenetv3_large_100,224,1024.0,10731.24,95.412,0.23,4.41,5.48
60
+ hardcorenas_a,224,1024.0,10620.31,96.408,0.23,4.38,5.26
61
+ tf_mobilenetv3_large_075,224,1024.0,10495.83,97.552,0.16,4.0,3.99
62
+ resnet14t,176,1024.0,10451.45,97.965,1.07,3.61,10.08
63
+ mnasnet_075,224,1024.0,10423.24,98.231,0.23,4.77,3.17
64
+ tf_mobilenetv3_large_minimal_100,224,1024.0,10369.07,98.745,0.22,4.4,3.92
65
+ resnet34,160,1024.0,10330.89,99.109,1.87,1.91,21.8
66
+ regnety_004,224,1024.0,9931.33,103.097,0.41,3.89,4.34
67
+ nf_regnet_b0,192,1024.0,9884.05,103.59,0.37,3.15,8.76
68
+ regnetx_006,224,1024.0,9823.29,104.232,0.61,3.98,6.2
69
+ hardcorenas_b,224,1024.0,9755.67,104.953,0.26,5.09,5.18
70
+ hardcorenas_c,224,1024.0,9572.88,106.958,0.28,5.01,5.52
71
+ ghostnet_100,224,1024.0,9528.83,107.453,0.15,3.55,5.18
72
+ tf_mobilenetv3_large_100,224,1024.0,9484.05,107.96,0.23,4.41,5.48
73
+ tinynet_b,188,1024.0,9358.37,109.409,0.21,4.44,3.73
74
+ mnasnet_100,224,1024.0,9357.9,109.416,0.33,5.46,4.38
75
+ tf_efficientnetv2_b0,192,1024.0,9316.15,109.906,0.54,3.51,7.14
76
+ repghostnet_100,224,1024.0,9303.14,110.06,0.15,3.98,4.07
77
+ mobilenetv2_075,224,1024.0,9280.78,110.325,0.22,5.86,2.64
78
+ resnet18,224,1024.0,9222.44,111.023,1.82,2.48,11.69
79
+ pit_xs_distilled_224,224,1024.0,9172.76,111.624,1.11,4.15,11.0
80
+ semnasnet_075,224,1024.0,9145.4,111.959,0.23,5.54,2.91
81
+ pit_xs_224,224,1024.0,9134.12,112.096,1.1,4.12,10.62
82
+ regnety_006,224,1024.0,9106.78,112.433,0.61,4.33,6.06
83
+ convnext_atto,224,1024.0,8993.29,113.851,0.55,3.81,3.7
84
+ hardcorenas_d,224,1024.0,8915.53,114.845,0.3,4.93,7.5
85
+ levit_256,224,1024.0,8893.96,115.124,1.13,4.23,18.89
86
+ seresnet18,224,1024.0,8718.39,117.442,1.82,2.49,11.78
87
+ convnext_atto_ols,224,1024.0,8549.03,119.769,0.58,4.11,3.7
88
+ mobilenetv2_100,224,1024.0,8479.08,120.757,0.31,6.68,3.5
89
+ legacy_seresnet18,224,1024.0,8452.0,121.144,1.82,2.49,11.78
90
+ spnasnet_100,224,1024.0,8438.72,121.334,0.35,6.03,4.42
91
+ repghostnet_111,224,1024.0,8382.7,122.146,0.18,4.38,4.54
92
+ semnasnet_100,224,1024.0,8351.88,122.597,0.32,6.23,3.89
93
+ dla46_c,224,1024.0,8209.51,124.721,0.58,4.5,1.3
94
+ repvgg_a0,224,1024.0,8124.8,126.024,1.52,3.59,9.11
95
+ levit_conv_256,224,1024.0,7997.32,128.032,1.13,4.23,18.89
96
+ edgenext_xx_small,256,1024.0,7955.06,128.711,0.26,3.33,1.33
97
+ regnetx_008,224,1024.0,7889.15,129.787,0.81,5.15,7.26
98
+ resnet18d,224,1024.0,7873.83,130.041,2.06,3.29,11.71
99
+ convnext_femto,224,1024.0,7867.13,130.151,0.79,4.57,5.22
100
+ ese_vovnet19b_slim,224,1024.0,7834.56,130.693,1.69,3.52,3.17
101
+ mobilevit_xxs,256,1024.0,7818.95,130.953,0.34,5.74,1.27
102
+ hardcorenas_f,224,1024.0,7811.68,131.075,0.35,5.57,8.2
103
+ hardcorenas_e,224,1024.0,7751.65,132.09,0.35,5.65,8.07
104
+ efficientnet_lite0,224,1024.0,7716.09,132.699,0.4,6.74,4.65
105
+ xcit_nano_12_p16_224,224,1024.0,7711.63,132.776,0.56,4.17,3.05
106
+ ghostnet_130,224,1024.0,7680.26,133.318,0.24,4.6,7.36
107
+ levit_256d,224,1024.0,7643.23,133.964,1.4,4.93,26.21
108
+ tf_efficientnetv2_b0,224,1024.0,7637.19,134.07,0.73,4.77,7.14
109
+ repghostnet_130,224,1024.0,7550.55,135.609,0.25,5.24,5.48
110
+ convnext_femto_ols,224,1024.0,7514.81,136.254,0.82,4.87,5.23
111
+ regnety_008,224,1024.0,7508.88,136.361,0.81,5.25,6.26
112
+ tinynet_a,192,1024.0,7458.0,137.291,0.35,5.41,6.19
113
+ fbnetc_100,224,1024.0,7362.02,139.082,0.4,6.51,5.57
114
+ tf_efficientnetv2_b1,192,1024.0,7241.64,141.394,0.76,4.59,8.14
115
+ crossvit_tiny_240,240,1024.0,7093.57,144.345,1.3,5.67,7.01
116
+ regnety_008_tv,224,1024.0,7067.28,144.882,0.84,5.42,6.43
117
+ mobilevitv2_050,256,1024.0,7057.9,145.075,0.48,8.04,1.37
118
+ crossvit_9_240,240,1024.0,6964.15,147.028,1.55,5.59,8.55
119
+ dla46x_c,224,1024.0,6837.04,149.761,0.54,5.66,1.07
120
+ tf_efficientnet_lite0,224,1024.0,6819.73,150.142,0.4,6.74,4.65
121
+ efficientnet_b0,224,1024.0,6721.47,152.337,0.4,6.75,5.29
122
+ rexnet_100,224,1024.0,6689.15,153.073,0.41,7.44,4.8
123
+ rexnetr_100,224,1024.0,6646.85,154.047,0.43,7.72,4.88
124
+ levit_conv_256d,224,1024.0,6618.0,154.719,1.4,4.93,26.21
125
+ repvit_m1,224,1024.0,6591.52,155.339,0.83,7.45,5.49
126
+ efficientnet_b1_pruned,240,1024.0,6583.2,155.537,0.4,6.21,6.33
127
+ repghostnet_150,224,1024.0,6564.41,155.982,0.32,6.0,6.58
128
+ mnasnet_140,224,1024.0,6559.1,156.108,0.6,7.71,7.12
129
+ efficientvit_b1,224,1024.0,6458.82,158.532,0.53,7.25,9.1
130
+ visformer_tiny,224,1024.0,6456.3,158.594,1.27,5.72,10.32
131
+ crossvit_9_dagger_240,240,1024.0,6436.13,159.091,1.68,6.03,8.78
132
+ resnet14t,224,1024.0,6404.13,159.886,1.69,5.8,10.08
133
+ dla60x_c,224,1024.0,6404.11,159.885,0.59,6.01,1.32
134
+ mobilenetv2_110d,224,1024.0,6387.15,160.311,0.45,8.71,4.52
135
+ ghostnetv2_100,224,1024.0,6375.73,160.599,0.18,4.55,6.16
136
+ regnetz_005,224,1024.0,6372.66,160.676,0.52,5.86,7.12
137
+ repvit_m0_9,224,1024.0,6295.33,162.649,0.83,7.45,5.49
138
+ edgenext_xx_small,288,1024.0,6241.41,164.053,0.33,4.21,1.33
139
+ fbnetv3_b,224,1024.0,6166.1,166.058,0.42,6.97,8.6
140
+ convnext_pico,224,1024.0,6145.95,166.603,1.37,6.1,9.05
141
+ cs3darknet_focus_m,256,1024.0,6145.46,166.616,1.98,4.89,9.3
142
+ pvt_v2_b0,224,1024.0,6126.38,167.135,0.53,7.01,3.67
143
+ tf_efficientnet_b0,224,1024.0,6026.91,169.894,0.4,6.75,5.29
144
+ nf_regnet_b0,256,1024.0,5970.36,171.503,0.64,5.58,8.76
145
+ resnetblur18,224,1024.0,5963.74,171.694,2.34,3.39,11.69
146
+ ese_vovnet19b_dw,224,1024.0,5956.2,171.911,1.34,8.25,6.54
147
+ hrnet_w18_small,224,1024.0,5950.21,172.083,1.61,5.72,13.19
148
+ resnet50,160,1024.0,5943.32,172.284,2.1,5.67,25.56
149
+ repvgg_a1,224,1024.0,5891.09,173.812,2.64,4.74,14.09
150
+ cs3darknet_m,256,1024.0,5871.36,174.395,2.08,5.28,9.31
151
+ convnext_pico_ols,224,1024.0,5852.38,174.961,1.43,6.5,9.06
152
+ vit_base_patch32_clip_224,224,1024.0,5768.1,177.517,4.37,4.19,88.22
153
+ tf_efficientnetv2_b2,208,1024.0,5753.76,177.96,1.06,6.0,10.1
154
+ vit_base_patch32_224,224,1024.0,5748.7,178.117,4.37,4.19,88.22
155
+ semnasnet_140,224,1024.0,5744.77,178.239,0.6,8.87,6.11
156
+ skresnet18,224,1024.0,5740.29,178.378,1.82,3.24,11.96
157
+ vit_tiny_r_s16_p8_384,384,1024.0,5663.72,180.79,1.25,5.39,6.36
158
+ resnet50d,160,1024.0,5651.35,181.185,2.22,6.08,25.58
159
+ resnet18,288,1024.0,5636.85,181.651,3.01,4.11,11.69
160
+ mobilenetv2_140,224,1024.0,5629.57,181.886,0.6,9.57,6.11
161
+ vit_small_patch32_384,384,1024.0,5499.31,186.195,3.26,6.07,22.92
162
+ convnext_atto,288,1024.0,5487.38,186.599,0.91,6.3,3.7
163
+ efficientnet_b0_gn,224,1024.0,5481.83,186.788,0.42,6.75,5.29
164
+ selecsls42,224,1024.0,5458.22,187.596,2.94,4.62,30.35
165
+ efficientnet_lite1,240,1024.0,5452.84,187.782,0.62,10.14,5.42
166
+ fbnetv3_d,224,1024.0,5449.6,187.893,0.52,8.5,10.31
167
+ pit_s_224,224,1024.0,5438.08,188.291,2.42,6.18,23.46
168
+ selecsls42b,224,1024.0,5414.81,189.1,2.98,4.62,32.46
169
+ resnet34,224,1024.0,5413.46,189.147,3.67,3.74,21.8
170
+ pit_s_distilled_224,224,1024.0,5407.14,189.368,2.45,6.22,24.04
171
+ efficientvit_b1,256,1024.0,5391.26,189.926,0.69,9.46,9.1
172
+ seresnet18,288,1024.0,5348.84,191.432,3.01,4.11,11.78
173
+ tf_efficientnetv2_b1,240,1024.0,5293.37,193.439,1.21,7.34,8.14
174
+ levit_384,224,1024.0,5286.23,193.7,2.36,6.26,39.13
175
+ convnextv2_atto,224,1024.0,5265.85,194.45,0.55,3.81,3.71
176
+ repvit_m1_0,224,1024.0,5259.32,194.683,1.13,8.69,7.3
177
+ seresnet50,160,1024.0,5236.4,195.543,2.1,5.69,28.09
178
+ convnext_atto_ols,288,1024.0,5201.4,196.86,0.96,6.8,3.7
179
+ gernet_m,224,1024.0,5195.05,197.1,3.02,5.24,21.14
180
+ fbnetv3_b,256,1024.0,5178.49,197.729,0.55,9.1,8.6
181
+ mixnet_s,224,1024.0,5129.76,199.608,0.25,6.25,4.13
182
+ repghostnet_200,224,1024.0,5125.91,199.759,0.54,7.96,9.8
183
+ vit_base_patch32_clip_quickgelu_224,224,1024.0,5125.16,199.787,4.37,4.19,87.85
184
+ seresnet34,224,1024.0,5104.13,200.612,3.67,3.74,21.96
185
+ repvit_m2,224,1024.0,5098.16,200.845,1.36,9.43,8.8
186
+ rexnetr_130,224,1024.0,5082.35,201.471,0.68,9.81,7.61
187
+ efficientnet_b0_g16_evos,224,1024.0,5016.04,204.134,1.01,7.42,8.11
188
+ ghostnetv2_130,224,1024.0,5011.79,204.307,0.28,5.9,8.96
189
+ edgenext_x_small,256,1024.0,4992.08,205.112,0.54,5.93,2.34
190
+ ecaresnet50t,160,1024.0,4989.39,205.225,2.21,6.04,25.57
191
+ tiny_vit_5m_224,224,1024.0,4963.53,206.293,1.18,9.32,12.08
192
+ rexnet_130,224,1024.0,4939.41,207.301,0.68,9.71,7.56
193
+ legacy_seresnet34,224,1024.0,4938.49,207.34,3.67,3.74,21.96
194
+ eva02_tiny_patch14_224,224,1024.0,4931.19,207.646,1.4,6.17,5.5
195
+ resnet34d,224,1024.0,4924.89,207.912,3.91,4.54,21.82
196
+ tf_efficientnet_lite1,240,1024.0,4918.8,208.17,0.62,10.14,5.42
197
+ mixer_b32_224,224,1024.0,4917.45,208.227,3.24,6.29,60.29
198
+ resnet50,176,1024.0,4914.58,208.348,2.62,6.92,25.56
199
+ resnetrs50,160,1024.0,4904.24,208.788,2.29,6.2,35.69
200
+ xcit_tiny_12_p16_224,224,1024.0,4900.19,208.961,1.24,6.29,6.72
201
+ repvit_m1_1,224,1024.0,4858.32,210.759,1.36,9.43,8.8
202
+ levit_conv_384,224,1024.0,4851.29,211.066,2.36,6.26,39.13
203
+ efficientnet_es_pruned,224,1024.0,4832.02,211.909,1.81,8.73,5.44
204
+ efficientnet_es,224,1024.0,4828.47,212.065,1.81,8.73,5.44
205
+ dla34,224,1024.0,4823.61,212.277,3.07,5.02,15.74
206
+ resnet26,224,1024.0,4806.46,213.036,2.36,7.35,16.0
207
+ resnet18d,288,1024.0,4806.17,213.049,3.41,5.43,11.71
208
+ resnext50_32x4d,160,1024.0,4797.48,213.435,2.17,7.35,25.03
209
+ tf_mixnet_s,224,1024.0,4783.68,214.05,0.25,6.25,4.13
210
+ convnext_femto,288,1024.0,4774.19,214.475,1.3,7.56,5.22
211
+ efficientnet_b1,224,1024.0,4707.45,217.516,0.59,9.36,7.79
212
+ gmlp_ti16_224,224,1024.0,4694.71,218.108,1.34,7.55,5.87
213
+ cs3darknet_focus_m,288,1024.0,4686.36,218.495,2.51,6.19,9.3
214
+ mobilenetv2_120d,224,1024.0,4673.25,219.108,0.69,11.97,5.83
215
+ selecsls60,224,1024.0,4656.74,219.885,3.59,5.52,30.67
216
+ selecsls60b,224,1024.0,4628.67,221.219,3.63,5.52,32.77
217
+ tf_efficientnet_es,224,1024.0,4617.85,221.737,1.81,8.73,5.44
218
+ resmlp_12_224,224,1024.0,4607.73,222.224,3.01,5.5,15.35
219
+ vit_small_patch16_224,224,1024.0,4586.65,223.246,4.25,8.25,22.05
220
+ deit_small_patch16_224,224,1024.0,4584.29,223.359,4.25,8.25,22.05
221
+ fbnetv3_d,256,1024.0,4567.33,224.19,0.68,11.1,10.31
222
+ gmixer_12_224,224,1024.0,4565.4,224.285,2.67,7.26,12.7
223
+ deit_small_distilled_patch16_224,224,1024.0,4564.97,224.306,4.27,8.29,22.44
224
+ convnext_femto_ols,288,1024.0,4561.96,224.454,1.35,8.06,5.23
225
+ efficientnet_b0_g8_gn,224,1024.0,4561.27,224.488,0.66,6.75,6.56
226
+ efficientnet_cc_b0_8e,224,1024.0,4542.29,225.426,0.42,9.42,24.01
227
+ efficientnet_cc_b0_4e,224,1024.0,4540.5,225.515,0.41,9.42,13.31
228
+ repvgg_b0,224,1024.0,4526.99,226.188,3.41,6.15,15.82
229
+ mixer_s16_224,224,1024.0,4518.8,226.598,3.79,5.97,18.53
230
+ cs3darknet_m,288,1024.0,4513.42,226.868,2.63,6.69,9.31
231
+ convnextv2_femto,224,1024.0,4509.16,227.082,0.79,4.57,5.23
232
+ regnetx_016,224,1024.0,4476.6,228.734,1.62,7.93,9.19
233
+ nf_regnet_b1,256,1024.0,4444.68,230.377,0.82,7.27,10.22
234
+ vit_base_patch32_clip_256,256,1024.0,4442.76,230.476,5.68,5.44,87.86
235
+ mobilevitv2_075,256,1024.0,4419.22,231.704,1.05,12.06,2.87
236
+ rexnetr_150,224,1024.0,4415.72,231.888,0.89,11.13,9.78
237
+ darknet17,256,1024.0,4402.14,232.603,3.26,7.18,14.3
238
+ resnet26d,224,1024.0,4396.77,232.887,2.6,8.15,16.01
239
+ resnetaa34d,224,1024.0,4381.9,233.677,4.43,5.07,21.82
240
+ efficientnet_b2_pruned,260,1024.0,4356.91,235.018,0.73,9.13,8.31
241
+ convnext_nano,224,1024.0,4340.39,235.913,2.46,8.37,15.59
242
+ ecaresnet50d_pruned,224,1024.0,4337.48,236.07,2.53,6.43,19.94
243
+ efficientformer_l1,224,1024.0,4271.29,239.728,1.3,5.53,12.29
244
+ nf_resnet26,224,1024.0,4216.31,242.856,2.41,7.35,16.0
245
+ deit3_small_patch16_224,224,1024.0,4203.29,243.607,4.25,8.25,22.06
246
+ nf_regnet_b2,240,1024.0,4197.9,243.92,0.97,7.23,14.31
247
+ tf_efficientnet_cc_b0_4e,224,1024.0,4196.5,244.002,0.41,9.42,13.31
248
+ tf_efficientnet_cc_b0_8e,224,1024.0,4190.23,244.367,0.42,9.42,24.01
249
+ regnety_016,224,1024.0,4161.97,246.026,1.63,8.04,11.2
250
+ rexnet_150,224,1024.0,4147.2,246.903,0.9,11.21,9.73
251
+ ghostnetv2_160,224,1024.0,4116.92,248.718,0.42,7.23,12.39
252
+ tiny_vit_11m_224,224,1024.0,4086.56,250.566,1.9,10.73,20.35
253
+ poolformer_s12,224,1024.0,4071.24,251.51,1.82,5.53,11.92
254
+ regnetz_005,288,1024.0,4056.8,252.404,0.86,9.68,7.12
255
+ efficientnet_lite2,260,1024.0,4046.71,253.034,0.89,12.9,6.09
256
+ darknet21,256,1024.0,4001.6,255.887,3.93,7.47,20.86
257
+ efficientvit_b1,288,1024.0,3997.55,256.145,0.87,11.96,9.1
258
+ resnext50_32x4d,176,1024.0,3992.51,256.47,2.71,8.97,25.03
259
+ edgenext_x_small,288,1024.0,3965.96,258.184,0.68,7.5,2.34
260
+ efficientnet_b1,256,1024.0,3961.36,258.486,0.77,12.22,7.79
261
+ convnext_nano_ols,224,1024.0,3944.64,259.582,2.65,9.38,15.65
262
+ resnest14d,224,1024.0,3932.19,260.404,2.76,7.33,10.61
263
+ tf_efficientnet_b1,240,1024.0,3922.37,261.055,0.71,10.88,7.79
264
+ flexivit_small,240,1024.0,3913.54,261.645,4.88,9.46,22.06
265
+ mobilevit_xs,256,768.0,3904.8,196.672,0.93,13.62,2.32
266
+ regnetz_b16,224,1024.0,3893.58,262.986,1.45,9.95,9.72
267
+ sedarknet21,256,1024.0,3874.2,264.302,3.93,7.47,20.95
268
+ resnext26ts,256,1024.0,3832.52,267.176,2.43,10.52,10.3
269
+ mobileone_s1,224,1024.0,3826.99,267.562,0.86,9.67,4.83
270
+ tf_efficientnetv2_b2,260,1024.0,3817.93,268.197,1.72,9.84,10.1
271
+ edgenext_small,256,1024.0,3770.23,271.588,1.26,9.07,5.59
272
+ convnext_pico,288,1024.0,3731.48,274.411,2.27,10.08,9.05
273
+ gernet_l,256,1024.0,3727.69,274.69,4.57,8.0,31.08
274
+ seresnext26ts,256,1024.0,3724.62,274.916,2.43,10.52,10.39
275
+ eca_resnext26ts,256,1024.0,3723.07,275.031,2.43,10.52,10.3
276
+ dpn48b,224,1024.0,3716.75,275.497,1.69,8.92,9.13
277
+ tf_efficientnet_lite2,260,1024.0,3695.32,277.096,0.89,12.9,6.09
278
+ gcresnext26ts,256,1024.0,3691.17,277.409,2.43,10.53,10.48
279
+ efficientnet_b2,256,1024.0,3671.26,278.912,0.89,12.81,9.11
280
+ nf_ecaresnet26,224,1024.0,3640.87,281.24,2.41,7.36,16.0
281
+ resnetblur18,288,1024.0,3639.91,281.314,3.87,5.6,11.69
282
+ nf_seresnet26,224,1024.0,3637.43,281.506,2.41,7.36,17.4
283
+ resnet101,160,1024.0,3616.15,283.164,4.0,8.28,44.55
284
+ vit_relpos_small_patch16_224,224,1024.0,3590.52,285.183,4.24,9.38,21.98
285
+ resnet26t,256,1024.0,3578.9,286.111,3.35,10.52,16.01
286
+ vit_srelpos_small_patch16_224,224,1024.0,3572.97,286.585,4.23,8.49,21.97
287
+ convnext_pico_ols,288,1024.0,3558.03,287.789,2.37,10.74,9.06
288
+ cs3darknet_focus_l,256,1024.0,3544.69,288.872,4.66,8.03,21.15
289
+ tf_efficientnetv2_b3,240,1024.0,3543.38,288.978,1.93,9.95,14.36
290
+ legacy_seresnext26_32x4d,224,1024.0,3516.72,291.169,2.49,9.39,16.79
291
+ pvt_v2_b1,224,1024.0,3507.87,291.903,2.04,14.01,14.01
292
+ repvit_m3,224,1024.0,3501.61,292.425,1.89,13.94,10.68
293
+ repvgg_a2,224,1024.0,3495.75,292.916,5.7,6.26,28.21
294
+ efficientnetv2_rw_t,224,1024.0,3486.59,293.686,1.93,9.94,13.65
295
+ ecaresnet101d_pruned,224,1024.0,3483.13,293.977,3.48,7.69,24.88
296
+ ese_vovnet19b_dw,288,1024.0,3478.51,294.369,2.22,13.63,6.54
297
+ mixnet_m,224,1024.0,3474.22,294.731,0.36,8.19,5.01
298
+ edgenext_small_rw,256,1024.0,3458.08,296.106,1.58,9.51,7.83
299
+ convnextv2_pico,224,1024.0,3458.0,296.113,1.37,6.1,9.07
300
+ gc_efficientnetv2_rw_t,224,1024.0,3445.15,297.218,1.94,9.97,13.68
301
+ cs3darknet_l,256,1024.0,3414.99,299.845,4.86,8.55,21.16
302
+ efficientnet_b3_pruned,300,1024.0,3412.19,300.09,1.04,11.86,9.86
303
+ nf_regnet_b1,288,1024.0,3373.08,303.57,1.02,9.2,10.22
304
+ tf_mixnet_m,224,1024.0,3353.29,305.361,0.36,8.19,5.01
305
+ convit_tiny,224,1024.0,3342.83,306.316,1.26,7.94,5.71
306
+ eca_botnext26ts_256,256,1024.0,3341.38,306.449,2.46,11.6,10.59
307
+ ecaresnext50t_32x4d,224,1024.0,3327.77,307.703,2.7,10.09,15.41
308
+ ecaresnext26t_32x4d,224,1024.0,3321.66,308.269,2.7,10.09,15.41
309
+ resnet34,288,1024.0,3320.08,308.416,6.07,6.18,21.8
310
+ seresnext26t_32x4d,224,1024.0,3319.26,308.491,2.7,10.09,16.81
311
+ vit_tiny_patch16_384,384,1024.0,3311.59,309.206,3.16,12.08,5.79
312
+ vit_base_patch32_plus_256,256,1024.0,3301.22,310.177,7.7,6.35,119.48
313
+ seresnext26d_32x4d,224,1024.0,3300.83,310.214,2.73,10.19,16.81
314
+ skresnet34,224,1024.0,3294.57,310.803,3.67,5.13,22.28
315
+ mobilevitv2_100,256,768.0,3290.58,233.384,1.84,16.08,4.9
316
+ vit_relpos_small_patch16_rpn_224,224,1024.0,3279.29,312.245,4.24,9.38,21.97
317
+ eca_halonext26ts,256,1024.0,3270.39,313.1,2.44,11.46,10.76
318
+ coatnet_pico_rw_224,224,1024.0,3250.74,314.993,1.96,12.91,10.85
319
+ rexnetr_200,224,768.0,3238.38,237.146,1.59,15.11,16.52
320
+ ecaresnet26t,256,1024.0,3228.23,317.19,3.35,10.53,16.01
321
+ ecaresnetlight,224,1024.0,3222.96,317.708,4.11,8.42,30.16
322
+ coatnext_nano_rw_224,224,1024.0,3218.47,318.153,2.36,10.68,14.7
323
+ cs3sedarknet_l,256,1024.0,3218.11,318.188,4.86,8.56,21.91
324
+ coat_lite_tiny,224,1024.0,3216.35,318.362,1.6,11.65,5.72
325
+ nf_regnet_b2,272,1024.0,3205.43,319.447,1.22,9.27,14.31
326
+ convnextv2_atto,288,1024.0,3199.9,319.999,0.91,6.3,3.71
327
+ vit_small_r26_s32_224,224,1024.0,3174.89,322.52,3.54,9.44,36.43
328
+ botnet26t_256,256,1024.0,3173.81,322.63,3.32,11.98,12.49
329
+ resnetv2_50,224,1024.0,3170.95,322.919,4.11,11.11,25.55
330
+ fastvit_t8,256,1024.0,3164.9,323.538,0.7,8.63,4.03
331
+ crossvit_small_240,240,1024.0,3164.86,323.541,5.09,11.34,26.86
332
+ bat_resnext26ts,256,1024.0,3139.26,326.18,2.53,12.51,10.73
333
+ seresnet34,288,1024.0,3136.77,326.439,6.07,6.18,21.96
334
+ halonet26t,256,1024.0,3132.55,326.879,3.19,11.69,12.48
335
+ lambda_resnet26t,256,1024.0,3123.88,327.786,3.02,11.87,10.96
336
+ rexnet_200,224,768.0,3120.89,246.073,1.56,14.91,16.37
337
+ vit_small_resnet26d_224,224,1024.0,3106.26,329.645,5.04,10.65,63.61
338
+ hrnet_w18_small_v2,224,1024.0,3095.42,330.8,2.62,9.65,15.6
339
+ mobileone_s2,224,1024.0,3085.91,331.82,1.34,11.55,7.88
340
+ vit_relpos_base_patch32_plus_rpn_256,256,1024.0,3081.88,332.247,7.59,6.63,119.42
341
+ tresnet_m,224,1024.0,3073.78,333.129,5.75,7.31,31.39
342
+ resnet32ts,256,1024.0,3072.91,333.224,4.63,11.58,17.96
343
+ coatnet_nano_cc_224,224,1024.0,3066.72,333.896,2.13,13.1,13.76
344
+ resnet101,176,1024.0,3047.24,336.031,4.92,10.08,44.55
345
+ resnet33ts,256,1024.0,3032.6,337.653,4.76,11.66,19.68
346
+ efficientvit_b2,224,1024.0,3030.14,337.927,1.6,14.62,24.33
347
+ resnet50,224,1024.0,3021.24,338.922,4.11,11.11,25.56
348
+ coat_lite_mini,224,1024.0,3021.22,338.925,2.0,12.25,11.01
349
+ resnet34d,288,1024.0,3013.98,339.739,6.47,7.51,21.82
350
+ cspresnet50,256,1024.0,3012.57,339.898,4.54,11.5,21.62
351
+ resnetv2_50t,224,1024.0,3011.73,339.991,4.32,11.82,25.57
352
+ dpn68b,224,1024.0,3008.58,340.347,2.35,10.47,12.61
353
+ coatnet_nano_rw_224,224,1024.0,3001.39,341.165,2.29,13.29,15.14
354
+ dpn68,224,1024.0,3001.33,341.17,2.35,10.47,12.61
355
+ resnetv2_50d,224,1024.0,2992.98,342.12,4.35,11.92,25.57
356
+ convnext_tiny,224,1024.0,2986.71,342.841,4.47,13.44,28.59
357
+ levit_512,224,1024.0,2974.0,344.305,5.64,10.22,95.17
358
+ dla60,224,1024.0,2959.44,345.999,4.26,10.16,22.04
359
+ fbnetv3_g,240,1024.0,2957.87,346.184,1.28,14.87,16.62
360
+ tf_efficientnet_b2,260,1024.0,2957.04,346.28,1.02,13.83,9.11
361
+ efficientnet_em,240,1024.0,2948.76,347.254,3.04,14.34,6.9
362
+ crossvit_15_240,240,1024.0,2948.65,347.266,5.17,12.01,27.53
363
+ eca_resnet33ts,256,1024.0,2945.18,347.676,4.76,11.66,19.68
364
+ seresnet33ts,256,1024.0,2940.4,348.24,4.76,11.66,19.78
365
+ regnetx_032,224,1024.0,2932.49,349.18,3.2,11.37,15.3
366
+ gcresnet33ts,256,1024.0,2919.42,350.744,4.76,11.68,19.88
367
+ mobileone_s0,224,1024.0,2911.68,351.675,1.09,15.48,5.29
368
+ resnet50t,224,1024.0,2893.61,353.872,4.32,11.82,25.57
369
+ resnet50c,224,1024.0,2893.38,353.9,4.35,11.92,25.58
370
+ repvit_m1_5,224,1024.0,2891.53,354.126,2.31,15.7,14.64
371
+ selecsls84,224,1024.0,2891.52,354.128,5.9,7.57,50.95
372
+ efficientnet_cc_b1_8e,240,1024.0,2883.89,355.064,0.75,15.44,39.72
373
+ haloregnetz_b,224,1024.0,2883.33,355.134,1.97,11.94,11.68
374
+ vgg11,224,1024.0,2881.16,355.4,7.61,7.44,132.86
375
+ resnet50d,224,1024.0,2872.03,356.53,4.35,11.92,25.58
376
+ resnest26d,224,1024.0,2863.53,357.59,3.64,9.97,17.07
377
+ tf_efficientnet_em,240,1024.0,2860.98,357.908,3.04,14.34,6.9
378
+ visformer_small,224,1024.0,2837.73,360.841,4.88,11.43,40.22
379
+ cspresnet50w,256,1024.0,2834.78,361.216,5.04,12.19,28.12
380
+ vovnet39a,224,1024.0,2834.5,361.252,7.09,6.73,22.6
381
+ wide_resnet50_2,176,1024.0,2833.12,361.428,7.29,8.97,68.88
382
+ cspresnet50d,256,1024.0,2828.94,361.963,4.86,12.55,21.64
383
+ resnet26,288,1024.0,2826.83,362.233,3.9,12.15,16.0
384
+ resnext26ts,288,1024.0,2826.2,362.312,3.07,13.31,10.3
385
+ efficientnet_b2,288,1024.0,2822.88,362.739,1.12,16.2,9.11
386
+ regnetv_040,224,1024.0,2785.35,367.627,4.0,12.29,20.64
387
+ levit_512d,224,1024.0,2784.75,367.707,5.85,11.3,92.5
388
+ levit_conv_512,224,1024.0,2781.3,368.162,5.64,10.22,95.17
389
+ deit3_medium_patch16_224,224,1024.0,2780.75,368.235,7.53,10.99,38.85
390
+ crossvit_15_dagger_240,240,1024.0,2776.34,368.82,5.5,12.68,28.21
391
+ regnety_040,224,1024.0,2768.62,369.849,4.0,12.29,20.65
392
+ legacy_seresnet50,224,1024.0,2766.98,370.066,3.88,10.6,28.09
393
+ eca_resnext26ts,288,1024.0,2756.51,371.473,3.07,13.32,10.3
394
+ seresnext26ts,288,1024.0,2751.54,372.144,3.07,13.32,10.39
395
+ regnety_032,224,1024.0,2744.75,373.065,3.2,11.26,19.44
396
+ convnext_tiny_hnf,224,1024.0,2744.61,373.082,4.47,13.44,28.59
397
+ convnextv2_femto,288,1024.0,2744.25,373.131,1.3,7.56,5.23
398
+ eca_vovnet39b,224,1024.0,2742.23,373.408,7.09,6.74,22.6
399
+ resnetv2_50x1_bit,224,1024.0,2741.57,373.497,4.23,11.11,25.55
400
+ gcresnext26ts,288,1024.0,2728.39,375.302,3.07,13.33,10.48
401
+ resnetaa50,224,1024.0,2728.16,375.334,5.15,11.64,25.56
402
+ densenet121,224,1024.0,2725.3,375.726,2.87,6.9,7.98
403
+ ese_vovnet39b,224,1024.0,2723.97,375.912,7.09,6.74,24.57
404
+ mixnet_l,224,1024.0,2712.93,377.44,0.58,10.84,7.33
405
+ tf_efficientnet_cc_b1_8e,240,1024.0,2710.75,377.745,0.75,15.44,39.72
406
+ mobilevit_s,256,768.0,2698.84,284.557,1.86,17.03,5.58
407
+ cs3darknet_focus_l,288,1024.0,2695.52,379.878,5.9,10.16,21.15
408
+ seresnet50,224,1024.0,2693.22,380.203,4.11,11.13,28.09
409
+ xcit_nano_12_p16_384,384,1024.0,2679.82,382.104,1.64,12.14,3.05
410
+ resnetaa34d,288,1024.0,2675.02,382.79,7.33,8.38,21.82
411
+ twins_svt_small,224,1024.0,2670.35,383.458,2.82,10.7,24.06
412
+ ecaresnet50d_pruned,288,1024.0,2662.19,384.634,4.19,10.61,19.94
413
+ convnext_nano,288,1024.0,2634.79,388.635,4.06,13.84,15.59
414
+ resnet50_gn,224,1024.0,2631.91,389.06,4.14,11.11,25.56
415
+ resnetv2_50d_gn,224,1024.0,2623.43,390.317,4.38,11.92,25.57
416
+ xcit_tiny_24_p16_224,224,1024.0,2616.39,391.368,2.34,11.82,12.12
417
+ tf_mixnet_l,224,1024.0,2615.89,391.443,0.58,10.84,7.33
418
+ res2net50_48w_2s,224,1024.0,2611.06,392.166,4.18,11.72,25.29
419
+ gcvit_xxtiny,224,1024.0,2608.34,392.574,2.14,15.36,12.0
420
+ cs3darknet_l,288,1024.0,2607.33,392.728,6.16,10.83,21.16
421
+ resnetaa50d,224,1024.0,2596.72,394.332,5.39,12.44,25.58
422
+ vgg11_bn,224,1024.0,2590.27,395.315,7.62,7.44,132.87
423
+ vit_base_resnet26d_224,224,1024.0,2580.41,396.822,6.93,12.34,101.4
424
+ vit_relpos_medium_patch16_cls_224,224,1024.0,2579.62,396.946,7.55,13.3,38.76
425
+ ecaresnet50t,224,1024.0,2579.62,396.946,4.32,11.83,25.57
426
+ coatnet_rmlp_nano_rw_224,224,1024.0,2579.38,396.984,2.51,18.21,15.15
427
+ davit_tiny,224,1024.0,2578.68,397.091,4.47,17.08,28.36
428
+ seresnet50t,224,1024.0,2574.91,397.672,4.32,11.83,28.1
429
+ resnet26d,288,1024.0,2569.96,398.438,4.29,13.48,16.01
430
+ mobilevitv2_125,256,768.0,2568.23,299.03,2.86,20.1,7.48
431
+ nf_regnet_b3,288,1024.0,2563.17,399.494,1.67,11.84,18.59
432
+ ecaresnet50d,224,1024.0,2560.76,399.87,4.35,11.93,25.58
433
+ levit_conv_512d,224,1024.0,2557.63,400.359,5.85,11.3,92.5
434
+ resnet152,160,1024.0,2531.48,404.495,5.9,11.51,60.19
435
+ efficientvit_b2,256,1024.0,2531.18,404.544,2.09,19.03,24.33
436
+ mobileone_s3,224,1024.0,2513.71,407.355,1.94,13.85,10.17
437
+ resnetrs50,224,1024.0,2512.05,407.624,4.48,12.14,35.69
438
+ twins_pcpvt_small,224,1024.0,2506.77,408.482,3.68,15.51,24.11
439
+ resnetblur50,224,1024.0,2495.43,410.338,5.16,12.02,25.56
440
+ poolformerv2_s12,224,1024.0,2489.38,411.337,1.83,5.53,11.89
441
+ convnextv2_nano,224,1024.0,2480.83,412.755,2.46,8.37,15.62
442
+ regnetx_040,224,1024.0,2478.03,413.222,3.99,12.2,22.12
443
+ eca_nfnet_l0,224,1024.0,2476.91,413.407,4.35,10.47,24.14
444
+ gcresnext50ts,256,1024.0,2473.39,413.995,3.75,15.46,15.67
445
+ nfnet_l0,224,1024.0,2472.84,414.088,4.36,10.47,35.07
446
+ tiny_vit_21m_224,224,1024.0,2468.7,414.781,4.08,15.96,33.22
447
+ cs3sedarknet_l,288,1024.0,2463.79,415.609,6.16,10.83,21.91
448
+ resnet50s,224,1024.0,2456.52,416.838,5.47,13.52,25.68
449
+ dla60x,224,1024.0,2437.95,420.012,3.54,13.8,17.35
450
+ densenetblur121d,224,1024.0,2433.6,420.765,3.11,7.9,8.0
451
+ edgenext_small,320,1024.0,2424.08,422.414,1.97,14.16,5.59
452
+ resnext50_32x4d,224,1024.0,2410.12,424.862,4.26,14.4,25.03
453
+ inception_next_tiny,224,1024.0,2404.04,425.937,4.19,11.98,28.06
454
+ convnext_nano_ols,288,1024.0,2397.01,427.188,4.38,15.5,15.65
455
+ vit_relpos_medium_patch16_224,224,1024.0,2394.54,427.629,7.5,12.13,38.75
456
+ efficientnet_lite3,300,512.0,2392.78,213.967,1.65,21.85,8.2
457
+ vit_srelpos_medium_patch16_224,224,1024.0,2386.54,429.062,7.49,11.32,38.74
458
+ regnetz_c16,256,1024.0,2383.36,429.635,2.51,16.57,13.46
459
+ resnetblur50d,224,1024.0,2382.64,429.765,5.4,12.82,25.58
460
+ vit_base_r26_s32_224,224,1024.0,2381.88,429.901,6.76,11.54,101.38
461
+ gcresnet50t,256,1024.0,2372.96,431.518,5.42,14.67,25.9
462
+ regnety_040_sgn,224,1024.0,2371.57,431.77,4.03,12.29,20.65
463
+ res2net50_26w_4s,224,1024.0,2359.62,433.957,4.28,12.61,25.7
464
+ vovnet57a,224,1024.0,2357.12,434.416,8.95,7.52,36.64
465
+ resmlp_24_224,224,1024.0,2350.19,435.697,5.96,10.91,30.02
466
+ maxvit_pico_rw_256,256,768.0,2346.84,327.238,1.68,18.77,7.46
467
+ inception_v3,299,1024.0,2346.46,436.391,5.73,8.97,23.83
468
+ maxvit_rmlp_pico_rw_256,256,768.0,2343.0,327.774,1.69,21.32,7.52
469
+ seresnetaa50d,224,1024.0,2333.21,438.87,5.4,12.46,28.11
470
+ focalnet_tiny_srf,224,1024.0,2331.81,439.132,4.42,16.32,28.43
471
+ cspresnext50,256,1024.0,2330.62,439.358,4.05,15.86,20.57
472
+ res2net50_14w_8s,224,1024.0,2327.89,439.871,4.21,13.28,25.06
473
+ dla60_res2net,224,1024.0,2327.26,439.99,4.15,12.34,20.85
474
+ coatnet_0_rw_224,224,1024.0,2319.62,441.438,4.23,15.1,27.44
475
+ regnetz_b16,288,1024.0,2318.51,441.651,2.39,16.43,9.72
476
+ gmixer_24_224,224,1024.0,2315.73,442.182,5.28,14.45,24.72
477
+ resnext50d_32x4d,224,1024.0,2305.65,444.116,4.5,15.2,25.05
478
+ lambda_resnet26rpt_256,256,768.0,2282.36,336.484,3.16,11.87,10.99
479
+ ese_vovnet57b,224,1024.0,2279.9,449.132,8.95,7.52,38.61
480
+ resnest50d_1s4x24d,224,1024.0,2278.75,449.357,4.43,13.57,25.68
481
+ dla60_res2next,224,1024.0,2268.77,451.333,3.49,13.17,17.03
482
+ sehalonet33ts,256,1024.0,2262.52,452.582,3.55,14.7,13.69
483
+ res2net50d,224,1024.0,2256.17,453.855,4.52,13.41,25.72
484
+ vit_medium_patch16_gap_240,240,1024.0,2253.27,454.439,8.6,12.57,44.4
485
+ res2next50,224,1024.0,2251.4,454.817,4.2,13.71,24.67
486
+ resnet32ts,288,1024.0,2244.87,456.139,5.86,14.65,17.96
487
+ edgenext_base,256,1024.0,2239.63,457.204,3.85,15.58,18.51
488
+ efficientvit_l1,224,1024.0,2235.54,458.043,5.27,15.85,52.65
489
+ skresnet50,224,1024.0,2226.66,459.87,4.11,12.5,25.8
490
+ nfnet_f0,192,1024.0,2226.44,459.916,7.21,10.16,71.49
491
+ tf_efficientnetv2_b3,300,1024.0,2226.35,459.935,3.04,15.74,14.36
492
+ efficientnetv2_rw_t,288,1024.0,2225.5,460.11,3.19,16.42,13.65
493
+ nf_ecaresnet50,224,1024.0,2219.3,461.395,4.21,11.13,25.56
494
+ darknetaa53,256,1024.0,2219.0,461.459,7.97,12.39,36.02
495
+ densenet169,224,1024.0,2218.3,461.604,3.4,7.3,14.15
496
+ nf_seresnet50,224,1024.0,2217.49,461.772,4.21,11.13,28.09
497
+ edgenext_small_rw,320,1024.0,2214.15,462.468,2.46,14.85,7.83
498
+ resnet33ts,288,1024.0,2214.09,462.482,6.02,14.75,19.68
499
+ xcit_small_12_p16_224,224,1024.0,2207.67,463.826,4.82,12.57,26.25
500
+ focalnet_tiny_lrf,224,1024.0,2205.41,464.301,4.49,17.76,28.65
501
+ resnet51q,256,1024.0,2195.84,466.325,6.38,16.55,35.7
502
+ repvgg_b1g4,224,1024.0,2195.75,466.344,8.15,10.64,39.97
503
+ seresnext50_32x4d,224,1024.0,2188.04,467.986,4.26,14.42,27.56
504
+ vit_relpos_medium_patch16_rpn_224,224,1024.0,2187.29,468.147,7.5,12.13,38.73
505
+ cs3darknet_focus_x,256,1024.0,2185.7,468.489,8.03,10.69,35.02
506
+ legacy_seresnext50_32x4d,224,1024.0,2184.4,468.766,4.26,14.42,27.56
507
+ tf_efficientnet_lite3,300,512.0,2178.27,235.039,1.65,21.85,8.2
508
+ resnet26t,320,1024.0,2173.03,471.22,5.24,16.44,16.01
509
+ gc_efficientnetv2_rw_t,288,1024.0,2170.84,471.696,3.2,16.45,13.68
510
+ gmlp_s16_224,224,1024.0,2161.42,473.752,4.42,15.1,19.42
511
+ seresnet33ts,288,1024.0,2156.33,474.868,6.02,14.76,19.78
512
+ eca_resnet33ts,288,1024.0,2152.27,475.765,6.02,14.76,19.68
513
+ fastvit_t12,256,1024.0,2151.9,475.846,1.42,12.42,7.55
514
+ nf_regnet_b3,320,1024.0,2148.66,476.564,2.05,14.61,18.59
515
+ eva02_small_patch14_224,224,1024.0,2144.78,477.426,5.53,12.34,21.62
516
+ resnet152,176,1024.0,2139.0,478.716,7.22,13.99,60.19
517
+ vit_medium_patch16_reg4_gap_256,256,1024.0,2137.51,479.051,9.93,14.51,38.87
518
+ gcresnet33ts,288,1024.0,2134.49,479.728,6.02,14.78,19.88
519
+ skresnet50d,224,1024.0,2133.34,479.986,4.36,13.31,25.82
520
+ ecaresnet101d_pruned,288,1024.0,2128.45,481.09,5.75,12.71,24.88
521
+ fbnetv3_g,288,1024.0,2127.74,481.25,1.77,21.09,16.62
522
+ vit_medium_patch16_reg4_256,256,1024.0,2119.83,483.047,9.97,14.56,38.87
523
+ eva02_tiny_patch14_336,336,1024.0,2106.54,486.094,3.14,13.85,5.76
524
+ convnextv2_pico,288,1024.0,2101.04,487.367,2.27,10.08,9.07
525
+ nf_resnet50,256,1024.0,2100.31,487.536,5.46,14.52,25.56
526
+ resnetrs101,192,1024.0,2100.21,487.558,6.04,12.7,63.62
527
+ poolformer_s24,224,1024.0,2099.97,487.615,3.41,10.68,21.39
528
+ pvt_v2_b2,224,1024.0,2099.92,487.626,3.9,24.96,25.36
529
+ efficientnet_b3,288,512.0,2089.91,244.977,1.63,21.49,12.23
530
+ cs3sedarknet_xdw,256,1024.0,2078.01,492.768,5.97,17.18,21.6
531
+ darknet53,256,1024.0,2077.03,493.0,9.31,12.39,41.61
532
+ ecaresnet50t,256,1024.0,2076.41,493.149,5.64,15.45,25.57
533
+ cs3darknet_x,256,1024.0,2060.02,497.071,8.38,11.35,35.05
534
+ xcit_nano_12_p8_224,224,1024.0,2059.06,497.302,2.16,15.71,3.05
535
+ mobilevitv2_150,256,512.0,2058.61,248.702,4.09,24.11,10.59
536
+ rexnetr_300,224,1024.0,2042.01,501.455,3.39,22.16,34.81
537
+ lambda_resnet50ts,256,1024.0,2041.61,501.552,5.07,17.48,21.54
538
+ fastvit_s12,256,1024.0,2028.81,504.718,1.82,13.67,9.47
539
+ coatnet_rmlp_0_rw_224,224,1024.0,2024.25,505.855,4.52,21.26,27.45
540
+ gcvit_xtiny,224,1024.0,2023.42,506.063,2.93,20.26,19.98
541
+ fastvit_sa12,256,1024.0,2022.28,506.347,1.96,13.83,11.58
542
+ crossvit_18_240,240,1024.0,2014.44,508.318,8.21,16.14,43.27
543
+ vit_medium_patch16_gap_256,256,1024.0,1996.45,512.899,9.78,14.29,38.86
544
+ resnet61q,256,1024.0,1996.22,512.958,7.8,17.01,36.85
545
+ coatnet_bn_0_rw_224,224,1024.0,1985.64,515.69,4.48,18.41,27.44
546
+ vit_base_patch32_384,384,1024.0,1984.44,516.005,12.67,12.14,88.3
547
+ vit_base_patch32_clip_384,384,1024.0,1981.44,516.784,12.67,12.14,88.3
548
+ cspdarknet53,256,1024.0,1981.04,516.888,6.57,16.81,27.64
549
+ sebotnet33ts_256,256,512.0,1977.98,258.841,3.89,17.46,13.7
550
+ ecaresnet26t,320,1024.0,1973.79,518.786,5.24,16.44,16.01
551
+ vit_base_resnet50d_224,224,1024.0,1971.35,519.428,8.68,16.1,110.97
552
+ cs3sedarknet_x,256,1024.0,1962.3,521.825,8.38,11.35,35.4
553
+ regnetx_080,224,1024.0,1962.04,521.894,8.02,14.06,39.57
554
+ seresnext26t_32x4d,288,1024.0,1950.77,524.91,4.46,16.68,16.81
555
+ mixnet_xl,224,1024.0,1948.29,525.576,0.93,14.57,11.9
556
+ resnest50d,224,1024.0,1945.36,526.368,5.4,14.36,27.48
557
+ seresnext26d_32x4d,288,1024.0,1940.04,527.813,4.51,16.85,16.81
558
+ coatnet_0_224,224,512.0,1939.29,264.004,4.43,21.14,25.04
559
+ swin_tiny_patch4_window7_224,224,1024.0,1938.74,528.165,4.51,17.06,28.29
560
+ resnetv2_101,224,1024.0,1935.15,529.146,7.83,16.23,44.54
561
+ regnetx_064,224,1024.0,1933.12,529.703,6.49,16.37,26.21
562
+ dla102,224,1024.0,1924.77,531.998,7.19,14.18,33.27
563
+ crossvit_18_dagger_240,240,1024.0,1921.19,532.991,8.65,16.91,44.27
564
+ rexnetr_200,288,512.0,1914.7,267.396,2.62,24.96,16.52
565
+ rexnet_300,224,1024.0,1911.46,535.706,3.44,22.4,34.71
566
+ nest_tiny,224,1024.0,1908.27,536.601,5.24,14.75,17.06
567
+ dm_nfnet_f0,192,1024.0,1907.3,536.873,7.21,10.16,71.49
568
+ ecaresnetlight,288,1024.0,1897.75,539.574,6.79,13.91,30.16
569
+ maxxvit_rmlp_nano_rw_256,256,768.0,1897.05,404.83,4.17,21.53,16.78
570
+ resnet101,224,1024.0,1885.15,543.183,7.83,16.23,44.55
571
+ nest_tiny_jx,224,1024.0,1884.26,543.437,5.24,14.75,17.06
572
+ pvt_v2_b2_li,224,1024.0,1882.78,543.863,3.77,25.04,22.55
573
+ vit_large_patch32_224,224,1024.0,1869.82,547.632,15.27,11.11,305.51
574
+ vgg13,224,1024.0,1868.34,548.068,11.31,12.25,133.05
575
+ resnetv2_101d,224,1024.0,1865.75,548.827,8.07,17.04,44.56
576
+ efficientformer_l3,224,1024.0,1865.63,548.865,3.93,12.01,31.41
577
+ resnetv2_50,288,1024.0,1863.99,549.347,6.79,18.37,25.55
578
+ mobileone_s4,224,1024.0,1856.33,551.615,3.04,17.74,14.95
579
+ res2net50_26w_6s,224,1024.0,1853.01,552.603,6.33,15.28,37.05
580
+ efficientvit_b2,288,1024.0,1851.14,553.16,2.64,24.03,24.33
581
+ lamhalobotnet50ts_256,256,1024.0,1841.89,555.938,5.02,18.44,22.57
582
+ maxvit_nano_rw_256,256,768.0,1833.65,418.827,4.26,25.76,15.45
583
+ maxvit_rmlp_nano_rw_256,256,768.0,1832.13,419.175,4.28,27.4,15.5
584
+ convnext_small,224,1024.0,1829.72,559.636,8.71,21.56,50.22
585
+ resnet101c,224,1024.0,1824.57,561.217,8.08,17.04,44.57
586
+ convnext_tiny,288,1024.0,1817.02,563.549,7.39,22.21,28.59
587
+ resnet101d,224,1024.0,1816.61,563.677,8.08,17.04,44.57
588
+ gcresnext50ts,288,1024.0,1802.21,568.181,4.75,19.57,15.67
589
+ efficientnetv2_s,288,1024.0,1800.9,568.595,4.75,20.13,21.46
590
+ pit_b_distilled_224,224,1024.0,1798.47,569.363,10.63,16.67,74.79
591
+ resnet50,288,1024.0,1790.94,571.757,6.8,18.37,25.56
592
+ twins_pcpvt_base,224,1024.0,1774.55,577.037,6.46,21.35,43.83
593
+ halonet50ts,256,1024.0,1772.89,577.576,5.3,19.2,22.73
594
+ dpn68b,288,1024.0,1770.85,578.24,3.89,17.3,12.61
595
+ pit_b_224,224,1024.0,1769.93,578.542,10.56,16.6,73.76
596
+ hrnet_w18_ssld,224,1024.0,1769.77,578.594,4.32,16.31,21.3
597
+ swin_s3_tiny_224,224,1024.0,1768.18,579.114,4.64,19.13,28.33
598
+ efficientvit_l2,224,1024.0,1765.89,579.866,6.97,19.58,63.71
599
+ hrnet_w18,224,1024.0,1763.75,580.57,4.32,16.31,21.3
600
+ coat_lite_small,224,1024.0,1746.27,586.38,3.96,22.09,19.84
601
+ repvgg_b1,224,1024.0,1745.5,586.64,13.16,10.64,57.42
602
+ wide_resnet50_2,224,1024.0,1744.59,586.947,11.43,14.4,68.88
603
+ efficientnet_b3,320,512.0,1740.17,294.213,2.01,26.52,12.23
604
+ gcresnet50t,288,1024.0,1734.6,590.328,6.86,18.57,25.9
605
+ densenet201,224,1024.0,1731.46,591.397,4.34,7.85,20.01
606
+ tresnet_v2_l,224,1024.0,1730.52,591.717,8.85,16.34,46.17
607
+ tf_efficientnet_b3,300,512.0,1724.68,296.856,1.87,23.83,12.23
608
+ efficientnetv2_rw_s,288,1024.0,1722.48,594.481,4.91,21.41,23.94
609
+ darknetaa53,288,1024.0,1719.51,595.509,10.08,15.68,36.02
610
+ maxxvitv2_nano_rw_256,256,768.0,1706.28,450.091,6.12,19.66,23.7
611
+ resnetaa101d,224,1024.0,1701.55,601.792,9.12,17.56,44.57
612
+ xcit_tiny_12_p16_384,384,1024.0,1700.55,602.144,3.64,18.25,6.72
613
+ cait_xxs24_224,224,1024.0,1698.66,602.815,2.53,20.29,11.96
614
+ resnet50t,288,1024.0,1694.77,604.2,7.14,19.53,25.57
615
+ legacy_seresnet101,224,1024.0,1693.62,604.611,7.61,15.74,49.33
616
+ cs3edgenet_x,256,1024.0,1692.79,604.907,11.53,12.92,47.82
617
+ resnet50d,288,1024.0,1684.01,608.061,7.19,19.7,25.58
618
+ mobilevitv2_175,256,512.0,1675.38,305.592,5.54,28.13,14.25
619
+ regnetv_064,224,1024.0,1674.09,611.663,6.39,16.41,30.58
620
+ resnetv2_101x1_bit,224,1024.0,1672.61,612.204,8.04,16.23,44.54
621
+ efficientnet_b3_gn,288,512.0,1669.75,306.623,1.74,23.35,11.73
622
+ ese_vovnet39b,288,768.0,1667.87,460.459,11.71,11.13,24.57
623
+ regnety_032,288,1024.0,1666.89,614.307,5.29,18.61,19.44
624
+ seresnet101,224,1024.0,1666.33,614.509,7.84,16.27,49.33
625
+ regnety_064,224,1024.0,1666.11,614.593,6.39,16.41,30.58
626
+ convnext_tiny_hnf,288,1024.0,1663.94,615.393,7.39,22.21,28.59
627
+ regnetv_040,288,1024.0,1658.56,617.391,6.6,20.3,20.64
628
+ regnety_040,288,1024.0,1648.75,621.064,6.61,20.3,20.65
629
+ regnety_080,224,1024.0,1645.74,622.202,8.0,17.97,39.18
630
+ resnet101s,224,1024.0,1640.53,624.176,9.19,18.64,44.67
631
+ mixer_b16_224,224,1024.0,1627.76,629.075,12.62,14.53,59.88
632
+ dla102x,224,1024.0,1623.56,630.698,5.89,19.42,26.31
633
+ nf_resnet101,224,1024.0,1622.48,631.12,8.01,16.23,44.55
634
+ swinv2_cr_tiny_224,224,1024.0,1621.28,631.59,4.66,28.45,28.33
635
+ ecaresnet101d,224,1024.0,1619.0,632.477,8.08,17.07,44.57
636
+ convnextv2_tiny,224,1024.0,1618.49,632.676,4.47,13.44,28.64
637
+ darknet53,288,1024.0,1615.64,633.795,11.78,15.68,41.61
638
+ wide_resnet101_2,176,1024.0,1615.25,633.945,14.31,13.18,126.89
639
+ repvit_m2_3,224,1024.0,1614.73,634.149,4.57,26.21,23.69
640
+ resnetaa50,288,1024.0,1610.23,635.923,8.52,19.24,25.56
641
+ resnetblur101d,224,1024.0,1609.76,636.109,9.12,17.94,44.57
642
+ efficientvit_b3,224,1024.0,1609.54,636.196,3.99,26.9,48.65
643
+ regnetz_d32,256,1024.0,1603.03,638.779,5.98,23.74,27.58
644
+ regnetz_b16_evos,224,1024.0,1602.47,639.001,1.43,9.95,9.74
645
+ ese_vovnet39b_evos,224,1024.0,1599.88,640.036,7.07,6.74,24.58
646
+ davit_small,224,1024.0,1599.81,640.066,8.69,27.54,49.75
647
+ seresnet50,288,1024.0,1595.89,641.637,6.8,18.39,28.09
648
+ cs3se_edgenet_x,256,1024.0,1593.53,642.587,11.53,12.94,50.72
649
+ nf_regnet_b4,320,1024.0,1592.57,642.975,3.29,19.88,30.21
650
+ swinv2_cr_tiny_ns_224,224,1024.0,1590.7,643.731,4.66,28.45,28.33
651
+ sequencer2d_s,224,1024.0,1586.65,645.372,4.96,11.31,27.65
652
+ tf_efficientnetv2_s,300,1024.0,1583.75,646.555,5.35,22.73,21.46
653
+ densenet121,288,1024.0,1581.16,647.615,4.74,11.41,7.98
654
+ resnet51q,288,1024.0,1581.05,647.659,8.07,20.94,35.7
655
+ regnetz_d8,256,1024.0,1580.57,647.855,3.97,23.74,23.37
656
+ resmlp_36_224,224,1024.0,1577.5,649.116,8.91,16.33,44.69
657
+ mixer_l32_224,224,1024.0,1577.26,649.215,11.27,19.86,206.94
658
+ regnetz_040,256,1024.0,1574.58,650.32,4.06,24.19,27.12
659
+ vit_base_patch16_224_miil,224,1024.0,1574.06,650.535,16.88,16.5,94.4
660
+ botnet50ts_256,256,512.0,1573.5,325.38,5.54,22.23,22.74
661
+ resnet50_gn,288,1024.0,1570.23,652.122,6.85,18.37,25.56
662
+ vit_base_patch16_clip_224,224,1024.0,1569.93,652.248,16.87,16.49,86.57
663
+ cs3darknet_x,288,1024.0,1569.68,652.352,10.6,14.36,35.05
664
+ deit_base_distilled_patch16_224,224,1024.0,1568.26,652.942,16.95,16.58,87.34
665
+ vit_base_patch16_224,224,1024.0,1568.03,653.038,16.87,16.49,86.57
666
+ deit_base_patch16_224,224,1024.0,1567.8,653.131,16.87,16.49,86.57
667
+ regnetz_040_h,256,1024.0,1564.2,654.638,4.12,24.29,28.94
668
+ resnetv2_50d_gn,288,1024.0,1555.81,658.164,7.24,19.7,25.57
669
+ resnetv2_50d_frn,224,1024.0,1553.07,659.326,4.33,11.92,25.59
670
+ tresnet_l,224,1024.0,1528.92,669.739,10.9,11.9,55.99
671
+ regnety_080_tv,224,1024.0,1528.54,669.91,8.51,19.73,39.38
672
+ resnetaa50d,288,1024.0,1524.48,671.692,8.92,20.57,25.58
673
+ nf_resnet50,288,1024.0,1524.41,671.724,6.88,18.37,25.56
674
+ caformer_s18,224,1024.0,1522.76,672.449,3.9,15.18,26.34
675
+ resnext101_32x8d,176,1024.0,1521.82,672.868,10.33,19.37,88.79
676
+ seresnet50t,288,1024.0,1518.59,674.299,7.14,19.55,28.1
677
+ ecaresnet50t,288,1024.0,1518.21,674.465,7.14,19.55,25.57
678
+ mvitv2_tiny,224,1024.0,1518.01,674.556,4.7,21.16,24.17
679
+ resnet101d,256,1024.0,1517.18,674.926,10.55,22.25,44.57
680
+ pvt_v2_b3,224,1024.0,1516.27,675.326,6.71,33.8,45.24
681
+ maxvit_tiny_rw_224,224,768.0,1513.7,507.357,4.93,28.54,29.06
682
+ ecaresnet50d,288,1024.0,1510.36,677.975,7.19,19.72,25.58
683
+ convnextv2_nano,288,768.0,1503.98,510.637,4.06,13.84,15.62
684
+ halo2botnet50ts_256,256,1024.0,1499.3,682.975,5.02,21.78,22.64
685
+ cs3sedarknet_x,288,1024.0,1498.9,683.158,10.6,14.37,35.4
686
+ res2net50_26w_8s,224,1024.0,1498.8,683.201,8.37,17.95,48.4
687
+ resnext101_32x4d,224,1024.0,1496.35,684.32,8.01,21.23,44.18
688
+ deit3_base_patch16_224,224,1024.0,1488.08,688.122,16.87,16.49,86.59
689
+ regnetz_c16,320,1024.0,1478.43,692.615,3.92,25.88,13.46
690
+ resnest50d_4s2x40d,224,1024.0,1478.06,692.785,4.4,17.94,30.42
691
+ resnetblur50,288,1024.0,1477.0,693.285,8.52,19.87,25.56
692
+ skresnext50_32x4d,224,1024.0,1470.18,696.502,4.5,17.18,27.48
693
+ efficientvit_l2,256,1024.0,1466.16,698.41,9.09,25.49,63.71
694
+ eca_nfnet_l0,288,1024.0,1463.28,699.787,7.12,17.29,24.14
695
+ mobilevitv2_200,256,768.0,1462.66,525.062,7.22,32.15,18.45
696
+ nfnet_l0,288,1024.0,1461.21,700.775,7.13,17.29,35.07
697
+ resnet61q,288,1024.0,1460.17,701.277,9.87,21.52,36.85
698
+ vit_base_patch32_clip_448,448,1024.0,1456.81,702.892,17.21,16.49,88.34
699
+ vit_small_patch16_36x1_224,224,1024.0,1454.45,704.036,12.63,24.59,64.67
700
+ vit_small_resnet50d_s16_224,224,1024.0,1451.55,705.439,13.0,21.12,57.53
701
+ beit_base_patch16_224,224,1024.0,1443.54,709.354,16.87,16.49,86.53
702
+ res2net101_26w_4s,224,1024.0,1442.54,709.848,8.1,18.45,45.21
703
+ vit_base_patch16_siglip_224,224,1024.0,1439.5,711.343,17.02,16.71,92.88
704
+ vit_base_patch16_gap_224,224,1024.0,1436.45,712.857,16.78,16.41,86.57
705
+ regnety_040_sgn,288,1024.0,1436.16,712.999,6.67,20.3,20.65
706
+ beitv2_base_patch16_224,224,1024.0,1436.01,713.075,16.87,16.49,86.53
707
+ convit_small,224,1024.0,1431.38,715.383,5.76,17.87,27.78
708
+ edgenext_base,320,1024.0,1423.6,719.289,6.01,24.32,18.51
709
+ convformer_s18,224,1024.0,1421.81,720.197,3.96,15.82,26.77
710
+ focalnet_small_srf,224,1024.0,1419.82,721.204,8.62,26.26,49.89
711
+ densenetblur121d,288,1024.0,1416.47,722.914,5.14,13.06,8.0
712
+ poolformer_s36,224,1024.0,1415.39,723.463,5.0,15.82,30.86
713
+ resnetv2_50d_evos,224,1024.0,1415.09,723.614,4.33,11.92,25.59
714
+ coatnet_rmlp_1_rw_224,224,1024.0,1413.05,724.664,7.44,28.08,41.69
715
+ res2net101d,224,1024.0,1406.68,727.943,8.35,19.25,45.23
716
+ legacy_xception,299,1024.0,1405.99,728.302,8.4,35.83,22.86
717
+ vit_small_patch16_18x2_224,224,1024.0,1405.24,728.689,12.63,24.59,64.67
718
+ resnetblur50d,288,1024.0,1403.3,729.695,8.92,21.19,25.58
719
+ resnext50_32x4d,288,1024.0,1402.5,730.115,7.04,23.81,25.03
720
+ inception_next_small,224,1024.0,1397.1,732.931,8.36,19.27,49.37
721
+ repvgg_b2g4,224,1024.0,1392.83,735.183,12.63,12.9,61.76
722
+ gcvit_tiny,224,1024.0,1390.57,736.376,4.79,29.82,28.22
723
+ vit_relpos_base_patch16_clsgap_224,224,1024.0,1386.7,738.433,16.88,17.72,86.43
724
+ vit_base_patch16_clip_quickgelu_224,224,1024.0,1384.47,739.621,16.87,16.49,86.19
725
+ vit_relpos_base_patch16_cls_224,224,1024.0,1384.18,739.775,16.88,17.72,86.43
726
+ dpn92,224,1024.0,1380.04,741.995,6.54,18.21,37.67
727
+ seresnetaa50d,288,1024.0,1379.8,742.125,8.92,20.59,28.11
728
+ vit_small_patch16_384,384,1024.0,1379.23,742.429,12.45,24.15,22.2
729
+ nf_ecaresnet101,224,1024.0,1375.27,744.569,8.01,16.27,44.55
730
+ nf_seresnet101,224,1024.0,1370.83,746.983,8.02,16.27,49.33
731
+ efficientnet_b3_gn,320,384.0,1366.12,281.077,2.14,28.83,11.73
732
+ vgg16_bn,224,1024.0,1361.56,752.067,15.5,13.56,138.37
733
+ flexivit_base,240,1024.0,1360.19,752.822,19.35,18.92,86.59
734
+ efficientformerv2_s0,224,1024.0,1357.83,754.133,0.41,5.3,3.6
735
+ resnetv2_152,224,1024.0,1356.74,754.735,11.55,22.56,60.19
736
+ seresnext101_32x4d,224,1024.0,1356.08,755.105,8.02,21.26,48.96
737
+ legacy_seresnext101_32x4d,224,1024.0,1355.29,755.543,8.02,21.26,48.96
738
+ efficientnet_b3_g8_gn,288,768.0,1342.01,572.264,2.59,23.35,14.25
739
+ efficientvit_b3,256,768.0,1340.35,572.972,5.2,35.01,48.65
740
+ efficientnet_b4,320,512.0,1338.46,382.52,3.13,34.76,19.34
741
+ nfnet_f0,256,1024.0,1336.25,766.311,12.62,18.05,71.49
742
+ resnext50d_32x4d,288,1024.0,1335.71,766.62,7.44,25.13,25.05
743
+ focalnet_small_lrf,224,1024.0,1333.55,767.863,8.74,28.61,50.34
744
+ resnet152,224,1024.0,1331.42,769.094,11.56,22.56,60.19
745
+ ese_vovnet99b,224,1024.0,1328.91,770.544,16.51,11.27,63.2
746
+ resnetv2_152d,224,1024.0,1322.45,774.307,11.8,23.36,60.2
747
+ regnetx_120,224,1024.0,1317.68,777.11,12.13,21.37,46.11
748
+ hrnet_w32,224,1024.0,1308.75,782.414,8.97,22.02,41.23
749
+ xception41p,299,512.0,1308.08,391.403,9.25,39.86,26.91
750
+ vit_relpos_base_patch16_224,224,1024.0,1306.59,783.71,16.8,17.63,86.43
751
+ xcit_tiny_12_p8_224,224,1024.0,1306.3,783.883,4.81,23.6,6.71
752
+ coatnet_1_rw_224,224,1024.0,1303.02,785.857,7.63,27.22,41.72
753
+ resnet152c,224,1024.0,1301.97,786.489,11.8,23.36,60.21
754
+ coatnet_rmlp_1_rw2_224,224,1024.0,1300.63,787.299,7.71,32.74,41.72
755
+ twins_pcpvt_large,224,1024.0,1297.56,789.162,9.53,30.21,60.99
756
+ maxvit_tiny_tf_224,224,768.0,1297.26,592.007,5.42,31.21,30.92
757
+ resnet152d,224,1024.0,1296.94,789.538,11.8,23.36,60.21
758
+ cs3edgenet_x,288,1024.0,1296.8,789.626,14.59,16.36,47.82
759
+ vit_base_patch16_xp_224,224,1024.0,1295.7,790.295,16.85,16.49,86.51
760
+ poolformerv2_s24,224,1024.0,1287.82,795.129,3.42,10.68,21.34
761
+ dla169,224,1024.0,1280.41,799.732,11.6,20.2,53.39
762
+ efficientnet_el_pruned,300,1024.0,1280.32,799.789,8.0,30.7,10.59
763
+ efficientnet_el,300,1024.0,1279.02,800.603,8.0,30.7,10.59
764
+ seresnext50_32x4d,288,1024.0,1276.82,801.978,7.04,23.82,27.56
765
+ hrnet_w30,224,1024.0,1276.63,802.098,8.15,21.21,37.71
766
+ deit3_small_patch16_384,384,1024.0,1274.41,803.494,12.45,24.15,22.21
767
+ ecaresnet50t,320,1024.0,1274.01,803.751,8.82,24.13,25.57
768
+ maxxvit_rmlp_tiny_rw_256,256,768.0,1269.37,605.011,6.36,32.69,29.64
769
+ volo_d1_224,224,1024.0,1269.05,806.894,6.94,24.43,26.63
770
+ vgg19,224,1024.0,1264.63,809.714,19.63,14.86,143.67
771
+ convnext_base,224,1024.0,1259.04,813.306,15.38,28.75,88.59
772
+ rexnetr_300,288,512.0,1257.05,407.293,5.59,36.61,34.81
773
+ vit_base_patch16_rpn_224,224,1024.0,1255.24,815.771,16.78,16.41,86.54
774
+ densenet161,224,1024.0,1254.96,815.95,7.79,11.06,28.68
775
+ efficientformerv2_s1,224,1024.0,1251.09,818.477,0.67,7.66,6.19
776
+ regnety_120,224,1024.0,1250.69,818.739,12.14,21.38,51.82
777
+ twins_svt_base,224,1024.0,1249.89,819.258,8.36,20.42,56.07
778
+ tf_efficientnet_el,300,1024.0,1249.79,819.323,8.0,30.7,10.59
779
+ sequencer2d_m,224,1024.0,1238.3,826.927,6.55,14.26,38.31
780
+ nest_small,224,1024.0,1229.99,832.512,9.41,22.88,38.35
781
+ maxvit_tiny_rw_256,256,768.0,1229.06,624.855,6.44,37.27,29.07
782
+ maxvit_rmlp_tiny_rw_256,256,768.0,1228.3,625.245,6.47,39.84,29.15
783
+ repvgg_b2,224,1024.0,1219.54,839.651,20.45,12.9,89.02
784
+ nest_small_jx,224,1024.0,1219.36,839.775,9.41,22.88,38.35
785
+ mixnet_xxl,224,768.0,1211.88,633.716,2.04,23.43,23.96
786
+ resnet152s,224,1024.0,1205.05,849.747,12.92,24.96,60.32
787
+ swin_small_patch4_window7_224,224,1024.0,1202.25,851.724,8.77,27.47,49.61
788
+ inception_v4,299,1024.0,1191.21,859.617,12.28,15.09,42.68
789
+ swinv2_tiny_window8_256,256,1024.0,1191.2,859.622,5.96,24.57,28.35
790
+ legacy_seresnet152,224,1024.0,1187.19,862.527,11.33,22.08,66.82
791
+ coatnet_1_224,224,512.0,1184.08,432.392,8.28,31.3,42.23
792
+ xcit_small_24_p16_224,224,1024.0,1178.16,869.138,9.1,23.63,47.67
793
+ vit_relpos_base_patch16_rpn_224,224,1024.0,1177.44,869.665,16.8,17.63,86.41
794
+ eca_nfnet_l1,256,1024.0,1175.13,871.38,9.62,22.04,41.41
795
+ seresnet152,224,1024.0,1173.43,872.64,11.57,22.61,66.82
796
+ maxvit_tiny_pm_256,256,768.0,1169.83,656.496,6.31,40.82,30.09
797
+ crossvit_base_240,240,1024.0,1165.77,878.374,20.13,22.67,105.03
798
+ efficientnet_lite4,380,384.0,1155.38,332.349,4.04,45.66,13.01
799
+ xception41,299,512.0,1153.48,443.864,9.28,39.86,26.97
800
+ regnetx_160,224,1024.0,1153.37,887.82,15.99,25.52,54.28
801
+ vgg19_bn,224,1024.0,1151.34,889.391,19.66,14.86,143.68
802
+ cait_xxs36_224,224,1024.0,1139.1,898.942,3.77,30.34,17.3
803
+ tresnet_xl,224,1024.0,1138.98,899.04,15.2,15.34,78.44
804
+ tnt_s_patch16_224,224,1024.0,1134.46,902.62,5.24,24.37,23.76
805
+ davit_base,224,1024.0,1133.31,903.534,15.36,36.72,87.95
806
+ dm_nfnet_f0,256,1024.0,1132.28,904.361,12.62,18.05,71.49
807
+ resnetv2_101,288,1024.0,1131.44,905.029,12.94,26.83,44.54
808
+ mvitv2_small_cls,224,1024.0,1129.19,906.833,7.04,28.17,34.87
809
+ mvitv2_small,224,1024.0,1128.19,907.64,7.0,28.08,34.87
810
+ coat_tiny,224,1024.0,1126.07,909.345,4.35,27.2,5.5
811
+ convmixer_1024_20_ks9_p14,224,1024.0,1123.31,911.577,5.55,5.51,24.38
812
+ vit_base_patch16_reg8_gap_256,256,1024.0,1115.77,917.744,22.6,22.09,86.62
813
+ fastvit_sa24,256,1024.0,1114.43,918.841,3.79,23.92,21.55
814
+ repvgg_b3g4,224,1024.0,1113.37,919.717,17.89,15.1,83.83
815
+ convnext_small,288,1024.0,1110.94,921.731,14.39,35.65,50.22
816
+ vit_base_patch16_siglip_256,256,1024.0,1108.01,924.168,22.23,21.83,92.93
817
+ resnet101,288,1024.0,1104.31,927.267,12.95,26.83,44.55
818
+ dla102x2,224,1024.0,1104.21,927.342,9.34,29.91,41.28
819
+ pvt_v2_b4,224,1024.0,1101.67,929.481,9.83,48.14,62.56
820
+ vit_large_r50_s32_224,224,1024.0,1091.33,938.289,19.45,22.22,328.99
821
+ eva02_base_patch16_clip_224,224,1024.0,1090.31,939.167,16.9,18.91,86.26
822
+ vgg13_bn,224,1024.0,1090.15,939.306,11.33,12.25,133.05
823
+ resnet152d,256,1024.0,1089.57,939.806,15.41,30.51,60.21
824
+ nf_regnet_b4,384,1024.0,1089.51,939.86,4.7,28.61,30.21
825
+ efficientnet_b3_g8_gn,320,768.0,1085.43,707.541,3.2,28.83,14.25
826
+ vit_small_r26_s32_384,384,1024.0,1083.82,944.797,10.24,27.67,36.47
827
+ efficientvit_l2,288,1024.0,1083.69,944.906,11.51,32.19,63.71
828
+ efficientnetv2_s,384,1024.0,1081.44,946.869,8.44,35.77,21.46
829
+ tf_efficientnet_lite4,380,384.0,1073.72,357.628,4.04,45.66,13.01
830
+ pvt_v2_b5,224,1024.0,1068.28,958.536,11.39,44.23,81.96
831
+ hrnet_w18_ssld,288,1024.0,1066.01,960.575,7.14,26.96,21.3
832
+ tf_efficientnetv2_s,384,1024.0,1054.1,971.431,8.44,35.77,21.46
833
+ regnety_160,224,1024.0,1046.76,978.242,15.96,23.04,83.59
834
+ samvit_base_patch16_224,224,1024.0,1027.37,996.713,16.83,17.2,86.46
835
+ convnext_tiny,384,768.0,1026.31,748.299,13.14,39.48,28.59
836
+ wide_resnet50_2,288,1024.0,1025.91,998.129,18.89,23.81,68.88
837
+ efficientnetv2_rw_s,384,1024.0,1024.66,999.343,8.72,38.03,23.94
838
+ vgg16,224,1024.0,1020.44,1003.475,15.47,13.56,138.36
839
+ cs3se_edgenet_x,320,1024.0,1009.45,1014.397,18.01,20.21,50.72
840
+ vit_base_patch16_plus_240,240,1024.0,1002.7,1021.234,26.31,22.07,117.56
841
+ swinv2_cr_small_224,224,1024.0,1001.72,1022.232,9.07,50.27,49.7
842
+ dpn98,224,1024.0,998.61,1025.406,11.73,25.2,61.57
843
+ efficientvit_b3,288,768.0,996.43,770.744,6.58,44.2,48.65
844
+ resnetaa101d,288,1024.0,996.18,1027.911,15.07,29.03,44.57
845
+ wide_resnet101_2,224,1024.0,994.0,1030.164,22.8,21.23,126.89
846
+ regnetz_d32,320,1024.0,994.0,1030.165,9.33,37.08,27.58
847
+ swinv2_cr_small_ns_224,224,1024.0,991.13,1033.149,9.08,50.27,49.7
848
+ focalnet_base_srf,224,1024.0,990.91,1033.385,15.28,35.01,88.15
849
+ convnextv2_small,224,1024.0,989.67,1034.674,8.71,21.56,50.32
850
+ resnet200,224,1024.0,987.28,1037.18,15.07,32.19,64.67
851
+ convnextv2_tiny,288,768.0,983.87,780.578,7.39,22.21,28.64
852
+ seresnet101,288,1024.0,983.64,1041.016,12.95,26.87,49.33
853
+ vit_small_patch8_224,224,1024.0,981.8,1042.968,16.76,32.86,21.67
854
+ regnetz_d8,320,1024.0,980.9,1043.922,6.19,37.08,23.37
855
+ regnety_080,288,1024.0,977.86,1047.177,13.22,29.69,39.18
856
+ inception_next_base,224,1024.0,977.1,1047.988,14.85,25.69,86.67
857
+ vit_base_r50_s16_224,224,1024.0,974.47,1050.816,20.94,27.88,97.89
858
+ resnest101e,256,1024.0,968.0,1057.838,13.38,28.66,48.28
859
+ convnext_base,256,1024.0,965.93,1060.101,20.09,37.55,88.59
860
+ regnetz_c16_evos,256,768.0,965.5,795.429,2.48,16.57,13.49
861
+ regnetz_040,320,512.0,964.02,531.096,6.35,37.78,27.12
862
+ poolformer_m36,224,1024.0,963.9,1062.337,8.8,22.02,56.17
863
+ regnetz_b16_evos,288,768.0,961.28,798.923,2.36,16.43,9.74
864
+ inception_resnet_v2,299,1024.0,958.82,1067.962,13.18,25.06,55.84
865
+ regnetz_040_h,320,512.0,958.46,534.182,6.43,37.94,28.94
866
+ seresnet152d,256,1024.0,956.44,1070.629,15.42,30.56,66.84
867
+ ecaresnet101d,288,1024.0,951.62,1076.05,13.35,28.19,44.57
868
+ regnety_064,288,1024.0,949.24,1078.741,10.56,27.11,30.58
869
+ resnetrs152,256,1024.0,948.32,1079.798,15.59,30.83,86.62
870
+ resnext101_64x4d,224,1024.0,947.79,1080.397,15.52,31.21,83.46
871
+ regnetv_064,288,1024.0,947.23,1081.038,10.55,27.11,30.58
872
+ xception65p,299,512.0,944.43,542.118,13.91,52.48,39.82
873
+ resnetblur101d,288,1024.0,942.52,1086.438,15.07,29.65,44.57
874
+ resnetrs101,288,1024.0,941.79,1087.277,13.56,28.53,63.62
875
+ focalnet_base_lrf,224,1024.0,941.31,1087.831,15.43,38.13,88.75
876
+ resnext101_32x8d,224,1024.0,939.44,1090.002,16.48,31.21,88.79
877
+ repvgg_b3,224,1024.0,933.91,1096.448,29.16,15.1,123.09
878
+ hrnet_w40,224,1024.0,931.96,1098.75,12.75,25.29,57.56
879
+ nfnet_f1,224,1024.0,924.88,1107.159,17.87,22.94,132.63
880
+ eva02_small_patch14_336,336,1024.0,923.99,1108.223,12.41,27.7,22.13
881
+ resnet101d,320,1024.0,923.18,1109.193,16.48,34.77,44.57
882
+ xcit_tiny_24_p16_384,384,1024.0,910.96,1124.082,6.87,34.29,12.12
883
+ efficientnet_b4,384,384.0,908.88,422.486,4.51,50.04,19.34
884
+ cait_s24_224,224,1024.0,904.24,1132.424,9.35,40.58,46.92
885
+ mobilevitv2_150,384,256.0,899.17,284.697,9.2,54.25,10.59
886
+ maxvit_rmlp_small_rw_224,224,768.0,898.81,854.449,10.48,42.44,64.9
887
+ coat_mini,224,1024.0,894.78,1144.406,6.82,33.68,10.34
888
+ coat_lite_medium,224,1024.0,892.4,1147.459,9.81,40.06,44.57
889
+ efficientnetv2_m,320,1024.0,889.26,1151.505,11.01,39.97,54.14
890
+ seresnext101_64x4d,224,1024.0,888.73,1152.196,15.53,31.25,88.23
891
+ gmlp_b16_224,224,1024.0,884.5,1157.706,15.78,30.21,73.08
892
+ seresnext101_32x8d,224,1024.0,883.56,1158.934,16.48,31.25,93.57
893
+ swin_s3_small_224,224,768.0,879.87,872.841,9.43,37.84,49.74
894
+ vit_relpos_base_patch16_plus_240,240,1024.0,875.04,1170.215,26.21,23.41,117.38
895
+ efficientformer_l7,224,1024.0,873.11,1172.808,10.17,24.45,82.23
896
+ nest_base,224,1024.0,870.02,1176.974,16.71,30.51,67.72
897
+ poolformerv2_s36,224,1024.0,869.16,1178.141,5.01,15.82,30.79
898
+ maxvit_small_tf_224,224,512.0,868.0,589.85,11.39,46.31,68.93
899
+ seresnext101d_32x8d,224,1024.0,866.35,1181.949,16.72,32.05,93.59
900
+ nest_base_jx,224,1024.0,862.67,1187.001,16.71,30.51,67.72
901
+ levit_384_s8,224,512.0,854.68,599.045,9.98,35.86,39.12
902
+ regnetz_e8,256,1024.0,853.36,1199.952,9.91,40.94,57.7
903
+ swin_base_patch4_window7_224,224,1024.0,852.78,1200.762,15.47,36.63,87.77
904
+ coatnet_2_rw_224,224,512.0,852.23,600.767,14.55,39.37,73.87
905
+ tf_efficientnet_b4,380,384.0,851.5,450.956,4.49,49.49,19.34
906
+ gcvit_small,224,1024.0,841.82,1216.401,8.57,41.61,51.09
907
+ convnextv2_nano,384,512.0,841.68,608.3,7.22,24.61,15.62
908
+ resnetv2_50d_evos,288,1024.0,840.21,1218.735,7.15,19.7,25.59
909
+ levit_conv_384_s8,224,512.0,839.77,609.68,9.98,35.86,39.12
910
+ xception65,299,512.0,839.39,609.953,13.96,52.48,39.92
911
+ hrnet_w44,224,1024.0,835.38,1225.779,14.94,26.92,67.06
912
+ crossvit_15_dagger_408,408,1024.0,833.7,1228.252,16.07,37.0,28.5
913
+ tiny_vit_21m_384,384,512.0,827.46,618.747,11.94,46.84,21.23
914
+ twins_svt_large,224,1024.0,824.23,1242.353,14.84,27.23,99.27
915
+ seresnextaa101d_32x8d,224,1024.0,820.77,1247.602,17.25,34.16,93.59
916
+ xcit_medium_24_p16_224,224,1024.0,820.51,1247.988,16.13,31.71,84.4
917
+ eva02_base_patch14_224,224,1024.0,819.51,1249.51,22.0,24.67,85.76
918
+ coatnet_rmlp_2_rw_224,224,512.0,814.13,628.885,14.64,44.94,73.88
919
+ hrnet_w48_ssld,224,1024.0,812.33,1260.551,17.34,28.56,77.47
920
+ hrnet_w48,224,1024.0,811.26,1262.228,17.34,28.56,77.47
921
+ caformer_s36,224,1024.0,810.13,1263.986,7.55,29.29,39.3
922
+ tresnet_m,448,1024.0,809.9,1264.343,22.99,29.21,31.39
923
+ resnet200d,256,1024.0,803.17,1274.938,20.0,43.09,64.69
924
+ sequencer2d_l,224,1024.0,802.78,1275.557,9.74,22.12,54.3
925
+ maxxvit_rmlp_small_rw_256,256,768.0,801.57,958.106,14.21,47.76,66.01
926
+ swinv2_base_window12_192,192,1024.0,799.54,1280.724,11.9,39.72,109.28
927
+ dm_nfnet_f1,224,1024.0,798.67,1282.118,17.87,22.94,132.63
928
+ coatnet_2_224,224,512.0,796.89,642.486,15.94,42.41,74.68
929
+ vit_medium_patch16_gap_384,384,1024.0,795.07,1287.922,22.01,32.15,39.03
930
+ mvitv2_base_cls,224,1024.0,791.15,1294.298,10.23,40.65,65.44
931
+ mvitv2_base,224,1024.0,785.87,1303.007,10.16,40.5,51.47
932
+ efficientnetv2_rw_m,320,1024.0,785.27,1303.997,12.72,47.14,53.24
933
+ resnet152,288,1024.0,781.77,1309.827,19.11,37.28,60.19
934
+ swinv2_tiny_window16_256,256,512.0,775.64,660.087,6.68,39.02,28.35
935
+ fastvit_sa36,256,1024.0,768.44,1332.545,5.62,34.02,31.53
936
+ xcit_small_12_p16_384,384,1024.0,764.7,1339.074,14.14,36.5,26.25
937
+ convnext_base,288,1024.0,763.36,1341.427,25.43,47.53,88.59
938
+ convformer_s36,224,1024.0,754.92,1356.424,7.67,30.5,40.01
939
+ regnety_120,288,768.0,738.36,1040.13,20.06,35.34,51.82
940
+ swinv2_small_window8_256,256,1024.0,737.99,1387.548,11.58,40.14,49.73
941
+ dpn131,224,1024.0,732.6,1397.744,16.09,32.97,79.25
942
+ swinv2_cr_small_ns_256,256,1024.0,731.79,1399.291,12.07,76.21,49.7
943
+ mobilevitv2_175,384,256.0,731.75,349.838,12.47,63.29,14.25
944
+ convit_base,224,1024.0,730.43,1401.91,17.52,31.77,86.54
945
+ resnetv2_50x1_bit,448,512.0,729.61,701.734,16.62,44.46,25.55
946
+ poolformer_m48,224,1024.0,727.01,1408.491,11.59,29.17,73.47
947
+ maxvit_rmlp_small_rw_256,256,768.0,724.69,1059.745,13.69,55.48,64.9
948
+ tnt_b_patch16_224,224,1024.0,721.67,1418.912,14.09,39.01,65.41
949
+ eca_nfnet_l1,320,1024.0,720.22,1421.77,14.92,34.42,41.41
950
+ swinv2_cr_base_224,224,1024.0,716.89,1428.383,15.86,59.66,87.88
951
+ swin_s3_base_224,224,1024.0,715.81,1430.534,13.69,48.26,71.13
952
+ volo_d2_224,224,1024.0,711.4,1439.408,14.34,41.34,58.68
953
+ swinv2_cr_base_ns_224,224,1024.0,711.07,1440.068,15.86,59.66,87.88
954
+ convnextv2_base,224,768.0,708.71,1083.64,15.38,28.75,88.72
955
+ densenet264d,224,1024.0,697.85,1467.348,13.57,14.0,72.74
956
+ ecaresnet200d,256,1024.0,697.3,1468.506,20.0,43.15,64.69
957
+ seresnet200d,256,1024.0,696.92,1469.301,20.01,43.15,71.86
958
+ nf_regnet_b5,384,1024.0,694.76,1473.879,7.95,42.9,49.74
959
+ seresnet152,288,1024.0,693.47,1476.616,19.11,37.34,66.82
960
+ resnetrs200,256,1024.0,693.26,1477.057,20.18,43.42,93.21
961
+ coat_small,224,1024.0,689.68,1484.732,12.61,44.25,21.69
962
+ convnext_large,224,1024.0,686.69,1491.207,34.4,43.13,197.77
963
+ xcit_tiny_24_p8_224,224,1024.0,684.2,1496.615,9.21,45.38,12.11
964
+ efficientvit_l3,224,1024.0,667.4,1534.307,27.62,39.16,246.04
965
+ dpn107,224,1024.0,666.43,1536.527,18.38,33.46,86.92
966
+ resnet152d,320,1024.0,664.6,1540.768,24.08,47.67,60.21
967
+ senet154,224,1024.0,664.59,1540.791,20.77,38.69,115.09
968
+ legacy_senet154,224,1024.0,663.62,1543.045,20.77,38.69,115.09
969
+ efficientformerv2_s2,224,1024.0,658.11,1555.962,1.27,11.77,12.71
970
+ maxxvitv2_rmlp_base_rw_224,224,768.0,650.48,1180.654,23.88,54.39,116.09
971
+ xcit_nano_12_p8_384,384,1024.0,649.92,1575.56,6.34,46.06,3.05
972
+ xception71,299,512.0,649.47,788.325,18.09,69.92,42.34
973
+ vit_large_patch32_384,384,1024.0,643.51,1591.268,44.28,32.22,306.63
974
+ mobilevitv2_200,384,256.0,640.82,399.48,16.24,72.34,18.45
975
+ davit_large,224,1024.0,630.01,1625.361,34.37,55.08,196.81
976
+ hrnet_w64,224,1024.0,629.26,1627.299,28.97,35.09,128.06
977
+ convnext_small,384,768.0,628.81,1221.341,25.58,63.37,50.22
978
+ regnetz_d8_evos,256,1024.0,626.83,1633.604,4.5,24.92,23.46
979
+ regnety_160,288,768.0,626.54,1225.759,26.37,38.07,83.59
980
+ convnext_base,320,768.0,617.04,1244.641,31.39,58.68,88.59
981
+ fastvit_ma36,256,1024.0,615.75,1662.995,7.85,40.39,44.07
982
+ tf_efficientnetv2_m,384,1024.0,614.24,1667.09,15.85,57.52,54.14
983
+ gcvit_base,224,1024.0,612.92,1670.669,14.87,55.48,90.32
984
+ regnety_320,224,1024.0,612.34,1672.272,32.34,30.26,145.05
985
+ efficientvit_l2,384,768.0,610.03,1258.949,20.45,57.01,63.71
986
+ poolformerv2_m36,224,1024.0,609.2,1680.886,8.81,22.02,56.08
987
+ regnetz_c16_evos,320,512.0,608.23,841.78,3.86,25.88,13.49
988
+ resnetv2_50x3_bit,224,768.0,585.49,1311.719,37.06,33.34,217.32
989
+ seresnet152d,320,1024.0,585.32,1749.453,24.09,47.72,66.84
990
+ xcit_small_12_p8_224,224,1024.0,584.75,1751.159,18.69,47.19,26.21
991
+ resnet200,288,1024.0,584.49,1751.952,24.91,53.21,64.67
992
+ resnetrs152,320,1024.0,580.71,1763.336,24.34,48.14,86.62
993
+ caformer_m36,224,1024.0,580.7,1763.373,12.75,40.61,56.2
994
+ resnext101_64x4d,288,1024.0,579.65,1766.578,25.66,51.59,83.46
995
+ levit_conv_512_s8,224,256.0,579.33,441.879,21.82,52.28,74.05
996
+ crossvit_18_dagger_408,408,1024.0,578.67,1769.56,25.31,49.38,44.61
997
+ levit_512_s8,224,256.0,564.15,453.77,21.82,52.28,74.05
998
+ convnextv2_tiny,384,384.0,553.95,693.189,13.14,39.48,28.64
999
+ convformer_m36,224,1024.0,546.86,1872.507,12.89,42.05,57.05
1000
+ efficientnet_b5,416,256.0,546.68,468.268,8.27,80.68,30.39
1001
+ seresnet269d,256,1024.0,545.35,1877.679,26.59,53.6,113.67
1002
+ efficientvit_l3,256,768.0,542.99,1414.373,36.06,50.98,246.04
1003
+ seresnext101_32x8d,288,1024.0,537.9,1903.669,27.24,51.63,93.57
1004
+ efficientnetv2_m,416,1024.0,531.24,1927.549,18.6,67.5,54.14
1005
+ resnetrs270,256,1024.0,529.33,1934.515,27.06,55.84,129.86
1006
+ maxvit_rmlp_base_rw_224,224,768.0,529.1,1451.502,22.63,79.3,116.14
1007
+ swinv2_base_window8_256,256,1024.0,528.71,1936.775,20.37,52.59,87.92
1008
+ regnetz_e8,320,768.0,528.46,1453.264,15.46,63.94,57.7
1009
+ seresnext101d_32x8d,288,1024.0,527.36,1941.726,27.64,52.95,93.59
1010
+ convnext_large_mlp,256,768.0,525.72,1460.834,44.94,56.33,200.13
1011
+ nfnet_f2,256,1024.0,524.14,1953.657,33.76,41.85,193.78
1012
+ halonet_h1,256,256.0,522.84,489.621,3.0,51.17,8.1
1013
+ regnetx_320,224,1024.0,522.6,1959.408,31.81,36.3,107.81
1014
+ mixer_l16_224,224,1024.0,520.22,1968.376,44.6,41.69,208.2
1015
+ resnext101_32x16d,224,1024.0,519.8,1969.975,36.27,51.18,194.03
1016
+ eca_nfnet_l2,320,1024.0,509.51,2009.758,20.95,47.43,56.72
1017
+ ecaresnet200d,288,1024.0,503.74,2032.793,25.31,54.59,64.69
1018
+ seresnet200d,288,1024.0,503.36,2034.329,25.32,54.6,71.86
1019
+ caformer_s18,384,512.0,501.38,1021.162,11.45,44.61,26.34
1020
+ volo_d3_224,224,1024.0,497.87,2056.757,20.78,60.09,86.33
1021
+ resnet200d,320,1024.0,493.82,2073.621,31.25,67.33,64.69
1022
+ swin_large_patch4_window7_224,224,768.0,492.35,1559.852,34.53,54.94,196.53
1023
+ vit_base_patch16_18x2_224,224,1024.0,492.32,2079.918,50.37,49.17,256.73
1024
+ deit_base_patch16_384,384,1024.0,491.82,2082.046,49.4,48.3,86.86
1025
+ vit_base_patch16_clip_384,384,1024.0,491.74,2082.405,49.41,48.3,86.86
1026
+ vit_base_patch16_384,384,1024.0,491.42,2083.727,49.4,48.3,86.86
1027
+ deit_base_distilled_patch16_384,384,1024.0,491.32,2084.164,49.49,48.39,87.63
1028
+ hrnet_w48_ssld,288,1024.0,490.92,2085.876,28.66,47.21,77.47
1029
+ eva_large_patch14_196,196,1024.0,490.45,2087.863,59.66,43.77,304.14
1030
+ maxvit_base_tf_224,224,512.0,488.88,1047.285,23.52,81.67,119.47
1031
+ efficientnet_b5,448,256.0,488.83,523.691,9.59,93.56,30.39
1032
+ vit_large_patch16_224,224,1024.0,488.5,2096.219,59.7,43.77,304.33
1033
+ swinv2_small_window16_256,256,512.0,486.59,1052.215,12.82,66.29,49.73
1034
+ swinv2_large_window12_192,192,768.0,485.58,1581.6,26.17,56.53,228.77
1035
+ convformer_s18,384,512.0,484.08,1057.663,11.63,46.49,26.77
1036
+ seresnextaa101d_32x8d,288,1024.0,479.96,2133.497,28.51,56.44,93.59
1037
+ coatnet_3_rw_224,224,256.0,478.44,535.067,32.63,59.07,181.81
1038
+ coatnet_rmlp_3_rw_224,224,256.0,477.75,535.833,32.75,64.7,165.15
1039
+ xcit_large_24_p16_224,224,1024.0,472.07,2169.166,35.86,47.26,189.1
1040
+ vit_small_patch14_dinov2,518,1024.0,469.29,2181.987,29.46,57.34,22.06
1041
+ deit3_base_patch16_384,384,1024.0,466.88,2193.286,49.4,48.3,86.88
1042
+ deit3_large_patch16_224,224,1024.0,466.56,2194.777,59.7,43.77,304.37
1043
+ efficientnetv2_rw_m,416,768.0,466.5,1646.281,21.49,79.62,53.24
1044
+ nfnet_f1,320,1024.0,466.35,2195.774,35.97,46.77,132.63
1045
+ nf_regnet_b5,456,768.0,464.5,1653.385,11.7,61.95,49.74
1046
+ coatnet_3_224,224,256.0,464.1,551.594,35.72,63.61,166.97
1047
+ vit_small_patch14_reg4_dinov2,518,1024.0,460.4,2224.119,29.55,57.51,22.06
1048
+ poolformerv2_m48,224,1024.0,459.37,2229.113,11.59,29.17,73.35
1049
+ beitv2_large_patch16_224,224,1024.0,452.16,2264.697,59.7,43.77,304.43
1050
+ beit_large_patch16_224,224,1024.0,452.15,2264.716,59.7,43.77,304.43
1051
+ resnetv2_101x1_bit,448,512.0,451.35,1134.365,31.65,64.93,44.54
1052
+ dm_nfnet_f2,256,1024.0,451.22,2269.395,33.76,41.85,193.78
1053
+ vit_base_patch16_siglip_384,384,1024.0,448.34,2283.991,50.0,49.11,93.18
1054
+ resnetv2_152x2_bit,224,1024.0,441.5,2319.35,46.95,45.11,236.34
1055
+ convnext_xlarge,224,768.0,435.62,1762.988,60.98,57.5,350.2
1056
+ maxvit_tiny_tf_384,384,256.0,434.99,588.503,16.0,94.22,30.98
1057
+ efficientformerv2_l,224,1024.0,431.02,2375.769,2.59,18.54,26.32
1058
+ convnext_base,384,512.0,430.72,1188.698,45.21,84.49,88.59
1059
+ convnextv2_base,288,512.0,429.59,1191.832,25.43,47.53,88.72
1060
+ resnetrs200,320,1024.0,428.05,2392.217,31.51,67.81,93.21
1061
+ flexivit_large,240,1024.0,424.67,2411.279,68.48,50.22,304.36
1062
+ convnextv2_large,224,512.0,423.49,1208.977,34.4,43.13,197.96
1063
+ xcit_tiny_12_p8_384,384,1024.0,423.2,2419.661,14.12,69.12,6.71
1064
+ swinv2_cr_large_224,224,768.0,422.05,1819.675,35.1,78.42,196.68
1065
+ caformer_b36,224,768.0,419.19,1832.111,22.5,54.14,98.75
1066
+ swinv2_cr_tiny_384,384,256.0,419.04,610.909,15.34,161.01,28.33
1067
+ tf_efficientnet_b5,456,256.0,418.1,612.278,10.46,98.86,30.39
1068
+ convnext_large,288,512.0,415.42,1232.482,56.87,71.29,197.77
1069
+ davit_huge,224,512.0,410.45,1247.402,60.93,73.44,348.92
1070
+ maxxvitv2_rmlp_large_rw_224,224,768.0,409.41,1875.861,43.69,75.4,215.42
1071
+ tiny_vit_21m_512,512,384.0,408.26,940.575,21.23,83.26,21.27
1072
+ xcit_small_24_p16_384,384,1024.0,408.08,2509.308,26.72,68.57,47.67
1073
+ tf_efficientnetv2_m,480,768.0,405.02,1896.185,24.76,89.84,54.14
1074
+ tresnet_l,448,1024.0,403.56,2537.407,43.59,47.56,55.99
1075
+ beit_base_patch16_384,384,1024.0,401.76,2548.786,49.4,48.3,86.74
1076
+ convformer_b36,224,768.0,396.81,1935.431,22.69,56.06,99.88
1077
+ regnetz_d8_evos,320,768.0,395.82,1940.285,7.03,38.92,23.46
1078
+ seresnextaa101d_32x8d,320,1024.0,395.0,2592.386,35.19,69.67,93.59
1079
+ seresnet269d,288,1024.0,393.84,2600.059,33.65,67.81,113.67
1080
+ dm_nfnet_f1,320,1024.0,393.6,2601.642,35.97,46.77,132.63
1081
+ regnety_160,384,384.0,378.47,1014.589,46.87,67.67,83.59
1082
+ vit_large_r50_s32_384,384,1024.0,372.96,2745.589,56.4,64.88,329.09
1083
+ regnety_640,224,768.0,362.45,2118.906,64.16,42.5,281.38
1084
+ eca_nfnet_l2,384,768.0,361.66,2123.504,30.05,68.28,56.72
1085
+ vit_large_patch14_224,224,1024.0,359.79,2846.069,77.83,57.11,304.2
1086
+ vit_large_patch14_clip_224,224,1024.0,359.08,2851.744,77.83,57.11,304.2
1087
+ swinv2_base_window12to16_192to256,256,384.0,358.35,1071.569,22.02,84.71,87.92
1088
+ swinv2_base_window16_256,256,384.0,358.25,1071.869,22.02,84.71,87.92
1089
+ vit_large_patch16_siglip_256,256,1024.0,351.53,2912.942,78.12,57.42,315.96
1090
+ vit_base_patch8_224,224,1024.0,350.95,2917.813,66.87,65.71,86.58
1091
+ efficientvit_l3,320,512.0,346.1,1479.341,56.32,79.34,246.04
1092
+ efficientnetv2_l,384,1024.0,342.83,2986.92,36.1,101.16,118.52
1093
+ tf_efficientnetv2_l,384,1024.0,338.97,3020.897,36.1,101.16,118.52
1094
+ ecaresnet269d,320,1024.0,337.13,3037.39,41.53,83.69,102.09
1095
+ resnest200e,320,1024.0,336.33,3044.627,35.69,82.78,70.2
1096
+ maxvit_large_tf_224,224,384.0,336.26,1141.954,42.99,109.57,211.79
1097
+ convnext_large_mlp,320,512.0,336.03,1523.669,70.21,88.02,200.13
1098
+ inception_next_base,384,512.0,335.9,1524.27,43.64,75.48,86.67
1099
+ resnetv2_101x3_bit,224,768.0,334.56,2295.509,71.23,48.7,387.93
1100
+ eca_nfnet_l3,352,768.0,328.62,2337.043,32.57,73.12,72.04
1101
+ vit_large_patch14_clip_quickgelu_224,224,1024.0,324.15,3159.023,77.83,57.11,303.97
1102
+ repvgg_d2se,320,1024.0,320.2,3197.943,74.57,46.82,133.33
1103
+ vit_base_r50_s16_384,384,1024.0,317.01,3230.175,61.29,81.77,98.95
1104
+ volo_d4_224,224,1024.0,317.0,3230.22,44.34,80.22,192.96
1105
+ volo_d1_384,384,512.0,314.1,1630.023,22.75,108.55,26.78
1106
+ vit_large_patch14_xp_224,224,1024.0,309.84,3304.92,77.77,57.11,304.06
1107
+ convmixer_768_32,224,1024.0,308.6,3318.227,19.55,25.95,21.11
1108
+ xcit_small_24_p8_224,224,1024.0,305.72,3349.464,35.81,90.77,47.63
1109
+ resnetrs350,288,1024.0,304.48,3363.098,43.67,87.09,163.96
1110
+ nasnetalarge,331,384.0,300.79,1276.642,23.89,90.56,88.75
1111
+ coat_lite_medium_384,384,512.0,299.62,1708.831,28.73,116.7,44.57
1112
+ tresnet_xl,448,768.0,296.15,2593.304,60.77,61.31,78.44
1113
+ maxvit_small_tf_384,384,192.0,288.16,666.295,33.58,139.86,69.02
1114
+ pnasnet5large,331,384.0,287.26,1336.778,25.04,92.89,86.06
1115
+ xcit_medium_24_p16_384,384,1024.0,282.76,3621.451,47.39,91.63,84.4
1116
+ ecaresnet269d,352,1024.0,281.17,3641.867,50.25,101.25,102.09
1117
+ coatnet_4_224,224,256.0,280.04,914.128,60.81,98.85,275.43
1118
+ cait_xxs24_384,384,1024.0,277.04,3696.16,9.63,122.65,12.03
1119
+ coatnet_rmlp_2_rw_384,384,192.0,273.87,701.059,43.04,132.57,73.88
1120
+ resnetrs270,352,1024.0,271.91,3765.914,51.13,105.48,129.86
1121
+ nfnet_f2,352,768.0,270.88,2835.244,63.22,79.06,193.78
1122
+ caformer_s36,384,512.0,266.29,1922.686,22.2,86.08,39.3
1123
+ convnext_xlarge,288,512.0,263.75,1941.25,100.8,95.05,350.2
1124
+ swinv2_cr_small_384,384,256.0,258.42,990.618,29.7,298.03,49.7
1125
+ efficientnet_b6,528,128.0,257.57,496.944,19.4,167.39,43.04
1126
+ convformer_s36,384,512.0,257.36,1989.401,22.54,89.62,40.01
1127
+ convnextv2_large,288,256.0,256.91,996.448,56.87,71.29,197.96
1128
+ eva02_large_patch14_224,224,1024.0,256.79,3987.739,77.9,65.52,303.27
1129
+ eva02_large_patch14_clip_224,224,1024.0,253.51,4039.312,77.93,65.52,304.11
1130
+ resnext101_32x32d,224,512.0,253.0,2023.672,87.29,91.12,468.53
1131
+ maxvit_tiny_tf_512,512,192.0,249.39,769.864,28.66,172.66,31.05
1132
+ tf_efficientnet_b6,528,128.0,247.44,517.29,19.4,167.39,43.04
1133
+ nfnet_f3,320,1024.0,247.37,4139.575,68.77,83.93,254.92
1134
+ mvitv2_large_cls,224,768.0,246.55,3114.926,42.17,111.69,234.58
1135
+ vit_so400m_patch14_siglip_224,224,1024.0,246.49,4154.292,106.18,70.45,427.68
1136
+ efficientnetv2_xl,384,1024.0,244.46,4188.739,52.81,139.2,208.12
1137
+ mvitv2_large,224,512.0,242.6,2110.485,43.87,112.02,217.99
1138
+ convnextv2_base,384,256.0,242.26,1056.699,45.21,84.49,88.72
1139
+ vit_base_patch16_siglip_512,512,512.0,241.2,2122.705,88.89,87.3,93.52
1140
+ convnext_large,384,384.0,234.69,1636.209,101.1,126.74,197.77
1141
+ convnext_large_mlp,384,384.0,234.65,1636.476,101.11,126.74,200.13
1142
+ dm_nfnet_f2,352,768.0,234.38,3276.685,63.22,79.06,193.78
1143
+ tf_efficientnetv2_xl,384,1024.0,230.18,4448.679,52.81,139.2,208.12
1144
+ efficientnetv2_l,480,512.0,229.94,2226.68,56.4,157.99,118.52
1145
+ tf_efficientnetv2_l,480,512.0,227.38,2251.742,56.4,157.99,118.52
1146
+ swin_base_patch4_window12_384,384,256.0,226.65,1129.483,47.19,134.78,87.9
1147
+ regnety_320,384,384.0,225.95,1699.504,95.0,88.87,145.05
1148
+ resnetrs420,320,1024.0,221.8,4616.729,64.2,126.56,191.89
1149
+ xcit_tiny_24_p8_384,384,1024.0,221.03,4632.753,27.05,132.94,12.11
1150
+ efficientvit_l3,384,384.0,220.15,1744.25,81.08,114.02,246.04
1151
+ swinv2_large_window12to16_192to256,256,256.0,218.91,1169.41,47.81,121.53,196.74
1152
+ maxxvitv2_rmlp_base_rw_384,384,384.0,215.87,1778.825,70.18,160.22,116.09
1153
+ resmlp_big_24_224,224,1024.0,214.65,4770.604,100.23,87.31,129.14
1154
+ dm_nfnet_f3,320,1024.0,212.33,4822.62,68.77,83.93,254.92
1155
+ volo_d5_224,224,1024.0,212.3,4823.349,72.4,118.11,295.46
1156
+ xcit_medium_24_p8_224,224,1024.0,210.35,4868.038,63.52,121.22,84.32
1157
+ seresnextaa201d_32x8d,320,1024.0,207.05,4945.752,70.22,138.71,149.39
1158
+ eca_nfnet_l3,448,512.0,204.74,2500.737,52.55,118.4,72.04
1159
+ xcit_small_12_p8_384,384,512.0,195.78,2615.134,54.92,138.25,26.21
1160
+ cait_xs24_384,384,768.0,193.45,3970.037,19.28,183.98,26.67
1161
+ caformer_m36,384,256.0,191.51,1336.728,37.45,119.33,56.2
1162
+ focalnet_huge_fl3,224,384.0,190.45,2016.221,118.26,104.8,745.28
1163
+ eva02_base_patch14_448,448,512.0,189.13,2707.053,87.74,98.4,87.12
1164
+ maxvit_xlarge_tf_224,224,256.0,188.97,1354.682,96.49,164.37,506.99
1165
+ convformer_m36,384,384.0,186.96,2053.847,37.87,123.56,57.05
1166
+ cait_xxs36_384,384,1024.0,185.14,5531.038,14.35,183.7,17.37
1167
+ swinv2_cr_base_384,384,256.0,184.66,1386.338,50.57,333.68,87.88
1168
+ resnetrs350,384,1024.0,184.39,5553.562,77.59,154.74,163.96
1169
+ regnety_1280,224,512.0,182.89,2799.45,127.66,71.58,644.81
1170
+ swinv2_cr_huge_224,224,384.0,181.27,2118.357,115.97,121.08,657.83
1171
+ vit_huge_patch14_clip_224,224,1024.0,179.25,5712.71,161.99,95.07,632.05
1172
+ vit_huge_patch14_224,224,1024.0,179.24,5713.082,161.99,95.07,630.76
1173
+ volo_d2_384,384,384.0,177.67,2161.247,46.17,184.51,58.87
1174
+ maxvit_rmlp_base_rw_384,384,384.0,177.21,2166.875,66.51,233.79,116.14
1175
+ vit_base_patch14_dinov2,518,512.0,175.93,2910.275,117.11,114.68,86.58
1176
+ vit_huge_patch14_gap_224,224,1024.0,175.35,5839.715,161.36,94.7,630.76
1177
+ vit_base_patch14_reg4_dinov2,518,512.0,175.34,2920.066,117.45,115.02,86.58
1178
+ convnextv2_huge,224,256.0,174.19,1469.676,115.0,79.07,660.29
1179
+ deit3_huge_patch14_224,224,1024.0,172.49,5936.531,161.99,95.07,632.13
1180
+ convmixer_1536_20,224,1024.0,172.27,5944.074,48.68,33.03,51.63
1181
+ vit_huge_patch14_clip_quickgelu_224,224,1024.0,165.12,6201.386,161.99,95.07,632.08
1182
+ maxvit_small_tf_512,512,96.0,163.95,585.546,60.02,256.36,69.13
1183
+ maxvit_base_tf_384,384,192.0,162.75,1179.72,69.34,247.75,119.65
1184
+ xcit_large_24_p16_384,384,1024.0,162.01,6320.659,105.34,137.15,189.1
1185
+ resnetv2_152x2_bit,384,384.0,160.06,2399.153,136.16,132.56,236.34
1186
+ vit_huge_patch14_xp_224,224,1024.0,159.21,6431.544,161.88,95.07,631.8
1187
+ resnest269e,416,512.0,159.04,3219.278,77.69,171.98,110.93
1188
+ eva_large_patch14_336,336,768.0,155.41,4941.906,174.74,128.21,304.53
1189
+ vit_large_patch14_clip_336,336,768.0,155.09,4951.819,174.74,128.21,304.53
1190
+ vit_large_patch16_384,384,768.0,154.94,4956.737,174.85,128.21,304.72
1191
+ convnext_xxlarge,256,384.0,152.35,2520.42,198.09,124.45,846.47
1192
+ davit_giant,224,384.0,151.56,2533.626,192.34,138.2,1406.47
1193
+ resnetv2_50x3_bit,448,192.0,150.44,1276.251,145.7,133.37,217.32
1194
+ coatnet_5_224,224,192.0,149.61,1283.336,142.72,143.69,687.47
1195
+ efficientnetv2_xl,512,512.0,149.15,3432.877,93.85,247.32,208.12
1196
+ cait_s24_384,384,512.0,148.91,3438.219,32.17,245.3,47.06
1197
+ convnext_xlarge,384,256.0,148.61,1722.573,179.2,168.99,350.2
1198
+ tf_efficientnetv2_xl,512,512.0,148.0,3459.525,93.85,247.32,208.12
1199
+ efficientnet_b7,600,96.0,147.91,649.053,38.33,289.94,66.35
1200
+ deit3_large_patch16_384,384,1024.0,147.79,6928.856,174.85,128.21,304.76
1201
+ seresnextaa201d_32x8d,384,768.0,147.05,5222.537,101.11,199.72,149.39
1202
+ nfnet_f3,416,512.0,146.71,3489.974,115.58,141.78,254.92
1203
+ vit_giant_patch16_gap_224,224,1024.0,145.38,7043.632,198.14,103.64,1011.37
1204
+ convnextv2_large,384,192.0,144.92,1324.86,101.1,126.74,197.96
1205
+ resnetv2_152x4_bit,224,512.0,144.91,3533.266,186.9,90.22,936.53
1206
+ vit_large_patch16_siglip_384,384,768.0,144.23,5324.878,175.76,129.18,316.28
1207
+ tf_efficientnet_b7,600,96.0,143.48,669.058,38.33,289.94,66.35
1208
+ nfnet_f4,384,768.0,142.67,5383.101,122.14,147.57,316.07
1209
+ vit_large_patch14_clip_quickgelu_336,336,768.0,140.95,5448.604,174.74,128.21,304.29
1210
+ caformer_b36,384,256.0,138.42,1849.458,66.12,159.11,98.75
1211
+ swin_large_patch4_window12_384,384,128.0,135.49,944.717,104.08,202.16,196.74
1212
+ convformer_b36,384,256.0,135.29,1892.221,66.67,164.75,99.88
1213
+ resnetrs420,416,1024.0,130.11,7870.213,108.45,213.79,191.89
1214
+ beit_large_patch16_384,384,768.0,129.31,5939.365,174.84,128.21,305.0
1215
+ dm_nfnet_f3,416,512.0,127.57,4013.328,115.58,141.78,254.92
1216
+ regnety_640,384,256.0,126.8,2018.836,188.47,124.83,281.38
1217
+ dm_nfnet_f4,384,768.0,123.05,6241.189,122.14,147.57,316.07
1218
+ focalnet_huge_fl4,224,512.0,122.81,4169.023,118.9,113.34,686.46
1219
+ xcit_large_24_p8_224,224,512.0,120.1,4263.036,141.22,181.53,188.93
1220
+ resnetv2_152x2_bit,448,256.0,117.91,2171.109,184.99,180.43,236.34
1221
+ eva_giant_patch14_224,224,1024.0,116.71,8773.739,259.74,135.89,1012.56
1222
+ eva_giant_patch14_clip_224,224,1024.0,116.64,8779.464,259.74,135.89,1012.59
1223
+ vit_giant_patch14_224,224,1024.0,114.18,8968.21,259.74,135.89,1012.61
1224
+ vit_giant_patch14_clip_224,224,1024.0,114.09,8975.383,259.74,135.89,1012.65
1225
+ swinv2_cr_large_384,384,128.0,112.81,1134.666,108.96,404.96,196.68
1226
+ maxvit_large_tf_384,384,128.0,111.17,1151.411,126.61,332.3,212.03
1227
+ eva02_large_patch14_clip_336,336,1024.0,110.28,9285.405,174.97,147.1,304.43
1228
+ mvitv2_huge_cls,224,384.0,107.61,3568.518,120.67,243.63,694.8
1229
+ convnextv2_huge,288,128.0,105.35,1214.957,190.1,130.7,660.29
1230
+ xcit_small_24_p8_384,384,512.0,102.73,4983.926,105.23,265.87,47.63
1231
+ nfnet_f5,416,512.0,100.11,5114.164,170.71,204.56,377.21
1232
+ cait_s36_384,384,512.0,99.61,5140.29,47.99,367.39,68.37
1233
+ swinv2_base_window12to24_192to384,384,96.0,96.35,996.364,55.25,280.36,87.92
1234
+ efficientnet_b8,672,96.0,95.78,1002.248,63.48,442.89,87.41
1235
+ focalnet_large_fl3,384,384.0,94.47,4064.948,105.06,168.04,239.13
1236
+ tf_efficientnet_b8,672,96.0,93.18,1030.252,63.48,442.89,87.41
1237
+ maxvit_base_tf_512,512,96.0,92.2,1041.169,123.93,456.26,119.88
1238
+ focalnet_large_fl4,384,256.0,90.17,2839.222,105.2,181.78,239.32
1239
+ resnetv2_101x3_bit,448,192.0,87.88,2184.819,280.33,194.78,387.93
1240
+ dm_nfnet_f5,416,512.0,86.64,5909.833,170.71,204.56,377.21
1241
+ nfnet_f4,512,384.0,81.51,4711.211,216.26,262.26,316.07
1242
+ volo_d3_448,448,192.0,76.74,2501.831,96.33,446.83,86.63
1243
+ vit_so400m_patch14_siglip_384,384,512.0,75.92,6743.556,302.34,200.62,428.23
1244
+ nfnet_f6,448,512.0,75.59,6773.482,229.7,273.62,438.36
1245
+ vit_huge_patch14_clip_336,336,768.0,75.49,10173.683,363.7,213.44,632.46
1246
+ xcit_medium_24_p8_384,384,384.0,71.15,5396.903,186.67,354.69,84.32
1247
+ dm_nfnet_f4,512,384.0,69.56,5520.408,216.26,262.26,316.07
1248
+ vit_gigantic_patch14_224,224,512.0,66.18,7736.423,473.4,204.12,1844.44
1249
+ vit_gigantic_patch14_clip_224,224,512.0,66.18,7735.92,473.41,204.12,1844.91
1250
+ focalnet_xlarge_fl3,384,256.0,66.07,3874.786,185.61,223.99,408.79
1251
+ dm_nfnet_f6,448,512.0,65.28,7842.994,229.7,273.62,438.36
1252
+ maxvit_large_tf_512,512,64.0,63.68,1005.087,225.96,611.85,212.33
1253
+ focalnet_xlarge_fl4,384,192.0,63.39,3028.979,185.79,242.31,409.03
1254
+ maxvit_xlarge_tf_384,384,96.0,63.2,1518.995,283.86,498.45,475.32
1255
+ regnety_1280,384,128.0,62.14,2059.919,374.99,210.2,644.81
1256
+ beit_large_patch16_512,512,256.0,61.47,4164.41,310.6,227.76,305.67
1257
+ convnextv2_huge,384,96.0,60.73,1580.79,337.96,232.35,660.29
1258
+ swinv2_large_window12to24_192to384,384,48.0,60.6,792.119,116.15,407.83,196.74
1259
+ eva02_large_patch14_448,448,512.0,59.6,8591.147,310.69,261.32,305.08
1260
+ tf_efficientnet_l2,475,128.0,59.14,2164.439,172.11,609.89,480.31
1261
+ nfnet_f5,544,384.0,58.55,6558.595,290.97,349.71,377.21
1262
+ vit_huge_patch14_clip_378,378,512.0,58.17,8801.788,460.13,270.04,632.68
1263
+ volo_d4_448,448,192.0,57.2,3356.883,197.13,527.35,193.41
1264
+ nfnet_f7,480,384.0,57.05,6730.663,300.08,355.86,499.5
1265
+ vit_large_patch14_dinov2,518,384.0,56.81,6759.458,414.89,304.42,304.37
1266
+ vit_large_patch14_reg4_dinov2,518,384.0,56.51,6795.142,416.1,305.31,304.37
1267
+ vit_huge_patch14_clip_quickgelu_378,378,384.0,53.9,7123.722,460.13,270.04,632.68
1268
+ swinv2_cr_giant_224,224,192.0,52.42,3662.593,483.85,309.15,2598.76
1269
+ dm_nfnet_f5,544,384.0,50.82,7555.977,290.97,349.71,377.21
1270
+ eva_giant_patch14_336,336,512.0,49.6,10322.486,583.14,305.1,1013.01
1271
+ swinv2_cr_huge_384,384,64.0,48.85,1310.056,352.04,583.18,657.94
1272
+ nfnet_f6,576,256.0,45.99,5566.397,378.69,452.2,438.36
1273
+ xcit_large_24_p8_384,384,256.0,40.54,6315.135,415.0,531.74,188.93
1274
+ volo_d5_448,448,192.0,39.97,4803.918,315.06,737.92,295.91
1275
+ dm_nfnet_f6,576,256.0,39.68,6452.4,378.69,452.2,438.36
1276
+ nfnet_f7,608,256.0,35.92,7127.91,480.39,570.85,499.5
1277
+ maxvit_xlarge_tf_512,512,48.0,35.73,1343.449,505.95,917.77,475.77
1278
+ regnety_2560,384,96.0,35.19,2728.299,747.83,296.49,1282.6
1279
+ convnextv2_huge,512,48.0,34.07,1408.989,600.81,413.07,660.29
1280
+ cait_m36_384,384,256.0,32.53,7868.895,173.11,734.79,271.22
1281
+ resnetv2_152x4_bit,480,128.0,32.31,3961.512,844.84,414.26,936.53
1282
+ volo_d5_512,512,96.0,27.94,3435.72,425.09,1105.37,296.09
1283
+ samvit_base_patch16,1024,12.0,23.01,521.487,371.55,403.08,89.67
1284
+ efficientnet_l2,800,32.0,22.53,1420.616,479.12,1707.39,480.31
1285
+ tf_efficientnet_l2,800,32.0,22.12,1446.454,479.12,1707.39,480.31
1286
+ vit_giant_patch14_dinov2,518,192.0,17.14,11200.639,1553.56,871.89,1136.48
1287
+ vit_giant_patch14_reg4_dinov2,518,128.0,17.05,7505.847,1558.09,874.43,1136.48
1288
+ swinv2_cr_giant_384,384,32.0,15.01,2131.256,1450.71,1394.86,2598.76
1289
+ eva_giant_patch14_560,560,192.0,15.01,12792.976,1618.04,846.56,1014.45
1290
+ cait_m48_448,448,128.0,13.76,9299.464,329.4,1708.21,356.46
1291
+ samvit_large_patch16,1024,8.0,10.25,780.237,1317.08,1055.58,308.28
1292
+ samvit_huge_patch16,1024,6.0,6.31,950.475,2741.59,1727.57,637.03
1293
+ eva02_enormous_patch14_clip_224,224,,,,1132.46,497.58,4350.56
1294
+ vit_huge_patch16_gap_448,448,,,,544.7,636.83,631.67
pytorch-image-models/results/benchmark-infer-amp-nchw-pt240-cu124-rtx3090.csv ADDED
@@ -0,0 +1,1444 @@
1
+ model,infer_img_size,infer_samples_per_sec,infer_step_time,infer_batch_size,param_count,infer_gmacs,infer_macts
2
+ test_vit,160,109337.21,9.356,1024,0.37,0.04,0.48
3
+ test_byobnet,160,82185.02,12.45,1024,0.46,0.03,0.43
4
+ test_efficientnet,160,76411.59,13.392,1024,0.36,0.06,0.55
5
+ tinynet_e,106,53275.73,19.211,1024,2.04,0.03,0.69
6
+ mobilenetv3_small_050,224,47496.52,21.55,1024,1.59,0.03,0.92
7
+ lcnet_035,224,42719.32,23.961,1024,1.64,0.03,1.04
8
+ lcnet_050,224,38393.43,26.662,1024,1.88,0.05,1.26
9
+ mobilenetv3_small_075,224,34935.91,29.301,1024,2.04,0.05,1.3
10
+ efficientvit_m0,224,32556.1,31.443,1024,2.35,0.08,0.91
11
+ mobilenetv3_small_100,224,31410.96,32.59,1024,2.54,0.06,1.42
12
+ tf_mobilenetv3_small_minimal_100,224,29476.16,34.73,1024,2.04,0.06,1.41
13
+ tinynet_d,152,29431.12,34.783,1024,2.34,0.05,1.42
14
+ tf_mobilenetv3_small_075,224,28685.83,35.688,1024,2.04,0.05,1.3
15
+ tf_mobilenetv3_small_100,224,26229.43,39.03,1024,2.54,0.06,1.42
16
+ efficientvit_m1,224,25342.72,40.397,1024,2.98,0.17,1.33
17
+ lcnet_075,224,24815.53,41.255,1024,2.36,0.1,1.99
18
+ efficientvit_m2,224,22234.8,46.044,1024,4.19,0.2,1.47
19
+ mobilenetv4_conv_small,224,21980.64,46.577,1024,3.77,0.19,1.97
20
+ mnasnet_small,224,21439.71,47.752,1024,2.03,0.07,2.16
21
+ levit_128s,224,21017.47,48.711,1024,7.78,0.31,1.88
22
+ lcnet_100,224,20320.08,50.384,1024,2.95,0.16,2.52
23
+ mobilenetv4_conv_small,256,19758.75,51.816,1024,3.77,0.25,2.57
24
+ regnetx_002,224,19130.47,53.516,1024,2.68,0.2,2.16
25
+ efficientvit_m3,224,19121.62,53.542,1024,6.9,0.27,1.62
26
+ mobilenetv2_035,224,19047.74,53.75,1024,1.68,0.07,2.86
27
+ resnet10t,176,19017.0,53.837,1024,5.44,0.7,1.51
28
+ ghostnet_050,224,18326.71,55.865,1024,2.59,0.05,1.77
29
+ levit_conv_128s,224,17825.2,57.436,1024,7.78,0.31,1.88
30
+ regnety_002,224,17806.29,57.495,1024,3.16,0.2,2.17
31
+ efficientvit_m4,224,17783.18,57.573,1024,8.8,0.3,1.7
32
+ resnet18,160,17690.73,57.874,1024,11.69,0.93,1.27
33
+ repghostnet_050,224,17490.98,58.535,1024,2.31,0.05,2.02
34
+ efficientvit_b0,224,16914.4,60.53,1024,3.41,0.1,2.87
35
+ mnasnet_050,224,16594.59,61.697,1024,2.22,0.11,3.07
36
+ vit_tiny_r_s16_p8_224,224,16372.59,62.534,1024,6.34,0.44,2.06
37
+ tinynet_c,184,15537.22,65.896,1024,2.46,0.11,2.87
38
+ mobilenetv2_050,224,15294.16,66.944,1024,1.97,0.1,3.64
39
+ pit_ti_224,224,14941.65,68.524,1024,4.85,0.7,6.19
40
+ pit_ti_distilled_224,224,14919.02,68.627,1024,5.1,0.71,6.23
41
+ semnasnet_050,224,14881.16,68.802,1024,2.08,0.11,3.44
42
+ levit_128,224,14392.72,71.137,1024,9.21,0.41,2.71
43
+ repghostnet_058,224,13974.19,73.268,1024,2.55,0.07,2.59
44
+ vit_small_patch32_224,224,13132.7,77.963,1024,22.88,1.15,2.5
45
+ lcnet_150,224,13019.52,78.641,1024,4.5,0.34,3.79
46
+ cs3darknet_focus_s,256,12823.07,79.845,1024,3.27,0.69,2.7
47
+ regnetx_004,224,12790.25,80.052,1024,5.16,0.4,3.14
48
+ levit_conv_128,224,12771.21,80.17,1024,9.21,0.41,2.71
49
+ mobilenetv3_large_075,224,12600.27,81.258,1024,3.99,0.16,4.0
50
+ cs3darknet_s,256,12496.47,81.932,1024,3.28,0.72,2.97
51
+ regnetx_004_tv,224,12440.42,82.303,1024,5.5,0.42,3.17
52
+ efficientvit_m5,224,12202.57,83.907,1024,12.47,0.53,2.41
53
+ levit_192,224,12163.42,84.177,1024,10.95,0.66,3.2
54
+ resnet10t,224,12120.7,84.474,1024,5.44,1.1,2.43
55
+ gernet_s,224,11872.38,86.241,1024,8.17,0.75,2.65
56
+ ese_vovnet19b_slim_dw,224,11765.52,87.024,1024,1.9,0.4,5.28
57
+ hardcorenas_a,224,11423.94,89.627,1024,5.26,0.23,4.38
58
+ repghostnet_080,224,11343.56,90.261,1024,3.28,0.1,3.22
59
+ mobilenetv3_rw,224,11309.87,90.531,1024,5.48,0.23,4.41
60
+ mobilenetv3_large_100,224,11135.72,91.947,1024,5.48,0.23,4.41
61
+ tf_mobilenetv3_large_075,224,10970.94,93.328,1024,3.99,0.16,4.0
62
+ mixer_s32_224,224,10941.26,93.581,1024,19.1,1.0,2.28
63
+ mnasnet_075,224,10916.2,93.796,1024,3.17,0.23,4.77
64
+ levit_conv_192,224,10869.06,94.202,1024,10.95,0.66,3.2
65
+ mobilenetv1_100,224,10802.87,94.78,1024,4.23,0.58,5.04
66
+ tf_mobilenetv3_large_minimal_100,224,10586.88,96.713,1024,3.92,0.22,4.4
67
+ resnet14t,176,10568.07,96.886,1024,10.08,1.07,3.61
68
+ mobilenetv1_100h,224,10512.64,97.397,1024,5.28,0.63,5.09
69
+ hardcorenas_b,224,10474.56,97.751,1024,5.18,0.26,5.09
70
+ resnet34,160,10402.15,98.431,1024,21.8,1.87,1.91
71
+ hardcorenas_c,224,10270.04,99.697,1024,5.52,0.28,5.01
72
+ nf_regnet_b0,192,10246.95,99.922,1024,8.76,0.37,3.15
73
+ deit_tiny_patch16_224,224,10236.41,100.025,1024,5.72,1.26,5.97
74
+ vit_tiny_patch16_224,224,10231.54,100.072,1024,5.72,1.26,5.97
75
+ regnety_004,224,10230.97,100.079,1024,4.34,0.41,3.89
76
+ deit_tiny_distilled_patch16_224,224,10185.75,100.523,1024,5.91,1.27,6.01
77
+ regnetx_006,224,10150.71,100.87,1024,6.2,0.61,3.98
78
+ tinynet_b,188,9883.52,103.596,1024,3.73,0.21,4.44
79
+ ghostnet_100,224,9881.78,103.616,1024,5.18,0.15,3.55
80
+ tf_mobilenetv3_large_100,224,9800.29,104.477,1024,5.48,0.23,4.41
81
+ mnasnet_100,224,9763.25,104.873,1024,4.38,0.33,5.46
82
+ repghostnet_100,224,9586.17,106.811,1024,4.07,0.15,3.98
83
+ hardcorenas_d,224,9568.31,107.009,1024,7.5,0.3,4.93
84
+ tf_efficientnetv2_b0,192,9480.34,108.003,1024,7.14,0.54,3.51
85
+ mobilenetv2_075,224,9478.95,108.018,1024,2.64,0.22,5.86
86
+ semnasnet_075,224,9435.3,108.519,1024,2.91,0.23,5.54
87
+ regnety_006,224,9299.07,110.103,1024,6.06,0.61,4.33
88
+ resnet18,224,9260.29,110.57,1024,11.69,1.82,2.48
89
+ pit_xs_224,224,9215.59,111.106,1024,10.62,1.4,7.71
90
+ pit_xs_distilled_224,224,9179.35,111.544,1024,11.0,1.41,7.76
91
+ mobilenet_edgetpu_v2_xs,224,9103.13,112.477,1024,4.46,0.7,4.8
92
+ convnext_atto,224,9094.81,112.582,1024,3.7,0.55,3.81
93
+ vit_xsmall_patch16_clip_224,224,9084.06,112.715,1024,8.28,1.79,6.65
94
+ levit_256,224,9041.44,113.246,1024,18.89,1.13,4.23
95
+ vit_medium_patch32_clip_224,224,9037.75,113.292,1024,39.69,2.0,3.34
96
+ mobilenetv1_100,256,8925.01,114.724,1024,4.23,0.76,6.59
97
+ spnasnet_100,224,8850.63,115.688,1024,4.42,0.35,6.03
98
+ seresnet18,224,8755.08,116.951,1024,11.78,1.82,2.49
99
+ mobilenetv1_100h,256,8716.6,117.468,1024,5.28,0.82,6.65
100
+ repghostnet_111,224,8703.6,117.642,1024,4.54,0.18,4.38
101
+ convnext_atto_ols,224,8634.04,118.591,1024,3.7,0.58,4.11
102
+ mobilenetv2_100,224,8629.73,118.65,1024,3.5,0.31,6.68
103
+ semnasnet_100,224,8576.31,119.389,1024,3.89,0.32,6.23
104
+ legacy_seresnet18,224,8496.1,120.516,1024,11.78,1.82,2.49
105
+ hgnetv2_b0,224,8489.19,120.614,1024,6.0,0.33,2.12
106
+ hardcorenas_f,224,8388.5,122.062,1024,8.2,0.35,5.57
107
+ hardcorenas_e,224,8316.4,123.12,1024,8.07,0.35,5.65
108
+ edgenext_xx_small,256,8267.32,123.851,1024,1.33,0.26,3.33
109
+ repvgg_a0,224,8195.29,124.938,1024,9.11,1.52,3.59
110
+ regnetx_008,224,8145.67,125.701,1024,7.26,0.81,5.15
111
+ levit_conv_256,224,8113.72,126.196,1024,18.89,1.13,4.23
112
+ dla46_c,224,8109.77,126.257,1024,1.3,0.58,4.5
113
+ mobilenetv1_125,224,8097.26,126.453,1024,6.27,0.89,6.3
114
+ efficientnet_lite0,224,7979.97,128.311,1024,4.65,0.4,6.74
115
+ convnext_femto,224,7952.03,128.762,1024,5.22,0.79,4.57
116
+ resnet18d,224,7915.36,129.359,1024,11.71,2.06,3.29
117
+ ghostnet_130,224,7893.46,129.718,1024,7.36,0.24,4.6
118
+ mobilevit_xxs,256,7881.37,129.917,1024,1.27,0.42,8.34
119
+ ese_vovnet19b_slim,224,7874.63,130.029,1024,3.17,1.69,3.52
120
+ levit_256d,224,7779.0,131.627,1024,26.21,1.4,4.93
121
+ mobilenetv4_conv_medium,224,7776.66,131.666,1024,9.72,0.84,5.8
122
+ mobilenet_edgetpu_100,224,7770.98,131.763,1024,4.09,1.0,5.75
123
+ xcit_nano_12_p16_224,224,7759.26,131.961,1024,3.05,0.56,4.17
124
+ repghostnet_130,224,7749.8,132.123,1024,5.48,0.25,5.24
125
+ tinynet_a,192,7745.18,132.201,1024,6.19,0.35,5.41
126
+ regnety_008,224,7721.37,132.605,1024,6.26,0.81,5.25
127
+ tf_efficientnetv2_b0,224,7710.08,132.803,1024,7.14,0.73,4.77
128
+ fbnetc_100,224,7646.21,133.909,1024,5.57,0.4,6.51
129
+ convnext_femto_ols,224,7577.53,135.127,1024,5.23,0.82,4.87
130
+ mobilenetv4_hybrid_medium_075,224,7514.13,136.267,1024,7.31,0.66,5.65
131
+ mobilevitv2_050,256,7395.66,138.45,1024,1.37,0.48,8.04
132
+ tf_efficientnetv2_b1,192,7389.78,138.56,1024,8.14,0.76,4.59
133
+ regnety_008_tv,224,7307.18,140.123,1024,6.43,0.84,5.42
134
+ tf_efficientnet_lite0,224,7042.0,145.403,1024,4.65,0.4,6.74
135
+ efficientnet_b0,224,6924.93,147.857,1024,5.29,0.4,6.75
136
+ mobilenetv4_conv_medium,256,6920.92,147.948,1024,9.72,1.1,7.58
137
+ dla46x_c,224,6863.64,149.181,1024,1.07,0.54,5.66
138
+ mnasnet_140,224,6835.02,149.805,1024,7.12,0.6,7.71
139
+ resnet14t,224,6822.56,150.08,1024,10.08,1.69,5.8
140
+ repghostnet_150,224,6798.46,150.612,1024,6.58,0.32,6.0
141
+ rexnet_100,224,6759.37,151.483,1024,4.8,0.41,7.44
142
+ rexnetr_100,224,6733.45,152.067,1024,4.88,0.43,7.72
143
+ efficientnet_b1_pruned,240,6731.65,152.107,1024,6.33,0.4,6.21
144
+ mobilenetv1_125,256,6707.02,152.666,1024,6.27,1.16,8.23
145
+ visformer_tiny,224,6688.07,153.098,1024,10.32,1.27,5.72
146
+ levit_conv_256d,224,6662.02,153.697,1024,26.21,1.4,4.93
147
+ pvt_v2_b0,224,6589.46,155.389,1024,3.67,0.57,7.99
148
+ efficientvit_b1,224,6579.31,155.629,1024,9.1,0.53,7.25
149
+ fbnetv3_b,224,6563.03,156.015,1024,8.6,0.42,6.97
150
+ repvit_m1,224,6541.47,156.523,1024,5.49,0.83,7.45
151
+ edgenext_xx_small,288,6532.74,156.738,1024,1.33,0.33,4.21
152
+ mobilenet_edgetpu_v2_s,224,6516.09,157.14,1024,5.99,1.21,6.6
153
+ mobilenetv2_110d,224,6504.09,157.429,1024,4.52,0.45,8.71
154
+ vit_betwixt_patch32_clip_224,224,6503.93,157.433,1024,61.41,3.09,4.17
155
+ regnetz_005,224,6484.34,157.909,1024,7.12,0.52,5.86
156
+ ese_vovnet19b_dw,224,6451.87,158.704,1024,6.54,1.34,8.25
157
+ dla60x_c,224,6417.77,159.546,1024,1.32,0.59,6.01
158
+ hgnetv2_b1,224,6310.48,162.26,1024,6.34,0.49,2.73
159
+ repvit_m0_9,224,6253.66,163.733,1024,5.49,0.83,7.45
160
+ crossvit_tiny_240,240,6253.35,163.742,1024,7.01,1.57,9.08
161
+ cs3darknet_focus_m,256,6210.25,164.879,1024,9.3,1.98,4.89
162
+ tf_efficientnet_b0,224,6210.04,164.879,1024,5.29,0.4,6.75
163
+ convnext_pico,224,6200.96,165.126,1024,9.05,1.37,6.1
164
+ nf_regnet_b0,256,6157.62,166.288,1024,8.76,0.64,5.58
165
+ semnasnet_140,224,6069.35,168.703,1024,6.11,0.6,8.87
166
+ crossvit_9_dagger_240,240,6067.95,168.745,1024,8.78,1.99,9.97
167
+ resnet50,160,6033.87,169.699,1024,25.56,2.1,5.67
168
+ repvgg_a1,224,5998.64,170.696,1024,14.09,2.64,4.74
169
+ resnetblur18,224,5978.13,171.282,1024,11.69,2.34,3.39
170
+ cs3darknet_m,256,5943.51,172.279,1024,9.31,2.08,5.28
171
+ mobilenetv2_140,224,5943.08,172.292,1024,6.11,0.6,9.57
172
+ convnext_pico_ols,224,5910.37,173.245,1024,9.06,1.43,6.5
173
+ mobilenetv4_hybrid_medium,224,5890.59,173.827,1024,11.07,0.98,6.84
174
+ efficientnet_b0,256,5798.8,176.577,1024,5.29,0.52,8.81
175
+ tf_efficientnetv2_b2,208,5794.22,176.717,1024,10.1,1.06,6.0
176
+ hrnet_w18_small,224,5781.49,177.103,1024,13.19,1.61,5.72
177
+ crossvit_9_240,240,5771.83,177.403,1024,8.55,1.85,9.52
178
+ skresnet18,224,5765.67,177.593,1024,11.96,1.82,3.24
179
+ resnet50d,160,5707.52,179.403,1024,25.58,2.22,6.08
180
+ resnet18,288,5648.23,181.286,1024,11.69,3.01,4.11
181
+ vit_tiny_r_s16_p8_384,384,5625.7,182.012,1024,6.36,1.34,6.49
182
+ efficientnet_b0_gn,224,5621.97,182.132,1024,5.29,0.42,6.75
183
+ efficientnet_lite1,240,5579.58,183.516,1024,5.42,0.62,10.14
184
+ ghostnetv2_100,224,5564.84,184.002,1024,6.16,0.18,4.55
185
+ fbnetv3_d,224,5563.09,184.06,1024,10.31,0.52,8.5
186
+ convnext_atto,288,5527.85,185.234,1024,3.7,0.91,6.3
187
+ fbnetv3_b,256,5497.24,186.265,1024,8.6,0.55,9.1
188
+ selecsls42,224,5467.25,187.287,1024,30.35,2.94,4.62
189
+ efficientnet_blur_b0,224,5455.02,187.706,1024,5.29,0.43,8.72
190
+ resnet34,224,5454.89,187.712,1024,21.8,3.67,3.74
191
+ efficientvit_b1,256,5439.39,188.246,1024,9.1,0.69,9.46
192
+ tiny_vit_5m_224,224,5432.07,188.5,1024,12.08,1.28,11.25
193
+ selecsls42b,224,5418.51,188.972,1024,32.46,2.98,4.62
194
+ levit_384,224,5395.45,189.779,1024,39.13,2.36,6.26
195
+ tf_efficientnetv2_b1,240,5369.96,190.68,1024,8.14,1.21,7.34
196
+ seresnet18,288,5366.44,190.805,1024,11.78,3.01,4.11
197
+ repvit_m1_0,224,5361.91,190.966,1024,7.3,1.13,8.69
198
+ convnextv2_atto,224,5357.95,191.108,1024,3.71,0.55,3.81
199
+ mixnet_s,224,5315.69,192.627,1024,4.13,0.25,6.25
200
+ repghostnet_200,224,5315.68,192.627,1024,9.8,0.54,7.96
201
+ seresnet50,160,5298.11,193.266,1024,28.09,2.1,5.69
202
+ edgenext_x_small,256,5274.46,194.132,1024,2.34,0.54,5.93
203
+ rexnetr_130,224,5269.16,194.329,1024,7.61,0.68,9.81
204
+ convnext_atto_ols,288,5250.22,195.03,1024,3.7,0.96,6.8
205
+ repvit_m2,224,5242.24,195.319,1024,8.8,1.36,9.43
206
+ gernet_m,224,5225.82,195.94,1024,21.14,3.02,5.24
207
+ hgnetv2_b0,288,5205.77,196.695,1024,6.0,0.54,3.51
208
+ seresnet34,224,5135.96,199.368,1024,21.96,3.67,3.74
209
+ mobilenetv4_hybrid_medium,256,5134.61,199.421,1024,11.07,1.29,9.01
210
+ resnet26,224,5121.5,199.932,1024,16.0,2.36,7.35
211
+ vit_base_patch32_224,224,5098.65,200.828,1024,88.22,4.41,5.01
212
+ mobilenetv3_large_150d,224,5094.2,201.003,1024,14.62,,
213
+ vit_base_patch32_clip_224,224,5090.11,201.165,1024,88.22,4.41,5.01
214
+ ecaresnet50t,160,5084.11,201.402,1024,25.57,2.21,6.04
215
+ mobilenetv4_conv_blur_medium,224,5057.66,202.456,1024,9.72,1.22,8.58
216
+ mobilenet_edgetpu_v2_m,224,5045.14,202.956,1024,8.46,1.85,8.15
217
+ tf_efficientnet_lite1,240,5038.81,203.213,1024,5.42,0.62,10.14
218
+ resnet50,176,5019.75,203.984,1024,25.56,2.62,6.92
219
+ repvit_m1_1,224,5014.8,204.185,1024,8.8,1.36,9.43
220
+ legacy_seresnet34,224,4973.36,205.887,1024,21.96,3.67,3.74
221
+ resnet34d,224,4971.07,205.982,1024,21.82,3.91,4.54
222
+ tf_mixnet_s,224,4958.21,206.516,1024,4.13,0.25,6.25
223
+ resnetrs50,160,4955.95,206.604,1024,35.69,2.29,6.2
224
+ xcit_tiny_12_p16_224,224,4922.03,208.034,1024,6.72,1.24,6.29
225
+ eva02_tiny_patch14_224,224,4913.38,208.399,1024,5.5,1.7,9.14
226
+ mobilevitv2_075,256,4906.85,208.678,1024,2.87,1.05,12.06
227
+ pit_s_224,224,4895.76,209.15,1024,23.46,2.88,11.56
228
+ pit_s_distilled_224,224,4881.41,209.765,1024,24.04,2.9,11.64
229
+ efficientnet_es_pruned,224,4880.76,209.792,1024,5.44,1.81,8.73
230
+ efficientnet_es,224,4880.24,209.815,1024,5.44,1.81,8.73
231
+ mobilenetv2_120d,224,4862.72,210.571,1024,5.83,0.69,11.97
232
+ resnet18d,288,4851.45,211.06,1024,11.71,3.41,5.43
233
+ resnext50_32x4d,160,4849.69,211.137,1024,25.03,2.17,7.35
234
+ efficientnet_b1,224,4846.84,211.261,1024,7.79,0.59,9.36
235
+ levit_conv_384,224,4839.13,211.598,1024,39.13,2.36,6.26
236
+ rexnet_130,224,4831.59,211.926,1024,7.56,0.68,9.71
237
+ cs3darknet_focus_m,288,4831.24,211.944,1024,9.3,2.51,6.19
238
+ convnext_femto,288,4824.54,212.238,1024,5.22,1.3,7.56
239
+ dla34,224,4805.9,213.06,1024,15.74,3.07,5.02
240
+ efficientnet_b0_g16_evos,224,4800.56,213.298,1024,8.11,1.01,7.42
241
+ resnet26d,224,4679.82,218.802,1024,16.01,2.6,8.15
242
+ tf_efficientnet_es,224,4673.69,219.089,1024,5.44,1.81,8.73
243
+ resmlp_12_224,224,4653.37,220.046,1024,15.35,3.01,5.5
244
+ cs3darknet_m,288,4646.1,220.39,1024,9.31,2.63,6.69
245
+ fbnetv3_d,256,4637.02,220.821,1024,10.31,0.68,11.1
246
+ mobilenetv4_conv_aa_medium,256,4630.5,221.132,1024,9.72,1.58,10.3
247
+ selecsls60,224,4624.54,221.418,1024,30.67,3.59,5.52
248
+ rexnetr_150,224,4617.37,221.761,1024,9.78,0.89,11.13
249
+ convnext_femto_ols,288,4613.88,221.929,1024,5.23,1.35,8.06
250
+ nf_regnet_b1,256,4603.23,222.442,1024,10.22,0.82,7.27
251
+ vit_base_patch32_clip_quickgelu_224,224,4601.51,222.526,1024,87.85,4.41,5.01
252
+ selecsls60b,224,4600.54,222.572,1024,32.77,3.63,5.52
253
+ convnextv2_femto,224,4593.84,222.896,1024,5.23,0.79,4.57
254
+ regnetx_016,224,4589.73,223.096,1024,9.19,1.62,7.93
255
+ deit_small_patch16_224,224,4586.09,223.273,1024,22.05,4.61,11.95
256
+ vit_small_patch16_224,224,4584.86,223.334,1024,22.05,4.61,11.95
257
+ gmixer_12_224,224,4571.76,223.974,1024,12.7,2.67,7.26
258
+ gmlp_ti16_224,224,4565.24,224.293,1024,5.87,1.34,7.55
259
+ repvgg_b0,224,4553.06,224.894,1024,15.82,3.41,6.15
260
+ deit_small_distilled_patch16_224,224,4543.3,225.376,1024,22.44,4.63,12.02
261
+ mixer_s16_224,224,4530.96,225.99,1024,18.53,3.79,5.97
262
+ vit_small_patch32_384,384,4513.63,226.858,1024,22.92,3.45,8.25
263
+ efficientnet_cc_b0_4e,224,4507.68,227.158,1024,13.31,0.41,9.42
264
+ efficientnet_cc_b0_8e,224,4491.22,227.99,1024,24.01,0.42,9.42
265
+ mixer_b32_224,224,4485.82,228.265,1024,60.29,3.24,6.29
266
+ tiny_vit_11m_224,224,4481.54,228.483,1024,20.35,2.04,13.49
267
+ mobilenetv4_conv_medium,320,4477.38,228.695,1024,9.72,1.71,11.84
268
+ nf_resnet26,224,4466.75,229.24,1024,16.0,2.41,7.35
269
+ mobilenet_edgetpu_v2_l,224,4462.15,229.476,1024,10.92,2.55,9.05
270
+ efficientnet_b2_pruned,260,4421.73,231.573,1024,8.31,0.73,9.13
271
+ efficientformer_l1,224,4415.57,231.896,1024,12.29,1.3,5.53
272
+ resnetaa34d,224,4392.45,233.118,1024,21.82,4.43,5.07
273
+ darknet17,256,4386.8,233.416,1024,14.3,3.26,7.18
274
+ ghostnetv2_130,224,4382.43,233.65,1024,8.96,0.28,5.9
275
+ rexnet_150,224,4375.57,234.017,1024,9.73,0.9,11.21
276
+ convnext_nano,224,4355.17,235.113,1024,15.59,2.46,8.37
277
+ ecaresnet50d_pruned,224,4352.57,235.254,1024,19.94,2.53,6.43
278
+ efficientnet_b1,240,4341.0,235.879,1024,7.79,0.71,10.88
279
+ nf_regnet_b2,240,4323.87,236.815,1024,14.31,0.97,7.23
280
+ poolformer_s12,224,4258.87,240.428,1024,11.92,1.82,5.53
281
+ regnety_016,224,4256.18,240.571,1024,11.2,1.63,8.04
282
+ mobilenetv4_conv_blur_medium,256,4255.16,180.477,768,9.72,1.59,11.2
283
+ vit_wee_patch16_reg1_gap_256,256,4234.72,241.8,1024,13.42,3.83,13.9
284
+ mobilenet_edgetpu_v2_m,256,4230.57,242.038,1024,8.46,2.42,10.65
285
+ vit_pwee_patch16_reg1_gap_256,256,4211.58,243.129,1024,15.25,4.37,15.87
286
+ deit3_small_patch16_224,224,4202.98,243.625,1024,22.06,4.61,11.95
287
+ hgnetv2_b2,224,4202.72,243.64,1024,11.22,1.15,4.12
288
+ edgenext_x_small,288,4195.02,244.088,1024,2.34,0.68,7.5
289
+ tf_efficientnet_cc_b0_4e,224,4189.01,244.439,1024,13.31,0.41,9.42
290
+ efficientnet_lite2,260,4184.61,244.696,1024,6.09,0.89,12.9
291
+ tf_efficientnet_cc_b0_8e,224,4158.98,246.204,1024,24.01,0.42,9.42
292
+ regnetz_005,288,4139.21,247.38,1024,7.12,0.86,9.68
293
+ hgnetv2_b4,224,4136.11,247.566,1024,19.8,2.75,6.7
294
+ efficientvit_b1,288,4112.03,249.015,1024,9.1,0.87,11.96
295
+ resnest14d,224,4103.54,249.531,1024,10.61,2.76,7.33
296
+ resnext26ts,256,4101.49,249.654,1024,10.3,2.43,10.52
297
+ efficientnet_b0_g8_gn,224,4095.29,250.033,1024,6.56,0.66,6.75
298
+ efficientnet_b1,256,4062.7,252.039,1024,7.79,0.77,12.22
299
+ tf_efficientnet_b1,240,4009.73,255.369,1024,7.79,0.71,10.88
300
+ edgenext_small,256,3998.75,256.069,1024,5.59,1.26,9.07
301
+ eca_resnext26ts,256,3985.65,256.911,1024,10.3,2.43,10.52
302
+ darknet21,256,3985.31,256.933,1024,20.86,3.93,7.47
303
+ seresnext26ts,256,3983.57,257.043,1024,10.39,2.43,10.52
304
+ regnetz_b16,224,3982.17,257.134,1024,9.72,1.45,9.95
305
+ resnext50_32x4d,176,3977.56,257.434,1024,25.03,2.71,8.97
306
+ convnext_nano_ols,224,3963.35,258.357,1024,15.65,2.65,9.38
307
+ vit_base_patch32_clip_256,256,3950.74,259.181,1024,87.86,5.76,6.65
308
+ flexivit_small,240,3949.57,259.258,1024,22.06,5.35,14.18
309
+ gcresnext26ts,256,3942.19,259.744,1024,10.48,2.43,10.53
310
+ mobileone_s1,224,3939.35,259.93,1024,4.83,0.86,9.67
311
+ hgnetv2_b1,288,3863.09,265.063,1024,6.34,0.82,4.51
312
+ sedarknet21,256,3855.83,265.558,1024,20.95,3.93,7.47
313
+ tf_efficientnetv2_b2,260,3852.83,265.768,1024,10.1,1.72,9.84
314
+ nf_ecaresnet26,224,3841.08,266.581,1024,16.0,2.41,7.36
315
+ efficientnet_b2,256,3835.68,266.957,1024,9.11,0.89,12.81
316
+ nf_seresnet26,224,3835.46,266.972,1024,17.4,2.41,7.36
317
+ mobilevit_xs,256,3825.9,200.727,768,2.32,1.05,16.33
318
+ dpn48b,224,3821.05,267.978,1024,9.13,1.69,8.92
319
+ mobilenetv4_conv_large,256,3819.0,268.122,1024,32.59,2.86,12.14
320
+ vit_relpos_small_patch16_224,224,3815.41,268.375,1024,21.98,4.59,13.05
321
+ tf_efficientnet_lite2,260,3814.92,268.409,1024,6.09,0.89,12.9
322
+ pvt_v2_b1,224,3812.97,268.546,1024,14.01,2.12,15.39
323
+ resnet26t,256,3801.26,269.374,1024,16.01,3.35,10.52
324
+ vit_srelpos_small_patch16_224,224,3792.64,269.986,1024,21.97,4.59,12.16
325
+ legacy_seresnext26_32x4d,224,3774.18,271.304,1024,16.79,2.49,9.39
326
+ ese_vovnet19b_dw,288,3768.37,271.725,1024,6.54,2.22,13.63
327
+ convnext_pico,288,3762.1,272.178,1024,9.05,2.27,10.08
328
+ gernet_l,256,3741.79,273.656,1024,31.08,4.57,8.0
329
+ mobilenetv4_hybrid_large_075,256,3731.28,274.426,1024,22.75,2.06,11.64
330
+ resnet101,160,3660.13,279.761,1024,44.55,4.0,8.28
331
+ edgenext_small_rw,256,3647.4,280.737,1024,7.83,1.58,9.51
332
+ resnetblur18,288,3644.87,280.933,1024,11.69,3.87,5.6
333
+ tf_efficientnetv2_b3,240,3640.22,281.291,1024,14.36,1.93,9.95
334
+ cs3darknet_focus_l,256,3628.94,282.166,1024,21.15,4.66,8.03
335
+ efficientnetv2_rw_t,224,3628.38,282.209,1024,13.65,1.93,9.94
336
+ repvit_m3,224,3609.67,283.663,1024,10.68,1.89,13.94
337
+ mixnet_m,224,3606.44,283.926,1024,5.01,0.36,8.19
338
+ coatnet_pico_rw_224,224,3590.64,285.176,1024,10.85,2.05,14.62
339
+ ghostnetv2_160,224,3589.83,285.24,1024,12.39,0.42,7.23
340
+ gc_efficientnetv2_rw_t,224,3589.63,285.255,1024,13.68,1.94,9.97
341
+ convnext_pico_ols,288,3587.72,285.408,1024,9.06,2.37,10.74
342
+ ecaresnext50t_32x4d,224,3560.67,287.576,1024,15.41,2.7,10.09
343
+ ecaresnext26t_32x4d,224,3560.44,287.594,1024,15.41,2.7,10.09
344
+ seresnext26t_32x4d,224,3558.26,287.767,1024,16.81,2.7,10.09
345
+ eca_botnext26ts_256,256,3542.14,289.08,1024,10.59,2.46,11.6
346
+ efficientnet_b3_pruned,300,3528.65,290.185,1024,9.86,1.04,11.86
347
+ seresnext26d_32x4d,224,3528.55,290.194,1024,16.81,2.73,10.19
348
+ nf_regnet_b1,288,3527.3,290.296,1024,10.22,1.02,9.2
349
+ coat_lite_tiny,224,3515.07,291.307,1024,5.72,1.6,11.65
350
+ convnextv2_pico,224,3514.61,291.344,1024,9.07,1.37,6.1
351
+ cs3darknet_l,256,3497.24,292.792,1024,21.16,4.86,8.55
352
+ repvgg_a2,224,3492.05,293.227,1024,28.21,5.7,6.26
353
+ tf_mixnet_m,224,3485.45,293.782,1024,5.01,0.36,8.19
354
+ vit_relpos_small_patch16_rpn_224,224,3481.46,294.119,1024,21.97,4.59,13.05
355
+ hgnet_tiny,224,3480.86,294.17,1024,14.74,4.54,6.36
356
+ eca_halonext26ts,256,3471.15,294.993,1024,10.76,2.44,11.46
357
+ mobilevitv2_100,256,3470.14,221.307,768,4.9,1.84,16.08
358
+ ecaresnet101d_pruned,224,3466.94,295.35,1024,24.88,3.48,7.69
359
+ ecaresnet26t,256,3417.04,299.664,1024,16.01,3.35,10.53
360
+ hgnetv2_b3,224,3369.26,303.913,1024,16.29,1.78,5.07
361
+ resnetv2_50,224,3355.79,305.133,1024,25.55,4.11,11.11
362
+ botnet26t_256,256,3355.2,305.187,1024,12.49,3.32,11.98
363
+ nf_regnet_b2,272,3353.24,305.366,1024,14.31,1.22,9.27
364
+ bat_resnext26ts,256,3346.65,305.963,1024,10.73,2.53,12.51
365
+ coatnext_nano_rw_224,224,3342.84,306.315,1024,14.7,2.47,12.8
366
+ ecaresnetlight,224,3334.95,307.041,1024,30.16,4.11,8.42
367
+ resnet34,288,3329.62,307.532,1024,21.8,6.07,6.18
368
+ rexnetr_200,224,3328.94,230.694,768,16.52,1.59,15.11
369
+ skresnet34,224,3313.88,308.994,1024,22.28,3.67,5.13
370
+ fastvit_t8,256,3313.63,309.016,1024,4.03,0.7,8.63
371
+ halonet26t,256,3312.1,309.159,1024,12.48,3.19,11.69
372
+ vit_small_r26_s32_224,224,3304.01,309.916,1024,36.43,3.56,9.85
373
+ cs3sedarknet_l,256,3303.09,310.003,1024,21.91,4.86,8.56
374
+ coatnet_nano_cc_224,224,3289.43,311.29,1024,13.76,2.24,15.02
375
+ coat_lite_mini,224,3284.89,311.72,1024,11.01,2.0,12.25
376
+ lambda_resnet26t,256,3270.07,313.133,1024,10.96,3.02,11.87
377
+ mobilenetv4_hybrid_medium,320,3262.62,313.848,1024,11.07,2.05,14.36
378
+ convnextv2_atto,288,3253.42,314.736,1024,3.71,0.91,6.3
379
+ vit_small_resnet26d_224,224,3246.31,315.424,1024,63.61,5.07,11.12
380
+ resnet32ts,256,3243.82,315.666,1024,17.96,4.63,11.58
381
+ vit_tiny_patch16_384,384,3237.39,316.294,1024,5.79,4.7,25.39
382
+ convit_tiny,224,3231.06,316.913,1024,5.71,1.26,7.94
383
+ resnet50,224,3219.06,318.095,1024,25.56,4.11,11.11
384
+ coatnet_nano_rw_224,224,3215.75,318.422,1024,15.14,2.41,15.41
385
+ rexnet_200,224,3200.93,239.92,768,16.37,1.56,14.91
386
+ resnet33ts,256,3195.24,320.467,1024,19.68,4.76,11.66
387
+ resnetv2_50t,224,3185.32,321.463,1024,25.57,4.32,11.82
388
+ mobileone_s2,224,3179.36,322.067,1024,7.88,1.34,11.55
389
+ sam2_hiera_tiny,224,3178.9,322.114,1024,26.85,4.91,17.12
390
+ resnetv2_50d,224,3167.44,323.278,1024,25.57,4.35,11.92
391
+ cspresnet50,256,3155.9,324.462,1024,21.62,4.54,11.5
392
+ seresnet34,288,3147.55,325.319,1024,21.96,6.07,6.18
393
+ efficientvit_b2,224,3143.3,325.761,1024,24.33,1.6,14.62
394
+ resnext26ts,288,3127.83,327.374,1024,10.3,3.07,13.31
395
+ fbnetv3_g,240,3119.5,328.234,1024,16.62,1.28,14.87
396
+ hrnet_w18_small_v2,224,3113.69,328.86,1024,15.6,2.62,9.65
397
+ efficientnet_b1,288,3113.4,328.891,1024,7.79,0.97,15.46
398
+ resnet26,288,3110.68,329.178,1024,16.0,3.9,12.15
399
+ tresnet_m,224,3098.68,330.452,1024,31.39,5.75,7.31
400
+ resnet101,176,3098.41,330.482,1024,44.55,4.92,10.08
401
+ seresnet33ts,256,3096.38,330.698,1024,19.78,4.76,11.66
402
+ eca_resnet33ts,256,3095.76,330.765,1024,19.68,4.76,11.66
403
+ convnext_tiny,224,3090.39,331.339,1024,28.59,4.47,13.44
404
+ dpn68b,224,3087.27,331.673,1024,12.61,2.35,10.47
405
+ dpn68,224,3074.12,333.092,1024,12.61,2.35,10.47
406
+ gcresnet33ts,256,3071.03,333.428,1024,19.88,4.76,11.68
407
+ resnet50t,224,3051.2,335.595,1024,25.57,4.32,11.82
408
+ resnet50c,224,3048.9,335.849,1024,25.58,4.35,11.92
409
+ seresnext26ts,288,3040.9,336.731,1024,10.39,3.07,13.32
410
+ eca_resnext26ts,288,3040.18,336.812,1024,10.3,3.07,13.32
411
+ tf_efficientnet_b2,260,3038.71,336.975,1024,9.11,1.02,13.83
412
+ resnet34d,288,3032.3,337.687,1024,21.82,6.47,7.51
413
+ regnetx_032,224,3027.88,338.176,1024,15.3,3.2,11.37
414
+ resnet50d,224,3025.46,338.448,1024,25.58,4.35,11.92
415
+ dla60,224,3019.09,339.163,1024,22.04,4.26,10.16
416
+ gcresnext26ts,288,3012.63,339.891,1024,10.48,3.07,13.33
417
+ efficientnet_em,240,3000.85,341.226,1024,6.9,3.04,14.34
418
+ vit_medium_patch16_clip_224,224,2993.15,342.104,1024,38.59,8.0,15.93
419
+ levit_512,224,2990.53,342.404,1024,95.17,5.64,10.22
420
+ resnest26d,224,2990.32,342.428,1024,17.07,3.64,9.97
421
+ vit_base_patch32_plus_256,256,2981.28,343.466,1024,119.48,7.79,7.76
422
+ crossvit_small_240,240,2972.85,344.44,1024,26.86,5.63,18.17
423
+ repvit_m1_5,224,2972.23,344.512,1024,14.64,2.31,15.7
424
+ mobileone_s0,224,2964.67,345.389,1024,5.29,1.09,15.48
425
+ cspresnet50d,256,2953.53,346.693,1024,21.64,4.86,12.55
426
+ efficientnet_b2,288,2950.49,347.05,1024,9.11,1.12,16.2
427
+ haloregnetz_b,224,2936.32,348.725,1024,11.68,1.97,11.94
428
+ mobilevit_s,256,2931.67,261.956,768,5.58,2.03,19.94
429
+ cspresnet50w,256,2926.16,349.933,1024,28.12,5.04,12.19
430
+ legacy_seresnet50,224,2915.2,351.251,1024,28.09,3.88,10.6
431
+ tf_efficientnet_em,240,2908.61,352.048,1024,6.9,3.04,14.34
432
+ vgg11,224,2887.76,354.59,1024,132.86,7.61,7.44
433
+ resnetv2_50x1_bit,224,2880.89,355.434,1024,25.55,4.23,11.11
434
+ vit_little_patch16_reg1_gap_256,256,2872.75,356.442,1024,22.52,6.27,18.06
435
+ hiera_tiny_224,224,2872.44,356.48,1024,27.91,4.91,17.13
436
+ resnetaa50,224,2865.28,357.372,1024,25.56,5.15,11.64
437
+ regnetv_040,224,2864.51,357.468,1024,20.64,4.0,12.29
438
+ efficientnet_cc_b1_8e,240,2860.03,358.028,1024,39.72,0.75,15.44
439
+ selecsls84,224,2852.61,358.96,1024,50.95,5.9,7.57
440
+ regnety_032,224,2852.43,358.981,1024,19.44,3.2,11.26
441
+ vit_little_patch16_reg4_gap_256,256,2849.96,359.293,1024,22.52,6.35,18.33
442
+ regnety_040,224,2844.62,359.967,1024,20.65,4.0,12.29
443
+ vovnet39a,224,2844.48,359.985,1024,22.6,7.09,6.73
444
+ coatnet_rmlp_nano_rw_224,224,2836.87,360.951,1024,15.15,2.62,20.34
445
+ seresnet50,224,2831.5,361.636,1024,28.09,4.11,11.13
446
+ resnet26d,288,2828.9,361.967,1024,16.01,4.29,13.48
447
+ wide_resnet50_2,176,2817.33,363.453,1024,68.88,7.29,8.97
448
+ vit_relpos_base_patch32_plus_rpn_256,256,2816.85,363.51,1024,119.42,7.68,8.01
449
+ mixnet_l,224,2808.18,364.638,1024,7.33,0.58,10.84
450
+ cs3darknet_focus_l,288,2803.64,365.228,1024,21.15,5.9,10.16
451
+ levit_512d,224,2803.3,365.271,1024,92.5,5.85,11.3
452
+ convnextv2_femto,288,2792.8,366.646,1024,5.23,1.3,7.56
453
+ deit3_medium_patch16_224,224,2781.17,368.18,1024,38.85,8.0,15.93
454
+ crossvit_15_240,240,2780.78,368.231,1024,27.53,5.81,19.77
455
+ res2net50_48w_2s,224,2776.63,368.782,1024,25.29,4.18,11.72
456
+ resnet50_gn,224,2765.5,370.266,1024,25.56,4.14,11.11
457
+ convnext_tiny_hnf,224,2765.22,370.304,1024,28.59,4.47,13.44
458
+ densenet121,224,2764.41,370.412,1024,7.98,2.87,6.9
459
+ levit_conv_512,224,2753.82,371.836,1024,95.17,5.64,10.22
460
+ resnetv2_50d_gn,224,2752.94,371.954,1024,25.57,4.38,11.92
461
+ visformer_small,224,2752.56,372.007,1024,40.22,4.88,11.43
462
+ ese_vovnet39b,224,2750.44,372.293,1024,24.57,7.09,6.74
463
+ mobilevitv2_125,256,2748.02,279.464,768,7.48,2.86,20.1
464
+ vit_relpos_medium_patch16_cls_224,224,2744.95,373.038,1024,38.76,8.03,18.24
465
+ eca_vovnet39b,224,2742.08,373.43,1024,22.6,7.09,6.74
466
+ tiny_vit_21m_224,224,2740.23,373.681,1024,33.22,4.29,20.08
467
+ twins_svt_small,224,2734.38,374.479,1024,24.06,2.94,13.75
468
+ gcvit_xxtiny,224,2725.34,375.722,1024,12.0,2.14,15.36
469
+ twins_pcpvt_small,224,2723.83,375.93,1024,24.11,3.83,18.08
470
+ resnet50_clip_gap,224,2722.78,376.075,1024,23.53,5.39,12.44
471
+ resnetaa50d,224,2721.94,376.192,1024,25.58,5.39,12.44
472
+ tf_mixnet_l,224,2713.4,377.376,1024,7.33,0.58,10.84
473
+ crossvit_15_dagger_240,240,2708.78,378.02,1024,28.21,6.13,20.43
474
+ ecaresnet50t,224,2707.73,378.166,1024,25.57,4.32,11.83
475
+ seresnet50t,224,2705.39,378.493,1024,28.1,4.32,11.83
476
+ cs3darknet_l,288,2703.4,378.772,1024,21.16,6.16,10.83
477
+ tf_efficientnet_cc_b1_8e,240,2701.66,379.016,1024,39.72,0.75,15.44
478
+ davit_tiny,224,2698.07,284.638,768,28.36,4.54,18.89
479
+ xcit_nano_12_p16_384,384,2693.55,380.156,1024,3.05,1.64,12.15
480
+ resnetaa34d,288,2691.25,380.482,1024,21.82,7.33,8.38
481
+ ecaresnet50d,224,2687.82,380.967,1024,25.58,4.35,11.93
482
+ ecaresnet50d_pruned,288,2675.98,382.653,1024,19.94,4.19,10.61
483
+ vit_base_resnet26d_224,224,2655.81,385.559,1024,101.4,6.97,13.16
484
+ convnext_nano,288,2652.03,386.11,1024,15.59,4.06,13.84
485
+ resnetrs50,224,2651.0,386.252,1024,35.69,4.48,12.14
486
+ nf_regnet_b3,288,2644.82,387.161,1024,18.59,1.67,11.84
487
+ xcit_tiny_24_p16_224,224,2635.92,388.469,1024,12.12,2.34,11.82
488
+ gcresnext50ts,256,2630.41,389.282,1024,15.67,3.75,15.46
489
+ efficientvit_b2,256,2610.75,392.214,1024,24.33,2.09,19.03
490
+ resnetblur50,224,2609.83,392.352,1024,25.56,5.16,12.02
491
+ vgg11_bn,224,2600.05,393.829,1024,132.87,7.62,7.44
492
+ resnet50s,224,2592.0,395.051,1024,25.68,5.47,13.52
493
+ mobileone_s3,224,2581.04,396.729,1024,10.17,1.94,13.85
494
+ resnext50_32x4d,224,2577.31,397.304,1024,25.03,4.26,14.4
495
+ resnet152,160,2576.63,397.407,1024,60.19,5.9,11.51
496
+ hgnetv2_b2,288,2574.93,397.67,1024,11.22,1.89,6.8
497
+ inception_next_tiny,224,2572.75,398.008,1024,28.06,4.19,11.98
498
+ eca_nfnet_l0,224,2572.54,398.04,1024,24.14,4.35,10.47
499
+ poolformerv2_s12,224,2568.37,398.686,1024,11.89,1.83,5.53
500
+ edgenext_small,320,2567.9,398.756,1024,5.59,1.97,14.16
501
+ nfnet_l0,224,2566.01,399.053,1024,35.07,4.36,10.47
502
+ cs3sedarknet_l,288,2552.9,401.1,1024,21.91,6.16,10.83
503
+ cspresnext50,256,2550.51,401.475,1024,20.57,4.05,15.86
504
+ hgnetv2_b4,288,2544.67,402.4,1024,19.8,4.54,11.08
505
+ vit_relpos_medium_patch16_224,224,2533.85,404.118,1024,38.75,7.97,17.02
506
+ resnet50_clip,224,2532.71,404.3,1024,38.32,6.14,12.98
507
+ levit_conv_512d,224,2532.59,404.319,1024,92.5,5.85,11.3
508
+ efficientnet_lite3,300,2522.45,202.967,512,8.2,1.65,21.85
509
+ convnextv2_nano,224,2515.35,407.09,1024,15.62,2.46,8.37
510
+ res2net50_26w_4s,224,2515.04,407.14,1024,25.7,4.28,12.61
511
+ vit_srelpos_medium_patch16_224,224,2513.47,407.394,1024,38.74,7.96,16.21
512
+ gcresnet50t,256,2508.19,408.251,1024,25.9,5.42,14.67
513
+ dla60x,224,2506.41,408.541,1024,17.35,3.54,13.8
514
+ coatnet_0_rw_224,224,2498.58,409.821,1024,27.44,4.43,18.73
515
+ resnetblur50d,224,2484.69,412.114,1024,25.58,5.4,12.82
516
+ resnest50d_1s4x24d,224,2468.01,414.899,1024,25.68,4.43,13.57
517
+ regnetx_040,224,2467.33,415.012,1024,22.12,3.99,12.2
518
+ densenetblur121d,224,2464.18,415.544,1024,8.0,3.11,7.9
519
+ maxvit_pico_rw_256,256,2458.1,312.426,768,7.46,1.83,22.3
520
+ resnext50d_32x4d,224,2457.94,416.598,1024,25.05,4.5,15.2
521
+ res2net50_14w_8s,224,2455.12,417.077,1024,25.06,4.21,13.28
522
+ maxvit_rmlp_pico_rw_256,256,2451.7,313.241,768,7.52,1.85,24.86
523
+ vit_base_r26_s32_224,224,2446.42,418.56,1024,101.38,6.81,12.36
524
+ regnetz_c16,256,2443.63,419.039,1024,13.46,2.51,16.57
525
+ seresnetaa50d,224,2442.34,419.253,1024,28.11,5.4,12.46
526
+ dla60_res2net,224,2435.63,420.413,1024,20.85,4.15,12.34
527
+ mobilenetv4_conv_large,320,2431.25,421.172,1024,32.59,4.47,18.97
528
+ regnety_040_sgn,224,2430.55,421.294,1024,20.65,4.03,12.29
529
+ resnet32ts,288,2428.67,421.62,1024,17.96,5.86,14.65
530
+ regnetz_b16,288,2414.87,424.028,1024,9.72,2.39,16.43
531
+ convnext_nano_ols,288,2407.94,425.249,1024,15.65,4.38,15.5
532
+ res2net50d,224,2403.99,425.948,1024,25.72,4.52,13.41
533
+ res2next50,224,2396.84,427.213,1024,24.67,4.2,13.71
534
+ resnet33ts,288,2391.23,428.221,1024,19.68,6.02,14.75
535
+ resnet26t,320,2387.91,428.817,1024,16.01,5.24,16.44
536
+ focalnet_tiny_srf,224,2383.46,429.616,1024,28.43,4.42,16.32
537
+ lambda_resnet26rpt_256,256,2377.83,322.974,768,10.99,3.16,11.87
538
+ resmlp_24_224,224,2376.96,430.792,1024,30.02,5.96,10.91
539
+ efficientnetv2_rw_t,288,2366.54,432.688,1024,13.65,3.19,16.42
540
+ sehalonet33ts,256,2360.24,433.843,1024,13.69,3.55,14.7
541
+ vovnet57a,224,2356.05,434.611,1024,36.64,8.95,7.52
542
+ inception_v3,299,2349.22,435.874,1024,23.83,5.73,8.97
543
+ edgenext_base,256,2342.04,437.215,1024,18.51,3.85,15.58
544
+ gmixer_24_224,224,2339.36,437.716,1024,24.72,5.28,14.45
545
+ tf_efficientnetv2_b3,300,2333.45,438.824,1024,14.36,3.04,15.74
546
+ dla60_res2next,224,2330.49,439.381,1024,17.03,3.49,13.17
547
+ hiera_small_224,224,2327.86,439.879,1024,35.01,6.42,20.75
548
+ seresnext50_32x4d,224,2326.69,440.099,1024,27.56,4.26,14.42
549
+ nf_ecaresnet50,224,2326.39,440.157,1024,25.56,4.21,11.13
550
+ nf_seresnet50,224,2322.81,440.835,1024,28.09,4.21,11.13
551
+ seresnet33ts,288,2321.27,441.126,1024,19.78,6.02,14.76
552
+ eca_resnet33ts,288,2320.92,441.194,1024,19.68,6.02,14.76
553
+ skresnet50,224,2319.56,441.453,1024,25.8,4.11,12.5
554
+ legacy_seresnext50_32x4d,224,2319.29,441.503,1024,27.56,4.26,14.42
555
+ gc_efficientnetv2_rw_t,288,2314.22,442.471,1024,13.68,3.2,16.45
556
+ vit_relpos_medium_patch16_rpn_224,224,2310.7,443.143,1024,38.73,7.97,17.02
557
+ hgnetv2_b5,224,2308.27,443.612,1024,39.57,6.56,11.19
558
+ nfnet_f0,192,2304.96,444.248,1024,71.49,7.21,10.16
559
+ gcresnet33ts,288,2301.68,444.881,1024,19.88,6.02,14.78
560
+ resnet51q,256,2298.43,445.511,1024,35.7,6.38,16.55
561
+ tf_efficientnet_lite3,300,2290.61,223.511,512,8.2,1.65,21.85
562
+ fbnetv3_g,288,2283.27,448.453,1024,16.62,1.77,21.09
563
+ ese_vovnet57b,224,2280.74,448.967,1024,38.61,8.95,7.52
564
+ vit_medium_patch16_gap_240,240,2270.51,450.989,1024,44.4,9.22,18.81
565
+ hgnet_small,224,2266.61,451.766,1024,24.36,8.53,8.79
566
+ fastvit_t12,256,2264.32,452.222,1024,7.55,1.42,12.42
567
+ pvt_v2_b2,224,2256.22,453.841,1024,25.36,4.05,27.53
568
+ edgenext_small_rw,320,2251.75,454.745,1024,7.83,2.46,14.85
569
+ rdnet_tiny,224,2248.51,455.402,1024,23.86,5.06,15.98
570
+ densenet169,224,2245.68,455.974,1024,14.15,3.4,7.3
571
+ cs3darknet_focus_x,256,2244.78,456.159,1024,35.02,8.03,10.69
572
+ coatnet_rmlp_0_rw_224,224,2240.35,457.061,1024,27.45,4.72,24.89
573
+ darknetaa53,256,2230.29,459.123,1024,36.02,7.97,12.39
574
+ focalnet_tiny_lrf,224,2229.47,459.291,1024,28.65,4.49,17.76
575
+ repvgg_b1g4,224,2228.31,459.531,1024,39.97,8.15,10.64
576
+ efficientvit_l1,224,2227.63,459.67,1024,52.65,5.27,15.85
577
+ skresnet50d,224,2225.51,460.109,1024,25.82,4.36,13.31
578
+ xcit_small_12_p16_224,224,2223.72,460.479,1024,26.25,4.82,12.58
579
+ nf_resnet50,256,2213.33,462.642,1024,25.56,5.46,14.52
580
+ nextvit_small,224,2207.92,463.775,1024,31.76,5.81,18.44
581
+ mobilenetv4_hybrid_medium,384,2197.46,465.981,1024,11.07,3.01,21.18
582
+ poolformer_s24,224,2197.0,466.076,1024,21.39,3.41,10.68
583
+ coatnet_bn_0_rw_224,224,2195.15,466.473,1024,27.44,4.67,22.04
584
+ resnet152,176,2193.45,466.834,1024,60.19,7.22,13.99
585
+ resnet50_mlp,256,2191.94,467.155,1024,26.65,7.05,16.25
586
+ ecaresnet50t,256,2191.4,467.27,1024,25.57,5.64,15.45
587
+ nf_regnet_b3,320,2189.28,467.722,1024,18.59,2.05,14.61
588
+ efficientnet_b3,288,2184.31,234.387,512,12.23,1.63,21.49
589
+ seresnext26t_32x4d,288,2172.28,471.381,1024,16.81,4.46,16.68
590
+ fastvit_s12,256,2167.43,472.438,1024,9.47,1.82,13.67
591
+ resnetrs101,192,2164.92,472.987,1024,63.62,6.04,12.7
592
+ cs3darknet_x,256,2164.39,473.101,1024,35.05,8.38,11.35
593
+ fastvit_sa12,256,2158.11,474.477,1024,11.58,1.96,14.03
594
+ eva02_small_patch14_224,224,2155.47,475.059,1024,21.62,6.14,18.28
595
+ cs3sedarknet_xdw,256,2153.33,475.532,1024,21.6,5.97,17.18
596
+ seresnext26d_32x4d,288,2151.74,475.88,1024,16.81,4.51,16.85
597
+ ecaresnet26t,320,2151.68,475.897,1024,16.01,5.24,16.44
598
+ rexnetr_300,224,2147.85,476.744,1024,34.81,3.39,22.16
599
+ eva02_tiny_patch14_336,336,2147.15,476.899,1024,5.76,4.68,27.16
600
+ convnextv2_pico,288,2134.41,479.746,1024,9.07,2.27,10.08
601
+ ecaresnet101d_pruned,288,2128.71,481.03,1024,24.88,5.75,12.71
602
+ gcvit_xtiny,224,2125.89,481.67,1024,19.98,2.93,20.26
603
+ gmlp_s16_224,224,2125.3,481.803,1024,19.42,4.42,15.1
604
+ lambda_resnet50ts,256,2124.77,481.923,1024,21.54,5.07,17.48
605
+ mobilevitv2_150,256,2105.23,243.194,512,10.59,4.09,24.11
606
+ coatnet_0_224,224,2091.24,244.821,512,25.04,4.58,24.01
607
+ xcit_nano_12_p8_224,224,2072.91,493.98,1024,3.05,2.16,15.71
608
+ darknet53,256,2072.88,493.984,1024,41.61,9.31,12.39
609
+ cs3sedarknet_x,256,2064.75,495.935,1024,35.4,8.38,11.35
610
+ hgnet_tiny,288,2057.8,497.609,1024,14.74,7.51,10.51
611
+ hieradet_small,256,2057.47,373.263,768,34.72,8.51,27.76
612
+ vit_medium_patch16_reg1_gap_256,256,2056.16,498.005,1024,38.88,10.63,22.26
613
+ hgnetv2_b3,288,2054.95,498.299,1024,16.29,2.94,8.38
614
+ rexnetr_200,288,2048.59,249.918,512,16.52,2.62,24.96
615
+ vit_medium_patch16_reg4_gap_256,256,2044.7,500.797,1024,38.88,10.76,22.6
616
+ resnet61q,256,2040.64,501.793,1024,36.85,7.8,17.01
617
+ vit_base_resnet50d_224,224,2033.13,503.646,1024,110.97,8.73,16.92
618
+ resnest50d,224,2025.99,505.42,1024,27.48,5.4,14.36
619
+ regnetx_080,224,2024.58,505.775,1024,39.57,8.02,14.06
620
+ rexnet_300,224,2023.97,505.926,1024,34.71,3.44,22.4
621
+ mixnet_xl,224,2021.99,506.421,1024,11.9,0.93,14.57
622
+ resnetv2_50,288,2021.6,506.519,1024,25.55,6.79,18.37
623
+ vit_medium_patch16_gap_256,256,2021.46,506.555,1024,38.86,10.59,22.15
624
+ pvt_v2_b2_li,224,2015.57,508.034,1024,22.55,3.91,27.6
625
+ resnetv2_101,224,2011.15,509.149,1024,44.54,7.83,16.23
626
+ ecaresnetlight,288,2010.28,509.37,1024,30.16,6.79,13.91
627
+ sebotnet33ts_256,256,2002.67,255.649,512,13.7,3.89,17.46
628
+ swin_tiny_patch4_window7_224,224,1994.25,513.466,1024,28.29,4.51,17.06
629
+ cspdarknet53,256,1989.31,514.74,1024,27.64,6.57,16.81
630
+ maxvit_nano_rw_256,256,1987.74,386.357,768,15.45,4.46,30.28
631
+ maxvit_rmlp_nano_rw_256,256,1983.0,387.281,768,15.5,4.47,31.92
632
+ maxxvit_rmlp_nano_rw_256,256,1975.69,388.707,768,16.78,4.37,26.05
633
+ dm_nfnet_f0,192,1969.57,519.898,1024,71.49,7.21,10.16
634
+ gcresnext50ts,288,1969.42,519.94,1024,15.67,4.75,19.57
635
+ nest_tiny,224,1965.87,520.878,1024,17.06,5.83,25.48
636
+ dla102,224,1956.25,523.44,1024,33.27,7.19,14.18
637
+ resnet101,224,1956.19,523.457,1024,44.55,7.83,16.23
638
+ efficientvit_b2,288,1950.05,525.102,1024,24.33,2.64,24.03
639
+ nest_tiny_jx,224,1941.61,527.385,1024,17.06,5.83,25.48
640
+ efficientformer_l3,224,1940.81,527.604,1024,31.41,3.93,12.01
641
+ resnet50,288,1939.79,527.881,1024,25.56,6.8,18.37
642
+ resnetv2_101d,224,1936.61,528.747,1024,44.56,8.07,17.04
643
+ crossvit_18_240,240,1935.28,529.112,1024,43.27,9.05,26.26
644
+ lamhalobotnet50ts_256,256,1924.13,532.179,1024,22.57,5.02,18.44
645
+ convnext_tiny,288,1920.23,533.259,1024,28.59,7.39,22.21
646
+ res2net50_26w_6s,224,1912.13,535.518,1024,37.05,6.33,15.28
647
+ resnet101c,224,1894.68,540.45,1024,44.57,8.08,17.04
648
+ mobileone_s4,224,1894.21,540.586,1024,14.95,3.04,17.74
649
+ crossvit_18_dagger_240,240,1889.12,542.039,1024,44.27,9.5,27.03
650
+ resnet101d,224,1886.88,542.683,1024,44.57,8.08,17.04
651
+ coat_lite_small,224,1882.69,543.892,1024,19.84,3.96,22.09
652
+ gcresnet50t,288,1876.91,545.567,1024,25.9,6.86,18.57
653
+ twins_pcpvt_base,224,1875.57,545.957,1024,43.83,6.68,25.25
654
+ vgg13,224,1872.83,546.756,1024,133.05,11.31,12.25
655
+ convnext_small,224,1866.18,548.704,1024,50.22,8.71,21.56
656
+ dpn68b,288,1861.61,550.049,1024,12.61,3.89,17.3
657
+ regnetx_064,224,1854.58,552.136,1024,26.21,6.49,16.37
658
+ mobilevitv2_175,256,1853.4,276.238,512,14.25,5.54,28.13
659
+ halonet50ts,256,1851.66,553.006,1024,22.73,5.3,19.2
660
+ efficientnet_b3,320,1851.55,276.514,512,12.23,2.01,26.52
661
+ resnet50t,288,1843.63,555.416,1024,25.57,7.14,19.53
662
+ efficientnetv2_s,288,1830.25,559.474,1024,21.46,4.75,20.13
663
+ resnet50d,288,1828.73,559.94,1024,25.58,7.19,19.7
664
+ wide_resnet50_2,224,1825.62,560.893,1024,68.88,11.43,14.4
665
+ swin_s3_tiny_224,224,1817.27,563.471,1024,28.33,4.64,19.13
666
+ tf_efficientnet_b3,300,1797.7,284.798,512,12.23,1.87,23.83
667
+ hrnet_w18_ssld,224,1790.44,571.914,1024,21.3,4.32,16.31
668
+ tresnet_v2_l,224,1789.04,572.364,1024,46.17,8.85,16.34
669
+ hrnet_w18,224,1781.62,574.732,1024,21.3,4.32,16.31
670
+ repvgg_b1,224,1778.73,575.68,1024,57.42,13.16,10.64
671
+ maxxvitv2_nano_rw_256,256,1768.07,434.362,768,23.7,6.26,23.05
672
+ cs3edgenet_x,256,1767.31,579.4,1024,47.82,11.53,12.92
673
+ resnetaa101d,224,1761.72,581.24,1024,44.57,9.12,17.56
674
+ resnet101_clip_gap,224,1761.35,581.36,1024,42.52,9.11,17.56
675
+ efficientvit_l2,224,1761.03,581.466,1024,63.71,6.97,19.58
676
+ vit_large_patch32_224,224,1759.2,582.072,1024,305.51,15.39,13.3
677
+ legacy_seresnet101,224,1752.85,584.182,1024,49.33,7.61,15.74
678
+ vit_base_patch32_clip_384,384,1750.87,584.842,1024,88.3,13.06,16.5
679
+ densenet201,224,1750.03,585.121,1024,20.01,4.34,7.85
680
+ vit_base_patch32_384,384,1749.95,585.149,1024,88.3,13.06,16.5
681
+ efficientnetv2_rw_s,288,1749.21,585.397,1024,23.94,4.91,21.41
682
+ pit_b_distilled_224,224,1743.25,587.399,1024,74.79,12.5,33.07
683
+ darknetaa53,288,1734.96,590.204,1024,36.02,10.08,15.68
684
+ efficientnet_b3_gn,288,1734.87,295.112,512,11.73,1.74,23.35
685
+ resnetv2_101x1_bit,224,1730.79,591.627,1024,44.54,8.04,16.23
686
+ resnetaa50,288,1729.69,592.003,1024,25.56,8.52,19.24
687
+ seresnet101,224,1724.67,593.727,1024,49.33,7.84,16.27
688
+ regnety_032,288,1721.01,594.988,1024,19.44,5.29,18.61
689
+ seresnet50,288,1715.52,596.892,1024,28.09,6.8,18.39
690
+ regnetv_040,288,1714.23,597.341,1024,20.64,6.6,20.3
691
+ pit_b_224,224,1713.92,597.451,1024,73.76,12.42,32.94
692
+ xcit_tiny_12_p16_384,384,1713.1,597.736,1024,6.72,3.64,18.26
693
+ resnet101s,224,1707.16,599.813,1024,44.67,9.19,18.64
694
+ regnety_040,288,1706.68,599.986,1024,20.65,6.61,20.3
695
+ maxvit_tiny_rw_224,224,1699.84,451.796,768,29.06,5.11,33.11
696
+ regnetv_064,224,1694.43,604.321,1024,30.58,6.39,16.41
697
+ cait_xxs24_224,224,1694.07,604.451,1024,11.96,2.53,20.29
698
+ regnety_064,224,1688.52,606.436,1024,30.58,6.39,16.41
699
+ densenet121,288,1688.36,606.494,1024,7.98,4.74,11.41
700
+ resnet50_gn,288,1685.84,607.404,1024,25.56,6.85,18.37
701
+ resnet51q,288,1684.11,608.026,1024,35.7,8.07,20.94
702
+ resnet101_clip,224,1683.83,608.126,1024,56.26,9.81,18.08
703
+ nf_resnet101,224,1683.1,608.389,1024,44.55,8.01,16.23
704
+ convnext_tiny_hnf,288,1681.41,608.997,1024,28.59,7.39,22.21
705
+ ese_vovnet39b,288,1678.63,457.506,768,24.57,11.71,11.13
706
+ repvit_m2_3,224,1677.4,610.445,1024,23.69,4.57,26.21
707
+ resnetv2_50d_gn,288,1676.39,610.823,1024,25.57,7.24,19.7
708
+ ecaresnet101d,224,1674.43,611.54,1024,44.57,8.08,17.07
709
+ cs3darknet_x,288,1672.69,612.175,1024,35.05,10.6,14.36
710
+ vitamin_small_224,224,1669.96,613.177,1024,22.03,5.92,26.38
711
+ convnextv2_tiny,224,1666.61,614.408,1024,28.64,4.47,13.44
712
+ cs3se_edgenet_x,256,1659.92,616.886,1024,50.72,11.53,12.94
713
+ resnetblur101d,224,1659.81,616.926,1024,44.57,9.12,17.94
714
+ dla102x,224,1657.53,617.775,1024,26.31,5.89,19.42
715
+ regnetz_d32,256,1655.18,618.654,1024,27.58,5.98,23.74
716
+ nf_resnet50,288,1650.24,620.505,1024,25.56,6.88,18.37
717
+ efficientvit_b3,224,1649.16,620.91,1024,48.65,3.99,26.9
718
+ mobilenetv4_conv_large,384,1643.43,623.074,1024,32.59,6.43,27.31
719
+ resnetaa50d,288,1643.41,623.082,1024,25.58,8.92,20.57
720
+ regnetz_d8,256,1642.29,623.507,1024,23.37,3.97,23.74
721
+ hiera_small_abswin_256,256,1642.12,623.574,1024,34.36,8.29,26.38
722
+ ecaresnet50t,288,1641.73,623.721,1024,25.57,7.14,19.55
723
+ seresnet50t,288,1641.04,623.984,1024,28.1,7.14,19.55
724
+ regnetz_b16_evos,224,1637.18,625.455,1024,9.74,1.43,9.95
725
+ nextvit_base,224,1636.37,625.762,1024,44.82,8.29,23.71
726
+ davit_small,224,1634.36,469.897,768,49.75,8.8,30.49
727
+ mixer_b16_224,224,1632.43,627.274,1024,59.88,12.62,14.53
728
+ ecaresnet50d,288,1630.52,628.009,1024,25.58,7.19,19.72
729
+ swinv2_cr_tiny_224,224,1629.93,628.235,1024,28.33,4.66,28.45
730
+ mobilenetv4_hybrid_medium,448,1629.05,471.43,768,11.07,4.2,29.64
731
+ regnety_080,224,1624.53,630.326,1024,39.18,8.0,17.97
732
+ nf_regnet_b4,320,1623.35,630.784,1024,30.21,3.29,19.88
733
+ regnetz_040,256,1621.31,631.576,1024,27.12,4.06,24.19
734
+ volo_d1_224,224,1620.03,632.078,1024,26.63,6.94,24.43
735
+ ese_vovnet39b_evos,224,1614.15,634.378,1024,24.58,7.07,6.74
736
+ darknet53,288,1612.47,635.036,1024,41.61,11.78,15.68
737
+ regnetz_040_h,256,1612.19,635.152,1024,28.94,4.12,24.29
738
+ resnetv2_50d_frn,224,1608.23,636.713,1024,25.59,4.33,11.92
739
+ tf_efficientnetv2_s,300,1604.9,638.035,1024,21.46,5.35,22.73
740
+ swinv2_cr_tiny_ns_224,224,1602.17,639.123,1024,28.33,4.66,28.45
741
+ botnet50ts_256,256,1595.84,320.823,512,22.74,5.54,22.23
742
+ resmlp_36_224,224,1595.2,641.917,1024,44.69,8.91,16.33
743
+ cs3sedarknet_x,288,1594.16,642.334,1024,35.4,10.6,14.37
744
+ pvt_v2_b3,224,1594.09,642.356,1024,45.24,6.92,37.7
745
+ wide_resnet101_2,176,1589.86,644.071,1024,126.89,14.31,13.18
746
+ hiera_base_224,224,1588.68,644.549,1024,51.52,9.4,30.42
747
+ mvitv2_tiny,224,1580.5,647.884,1024,24.17,4.7,21.16
748
+ sequencer2d_s,224,1577.41,649.155,1024,27.65,4.96,11.31
749
+ resnetblur50,288,1577.39,649.165,1024,25.56,8.52,19.87
750
+ resnet101d,256,1576.21,649.647,1024,44.57,10.55,22.25
751
+ mobilevitv2_200,256,1575.87,324.89,512,18.45,7.22,32.15
752
+ resnest50d_4s2x40d,224,1573.08,650.942,1024,30.42,4.4,17.94
753
+ vit_base_patch16_224_miil,224,1571.45,651.616,1024,94.4,17.59,23.91
754
+ vit_base_patch16_224,224,1570.66,651.945,1024,86.57,17.58,23.9
755
+ resnext50_32x4d,288,1565.78,653.979,1024,25.03,7.04,23.81
756
+ deit_base_patch16_224,224,1564.98,654.312,1024,86.57,17.58,23.9
757
+ vit_base_patch16_clip_224,224,1564.55,654.489,1024,86.57,17.58,23.9
758
+ deit_base_distilled_patch16_224,224,1562.81,655.219,1024,87.34,17.68,24.05
759
+ resnext101_32x4d,224,1562.81,655.22,1024,44.18,8.01,21.23
760
+ halo2botnet50ts_256,256,1560.7,656.107,1024,22.64,5.02,21.78
761
+ skresnext50_32x4d,224,1546.56,662.103,1024,27.48,4.5,17.18
762
+ caformer_s18,224,1545.75,662.45,1024,26.34,4.13,19.39
763
+ vit_base_mci_224,224,1545.28,662.65,1024,86.35,17.73,24.65
764
+ eca_nfnet_l0,288,1542.25,663.956,1024,24.14,7.12,17.29
765
+ tresnet_l,224,1541.09,664.452,1024,55.99,10.9,11.9
766
+ nfnet_l0,288,1540.89,664.54,1024,35.07,7.13,17.29
767
+ regnetz_c16,320,1536.02,666.649,1024,13.46,3.92,25.88
768
+ vit_medium_patch16_rope_reg1_gap_256,256,1527.11,670.537,1024,38.74,10.63,22.26
769
+ rdnet_small,224,1526.64,670.743,1024,50.44,8.74,22.55
770
+ convnextv2_nano,288,1526.13,503.224,768,15.62,4.06,13.84
771
+ beit_base_patch16_224,224,1524.53,671.669,1024,86.53,17.58,23.9
772
+ coatnet_rmlp_1_rw_224,224,1520.15,673.605,1024,41.69,7.85,35.47
773
+ mixer_l32_224,224,1519.29,673.986,1024,206.94,11.27,19.86
774
+ res2net50_26w_8s,224,1519.14,674.046,1024,48.4,8.37,17.95
775
+ vit_small_resnet50d_s16_224,224,1515.04,675.876,1024,57.53,13.48,24.82
776
+ regnety_080_tv,224,1514.62,676.066,1024,39.38,8.51,19.73
777
+ res2net101_26w_4s,224,1512.48,677.014,1024,45.21,8.1,18.45
778
+ beitv2_base_patch16_224,224,1510.67,677.835,1024,86.53,17.58,23.9
779
+ resnet61q,288,1508.28,678.909,1024,36.85,9.87,21.52
780
+ resnetblur50d,288,1502.22,681.649,1024,25.58,8.92,21.19
781
+ densenetblur121d,288,1501.15,682.13,1024,8.0,5.14,13.06
782
+ resnext101_32x8d,176,1496.15,684.414,1024,88.79,10.33,19.37
783
+ edgenext_base,320,1496.01,684.475,1024,18.51,6.01,24.32
784
+ resnext50d_32x4d,288,1492.98,685.867,1024,25.05,7.44,25.13
785
+ repvgg_b2g4,224,1487.15,688.554,1024,61.76,12.63,12.9
786
+ deit3_base_patch16_224,224,1485.67,689.239,1024,86.59,17.58,23.9
787
+ fastvit_mci0,256,1480.55,691.623,1024,11.41,2.42,18.29
788
+ poolformer_s36,224,1480.43,691.678,1024,30.86,5.0,15.82
789
+ regnety_040_sgn,288,1480.21,691.781,1024,20.65,6.67,20.3
790
+ seresnetaa50d,288,1478.73,692.475,1024,28.11,8.92,20.59
791
+ res2net101d,224,1471.66,695.8,1024,45.23,8.35,19.25
792
+ resnetv2_50d_evos,224,1466.16,698.411,1024,25.59,4.33,11.92
793
+ vit_relpos_base_patch16_clsgap_224,224,1464.16,699.363,1024,86.43,17.6,25.12
794
+ vit_relpos_base_patch16_cls_224,224,1462.1,700.351,1024,86.43,17.6,25.12
795
+ vit_small_patch16_36x1_224,224,1460.83,700.959,1024,64.67,13.71,35.69
796
+ vit_small_patch16_384,384,1459.09,701.796,1024,22.2,15.52,50.78
797
+ efficientnet_b3_gn,320,1457.71,263.416,384,11.73,2.14,28.83
798
+ inception_next_small,224,1454.59,703.968,1024,49.37,8.36,19.27
799
+ efficientvit_l2,256,1451.75,705.344,1024,63.71,9.09,25.49
800
+ convformer_s18,224,1450.71,705.851,1024,26.77,3.96,15.82
801
+ vit_base_patch16_siglip_gap_224,224,1449.9,706.247,1024,85.8,17.49,23.75
802
+ dpn92,224,1447.04,707.638,1024,37.67,6.54,18.21
803
+ gcvit_tiny,224,1443.56,709.345,1024,28.22,4.79,29.82
804
+ convit_small,224,1442.31,709.962,1024,27.78,5.76,17.87
805
+ focalnet_small_srf,224,1440.19,711.006,1024,49.89,8.62,26.26
806
+ vit_betwixt_patch16_reg1_gap_256,256,1438.93,711.629,1024,60.4,16.32,27.83
807
+ vit_base_patch16_siglip_224,224,1434.62,713.769,1024,92.88,17.73,24.06
808
+ vit_betwixt_patch16_reg4_gap_256,256,1427.38,717.384,1024,60.4,16.52,28.24
809
+ vit_base_patch16_gap_224,224,1426.06,718.052,1024,86.57,17.49,25.59
810
+ maxvit_tiny_tf_224,224,1425.39,538.79,768,30.92,5.6,35.78
811
+ nf_ecaresnet101,224,1424.0,719.089,1024,44.55,8.01,16.27
812
+ coatnet_1_rw_224,224,1423.83,719.174,1024,41.72,8.04,34.6
813
+ nf_seresnet101,224,1422.7,719.746,1024,49.33,8.02,16.27
814
+ coatnet_rmlp_1_rw2_224,224,1422.67,719.763,1024,41.72,8.11,40.13
815
+ seresnext50_32x4d,288,1414.64,723.843,1024,27.56,7.04,23.82
816
+ seresnext101_32x4d,224,1413.69,724.336,1024,48.96,8.02,21.26
817
+ legacy_xception,299,1413.65,543.263,768,22.86,8.4,35.83
818
+ legacy_seresnext101_32x4d,224,1412.48,724.957,1024,48.96,8.02,21.26
819
+ hgnetv2_b5,288,1407.43,727.559,1024,39.57,10.84,18.5
820
+ vit_small_patch16_18x2_224,224,1406.97,727.793,1024,64.67,13.71,35.69
821
+ resnetv2_152,224,1397.01,732.984,1024,60.19,11.55,22.56
822
+ efficientnet_b4,320,1389.63,368.432,512,19.34,3.13,34.76
823
+ vit_base_patch16_clip_quickgelu_224,224,1387.46,738.029,1024,86.19,17.58,23.9
824
+ nfnet_f0,256,1379.83,742.109,1024,71.49,12.62,18.05
825
+ resnet152,224,1373.51,745.522,1024,60.19,11.56,22.56
826
+ flexivit_base,240,1371.79,746.461,1024,86.59,20.29,28.36
827
+ ecaresnet50t,320,1370.27,747.287,1024,25.57,8.82,24.13
828
+ efficientvit_b3,256,1369.93,560.6,768,48.65,5.2,35.01
829
+ vit_relpos_base_patch16_224,224,1369.33,747.799,1024,86.43,17.51,24.97
830
+ cs3edgenet_x,288,1368.22,748.405,1024,47.82,14.59,16.36
831
+ vgg16_bn,224,1364.4,750.503,1024,138.37,15.5,13.56
832
+ resnetv2_152d,224,1363.32,751.094,1024,60.2,11.8,23.36
833
+ mobilenetv4_conv_aa_large,384,1359.23,753.356,1024,32.59,7.07,32.29
834
+ efficientformerv2_s0,224,1356.88,754.66,1024,3.6,0.41,5.3
835
+ regnetx_120,224,1352.8,756.936,1024,46.11,12.13,21.37
836
+ focalnet_small_lrf,224,1350.27,758.334,1024,50.34,8.74,28.61
837
+ twins_pcpvt_large,224,1348.04,759.609,1024,60.99,9.84,35.82
838
+ deit3_small_patch16_384,384,1344.15,761.807,1024,22.21,15.52,50.78
839
+ resnet152c,224,1340.85,763.681,1024,60.21,11.8,23.36
840
+ rexnetr_300,288,1339.94,382.097,512,34.81,5.59,36.61
841
+ maxxvit_rmlp_tiny_rw_256,256,1339.27,573.433,768,29.64,6.66,39.76
842
+ maxvit_tiny_rw_256,256,1338.26,573.871,768,29.07,6.74,44.35
843
+ resnet152d,224,1336.77,766.012,1024,60.21,11.8,23.36
844
+ maxvit_rmlp_tiny_rw_256,256,1336.57,574.593,768,29.15,6.77,46.92
845
+ ese_vovnet99b,224,1332.86,768.26,1024,63.2,16.51,11.27
846
+ poolformerv2_s24,224,1332.11,768.696,1024,21.34,3.42,10.68
847
+ xcit_tiny_12_p8_224,224,1314.83,778.795,1024,6.71,4.81,23.6
848
+ xception41p,299,1314.22,389.574,512,26.91,9.25,39.86
849
+ vit_base_patch32_clip_448,448,1306.81,783.576,1024,88.34,17.93,23.9
850
+ convnext_base,224,1301.11,787.008,1024,88.59,15.38,28.75
851
+ efficientnet_el,300,1300.79,787.203,1024,10.59,8.0,30.7
852
+ nextvit_large,224,1299.51,787.975,1024,57.87,10.78,28.99
853
+ efficientnet_el_pruned,300,1297.09,789.448,1024,10.59,8.0,30.7
854
+ vit_base_patch16_xp_224,224,1295.73,790.276,1024,86.51,17.56,23.9
855
+ dla169,224,1285.68,796.451,1024,53.39,11.6,20.2
856
+ regnety_120,224,1281.03,799.347,1024,51.82,12.14,21.38
857
+ hrnet_w32,224,1280.36,799.766,1024,41.23,8.97,22.02
858
+ coatnet_1_224,224,1275.86,401.286,512,42.23,8.7,39.0
859
+ tf_efficientnet_el,300,1270.05,806.254,1024,10.59,8.0,30.7
860
+ hrnet_w30,224,1268.85,807.019,1024,37.71,8.15,21.21
861
+ vgg19,224,1266.93,808.24,1024,143.67,19.63,14.86
862
+ mixnet_xxl,224,1264.71,607.242,768,23.96,2.04,23.43
863
+ maxvit_tiny_pm_256,256,1264.25,607.461,768,30.09,6.61,47.9
864
+ hiera_base_plus_224,224,1260.02,812.675,1024,69.9,12.67,37.98
865
+ mobilenetv4_conv_large,448,1258.67,610.158,768,32.59,8.75,37.17
866
+ twins_svt_base,224,1256.07,815.231,1024,56.07,8.59,26.33
867
+ vit_base_patch16_rpn_224,224,1255.52,815.59,1024,86.54,17.49,23.75
868
+ nest_small,224,1253.67,816.789,1024,38.35,10.35,40.04
869
+ hgnet_small,288,1251.61,613.599,768,24.36,14.09,14.53
870
+ efficientformerv2_s1,224,1251.14,818.442,1024,6.19,0.67,7.66
871
+ densenet161,224,1249.24,819.689,1024,28.68,7.79,11.06
872
+ resnet152s,224,1245.59,822.091,1024,60.32,12.92,24.96
873
+ vit_mediumd_patch16_reg4_gap_256,256,1243.97,823.162,1024,64.11,17.87,37.57
874
+ nest_small_jx,224,1243.46,823.5,1024,38.35,10.35,40.04
875
+ sequencer2d_m,224,1232.89,830.559,1024,38.31,6.55,14.26
876
+ vit_relpos_base_patch16_rpn_224,224,1232.22,831.004,1024,86.41,17.51,24.97
877
+ repvgg_b2,224,1228.43,833.576,1024,89.02,20.45,12.9
878
+ swin_small_patch4_window7_224,224,1226.82,834.669,1024,49.61,8.77,27.47
879
+ legacy_seresnet152,224,1220.81,838.774,1024,66.82,11.33,22.08
880
+ efficientnet_b3_g8_gn,288,1217.35,630.868,768,14.25,2.59,23.35
881
+ eca_nfnet_l1,256,1214.94,842.831,1024,41.41,9.62,22.04
882
+ mobilenetv4_hybrid_large,384,1210.73,845.759,1024,37.76,7.77,34.52
883
+ swinv2_tiny_window8_256,256,1208.62,847.237,1024,28.35,5.96,24.57
884
+ seresnet152,224,1205.5,849.426,1024,66.82,11.57,22.61
885
+ inception_v4,299,1197.2,855.32,1024,42.68,12.28,15.09
886
+ repvgg_b3g4,224,1194.99,856.9,1024,83.83,17.89,15.1
887
+ fastvit_sa24,256,1192.29,858.839,1024,21.55,3.8,24.32
888
+ resnetv2_101,288,1191.07,859.716,1024,44.54,12.94,26.83
889
+ efficientnet_lite4,380,1188.14,323.185,384,13.01,4.04,45.66
890
+ xcit_small_24_p16_224,224,1187.59,862.241,1024,47.67,9.1,23.64
891
+ mvitv2_small_cls,224,1178.05,869.223,1024,34.87,7.04,28.17
892
+ dm_nfnet_f0,256,1174.19,872.081,1024,71.49,12.62,18.05
893
+ tnt_s_patch16_224,224,1171.01,874.446,1024,23.76,5.24,24.37
894
+ regnetx_160,224,1167.74,876.901,1024,54.28,15.99,25.52
895
+ mvitv2_small,224,1166.57,877.775,1024,34.87,7.0,28.08
896
+ resnet101,288,1162.74,880.669,1024,44.55,12.95,26.83
897
+ xception41,299,1160.72,441.096,512,26.97,9.28,39.86
898
+ davit_base,224,1158.29,663.034,768,87.95,15.51,40.66
899
+ vgg19_bn,224,1153.27,887.9,1024,143.68,19.66,14.86
900
+ convnext_small,288,1152.13,888.774,1024,50.22,14.39,35.65
901
+ vit_base_patch16_reg4_gap_256,256,1148.57,891.534,1024,86.62,23.5,33.89
902
+ coat_tiny,224,1146.04,893.499,1024,5.5,4.35,27.2
903
+ pvt_v2_b4,224,1144.8,894.466,1024,62.56,10.14,53.74
904
+ cait_xxs36_224,224,1141.88,896.758,1024,17.3,3.77,30.34
905
+ nf_regnet_b4,384,1139.48,898.646,1024,30.21,4.7,28.61
906
+ tresnet_xl,224,1139.37,898.735,1024,78.44,15.2,15.34
907
+ crossvit_base_240,240,1129.38,906.68,1024,105.03,21.22,36.33
908
+ vit_small_r26_s32_384,384,1128.5,907.387,1024,36.47,10.43,29.85
909
+ vit_base_patch16_siglip_gap_256,256,1126.79,908.765,1024,85.84,23.13,33.23
910
+ dla102x2,224,1125.59,909.733,1024,41.28,9.34,29.91
911
+ resnet152d,256,1122.78,912.013,1024,60.21,15.41,30.51
912
+ vit_base_patch16_siglip_256,256,1115.06,918.328,1024,92.93,23.44,33.63
913
+ hiera_base_abswin_256,256,1108.56,923.708,1024,51.27,12.46,40.7
914
+ wide_resnet50_2,288,1108.54,923.726,1024,68.88,18.89,23.81
915
+ eva02_base_patch16_clip_224,224,1105.74,926.062,1024,86.26,17.62,26.32
916
+ tf_efficientnet_lite4,380,1103.18,348.074,384,13.01,4.04,45.66
917
+ vit_large_r50_s32_224,224,1102.83,928.511,1024,328.99,19.58,24.41
918
+ efficientnetv2_s,384,1096.79,933.624,1024,21.46,8.44,35.77
919
+ vit_betwixt_patch16_rope_reg4_gap_256,256,1096.33,934.014,1024,60.23,16.52,28.24
920
+ vgg13_bn,224,1093.1,936.772,1024,133.05,11.33,12.25
921
+ efficientvit_l2,288,1092.38,937.387,1024,63.71,11.51,32.19
922
+ hrnet_w18_ssld,288,1090.28,939.198,1024,21.3,7.14,26.96
923
+ convnext_tiny,384,1087.48,706.209,768,28.59,13.14,39.48
924
+ pvt_v2_b5,224,1085.62,943.223,1024,81.96,11.76,50.92
925
+ samvit_base_patch16_224,224,1070.47,956.58,1024,86.46,17.54,24.54
926
+ regnety_160,224,1070.01,956.986,1024,83.59,15.96,23.04
927
+ tf_efficientnetv2_s,384,1069.37,957.558,1024,21.46,8.44,35.77
928
+ cs3se_edgenet_x,320,1057.79,968.048,1024,50.72,18.01,20.21
929
+ resnetaa101d,288,1046.05,978.908,1024,44.57,15.07,29.03
930
+ regnetz_d32,320,1039.68,984.905,1024,27.58,9.33,37.08
931
+ efficientnetv2_rw_s,384,1037.06,987.392,1024,23.94,8.72,38.03
932
+ mobilenetv4_conv_aa_large,448,1034.18,742.609,768,32.59,9.63,43.94
933
+ regnetz_d8,320,1031.64,992.584,1024,23.37,6.19,37.08
934
+ seresnet101,288,1031.63,992.588,1024,49.33,12.95,26.87
935
+ vit_small_patch8_224,224,1031.53,992.686,1024,21.67,22.44,80.84
936
+ efficientvit_b3,288,1028.85,746.455,768,48.65,6.58,44.2
937
+ regnetv_064,288,1024.98,999.029,1024,30.58,10.55,27.11
938
+ regnety_064,288,1024.29,999.702,1024,30.58,10.56,27.11
939
+ poolformer_m36,224,1023.88,1000.107,1024,56.17,8.8,22.02
940
+ vgg16,224,1021.7,1002.242,1024,138.36,15.47,13.56
941
+ wide_resnet101_2,224,1020.62,1003.3,1024,126.89,22.8,21.23
942
+ rdnet_base,224,1017.24,754.976,768,87.45,15.4,31.14
943
+ dpn98,224,1014.12,1009.734,1024,61.57,11.73,25.2
944
+ resnet200,224,1013.79,1010.054,1024,64.67,15.07,32.19
945
+ convnextv2_small,224,1013.29,1010.559,1024,50.32,8.71,21.56
946
+ convnextv2_tiny,288,1011.72,759.092,768,28.64,7.39,22.21
947
+ regnetz_040,320,1006.41,508.726,512,27.12,6.35,37.78
948
+ convmixer_1024_20_ks9_p14,224,1004.68,1019.218,1024,24.38,5.55,5.51
949
+ vit_base_patch16_plus_240,240,1004.42,1019.485,1024,117.56,27.41,33.08
950
+ hgnetv2_b6,224,1002.06,1021.881,1024,75.26,16.88,21.23
951
+ regnety_080,288,1001.83,1022.121,1024,39.18,13.22,29.69
952
+ ecaresnet101d,288,1000.75,1023.221,1024,44.57,13.35,28.19
953
+ swinv2_cr_small_224,224,1000.55,1023.43,1024,49.7,9.07,50.27
954
+ regnetz_040_h,320,1000.36,511.803,512,28.94,6.43,37.94
955
+ resnest101e,256,997.21,1026.848,1024,48.28,13.38,28.66
956
+ convnext_base,256,996.66,1027.418,1024,88.59,20.09,37.55
957
+ vit_base_r50_s16_224,224,993.23,1030.967,1024,97.89,21.66,35.28
958
+ resnetrs101,288,992.71,1031.506,1024,63.62,13.56,28.53
959
+ resnetblur101d,288,992.25,1031.989,1024,44.57,15.07,29.65
960
+ efficientnet_b3_g8_gn,320,990.46,775.388,768,14.25,3.2,28.83
961
+ focalnet_base_srf,224,989.98,1034.35,1024,88.15,15.28,35.01
962
+ swinv2_cr_small_ns_224,224,989.16,1035.213,1024,49.7,9.08,50.27
963
+ regnetz_b16_evos,288,988.59,776.854,768,9.74,2.36,16.43
964
+ regnetz_c16_evos,256,986.94,778.15,768,13.49,2.48,16.57
965
+ maxvit_rmlp_small_rw_224,224,986.14,778.781,768,64.9,10.75,49.3
966
+ inception_next_base,224,985.08,1039.5,1024,86.67,14.85,25.69
967
+ seresnet152d,256,984.13,1040.502,1024,66.84,15.42,30.56
968
+ resnetrs152,256,977.43,1047.637,1024,86.62,15.59,30.83
969
+ resnet101d,320,975.08,1050.156,1024,44.57,16.48,34.77
970
+ inception_resnet_v2,299,965.48,1060.586,1024,55.84,13.18,25.06
971
+ mobilevitv2_150,384,965.44,265.153,256,10.59,9.2,54.25
972
+ resnext101_64x4d,224,959.27,1067.461,1024,83.46,15.52,31.21
973
+ resnext101_32x8d,224,954.55,1072.743,1024,88.79,16.48,31.21
974
+ nfnet_f1,224,951.44,1076.249,1024,132.63,17.87,22.94
975
+ xception65p,299,950.53,538.627,512,39.82,13.91,52.48
976
+ eva02_small_patch14_336,336,946.95,1081.358,1024,22.13,15.48,54.33
977
+ resnext101_32x4d,288,940.09,1089.251,1024,44.18,13.24,35.09
978
+ efficientnet_b4,384,937.43,409.62,384,19.34,4.51,50.04
979
+ coat_lite_medium,224,936.83,1093.037,1024,44.57,9.81,40.06
980
+ focalnet_base_lrf,224,931.65,1099.111,1024,88.75,15.43,38.13
981
+ vit_mediumd_patch16_rope_reg1_gap_256,256,927.19,1104.398,1024,63.95,17.65,37.02
982
+ repvgg_b3,224,927.07,1104.549,1024,123.09,29.16,15.1
983
+ vit_relpos_base_patch16_plus_240,240,923.53,1108.778,1024,117.38,27.3,34.33
984
+ efficientformer_l7,224,921.36,1111.391,1024,82.23,10.17,24.45
985
+ xcit_tiny_24_p16_384,384,918.06,1115.38,1024,12.12,6.87,34.29
986
+ coatnet_2_rw_224,224,914.51,559.852,512,73.87,15.09,49.22
987
+ hrnet_w40,224,911.04,1123.981,1024,57.56,12.75,25.29
988
+ efficientnetv2_m,320,906.73,1129.323,1024,54.14,11.01,39.97
989
+ cait_s24_224,224,906.1,1130.105,1024,46.92,9.35,40.58
990
+ maxvit_small_tf_224,224,903.75,566.52,512,68.93,11.66,53.17
991
+ coat_mini,224,901.59,1135.767,1024,10.34,6.82,33.68
992
+ swin_s3_small_224,224,900.95,852.422,768,49.74,9.43,37.84
993
+ seresnext101_64x4d,224,899.36,1138.577,1024,88.23,15.53,31.25
994
+ poolformerv2_s36,224,899.23,1138.739,1024,30.79,5.01,15.82
995
+ volo_d2_224,224,898.78,1139.307,1024,58.68,14.34,41.34
996
+ seresnext101_32x8d,224,896.76,1141.857,1024,93.57,16.48,31.25
997
+ mobilenetv4_conv_aa_large,480,895.59,857.521,768,32.59,11.05,50.45
998
+ gmlp_b16_224,224,892.77,1146.973,1024,73.08,15.78,30.21
999
+ mobilenetv4_hybrid_large,448,892.61,860.386,768,37.76,10.74,48.61
1000
+ nest_base,224,891.48,1148.639,1024,67.72,17.96,53.39
1001
+ regnetz_e8,256,885.16,1156.837,1024,57.7,9.91,40.94
1002
+ nest_base_jx,224,884.72,1157.412,1024,67.72,17.96,53.39
1003
+ resnetv2_50d_evos,288,884.63,1157.531,1024,25.59,7.15,19.7
1004
+ seresnext101d_32x8d,224,880.67,1162.73,1024,93.59,16.72,32.05
1005
+ swin_base_patch4_window7_224,224,877.35,1167.139,1024,87.77,15.47,36.63
1006
+ coatnet_rmlp_2_rw_224,224,872.68,586.689,512,73.88,15.18,54.78
1007
+ tf_efficientnet_b4,380,871.25,440.737,384,19.34,4.49,49.49
1008
+ levit_384_s8,224,867.41,590.255,512,39.12,9.98,35.86
1009
+ vit_base_patch16_rope_reg1_gap_256,256,867.28,1180.694,1024,86.43,23.22,33.39
1010
+ tiny_vit_21m_384,384,864.15,592.481,512,21.23,13.77,77.83
1011
+ gcvit_small,224,861.28,1188.917,1024,51.09,8.57,41.61
1012
+ convnextv2_nano,384,858.36,596.473,512,15.62,7.22,24.61
1013
+ crossvit_15_dagger_408,408,856.8,1195.136,1024,28.5,21.45,95.05
1014
+ seresnext101_32x4d,288,853.89,1199.198,1024,48.96,13.25,35.12
1015
+ coatnet_2_224,224,853.24,600.055,512,74.68,16.5,52.67
1016
+ xception65,299,843.97,606.646,512,39.92,13.96,52.48
1017
+ maxxvit_rmlp_small_rw_256,256,836.98,917.574,768,66.01,14.67,58.38
1018
+ twins_svt_large,224,833.5,1228.541,1024,99.27,15.15,35.1
1019
+ resnet50x4_clip_gap,288,832.97,1229.323,1024,65.62,19.57,34.11
1020
+ levit_conv_384_s8,224,832.4,615.075,512,39.12,9.98,35.86
1021
+ seresnextaa101d_32x8d,224,831.45,1231.569,1024,93.59,17.25,34.16
1022
+ mvitv2_base_cls,224,829.71,1234.159,1024,65.44,10.23,40.65
1023
+ hgnet_base,224,828.09,927.42,768,71.58,25.14,15.47
1024
+ hrnet_w44,224,826.04,1239.642,1024,67.06,14.94,26.92
1025
+ xcit_medium_24_p16_224,224,825.94,1239.785,1024,84.4,16.13,31.71
1026
+ resnet200d,256,824.85,1241.424,1024,64.69,20.0,43.09
1027
+ eva02_base_patch14_224,224,824.57,1241.85,1024,85.76,23.22,36.55
1028
+ vit_medium_patch16_gap_384,384,824.05,1242.632,1024,39.03,26.08,67.54
1029
+ fastvit_sa36,256,823.61,1243.29,1024,31.53,5.64,34.61
1030
+ dm_nfnet_f1,224,823.18,1243.945,1024,132.63,17.87,22.94
1031
+ caformer_s36,224,822.12,1245.551,1024,39.3,8.0,37.53
1032
+ tresnet_m,448,818.79,1250.62,1024,31.39,22.99,29.21
1033
+ mvitv2_base,224,817.05,1253.276,1024,51.47,10.16,40.5
1034
+ resnet152,288,811.97,1261.117,1024,60.19,19.11,37.28
1035
+ swinv2_base_window12_192,192,804.65,1272.585,1024,109.28,11.9,39.72
1036
+ mobilevitv2_175,384,803.11,318.749,256,14.25,12.47,63.29
1037
+ sequencer2d_l,224,800.9,1278.551,1024,54.3,9.74,22.12
1038
+ efficientnetv2_rw_m,320,796.23,1286.05,1024,53.24,12.72,47.14
1039
+ hrnet_w48_ssld,224,792.32,1292.387,1024,77.47,17.34,28.56
1040
+ fastvit_mci1,256,791.46,1293.799,1024,21.54,4.72,32.84
1041
+ hrnet_w48,224,790.96,1294.607,1024,77.47,17.34,28.56
1042
+ resnet50x4_clip,288,789.83,1296.468,1024,87.14,21.35,35.27
1043
+ convnext_base,288,788.08,1299.342,1024,88.59,25.43,47.53
1044
+ swinv2_tiny_window16_256,256,783.75,653.259,512,28.35,6.68,39.02
1045
+ regnety_120,288,782.19,981.843,768,51.82,20.06,35.34
1046
+ poolformer_m48,224,772.39,1325.737,1024,73.47,11.59,29.17
1047
+ convformer_s36,224,771.32,1327.576,1024,40.01,7.67,30.5
1048
+ maxvit_rmlp_small_rw_256,256,770.25,997.069,768,64.9,14.15,66.09
1049
+ xcit_small_12_p16_384,384,769.74,1330.306,1024,26.25,14.14,36.51
1050
+ resnetv2_50x1_bit,448,763.06,670.968,512,25.55,16.62,44.46
1051
+ tnt_b_patch16_224,224,751.21,1363.12,1024,65.41,14.09,39.01
1052
+ nextvit_small,384,743.78,1376.73,1024,31.76,17.26,57.14
1053
+ dpn131,224,742.22,1379.624,1024,79.25,16.09,32.97
1054
+ swinv2_small_window8_256,256,741.58,1380.817,1024,49.73,11.58,40.14
1055
+ eca_nfnet_l1,320,737.99,1387.537,1024,41.41,14.92,34.42
1056
+ convit_base,224,737.69,1388.094,1024,86.54,17.52,31.77
1057
+ swinv2_cr_small_ns_256,256,735.25,1392.717,1024,49.7,12.07,76.21
1058
+ nf_regnet_b5,384,732.21,1398.493,1024,49.74,7.95,42.9
1059
+ convnextv2_base,224,728.97,1053.529,768,88.72,15.38,28.75
1060
+ swin_s3_base_224,224,726.96,1408.586,1024,71.13,13.69,48.26
1061
+ vit_so150m_patch16_reg4_gap_256,256,726.07,1410.317,1024,134.13,36.75,53.21
1062
+ swinv2_cr_base_224,224,719.8,1422.605,1024,87.88,15.86,59.66
1063
+ seresnet152,288,718.93,1424.325,1024,66.82,19.11,37.34
1064
+ vit_so150m_patch16_reg4_map_256,256,718.36,1425.463,1024,141.48,37.18,53.68
1065
+ vitamin_base_224,224,715.12,715.949,512,87.72,22.68,52.77
1066
+ ecaresnet200d,256,715.01,1432.144,1024,64.69,20.0,43.15
1067
+ seresnet200d,256,714.8,1432.561,1024,71.86,20.01,43.15
1068
+ swinv2_cr_base_ns_224,224,714.1,1433.953,1024,87.88,15.86,59.66
1069
+ resnetrs200,256,710.99,1440.207,1024,93.21,20.18,43.42
1070
+ xcit_nano_12_p8_384,384,706.26,1449.87,1024,3.05,6.34,46.08
1071
+ convnext_large,224,700.67,1461.443,1024,197.77,34.4,43.13
1072
+ densenet264d,224,698.28,1466.454,1024,72.74,13.57,14.0
1073
+ mobilenetv4_conv_aa_large,544,697.25,550.726,384,32.59,14.19,64.79
1074
+ resnet152d,320,692.74,1478.183,1024,60.21,24.08,47.67
1075
+ coat_small,224,688.09,1488.162,1024,21.69,12.61,44.25
1076
+ xcit_tiny_24_p8_224,224,688.06,1488.225,1024,12.11,9.21,45.39
1077
+ senet154,224,671.82,1524.195,1024,115.09,20.77,38.69
1078
+ maxxvitv2_rmlp_base_rw_224,224,671.73,1143.308,768,116.09,24.2,62.77
1079
+ legacy_senet154,224,671.25,1525.511,1024,115.09,20.77,38.69
1080
+ efficientvit_l3,224,665.72,1538.173,1024,246.04,27.62,39.16
1081
+ mobilevitv2_200,384,664.44,385.277,256,18.45,16.24,72.34
1082
+ dpn107,224,662.3,1546.122,1024,86.92,18.38,33.46
1083
+ efficientformerv2_s2,224,659.15,1553.507,1024,12.71,1.27,11.77
1084
+ xception71,299,656.41,779.991,512,42.34,18.09,69.92
1085
+ regnety_160,288,653.9,1174.472,768,83.59,26.37,38.07
1086
+ convnext_small,384,651.61,1178.605,768,50.22,25.58,63.37
1087
+ regnety_320,224,650.19,1574.91,1024,145.05,32.34,30.26
1088
+ volo_d3_224,224,645.72,1585.818,1024,86.33,20.78,60.09
1089
+ fastvit_ma36,256,645.47,1586.419,1024,44.07,7.88,41.09
1090
+ regnetz_d8_evos,256,644.05,1589.928,1024,23.46,4.5,24.92
1091
+ convnext_base,320,637.05,1205.552,768,88.59,31.39,58.68
1092
+ poolformerv2_m36,224,635.87,1610.368,1024,56.08,8.81,22.02
1093
+ davit_large,224,635.02,1209.401,768,196.81,34.6,60.99
1094
+ gcvit_base,224,632.31,1619.451,1024,90.32,14.87,55.48
1095
+ vit_betwixt_patch16_reg4_gap_384,384,627.16,1632.736,1024,60.6,39.71,85.28
1096
+ tf_efficientnetv2_m,384,625.53,1636.99,1024,54.14,15.85,57.52
1097
+ regnetz_c16_evos,320,625.03,819.15,512,13.49,3.86,25.88
1098
+ hgnetv2_b6,288,616.55,1245.628,768,75.26,27.9,35.09
1099
+ hrnet_w64,224,612.17,1672.733,1024,128.06,28.97,35.09
1100
+ seresnet152d,320,607.38,1685.905,1024,66.84,24.09,47.72
1101
+ vit_large_patch32_384,384,604.74,1693.288,1024,306.63,45.31,43.86
1102
+ resnetrs152,320,603.9,1695.633,1024,86.62,24.34,48.14
1103
+ resnet200,288,603.31,1697.296,1024,64.67,24.91,53.21
1104
+ efficientvit_l2,384,599.94,1280.123,768,63.71,20.45,57.01
1105
+ crossvit_18_dagger_408,408,597.37,1714.173,1024,44.61,32.47,124.87
1106
+ caformer_m36,224,590.66,1733.627,1024,56.2,13.29,50.48
1107
+ regnetx_320,224,589.67,1736.551,1024,107.81,31.81,36.3
1108
+ xcit_small_12_p8_224,224,588.09,1741.223,1024,26.21,18.69,47.21
1109
+ resnext101_64x4d,288,587.4,1743.275,1024,83.46,25.66,51.59
1110
+ fastvit_mci2,256,583.9,1753.716,1024,35.82,7.91,43.34
1111
+ resnetv2_50x3_bit,224,582.22,1319.082,768,217.32,37.06,33.34
1112
+ levit_conv_512_s8,224,573.27,669.829,384,74.05,21.82,52.28
1113
+ rdnet_large,224,571.45,895.96,512,186.27,34.74,46.67
1114
+ levit_512_s8,224,570.5,448.717,256,74.05,21.82,52.28
1115
+ convnextv2_tiny,384,570.14,673.511,384,28.64,13.14,39.48
1116
+ efficientnet_b5,416,567.51,451.086,256,30.39,8.27,80.68
1117
+ maxvit_rmlp_base_rw_224,224,564.83,1359.676,768,116.14,23.15,92.64
1118
+ nextvit_base,384,564.24,1814.832,1024,44.82,24.64,73.95
1119
+ seresnet269d,256,557.61,1836.401,1024,113.67,26.59,53.6
1120
+ convformer_m36,224,556.43,1840.292,1024,57.05,12.89,42.05
1121
+ seresnext101_32x8d,288,545.61,1876.8,1024,93.57,27.24,51.63
1122
+ vit_mediumd_patch16_reg4_gap_384,384,545.01,1878.868,1024,64.27,43.67,113.51
1123
+ resnetrs270,256,542.54,1887.403,1024,129.86,27.06,55.84
1124
+ efficientnetv2_m,416,542.39,1887.938,1024,54.14,18.6,67.5
1125
+ efficientvit_l3,256,537.65,1428.432,768,246.04,36.06,50.98
1126
+ nfnet_f2,256,537.3,1905.805,1024,193.78,33.76,41.85
1127
+ seresnext101d_32x8d,288,536.93,1907.142,1024,93.59,27.64,52.95
1128
+ convnext_large_mlp,256,535.86,1433.184,768,200.13,44.94,56.33
1129
+ volo_d1_384,384,534.71,1915.039,1024,26.78,22.75,108.55
1130
+ swinv2_base_window8_256,256,531.77,1925.628,1024,87.92,20.37,52.59
1131
+ halonet_h1,256,529.72,483.267,256,8.1,3.0,51.17
1132
+ resnext101_32x16d,224,523.52,1955.962,1024,194.03,36.27,51.18
1133
+ eca_nfnet_l2,320,520.54,1967.185,1024,56.72,20.95,47.43
1134
+ ecaresnet200d,288,520.38,1967.761,1024,64.69,25.31,54.59
1135
+ seresnet200d,288,520.31,1968.044,1024,71.86,25.32,54.6
1136
+ mixer_l16_224,224,519.58,1970.82,1024,208.2,44.6,41.69
1137
+ caformer_s18,384,518.39,987.656,512,26.34,13.42,77.34
1138
+ regnetz_e8,320,511.76,2000.937,1024,57.7,15.46,63.94
1139
+ resnet200d,320,510.27,2006.761,1024,64.69,31.25,67.33
1140
+ vit_base_patch16_384,384,509.98,2007.91,1024,86.86,55.54,101.56
1141
+ deit_base_patch16_384,384,508.8,2012.559,1024,86.86,55.54,101.56
1142
+ vit_base_patch16_clip_384,384,508.48,2013.827,1024,86.86,55.54,101.56
1143
+ deit_base_distilled_patch16_384,384,508.27,2014.657,1024,87.63,55.65,101.82
1144
+ maxvit_base_tf_224,224,507.09,1009.673,512,119.47,24.04,95.01
1145
+ efficientnet_b5,448,506.95,504.97,256,30.39,9.59,93.56
1146
+ hgnet_base,288,502.62,1018.647,512,71.58,41.55,25.57
1147
+ swin_large_patch4_window7_224,224,500.32,1535.009,768,196.53,34.53,54.94
1148
+ seresnextaa101d_32x8d,288,496.57,2062.142,1024,93.59,28.51,56.44
1149
+ convformer_s18,384,496.26,1031.706,512,26.77,11.63,46.49
1150
+ vit_base_patch16_18x2_224,224,493.7,2074.102,1024,256.73,52.51,71.38
1151
+ coatnet_3_rw_224,224,492.8,519.468,256,181.81,33.44,73.83
1152
+ coatnet_rmlp_3_rw_224,224,492.67,519.608,256,165.15,33.56,79.47
1153
+ swinv2_small_window16_256,256,489.28,1046.418,512,49.73,12.82,66.29
1154
+ vit_small_patch14_dinov2,518,488.22,1573.045,768,22.06,46.76,198.79
1155
+ vit_large_patch16_224,224,486.7,2103.941,1024,304.33,61.6,63.52
1156
+ deit3_base_patch16_384,384,483.16,2119.352,1024,86.88,55.54,101.56
1157
+ eva_large_patch14_196,196,482.51,2122.231,1024,304.14,61.57,63.52
1158
+ hrnet_w48_ssld,288,481.65,2125.993,1024,77.47,28.66,47.21
1159
+ swinv2_large_window12_192,192,480.44,1065.672,512,228.77,26.17,56.53
1160
+ poolformerv2_m48,224,479.37,2136.125,1024,73.35,11.59,29.17
1161
+ vit_small_patch14_reg4_dinov2,518,477.58,2144.131,1024,22.06,46.95,199.77
1162
+ nf_regnet_b5,456,475.93,1613.682,768,49.74,11.7,61.95
1163
+ xcit_large_24_p16_224,224,474.28,2159.06,1024,189.1,35.86,47.27
1164
+ nfnet_f1,320,473.95,2160.551,1024,132.63,35.97,46.77
1165
+ hiera_large_224,224,472.69,2166.325,1024,213.74,40.34,83.37
1166
+ coatnet_3_224,224,472.05,542.3,256,166.97,36.56,79.01
1167
+ beit_large_patch16_224,224,471.9,2169.953,1024,304.43,61.6,63.52
1168
+ beitv2_large_patch16_224,224,471.02,2173.984,1024,304.43,61.6,63.52
1169
+ efficientnetv2_rw_m,416,467.89,1641.398,768,53.24,21.49,79.62
1170
+ beit_base_patch16_384,384,466.33,2195.871,1024,86.74,55.54,101.56
1171
+ deit3_large_patch16_224,224,466.0,2197.402,1024,304.37,61.6,63.52
1172
+ resnetv2_101x1_bit,448,465.55,1099.773,512,44.54,31.65,64.93
1173
+ vit_base_patch16_siglip_gap_384,384,464.63,2203.912,1024,86.09,55.43,101.3
1174
+ dm_nfnet_f2,256,462.44,2214.346,1024,193.78,33.76,41.85
1175
+ maxvit_tiny_tf_384,384,461.19,555.072,256,30.98,17.53,123.42
1176
+ vit_base_patch16_siglip_384,384,459.33,2229.343,1024,93.18,56.12,102.2
1177
+ nextvit_large,384,453.31,2258.917,1024,57.87,32.03,90.76
1178
+ xcit_tiny_12_p8_384,384,445.81,2296.929,1024,6.71,14.13,69.14
1179
+ convnext_base,384,444.68,1151.37,512,88.59,45.21,84.49
1180
+ resnetv2_152x2_bit,224,443.47,2309.045,1024,236.34,46.95,45.11
1181
+ convnext_xlarge,224,442.0,1737.54,768,350.2,60.98,57.5
1182
+ convnextv2_base,288,441.99,1158.384,512,88.72,25.43,47.53
1183
+ resnetrs200,320,441.67,2318.444,1024,93.21,31.51,67.81
1184
+ efficientformerv2_l,224,430.68,2377.646,1024,26.32,2.59,18.54
1185
+ tiny_vit_21m_512,512,429.7,893.628,384,21.27,27.02,177.93
1186
+ convnextv2_large,224,427.59,1197.409,512,197.96,34.4,43.13
1187
+ swinv2_cr_tiny_384,384,426.11,600.773,256,28.33,15.34,161.01
1188
+ tf_efficientnet_b5,456,425.94,601.015,256,30.39,10.46,98.86
1189
+ flexivit_large,240,425.42,2407.038,1024,304.36,70.99,75.39
1190
+ caformer_b36,224,424.34,1809.846,768,98.75,23.22,67.3
1191
+ convnext_large,288,423.15,1209.95,512,197.77,56.87,71.29
1192
+ maxxvitv2_rmlp_large_rw_224,224,421.82,1820.649,768,215.42,44.14,87.15
1193
+ swinv2_cr_large_224,224,421.57,1821.761,768,196.68,35.1,78.42
1194
+ seresnextaa101d_32x8d,320,419.27,1831.76,768,93.59,35.19,69.67
1195
+ tf_efficientnetv2_m,480,411.3,1867.24,768,54.14,24.76,89.84
1196
+ xcit_small_24_p16_384,384,410.23,2496.154,1024,47.67,26.72,68.58
1197
+ davit_huge,224,408.33,1253.884,512,348.92,61.23,81.32
1198
+ regnetz_d8_evos,320,408.1,1881.86,768,23.46,7.03,38.92
1199
+ tresnet_l,448,406.59,2518.499,1024,55.99,43.59,47.56
1200
+ seresnet269d,288,404.46,2531.781,1024,113.67,33.65,67.81
1201
+ dm_nfnet_f1,320,403.9,2535.248,1024,132.63,35.97,46.77
1202
+ convformer_b36,224,402.48,1908.173,768,99.88,22.69,56.06
1203
+ regnety_160,384,385.7,995.583,384,83.59,46.87,67.67
1204
+ vit_large_r50_s32_384,384,380.77,2689.262,1024,329.09,57.43,76.52
1205
+ volo_d4_224,224,377.23,2714.52,1024,192.96,44.34,80.22
1206
+ eca_nfnet_l2,384,365.9,2098.95,768,56.72,30.05,68.28
1207
+ regnety_640,224,364.69,2105.867,768,281.38,64.16,42.5
1208
+ vit_base_patch8_224,224,364.14,2812.13,1024,86.58,78.22,161.69
1209
+ vit_large_patch14_224,224,359.91,2845.139,1024,304.2,81.08,88.79
1210
+ vit_large_patch14_clip_224,224,359.73,2846.6,1024,304.2,81.08,88.79
1211
+ swinv2_base_window16_256,256,357.24,1074.897,384,87.92,22.02,84.71
1212
+ swinv2_base_window12to16_192to256,256,357.13,1075.233,384,87.92,22.02,84.71
1213
+ vit_large_patch16_siglip_gap_256,256,354.73,2886.676,1024,303.36,80.8,88.34
1214
+ vit_large_patch16_siglip_256,256,352.45,2905.34,1024,315.96,81.34,88.88
1215
+ maxvit_large_tf_224,224,347.37,1105.422,384,211.79,43.68,127.35
1216
+ resnest200e,320,346.52,2955.04,1024,70.2,35.69,82.78
1217
+ ecaresnet269d,320,346.03,2959.242,1024,102.09,41.53,83.69
1218
+ efficientnetv2_l,384,345.28,2965.659,1024,118.52,36.1,101.16
1219
+ convnext_large_mlp,320,342.48,1494.96,512,200.13,70.21,88.02
1220
+ tf_efficientnetv2_l,384,341.98,2994.345,1024,118.52,36.1,101.16
1221
+ efficientvit_l3,320,341.33,1500.013,512,246.04,56.32,79.34
1222
+ convmixer_768_32,224,341.12,3001.86,1024,21.11,19.55,25.95
1223
+ inception_next_base,384,340.77,1502.454,512,86.67,43.64,75.48
1224
+ eca_nfnet_l3,352,334.48,3061.425,1024,72.04,32.57,73.12
1225
+ resnetv2_101x3_bit,224,334.42,2296.52,768,387.93,71.23,48.7
1226
+ vit_base_r50_s16_384,384,334.22,2297.893,768,98.95,67.43,135.03
1227
+ vit_large_patch14_clip_quickgelu_224,224,325.86,3142.415,1024,303.97,81.08,88.79
1228
+ repvgg_d2se,320,322.97,3170.518,1024,133.33,74.57,46.82
1229
+ coat_lite_medium_384,384,313.99,1630.619,512,44.57,28.73,116.7
1230
+ resnetrs350,288,312.34,3278.479,1024,163.96,43.67,87.09
1231
+ vit_large_patch14_xp_224,224,310.38,3299.14,1024,304.06,81.01,88.79
1232
+ nasnetalarge,331,307.5,1248.755,384,88.75,23.89,90.56
1233
+ xcit_small_24_p8_224,224,306.97,3335.811,1024,47.63,35.81,90.78
1234
+ tresnet_xl,448,297.76,2579.254,768,78.44,60.77,61.31
1235
+ pnasnet5large,331,297.12,1292.401,384,86.06,25.04,92.89
1236
+ volo_d2_384,384,296.76,3450.594,1024,58.87,46.17,184.51
1237
+ maxvit_small_tf_384,384,291.09,659.588,192,69.02,35.87,183.65
1238
+ vitamin_large2_224,224,290.22,1764.162,512,333.58,75.05,112.83
1239
+ vitamin_large_224,224,290.09,1764.937,512,333.32,75.05,112.83
1240
+ ecaresnet269d,352,288.69,3547.034,1024,102.09,50.25,101.25
1241
+ coatnet_4_224,224,287.2,891.364,256,275.43,62.48,129.26
1242
+ xcit_medium_24_p16_384,384,284.24,3602.53,1024,84.4,47.39,91.64
1243
+ coatnet_rmlp_2_rw_384,384,282.99,678.457,192,73.88,47.69,209.43
1244
+ cait_xxs24_384,384,280.88,3645.694,1024,12.03,9.63,122.66
1245
+ resnetrs270,352,280.56,3649.828,1024,129.86,51.13,105.48
1246
+ caformer_s36,384,275.88,1855.885,512,39.3,26.08,150.33
1247
+ resnet50x16_clip_gap,384,269.91,1896.893,512,136.2,70.32,100.64
1248
+ nfnet_f2,352,268.71,3810.855,1024,193.78,63.22,79.06
1249
+ convnext_xlarge,288,267.69,1912.637,512,350.2,100.8,95.05
1250
+ efficientnet_b6,528,265.01,482.998,128,43.04,19.4,167.39
1251
+ convformer_s36,384,263.97,1939.565,512,40.01,22.54,89.62
1252
+ eva02_large_patch14_224,224,260.65,3928.659,1024,303.27,81.15,97.2
1253
+ swinv2_cr_small_384,384,260.05,984.414,256,49.7,29.7,298.03
1254
+ maxvit_tiny_tf_512,512,259.65,739.436,192,31.05,33.49,257.59
1255
+ convnextv2_large,288,259.46,986.639,256,197.96,56.87,71.29
1256
+ resnet50x16_clip,384,258.3,1982.153,512,167.33,74.9,103.54
1257
+ eva02_large_patch14_clip_224,224,256.99,3984.533,1024,304.11,81.18,97.2
1258
+ mvitv2_large_cls,224,256.7,2991.779,768,234.58,42.17,111.69
1259
+ tf_efficientnet_b6,528,254.23,503.468,128,43.04,19.4,167.39
1260
+ vit_so400m_patch14_siglip_gap_224,224,251.8,4066.704,1024,412.44,109.57,106.13
1261
+ resnext101_32x32d,224,251.59,2035.02,512,468.53,87.29,91.12
1262
+ nfnet_f3,320,250.99,4079.757,1024,254.92,68.77,83.93
1263
+ vit_so400m_patch14_siglip_224,224,250.86,4082.015,1024,427.68,110.26,106.73
1264
+ vit_base_patch16_siglip_gap_512,512,250.3,2045.523,512,86.43,107.0,246.15
1265
+ convnextv2_base,384,249.33,1026.75,256,88.72,45.21,84.49
1266
+ mvitv2_large,224,249.02,2056.082,512,217.99,43.87,112.02
1267
+ vit_base_patch16_siglip_512,512,247.63,2067.618,512,93.52,108.22,247.74
1268
+ volo_d5_224,224,246.42,4155.464,1024,295.46,72.4,118.11
1269
+ efficientnetv2_xl,384,242.5,4222.705,1024,208.12,52.81,139.2
1270
+ convnext_large,384,238.93,1607.182,384,197.77,101.1,126.74
1271
+ convnext_large_mlp,384,238.86,1607.59,384,200.13,101.11,126.74
1272
+ dm_nfnet_f2,352,235.33,3263.538,768,193.78,63.22,79.06
1273
+ xcit_tiny_24_p8_384,384,233.44,4386.633,1024,12.11,27.05,132.95
1274
+ swin_base_patch4_window12_384,384,232.72,1100.045,256,87.9,47.19,134.78
1275
+ efficientnetv2_l,480,231.29,2213.666,512,118.52,56.4,157.99
1276
+ tf_efficientnetv2_xl,384,229.94,4453.261,1024,208.12,52.81,139.2
1277
+ tf_efficientnetv2_l,480,229.17,2234.12,512,118.52,56.4,157.99
1278
+ resnetrs420,320,226.89,4513.089,1024,191.89,64.2,126.56
1279
+ vitamin_large_256,256,224.62,1709.558,384,333.38,99.0,154.99
1280
+ vitamin_large2_256,256,224.19,1712.805,384,333.64,99.0,154.99
1281
+ maxxvitv2_rmlp_base_rw_384,384,219.77,1747.277,384,116.09,72.98,213.74
1282
+ swinv2_large_window12to16_192to256,256,218.3,1172.672,256,196.74,47.81,121.53
1283
+ regnety_320,384,216.75,1771.603,384,145.05,95.0,88.87
1284
+ dm_nfnet_f3,320,216.6,4727.524,1024,254.92,68.77,83.93
1285
+ resmlp_big_24_224,224,215.29,4756.307,1024,129.14,100.23,87.31
1286
+ efficientvit_l3,384,214.99,1786.092,384,246.04,81.08,114.02
1287
+ seresnextaa201d_32x8d,320,214.12,4782.305,1024,149.39,70.22,138.71
1288
+ xcit_medium_24_p8_224,224,211.66,4837.959,1024,84.32,63.53,121.23
1289
+ hiera_huge_224,224,211.18,2424.411,512,672.78,124.85,150.95
1290
+ eca_nfnet_l3,448,206.31,2481.677,512,72.04,52.55,118.4
1291
+ caformer_m36,384,198.38,1290.465,256,56.2,42.11,196.35
1292
+ xcit_small_12_p8_384,384,196.63,2603.882,512,26.21,54.92,138.29
1293
+ cait_xs24_384,384,196.17,3914.895,768,26.67,19.28,183.98
1294
+ rdnet_large,384,195.11,984.048,192,186.27,102.09,137.13
1295
+ eva02_base_patch14_448,448,193.77,2642.327,512,87.12,107.11,259.14
1296
+ maxvit_xlarge_tf_224,224,190.88,1341.125,256,506.99,97.52,191.04
1297
+ focalnet_huge_fl3,224,190.8,2683.391,512,745.28,118.26,104.8
1298
+ convformer_m36,384,189.9,1348.049,256,57.05,37.87,123.56
1299
+ resnetrs350,384,188.33,5437.373,1024,163.96,77.59,154.74
1300
+ cait_xxs36_384,384,188.29,5438.504,1024,17.37,14.35,183.7
1301
+ swinv2_cr_base_384,384,185.69,1378.646,256,87.88,50.57,333.68
1302
+ vit_huge_patch14_224,224,183.94,5567.143,1024,630.76,167.4,139.41
1303
+ vit_huge_patch14_clip_224,224,183.87,5569.209,1024,632.05,167.4,139.41
1304
+ vit_base_patch14_dinov2,518,181.94,2814.035,512,86.58,151.71,397.58
1305
+ regnety_1280,224,181.49,2821.098,512,644.81,127.66,71.58
1306
+ maxvit_rmlp_base_rw_384,384,181.15,2119.803,384,116.14,70.97,318.95
1307
+ vit_base_patch14_reg4_dinov2,518,180.64,2834.287,512,86.58,152.25,399.53
1308
+ vitamin_xlarge_256,256,180.27,1420.118,256,436.06,130.13,177.37
1309
+ swinv2_cr_huge_224,224,179.14,2143.597,384,657.83,115.97,121.08
1310
+ vit_huge_patch14_gap_224,224,178.21,5745.966,1024,630.76,166.73,138.74
1311
+ deit3_huge_patch14_224,224,176.76,5793.252,1024,632.13,167.4,139.41
1312
+ convnextv2_huge,224,175.22,1461.021,256,660.29,115.0,79.07
1313
+ sam2_hiera_tiny,896,173.99,367.818,64,26.85,99.86,384.63
1314
+ vit_huge_patch14_clip_quickgelu_224,224,169.48,6042.139,1024,632.08,167.4,139.41
1315
+ maxvit_base_tf_384,384,163.19,1176.531,192,119.65,73.8,332.9
1316
+ vit_huge_patch14_xp_224,224,162.96,6283.755,1024,631.8,167.3,139.41
1317
+ maxvit_small_tf_512,512,162.91,589.27,96,69.13,67.26,383.77
1318
+ xcit_large_24_p16_384,384,162.76,6291.334,1024,189.1,105.35,137.17
1319
+ resnest269e,416,162.7,3146.963,512,110.93,77.69,171.98
1320
+ vit_large_patch16_384,384,159.33,4820.268,768,304.72,191.21,270.24
1321
+ resnetv2_152x2_bit,384,158.13,2428.302,384,236.34,136.16,132.56
1322
+ eva_large_patch14_336,336,157.87,4864.597,768,304.53,191.1,270.24
1323
+ vit_large_patch14_clip_336,336,157.73,4868.972,768,304.53,191.11,270.24
1324
+ efficientnet_b7,600,155.34,618.001,96,66.35,38.33,289.94
1325
+ convmixer_1536_20,224,153.83,6656.648,1024,51.63,48.68,33.03
1326
+ coatnet_5_224,224,152.87,1255.992,192,687.47,145.49,194.24
1327
+ seresnextaa201d_32x8d,384,152.07,5050.135,768,149.39,101.11,199.72
1328
+ convnext_xxlarge,256,152.04,2525.627,384,846.47,198.09,124.45
1329
+ deit3_large_patch16_384,384,151.9,6741.438,1024,304.76,191.21,270.24
1330
+ cait_s24_384,384,150.93,3392.288,512,47.06,32.17,245.31
1331
+ convnext_xlarge,384,150.77,1697.89,256,350.2,179.2,168.99
1332
+ davit_giant,224,150.39,2553.341,384,1406.47,192.92,153.06
1333
+ tf_efficientnet_b7,600,150.32,638.606,96,66.35,38.33,289.94
1334
+ volo_d3_448,448,150.3,3406.615,512,86.63,96.33,446.83
1335
+ nfnet_f3,416,148.76,3441.715,512,254.92,115.58,141.78
1336
+ vit_large_patch16_siglip_gap_384,384,148.02,5188.469,768,303.69,190.85,269.55
1337
+ vit_giant_patch16_gap_224,224,147.29,6952.477,1024,1011.37,202.46,139.26
1338
+ vit_large_patch16_siglip_384,384,147.15,5219.006,768,316.28,192.07,270.75
1339
+ sam2_hiera_small,896,147.12,435.006,64,33.95,123.99,442.63
1340
+ resnetv2_50x3_bit,448,147.08,1305.439,192,217.32,145.7,133.37
1341
+ beit_large_patch16_384,384,146.92,6969.565,1024,305.0,191.21,270.24
1342
+ convnextv2_large,384,146.46,1310.909,192,197.96,101.1,126.74
1343
+ resnetv2_152x4_bit,224,144.26,3549.137,512,936.53,186.9,90.22
1344
+ efficientnetv2_xl,512,144.24,3549.736,512,208.12,93.85,247.32
1345
+ vit_large_patch14_clip_quickgelu_336,336,143.45,5353.757,768,304.29,191.11,270.24
1346
+ nfnet_f4,384,143.08,3578.376,512,316.07,122.14,147.57
1347
+ tf_efficientnetv2_xl,512,143.04,3579.486,512,208.12,93.85,247.32
1348
+ caformer_b36,384,142.66,1794.459,256,98.75,72.33,261.79
1349
+ convformer_b36,384,137.39,1863.311,256,99.88,66.67,164.75
1350
+ swin_large_patch4_window12_384,384,136.15,940.102,128,196.74,104.08,202.16
1351
+ resnetrs420,416,133.29,7682.756,1024,191.89,108.45,213.79
1352
+ dm_nfnet_f3,416,128.51,3984.046,512,254.92,115.58,141.78
1353
+ regnety_640,384,127.6,2006.234,256,281.38,188.47,124.83
1354
+ vitamin_large2_336,336,125.13,1534.357,192,333.83,175.72,307.47
1355
+ vitamin_large_336,336,125.0,1536.041,192,333.57,175.72,307.47
1356
+ dm_nfnet_f4,384,124.43,4114.62,512,316.07,122.14,147.57
1357
+ focalnet_huge_fl4,224,122.1,4193.181,512,686.46,118.9,113.34
1358
+ xcit_large_24_p8_224,224,120.47,4249.905,512,188.93,141.23,181.56
1359
+ eva_giant_patch14_224,224,119.09,8598.315,1024,1012.56,267.18,192.64
1360
+ eva_giant_patch14_clip_224,224,118.98,8606.661,1024,1012.59,267.18,192.64
1361
+ vit_giant_patch14_clip_224,224,116.63,8780.15,1024,1012.65,267.18,192.64
1362
+ vit_giant_patch14_224,224,116.59,8782.578,1024,1012.61,267.18,192.64
1363
+ resnetv2_152x2_bit,448,115.47,2217.012,256,236.34,184.99,180.43
1364
+ maxvit_large_tf_384,384,114.31,1119.749,128,212.03,132.55,445.84
1365
+ eva02_large_patch14_clip_336,336,113.18,6785.716,768,304.43,191.34,289.13
1366
+ swinv2_cr_large_384,384,111.86,1144.308,128,196.68,108.96,404.96
1367
+ mvitv2_huge_cls,224,111.11,3456.171,384,694.8,120.67,243.63
1368
+ convnextv2_huge,288,106.25,1204.727,128,660.29,190.1,130.7
1369
+ xcit_small_24_p8_384,384,103.14,4964.351,512,47.63,105.24,265.91
1370
+ nfnet_f5,416,101.36,5051.238,512,377.21,170.71,204.56
1371
+ vitamin_xlarge_336,336,101.3,1895.3,192,436.06,230.18,347.33
1372
+ efficientnet_b8,672,101.16,948.953,96,87.41,63.48,442.89
1373
+ cait_s36_384,384,100.96,5071.452,512,68.37,47.99,367.4
1374
+ tf_efficientnet_b8,672,98.41,975.542,96,87.41,63.48,442.89
1375
+ davit_base_fl,768,97.7,1310.076,128,90.37,190.32,530.15
1376
+ swinv2_base_window12to24_192to384,384,96.33,664.374,64,87.92,55.25,280.36
1377
+ focalnet_large_fl3,384,94.31,4071.8,384,239.13,105.06,168.04
1378
+ resnet50x64_clip_gap,448,93.27,2744.574,256,365.03,253.96,233.22
1379
+ maxvit_base_tf_512,512,91.79,1045.908,96,119.88,138.02,703.99
1380
+ focalnet_large_fl4,384,91.43,4199.805,384,239.32,105.2,181.78
1381
+ resnet50x64_clip,448,90.11,2840.862,256,420.38,265.02,239.13
1382
+ vitamin_large2_384,384,88.05,2180.459,192,333.97,234.44,440.16
1383
+ vitamin_large_384,384,87.97,2182.658,192,333.71,234.44,440.16
1384
+ dm_nfnet_f5,416,87.96,5820.621,512,377.21,170.71,204.56
1385
+ volo_d4_448,448,86.81,5898.056,512,193.41,197.13,527.35
1386
+ resnetv2_101x3_bit,448,85.75,2239.106,192,387.93,280.33,194.78
1387
+ nfnet_f4,512,81.27,4725.064,384,316.07,216.26,262.26
1388
+ vit_so400m_patch14_siglip_gap_384,384,81.27,6299.962,512,412.99,333.46,451.19
1389
+ vit_so400m_patch14_siglip_384,384,80.82,6335.238,512,428.23,335.4,452.89
1390
+ vit_huge_patch14_clip_336,336,79.74,6420.825,512,632.46,390.97,407.54
1391
+ sam2_hiera_base_plus,896,77.26,828.314,64,68.68,227.48,828.88
1392
+ nfnet_f6,448,75.59,6773.363,512,438.36,229.7,273.62
1393
+ beit_large_patch16_512,512,75.53,6779.013,512,305.67,362.24,656.39
1394
+ vitamin_xlarge_384,384,75.14,1703.48,128,436.06,306.38,493.46
1395
+ xcit_medium_24_p8_384,384,71.62,5361.499,384,84.32,186.67,354.73
1396
+ dm_nfnet_f4,512,70.24,5466.713,384,316.07,216.26,262.26
1397
+ vit_gigantic_patch14_224,224,66.99,7643.451,512,1844.44,483.95,275.37
1398
+ vit_gigantic_patch14_clip_224,224,66.83,7661.682,512,1844.91,483.96,275.37
1399
+ dm_nfnet_f6,448,66.08,5811.539,384,438.36,229.7,273.62
1400
+ focalnet_xlarge_fl3,384,65.69,3897.182,256,408.79,185.61,223.99
1401
+ regnety_1280,384,64.85,2960.449,192,644.81,374.99,210.2
1402
+ maxvit_large_tf_512,512,64.1,998.492,64,212.33,244.75,942.15
1403
+ maxvit_xlarge_tf_384,384,63.88,1502.728,96,475.32,292.78,668.76
1404
+ focalnet_xlarge_fl4,384,63.84,4009.787,256,409.03,185.79,242.31
1405
+ vit_huge_patch14_clip_378,378,62.23,8227.954,512,632.68,503.79,572.79
1406
+ eva02_large_patch14_448,448,61.3,8351.964,512,305.08,362.33,689.95
1407
+ convnextv2_huge,384,61.13,1570.42,96,660.29,337.96,232.35
1408
+ swinv2_large_window12to24_192to384,384,61.02,786.614,48,196.74,116.15,407.83
1409
+ tf_efficientnet_l2,475,60.44,1588.24,96,480.31,172.11,609.89
1410
+ nfnet_f5,544,58.88,6521.51,384,377.21,290.97,349.71
1411
+ vit_large_patch14_dinov2,518,58.84,6526.064,384,304.37,507.15,1058.82
1412
+ vit_large_patch14_reg4_dinov2,518,58.65,6547.461,384,304.37,508.9,1064.02
1413
+ nfnet_f7,480,57.75,6649.626,384,499.5,300.08,355.86
1414
+ volo_d5_448,448,57.67,4439.066,256,295.91,315.06,737.92
1415
+ vit_so400m_patch14_siglip_gap_448,448,57.55,6672.399,384,413.33,487.18,764.26
1416
+ vit_huge_patch14_clip_quickgelu_378,378,57.44,8913.629,512,632.68,503.79,572.79
1417
+ vit_huge_patch16_gap_448,448,53.54,7172.285,384,631.67,544.7,636.83
1418
+ eva_giant_patch14_336,336,51.78,9887.539,512,1013.01,620.64,550.67
1419
+ swinv2_cr_giant_224,224,51.7,3713.689,192,2598.76,483.85,309.15
1420
+ dm_nfnet_f5,544,51.67,4954.931,256,377.21,290.97,349.71
1421
+ swinv2_cr_huge_384,384,48.44,1321.195,64,657.94,352.04,583.18
1422
+ nfnet_f6,576,45.98,5567.566,256,438.36,378.69,452.2
1423
+ volo_d5_512,512,44.13,5800.796,256,296.09,425.09,1105.37
1424
+ xcit_large_24_p8_384,384,40.73,6285.389,256,188.93,415.0,531.82
1425
+ dm_nfnet_f6,576,39.87,6420.075,256,438.36,378.69,452.2
1426
+ nfnet_f7,608,36.14,7083.228,256,499.5,480.39,570.85
1427
+ maxvit_xlarge_tf_512,512,35.64,1346.799,48,475.77,534.14,1413.22
1428
+ regnety_2560,384,34.99,2743.264,96,1282.6,747.83,296.49
1429
+ convnextv2_huge,512,34.27,1400.46,48,660.29,600.81,413.07
1430
+ davit_huge_fl,768,34.25,1868.539,64,360.64,744.84,1060.3
1431
+ cait_m36_384,384,33.17,7717.041,256,271.22,173.11,734.81
1432
+ resnetv2_152x4_bit,480,32.37,3954.731,128,936.53,844.84,414.26
1433
+ sam2_hiera_large,1024,23.78,2018.069,48,212.15,907.48,2190.34
1434
+ efficientnet_l2,800,22.94,1394.635,32,480.31,479.12,1707.39
1435
+ samvit_base_patch16,1024,22.72,528.058,12,89.67,486.43,1343.27
1436
+ tf_efficientnet_l2,800,22.52,1420.719,32,480.31,479.12,1707.39
1437
+ vit_giant_patch14_dinov2,518,17.71,7227.529,128,1136.48,1784.2,2757.89
1438
+ vit_giant_patch14_reg4_dinov2,518,17.61,7266.646,128,1136.48,1790.08,2771.21
1439
+ eva_giant_patch14_560,560,16.99,7533.399,128,1014.45,1906.76,2577.17
1440
+ swinv2_cr_giant_384,384,14.93,2142.906,32,2598.76,1450.71,1394.86
1441
+ cait_m48_448,448,14.06,9102.618,128,356.46,329.41,1708.23
1442
+ samvit_large_patch16,1024,10.4,769.501,8,308.28,1493.86,2553.78
1443
+ vit_so400m_patch14_siglip_gap_896,896,10.27,9344.729,96,416.87,2731.49,8492.88
1444
+ samvit_huge_patch16,1024,6.35,944.29,6,637.03,2982.23,3428.16
pytorch-image-models/results/benchmark-infer-amp-nchw-pt240-cu124-rtx4090-dynamo.csv ADDED
@@ -0,0 +1,1444 @@
1
+ model,infer_img_size,infer_samples_per_sec,infer_step_time,infer_batch_size,param_count,infer_gmacs,infer_macts
2
+ test_efficientnet,160,188911.31,1.347,256,0.36,0.06,0.55
3
+ test_byobnet,160,178532.03,1.426,256,0.46,0.03,0.43
4
+ test_vit,160,155871.09,1.635,256,0.37,0.04,0.48
5
+ lcnet_035,224,97850.77,2.608,256,1.64,0.03,1.04
6
+ tf_mobilenetv3_small_minimal_100,224,93614.74,2.726,256,2.04,0.06,1.41
7
+ lcnet_050,224,86660.68,2.946,256,1.88,0.05,1.26
8
+ mobilenetv3_small_050,224,83744.91,3.049,256,1.59,0.03,0.92
9
+ tinynet_e,106,83059.88,3.074,256,2.04,0.03,0.69
10
+ mobilenetv3_small_075,224,82620.81,3.091,256,2.04,0.05,1.3
11
+ mobilenetv3_small_100,224,79752.52,3.202,256,2.54,0.06,1.42
12
+ mobilenetv4_conv_small,224,74963.65,3.407,256,3.77,0.19,1.97
13
+ tf_mobilenetv3_small_075,224,68793.66,3.712,256,2.04,0.05,1.3
14
+ tf_mobilenetv3_small_100,224,63377.08,4.031,256,2.54,0.06,1.42
15
+ tinynet_d,152,62921.33,4.061,256,2.34,0.05,1.42
16
+ lcnet_075,224,59549.33,4.29,256,2.36,0.1,1.99
17
+ mnasnet_small,224,58424.52,4.373,256,2.03,0.07,2.16
18
+ levit_conv_128s,224,58296.58,4.383,256,7.78,0.31,1.88
19
+ mobilenetv4_conv_small,256,54925.26,4.652,256,3.77,0.25,2.57
20
+ levit_128s,224,53157.92,4.807,256,7.78,0.31,1.88
21
+ resnet10t,176,52732.02,4.846,256,5.44,0.7,1.51
22
+ ghostnet_050,224,52648.58,4.854,256,2.59,0.05,1.77
23
+ regnetx_002,224,50885.47,5.023,256,2.68,0.2,2.16
24
+ repghostnet_050,224,49844.85,5.127,256,2.31,0.05,2.02
25
+ resnet18,160,49720.41,5.14,256,11.69,0.93,1.27
26
+ mobilenetv2_035,224,47284.89,5.404,256,1.68,0.07,2.86
27
+ regnety_002,224,47175.78,5.418,256,3.16,0.2,2.17
28
+ lcnet_100,224,46545.58,5.491,256,2.95,0.16,2.52
29
+ mnasnet_050,224,45318.19,5.64,256,2.22,0.11,3.07
30
+ repghostnet_058,224,44344.16,5.764,256,2.55,0.07,2.59
31
+ levit_conv_128,224,43034.18,5.94,256,9.21,0.41,2.71
32
+ vit_tiny_r_s16_p8_224,224,40905.75,6.25,256,6.34,0.44,2.06
33
+ efficientvit_b0,224,40487.21,6.315,256,3.41,0.1,2.87
34
+ regnetx_004,224,40146.51,6.368,256,5.16,0.4,3.14
35
+ levit_128,224,39948.51,6.4,256,9.21,0.41,2.71
36
+ regnetx_004_tv,224,38547.28,6.633,256,5.5,0.42,3.17
37
+ repghostnet_080,224,37919.74,6.742,256,3.28,0.1,3.22
38
+ levit_conv_192,224,37779.54,6.768,256,10.95,0.66,3.2
39
+ semnasnet_050,224,37506.25,6.817,256,2.08,0.11,3.44
40
+ hgnetv2_b0,224,37356.26,6.845,256,6.0,0.33,2.12
41
+ mobilenetv2_050,224,37215.01,6.87,256,1.97,0.1,3.64
42
+ gernet_s,224,36311.59,7.041,256,8.17,0.75,2.65
43
+ efficientvit_m2,224,36122.02,7.079,256,4.19,0.2,1.47
44
+ pit_ti_224,224,36065.64,7.089,256,4.85,0.7,6.19
45
+ pit_ti_distilled_224,224,35852.04,7.132,256,5.1,0.71,6.23
46
+ efficientvit_m1,224,34221.22,7.472,256,2.98,0.17,1.33
47
+ resnet10t,224,33377.1,7.661,256,5.44,1.1,2.43
48
+ vit_small_patch32_224,224,32872.0,7.779,256,22.88,1.15,2.5
49
+ efficientvit_m3,224,32739.6,7.81,256,6.9,0.27,1.62
50
+ mixer_s32_224,224,32505.37,7.867,256,19.1,1.0,2.28
51
+ levit_192,224,31250.48,8.183,256,10.95,0.66,3.2
52
+ edgenext_xx_small,256,31029.41,8.242,256,1.33,0.26,3.33
53
+ xcit_nano_12_p16_224,224,30599.92,8.357,256,3.05,0.56,4.17
54
+ tinynet_c,184,30555.73,8.369,256,2.46,0.11,2.87
55
+ nf_regnet_b0,192,30346.81,8.427,256,8.76,0.37,3.15
56
+ efficientvit_m0,224,30313.3,8.437,256,2.35,0.08,0.91
57
+ lcnet_150,224,30064.96,8.506,256,4.5,0.34,3.79
58
+ resnet34,160,29702.78,8.608,256,21.8,1.87,1.91
59
+ repghostnet_100,224,29445.74,8.685,256,4.07,0.15,3.98
60
+ efficientvit_m4,224,29026.74,8.81,256,8.8,0.3,1.7
61
+ cs3darknet_focus_s,256,28372.65,9.014,256,3.27,0.69,2.7
62
+ regnety_004,224,27981.57,9.139,256,4.34,0.41,3.89
63
+ tf_mobilenetv3_large_minimal_100,224,27961.28,9.147,256,3.92,0.22,4.4
64
+ mobilenetv3_large_075,224,27774.4,9.208,256,3.99,0.16,4.0
65
+ resnet14t,176,27760.24,9.213,256,10.08,1.07,3.61
66
+ mnasnet_075,224,27169.47,9.414,256,3.17,0.23,4.77
67
+ convnext_atto,224,27156.87,9.418,256,3.7,0.55,3.81
68
+ hgnetv2_b1,224,27032.22,9.462,256,6.34,0.49,2.73
69
+ cs3darknet_s,256,26830.41,9.533,256,3.28,0.72,2.97
70
+ regnety_006,224,26488.55,9.656,256,6.06,0.61,4.33
71
+ efficientvit_m5,224,26226.6,9.753,256,12.47,0.53,2.41
72
+ resnet18,224,25948.79,9.857,256,11.69,1.82,2.48
73
+ tf_efficientnetv2_b0,192,25767.89,9.926,256,7.14,0.54,3.51
74
+ tf_mobilenetv3_large_075,224,25675.44,9.962,256,3.99,0.16,4.0
75
+ ghostnet_100,224,25628.25,9.98,256,5.18,0.15,3.55
76
+ levit_conv_256,224,25595.46,9.993,256,18.89,1.13,4.23
77
+ convnextv2_atto,224,25550.99,10.011,256,3.71,0.55,3.81
78
+ convnext_atto_ols,224,25437.32,10.056,256,3.7,0.58,4.11
79
+ repghostnet_111,224,25146.64,10.171,256,4.54,0.18,4.38
80
+ mobilenetv3_rw,224,24721.37,10.347,256,5.48,0.23,4.41
81
+ mobilenetv3_large_100,224,24474.85,10.451,256,5.48,0.23,4.41
82
+ deit_tiny_patch16_224,224,24312.15,10.521,256,5.72,1.26,5.97
83
+ vit_tiny_patch16_224,224,24266.11,10.54,256,5.72,1.26,5.97
84
+ repvgg_a0,224,24061.2,10.63,256,9.11,1.52,3.59
85
+ seresnet18,224,24041.64,10.639,256,11.78,1.82,2.49
86
+ legacy_seresnet18,224,24022.17,10.648,256,11.78,1.82,2.49
87
+ hardcorenas_b,224,23830.5,10.734,256,5.18,0.26,5.09
88
+ deit_tiny_distilled_patch16_224,224,23769.88,10.761,256,5.91,1.27,6.01
89
+ mnasnet_100,224,23629.36,10.825,256,4.38,0.33,5.46
90
+ edgenext_xx_small,288,23592.08,10.842,256,1.33,0.33,4.21
91
+ regnetx_008,224,23475.66,10.896,256,7.26,0.81,5.15
92
+ hardcorenas_c,224,23258.07,10.998,256,5.52,0.28,5.01
93
+ mobilenetv1_100,224,22998.4,11.123,256,4.23,0.58,5.04
94
+ mobilenet_edgetpu_v2_xs,224,22908.62,11.166,256,4.46,0.7,4.8
95
+ semnasnet_075,224,22722.94,11.257,256,2.91,0.23,5.54
96
+ levit_256,224,22644.71,11.296,256,18.89,1.13,4.23
97
+ mobilenetv1_100h,224,22625.21,11.306,256,5.28,0.63,5.09
98
+ tf_mobilenetv3_large_100,224,22476.69,11.381,256,5.48,0.23,4.41
99
+ convnext_femto,224,22282.51,11.48,256,5.22,0.79,4.57
100
+ resnet18d,224,22111.11,11.569,256,11.71,2.06,3.29
101
+ levit_conv_256d,224,22077.96,11.586,256,26.21,1.4,4.93
102
+ spnasnet_100,224,22077.72,11.586,256,4.42,0.35,6.03
103
+ mobilenetv4_hybrid_medium_075,224,22065.84,11.593,256,7.31,0.66,5.65
104
+ dla46_c,224,21982.6,11.636,256,1.3,0.58,4.5
105
+ vit_medium_patch32_clip_224,224,21846.57,11.709,256,39.69,2.0,3.34
106
+ mobilenetv2_075,224,21759.16,11.757,256,2.64,0.22,5.86
107
+ hardcorenas_a,224,21729.5,11.773,256,5.26,0.23,4.38
108
+ mobilenetv4_conv_medium,224,21722.12,11.776,256,9.72,0.84,5.8
109
+ hgnetv2_b0,288,21721.77,11.777,256,6.0,0.54,3.51
110
+ regnety_008,224,21671.84,11.803,256,6.26,0.81,5.25
111
+ hardcorenas_d,224,21580.66,11.853,256,7.5,0.3,4.93
112
+ convnextv2_femto,224,21531.01,11.881,256,5.23,0.79,4.57
113
+ convnext_femto_ols,224,21072.2,12.14,256,5.23,0.82,4.87
114
+ repghostnet_130,224,21064.38,12.144,256,5.48,0.25,5.24
115
+ mobilenet_edgetpu_100,224,21052.52,12.151,256,4.09,1.0,5.75
116
+ pit_xs_224,224,20835.05,12.278,256,10.62,1.4,7.71
117
+ efficientformerv2_s0,224,20726.97,12.342,256,3.6,0.41,5.3
118
+ ese_vovnet19b_slim_dw,224,20707.54,12.354,256,1.9,0.4,5.28
119
+ pit_xs_distilled_224,224,20528.33,12.461,256,11.0,1.41,7.76
120
+ regnety_008_tv,224,20329.15,12.583,256,6.43,0.84,5.42
121
+ fbnetc_100,224,20140.52,12.702,256,5.57,0.4,6.51
122
+ vit_xsmall_patch16_clip_224,224,19971.81,12.809,256,8.28,1.79,6.65
123
+ tf_efficientnetv2_b1,192,19947.06,12.825,256,8.14,0.76,4.59
124
+ levit_256d,224,19733.29,12.964,256,26.21,1.4,4.93
125
+ semnasnet_100,224,19697.19,12.988,256,3.89,0.32,6.23
126
+ ese_vovnet19b_slim,224,19687.27,12.994,256,3.17,1.69,3.52
127
+ ghostnet_130,224,19375.99,13.203,256,7.36,0.24,4.6
128
+ regnetx_006,224,19266.74,13.277,256,6.2,0.61,3.98
129
+ mobilenetv2_100,224,18995.91,13.468,256,3.5,0.31,6.68
130
+ tinynet_b,188,18989.86,13.472,256,3.73,0.21,4.44
131
+ hrnet_w18_small,224,18828.83,13.587,256,13.19,1.61,5.72
132
+ repghostnet_150,224,18415.28,13.892,256,6.58,0.32,6.0
133
+ efficientnet_lite0,224,17985.67,14.225,256,4.65,0.4,6.74
134
+ skresnet18,224,17812.46,14.363,256,11.96,1.82,3.24
135
+ resnetblur18,224,17704.62,14.451,256,11.69,2.34,3.39
136
+ tf_efficientnet_lite0,224,17685.01,14.466,256,4.65,0.4,6.74
137
+ gmlp_ti16_224,224,17642.28,14.501,256,5.87,1.34,7.55
138
+ mobilevit_xxs,256,17607.31,14.53,256,1.27,0.42,8.34
139
+ edgenext_x_small,256,17443.49,14.667,256,2.34,0.54,5.93
140
+ resnet14t,224,17428.63,14.68,256,10.08,1.69,5.8
141
+ hardcorenas_e,224,17401.15,14.702,256,8.07,0.35,5.65
142
+ tf_efficientnetv2_b0,224,17323.92,14.768,256,7.14,0.73,4.77
143
+ mobilenetv4_hybrid_medium,224,17322.11,14.769,256,11.07,0.98,6.84
144
+ repvit_m1,224,17266.08,14.817,256,5.49,0.83,7.45
145
+ pvt_v2_b0,224,17238.77,14.841,256,3.67,0.57,7.99
146
+ xcit_tiny_12_p16_224,224,17236.25,14.843,256,6.72,1.24,6.29
147
+ mobilenetv1_125,224,17195.51,14.879,256,6.27,0.89,6.3
148
+ hardcorenas_f,224,17171.48,14.9,256,8.2,0.35,5.57
149
+ hgnetv2_b2,224,17062.45,14.995,256,11.22,1.15,4.12
150
+ repvgg_a1,224,17061.07,14.996,256,14.09,2.64,4.74
151
+ nf_regnet_b0,256,16940.55,15.102,256,8.76,0.64,5.58
152
+ mobilenetv1_100,256,16680.51,15.338,256,4.23,0.76,6.59
153
+ mobilenetv1_100h,256,16453.67,15.55,256,5.28,0.82,6.65
154
+ resnet50,160,16423.72,15.578,256,25.56,2.1,5.67
155
+ repvit_m0_9,224,16304.29,15.692,256,5.49,0.83,7.45
156
+ hgnetv2_b1,288,16122.96,15.869,256,6.34,0.82,4.51
157
+ mobilenetv4_conv_medium,256,16108.18,15.883,256,9.72,1.1,7.58
158
+ crossvit_tiny_240,240,16046.42,15.945,256,7.01,1.57,9.08
159
+ convnext_pico,224,16034.63,15.957,256,9.05,1.37,6.1
160
+ convnext_atto,288,15915.6,16.076,256,3.7,0.91,6.3
161
+ gernet_m,224,15764.43,16.23,256,21.14,3.02,5.24
162
+ vit_betwixt_patch32_clip_224,224,15727.09,16.268,256,61.41,3.09,4.17
163
+ efficientvit_b1,224,15699.71,16.296,256,9.1,0.53,7.25
164
+ resnet18,288,15598.12,16.402,256,11.69,3.01,4.11
165
+ crossvit_9_240,240,15301.84,16.72,256,8.55,1.85,9.52
166
+ resnet50d,160,15291.83,16.732,256,25.58,2.22,6.08
167
+ resnet34,224,15291.59,16.732,256,21.8,3.67,3.74
168
+ convnext_pico_ols,224,15194.28,16.84,256,9.06,1.43,6.5
169
+ tinynet_a,192,15162.99,16.873,256,6.19,0.35,5.41
170
+ mobilenet_edgetpu_v2_s,224,15100.58,16.944,256,5.99,1.21,6.6
171
+ mnasnet_140,224,15010.98,17.045,256,7.12,0.6,7.71
172
+ convnextv2_pico,224,14927.0,17.141,256,9.07,1.37,6.1
173
+ levit_conv_384,224,14906.88,17.164,256,39.13,2.36,6.26
174
+ convnext_atto_ols,288,14901.48,17.17,256,3.7,0.96,6.8
175
+ fbnetv3_b,224,14619.25,17.502,256,8.6,0.42,6.97
176
+ convnextv2_atto,288,14583.28,17.546,256,3.71,0.91,6.3
177
+ regnetz_005,224,14395.17,17.774,256,7.12,0.52,5.86
178
+ mobilenetv4_conv_blur_medium,224,14300.06,17.893,256,9.72,1.22,8.58
179
+ seresnet18,288,14286.39,17.91,256,11.78,3.01,4.11
180
+ efficientformerv2_s1,224,14252.25,17.953,256,6.19,0.67,7.66
181
+ seresnet34,224,14239.31,17.969,256,21.96,3.67,3.74
182
+ mobilevitv2_050,256,14216.39,17.998,256,1.37,0.48,8.04
183
+ legacy_seresnet34,224,14186.47,18.036,256,21.96,3.67,3.74
184
+ mobilenetv2_110d,224,14184.27,18.039,256,4.52,0.45,8.71
185
+ crossvit_9_dagger_240,240,14117.97,18.123,256,8.78,1.99,9.97
186
+ efficientformer_l1,224,14110.06,18.133,256,12.29,1.3,5.53
187
+ rexnetr_100,224,14090.03,18.159,256,4.88,0.43,7.72
188
+ cs3darknet_focus_m,256,14044.33,18.219,256,9.3,1.98,4.89
189
+ resnet34d,224,13891.67,18.42,256,21.82,3.91,4.54
190
+ eva02_tiny_patch14_224,224,13809.61,18.529,256,5.5,1.7,9.14
191
+ tf_efficientnetv2_b2,208,13703.38,18.673,256,10.1,1.06,6.0
192
+ resnext50_32x4d,160,13655.32,18.739,256,25.03,2.17,7.35
193
+ vit_tiny_r_s16_p8_384,384,13521.23,18.924,256,6.36,1.34,6.49
194
+ dla34,224,13514.0,18.934,256,15.74,3.07,5.02
195
+ cs3darknet_m,256,13508.36,18.942,256,9.31,2.08,5.28
196
+ repghostnet_200,224,13466.37,19.001,256,9.8,0.54,7.96
197
+ selecsls42,224,13463.14,19.006,256,30.35,2.94,4.62
198
+ rexnet_100,224,13432.99,19.048,256,4.8,0.41,7.44
199
+ ghostnetv2_100,224,13426.18,19.057,256,6.16,0.18,4.55
200
+ selecsls42b,224,13399.31,19.096,256,32.46,2.98,4.62
201
+ resnet18d,288,13378.17,19.127,256,11.71,3.41,5.43
202
+ seresnet50,160,13373.47,19.133,256,28.09,2.1,5.69
203
+ repvgg_b0,224,13287.11,19.257,256,15.82,3.41,6.15
204
+ hgnetv2_b3,224,13275.88,19.274,256,16.29,1.78,5.07
205
+ resnet26,224,13255.66,19.304,256,16.0,2.36,7.35
206
+ convnext_femto,288,13199.45,19.386,256,5.22,1.3,7.56
207
+ edgenext_x_small,288,13195.3,19.392,256,2.34,0.68,7.5
208
+ resnet50,176,13189.38,19.401,256,25.56,2.62,6.92
209
+ nf_regnet_b2,240,13112.1,19.515,256,14.31,0.97,7.23
210
+ repvit_m1_0,224,13097.62,19.537,256,7.3,1.13,8.69
211
+ levit_384,224,13010.17,19.668,256,39.13,2.36,6.26
212
+ mobilenetv4_hybrid_medium,256,12899.45,19.837,256,11.07,1.29,9.01
213
+ mobilenetv1_125,256,12862.06,19.894,256,6.27,1.16,8.23
214
+ ecaresnet50t,160,12731.21,20.1,256,25.57,2.21,6.04
215
+ semnasnet_140,224,12618.94,20.277,256,6.11,0.6,8.87
216
+ nf_regnet_b1,256,12525.77,20.429,256,10.22,0.82,7.27
217
+ repvit_m2,224,12475.42,20.511,256,8.8,1.36,9.43
218
+ resnetrs50,160,12428.45,20.588,256,35.69,2.29,6.2
219
+ convnext_femto_ols,288,12403.21,20.631,256,5.23,1.35,8.06
220
+ gmixer_12_224,224,12369.96,20.686,256,12.7,2.67,7.26
221
+ mobilenetv2_140,224,12263.51,20.865,256,6.11,0.6,9.57
222
+ pit_s_distilled_224,224,12210.42,20.957,256,24.04,2.9,11.64
223
+ resnetaa34d,224,12190.8,20.991,256,21.82,4.43,5.07
224
+ convnextv2_femto,288,12161.51,21.041,256,5.23,1.3,7.56
225
+ fbnetv3_d,224,12090.69,21.164,256,10.31,0.52,8.5
226
+ nf_resnet26,224,12044.22,21.246,256,16.0,2.41,7.35
227
+ visformer_tiny,224,11983.41,21.354,256,10.32,1.27,5.72
228
+ poolformerv2_s12,224,11968.43,21.381,256,11.89,1.83,5.53
229
+ pit_s_224,224,11936.79,21.437,256,23.46,2.88,11.56
230
+ efficientnet_es_pruned,224,11927.45,21.454,256,5.44,1.81,8.73
231
+ vit_base_patch32_224,224,11925.14,21.458,256,88.22,4.41,5.01
232
+ efficientnet_es,224,11898.4,21.507,256,5.44,1.81,8.73
233
+ vit_base_patch32_clip_224,224,11887.61,21.526,256,88.22,4.41,5.01
234
+ vit_base_patch32_clip_quickgelu_224,224,11884.79,21.532,256,87.85,4.41,5.01
235
+ efficientnet_lite1,240,11883.98,21.533,256,5.42,0.62,10.14
236
+ tiny_vit_5m_224,224,11858.98,21.578,256,12.08,1.28,11.25
237
+ selecsls60,224,11842.43,21.608,256,30.67,3.59,5.52
238
+ tf_efficientnet_es,224,11819.88,21.649,256,5.44,1.81,8.73
239
+ ese_vovnet19b_dw,224,11813.91,21.66,256,6.54,1.34,8.25
240
+ repvit_m1_1,224,11786.2,21.711,256,8.8,1.36,9.43
241
+ selecsls60b,224,11784.48,21.714,256,32.77,3.63,5.52
242
+ resnet26d,224,11759.24,21.761,256,16.01,2.6,8.15
243
+ tf_efficientnet_lite1,240,11696.2,21.877,256,5.42,0.62,10.14
244
+ efficientnet_b0,224,11670.69,21.926,256,5.29,0.4,6.75
245
+ efficientvit_b1,256,11622.24,22.018,256,9.1,0.69,9.46
246
+ mobilenet_edgetpu_v2_m,224,11435.95,22.377,256,8.46,1.85,8.15
247
+ hgnetv2_b4,224,11396.46,22.454,256,19.8,2.75,6.7
248
+ resmlp_12_224,224,11392.33,22.463,256,15.35,3.01,5.5
249
+ darknet17,256,11376.06,22.495,256,14.3,3.26,7.18
250
+ nf_ecaresnet26,224,11300.75,22.644,256,16.0,2.41,7.36
251
+ nf_seresnet26,224,11261.33,22.724,256,17.4,2.41,7.36
252
+ convnext_nano,224,11259.0,22.727,256,15.59,2.46,8.37
253
+ vit_small_patch32_384,384,11156.92,22.936,256,22.92,3.45,8.25
254
+ efficientnet_b1_pruned,240,11085.12,23.084,256,6.33,0.4,6.21
255
+ resnext50_32x4d,176,11026.93,23.206,256,25.03,2.71,8.97
256
+ tf_efficientnetv2_b1,240,11002.33,23.259,256,8.14,1.21,7.34
257
+ dla46x_c,224,10979.44,23.307,256,1.07,0.54,5.66
258
+ mobilenetv4_conv_aa_medium,256,10961.85,23.344,256,9.72,1.58,10.3
259
+ cs3darknet_focus_m,288,10880.75,23.519,256,9.3,2.51,6.19
260
+ mixer_s16_224,224,10874.1,23.533,256,18.53,3.79,5.97
261
+ edgenext_small,256,10831.26,23.626,256,5.59,1.26,9.07
262
+ dla60x_c,224,10780.61,23.737,256,1.32,0.59,6.01
263
+ mobilenetv4_conv_blur_medium,256,10753.26,23.797,256,9.72,1.59,11.2
264
+ mixer_b32_224,224,10749.19,23.807,256,60.29,3.24,6.29
265
+ resnetblur18,288,10709.95,23.894,256,11.69,3.87,5.6
266
+ rexnetr_130,224,10680.75,23.959,256,7.61,0.68,9.81
267
+ poolformer_s12,224,10614.31,24.108,256,11.92,1.82,5.53
268
+ cs3darknet_m,288,10519.8,24.326,256,9.31,2.63,6.69
269
+ fbnetv3_b,256,10482.22,24.413,256,8.6,0.55,9.1
270
+ resnet101,160,10466.64,24.45,256,44.55,4.0,8.28
271
+ skresnet34,224,10457.92,24.469,256,22.28,3.67,5.13
272
+ convnextv2_nano,224,10414.74,24.572,256,15.62,2.46,8.37
273
+ darknet21,256,10326.92,24.78,256,20.86,3.93,7.47
274
+ ghostnetv2_130,224,10285.16,24.881,256,8.96,0.28,5.9
275
+ gernet_l,256,10282.94,24.887,256,31.08,4.57,8.0
276
+ hgnetv2_b2,288,10247.93,24.971,256,11.22,1.89,6.8
277
+ mobilenetv2_120d,224,10245.9,24.977,256,5.83,0.69,11.97
278
+ convnext_nano_ols,224,10137.55,25.244,256,15.65,2.65,9.38
279
+ dpn48b,224,10002.5,25.584,256,9.13,1.69,8.92
280
+ nf_regnet_b2,272,9981.6,25.638,256,14.31,1.22,9.27
281
+ ecaresnet50d_pruned,224,9963.17,25.685,256,19.94,2.53,6.43
282
+ mobilenetv4_conv_medium,320,9962.23,25.687,256,9.72,1.71,11.84
283
+ mobileone_s1,224,9945.03,25.732,256,4.83,0.86,9.67
284
+ mobilenet_edgetpu_v2_l,224,9929.94,25.772,256,10.92,2.55,9.05
285
+ efficientnet_b0_gn,224,9920.37,25.796,256,5.29,0.42,6.75
286
+ tiny_vit_11m_224,224,9911.24,25.819,256,20.35,2.04,13.49
287
+ convnext_pico,288,9741.71,26.27,256,9.05,2.27,10.08
288
+ resnext26ts,256,9713.54,26.346,256,10.3,2.43,10.52
289
+ nf_regnet_b1,288,9675.15,26.45,256,10.22,1.02,9.2
290
+ vit_small_patch16_224,224,9665.97,26.475,256,22.05,4.61,11.95
291
+ repvgg_a2,224,9665.68,26.476,256,28.21,5.7,6.26
292
+ deit_small_patch16_224,224,9661.41,26.487,256,22.05,4.61,11.95
293
+ deit3_small_patch16_224,224,9645.46,26.532,256,22.06,4.61,11.95
294
+ tf_efficientnet_b0,224,9637.3,26.554,256,5.29,0.4,6.75
295
+ tf_mixnet_s,224,9581.92,26.707,256,4.13,0.25,6.25
296
+ rexnet_130,224,9568.98,26.744,256,7.56,0.68,9.71
297
+ deit_small_distilled_patch16_224,224,9556.61,26.778,256,22.44,4.63,12.02
298
+ vit_wee_patch16_reg1_gap_256,256,9553.41,26.788,256,13.42,3.83,13.9
299
+ xcit_tiny_24_p16_224,224,9465.47,27.036,256,12.12,2.34,11.82
300
+ mixnet_s,224,9398.83,27.228,256,4.13,0.25,6.25
301
+ vit_relpos_small_patch16_224,224,9365.71,27.325,256,21.98,4.59,13.05
302
+ vit_srelpos_small_patch16_224,224,9347.26,27.377,256,21.97,4.59,12.16
303
+ gc_efficientnetv2_rw_t,224,9344.17,27.386,256,13.68,1.94,9.97
304
+ vit_pwee_patch16_reg1_gap_256,256,9263.81,27.625,256,15.25,4.37,15.87
305
+ rexnetr_150,224,9243.8,27.684,256,9.78,0.89,11.13
306
+ hrnet_w18_small_v2,224,9242.43,27.688,256,15.6,2.62,9.65
307
+ vit_base_patch32_clip_256,256,9241.91,27.69,256,87.86,5.76,6.65
308
+ convnext_pico_ols,288,9219.94,27.757,256,9.06,2.37,10.74
309
+ mobilevitv2_075,256,9169.72,27.908,256,2.87,1.05,12.06
310
+ resnet26t,256,9168.78,27.912,256,16.01,3.35,10.52
311
+ resnet34,288,9144.8,27.985,256,21.8,6.07,6.18
312
+ gcresnext26ts,256,9136.94,28.009,256,10.48,2.43,10.53
313
+ legacy_seresnext26_32x4d,224,9127.31,28.037,256,16.79,2.49,9.39
314
+ efficientformerv2_s2,224,9079.95,28.184,256,12.71,1.27,11.77
315
+ regnetx_016,224,9048.99,28.281,256,9.19,1.62,7.93
316
+ sedarknet21,256,9044.06,28.297,256,20.95,3.93,7.47
317
+ mobilenetv4_hybrid_large_075,256,9021.75,28.366,256,22.75,2.06,11.64
318
+ convnextv2_pico,288,8940.75,28.624,256,9.07,2.27,10.08
319
+ mobilenetv4_conv_large,256,8935.42,28.64,256,32.59,2.86,12.14
320
+ efficientnet_lite2,260,8893.65,28.775,256,6.09,0.89,12.9
321
+ efficientvit_b1,288,8878.52,28.825,256,9.1,0.87,11.96
322
+ dpn68,224,8840.27,28.949,256,12.61,2.35,10.47
323
+ regnety_016,224,8805.36,29.063,256,11.2,1.63,8.04
324
+ tf_efficientnet_lite2,260,8796.92,29.092,256,6.09,0.89,12.9
325
+ hgnet_tiny,224,8776.27,29.161,256,14.74,4.54,6.36
326
+ seresnext26ts,256,8763.95,29.202,256,10.39,2.43,10.52
327
+ eca_resnext26ts,256,8762.16,29.207,256,10.3,2.43,10.52
328
+ vit_relpos_small_patch16_rpn_224,224,8724.84,29.332,256,21.97,4.59,13.05
329
+ fbnetv3_d,256,8723.73,29.335,256,10.31,0.68,11.1
330
+ vit_small_r26_s32_224,224,8684.28,29.469,256,36.43,3.56,9.85
331
+ efficientnet_b0,256,8654.63,29.57,256,5.29,0.52,8.81
332
+ mobilenet_edgetpu_v2_m,256,8614.85,29.707,256,8.46,2.42,10.65
333
+ botnet26t_256,256,8607.7,29.728,256,12.49,3.32,11.98
334
+ halonet26t,256,8583.89,29.814,256,12.48,3.19,11.69
335
+ efficientnet_blur_b0,224,8559.05,29.9,256,5.29,0.43,8.72
336
+ resnest14d,224,8557.7,29.905,256,10.61,2.76,7.33
337
+ edgenext_small_rw,256,8555.55,29.913,256,7.83,1.58,9.51
338
+ flexivit_small,240,8505.35,30.09,256,22.06,5.35,14.18
339
+ ecaresnext50t_32x4d,224,8481.39,30.175,256,15.41,2.7,10.09
340
+ ecaresnext26t_32x4d,224,8479.72,30.18,256,15.41,2.7,10.09
341
+ seresnext26t_32x4d,224,8460.91,30.245,256,16.81,2.7,10.09
342
+ dpn68b,224,8448.18,30.292,256,12.61,2.35,10.47
343
+ seresnet34,288,8419.97,30.395,256,21.96,6.07,6.18
344
+ efficientnet_b0_g16_evos,224,8418.52,30.399,256,8.11,1.01,7.42
345
+ ecaresnet101d_pruned,224,8411.86,30.422,256,24.88,3.48,7.69
346
+ resnet101,176,8404.43,30.451,256,44.55,4.92,10.08
347
+ seresnext26d_32x4d,224,8398.85,30.471,256,16.81,2.73,10.19
348
+ resnet34d,288,8385.33,30.521,256,21.82,6.47,7.51
349
+ rexnet_150,224,8361.13,30.608,256,9.73,0.9,11.21
350
+ efficientnetv2_rw_t,224,8343.87,30.671,256,13.65,1.93,9.94
351
+ repvit_m3,224,8338.0,30.693,256,10.68,1.89,13.94
352
+ pvt_v2_b1,224,8309.51,30.799,256,14.01,2.12,15.39
353
+ convit_tiny,224,8301.19,30.83,256,5.71,1.26,7.94
354
+ ecaresnetlight,224,8286.53,30.885,256,30.16,4.11,8.42
355
+ eca_nfnet_l0,224,8268.29,30.953,256,24.14,4.35,10.47
356
+ xcit_nano_12_p16_384,384,8253.59,31.007,256,3.05,1.64,12.15
357
+ resnet50,224,8237.46,31.069,256,25.56,4.11,11.11
358
+ nfnet_l0,224,8226.21,31.11,256,35.07,4.36,10.47
359
+ cs3darknet_focus_l,256,8211.8,31.166,256,21.15,4.66,8.03
360
+ tresnet_m,224,8210.79,31.169,256,31.39,5.75,7.31
361
+ mobileone_s2,224,8190.43,31.246,256,7.88,1.34,11.55
362
+ coatnext_nano_rw_224,224,8142.29,31.431,256,14.7,2.47,12.8
363
+ regnetz_005,288,8113.29,31.543,256,7.12,0.86,9.68
364
+ efficientnet_b1,224,8096.89,31.607,256,7.79,0.59,9.36
365
+ eca_botnext26ts_256,256,8088.97,31.637,256,10.59,2.46,11.6
366
+ mobileone_s0,224,8070.39,31.71,256,5.29,1.09,15.48
367
+ dla60,224,8036.02,31.847,256,22.04,4.26,10.16
368
+ ghostnetv2_160,224,8032.91,31.859,256,12.39,0.42,7.23
369
+ resnet26,288,8028.16,31.879,256,16.0,3.9,12.15
370
+ hgnetv2_b3,288,7993.28,32.017,256,16.29,2.94,8.38
371
+ resnet32ts,256,7987.07,32.043,256,17.96,4.63,11.58
372
+ eca_halonext26ts,256,7959.44,32.153,256,10.76,2.44,11.46
373
+ cs3darknet_l,256,7898.11,32.403,256,21.16,4.86,8.55
374
+ resnet33ts,256,7888.41,32.444,256,19.68,4.76,11.66
375
+ resnet50c,224,7858.28,32.567,256,25.58,4.35,11.92
376
+ fastvit_t8,256,7845.11,32.622,256,4.03,0.7,8.63
377
+ mobilenetv3_large_150d,256,7830.3,32.683,256,14.62,1.03,12.35
378
+ efficientnet_b0_g8_gn,224,7820.23,32.725,256,6.56,0.66,6.75
379
+ vit_small_resnet26d_224,224,7754.96,33.002,256,63.61,5.07,11.12
380
+ lambda_resnet26t,256,7725.02,33.129,256,10.96,3.02,11.87
381
+ coat_lite_tiny,224,7696.35,33.252,256,5.72,1.6,11.65
382
+ resnet50t,224,7687.2,33.293,256,25.57,4.32,11.82
383
+ levit_conv_512,224,7679.59,33.326,256,95.17,5.64,10.22
384
+ mobilevit_xs,256,7674.3,33.348,256,2.32,1.05,16.33
385
+ resnext26ts,288,7671.02,33.363,256,10.3,3.07,13.31
386
+ tf_efficientnetv2_b2,260,7642.74,33.487,256,10.1,1.72,9.84
387
+ resnet50d,224,7639.23,33.502,256,25.58,4.35,11.92
388
+ mobilenetv4_hybrid_medium,320,7554.53,33.877,256,11.07,2.05,14.36
389
+ ecaresnet26t,256,7544.42,33.923,256,16.01,3.35,10.53
390
+ efficientnet_cc_b0_8e,224,7516.53,34.049,256,24.01,0.42,9.42
391
+ nf_regnet_b3,288,7509.89,34.079,256,18.59,1.67,11.84
392
+ efficientnet_cc_b0_4e,224,7508.02,34.087,256,13.31,0.41,9.42
393
+ gmlp_s16_224,224,7502.53,34.112,256,19.42,4.42,15.1
394
+ resnetv2_50,224,7495.78,34.144,256,25.55,4.11,11.11
395
+ vit_tiny_patch16_384,384,7452.45,34.342,256,5.79,4.7,25.39
396
+ vovnet39a,224,7432.2,34.436,256,22.6,7.09,6.73
397
+ gcresnet33ts,256,7420.0,34.491,256,19.88,4.76,11.68
398
+ tf_efficientnetv2_b3,240,7371.52,34.718,256,14.36,1.93,9.95
399
+ wide_resnet50_2,176,7362.71,34.761,256,68.88,7.29,8.97
400
+ resnet152,160,7349.12,34.825,256,60.19,5.9,11.51
401
+ resnetaa34d,288,7340.2,34.867,256,21.82,7.33,8.38
402
+ selecsls84,224,7334.1,34.895,256,50.95,5.9,7.57
403
+ resnetaa50,224,7291.3,35.101,256,25.56,5.15,11.64
404
+ efficientnet_em,240,7258.93,35.257,256,6.9,3.04,14.34
405
+ levit_conv_512d,224,7214.89,35.472,256,92.5,5.85,11.3
406
+ tf_efficientnet_em,240,7200.67,35.543,256,6.9,3.04,14.34
407
+ gcresnext26ts,288,7186.05,35.614,256,10.48,3.07,13.33
408
+ vit_base_patch32_plus_256,256,7183.84,35.627,256,119.48,7.79,7.76
409
+ res2net50_48w_2s,224,7183.73,35.627,256,25.29,4.18,11.72
410
+ coat_lite_mini,224,7158.54,35.75,256,11.01,2.0,12.25
411
+ resnet26d,288,7151.59,35.787,256,16.01,4.29,13.48
412
+ efficientvit_b2,224,7108.81,36.002,256,24.33,1.6,14.62
413
+ repvit_m1_5,224,7103.0,36.031,256,14.64,2.31,15.7
414
+ ese_vovnet19b_dw,288,7097.11,36.062,256,6.54,2.22,13.63
415
+ resnet50_gn,224,7077.2,36.163,256,25.56,4.14,11.11
416
+ eca_resnet33ts,256,7061.62,36.243,256,19.68,4.76,11.66
417
+ resnetv2_50t,224,7051.5,36.295,256,25.57,4.32,11.82
418
+ seresnet33ts,256,7046.47,36.321,256,19.78,4.76,11.66
419
+ eca_vovnet39b,224,7017.69,36.47,256,22.6,7.09,6.74
420
+ resnetv2_50d,224,7006.58,36.528,256,25.57,4.35,11.92
421
+ crossvit_small_240,240,6948.36,36.834,256,26.86,5.63,18.17
422
+ inception_v3,299,6933.82,36.911,256,23.83,5.73,8.97
423
+ levit_512,224,6931.73,36.922,256,95.17,5.64,10.22
424
+ seresnext26ts,288,6920.44,36.982,256,10.39,3.07,13.32
425
+ eca_resnext26ts,288,6919.11,36.989,256,10.3,3.07,13.32
426
+ nf_ecaresnet50,224,6916.89,37.001,256,25.56,4.21,11.13
427
+ resnetblur50,224,6906.06,37.06,256,25.56,5.16,12.02
428
+ ese_vovnet39b,224,6897.07,37.107,256,24.57,7.09,6.74
429
+ hgnetv2_b4,288,6892.16,37.134,256,19.8,4.54,11.08
430
+ nf_seresnet50,224,6872.82,37.239,256,28.09,4.21,11.13
431
+ vgg11_bn,224,6872.74,37.239,256,132.87,7.62,7.44
432
+ vgg11,224,6868.96,37.26,256,132.86,7.61,7.44
433
+ resnext50_32x4d,224,6858.21,37.319,256,25.03,4.26,14.4
434
+ resnetaa50d,224,6834.01,37.45,256,25.58,5.39,12.44
435
+ sam2_hiera_tiny,224,6819.52,37.53,256,26.85,4.91,17.12
436
+ legacy_seresnet50,224,6808.13,37.592,256,28.09,3.88,10.6
437
+ resnet50_clip_gap,224,6805.83,37.606,256,23.53,5.39,12.44
438
+ convnext_nano,288,6796.73,37.655,256,15.59,4.06,13.84
439
+ dla60x,224,6789.78,37.695,256,17.35,3.54,13.8
440
+ efficientnet_b1,240,6787.57,37.706,256,7.79,0.71,10.88
441
+ mobileone_s3,224,6778.3,37.757,256,10.17,1.94,13.85
442
+ edgenext_small,320,6777.59,37.762,256,5.59,1.97,14.16
443
+ mobilevitv2_100,256,6775.09,37.776,256,4.9,1.84,16.08
444
+ convnext_tiny_hnf,224,6762.18,37.848,256,28.59,4.47,13.44
445
+ xcit_small_12_p16_224,224,6719.89,38.086,256,26.25,4.82,12.58
446
+ inception_next_tiny,224,6716.71,38.104,256,28.06,4.19,11.98
447
+ convnext_tiny,224,6713.74,38.121,256,28.59,4.47,13.44
448
+ twins_svt_small,224,6712.42,38.129,256,24.06,2.94,13.75
449
+ resnet50s,224,6700.52,38.196,256,25.68,5.47,13.52
450
+ vit_little_patch16_reg1_gap_256,256,6683.54,38.293,256,22.52,6.27,18.06
451
+ vit_relpos_base_patch32_plus_rpn_256,256,6635.1,38.573,256,119.42,7.68,8.01
452
+ ese_vovnet39b_evos,224,6627.24,38.618,256,24.58,7.07,6.74
453
+ vit_little_patch16_reg4_gap_256,256,6624.29,38.636,256,22.52,6.35,18.33
454
+ skresnet50,224,6610.42,38.716,256,25.8,4.11,12.5
455
+ rexnetr_200,224,6609.18,38.725,256,16.52,1.59,15.11
456
+ cs3sedarknet_l,256,6604.77,38.75,256,21.91,4.86,8.56
457
+ regnetz_b16_evos,224,6598.52,38.787,256,9.74,1.43,9.95
458
+ efficientnet_b2_pruned,260,6597.68,38.791,256,8.31,0.73,9.13
459
+ levit_512d,224,6590.97,38.832,256,92.5,5.85,11.3
460
+ crossvit_15_240,240,6583.92,38.873,256,27.53,5.81,19.77
461
+ densenet121,224,6580.95,38.89,256,7.98,2.87,6.9
462
+ tf_efficientnet_cc_b0_4e,224,6574.97,38.926,256,13.31,0.41,9.42
463
+ tf_efficientnet_cc_b0_8e,224,6564.08,38.99,256,24.01,0.42,9.42
464
+ seresnet50,224,6510.44,39.312,256,28.09,4.11,11.13
465
+ regnetz_b16,224,6497.98,39.386,256,9.72,1.45,9.95
466
+ resnetblur50d,224,6469.2,39.563,256,25.58,5.4,12.82
467
+ regnetx_032,224,6433.96,39.779,256,15.3,3.2,11.37
468
+ resnext50d_32x4d,224,6433.36,39.782,256,25.05,4.5,15.2
469
+ cspresnet50,256,6423.77,39.843,256,21.62,4.54,11.5
470
+ haloregnetz_b,224,6420.84,39.86,256,11.68,1.97,11.94
471
+ convformer_s18,224,6401.93,39.977,256,26.77,3.96,15.82
472
+ gmixer_24_224,224,6397.38,40.007,256,24.72,5.28,14.45
473
+ resnest26d,224,6382.9,40.098,256,17.07,3.64,9.97
474
+ caformer_s18,224,6357.61,40.255,256,26.34,4.13,19.39
475
+ resnet50_clip,224,6340.6,40.365,256,38.32,6.14,12.98
476
+ hgnetv2_b5,224,6336.07,40.393,256,39.57,6.56,11.19
477
+ tf_mixnet_m,224,6323.56,40.473,256,5.01,0.36,8.19
478
+ cs3darknet_focus_l,288,6307.83,40.575,256,21.15,5.9,10.16
479
+ repvgg_b1g4,224,6303.26,40.604,256,39.97,8.15,10.64
480
+ vit_medium_patch16_clip_224,224,6298.39,40.635,256,38.59,8.0,15.93
481
+ resnet32ts,288,6297.13,40.644,256,17.96,5.86,14.65
482
+ deit3_medium_patch16_224,224,6292.44,40.675,256,38.85,8.0,15.93
483
+ mixnet_m,224,6274.65,40.789,256,5.01,0.36,8.19
484
+ convnextv2_tiny,224,6265.42,40.849,256,28.64,4.47,13.44
485
+ convnextv2_nano,288,6252.04,40.936,256,15.62,4.06,13.84
486
+ efficientformer_l3,224,6248.87,40.958,256,31.41,3.93,12.01
487
+ coatnet_pico_rw_224,224,6223.54,41.125,256,10.85,2.05,14.62
488
+ resnet33ts,288,6212.45,41.198,256,19.68,6.02,14.75
489
+ vit_base_resnet26d_224,224,6210.89,41.208,256,101.4,6.97,13.16
490
+ skresnet50d,224,6206.47,41.238,256,25.82,4.36,13.31
491
+ tiny_vit_21m_224,224,6202.51,41.264,256,33.22,4.29,20.08
492
+ ecaresnet50t,224,6182.25,41.4,256,25.57,4.32,11.83
493
+ rexnet_200,224,6181.08,41.407,256,16.37,1.56,14.91
494
+ vit_relpos_medium_patch16_224,224,6168.22,41.493,256,38.75,7.97,17.02
495
+ poolformerv2_s24,224,6162.46,41.532,256,21.34,3.42,10.68
496
+ seresnet50t,224,6161.79,41.537,256,28.1,4.32,11.83
497
+ sehalonet33ts,256,6154.72,41.584,256,13.69,3.55,14.7
498
+ ecaresnet50d,224,6146.92,41.638,256,25.58,4.35,11.93
499
+ vit_srelpos_medium_patch16_224,224,6143.0,41.665,256,38.74,7.96,16.21
500
+ cs3darknet_l,288,6142.02,41.67,256,21.16,6.16,10.83
501
+ crossvit_15_dagger_240,240,6130.99,41.745,256,28.21,6.13,20.43
502
+ vovnet57a,224,6130.59,41.749,256,36.64,8.95,7.52
503
+ cspresnet50d,256,6120.99,41.814,256,21.64,4.86,12.55
504
+ convnext_nano_ols,288,6113.65,41.864,256,15.65,4.38,15.5
505
+ cspresnet50w,256,6107.36,41.907,256,28.12,5.04,12.19
506
+ vit_relpos_medium_patch16_cls_224,224,6098.64,41.967,256,38.76,8.03,18.24
507
+ resnetrs50,224,6086.47,42.05,256,35.69,4.48,12.14
508
+ xcit_nano_12_p8_224,224,6075.31,42.129,256,3.05,2.16,15.71
509
+ gcresnext50ts,256,6060.87,42.228,256,15.67,3.75,15.46
510
+ fbnetv3_g,240,6057.83,42.25,256,16.62,1.28,14.87
511
+ densenetblur121d,224,6056.82,42.256,256,8.0,3.11,7.9
512
+ gcvit_xxtiny,224,6049.45,42.308,256,12.0,2.14,15.36
513
+ vit_base_r26_s32_224,224,6038.4,42.386,256,101.38,6.81,12.36
514
+ nf_regnet_b3,320,6032.74,42.424,256,18.59,2.05,14.61
515
+ efficientnet_b1,256,6002.08,42.642,256,7.79,0.77,12.22
516
+ mobilevit_s,256,5976.11,42.827,256,5.58,2.03,19.94
517
+ resnet152,176,5947.93,43.031,256,60.19,7.22,13.99
518
+ tf_efficientnet_b1,240,5946.3,43.042,256,7.79,0.71,10.88
519
+ res2next50,224,5944.69,43.054,256,24.67,4.2,13.71
520
+ res2net50_26w_4s,224,5895.46,43.412,256,25.7,4.28,12.61
521
+ resnet26t,320,5888.07,43.468,256,16.01,5.24,16.44
522
+ res2net50_14w_8s,224,5885.45,43.487,256,25.06,4.21,13.28
523
+ dla60_res2next,224,5850.87,43.744,256,17.03,3.49,13.17
524
+ resmlp_24_224,224,5846.3,43.778,256,30.02,5.96,10.91
525
+ twins_pcpvt_small,224,5845.62,43.784,256,24.11,3.83,18.08
526
+ dla60_res2net,224,5837.47,43.845,256,20.85,4.15,12.34
527
+ edgenext_base,256,5822.01,43.961,256,18.51,3.85,15.58
528
+ gcresnet33ts,288,5797.71,44.145,256,19.88,6.02,14.78
529
+ regnety_040,224,5793.29,44.179,256,20.65,4.0,12.29
530
+ eva02_tiny_patch14_336,336,5792.23,44.186,256,5.76,4.68,27.16
531
+ resnetv2_50x1_bit,224,5786.08,44.234,256,25.55,4.23,11.11
532
+ regnetv_040,224,5783.78,44.252,256,20.64,4.0,12.29
533
+ ecaresnet50d_pruned,288,5762.03,44.418,256,19.94,4.19,10.61
534
+ efficientvit_l1,224,5756.46,44.461,256,52.65,5.27,15.85
535
+ visformer_small,224,5750.7,44.507,256,40.22,4.88,11.43
536
+ eva02_small_patch14_224,224,5748.38,44.524,256,21.62,6.14,18.28
537
+ ese_vovnet57b,224,5743.03,44.566,256,38.61,8.95,7.52
538
+ hgnet_small,224,5739.36,44.595,256,24.36,8.53,8.79
539
+ gcresnet50t,256,5724.09,44.713,256,25.9,5.42,14.67
540
+ nf_resnet50,256,5711.71,44.81,256,25.56,5.46,14.52
541
+ resnet51q,256,5706.87,44.849,256,35.7,6.38,16.55
542
+ hiera_tiny_224,224,5685.03,45.021,256,27.91,4.91,17.13
543
+ efficientnet_b2,256,5683.07,45.037,256,9.11,0.89,12.81
544
+ seresnext50_32x4d,224,5625.84,45.495,256,27.56,4.26,14.42
545
+ legacy_seresnext50_32x4d,224,5623.19,45.516,256,27.56,4.26,14.42
546
+ sebotnet33ts_256,256,5614.57,45.585,256,13.7,3.89,17.46
547
+ seresnetaa50d,224,5601.18,45.695,256,28.11,5.4,12.46
548
+ res2net50d,224,5592.35,45.768,256,25.72,4.52,13.41
549
+ regnety_032,224,5575.12,45.908,256,19.44,3.2,11.26
550
+ eca_resnet33ts,288,5547.26,46.139,256,19.68,6.02,14.76
551
+ seresnet33ts,288,5546.78,46.143,256,19.78,6.02,14.76
552
+ mobilenetv4_conv_large,320,5543.63,46.169,256,32.59,4.47,18.97
553
+ focalnet_tiny_srf,224,5540.33,46.197,256,28.43,4.42,16.32
554
+ resnetv2_50d_frn,224,5526.17,46.316,256,25.59,4.33,11.92
555
+ coatnet_0_rw_224,224,5518.99,46.375,256,27.44,4.43,18.73
556
+ davit_tiny,224,5515.29,46.407,256,28.36,4.54,18.89
557
+ fastvit_t12,256,5513.86,46.418,256,7.55,1.42,12.42
558
+ vit_relpos_medium_patch16_rpn_224,224,5513.6,46.421,256,38.73,7.97,17.02
559
+ resnetrs101,192,5499.51,46.54,256,63.62,6.04,12.7
560
+ vit_medium_patch16_gap_240,240,5483.44,46.677,256,44.4,9.22,18.81
561
+ efficientformerv2_l,224,5466.54,46.819,256,26.32,2.59,18.54
562
+ regnetx_040,224,5462.75,46.854,256,22.12,3.99,12.2
563
+ resnetv2_50d_gn,224,5452.78,46.939,256,25.57,4.38,11.92
564
+ coatnet_nano_rw_224,224,5433.12,47.109,256,15.14,2.41,15.41
565
+ edgenext_small_rw,320,5425.79,47.173,256,7.83,2.46,14.85
566
+ resnetv2_50d_evos,224,5402.37,47.378,256,25.59,4.33,11.92
567
+ efficientvit_b2,256,5396.4,47.427,256,24.33,2.09,19.03
568
+ dla102,224,5369.84,47.664,256,33.27,7.19,14.18
569
+ cspresnext50,256,5358.14,47.768,256,20.57,4.05,15.86
570
+ hrnet_w18_ssld,224,5312.43,48.178,256,21.3,4.32,16.31
571
+ mobilevitv2_125,256,5304.51,48.251,256,7.48,2.86,20.1
572
+ gc_efficientnetv2_rw_t,288,5303.02,48.263,256,13.68,3.2,16.45
573
+ resnest50d_1s4x24d,224,5302.55,48.268,256,25.68,4.43,13.57
574
+ coatnet_nano_cc_224,224,5300.81,48.285,256,13.76,2.24,15.02
575
+ densenet169,224,5293.85,48.347,256,14.15,3.4,7.3
576
+ resnet61q,256,5274.43,48.526,256,36.85,7.8,17.01
577
+ darknet53,256,5272.3,48.545,256,41.61,9.31,12.39
578
+ hgnet_tiny,288,5268.94,48.578,256,14.74,7.51,10.51
579
+ lambda_resnet26rpt_256,256,5256.58,48.689,256,10.99,3.16,11.87
580
+ resnet50_mlp,256,5238.43,48.86,256,26.65,7.05,16.25
581
+ rdnet_tiny,224,5230.54,48.933,256,23.86,5.06,15.98
582
+ nextvit_small,224,5230.17,48.937,256,31.76,5.81,18.44
583
+ nfnet_f0,192,5229.68,48.94,256,71.49,7.21,10.16
584
+ efficientnet_b3_pruned,300,5219.72,49.033,256,9.86,1.04,11.86
585
+ cs3darknet_focus_x,256,5215.35,49.076,256,35.02,8.03,10.69
586
+ resnet101,224,5204.99,49.174,256,44.55,7.83,16.23
587
+ poolformer_s24,224,5178.8,49.423,256,21.39,3.41,10.68
588
+ dm_nfnet_f0,192,5176.64,49.442,256,71.49,7.21,10.16
589
+ fastvit_s12,256,5169.44,49.511,256,9.47,1.82,13.67
590
+ xcit_tiny_12_p16_384,384,5122.18,49.969,256,6.72,3.64,18.26
591
+ efficientnet_lite3,300,5121.62,49.975,256,8.2,1.65,21.85
592
+ fastvit_sa12,256,5118.51,50.004,256,11.58,1.96,14.03
593
+ focalnet_tiny_lrf,224,5112.56,50.063,256,28.65,4.49,17.76
594
+ seresnext26t_32x4d,288,5093.87,50.247,256,16.81,4.46,16.68
595
+ darknetaa53,256,5089.88,50.286,256,36.02,7.97,12.39
596
+ cs3sedarknet_l,288,5080.41,50.38,256,21.91,6.16,10.83
597
+ tf_efficientnet_lite3,300,5063.4,50.55,256,8.2,1.65,21.85
598
+ hrnet_w18,224,5061.86,50.564,256,21.3,4.32,16.31
599
+ seresnext26d_32x4d,288,5057.03,50.614,256,16.81,4.51,16.85
600
+ cs3darknet_x,256,5045.79,50.726,256,35.05,8.38,11.35
601
+ resnet101c,224,5044.49,50.739,256,44.57,8.08,17.04
602
+ swin_tiny_patch4_window7_224,224,5039.45,50.789,256,28.29,4.51,17.06
603
+ maxvit_pico_rw_256,256,4998.32,51.207,256,7.46,1.83,22.3
604
+ ecaresnetlight,288,4963.55,51.567,256,30.16,6.79,13.91
605
+ maxvit_rmlp_pico_rw_256,256,4963.22,51.568,256,7.52,1.85,24.86
606
+ eca_nfnet_l0,288,4962.52,51.577,256,24.14,7.12,17.29
607
+ resnet50,288,4962.25,51.58,256,25.56,6.8,18.37
608
+ resnet101d,224,4941.95,51.792,256,44.57,8.08,17.04
609
+ mobilenetv4_hybrid_medium,384,4939.03,51.822,256,11.07,3.01,21.18
610
+ mobileone_s4,224,4931.74,51.899,256,14.95,3.04,17.74
611
+ nfnet_l0,288,4931.09,51.906,256,35.07,7.13,17.29
612
+ skresnext50_32x4d,224,4919.18,52.031,256,27.48,4.5,17.18
613
+ vit_medium_patch16_gap_256,256,4903.6,52.197,256,38.86,10.59,22.15
614
+ coatnet_bn_0_rw_224,224,4887.13,52.371,256,27.44,4.67,22.04
615
+ dpn68b,288,4849.7,52.777,256,12.61,3.89,17.3
616
+ mobilenetv3_large_150d,320,4830.54,52.987,256,14.62,1.61,19.29
617
+ vit_base_resnet50d_224,224,4826.53,53.03,256,110.97,8.73,16.92
618
+ ecaresnet26t,320,4809.11,53.223,256,16.01,5.24,16.44
619
+ vgg13,224,4804.86,53.27,256,133.05,11.31,12.25
620
+ vgg13_bn,224,4801.12,53.312,256,133.05,11.33,12.25
621
+ repvgg_b1,224,4794.12,53.389,256,57.42,13.16,10.64
622
+ halonet50ts,256,4780.36,53.542,256,22.73,5.3,19.2
623
+ coatnet_rmlp_nano_rw_224,224,4765.2,53.712,256,15.15,2.62,20.34
624
+ gcresnext50ts,288,4747.63,53.91,256,15.67,4.75,19.57
625
+ lambda_resnet50ts,256,4746.66,53.923,256,21.54,5.07,17.48
626
+ swinv2_cr_tiny_224,224,4745.98,53.93,256,28.33,4.66,28.45
627
+ efficientnet_cc_b1_8e,240,4739.38,54.005,256,39.72,0.75,15.44
628
+ ecaresnet50t,256,4722.81,54.195,256,25.57,5.64,15.45
629
+ ecaresnet101d_pruned,288,4706.4,54.383,256,24.88,5.75,12.71
630
+ pvt_v2_b2,224,4697.71,54.485,256,25.36,4.05,27.53
631
+ efficientnetv2_rw_t,288,4690.17,54.572,256,13.65,3.19,16.42
632
+ efficientnet_b1,288,4690.13,54.573,256,7.79,0.97,15.46
633
+ swinv2_cr_tiny_ns_224,224,4689.94,54.575,256,28.33,4.66,28.45
634
+ vit_medium_patch16_reg1_gap_256,256,4686.95,54.609,256,38.88,10.63,22.26
635
+ gcvit_xtiny,224,4677.16,54.722,256,19.98,2.93,20.26
636
+ nf_resnet101,224,4669.02,54.819,256,44.55,8.01,16.23
637
+ regnety_040_sgn,224,4665.34,54.863,256,20.65,4.03,12.29
638
+ lamhalobotnet50ts_256,256,4659.1,54.936,256,22.57,5.02,18.44
639
+ vit_medium_patch16_reg4_gap_256,256,4651.05,55.032,256,38.88,10.76,22.6
640
+ resnet50t,288,4649.07,55.055,256,25.57,7.14,19.53
641
+ dla102x,224,4635.33,55.219,256,26.31,5.89,19.42
642
+ hiera_small_224,224,4619.55,55.408,256,35.01,6.42,20.75
643
+ resnet50d,288,4619.02,55.413,256,25.58,7.19,19.7
644
+ wide_resnet50_2,224,4612.63,55.49,256,68.88,11.43,14.4
645
+ resnetv2_101,224,4600.01,55.642,256,44.54,7.83,16.23
646
+ resnet101_clip_gap,224,4595.55,55.696,256,42.52,9.11,17.56
647
+ tf_efficientnet_b2,260,4594.48,55.709,256,9.11,1.02,13.83
648
+ resnetaa101d,224,4591.39,55.747,256,44.57,9.12,17.56
649
+ tf_mixnet_l,224,4570.85,55.997,256,7.33,0.58,10.84
650
+ efficientvit_l2,224,4564.31,56.076,256,63.71,6.97,19.58
651
+ tf_efficientnetv2_b3,300,4554.51,56.198,256,14.36,3.04,15.74
652
+ resnetv2_50,288,4548.51,56.273,256,25.55,6.79,18.37
653
+ mixnet_l,224,4545.22,56.312,256,7.33,0.58,10.84
654
+ resnet101s,224,4541.57,56.358,256,44.67,9.19,18.64
655
+ gcresnet50t,288,4540.87,56.364,256,25.9,6.86,18.57
656
+ mvitv2_tiny,224,4518.92,56.64,256,24.17,4.7,21.16
657
+ cait_xxs24_224,224,4502.7,56.839,256,11.96,2.53,20.29
658
+ resnet51q,288,4496.23,56.927,256,35.7,8.07,20.94
659
+ nf_resnet50,288,4484.43,57.077,256,25.56,6.88,18.37
660
+ resnest50d,224,4484.41,57.077,256,27.48,5.4,14.36
661
+ nf_regnet_b4,320,4455.95,57.441,256,30.21,3.29,19.88
662
+ crossvit_18_240,240,4445.99,57.569,256,43.27,9.05,26.26
663
+ resnetblur101d,224,4442.61,57.614,256,44.57,9.12,17.94
664
+ efficientnet_b2,288,4434.83,57.715,256,9.11,1.12,16.2
665
+ resnext101_32x4d,224,4430.11,57.776,256,44.18,8.01,21.23
666
+ halo2botnet50ts_256,256,4424.96,57.843,256,22.64,5.02,21.78
667
+ botnet50ts_256,256,4416.09,57.959,256,22.74,5.54,22.23
668
+ vitamin_small_224,224,4405.81,58.094,256,22.03,5.92,26.38
669
+ resnetv2_101d,224,4395.97,58.225,256,44.56,8.07,17.04
670
+ resnetaa50,288,4392.86,58.267,256,25.56,8.52,19.24
671
+ nf_ecaresnet101,224,4388.13,58.329,256,44.55,8.01,16.27
672
+ resnet101_clip,224,4373.65,58.522,256,56.26,9.81,18.08
673
+ nf_seresnet101,224,4356.81,58.747,256,49.33,8.02,16.27
674
+ hieradet_small,256,4349.43,58.848,256,34.72,8.51,27.76
675
+ mobilevitv2_150,256,4334.6,59.05,256,10.59,4.09,24.11
676
+ rexnetr_300,224,4324.73,59.184,256,34.81,3.39,22.16
677
+ tf_efficientnet_cc_b1_8e,240,4322.38,59.217,256,39.72,0.75,15.44
678
+ tresnet_v2_l,224,4301.4,59.505,256,46.17,8.85,16.34
679
+ swin_s3_tiny_224,224,4290.26,59.66,256,28.33,4.64,19.13
680
+ resnext101_32x8d,176,4284.27,59.743,256,88.79,10.33,19.37
681
+ res2net50_26w_6s,224,4277.02,59.844,256,37.05,6.33,15.28
682
+ ese_vovnet39b,288,4248.82,60.241,256,24.57,11.71,11.13
683
+ legacy_seresnet101,224,4227.93,60.54,256,49.33,7.61,15.74
684
+ resnet50_gn,288,4222.36,60.62,256,25.56,6.85,18.37
685
+ cs3sedarknet_x,256,4215.77,60.715,256,35.4,8.38,11.35
686
+ wide_resnet101_2,176,4209.79,60.801,256,126.89,14.31,13.18
687
+ fbnetv3_g,288,4208.81,60.814,256,16.62,1.77,21.09
688
+ crossvit_18_dagger_240,240,4205.84,60.858,256,44.27,9.5,27.03
689
+ maxxvit_rmlp_nano_rw_256,256,4202.69,60.903,256,16.78,4.37,26.05
690
+ vit_base_patch32_384,384,4178.56,61.256,256,88.3,13.06,16.5
691
+ vit_base_patch32_clip_384,384,4178.4,61.258,256,88.3,13.06,16.5
692
+ twins_pcpvt_base,224,4172.72,61.341,256,43.83,6.68,25.25
693
+ resnetblur50,288,4164.2,61.467,256,25.56,8.52,19.87
694
+ efficientvit_b2,288,4145.81,61.737,256,24.33,2.64,24.03
695
+ resnet61q,288,4130.49,61.968,256,36.85,9.87,21.52
696
+ darknet53,288,4124.09,62.065,256,41.61,11.78,15.68
697
+ resnext50_32x4d,288,4120.75,62.115,256,25.03,7.04,23.81
698
+ coatnet_rmlp_0_rw_224,224,4120.51,62.117,256,27.45,4.72,24.89
699
+ poolformerv2_s36,224,4120.18,62.123,256,30.79,5.01,15.82
700
+ resnetaa50d,288,4119.71,62.13,256,25.58,8.92,20.57
701
+ seresnet101,224,4119.53,62.132,256,49.33,7.84,16.27
702
+ cs3edgenet_x,256,4107.9,62.309,256,47.82,11.53,12.92
703
+ cspdarknet53,256,4106.02,62.337,256,27.64,6.57,16.81
704
+ volo_d1_224,224,4101.98,62.399,256,26.63,6.94,24.43
705
+ convnext_tiny_hnf,288,4094.49,62.513,256,28.59,7.39,22.21
706
+ convnext_tiny,288,4073.96,62.828,256,28.59,7.39,22.21
707
+ hrnet_w32,224,4056.25,63.101,256,41.23,8.97,22.02
708
+ regnetx_080,224,4050.3,63.196,256,39.57,8.02,14.06
709
+ nextvit_base,224,4038.08,63.385,256,44.82,8.29,23.71
710
+ pit_b_distilled_224,224,4034.42,63.444,256,74.79,12.5,33.07
711
+ pit_b_224,224,4022.18,63.637,256,73.76,12.42,32.94
712
+ fastvit_mci0,256,4004.83,63.911,256,11.41,2.42,18.29
713
+ convnext_small,224,3998.3,64.017,256,50.22,8.71,21.56
714
+ darknetaa53,288,3994.45,64.079,256,36.02,10.08,15.68
715
+ inception_next_small,224,3986.65,64.204,256,49.37,8.36,19.27
716
+ ecaresnet101d,224,3983.05,64.261,256,44.57,8.08,17.07
717
+ coat_lite_small,224,3969.68,64.477,256,19.84,3.96,22.09
718
+ regnetz_c16,256,3947.47,64.841,256,13.46,2.51,16.57
719
+ regnetx_064,224,3944.55,64.89,256,26.21,6.49,16.37
720
+ cs3sedarknet_xdw,256,3941.85,64.932,256,21.6,5.97,17.18
721
+ cs3darknet_x,288,3934.32,65.058,256,35.05,10.6,14.36
722
+ regnetz_b16,288,3931.2,65.11,256,9.72,2.39,16.43
723
+ rexnetr_200,288,3927.78,65.166,256,16.52,2.62,24.96
724
+ resnetblur50d,288,3921.99,65.263,256,25.58,8.92,21.19
725
+ resmlp_36_224,224,3921.81,65.266,256,44.69,8.91,16.33
726
+ pvt_v2_b2_li,224,3910.42,65.455,256,22.55,3.91,27.6
727
+ maxxvitv2_nano_rw_256,256,3888.05,65.832,256,23.7,6.26,23.05
728
+ resnext50d_32x4d,288,3885.38,65.879,256,25.05,7.44,25.13
729
+ vit_large_patch32_224,224,3882.7,65.923,256,305.51,15.39,13.3
730
+ seresnet50,288,3880.91,65.955,256,28.09,6.8,18.39
731
+ res2net101_26w_4s,224,3875.61,66.043,256,45.21,8.1,18.45
732
+ regnetz_c16_evos,256,3866.46,66.2,256,13.49,2.48,16.57
733
+ repvit_m2_3,224,3852.0,66.448,256,23.69,4.57,26.21
734
+ xcit_tiny_12_p8_224,224,3851.28,66.461,256,6.71,4.81,23.6
735
+ densenet121,288,3850.05,66.481,256,7.98,4.74,11.41
736
+ vgg16_bn,224,3843.29,66.6,256,138.37,15.5,13.56
737
+ resnet101d,256,3841.99,66.622,256,44.57,10.55,22.25
738
+ mixer_b16_224,224,3841.01,66.64,256,59.88,12.62,14.53
739
+ regnetz_b16_evos,288,3840.11,66.654,256,9.74,2.36,16.43
740
+ vgg16,224,3839.88,66.659,256,138.36,15.47,13.56
741
+ vit_medium_patch16_rope_reg1_gap_256,256,3839.28,66.669,256,38.74,10.63,22.26
742
+ mobilenetv4_conv_large,384,3820.17,67.003,256,32.59,6.43,27.31
743
+ convnextv2_tiny,288,3792.59,67.49,256,28.64,7.39,22.21
744
+ rexnet_300,224,3771.96,67.859,256,34.71,3.44,22.4
745
+ convnextv2_small,224,3766.94,67.949,256,50.32,8.71,21.56
746
+ densenet201,224,3748.64,68.28,256,20.01,4.34,7.85
747
+ res2net101d,224,3734.18,68.545,256,45.23,8.35,19.25
748
+ hgnetv2_b5,288,3726.11,68.693,256,39.57,10.84,18.5
749
+ nest_tiny,224,3705.24,69.082,256,17.06,5.83,25.48
750
+ efficientnetv2_s,288,3702.94,69.124,256,21.46,4.75,20.13
751
+ ecaresnet50t,288,3690.88,69.35,256,25.57,7.14,19.55
752
+ seresnet50t,288,3690.42,69.359,256,28.1,7.14,19.55
753
+ edgenext_base,320,3685.34,69.455,256,18.51,6.01,24.32
754
+ coatnet_0_224,224,3684.19,69.475,256,25.04,4.58,24.01
755
+ convit_small,224,3679.62,69.563,256,27.78,5.76,17.87
756
+ ecaresnet50d,288,3676.55,69.621,256,25.58,7.19,19.72
757
+ swinv2_tiny_window8_256,256,3670.64,69.733,256,28.35,5.96,24.57
758
+ nest_tiny_jx,224,3669.11,69.761,256,17.06,5.83,25.48
759
+ eca_nfnet_l1,256,3668.26,69.777,256,41.41,9.62,22.04
760
+ efficientvit_b3,224,3667.52,69.791,256,48.65,3.99,26.9
761
+ mobilevitv2_175,256,3660.41,69.927,256,14.25,5.54,28.13
762
+ resnet152,224,3646.55,70.193,256,60.19,11.56,22.56
763
+ seresnext101_32x4d,224,3626.8,70.575,256,48.96,8.02,21.26
764
+ legacy_seresnext101_32x4d,224,3621.17,70.685,256,48.96,8.02,21.26
765
+ inception_v4,299,3620.42,70.7,256,42.68,12.28,15.09
766
+ xcit_small_24_p16_224,224,3616.94,70.768,256,47.67,9.1,23.64
767
+ tresnet_l,224,3593.17,71.235,256,55.99,10.9,11.9
768
+ resnet152c,224,3589.3,71.313,256,60.21,11.8,23.36
769
+ dla169,224,3570.86,71.681,256,53.39,11.6,20.2
770
+ densenetblur121d,288,3562.85,71.841,256,8.0,5.14,13.06
771
+ tnt_s_patch16_224,224,3559.68,71.906,256,23.76,5.24,24.37
772
+ resnetv2_101x1_bit,224,3551.18,72.078,256,44.54,8.04,16.23
773
+ convnextv2_nano,384,3546.15,72.18,256,15.62,7.22,24.61
774
+ rdnet_small,224,3539.46,72.314,256,50.44,8.74,22.55
775
+ resnet152d,224,3528.53,72.542,256,60.21,11.8,23.36
776
+ regnetv_040,288,3523.74,72.639,256,20.64,6.6,20.3
777
+ efficientvit_l2,256,3518.25,72.753,256,63.71,9.09,25.49
778
+ efficientnetv2_rw_s,288,3505.36,73.02,256,23.94,4.91,21.41
779
+ vit_small_resnet50d_s16_224,224,3504.26,73.043,256,57.53,13.48,24.82
780
+ maxvit_nano_rw_256,256,3501.58,73.099,256,15.45,4.46,30.28
781
+ vit_small_patch16_18x2_224,224,3500.73,73.117,256,64.67,13.71,35.69
782
+ maxvit_rmlp_nano_rw_256,256,3494.64,73.244,256,15.5,4.47,31.92
783
+ res2net50_26w_8s,224,3491.2,73.317,256,48.4,8.37,17.95
784
+ hgnet_small,288,3466.9,73.83,256,24.36,14.09,14.53
785
+ coatnet_rmlp_1_rw_224,224,3457.73,74.026,256,41.69,7.85,35.47
786
+ mobilenetv4_hybrid_medium,448,3448.71,74.22,256,11.07,4.2,29.64
787
+ resnest50d_4s2x40d,224,3439.65,74.416,256,30.42,4.4,17.94
788
+ focalnet_small_srf,224,3422.65,74.785,256,49.89,8.62,26.26
789
+ regnety_040,288,3396.57,75.36,256,20.65,6.61,20.3
790
+ poolformer_s36,224,3394.32,75.41,256,30.86,5.0,15.82
791
+ mvitv2_small,224,3388.47,75.541,256,34.87,7.0,28.08
792
+ davit_small,224,3382.96,75.663,256,49.75,8.8,30.49
793
+ ese_vovnet99b,224,3373.76,75.869,256,63.2,16.51,11.27
794
+ mixer_l32_224,224,3364.3,76.082,256,206.94,11.27,19.86
795
+ vit_base_patch16_siglip_gap_224,224,3352.55,76.35,256,85.8,17.49,23.75
796
+ vit_base_patch16_xp_224,224,3348.63,76.439,256,86.51,17.56,23.9
797
+ vit_base_patch16_224_miil,224,3348.42,76.443,256,94.4,17.59,23.91
798
+ seresnext50_32x4d,288,3348.33,76.445,256,27.56,7.04,23.82
799
+ seresnetaa50d,288,3347.65,76.461,256,28.11,8.92,20.59
800
+ vit_betwixt_patch16_reg1_gap_256,256,3346.86,76.479,256,60.4,16.32,27.83
801
+ vit_base_patch16_224,224,3345.67,76.508,256,86.57,17.58,23.9
802
+ convformer_s36,224,3344.88,76.523,256,40.01,7.67,30.5
803
+ deit_base_patch16_224,224,3344.81,76.527,256,86.57,17.58,23.9
804
+ vit_base_patch16_clip_quickgelu_224,224,3343.2,76.564,256,86.19,17.58,23.9
805
+ deit3_base_patch16_224,224,3342.68,76.576,256,86.59,17.58,23.9
806
+ vit_base_patch16_clip_224,224,3342.03,76.591,256,86.57,17.58,23.9
807
+ pvt_v2_b3,224,3337.39,76.696,256,45.24,6.92,37.7
808
+ hiera_small_abswin_256,256,3333.92,76.777,256,34.36,8.29,26.38
809
+ caformer_s36,224,3329.65,76.872,256,39.3,8.0,37.53
810
+ efficientnet_b3,288,3326.65,76.944,256,12.23,1.63,21.49
811
+ vit_base_patch16_siglip_224,224,3320.61,77.085,256,92.88,17.73,24.06
812
+ vit_betwixt_patch16_reg4_gap_256,256,3319.25,77.116,256,60.4,16.52,28.24
813
+ resnet152s,224,3317.78,77.15,256,60.32,12.92,24.96
814
+ cs3se_edgenet_x,256,3309.24,77.349,256,50.72,11.53,12.94
815
+ vit_base_patch16_gap_224,224,3303.26,77.489,256,86.57,17.49,25.59
816
+ vit_small_patch16_36x1_224,224,3302.19,77.513,256,64.67,13.71,35.69
817
+ deit_base_distilled_patch16_224,224,3300.25,77.56,256,87.34,17.68,24.05
818
+ cs3sedarknet_x,288,3293.75,77.714,256,35.4,10.6,14.37
819
+ vit_relpos_base_patch16_224,224,3288.47,77.838,256,86.43,17.51,24.97
820
+ nextvit_large,224,3287.18,77.867,256,57.87,10.78,28.99
821
+ vit_base_mci_224,224,3282.25,77.986,256,86.35,17.73,24.65
822
+ regnetv_064,224,3266.81,78.353,256,30.58,6.39,16.41
823
+ resnetv2_50d_gn,288,3266.39,78.364,256,25.57,7.24,19.7
824
+ vit_relpos_base_patch16_clsgap_224,224,3264.17,78.417,256,86.43,17.6,25.12
825
+ repvgg_b2,224,3260.0,78.517,256,89.02,20.45,12.9
826
+ beit_base_patch16_224,224,3259.83,78.519,256,86.53,17.58,23.9
827
+ vit_relpos_base_patch16_cls_224,224,3259.47,78.529,256,86.43,17.6,25.12
828
+ repvgg_b2g4,224,3254.26,78.656,256,61.76,12.63,12.9
829
+ resnetv2_50d_evos,288,3251.16,78.731,256,25.59,7.15,19.7
830
+ regnety_080,224,3246.07,78.854,256,39.18,8.0,17.97
831
+ beitv2_base_patch16_224,224,3245.71,78.862,256,86.53,17.58,23.9
832
+ regnety_064,224,3237.99,79.05,256,30.58,6.39,16.41
833
+ sequencer2d_s,224,3229.54,79.258,256,27.65,4.96,11.31
834
+ cs3edgenet_x,288,3219.84,79.497,256,47.82,14.59,16.36
835
+ maxvit_tiny_rw_224,224,3218.36,79.532,256,29.06,5.11,33.11
836
+ coatnet_1_rw_224,224,3216.02,79.59,256,41.72,8.04,34.6
837
+ efficientnet_el_pruned,300,3211.46,79.704,256,10.59,8.0,30.7
838
+ efficientnet_el,300,3210.88,79.719,256,10.59,8.0,30.7
839
+ mixnet_xl,224,3208.22,79.783,256,11.9,0.93,14.57
840
+ vgg19,224,3200.7,79.971,256,143.67,19.63,14.86
841
+ vgg19_bn,224,3198.64,80.025,256,143.68,19.66,14.86
842
+ fastvit_sa24,256,3196.7,80.071,256,21.55,3.8,24.32
843
+ legacy_xception,299,3195.72,80.097,256,22.86,8.4,35.83
844
+ tf_efficientnet_el,300,3194.92,80.117,256,10.59,8.0,30.7
845
+ regnety_032,288,3185.8,80.346,256,19.44,5.29,18.61
846
+ vit_small_patch16_384,384,3183.86,80.395,256,22.2,15.52,50.78
847
+ resnetv2_152,224,3179.36,80.509,256,60.19,11.55,22.56
848
+ deit3_small_patch16_384,384,3174.6,80.63,256,22.21,15.52,50.78
849
+ hrnet_w30,224,3172.72,80.677,256,37.71,8.15,21.21
850
+ focalnet_small_lrf,224,3161.39,80.966,256,50.34,8.74,28.61
851
+ swin_small_patch4_window7_224,224,3153.83,81.161,256,49.61,8.77,27.47
852
+ tf_efficientnetv2_s,300,3151.03,81.233,256,21.46,5.35,22.73
853
+ mobilevitv2_200,256,3150.51,81.247,256,18.45,7.22,32.15
854
+ resnet101,288,3147.94,81.312,256,44.55,12.95,26.83
855
+ vit_base_patch32_clip_448,448,3138.59,81.555,256,88.34,17.93,23.9
856
+ dpn92,224,3109.73,82.312,256,37.67,6.54,18.21
857
+ hiera_base_224,224,3102.53,82.503,256,51.52,9.4,30.42
858
+ nfnet_f0,256,3101.19,82.539,256,71.49,12.62,18.05
859
+ mobilenetv4_conv_aa_large,384,3093.53,82.743,256,32.59,7.07,32.29
860
+ dm_nfnet_f0,256,3083.56,83.011,256,71.49,12.62,18.05
861
+ resnetv2_152d,224,3076.59,83.198,256,60.2,11.8,23.36
862
+ hrnet_w18_ssld,288,3064.15,83.535,256,21.3,7.14,26.96
863
+ dla102x2,224,3061.68,83.603,256,41.28,9.34,29.91
864
+ nf_regnet_b4,384,3049.71,83.932,256,30.21,4.7,28.61
865
+ gcvit_tiny,224,3042.25,84.138,256,28.22,4.79,29.82
866
+ cait_xxs36_224,224,3031.38,84.438,256,17.3,3.77,30.34
867
+ xception41p,299,3019.53,84.771,256,26.91,9.25,39.86
868
+ densenet161,224,3006.8,85.129,256,28.68,7.79,11.06
869
+ ecaresnet50t,320,2995.44,85.452,256,25.57,8.82,24.13
870
+ regnety_080_tv,224,2993.18,85.516,256,39.38,8.51,19.73
871
+ vit_base_patch16_rpn_224,224,2978.25,85.944,256,86.54,17.49,23.75
872
+ twins_pcpvt_large,224,2977.85,85.958,256,60.99,9.84,35.82
873
+ regnetz_040,256,2959.14,86.499,256,27.12,4.06,24.19
874
+ vit_small_r26_s32_384,384,2948.63,86.81,256,36.47,10.43,29.85
875
+ gmlp_b16_224,224,2948.58,86.812,256,73.08,15.78,30.21
876
+ legacy_seresnet152,224,2940.44,87.052,256,66.82,11.33,22.08
877
+ regnetz_040_h,256,2937.29,87.143,256,28.94,4.12,24.29
878
+ twins_svt_base,224,2935.09,87.21,256,56.07,8.59,26.33
879
+ regnetz_d8,256,2918.46,87.707,256,23.37,3.97,23.74
880
+ flexivit_base,240,2911.53,87.916,256,86.59,20.29,28.36
881
+ vit_relpos_base_patch16_rpn_224,224,2909.18,87.987,256,86.41,17.51,24.97
882
+ efficientformer_l7,224,2896.41,88.374,256,82.23,10.17,24.45
883
+ regnetz_d8_evos,256,2895.78,88.394,256,23.46,4.5,24.92
884
+ seresnet152,224,2888.03,88.63,256,66.82,11.57,22.61
885
+ mvitv2_small_cls,224,2869.23,89.212,256,34.87,7.04,28.17
886
+ swinv2_cr_small_224,224,2869.18,89.213,256,49.7,9.07,50.27
887
+ dpn98,224,2866.56,89.294,256,61.57,11.73,25.2
888
+ swinv2_cr_small_ns_224,224,2851.92,89.753,256,49.7,9.08,50.27
889
+ inception_resnet_v2,299,2841.25,90.09,256,55.84,13.18,25.06
890
+ hrnet_w40,224,2841.0,90.097,256,57.56,12.75,25.29
891
+ vit_mediumd_patch16_reg4_gap_256,256,2831.24,90.41,256,64.11,17.87,37.57
892
+ maxxvit_rmlp_tiny_rw_256,256,2818.8,90.808,256,29.64,6.66,39.76
893
+ regnety_040_sgn,288,2814.17,90.956,256,20.65,6.67,20.3
894
+ eva02_base_patch16_clip_224,224,2810.17,91.088,256,86.26,17.62,26.32
895
+ wide_resnet50_2,288,2801.8,91.36,256,68.88,18.89,23.81
896
+ efficientvit_b3,256,2799.72,91.426,256,48.65,5.2,35.01
897
+ poolformerv2_m36,224,2797.01,91.515,256,56.08,8.81,22.02
898
+ resnetv2_101,288,2796.77,91.523,256,44.54,12.94,26.83
899
+ vit_betwixt_patch16_rope_reg4_gap_256,256,2794.77,91.589,256,60.23,16.52,28.24
900
+ resnetaa101d,288,2788.15,91.807,256,44.57,15.07,29.03
901
+ hgnetv2_b6,224,2778.72,92.115,256,75.26,16.88,21.23
902
+ xcit_tiny_24_p16_384,384,2775.49,92.225,256,12.12,6.87,34.29
903
+ mobilenetv4_hybrid_large,384,2771.92,92.344,256,37.76,7.77,34.52
904
+ efficientvit_l2,288,2770.71,92.385,256,63.71,11.51,32.19
905
+ levit_conv_384_s8,224,2749.96,93.083,256,39.12,9.98,35.86
906
+ resnet152d,256,2742.49,93.336,256,60.21,15.41,30.51
907
+ mobilenetv4_conv_large,448,2739.84,93.426,256,32.59,8.75,37.17
908
+ coatnet_rmlp_1_rw2_224,224,2704.02,94.662,256,41.72,8.11,40.13
909
+ resnetblur101d,288,2697.04,94.909,256,44.57,15.07,29.65
910
+ repvgg_b3g4,224,2696.88,94.914,256,83.83,17.89,15.1
911
+ efficientnet_b3,320,2696.04,94.944,256,12.23,2.01,26.52
912
+ convnext_base,224,2678.52,95.564,256,88.59,15.38,28.75
913
+ regnetx_120,224,2677.22,95.611,256,46.11,12.13,21.37
914
+ resnext101_64x4d,224,2667.58,95.957,256,83.46,15.52,31.21
915
+ levit_384_s8,224,2646.86,96.708,256,39.12,9.98,35.86
916
+ resnext101_32x8d,224,2646.71,96.714,256,88.79,16.48,31.21
917
+ wide_resnet101_2,224,2643.91,96.816,256,126.89,22.8,21.23
918
+ inception_next_base,224,2636.33,97.093,256,86.67,14.85,25.69
919
+ tf_efficientnet_b3,300,2632.25,97.244,256,12.23,1.87,23.83
920
+ resnet200,224,2631.56,97.27,256,64.67,15.07,32.19
921
+ resnext101_32x4d,288,2614.05,97.922,256,44.18,13.24,35.09
922
+ rexnetr_300,288,2612.58,97.977,256,34.81,5.59,36.61
923
+ vit_base_patch16_siglip_gap_256,256,2590.2,98.824,256,85.84,23.13,33.23
924
+ vit_large_r50_s32_224,224,2566.89,99.721,256,328.99,19.58,24.41
925
+ vit_base_patch16_siglip_256,256,2564.67,99.808,256,92.93,23.44,33.63
926
+ maxvit_tiny_tf_224,224,2550.75,100.351,256,30.92,5.6,35.78
927
+ efficientnet_b3_gn,288,2546.16,100.533,256,11.73,1.74,23.35
928
+ regnetz_d32,256,2543.95,100.62,256,27.58,5.98,23.74
929
+ samvit_base_patch16_224,224,2528.02,101.254,256,86.46,17.54,24.54
930
+ eva02_small_patch14_336,336,2526.33,101.323,256,22.13,15.48,54.33
931
+ crossvit_base_240,240,2521.38,101.521,256,105.03,21.22,36.33
932
+ convnextv2_base,224,2520.76,101.546,256,88.72,15.38,28.75
933
+ sequencer2d_m,224,2518.88,101.621,256,38.31,6.55,14.26
934
+ regnety_120,224,2511.21,101.932,256,51.82,12.14,21.38
935
+ regnetz_c16,320,2510.69,101.954,256,13.46,3.92,25.88
936
+ coat_tiny,224,2491.49,102.738,256,5.5,4.35,27.2
937
+ vit_base_patch16_reg4_gap_256,256,2482.98,103.091,256,86.62,23.5,33.89
938
+ seresnet101,288,2460.54,104.032,256,49.33,12.95,26.87
939
+ repvgg_b3,224,2459.38,104.081,256,123.09,29.16,15.1
940
+ swinv2_tiny_window16_256,256,2451.11,104.433,256,28.35,6.68,39.02
941
+ resnet101d,320,2444.46,104.716,256,44.57,16.48,34.77
942
+ regnetz_c16_evos,320,2443.36,104.763,256,13.49,3.86,25.88
943
+ xception41,299,2423.55,105.62,256,26.97,9.28,39.86
944
+ tresnet_xl,224,2413.65,106.052,256,78.44,15.2,15.34
945
+ efficientnet_lite4,380,2412.2,106.117,256,13.01,4.04,45.66
946
+ convnext_small,288,2411.61,106.142,256,50.22,14.39,35.65
947
+ coatnet_1_224,224,2409.54,106.233,256,42.23,8.7,39.0
948
+ hrnet_w48_ssld,224,2404.14,106.471,256,77.47,17.34,28.56
949
+ hrnet_w48,224,2403.21,106.513,256,77.47,17.34,28.56
950
+ tf_efficientnet_lite4,380,2401.87,106.574,256,13.01,4.04,45.66
951
+ caformer_m36,224,2378.95,107.598,256,56.2,13.29,50.48
952
+ ecaresnet101d,288,2377.02,107.688,256,44.57,13.35,28.19
953
+ hiera_base_plus_224,224,2375.75,107.745,256,69.9,12.67,37.98
954
+ fastvit_mci1,256,2370.49,107.983,256,21.54,4.72,32.84
955
+ rdnet_base,224,2367.82,108.105,256,87.45,15.4,31.14
956
+ maxvit_tiny_rw_256,256,2365.28,108.22,256,29.07,6.74,44.35
957
+ resnetrs101,288,2361.51,108.395,256,63.62,13.56,28.53
958
+ convformer_m36,224,2358.97,108.511,256,57.05,12.89,42.05
959
+ pvt_v2_b5,224,2356.7,108.615,256,81.96,11.76,50.92
960
+ pvt_v2_b4,224,2355.66,108.663,256,62.56,10.14,53.74
961
+ maxvit_rmlp_tiny_rw_256,256,2354.21,108.731,256,29.15,6.77,46.92
962
+ seresnext101_64x4d,224,2353.54,108.76,256,88.23,15.53,31.25
963
+ xcit_medium_24_p16_224,224,2347.78,109.028,256,84.4,16.13,31.71
964
+ seresnext101_32x8d,224,2341.18,109.335,256,93.57,16.48,31.25
965
+ vit_mediumd_patch16_rope_reg1_gap_256,256,2336.5,109.556,256,63.95,17.65,37.02
966
+ regnetx_160,224,2330.93,109.816,256,54.28,15.99,25.52
967
+ fastvit_sa36,256,2327.07,109.998,256,31.53,5.64,34.61
968
+ volo_d2_224,224,2323.74,110.155,256,58.68,14.34,41.34
969
+ nest_small,224,2322.48,110.217,256,38.35,10.35,40.04
970
+ convnext_tiny,384,2310.31,110.798,256,28.59,13.14,39.48
971
+ vit_base_r50_s16_224,224,2309.82,110.82,256,97.89,21.66,35.28
972
+ nest_small_jx,224,2306.72,110.97,256,38.35,10.35,40.04
973
+ eca_nfnet_l1,320,2302.23,111.183,256,41.41,14.92,34.42
974
+ hgnet_base,224,2292.28,111.669,256,71.58,25.14,15.47
975
+ davit_base,224,2283.75,112.086,256,87.95,15.51,40.66
976
+ mvitv2_base,224,2283.18,112.113,256,51.47,10.16,40.5
977
+ seresnext101d_32x8d,224,2279.47,112.295,256,93.59,16.72,32.05
978
+ vit_base_patch16_plus_240,240,2273.79,112.577,256,117.56,27.41,33.08
979
+ poolformer_m36,224,2266.85,112.921,256,56.17,8.8,22.02
980
+ vit_small_patch8_224,224,2262.19,113.154,256,21.67,22.44,80.84
981
+ focalnet_base_srf,224,2247.74,113.88,256,88.15,15.28,35.01
982
+ hiera_base_abswin_256,256,2242.95,114.124,256,51.27,12.46,40.7
983
+ vit_relpos_base_patch16_plus_240,240,2237.99,114.378,256,117.38,27.3,34.33
984
+ resnest101e,256,2234.57,114.552,256,48.28,13.38,28.66
985
+ mobilenetv4_conv_aa_large,448,2229.25,114.827,256,32.59,9.63,43.94
986
+ xcit_small_12_p16_384,384,2221.32,115.235,256,26.25,14.14,36.51
987
+ xception65p,299,2220.07,115.3,256,39.82,13.91,52.48
988
+ cait_s24_224,224,2217.42,115.437,256,46.92,9.35,40.58
989
+ resnet152,288,2213.91,115.622,256,60.19,19.11,37.28
990
+ swinv2_small_window8_256,256,2213.69,115.633,256,49.73,11.58,40.14
991
+ efficientnet_b3_g8_gn,288,2210.95,115.777,256,14.25,2.59,23.35
992
+ convformer_s18,384,2192.78,116.736,256,26.77,11.63,46.49
993
+ swinv2_cr_small_ns_256,256,2188.78,116.95,256,49.7,12.07,76.21
994
+ swin_base_patch4_window7_224,224,2187.06,117.041,256,87.77,15.47,36.63
995
+ efficientvit_b3,288,2171.71,117.869,256,48.65,6.58,44.2
996
+ seresnextaa101d_32x8d,224,2165.9,118.185,256,93.59,17.25,34.16
997
+ convnextv2_tiny,384,2148.79,119.127,256,28.64,13.14,39.48
998
+ seresnet152d,256,2148.49,119.143,256,66.84,15.42,30.56
999
+ resnet50x4_clip_gap,288,2144.6,119.359,256,65.62,19.57,34.11
1000
+ caformer_s18,384,2141.55,119.527,256,26.34,13.42,77.34
1001
+ resnetrs152,256,2135.82,119.85,256,86.62,15.59,30.83
1002
+ vit_base_patch16_rope_reg1_gap_256,256,2133.64,119.972,256,86.43,23.22,33.39
1003
+ swinv2_base_window12_192,192,2130.04,120.175,256,109.28,11.9,39.72
1004
+ seresnext101_32x4d,288,2120.08,120.739,256,48.96,13.25,35.12
1005
+ eva02_base_patch14_224,224,2117.73,120.874,256,85.76,23.22,36.55
1006
+ poolformerv2_m48,224,2116.33,120.954,256,73.35,11.59,29.17
1007
+ focalnet_base_lrf,224,2108.01,121.429,256,88.75,15.43,38.13
1008
+ dm_nfnet_f1,224,2107.34,121.469,256,132.63,17.87,22.94
1009
+ cs3se_edgenet_x,320,2106.36,121.525,256,50.72,18.01,20.21
1010
+ regnety_160,224,2105.14,121.597,256,83.59,15.96,23.04
1011
+ nfnet_f1,224,2090.25,122.462,256,132.63,17.87,22.94
1012
+ vit_medium_patch16_gap_384,384,2078.7,123.143,256,39.03,26.08,67.54
1013
+ dpn131,224,2074.97,123.363,256,79.25,16.09,32.97
1014
+ efficientnetv2_s,384,2071.25,123.585,256,21.46,8.44,35.77
1015
+ swin_s3_small_224,224,2068.44,123.754,256,49.74,9.43,37.84
1016
+ hrnet_w44,224,2065.62,123.922,256,67.06,14.94,26.92
1017
+ convnext_base,256,2065.1,123.954,256,88.59,20.09,37.55
1018
+ coat_lite_medium,224,2064.62,123.979,256,44.57,9.81,40.06
1019
+ efficientnet_b3_gn,320,2056.56,124.469,256,11.73,2.14,28.83
1020
+ mixnet_xxl,224,2051.52,124.774,256,23.96,2.04,23.43
1021
+ nf_regnet_b5,384,2048.92,124.932,256,49.74,7.95,42.9
1022
+ resnet50x4_clip,288,2033.61,125.874,256,87.14,21.35,35.27
1023
+ maxvit_rmlp_small_rw_224,224,2024.79,126.421,256,64.9,10.75,49.3
1024
+ efficientnet_b4,320,2021.3,126.64,256,19.34,3.13,34.76
1025
+ xcit_tiny_24_p8_224,224,2021.23,126.644,256,12.11,9.21,45.39
1026
+ swinv2_cr_base_224,224,2018.07,126.843,256,87.88,15.86,59.66
1027
+ swinv2_cr_base_ns_224,224,2005.7,127.625,256,87.88,15.86,59.66
1028
+ tf_efficientnetv2_s,384,1994.9,128.316,256,21.46,8.44,35.77
1029
+ regnetv_064,288,1990.64,128.591,256,30.58,10.55,27.11
1030
+ tresnet_m,448,1981.93,129.156,256,31.39,22.99,29.21
1031
+ xcit_nano_12_p8_384,384,1964.01,130.335,256,3.05,6.34,46.08
1032
+ efficientnetv2_rw_s,384,1962.9,130.408,256,23.94,8.72,38.03
1033
+ resnet200d,256,1962.4,130.442,256,64.69,20.0,43.09
1034
+ twins_svt_large,224,1960.0,130.601,256,99.27,15.15,35.1
1035
+ crossvit_15_dagger_408,408,1958.74,130.685,256,28.5,21.45,95.05
1036
+ mvitv2_base_cls,224,1952.99,131.07,256,65.44,10.23,40.65
1037
+ mobilenetv4_hybrid_large,448,1943.33,131.721,256,37.76,10.74,48.61
1038
+ tnt_b_patch16_224,224,1941.85,131.823,256,65.41,14.09,39.01
1039
+ mobilenetv4_conv_aa_large,480,1934.33,132.335,256,32.59,11.05,50.45
1040
+ gcvit_small,224,1930.43,132.601,256,51.09,8.57,41.61
1041
+ regnety_064,288,1927.43,132.808,256,30.58,10.56,27.11
1042
+ halonet_h1,256,1922.3,133.164,256,8.1,3.0,51.17
1043
+ regnety_080,288,1921.98,133.184,256,39.18,13.22,29.69
1044
+ convit_base,224,1918.39,133.435,256,86.54,17.52,31.77
1045
+ maxvit_tiny_pm_256,256,1916.11,133.593,256,30.09,6.61,47.9
1046
+ coat_mini,224,1897.57,134.897,256,10.34,6.82,33.68
1047
+ fastvit_ma36,256,1894.76,135.098,256,44.07,7.88,41.09
1048
+ regnetz_040,320,1893.7,135.174,256,27.12,6.35,37.78
1049
+ regnetz_040_h,320,1880.89,136.095,256,28.94,6.43,37.94
1050
+ mobilevitv2_150,384,1864.53,137.289,256,10.59,9.2,54.25
1051
+ regnetz_d8,320,1863.04,137.398,256,23.37,6.19,37.08
1052
+ regnetz_d8_evos,320,1832.93,139.657,256,23.46,7.03,38.92
1053
+ dpn107,224,1826.37,140.156,256,86.92,18.38,33.46
1054
+ vitamin_base_224,224,1815.08,141.031,256,87.72,22.68,52.77
1055
+ efficientnet_b3_g8_gn,320,1805.58,141.771,256,14.25,3.2,28.83
1056
+ hrnet_w64,224,1800.18,142.196,256,128.06,28.97,35.09
1057
+ coatnet_2_rw_224,224,1781.07,143.723,256,73.87,15.09,49.22
1058
+ xception65,299,1774.28,144.273,256,39.92,13.96,52.48
1059
+ fastvit_mci2,256,1766.58,144.901,256,35.82,7.91,43.34
1060
+ maxxvit_rmlp_small_rw_256,256,1763.23,145.178,256,66.01,14.67,58.38
1061
+ nextvit_small,384,1756.58,145.726,256,31.76,17.26,57.14
1062
+ efficientnetv2_m,320,1752.86,146.036,256,54.14,11.01,39.97
1063
+ coatnet_rmlp_2_rw_224,224,1741.02,147.029,256,73.88,15.18,54.78
1064
+ resnet152d,320,1737.66,147.314,256,60.21,24.08,47.67
1065
+ efficientvit_l3,224,1715.62,149.206,256,246.04,27.62,39.16
1066
+ tiny_vit_21m_384,384,1715.38,149.227,256,21.23,13.77,77.83
1067
+ seresnet152,288,1715.22,149.241,256,66.82,19.11,37.34
1068
+ levit_conv_512_s8,224,1706.1,150.039,256,74.05,21.82,52.28
1069
+ xcit_small_12_p8_224,224,1700.59,150.524,256,26.21,18.69,47.21
1070
+ poolformer_m48,224,1686.87,151.749,256,73.47,11.59,29.17
1071
+ volo_d3_224,224,1676.56,152.682,256,86.33,20.78,60.09
1072
+ levit_512_s8,224,1666.95,153.563,256,74.05,21.82,52.28
1073
+ caformer_b36,224,1666.61,153.592,256,98.75,23.22,67.3
1074
+ coatnet_2_224,224,1651.34,155.015,256,74.68,16.5,52.67
1075
+ convformer_b36,224,1649.96,155.143,256,99.88,22.69,56.06
1076
+ maxvit_small_tf_224,224,1638.94,156.187,256,68.93,11.66,53.17
1077
+ sequencer2d_l,224,1635.78,156.489,256,54.3,9.74,22.12
1078
+ swin_s3_base_224,224,1634.71,156.59,256,71.13,13.69,48.26
1079
+ regnetz_e8,256,1630.84,156.963,256,57.7,9.91,40.94
1080
+ nest_base,224,1627.71,157.266,256,67.72,17.96,53.39
1081
+ hgnetv2_b6,288,1622.05,157.812,256,75.26,27.9,35.09
1082
+ convnext_base,288,1617.77,158.232,256,88.59,25.43,47.53
1083
+ eca_nfnet_l2,320,1615.7,158.43,256,56.72,20.95,47.43
1084
+ nest_base_jx,224,1615.45,158.458,256,67.72,17.96,53.39
1085
+ convmixer_768_32,224,1615.29,158.475,256,21.11,19.55,25.95
1086
+ resnext101_64x4d,288,1613.61,158.64,256,83.46,25.66,51.59
1087
+ regnetz_d32,320,1612.01,158.797,256,27.58,9.33,37.08
1088
+ vit_so150m_patch16_reg4_gap_256,256,1607.89,159.204,256,134.13,36.75,53.21
1089
+ vit_so150m_patch16_reg4_map_256,256,1592.32,160.761,256,141.48,37.18,53.68
1090
+ densenet264d,224,1591.16,160.877,256,72.74,13.57,14.0
1091
+ mobilevitv2_175,384,1573.85,162.648,256,14.25,12.47,63.29
1092
+ resnet200,288,1570.42,163.003,256,64.67,24.91,53.21
1093
+ efficientvit_l2,384,1551.96,164.941,256,63.71,20.45,57.01
1094
+ regnety_120,288,1541.43,166.07,256,51.82,20.06,35.34
1095
+ swinv2_base_window8_256,256,1538.74,166.358,256,87.92,20.37,52.59
1096
+ convnextv2_base,288,1528.58,167.465,256,88.72,25.43,47.53
1097
+ ecaresnet200d,256,1516.5,168.798,256,64.69,20.0,43.15
1098
+ efficientnetv2_rw_m,320,1516.44,168.804,256,53.24,12.72,47.14
1099
+ seresnet200d,256,1510.21,169.5,256,71.86,20.01,43.15
1100
+ resnetrs200,256,1506.62,169.905,256,93.21,20.18,43.42
1101
+ maxvit_rmlp_small_rw_256,256,1505.28,170.056,256,64.9,14.15,66.09
1102
+ mobilenetv4_conv_aa_large,544,1502.9,170.324,256,32.59,14.19,64.79
1103
+ maxxvitv2_rmlp_base_rw_224,224,1492.62,171.499,256,116.09,24.2,62.77
1104
+ coat_small,224,1479.25,173.047,256,21.69,12.61,44.25
1105
+ convnext_large,224,1474.99,173.55,256,197.77,34.4,43.13
1106
+ vit_betwixt_patch16_reg4_gap_384,384,1474.89,173.562,256,60.6,39.71,85.28
1107
+ hrnet_w48_ssld,288,1457.57,175.623,256,77.47,28.66,47.21
1108
+ resnext101_32x16d,224,1452.32,176.26,256,194.03,36.27,51.18
1109
+ swinv2_small_window16_256,256,1451.76,176.326,256,49.73,12.82,66.29
1110
+ senet154,224,1443.54,177.331,256,115.09,20.77,38.69
1111
+ legacy_senet154,224,1435.62,178.308,256,115.09,20.77,38.69
1112
+ resnetv2_50x1_bit,448,1429.57,179.064,256,25.55,16.62,44.46
1113
+ nf_regnet_b5,456,1421.4,180.093,256,49.74,11.7,61.95
1114
+ seresnext101_32x8d,288,1405.19,182.171,256,93.57,27.24,51.63
1115
+ convnextv2_large,224,1398.77,183.007,256,197.96,34.4,43.13
1116
+ efficientnet_b4,384,1396.87,183.256,256,19.34,4.51,50.04
1117
+ gcvit_base,224,1394.71,183.536,256,90.32,14.87,55.48
1118
+ volo_d1_384,384,1386.95,184.567,256,26.78,22.75,108.55
1119
+ hgnet_base,288,1385.67,184.737,256,71.58,41.55,25.57
1120
+ seresnext101d_32x8d,288,1371.24,186.68,256,93.59,27.64,52.95
1121
+ convnext_small,384,1370.68,186.757,256,50.22,25.58,63.37
1122
+ xception71,299,1367.4,187.205,256,42.34,18.09,69.92
1123
+ vit_large_patch32_384,384,1360.57,188.147,256,306.63,45.31,43.86
1124
+ seresnet152d,320,1353.12,189.182,256,66.84,24.09,47.72
1125
+ crossvit_18_dagger_408,408,1350.07,189.608,256,44.61,32.47,124.87
1126
+ resnetrs152,320,1348.43,189.84,256,86.62,24.34,48.14
1127
+ nextvit_base,384,1347.63,189.95,256,44.82,24.64,73.95
1128
+ rdnet_large,224,1330.13,192.452,256,186.27,34.74,46.67
1129
+ efficientvit_l3,256,1327.54,192.828,256,246.04,36.06,50.98
1130
+ convnext_base,320,1319.96,193.934,256,88.59,31.39,58.68
1131
+ resnetv2_50x3_bit,224,1315.05,194.657,256,217.32,37.06,33.34
1132
+ xcit_tiny_12_p8_384,384,1309.52,195.481,256,6.71,14.13,69.14
1133
+ seresnextaa101d_32x8d,288,1303.25,196.419,256,93.59,28.51,56.44
1134
+ regnety_160,288,1303.15,196.436,256,83.59,26.37,38.07
1135
+ davit_large,224,1281.18,199.804,256,196.81,34.6,60.99
1136
+ tf_efficientnet_b4,380,1278.54,200.216,256,19.34,4.49,49.49
1137
+ xcit_large_24_p16_224,224,1277.51,200.379,256,189.1,35.86,47.27
1138
+ regnety_320,224,1269.25,201.683,256,145.05,32.34,30.26
1139
+ swinv2_large_window12_192,192,1267.31,201.991,256,228.77,26.17,56.53
1140
+ vit_mediumd_patch16_reg4_gap_384,384,1254.95,203.981,256,64.27,43.67,113.51
1141
+ regnetx_320,224,1253.43,204.228,256,107.81,31.81,36.3
1142
+ swin_large_patch4_window7_224,224,1249.84,204.814,256,196.53,34.53,54.94
1143
+ swinv2_cr_tiny_384,384,1248.53,205.031,256,28.33,15.34,161.01
1144
+ resnet200d,320,1245.29,205.564,256,64.69,31.25,67.33
1145
+ dm_nfnet_f2,256,1211.63,211.274,256,193.78,33.76,41.85
1146
+ nfnet_f2,256,1207.12,212.064,256,193.78,33.76,41.85
1147
+ mixer_l16_224,224,1198.62,213.568,256,208.2,44.6,41.69
1148
+ xcit_small_24_p16_384,384,1191.61,214.823,256,47.67,26.72,68.58
1149
+ tf_efficientnetv2_m,384,1187.13,215.635,256,54.14,15.85,57.52
1150
+ ecaresnet200d,288,1180.84,216.784,256,64.69,25.31,54.59
1151
+ seresnet200d,288,1176.89,217.511,256,71.86,25.32,54.6
1152
+ vit_small_patch14_dinov2,518,1176.63,217.56,256,22.06,46.76,198.79
1153
+ seresnet269d,256,1174.38,217.976,256,113.67,26.59,53.6
1154
+ vit_base_patch16_18x2_224,224,1171.44,218.522,256,256.73,52.51,71.38
1155
+ vit_small_patch14_reg4_dinov2,518,1167.53,219.256,256,22.06,46.95,199.77
1156
+ swinv2_cr_large_224,224,1159.63,220.749,256,196.68,35.1,78.42
1157
+ resnetrs270,256,1150.07,222.584,256,129.86,27.06,55.84
1158
+ convformer_s36,384,1147.41,223.099,256,40.01,22.54,89.62
1159
+ convnext_large_mlp,256,1136.31,225.279,256,200.13,44.94,56.33
1160
+ eca_nfnet_l2,384,1126.24,227.293,256,56.72,30.05,68.28
1161
+ maxvit_rmlp_base_rw_224,224,1122.54,228.041,256,116.14,23.15,92.64
1162
+ caformer_s36,384,1120.64,228.427,256,39.3,26.08,150.33
1163
+ vit_base_patch16_siglip_gap_384,384,1108.75,230.879,256,86.09,55.43,101.3
1164
+ vit_base_patch16_384,384,1108.32,230.969,256,86.86,55.54,101.56
1165
+ deit_base_patch16_384,384,1106.79,231.289,256,86.86,55.54,101.56
1166
+ deit3_base_patch16_384,384,1106.51,231.346,256,86.88,55.54,101.56
1167
+ vit_base_patch16_clip_384,384,1104.66,231.734,256,86.86,55.54,101.56
1168
+ deit_base_distilled_patch16_384,384,1099.37,232.85,256,87.63,55.65,101.82
1169
+ vit_base_patch16_siglip_384,384,1099.06,232.916,256,93.18,56.12,102.2
1170
+ nextvit_large,384,1093.53,234.092,256,57.87,32.03,90.76
1171
+ dm_nfnet_f1,320,1086.93,235.515,256,132.63,35.97,46.77
1172
+ nfnet_f1,320,1081.87,236.614,256,132.63,35.97,46.77
1173
+ seresnextaa101d_32x8d,320,1057.25,242.126,256,93.59,35.19,69.67
1174
+ convmixer_1024_20_ks9_p14,224,1053.87,242.903,256,24.38,5.55,5.51
1175
+ regnetz_e8,320,1043.8,245.246,256,57.7,15.46,63.94
1176
+ vit_large_patch16_224,224,1041.62,245.761,256,304.33,61.6,63.52
1177
+ eva_large_patch14_196,196,1041.47,245.796,256,304.14,61.57,63.52
1178
+ beit_base_patch16_384,384,1041.45,245.799,256,86.74,55.54,101.56
1179
+ swinv2_base_window16_256,256,1041.34,245.827,256,87.92,22.02,84.71
1180
+ deit3_large_patch16_224,224,1040.35,246.06,256,304.37,61.6,63.52
1181
+ swinv2_base_window12to16_192to256,256,1035.65,247.176,256,87.92,22.02,84.71
1182
+ efficientnetv2_m,416,1034.88,247.361,256,54.14,18.6,67.5
1183
+ eca_nfnet_l3,352,1034.06,247.553,256,72.04,32.57,73.12
1184
+ beit_large_patch16_224,224,1021.33,250.639,256,304.43,61.6,63.52
1185
+ mobilevitv2_200,384,1019.97,250.974,256,18.45,16.24,72.34
1186
+ beitv2_large_patch16_224,224,1019.31,251.137,256,304.43,61.6,63.52
1187
+ volo_d4_224,224,972.56,263.212,256,192.96,44.34,80.22
1188
+ maxvit_base_tf_224,224,971.51,263.496,256,119.47,24.04,95.01
1189
+ hiera_large_224,224,954.9,268.081,256,213.74,40.34,83.37
1190
+ maxxvitv2_rmlp_large_rw_224,224,954.34,268.236,256,215.42,44.14,87.15
1191
+ resnetrs200,320,950.29,269.381,256,93.21,31.51,67.81
1192
+ resnetv2_152x2_bit,224,931.12,274.926,256,236.34,46.95,45.11
1193
+ convnext_xlarge,224,930.04,275.246,256,350.2,60.98,57.5
1194
+ seresnet269d,288,924.08,277.021,256,113.67,33.65,67.81
1195
+ convnext_base,384,920.56,278.081,256,88.59,45.21,84.49
1196
+ nasnetalarge,331,914.25,279.998,256,88.75,23.89,90.56
1197
+ flexivit_large,240,911.71,280.781,256,304.36,70.99,75.39
1198
+ inception_next_base,384,899.71,284.524,256,86.67,43.64,75.48
1199
+ efficientnetv2_rw_m,416,897.78,285.137,256,53.24,21.49,79.62
1200
+ convnext_large,288,891.34,287.196,256,197.77,56.87,71.29
1201
+ xcit_small_24_p8_224,224,885.98,288.934,256,47.63,35.81,90.78
1202
+ vit_large_r50_s32_384,384,883.45,289.761,256,329.09,57.43,76.52
1203
+ tresnet_l,448,883.01,289.904,256,55.99,43.59,47.56
1204
+ convnextv2_base,384,870.69,294.006,256,88.72,45.21,84.49
1205
+ resnetv2_101x1_bit,448,859.26,297.921,256,44.54,31.65,64.93
1206
+ pnasnet5large,331,852.39,300.32,256,86.06,25.04,92.89
1207
+ efficientnet_b5,416,849.66,301.286,256,30.39,8.27,80.68
1208
+ convnextv2_large,288,847.65,301.999,256,197.96,56.87,71.29
1209
+ efficientvit_l3,320,845.04,302.933,256,246.04,56.32,79.34
1210
+ davit_huge,224,829.13,308.745,256,348.92,61.23,81.32
1211
+ convformer_m36,384,815.63,313.855,256,57.05,37.87,123.56
1212
+ coatnet_rmlp_3_rw_224,224,808.28,316.71,256,165.15,33.56,79.47
1213
+ coatnet_3_rw_224,224,807.9,316.858,256,181.81,33.44,73.83
1214
+ xcit_medium_24_p16_384,384,806.79,317.297,256,84.4,47.39,91.64
1215
+ vit_large_patch16_siglip_gap_256,256,806.47,317.422,256,303.36,80.8,88.34
1216
+ caformer_m36,384,804.97,318.01,256,56.2,42.11,196.35
1217
+ vit_large_patch16_siglip_256,256,801.91,319.228,256,315.96,81.34,88.88
1218
+ repvgg_d2se,320,798.65,320.531,256,133.33,74.57,46.82
1219
+ vit_base_patch8_224,224,795.31,321.875,256,86.58,78.22,161.69
1220
+ volo_d2_384,384,789.01,324.444,256,58.87,46.17,184.51
1221
+ vit_large_patch14_clip_quickgelu_224,224,782.99,326.939,256,303.97,81.08,88.79
1222
+ vit_large_patch14_xp_224,224,782.94,326.962,256,304.06,81.01,88.79
1223
+ vit_large_patch14_224,224,781.86,327.414,256,304.2,81.08,88.79
1224
+ vit_large_patch14_clip_224,224,781.23,327.678,256,304.2,81.08,88.79
1225
+ coatnet_3_224,224,772.28,331.477,256,166.97,36.56,79.01
1226
+ resnest200e,320,765.73,334.308,256,70.2,35.69,82.78
1227
+ regnety_160,384,765.62,334.358,256,83.59,46.87,67.67
1228
+ vit_base_r50_s16_384,384,764.44,334.876,256,98.95,67.43,135.03
1229
+ swinv2_cr_small_384,384,761.16,336.319,256,49.7,29.7,298.03
1230
+ tf_efficientnetv2_m,480,760.12,336.778,256,54.14,24.76,89.84
1231
+ regnety_640,224,754.37,339.342,256,281.38,64.16,42.5
1232
+ ecaresnet269d,320,742.7,344.674,256,102.09,41.53,83.69
1233
+ resnetv2_101x3_bit,224,740.89,345.52,256,387.93,71.23,48.7
1234
+ efficientnet_b5,448,731.74,349.841,256,30.39,9.59,93.56
1235
+ convnext_large_mlp,320,724.03,353.565,256,200.13,70.21,88.02
1236
+ vitamin_large2_224,224,716.86,357.1,256,333.58,75.05,112.83
1237
+ vitamin_large_224,224,716.07,357.492,256,333.32,75.05,112.83
1238
+ resnetrs350,288,710.31,360.394,256,163.96,43.67,87.09
1239
+ mvitv2_large,224,709.52,360.797,256,217.99,43.87,112.02
1240
+ cait_xxs24_384,384,704.92,363.148,256,12.03,9.63,122.66
1241
+ xcit_tiny_24_p8_384,384,690.01,370.998,256,12.11,27.05,132.95
1242
+ resnet50x16_clip_gap,384,680.63,376.11,256,136.2,70.32,100.64
1243
+ maxvit_large_tf_224,224,679.37,376.81,256,211.79,43.68,127.35
1244
+ coat_lite_medium_384,384,679.13,376.941,256,44.57,28.73,116.7
1245
+ efficientnetv2_l,384,676.62,378.337,256,118.52,36.1,101.16
1246
+ tiny_vit_21m_512,512,663.92,385.573,256,21.27,27.02,177.93
1247
+ tf_efficientnetv2_l,384,663.26,385.959,256,118.52,36.1,101.16
1248
+ maxvit_tiny_tf_384,384,662.03,386.678,256,30.98,17.53,123.42
1249
+ resnet50x16_clip,384,652.97,392.041,256,167.33,74.9,103.54
1250
+ tf_efficientnet_b5,456,640.6,399.612,256,30.39,10.46,98.86
1251
+ eca_nfnet_l3,448,638.11,401.174,256,72.04,52.55,118.4
1252
+ nfnet_f2,352,631.24,405.539,256,193.78,63.22,79.06
1253
+ swinv2_large_window12to16_192to256,256,628.73,407.156,256,196.74,47.81,121.53
1254
+ dm_nfnet_f2,352,628.21,407.495,256,193.78,63.22,79.06
1255
+ volo_d5_224,224,617.89,414.298,256,295.46,72.4,118.11
1256
+ mvitv2_large_cls,224,615.19,416.117,256,234.58,42.17,111.69
1257
+ ecaresnet269d,352,613.97,416.948,256,102.09,50.25,101.25
1258
+ eva02_large_patch14_clip_224,224,605.15,423.025,256,304.11,81.18,97.2
1259
+ eva02_large_patch14_224,224,605.1,423.058,256,303.27,81.15,97.2
1260
+ vit_so400m_patch14_siglip_gap_224,224,597.92,428.139,256,412.44,109.57,106.13
1261
+ xcit_medium_24_p8_224,224,597.46,428.468,256,84.32,63.53,121.23
1262
+ vit_so400m_patch14_siglip_224,224,596.3,429.305,256,427.68,110.26,106.73
1263
+ vit_base_patch16_siglip_gap_512,512,595.41,429.941,256,86.43,107.0,246.15
1264
+ resnetrs270,352,593.7,431.181,256,129.86,51.13,105.48
1265
+ vit_base_patch16_siglip_512,512,592.23,432.25,256,93.52,108.22,247.74
1266
+ tresnet_xl,448,585.25,437.403,256,78.44,60.77,61.31
1267
+ nfnet_f3,320,580.98,440.622,256,254.92,68.77,83.93
1268
+ dm_nfnet_f3,320,578.4,442.591,256,254.92,68.77,83.93
1269
+ xcit_small_12_p8_384,384,575.87,444.537,256,26.21,54.92,138.29
1270
+ convformer_b36,384,571.54,447.902,256,99.88,66.67,164.75
1271
+ convnext_xlarge,288,564.23,453.705,256,350.2,100.8,95.05
1272
+ caformer_b36,384,564.02,453.867,256,98.75,72.33,261.79
1273
+ efficientvit_l3,384,540.41,473.7,256,246.04,81.08,114.02
1274
+ swinv2_cr_base_384,384,538.78,475.138,256,87.88,50.57,333.68
1275
+ resmlp_big_24_224,224,534.87,478.611,256,129.14,100.23,87.31
1276
+ seresnextaa201d_32x8d,320,530.48,482.573,256,149.39,70.22,138.71
1277
+ swin_base_patch4_window12_384,384,526.43,486.278,256,87.9,47.19,134.78
1278
+ convnextv2_huge,224,522.5,489.937,256,660.29,115.0,79.07
1279
+ coatnet_4_224,224,516.83,495.312,256,275.43,62.48,129.26
1280
+ cait_xs24_384,384,514.59,497.468,256,26.67,19.28,183.98
1281
+ resnext101_32x32d,224,508.29,503.635,256,468.53,87.29,91.12
1282
+ convnext_large,384,505.8,506.117,256,197.77,101.1,126.74
1283
+ convnext_large_mlp,384,505.51,506.405,256,200.13,101.11,126.74
1284
+ eva02_base_patch14_448,448,495.86,516.266,256,87.12,107.11,259.14
1285
+ resnetrs420,320,490.22,522.204,256,191.89,64.2,126.56
1286
+ convnextv2_large,384,480.94,532.276,256,197.96,101.1,126.74
1287
+ vitamin_large_256,256,476.63,537.095,256,333.38,99.0,154.99
1288
+ vitamin_large2_256,256,476.34,537.415,256,333.64,99.0,154.99
1289
+ efficientnetv2_xl,384,473.56,540.575,256,208.12,52.81,139.2
1290
+ cait_xxs36_384,384,472.1,542.238,256,17.37,14.35,183.7
1291
+ regnety_320,384,471.4,543.046,256,145.05,95.0,88.87
1292
+ tf_efficientnetv2_xl,384,467.46,547.633,256,208.12,52.81,139.2
1293
+ swinv2_cr_huge_224,224,461.55,554.638,256,657.83,115.97,121.08
1294
+ maxxvitv2_rmlp_base_rw_384,384,460.8,555.548,256,116.09,72.98,213.74
1295
+ rdnet_large,384,454.9,562.743,256,186.27,102.09,137.13
1296
+ focalnet_huge_fl3,224,451.28,567.265,256,745.28,118.26,104.8
1297
+ xcit_large_24_p16_384,384,445.92,574.081,256,189.1,105.35,137.17
1298
+ efficientnetv2_l,480,432.31,592.158,256,118.52,56.4,157.99
1299
+ maxvit_small_tf_384,384,430.69,445.784,192,69.02,35.87,183.65
1300
+ tf_efficientnetv2_l,480,425.62,601.47,256,118.52,56.4,157.99
1301
+ vit_base_patch14_dinov2,518,422.75,605.542,256,86.58,151.71,397.58
1302
+ vit_base_patch14_reg4_dinov2,518,420.52,608.763,256,86.58,152.25,399.53
1303
+ hiera_huge_224,224,415.21,616.545,256,672.78,124.85,150.95
1304
+ vit_huge_patch14_gap_224,224,405.72,630.968,256,630.76,166.73,138.74
1305
+ coatnet_rmlp_2_rw_384,384,405.05,474.001,192,73.88,47.69,209.43
1306
+ volo_d3_448,448,402.31,636.318,256,86.63,96.33,446.83
1307
+ resnetrs350,384,400.15,639.744,256,163.96,77.59,154.74
1308
+ cait_s24_384,384,399.34,641.043,256,47.06,32.17,245.31
1309
+ deit3_huge_patch14_224,224,397.79,643.537,256,632.13,167.4,139.41
1310
+ vit_huge_patch14_224,224,397.77,643.573,256,630.76,167.4,139.41
1311
+ vit_huge_patch14_clip_quickgelu_224,224,397.73,643.637,256,632.08,167.4,139.41
1312
+ vit_huge_patch14_clip_224,224,397.45,644.094,256,632.05,167.4,139.41
1313
+ vit_huge_patch14_xp_224,224,395.98,646.484,256,631.8,167.3,139.41
1314
+ sam2_hiera_tiny,896,388.8,164.6,64,26.85,99.86,384.63
1315
+ vitamin_xlarge_256,256,386.93,661.605,256,436.06,130.13,177.37
1316
+ regnety_1280,224,379.67,674.264,256,644.81,127.66,71.58
1317
+ seresnextaa201d_32x8d,384,366.37,698.744,256,149.39,101.11,199.72
1318
+ maxvit_xlarge_tf_224,224,358.95,713.18,256,506.99,97.52,191.04
1319
+ resnest269e,416,358.78,713.517,256,110.93,77.69,171.98
1320
+ maxvit_tiny_tf_512,512,354.93,360.623,128,31.05,33.49,257.59
1321
+ efficientnet_b6,528,354.3,722.545,256,43.04,19.4,167.39
1322
+ vit_large_patch14_clip_quickgelu_336,336,344.63,742.82,256,304.29,191.11,270.24
1323
+ vit_large_patch16_siglip_gap_384,384,344.54,743.009,256,303.69,190.85,269.55
1324
+ vit_large_patch14_clip_336,336,343.67,744.879,256,304.53,191.11,270.24
1325
+ vit_large_patch16_384,384,343.49,745.276,256,304.72,191.21,270.24
1326
+ eva_large_patch14_336,336,343.16,745.991,256,304.53,191.1,270.24
1327
+ deit3_large_patch16_384,384,342.55,747.334,256,304.76,191.21,270.24
1328
+ vit_large_patch16_siglip_384,384,342.3,747.871,256,316.28,192.07,270.75
1329
+ nfnet_f3,416,340.62,751.561,256,254.92,115.58,141.78
1330
+ dm_nfnet_f3,416,339.53,753.969,256,254.92,115.58,141.78
1331
+ vit_giant_patch16_gap_224,224,338.53,756.187,256,1011.37,202.46,139.26
1332
+ nfnet_f4,384,335.52,762.977,256,316.07,122.14,147.57
1333
+ dm_nfnet_f4,384,332.75,769.343,256,316.07,122.14,147.57
1334
+ xcit_large_24_p8_224,224,331.23,772.865,256,188.93,141.23,181.56
1335
+ tf_efficientnet_b6,528,330.56,580.814,192,43.04,19.4,167.39
1336
+ beit_large_patch16_384,384,327.97,780.534,256,305.0,191.21,270.24
1337
+ sam2_hiera_small,896,325.93,196.351,64,33.95,123.99,442.63
1338
+ convnext_xxlarge,256,322.53,793.713,256,846.47,198.09,124.45
1339
+ swinv2_cr_large_384,384,319.63,600.684,192,196.68,108.96,404.96
1340
+ convnext_xlarge,384,319.57,801.065,256,350.2,179.2,168.99
1341
+ maxvit_rmlp_base_rw_384,384,319.22,801.939,256,116.14,70.97,318.95
1342
+ convnextv2_huge,288,317.37,806.616,256,660.29,190.1,130.7
1343
+ swin_large_patch4_window12_384,384,316.67,606.299,192,196.74,104.08,202.16
1344
+ resnetv2_152x4_bit,224,316.17,809.666,256,936.53,186.9,90.22
1345
+ resnetv2_152x2_bit,384,314.26,814.59,256,236.34,136.16,132.56
1346
+ davit_giant,224,312.39,819.474,256,1406.47,192.92,153.06
1347
+ xcit_small_24_p8_384,384,302.44,846.436,256,47.63,105.24,265.91
1348
+ maxvit_base_tf_384,384,300.57,425.842,128,119.65,73.8,332.9
1349
+ swinv2_base_window12to24_192to384,384,298.98,321.077,96,87.92,55.25,280.36
1350
+ coatnet_5_224,224,295.77,865.513,256,687.47,145.49,194.24
1351
+ eva02_large_patch14_clip_336,336,288.34,887.823,256,304.43,191.34,289.13
1352
+ resnetrs420,416,284.53,899.724,256,191.89,108.45,213.79
1353
+ resnetv2_50x3_bit,448,281.44,682.202,192,217.32,145.7,133.37
1354
+ regnety_640,384,272.07,940.925,256,281.38,188.47,124.83
1355
+ focalnet_huge_fl4,224,270.92,944.907,256,686.46,118.9,113.34
1356
+ vitamin_large_336,336,270.63,709.44,192,333.57,175.72,307.47
1357
+ vitamin_large2_336,336,270.54,709.67,192,333.83,175.72,307.47
1358
+ mvitv2_huge_cls,224,270.48,946.469,256,694.8,120.67,243.63
1359
+ cait_s36_384,384,267.14,958.275,256,68.37,47.99,367.4
1360
+ efficientnetv2_xl,512,266.88,959.22,256,208.12,93.85,247.32
1361
+ tf_efficientnetv2_xl,512,263.31,972.209,256,208.12,93.85,247.32
1362
+ vit_giant_patch14_224,224,257.22,995.234,256,1012.61,267.18,192.64
1363
+ vit_giant_patch14_clip_224,224,256.33,998.705,256,1012.65,267.18,192.64
1364
+ eva_giant_patch14_224,224,256.2,999.216,256,1012.56,267.18,192.64
1365
+ eva_giant_patch14_clip_224,224,254.62,1005.406,256,1012.59,267.18,192.64
1366
+ resnet50x64_clip_gap,448,235.46,1087.208,256,365.03,253.96,233.22
1367
+ nfnet_f5,416,232.61,1100.541,256,377.21,170.71,204.56
1368
+ maxvit_small_tf_512,512,232.28,413.275,96,69.13,67.26,383.77
1369
+ dm_nfnet_f5,416,231.56,1105.537,256,377.21,170.71,204.56
1370
+ volo_d4_448,448,231.52,1105.726,256,193.41,197.13,527.35
1371
+ resnet50x64_clip,448,228.54,1120.162,256,420.38,265.02,239.13
1372
+ resnetv2_152x2_bit,448,225.56,1134.941,256,236.34,184.99,180.43
1373
+ vitamin_xlarge_336,336,219.99,872.741,192,436.06,230.18,347.33
1374
+ efficientnet_b7,600,209.53,1221.782,256,66.35,38.33,289.94
1375
+ vitamin_large_384,384,205.2,623.762,128,333.71,234.44,440.16
1376
+ vitamin_large2_384,384,205.17,623.858,128,333.97,234.44,440.16
1377
+ focalnet_large_fl3,384,204.28,1253.192,256,239.13,105.06,168.04
1378
+ xcit_medium_24_p8_384,384,203.5,1257.962,256,84.32,186.67,354.73
1379
+ tf_efficientnet_b7,600,198.11,646.087,128,66.35,38.33,289.94
1380
+ vit_so400m_patch14_siglip_gap_384,384,195.82,1307.314,256,412.99,333.46,451.19
1381
+ focalnet_large_fl4,384,195.3,1310.76,256,239.32,105.2,181.78
1382
+ vit_so400m_patch14_siglip_384,384,195.2,1311.466,256,428.23,335.4,452.89
1383
+ davit_base_fl,768,192.39,665.305,128,90.37,190.32,530.15
1384
+ maxvit_large_tf_384,384,190.72,671.13,128,212.03,132.55,445.84
1385
+ nfnet_f4,512,190.38,1344.673,256,316.07,216.26,262.26
1386
+ dm_nfnet_f4,512,189.19,1353.158,256,316.07,216.26,262.26
1387
+ swinv2_large_window12to24_192to384,384,186.98,342.268,64,196.74,116.15,407.83
1388
+ convnextv2_huge,384,178.56,1433.694,256,660.29,337.96,232.35
1389
+ nfnet_f6,448,177.22,1444.522,256,438.36,229.7,273.62
1390
+ dm_nfnet_f6,448,175.57,1458.065,256,438.36,229.7,273.62
1391
+ vit_huge_patch14_clip_336,336,173.02,1479.562,256,632.46,390.97,407.54
1392
+ sam2_hiera_base_plus,896,170.31,375.779,64,68.68,227.48,828.88
1393
+ beit_large_patch16_512,512,169.73,1508.229,256,305.67,362.24,656.39
1394
+ resnetv2_101x3_bit,448,167.54,1145.984,192,387.93,280.33,194.78
1395
+ vitamin_xlarge_384,384,165.63,772.812,128,436.06,306.38,493.46
1396
+ convmixer_1536_20,224,153.79,1664.601,256,51.63,48.68,33.03
1397
+ eva02_large_patch14_448,448,153.5,1667.723,256,305.08,362.33,689.95
1398
+ volo_d5_448,448,148.89,1719.408,256,295.91,315.06,737.92
1399
+ vit_gigantic_patch14_224,224,146.07,1752.63,256,1844.44,483.95,275.37
1400
+ vit_gigantic_patch14_clip_224,224,145.81,1755.702,256,1844.91,483.96,275.37
1401
+ maxvit_base_tf_512,512,144.2,665.741,96,119.88,138.02,703.99
1402
+ regnety_1280,384,138.04,1390.849,192,644.81,374.99,210.2
1403
+ nfnet_f5,544,137.1,1867.295,256,377.21,290.97,349.71
1404
+ efficientnet_b8,672,136.75,935.971,128,87.41,63.48,442.89
1405
+ dm_nfnet_f5,544,136.7,1872.679,256,377.21,290.97,349.71
1406
+ focalnet_xlarge_fl3,384,136.68,1873.042,256,408.79,185.61,223.99
1407
+ focalnet_xlarge_fl4,384,135.52,1888.958,256,409.03,185.79,242.31
1408
+ vit_huge_patch14_clip_quickgelu_378,378,135.21,1893.33,256,632.68,503.79,572.79
1409
+ vit_so400m_patch14_siglip_gap_448,448,135.13,1894.403,256,413.33,487.18,764.26
1410
+ vit_huge_patch14_clip_378,378,134.97,1896.634,256,632.68,503.79,572.79
1411
+ nfnet_f7,480,133.4,1919.099,256,499.5,300.08,355.86
1412
+ vit_large_patch14_dinov2,518,133.12,1923.026,256,304.37,507.15,1058.82
1413
+ vit_large_patch14_reg4_dinov2,518,132.04,1938.813,256,304.37,508.9,1064.02
1414
+ tf_efficientnet_b8,672,130.88,977.99,128,87.41,63.48,442.89
1415
+ swinv2_cr_huge_384,384,130.75,489.486,64,657.94,352.04,583.18
1416
+ swinv2_cr_giant_224,224,125.01,1023.872,128,2598.76,483.85,309.15
1417
+ maxvit_xlarge_tf_384,384,123.97,516.244,64,475.32,292.78,668.76
1418
+ vit_huge_patch16_gap_448,448,123.46,2073.454,256,631.67,544.7,636.83
1419
+ cait_m36_384,384,120.83,2118.7,256,271.22,173.11,734.81
1420
+ volo_d5_512,512,113.48,2255.978,256,296.09,425.09,1105.37
1421
+ xcit_large_24_p8_384,384,112.54,2274.795,256,188.93,415.0,531.82
1422
+ eva_giant_patch14_336,336,111.59,2294.19,256,1013.01,620.64,550.67
1423
+ nfnet_f6,576,107.92,2372.106,256,438.36,378.69,452.2
1424
+ dm_nfnet_f6,576,107.35,2384.806,256,438.36,378.69,452.2
1425
+ maxvit_large_tf_512,512,101.74,629.046,64,212.33,244.75,942.15
1426
+ convnextv2_huge,512,100.55,1273.014,128,660.29,600.81,413.07
1427
+ tf_efficientnet_l2,475,90.19,1064.381,96,480.31,172.11,609.89
1428
+ nfnet_f7,608,84.41,3032.802,256,499.5,480.39,570.85
1429
+ regnety_2560,384,76.59,1671.223,128,1282.6,747.83,296.49
1430
+ davit_huge_fl,768,69.12,925.974,64,360.64,744.84,1060.3
1431
+ resnetv2_152x4_bit,480,66.53,1442.988,96,936.53,844.84,414.26
1432
+ eva02_enormous_patch14_clip_224,224,63.84,4009.924,256,4350.56,1132.46,497.58
1433
+ samvit_base_patch16,1024,61.83,258.784,16,89.67,486.43,1343.27
1434
+ maxvit_xlarge_tf_512,512,61.08,785.831,48,475.77,534.14,1413.22
1435
+ sam2_hiera_large,1024,53.33,900.092,48,212.15,907.48,2190.34
1436
+ vit_giant_patch14_dinov2,518,40.02,4797.862,192,1136.48,1784.2,2757.89
1437
+ vit_giant_patch14_reg4_dinov2,518,39.61,4847.503,192,1136.48,1790.08,2771.21
1438
+ swinv2_cr_giant_384,384,36.91,866.946,32,2598.76,1450.71,1394.86
1439
+ eva_giant_patch14_560,560,35.98,7115.889,256,1014.45,1906.76,2577.17
1440
+ efficientnet_l2,800,32.95,1942.195,64,480.31,479.12,1707.39
1441
+ tf_efficientnet_l2,800,31.83,1005.239,32,480.31,479.12,1707.39
1442
+ samvit_large_patch16,1024,26.29,304.233,8,308.28,1493.86,2553.78
1443
+ vit_so400m_patch14_siglip_gap_896,896,23.37,5478.218,128,416.87,2731.49,8492.88
1444
+ samvit_huge_patch16,1024,15.57,770.869,12,637.03,2982.23,3428.16
pytorch-image-models/results/benchmark-infer-amp-nchw-pt240-cu124-rtx4090.csv ADDED
@@ -0,0 +1,1445 @@
1
+ model,infer_img_size,infer_samples_per_sec,infer_step_time,infer_batch_size,param_count,infer_gmacs,infer_macts
2
+ test_vit,160,188343.61,5.426,1024,0.37,0.04,0.48
3
+ test_byobnet,160,114439.82,8.933,1024,0.46,0.03,0.43
4
+ test_efficientnet,160,101055.26,10.121,1024,0.36,0.06,0.55
5
+ tinynet_e,106,76644.06,13.346,1024,2.04,0.03,0.69
6
+ mobilenetv3_small_050,224,70186.63,14.579,1024,1.59,0.03,0.92
7
+ lcnet_035,224,64212.36,15.936,1024,1.64,0.03,1.04
8
+ efficientvit_m0,224,54771.53,18.686,1024,2.35,0.08,0.91
9
+ lcnet_050,224,53644.11,19.078,1024,1.88,0.05,1.26
10
+ mobilenetv3_small_075,224,49475.98,20.686,1024,2.04,0.05,1.3
11
+ mobilenetv3_small_100,224,43953.49,23.286,1024,2.54,0.06,1.42
12
+ tinynet_d,152,40047.08,25.554,1024,2.34,0.05,1.42
13
+ efficientvit_m1,224,38637.78,26.491,1024,2.98,0.17,1.33
14
+ tf_mobilenetv3_small_075,224,37336.46,27.408,1024,2.04,0.05,1.3
15
+ tf_mobilenetv3_small_minimal_100,224,37294.33,27.442,1024,2.04,0.06,1.41
16
+ efficientvit_m2,224,34551.89,29.625,1024,4.19,0.2,1.47
17
+ tf_mobilenetv3_small_100,224,34059.68,30.049,1024,2.54,0.06,1.42
18
+ lcnet_075,224,33742.36,30.337,1024,2.36,0.1,1.99
19
+ mobilenetv4_conv_small,224,32546.3,31.45,1024,3.77,0.19,1.97
20
+ efficientvit_m3,224,29796.09,34.355,1024,6.9,0.27,1.62
21
+ mnasnet_small,224,29488.73,34.713,1024,2.03,0.07,2.16
22
+ levit_128s,224,28915.97,35.397,1024,7.78,0.31,1.88
23
+ ghostnet_050,224,27810.93,36.808,1024,2.59,0.05,1.77
24
+ efficientvit_m4,224,27584.68,37.111,1024,8.8,0.3,1.7
25
+ vit_small_patch32_224,224,27011.35,37.895,1024,22.88,1.15,2.5
26
+ lcnet_100,224,26901.09,38.053,1024,2.95,0.16,2.52
27
+ regnetx_002,224,26857.85,38.105,1024,2.68,0.2,2.16
28
+ resnet18,160,26266.23,38.966,1024,11.69,0.93,1.27
29
+ resnet10t,176,25959.28,39.426,1024,5.44,0.7,1.51
30
+ mobilenetv4_conv_small,256,25751.02,39.753,1024,3.77,0.25,2.57
31
+ repghostnet_050,224,25423.49,40.26,1024,2.31,0.05,2.02
32
+ levit_conv_128s,224,25136.44,40.725,1024,7.78,0.31,1.88
33
+ regnety_002,224,25031.98,40.889,1024,3.16,0.2,2.17
34
+ mobilenetv2_035,224,25024.38,40.909,1024,1.68,0.07,2.86
35
+ vit_tiny_r_s16_p8_224,224,24696.86,41.446,1024,6.34,0.44,2.06
36
+ efficientvit_b0,224,22706.84,45.084,1024,3.41,0.1,2.87
37
+ mnasnet_050,224,22309.73,45.888,1024,2.22,0.11,3.07
38
+ pit_ti_224,224,21640.68,47.298,1024,4.85,0.7,6.19
39
+ pit_ti_distilled_224,224,21496.66,47.616,1024,5.1,0.71,6.23
40
+ tinynet_c,184,20204.93,50.66,1024,2.46,0.11,2.87
41
+ repghostnet_058,224,19802.21,51.689,1024,2.55,0.07,2.59
42
+ mixer_s32_224,224,19527.38,52.427,1024,19.1,1.0,2.28
43
+ mobilenetv2_050,224,19456.73,52.616,1024,1.97,0.1,3.64
44
+ semnasnet_050,224,19111.19,53.559,1024,2.08,0.11,3.44
45
+ levit_128,224,19082.4,53.651,1024,9.21,0.41,2.71
46
+ efficientvit_m5,224,18614.46,54.998,1024,12.47,0.53,2.41
47
+ vit_medium_patch32_clip_224,224,18383.79,55.687,1024,39.69,2.0,3.34
48
+ regnetx_004,224,18131.35,56.448,1024,5.16,0.4,3.14
49
+ gernet_s,224,17973.35,56.961,1024,8.17,0.75,2.65
50
+ lcnet_150,224,17737.51,57.717,1024,4.5,0.34,3.79
51
+ deit_tiny_patch16_224,224,17673.84,57.926,1024,5.72,1.26,5.97
52
+ vit_tiny_patch16_224,224,17666.09,57.952,1024,5.72,1.26,5.97
53
+ cs3darknet_focus_s,256,17602.37,58.161,1024,3.27,0.69,2.7
54
+ deit_tiny_distilled_patch16_224,224,17560.74,58.299,1024,5.91,1.27,6.01
55
+ levit_conv_128,224,17447.46,58.676,1024,9.21,0.41,2.71
56
+ regnetx_004_tv,224,17439.64,58.689,1024,5.5,0.42,3.17
57
+ levit_192,224,16980.83,60.29,1024,10.95,0.66,3.2
58
+ cs3darknet_s,256,16965.42,60.345,1024,3.28,0.72,2.97
59
+ resnet10t,224,16340.38,62.643,1024,5.44,1.1,2.43
60
+ resnet34,160,16119.94,63.502,1024,21.8,1.87,1.91
61
+ repghostnet_080,224,15726.41,65.091,1024,3.28,0.1,3.22
62
+ levit_conv_192,224,15726.05,65.102,1024,10.95,0.66,3.2
63
+ mobilenetv3_large_075,224,15518.95,65.962,1024,3.99,0.16,4.0
64
+ vit_xsmall_patch16_clip_224,224,15266.09,67.063,1024,8.28,1.79,6.65
65
+ hardcorenas_a,224,14844.77,68.969,1024,5.26,0.23,4.38
66
+ mnasnet_075,224,14443.19,70.885,1024,3.17,0.23,4.77
67
+ ese_vovnet19b_slim_dw,224,14337.06,71.41,1024,1.9,0.4,5.28
68
+ nf_regnet_b0,192,14314.09,71.525,1024,8.76,0.37,3.15
69
+ mobilenetv3_rw,224,14298.34,71.594,1024,5.48,0.23,4.41
70
+ resnet14t,176,14213.29,72.022,1024,10.08,1.07,3.61
71
+ mobilenetv3_large_100,224,14191.17,72.134,1024,5.48,0.23,4.41
72
+ pit_xs_224,224,14164.01,72.272,1024,10.62,1.4,7.71
73
+ pit_xs_distilled_224,224,14041.03,72.903,1024,11.0,1.41,7.76
74
+ ghostnet_100,224,13948.64,73.4,1024,5.18,0.15,3.55
75
+ regnetx_006,224,13813.98,74.091,1024,6.2,0.61,3.98
76
+ mobilenetv1_100,224,13760.09,74.406,1024,4.23,0.58,5.04
77
+ tf_mobilenetv3_large_075,224,13614.3,75.186,1024,3.99,0.16,4.0
78
+ hardcorenas_b,224,13584.5,75.367,1024,5.18,0.26,5.09
79
+ resnet18,224,13537.15,75.62,1024,11.69,1.82,2.48
80
+ hardcorenas_c,224,13512.54,75.768,1024,5.52,0.28,5.01
81
+ mobilenetv1_100h,224,13446.31,76.143,1024,5.28,0.63,5.09
82
+ regnety_004,224,13238.44,77.296,1024,4.34,0.41,3.89
83
+ tf_efficientnetv2_b0,192,13104.66,78.117,1024,7.14,0.54,3.51
84
+ tf_mobilenetv3_large_minimal_100,224,13074.99,78.292,1024,3.92,0.22,4.4
85
+ tinynet_b,188,13009.4,78.686,1024,3.73,0.21,4.44
86
+ mobilenet_edgetpu_v2_xs,224,12965.31,78.967,1024,4.46,0.7,4.8
87
+ vit_betwixt_patch32_clip_224,224,12906.63,79.313,1024,61.41,3.09,4.17
88
+ convnext_atto,224,12819.99,79.862,1024,3.7,0.55,3.81
89
+ repghostnet_100,224,12769.67,80.164,1024,4.07,0.15,3.98
90
+ mnasnet_100,224,12674.12,80.779,1024,4.38,0.33,5.46
91
+ seresnet18,224,12647.44,80.94,1024,11.78,1.82,2.49
92
+ hardcorenas_d,224,12555.09,81.548,1024,7.5,0.3,4.93
93
+ levit_256,224,12506.52,81.863,1024,18.89,1.13,4.23
94
+ legacy_seresnet18,224,12443.24,82.276,1024,11.78,1.82,2.49
95
+ tf_mobilenetv3_large_100,224,12230.56,83.698,1024,5.48,0.23,4.41
96
+ convnext_atto_ols,224,12208.81,83.859,1024,3.7,0.58,4.11
97
+ mobilenetv2_075,224,12206.99,83.872,1024,2.64,0.22,5.86
98
+ edgenext_xx_small,256,12172.17,84.111,1024,1.33,0.26,3.33
99
+ semnasnet_075,224,12007.14,85.256,1024,2.91,0.23,5.54
100
+ regnety_006,224,11941.12,85.707,1024,6.06,0.61,4.33
101
+ levit_conv_256,224,11783.71,86.886,1024,18.89,1.13,4.23
102
+ repghostnet_111,224,11492.86,89.073,1024,4.54,0.18,4.38
103
+ spnasnet_100,224,11256.71,90.94,1024,4.42,0.35,6.03
104
+ levit_256d,224,11118.19,92.088,1024,26.21,1.4,4.93
105
+ hardcorenas_f,224,11102.22,92.22,1024,8.2,0.35,5.57
106
+ convnext_femto,224,11040.93,92.732,1024,5.22,0.79,4.57
107
+ resnet18d,224,11008.69,92.989,1024,11.71,2.06,3.29
108
+ repvgg_a0,224,10985.0,93.181,1024,9.11,1.52,3.59
109
+ dla46_c,224,10958.84,93.426,1024,1.3,0.58,4.5
110
+ mobilenetv1_100,256,10920.28,93.756,1024,4.23,0.76,6.59
111
+ hardcorenas_e,224,10912.94,93.82,1024,8.07,0.35,5.65
112
+ mobilenetv4_conv_medium,224,10887.04,94.044,1024,9.72,0.84,5.8
113
+ ghostnet_130,224,10826.86,94.566,1024,7.36,0.24,4.6
114
+ ese_vovnet19b_slim,224,10822.95,94.602,1024,3.17,1.69,3.52
115
+ mobilenetv2_100,224,10810.08,94.71,1024,3.5,0.31,6.68
116
+ mobilenetv1_100h,256,10741.59,95.317,1024,5.28,0.82,6.65
117
+ semnasnet_100,224,10684.42,95.813,1024,3.89,0.32,6.23
118
+ regnetx_008,224,10649.58,96.116,1024,7.26,0.81,5.15
119
+ convnext_femto_ols,224,10568.01,96.88,1024,5.23,0.82,4.87
120
+ crossvit_tiny_240,240,10467.46,97.811,1024,7.01,1.57,9.08
121
+ mobilenet_edgetpu_100,224,10392.92,98.516,1024,4.09,1.0,5.75
122
+ efficientnet_lite0,224,10376.87,98.665,1024,4.65,0.4,6.74
123
+ mobilenetv1_125,224,10367.58,98.748,1024,6.27,0.89,6.3
124
+ mobilevit_xxs,256,10336.7,99.048,1024,1.27,0.42,8.34
125
+ fbnetc_100,224,10321.13,99.198,1024,5.57,0.4,6.51
126
+ tinynet_a,192,10287.99,99.507,1024,6.19,0.35,5.41
127
+ tf_efficientnetv2_b0,224,10217.13,100.198,1024,7.14,0.73,4.77
128
+ mobilenetv4_hybrid_medium_075,224,10204.4,100.333,1024,7.31,0.66,5.65
129
+ hgnetv2_b0,224,10169.22,100.683,1024,6.0,0.33,2.12
130
+ vit_base_patch32_clip_224,224,10127.52,101.084,1024,88.22,4.41,5.01
131
+ vit_base_patch32_224,224,10112.91,101.231,1024,88.22,4.41,5.01
132
+ tf_efficientnetv2_b1,192,10100.97,101.35,1024,8.14,0.76,4.59
133
+ regnety_008,224,9957.68,102.806,1024,6.26,0.81,5.25
134
+ levit_conv_256d,224,9877.75,103.652,1024,26.21,1.4,4.93
135
+ crossvit_9_240,240,9867.74,103.759,1024,8.55,1.85,9.52
136
+ repghostnet_130,224,9782.41,104.652,1024,5.48,0.25,5.24
137
+ regnety_008_tv,224,9578.99,106.856,1024,6.43,0.84,5.42
138
+ edgenext_xx_small,288,9574.3,106.938,1024,1.33,0.33,4.21
139
+ resnetblur18,224,9357.08,109.406,1024,11.69,2.34,3.39
140
+ vit_small_patch32_384,384,9286.73,110.252,1024,22.92,3.45,8.25
141
+ xcit_nano_12_p16_224,224,9284.43,110.262,1024,3.05,0.56,4.17
142
+ mobilenet_edgetpu_v2_s,224,9204.18,111.24,1024,5.99,1.21,6.6
143
+ visformer_tiny,224,9194.72,111.341,1024,10.32,1.27,5.72
144
+ dla46x_c,224,9155.27,111.834,1024,1.07,0.54,5.66
145
+ crossvit_9_dagger_240,240,9081.92,112.735,1024,8.78,1.99,9.97
146
+ mobilenetv4_conv_medium,256,9068.95,112.899,1024,9.72,1.1,7.58
147
+ pvt_v2_b0,224,8956.42,114.305,1024,3.67,0.57,7.99
148
+ resnet14t,224,8930.87,114.632,1024,10.08,1.69,5.8
149
+ efficientnet_b0,224,8926.12,114.703,1024,5.29,0.4,6.75
150
+ mnasnet_140,224,8908.91,114.927,1024,7.12,0.6,7.71
151
+ fbnetv3_b,224,8904.43,114.985,1024,8.6,0.42,6.97
152
+ pit_s_224,224,8898.37,115.049,1024,23.46,2.88,11.56
153
+ pit_s_distilled_224,224,8880.77,115.275,1024,24.04,2.9,11.64
154
+ mobilevitv2_050,256,8864.77,115.498,1024,1.37,0.48,8.04
155
+ cs3darknet_focus_m,256,8810.63,116.207,1024,9.3,1.98,4.89
156
+ tf_efficientnet_lite0,224,8709.74,117.542,1024,4.65,0.4,6.74
157
+ dla60x_c,224,8644.34,118.444,1024,1.32,0.59,6.01
158
+ efficientnet_b1_pruned,240,8640.04,118.503,1024,6.33,0.4,6.21
159
+ efficientvit_b1,224,8637.15,118.543,1024,9.1,0.53,7.25
160
+ convnext_pico,224,8576.11,119.388,1024,9.05,1.37,6.1
161
+ regnetz_005,224,8537.29,119.917,1024,7.12,0.52,5.86
162
+ rexnet_100,224,8512.86,120.248,1024,4.8,0.41,7.44
163
+ repghostnet_150,224,8511.4,120.282,1024,6.58,0.32,6.0
164
+ mobilenetv1_125,256,8425.89,121.511,1024,6.27,1.16,8.23
165
+ repvit_m1,224,8403.37,121.801,1024,5.49,0.83,7.45
166
+ vit_base_patch32_clip_quickgelu_224,224,8389.12,122.035,1024,87.85,4.41,5.01
167
+ ese_vovnet19b_dw,224,8324.1,123.002,1024,6.54,1.34,8.25
168
+ resnet18,288,8261.03,123.927,1024,11.69,3.01,4.11
169
+ repvgg_a1,224,8255.57,124.011,1024,14.09,2.64,4.74
170
+ rexnetr_100,224,8248.96,124.109,1024,4.88,0.43,7.72
171
+ convnext_pico_ols,224,8234.27,124.344,1024,9.06,1.43,6.5
172
+ cs3darknet_m,256,8195.28,124.934,1024,9.31,2.08,5.28
173
+ resnet34,224,8192.41,124.968,1024,21.8,3.67,3.74
174
+ resnet50,160,8139.65,125.774,1024,25.56,2.1,5.67
175
+ mobilenetv4_hybrid_medium,224,8135.37,125.856,1024,11.07,0.98,6.84
176
+ vit_tiny_r_s16_p8_384,384,8129.06,125.953,1024,6.36,1.34,6.49
177
+ selecsls42,224,8099.05,126.406,1024,30.35,2.94,4.62
178
+ mobilenetv2_110d,224,8095.22,126.469,1024,4.52,0.45,8.71
179
+ nf_regnet_b0,256,8083.32,126.666,1024,8.76,0.64,5.58
180
+ selecsls42b,224,8049.57,127.183,1024,32.46,2.98,4.62
181
+ repvit_m0_9,224,7983.61,128.234,1024,5.49,0.83,7.45
182
+ tf_efficientnetv2_b2,208,7955.98,128.679,1024,10.1,1.06,6.0
183
+ vit_base_patch32_clip_256,256,7846.08,130.484,1024,87.86,5.76,6.65
184
+ hrnet_w18_small,224,7772.99,131.724,1024,13.19,1.61,5.72
185
+ convnext_atto,288,7751.5,132.09,1024,3.7,0.91,6.3
186
+ efficientnet_b0_gn,224,7748.62,132.137,1024,5.29,0.42,6.75
187
+ levit_384,224,7713.47,132.738,1024,39.13,2.36,6.26
188
+ seresnet18,288,7713.21,132.732,1024,11.78,3.01,4.11
189
+ gernet_m,224,7709.22,132.814,1024,21.14,3.02,5.24
190
+ vit_small_patch16_224,224,7693.38,133.087,1024,22.05,4.61,11.95
191
+ deit_small_patch16_224,224,7687.89,133.182,1024,22.05,4.61,11.95
192
+ resnet50d,160,7681.19,133.288,1024,25.58,2.22,6.08
193
+ tf_efficientnet_b0,224,7640.8,133.985,1024,5.29,0.4,6.75
194
+ deit_small_distilled_patch16_224,224,7620.92,134.352,1024,22.44,4.63,12.02
195
+ edgenext_x_small,256,7580.19,135.075,1024,2.34,0.54,5.93
196
+ seresnet34,224,7569.0,135.26,1024,21.96,3.67,3.74
197
+ ghostnetv2_100,224,7562.78,135.385,1024,6.16,0.18,4.55
198
+ skresnet18,224,7543.01,135.725,1024,11.96,1.82,3.24
199
+ mobilenetv2_140,224,7486.19,136.767,1024,6.11,0.6,9.57
200
+ semnasnet_140,224,7485.31,136.75,1024,6.11,0.6,8.87
201
+ legacy_seresnet34,224,7446.07,137.508,1024,21.96,3.67,3.74
202
+ fbnetv3_d,224,7426.0,137.878,1024,10.31,0.52,8.5
203
+ hgnetv2_b1,224,7416.62,138.054,1024,6.34,0.49,2.73
204
+ convnext_atto_ols,288,7379.61,138.745,1024,3.7,0.96,6.8
205
+ mixer_b32_224,224,7373.37,138.863,1024,60.29,3.24,6.29
206
+ vit_pwee_patch16_reg1_gap_256,256,7329.65,139.691,1024,15.25,4.37,15.87
207
+ resnet34d,224,7202.58,142.142,1024,21.82,3.91,4.54
208
+ levit_conv_384,224,7171.07,142.782,1024,39.13,2.36,6.26
209
+ efficientnet_lite1,240,7085.22,144.508,1024,5.42,0.62,10.14
210
+ efficientnet_b0,256,7082.56,144.564,1024,5.29,0.52,8.81
211
+ dla34,224,7078.3,144.651,1024,15.74,3.07,5.02
212
+ mobilenet_edgetpu_v2_m,224,7064.66,144.928,1024,8.46,1.85,8.15
213
+ mixnet_s,224,7052.75,145.177,1024,4.13,0.25,6.25
214
+ fbnetv3_b,256,7022.87,145.793,1024,8.6,0.55,9.1
215
+ seresnet50,160,6986.99,146.503,1024,28.09,2.1,5.69
216
+ cs3darknet_focus_m,288,6969.39,146.91,1024,9.3,2.51,6.19
217
+ eva02_tiny_patch14_224,224,6896.46,148.467,1024,5.5,1.7,9.14
218
+ ecaresnet50t,160,6885.36,148.705,1024,25.57,2.21,6.04
219
+ tf_efficientnetv2_b1,240,6876.19,148.891,1024,8.14,1.21,7.34
220
+ efficientvit_b1,256,6866.64,149.111,1024,9.1,0.69,9.46
221
+ selecsls60b,224,6862.29,149.193,1024,32.77,3.63,5.52
222
+ selecsls60,224,6832.72,149.84,1024,30.67,3.59,5.52
223
+ vit_wee_patch16_reg1_gap_256,256,6818.08,150.174,1024,13.42,3.83,13.9
224
+ mobilenetv4_conv_blur_medium,224,6816.69,150.205,1024,9.72,1.22,8.58
225
+ deit3_small_patch16_224,224,6812.85,150.286,1024,22.06,4.61,11.95
226
+ efficientnet_es,224,6810.09,150.348,1024,5.44,1.81,8.73
227
+ mixer_s16_224,224,6808.46,150.387,1024,18.53,3.79,5.97
228
+ efficientnet_blur_b0,224,6806.84,150.42,1024,5.29,0.43,8.72
229
+ tiny_vit_5m_224,224,6795.79,150.652,1024,12.08,1.28,11.25
230
+ repvit_m1_0,224,6784.56,150.903,1024,7.3,1.13,8.69
231
+ regnetx_016,224,6766.08,151.313,1024,9.19,1.62,7.93
232
+ resnet50,176,6703.95,152.718,1024,25.56,2.62,6.92
233
+ convnext_femto,288,6687.36,153.109,1024,5.22,1.3,7.56
234
+ flexivit_small,240,6660.66,153.724,1024,22.06,5.35,14.18
235
+ efficientnet_b0_g16_evos,224,6654.19,153.871,1024,8.11,1.01,7.42
236
+ resmlp_12_224,224,6650.72,153.939,1024,15.35,3.01,5.5
237
+ resnet26,224,6639.65,154.197,1024,16.0,2.36,7.35
238
+ resnet18d,288,6633.67,154.335,1024,11.71,3.41,5.43
239
+ repvit_m2,224,6610.13,154.886,1024,8.8,1.36,9.43
240
+ mobilenetv4_hybrid_medium,256,6587.07,155.438,1024,11.07,1.29,9.01
241
+ convnextv2_atto,224,6572.88,155.773,1024,3.71,0.55,3.81
242
+ resnetrs50,160,6568.89,155.826,1024,35.69,2.29,6.2
243
+ resnext50_32x4d,160,6540.25,156.542,1024,25.03,2.17,7.35
244
+ cs3darknet_m,288,6499.55,157.533,1024,9.31,2.63,6.69
245
+ rexnetr_130,224,6489.24,157.769,1024,7.61,0.68,9.81
246
+ rexnet_130,224,6476.24,158.089,1024,7.56,0.68,9.71
247
+ resnetaa34d,224,6464.72,158.371,1024,21.82,4.43,5.07
248
+ repghostnet_200,224,6438.16,159.023,1024,9.8,0.54,7.96
249
+ convnext_femto_ols,288,6405.94,159.835,1024,5.23,1.35,8.06
250
+ xcit_tiny_12_p16_224,224,6316.09,162.101,1024,6.72,1.24,6.29
251
+ gmixer_12_224,224,6305.79,162.375,1024,12.7,2.67,7.26
252
+ tf_mixnet_s,224,6303.08,162.43,1024,4.13,0.25,6.25
253
+ mobilenetv4_conv_aa_medium,256,6295.03,162.653,1024,9.72,1.58,10.3
254
+ repvit_m1_1,224,6283.35,162.917,1024,8.8,1.36,9.43
255
+ tf_efficientnet_es,224,6275.13,163.154,1024,5.44,1.81,8.73
256
+ efficientnet_b1,224,6258.96,163.587,1024,7.79,0.59,9.36
257
+ efficientnet_es_pruned,224,6256.91,163.63,1024,5.44,1.81,8.73
258
+ efficientnet_b0_g8_gn,224,6255.71,163.672,1024,6.56,0.66,6.75
259
+ convnext_nano,224,6249.99,163.825,1024,15.59,2.46,8.37
260
+ repvgg_b0,224,6210.44,164.855,1024,15.82,3.41,6.15
261
+ hgnetv2_b0,288,6150.46,166.477,1024,6.0,0.54,3.51
262
+ ecaresnet50d_pruned,224,6136.73,166.846,1024,19.94,2.53,6.43
263
+ hgnetv2_b4,224,6125.62,167.15,1024,19.8,2.75,6.7
264
+ tf_efficientnet_lite1,240,6121.06,167.26,1024,5.42,0.62,10.14
265
+ resnet26d,224,6072.87,168.59,1024,16.01,2.6,8.15
266
+ efficientnet_cc_b0_4e,224,6016.7,170.181,1024,13.31,0.41,9.42
267
+ efficientnet_cc_b0_8e,224,6014.04,170.256,1024,24.01,0.42,9.42
268
+ nf_regnet_b1,256,6005.41,170.494,1024,10.22,0.82,7.27
269
+ mobilenetv4_conv_medium,320,5999.7,170.66,1024,9.72,1.71,11.84
270
+ mobilenet_edgetpu_v2_l,224,5960.09,171.794,1024,10.92,2.55,9.05
271
+ vit_relpos_small_patch16_224,224,5956.13,171.907,1024,21.98,4.59,13.05
272
+ edgenext_x_small,288,5955.77,171.92,1024,2.34,0.68,7.5
273
+ regnety_016,224,5939.38,172.381,1024,11.2,1.63,8.04
274
+ darknet17,256,5937.82,172.434,1024,14.3,3.26,7.18
275
+ fbnetv3_d,256,5909.96,173.251,1024,10.31,0.68,11.1
276
+ vit_srelpos_small_patch16_224,224,5903.24,173.446,1024,21.97,4.59,12.16
277
+ nf_resnet26,224,5897.16,173.627,1024,16.0,2.41,7.35
278
+ mobilevitv2_075,256,5875.41,174.271,1024,2.87,1.05,12.06
279
+ nf_regnet_b2,240,5860.87,174.704,1024,14.31,0.97,7.23
280
+ ghostnetv2_130,224,5854.47,174.894,1024,8.96,0.28,5.9
281
+ efficientnet_b2_pruned,260,5817.56,176.002,1024,8.31,0.73,9.13
282
+ vit_base_patch32_plus_256,256,5803.45,176.415,1024,119.48,7.79,7.76
283
+ tiny_vit_11m_224,224,5800.65,176.504,1024,20.35,2.04,13.49
284
+ vit_tiny_patch16_384,384,5746.17,178.189,1024,5.79,4.7,25.39
285
+ gmlp_ti16_224,224,5736.13,178.502,1024,5.87,1.34,7.55
286
+ mobilenet_edgetpu_v2_m,256,5727.79,178.761,1024,8.46,2.42,10.65
287
+ mobilenetv2_120d,224,5720.65,178.972,1024,5.83,0.69,11.97
288
+ vit_relpos_small_patch16_rpn_224,224,5707.01,179.411,1024,21.97,4.59,13.05
289
+ resnetblur18,288,5705.18,179.458,1024,11.69,3.87,5.6
290
+ rexnetr_150,224,5686.03,180.063,1024,9.78,0.89,11.13
291
+ convnext_nano_ols,224,5675.19,180.417,1024,15.65,2.65,9.38
292
+ poolformer_s12,224,5647.45,181.292,1024,11.92,1.82,5.53
293
+ efficientformer_l1,224,5641.9,181.48,1024,12.29,1.3,5.53
294
+ convnextv2_femto,224,5629.97,181.868,1024,5.23,0.79,4.57
295
+ rexnet_150,224,5597.8,182.901,1024,9.73,0.9,11.21
296
+ efficientnet_lite2,260,5526.09,185.282,1024,6.09,0.89,12.9
297
+ efficientnet_b1,240,5515.48,185.64,1024,7.79,0.71,10.88
298
+ darknet21,256,5507.84,185.899,1024,20.86,3.93,7.47
299
+ edgenext_small,256,5504.6,186.01,1024,5.59,1.26,9.07
300
+ mobilenetv4_conv_blur_medium,256,5452.15,140.844,768,9.72,1.59,11.2
301
+ resnext50_32x4d,176,5448.72,187.904,1024,25.03,2.71,8.97
302
+ tf_efficientnet_cc_b0_8e,224,5378.31,190.379,1024,24.01,0.42,9.42
303
+ tf_efficientnet_cc_b0_4e,224,5350.74,191.362,1024,13.31,0.41,9.42
304
+ resnet101,160,5347.38,191.468,1024,44.55,4.0,8.28
305
+ gernet_l,256,5345.88,191.534,1024,31.08,4.57,8.0
306
+ efficientvit_b1,288,5336.48,191.869,1024,9.1,0.87,11.96
307
+ regnetz_005,288,5324.94,192.275,1024,7.12,0.86,9.68
308
+ hgnet_tiny,224,5289.94,193.552,1024,14.74,4.54,6.36
309
+ repvgg_a2,224,5234.66,195.589,1024,28.21,5.7,6.26
310
+ mobilenetv4_conv_large,256,5210.97,196.493,1024,32.59,2.86,12.14
311
+ convnext_pico,288,5203.54,196.772,1024,9.05,2.27,10.08
312
+ cs3darknet_focus_l,256,5201.05,196.865,1024,21.15,4.66,8.03
313
+ vit_relpos_base_patch32_plus_rpn_256,256,5200.74,196.879,1024,119.42,7.68,8.01
314
+ mobilenetv3_large_150d,256,5199.32,196.924,1024,14.62,1.03,12.35
315
+ resnest14d,224,5198.83,196.94,1024,10.61,2.76,7.33
316
+ vit_medium_patch16_clip_224,224,5170.05,198.043,1024,38.59,8.0,15.93
317
+ sedarknet21,256,5136.28,199.326,1024,20.95,3.93,7.47
318
+ tf_efficientnetv2_b2,260,5112.53,200.262,1024,10.1,1.72,9.84
319
+ regnetz_b16,224,5111.47,200.306,1024,9.72,1.45,9.95
320
+ efficientnetv2_rw_t,224,5105.62,200.547,1024,13.65,1.93,9.94
321
+ hgnetv2_b2,224,5071.32,201.903,1024,11.22,1.15,4.12
322
+ edgenext_small_rw,256,5063.53,202.213,1024,7.83,1.58,9.51
323
+ legacy_seresnext26_32x4d,224,5035.51,203.34,1024,16.79,2.49,9.39
324
+ mobilenetv4_hybrid_large_075,256,5015.06,204.168,1024,22.75,2.06,11.64
325
+ ecaresnet101d_pruned,224,5009.62,204.385,1024,24.88,3.48,7.69
326
+ tf_efficientnetv2_b3,240,5006.45,204.508,1024,14.36,1.93,9.95
327
+ crossvit_small_240,240,5004.67,204.591,1024,26.86,5.63,18.17
328
+ convnext_pico_ols,288,4990.08,205.192,1024,9.06,2.37,10.74
329
+ resnext26ts,256,4989.4,205.207,1024,10.3,2.43,10.52
330
+ resnet34,288,4989.09,205.219,1024,21.8,6.07,6.18
331
+ efficientnet_b1,256,4979.81,205.61,1024,7.79,0.77,12.22
332
+ pvt_v2_b1,224,4972.11,205.919,1024,14.01,2.12,15.39
333
+ mixnet_m,224,4961.12,206.387,1024,5.01,0.36,8.19
334
+ dpn48b,224,4946.37,207.002,1024,9.13,1.69,8.92
335
+ sam2_hiera_tiny,224,4939.34,207.285,1024,26.85,4.91,17.12
336
+ nf_ecaresnet26,224,4936.32,207.425,1024,16.0,2.41,7.36
337
+ eca_resnext26ts,256,4936.16,207.428,1024,10.3,2.43,10.52
338
+ mobilevit_xs,256,4915.37,156.229,768,2.32,1.05,16.33
339
+ cs3darknet_l,256,4898.39,209.032,1024,21.16,4.86,8.55
340
+ tf_efficientnet_b1,240,4891.58,209.309,1024,7.79,0.71,10.88
341
+ nf_seresnet26,224,4877.41,209.923,1024,17.4,2.41,7.36
342
+ gcresnext26ts,256,4873.11,210.116,1024,10.48,2.43,10.53
343
+ ecaresnetlight,224,4830.89,211.948,1024,30.16,4.11,8.42
344
+ seresnext26ts,256,4825.46,212.166,1024,10.39,2.43,10.52
345
+ tf_efficientnet_lite2,260,4812.46,212.754,1024,6.09,0.89,12.9
346
+ resnet26t,256,4797.54,213.414,1024,16.01,3.35,10.52
347
+ convnext_tiny,224,4796.99,213.45,1024,28.59,4.47,13.44
348
+ coatnext_nano_rw_224,224,4780.38,214.189,1024,14.7,2.47,12.8
349
+ ecaresnext50t_32x4d,224,4760.92,215.063,1024,15.41,2.7,10.09
350
+ ecaresnext26t_32x4d,224,4760.44,215.086,1024,15.41,2.7,10.09
351
+ ghostnetv2_160,224,4741.68,215.94,1024,12.39,0.42,7.23
352
+ mobileone_s1,224,4716.61,217.083,1024,4.83,0.86,9.67
353
+ efficientnet_b2,256,4713.06,217.249,1024,9.11,0.89,12.81
354
+ ese_vovnet19b_dw,288,4711.0,217.348,1024,6.54,2.22,13.63
355
+ vit_little_patch16_reg1_gap_256,256,4683.45,218.614,1024,22.52,6.27,18.06
356
+ gc_efficientnetv2_rw_t,224,4667.78,219.357,1024,13.68,1.94,9.97
357
+ nf_regnet_b1,288,4666.05,219.44,1024,10.22,1.02,9.2
358
+ vit_small_resnet26d_224,224,4663.1,219.577,1024,63.61,5.07,11.12
359
+ vit_little_patch16_reg4_gap_256,256,4660.7,219.682,1024,22.52,6.35,18.33
360
+ efficientnet_b3_pruned,300,4660.19,219.714,1024,9.86,1.04,11.86
361
+ seresnext26t_32x4d,224,4649.25,220.222,1024,16.81,2.7,10.09
362
+ tf_mixnet_m,224,4635.47,220.876,1024,5.01,0.36,8.19
363
+ crossvit_15_240,240,4634.12,220.945,1024,27.53,5.81,19.77
364
+ vit_small_r26_s32_224,224,4628.86,221.202,1024,36.43,3.56,9.85
365
+ seresnet34,288,4622.08,221.495,1024,21.96,6.07,6.18
366
+ vit_relpos_medium_patch16_cls_224,224,4616.98,221.77,1024,38.76,8.03,18.24
367
+ tresnet_m,224,4615.66,221.824,1024,31.39,5.75,7.31
368
+ deit3_medium_patch16_224,224,4613.13,221.958,1024,38.85,8.0,15.93
369
+ seresnext26d_32x4d,224,4594.08,222.859,1024,16.81,2.73,10.19
370
+ hgnetv2_b1,288,4576.22,223.749,1024,6.34,0.82,4.51
371
+ cs3sedarknet_l,256,4561.19,224.486,1024,21.91,4.86,8.56
372
+ levit_512,224,4507.01,227.185,1024,95.17,5.64,10.22
373
+ nf_regnet_b2,272,4499.96,227.541,1024,14.31,1.22,9.27
374
+ repvit_m3,224,4488.27,228.109,1024,10.68,1.89,13.94
375
+ selecsls84,224,4485.13,228.28,1024,50.95,5.9,7.57
376
+ coatnet_pico_rw_224,224,4472.02,228.953,1024,10.85,2.05,14.62
377
+ resnetv2_50,224,4451.45,230.006,1024,25.55,4.11,11.11
378
+ mobilevitv2_100,256,4424.26,173.574,768,4.9,1.84,16.08
379
+ wide_resnet50_2,176,4418.37,231.739,1024,68.88,7.29,8.97
380
+ hiera_tiny_224,224,4417.32,231.797,1024,27.91,4.91,17.13
381
+ coat_lite_tiny,224,4413.07,232.018,1024,5.72,1.6,11.65
382
+ resnet101,176,4401.33,232.627,1024,44.55,4.92,10.08
383
+ crossvit_15_dagger_240,240,4395.44,232.947,1024,28.21,6.13,20.43
384
+ vgg11,224,4384.0,233.545,1024,132.86,7.61,7.44
385
+ resnet34d,288,4379.98,233.76,1024,21.82,6.47,7.51
386
+ eca_botnext26ts_256,256,4372.52,234.17,1024,10.59,2.46,11.6
387
+ ecaresnet26t,256,4365.68,234.537,1024,16.01,3.35,10.53
388
+ convit_tiny,224,4364.79,234.585,1024,5.71,1.26,7.94
389
+ skresnet34,224,4357.69,234.957,1024,22.28,3.67,5.13
390
+ vovnet39a,224,4329.41,236.505,1024,22.6,7.09,6.73
391
+ convnextv2_pico,224,4303.27,237.943,1024,9.07,1.37,6.1
392
+ eca_halonext26ts,256,4303.05,237.954,1024,10.76,2.44,11.46
393
+ cspresnet50,256,4286.58,238.859,1024,21.62,4.54,11.5
394
+ fbnetv3_g,240,4271.76,239.698,1024,16.62,1.28,14.87
395
+ fastvit_t8,256,4268.46,239.88,1024,4.03,0.7,8.63
396
+ dla60,224,4267.0,239.958,1024,22.04,4.26,10.16
397
+ resnetv2_50t,224,4244.14,241.24,1024,25.57,4.32,11.82
398
+ regnetx_032,224,4235.6,241.712,1024,15.3,3.2,11.37
399
+ hrnet_w18_small_v2,224,4233.72,241.85,1024,15.6,2.62,9.65
400
+ levit_512d,224,4224.89,242.354,1024,92.5,5.85,11.3
401
+ resnet32ts,256,4223.86,242.395,1024,17.96,4.63,11.58
402
+ mobilenetv4_hybrid_medium,320,4223.22,242.451,1024,11.07,2.05,14.36
403
+ resnet50,224,4217.6,242.763,1024,25.56,4.11,11.11
404
+ levit_conv_512,224,4205.6,243.467,1024,95.17,5.64,10.22
405
+ resnetv2_50d,224,4198.97,243.837,1024,25.57,4.35,11.92
406
+ efficientvit_b2,224,4196.18,244.014,1024,24.33,1.6,14.62
407
+ lambda_resnet26t,256,4192.89,244.204,1024,10.96,3.02,11.87
408
+ coat_lite_mini,224,4171.78,245.439,1024,11.01,2.0,12.25
409
+ resnet33ts,256,4170.49,245.507,1024,19.68,4.76,11.66
410
+ regnety_032,224,4166.74,245.728,1024,19.44,3.2,11.26
411
+ botnet26t_256,256,4164.92,245.843,1024,12.49,3.32,11.98
412
+ halonet26t,256,4157.99,246.255,1024,12.48,3.19,11.69
413
+ ese_vovnet39b,224,4150.95,246.672,1024,24.57,7.09,6.74
414
+ eca_vovnet39b,224,4144.28,247.067,1024,22.6,7.09,6.74
415
+ dpn68,224,4139.9,247.329,1024,12.61,2.35,10.47
416
+ hgnetv2_b3,224,4136.91,247.511,1024,16.29,1.78,5.07
417
+ coatnet_nano_cc_224,224,4135.45,247.589,1024,13.76,2.24,15.02
418
+ rexnetr_200,224,4135.03,185.7,768,16.52,1.59,15.11
419
+ rexnet_200,224,4128.12,186.013,768,16.37,1.56,14.91
420
+ vit_relpos_medium_patch16_224,224,4120.0,248.525,1024,38.75,7.97,17.02
421
+ eca_resnet33ts,256,4101.91,249.618,1024,19.68,4.76,11.66
422
+ vit_srelpos_medium_patch16_224,224,4077.53,251.113,1024,38.74,7.96,16.21
423
+ gcresnet33ts,256,4074.93,251.273,1024,19.88,4.76,11.68
424
+ dpn68b,224,4071.04,251.513,1024,12.61,2.35,10.47
425
+ cs3darknet_focus_l,288,4065.11,251.879,1024,21.15,5.9,10.16
426
+ resnet26,288,4059.09,252.243,1024,16.0,3.9,12.15
427
+ seresnet33ts,256,4037.86,253.571,1024,19.78,4.76,11.66
428
+ resnet50t,224,4031.22,253.987,1024,25.57,4.32,11.82
429
+ visformer_small,224,4021.8,254.581,1024,40.22,4.88,11.43
430
+ resnet50d,224,3994.33,256.335,1024,25.58,4.35,11.92
431
+ davit_tiny,224,3979.92,192.952,768,28.36,4.54,18.89
432
+ cspresnet50w,256,3979.02,257.332,1024,28.12,5.04,12.19
433
+ resnetaa34d,288,3962.03,258.426,1024,21.82,7.33,8.38
434
+ resnet50c,224,3951.72,259.096,1024,25.58,4.35,11.92
435
+ resnetv2_50x1_bit,224,3946.63,259.432,1024,25.55,4.23,11.11
436
+ cspresnet50d,256,3928.57,260.626,1024,21.64,4.86,12.55
437
+ efficientnet_b1,288,3916.7,261.423,1024,7.79,0.97,15.46
438
+ resnext26ts,288,3914.08,261.582,1024,10.3,3.07,13.31
439
+ convnext_tiny_hnf,224,3907.17,262.058,1024,28.59,4.47,13.44
440
+ vit_base_resnet26d_224,224,3907.09,262.061,1024,101.4,6.97,13.16
441
+ bat_resnext26ts,256,3900.03,262.512,1024,10.73,2.53,12.51
442
+ resnetaa50,224,3892.01,263.074,1024,25.56,5.15,11.64
443
+ coatnet_nano_rw_224,224,3890.95,263.144,1024,15.14,2.41,15.41
444
+ regnetv_040,224,3888.77,263.292,1024,20.64,4.0,12.29
445
+ vit_relpos_medium_patch16_rpn_224,224,3883.35,263.671,1024,38.73,7.97,17.02
446
+ twins_svt_small,224,3875.45,264.197,1024,24.06,2.94,13.75
447
+ eca_resnext26ts,288,3875.29,264.216,1024,10.3,3.07,13.32
448
+ mobileone_s2,224,3859.75,265.286,1024,7.88,1.34,11.55
449
+ hgnetv2_b4,288,3849.96,265.959,1024,19.8,4.54,11.08
450
+ haloregnetz_b,224,3847.23,266.147,1024,11.68,1.97,11.94
451
+ legacy_seresnet50,224,3845.77,266.241,1024,28.09,3.88,10.6
452
+ cs3darknet_l,288,3845.41,266.274,1024,21.16,6.16,10.83
453
+ efficientnet_cc_b1_8e,240,3844.11,266.369,1024,39.72,0.75,15.44
454
+ tf_efficientnet_em,240,3839.63,266.664,1024,6.9,3.04,14.34
455
+ mobilevit_s,256,3835.87,200.199,768,5.58,2.03,19.94
456
+ levit_conv_512d,224,3830.33,267.323,1024,92.5,5.85,11.3
457
+ vgg11_bn,224,3829.44,267.372,1024,132.87,7.62,7.44
458
+ tf_efficientnet_b2,260,3827.63,267.5,1024,9.11,1.02,13.83
459
+ gcresnext26ts,288,3824.01,267.764,1024,10.48,3.07,13.33
460
+ regnety_040,224,3819.46,268.036,1024,20.65,4.0,12.29
461
+ resnet152,160,3816.3,268.292,1024,60.19,5.9,11.51
462
+ resnetv2_50d_gn,224,3798.52,269.547,1024,25.57,4.38,11.92
463
+ convnext_nano,288,3784.13,270.586,1024,15.59,4.06,13.84
464
+ mixnet_l,224,3783.23,270.648,1024,7.33,0.58,10.84
465
+ ecaresnet50d_pruned,288,3782.37,270.708,1024,19.94,4.19,10.61
466
+ seresnext26ts,288,3781.76,270.745,1024,10.39,3.07,13.32
467
+ resnet50_gn,224,3769.97,271.59,1024,25.56,4.14,11.11
468
+ res2net50_48w_2s,224,3744.75,273.407,1024,25.29,4.18,11.72
469
+ repvit_m1_5,224,3729.62,274.53,1024,14.64,2.31,15.7
470
+ tiny_vit_21m_224,224,3725.54,274.83,1024,33.22,4.29,20.08
471
+ resnest26d,224,3723.36,274.993,1024,17.07,3.64,9.97
472
+ resnet26d,288,3717.52,275.422,1024,16.01,4.29,13.48
473
+ efficientnet_b2,288,3712.61,275.797,1024,9.11,1.12,16.2
474
+ efficientnet_em,240,3709.85,275.987,1024,6.9,3.04,14.34
475
+ vovnet57a,224,3706.22,276.265,1024,36.64,8.95,7.52
476
+ resnetaa50d,224,3694.51,277.138,1024,25.58,5.39,12.44
477
+ resnet50_clip_gap,224,3684.19,277.913,1024,23.53,5.39,12.44
478
+ regnetx_040,224,3677.93,278.37,1024,22.12,3.99,12.2
479
+ convnextv2_atto,288,3666.13,279.278,1024,3.71,0.91,6.3
480
+ inception_v3,299,3663.18,279.511,1024,23.83,5.73,8.97
481
+ hiera_small_224,224,3657.67,279.941,1024,35.01,6.42,20.75
482
+ gcvit_xxtiny,224,3656.12,280.058,1024,12.0,2.14,15.36
483
+ twins_pcpvt_small,224,3652.03,280.363,1024,24.11,3.83,18.08
484
+ seresnet50,224,3649.19,280.58,1024,28.09,4.11,11.13
485
+ resnetblur50,224,3629.01,282.14,1024,25.56,5.16,12.02
486
+ densenet121,224,3614.99,283.247,1024,7.98,2.87,6.9
487
+ vit_medium_patch16_gap_240,240,3607.76,283.806,1024,44.4,9.22,18.81
488
+ ecaresnet50t,224,3581.41,285.899,1024,25.57,4.32,11.83
489
+ cs3sedarknet_l,288,3578.3,286.15,1024,21.91,6.16,10.83
490
+ vit_base_r26_s32_224,224,3567.73,286.987,1024,101.38,6.81,12.36
491
+ mobilenetv4_conv_large,320,3558.21,287.765,1024,32.59,4.47,18.97
492
+ inception_next_tiny,224,3555.09,288.019,1024,28.06,4.19,11.98
493
+ ecaresnet50d,224,3553.36,288.157,1024,25.58,4.35,11.93
494
+ mobileone_s0,224,3552.46,288.227,1024,5.29,1.09,15.48
495
+ coatnet_rmlp_nano_rw_224,224,3537.96,289.405,1024,15.15,2.62,20.34
496
+ tf_mixnet_l,224,3536.96,289.484,1024,7.33,0.58,10.84
497
+ dla60x,224,3535.6,289.605,1024,17.35,3.54,13.8
498
+ ese_vovnet57b,224,3532.21,289.885,1024,38.61,8.95,7.52
499
+ edgenext_small,320,3521.78,290.744,1024,5.59,1.97,14.16
500
+ mobilevitv2_125,256,3514.09,218.532,768,7.48,2.86,20.1
501
+ resnet50_clip,224,3513.39,291.426,1024,38.32,6.14,12.98
502
+ vit_base_patch32_384,384,3507.62,291.907,1024,88.3,13.06,16.5
503
+ nf_regnet_b3,288,3505.93,292.061,1024,18.59,1.67,11.84
504
+ vit_base_patch32_clip_384,384,3505.16,292.11,1024,88.3,13.06,16.5
505
+ hgnetv2_b5,224,3503.99,292.219,1024,39.57,6.56,11.19
506
+ tf_efficientnet_cc_b1_8e,240,3502.1,292.382,1024,39.72,0.75,15.44
507
+ seresnet50t,224,3492.13,293.202,1024,28.1,4.32,11.83
508
+ vit_large_patch32_224,224,3482.36,294.033,1024,305.51,15.39,13.3
509
+ vit_medium_patch16_reg1_gap_256,256,3459.44,295.982,1024,38.88,10.63,22.26
510
+ vit_medium_patch16_reg4_gap_256,256,3450.76,296.726,1024,38.88,10.76,22.6
511
+ resnetblur50d,224,3438.8,297.748,1024,25.58,5.4,12.82
512
+ cs3darknet_focus_x,256,3437.61,297.861,1024,35.02,8.03,10.69
513
+ eca_nfnet_l0,224,3436.69,297.944,1024,24.14,4.35,10.47
514
+ convnext_nano_ols,288,3435.64,298.035,1024,15.65,4.38,15.5
515
+ resnetrs50,224,3434.24,298.144,1024,35.69,4.48,12.14
516
+ resnext50_32x4d,224,3427.06,298.772,1024,25.03,4.26,14.4
517
+ coatnet_0_rw_224,224,3425.88,298.879,1024,27.44,4.43,18.73
518
+ hgnet_small,224,3416.04,299.744,1024,24.36,8.53,8.79
519
+ cspresnext50,256,3409.98,300.268,1024,20.57,4.05,15.86
520
+ dla60_res2net,224,3403.55,300.841,1024,20.85,4.15,12.34
521
+ res2net50_26w_4s,224,3401.73,300.973,1024,25.7,4.28,12.61
522
+ maxvit_pico_rw_256,256,3401.1,225.792,768,7.46,1.83,22.3
523
+ resmlp_24_224,224,3399.81,301.162,1024,30.02,5.96,10.91
524
+ res2net50_14w_8s,224,3398.1,301.29,1024,25.06,4.21,13.28
525
+ maxvit_rmlp_pico_rw_256,256,3396.4,226.105,768,7.52,1.85,24.86
526
+ resnet50s,224,3396.16,301.49,1024,25.68,5.47,13.52
527
+ convnextv2_femto,288,3395.92,301.516,1024,5.23,1.3,7.56
528
+ regnety_040_sgn,224,3390.72,301.971,1024,20.65,4.03,12.29
529
+ xcit_tiny_24_p16_224,224,3375.55,303.331,1024,12.12,2.34,11.82
530
+ efficientvit_b2,256,3369.65,303.868,1024,24.33,2.09,19.03
531
+ nfnet_f0,192,3349.0,305.741,1024,71.49,7.21,10.16
532
+ nfnet_l0,224,3341.81,306.39,1024,35.07,4.36,10.47
533
+ hieradet_small,256,3335.5,230.232,768,34.72,8.51,27.76
534
+ edgenext_base,256,3315.68,308.815,1024,18.51,3.85,15.58
535
+ resnest50d_1s4x24d,224,3309.67,309.367,1024,25.68,4.43,13.57
536
+ efficientnet_lite3,300,3296.2,155.31,512,8.2,1.65,21.85
537
+ dla60_res2next,224,3295.03,310.75,1024,17.03,3.49,13.17
538
+ mobilenetv3_large_150d,320,3287.23,233.606,768,14.62,1.61,19.29
539
+ cs3darknet_x,256,3280.58,312.118,1024,35.05,8.38,11.35
540
+ crossvit_18_240,240,3276.68,312.485,1024,43.27,9.05,26.26
541
+ lambda_resnet26rpt_256,256,3275.72,234.435,768,10.99,3.16,11.87
542
+ resnet32ts,288,3273.65,312.761,1024,17.96,5.86,14.65
543
+ resnext50d_32x4d,224,3267.01,313.407,1024,25.05,4.5,15.2
544
+ densenetblur121d,224,3260.14,314.077,1024,8.0,3.11,7.9
545
+ res2net50d,224,3253.03,314.728,1024,25.72,4.52,13.41
546
+ darknetaa53,256,3249.03,315.15,1024,36.02,7.97,12.39
547
+ edgenext_small_rw,320,3242.29,315.807,1024,7.83,2.46,14.85
548
+ resnet33ts,288,3239.71,316.048,1024,19.68,6.02,14.75
549
+ seresnetaa50d,224,3235.08,316.472,1024,28.11,5.4,12.46
550
+ tf_efficientnetv2_b3,300,3232.69,316.734,1024,14.36,3.04,15.74
551
+ focalnet_tiny_srf,224,3226.46,317.357,1024,28.43,4.42,16.32
552
+ efficientnetv2_rw_t,288,3225.91,317.41,1024,13.65,3.19,16.42
553
+ eva02_small_patch14_224,224,3225.8,317.422,1024,21.62,6.14,18.28
554
+ gcresnext50ts,256,3223.82,317.616,1024,15.67,3.75,15.46
555
+ res2next50,224,3218.65,318.1,1024,24.67,4.2,13.71
556
+ gcresnet50t,256,3218.31,318.16,1024,25.9,5.42,14.67
557
+ gmixer_24_224,224,3213.22,318.663,1024,24.72,5.28,14.45
558
+ eca_resnet33ts,288,3197.58,320.22,1024,19.68,6.02,14.76
559
+ efficientvit_l1,224,3180.29,321.964,1024,52.65,5.27,15.85
560
+ mobileone_s3,224,3179.46,322.047,1024,10.17,1.94,13.85
561
+ repvgg_b1g4,224,3167.88,323.216,1024,39.97,8.15,10.64
562
+ gcresnet33ts,288,3167.19,323.296,1024,19.88,6.02,14.78
563
+ crossvit_18_dagger_240,240,3155.34,324.503,1024,44.27,9.5,27.03
564
+ vit_medium_patch16_gap_256,256,3146.86,325.379,1024,38.86,10.59,22.15
565
+ resnet152,176,3145.18,325.546,1024,60.19,7.22,13.99
566
+ hgnetv2_b2,288,3131.53,326.979,1024,11.22,1.89,6.8
567
+ seresnet33ts,288,3123.88,327.768,1024,19.78,6.02,14.76
568
+ regnetz_b16,288,3121.28,328.043,1024,9.72,2.39,16.43
569
+ legacy_seresnext50_32x4d,224,3106.31,329.614,1024,27.56,4.26,14.42
570
+ nextvit_small,224,3105.38,329.731,1024,31.76,5.81,18.44
571
+ convnextv2_nano,224,3104.37,329.834,1024,15.62,2.46,8.37
572
+ xcit_nano_12_p16_384,384,3100.3,330.26,1024,3.05,1.64,12.15
573
+ regnetz_c16,256,3087.39,331.642,1024,13.46,2.51,16.57
574
+ resnet26t,320,3086.83,331.703,1024,16.01,5.24,16.44
575
+ pit_b_distilled_224,224,3072.82,333.214,1024,74.79,12.5,33.07
576
+ poolformerv2_s12,224,3071.46,333.361,1024,11.89,1.83,5.53
577
+ cs3sedarknet_x,256,3067.22,333.833,1024,35.4,8.38,11.35
578
+ ecaresnet101d_pruned,288,3064.98,334.074,1024,24.88,5.75,12.71
579
+ pit_b_224,224,3054.02,335.268,1024,73.76,12.42,32.94
580
+ eva02_tiny_patch14_336,336,3050.79,335.63,1024,5.76,4.68,27.16
581
+ gc_efficientnetv2_rw_t,288,3042.11,336.591,1024,13.68,3.2,16.45
582
+ resnetrs101,192,3039.94,336.818,1024,63.62,6.04,12.7
583
+ fbnetv3_g,288,3035.76,337.294,1024,16.62,1.77,21.09
584
+ resnet51q,256,3034.29,337.446,1024,35.7,6.38,16.55
585
+ seresnext50_32x4d,224,3033.27,337.56,1024,27.56,4.26,14.42
586
+ rdnet_tiny,224,3025.51,338.425,1024,23.86,5.06,15.98
587
+ darknet53,256,2995.64,341.801,1024,41.61,9.31,12.39
588
+ ecaresnetlight,288,2980.87,343.501,1024,30.16,6.79,13.91
589
+ xcit_small_12_p16_224,224,2979.57,343.65,1024,26.25,4.82,12.58
590
+ nf_ecaresnet50,224,2969.89,344.775,1024,25.56,4.21,11.13
591
+ convnext_small,224,2968.52,344.929,1024,50.22,8.71,21.56
592
+ coatnet_bn_0_rw_224,224,2961.32,345.762,1024,27.44,4.67,22.04
593
+ densenet169,224,2960.88,345.825,1024,14.15,3.4,7.3
594
+ focalnet_tiny_lrf,224,2958.28,346.126,1024,28.65,4.49,17.76
595
+ pvt_v2_b2,224,2957.1,346.245,1024,25.36,4.05,27.53
596
+ nf_seresnet50,224,2955.47,346.454,1024,28.09,4.21,11.13
597
+ fastvit_t12,256,2935.13,348.86,1024,7.55,1.42,12.42
598
+ hgnet_tiny,288,2932.95,349.114,1024,14.74,7.51,10.51
599
+ regnetx_080,224,2928.56,349.629,1024,39.57,8.02,14.06
600
+ vit_base_resnet50d_224,224,2926.61,349.865,1024,110.97,8.73,16.92
601
+ convnext_tiny,288,2917.07,351.019,1024,28.59,7.39,22.21
602
+ mobilevitv2_150,256,2915.63,175.59,512,10.59,4.09,24.11
603
+ coatnet_rmlp_0_rw_224,224,2907.73,352.136,1024,27.45,4.72,24.89
604
+ skresnet50,224,2900.52,353.008,1024,25.8,4.11,12.5
605
+ poolformer_s24,224,2899.97,353.076,1024,21.39,3.41,10.68
606
+ mobilenetv4_hybrid_medium,384,2891.5,354.122,1024,11.07,3.01,21.18
607
+ nf_regnet_b3,320,2880.71,355.451,1024,18.59,2.05,14.61
608
+ tf_efficientnet_lite3,300,2879.81,177.761,512,8.2,1.65,21.85
609
+ resnet50_mlp,256,2876.92,355.906,1024,26.65,7.05,16.25
610
+ sehalonet33ts,256,2863.49,357.576,1024,13.69,3.55,14.7
611
+ gcvit_xtiny,224,2857.49,358.335,1024,19.98,2.93,20.26
612
+ cs3sedarknet_xdw,256,2854.33,358.703,1024,21.6,5.97,17.18
613
+ seresnext26t_32x4d,288,2852.69,358.931,1024,16.81,4.46,16.68
614
+ gmlp_s16_224,224,2850.78,359.18,1024,19.42,4.42,15.1
615
+ deit_base_patch16_224,224,2847.84,359.551,1024,86.57,17.58,23.9
616
+ resnetv2_101,224,2835.29,361.131,1024,44.54,7.83,16.23
617
+ deit_base_distilled_patch16_224,224,2834.0,361.303,1024,87.34,17.68,24.05
618
+ seresnext26d_32x4d,288,2821.4,362.912,1024,16.81,4.51,16.85
619
+ nf_resnet50,256,2816.37,363.566,1024,25.56,5.46,14.52
620
+ regnetx_064,224,2813.74,363.901,1024,26.21,6.49,16.37
621
+ ecaresnet26t,320,2807.22,364.749,1024,16.01,5.24,16.44
622
+ dla102,224,2806.5,364.845,1024,33.27,7.19,14.18
623
+ nest_tiny,224,2805.62,364.963,1024,17.06,5.83,25.48
624
+ ecaresnet50t,256,2802.02,365.426,1024,25.57,5.64,15.45
625
+ efficientnet_b3,288,2792.77,183.313,512,12.23,1.63,21.49
626
+ wide_resnet50_2,224,2792.33,366.693,1024,68.88,11.43,14.4
627
+ vit_base_patch16_224_miil,224,2791.53,366.793,1024,94.4,17.59,23.91
628
+ skresnet50d,224,2791.4,366.81,1024,25.82,4.36,13.31
629
+ vit_base_patch16_224,224,2787.15,367.372,1024,86.57,17.58,23.9
630
+ vit_base_patch16_clip_224,224,2783.55,367.849,1024,86.57,17.58,23.9
631
+ fastvit_sa12,256,2781.99,368.06,1024,11.58,1.96,14.03
632
+ vgg13,224,2780.3,368.277,1024,133.05,11.31,12.25
633
+ resnet61q,256,2779.68,368.358,1024,36.85,7.8,17.01
634
+ fastvit_s12,256,2776.35,368.806,1024,9.47,1.82,13.67
635
+ maxxvit_rmlp_nano_rw_256,256,2775.33,276.707,768,16.78,4.37,26.05
636
+ cs3edgenet_x,256,2763.68,370.501,1024,47.82,11.53,12.92
637
+ lambda_resnet50ts,256,2759.83,371.018,1024,21.54,5.07,17.48
638
+ dm_nfnet_f0,192,2757.16,371.375,1024,71.49,7.21,10.16
639
+ nest_tiny_jx,224,2755.91,371.543,1024,17.06,5.83,25.48
640
+ rexnetr_300,224,2754.02,278.836,768,34.81,3.39,22.16
641
+ cspdarknet53,256,2752.11,372.057,1024,27.64,6.57,16.81
642
+ mixnet_xl,224,2741.75,373.463,1024,11.9,0.93,14.57
643
+ resnet101,224,2736.75,374.138,1024,44.55,7.83,16.23
644
+ resnetv2_50,288,2733.41,374.59,1024,25.55,6.79,18.37
645
+ repvgg_b1,224,2730.49,374.994,1024,57.42,13.16,10.64
646
+ coatnet_0_224,224,2722.37,188.051,512,25.04,4.58,24.01
647
+ resnetv2_101d,224,2718.59,376.633,1024,44.56,8.07,17.04
648
+ wide_resnet101_2,176,2695.47,379.875,1024,126.89,14.31,13.18
649
+ vit_base_mci_224,224,2690.01,380.639,1024,86.35,17.73,24.65
650
+ beitv2_base_patch16_224,224,2684.73,381.393,1024,86.53,17.58,23.9
651
+ res2net50_26w_6s,224,2681.29,381.848,1024,37.05,6.33,15.28
652
+ rexnet_300,224,2677.08,286.849,768,34.71,3.44,22.4
653
+ swin_tiny_patch4_window7_224,224,2659.27,385.037,1024,28.29,4.51,17.06
654
+ maxxvitv2_nano_rw_256,256,2649.05,289.896,768,23.7,6.26,23.05
655
+ beit_base_patch16_224,224,2644.21,387.233,1024,86.53,17.58,23.9
656
+ resnet101d,224,2638.05,388.135,1024,44.57,8.08,17.04
657
+ vit_base_patch32_clip_448,448,2613.19,391.83,1024,88.34,17.93,23.9
658
+ resnet101c,224,2612.47,391.937,1024,44.57,8.08,17.04
659
+ mixer_b16_224,224,2607.54,392.687,1024,59.88,12.62,14.53
660
+ vit_relpos_base_patch16_clsgap_224,224,2605.09,393.051,1024,86.43,17.6,25.12
661
+ efficientnetv2_s,288,2604.83,393.096,1024,21.46,4.75,20.13
662
+ twins_pcpvt_base,224,2604.17,393.184,1024,43.83,6.68,25.25
663
+ efficientvit_b2,288,2600.38,393.768,1024,24.33,2.64,24.03
664
+ convnextv2_pico,288,2600.23,393.79,1024,9.07,2.27,10.08
665
+ vit_relpos_base_patch16_cls_224,224,2599.77,393.861,1024,86.43,17.6,25.12
666
+ efficientformer_l3,224,2594.92,394.593,1024,31.41,3.93,12.01
667
+ resnet50,288,2592.09,395.018,1024,25.56,6.8,18.37
668
+ maxvit_nano_rw_256,256,2589.91,296.516,768,15.45,4.46,30.28
669
+ maxvit_rmlp_nano_rw_256,256,2586.52,296.905,768,15.5,4.47,31.92
670
+ deit3_base_patch16_224,224,2583.38,396.36,1024,86.59,17.58,23.9
671
+ pvt_v2_b2_li,224,2575.01,397.638,1024,22.55,3.91,27.6
672
+ regnety_032,288,2560.26,399.931,1024,19.44,5.29,18.61
673
+ efficientvit_l2,224,2558.08,400.28,1024,63.71,6.97,19.58
674
+ rexnetr_200,288,2556.5,200.242,512,16.52,2.62,24.96
675
+ cs3darknet_x,288,2555.27,400.72,1024,35.05,10.6,14.36
676
+ darknetaa53,288,2551.71,401.279,1024,36.02,10.08,15.68
677
+ tresnet_v2_l,224,2548.5,401.775,1024,46.17,8.85,16.34
678
+ hgnetv2_b3,288,2548.29,401.817,1024,16.29,2.94,8.38
679
+ cs3se_edgenet_x,256,2541.11,402.95,1024,50.72,11.53,12.94
680
+ resnest50d,224,2525.71,405.398,1024,27.48,5.4,14.36
681
+ gcresnext50ts,288,2518.28,406.608,1024,15.67,4.75,19.57
682
+ gcresnet50t,288,2515.86,406.998,1024,25.9,6.86,18.57
683
+ xcit_nano_12_p8_224,224,2506.42,408.525,1024,3.05,2.16,15.71
684
+ vit_small_patch16_384,384,2502.77,409.123,1024,22.2,15.52,50.78
685
+ resnet101_clip_gap,224,2494.59,410.455,1024,42.52,9.11,17.56
686
+ hiera_base_224,224,2493.69,410.614,1024,51.52,9.4,30.42
687
+ resnetaa101d,224,2493.54,410.631,1024,44.57,9.12,17.56
688
+ resnetv2_101x1_bit,224,2493.42,410.646,1024,44.54,8.04,16.23
689
+ davit_small,224,2490.93,308.299,768,49.75,8.8,30.49
690
+ dpn68b,288,2486.84,411.746,1024,12.61,3.89,17.3
691
+ efficientnetv2_rw_s,288,2485.89,411.904,1024,23.94,4.91,21.41
692
+ hrnet_w18_ssld,224,2477.35,413.328,1024,21.3,4.32,16.31
693
+ mobilenetv4_conv_large,384,2476.57,413.456,1024,32.59,6.43,27.31
694
+ cait_xxs24_224,224,2472.37,414.153,1024,11.96,2.53,20.29
695
+ mobilevitv2_175,256,2468.71,207.376,512,14.25,5.54,28.13
696
+ resnet50t,288,2467.58,414.952,1024,25.57,7.14,19.53
697
+ vit_betwixt_patch16_reg1_gap_256,256,2466.98,415.052,1024,60.4,16.32,27.83
698
+ lamhalobotnet50ts_256,256,2464.83,415.423,1024,22.57,5.02,18.44
699
+ hrnet_w18,224,2463.39,415.645,1024,21.3,4.32,16.31
700
+ vit_base_patch16_siglip_gap_224,224,2459.11,416.38,1024,85.8,17.49,23.75
701
+ flexivit_base,240,2458.77,416.449,1024,86.59,20.29,28.36
702
+ vit_betwixt_patch16_reg4_gap_256,256,2454.92,417.093,1024,60.4,16.52,28.24
703
+ legacy_seresnet101,224,2446.63,418.514,1024,49.33,7.61,15.74
704
+ resnet50d,288,2439.85,419.669,1024,25.58,7.19,19.7
705
+ vit_base_patch16_siglip_224,224,2438.72,419.861,1024,92.88,17.73,24.06
706
+ resnet101_clip,224,2414.07,424.148,1024,56.26,9.81,18.08
707
+ tresnet_l,224,2412.56,424.417,1024,55.99,10.9,11.9
708
+ ese_vovnet39b,288,2409.2,318.758,768,24.57,11.71,11.13
709
+ darknet53,288,2407.72,425.266,1024,41.61,11.78,15.68
710
+ cs3sedarknet_x,288,2391.52,428.157,1024,35.4,10.6,14.37
711
+ mixer_l32_224,224,2388.54,428.693,1024,206.94,11.27,19.86
712
+ resnetblur101d,224,2387.51,428.868,1024,44.57,9.12,17.94
713
+ coat_lite_small,224,2386.34,429.081,1024,19.84,3.96,22.09
714
+ regnetv_040,288,2386.15,429.115,1024,20.64,6.6,20.3
715
+ swin_s3_tiny_224,224,2384.47,429.416,1024,28.33,4.64,19.13
716
+ regnety_080,224,2379.83,430.251,1024,39.18,8.0,17.97
717
+ resnetaa50,288,2379.68,430.28,1024,25.56,8.52,19.24
718
+ nextvit_base,224,2376.93,430.787,1024,44.82,8.29,23.71
719
+ vit_base_patch16_gap_224,224,2375.57,431.026,1024,86.57,17.49,25.59
720
+ sebotnet33ts_256,256,2374.22,161.709,384,13.7,3.89,17.46
721
+ resnet101s,224,2366.29,432.714,1024,44.67,9.19,18.64
722
+ convnext_tiny_hnf,288,2365.38,432.876,1024,28.59,7.39,22.21
723
+ mobileone_s4,224,2361.85,433.537,1024,14.95,3.04,17.74
724
+ regnety_040,288,2353.01,435.158,1024,20.65,6.61,20.3
725
+ vit_small_patch16_36x1_224,224,2352.87,435.189,1024,64.67,13.71,35.69
726
+ regnetv_064,224,2352.23,435.302,1024,30.58,6.39,16.41
727
+ vit_relpos_base_patch16_224,224,2344.88,436.673,1024,86.43,17.51,24.97
728
+ seresnet101,224,2343.32,436.959,1024,49.33,7.84,16.27
729
+ dla102x,224,2342.65,437.086,1024,26.31,5.89,19.42
730
+ vit_small_resnet50d_s16_224,224,2336.25,438.285,1024,57.53,13.48,24.82
731
+ ecaresnet101d,224,2325.28,440.352,1024,44.57,8.08,17.07
732
+ regnety_064,224,2324.93,440.376,1024,30.58,6.39,16.41
733
+ volo_d1_224,224,2323.65,440.668,1024,26.63,6.94,24.43
734
+ resnetv2_50d_gn,288,2323.37,440.707,1024,25.57,7.24,19.7
735
+ hiera_small_abswin_256,256,2323.14,440.761,1024,34.36,8.29,26.38
736
+ densenet201,224,2317.12,441.908,1024,20.01,4.34,7.85
737
+ resnet51q,288,2316.81,441.957,1024,35.7,8.07,20.94
738
+ efficientnet_b3,320,2307.8,221.837,512,12.23,2.01,26.52
739
+ resnet50_gn,288,2303.59,444.495,1024,25.56,6.85,18.37
740
+ halonet50ts,256,2300.62,445.077,1024,22.73,5.3,19.2
741
+ nf_resnet101,224,2293.33,446.489,1024,44.55,8.01,16.23
742
+ resnext101_32x8d,176,2291.04,446.931,1024,88.79,10.33,19.37
743
+ maxvit_tiny_rw_224,224,2283.98,336.235,768,29.06,5.11,33.11
744
+ resmlp_36_224,224,2277.0,449.683,1024,44.69,8.91,16.33
745
+ efficientnet_b3_gn,288,2272.8,225.253,512,11.73,1.74,23.35
746
+ vit_base_patch16_clip_quickgelu_224,224,2269.68,451.136,1024,86.19,17.58,23.9
747
+ legacy_xception,299,2267.92,338.617,768,22.86,8.4,35.83
748
+ tf_efficientnet_b3,300,2265.14,226.006,512,12.23,1.87,23.83
749
+ resnetaa50d,288,2259.31,453.205,1024,25.58,8.92,20.57
750
+ vitamin_small_224,224,2252.3,454.619,1024,22.03,5.92,26.38
751
+ vit_medium_patch16_rope_reg1_gap_256,256,2246.04,455.888,1024,38.74,10.63,22.26
752
+ vgg13_bn,224,2243.6,456.38,1024,133.05,11.33,12.25
753
+ sequencer2d_s,224,2241.43,456.822,1024,27.65,4.96,11.31
754
+ seresnet50,288,2233.67,458.373,1024,28.09,6.8,18.39
755
+ efficientvit_b3,224,2229.48,459.279,1024,48.65,3.99,26.9
756
+ repvgg_b2g4,224,2226.86,459.812,1024,61.76,12.63,12.9
757
+ vit_small_patch16_18x2_224,224,2225.09,460.18,1024,64.67,13.71,35.69
758
+ tf_efficientnetv2_s,300,2217.63,461.725,1024,21.46,5.35,22.73
759
+ deit3_small_patch16_384,384,2216.36,461.997,1024,22.21,15.52,50.78
760
+ resnetblur50,288,2215.39,462.19,1024,25.56,8.52,19.87
761
+ res2net101_26w_4s,224,2210.59,463.166,1024,45.21,8.1,18.45
762
+ regnety_080_tv,224,2199.69,465.492,1024,39.38,8.51,19.73
763
+ xcit_tiny_12_p16_384,384,2198.84,465.672,1024,6.72,3.64,18.26
764
+ hgnetv2_b5,288,2192.99,466.916,1024,39.57,10.84,18.5
765
+ resnext101_32x4d,224,2191.44,467.242,1024,44.18,8.01,21.23
766
+ res2net50_26w_8s,224,2190.96,467.347,1024,48.4,8.37,17.95
767
+ ecaresnet50t,288,2189.92,467.571,1024,25.57,7.14,19.55
768
+ vit_relpos_base_patch16_rpn_224,224,2183.57,468.919,1024,86.41,17.51,24.97
769
+ densenet121,288,2181.11,469.465,1024,7.98,4.74,11.41
770
+ ecaresnet50d,288,2174.22,470.95,1024,25.58,7.19,19.72
771
+ pvt_v2_b3,224,2173.14,471.171,1024,45.24,6.92,37.7
772
+ vgg16,224,2165.35,472.876,1024,138.36,15.47,13.56
773
+ vit_base_patch16_xp_224,224,2165.22,472.901,1024,86.51,17.56,23.9
774
+ vit_base_patch16_rpn_224,224,2161.0,473.826,1024,86.54,17.49,23.75
775
+ repvit_m2_3,224,2157.85,474.494,1024,23.69,4.57,26.21
776
+ cs3edgenet_x,288,2155.75,474.984,1024,47.82,14.59,16.36
777
+ mvitv2_tiny,224,2155.16,475.117,1024,24.17,4.7,21.16
778
+ swinv2_cr_tiny_224,224,2153.94,475.376,1024,28.33,4.66,28.45
779
+ nf_resnet50,288,2147.63,476.776,1024,25.56,6.88,18.37
780
+ ese_vovnet99b,224,2146.96,476.932,1024,63.2,16.51,11.27
781
+ mobilevitv2_200,256,2143.14,358.33,768,18.45,7.22,32.15
782
+ res2net101d,224,2140.6,478.302,1024,45.23,8.35,19.25
783
+ seresnet50t,288,2138.78,478.747,1024,28.1,7.14,19.55
784
+ edgenext_base,320,2134.2,479.781,1024,18.51,6.01,24.32
785
+ resnet61q,288,2130.61,480.584,1024,36.85,9.87,21.52
786
+ swinv2_cr_tiny_ns_224,224,2127.09,481.38,1024,28.33,4.66,28.45
787
+ inception_next_small,224,2115.6,484.002,1024,49.37,8.36,19.27
788
+ vit_mediumd_patch16_reg4_gap_256,256,2113.09,484.575,1024,64.11,17.87,37.57
789
+ ese_vovnet39b_evos,224,2111.78,484.876,1024,24.58,7.07,6.74
790
+ rdnet_small,224,2110.32,485.203,1024,50.44,8.74,22.55
791
+ resnext50_32x4d,288,2108.45,485.637,1024,25.03,7.04,23.81
792
+ eca_nfnet_l0,288,2099.14,487.794,1024,24.14,7.12,17.29
793
+ mobilenetv4_hybrid_medium,448,2097.44,366.139,768,11.07,4.2,29.64
794
+ convnext_base,224,2095.37,488.672,1024,88.59,15.38,28.75
795
+ resnetblur50d,288,2095.33,488.675,1024,25.58,8.92,21.19
796
+ regnety_040_sgn,288,2093.1,489.199,1024,20.65,6.67,20.3
797
+ resnet101d,256,2091.32,489.613,1024,44.57,10.55,22.25
798
+ coatnet_rmlp_1_rw_224,224,2085.77,490.917,1024,41.69,7.85,35.47
799
+ xception41p,299,2084.2,245.637,512,26.91,9.25,39.86
800
+ regnetz_040,256,2068.8,494.89,1024,27.12,4.06,24.19
801
+ nf_regnet_b4,320,2065.2,495.816,1024,30.21,3.29,19.88
802
+ regnetz_040_h,256,2063.38,496.243,1024,28.94,4.12,24.29
803
+ efficientvit_l2,256,2060.07,497.046,1024,63.71,9.09,25.49
804
+ resnest50d_4s2x40d,224,2047.78,500.025,1024,30.42,4.4,17.94
805
+ nfnet_l0,288,2047.5,500.093,1024,35.07,7.13,17.29
806
+ vit_base_patch16_reg4_gap_256,256,2044.87,500.737,1024,86.62,23.5,33.89
807
+ regnetz_d32,256,2033.99,503.413,1024,27.58,5.98,23.74
808
+ dpn92,224,2027.88,504.934,1024,37.67,6.54,18.21
809
+ regnetz_d8,256,2018.69,507.232,1024,23.37,3.97,23.74
810
+ resnext50d_32x4d,288,2012.24,508.858,1024,25.05,7.44,25.13
811
+ focalnet_small_srf,224,1995.65,513.095,1024,49.89,8.62,26.26
812
+ hgnet_small,288,1995.54,384.841,768,24.36,14.09,14.53
813
+ vgg19,224,1994.6,513.358,1024,143.67,19.63,14.86
814
+ crossvit_base_240,240,1994.0,513.516,1024,105.03,21.22,36.33
815
+ regnetz_c16,320,1992.12,513.999,1024,13.46,3.92,25.88
816
+ resnetv2_152,224,1991.8,514.076,1024,60.19,11.55,22.56
817
+ mobilenetv4_conv_aa_large,384,1988.94,514.825,1024,32.59,7.07,32.29
818
+ hiera_base_plus_224,224,1983.12,516.333,1024,69.9,12.67,37.98
819
+ regnetx_120,224,1982.95,516.372,1024,46.11,12.13,21.37
820
+ convnextv2_tiny,224,1982.1,516.59,1024,28.64,4.47,13.44
821
+ seresnetaa50d,288,1977.18,517.879,1024,28.11,8.92,20.59
822
+ repvgg_b2,224,1974.36,518.618,1024,89.02,20.45,12.9
823
+ legacy_seresnext101_32x4d,224,1971.37,519.379,1024,48.96,8.02,21.26
824
+ densenetblur121d,288,1970.87,519.545,1024,8.0,5.14,13.06
825
+ convit_small,224,1970.28,519.698,1024,27.78,5.76,17.87
826
+ coatnet_1_rw_224,224,1960.67,522.246,1024,41.72,8.04,34.6
827
+ nfnet_f0,256,1953.19,524.243,1024,71.49,12.62,18.05
828
+ botnet50ts_256,256,1951.79,262.305,512,22.74,5.54,22.23
829
+ poolformer_s36,224,1948.45,525.506,1024,30.86,5.0,15.82
830
+ convmixer_1024_20_ks9_p14,224,1942.47,527.139,1024,24.38,5.55,5.51
831
+ resnet152,224,1941.85,527.301,1024,60.19,11.56,22.56
832
+ skresnext50_32x4d,224,1938.97,528.085,1024,27.48,4.5,17.18
833
+ resnetv2_152d,224,1938.05,528.334,1024,60.2,11.8,23.36
834
+ fastvit_mci0,256,1935.03,529.168,1024,11.41,2.42,18.29
835
+ inception_v4,299,1934.57,529.276,1024,42.68,12.28,15.09
836
+ seresnext101_32x4d,224,1925.49,531.724,1024,48.96,8.02,21.26
837
+ nextvit_large,224,1924.7,532.009,1024,57.87,10.78,28.99
838
+ halo2botnet50ts_256,256,1915.33,534.611,1024,22.64,5.02,21.78
839
+ coatnet_rmlp_1_rw2_224,224,1914.3,534.892,1024,41.72,8.11,40.13
840
+ vgg16_bn,224,1909.7,536.182,1024,138.37,15.5,13.56
841
+ resnetv2_50d_frn,224,1907.53,536.788,1024,25.59,4.33,11.92
842
+ twins_svt_base,224,1904.0,537.783,1024,56.07,8.59,26.33
843
+ gcvit_tiny,224,1903.79,537.854,1024,28.22,4.79,29.82
844
+ vit_base_patch16_siglip_gap_256,256,1894.78,540.402,1024,85.84,23.13,33.23
845
+ dla169,224,1893.96,540.641,1024,53.39,11.6,20.2
846
+ convnextv2_nano,288,1893.91,405.49,768,15.62,4.06,13.84
847
+ resnet152d,224,1891.46,541.352,1024,60.21,11.8,23.36
848
+ efficientnet_el,300,1884.76,543.277,1024,10.59,8.0,30.7
849
+ resnet152c,224,1882.54,543.916,1024,60.21,11.8,23.36
850
+ twins_pcpvt_large,224,1881.08,544.338,1024,60.99,9.84,35.82
851
+ nf_ecaresnet101,224,1877.87,545.282,1024,44.55,8.01,16.27
852
+ vit_base_patch16_siglip_256,256,1875.34,546.002,1024,92.93,23.44,33.63
853
+ maxvit_tiny_tf_224,224,1873.59,409.888,768,30.92,5.6,35.78
854
+ maxxvit_rmlp_tiny_rw_256,256,1870.47,410.571,768,29.64,6.66,39.76
855
+ efficientnet_el_pruned,300,1867.55,548.285,1024,10.59,8.0,30.7
856
+ efficientnet_b3_gn,320,1866.95,205.663,384,11.73,2.14,28.83
857
+ seresnext50_32x4d,288,1856.61,551.512,1024,27.56,7.04,23.82
858
+ regnetz_b16_evos,224,1854.56,552.123,1024,9.74,1.43,9.95
859
+ nf_seresnet101,224,1848.05,554.073,1024,49.33,8.02,16.27
860
+ repvgg_b3g4,224,1843.59,555.409,1024,83.83,17.89,15.1
861
+ regnety_120,224,1840.57,556.321,1024,51.82,12.14,21.38
862
+ mobilenetv4_conv_large,448,1830.25,419.592,768,32.59,8.75,37.17
863
+ focalnet_small_lrf,224,1830.02,559.535,1024,50.34,8.74,28.61
864
+ caformer_s18,224,1825.4,560.94,1024,26.34,4.13,19.39
865
+ nest_small,224,1821.81,562.055,1024,38.35,10.35,40.04
866
+ efficientnet_b3_g8_gn,288,1817.33,422.578,768,14.25,2.59,23.35
867
+ densenet161,224,1806.94,566.679,1024,28.68,7.79,11.06
868
+ tresnet_xl,224,1805.47,567.135,1024,78.44,15.2,15.34
869
+ convnext_small,288,1801.22,568.48,1024,50.22,14.39,35.65
870
+ nest_small_jx,224,1800.35,568.755,1024,38.35,10.35,40.04
871
+ ecaresnet50t,320,1797.24,569.733,1024,25.57,8.82,24.13
872
+ vit_small_patch8_224,224,1790.17,571.984,1024,21.67,22.44,80.84
873
+ vit_large_r50_s32_224,224,1790.0,572.042,1024,328.99,19.58,24.41
874
+ efficientvit_b3,256,1789.45,429.156,768,48.65,5.2,35.01
875
+ davit_base,224,1784.6,430.324,768,87.95,15.51,40.66
876
+ tf_efficientnet_el,300,1777.69,576.0,1024,10.59,8.0,30.7
877
+ vit_base_patch16_plus_240,240,1770.19,578.441,1024,117.56,27.41,33.08
878
+ maxvit_tiny_rw_256,256,1767.83,434.406,768,29.07,6.74,44.35
879
+ maxvit_rmlp_tiny_rw_256,256,1764.91,435.129,768,29.15,6.77,46.92
880
+ sequencer2d_m,224,1760.97,581.467,1024,38.31,6.55,14.26
881
+ xception41,299,1760.86,290.748,512,26.97,9.28,39.86
882
+ resnet152s,224,1745.67,586.565,1024,60.32,12.92,24.96
883
+ efficientnet_b4,320,1740.06,294.222,512,19.34,3.13,34.76
884
+ coatnet_1_224,224,1739.09,294.386,512,42.23,8.7,39.0
885
+ resnetv2_101,288,1737.4,589.353,1024,44.54,12.94,26.83
886
+ hrnet_w30,224,1723.75,593.993,1024,37.71,8.15,21.21
887
+ mixnet_xxl,224,1723.5,445.584,768,23.96,2.04,23.43
888
+ convformer_s18,224,1723.21,594.213,1024,26.77,3.96,15.82
889
+ legacy_seresnet152,224,1717.65,596.143,1024,66.82,11.33,22.08
890
+ regnetx_160,224,1711.44,598.295,1024,54.28,15.99,25.52
891
+ resnetv2_50d_evos,224,1709.91,598.829,1024,25.59,4.33,11.92
892
+ rexnetr_300,288,1708.11,299.718,512,34.81,5.59,36.61
893
+ mobilenetv4_hybrid_large,384,1707.76,599.592,1024,37.76,7.77,34.52
894
+ eva02_base_patch16_clip_224,224,1707.16,599.804,1024,86.26,17.62,26.32
895
+ wide_resnet50_2,288,1707.11,599.82,1024,68.88,18.89,23.81
896
+ mvitv2_small_cls,224,1705.56,600.369,1024,34.87,7.04,28.17
897
+ hrnet_w32,224,1694.43,604.278,1024,41.23,8.97,22.02
898
+ xcit_tiny_12_p8_224,224,1681.45,608.969,1024,6.71,4.81,23.6
899
+ resnet101,288,1679.51,609.673,1024,44.55,12.95,26.83
900
+ tnt_s_patch16_224,224,1675.64,611.078,1024,23.76,5.24,24.37
901
+ wide_resnet101_2,224,1674.87,611.366,1024,126.89,22.8,21.23
902
+ vgg19_bn,224,1663.12,615.681,1024,143.68,19.66,14.86
903
+ cait_xxs36_224,224,1660.72,616.58,1024,17.3,3.77,30.34
904
+ swin_small_patch4_window7_224,224,1658.9,617.245,1024,49.61,8.77,27.47
905
+ seresnet152,224,1654.98,618.709,1024,66.82,11.57,22.61
906
+ vit_betwixt_patch16_rope_reg4_gap_256,256,1646.55,621.878,1024,60.23,16.52,28.24
907
+ convnext_tiny,384,1644.05,311.408,512,28.59,13.14,39.48
908
+ efficientformerv2_s0,224,1630.94,627.828,1024,3.6,0.41,5.3
909
+ cs3se_edgenet_x,320,1622.94,630.928,1024,50.72,18.01,20.21
910
+ mvitv2_small,224,1621.36,631.548,1024,34.87,7.0,28.08
911
+ vit_relpos_base_patch16_plus_240,240,1621.32,631.559,1024,117.38,27.3,34.33
912
+ efficientvit_l2,288,1608.39,636.638,1024,63.71,11.51,32.19
913
+ convnext_base,256,1605.42,637.811,1024,88.59,20.09,37.55
914
+ dm_nfnet_f0,256,1604.65,638.122,1024,71.49,12.62,18.05
915
+ dla102x2,224,1600.16,639.91,1024,41.28,9.34,29.91
916
+ maxvit_tiny_pm_256,256,1598.65,480.383,768,30.09,6.61,47.9
917
+ efficientnet_lite4,380,1596.86,240.45,384,13.01,4.04,45.66
918
+ xcit_small_24_p16_224,224,1593.77,642.475,1024,47.67,9.1,23.64
919
+ samvit_base_patch16_224,224,1591.15,643.539,1024,86.46,17.54,24.54
920
+ vit_small_r26_s32_384,384,1589.79,644.083,1024,36.47,10.43,29.85
921
+ regnety_160,224,1584.47,646.244,1024,83.59,15.96,23.04
922
+ hiera_base_abswin_256,256,1579.74,648.184,1024,51.27,12.46,40.7
923
+ hgnetv2_b6,224,1577.08,649.276,1024,75.26,16.88,21.23
924
+ vit_base_r50_s16_224,224,1574.79,650.212,1024,97.89,21.66,35.28
925
+ poolformerv2_s24,224,1572.19,651.293,1024,21.34,3.42,10.68
926
+ coat_tiny,224,1561.91,655.584,1024,5.5,4.35,27.2
927
+ pvt_v2_b4,224,1560.91,655.996,1024,62.56,10.14,53.74
928
+ pvt_v2_b5,224,1557.23,657.547,1024,81.96,11.76,50.92
929
+ eca_nfnet_l1,256,1553.7,659.05,1024,41.41,9.62,22.04
930
+ repvgg_b3,224,1551.91,659.803,1024,123.09,29.16,15.1
931
+ xception65p,299,1551.33,329.999,512,39.82,13.91,52.48
932
+ swinv2_tiny_window8_256,256,1547.17,661.823,1024,28.35,5.96,24.57
933
+ resnetaa101d,288,1535.29,666.945,1024,44.57,15.07,29.03
934
+ fastvit_sa24,256,1525.62,671.177,1024,21.55,3.8,24.32
935
+ efficientnetv2_s,384,1507.35,679.311,1024,21.46,8.44,35.77
936
+ efficientformerv2_s1,224,1505.34,680.214,1024,6.19,0.67,7.66
937
+ resnet152d,256,1501.71,681.861,1024,60.21,15.41,30.51
938
+ inception_next_base,224,1485.4,689.354,1024,86.67,14.85,25.69
939
+ efficientnet_b3_g8_gn,320,1480.62,518.679,768,14.25,3.2,28.83
940
+ regnety_080,288,1476.82,693.353,1024,39.18,13.22,29.69
941
+ dpn98,224,1476.36,693.573,1024,61.57,11.73,25.2
942
+ mobilenetv4_conv_aa_large,448,1470.08,522.395,768,32.59,9.63,43.94
943
+ hrnet_w18_ssld,288,1462.78,700.012,1024,21.3,7.14,26.96
944
+ resnetblur101d,288,1462.58,700.1,1024,44.57,15.07,29.65
945
+ rdnet_base,224,1460.56,525.796,768,87.45,15.4,31.14
946
+ hgnet_base,224,1454.32,528.06,768,71.58,25.14,15.47
947
+ efficientnetv2_rw_s,384,1440.54,710.82,1024,23.94,8.72,38.03
948
+ nf_regnet_b4,384,1439.96,711.11,1024,30.21,4.7,28.61
949
+ seresnet101,288,1437.85,712.145,1024,49.33,12.95,26.87
950
+ regnetv_064,288,1435.74,713.191,1024,30.58,10.55,27.11
951
+ focalnet_base_srf,224,1425.69,718.219,1024,88.15,15.28,35.01
952
+ eva02_small_patch14_336,336,1424.13,719.007,1024,22.13,15.48,54.33
953
+ ecaresnet101d,288,1423.42,719.366,1024,44.57,13.35,28.19
954
+ regnety_064,288,1420.12,720.99,1024,30.58,10.56,27.11
955
+ tf_efficientnetv2_s,384,1419.64,721.279,1024,21.46,8.44,35.77
956
+ tf_efficientnet_lite4,380,1419.26,270.533,384,13.01,4.04,45.66
957
+ inception_resnet_v2,299,1412.75,724.752,1024,55.84,13.18,25.06
958
+ resnext101_64x4d,224,1412.06,725.153,1024,83.46,15.52,31.21
959
+ crossvit_15_dagger_408,408,1397.53,732.691,1024,28.5,21.45,95.05
960
+ resnext101_32x8d,224,1396.99,732.978,1024,88.79,16.48,31.21
961
+ resnet200,224,1396.79,733.081,1024,64.67,15.07,32.19
962
+ efficientvit_b3,288,1387.6,553.447,768,48.65,6.58,44.2
963
+ resnetrs101,288,1381.07,741.426,1024,63.62,13.56,28.53
964
+ poolformer_m36,224,1377.13,743.544,1024,56.17,8.8,22.02
965
+ maxvit_rmlp_small_rw_224,224,1365.13,562.563,768,64.9,10.75,49.3
966
+ vit_mediumd_patch16_rope_reg1_gap_256,256,1360.17,752.819,1024,63.95,17.65,37.02
967
+ resnext101_32x4d,288,1355.28,755.535,1024,44.18,13.24,35.09
968
+ vit_so150m_patch16_reg4_gap_256,256,1352.83,756.904,1024,134.13,36.75,53.21
969
+ vit_medium_patch16_gap_384,384,1340.12,764.084,1024,39.03,26.08,67.54
970
+ vit_so150m_patch16_reg4_map_256,256,1340.1,764.094,1024,141.48,37.18,53.68
971
+ swinv2_cr_small_224,224,1339.46,764.46,1024,49.7,9.07,50.27
972
+ resnet101d,320,1328.63,770.69,1024,44.57,16.48,34.77
973
+ regnetz_040,320,1328.31,385.424,512,27.12,6.35,37.78
974
+ swinv2_cr_small_ns_224,224,1326.17,772.119,1024,49.7,9.08,50.27
975
+ regnetz_040_h,320,1323.75,386.752,512,28.94,6.43,37.94
976
+ focalnet_base_lrf,224,1318.02,776.898,1024,88.75,15.43,38.13
977
+ eva02_base_patch14_224,224,1315.36,778.472,1024,85.76,23.22,36.55
978
+ xception65,299,1311.64,390.314,512,39.92,13.96,52.48
979
+ vit_base_patch16_rope_reg1_gap_256,256,1310.51,781.347,1024,86.43,23.22,33.39
980
+ nest_base,224,1310.17,781.549,1024,67.72,17.96,53.39
981
+ convnextv2_small,224,1310.01,781.645,1024,50.32,8.71,21.56
982
+ nfnet_f1,224,1306.81,783.562,1024,132.63,17.87,22.94
983
+ efficientnetv2_m,320,1304.81,784.765,1024,54.14,11.01,39.97
984
+ volo_d2_224,224,1303.25,785.708,1024,58.68,14.34,41.34
985
+ coatnet_2_rw_224,224,1301.67,393.319,512,73.87,15.09,49.22
986
+ regnetz_d32,320,1300.21,787.532,1024,27.58,9.33,37.08
987
+ seresnext101_64x4d,224,1298.53,788.558,1024,88.23,15.53,31.25
988
+ nest_base_jx,224,1295.76,790.243,1024,67.72,17.96,53.39
989
+ gmlp_b16_224,224,1289.85,793.862,1024,73.08,15.78,30.21
990
+ mobilevitv2_150,384,1286.35,198.995,256,10.59,9.2,54.25
991
+ hrnet_w40,224,1285.98,796.256,1024,57.56,12.75,25.29
992
+ regnetz_d8,320,1285.82,796.348,1024,23.37,6.19,37.08
993
+ seresnext101_32x8d,224,1284.38,797.243,1024,93.57,16.48,31.25
994
+ seresnet152d,256,1276.56,802.126,1024,66.84,15.42,30.56
995
+ resnetrs152,256,1273.91,803.74,1024,86.62,15.59,30.83
996
+ mobilenetv4_conv_aa_large,480,1272.27,603.62,768,32.59,11.05,50.45
997
+ convnextv2_tiny,288,1271.26,604.097,768,28.64,7.39,22.21
998
+ cait_s24_224,224,1270.01,806.265,1024,46.92,9.35,40.58
999
+ convnext_base,288,1266.87,808.261,1024,88.59,25.43,47.53
1000
+ seresnext101d_32x8d,224,1264.56,809.737,1024,93.59,16.72,32.05
1001
+ resnest101e,256,1259.59,812.93,1024,48.28,13.38,28.66
1002
+ efficientformer_l7,224,1257.98,813.975,1024,82.23,10.17,24.45
1003
+ twins_svt_large,224,1250.2,819.039,1024,99.27,15.15,35.1
1004
+ maxvit_small_tf_224,224,1245.42,411.087,512,68.93,11.66,53.17
1005
+ maxxvit_rmlp_small_rw_256,256,1239.49,619.588,768,66.01,14.67,58.38
1006
+ mobilenetv4_hybrid_large,448,1233.31,622.691,768,37.76,10.74,48.61
1007
+ resnet50x4_clip_gap,288,1228.45,833.533,1024,65.62,19.57,34.11
1008
+ coatnet_rmlp_2_rw_224,224,1227.94,416.935,512,73.88,15.18,54.78
1009
+ coat_mini,224,1224.8,836.033,1024,10.34,6.82,33.68
1010
+ coatnet_2_224,224,1217.65,420.455,512,74.68,16.5,52.67
1011
+ coat_lite_medium,224,1217.51,841.038,1024,44.57,9.81,40.06
1012
+ efficientnet_b4,384,1211.17,317.024,384,19.34,4.51,50.04
1013
+ swin_base_patch4_window7_224,224,1207.02,848.341,1024,87.77,15.47,36.63
1014
+ convnext_large,224,1206.16,848.95,1024,197.77,34.4,43.13
1015
+ tresnet_m,448,1204.65,850.01,1024,31.39,22.99,29.21
1016
+ mvitv2_base_cls,224,1201.41,852.308,1024,65.44,10.23,40.65
1017
+ vit_large_patch32_384,384,1196.88,855.533,1024,306.63,45.31,43.86
1018
+ resnet152,288,1192.43,858.72,1024,60.19,19.11,37.28
1019
+ seresnext101_32x4d,288,1192.03,859.008,1024,48.96,13.25,35.12
1020
+ tiny_vit_21m_384,384,1188.05,646.409,768,21.23,13.77,77.83
1021
+ seresnextaa101d_32x8d,224,1183.29,865.35,1024,93.59,17.25,34.16
1022
+ resnet50x4_clip,288,1179.84,867.878,1024,87.14,21.35,35.27
1023
+ xcit_tiny_24_p16_384,384,1171.17,874.311,1024,12.12,6.87,34.29
1024
+ levit_conv_384_s8,224,1166.16,439.026,512,39.12,9.98,35.86
1025
+ dm_nfnet_f1,224,1150.17,890.285,1024,132.63,17.87,22.94
1026
+ regnetz_e8,256,1147.85,892.072,1024,57.7,9.91,40.94
1027
+ swin_s3_small_224,224,1145.1,670.656,768,49.74,9.43,37.84
1028
+ mvitv2_base,224,1138.44,899.454,1024,51.47,10.16,40.5
1029
+ sequencer2d_l,224,1135.5,901.779,1024,54.3,9.74,22.12
1030
+ efficientnetv2_rw_m,320,1134.41,902.651,1024,53.24,12.72,47.14
1031
+ gcvit_small,224,1132.43,904.224,1024,51.09,8.57,41.61
1032
+ regnety_120,288,1127.97,680.842,768,51.82,20.06,35.34
1033
+ hrnet_w44,224,1125.91,909.412,1024,67.06,14.94,26.92
1034
+ levit_384_s8,224,1123.29,455.784,512,39.12,9.98,35.86
1035
+ regnetz_b16_evos,288,1122.27,684.3,768,9.74,2.36,16.43
1036
+ hrnet_w48_ssld,224,1116.96,916.749,1024,77.47,17.34,28.56
1037
+ hrnet_w48,224,1113.97,919.215,1024,77.47,17.34,28.56
1038
+ regnetz_c16_evos,256,1112.53,690.289,768,13.49,2.48,16.57
1039
+ tf_efficientnet_b4,380,1110.38,345.798,384,19.34,4.49,49.49
1040
+ xcit_medium_24_p16_224,224,1105.25,926.46,1024,84.4,16.13,31.71
1041
+ tnt_b_patch16_224,224,1094.56,935.51,1024,65.41,14.09,39.01
1042
+ mobilevitv2_175,384,1091.31,234.563,256,14.25,12.47,63.29
1043
+ dpn131,224,1083.25,945.269,1024,79.25,16.09,32.97
1044
+ nextvit_small,384,1081.98,946.382,1024,31.76,17.26,57.14
1045
+ resnet200d,256,1077.87,949.987,1024,64.69,20.0,43.09
1046
+ vit_betwixt_patch16_reg4_gap_384,384,1077.83,950.026,1024,60.6,39.71,85.28
1047
+ efficientvit_l3,224,1066.9,719.814,768,246.04,27.62,39.16
1048
+ convnextv2_nano,384,1066.76,359.947,384,15.62,7.22,24.61
1049
+ maxvit_rmlp_small_rw_256,256,1061.31,723.613,768,64.9,14.15,66.09
1050
+ poolformerv2_s36,224,1054.78,970.792,1024,30.79,5.01,15.82
1051
+ fastvit_sa36,256,1050.89,974.39,1024,31.53,5.64,34.61
1052
+ davit_large,224,1043.66,735.848,768,196.81,34.6,60.99
1053
+ convit_base,224,1041.2,983.452,1024,86.54,17.52,31.77
1054
+ legacy_senet154,224,1041.02,983.633,1024,115.09,20.77,38.69
1055
+ resnetv2_50d_evos,288,1038.94,985.591,1024,25.59,7.15,19.7
1056
+ poolformer_m48,224,1037.67,986.794,1024,73.47,11.59,29.17
1057
+ vitamin_base_224,224,1035.95,494.209,512,87.72,22.68,52.77
1058
+ crossvit_18_dagger_408,408,1032.79,991.461,1024,44.61,32.47,124.87
1059
+ fastvit_mci1,256,1032.41,991.824,1024,21.54,4.72,32.84
1060
+ maxxvitv2_rmlp_base_rw_224,224,1032.37,743.892,768,116.09,24.2,62.77
1061
+ swinv2_base_window12_192,192,1028.07,996.011,1024,109.28,11.9,39.72
1062
+ convnext_base,320,1024.52,749.592,768,88.59,31.39,58.68
1063
+ xcit_small_12_p16_384,384,1021.49,1002.43,1024,26.25,14.14,36.51
1064
+ resnetv2_50x1_bit,448,1019.03,502.406,512,25.55,16.62,44.46
1065
+ senet154,224,1016.88,1006.903,1024,115.09,20.77,38.69
1066
+ densenet264d,224,1016.07,1007.782,1024,72.74,13.57,14.0
1067
+ convnext_small,384,1015.93,755.929,768,50.22,25.58,63.37
1068
+ seresnet152,288,1013.71,1010.121,1024,66.82,19.11,37.34
1069
+ regnety_320,224,1007.78,1016.066,1024,145.05,32.34,30.26
1070
+ dpn107,224,1005.01,1018.86,1024,86.92,18.38,33.46
1071
+ xception71,299,1004.0,509.938,512,42.34,18.09,69.92
1072
+ hgnetv2_b6,288,981.26,782.643,768,75.26,27.9,35.09
1073
+ swinv2_cr_base_224,224,978.09,1046.904,1024,87.88,15.86,59.66
1074
+ eca_nfnet_l1,320,976.83,1048.265,1024,41.41,14.92,34.42
1075
+ regnety_160,288,976.51,524.284,512,83.59,26.37,38.07
1076
+ caformer_s36,224,971.41,1054.103,1024,39.3,8.0,37.53
1077
+ swinv2_cr_base_ns_224,224,970.9,1054.655,1024,87.88,15.86,59.66
1078
+ mobilenetv4_conv_aa_large,544,968.9,528.415,512,32.59,14.19,64.79
1079
+ swinv2_small_window8_256,256,964.83,1061.297,1024,49.73,11.58,40.14
1080
+ resnetv2_50x3_bit,224,964.09,796.571,768,217.32,37.06,33.34
1081
+ regnetx_320,224,962.49,1063.881,1024,107.81,31.81,36.3
1082
+ swinv2_cr_small_ns_256,256,961.42,1065.06,1024,49.7,12.07,76.21
1083
+ nf_regnet_b5,384,955.45,803.79,768,49.74,7.95,42.9
1084
+ swin_s3_base_224,224,954.89,1072.349,1024,71.13,13.69,48.26
1085
+ resnet152d,320,952.68,1074.832,1024,60.21,24.08,47.67
1086
+ swinv2_tiny_window16_256,256,949.54,404.378,384,28.35,6.68,39.02
1087
+ mobilevitv2_200,384,949.03,269.729,256,18.45,16.24,72.34
1088
+ efficientvit_l2,384,946.77,540.762,512,63.71,20.45,57.01
1089
+ coat_small,224,946.54,1081.808,1024,21.69,12.61,44.25
1090
+ convnextv2_base,224,943.15,814.261,768,88.72,15.38,28.75
1091
+ volo_d3_224,224,938.88,1090.634,1024,86.33,20.78,60.09
1092
+ ecaresnet200d,256,930.32,1100.668,1024,64.69,20.0,43.15
1093
+ vit_mediumd_patch16_reg4_gap_384,384,927.5,1104.011,1024,64.27,43.67,113.51
1094
+ deit_base_patch16_384,384,924.35,1107.782,1024,86.86,55.54,101.56
1095
+ deit_base_distilled_patch16_384,384,923.14,1109.233,1024,87.63,55.65,101.82
1096
+ convnext_large_mlp,256,921.21,833.66,768,200.13,44.94,56.33
1097
+ vit_base_patch16_384,384,915.77,1118.154,1024,86.86,55.54,101.56
1098
+ convformer_s36,224,915.39,1118.618,1024,40.01,7.67,30.5
1099
+ vit_base_patch16_clip_384,384,912.89,1121.683,1024,86.86,55.54,101.56
1100
+ vit_large_patch16_224,224,912.7,1121.913,1024,304.33,61.6,63.52
1101
+ eva_large_patch14_196,196,906.88,1129.113,1024,304.14,61.57,63.52
1102
+ seresnet200d,256,906.73,1129.3,1024,71.86,20.01,43.15
1103
+ resnetrs200,256,903.32,1133.498,1024,93.21,20.18,43.42
1104
+ hgnet_base,288,882.61,580.058,512,71.58,41.55,25.57
1105
+ xcit_tiny_24_p8_224,224,882.26,1160.631,1024,12.11,9.21,45.39
1106
+ rdnet_large,224,880.89,581.2,512,186.27,34.74,46.67
1107
+ resnext101_64x4d,288,874.91,1170.378,1024,83.46,25.66,51.59
1108
+ fastvit_ma36,256,873.15,1172.691,1024,44.07,7.88,41.09
1109
+ tf_efficientnetv2_m,384,871.16,1175.422,1024,54.14,15.85,57.52
1110
+ beit_large_patch16_224,224,866.7,1181.465,1024,304.43,61.6,63.52
1111
+ hrnet_w64,224,866.3,1181.977,1024,128.06,28.97,35.09
1112
+ efficientvit_l3,256,859.51,893.502,768,246.04,36.06,50.98
1113
+ mixer_l16_224,224,858.48,1192.773,1024,208.2,44.6,41.69
1114
+ vit_small_patch14_dinov2,518,855.54,1196.86,1024,22.06,46.76,198.79
1115
+ beitv2_large_patch16_224,224,855.13,1197.449,1024,304.43,61.6,63.52
1116
+ resnet200,288,849.17,1205.852,1024,64.67,24.91,53.21
1117
+ xcit_nano_12_p8_384,384,849.15,1205.887,1024,3.05,6.34,46.08
1118
+ nextvit_base,384,845.41,1211.22,1024,44.82,24.64,73.95
1119
+ deit3_base_patch16_384,384,844.28,1212.833,1024,86.88,55.54,101.56
1120
+ deit3_large_patch16_224,224,842.04,1216.066,1024,304.37,61.6,63.52
1121
+ gcvit_base,224,837.14,1223.178,1024,90.32,14.87,55.48
1122
+ vit_base_patch16_18x2_224,224,836.78,1223.713,1024,256.73,52.51,71.38
1123
+ beit_base_patch16_384,384,834.75,1226.684,1024,86.74,55.54,101.56
1124
+ hiera_large_224,224,833.73,1228.187,1024,213.74,40.34,83.37
1125
+ maxvit_rmlp_base_rw_224,224,831.5,923.612,768,116.14,23.15,92.64
1126
+ vit_small_patch14_reg4_dinov2,518,820.76,1247.592,1024,22.06,46.95,199.77
1127
+ seresnet152d,320,810.78,1262.948,1024,66.84,24.09,47.72
1128
+ resnetrs152,320,806.91,1269.013,1024,86.62,24.34,48.14
1129
+ vit_base_patch16_siglip_gap_384,384,803.81,1273.896,1024,86.09,55.43,101.3
1130
+ resnext101_32x16d,224,802.86,1275.414,1024,194.03,36.27,51.18
1131
+ volo_d1_384,384,801.94,1276.869,1024,26.78,22.75,108.55
1132
+ levit_conv_512_s8,224,796.51,321.385,256,74.05,21.82,52.28
1133
+ vit_base_patch16_siglip_384,384,796.23,1286.031,1024,93.18,56.12,102.2
1134
+ efficientformerv2_s2,224,793.27,1290.823,1024,12.71,1.27,11.77
1135
+ flexivit_large,240,792.48,1292.113,1024,304.36,70.99,75.39
1136
+ seresnext101_32x8d,288,790.85,1294.776,1024,93.57,27.24,51.63
1137
+ convnext_xlarge,224,789.11,973.216,768,350.2,60.98,57.5
1138
+ seresnext101d_32x8d,288,779.13,1314.209,1024,93.59,27.64,52.95
1139
+ fastvit_mci2,256,770.69,1328.66,1024,35.82,7.91,43.34
1140
+ xcit_small_12_p8_224,224,768.93,1331.684,1024,26.21,18.69,47.21
1141
+ efficientnetv2_m,416,757.66,1351.509,1024,54.14,18.6,67.5
1142
+ levit_512_s8,224,756.91,338.197,256,74.05,21.82,52.28
1143
+ nfnet_f2,256,754.41,1357.331,1024,193.78,33.76,41.85
1144
+ poolformerv2_m36,224,753.91,1358.223,1024,56.08,8.81,22.02
1145
+ coatnet_rmlp_3_rw_224,224,747.12,342.626,256,165.15,33.56,79.47
1146
+ swin_large_patch4_window7_224,224,734.47,1045.628,768,196.53,34.53,54.94
1147
+ coatnet_3_rw_224,224,734.18,348.657,256,181.81,33.44,73.83
1148
+ coatnet_3_224,224,734.12,348.691,256,166.97,36.56,79.01
1149
+ efficientnet_b5,416,732.18,349.614,256,30.39,8.27,80.68
1150
+ maxvit_base_tf_224,224,727.37,703.885,512,119.47,24.04,95.01
1151
+ seresnextaa101d_32x8d,288,726.54,1409.397,1024,93.59,28.51,56.44
1152
+ regnetz_e8,320,726.44,1057.182,768,57.7,15.46,63.94
1153
+ convnext_large,288,724.73,706.442,512,197.77,56.87,71.29
1154
+ convnextv2_tiny,384,722.62,531.372,384,28.64,13.14,39.48
1155
+ ecaresnet200d,288,722.05,1418.16,1024,64.69,25.31,54.59
1156
+ regnetz_d8_evos,256,720.34,1421.53,1024,23.46,4.5,24.92
1157
+ resnetv2_152x2_bit,224,716.98,1428.18,1024,236.34,46.95,45.11
1158
+ seresnet269d,256,714.47,1433.198,1024,113.67,26.59,53.6
1159
+ convnext_base,384,714.31,716.749,512,88.59,45.21,84.49
1160
+ regnetz_c16_evos,320,711.04,720.042,512,13.49,3.86,25.88
1161
+ seresnet200d,288,706.46,1449.448,1024,71.86,25.32,54.6
1162
+ caformer_m36,224,705.55,1451.316,1024,56.2,13.29,50.48
1163
+ swinv2_base_window8_256,256,704.56,1090.005,768,87.92,20.37,52.59
1164
+ davit_huge,224,697.25,734.284,512,348.92,61.23,81.32
1165
+ xcit_large_24_p16_224,224,695.71,1471.847,1024,189.1,35.86,47.27
1166
+ nextvit_large,384,694.8,1473.781,1024,57.87,32.03,90.76
1167
+ nfnet_f1,320,694.64,1474.11,1024,132.63,35.97,46.77
1168
+ resnetrs270,256,693.43,1476.685,1024,129.86,27.06,55.84
1169
+ maxxvitv2_rmlp_large_rw_224,224,685.54,1120.26,768,215.42,44.14,87.15
1170
+ resnet200d,320,684.31,1496.359,1024,64.69,31.25,67.33
1171
+ eca_nfnet_l2,320,677.83,1510.682,1024,56.72,20.95,47.43
1172
+ hrnet_w48_ssld,288,674.81,1517.452,1024,77.47,28.66,47.21
1173
+ convformer_m36,224,673.4,1520.613,1024,57.05,12.89,42.05
1174
+ vit_large_patch14_224,224,671.42,1525.099,1024,304.2,81.08,88.79
1175
+ efficientnetv2_rw_m,416,660.69,1162.389,768,53.24,21.49,79.62
1176
+ vit_base_patch8_224,224,659.1,1165.195,768,86.58,78.22,161.69
1177
+ vit_large_patch14_clip_224,224,658.44,1555.16,1024,304.2,81.08,88.79
1178
+ resnetv2_101x1_bit,448,655.55,780.987,512,44.54,31.65,64.93
1179
+ swinv2_large_window12_192,192,647.21,791.055,512,228.77,26.17,56.53
1180
+ nf_regnet_b5,456,647.09,791.219,512,49.74,11.7,61.95
1181
+ efficientnet_b5,448,640.21,399.84,256,30.39,9.59,93.56
1182
+ tiny_vit_21m_512,512,639.87,600.092,384,21.27,27.02,177.93
1183
+ dm_nfnet_f2,256,635.02,1612.538,1024,193.78,33.76,41.85
1184
+ tresnet_l,448,634.37,1614.169,1024,55.99,43.59,47.56
1185
+ halonet_h1,256,633.74,403.931,256,8.1,3.0,51.17
1186
+ caformer_s18,384,629.68,813.073,512,26.34,13.42,77.34
1187
+ vit_large_patch16_siglip_gap_256,256,628.22,1629.977,1024,303.36,80.8,88.34
1188
+ maxvit_tiny_tf_384,384,626.52,408.586,256,30.98,17.53,123.42
1189
+ vit_large_patch16_siglip_256,256,625.66,1636.644,1024,315.96,81.34,88.88
1190
+ vit_large_r50_s32_384,384,615.76,1662.953,1024,329.09,57.43,76.52
1191
+ regnety_640,224,613.16,1252.507,768,281.38,64.16,42.5
1192
+ swinv2_cr_large_224,224,606.35,1266.568,768,196.68,35.1,78.42
1193
+ swinv2_small_window16_256,256,601.52,638.354,384,49.73,12.82,66.29
1194
+ seresnextaa101d_32x8d,320,597.68,1284.933,768,93.59,35.19,69.67
1195
+ convnextv2_large,224,595.67,859.51,512,197.96,34.4,43.13
1196
+ convmixer_768_32,224,587.98,1741.521,1024,21.11,19.55,25.95
1197
+ convnext_large_mlp,320,587.82,870.986,512,200.13,70.21,88.02
1198
+ convformer_s18,384,584.32,876.206,512,26.77,11.63,46.49
1199
+ volo_d4_224,224,581.51,1760.899,1024,192.96,44.34,80.22
1200
+ convnextv2_base,288,576.25,888.468,512,88.72,25.43,47.53
1201
+ resnetrs200,320,573.92,1784.186,1024,93.21,31.51,67.81
1202
+ dm_nfnet_f1,320,570.82,1793.897,1024,132.63,35.97,46.77
1203
+ resnetv2_101x3_bit,224,568.43,1351.065,768,387.93,71.23,48.7
1204
+ poolformerv2_m48,224,567.13,1805.556,1024,73.35,11.59,29.17
1205
+ vit_large_patch14_clip_quickgelu_224,224,566.47,1807.657,1024,303.97,81.08,88.79
1206
+ xcit_tiny_12_p8_384,384,566.0,1809.156,1024,6.71,14.13,69.14
1207
+ regnety_160,384,565.42,679.108,384,83.59,46.87,67.67
1208
+ seresnet269d,288,556.07,1841.481,1024,113.67,33.65,67.81
1209
+ vit_large_patch14_xp_224,224,555.64,1842.897,1024,304.06,81.01,88.79
1210
+ tf_efficientnet_b5,456,553.22,462.718,256,30.39,10.46,98.86
1211
+ tf_efficientnetv2_m,480,552.28,1390.555,768,54.14,24.76,89.84
1212
+ xcit_small_24_p16_384,384,550.05,1861.633,1024,47.67,26.72,68.58
1213
+ efficientvit_l3,320,548.76,932.978,512,246.04,56.32,79.34
1214
+ vit_base_r50_s16_384,384,528.19,1938.65,1024,98.95,67.43,135.03
1215
+ swinv2_cr_tiny_384,384,527.59,485.193,256,28.33,15.34,161.01
1216
+ inception_next_base,384,523.15,978.654,512,86.67,43.64,75.48
1217
+ caformer_b36,224,522.73,1469.167,768,98.75,23.22,67.3
1218
+ efficientformerv2_l,224,520.87,1965.917,1024,26.32,2.59,18.54
1219
+ maxvit_large_tf_224,224,511.9,750.122,384,211.79,43.68,127.35
1220
+ efficientnetv2_l,384,505.24,2026.716,1024,118.52,36.1,101.16
1221
+ convformer_b36,224,493.89,1554.99,768,99.88,22.69,56.06
1222
+ nasnetalarge,331,490.88,782.249,384,88.75,23.89,90.56
1223
+ vitamin_large2_224,224,490.43,1043.963,512,333.58,75.05,112.83
1224
+ vitamin_large_224,224,490.33,1044.155,512,333.32,75.05,112.83
1225
+ tf_efficientnetv2_l,384,486.38,2105.328,1024,118.52,36.1,101.16
1226
+ eca_nfnet_l2,384,481.18,1596.052,768,56.72,30.05,68.28
1227
+ convnext_xlarge,288,474.06,809.995,384,350.2,100.8,95.05
1228
+ tresnet_xl,448,468.78,1638.268,768,78.44,60.77,61.31
1229
+ ecaresnet269d,320,467.07,2192.338,1024,102.09,41.53,83.69
1230
+ vit_so400m_patch14_siglip_gap_224,224,464.21,2205.889,1024,412.44,109.57,106.13
1231
+ vit_so400m_patch14_siglip_224,224,463.58,2208.874,1024,427.68,110.26,106.73
1232
+ regnetz_d8_evos,320,459.77,1670.385,768,23.46,7.03,38.92
1233
+ pnasnet5large,331,451.71,850.077,384,86.06,25.04,92.89
1234
+ coatnet_4_224,224,448.81,570.366,256,275.43,62.48,129.26
1235
+ volo_d2_384,384,446.3,1720.798,768,58.87,46.17,184.51
1236
+ swinv2_base_window16_256,256,443.63,865.555,384,87.92,22.02,84.71
1237
+ swinv2_base_window12to16_192to256,256,443.19,866.406,384,87.92,22.02,84.71
1238
+ eca_nfnet_l3,352,438.82,2333.492,1024,72.04,32.57,73.12
1239
+ vit_base_patch16_siglip_gap_512,512,436.48,1172.98,512,86.43,107.0,246.15
1240
+ resnest200e,320,433.87,2360.1,1024,70.2,35.69,82.78
1241
+ repvgg_d2se,320,433.46,2362.343,1024,133.33,74.57,46.82
1242
+ vit_base_patch16_siglip_512,512,432.68,1183.28,512,93.52,108.22,247.74
1243
+ resnetrs350,288,431.8,2371.351,1024,163.96,43.67,87.09
1244
+ eva02_large_patch14_224,224,427.61,2394.657,1024,303.27,81.15,97.2
1245
+ eva02_large_patch14_clip_224,224,422.17,2425.529,1024,304.11,81.18,97.2
1246
+ maxvit_small_tf_384,384,417.56,459.79,192,69.02,35.87,183.65
1247
+ xcit_small_24_p8_224,224,413.85,2474.292,1024,47.63,35.81,90.78
1248
+ coat_lite_medium_384,384,412.65,1240.736,512,44.57,28.73,116.7
1249
+ cait_xxs24_384,384,409.04,2503.383,1024,12.03,9.63,122.66
1250
+ convnext_large,384,408.43,626.768,256,197.77,101.1,126.74
1251
+ convnext_large_mlp,384,408.43,626.758,256,200.13,101.11,126.74
1252
+ resnet50x16_clip_gap,384,408.06,1254.695,512,136.2,70.32,100.64
1253
+ coatnet_rmlp_2_rw_384,384,407.99,470.57,192,73.88,47.69,209.43
1254
+ resnext101_32x32d,224,402.22,1272.909,512,468.53,87.29,91.12
1255
+ nfnet_f2,352,397.35,1932.786,768,193.78,63.22,79.06
1256
+ mvitv2_large_cls,224,396.76,1935.67,768,234.58,42.17,111.69
1257
+ resnet50x16_clip,384,396.57,1291.043,512,167.33,74.9,103.54
1258
+ ecaresnet269d,352,387.68,2641.296,1024,102.09,50.25,101.25
1259
+ volo_d5_224,224,384.55,2662.854,1024,295.46,72.4,118.11
1260
+ xcit_medium_24_p16_384,384,381.47,2684.348,1024,84.4,47.39,91.64
1261
+ mvitv2_large,224,376.52,1359.807,512,217.99,43.87,112.02
1262
+ vitamin_large2_256,256,375.97,1021.316,384,333.64,99.0,154.99
1263
+ vitamin_large_256,256,375.75,1021.913,384,333.38,99.0,154.99
1264
+ hiera_huge_224,224,370.3,1382.641,512,672.78,124.85,150.95
1265
+ nfnet_f3,320,368.66,2777.571,1024,254.92,68.77,83.93
1266
+ efficientvit_l3,384,368.28,1042.649,384,246.04,81.08,114.02
1267
+ resnetrs270,352,365.8,2799.336,1024,129.86,51.13,105.48
1268
+ efficientnetv2_xl,384,365.74,2799.773,1024,208.12,52.81,139.2
1269
+ convnextv2_large,288,360.11,710.871,256,197.96,56.87,71.29
1270
+ regnety_320,384,355.95,1078.775,384,145.05,95.0,88.87
1271
+ tf_efficientnetv2_xl,384,352.9,2901.677,1024,208.12,52.81,139.2
1272
+ maxvit_tiny_tf_512,512,350.4,365.274,128,31.05,33.49,257.59
1273
+ efficientnet_b6,528,348.25,367.527,128,43.04,19.4,167.39
1274
+ vit_huge_patch14_224,224,346.11,2958.6,1024,630.76,167.4,139.41
1275
+ resmlp_big_24_224,224,346.09,2958.721,1024,129.14,100.23,87.31
1276
+ vit_huge_patch14_clip_224,224,345.85,2960.828,1024,632.05,167.4,139.41
1277
+ maxxvitv2_rmlp_base_rw_384,384,338.23,1135.306,384,116.09,72.98,213.74
1278
+ dm_nfnet_f2,352,333.3,2304.205,768,193.78,63.22,79.06
1279
+ caformer_s36,384,330.56,1548.873,512,39.3,26.08,150.33
1280
+ vit_base_patch14_dinov2,518,330.07,1551.146,512,86.58,151.71,397.58
1281
+ deit3_huge_patch14_224,224,328.48,3117.396,1024,632.13,167.4,139.41
1282
+ vit_base_patch14_reg4_dinov2,518,326.8,1566.682,512,86.58,152.25,399.53
1283
+ swinv2_cr_small_384,384,326.37,784.356,256,49.7,29.7,298.03
1284
+ convnextv2_base,384,326.0,785.254,256,88.72,45.21,84.49
1285
+ efficientnetv2_l,480,324.02,1580.122,512,118.52,56.4,157.99
1286
+ tf_efficientnet_b6,528,322.38,397.021,128,43.04,19.4,167.39
1287
+ vit_huge_patch14_gap_224,224,320.65,3193.485,1024,630.76,166.73,138.74
1288
+ eva02_base_patch14_448,448,313.01,1635.692,512,87.12,107.11,259.14
1289
+ convformer_s36,384,312.77,1636.965,512,40.01,22.54,89.62
1290
+ tf_efficientnetv2_l,480,311.92,1641.431,512,118.52,56.4,157.99
1291
+ regnety_1280,224,311.67,1642.714,512,644.81,127.66,71.58
1292
+ dm_nfnet_f3,320,308.4,3320.307,1024,254.92,68.77,83.93
1293
+ focalnet_huge_fl3,224,308.26,1660.931,512,745.28,118.26,104.8
1294
+ maxvit_xlarge_tf_224,224,304.7,840.149,256,506.99,97.52,191.04
1295
+ convmixer_1536_20,224,304.65,3361.196,1024,51.63,48.68,33.03
1296
+ vit_huge_patch14_clip_quickgelu_224,224,301.93,3391.487,1024,632.08,167.4,139.41
1297
+ xcit_tiny_24_p8_384,384,301.9,3391.864,1024,12.11,27.05,132.95
1298
+ seresnextaa201d_32x8d,320,301.18,3399.969,1024,149.39,70.22,138.71
1299
+ rdnet_large,384,300.8,638.262,192,186.27,102.09,137.13
1300
+ vitamin_xlarge_256,256,299.66,854.266,256,436.06,130.13,177.37
1301
+ resnetrs420,320,299.37,3420.532,1024,191.89,64.2,126.56
1302
+ swin_base_patch4_window12_384,384,298.5,857.581,256,87.9,47.19,134.78
1303
+ vit_large_patch16_384,384,297.48,2581.665,768,304.72,191.21,270.24
1304
+ vit_huge_patch14_xp_224,224,293.38,3490.267,1024,631.8,167.3,139.41
1305
+ eva_large_patch14_336,336,293.21,2619.236,768,304.53,191.1,270.24
1306
+ vit_large_patch14_clip_336,336,291.64,2633.322,768,304.53,191.11,270.24
1307
+ swinv2_cr_huge_224,224,289.13,1328.114,384,657.83,115.97,121.08
1308
+ cait_xs24_384,384,285.75,2687.593,768,26.67,19.28,183.98
1309
+ xcit_medium_24_p8_224,224,285.74,3583.627,1024,84.32,63.53,121.23
1310
+ sam2_hiera_tiny,896,284.13,225.218,64,26.85,99.86,384.63
1311
+ swinv2_large_window12to16_192to256,256,282.09,680.616,192,196.74,47.81,121.53
1312
+ convnext_xxlarge,256,281.32,909.972,256,846.47,198.09,124.45
1313
+ maxvit_rmlp_base_rw_384,384,277.19,1385.303,384,116.14,70.97,318.95
1314
+ davit_giant,224,275.78,1392.401,384,1406.47,192.92,153.06
1315
+ beit_large_patch16_384,384,274.3,3733.061,1024,305.0,191.21,270.24
1316
+ convnextv2_huge,224,273.48,936.071,256,660.29,115.0,79.07
1317
+ cait_xxs36_384,384,273.32,3746.438,1024,17.37,14.35,183.7
1318
+ deit3_large_patch16_384,384,271.88,3766.274,1024,304.76,191.21,270.24
1319
+ vit_giant_patch16_gap_224,224,271.62,3769.888,1024,1011.37,202.46,139.26
1320
+ eca_nfnet_l3,448,271.28,1887.306,512,72.04,52.55,118.4
1321
+ convnext_xlarge,384,266.61,960.183,256,350.2,179.2,168.99
1322
+ vit_large_patch16_siglip_gap_384,384,265.41,2893.649,768,303.69,190.85,269.55
1323
+ xcit_small_12_p8_384,384,264.43,1452.163,384,26.21,54.92,138.29
1324
+ vit_large_patch16_siglip_384,384,264.41,2904.586,768,316.28,192.07,270.75
1325
+ resnetv2_152x2_bit,384,257.56,1490.858,384,236.34,136.16,132.56
1326
+ coatnet_5_224,224,255.18,752.374,192,687.47,145.49,194.24
1327
+ vit_large_patch14_clip_quickgelu_336,336,250.08,3071.031,768,304.29,191.11,270.24
1328
+ resnetv2_152x4_bit,224,249.77,2049.841,512,936.53,186.9,90.22
1329
+ resnetv2_50x3_bit,448,246.98,777.372,192,217.32,145.7,133.37
1330
+ maxvit_base_tf_384,384,242.99,790.147,192,119.65,73.8,332.9
1331
+ sam2_hiera_small,896,242.55,263.836,64,33.95,123.99,442.63
1332
+ swinv2_cr_base_384,384,240.19,1065.812,256,87.88,50.57,333.68
1333
+ caformer_m36,384,240.01,1066.57,256,56.2,42.11,196.35
1334
+ xcit_large_24_p16_384,384,237.61,3232.218,768,189.1,105.35,137.17
1335
+ resnetrs350,384,236.9,4322.505,1024,163.96,77.59,154.74
1336
+ maxvit_small_tf_512,512,235.03,408.44,96,69.13,67.26,383.77
1337
+ volo_d3_448,448,231.1,2215.491,512,86.63,96.33,446.83
1338
+ eva_giant_patch14_224,224,229.92,4453.698,1024,1012.56,267.18,192.64
1339
+ eva_giant_patch14_clip_224,224,229.31,4465.497,1024,1012.59,267.18,192.64
1340
+ convformer_m36,384,228.98,1117.982,256,57.05,37.87,123.56
1341
+ vit_giant_patch14_224,224,225.31,4544.745,1024,1012.61,267.18,192.64
1342
+ vit_giant_patch14_clip_224,224,224.09,4569.511,1024,1012.65,267.18,192.64
1343
+ regnety_640,384,219.6,1165.748,256,281.38,188.47,124.83
1344
+ cait_s24_384,384,215.82,2372.283,512,47.06,32.17,245.31
1345
+ vitamin_large_336,336,214.38,895.584,192,333.57,175.72,307.47
1346
+ vitamin_large2_336,336,214.36,895.665,192,333.83,175.72,307.47
1347
+ seresnextaa201d_32x8d,384,212.89,2405.003,512,149.39,101.11,199.72
1348
+ nfnet_f3,416,210.03,2437.746,512,254.92,115.58,141.78
1349
+ efficientnetv2_xl,512,209.86,2439.71,512,208.12,93.85,247.32
1350
+ focalnet_huge_fl4,224,209.11,2448.416,512,686.46,118.9,113.34
1351
+ resnest269e,416,205.52,2491.203,512,110.93,77.69,171.98
1352
+ nfnet_f4,384,204.25,3760.055,768,316.07,122.14,147.57
1353
+ efficientnet_b7,600,202.28,474.573,96,66.35,38.33,289.94
1354
+ tf_efficientnetv2_xl,512,202.25,2531.445,512,208.12,93.85,247.32
1355
+ convnextv2_large,384,201.98,950.553,192,197.96,101.1,126.74
1356
+ tf_efficientnet_b7,600,189.69,506.052,96,66.35,38.33,289.94
1357
+ resnetv2_152x2_bit,448,187.23,1367.275,256,236.34,184.99,180.43
1358
+ eva02_large_patch14_clip_336,336,186.43,4119.531,768,304.43,191.34,289.13
1359
+ swin_large_patch4_window12_384,384,185.91,688.483,128,196.74,104.08,202.16
1360
+ dm_nfnet_f3,416,181.39,2822.569,512,254.92,115.58,141.78
1361
+ caformer_b36,384,177.61,1441.35,256,98.75,72.33,261.79
1362
+ resnetrs420,416,176.81,4343.655,768,191.89,108.45,213.79
1363
+ dm_nfnet_f4,384,176.15,2906.576,512,316.07,122.14,147.57
1364
+ xcit_large_24_p8_224,224,176.14,2906.744,512,188.93,141.23,181.56
1365
+ maxvit_large_tf_384,384,171.73,745.342,128,212.03,132.55,445.84
1366
+ vitamin_xlarge_336,336,171.61,1118.79,192,436.06,230.18,347.33
1367
+ mvitv2_huge_cls,224,171.54,2238.569,384,694.8,120.67,243.63
1368
+ convformer_b36,384,169.76,1508.019,256,99.88,66.67,164.75
1369
+ convnextv2_huge,288,165.43,773.711,128,660.29,190.1,130.7
1370
+ vit_so400m_patch14_siglip_gap_384,384,154.69,3309.867,512,412.99,333.46,451.19
1371
+ vitamin_large_384,384,154.64,1241.528,192,333.71,234.44,440.16
1372
+ vitamin_large2_384,384,154.6,1241.879,192,333.97,234.44,440.16
1373
+ vit_so400m_patch14_siglip_384,384,153.85,3327.91,512,428.23,335.4,452.89
1374
+ focalnet_large_fl3,384,153.05,1672.622,256,239.13,105.06,168.04
1375
+ swinv2_cr_large_384,384,151.56,844.525,128,196.68,108.96,404.96
1376
+ resnet50x64_clip_gap,448,151.54,1689.315,256,365.03,253.96,233.22
1377
+ davit_base_fl,768,150.78,848.881,128,90.37,190.32,530.15
1378
+ vit_huge_patch14_clip_336,336,150.49,3402.128,512,632.46,390.97,407.54
1379
+ resnetv2_101x3_bit,448,148.24,1295.156,192,387.93,280.33,194.78
1380
+ resnet50x64_clip,448,147.7,1733.217,256,420.38,265.02,239.13
1381
+ focalnet_large_fl4,384,145.84,1755.371,256,239.32,105.2,181.78
1382
+ nfnet_f5,416,144.21,3550.232,512,377.21,170.71,204.56
1383
+ cait_s36_384,384,143.55,3566.739,512,68.37,47.99,367.4
1384
+ beit_large_patch16_512,512,142.33,3597.213,512,305.67,362.24,656.39
1385
+ volo_d4_448,448,141.31,2717.358,384,193.41,197.13,527.35
1386
+ xcit_small_24_p8_384,384,138.77,2767.038,384,47.63,105.24,265.91
1387
+ maxvit_base_tf_512,512,136.61,702.73,96,119.88,138.02,703.99
1388
+ vit_gigantic_patch14_clip_224,224,131.25,3900.97,512,1844.91,483.96,275.37
1389
+ vit_gigantic_patch14_224,224,131.1,3905.342,512,1844.44,483.95,275.37
1390
+ efficientnet_b8,672,130.9,733.374,96,87.41,63.48,442.89
1391
+ sam2_hiera_base_plus,896,129.71,493.389,64,68.68,227.48,828.88
1392
+ vitamin_xlarge_384,384,129.66,987.169,128,436.06,306.38,493.46
1393
+ dm_nfnet_f5,416,123.97,4129.905,512,377.21,170.71,204.56
1394
+ tf_efficientnet_b8,672,123.56,776.89,96,87.41,63.48,442.89
1395
+ swinv2_base_window12to24_192to384,384,116.6,548.838,64,87.92,55.25,280.36
1396
+ nfnet_f4,512,116.51,3295.94,384,316.07,216.26,262.26
1397
+ vit_huge_patch14_clip_378,378,115.85,4419.399,512,632.68,503.79,572.79
1398
+ regnety_1280,384,113.91,1123.629,128,644.81,374.99,210.2
1399
+ nfnet_f6,448,108.83,4704.573,512,438.36,229.7,273.62
1400
+ vit_large_patch14_reg4_dinov2,518,108.71,3532.214,384,304.37,508.9,1064.02
1401
+ focalnet_xlarge_fl3,384,108.53,1769.039,192,408.79,185.61,223.99
1402
+ vit_large_patch14_dinov2,518,108.27,3546.558,384,304.37,507.15,1058.82
1403
+ vit_so400m_patch14_siglip_gap_448,448,107.18,3582.703,384,413.33,487.18,764.26
1404
+ focalnet_xlarge_fl4,384,103.7,1851.388,192,409.03,185.79,242.31
1405
+ vit_huge_patch14_clip_quickgelu_378,378,103.32,3716.757,384,632.68,503.79,572.79
1406
+ maxvit_xlarge_tf_384,384,102.58,935.85,96,475.32,292.78,668.76
1407
+ eva02_large_patch14_448,448,102.5,4995.169,512,305.08,362.33,689.95
1408
+ eva_giant_patch14_336,336,100.67,5085.918,512,1013.01,620.64,550.67
1409
+ dm_nfnet_f4,512,98.99,3879.344,384,316.07,216.26,262.26
1410
+ vit_huge_patch16_gap_448,448,98.47,3899.461,384,631.67,544.7,636.83
1411
+ xcit_medium_24_p8_384,384,95.83,2671.483,256,84.32,186.67,354.73
1412
+ maxvit_large_tf_512,512,95.8,668.018,64,212.33,244.75,942.15
1413
+ volo_d5_448,448,94.17,2718.505,256,295.91,315.06,737.92
1414
+ dm_nfnet_f6,448,93.55,4104.519,384,438.36,229.7,273.62
1415
+ convnextv2_huge,384,93.18,1030.237,96,660.29,337.96,232.35
1416
+ swinv2_cr_giant_224,224,86.22,1484.541,128,2598.76,483.85,309.15
1417
+ nfnet_f5,544,84.48,3030.209,256,377.21,290.97,349.71
1418
+ nfnet_f7,480,82.66,4645.539,384,499.5,300.08,355.86
1419
+ tf_efficientnet_l2,475,82.5,1163.656,96,480.31,172.11,609.89
1420
+ swinv2_large_window12to24_192to384,384,74.51,644.202,48,196.74,116.15,407.83
1421
+ dm_nfnet_f5,544,72.96,3508.675,256,377.21,290.97,349.71
1422
+ volo_d5_512,512,72.05,3553.134,256,296.09,425.09,1105.37
1423
+ swinv2_cr_huge_384,384,71.81,891.159,64,657.94,352.04,583.18
1424
+ nfnet_f6,576,66.27,3862.681,256,438.36,378.69,452.2
1425
+ regnety_2560,384,62.8,1528.693,96,1282.6,747.83,296.49
1426
+ cait_m36_384,384,62.36,4105.492,256,271.22,173.11,734.81
1427
+ davit_huge_fl,768,58.91,1086.419,64,360.64,744.84,1060.3
1428
+ xcit_large_24_p8_384,384,58.6,3276.243,192,188.93,415.0,531.82
1429
+ maxvit_xlarge_tf_512,512,57.35,836.875,48,475.77,534.14,1413.22
1430
+ dm_nfnet_f6,576,56.67,4517.07,256,438.36,378.69,452.2
1431
+ resnetv2_152x4_bit,480,56.51,2265.038,128,936.53,844.84,414.26
1432
+ convnextv2_huge,512,52.45,915.15,48,660.29,600.81,413.07
1433
+ nfnet_f7,608,52.01,4921.813,256,499.5,480.39,570.85
1434
+ sam2_hiera_large,1024,42.98,1116.814,48,212.15,907.48,2190.34
1435
+ eva_giant_patch14_560,560,33.62,3807.521,128,1014.45,1906.76,2577.17
1436
+ vit_giant_patch14_dinov2,518,33.11,3866.111,128,1136.48,1784.2,2757.89
1437
+ vit_giant_patch14_reg4_dinov2,518,32.88,3893.06,128,1136.48,1790.08,2771.21
1438
+ samvit_base_patch16,1024,31.14,385.374,12,89.67,486.43,1343.27
1439
+ efficientnet_l2,800,30.89,1035.976,32,480.31,479.12,1707.39
1440
+ tf_efficientnet_l2,800,30.04,1065.17,32,480.31,479.12,1707.39
1441
+ cait_m48_448,448,27.15,4715.382,128,356.46,329.41,1708.23
1442
+ swinv2_cr_giant_384,384,23.32,1372.464,32,2598.76,1450.71,1394.86
1443
+ vit_so400m_patch14_siglip_gap_896,896,19.9,4824.742,96,416.87,2731.49,8492.88
1444
+ samvit_large_patch16,1024,14.97,534.402,8,308.28,1493.86,2553.78
1445
+ samvit_huge_patch16,1024,9.99,600.87,6,637.03,2982.23,3428.16
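These benchmark result files are plain CSV, so they can be inspected directly. Below is a minimal sketch (not part of the repository's own benchmarking tooling) that loads the NHWC / PyTorch 1.13 file added next and ranks models by inference throughput; it assumes pandas is installed and uses the column names from that file's header row.

```python
# Minimal sketch: load a timm benchmark CSV and rank models by inference throughput.
# Assumes pandas is available; the path and column names are taken from the file added below.
import pandas as pd

path = "pytorch-image-models/results/benchmark-infer-amp-nhwc-pt113-cu117-rtx3090.csv"
df = pd.read_csv(path)

# Highest throughput first (samples per second at the listed batch size).
top = df.sort_values("infer_samples_per_sec", ascending=False)
print(top[["model", "infer_samples_per_sec", "infer_step_time", "param_count"]].head(10))
```

The column order is not identical across the per-PyTorch-version result files, so selecting columns by name rather than by position is the safer way to compare or combine them.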
pytorch-image-models/results/benchmark-infer-amp-nhwc-pt113-cu117-rtx3090.csv ADDED
@@ -0,0 +1,930 @@
1
+ model,infer_samples_per_sec,infer_step_time,infer_batch_size,infer_img_size,infer_gmacs,infer_macts,param_count
2
+ tinynet_e,72737.62,14.068,1024,106,0.03,0.69,2.04
3
+ mobilenetv3_small_050,54822.3,18.668,1024,224,0.03,0.92,1.59
4
+ lcnet_035,53629.35,19.084,1024,224,0.03,1.04,1.64
5
+ lcnet_050,45492.41,22.499,1024,224,0.05,1.26,1.88
6
+ mobilenetv3_small_075,39215.51,26.102,1024,224,0.05,1.3,2.04
7
+ tinynet_d,37346.61,27.409,1024,152,0.05,1.42,2.34
8
+ mobilenetv3_small_100,36280.34,28.214,1024,224,0.06,1.42,2.54
9
+ tf_mobilenetv3_small_minimal_100,31726.33,32.265,1024,224,0.06,1.41,2.04
10
+ tf_mobilenetv3_small_075,31503.43,32.494,1024,224,0.05,1.3,2.04
11
+ lcnet_075,29817.69,34.332,1024,224,0.1,1.99,2.36
12
+ tf_mobilenetv3_small_100,29444.91,34.767,1024,224,0.06,1.42,2.54
13
+ mnasnet_small,25354.86,40.376,1024,224,0.07,2.16,2.03
14
+ lcnet_100,24134.76,42.417,1024,224,0.16,2.52,2.95
15
+ regnetx_002,23983.4,42.686,1024,224,0.2,2.16,2.68
16
+ levit_128s,22675.73,45.148,1024,224,0.31,1.88,7.78
17
+ regnety_002,21709.37,47.158,1024,224,0.2,2.17,3.16
18
+ mobilenetv2_035,21673.44,47.236,1024,224,0.07,2.86,1.68
19
+ mnasnet_050,20010.27,51.163,1024,224,0.11,3.07,2.22
20
+ ghostnet_050,18932.82,54.075,1024,224,0.05,1.77,2.59
21
+ tinynet_c,18428.42,55.556,1024,184,0.11,2.87,2.46
22
+ semnasnet_050,17215.18,59.471,1024,224,0.11,3.44,2.08
23
+ mobilenetv2_050,17194.94,59.542,1024,224,0.1,3.64,1.97
24
+ cs3darknet_focus_s,16189.76,63.24,1024,256,0.69,2.7,3.27
25
+ lcnet_150,15557.15,65.811,1024,224,0.34,3.79,4.5
26
+ cs3darknet_s,15369.47,66.615,1024,256,0.72,2.97,3.28
27
+ levit_128,15337.67,66.754,1024,224,0.41,2.71,9.21
28
+ gernet_s,15288.68,66.966,1024,224,0.75,2.65,8.17
29
+ mobilenetv3_large_075,14216.3,72.019,1024,224,0.16,4.0,3.99
30
+ mixer_s32_224,14182.92,72.188,1024,224,1.0,2.28,19.1
31
+ vit_tiny_r_s16_p8_224,14125.39,72.482,1024,224,0.44,2.06,6.34
32
+ resnet10t,14112.07,72.551,1024,224,1.1,2.43,5.44
33
+ vit_small_patch32_224,13799.47,74.195,1024,224,1.15,2.5,22.88
34
+ regnetx_004,13610.2,75.225,1024,224,0.4,3.14,5.16
35
+ levit_192,13524.14,75.706,1024,224,0.66,3.2,10.95
36
+ mobilenetv3_rw,12956.58,79.021,1024,224,0.23,4.41,5.48
37
+ hardcorenas_a,12803.61,79.966,1024,224,0.23,4.38,5.26
38
+ mobilenetv3_large_100,12749.93,80.304,1024,224,0.23,4.41,5.48
39
+ mnasnet_075,12532.36,81.697,1024,224,0.23,4.77,3.17
40
+ tf_mobilenetv3_large_075,12186.51,84.017,1024,224,0.16,4.0,3.99
41
+ tinynet_b,12083.18,84.735,1024,188,0.21,4.44,3.73
42
+ regnety_004,11918.36,85.906,1024,224,0.41,3.89,4.34
43
+ tf_mobilenetv3_large_minimal_100,11715.94,87.392,1024,224,0.22,4.4,3.92
44
+ hardcorenas_c,11548.05,88.662,1024,224,0.28,5.01,5.52
45
+ hardcorenas_b,11510.71,88.949,1024,224,0.26,5.09,5.18
46
+ ese_vovnet19b_slim_dw,11501.95,89.018,1024,224,0.4,5.28,1.9
47
+ ghostnet_100,11332.61,90.348,1024,224,0.15,3.55,5.18
48
+ mnasnet_100,11138.43,91.923,1024,224,0.33,5.46,4.38
49
+ gluon_resnet18_v1b,11098.78,92.252,1024,224,1.82,2.48,11.69
50
+ resnet18,11083.1,92.383,1024,224,1.82,2.48,11.69
51
+ swsl_resnet18,11062.48,92.555,1024,224,1.82,2.48,11.69
52
+ ssl_resnet18,11061.11,92.565,1024,224,1.82,2.48,11.69
53
+ tf_mobilenetv3_large_100,11018.56,92.922,1024,224,0.23,4.41,5.48
54
+ mnasnet_b1,10993.58,93.135,1024,224,0.33,5.46,4.38
55
+ hardcorenas_d,10910.47,93.843,1024,224,0.3,4.93,7.5
56
+ semnasnet_075,10898.09,93.951,1024,224,0.23,5.54,2.91
57
+ mobilenetv2_075,10893.76,93.988,1024,224,0.22,5.86,2.64
58
+ seresnet18,10385.56,98.588,1024,224,1.82,2.49,11.78
59
+ legacy_seresnet18,10064.41,101.734,1024,224,1.82,2.49,11.78
60
+ spnasnet_100,10009.21,102.296,1024,224,0.35,6.03,4.42
61
+ tf_efficientnetv2_b0,9930.95,103.1,1024,224,0.73,4.77,7.14
62
+ levit_256,9858.1,103.863,1024,224,1.13,4.23,18.89
63
+ tinynet_a,9720.11,105.337,1024,192,0.35,5.41,6.19
64
+ hardcorenas_f,9714.91,105.393,1024,224,0.35,5.57,8.2
65
+ semnasnet_100,9623.78,106.393,1024,224,0.32,6.23,3.89
66
+ mnasnet_a1,9623.77,106.393,1024,224,0.32,6.23,3.89
67
+ mobilenetv2_100,9598.91,106.667,1024,224,0.31,6.68,3.5
68
+ hardcorenas_e,9571.87,106.966,1024,224,0.35,5.65,8.07
69
+ dla46_c,9568.4,107.007,1024,224,0.58,4.5,1.3
70
+ efficientnet_lite0,9361.14,109.377,1024,224,0.4,6.74,4.65
71
+ fbnetc_100,9352.03,109.484,1024,224,0.4,6.51,5.57
72
+ resnet18d,9334.83,109.687,1024,224,2.06,3.29,11.71
73
+ ese_vovnet19b_slim,9109.47,112.4,1024,224,1.69,3.52,3.17
74
+ regnety_006,9097.63,112.542,1024,224,0.61,4.33,6.06
75
+ regnetz_005,8607.49,118.955,1024,224,0.52,5.86,7.12
76
+ xcit_nano_12_p16_224_dist,8577.2,119.375,1024,224,0.56,4.17,3.05
77
+ xcit_nano_12_p16_224,8554.61,119.689,1024,224,0.56,4.17,3.05
78
+ levit_256d,8382.88,122.143,1024,224,1.4,4.93,26.21
79
+ regnetx_006,8379.52,122.192,1024,224,0.61,3.98,6.2
80
+ ghostnet_130,8278.59,123.681,1024,224,0.24,4.6,7.36
81
+ tf_efficientnet_lite0,8080.51,126.714,1024,224,0.4,6.74,4.65
82
+ efficientnet_b0,7965.17,128.548,1024,224,0.4,6.75,5.29
83
+ mnasnet_140,7779.42,131.618,1024,224,0.6,7.71,7.12
84
+ deit_tiny_distilled_patch16_224,7467.68,137.113,1024,224,1.27,6.01,5.91
85
+ rexnetr_100,7464.12,137.179,1024,224,0.43,7.72,4.88
86
+ deit_tiny_patch16_224,7430.15,137.806,1024,224,1.26,5.97,5.72
87
+ resnet14t,7429.68,137.815,1024,224,1.69,5.8,10.08
88
+ vit_tiny_patch16_224,7424.93,137.902,1024,224,1.26,5.97,5.72
89
+ regnetx_008,7394.88,138.463,1024,224,0.81,5.15,7.26
90
+ mobilenetv2_110d,7247.12,141.287,1024,224,0.45,8.71,4.52
91
+ hrnet_w18_small,7232.93,141.561,1024,224,1.61,5.72,13.19
92
+ tf_efficientnet_b0,7016.18,145.938,1024,224,0.4,6.75,5.29
93
+ regnety_008,6938.46,147.571,1024,224,0.81,5.25,6.26
94
+ mobilevitv2_050,6848.87,149.503,1024,256,0.48,8.04,1.37
95
+ pit_ti_distilled_224,6811.68,150.317,1024,224,0.71,6.23,5.1
96
+ pit_ti_224,6784.24,150.927,1024,224,0.7,6.19,4.85
97
+ gernet_m,6679.85,153.286,1024,224,3.02,5.24,21.14
98
+ efficientnet_b1_pruned,6642.37,154.15,1024,240,0.4,6.21,6.33
99
+ resnet34,6496.42,157.614,1024,224,3.67,3.74,21.8
100
+ gluon_resnet34_v1b,6494.61,157.658,1024,224,3.67,3.74,21.8
101
+ tv_resnet34,6481.01,157.989,1024,224,3.67,3.74,21.8
102
+ tf_efficientnetv2_b1,6476.52,158.098,1024,240,1.21,7.34,8.14
103
+ semnasnet_140,6454.5,158.637,1024,224,0.6,8.87,6.11
104
+ nf_regnet_b0,6452.24,158.693,1024,256,0.64,5.58,8.76
105
+ ese_vovnet19b_dw,6335.13,161.627,1024,224,1.34,8.25,6.54
106
+ mobilenetv2_140,6271.56,163.266,1024,224,0.6,9.57,6.11
107
+ rexnet_100,6226.48,164.447,1024,224,0.41,7.44,4.8
108
+ efficientnet_lite1,6187.91,165.472,1024,240,0.62,10.14,5.42
109
+ efficientnet_es_pruned,6115.4,167.434,1024,224,1.81,8.73,5.44
110
+ efficientnet_es,6115.12,167.443,1024,224,1.81,8.73,5.44
111
+ visformer_tiny,6103.09,167.772,1024,224,1.27,5.72,10.32
112
+ seresnet34,6058.13,169.019,1024,224,3.67,3.74,21.96
113
+ fbnetv3_b,6018.76,170.124,1024,256,0.55,9.1,8.6
114
+ selecsls42,5953.76,171.98,1024,224,2.94,4.62,30.35
115
+ selecsls42b,5921.2,172.924,1024,224,2.98,4.62,32.46
116
+ resnet26,5895.21,173.69,1024,224,2.36,7.35,16.0
117
+ edgenext_xx_small,5893.72,173.732,1024,288,0.33,4.21,1.33
118
+ levit_384,5880.4,174.126,1024,224,2.36,6.26,39.13
119
+ resnet34d,5865.98,174.555,1024,224,3.91,4.54,21.82
120
+ legacy_seresnet34,5850.24,175.025,1024,224,3.67,3.74,21.96
121
+ dla34,5827.3,175.712,1024,224,3.07,5.02,15.74
122
+ tf_efficientnet_es,5781.29,177.112,1024,224,1.81,8.73,5.44
123
+ cs3darknet_focus_m,5721.39,178.967,1024,288,2.51,6.19,9.3
124
+ resnetblur18,5636.65,181.657,1024,224,2.34,3.39,11.69
125
+ rexnetr_130,5590.0,183.173,1024,224,0.68,9.81,7.61
126
+ mobilevit_xxs,5524.87,185.333,1024,256,0.42,8.34,1.27
127
+ tf_efficientnet_lite1,5524.68,185.339,1024,240,0.62,10.14,5.42
128
+ cs3darknet_m,5478.07,186.916,1024,288,2.63,6.69,9.31
129
+ convnext_atto,5460.54,187.516,1024,288,0.91,6.3,3.7
130
+ xcit_tiny_12_p16_224_dist,5457.72,187.611,1024,224,1.24,6.29,6.72
131
+ xcit_tiny_12_p16_224,5456.63,187.649,1024,224,1.24,6.29,6.72
132
+ skresnet18,5413.1,189.159,1024,224,1.82,3.24,11.96
133
+ darknet17,5401.37,189.571,1024,256,3.26,7.18,14.3
134
+ mixnet_s,5392.58,189.878,1024,224,0.25,6.25,4.13
135
+ resmlp_12_224,5366.15,190.814,1024,224,3.01,5.5,15.35
136
+ resmlp_12_distilled_224,5364.91,190.857,1024,224,3.01,5.5,15.35
137
+ convnext_atto_ols,5288.94,193.6,1024,288,0.96,6.8,3.7
138
+ vit_base_patch32_clip_224,5280.68,193.903,1024,224,4.41,5.01,88.22
139
+ vit_base_patch32_224,5280.52,193.908,1024,224,4.41,5.01,88.22
140
+ pit_xs_distilled_224,5272.13,194.218,1024,224,1.41,7.76,11.0
141
+ pit_xs_224,5271.0,194.259,1024,224,1.4,7.71,10.62
142
+ repvgg_b0,5252.66,194.939,1024,224,3.41,6.15,15.82
143
+ mixer_b32_224,5221.71,196.094,1024,224,3.24,6.29,60.29
144
+ pvt_v2_b0,5210.31,196.521,1024,224,0.57,7.99,3.67
145
+ resnetaa34d,5171.78,197.986,1024,224,4.43,5.07,21.82
146
+ selecsls60,5160.83,198.407,1024,224,3.59,5.52,30.67
147
+ selecsls60b,5119.51,200.008,1024,224,3.63,5.52,32.77
148
+ mobilenetv2_120d,5111.95,200.304,1024,224,0.69,11.97,5.83
149
+ resnet26d,5108.26,200.449,1024,224,2.6,8.15,16.01
150
+ gmixer_12_224,5064.97,202.162,1024,224,2.67,7.26,12.7
151
+ gmlp_ti16_224,5007.93,204.464,1024,224,1.34,7.55,5.87
152
+ mixer_s16_224,4998.69,204.842,1024,224,3.79,5.97,18.53
153
+ tf_mixnet_s,4989.18,205.231,1024,224,0.25,6.25,4.13
154
+ efficientnet_b0_g16_evos,4930.67,207.667,1024,224,1.01,7.42,8.11
155
+ rexnetr_150,4900.22,208.959,1024,224,0.89,11.13,9.78
156
+ fbnetv3_d,4881.14,209.776,1024,256,0.68,11.1,10.31
157
+ darknet21,4850.41,211.105,1024,256,3.93,7.47,20.86
158
+ nf_resnet26,4816.48,212.591,1024,224,2.41,7.35,16.0
159
+ efficientnet_lite2,4781.65,214.14,1024,260,0.89,12.9,6.09
160
+ convnext_femto,4749.12,215.607,1024,288,1.3,7.56,5.22
161
+ tf_efficientnetv2_b2,4718.26,217.018,1024,260,1.72,9.84,10.1
162
+ sedarknet21,4656.51,219.895,1024,256,3.93,7.47,20.95
163
+ dla46x_c,4636.77,220.831,1024,224,0.54,5.66,1.07
164
+ convnext_femto_ols,4618.33,221.714,1024,288,1.35,8.06,5.23
165
+ resnext26ts,4603.25,222.441,1024,256,2.43,10.52,10.3
166
+ efficientformer_l1,4566.14,224.248,1024,224,1.3,5.53,12.29
167
+ dpn48b,4506.78,227.201,1024,224,1.69,8.92,9.13
168
+ crossvit_tiny_240,4481.69,228.473,1024,240,1.57,9.08,7.01
169
+ dla60x_c,4459.27,229.622,1024,224,0.59,6.01,1.32
170
+ eca_resnext26ts,4456.63,229.759,1024,256,2.43,10.52,10.3
171
+ seresnext26ts,4453.99,229.896,1024,256,2.43,10.52,10.39
172
+ legacy_seresnext26_32x4d,4441.15,230.558,1024,224,2.49,9.39,16.79
173
+ gernet_l,4396.56,232.898,1024,256,4.57,8.0,31.08
174
+ mobilevitv2_075,4393.87,233.041,1024,256,1.05,12.06,2.87
175
+ gcresnext26ts,4384.92,233.516,1024,256,2.43,10.53,10.48
176
+ tf_efficientnet_b1,4370.6,234.282,1024,240,0.71,10.88,7.79
177
+ tf_efficientnet_lite2,4293.9,238.467,1024,260,0.89,12.9,6.09
178
+ rexnet_130,4262.16,240.243,1024,224,0.68,9.71,7.56
179
+ efficientnet_b1,4239.44,241.53,1024,256,0.77,12.22,7.79
180
+ vit_small_patch32_384,4239.1,241.55,1024,384,3.45,8.25,22.92
181
+ crossvit_9_240,4212.37,243.082,1024,240,1.85,9.52,8.55
182
+ crossvit_9_dagger_240,4095.03,250.049,1024,240,1.99,9.97,8.78
183
+ nf_ecaresnet26,4091.86,250.24,1024,224,2.41,7.36,16.0
184
+ nf_seresnet26,4088.47,250.449,1024,224,2.41,7.36,17.4
185
+ efficientnet_cc_b0_8e,4076.51,251.183,1024,224,0.42,9.42,24.01
186
+ efficientnet_cc_b0_4e,4073.3,251.382,1024,224,0.41,9.42,13.31
187
+ ecaresnet50d_pruned,4055.39,252.492,1024,224,2.53,6.43,19.94
188
+ efficientnet_b2_pruned,4030.92,254.025,1024,260,0.73,9.13,8.31
189
+ ecaresnext50t_32x4d,4018.73,254.796,1024,224,2.7,10.09,15.41
190
+ ecaresnext26t_32x4d,4017.09,254.9,1024,224,2.7,10.09,15.41
191
+ seresnext26t_32x4d,4014.43,255.069,1024,224,2.7,10.09,16.81
192
+ seresnext26tn_32x4d,4014.36,255.074,1024,224,2.7,10.09,16.81
193
+ repvgg_a2,3987.84,256.77,1024,224,5.7,6.26,28.21
194
+ poolformer_s12,3982.67,257.103,1024,224,1.82,5.53,11.92
195
+ seresnext26d_32x4d,3979.57,257.303,1024,224,2.73,10.19,16.81
196
+ vit_tiny_r_s16_p8_384,3963.05,258.374,1024,384,1.34,6.49,6.36
197
+ resnet26t,3939.46,259.923,1024,256,3.35,10.52,16.01
198
+ nf_regnet_b1,3911.64,261.772,1024,288,1.02,9.2,10.22
199
+ rexnet_150,3881.93,263.775,1024,224,0.9,11.21,9.73
200
+ nf_regnet_b2,3879.78,263.921,1024,272,1.22,9.27,14.31
201
+ resnetv2_50,3865.49,264.896,1024,224,4.11,11.11,25.55
202
+ regnetx_016,3852.41,265.794,1024,224,1.62,7.93,9.19
203
+ tf_efficientnet_cc_b0_4e,3812.08,268.608,1024,224,0.41,9.42,13.31
204
+ tf_efficientnet_cc_b0_8e,3803.67,269.202,1024,224,0.42,9.42,24.01
205
+ convnext_pico,3747.49,273.239,1024,288,2.27,10.08,9.05
206
+ ecaresnetlight,3744.45,273.459,1024,224,4.11,8.42,30.16
207
+ dpn68,3724.59,274.917,1024,224,2.35,10.47,12.61
208
+ edgenext_x_small,3714.71,275.646,1024,288,0.68,7.5,2.34
209
+ gluon_resnet50_v1b,3672.76,278.798,1024,224,4.11,11.11,25.56
210
+ ssl_resnet50,3671.85,278.866,1024,224,4.11,11.11,25.56
211
+ efficientnet_em,3671.25,278.913,1024,240,3.04,14.34,6.9
212
+ resnet50,3668.58,279.116,1024,224,4.11,11.11,25.56
213
+ swsl_resnet50,3668.32,279.136,1024,224,4.11,11.11,25.56
214
+ tv_resnet50,3667.14,279.225,1024,224,4.11,11.11,25.56
215
+ dpn68b,3667.07,279.229,1024,224,2.35,10.47,12.61
216
+ rexnetr_200,3659.45,279.811,1024,224,1.59,15.11,16.52
217
+ convnext_pico_ols,3651.34,280.434,1024,288,2.37,10.74,9.06
218
+ botnet26t_256,3594.28,284.883,1024,256,3.32,11.98,12.49
219
+ bat_resnext26ts,3569.91,286.828,1024,256,2.53,12.51,10.73
220
+ resnetv2_50t,3547.32,288.657,1024,224,4.32,11.82,25.57
221
+ mixnet_m,3537.26,289.477,1024,224,0.36,8.19,5.01
222
+ regnety_016,3531.88,289.919,1024,224,1.63,8.04,11.2
223
+ tf_efficientnet_em,3529.62,290.106,1024,240,3.04,14.34,6.9
224
+ resnetv2_50d,3525.02,290.482,1024,224,4.35,11.92,25.57
225
+ halonet26t,3515.15,291.299,1024,256,3.19,11.69,12.48
226
+ resnet32ts,3492.62,293.179,1024,256,4.63,11.58,17.96
227
+ hrnet_w18_small_v2,3482.81,294.001,1024,224,2.62,9.65,15.6
228
+ gluon_resnet50_v1c,3481.59,294.107,1024,224,4.35,11.92,25.58
229
+ dla60,3466.91,295.351,1024,224,4.26,10.16,22.04
230
+ resnet33ts,3460.78,295.875,1024,256,4.76,11.66,19.68
231
+ tf_efficientnet_b2,3402.3,300.962,1024,260,1.02,13.83,9.11
232
+ convit_tiny,3399.61,301.199,1024,224,1.26,7.94,5.71
233
+ resnet50t,3373.72,303.51,1024,224,4.32,11.82,25.57
234
+ tf_mixnet_m,3366.38,304.167,1024,224,0.36,8.19,5.01
235
+ efficientnet_b3_pruned,3360.1,304.74,1024,300,1.04,11.86,9.86
236
+ seresnet33ts,3354.27,305.27,1024,256,4.76,11.66,19.78
237
+ resnet50d,3351.47,305.527,1024,224,4.35,11.92,25.58
238
+ eca_resnet33ts,3350.95,305.574,1024,256,4.76,11.66,19.68
239
+ vit_small_resnet26d_224,3346.77,305.954,1024,224,5.07,11.12,63.61
240
+ cs3darknet_focus_l,3335.18,307.018,1024,288,5.9,10.16,21.15
241
+ gluon_resnet50_v1d,3334.65,307.068,1024,224,4.35,11.92,25.58
242
+ mobilevitv2_100,3324.63,307.994,1024,256,1.84,16.08,4.9
243
+ vovnet39a,3320.12,308.408,1024,224,7.09,6.73,22.6
244
+ legacy_seresnet50,3312.33,309.135,1024,224,3.88,10.6,28.09
245
+ efficientnet_b0_gn,3307.86,309.554,1024,224,0.42,6.75,5.29
246
+ gcresnet33ts,3307.01,309.633,1024,256,4.76,11.68,19.88
247
+ pit_s_distilled_224,3301.25,310.173,1024,224,2.9,11.64,24.04
248
+ pit_s_224,3299.97,310.295,1024,224,2.88,11.56,23.46
249
+ mobilevit_xs,3252.28,314.844,1024,256,1.05,16.33,2.32
250
+ deit_small_distilled_patch16_224,3233.6,316.663,1024,224,4.63,12.02,22.44
251
+ efficientnet_b2a,3223.97,317.608,1024,288,1.12,16.2,9.11
252
+ efficientnet_b2,3223.9,317.615,1024,288,1.12,16.2,9.11
253
+ deit_small_patch16_224,3218.99,318.1,1024,224,4.61,11.95,22.05
254
+ vit_small_patch16_224,3218.38,318.16,1024,224,4.61,11.95,22.05
255
+ cs3darknet_l,3210.26,318.965,1024,288,6.16,10.83,21.16
256
+ ese_vovnet39b,3206.21,319.369,1024,224,7.09,6.74,24.57
257
+ eca_vovnet39b,3203.77,319.612,1024,224,7.09,6.74,22.6
258
+ convnextv2_atto,3196.73,320.315,1024,288,0.91,6.3,3.71
259
+ coatnet_pico_rw_224,3189.82,321.008,1024,224,2.05,14.62,10.85
260
+ seresnet50,3181.57,321.841,1024,224,4.11,11.13,28.09
261
+ pvt_v2_b1,3147.37,325.339,1024,224,2.12,15.39,14.01
262
+ coat_lite_tiny,3146.41,325.439,1024,224,1.6,11.65,5.72
263
+ res2net50_48w_2s,3127.52,327.404,1024,224,4.18,11.72,25.29
264
+ eca_botnext26ts_256,3112.32,329.003,1024,256,2.46,11.6,10.59
265
+ ecaresnet101d_pruned,3103.16,329.973,1024,224,3.48,7.69,24.88
266
+ efficientnet_b0_g8_gn,3073.2,333.192,1024,224,0.66,6.75,6.56
267
+ ssl_resnext50_32x4d,3071.68,333.356,1024,224,4.26,14.4,25.03
268
+ dla60x,3071.64,333.359,1024,224,3.54,13.8,17.35
269
+ swsl_resnext50_32x4d,3070.7,333.464,1024,224,4.26,14.4,25.03
270
+ tv_resnext50_32x4d,3069.81,333.56,1024,224,4.26,14.4,25.03
271
+ resnext50_32x4d,3069.72,333.57,1024,224,4.26,14.4,25.03
272
+ gluon_resnext50_32x4d,3068.47,333.704,1024,224,4.26,14.4,25.03
273
+ vit_small_r26_s32_224,3061.92,334.417,1024,224,3.56,9.85,36.43
274
+ skresnet34,3055.95,335.073,1024,224,3.67,5.13,22.28
275
+ deit3_small_patch16_224_in21ft1k,3048.82,335.855,1024,224,4.61,11.95,22.06
276
+ deit3_small_patch16_224,3047.23,336.031,1024,224,4.61,11.95,22.06
277
+ eca_halonext26ts,3035.71,337.305,1024,256,2.44,11.46,10.76
278
+ haloregnetz_b,3032.47,337.665,1024,224,1.97,11.94,11.68
279
+ vit_relpos_base_patch32_plus_rpn_256,3026.45,338.338,1024,256,7.68,8.01,119.42
280
+ vit_relpos_small_patch16_rpn_224,3019.95,339.067,1024,224,4.59,13.05,21.97
281
+ vit_relpos_small_patch16_224,3008.26,340.383,1024,224,4.59,13.05,21.98
282
+ vit_srelpos_small_patch16_224,3000.96,341.213,1024,224,4.59,12.16,21.97
283
+ xcit_nano_12_p16_384_dist,3000.48,341.266,1024,384,1.64,12.15,3.05
284
+ cs3sedarknet_l,2995.41,341.845,1024,288,6.16,10.83,21.91
285
+ resnetaa50d,2993.03,342.116,1024,224,5.39,12.44,25.58
286
+ vgg11,2983.47,85.796,256,224,7.61,7.44,132.86
287
+ selecsls84,2973.16,344.402,1024,224,5.9,7.57,50.95
288
+ resnetrs50,2963.42,345.535,1024,224,4.48,12.14,35.69
289
+ seresnet50t,2957.12,346.271,1024,224,4.32,11.83,28.1
290
+ resnest14d,2954.69,346.556,1024,224,2.76,7.33,10.61
291
+ gluon_resnet50_v1s,2953.65,346.677,1024,224,5.47,13.52,25.68
292
+ coat_lite_mini,2952.61,346.799,1024,224,2.0,12.25,11.01
293
+ ecaresnet50d,2945.96,347.583,1024,224,4.35,11.93,25.58
294
+ densenet121,2933.45,349.064,1024,224,2.87,6.9,7.98
295
+ tv_densenet121,2929.69,349.514,1024,224,2.87,6.9,7.98
296
+ vit_base_patch32_plus_256,2929.65,349.519,1024,256,7.79,7.76,119.48
297
+ rexnet_200,2927.94,349.723,1024,224,1.56,14.91,16.37
298
+ xcit_tiny_24_p16_224_dist,2927.0,349.834,1024,224,2.34,11.82,12.12
299
+ xcit_tiny_24_p16_224,2921.97,350.436,1024,224,2.34,11.82,12.12
300
+ coatnet_nano_cc_224,2867.38,357.108,1024,224,2.24,15.02,13.76
301
+ gcresnext50ts,2857.34,358.363,1024,256,3.75,15.46,15.67
302
+ lambda_resnet26rpt_256,2853.55,358.839,1024,256,3.16,11.87,10.99
303
+ resnext50d_32x4d,2845.08,359.908,1024,224,4.5,15.2,25.05
304
+ mixnet_l,2828.6,361.996,1024,224,0.58,10.84,7.33
305
+ densenet121d,2824.08,362.584,1024,224,3.11,7.7,8.0
306
+ efficientnet_lite3,2821.84,362.87,1024,300,1.65,21.85,8.2
307
+ cspresnet50,2793.65,366.534,1024,256,4.54,11.5,21.62
308
+ coatnet_nano_rw_224,2781.93,368.077,1024,224,2.41,15.41,15.14
309
+ vgg11_bn,2760.38,370.949,1024,224,7.62,7.44,132.87
310
+ vovnet57a,2755.77,371.572,1024,224,8.95,7.52,36.64
311
+ resmlp_24_224,2750.33,372.306,1024,224,5.96,10.91,30.02
312
+ resmlp_24_distilled_224,2740.33,373.665,1024,224,5.96,10.91,30.02
313
+ convnextv2_femto,2735.91,374.269,1024,288,1.3,7.56,5.23
314
+ flexivit_small,2735.78,374.287,1024,240,5.35,14.18,22.06
315
+ gcresnet50t,2732.04,374.8,1024,256,5.42,14.67,25.9
316
+ legacy_seresnext50_32x4d,2722.84,376.065,1024,224,4.26,14.42,27.56
317
+ seresnext50_32x4d,2721.47,376.256,1024,224,4.26,14.42,27.56
318
+ gluon_seresnext50_32x4d,2720.58,376.379,1024,224,4.26,14.42,27.56
319
+ visformer_small,2719.93,376.468,1024,224,4.88,11.43,40.22
320
+ twins_svt_small,2713.39,377.374,1024,224,2.94,13.75,24.06
321
+ resnetv2_50x1_bit_distilled,2708.81,378.014,1024,224,4.23,11.11,25.55
322
+ res2net50_14w_8s,2692.9,380.248,1024,224,4.21,13.28,25.06
323
+ resnetblur50,2685.97,381.228,1024,224,5.16,12.02,25.56
324
+ vit_base_resnet26d_224,2684.6,381.421,1024,224,6.97,13.16,101.4
325
+ tf_mixnet_l,2680.8,381.958,1024,224,0.58,10.84,7.33
326
+ seresnetaa50d,2658.93,385.106,1024,224,5.4,12.46,28.11
327
+ dla60_res2net,2656.16,385.506,1024,224,4.15,12.34,20.85
328
+ cspresnet50d,2655.05,385.668,1024,256,4.86,12.55,21.64
329
+ coatnext_nano_rw_224,2655.0,385.674,1024,224,2.47,12.8,14.7
330
+ ese_vovnet57b,2654.33,385.773,1024,224,8.95,7.52,38.61
331
+ tf_efficientnetv2_b3,2654.14,385.8,1024,300,3.04,15.74,14.36
332
+ cspresnet50w,2641.68,387.621,1024,256,5.04,12.19,28.12
333
+ res2net50_26w_4s,2629.64,389.395,1024,224,4.28,12.61,25.7
334
+ regnetz_b16,2626.71,389.828,1024,288,2.39,16.43,9.72
335
+ convnext_nano,2611.78,392.059,1024,288,4.06,13.84,15.59
336
+ efficientnetv2_rw_t,2601.49,393.609,1024,288,3.19,16.42,13.65
337
+ fbnetv3_g,2595.29,394.549,1024,288,1.77,21.09,16.62
338
+ gmixer_24_224,2595.15,394.571,1024,224,5.28,14.45,24.72
339
+ mobilevit_s,2586.09,395.952,1024,256,2.03,19.94,5.58
340
+ coatnet_rmlp_nano_rw_224,2569.7,398.478,1024,224,2.62,20.34,15.15
341
+ gcvit_xxtiny,2561.41,399.768,1024,224,2.14,15.36,12.0
342
+ tf_efficientnet_lite3,2530.94,404.582,1024,300,1.65,21.85,8.2
343
+ efficientnet_cc_b1_8e,2530.65,404.628,1024,240,0.75,15.44,39.72
344
+ densenetblur121d,2522.66,405.908,1024,224,3.11,7.9,8.0
345
+ resnetblur50d,2509.45,408.045,1024,224,5.4,12.82,25.58
346
+ nf_ecaresnet50,2490.39,411.168,1024,224,4.21,11.13,25.56
347
+ inception_v3,2485.21,412.025,1024,299,5.73,8.97,23.83
348
+ nf_seresnet50,2482.66,412.449,1024,224,4.21,11.13,28.09
349
+ tf_inception_v3,2481.38,412.658,1024,299,5.73,8.97,23.83
350
+ gc_efficientnetv2_rw_t,2480.59,412.793,1024,288,3.2,16.45,13.68
351
+ adv_inception_v3,2479.41,412.983,1024,299,5.73,8.97,23.83
352
+ repvgg_b1g4,2473.34,414.003,1024,224,8.15,10.64,39.97
353
+ mobilevitv2_125,2472.28,414.18,1024,256,2.86,20.1,7.48
354
+ gluon_inception_v3,2468.42,414.827,1024,299,5.73,8.97,23.83
355
+ nf_regnet_b3,2461.52,415.991,1024,320,2.05,14.61,18.59
356
+ xcit_small_12_p16_224_dist,2446.89,418.478,1024,224,4.82,12.58,26.25
357
+ xcit_small_12_p16_224,2446.42,418.558,1024,224,4.82,12.58,26.25
358
+ cspresnext50,2438.96,419.836,1024,256,4.05,15.86,20.57
359
+ convnext_nano_ols,2435.0,420.521,1024,288,4.38,15.5,15.65
360
+ regnetx_032,2429.42,421.489,1024,224,3.2,11.37,15.3
361
+ densenet169,2426.29,422.031,1024,224,3.4,7.3,14.15
362
+ sehalonet33ts,2419.4,423.234,1024,256,3.55,14.7,13.69
363
+ tf_efficientnet_cc_b1_8e,2406.19,425.557,1024,240,0.75,15.44,39.72
364
+ semobilevit_s,2402.02,426.294,1024,256,2.03,19.95,5.74
365
+ resnetv2_101,2330.6,439.36,1024,224,7.83,16.23,44.54
366
+ twins_pcpvt_small,2312.72,442.754,1024,224,3.83,18.08,24.11
367
+ xcit_nano_12_p8_224_dist,2295.5,446.077,1024,224,2.16,15.71,3.05
368
+ xcit_nano_12_p8_224,2292.87,446.587,1024,224,2.16,15.71,3.05
369
+ gmlp_s16_224,2290.73,447.007,1024,224,4.42,15.1,19.42
370
+ cs3darknet_focus_x,2287.2,447.697,1024,256,8.03,10.69,35.02
371
+ vit_base_r26_s32_224,2275.25,450.047,1024,224,6.81,12.36,101.38
372
+ gluon_resnet101_v1b,2260.37,453.01,1024,224,7.83,16.23,44.55
373
+ tv_resnet101,2258.59,453.368,1024,224,7.83,16.23,44.55
374
+ resnet101,2258.28,453.43,1024,224,7.83,16.23,44.55
375
+ skresnet50,2234.62,458.23,1024,224,4.11,12.5,25.8
376
+ ecaresnet26t,2232.29,458.709,1024,320,5.24,16.44,16.01
377
+ edgenext_small,2226.69,459.86,1024,320,1.97,14.16,5.59
378
+ dla102,2219.96,461.255,1024,224,7.19,14.18,33.27
379
+ res2next50,2214.71,462.347,1024,224,4.2,13.71,24.67
380
+ dla60_res2next,2210.67,463.194,1024,224,3.49,13.17,17.03
381
+ resnetv2_101d,2203.82,464.633,1024,224,8.07,17.04,44.56
382
+ gluon_resnet101_v1c,2194.65,466.578,1024,224,8.08,17.04,44.57
383
+ resnest26d,2170.04,471.869,1024,224,3.64,9.97,17.07
384
+ vgg13,2149.71,476.331,1024,224,11.31,12.25,133.05
385
+ gluon_resnet101_v1d,2137.49,479.053,1024,224,8.08,17.04,44.57
386
+ skresnet50d,2115.22,484.098,1024,224,4.36,13.31,25.82
387
+ convnextv2_pico,2108.5,485.64,1024,288,2.27,10.08,9.07
388
+ vit_base_resnet50d_224,2101.17,487.333,1024,224,8.73,16.92,110.97
389
+ coatnet_0_rw_224,2082.49,491.706,1024,224,4.43,18.73,27.44
390
+ crossvit_small_240,2081.5,491.94,1024,240,5.63,18.17,26.86
391
+ deit3_medium_patch16_224_in21ft1k,2076.53,493.118,1024,224,8.0,15.93,38.85
392
+ deit3_medium_patch16_224,2072.34,494.116,1024,224,8.0,15.93,38.85
393
+ mobilevitv2_150,2071.36,494.349,1024,256,4.09,24.11,10.59
394
+ mobilevitv2_150_in22ft1k,2070.3,494.603,1024,256,4.09,24.11,10.59
395
+ sebotnet33ts_256,2067.91,247.581,512,256,3.89,17.46,13.7
396
+ wide_resnet50_2,2057.08,497.78,1024,224,11.43,14.4,68.88
397
+ vit_relpos_medium_patch16_rpn_224,2044.85,500.757,1024,224,7.97,17.02,38.73
398
+ efficientformer_l3,2041.79,501.507,1024,224,3.93,12.01,31.41
399
+ poolformer_s24,2040.35,501.863,1024,224,3.41,10.68,21.39
400
+ vit_relpos_medium_patch16_224,2037.47,502.572,1024,224,7.97,17.02,38.75
401
+ cspdarknet53,2035.94,502.949,1024,256,6.57,16.81,27.64
402
+ resnet51q,2034.41,503.329,1024,288,8.07,20.94,35.7
403
+ vit_srelpos_medium_patch16_224,2033.15,503.638,1024,224,7.96,16.21,38.74
404
+ maxvit_rmlp_pico_rw_256,2008.78,509.748,1024,256,1.85,24.86,7.52
405
+ vit_relpos_medium_patch16_cls_224,2007.24,510.141,1024,224,8.03,18.24,38.76
406
+ dla102x,2006.55,510.315,1024,224,5.89,19.42,26.31
407
+ legacy_seresnet101,2003.12,511.188,1024,224,7.61,15.74,49.33
408
+ swin_tiny_patch4_window7_224,1995.14,513.235,1024,224,4.51,17.06,28.29
409
+ repvgg_b1,1985.42,515.747,1024,224,13.16,10.64,57.42
410
+ resnetaa101d,1982.98,516.381,1024,224,9.12,17.56,44.57
411
+ coatnet_rmlp_0_rw_224,1981.75,516.703,1024,224,4.72,24.89,27.45
412
+ tf_efficientnet_b3,1975.92,518.226,1024,300,1.87,23.83,12.23
413
+ gcvit_xtiny,1969.68,519.869,1024,224,2.93,20.26,19.98
414
+ hrnet_w18,1967.17,520.531,1024,224,4.32,16.31,21.3
415
+ gluon_resnet101_v1s,1965.68,520.926,1024,224,9.19,18.64,44.67
416
+ maxvit_pico_rw_256,1965.38,521.006,1024,256,1.83,22.3,7.46
417
+ resnetaa50,1958.15,522.93,1024,288,8.52,19.24,25.56
418
+ seresnet101,1954.63,523.871,1024,224,7.84,16.27,49.33
419
+ efficientnet_b3,1949.54,525.239,1024,320,2.01,26.52,12.23
420
+ efficientnet_b3a,1949.11,525.356,1024,320,2.01,26.52,12.23
421
+ edgenext_small_rw,1932.68,529.816,1024,320,2.46,14.85,7.83
422
+ regnetx_040,1932.62,529.839,1024,224,3.99,12.2,22.12
423
+ cs3sedarknet_xdw,1925.4,531.825,1024,256,5.97,17.18,21.6
424
+ coatnet_bn_0_rw_224,1920.71,533.123,1024,224,4.67,22.04,27.44
425
+ xcit_tiny_12_p16_384_dist,1911.65,535.652,1024,384,3.64,18.26,6.72
426
+ ssl_resnext101_32x4d,1910.73,535.909,1024,224,8.01,21.23,44.18
427
+ swsl_resnext101_32x4d,1910.43,535.993,1024,224,8.01,21.23,44.18
428
+ resnext101_32x4d,1909.99,536.115,1024,224,8.01,21.23,44.18
429
+ gluon_resnext101_32x4d,1909.34,536.298,1024,224,8.01,21.23,44.18
430
+ darknet53,1903.77,537.866,1024,288,11.78,15.68,41.61
431
+ darknetaa53,1898.12,539.468,1024,288,10.08,15.68,36.02
432
+ crossvit_15_240,1892.46,541.083,1024,240,5.81,19.77,27.53
433
+ halonet50ts,1881.53,544.226,1024,256,5.3,19.2,22.73
434
+ vgg13_bn,1879.72,544.749,1024,224,11.33,12.25,133.05
435
+ mixnet_xl,1872.46,546.86,1024,224,0.93,14.57,11.9
436
+ res2net50_26w_6s,1870.88,547.321,1024,224,6.33,15.28,37.05
437
+ ecaresnet101d,1869.88,547.616,1024,224,8.08,17.07,44.57
438
+ densenet201,1869.57,547.706,1024,224,4.34,7.85,20.01
439
+ nf_resnet101,1858.48,550.976,1024,224,8.01,16.23,44.55
440
+ coatnet_0_224,1857.28,275.661,512,224,4.58,24.01,25.04
441
+ pvt_v2_b2,1854.85,552.053,1024,224,4.05,27.53,25.36
442
+ crossvit_15_dagger_240,1850.69,553.295,1024,240,6.13,20.43,28.21
443
+ resmlp_36_224,1846.41,554.574,1024,224,8.91,16.33,44.69
444
+ resmlp_36_distilled_224,1845.04,554.99,1024,224,8.91,16.33,44.69
445
+ resnet61q,1841.84,555.954,1024,288,9.87,21.52,36.85
446
+ swin_s3_tiny_224,1817.5,563.398,1024,224,4.64,19.13,28.33
447
+ cait_xxs24_224,1796.55,569.968,1024,224,2.53,20.29,11.96
448
+ cs3darknet_x,1789.33,572.268,1024,288,10.6,14.36,35.05
449
+ vit_medium_patch16_gap_240,1785.54,573.481,1024,240,9.22,18.81,44.4
450
+ nf_resnet50,1784.84,573.708,1024,288,6.88,18.37,25.56
451
+ resnet50_gn,1764.31,580.385,1024,224,4.14,11.11,25.56
452
+ mixer_b16_224_miil,1761.45,581.327,1024,224,12.62,14.53,59.88
453
+ mixer_b16_224,1759.76,581.885,1024,224,12.62,14.53,59.88
454
+ resnetblur101d,1757.96,582.482,1024,224,9.12,17.94,44.57
455
+ eca_nfnet_l0,1726.58,593.068,1024,288,7.12,17.29,24.14
456
+ nfnet_l0,1721.83,594.705,1024,288,7.13,17.29,35.07
457
+ vit_large_patch32_224,1717.59,596.169,1024,224,15.41,13.32,327.9
458
+ vgg16,1717.44,596.224,1024,224,15.47,13.56,138.36
459
+ regnetz_c16,1710.89,598.505,1024,320,3.92,25.88,13.46
460
+ pvt_v2_b2_li,1709.89,598.855,1024,224,3.91,27.6,22.55
461
+ resnest50d_1s4x24d,1705.52,600.391,1024,224,4.43,13.57,25.68
462
+ coat_lite_small,1704.55,600.733,1024,224,3.96,22.09,19.84
463
+ resnetv2_50d_frn,1697.1,603.368,1024,224,4.33,11.92,25.59
464
+ cs3sedarknet_x,1689.8,605.975,1024,288,10.6,14.37,35.4
465
+ seresnext101_32x4d,1687.65,606.747,1024,224,8.02,21.26,48.96
466
+ gluon_seresnext101_32x4d,1687.1,606.945,1024,224,8.02,21.26,48.96
467
+ legacy_seresnext101_32x4d,1684.69,607.813,1024,224,8.02,21.26,48.96
468
+ regnetv_040,1682.92,608.454,1024,288,6.6,20.3,20.64
469
+ mobilevitv2_175,1677.66,457.769,768,256,5.54,28.13,14.25
470
+ regnety_040,1677.03,610.59,1024,288,6.61,20.3,20.65
471
+ mobilevitv2_175_in22ft1k,1677.0,457.949,768,256,5.54,28.13,14.25
472
+ convnext_tiny_hnf,1676.16,610.908,1024,288,7.39,22.21,28.59
473
+ res2net101_26w_4s,1675.37,611.195,1024,224,8.1,18.45,45.21
474
+ vit_tiny_patch16_384,1665.76,614.72,1024,384,4.7,25.39,5.79
475
+ sequencer2d_s,1661.32,616.362,1024,224,4.96,11.31,27.65
476
+ ese_vovnet39b_evos,1661.21,616.404,1024,224,7.07,6.74,24.58
477
+ vit_base_patch32_384,1649.27,620.868,1024,384,13.06,16.5,88.3
478
+ vit_base_patch32_clip_384,1648.64,621.105,1024,384,13.06,16.5,88.3
479
+ mixer_l32_224,1645.23,622.393,1024,224,11.27,19.86,206.94
480
+ convnext_tiny,1642.14,623.562,1024,288,7.39,22.21,28.59
481
+ botnet50ts_256,1639.64,312.25,512,256,5.54,22.23,22.74
482
+ swinv2_cr_tiny_224,1630.02,628.199,1024,224,4.66,28.45,28.33
483
+ resnetv2_50d_evob,1627.44,629.196,1024,224,4.33,11.92,25.59
484
+ twins_pcpvt_base,1615.12,633.996,1024,224,6.68,25.25,43.83
485
+ resnetv2_152,1614.43,634.268,1024,224,11.55,22.56,60.19
486
+ hrnet_w32,1605.06,637.96,1024,224,8.97,22.02,41.23
487
+ swinv2_cr_tiny_ns_224,1600.43,639.811,1024,224,4.66,28.45,28.33
488
+ xception41p,1598.79,480.351,768,299,9.25,39.86,26.91
489
+ tv_resnet152,1582.54,647.049,1024,224,11.56,22.56,60.19
490
+ gluon_resnet152_v1b,1581.57,647.444,1024,224,11.56,22.56,60.19
491
+ resnet152,1581.02,647.671,1024,224,11.56,22.56,60.19
492
+ xception,1579.88,648.138,1024,299,8.4,35.83,22.86
493
+ halo2botnet50ts_256,1572.75,651.076,1024,256,5.02,21.78,22.64
494
+ res2net50_26w_8s,1568.85,652.695,1024,224,8.37,17.95,48.4
495
+ vit_medium_patch16_gap_256,1564.22,654.626,1024,256,10.59,22.15,38.86
496
+ resnetv2_152d,1557.03,657.648,1024,224,11.8,23.36,60.2
497
+ efficientnet_el_pruned,1555.14,658.449,1024,300,8.0,30.7,10.59
498
+ maxvit_rmlp_nano_rw_256,1551.85,659.845,1024,256,4.47,31.92,15.5
499
+ regnetx_064,1550.52,660.413,1024,224,6.49,16.37,26.21
500
+ efficientnet_el,1549.97,660.646,1024,300,8.0,30.7,10.59
501
+ gluon_resnet152_v1c,1548.96,661.078,1024,224,11.8,23.36,60.21
502
+ nf_ecaresnet101,1546.58,662.091,1024,224,8.01,16.27,44.55
503
+ nf_seresnet101,1539.38,665.191,1024,224,8.02,16.27,49.33
504
+ mvitv2_tiny,1537.54,665.985,1024,224,4.7,21.16,24.17
505
+ nfnet_f0,1525.01,671.456,1024,256,12.62,18.05,71.49
506
+ vgg16_bn,1523.86,671.963,1024,224,15.5,13.56,138.37
507
+ cs3edgenet_x,1521.21,673.136,1024,288,14.59,16.36,47.82
508
+ gluon_resnet152_v1d,1520.11,673.621,1024,224,11.8,23.36,60.21
509
+ maxvit_nano_rw_256,1517.43,674.812,1024,256,4.46,30.28,15.45
510
+ tf_efficientnet_el,1506.16,679.862,1024,300,8.0,30.7,10.59
511
+ convnextv2_nano,1500.71,511.746,768,288,4.06,13.84,15.62
512
+ resnest50d,1492.63,686.022,1024,224,5.4,14.36,27.48
513
+ ese_vovnet99b,1489.17,687.617,1024,224,16.51,11.27,63.2
514
+ dla169,1471.11,696.059,1024,224,11.6,20.2,53.39
515
+ regnety_032,1467.85,697.604,1024,288,5.29,18.61,19.44
516
+ skresnext50_32x4d,1463.28,699.785,1024,224,4.5,17.18,27.48
517
+ xcit_tiny_12_p8_224_dist,1458.7,701.981,1024,224,4.81,23.6,6.71
518
+ xcit_tiny_12_p8_224,1458.23,702.211,1024,224,4.81,23.6,6.71
519
+ convit_small,1457.54,702.541,1024,224,5.76,17.87,27.78
520
+ mobilevitv2_200_in22ft1k,1456.59,527.247,768,256,7.22,32.15,18.45
521
+ mobilevitv2_200,1456.02,527.451,768,256,7.22,32.15,18.45
522
+ ecaresnet50t,1438.32,711.929,1024,320,8.82,24.13,25.57
523
+ gluon_resnet152_v1s,1432.22,714.961,1024,224,12.92,24.96,60.32
524
+ nest_tiny,1415.33,542.618,768,224,5.83,25.48,17.06
525
+ regnety_040s_gn,1412.65,724.867,1024,224,4.03,12.29,20.65
526
+ vgg19,1393.71,183.67,256,224,19.63,14.86,143.67
527
+ jx_nest_tiny,1389.62,552.657,768,224,5.83,25.48,17.06
528
+ legacy_seresnet152,1383.83,739.96,1024,224,11.33,22.08,66.82
529
+ densenet161,1376.52,743.891,1024,224,7.79,11.06,28.68
530
+ poolformer_s36,1370.67,747.069,1024,224,5.0,15.82,30.86
531
+ vit_small_resnet50d_s16_224,1367.59,748.748,1024,224,13.48,24.82,57.53
532
+ twins_svt_base,1362.65,751.463,1024,224,8.59,26.33,56.07
533
+ seresnet152,1361.7,751.99,1024,224,11.57,22.61,66.82
534
+ xception41,1356.44,566.173,768,299,9.28,39.86,26.97
535
+ maxvit_tiny_rw_224,1350.45,758.254,1024,224,5.11,33.11,29.06
536
+ crossvit_18_240,1348.85,759.154,1024,240,9.05,26.26,43.27
537
+ maxxvit_rmlp_nano_rw_256,1347.73,759.767,1024,256,4.37,26.05,16.78
538
+ efficientnet_lite4,1343.74,571.528,768,380,4.04,45.66,13.01
539
+ gcvit_tiny,1339.65,764.364,1024,224,4.79,29.82,28.22
540
+ pvt_v2_b3,1325.92,772.282,1024,224,6.92,37.7,45.24
541
+ crossvit_18_dagger_240,1313.78,779.419,1024,240,9.5,27.03,44.27
542
+ volo_d1_224,1312.37,780.255,1024,224,6.94,24.43,26.63
543
+ xcit_small_24_p16_224_dist,1307.3,783.278,1024,224,9.1,23.64,47.67
544
+ tresnet_m,1305.71,784.234,1024,224,5.74,7.31,31.39
545
+ inception_v4,1305.41,784.412,1024,299,12.28,15.09,42.68
546
+ repvgg_b2,1305.22,784.529,1024,224,20.45,12.9,89.02
547
+ xcit_small_24_p16_224,1303.71,785.433,1024,224,9.1,23.64,47.67
548
+ sequencer2d_m,1295.72,790.281,1024,224,6.55,14.26,38.31
549
+ edgenext_base,1283.77,797.633,1024,320,6.01,24.32,18.51
550
+ hrnet_w30,1280.53,799.653,1024,224,8.15,21.21,37.71
551
+ dm_nfnet_f0,1275.46,802.834,1024,256,12.62,18.05,71.49
552
+ coatnet_rmlp_1_rw_224,1268.37,807.322,1024,224,7.85,35.47,41.69
553
+ maxxvitv2_nano_rw_256,1259.7,812.877,1024,256,6.26,23.05,23.7
554
+ efficientnetv2_s,1254.49,816.255,1024,384,8.44,35.77,21.46
555
+ vgg19_bn,1246.52,205.36,256,224,19.66,14.86,143.68
556
+ nf_regnet_b4,1235.79,828.604,1024,384,4.7,28.61,30.21
557
+ swin_small_patch4_window7_224,1235.74,828.641,1024,224,8.77,27.47,49.61
558
+ tf_efficientnet_lite4,1232.22,623.25,768,380,4.04,45.66,13.01
559
+ regnetz_d32,1223.51,836.919,1024,320,9.33,37.08,27.58
560
+ mixnet_xxl,1219.27,629.871,768,224,2.04,23.43,23.96
561
+ tf_efficientnetv2_s,1219.16,839.906,1024,384,8.44,35.77,21.46
562
+ deit_base_patch16_224,1213.08,844.121,1024,224,17.58,23.9,86.57
563
+ deit_base_distilled_patch16_224,1212.98,844.19,1024,224,17.68,24.05,87.34
564
+ vit_base_patch16_clip_224,1211.82,844.996,1024,224,17.58,23.9,86.57
565
+ vit_base_patch16_224_miil,1211.26,845.389,1024,224,17.59,23.91,94.4
566
+ dpn92,1210.45,845.948,1024,224,6.54,18.21,37.67
567
+ vit_base_patch16_224,1210.28,846.074,1024,224,17.58,23.9,86.57
568
+ coatnet_rmlp_1_rw2_224,1208.65,847.215,1024,224,8.11,40.13,41.72
569
+ cait_xxs36_224,1205.51,849.419,1024,224,3.77,30.34,17.3
570
+ maxvit_tiny_tf_224,1200.3,639.828,768,224,5.6,35.78,30.92
571
+ swinv2_tiny_window8_256,1200.06,853.274,1024,256,5.96,24.57,28.35
572
+ efficientnetv2_rw_s,1199.87,853.413,1024,384,8.72,38.03,23.94
573
+ dla102x2,1198.52,854.374,1024,224,9.34,29.91,41.28
574
+ regnetx_160,1195.08,856.833,1024,224,15.99,25.52,54.28
575
+ dpn98,1183.92,864.908,1024,224,11.73,25.2,61.57
576
+ vit_base_patch16_rpn_224,1180.39,867.498,1024,224,17.49,23.75,86.54
577
+ twins_pcpvt_large,1168.64,876.22,1024,224,9.84,35.82,60.99
578
+ deit3_base_patch16_224,1164.77,879.134,1024,224,17.58,23.9,86.59
579
+ deit3_base_patch16_224_in21ft1k,1164.5,879.334,1024,224,17.58,23.9,86.59
580
+ regnetz_d8,1163.64,879.982,1024,320,6.19,37.08,23.37
581
+ swsl_resnext101_32x8d,1158.15,884.156,1024,224,16.48,31.21,88.79
582
+ resnext101_32x8d,1158.05,884.232,1024,224,16.48,31.21,88.79
583
+ ssl_resnext101_32x8d,1158.02,884.255,1024,224,16.48,31.21,88.79
584
+ wide_resnet101_2,1157.66,884.531,1024,224,22.8,21.23,126.89
585
+ ig_resnext101_32x8d,1157.3,884.8,1024,224,16.48,31.21,88.79
586
+ coatnet_1_rw_224,1155.72,886.014,1024,224,8.04,34.6,41.72
587
+ vit_base_patch16_gap_224,1154.73,886.777,1024,224,17.49,25.59,86.57
588
+ vit_base_patch32_clip_448,1154.21,887.173,1024,448,17.93,23.9,88.34
589
+ resnet200,1149.71,890.646,1024,224,15.07,32.19,64.67
590
+ mvitv2_small,1146.92,892.812,1024,224,7.0,28.08,34.87
591
+ xception65p,1145.07,670.686,768,299,13.91,52.48,39.82
592
+ cs3se_edgenet_x,1143.17,895.738,1024,320,18.01,20.21,50.72
593
+ vit_relpos_base_patch16_rpn_224,1143.15,895.76,1024,224,17.51,24.97,86.41
594
+ vit_relpos_base_patch16_224,1141.31,897.204,1024,224,17.51,24.97,86.43
595
+ tnt_s_patch16_224,1135.32,901.935,1024,224,5.24,24.37,23.76
596
+ resnetrs101,1134.67,902.454,1024,288,13.56,28.53,63.62
597
+ vit_relpos_base_patch16_clsgap_224,1128.94,907.03,1024,224,17.6,25.12,86.43
598
+ vit_relpos_base_patch16_cls_224,1126.78,908.771,1024,224,17.6,25.12,86.43
599
+ inception_resnet_v2,1126.73,908.809,1024,299,13.18,25.06,55.84
600
+ ens_adv_inception_resnet_v2,1125.41,909.877,1024,299,13.18,25.06,55.84
601
+ beit_base_patch16_224,1112.26,920.631,1024,224,17.58,23.9,86.53
602
+ coat_tiny,1108.72,923.572,1024,224,4.35,27.2,5.5
603
+ beitv2_base_patch16_224,1108.55,923.711,1024,224,17.58,23.9,86.53
604
+ mvitv2_small_cls,1101.66,929.491,1024,224,7.04,28.17,34.87
605
+ resnetv2_50d_gn,1092.35,937.413,1024,288,7.24,19.7,25.57
606
+ pit_b_distilled_224,1078.48,474.731,512,224,12.5,33.07,74.79
607
+ pit_b_224,1075.34,476.117,512,224,12.42,32.94,73.76
608
+ hrnet_w40,1059.78,966.217,1024,224,12.75,25.29,57.56
609
+ coatnet_1_224,1045.17,489.859,512,224,8.7,39.0,42.23
610
+ resnet101d,1039.88,984.712,1024,320,16.48,34.77,44.57
611
+ flexivit_base,1037.21,987.248,1024,240,20.29,28.36,86.59
612
+ gluon_resnext101_64x4d,1034.86,989.491,1024,224,15.52,31.21,83.46
613
+ vit_small_patch16_36x1_224,1033.13,991.146,1024,224,13.71,35.69,64.67
614
+ vit_large_r50_s32_224,1030.67,993.517,1024,224,19.58,24.41,328.99
615
+ maxvit_rmlp_tiny_rw_256,1029.25,746.162,768,256,6.77,46.92,29.15
616
+ xcit_tiny_24_p16_384_dist,1027.64,996.444,1024,384,6.87,34.29,12.12
617
+ efficientnet_b4,1014.08,504.879,512,384,4.51,50.04,19.34
618
+ maxvit_tiny_rw_256,1008.0,1015.861,1024,256,6.74,44.35,29.07
619
+ vit_small_patch16_18x2_224,1006.7,1017.169,1024,224,13.71,35.69,64.67
620
+ swinv2_cr_small_224,1005.28,1018.603,1024,224,9.07,50.27,49.7
621
+ regnetx_080,1004.51,1019.384,1024,224,8.02,14.06,39.57
622
+ repvgg_b3,994.23,1029.925,1024,224,29.16,15.1,123.09
623
+ swinv2_cr_small_ns_224,993.75,1030.424,1024,224,9.08,50.27,49.7
624
+ repvgg_b2g4,988.97,1035.405,1024,224,12.63,12.9,61.76
625
+ convnext_small,988.3,1036.113,1024,288,14.39,35.65,50.22
626
+ gluon_xception65,987.82,777.458,768,299,13.96,52.48,39.92
627
+ vit_small_r26_s32_384,982.68,1042.031,1024,384,10.43,29.85,36.47
628
+ xception65,978.83,784.597,768,299,13.96,52.48,39.92
629
+ regnetz_040,975.77,787.056,768,320,6.35,37.78,27.12
630
+ regnetz_040h,971.51,790.512,768,320,6.43,37.94,28.94
631
+ gluon_seresnext101_64x4d,965.3,1060.794,1024,224,15.53,31.25,88.23
632
+ maxvit_tiny_pm_256,964.03,1062.189,1024,256,6.61,47.9,30.09
633
+ efficientformer_l7,962.55,1063.825,1024,224,10.17,24.45,82.23
634
+ twins_svt_large,962.19,1064.229,1024,224,15.15,35.1,99.27
635
+ tf_efficientnet_b4,957.62,534.646,512,380,4.49,49.49,19.34
636
+ pvt_v2_b4,957.38,1069.569,1024,224,10.14,53.74,62.56
637
+ poolformer_m36,954.91,1072.334,1024,224,8.8,22.02,56.17
638
+ cait_s24_224,954.44,1072.866,1024,224,9.35,40.58,46.92
639
+ regnetz_b16_evos,950.47,808.013,768,288,2.36,16.43,9.74
640
+ resnest50d_4s2x40d,938.07,1091.586,1024,224,4.4,17.94,30.42
641
+ hrnet_w48,936.07,1093.917,1024,224,17.34,28.56,77.47
642
+ gmlp_b16_224,930.95,1099.935,1024,224,15.78,30.21,73.08
643
+ convnextv2_tiny,930.82,550.041,512,288,7.39,22.21,28.64
644
+ convnextv2_small,928.68,1102.629,1024,224,8.71,21.56,50.32
645
+ maxxvit_rmlp_tiny_rw_256,918.72,1114.583,1024,256,6.66,39.76,29.64
646
+ mobilevitv2_150_384_in22ft1k,915.49,419.435,384,384,9.2,54.25,10.59
647
+ pvt_v2_b5,909.79,1125.516,1024,224,11.76,50.92,81.96
648
+ nest_small,903.21,850.284,768,224,10.35,40.04,38.35
649
+ swin_s3_small_224,899.98,853.339,768,224,9.43,37.84,49.74
650
+ xcit_medium_24_p16_224_dist,898.61,1139.525,1024,224,16.13,31.71,84.4
651
+ xcit_medium_24_p16_224,898.6,1139.542,1024,224,16.13,31.71,84.4
652
+ jx_nest_small,892.03,860.939,768,224,10.35,40.04,38.35
653
+ coat_mini,880.8,1162.569,1024,224,6.82,33.68,10.34
654
+ swin_base_patch4_window7_224,875.38,1169.764,1024,224,15.47,36.63,87.77
655
+ dpn131,865.2,1183.527,1024,224,16.09,32.97,79.25
656
+ resnetv2_50d_evos,854.82,1197.895,1024,288,7.15,19.7,25.59
657
+ xcit_small_12_p16_384_dist,853.54,1199.694,1024,384,14.14,36.51,26.25
658
+ sequencer2d_l,839.78,1219.347,1024,224,9.74,22.12,54.3
659
+ crossvit_base_240,839.43,914.892,768,240,21.22,36.33,105.03
660
+ hrnet_w44,821.37,1246.671,1024,224,14.94,26.92,67.06
661
+ eca_nfnet_l1,818.87,1250.489,1024,320,14.92,34.42,41.41
662
+ vit_base_r50_s16_224,817.55,1252.502,1024,224,21.67,35.31,114.69
663
+ maxvit_rmlp_small_rw_224,816.34,1254.368,1024,224,10.75,49.3,64.9
664
+ gcvit_small,815.24,1256.055,1024,224,8.57,41.61,51.09
665
+ regnety_080,811.28,1262.191,1024,288,13.22,29.69,39.18
666
+ densenet264,804.85,1272.268,1024,224,12.95,12.8,72.69
667
+ mvitv2_base,804.14,1273.395,1024,224,10.16,40.5,51.47
668
+ repvgg_b3g4,802.85,1275.443,1024,224,17.89,15.1,83.83
669
+ vit_base_patch16_plus_240,782.25,1309.022,1024,240,27.41,33.08,117.56
670
+ swinv2_tiny_window16_256,781.61,655.045,512,256,6.68,39.02,28.35
671
+ maxvit_small_tf_224,777.04,658.899,512,224,11.66,53.17,68.93
672
+ xcit_tiny_24_p8_224,771.1,1327.958,1024,224,9.21,45.39,12.11
673
+ xcit_tiny_24_p8_224_dist,770.21,1329.496,1024,224,9.21,45.39,12.11
674
+ coatnet_2_rw_224,763.52,670.562,512,224,15.09,49.22,73.87
675
+ vit_relpos_base_patch16_plus_240,763.4,1341.361,1024,240,27.3,34.33,117.38
676
+ efficientnet_b3_gn,763.0,671.023,512,320,2.14,28.83,11.73
677
+ coatnet_rmlp_2_rw_224,759.73,673.906,512,224,15.18,54.78,73.88
678
+ vit_small_patch16_384,753.82,1018.79,768,384,15.52,50.78,22.2
679
+ hrnet_w64,750.36,1364.663,1024,224,28.97,35.09,128.06
680
+ xception71,749.7,1024.396,768,299,18.09,69.92,42.34
681
+ resnet152d,742.37,1379.356,1024,320,24.08,47.67,60.21
682
+ swinv2_small_window8_256,741.95,1380.134,1024,256,11.58,40.14,49.73
683
+ mobilevitv2_175_384_in22ft1k,739.09,519.544,384,384,12.47,63.29,14.25
684
+ ecaresnet200d,736.17,1390.959,1024,256,20.0,43.15,64.69
685
+ seresnet200d,733.28,1396.444,1024,256,20.01,43.15,71.86
686
+ swin_s3_base_224,733.27,1396.459,1024,224,13.69,48.26,71.13
687
+ convit_base,731.09,1400.636,1024,224,17.52,31.77,86.54
688
+ resnest101e,726.65,1409.184,1024,256,13.38,28.66,48.28
689
+ deit3_small_patch16_384,726.49,1057.125,768,384,15.52,50.78,22.21
690
+ deit3_small_patch16_384_in21ft1k,726.32,1057.368,768,384,15.52,50.78,22.21
691
+ volo_d2_224,722.61,1417.079,1024,224,14.34,41.34,58.68
692
+ tnt_b_patch16_224,721.24,1419.762,1024,224,14.09,39.01,65.41
693
+ xcit_nano_12_p8_384_dist,720.41,1421.4,1024,384,6.34,46.08,3.05
694
+ swinv2_cr_base_224,719.23,1423.721,1024,224,15.86,59.66,87.88
695
+ poolformer_m48,719.07,1424.046,1024,224,11.59,29.17,73.47
696
+ coatnet_2_224,715.36,715.711,512,224,16.5,52.67,74.68
697
+ swinv2_cr_base_ns_224,712.96,1436.239,1024,224,15.86,59.66,87.88
698
+ dpn107,691.0,1481.897,1024,224,18.38,33.46,86.92
699
+ convnext_base,687.14,1490.219,1024,288,25.43,47.53,88.59
700
+ resnetv2_50x1_bitm,684.31,374.087,256,448,16.62,44.46,25.55
701
+ efficientnet_b3_g8_gn,664.63,770.341,512,320,3.2,28.83,14.25
702
+ regnety_064,657.71,1556.911,1024,288,10.56,27.11,30.58
703
+ regnetv_064,652.6,1569.096,1024,288,10.55,27.11,30.58
704
+ xcit_small_12_p8_224,651.3,1572.214,1024,224,18.69,47.21,26.21
705
+ xcit_small_12_p8_224_dist,651.08,1572.755,1024,224,18.69,47.21,26.21
706
+ resnetrs152,649.95,1575.501,1024,320,24.34,48.14,86.62
707
+ mobilevitv2_200_384_in22ft1k,647.42,395.4,256,384,16.24,72.34,18.45
708
+ seresnet152d,645.69,1585.88,1024,320,24.09,47.72,66.84
709
+ tresnet_l,644.38,1589.105,1024,224,10.88,11.9,55.99
710
+ tresnet_v2_l,642.3,1594.246,1024,224,8.81,16.34,46.17
711
+ nest_base,640.98,798.76,512,224,17.96,53.39,67.72
712
+ regnetx_120,640.37,1599.07,1024,224,12.13,21.37,46.11
713
+ seresnext101_32x8d,639.53,1601.159,1024,288,27.24,51.63,93.57
714
+ regnetz_e8,639.43,1601.423,1024,320,15.46,63.94,57.7
715
+ ese_vovnet99b_iabn,636.1,1609.798,1024,224,16.49,11.27,63.2
716
+ jx_nest_base,634.61,806.787,512,224,17.96,53.39,67.72
717
+ regnety_120,625.75,1636.422,1024,224,12.14,21.38,51.82
718
+ efficientnetv2_m,624.53,1639.618,1024,416,18.6,67.5,54.14
719
+ seresnext101d_32x8d,621.55,1647.466,1024,288,27.64,52.95,93.59
720
+ resnext101_64x4d,619.77,1652.21,1024,288,25.66,51.59,83.46
721
+ swsl_resnext101_32x16d,612.21,1672.624,1024,224,36.27,51.18,194.03
722
+ ig_resnext101_32x16d,611.98,1673.243,1024,224,36.27,51.18,194.03
723
+ maxvit_rmlp_small_rw_256,611.67,1255.571,768,256,14.15,66.09,64.9
724
+ ssl_resnext101_32x16d,611.31,1675.063,1024,224,36.27,51.18,194.03
725
+ regnety_320,605.31,1691.684,1024,224,32.34,30.26,145.05
726
+ gcvit_base,602.42,1699.782,1024,224,14.87,55.48,90.32
727
+ regnetz_c16_evos,596.93,857.706,512,320,3.86,25.88,13.49
728
+ maxxvit_rmlp_small_rw_256,590.18,1735.046,1024,256,14.67,58.38,66.01
729
+ legacy_senet154,585.86,1747.854,1024,224,20.77,38.69,115.09
730
+ senet154,585.53,1748.836,1024,224,20.77,38.69,115.09
731
+ seresnextaa101d_32x8d,585.08,1750.175,1024,288,28.51,56.44,93.59
732
+ gluon_senet154,584.86,1750.843,1024,224,20.77,38.69,115.09
733
+ convmixer_768_32,581.95,1759.577,1024,224,19.55,25.95,21.11
734
+ seresnet269d,574.5,1782.4,1024,256,26.59,53.6,113.67
735
+ nf_regnet_b5,565.36,905.602,512,456,11.7,61.95,49.74
736
+ mixer_l16_224,553.66,1849.49,1024,224,44.6,41.69,208.2
737
+ resnet200d,545.14,1878.401,1024,320,31.25,67.33,64.69
738
+ nfnet_f1,544.28,1881.353,1024,320,35.97,46.77,132.63
739
+ vit_large_patch32_384,543.45,1884.237,1024,384,45.31,43.86,306.63
740
+ efficientnetv2_rw_m,543.37,1884.512,1024,416,21.49,79.62,53.24
741
+ vit_medium_patch16_gap_384,539.24,949.475,512,384,26.08,67.54,39.03
742
+ efficientnet_b5,533.21,960.212,512,448,9.59,93.56,30.39
743
+ swinv2_base_window8_256,531.81,1925.495,1024,256,20.37,52.59,87.92
744
+ maxxvitv2_rmlp_base_rw_224,525.72,1947.791,1024,224,24.2,62.77,116.09
745
+ xcit_large_24_p16_224_dist,509.19,2011.039,1024,224,35.86,47.27,189.1
746
+ xcit_large_24_p16_224,509.15,2011.169,1024,224,35.86,47.27,189.1
747
+ swin_large_patch4_window7_224,504.4,1522.593,768,224,34.53,54.94,196.53
748
+ halonet_h1,503.39,508.543,256,256,3.0,51.17,8.1
749
+ volo_d3_224,502.58,2037.467,1024,224,20.78,60.09,86.33
750
+ swinv2_small_window16_256,488.97,1047.084,512,256,12.82,66.29,49.73
751
+ tresnet_xl,481.58,2126.301,1024,224,15.17,15.34,78.44
752
+ vit_small_patch8_224,479.11,1068.641,512,224,22.44,80.84,21.67
753
+ tf_efficientnet_b5,476.47,805.919,384,456,10.46,98.86,30.39
754
+ maxvit_rmlp_base_rw_224,472.06,2169.196,1024,224,23.15,92.64,116.14
755
+ resnetrs200,471.68,2170.964,1024,320,31.51,67.81,93.21
756
+ xcit_tiny_12_p8_384_dist,471.45,2172.002,1024,384,14.13,69.14,6.71
757
+ dm_nfnet_f1,461.24,2220.087,1024,320,35.97,46.77,132.63
758
+ tf_efficientnetv2_m,458.93,1673.426,768,480,24.76,89.84,54.14
759
+ xcit_small_24_p16_384_dist,457.16,2239.891,1024,384,26.72,68.58,47.67
760
+ coatnet_rmlp_3_rw_224,439.5,582.463,256,224,33.56,79.47,165.15
761
+ maxvit_base_tf_224,430.05,1190.542,512,224,24.04,95.01,119.47
762
+ swinv2_cr_large_224,423.86,1811.887,768,224,35.1,78.42,196.68
763
+ resnetv2_152x2_bit_teacher,423.36,2418.743,1024,224,46.95,45.11,236.34
764
+ swinv2_cr_tiny_384,423.1,907.565,384,384,15.34,161.01,28.33
765
+ coatnet_3_rw_224,421.95,606.701,256,224,33.44,73.83,181.81
766
+ resnetv2_101x1_bitm,419.35,610.453,256,448,31.65,64.93,44.54
767
+ coatnet_3_224,405.07,631.982,256,224,36.56,79.01,166.97
768
+ convnextv2_base,403.59,1268.593,512,288,25.43,47.53,88.72
769
+ eca_nfnet_l2,401.73,2548.946,1024,384,30.05,68.28,56.72
770
+ regnetz_d8_evos,394.39,1947.294,768,320,7.03,38.92,23.46
771
+ convmixer_1024_20_ks9_p14,393.5,2602.254,1024,224,5.55,5.51,24.38
772
+ eva_large_patch14_196,392.3,2610.234,1024,196,61.57,63.52,304.14
773
+ crossvit_15_dagger_408,390.72,655.182,256,408,21.45,95.05,28.5
774
+ vit_large_patch16_224,390.66,2621.182,1024,224,61.6,63.52,304.33
775
+ vit_base_patch16_18x2_224,384.38,2663.987,1024,224,52.51,71.38,256.73
776
+ deit3_large_patch16_224_in21ft1k,377.58,2711.976,1024,224,61.6,63.52,304.37
777
+ deit3_large_patch16_224,377.53,2712.348,1024,224,61.6,63.52,304.37
778
+ convnext_large,373.02,2058.836,768,288,56.87,71.29,197.77
779
+ beit_large_patch16_224,360.62,2839.572,1024,224,61.6,63.52,304.43
780
+ beitv2_large_patch16_224,360.58,2839.86,1024,224,61.6,63.52,304.43
781
+ swinv2_base_window12to16_192to256_22kft1k,360.56,1065.006,384,256,22.02,84.71,87.92
782
+ swinv2_base_window16_256,360.23,1065.959,384,256,22.02,84.71,87.92
783
+ regnety_160,353.5,2172.566,768,288,26.37,38.07,83.59
784
+ nasnetalarge,345.63,1111.004,384,331,23.89,90.56,88.75
785
+ maxvit_tiny_tf_384,344.01,744.157,256,384,17.53,123.42,30.98
786
+ xcit_small_24_p8_224,342.37,2990.915,1024,224,35.81,90.78,47.63
787
+ xcit_small_24_p8_224_dist,342.26,2991.817,1024,224,35.81,90.78,47.63
788
+ flexivit_large,335.35,3053.52,1024,240,70.99,75.39,304.36
789
+ maxxvitv2_rmlp_large_rw_224,332.33,3081.271,1024,224,44.14,87.15,215.42
790
+ vit_large_r50_s32_384,329.8,3104.921,1024,384,57.43,76.52,329.09
791
+ pnasnet5large,328.89,1167.534,384,331,25.04,92.89,86.06
792
+ tresnet_m_448,325.8,3143.01,1024,448,22.94,29.21,31.39
793
+ volo_d1_384,323.04,1584.906,512,384,22.75,108.55,26.78
794
+ volo_d4_224,318.96,3210.439,1024,224,44.34,80.22,192.96
795
+ xcit_medium_24_p16_384_dist,312.74,3274.268,1024,384,47.39,91.64,84.4
796
+ nfnet_f2,310.6,3296.869,1024,352,63.22,79.06,193.78
797
+ vit_base_patch16_384,307.09,1250.42,384,384,55.54,101.56,86.86
798
+ deit_base_patch16_384,306.8,1251.599,384,384,55.54,101.56,86.86
799
+ vit_base_patch16_clip_384,306.29,1253.685,384,384,55.54,101.56,86.86
800
+ deit_base_distilled_patch16_384,305.48,1257.017,384,384,55.65,101.82,87.63
801
+ ecaresnet269d,305.06,3356.684,1024,352,50.25,101.25,102.09
802
+ maxvit_large_tf_224,301.43,1273.908,384,224,43.68,127.35,211.79
803
+ deit3_base_patch16_384_in21ft1k,298.01,1288.526,384,384,55.54,101.56,86.88
804
+ deit3_base_patch16_384,297.88,1289.093,384,384,55.54,101.56,86.88
805
+ resnetrs270,296.97,3448.186,1024,352,51.13,105.48,129.86
806
+ regnetx_320,289.44,2653.413,768,224,31.81,36.3,107.81
807
+ efficientnet_b6,287.31,890.997,256,528,19.4,167.39,43.04
808
+ vit_large_patch14_224,286.23,3577.501,1024,224,81.08,88.79,304.2
809
+ vit_large_patch14_clip_224,285.99,3580.5,1024,224,81.08,88.79,304.2
810
+ crossvit_18_dagger_408,285.18,673.248,192,408,32.47,124.87,44.61
811
+ cait_xxs24_384,281.48,3637.936,1024,384,9.63,122.66,12.03
812
+ ig_resnext101_32x32d,275.12,1860.956,512,224,87.29,91.12,468.53
813
+ tf_efficientnet_b6,274.07,700.545,192,528,19.4,167.39,43.04
814
+ dm_nfnet_f2,264.79,2900.408,768,352,63.22,79.06,193.78
815
+ beit_base_patch16_384,261.27,1469.733,384,384,55.54,101.56,86.74
816
+ efficientnetv2_l,260.33,1966.694,512,480,56.4,157.99,118.52
817
+ swinv2_cr_small_384,259.75,985.56,256,384,29.7,298.03,49.7
818
+ tf_efficientnetv2_l,257.29,1989.923,512,480,56.4,157.99,118.52
819
+ resnest200e,254.36,1006.453,256,320,35.69,82.78,70.2
820
+ mvitv2_large,249.99,2048.061,512,224,43.87,112.02,217.99
821
+ xcit_tiny_24_p8_384_dist,248.25,4124.916,1024,384,27.05,132.95,12.11
822
+ convnext_xlarge,242.63,2110.182,512,288,100.8,95.05,350.2
823
+ resmlp_big_24_224_in22ft1k,241.9,4233.056,1024,224,100.23,87.31,129.14
824
+ resmlp_big_24_224,241.74,4235.988,1024,224,100.23,87.31,129.14
825
+ resmlp_big_24_distilled_224,241.44,4241.249,1024,224,100.23,87.31,129.14
826
+ convnextv2_large,239.52,1068.782,256,288,56.87,71.29,197.96
827
+ coatnet_4_224,238.62,1072.827,256,224,62.48,129.26,275.43
828
+ swin_base_patch4_window12_384,236.12,813.144,192,384,47.19,134.78,87.9
829
+ xcit_medium_24_p8_224_dist,233.5,3289.007,768,224,63.53,121.23,84.32
830
+ xcit_medium_24_p8_224,233.5,3289.104,768,224,63.53,121.23,84.32
831
+ eca_nfnet_l3,229.87,2227.284,512,448,52.55,118.4,72.04
832
+ vit_base_r50_s16_384,226.32,1696.687,384,384,67.43,135.03,98.95
833
+ maxvit_small_tf_384,224.01,857.105,192,384,35.87,183.65,69.02
834
+ xcit_small_12_p8_384_dist,221.54,1733.28,384,384,54.92,138.29,26.21
835
+ swinv2_large_window12to16_192to256_22kft1k,220.1,1163.101,256,256,47.81,121.53,196.74
836
+ volo_d5_224,210.88,4855.76,1024,224,72.4,118.11,295.46
837
+ vit_base_patch8_224,199.67,1282.079,256,224,78.22,161.69,86.58
838
+ cait_xs24_384,197.64,3885.811,768,384,19.28,183.98,26.67
839
+ resnetrs350,196.19,5219.377,1024,384,77.59,154.74,163.96
840
+ cait_xxs36_384,188.27,5439.03,1024,384,14.35,183.7,17.37
841
+ swinv2_cr_base_384,185.68,1378.725,256,384,50.57,333.68,87.88
842
+ coatnet_rmlp_2_rw_384,184.84,1038.746,192,384,47.69,209.43,73.88
843
+ swinv2_cr_huge_224,184.09,2085.934,384,224,115.97,121.08,657.83
844
+ convnext_xxlarge,183.68,2787.486,512,224,151.66,95.29,846.47
845
+ volo_d2_384,180.56,2126.753,384,384,46.17,184.51,58.87
846
+ xcit_large_24_p16_384_dist,176.39,5805.281,1024,384,105.35,137.17,189.1
847
+ regnety_640,174.81,4393.396,768,224,64.16,42.5,281.38
848
+ maxvit_xlarge_tf_224,171.63,1491.6,256,224,97.49,191.02,474.95
849
+ nfnet_f3,170.11,4514.791,768,416,115.58,141.78,254.92
850
+ densenet264d_iabn,167.13,6126.84,1024,224,13.47,14.0,72.74
851
+ efficientnet_b7,166.38,1153.975,192,600,38.33,289.94,66.35
852
+ maxvit_tiny_tf_512,163.72,781.809,128,512,33.49,257.59,31.05
853
+ efficientnetv2_xl,162.7,3146.865,512,512,93.85,247.32,208.12
854
+ tf_efficientnetv2_xl,161.32,3173.821,512,512,93.85,247.32,208.12
855
+ tf_efficientnet_b7,160.43,1196.798,192,600,38.33,289.94,66.35
856
+ resnetv2_152x2_bit_teacher_384,159.54,1604.579,256,384,136.16,132.56,236.34
857
+ tresnet_l_448,154.66,6620.743,1024,448,43.5,47.56,55.99
858
+ vit_huge_patch14_224,154.27,6637.58,1024,224,167.43,139.43,658.75
859
+ vit_huge_patch14_clip_224,154.17,6642.017,1024,224,167.4,139.41,632.05
860
+ maxxvitv2_rmlp_base_rw_384,153.9,1663.429,256,384,72.98,213.74,116.09
861
+ cait_s24_384,152.41,3359.254,512,384,32.17,245.31,47.06
862
+ deit3_huge_patch14_224_in21ft1k,150.05,6824.53,1024,224,167.4,139.41,632.13
863
+ deit3_huge_patch14_224,149.59,6845.356,1024,224,167.4,139.41,632.13
864
+ dm_nfnet_f3,145.48,3519.403,512,416,115.58,141.78,254.92
865
+ resnetrs420,142.37,5394.528,768,416,108.45,213.79,191.89
866
+ swin_large_patch4_window12_384,138.37,925.016,128,384,104.08,202.16,196.74
867
+ resnetv2_50x3_bitm,133.5,1438.189,192,448,145.7,133.37,217.32
868
+ maxvit_rmlp_base_rw_384,131.6,1945.285,256,384,70.97,318.95,116.14
869
+ xcit_large_24_p8_224_dist,131.32,3898.808,512,224,141.23,181.56,188.93
870
+ xcit_large_24_p8_224,131.27,3900.391,512,224,141.23,181.56,188.93
871
+ coatnet_5_224,130.48,1471.508,192,224,145.49,194.24,687.47
872
+ maxvit_base_tf_384,122.48,1567.652,192,384,73.8,332.9,119.65
873
+ resnest269e,119.17,2148.198,256,416,77.69,171.98,110.93
874
+ resnetv2_152x2_bitm,117.29,2182.534,256,448,184.99,180.43,236.34
875
+ xcit_small_24_p8_384_dist,116.59,3293.649,384,384,105.24,265.91,47.63
876
+ tresnet_xl_448,115.63,8855.938,1024,448,60.65,61.31,78.44
877
+ swinv2_cr_large_384,113.43,1128.479,128,384,108.95,404.96,196.68
878
+ maxvit_small_tf_512,106.82,1198.298,128,512,67.26,383.77,69.13
879
+ efficientnet_b8,106.21,1205.18,128,672,63.48,442.89,87.41
880
+ tf_efficientnet_b8,102.86,1244.358,128,672,63.48,442.89,87.41
881
+ eva_large_patch14_336,102.71,2492.371,256,336,191.1,270.24,304.53
882
+ vit_large_patch14_clip_336,102.52,2496.99,256,336,191.11,270.24,304.53
883
+ vit_large_patch16_384,102.5,2497.593,256,384,191.21,270.24,304.72
884
+ cait_s36_384,101.88,5025.316,512,384,47.99,367.4,68.37
885
+ eva_giant_patch14_224,101.84,10055.112,1024,224,267.18,192.64,1012.56
886
+ vit_giant_patch14_224,100.71,7625.752,768,224,267.18,192.64,1012.61
887
+ vit_giant_patch14_clip_224,100.43,7646.856,768,224,267.18,192.64,1012.65
888
+ deit3_large_patch16_384_in21ft1k,99.81,2564.809,256,384,191.21,270.24,304.76
889
+ deit3_large_patch16_384,99.8,2564.994,256,384,191.21,270.24,304.76
890
+ swinv2_base_window12to24_192to384_22kft1k,96.12,665.832,64,384,55.25,280.36,87.92
891
+ nfnet_f4,89.33,5731.574,512,512,216.26,262.26,316.07
892
+ beit_large_patch16_384,88.56,2890.58,256,384,191.21,270.24,305.0
893
+ maxvit_large_tf_384,86.44,1480.84,128,384,132.55,445.84,212.03
894
+ regnety_1280,82.49,4654.845,384,224,127.66,71.58,644.81
895
+ xcit_medium_24_p8_384_dist,79.96,3201.705,256,384,186.67,354.73,84.32
896
+ resnetv2_101x3_bitm,79.41,2417.67,192,448,280.33,194.78,387.93
897
+ volo_d3_448,77.64,2473.021,192,448,96.33,446.83,86.63
898
+ dm_nfnet_f4,77.54,4952.036,384,512,216.26,262.26,316.07
899
+ nfnet_f5,67.46,5691.915,384,544,290.97,349.71,377.21
900
+ tf_efficientnet_l2,63.66,1507.989,96,475,172.11,609.89,480.31
901
+ swinv2_large_window12to24_192to384_22kft1k,60.94,787.651,48,384,116.15,407.83,196.74
902
+ vit_gigantic_patch14_224,60.18,8507.121,512,224,483.95,275.37,1844.44
903
+ vit_gigantic_patch14_clip_224,60.11,8517.85,512,224,483.96,275.37,1844.91
904
+ volo_d4_448,57.87,3317.675,192,448,197.13,527.35,193.41
905
+ maxvit_base_tf_512,57.86,2212.256,128,512,138.02,703.99,119.88
906
+ dm_nfnet_f5,57.78,6645.368,384,544,290.97,349.71,377.21
907
+ vit_huge_patch14_clip_336,57.4,4460.085,256,336,390.97,407.54,632.46
908
+ ig_resnext101_32x48d,56.43,6804.709,384,224,153.57,131.06,828.41
909
+ convnextv2_huge,56.31,1704.92,96,384,337.96,232.35,660.29
910
+ convmixer_1536_20,55.47,18461.426,1024,224,48.68,33.03,51.63
911
+ swinv2_cr_giant_224,52.39,3665.046,192,224,483.85,309.15,2598.76
912
+ nfnet_f6,51.81,7411.574,384,576,378.69,452.2,438.36
913
+ maxvit_xlarge_tf_384,50.76,1891.335,96,384,292.78,668.76,475.32
914
+ swinv2_cr_huge_384,49.01,1305.73,64,384,352.04,583.18,657.94
915
+ regnety_2560,47.69,8051.463,384,224,257.07,87.48,826.14
916
+ xcit_large_24_p8_384_dist,44.91,4275.004,192,384,415.0,531.82,188.93
917
+ dm_nfnet_f6,44.62,5737.462,256,576,378.69,452.2,438.36
918
+ nfnet_f7,41.13,6224.782,256,608,480.39,570.85,499.5
919
+ maxvit_large_tf_512,41.04,1559.597,64,512,244.75,942.15,212.33
920
+ eva_giant_patch14_336,39.89,6418.269,256,336,620.64,550.67,1013.01
921
+ volo_d5_448,39.88,3209.812,128,448,315.06,737.92,295.91
922
+ beit_large_patch16_512,35.33,2716.953,96,512,362.24,656.39,305.67
923
+ cait_m36_384,32.89,7783.487,256,384,173.11,734.81,271.22
924
+ resnetv2_152x4_bitm,30.46,3151.929,96,480,844.84,414.26,936.53
925
+ volo_d5_512,27.89,4590.0,128,512,425.09,1105.37,296.09
926
+ maxvit_xlarge_tf_512,24.38,1968.424,48,512,534.14,1413.22,475.77
927
+ efficientnet_l2,23.13,1383.428,32,800,479.12,1707.39,480.31
928
+ swinv2_cr_giant_384,15.06,2124.735,32,384,1450.71,1394.86,2598.76
929
+ cait_m48_448,13.86,9235.876,128,448,329.41,1708.23,356.46
930
+ eva_giant_patch14_560,10.52,3043.009,32,560,1906.76,2577.17,1014.45
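The next file added in this commit, benchmark-infer-amp-nhwc-pt210-cu121-rtx3090.csv, records per-model inference throughput (infer_samples_per_sec), step time, GMACs, activation counts, and parameter counts. As a minimal illustrative sketch of how such a result file can be consumed (not part of the commit itself), the snippet below loads it with pandas and ranks models by throughput. The relative path assumes the pytorch-image-models repo root as the working directory, and the column names are taken from that file's header; other CSVs in this commit use a different column order, so check each header before reusing the snippet.

```python
# Illustrative sketch only: rank models in one of the added benchmark CSVs by throughput.
import pandas as pd

# Column names below match the NHWC pt210/cu121 file added in this commit.
df = pd.read_csv("results/benchmark-infer-amp-nhwc-pt210-cu121-rtx3090.csv")

# Throughput per million parameters is one simple way to eyeball efficiency.
df["samples_per_sec_per_mparam"] = df["infer_samples_per_sec"] / df["param_count"]

# Show the ten fastest models at their benchmarked image size.
top = df.sort_values("infer_samples_per_sec", ascending=False).head(10)
print(top[["model", "infer_img_size", "infer_samples_per_sec",
           "param_count", "samples_per_sec_per_mparam"]].to_string(index=False))
```

The same pattern applies to the other benchmark CSVs in this commit once their column names are adjusted to match each file's header.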
pytorch-image-models/results/benchmark-infer-amp-nhwc-pt210-cu121-rtx3090.csv ADDED
@@ -0,0 +1,1205 @@
1
+ model,infer_img_size,infer_batch_size,infer_samples_per_sec,infer_step_time,infer_gmacs,infer_macts,param_count
2
+ tinynet_e,106,1024.0,75290.96,13.591,0.03,0.69,2.04
3
+ mobilenetv3_small_050,224,1024.0,56785.93,18.023,0.03,0.92,1.59
4
+ efficientvit_m0,224,1024.0,50656.23,20.205,0.08,0.91,2.35
5
+ lcnet_035,224,1024.0,48853.22,20.951,0.03,1.04,1.64
6
+ lcnet_050,224,1024.0,42147.98,24.285,0.05,1.26,1.88
7
+ mobilenetv3_small_075,224,1024.0,42002.46,24.369,0.05,1.3,2.04
8
+ mobilenetv3_small_100,224,1024.0,38516.23,26.573,0.06,1.42,2.54
9
+ tinynet_d,152,1024.0,37989.71,26.944,0.05,1.42,2.34
10
+ efficientvit_m1,224,1024.0,37486.44,27.306,0.17,1.33,2.98
11
+ tf_mobilenetv3_small_minimal_100,224,1024.0,33948.13,30.153,0.06,1.41,2.04
12
+ efficientvit_m2,224,1024.0,33551.67,30.51,0.2,1.47,4.19
13
+ tf_mobilenetv3_small_075,224,1024.0,33262.15,30.775,0.05,1.3,2.04
14
+ tf_mobilenetv3_small_100,224,1024.0,31002.71,33.019,0.06,1.42,2.54
15
+ lcnet_075,224,1024.0,30664.19,33.384,0.1,1.99,2.36
16
+ efficientvit_m3,224,1024.0,29423.78,34.792,0.27,1.62,6.9
17
+ efficientvit_m4,224,1024.0,27882.1,36.716,0.3,1.7,8.8
18
+ mnasnet_small,224,1024.0,25015.02,40.925,0.07,2.16,2.03
19
+ regnetx_002,224,1024.0,24564.71,41.67,0.2,2.16,2.68
20
+ lcnet_100,224,1024.0,24268.72,42.183,0.16,2.52,2.95
21
+ levit_128s,224,1024.0,22705.11,45.089,0.31,1.88,7.78
22
+ regnety_002,224,1024.0,22248.91,46.012,0.2,2.17,3.16
23
+ resnet10t,176,1024.0,22236.3,46.04,0.7,1.51,5.44
24
+ mobilenetv2_035,224,1024.0,22055.42,46.418,0.07,2.86,1.68
25
+ levit_conv_128s,224,1024.0,21863.15,46.826,0.31,1.88,7.78
26
+ ghostnet_050,224,1024.0,20782.95,49.261,0.05,1.77,2.59
27
+ mnasnet_050,224,1024.0,20672.17,49.525,0.11,3.07,2.22
28
+ repghostnet_050,224,1024.0,20617.05,49.657,0.05,2.02,2.31
29
+ efficientvit_m5,224,1024.0,19010.14,53.856,0.53,2.41,12.47
30
+ tinynet_c,184,1024.0,18737.07,54.641,0.11,2.87,2.46
31
+ efficientvit_b0,224,1024.0,18023.56,56.804,0.1,2.87,3.41
32
+ semnasnet_050,224,1024.0,17573.38,58.26,0.11,3.44,2.08
33
+ mobilenetv2_050,224,1024.0,17491.5,58.532,0.1,3.64,1.97
34
+ regnetx_004,224,1024.0,17164.74,59.647,0.4,3.14,5.16
35
+ repghostnet_058,224,1024.0,16947.81,60.41,0.07,2.59,2.55
36
+ regnetx_004_tv,224,1024.0,16485.73,62.101,0.42,3.17,5.5
37
+ vit_small_patch32_224,224,1024.0,16428.86,62.319,1.12,2.09,22.88
38
+ cs3darknet_focus_s,256,1024.0,16333.25,62.684,0.69,2.7,3.27
39
+ lcnet_150,224,1024.0,15841.02,64.632,0.34,3.79,4.5
40
+ gernet_s,224,1024.0,15617.62,65.556,0.75,2.65,8.17
41
+ cs3darknet_s,256,1024.0,15597.89,65.64,0.72,2.97,3.28
42
+ levit_128,224,1024.0,15372.6,66.601,0.41,2.71,9.21
43
+ vit_tiny_r_s16_p8_224,224,1024.0,15191.19,67.397,0.43,1.85,6.34
44
+ levit_conv_128,224,1024.0,14904.31,68.695,0.41,2.71,9.21
45
+ mobilenetv3_large_075,224,1024.0,14843.63,68.964,0.16,4.0,3.99
46
+ pit_ti_distilled_224,224,1024.0,14746.15,69.432,0.51,2.77,5.1
47
+ pit_ti_224,224,1024.0,14700.08,69.649,0.5,2.75,4.85
48
+ mixer_s32_224,224,1024.0,14362.24,71.288,1.0,2.28,19.1
49
+ resnet10t,224,1024.0,14254.88,71.825,1.1,2.43,5.44
50
+ repghostnet_080,224,1024.0,13967.84,73.293,0.1,3.22,3.28
51
+ tf_efficientnetv2_b0,192,1024.0,13629.52,75.121,0.54,3.51,7.14
52
+ mobilenetv3_rw,224,1024.0,13582.75,75.38,0.23,4.41,5.48
53
+ levit_192,224,1024.0,13511.34,75.778,0.66,3.2,10.95
54
+ mnasnet_075,224,1024.0,13417.36,76.309,0.23,4.77,3.17
55
+ mobilenetv3_large_100,224,1024.0,13322.79,76.851,0.23,4.41,5.48
56
+ hardcorenas_a,224,1024.0,13314.34,76.899,0.23,4.38,5.26
57
+ levit_conv_192,224,1024.0,12952.02,79.05,0.66,3.2,10.95
58
+ regnety_004,224,1024.0,12651.55,80.929,0.41,3.89,4.34
59
+ tf_mobilenetv3_large_075,224,1024.0,12636.69,81.023,0.16,4.0,3.99
60
+ nf_regnet_b0,192,1024.0,12264.41,83.481,0.37,3.15,8.76
61
+ tinynet_b,188,1024.0,12262.56,83.495,0.21,4.44,3.73
62
+ tf_mobilenetv3_large_minimal_100,224,1024.0,12182.74,84.043,0.22,4.4,3.92
63
+ hardcorenas_b,224,1024.0,12118.5,84.488,0.26,5.09,5.18
64
+ hardcorenas_c,224,1024.0,12088.28,84.699,0.28,5.01,5.52
65
+ resnet14t,176,1024.0,11843.82,86.448,1.07,3.61,10.08
66
+ mnasnet_100,224,1024.0,11686.43,87.612,0.33,5.46,4.38
67
+ regnety_006,224,1024.0,11675.48,87.69,0.61,4.33,6.06
68
+ ese_vovnet19b_slim_dw,224,1024.0,11663.91,87.781,0.4,5.28,1.9
69
+ repghostnet_100,224,1024.0,11508.79,88.956,0.15,3.98,4.07
70
+ tf_mobilenetv3_large_100,224,1024.0,11443.62,89.472,0.23,4.41,5.48
71
+ vit_tiny_patch16_224,224,1024.0,11342.82,90.267,1.08,4.12,5.72
72
+ hardcorenas_d,224,1024.0,11329.99,90.369,0.3,4.93,7.5
73
+ deit_tiny_distilled_patch16_224,224,1024.0,11311.9,90.514,1.09,4.15,5.91
74
+ deit_tiny_patch16_224,224,1024.0,11286.31,90.719,1.08,4.12,5.72
75
+ semnasnet_075,224,1024.0,11132.28,91.974,0.23,5.54,2.91
76
+ resnet18,224,1024.0,11101.69,92.228,1.82,2.48,11.69
77
+ ghostnet_100,224,1024.0,11039.87,92.744,0.15,3.55,5.18
78
+ mobilenetv2_075,224,1024.0,10984.87,93.208,0.22,5.86,2.64
79
+ spnasnet_100,224,1024.0,10557.11,96.986,0.35,6.03,4.42
80
+ tf_efficientnetv2_b1,192,1024.0,10473.04,97.765,0.76,4.59,8.14
81
+ regnetx_008,224,1024.0,10422.45,98.23,0.81,5.15,7.26
82
+ seresnet18,224,1024.0,10416.31,98.297,1.82,2.49,11.78
83
+ tf_efficientnetv2_b0,224,1024.0,10174.51,100.633,0.73,4.77,7.14
84
+ legacy_seresnet18,224,1024.0,10133.12,101.044,1.82,2.49,11.78
85
+ repghostnet_111,224,1024.0,10094.28,101.428,0.18,4.38,4.54
86
+ hardcorenas_f,224,1024.0,10012.95,102.257,0.35,5.57,8.2
87
+ tinynet_a,192,1024.0,9946.05,102.945,0.35,5.41,6.19
88
+ dla46_c,224,1024.0,9943.77,102.967,0.58,4.5,1.3
89
+ hardcorenas_e,224,1024.0,9851.75,103.931,0.35,5.65,8.07
90
+ semnasnet_100,224,1024.0,9823.16,104.233,0.32,6.23,3.89
91
+ levit_256,224,1024.0,9811.76,104.354,1.13,4.23,18.89
92
+ repvgg_a0,224,1024.0,9709.7,105.449,1.52,3.59,9.11
93
+ mobilenetv2_100,224,1024.0,9654.78,106.051,0.31,6.68,3.5
94
+ regnety_008,224,1024.0,9643.2,106.178,0.81,5.25,6.26
95
+ fbnetc_100,224,1024.0,9552.51,107.186,0.4,6.51,5.57
96
+ efficientnet_lite0,224,1024.0,9466.4,108.161,0.4,6.74,4.65
97
+ levit_conv_256,224,1024.0,9461.49,108.218,1.13,4.23,18.89
98
+ resnet18d,224,1024.0,9458.4,108.253,2.06,3.29,11.71
99
+ pit_xs_224,224,1024.0,9332.33,109.714,1.1,4.12,10.62
100
+ ese_vovnet19b_slim,224,1024.0,9277.16,110.369,1.69,3.52,3.17
101
+ regnety_008_tv,224,1024.0,9213.78,111.127,0.84,5.42,6.43
102
+ pit_xs_distilled_224,224,1024.0,9203.86,111.241,1.11,4.15,11.0
103
+ convnext_atto,224,1024.0,9104.06,112.467,0.55,3.81,3.7
104
+ repghostnet_130,224,1024.0,8873.05,115.395,0.25,5.24,5.48
105
+ ghostnet_130,224,1024.0,8870.81,115.424,0.24,4.6,7.36
106
+ convnext_atto_ols,224,1024.0,8829.55,115.964,0.58,4.11,3.7
107
+ regnetz_005,224,1024.0,8796.44,116.392,0.52,5.86,7.12
108
+ xcit_nano_12_p16_224,224,1024.0,8604.96,118.991,0.56,4.17,3.05
109
+ levit_256d,224,1024.0,8322.97,123.022,1.4,4.93,26.21
110
+ regnetx_006,224,1024.0,8320.1,123.064,0.61,3.98,6.2
111
+ tf_efficientnet_lite0,224,1024.0,8163.21,125.431,0.4,6.74,4.65
112
+ fbnetv3_b,224,1024.0,8152.31,125.598,0.42,6.97,8.6
113
+ efficientnet_b0,224,1024.0,8085.72,126.633,0.4,6.75,5.29
114
+ levit_conv_256d,224,1024.0,8055.13,127.113,1.4,4.93,26.21
115
+ edgenext_xx_small,256,1024.0,8014.51,127.757,0.26,3.33,1.33
116
+ mnasnet_140,224,1024.0,7984.3,128.241,0.6,7.71,7.12
117
+ convnext_femto,224,1024.0,7977.79,128.346,0.79,4.57,5.22
118
+ tf_efficientnetv2_b2,208,1024.0,7861.13,130.251,1.06,6.0,10.1
119
+ mobilevit_xxs,256,1024.0,7827.79,130.801,0.34,5.74,1.27
120
+ repghostnet_150,224,1024.0,7766.69,131.835,0.32,6.0,6.58
121
+ convnext_femto_ols,224,1024.0,7757.32,131.994,0.82,4.87,5.23
122
+ rexnetr_100,224,1024.0,7545.9,135.692,0.43,7.72,4.88
123
+ repvit_m1,224,1024.0,7543.44,135.728,0.83,7.45,5.49
124
+ resnet14t,224,1024.0,7466.4,137.137,1.69,5.8,10.08
125
+ mobilenetv2_110d,224,1024.0,7331.32,139.66,0.45,8.71,4.52
126
+ hrnet_w18_small,224,1024.0,7298.3,140.296,1.61,5.72,13.19
127
+ cs3darknet_focus_m,256,1024.0,7202.61,142.16,1.98,4.89,9.3
128
+ repvit_m0_9,224,1024.0,7165.5,142.888,0.83,7.45,5.49
129
+ crossvit_tiny_240,240,1024.0,7123.68,143.735,1.3,5.67,7.01
130
+ efficientvit_b1,224,1024.0,7109.59,144.02,0.53,7.25,9.1
131
+ tf_efficientnet_b0,224,1024.0,7104.21,144.129,0.4,6.75,5.29
132
+ crossvit_9_240,240,1024.0,7025.32,145.747,1.55,5.59,8.55
133
+ nf_regnet_b0,256,1024.0,6992.1,146.441,0.64,5.58,8.76
134
+ repvgg_a1,224,1024.0,6942.64,147.483,2.64,4.74,14.09
135
+ mobilevitv2_050,256,1024.0,6935.55,147.628,0.48,8.04,1.37
136
+ cs3darknet_m,256,1024.0,6929.59,147.762,2.08,5.28,9.31
137
+ efficientnet_b1_pruned,240,1024.0,6922.7,147.909,0.4,6.21,6.33
138
+ gernet_m,224,1024.0,6840.64,149.682,3.02,5.24,21.14
139
+ fbnetv3_d,224,1024.0,6784.35,150.925,0.52,8.5,10.31
140
+ semnasnet_140,224,1024.0,6771.35,151.215,0.6,8.87,6.11
141
+ crossvit_9_dagger_240,240,1024.0,6704.51,152.722,1.68,6.03,8.78
142
+ tf_efficientnetv2_b1,240,1024.0,6611.54,154.87,1.21,7.34,8.14
143
+ mobilenetv2_140,224,1024.0,6588.7,155.407,0.6,9.57,6.11
144
+ resnet34,224,1024.0,6504.25,157.425,3.67,3.74,21.8
145
+ ese_vovnet19b_dw,224,1024.0,6406.95,159.816,1.34,8.25,6.54
146
+ selecsls42,224,1024.0,6366.41,160.834,2.94,4.62,30.35
147
+ resnet18,288,1024.0,6354.7,161.13,3.01,4.11,11.69
148
+ selecsls42b,224,1024.0,6344.62,161.386,2.98,4.62,32.46
149
+ efficientnet_b0_g16_evos,224,1024.0,6342.4,161.442,1.01,7.42,8.11
150
+ edgenext_xx_small,288,1024.0,6334.97,161.631,0.33,4.21,1.33
151
+ efficientnet_lite1,240,1024.0,6268.15,163.355,0.62,10.14,5.42
152
+ pvt_v2_b0,224,1024.0,6254.52,163.711,0.53,7.01,3.67
153
+ visformer_tiny,224,1024.0,6218.29,164.665,1.27,5.72,10.32
154
+ convnext_pico,224,1024.0,6208.02,164.938,1.37,6.1,9.05
155
+ fbnetv3_b,256,1024.0,6192.25,165.357,0.55,9.1,8.6
156
+ efficientnet_es_pruned,224,1024.0,6175.39,165.809,1.81,8.73,5.44
157
+ efficientnet_es,224,1024.0,6170.12,165.95,1.81,8.73,5.44
158
+ rexnet_100,224,1024.0,6170.05,165.953,0.41,7.44,4.8
159
+ ghostnetv2_100,224,1024.0,6155.62,166.342,0.18,4.55,6.16
160
+ seresnet34,224,1024.0,6069.09,168.714,3.67,3.74,21.96
161
+ convnext_pico_ols,224,1024.0,6043.01,169.442,1.43,6.5,9.06
162
+ seresnet18,288,1024.0,5998.94,170.686,3.01,4.11,11.78
163
+ dla46x_c,224,1024.0,5992.19,170.877,0.54,5.66,1.07
164
+ dla34,224,1024.0,5954.72,171.952,3.07,5.02,15.74
165
+ repghostnet_200,224,1024.0,5934.75,172.524,0.54,7.96,9.8
166
+ resnet26,224,1024.0,5916.33,173.07,2.36,7.35,16.0
167
+ levit_384,224,1024.0,5897.4,173.625,2.36,6.26,39.13
168
+ resnet34d,224,1024.0,5884.13,174.017,3.91,4.54,21.82
169
+ cs3darknet_focus_m,288,1024.0,5878.89,174.173,2.51,6.19,9.3
170
+ legacy_seresnet34,224,1024.0,5873.4,174.335,3.67,3.74,21.96
171
+ repvit_m2,224,1024.0,5866.53,174.53,1.36,9.43,8.8
172
+ vit_base_patch32_224,224,1024.0,5866.04,174.553,4.37,4.19,88.22
173
+ vit_base_patch32_clip_224,224,1024.0,5864.79,174.59,4.37,4.19,88.22
174
+ repvit_m1_0,224,1024.0,5862.26,174.66,1.13,8.69,7.3
175
+ tf_efficientnet_es,224,1024.0,5831.76,175.58,1.81,8.73,5.44
176
+ rexnetr_130,224,1024.0,5827.09,175.72,0.68,9.81,7.61
177
+ resnetrs50,160,1024.0,5819.33,175.954,2.29,6.2,35.69
178
+ dla60x_c,224,1024.0,5709.85,179.326,0.59,6.01,1.32
179
+ vit_small_patch32_384,384,1024.0,5700.23,179.631,3.26,6.07,22.92
180
+ levit_conv_384,224,1024.0,5694.64,179.807,2.36,6.26,39.13
181
+ tiny_vit_5m_224,224,1024.0,5681.84,180.212,1.18,9.32,12.08
182
+ efficientnet_b1,224,1024.0,5671.54,180.54,0.59,9.36,7.79
183
+ cs3darknet_m,288,1024.0,5670.5,180.573,2.63,6.69,9.31
184
+ resnetblur18,224,1024.0,5631.98,181.808,2.34,3.39,11.69
185
+ tf_efficientnet_lite1,240,1024.0,5588.09,183.236,0.62,10.14,5.42
186
+ repvit_m1_1,224,1024.0,5584.25,183.355,1.36,9.43,8.8
187
+ mixnet_s,224,1024.0,5566.85,183.931,0.25,6.25,4.13
188
+ convnext_atto,288,1024.0,5556.64,184.274,0.91,6.3,3.7
189
+ darknet17,256,1024.0,5525.94,185.298,3.26,7.18,14.3
190
+ pit_s_224,224,1024.0,5520.06,185.491,2.42,6.18,23.46
191
+ resnet18d,288,1024.0,5497.35,186.262,3.41,5.43,11.71
192
+ selecsls60,224,1024.0,5496.69,186.283,3.59,5.52,30.67
193
+ pit_s_distilled_224,224,1024.0,5494.69,186.349,2.45,6.22,24.04
194
+ xcit_tiny_12_p16_224,224,1024.0,5472.11,187.12,1.24,6.29,6.72
195
+ selecsls60b,224,1024.0,5466.97,187.296,3.63,5.52,32.77
196
+ skresnet18,224,1024.0,5432.07,188.499,1.82,3.24,11.96
197
+ convnext_atto_ols,288,1024.0,5378.78,190.367,0.96,6.8,3.7
198
+ resmlp_12_224,224,1024.0,5371.14,190.637,3.01,5.5,15.35
199
+ regnetz_005,288,1024.0,5353.96,191.249,0.86,9.68,7.12
200
+ mobilenetv2_120d,224,1024.0,5347.39,191.484,0.69,11.97,5.83
201
+ convnextv2_atto,224,1024.0,5293.77,193.425,0.55,3.81,3.71
202
+ repvgg_b0,224,1024.0,5265.8,194.451,3.41,6.15,15.82
203
+ mixer_b32_224,224,1024.0,5245.72,195.191,3.24,6.29,60.29
204
+ vit_tiny_r_s16_p8_384,384,1024.0,5235.72,195.568,1.25,5.39,6.36
205
+ nf_regnet_b1,256,1024.0,5226.46,195.915,0.82,7.27,10.22
206
+ nf_regnet_b2,240,1024.0,5223.53,196.02,0.97,7.23,14.31
207
+ vit_base_patch32_clip_quickgelu_224,224,1024.0,5220.87,196.124,4.37,4.19,87.85
208
+ resnetaa34d,224,1024.0,5205.31,196.711,4.43,5.07,21.82
209
+ resnet26d,224,1024.0,5169.81,198.062,2.6,8.15,16.01
210
+ tf_mixnet_s,224,1024.0,5128.65,199.652,0.25,6.25,4.13
211
+ rexnetr_150,224,1024.0,5105.32,200.564,0.89,11.13,9.78
212
+ gmixer_12_224,224,1024.0,5083.79,201.414,2.67,7.26,12.7
213
+ fbnetv3_d,256,1024.0,5047.63,202.856,0.68,11.1,10.31
214
+ edgenext_x_small,256,1024.0,5018.94,204.014,0.54,5.93,2.34
215
+ mixer_s16_224,224,1024.0,5009.58,204.393,3.79,5.97,18.53
216
+ regnetz_b16,224,1024.0,5008.24,204.437,1.45,9.95,9.72
217
+ gmlp_ti16_224,224,1024.0,4999.44,204.811,1.34,7.55,5.87
218
+ darknet21,256,1024.0,4956.17,206.601,3.93,7.47,20.86
219
+ eva02_tiny_patch14_224,224,1024.0,4940.45,207.258,1.4,6.17,5.5
220
+ ghostnetv2_130,224,1024.0,4896.55,209.116,0.28,5.9,8.96
221
+ convnext_femto,288,1024.0,4844.52,211.362,1.3,7.56,5.22
222
+ nf_resnet26,224,1024.0,4822.21,212.339,2.41,7.35,16.0
223
+ efficientnet_lite2,260,1024.0,4817.66,212.541,0.89,12.9,6.09
224
+ tf_efficientnetv2_b2,260,1024.0,4797.27,213.444,1.72,9.84,10.1
225
+ efficientnet_cc_b0_8e,224,1024.0,4749.51,215.591,0.42,9.42,24.01
226
+ sedarknet21,256,1024.0,4747.46,215.684,3.93,7.47,20.95
227
+ efficientnet_cc_b0_4e,224,1024.0,4720.11,216.933,0.41,9.42,13.31
228
+ efficientnet_b2_pruned,260,1024.0,4716.64,217.093,0.73,9.13,8.31
229
+ convnext_femto_ols,288,1024.0,4709.5,217.422,1.35,8.06,5.23
230
+ resnext26ts,256,1024.0,4668.94,219.311,2.43,10.52,10.3
231
+ tiny_vit_11m_224,224,1024.0,4649.32,220.237,1.9,10.73,20.35
232
+ ecaresnet50d_pruned,224,1024.0,4636.78,220.832,2.53,6.43,19.94
233
+ deit_small_patch16_224,224,1024.0,4620.93,221.59,4.25,8.25,22.05
234
+ efficientformer_l1,224,1024.0,4616.64,221.795,1.3,5.53,12.29
235
+ vit_small_patch16_224,224,1024.0,4614.32,221.907,4.25,8.25,22.05
236
+ dpn48b,224,1024.0,4588.67,223.146,1.69,8.92,9.13
237
+ deit_small_distilled_patch16_224,224,1024.0,4587.3,223.214,4.27,8.29,22.44
238
+ vit_base_patch32_clip_256,256,1024.0,4547.51,225.168,5.68,5.44,87.86
239
+ convnextv2_femto,224,1024.0,4545.73,225.256,0.79,4.57,5.23
240
+ mobilevitv2_075,256,1024.0,4537.95,225.638,1.05,12.06,2.87
241
+ eca_resnext26ts,256,1024.0,4521.18,226.479,2.43,10.52,10.3
242
+ seresnext26ts,256,1024.0,4517.43,226.666,2.43,10.52,10.39
243
+ efficientnetv2_rw_t,224,1024.0,4511.98,226.94,1.93,9.94,13.65
244
+ legacy_seresnext26_32x4d,224,1024.0,4489.21,228.092,2.49,9.39,16.79
245
+ gernet_l,256,1024.0,4474.96,228.817,4.57,8.0,31.08
246
+ gcresnext26ts,256,1024.0,4472.11,228.964,2.43,10.53,10.48
247
+ rexnet_130,224,1024.0,4453.51,229.92,0.68,9.71,7.56
248
+ tf_efficientnet_b1,240,1024.0,4442.45,230.492,0.71,10.88,7.79
249
+ tf_efficientnet_cc_b0_8e,224,1024.0,4391.83,233.15,0.42,9.42,24.01
250
+ convnext_nano,224,1024.0,4389.78,233.258,2.46,8.37,15.59
251
+ gc_efficientnetv2_rw_t,224,1024.0,4373.41,234.132,1.94,9.97,13.68
252
+ tf_efficientnet_cc_b0_4e,224,1024.0,4373.37,234.134,0.41,9.42,13.31
253
+ tf_efficientnetv2_b3,240,1024.0,4372.06,234.204,1.93,9.95,14.36
254
+ tf_efficientnet_lite2,260,1024.0,4324.79,236.764,0.89,12.9,6.09
255
+ efficientnet_b1,256,1024.0,4298.75,238.198,0.77,12.22,7.79
256
+ deit3_small_patch16_224,224,1024.0,4270.38,239.779,4.25,8.25,22.06
257
+ cs3darknet_focus_l,256,1024.0,4230.07,242.066,4.66,8.03,21.15
258
+ nf_regnet_b1,288,1024.0,4135.98,247.568,1.02,9.2,10.22
259
+ convnext_nano_ols,224,1024.0,4118.16,248.644,2.65,9.38,15.65
260
+ nf_seresnet26,224,1024.0,4112.79,248.966,2.41,7.36,17.4
261
+ nf_ecaresnet26,224,1024.0,4107.39,249.292,2.41,7.36,16.0
262
+ efficientnet_b2,256,1024.0,4105.27,249.424,0.89,12.81,9.11
263
+ cs3darknet_l,256,1024.0,4101.41,249.66,4.86,8.55,21.16
264
+ nf_regnet_b2,272,1024.0,4097.18,249.913,1.22,9.27,14.31
265
+ ecaresnext50t_32x4d,224,1024.0,4074.12,251.332,2.7,10.09,15.41
266
+ ecaresnext26t_32x4d,224,1024.0,4072.14,251.454,2.7,10.09,15.41
267
+ seresnext26t_32x4d,224,1024.0,4061.05,252.141,2.7,10.09,16.81
268
+ repvgg_a2,224,1024.0,4049.32,252.867,5.7,6.26,28.21
269
+ poolformer_s12,224,1024.0,4047.55,252.981,1.82,5.53,11.92
270
+ seresnext26d_32x4d,224,1024.0,4037.54,253.609,2.73,10.19,16.81
271
+ regnetx_016,224,1024.0,4025.84,254.342,1.62,7.93,9.19
272
+ resnet26t,256,1024.0,4021.85,254.598,3.35,10.52,16.01
273
+ flexivit_small,240,1024.0,4011.8,255.236,4.88,9.46,22.06
274
+ edgenext_x_small,288,1024.0,3990.87,256.573,0.68,7.5,2.34
275
+ rexnet_150,224,1024.0,3983.48,257.051,0.9,11.21,9.73
276
+ vit_relpos_small_patch16_rpn_224,224,1024.0,3975.32,257.575,4.24,9.38,21.97
277
+ repvit_m3,224,1024.0,3966.18,258.164,1.89,13.94,10.68
278
+ vit_relpos_small_patch16_224,224,1024.0,3948.05,259.358,4.24,9.38,21.98
279
+ vit_srelpos_small_patch16_224,224,1024.0,3937.22,260.07,4.23,8.49,21.97
280
+ mobileone_s1,224,1024.0,3931.71,260.434,0.86,9.67,4.83
281
+ resnetv2_50,224,1024.0,3890.29,263.208,4.11,11.11,25.55
282
+ eca_botnext26ts_256,256,1024.0,3883.93,263.639,2.46,11.6,10.59
283
+ cs3sedarknet_l,256,1024.0,3835.91,266.94,4.86,8.56,21.91
284
+ ghostnetv2_160,224,1024.0,3826.79,267.576,0.42,7.23,12.39
285
+ resnet34,288,1024.0,3820.15,268.041,6.07,6.18,21.8
286
+ edgenext_small,256,1024.0,3794.31,269.865,1.26,9.07,5.59
287
+ dpn68,224,1024.0,3788.79,270.258,2.35,10.47,12.61
288
+ ese_vovnet19b_dw,288,1024.0,3782.88,270.682,2.22,13.63,6.54
289
+ fbnetv3_g,240,1024.0,3779.41,270.931,1.28,14.87,16.62
290
+ convnext_pico,288,1024.0,3777.8,271.046,2.27,10.08,9.05
291
+ ecaresnetlight,224,1024.0,3759.77,272.346,4.11,8.42,30.16
292
+ eca_halonext26ts,256,1024.0,3745.07,273.414,2.44,11.46,10.76
293
+ dpn68b,224,1024.0,3719.51,275.293,2.35,10.47,12.61
294
+ mixnet_m,224,1024.0,3687.37,277.689,0.36,8.19,5.01
295
+ resnet50,224,1024.0,3687.18,277.708,4.11,11.11,25.56
296
+ efficientnet_em,240,1024.0,3685.78,277.814,3.04,14.34,6.9
297
+ convnext_pico_ols,288,1024.0,3673.49,278.743,2.37,10.74,9.06
298
+ resnet32ts,256,1024.0,3641.96,281.156,4.63,11.58,17.96
299
+ bat_resnext26ts,256,1024.0,3638.35,281.435,2.53,12.51,10.73
300
+ efficientnet_b3_pruned,300,1024.0,3633.29,281.827,1.04,11.86,9.86
301
+ botnet26t_256,256,1024.0,3632.31,281.904,3.32,11.98,12.49
302
+ hrnet_w18_small_v2,224,1024.0,3631.33,281.979,2.62,9.65,15.6
303
+ ecaresnet101d_pruned,224,1024.0,3611.37,283.538,3.48,7.69,24.88
304
+ ecaresnet26t,256,1024.0,3599.02,284.511,3.35,10.53,16.01
305
+ regnetv_040,224,1024.0,3598.04,284.583,4.0,12.29,20.64
306
+ seresnet34,288,1024.0,3583.61,285.735,6.07,6.18,21.96
307
+ resnetv2_50t,224,1024.0,3573.26,286.561,4.32,11.82,25.57
308
+ pvt_v2_b1,224,1024.0,3571.19,286.726,2.04,14.01,14.01
309
+ regnety_016,224,1024.0,3567.37,287.031,1.63,8.04,11.2
310
+ resnext26ts,288,1024.0,3565.74,287.167,3.07,13.31,10.3
311
+ regnety_040,224,1024.0,3565.62,287.173,4.0,12.29,20.65
312
+ resnet33ts,256,1024.0,3563.66,287.335,4.76,11.66,19.68
313
+ resnetv2_50d,224,1024.0,3553.44,288.159,4.35,11.92,25.57
314
+ tf_efficientnet_em,240,1024.0,3544.42,288.894,3.04,14.34,6.9
315
+ halonet26t,256,1024.0,3541.55,289.129,3.19,11.69,12.48
316
+ dla60,224,1024.0,3527.55,290.275,4.26,10.16,22.04
317
+ tf_mixnet_m,224,1024.0,3524.0,290.567,0.36,8.19,5.01
318
+ resnet50c,224,1024.0,3521.04,290.812,4.35,11.92,25.58
319
+ edgenext_small_rw,256,1024.0,3501.76,292.411,1.58,9.51,7.83
320
+ resnet34d,288,1024.0,3491.3,293.29,6.47,7.51,21.82
321
+ convnextv2_pico,224,1024.0,3480.58,294.194,1.37,6.1,9.07
322
+ vit_small_resnet26d_224,224,1024.0,3476.26,294.557,5.04,10.65,63.61
323
+ convit_tiny,224,1024.0,3460.49,295.901,1.26,7.94,5.71
324
+ tresnet_m,224,1024.0,3457.69,296.14,5.75,7.31,31.39
325
+ resnet26,288,1024.0,3457.48,296.158,3.9,12.15,16.0
326
+ seresnext26ts,288,1024.0,3455.43,296.333,3.07,13.32,10.39
327
+ vit_relpos_base_patch32_plus_rpn_256,256,1024.0,3447.98,296.974,7.59,6.63,119.42
328
+ seresnet33ts,256,1024.0,3444.98,297.233,4.76,11.66,19.78
329
+ eca_resnext26ts,288,1024.0,3443.01,297.404,3.07,13.32,10.3
330
+ eca_resnet33ts,256,1024.0,3442.23,297.471,4.76,11.66,19.68
331
+ tf_efficientnet_b2,260,1024.0,3440.99,297.578,1.02,13.83,9.11
332
+ gcresnet33ts,256,1024.0,3424.64,298.998,4.76,11.68,19.88
333
+ gcresnext26ts,288,1024.0,3414.23,299.91,3.07,13.33,10.48
334
+ resnet50t,224,1024.0,3401.57,301.026,4.32,11.82,25.57
335
+ vovnet39a,224,1024.0,3395.56,301.56,7.09,6.73,22.6
336
+ resnet50d,224,1024.0,3380.59,302.894,4.35,11.92,25.58
337
+ efficientvit_b2,224,1024.0,3359.89,304.76,1.6,14.62,24.33
338
+ resnest14d,224,1024.0,3357.89,304.943,2.76,7.33,10.61
339
+ vit_base_patch32_plus_256,256,1024.0,3354.04,305.293,7.7,6.35,119.48
340
+ efficientnet_b0_gn,224,1024.0,3353.74,305.319,0.42,6.75,5.29
341
+ cs3darknet_focus_l,288,1024.0,3340.22,306.556,5.9,10.16,21.15
342
+ selecsls84,224,1024.0,3335.07,307.029,5.9,7.57,50.95
343
+ vit_tiny_patch16_384,384,1024.0,3332.37,307.277,3.16,12.08,5.79
344
+ legacy_seresnet50,224,1024.0,3325.14,307.946,3.88,10.6,28.09
345
+ coatnet_nano_cc_224,224,1024.0,3301.24,310.176,2.13,13.1,13.76
346
+ fastvit_t8,256,1024.0,3298.88,310.398,0.7,8.63,4.03
347
+ resnetblur18,288,1024.0,3292.39,311.01,3.87,5.6,11.69
348
+ repvit_m1_5,224,1024.0,3281.4,312.05,2.31,15.7,14.64
349
+ ese_vovnet39b,224,1024.0,3276.58,312.51,7.09,6.74,24.57
350
+ levit_512,224,1024.0,3274.29,312.728,5.64,10.22,95.17
351
+ haloregnetz_b,224,1024.0,3272.82,312.869,1.97,11.94,11.68
352
+ mobilevit_xs,256,1024.0,3272.76,312.87,0.93,13.62,2.32
353
+ coat_lite_tiny,224,1024.0,3257.39,314.352,1.6,11.65,5.72
354
+ coatnext_nano_rw_224,224,1024.0,3256.31,314.455,2.36,10.68,14.7
355
+ eca_vovnet39b,224,1024.0,3252.14,314.859,7.09,6.74,22.6
356
+ efficientnet_b2,288,1024.0,3249.31,315.132,1.12,16.2,9.11
357
+ resnetaa50,224,1024.0,3245.58,315.495,5.15,11.64,25.56
358
+ coatnet_nano_rw_224,224,1024.0,3238.25,316.209,2.29,13.29,15.14
359
+ cs3darknet_l,288,1024.0,3236.81,316.35,6.16,10.83,21.16
360
+ convnextv2_atto,288,1024.0,3226.1,317.401,0.91,6.3,3.71
361
+ mobileone_s2,224,1024.0,3211.19,318.869,1.34,11.55,7.88
362
+ seresnet50,224,1024.0,3200.07,319.981,4.11,11.13,28.09
363
+ nf_regnet_b3,288,1024.0,3185.16,321.477,1.67,11.84,18.59
364
+ crossvit_small_240,240,1024.0,3184.9,321.506,5.09,11.34,26.86
365
+ res2net50_48w_2s,224,1024.0,3168.87,323.132,4.18,11.72,25.29
366
+ resnetaa34d,288,1024.0,3155.87,324.463,7.33,8.38,21.82
367
+ vit_small_r26_s32_224,224,1024.0,3124.44,327.727,3.54,9.44,36.43
368
+ dla60x,224,1024.0,3106.99,329.567,3.54,13.8,17.35
369
+ efficientnet_b0_g8_gn,224,1024.0,3104.31,329.853,0.66,6.75,6.56
370
+ resnext50_32x4d,224,1024.0,3099.2,330.397,4.26,14.4,25.03
371
+ levit_conv_512,224,1024.0,3078.02,332.67,5.64,10.22,95.17
372
+ skresnet34,224,1024.0,3073.03,333.21,3.67,5.13,22.28
373
+ coat_lite_mini,224,1024.0,3058.66,334.777,2.0,12.25,11.01
374
+ resnet26d,288,1024.0,3053.73,335.317,4.29,13.48,16.01
375
+ mobileone_s0,224,1024.0,3053.01,335.391,1.09,15.48,5.29
376
+ levit_512d,224,1024.0,3045.04,336.274,5.85,11.3,92.5
377
+ cs3sedarknet_l,288,1024.0,3026.08,338.38,6.16,10.83,21.91
378
+ resnetaa50d,224,1024.0,3022.22,338.813,5.39,12.44,25.58
379
+ convnext_tiny,224,1024.0,3015.62,339.555,4.47,13.44,28.59
380
+ eca_nfnet_l0,224,1024.0,3011.21,340.052,4.35,10.47,24.14
381
+ xcit_nano_12_p16_384,384,1024.0,3011.18,340.055,1.64,12.14,3.05
382
+ nfnet_l0,224,1024.0,3000.78,341.23,4.36,10.47,35.07
383
+ resnetrs50,224,1024.0,2989.89,342.477,4.48,12.14,35.69
384
+ efficientnet_cc_b1_8e,240,1024.0,2988.69,342.615,0.75,15.44,39.72
385
+ regnetz_b16,288,1024.0,2987.05,342.79,2.39,16.43,9.72
386
+ seresnet50t,224,1024.0,2984.21,343.128,4.32,11.83,28.1
387
+ ecaresnet50d,224,1024.0,2975.54,344.128,4.35,11.93,25.58
388
+ regnetz_c16,256,1024.0,2971.35,344.607,2.51,16.57,13.46
389
+ densenet121,224,1024.0,2967.84,345.021,2.87,6.9,7.98
390
+ crossvit_15_240,240,1024.0,2967.06,345.11,5.17,12.01,27.53
391
+ resnet50s,224,1024.0,2958.0,346.169,5.47,13.52,25.68
392
+ rexnetr_200,224,1024.0,2955.32,346.483,1.59,15.11,16.52
393
+ mixnet_l,224,1024.0,2926.26,349.918,0.58,10.84,7.33
394
+ xcit_tiny_24_p16_224,224,1024.0,2925.33,350.035,2.34,11.82,12.12
395
+ levit_conv_512d,224,1024.0,2899.99,353.091,5.85,11.3,92.5
396
+ gcresnext50ts,256,1024.0,2897.54,353.393,3.75,15.46,15.67
397
+ lambda_resnet26rpt_256,256,1024.0,2887.51,354.621,3.16,11.87,10.99
398
+ resnext50d_32x4d,224,1024.0,2876.86,355.933,4.5,15.2,25.05
399
+ resnet32ts,288,1024.0,2868.64,356.953,5.86,14.65,17.96
400
+ crossvit_15_dagger_240,240,1024.0,2848.99,359.413,5.5,12.68,28.21
401
+ tiny_vit_21m_224,224,1024.0,2842.09,360.287,4.08,15.96,33.22
402
+ vit_base_resnet26d_224,224,1024.0,2837.87,360.821,6.93,12.34,101.4
403
+ tf_efficientnet_cc_b1_8e,240,1024.0,2835.77,361.09,0.75,15.44,39.72
404
+ cspresnet50,256,1024.0,2834.55,361.245,4.54,11.5,21.62
405
+ mobilevitv2_100,256,1024.0,2833.62,361.358,1.84,16.08,4.9
406
+ resnet33ts,288,1024.0,2829.43,361.9,6.02,14.75,19.68
407
+ vovnet57a,224,1024.0,2821.83,362.874,8.95,7.52,36.64
408
+ deit3_medium_patch16_224,224,1024.0,2805.09,365.038,7.53,10.99,38.85
409
+ inception_next_tiny,224,1024.0,2798.9,365.847,4.19,11.98,28.06
410
+ tf_mixnet_l,224,1024.0,2798.14,365.947,0.58,10.84,7.33
411
+ res2next50,224,1024.0,2797.04,366.091,4.2,13.71,24.67
412
+ dla60_res2next,224,1024.0,2795.54,366.285,3.49,13.17,17.03
413
+ coatnet_pico_rw_224,224,1024.0,2793.27,366.584,1.96,12.91,10.85
414
+ convnext_tiny_hnf,224,1024.0,2770.64,369.577,4.47,13.44,28.59
415
+ gcresnet50t,256,1024.0,2767.9,369.943,5.42,14.67,25.9
416
+ convnextv2_femto,288,1024.0,2762.62,370.652,1.3,7.56,5.23
417
+ tf_efficientnetv2_b3,300,1024.0,2757.15,371.387,3.04,15.74,14.36
418
+ legacy_seresnext50_32x4d,224,1024.0,2750.41,372.297,4.26,14.42,27.56
419
+ ecaresnet50d_pruned,288,1024.0,2749.78,372.383,4.19,10.61,19.94
420
+ res2net50_26w_4s,224,1024.0,2749.69,372.394,4.28,12.61,25.7
421
+ seresnext50_32x4d,224,1024.0,2749.17,372.464,4.26,14.42,27.56
422
+ vgg11_bn,224,1024.0,2746.28,372.857,7.62,7.44,132.87
423
+ resmlp_24_224,224,1024.0,2745.97,372.9,5.96,10.91,30.02
424
+ resnetv2_50x1_bit,224,1024.0,2742.41,373.383,4.23,11.11,25.55
425
+ eca_resnet33ts,288,1024.0,2737.24,374.089,6.02,14.76,19.68
426
+ efficientnetv2_rw_t,288,1024.0,2736.91,374.133,3.19,16.42,13.65
427
+ seresnet33ts,288,1024.0,2734.83,374.417,6.02,14.76,19.78
428
+ nfnet_f0,192,1024.0,2731.03,374.934,7.21,10.16,71.49
429
+ res2net50_14w_8s,224,1024.0,2724.75,375.804,4.21,13.28,25.06
430
+ visformer_small,224,1024.0,2720.95,376.328,4.88,11.43,40.22
431
+ ese_vovnet57b,224,1024.0,2711.8,377.598,8.95,7.52,38.61
432
+ gcresnet33ts,288,1024.0,2705.39,378.493,6.02,14.78,19.88
433
+ cspresnet50d,256,1024.0,2702.61,378.881,4.86,12.55,21.64
434
+ twins_svt_small,224,1024.0,2696.15,379.788,2.82,10.7,24.06
435
+ efficientvit_l1,224,1024.0,2692.51,380.303,5.27,15.85,52.65
436
+ resnetblur50,224,1024.0,2689.65,380.707,5.16,12.02,25.56
437
+ seresnetaa50d,224,1024.0,2682.26,381.757,5.4,12.46,28.11
438
+ fbnetv3_g,288,1024.0,2673.23,383.046,1.77,21.09,16.62
439
+ cspresnet50w,256,1024.0,2671.97,383.228,5.04,12.19,28.12
440
+ dla60_res2net,224,1024.0,2669.84,383.53,4.15,12.34,20.85
441
+ convnext_nano,288,1024.0,2669.05,383.645,4.06,13.84,15.59
442
+ gc_efficientnetv2_rw_t,288,1024.0,2659.37,385.042,3.2,16.45,13.68
443
+ gcvit_xxtiny,224,1024.0,2658.4,385.182,2.14,15.36,12.0
444
+ poolformerv2_s12,224,1024.0,2624.04,390.223,1.83,5.53,11.89
445
+ vit_relpos_medium_patch16_rpn_224,224,1024.0,2618.88,390.989,7.5,12.13,38.73
446
+ mobileone_s3,224,1024.0,2616.83,391.296,1.94,13.85,10.17
447
+ davit_tiny,224,1024.0,2612.7,391.92,4.47,17.08,28.36
448
+ vit_relpos_medium_patch16_224,224,1024.0,2603.89,393.246,7.5,12.13,38.75
449
+ resnet51q,256,1024.0,2602.52,393.454,6.38,16.55,35.7
450
+ gmixer_24_224,224,1024.0,2594.59,394.657,5.28,14.45,24.72
451
+ maxvit_pico_rw_256,256,768.0,2593.58,296.105,1.68,18.77,7.46
452
+ vit_srelpos_medium_patch16_224,224,1024.0,2591.17,395.176,7.49,11.32,38.74
453
+ vit_relpos_medium_patch16_cls_224,224,1024.0,2587.16,395.789,7.55,13.3,38.76
454
+ maxvit_rmlp_pico_rw_256,256,768.0,2587.02,296.857,1.69,21.32,7.52
455
+ nf_regnet_b3,320,1024.0,2582.41,396.514,2.05,14.61,18.59
456
+ res2net50d,224,1024.0,2577.65,397.25,4.52,13.41,25.72
457
+ cs3darknet_focus_x,256,1024.0,2569.33,398.536,8.03,10.69,35.02
458
+ densenetblur121d,224,1024.0,2559.52,400.063,3.11,7.9,8.0
459
+ inception_v3,299,1024.0,2546.29,402.143,5.73,8.97,23.83
460
+ coatnet_0_rw_224,224,1024.0,2545.57,402.256,4.23,15.1,27.44
461
+ repvgg_b1g4,224,1024.0,2545.06,402.332,8.15,10.64,39.97
462
+ regnetx_032,224,1024.0,2534.07,404.077,3.2,11.37,15.3
463
+ twins_pcpvt_small,224,1024.0,2533.92,404.104,3.68,15.51,24.11
464
+ resnetblur50d,224,1024.0,2528.9,404.909,5.4,12.82,25.58
465
+ rexnet_200,224,1024.0,2519.88,406.358,1.56,14.91,16.37
466
+ resnetrs101,192,1024.0,2505.12,408.751,6.04,12.7,63.62
467
+ resnet26t,320,1024.0,2502.87,409.119,5.24,16.44,16.01
468
+ nf_ecaresnet50,224,1024.0,2502.03,409.253,4.21,11.13,25.56
469
+ convnext_nano_ols,288,1024.0,2497.73,409.961,4.38,15.5,15.65
470
+ convnextv2_nano,224,1024.0,2497.72,409.963,2.46,8.37,15.62
471
+ nf_seresnet50,224,1024.0,2494.79,410.425,4.21,11.13,28.09
472
+ regnety_032,224,1024.0,2483.68,412.275,3.2,11.26,19.44
473
+ vit_medium_patch16_gap_240,240,1024.0,2477.36,413.332,8.6,12.57,44.4
474
+ cs3darknet_x,256,1024.0,2475.51,413.641,8.38,11.35,35.05
475
+ densenet169,224,1024.0,2463.83,415.603,3.4,7.3,14.15
476
+ xcit_small_12_p16_224,224,1024.0,2460.07,416.237,4.82,12.57,26.25
477
+ cspresnext50,256,1024.0,2452.36,417.546,4.05,15.86,20.57
478
+ mobilevit_s,256,1024.0,2447.35,418.395,1.86,17.03,5.58
479
+ darknet53,256,1024.0,2439.82,419.693,9.31,12.39,41.61
480
+ darknetaa53,256,1024.0,2432.07,421.03,7.97,12.39,36.02
481
+ edgenext_small,320,1024.0,2429.25,421.516,1.97,14.16,5.59
482
+ seresnext26t_32x4d,288,1024.0,2412.74,424.404,4.46,16.68,16.81
483
+ sehalonet33ts,256,1024.0,2403.77,425.986,3.55,14.7,13.69
484
+ seresnext26d_32x4d,288,1024.0,2391.16,428.231,4.51,16.85,16.81
485
+ resnet61q,256,1024.0,2368.17,432.39,7.8,17.01,36.85
486
+ fastvit_t12,256,1024.0,2356.34,434.562,1.42,12.42,7.55
487
+ vit_base_r26_s32_224,224,1024.0,2354.84,434.838,6.76,11.54,101.38
488
+ focalnet_tiny_srf,224,1024.0,2353.35,435.113,4.42,16.32,28.43
489
+ resnetv2_101,224,1024.0,2342.24,437.176,7.83,16.23,44.54
490
+ cs3sedarknet_x,256,1024.0,2329.01,439.66,8.38,11.35,35.4
491
+ nf_resnet50,256,1024.0,2318.52,441.645,5.46,14.52,25.56
492
+ xcit_nano_12_p8_224,224,1024.0,2310.67,443.15,2.16,15.71,3.05
493
+ resnest26d,224,1024.0,2309.28,443.418,3.64,9.97,17.07
494
+ coatnet_rmlp_nano_rw_224,224,1024.0,2308.34,443.598,2.51,18.21,15.15
495
+ resnetv2_50,288,1024.0,2302.9,444.644,6.79,18.37,25.55
496
+ ecaresnet50t,256,1024.0,2299.59,445.285,5.64,15.45,25.57
497
+ gmlp_s16_224,224,1024.0,2291.16,446.925,4.42,15.1,19.42
498
+ efficientnet_lite3,300,1024.0,2290.17,447.117,1.65,21.85,8.2
499
+ dm_nfnet_f0,192,1024.0,2271.28,450.836,7.21,10.16,71.49
500
+ resnet101,224,1024.0,2263.99,452.287,7.83,16.23,44.55
501
+ ecaresnet26t,320,1024.0,2258.47,453.393,5.24,16.44,16.01
502
+ edgenext_base,256,1024.0,2256.96,453.695,3.85,15.58,18.51
503
+ efficientnetv2_s,288,1024.0,2251.36,454.825,4.75,20.13,21.46
504
+ skresnet50,224,1024.0,2250.82,454.933,4.11,12.5,25.8
505
+ dla102,224,1024.0,2248.24,455.455,7.19,14.18,33.27
506
+ edgenext_small_rw,320,1024.0,2240.98,456.929,2.46,14.85,7.83
507
+ ecaresnetlight,288,1024.0,2235.21,458.11,6.79,13.91,30.16
508
+ dpn68b,288,1024.0,2234.13,458.331,3.89,17.3,12.61
509
+ gcresnext50ts,288,1024.0,2232.45,458.676,4.75,19.57,15.67
510
+ fastvit_s12,256,1024.0,2229.72,459.239,1.82,13.67,9.47
511
+ fastvit_sa12,256,1024.0,2225.03,460.206,1.96,13.83,11.58
512
+ focalnet_tiny_lrf,224,1024.0,2222.33,460.766,4.49,17.76,28.65
513
+ resnetv2_101d,224,1024.0,2216.51,461.976,8.07,17.04,44.56
514
+ resnet101c,224,1024.0,2202.12,464.995,8.08,17.04,44.57
515
+ vit_base_resnet50d_224,224,1024.0,2199.36,465.578,8.68,16.1,110.97
516
+ regnetv_040,288,1024.0,2190.89,467.375,6.6,20.3,20.64
517
+ vit_medium_patch16_gap_256,256,1024.0,2190.03,467.563,9.78,14.29,38.86
518
+ resnet50,288,1024.0,2185.5,468.532,6.8,18.37,25.56
519
+ gcresnet50t,288,1024.0,2180.99,469.5,6.86,18.57,25.9
520
+ regnety_040,288,1024.0,2169.28,472.031,6.61,20.3,20.65
521
+ vgg13,224,1024.0,2159.6,474.15,11.31,12.25,133.05
522
+ eva02_small_patch14_224,224,1024.0,2151.59,475.915,5.53,12.34,21.62
523
+ vit_medium_patch16_reg4_gap_256,256,1024.0,2149.02,476.485,9.93,14.51,38.87
524
+ efficientnetv2_rw_s,288,1024.0,2146.83,476.971,4.91,21.41,23.94
525
+ ecaresnet101d_pruned,288,1024.0,2141.83,478.084,5.75,12.71,24.88
526
+ mobilevitv2_125,256,1024.0,2139.71,478.555,2.86,20.1,7.48
527
+ vit_medium_patch16_reg4_256,256,1024.0,2136.17,479.352,9.97,14.56,38.87
528
+ skresnet50d,224,1024.0,2134.1,479.815,4.36,13.31,25.82
529
+ pvt_v2_b2,224,1024.0,2119.72,483.066,3.9,24.96,25.36
530
+ hrnet_w18_ssld,224,1024.0,2114.47,484.27,4.32,16.31,21.3
531
+ convnextv2_pico,288,1024.0,2113.62,484.464,2.27,10.08,9.07
532
+ eva02_tiny_patch14_336,336,1024.0,2113.11,484.582,3.14,13.85,5.76
533
+ efficientvit_l2,224,1024.0,2109.14,485.494,6.97,19.58,63.71
534
+ hrnet_w18,224,1024.0,2100.77,487.428,4.32,16.31,21.3
535
+ regnetx_040,224,1024.0,2099.85,487.636,3.99,12.2,22.12
536
+ tf_efficientnet_lite3,300,1024.0,2090.5,489.823,1.65,21.85,8.2
537
+ wide_resnet50_2,224,1024.0,2081.66,491.904,11.43,14.4,68.88
538
+ resnet51q,288,1024.0,2069.71,494.744,8.07,20.94,35.7
539
+ poolformer_s24,224,1024.0,2067.46,495.278,3.41,10.68,21.39
540
+ sebotnet33ts_256,256,512.0,2066.45,247.758,3.89,17.46,13.7
541
+ efficientformer_l3,224,1024.0,2064.62,495.963,3.93,12.01,31.41
542
+ resnest50d_1s4x24d,224,1024.0,2057.55,497.667,4.43,13.57,25.68
543
+ gcvit_xtiny,224,1024.0,2053.45,498.662,2.93,20.26,19.98
544
+ cspdarknet53,256,1024.0,2048.51,499.863,6.57,16.81,27.64
545
+ crossvit_18_240,240,1024.0,2029.53,504.539,8.21,16.14,43.27
546
+ mixnet_xl,224,1024.0,2029.05,504.653,0.93,14.57,11.9
547
+ vit_base_patch32_384,384,1024.0,2028.15,504.881,12.67,12.14,88.3
548
+ efficientnet_b3,288,1024.0,2027.72,504.989,1.63,21.49,12.23
549
+ vit_base_patch32_clip_384,384,1024.0,2026.31,505.34,12.67,12.14,88.3
550
+ resnet50t,288,1024.0,2024.16,505.879,7.14,19.53,25.57
551
+ dla102x,224,1024.0,2023.35,506.08,5.89,19.42,26.31
552
+ legacy_seresnet101,224,1024.0,2012.58,508.788,7.61,15.74,49.33
553
+ resnet50d,288,1024.0,2012.14,508.9,7.19,19.7,25.58
554
+ cs3edgenet_x,256,1024.0,2002.36,511.384,11.53,12.92,47.82
555
+ resnetaa101d,224,1024.0,1994.67,513.346,9.12,17.56,44.57
556
+ repvgg_b1,224,1024.0,1994.42,513.418,13.16,10.64,57.42
557
+ res2net50_26w_6s,224,1024.0,1979.48,517.295,6.33,15.28,37.05
558
+ regnetz_d32,256,1024.0,1978.14,517.642,5.98,23.74,27.58
559
+ cs3sedarknet_xdw,256,1024.0,1970.5,519.653,5.97,17.18,21.6
560
+ resnetaa50,288,1024.0,1968.61,520.152,8.52,19.24,25.56
561
+ seresnet101,224,1024.0,1966.15,520.803,7.84,16.27,49.33
562
+ resnet101s,224,1024.0,1964.56,521.226,9.19,18.64,44.67
563
+ cs3darknet_x,288,1024.0,1958.87,522.739,10.6,14.36,35.05
564
+ crossvit_18_dagger_240,240,1024.0,1955.55,523.625,8.65,16.91,44.27
565
+ swin_tiny_patch4_window7_224,224,1024.0,1951.67,524.668,4.51,17.06,28.29
566
+ tresnet_v2_l,224,1024.0,1947.69,525.738,8.85,16.34,46.17
567
+ ese_vovnet39b,288,1024.0,1941.03,527.543,11.71,11.13,24.57
568
+ regnetz_d8,256,1024.0,1940.13,527.785,3.97,23.74,23.37
569
+ tf_efficientnetv2_s,300,1024.0,1939.51,527.958,5.35,22.73,21.46
570
+ regnetz_c16,320,1024.0,1933.29,529.65,3.92,25.88,13.46
571
+ coatnet_bn_0_rw_224,224,1024.0,1926.49,531.525,4.48,18.41,27.44
572
+ darknet53,288,1024.0,1924.44,532.092,11.78,15.68,41.61
573
+ resnext101_32x4d,224,1024.0,1923.83,532.261,8.01,21.23,44.18
574
+ coatnet_rmlp_0_rw_224,224,1024.0,1920.22,533.259,4.52,21.26,27.45
575
+ xcit_tiny_12_p16_384,384,1024.0,1917.57,533.997,3.64,18.25,6.72
576
+ darknetaa53,288,1024.0,1915.93,534.454,10.08,15.68,36.02
577
+ mobileone_s4,224,1024.0,1915.84,534.474,3.04,17.74,14.95
578
+ maxxvit_rmlp_nano_rw_256,256,768.0,1913.61,401.326,4.17,21.53,16.78
579
+ nest_tiny,224,1024.0,1909.31,536.303,5.24,14.75,17.06
580
+ regnetz_040,256,1024.0,1906.99,536.946,4.06,24.19,27.12
581
+ nf_regnet_b4,320,1024.0,1906.99,536.957,3.29,19.88,30.21
582
+ seresnet50,288,1024.0,1902.22,538.306,6.8,18.39,28.09
583
+ pvt_v2_b2_li,224,1024.0,1897.86,539.539,3.77,25.04,22.55
584
+ regnetz_040_h,256,1024.0,1896.27,539.981,4.12,24.29,28.94
585
+ densenet201,224,1024.0,1895.14,540.319,4.34,7.85,20.01
586
+ halonet50ts,256,1024.0,1887.53,542.495,5.3,19.2,22.73
587
+ nest_tiny_jx,224,1024.0,1885.06,543.199,5.24,14.75,17.06
588
+ vgg13_bn,224,1024.0,1884.94,543.241,11.33,12.25,133.05
589
+ regnetx_080,224,1024.0,1883.47,543.661,8.02,14.06,39.57
590
+ vit_large_patch32_224,224,1024.0,1882.39,543.977,15.27,11.11,305.51
591
+ ecaresnet101d,224,1024.0,1880.92,544.404,8.08,17.07,44.57
592
+ resnet61q,288,1024.0,1874.14,546.373,9.87,21.52,36.85
593
+ nf_resnet101,224,1024.0,1864.42,549.218,8.01,16.23,44.55
594
+ cs3se_edgenet_x,256,1024.0,1859.86,550.568,11.53,12.94,50.72
595
+ repvit_m2_3,224,1024.0,1852.95,552.61,4.57,26.21,23.69
596
+ resmlp_36_224,224,1024.0,1843.66,555.406,8.91,16.33,44.69
597
+ cs3sedarknet_x,288,1024.0,1843.16,555.556,10.6,14.37,35.4
598
+ resnext50_32x4d,288,1024.0,1841.23,556.139,7.04,23.81,25.03
599
+ convnext_small,224,1024.0,1838.66,556.915,8.71,21.56,50.22
600
+ convnext_tiny,288,1024.0,1835.18,557.972,7.39,22.21,28.59
601
+ resnetv2_50d_gn,224,1024.0,1829.29,559.767,4.38,11.92,25.57
602
+ resnetaa50d,288,1024.0,1827.2,560.408,8.92,20.57,25.58
603
+ pit_b_224,224,1024.0,1823.77,561.458,10.56,16.6,73.76
604
+ eca_nfnet_l0,288,1024.0,1822.69,561.796,7.12,17.29,24.14
605
+ nfnet_l0,288,1024.0,1817.7,563.332,7.13,17.29,35.07
606
+ sequencer2d_s,224,1024.0,1816.41,563.738,4.96,11.31,27.65
607
+ pit_b_distilled_224,224,1024.0,1810.4,565.6,10.63,16.67,74.79
608
+ nf_resnet50,288,1024.0,1794.38,570.655,6.88,18.37,25.56
609
+ twins_pcpvt_base,224,1024.0,1790.37,571.935,6.46,21.35,43.83
610
+ rexnetr_200,288,768.0,1782.92,430.745,2.62,24.96,16.52
611
+ seresnet50t,288,1024.0,1780.59,575.079,7.14,19.55,28.1
612
+ cait_xxs24_224,224,1024.0,1779.24,575.513,2.53,20.29,11.96
613
+ swin_s3_tiny_224,224,1024.0,1777.31,576.139,4.64,19.13,28.33
614
+ resnet50_gn,224,1024.0,1776.88,576.279,4.14,11.11,25.56
615
+ ecaresnet50d,288,1024.0,1775.84,576.616,7.19,19.72,25.58
616
+ resnetblur101d,224,1024.0,1765.86,579.878,9.12,17.94,44.57
617
+ densenet121,288,1024.0,1761.12,581.437,4.74,11.41,7.98
618
+ coat_lite_small,224,1024.0,1760.12,581.767,3.96,22.09,19.84
619
+ mixer_b16_224,224,1024.0,1758.48,582.299,12.62,14.53,59.88
620
+ mobilevitv2_150,256,768.0,1748.31,439.266,4.09,24.11,10.59
621
+ efficientvit_b3,224,1024.0,1742.56,587.628,3.99,26.9,48.65
622
+ rexnetr_300,224,1024.0,1736.82,589.571,3.39,22.16,34.81
623
+ vgg16,224,1024.0,1730.88,591.595,15.47,13.56,138.36
624
+ maxxvitv2_nano_rw_256,256,768.0,1724.32,445.384,6.12,19.66,23.7
625
+ res2net101_26w_4s,224,1024.0,1723.01,594.296,8.1,18.45,45.21
626
+ resnext50d_32x4d,288,1024.0,1717.01,596.374,7.44,25.13,25.05
627
+ maxvit_nano_rw_256,256,768.0,1709.05,449.363,4.26,25.76,15.45
628
+ legacy_seresnext101_32x4d,224,1024.0,1707.02,599.865,8.02,21.26,48.96
629
+ seresnext101_32x4d,224,1024.0,1706.74,599.963,8.02,21.26,48.96
630
+ maxvit_rmlp_nano_rw_256,256,768.0,1705.93,450.183,4.28,27.4,15.5
631
+ resnetv2_50d_frn,224,1024.0,1703.71,601.028,4.33,11.92,25.59
632
+ mobilevitv2_175,256,512.0,1701.95,300.817,5.54,28.13,14.25
633
+ tf_efficientnet_b3,300,1024.0,1694.25,604.385,1.87,23.83,12.23
634
+ convnext_tiny_hnf,288,1024.0,1681.52,608.96,7.39,22.21,28.59
635
+ ese_vovnet39b_evos,224,1024.0,1671.22,612.716,7.07,6.74,24.58
636
+ res2net50_26w_8s,224,1024.0,1656.9,618.009,8.37,17.95,48.4
637
+ resnet101d,256,1024.0,1654.59,618.871,10.55,22.25,44.57
638
+ tresnet_l,224,1024.0,1652.13,619.794,10.9,11.9,55.99
639
+ res2net101d,224,1024.0,1652.09,619.808,8.35,19.25,45.23
640
+ mixer_l32_224,224,1024.0,1651.22,620.129,11.27,19.86,206.94
641
+ regnetz_b16_evos,224,1024.0,1648.87,621.016,1.43,9.95,9.74
642
+ botnet50ts_256,256,512.0,1645.51,311.14,5.54,22.23,22.74
643
+ efficientnet_b3,320,1024.0,1641.76,623.708,2.01,26.52,12.23
644
+ seresnext50_32x4d,288,1024.0,1638.34,625.012,7.04,23.82,27.56
645
+ coatnet_0_224,224,512.0,1634.58,313.22,4.43,21.14,25.04
646
+ swinv2_cr_tiny_224,224,1024.0,1629.27,628.491,4.66,28.45,28.33
647
+ inception_next_small,224,1024.0,1628.58,628.755,8.36,19.27,49.37
648
+ resnetv2_152,224,1024.0,1628.46,628.801,11.55,22.56,60.19
649
+ regnetx_064,224,1024.0,1628.2,628.898,6.49,16.37,26.21
650
+ hrnet_w32,224,1024.0,1627.55,629.157,8.97,22.02,41.23
651
+ convnextv2_tiny,224,1024.0,1627.26,629.266,4.47,13.44,28.64
652
+ seresnetaa50d,288,1024.0,1622.33,631.178,8.92,20.59,28.11
653
+ davit_small,224,1024.0,1614.32,634.313,8.69,27.54,49.75
654
+ regnety_040_sgn,224,1024.0,1612.57,634.996,4.03,12.29,20.65
655
+ legacy_xception,299,768.0,1604.43,478.663,8.4,35.83,22.86
656
+ swinv2_cr_tiny_ns_224,224,1024.0,1600.49,639.793,4.66,28.45,28.33
657
+ resnetblur50,288,1024.0,1598.7,640.511,8.52,19.87,25.56
658
+ efficientnet_el,300,1024.0,1595.26,641.889,8.0,30.7,10.59
659
+ efficientnet_el_pruned,300,1024.0,1592.53,642.988,8.0,30.7,10.59
660
+ resnet152,224,1024.0,1589.58,644.183,11.56,22.56,60.19
661
+ deit_base_patch16_224,224,1024.0,1581.19,647.603,16.87,16.49,86.57
662
+ cs3edgenet_x,288,1024.0,1577.26,649.216,14.59,16.36,47.82
663
+ deit_base_distilled_patch16_224,224,1024.0,1575.74,649.842,16.95,16.58,87.34
664
+ vit_base_patch16_224,224,1024.0,1574.94,650.173,16.87,16.49,86.57
665
+ vit_base_patch16_224_miil,224,1024.0,1574.63,650.301,16.88,16.5,94.4
666
+ vit_base_patch16_clip_224,224,1024.0,1574.46,650.371,16.87,16.49,86.57
667
+ vit_base_patch16_siglip_224,224,1024.0,1571.54,651.577,17.02,16.71,92.88
668
+ resnetv2_152d,224,1024.0,1564.52,654.501,11.8,23.36,60.2
669
+ vit_base_patch16_gap_224,224,1024.0,1563.13,655.085,16.78,16.41,86.57
670
+ halo2botnet50ts_256,256,1024.0,1562.09,655.52,5.02,21.78,22.64
671
+ resnet152c,224,1024.0,1558.11,657.195,11.8,23.36,60.21
672
+ ese_vovnet99b,224,1024.0,1554.99,658.512,16.51,11.27,63.2
673
+ vit_small_resnet50d_s16_224,224,1024.0,1551.97,659.792,13.0,21.12,57.53
674
+ nf_seresnet101,224,1024.0,1549.92,660.662,8.02,16.27,49.33
675
+ nf_ecaresnet101,224,1024.0,1549.88,660.683,8.01,16.27,44.55
676
+ tf_efficientnet_el,300,1024.0,1543.58,663.384,8.0,30.7,10.59
677
+ coatnet_rmlp_1_rw_224,224,1024.0,1542.97,663.643,7.44,28.08,41.69
678
+ nfnet_f0,256,1024.0,1541.8,664.144,12.62,18.05,71.49
679
+ vgg16_bn,224,1024.0,1533.25,667.85,15.5,13.56,138.37
680
+ resnest50d,224,1024.0,1530.42,669.084,5.4,14.36,27.48
681
+ caformer_s18,224,1024.0,1528.28,670.023,3.9,15.18,26.34
682
+ pvt_v2_b3,224,1024.0,1527.57,670.328,6.71,33.8,45.24
683
+ densenetblur121d,288,1024.0,1521.38,673.062,5.14,13.06,8.0
684
+ maxvit_tiny_rw_224,224,768.0,1520.98,504.928,4.93,28.54,29.06
685
+ mvitv2_tiny,224,1024.0,1518.09,674.509,4.7,21.16,24.17
686
+ vit_base_patch16_rpn_224,224,1024.0,1516.7,675.134,16.78,16.41,86.54
687
+ convnextv2_nano,288,768.0,1514.74,507.006,4.06,13.84,15.62
688
+ regnety_032,288,1024.0,1514.59,676.077,5.29,18.61,19.44
689
+ rexnet_300,224,1024.0,1508.74,678.701,3.44,22.4,34.71
690
+ resnetblur50d,288,1024.0,1506.45,679.732,8.92,21.19,25.58
691
+ deit3_base_patch16_224,224,1024.0,1497.14,683.959,16.87,16.49,86.59
692
+ convit_small,224,1024.0,1494.54,685.148,5.76,17.87,27.78
693
+ vit_base_patch32_clip_448,448,1024.0,1493.83,685.476,17.21,16.49,88.34
694
+ dla169,224,1024.0,1487.25,688.504,11.6,20.2,53.39
695
+ skresnext50_32x4d,224,1024.0,1470.99,696.12,4.5,17.18,27.48
696
+ xcit_tiny_12_p8_224,224,1024.0,1465.13,698.903,4.81,23.6,6.71
697
+ vit_small_patch16_36x1_224,224,1024.0,1460.65,701.044,12.63,24.59,64.67
698
+ ecaresnet50t,320,1024.0,1451.46,705.484,8.82,24.13,25.57
699
+ beitv2_base_patch16_224,224,1024.0,1448.02,707.161,16.87,16.49,86.53
700
+ vgg19,224,1024.0,1441.93,710.149,19.63,14.86,143.67
701
+ beit_base_patch16_224,224,1024.0,1440.48,710.862,16.87,16.49,86.53
702
+ hrnet_w30,224,1024.0,1436.17,712.996,8.15,21.21,37.71
703
+ edgenext_base,320,1024.0,1435.98,713.087,6.01,24.32,18.51
704
+ resnet152s,224,1024.0,1434.4,713.876,12.92,24.96,60.32
705
+ convformer_s18,224,1024.0,1427.19,717.481,3.96,15.82,26.77
706
+ resnetv2_50d_evos,224,1024.0,1426.57,717.793,4.33,11.92,25.59
707
+ focalnet_small_srf,224,1024.0,1426.35,717.904,8.62,26.26,49.89
708
+ sequencer2d_m,224,1024.0,1413.9,724.228,6.55,14.26,38.31
709
+ vit_relpos_base_patch16_rpn_224,224,1024.0,1408.36,727.069,16.8,17.63,86.41
710
+ volo_d1_224,224,1024.0,1407.83,727.348,6.94,24.43,26.63
711
+ regnety_080,224,1024.0,1407.5,727.512,8.0,17.97,39.18
712
+ vit_small_patch16_18x2_224,224,1024.0,1407.09,727.729,12.63,24.59,64.67
713
+ gcvit_tiny,224,1024.0,1405.32,728.65,4.79,29.82,28.22
714
+ dpn92,224,1024.0,1404.08,729.292,6.54,18.21,37.67
715
+ vit_relpos_base_patch16_224,224,1024.0,1402.98,729.864,16.8,17.63,86.43
716
+ resnetv2_101,288,1024.0,1402.28,730.227,12.94,26.83,44.54
717
+ regnetx_160,224,1024.0,1400.84,730.974,15.99,25.52,54.28
718
+ dla102x2,224,1024.0,1395.12,733.975,9.34,29.91,41.28
719
+ legacy_seresnet152,224,1024.0,1394.86,734.109,11.33,22.08,66.82
720
+ vit_relpos_base_patch16_clsgap_224,224,1024.0,1394.83,734.131,16.88,17.72,86.43
721
+ vit_relpos_base_patch16_cls_224,224,1024.0,1392.12,735.556,16.88,17.72,86.43
722
+ vit_small_patch16_384,384,1024.0,1390.73,736.291,12.45,24.15,22.2
723
+ poolformer_s36,224,1024.0,1388.46,737.493,5.0,15.82,30.86
724
+ vit_base_patch16_clip_quickgelu_224,224,1024.0,1388.13,737.672,16.87,16.49,86.19
725
+ densenet161,224,1024.0,1384.23,739.75,7.79,11.06,28.68
726
+ flexivit_base,240,1024.0,1380.45,741.777,19.35,18.92,86.59
727
+ efficientformerv2_s0,224,1024.0,1377.72,743.244,0.41,5.3,3.6
728
+ seresnet152,224,1024.0,1371.27,746.737,11.57,22.61,66.82
729
+ poolformerv2_s24,224,1024.0,1356.43,754.905,3.42,10.68,21.34
730
+ resnet101,288,1024.0,1354.29,756.102,12.95,26.83,44.55
731
+ focalnet_small_lrf,224,1024.0,1339.63,764.378,8.74,28.61,50.34
732
+ inception_v4,299,1024.0,1338.22,765.183,12.28,15.09,42.68
733
+ repvgg_b2,224,1024.0,1336.97,765.895,20.45,12.9,89.02
734
+ nf_regnet_b4,384,1024.0,1327.28,771.488,4.7,28.61,30.21
735
+ repvgg_b2g4,224,1024.0,1323.55,773.658,12.63,12.9,61.76
736
+ eca_nfnet_l1,256,1024.0,1319.97,775.763,9.62,22.04,41.41
737
+ fastvit_sa24,256,1024.0,1310.4,781.428,3.79,23.92,21.55
738
+ xcit_small_24_p16_224,224,1024.0,1307.21,783.335,9.1,23.63,47.67
739
+ twins_pcpvt_large,224,1024.0,1303.57,785.524,9.53,30.21,60.99
740
+ vit_base_patch16_xp_224,224,1024.0,1302.82,785.975,16.85,16.49,86.51
741
+ maxvit_tiny_tf_224,224,768.0,1301.05,590.28,5.42,31.21,30.92
742
+ deit3_small_patch16_384,384,1024.0,1298.34,788.686,12.45,24.15,22.21
743
+ coatnet_rmlp_1_rw2_224,224,1024.0,1296.36,789.892,7.71,32.74,41.72
744
+ coatnet_1_rw_224,224,1024.0,1295.8,790.234,7.63,27.22,41.72
745
+ regnety_080_tv,224,1024.0,1291.63,792.778,8.51,19.73,39.38
746
+ vgg19_bn,224,1024.0,1290.82,793.286,19.66,14.86,143.68
747
+ mixnet_xxl,224,768.0,1286.88,596.774,2.04,23.43,23.96
748
+ dm_nfnet_f0,256,1024.0,1286.75,795.79,12.62,18.05,71.49
749
+ efficientnet_b4,320,768.0,1280.17,599.91,3.13,34.76,19.34
750
+ hrnet_w18_ssld,288,1024.0,1279.49,800.308,7.14,26.96,21.3
751
+ maxxvit_rmlp_tiny_rw_256,256,768.0,1274.84,602.417,6.36,32.69,29.64
752
+ efficientformerv2_s1,224,1024.0,1271.59,805.28,0.67,7.66,6.19
753
+ convnext_base,224,1024.0,1268.86,807.011,15.38,28.75,88.59
754
+ mobilevitv2_200,256,512.0,1268.57,403.59,7.22,32.15,18.45
755
+ regnetz_d32,320,1024.0,1265.97,808.844,9.33,37.08,27.58
756
+ efficientnetv2_s,384,1024.0,1265.12,809.401,8.44,35.77,21.46
757
+ twins_svt_base,224,1024.0,1261.93,811.442,8.36,20.42,56.07
758
+ wide_resnet50_2,288,1024.0,1242.89,823.878,18.89,23.81,68.88
759
+ regnetz_d8,320,1024.0,1242.36,824.221,6.19,37.08,23.37
760
+ regnetz_040,320,512.0,1238.82,413.274,6.35,37.78,27.12
761
+ regnetz_040_h,320,512.0,1231.07,415.879,6.43,37.94,28.94
762
+ nest_small,224,1024.0,1230.37,832.252,9.41,22.88,38.35
763
+ tf_efficientnetv2_s,384,1024.0,1224.58,836.191,8.44,35.77,21.46
764
+ nest_small_jx,224,1024.0,1220.76,838.798,9.41,22.88,38.35
765
+ maxvit_tiny_rw_256,256,768.0,1213.37,632.937,6.44,37.27,29.07
766
+ maxvit_rmlp_tiny_rw_256,256,768.0,1210.44,634.468,6.47,39.84,29.15
767
+ vit_base_patch16_siglip_256,256,1024.0,1208.23,847.511,22.23,21.83,92.93
768
+ efficientnetv2_rw_s,384,1024.0,1208.22,847.514,8.72,38.03,23.94
769
+ resnetaa101d,288,1024.0,1207.75,847.844,15.07,29.03,44.57
770
+ swin_small_patch4_window7_224,224,1024.0,1206.81,848.507,8.77,27.47,49.61
771
+ dpn98,224,1024.0,1206.02,849.061,11.73,25.2,61.57
772
+ swinv2_tiny_window8_256,256,1024.0,1197.34,855.217,5.96,24.57,28.35
773
+ cs3se_edgenet_x,320,1024.0,1196.49,855.827,18.01,20.21,50.72
774
+ resnext101_64x4d,224,1024.0,1196.17,856.053,15.52,31.21,83.46
775
+ cait_xxs36_224,224,1024.0,1193.04,858.302,3.77,30.34,17.3
776
+ resnext101_32x8d,224,1024.0,1188.06,861.896,16.48,31.21,88.79
777
+ seresnet101,288,1024.0,1178.9,868.597,12.95,26.87,49.33
778
+ resnet152d,256,1024.0,1177.58,869.569,15.41,30.51,60.21
779
+ wide_resnet101_2,224,1024.0,1172.43,873.387,22.8,21.23,126.89
780
+ crossvit_base_240,240,1024.0,1171.25,874.269,20.13,22.67,105.03
781
+ resnet200,224,1024.0,1159.72,882.961,15.07,32.19,64.67
782
+ inception_resnet_v2,299,1024.0,1156.1,885.722,13.18,25.06,55.84
783
+ rexnetr_300,288,512.0,1153.3,443.932,5.59,36.61,34.81
784
+ resnetrs101,288,1024.0,1142.76,896.066,13.56,28.53,63.62
785
+ davit_base,224,1024.0,1141.57,896.996,15.36,36.72,87.95
786
+ tresnet_xl,224,1024.0,1136.08,901.333,15.2,15.34,78.44
787
+ coat_tiny,224,1024.0,1135.01,902.184,4.35,27.2,5.5
788
+ tnt_s_patch16_224,224,1024.0,1134.91,902.262,5.24,24.37,23.76
789
+ mvitv2_small,224,1024.0,1131.08,905.308,7.0,28.08,34.87
790
+ ecaresnet101d,288,1024.0,1130.54,905.749,13.35,28.19,44.57
791
+ vit_base_patch16_reg8_gap_256,256,1024.0,1124.62,910.517,22.6,22.09,86.62
792
+ maxvit_tiny_pm_256,256,768.0,1121.86,684.565,6.31,40.82,30.09
793
+ hrnet_w40,224,1024.0,1119.9,914.356,12.75,25.29,57.56
794
+ convnext_small,288,1024.0,1119.4,914.761,14.39,35.65,50.22
795
+ nfnet_f1,224,1024.0,1117.42,916.384,17.87,22.94,132.63
796
+ efficientnet_lite4,380,768.0,1117.23,687.403,4.04,45.66,13.01
797
+ pvt_v2_b4,224,1024.0,1107.81,924.328,9.83,48.14,62.56
798
+ seresnext101_64x4d,224,1024.0,1107.71,924.416,15.53,31.25,88.23
799
+ seresnext101_32x8d,224,1024.0,1101.53,929.602,16.48,31.25,93.57
800
+ resnetv2_50d_gn,288,1024.0,1100.54,930.437,7.24,19.7,25.57
801
+ coatnet_1_224,224,512.0,1098.68,466.003,8.28,31.3,42.23
802
+ repvgg_b3g4,224,1024.0,1097.61,932.923,17.89,15.1,83.83
803
+ samvit_base_patch16_224,224,1024.0,1097.38,933.118,16.83,17.2,86.46
804
+ eva02_base_patch16_clip_224,224,1024.0,1094.75,935.361,16.9,18.91,86.26
805
+ mvitv2_small_cls,224,1024.0,1086.56,942.407,7.04,28.17,34.87
806
+ vit_large_r50_s32_224,224,1024.0,1082.13,946.268,19.45,22.22,328.99
807
+ inception_next_base,224,1024.0,1079.66,948.435,14.85,25.69,86.67
808
+ resnet50_gn,288,1024.0,1076.3,951.4,6.85,18.37,25.56
809
+ pvt_v2_b5,224,1024.0,1073.94,953.474,11.39,44.23,81.96
810
+ seresnext101d_32x8d,224,1024.0,1071.41,955.74,16.72,32.05,93.59
811
+ efficientnetv2_m,320,1024.0,1070.2,956.818,11.01,39.97,54.14
812
+ vit_small_r26_s32_384,384,1024.0,1066.07,960.526,10.24,27.67,36.47
813
+ resnetblur101d,288,1024.0,1059.66,966.334,15.07,29.65,44.57
814
+ resnet101d,320,1024.0,1045.1,979.801,16.48,34.77,44.57
815
+ regnetz_e8,256,1024.0,1042.94,981.82,9.91,40.94,57.7
816
+ tf_efficientnet_lite4,380,768.0,1038.99,739.169,4.04,45.66,13.01
817
+ xception41p,299,768.0,1034.81,742.157,9.25,39.86,26.91
818
+ repvgg_b3,224,1024.0,1031.23,992.974,29.16,15.1,123.09
819
+ xcit_tiny_24_p16_384,384,1024.0,1026.84,997.227,6.87,34.29,12.12
820
+ resnetrs152,256,1024.0,1024.28,999.711,15.59,30.83,86.62
821
+ seresnet152d,256,1024.0,1022.13,1001.814,15.42,30.56,66.84
822
+ swinv2_cr_small_224,224,1024.0,1005.65,1018.232,9.07,50.27,49.7
823
+ vit_base_patch16_plus_240,240,1024.0,1004.91,1018.982,26.31,22.07,117.56
824
+ regnetz_b16_evos,288,768.0,997.65,769.796,2.36,16.43,9.74
825
+ focalnet_base_srf,224,1024.0,995.12,1029.007,15.28,35.01,88.15
826
+ swinv2_cr_small_ns_224,224,1024.0,993.65,1030.528,9.08,50.27,49.7
827
+ convnextv2_small,224,1024.0,992.07,1032.17,8.71,21.56,50.32
828
+ convnextv2_tiny,288,768.0,989.58,776.074,7.39,22.21,28.64
829
+ vit_small_patch8_224,224,1024.0,985.02,1039.56,16.76,32.86,21.67
830
+ regnety_040_sgn,288,1024.0,979.5,1045.407,6.67,20.3,20.65
831
+ regnetz_c16_evos,256,768.0,978.11,785.174,2.48,16.57,13.49
832
+ vit_base_r50_s16_224,224,1024.0,971.42,1054.108,20.94,27.88,97.89
833
+ hrnet_w44,224,1024.0,967.41,1058.48,14.94,26.92,67.06
834
+ efficientformer_l7,224,1024.0,966.26,1059.742,10.17,24.45,82.23
835
+ hrnet_w48_ssld,224,1024.0,963.59,1062.678,17.34,28.56,77.47
836
+ hrnet_w48,224,1024.0,962.72,1063.645,17.34,28.56,77.47
837
+ poolformer_m36,224,1024.0,959.97,1066.674,8.8,22.02,56.17
838
+ resnet152,288,1024.0,955.06,1072.17,19.11,37.28,60.19
839
+ cait_s24_224,224,1024.0,951.69,1075.97,9.35,40.58,46.92
840
+ tiny_vit_21m_384,384,512.0,946.04,541.193,11.94,46.84,21.23
841
+ focalnet_base_lrf,224,1024.0,946.02,1082.418,15.43,38.13,88.75
842
+ dm_nfnet_f1,224,1024.0,943.8,1084.958,17.87,22.94,132.63
843
+ efficientnet_b3_gn,288,512.0,943.58,542.602,1.74,23.35,11.73
844
+ efficientnetv2_rw_m,320,1024.0,934.42,1095.856,12.72,47.14,53.24
845
+ vit_relpos_base_patch16_plus_240,240,1024.0,933.99,1096.357,26.21,23.41,117.38
846
+ gmlp_b16_224,224,1024.0,931.13,1099.724,15.78,30.21,73.08
847
+ fastvit_sa36,256,1024.0,928.53,1102.809,5.62,34.02,31.53
848
+ xception41,299,768.0,927.7,827.842,9.28,39.86,26.97
849
+ eva02_small_patch14_336,336,1024.0,926.94,1104.696,12.41,27.7,22.13
850
+ maxvit_rmlp_small_rw_224,224,768.0,923.72,831.408,10.48,42.44,64.9
851
+ sequencer2d_l,224,1024.0,917.56,1115.991,9.74,22.12,54.3
852
+ poolformerv2_s36,224,1024.0,914.51,1119.704,5.01,15.82,30.79
853
+ xcit_medium_24_p16_224,224,1024.0,901.57,1135.786,16.13,31.71,84.4
854
+ coat_mini,224,1024.0,900.78,1136.787,6.82,33.68,10.34
855
+ coat_lite_medium,224,1024.0,898.48,1139.693,9.81,40.06,44.57
856
+ swin_s3_small_224,224,768.0,882.63,870.118,9.43,37.84,49.74
857
+ efficientnet_b3_g8_gn,288,512.0,882.63,580.072,2.59,23.35,14.25
858
+ dpn131,224,1024.0,878.67,1165.389,16.09,32.97,79.25
859
+ levit_384_s8,224,512.0,874.93,585.181,9.98,35.86,39.12
860
+ efficientnet_b4,384,512.0,874.47,585.489,4.51,50.04,19.34
861
+ vit_medium_patch16_gap_384,384,1024.0,873.17,1172.722,22.01,32.15,39.03
862
+ nest_base,224,1024.0,871.22,1175.339,16.71,30.51,67.72
863
+ nf_regnet_b5,384,1024.0,867.94,1179.793,7.95,42.9,49.74
864
+ resnet200d,256,1024.0,866.43,1181.848,20.0,43.09,64.69
865
+ maxvit_small_tf_224,224,512.0,864.97,591.915,11.39,46.31,68.93
866
+ nest_base_jx,224,1024.0,863.51,1185.835,16.71,30.51,67.72
867
+ xcit_small_12_p16_384,384,1024.0,860.6,1189.852,14.14,36.5,26.25
868
+ resnetv2_50d_evos,288,1024.0,857.98,1193.488,7.15,19.7,25.59
869
+ swin_base_patch4_window7_224,224,1024.0,857.23,1194.527,15.47,36.63,87.77
870
+ gcvit_small,224,1024.0,850.2,1204.416,8.57,41.61,51.09
871
+ crossvit_15_dagger_408,408,1024.0,849.94,1204.779,16.07,37.0,28.5
872
+ eca_nfnet_l1,320,1024.0,845.79,1210.693,14.92,34.42,41.41
873
+ tf_efficientnet_b4,380,512.0,836.31,612.204,4.49,49.49,19.34
874
+ regnety_080,288,1024.0,834.08,1227.682,13.22,29.69,39.18
875
+ levit_conv_384_s8,224,512.0,831.47,615.767,9.98,35.86,39.12
876
+ twins_svt_large,224,1024.0,829.67,1234.208,14.84,27.23,99.27
877
+ seresnet152,288,1024.0,826.68,1238.676,19.11,37.34,66.82
878
+ xception65p,299,768.0,826.46,929.251,13.91,52.48,39.82
879
+ eva02_base_patch14_224,224,1024.0,822.18,1245.459,22.0,24.67,85.76
880
+ caformer_s36,224,1024.0,811.28,1262.182,7.55,29.29,39.3
881
+ maxxvit_rmlp_small_rw_256,256,768.0,805.75,953.134,14.21,47.76,66.01
882
+ coatnet_2_rw_224,224,512.0,802.77,637.783,14.55,39.37,73.87
883
+ swinv2_base_window12_192,192,1024.0,801.77,1277.157,11.9,39.72,109.28
884
+ mvitv2_base,224,1024.0,789.29,1297.348,10.16,40.5,51.47
885
+ densenet264d,224,1024.0,784.72,1304.914,13.57,14.0,72.74
886
+ resnest50d_4s2x40d,224,1024.0,782.94,1307.879,4.4,17.94,30.42
887
+ swinv2_tiny_window16_256,256,512.0,779.51,656.811,6.68,39.02,28.35
888
+ volo_d2_224,224,1024.0,778.59,1315.191,14.34,41.34,58.68
889
+ dpn107,224,1024.0,773.9,1323.149,18.38,33.46,86.92
890
+ xcit_tiny_24_p8_224,224,1024.0,770.47,1329.042,9.21,45.38,12.11
891
+ convnext_base,288,1024.0,769.28,1331.103,25.43,47.53,88.59
892
+ coatnet_rmlp_2_rw_224,224,512.0,762.93,671.09,14.64,44.94,73.88
893
+ mvitv2_base_cls,224,1024.0,760.58,1346.32,10.23,40.65,65.44
894
+ convit_base,224,1024.0,757.3,1352.149,17.52,31.77,86.54
895
+ convformer_s36,224,1024.0,757.3,1352.161,7.67,30.5,40.01
896
+ coatnet_2_224,224,384.0,753.79,509.418,15.94,42.41,74.68
897
+ hrnet_w64,224,1024.0,748.82,1367.478,28.97,35.09,128.06
898
+ resnet152d,320,1024.0,747.67,1369.57,24.08,47.67,60.21
899
+ ecaresnet200d,256,1024.0,744.16,1376.037,20.0,43.15,64.69
900
+ seresnet200d,256,1024.0,743.64,1376.992,20.01,43.15,71.86
901
+ resnetrs200,256,1024.0,743.56,1377.137,20.18,43.42,93.21
902
+ swinv2_small_window8_256,256,1024.0,740.78,1382.313,11.58,40.14,49.73
903
+ xception65,299,768.0,738.05,1040.572,13.96,52.48,39.92
904
+ fastvit_ma36,256,1024.0,734.46,1394.207,7.85,40.39,44.07
905
+ swinv2_cr_small_ns_256,256,1024.0,733.6,1395.843,12.07,76.21,49.7
906
+ senet154,224,1024.0,731.81,1399.262,20.77,38.69,115.09
907
+ maxvit_rmlp_small_rw_256,256,768.0,731.54,1049.835,13.69,55.48,64.9
908
+ legacy_senet154,224,1024.0,730.99,1400.828,20.77,38.69,115.09
909
+ tf_efficientnetv2_m,384,1024.0,728.54,1405.529,15.85,57.52,54.14
910
+ xcit_nano_12_p8_384,384,1024.0,723.54,1415.249,6.34,46.06,3.05
911
+ poolformer_m48,224,1024.0,722.45,1417.374,11.59,29.17,73.47
912
+ tnt_b_patch16_224,224,1024.0,722.04,1418.187,14.09,39.01,65.41
913
+ efficientvit_l3,224,1024.0,720.55,1421.127,27.62,39.16,246.04
914
+ swinv2_cr_base_224,224,1024.0,719.69,1422.825,15.86,59.66,87.88
915
+ efficientnet_b3_g8_gn,320,512.0,718.69,712.395,3.2,28.83,14.25
916
+ resnest101e,256,1024.0,718.12,1425.925,13.38,28.66,48.28
917
+ swin_s3_base_224,224,1024.0,717.57,1427.034,13.69,48.26,71.13
918
+ resnext101_64x4d,288,1024.0,717.4,1427.37,25.66,51.59,83.46
919
+ swinv2_cr_base_ns_224,224,1024.0,713.5,1435.162,15.86,59.66,87.88
920
+ convnextv2_base,224,768.0,711.23,1079.807,15.38,28.75,88.72
921
+ resnet200,288,1024.0,697.53,1468.023,24.91,53.21,64.67
922
+ efficientnet_b3_gn,320,512.0,695.5,736.148,2.14,28.83,11.73
923
+ coat_small,224,1024.0,694.03,1475.431,12.61,44.25,21.69
924
+ convnext_large,224,1024.0,690.43,1483.117,34.4,43.13,197.77
925
+ regnetz_e8,320,1024.0,670.8,1526.503,15.46,63.94,57.7
926
+ efficientformerv2_s2,224,1024.0,670.26,1527.748,1.27,11.77,12.71
927
+ seresnext101_32x8d,288,1024.0,656.14,1560.626,27.24,51.63,93.57
928
+ resnetrs152,320,1024.0,655.8,1561.431,24.34,48.14,86.62
929
+ xcit_small_12_p8_224,224,1024.0,655.5,1562.148,18.69,47.19,26.21
930
+ maxxvitv2_rmlp_base_rw_224,224,768.0,651.85,1178.173,23.88,54.39,116.09
931
+ seresnet152d,320,1024.0,649.85,1575.74,24.09,47.72,66.84
932
+ vit_large_patch32_384,384,1024.0,647.57,1581.281,44.28,32.22,306.63
933
+ poolformerv2_m36,224,1024.0,646.73,1583.338,8.81,22.02,56.08
934
+ resnext101_32x16d,224,1024.0,641.29,1596.767,36.27,51.18,194.03
935
+ seresnext101d_32x8d,288,1024.0,639.61,1600.97,27.64,52.95,93.59
936
+ regnetz_d8_evos,256,1024.0,638.02,1604.938,4.5,24.92,23.46
937
+ davit_large,224,1024.0,634.07,1614.963,34.37,55.08,196.81
938
+ efficientnetv2_m,416,1024.0,633.12,1617.367,18.6,67.5,54.14
939
+ regnety_064,224,1024.0,632.1,1619.968,6.39,16.41,30.58
940
+ regnetv_064,224,1024.0,629.87,1625.704,6.39,16.41,30.58
941
+ regnetz_c16_evos,320,512.0,622.61,822.333,3.86,25.88,13.49
942
+ gcvit_base,224,1024.0,620.94,1649.111,14.87,55.48,90.32
943
+ nf_regnet_b5,456,512.0,602.97,849.111,11.7,61.95,49.74
944
+ seresnextaa101d_32x8d,288,1024.0,601.98,1701.035,28.51,56.44,93.59
945
+ xception71,299,768.0,600.76,1278.366,18.09,69.92,42.34
946
+ eca_nfnet_l2,320,1024.0,593.89,1724.216,20.95,47.43,56.72
947
+ nfnet_f2,256,1024.0,593.31,1725.904,33.76,41.85,193.78
948
+ crossvit_18_dagger_408,408,1024.0,585.92,1747.666,25.31,49.38,44.61
949
+ hrnet_w48_ssld,288,1024.0,585.32,1749.444,28.66,47.21,77.47
950
+ ecaresnet200d,288,1024.0,584.36,1752.321,25.31,54.59,64.69
951
+ seresnet200d,288,1024.0,583.25,1755.672,25.32,54.6,71.86
952
+ caformer_m36,224,1024.0,582.88,1756.773,12.75,40.61,56.2
953
+ levit_512_s8,224,256.0,582.77,439.271,21.82,52.28,74.05
954
+ maxvit_rmlp_base_rw_224,224,768.0,582.44,1318.589,22.63,79.3,116.14
955
+ seresnet269d,256,1024.0,581.62,1760.578,26.59,53.6,113.67
956
+ convmixer_768_32,224,1024.0,580.09,1765.235,19.55,25.95,21.11
957
+ resnetrs270,256,1024.0,565.62,1810.398,27.06,55.84,129.86
958
+ mixer_l16_224,224,1024.0,553.36,1850.484,44.6,41.69,208.2
959
+ levit_conv_512_s8,224,256.0,552.47,463.363,21.82,52.28,74.05
960
+ efficientnetv2_rw_m,416,1024.0,552.47,1853.491,21.49,79.62,53.24
961
+ resnet200d,320,1024.0,551.74,1855.93,31.25,67.33,64.69
962
+ nfnet_f1,320,1024.0,548.82,1865.795,35.97,46.77,132.63
963
+ convformer_m36,224,1024.0,548.78,1865.947,12.89,42.05,57.05
964
+ volo_d3_224,224,1024.0,541.9,1889.619,20.78,60.09,86.33
965
+ swinv2_base_window8_256,256,1024.0,530.42,1930.519,20.37,52.59,87.92
966
+ maxvit_base_tf_224,224,512.0,517.72,988.937,23.52,81.67,119.47
967
+ xcit_large_24_p16_224,224,1024.0,511.16,2003.26,35.86,47.26,189.1
968
+ convmixer_1024_20_ks9_p14,224,1024.0,510.74,2004.929,5.55,5.51,24.38
969
+ dm_nfnet_f2,256,1024.0,503.11,2035.325,33.76,41.85,193.78
970
+ swin_large_patch4_window7_224,224,768.0,494.53,1552.967,34.53,54.94,196.53
971
+ vit_base_patch16_18x2_224,224,1024.0,494.1,2072.443,50.37,49.17,256.73
972
+ deit_base_patch16_384,384,1024.0,493.77,2073.808,49.4,48.3,86.86
973
+ vit_base_patch16_384,384,1024.0,493.5,2074.946,49.4,48.3,86.86
974
+ deit_base_distilled_patch16_384,384,1024.0,493.31,2075.754,49.49,48.39,87.63
975
+ vit_base_patch16_clip_384,384,1024.0,492.52,2079.081,49.41,48.3,86.86
976
+ eva_large_patch14_196,196,1024.0,491.4,2083.813,59.66,43.77,304.14
977
+ vit_base_patch16_siglip_384,384,1024.0,490.82,2086.272,50.0,49.11,93.18
978
+ vit_large_patch16_224,224,1024.0,489.19,2093.231,59.7,43.77,304.33
979
+ halonet_h1,256,256.0,487.96,524.621,3.0,51.17,8.1
980
+ tiny_vit_21m_512,512,256.0,487.73,524.868,21.23,83.26,21.27
981
+ seresnextaa101d_32x8d,320,768.0,487.6,1575.053,35.19,69.67,93.59
982
+ swinv2_large_window12_192,192,768.0,487.6,1575.036,26.17,56.53,228.77
983
+ swinv2_small_window16_256,256,512.0,487.58,1050.071,12.82,66.29,49.73
984
+ poolformerv2_m48,224,1024.0,487.33,2101.208,11.59,29.17,73.35
985
+ resnetrs200,320,1024.0,476.69,2148.152,31.51,67.81,93.21
986
+ xcit_tiny_12_p8_384,384,1024.0,472.87,2165.479,14.12,69.12,6.71
987
+ vit_small_patch14_dinov2,518,1024.0,470.72,2175.374,29.46,57.34,22.06
988
+ deit3_base_patch16_384,384,1024.0,469.96,2178.883,49.4,48.3,86.88
989
+ vit_small_patch14_reg4_dinov2,518,1024.0,469.28,2182.048,29.55,57.51,22.06
990
+ deit3_large_patch16_224,224,1024.0,468.18,2187.162,59.7,43.77,304.37
991
+ tf_efficientnetv2_m,480,1024.0,466.8,2193.627,24.76,89.84,54.14
992
+ dm_nfnet_f1,320,1024.0,463.74,2208.099,35.97,46.77,132.63
993
+ xcit_small_24_p16_384,384,1024.0,458.11,2235.247,26.72,68.57,47.67
994
+ seresnet269d,288,1024.0,457.25,2239.451,33.65,67.81,113.67
995
+ beit_large_patch16_224,224,1024.0,453.95,2255.726,59.7,43.77,304.43
996
+ beitv2_large_patch16_224,224,1024.0,453.79,2256.515,59.7,43.77,304.43
997
+ regnetx_120,224,1024.0,452.56,2262.648,12.13,21.37,46.11
998
+ efficientnet_b5,448,512.0,444.06,1152.996,9.59,93.56,30.39
999
+ regnety_120,224,1024.0,444.03,2306.127,12.14,21.38,51.82
1000
+ efficientformerv2_l,224,1024.0,441.81,2317.703,2.59,18.54,26.32
1001
+ coatnet_3_rw_224,224,384.0,441.21,870.327,32.63,59.07,181.81
1002
+ resnetv2_152x2_bit,224,1024.0,439.95,2327.532,46.95,45.11,236.34
1003
+ convnext_xlarge,224,768.0,438.91,1749.766,60.98,57.5,350.2
1004
+ coatnet_rmlp_3_rw_224,224,256.0,438.69,583.549,32.75,64.7,165.15
1005
+ coatnet_3_224,224,256.0,431.52,593.24,35.72,63.61,166.97
1006
+ convnextv2_base,288,512.0,430.66,1188.858,25.43,47.53,88.72
1007
+ flexivit_large,240,1024.0,427.93,2392.897,68.48,50.22,304.36
1008
+ convnextv2_large,224,512.0,424.61,1205.798,34.4,43.13,197.96
1009
+ swinv2_cr_large_224,224,768.0,424.12,1810.813,35.1,78.42,196.68
1010
+ swinv2_cr_tiny_384,384,256.0,420.98,608.099,15.34,161.01,28.33
1011
+ caformer_b36,224,768.0,420.2,1827.698,22.5,54.14,98.75
1012
+ maxvit_tiny_tf_384,384,256.0,419.78,609.84,16.0,94.22,30.98
1013
+ convnext_large,288,768.0,417.93,1837.619,56.87,71.29,197.77
1014
+ regnety_160,224,1024.0,417.09,2455.096,15.96,23.04,83.59
1015
+ eca_nfnet_l2,384,1024.0,412.81,2480.539,30.05,68.28,56.72
1016
+ maxxvitv2_rmlp_large_rw_224,224,768.0,411.22,1867.582,43.69,75.4,215.42
1017
+ efficientnetv2_l,384,1024.0,409.83,2498.611,36.1,101.16,118.52
1018
+ davit_huge,224,768.0,407.6,1884.205,60.93,73.44,348.92
1019
+ tf_efficientnetv2_l,384,1024.0,405.08,2527.906,36.1,101.16,118.52
1020
+ regnety_320,224,1024.0,403.27,2539.241,32.34,30.26,145.05
1021
+ regnetz_d8_evos,320,768.0,403.13,1905.094,7.03,38.92,23.46
1022
+ beit_base_patch16_384,384,1024.0,402.61,2543.386,49.4,48.3,86.74
1023
+ convformer_b36,224,768.0,397.77,1930.749,22.69,56.06,99.88
1024
+ tf_efficientnet_b5,456,384.0,394.74,972.77,10.46,98.86,30.39
1025
+ eca_nfnet_l3,352,1024.0,378.23,2707.314,32.57,73.12,72.04
1026
+ vit_large_patch16_siglip_256,256,1024.0,375.52,2726.866,78.12,57.42,315.96
1027
+ ecaresnet269d,320,1024.0,372.48,2749.133,41.53,83.69,102.09
1028
+ vit_large_r50_s32_384,384,1024.0,369.32,2772.633,56.4,64.88,329.09
1029
+ maxvit_large_tf_224,224,384.0,359.98,1066.726,42.99,109.57,211.79
1030
+ vit_large_patch14_224,224,1024.0,359.62,2847.449,77.83,57.11,304.2
1031
+ vit_large_patch14_clip_224,224,1024.0,359.62,2847.409,77.83,57.11,304.2
1032
+ swinv2_base_window16_256,256,384.0,359.2,1069.042,22.02,84.71,87.92
1033
+ swinv2_base_window12to16_192to256,256,384.0,359.01,1069.609,22.02,84.71,87.92
1034
+ nasnetalarge,331,384.0,356.97,1075.708,23.89,90.56,88.75
1035
+ resnetrs350,288,1024.0,356.46,2872.642,43.67,87.09,163.96
1036
+ vit_base_patch8_224,224,1024.0,351.76,2911.045,66.87,65.71,86.58
1037
+ volo_d4_224,224,1024.0,343.2,2983.708,44.34,80.22,192.96
1038
+ xcit_small_24_p8_224,224,1024.0,342.74,2987.714,35.81,90.77,47.63
1039
+ volo_d1_384,384,512.0,340.3,1504.541,22.75,108.55,26.78
1040
+ convnext_large_mlp,320,512.0,338.23,1513.736,70.21,88.02,200.13
1041
+ repvgg_d2se,320,1024.0,335.87,3048.766,74.57,46.82,133.33
1042
+ vit_large_patch14_clip_quickgelu_224,224,1024.0,324.37,3156.896,77.83,57.11,303.97
1043
+ vit_base_r50_s16_384,384,1024.0,315.28,3247.919,61.29,81.77,98.95
1044
+ nfnet_f2,352,1024.0,313.79,3263.314,63.22,79.06,193.78
1045
+ xcit_medium_24_p16_384,384,1024.0,313.38,3267.626,47.39,91.63,84.4
1046
+ vit_large_patch14_xp_224,224,1024.0,311.53,3287.018,77.77,57.11,304.06
1047
+ ecaresnet269d,352,1024.0,307.84,3326.422,50.25,101.25,102.09
1048
+ coat_lite_medium_384,384,512.0,301.48,1698.273,28.73,116.7,44.57
1049
+ regnety_064,288,1024.0,298.91,3425.709,10.56,27.11,30.58
1050
+ resnetrs270,352,1024.0,298.81,3426.892,51.13,105.48,129.86
1051
+ regnetv_064,288,1024.0,298.12,3434.809,10.55,27.11,30.58
1052
+ resnext101_32x32d,224,512.0,296.06,1729.362,87.29,91.12,468.53
1053
+ nfnet_f3,320,1024.0,290.3,3527.352,68.77,83.93,254.92
1054
+ efficientnetv2_xl,384,1024.0,290.02,3530.821,52.81,139.2,208.12
1055
+ tf_efficientnetv2_xl,384,1024.0,287.47,3562.138,52.81,139.2,208.12
1056
+ cait_xxs24_384,384,1024.0,284.02,3605.396,9.63,122.65,12.03
1057
+ maxvit_small_tf_384,384,192.0,274.58,699.228,33.58,139.86,69.02
1058
+ coatnet_4_224,224,256.0,274.31,933.246,60.81,98.85,275.43
1059
+ convnext_xlarge,288,512.0,265.38,1929.279,100.8,95.05,350.2
1060
+ dm_nfnet_f2,352,1024.0,265.36,3858.944,63.22,79.06,193.78
1061
+ vit_base_patch16_siglip_512,512,512.0,263.16,1945.545,88.89,87.3,93.52
1062
+ vit_so400m_patch14_siglip_224,224,1024.0,262.63,3898.968,106.18,70.45,427.68
1063
+ efficientnetv2_l,480,512.0,261.08,1961.059,56.4,157.99,118.52
1064
+ swinv2_cr_small_384,384,256.0,258.97,988.525,29.7,298.03,49.7
1065
+ convnextv2_large,288,384.0,257.89,1488.981,56.87,71.29,197.96
1066
+ tf_efficientnetv2_l,480,512.0,257.78,1986.206,56.4,157.99,118.52
1067
+ eva02_large_patch14_224,224,1024.0,256.9,3985.935,77.9,65.52,303.27
1068
+ eva02_large_patch14_clip_224,224,1024.0,253.93,4032.531,77.93,65.52,304.11
1069
+ regnety_120,288,768.0,253.81,3025.924,20.06,35.34,51.82
1070
+ xcit_tiny_24_p8_384,384,1024.0,248.2,4125.63,27.05,132.94,12.11
1071
+ coatnet_rmlp_2_rw_384,384,192.0,247.61,775.41,43.04,132.57,73.88
1072
+ dm_nfnet_f3,320,1024.0,247.07,4144.617,68.77,83.93,254.92
1073
+ resnetrs420,320,1024.0,244.54,4187.355,64.2,126.56,191.89
1074
+ mvitv2_large,224,512.0,243.6,2101.832,43.87,112.02,217.99
1075
+ mvitv2_large_cls,224,512.0,241.75,2117.866,42.17,111.69,234.58
1076
+ resmlp_big_24_224,224,1024.0,241.59,4238.519,100.23,87.31,129.14
1077
+ regnety_160,288,768.0,237.71,3230.76,26.37,38.07,83.59
1078
+ xcit_medium_24_p8_224,224,768.0,234.01,3281.941,63.52,121.22,84.32
1079
+ eca_nfnet_l3,448,512.0,233.43,2193.322,52.55,118.4,72.04
1080
+ volo_d5_224,224,1024.0,228.8,4475.542,72.4,118.11,295.46
1081
+ swin_base_patch4_window12_384,384,256.0,227.46,1125.454,47.19,134.78,87.9
1082
+ xcit_small_12_p8_384,384,384.0,223.23,1720.206,54.92,138.25,26.21
1083
+ swinv2_large_window12to16_192to256,256,256.0,219.08,1168.537,47.81,121.53,196.74
1084
+ maxxvitv2_rmlp_base_rw_384,384,384.0,217.17,1768.16,70.18,160.22,116.09
1085
+ efficientnet_b6,528,256.0,205.22,1247.45,19.4,167.39,43.04
1086
+ regnetx_320,224,768.0,200.5,3830.333,31.81,36.3,107.81
1087
+ resnetrs350,384,1024.0,199.92,5122.143,77.59,154.74,163.96
1088
+ cait_xs24_384,384,768.0,198.76,3863.971,19.28,183.98,26.67
1089
+ maxvit_xlarge_tf_224,224,256.0,198.54,1289.412,96.49,164.37,506.99
1090
+ tf_efficientnet_b6,528,192.0,198.54,967.028,19.4,167.39,43.04
1091
+ focalnet_huge_fl3,224,512.0,191.39,2675.182,118.26,104.8,745.28
1092
+ volo_d2_384,384,384.0,190.85,2012.066,46.17,184.51,58.87
1093
+ cait_xxs36_384,384,1024.0,189.78,5395.721,14.35,183.7,17.37
1094
+ eva02_base_patch14_448,448,512.0,189.58,2700.759,87.74,98.4,87.12
1095
+ vit_huge_patch14_gap_224,224,1024.0,186.27,5497.294,161.36,94.7,630.76
1096
+ swinv2_cr_base_384,384,256.0,185.05,1383.395,50.57,333.68,87.88
1097
+ swinv2_cr_huge_224,224,384.0,182.04,2109.357,115.97,121.08,657.83
1098
+ maxvit_rmlp_base_rw_384,384,384.0,179.65,2137.52,66.51,233.79,116.14
1099
+ vit_huge_patch14_224,224,1024.0,179.6,5701.574,161.99,95.07,630.76
1100
+ vit_huge_patch14_clip_224,224,1024.0,179.43,5706.842,161.99,95.07,632.05
1101
+ xcit_large_24_p16_384,384,1024.0,177.48,5769.692,105.34,137.15,189.1
1102
+ vit_base_patch14_dinov2,518,512.0,176.68,2897.828,117.11,114.68,86.58
1103
+ vit_base_patch14_reg4_dinov2,518,512.0,175.98,2909.337,117.45,115.02,86.58
1104
+ deit3_huge_patch14_224,224,1024.0,173.53,5900.889,161.99,95.07,632.13
1105
+ nfnet_f3,416,768.0,171.77,4471.127,115.58,141.78,254.92
1106
+ maxvit_tiny_tf_512,512,128.0,170.91,748.92,28.66,172.66,31.05
1107
+ seresnextaa201d_32x8d,384,512.0,170.35,3005.583,101.11,199.72,149.39
1108
+ maxvit_base_tf_384,384,192.0,166.63,1152.259,69.34,247.75,119.65
1109
+ vit_huge_patch14_clip_quickgelu_224,224,1024.0,165.5,6187.275,161.99,95.07,632.08
1110
+ efficientnetv2_xl,512,512.0,163.45,3132.529,93.85,247.32,208.12
1111
+ nfnet_f4,384,768.0,163.26,4704.17,122.14,147.57,316.07
1112
+ tf_efficientnetv2_xl,512,512.0,161.63,3167.699,93.85,247.32,208.12
1113
+ vit_huge_patch14_xp_224,224,1024.0,159.72,6411.21,161.88,95.07,631.8
1114
+ eva_large_patch14_336,336,768.0,155.72,4931.845,174.74,128.21,304.53
1115
+ vit_large_patch14_clip_336,336,768.0,155.28,4945.947,174.74,128.21,304.53
1116
+ vit_large_patch16_384,384,768.0,155.12,4950.906,174.85,128.21,304.72
1117
+ vit_large_patch16_siglip_384,384,768.0,154.94,4956.619,175.76,129.18,316.28
1118
+ convnext_xxlarge,256,384.0,153.59,2500.071,198.09,124.45,846.47
1119
+ vit_giant_patch16_gap_224,224,1024.0,153.47,6672.363,198.14,103.64,1011.37
1120
+ cait_s24_384,384,512.0,153.12,3343.821,32.17,245.3,47.06
1121
+ davit_giant,224,384.0,152.05,2525.491,192.34,138.2,1406.47
1122
+ deit3_large_patch16_384,384,1024.0,148.73,6884.872,174.85,128.21,304.76
1123
+ coatnet_5_224,224,192.0,147.83,1298.762,142.72,143.69,687.47
1124
+ dm_nfnet_f3,416,512.0,146.0,3506.787,115.58,141.78,254.92
1125
+ resnetrs420,416,768.0,144.59,5311.727,108.45,213.79,191.89
1126
+ vit_large_patch14_clip_quickgelu_336,336,768.0,141.12,5441.998,174.74,128.21,304.29
1127
+ dm_nfnet_f4,384,768.0,139.13,5519.969,122.14,147.57,316.07
1128
+ swin_large_patch4_window12_384,384,128.0,135.95,941.498,104.08,202.16,196.74
1129
+ xcit_large_24_p8_224,224,512.0,131.73,3886.696,141.22,181.53,188.93
1130
+ beit_large_patch16_384,384,768.0,129.79,5917.023,174.84,128.21,305.0
1131
+ efficientnet_b7,600,192.0,128.05,1499.407,38.33,289.94,66.35
1132
+ tf_efficientnet_b7,600,192.0,124.56,1541.433,38.33,289.94,66.35
1133
+ focalnet_huge_fl4,224,512.0,123.26,4153.862,118.9,113.34,686.46
1134
+ eva_giant_patch14_clip_224,224,1024.0,116.99,8753.07,259.74,135.89,1012.59
1135
+ eva_giant_patch14_224,224,1024.0,116.91,8758.747,259.74,135.89,1012.56
1136
+ nfnet_f5,416,768.0,116.91,6569.029,170.71,204.56,377.21
1137
+ xcit_small_24_p8_384,384,384.0,116.73,3289.571,105.23,265.87,47.63
1138
+ maxvit_large_tf_384,384,128.0,116.56,1098.144,126.61,332.3,212.03
1139
+ vit_giant_patch14_224,224,1024.0,114.32,8957.604,259.74,135.89,1012.61
1140
+ vit_giant_patch14_clip_224,224,1024.0,114.12,8973.257,259.74,135.89,1012.65
1141
+ swinv2_cr_large_384,384,192.0,113.51,1691.47,108.96,404.96,196.68
1142
+ eva02_large_patch14_clip_336,336,768.0,110.42,6955.361,174.97,147.1,304.43
1143
+ mvitv2_huge_cls,224,384.0,105.54,3638.368,120.67,243.63,694.8
1144
+ maxvit_small_tf_512,512,96.0,104.89,915.238,60.02,256.36,69.13
1145
+ cait_s36_384,384,512.0,102.28,5005.663,47.99,367.39,68.37
1146
+ dm_nfnet_f5,416,512.0,99.59,5141.209,170.71,204.56,377.21
1147
+ swinv2_base_window12to24_192to384,384,96.0,96.5,994.841,55.25,280.36,87.92
1148
+ focalnet_large_fl3,384,256.0,93.78,2729.925,105.06,168.04,239.13
1149
+ nfnet_f4,512,512.0,91.69,5583.92,216.26,262.26,316.07
1150
+ focalnet_large_fl4,384,256.0,90.64,2824.324,105.2,181.78,239.32
1151
+ nfnet_f6,448,512.0,86.88,5893.345,229.7,273.62,438.36
1152
+ efficientnet_b8,672,128.0,85.75,1492.768,63.48,442.89,87.41
1153
+ tf_efficientnet_b8,672,128.0,83.71,1529.068,63.48,442.89,87.41
1154
+ volo_d3_448,448,128.0,81.1,1578.235,96.33,446.83,86.63
1155
+ vit_so400m_patch14_siglip_384,384,512.0,80.75,6340.618,302.34,200.62,428.23
1156
+ xcit_medium_24_p8_384,384,256.0,80.25,3189.919,186.67,354.69,84.32
1157
+ dm_nfnet_f4,512,384.0,78.23,4908.575,216.26,262.26,316.07
1158
+ vit_huge_patch14_clip_336,336,512.0,75.44,6786.84,363.7,213.44,632.46
1159
+ dm_nfnet_f6,448,512.0,74.17,6903.248,229.7,273.62,438.36
1160
+ maxvit_base_tf_512,512,96.0,72.37,1326.47,123.93,456.26,119.88
1161
+ nfnet_f5,544,384.0,68.39,5614.643,290.97,349.71,377.21
1162
+ nfnet_f7,480,512.0,66.61,7686.561,300.08,355.86,499.5
1163
+ vit_gigantic_patch14_224,224,512.0,66.24,7729.406,473.4,204.12,1844.44
1164
+ vit_gigantic_patch14_clip_224,224,512.0,66.15,7739.524,473.41,204.12,1844.91
1165
+ focalnet_xlarge_fl3,384,192.0,65.92,2912.463,185.61,223.99,408.79
1166
+ maxvit_xlarge_tf_384,384,96.0,64.9,1479.208,283.86,498.45,475.32
1167
+ focalnet_xlarge_fl4,384,192.0,63.63,3017.361,185.79,242.31,409.03
1168
+ beit_large_patch16_512,512,256.0,61.48,4163.85,310.6,227.76,305.67
1169
+ volo_d4_448,448,192.0,60.99,3147.895,197.13,527.35,193.41
1170
+ regnety_640,384,192.0,60.97,3149.012,188.47,124.83,281.38
1171
+ convnextv2_huge,384,96.0,60.92,1575.922,337.96,232.35,660.29
1172
+ swinv2_large_window12to24_192to384,384,48.0,60.75,790.151,116.15,407.83,196.74
1173
+ eva02_large_patch14_448,448,512.0,59.67,8581.221,310.69,261.32,305.08
1174
+ dm_nfnet_f5,544,384.0,58.35,6580.773,290.97,349.71,377.21
1175
+ vit_huge_patch14_clip_378,378,512.0,58.14,8806.389,460.13,270.04,632.68
1176
+ convmixer_1536_20,224,1024.0,56.99,17967.01,48.68,33.03,51.63
1177
+ vit_large_patch14_dinov2,518,384.0,56.83,6757.154,414.89,304.42,304.37
1178
+ vit_large_patch14_reg4_dinov2,518,384.0,56.64,6779.944,416.1,305.31,304.37
1179
+ maxvit_large_tf_512,512,64.0,54.68,1170.494,225.96,611.85,212.33
1180
+ tf_efficientnet_l2,475,96.0,54.05,1776.14,172.11,609.89,480.31
1181
+ vit_huge_patch14_clip_quickgelu_378,378,384.0,53.95,7117.573,460.13,270.04,632.68
1182
+ vit_huge_patch16_gap_448,448,512.0,52.86,9685.108,494.35,290.02,631.67
1183
+ nfnet_f6,576,384.0,52.55,7307.184,378.69,452.2,438.36
1184
+ swinv2_cr_giant_224,224,192.0,52.45,3660.551,483.85,309.15,2598.76
1185
+ eva_giant_patch14_336,336,512.0,49.65,10312.606,583.14,305.1,1013.01
1186
+ swinv2_cr_huge_384,384,96.0,49.62,1934.539,352.04,583.18,657.94
1187
+ xcit_large_24_p8_384,384,192.0,45.19,4249.177,415.0,531.74,188.93
1188
+ dm_nfnet_f6,576,256.0,44.83,5710.109,378.69,452.2,438.36
1189
+ volo_d5_448,448,192.0,42.49,4518.905,315.06,737.92,295.91
1190
+ nfnet_f7,608,256.0,41.52,6165.283,480.39,570.85,499.5
1191
+ cait_m36_384,384,256.0,33.1,7733.448,173.11,734.79,271.22
1192
+ resnetv2_152x4_bit,480,96.0,32.12,2989.13,844.84,414.26,936.53
1193
+ maxvit_xlarge_tf_512,512,48.0,30.41,1578.222,505.95,917.77,475.77
1194
+ regnety_2560,384,128.0,30.25,4231.43,747.83,296.49,1282.6
1195
+ volo_d5_512,512,128.0,29.54,4332.489,425.09,1105.37,296.09
1196
+ samvit_base_patch16,1024,16.0,23.81,671.88,371.55,403.08,89.67
1197
+ regnety_1280,384,128.0,22.93,5583.053,374.99,210.2,644.81
1198
+ efficientnet_l2,800,48.0,19.03,2521.932,479.12,1707.39,480.31
1199
+ vit_giant_patch14_dinov2,518,192.0,17.15,11193.542,1553.56,871.89,1136.48
1200
+ vit_giant_patch14_reg4_dinov2,518,192.0,17.12,11212.072,1558.09,874.43,1136.48
1201
+ swinv2_cr_giant_384,384,32.0,15.04,2127.877,1450.71,1394.86,2598.76
1202
+ eva_giant_patch14_560,560,192.0,15.03,12771.913,1618.04,846.56,1014.45
1203
+ cait_m48_448,448,128.0,13.96,9172.063,329.4,1708.21,356.46
1204
+ samvit_large_patch16,1024,12.0,10.64,1127.934,1317.08,1055.58,308.28
1205
+ samvit_huge_patch16,1024,8.0,6.61,1210.638,2741.59,1727.57,637.03