meg (HF staff) committed
Commit 847619e
1 parent: a05e70a

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full set.
Files changed (50)
  1. .gitattributes +0 -24
  2. .gitignore +1 -0
  3. Dockerfile +19 -0
  4. README.md +13 -0
  5. __pycache__/app.cpython-39.pyc +0 -0
  6. app.py +47 -0
  7. pytorch-image-models/.gitattributes +1 -0
  8. pytorch-image-models/.github/FUNDING.yml +2 -0
  9. pytorch-image-models/.github/ISSUE_TEMPLATE/bug_report.md +32 -0
  10. pytorch-image-models/.github/ISSUE_TEMPLATE/config.yml +5 -0
  11. pytorch-image-models/.github/ISSUE_TEMPLATE/feature_request.md +21 -0
  12. pytorch-image-models/.github/workflows/build_documentation.yml +20 -0
  13. pytorch-image-models/.github/workflows/build_pr_documentation.yml +19 -0
  14. pytorch-image-models/.github/workflows/tests.yml +65 -0
  15. pytorch-image-models/.github/workflows/trufflehog.yml +15 -0
  16. pytorch-image-models/.github/workflows/upload_pr_documentation.yml +16 -0
  17. pytorch-image-models/.gitignore +121 -0
  18. pytorch-image-models/CITATION.cff +11 -0
  19. pytorch-image-models/CODE_OF_CONDUCT.md +132 -0
  20. pytorch-image-models/CONTRIBUTING.md +106 -0
  21. pytorch-image-models/LICENSE +201 -0
  22. pytorch-image-models/MANIFEST.in +3 -0
  23. pytorch-image-models/README.md +626 -0
  24. pytorch-image-models/UPGRADING.md +24 -0
  25. pytorch-image-models/avg_checkpoints.py +152 -0
  26. pytorch-image-models/benchmark.py +699 -0
  27. pytorch-image-models/bulk_runner.py +244 -0
  28. pytorch-image-models/clean_checkpoint.py +115 -0
  29. pytorch-image-models/convert/convert_from_mxnet.py +107 -0
  30. pytorch-image-models/convert/convert_nest_flax.py +109 -0
  31. pytorch-image-models/distributed_train.sh +5 -0
  32. pytorch-image-models/hfdocs/README.md +14 -0
  33. pytorch-image-models/hfdocs/source/_toctree.yml +162 -0
  34. pytorch-image-models/hfdocs/source/changes.mdx +1080 -0
  35. pytorch-image-models/hfdocs/source/feature_extraction.mdx +273 -0
  36. pytorch-image-models/hfdocs/source/hf_hub.mdx +54 -0
  37. pytorch-image-models/hfdocs/source/index.mdx +22 -0
  38. pytorch-image-models/hfdocs/source/installation.mdx +74 -0
  39. pytorch-image-models/hfdocs/source/models.mdx +230 -0
  40. pytorch-image-models/hfdocs/source/models/adversarial-inception-v3.mdx +165 -0
  41. pytorch-image-models/hfdocs/source/models/advprop.mdx +524 -0
  42. pytorch-image-models/hfdocs/source/models/big-transfer.mdx +362 -0
  43. pytorch-image-models/hfdocs/source/models/csp-darknet.mdx +148 -0
  44. pytorch-image-models/hfdocs/source/models/csp-resnet.mdx +143 -0
  45. pytorch-image-models/hfdocs/source/models/csp-resnext.mdx +144 -0
  46. pytorch-image-models/hfdocs/source/models/densenet.mdx +372 -0
  47. pytorch-image-models/hfdocs/source/models/dla.mdx +612 -0
  48. pytorch-image-models/hfdocs/source/models/dpn.mdx +323 -0
  49. pytorch-image-models/hfdocs/source/models/ecaresnet.mdx +303 -0
  50. pytorch-image-models/hfdocs/source/models/efficientnet-pruned.mdx +212 -0
.gitattributes CHANGED
@@ -8,8 +8,6 @@
  *.h5 filter=lfs diff=lfs merge=lfs -text
  *.joblib filter=lfs diff=lfs merge=lfs -text
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.lz4 filter=lfs diff=lfs merge=lfs -text
- *.mds filter=lfs diff=lfs merge=lfs -text
  *.mlmodel filter=lfs diff=lfs merge=lfs -text
  *.model filter=lfs diff=lfs merge=lfs -text
  *.msgpack filter=lfs diff=lfs merge=lfs -text
@@ -35,25 +33,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text
- # Image files - uncompressed
- *.bmp filter=lfs diff=lfs merge=lfs -text
- *.gif filter=lfs diff=lfs merge=lfs -text
- *.png filter=lfs diff=lfs merge=lfs -text
- *.tiff filter=lfs diff=lfs merge=lfs -text
- # Image files - compressed
- *.jpg filter=lfs diff=lfs merge=lfs -text
- *.jpeg filter=lfs diff=lfs merge=lfs -text
- *.webp filter=lfs diff=lfs merge=lfs -text
- # Video files - compressed
- *.mp4 filter=lfs diff=lfs merge=lfs -text
- *.webm filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
+ *.wandb
Dockerfile ADDED
@@ -0,0 +1,19 @@
+ # Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
+ # you will also find guides on how best to write your Dockerfile
+
+ FROM python:3.9
+
+ RUN useradd -m -u 1000 user
+ USER user
+ ENV PATH="/home/user/.local/bin:$PATH"
+
+ WORKDIR /app
+
+ COPY --chown=user ./requirements.txt requirements.txt
+ RUN pip install --no-cache-dir --upgrade -r requirements.txt
+ RUN git clone https://github.com/huggingface/pytorch-image-models.git && cd pytorch-image-models && pip install --no-cache-dir --upgrade -r requirements.txt
+ COPY --chown=user train.sh pytorch-image-models
+ RUN chmod +x pytorch-image-models/train.sh
+
+ COPY --chown=user . /app
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
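Taken together with the `app.py` added below, this image serves a FastAPI app on port 7860 whose root endpoint kicks off the training run. A minimal sketch of triggering it against a locally running container (the localhost URL is illustrative; a deployed Space would be reached at its own URL):

```
import json
import urllib.request

# GET / on the app started by the Dockerfile's CMD; app.py runs training
# inside this request and only returns once the upload step has finished.
with urllib.request.urlopen("http://localhost:7860/") as resp:
    print(json.loads(resp.read().decode("utf-8")))
```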
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: ImagenetTraining-imagenet-1k-random-20.0-frac-1over2
+ emoji: 😻
+ colorFrom: yellow
+ colorTo: blue
+ sdk: docker
+ pinned: false
+ license: cc
+ startup_duration_timeout: 5h
+ hf_oauth_expiration_minutes: 1440
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
__pycache__/app.cpython-39.pyc ADDED
Binary file (1.52 kB)
app.py ADDED
@@ -0,0 +1,47 @@
+ import os
+ from fastapi import FastAPI
+ import wandb
+ from huggingface_hub import HfApi
+
+ TOKEN = os.environ.get("DATACOMP_TOKEN")
+ API = HfApi(token=TOKEN)
+ wandb_api_key = os.environ.get('wandb_api_key')
+ wandb.login(key=wandb_api_key)
+
+ EXPERIMENT = "imagenet-1k-random-20.0-frac-1over2"
+ # Input dataset
+ INPUT = f"datacomp/{EXPERIMENT}"
+ # Output for files and Space ID
+ OUTPUT = f"datacomp/ImagenetTraining-{EXPERIMENT}"
+
+ app = FastAPI()
+
+ @app.get("/")
+ def start_train():
+     os.system("echo 'Space started!'")
+     os.system("echo pwd")
+     os.system("pwd")
+     os.system("echo ls")
+     os.system("ls")
+     os.system("echo \"creating dataset for output files if it doesn't exist...\"")
+     try:
+         API.create_repo(repo_id=OUTPUT, repo_type="dataset",)
+     except:
+         pass
+     #space_variables = API.get_space_variables(repo_id=SPACE_ID)
+     #if 'STATUS' not in space_variables or space_variables['STATUS'] != 'COMPUTING':
+     os.system("echo 'Beginning processing.'")
+     # API.add_space_variable(repo_id=SPACE_ID, key='STATUS', value='COMPUTING')
+     # Handles CUDA OOM errors.
+     os.system(f"export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True")
+     # Prints more informative CUDA errors (I think? I've forgotten now.)
+     os.system("export CUDA_LAUNCH_BLOCKING=1")
+     os.system("echo 'Okay, trying training.'")
+     os.system(f"cd pytorch-image-models; ./train.sh 4 --dataset hfds/{INPUT} --log-wandb --experiment {EXPERIMENT} --model seresnet34 --sched cosine --epochs 150 --warmup-epochs 5 --lr 0.4 --reprob 0.5 --remode pixel --batch-size 256 --amp -j 4")
+     os.system("echo ls")
+     os.system("ls")
+     os.system("echo 'trying to upload...'")
+     API.upload_large_folder(folder_path="/app", repo_id=OUTPUT, repo_type="dataset",)
+     #API.add_space_variable(repo_id=SPACE_ID, key='STATUS', value='NOT_COMPUTING')
+     #API.pause_space(SPACE_ID)
+     return {"Completed": "!"}
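One detail worth noting about `app.py` as committed: each `os.system()` call spawns its own shell, so the `export PYTORCH_CUDA_ALLOC_CONF=...` and `export CUDA_LAUNCH_BLOCKING=1` lines do not affect the later `os.system()` call that runs `train.sh`. A minimal sketch of the same intent with the variables set in the Python process (so child processes inherit them) and the training script launched via `subprocess` — the script path and arguments are copied from the diff above; everything else here is illustrative rather than part of the commit:

```
import os
import subprocess

# Set in-process so the training subprocess inherits these variables.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"  # mitigate CUDA OOM fragmentation
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"  # synchronous kernel launches for clearer CUDA errors

experiment = "imagenet-1k-random-20.0-frac-1over2"
cmd = [
    "./train.sh", "4",
    "--dataset", f"hfds/datacomp/{experiment}",
    "--log-wandb", "--experiment", experiment,
    "--model", "seresnet34", "--sched", "cosine",
    "--epochs", "150", "--warmup-epochs", "5",
    "--lr", "0.4", "--reprob", "0.5", "--remode", "pixel",
    "--batch-size", "256", "--amp", "-j", "4",
]
# check=False mirrors os.system(), which does not raise on a non-zero exit code.
subprocess.run(cmd, cwd="pytorch-image-models", check=False)
```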
pytorch-image-models/.gitattributes ADDED
@@ -0,0 +1 @@
+ *.ipynb linguist-documentation
pytorch-image-models/.github/FUNDING.yml ADDED
@@ -0,0 +1,2 @@
+ # These are supported funding model platforms
+ github: rwightman
pytorch-image-models/.github/ISSUE_TEMPLATE/bug_report.md ADDED
@@ -0,0 +1,32 @@
+ ---
+ name: Bug report
+ about: Create a bug report to help us improve. Issues are for reporting bugs or requesting
+   features, the discussion forum is available for asking questions or seeking help
+   from the community.
+ title: "[BUG] Issue title..."
+ labels: bug
+ assignees: rwightman
+
+ ---
+
+ **Describe the bug**
+ A clear and concise description of what the bug is.
+
+ **To Reproduce**
+ Steps to reproduce the behavior:
+ 1.
+ 2.
+
+ **Expected behavior**
+ A clear and concise description of what you expected to happen.
+
+ **Screenshots**
+ If applicable, add screenshots to help explain your problem.
+
+ **Desktop (please complete the following information):**
+ - OS: [e.g. Windows 10, Ubuntu 18.04]
+ - This repository version [e.g. pip 0.3.1 or commit ref]
+ - PyTorch version w/ CUDA/cuDNN [e.g. from `conda list`, 1.7.0 py3.8_cuda11.0.221_cudnn8.0.3_0]
+
+ **Additional context**
+ Add any other context about the problem here.
pytorch-image-models/.github/ISSUE_TEMPLATE/config.yml ADDED
@@ -0,0 +1,5 @@
+ blank_issues_enabled: false
+ contact_links:
+   - name: Community Discussions
+     url: https://github.com/rwightman/pytorch-image-models/discussions
+     about: Hparam request in issues will be ignored! Issues are for features and bugs. Questions can be asked in Discussions.
pytorch-image-models/.github/ISSUE_TEMPLATE/feature_request.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ name: Feature request
+ about: Suggest an idea for this project. Hparam requests, training help are not feature requests.
+   The discussion forum is available for asking questions or seeking help from the community.
+ title: "[FEATURE] Feature title..."
+ labels: enhancement
+ assignees: ''
+
+ ---
+
+ **Is your feature request related to a problem? Please describe.**
+ A clear and concise description of what the problem is.
+
+ **Describe the solution you'd like**
+ A clear and concise description of what you want to happen.
+
+ **Describe alternatives you've considered**
+ A clear and concise description of any alternative solutions or features you've considered.
+
+ **Additional context**
+ Add any other context or screenshots about the feature request here.
pytorch-image-models/.github/workflows/build_documentation.yml ADDED
@@ -0,0 +1,20 @@
+ name: Build documentation
+
+ on:
+   push:
+     branches:
+       - main
+       - doc-builder*
+       - v*-release
+
+ jobs:
+   build:
+     uses: huggingface/doc-builder/.github/workflows/build_main_documentation.yml@main
+     with:
+       commit_sha: ${{ github.sha }}
+       package: pytorch-image-models
+       package_name: timm
+       path_to_docs: pytorch-image-models/hfdocs/source
+       version_tag_suffix: ""
+     secrets:
+       hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
pytorch-image-models/.github/workflows/build_pr_documentation.yml ADDED
@@ -0,0 +1,19 @@
+ name: Build PR Documentation
+
+ on:
+   pull_request:
+
+ concurrency:
+   group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+   cancel-in-progress: true
+
+ jobs:
+   build:
+     uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main
+     with:
+       commit_sha: ${{ github.event.pull_request.head.sha }}
+       pr_number: ${{ github.event.number }}
+       package: pytorch-image-models
+       package_name: timm
+       path_to_docs: pytorch-image-models/hfdocs/source
+       version_tag_suffix: ""
pytorch-image-models/.github/workflows/tests.yml ADDED
@@ -0,0 +1,65 @@
+ name: Python tests
+
+ on:
+   push:
+     branches: [ main ]
+   pull_request:
+     branches: [ main ]
+
+ env:
+   OMP_NUM_THREADS: 2
+   MKL_NUM_THREADS: 2
+
+ jobs:
+   test:
+     name: Run tests on ${{ matrix.os }} with Python ${{ matrix.python }}
+     strategy:
+       matrix:
+         os: [ubuntu-latest]
+         python: ['3.10', '3.12']
+         torch: [{base: '1.13.0', vision: '0.14.0'}, {base: '2.4.1', vision: '0.19.1'}]
+         testmarker: ['-k "not test_models"', '-m base', '-m cfg', '-m torchscript', '-m features', '-m fxforward', '-m fxbackward']
+         exclude:
+           - python: '3.12'
+             torch: {base: '1.13.0', vision: '0.14.0'}
+     runs-on: ${{ matrix.os }}
+
+     steps:
+       - uses: actions/checkout@v2
+       - name: Set up Python ${{ matrix.python }}
+         uses: actions/setup-python@v1
+         with:
+           python-version: ${{ matrix.python }}
+       - name: Install testing dependencies
+         run: |
+           python -m pip install --upgrade pip
+           pip install -r requirements-dev.txt
+       - name: Install torch on mac
+         if: startsWith(matrix.os, 'macOS')
+         run: pip install --no-cache-dir torch==${{ matrix.torch.base }} torchvision==${{ matrix.torch.vision }}
+       - name: Install torch on Windows
+         if: startsWith(matrix.os, 'windows')
+         run: pip install --no-cache-dir torch==${{ matrix.torch.base }} torchvision==${{ matrix.torch.vision }}
+       - name: Install torch on ubuntu
+         if: startsWith(matrix.os, 'ubuntu')
+         run: |
+           sudo sed -i 's/azure\.//' /etc/apt/sources.list
+           sudo apt update
+           sudo apt install -y google-perftools
+           pip install --no-cache-dir torch==${{ matrix.torch.base }}+cpu torchvision==${{ matrix.torch.vision }}+cpu --index-url https://download.pytorch.org/whl/cpu
+       - name: Install requirements
+         run: |
+           pip install -r requirements.txt
+       - name: Run tests on Windows
+         if: startsWith(matrix.os, 'windows')
+         env:
+           PYTHONDONTWRITEBYTECODE: 1
+         run: |
+           pytest -vv tests
+       - name: Run '${{ matrix.testmarker }}' tests on Linux / Mac
+         if: ${{ !startsWith(matrix.os, 'windows') }}
+         env:
+           LD_PRELOAD: /usr/lib/x86_64-linux-gnu/libtcmalloc.so.4
+           PYTHONDONTWRITEBYTECODE: 1
+         run: |
+           pytest -vv --forked --durations=0 ${{ matrix.testmarker }} tests
pytorch-image-models/.github/workflows/trufflehog.yml ADDED
@@ -0,0 +1,15 @@
+ on:
+   push:
+
+ name: Secret Leaks
+
+ jobs:
+   trufflehog:
+     runs-on: ubuntu-latest
+     steps:
+       - name: Checkout code
+         uses: actions/checkout@v4
+         with:
+           fetch-depth: 0
+       - name: Secret Scanning
+         uses: trufflesecurity/trufflehog@main
pytorch-image-models/.github/workflows/upload_pr_documentation.yml ADDED
@@ -0,0 +1,16 @@
+ name: Upload PR Documentation
+
+ on:
+   workflow_run:
+     workflows: ["Build PR Documentation"]
+     types:
+       - completed
+
+ jobs:
+   build:
+     uses: huggingface/doc-builder/.github/workflows/upload_pr_documentation.yml@main
+     with:
+       package_name: timm
+     secrets:
+       hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
+       comment_bot_token: ${{ secrets.COMMENT_BOT_TOKEN }}
pytorch-image-models/.gitignore ADDED
@@ -0,0 +1,121 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ .hypothesis/
+ .pytest_cache/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # pyenv
+ .python-version
+
+ # celery beat schedule file
+ celerybeat-schedule
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # PyCharm
+ .idea
+
+ output/
+
+ # PyTorch weights
+ *.tar
+ *.pth
+ *.pt
+ *.torch
+ *.gz
+ Untitled.ipynb
+ Testing notebook.ipynb
+
+ # Root dir exclusions
+ /*.csv
+ /*.yaml
+ /*.json
+ /*.jpg
+ /*.png
+ /*.zip
+ /*.tar.*
@@ -0,0 +1,11 @@
 
+ message: "If you use this software, please cite it as below."
+ title: "PyTorch Image Models"
+ version: "1.2.2"
+ doi: "10.5281/zenodo.4414861"
+ authors:
+ - family-names: Wightman
+   given-names: Ross
+ version: 1.0.11
+ year: "2019"
+ url: "https://github.com/huggingface/pytorch-image-models"
+ license: "Apache 2.0"
pytorch-image-models/CODE_OF_CONDUCT.md ADDED
@@ -0,0 +1,132 @@
+ # Contributor Covenant Code of Conduct
+
+ ## Our Pledge
+
+ We as members, contributors, and leaders pledge to make participation in our
+ community a harassment-free experience for everyone, regardless of age, body
+ size, visible or invisible disability, ethnicity, sex characteristics, gender
+ identity and expression, level of experience, education, socio-economic status,
+ nationality, personal appearance, race, caste, color, religion, or sexual
+ identity and orientation.
+
+ We pledge to act and interact in ways that contribute to an open, welcoming,
+ diverse, inclusive, and healthy community.
+
+ ## Our Standards
+
+ Examples of behavior that contributes to a positive environment for our
+ community include:
+
+ * Demonstrating empathy and kindness toward other people
+ * Being respectful of differing opinions, viewpoints, and experiences
+ * Giving and gracefully accepting constructive feedback
+ * Accepting responsibility and apologizing to those affected by our mistakes,
+   and learning from the experience
+ * Focusing on what is best not just for us as individuals, but for the overall
+   community
+
+ Examples of unacceptable behavior include:
+
+ * The use of sexualized language or imagery, and sexual attention or advances of
+   any kind
+ * Trolling, insulting or derogatory comments, and personal or political attacks
+ * Public or private harassment
+ * Publishing others' private information, such as a physical or email address,
+   without their explicit permission
+ * Other conduct that could reasonably be considered inappropriate in a
+   professional setting
+
+ ## Enforcement Responsibilities
+
+ Community leaders are responsible for clarifying and enforcing our standards of
+ acceptable behavior and will take appropriate and fair corrective action in
+ response to any behavior that they deem inappropriate, threatening, offensive,
+ or harmful.
+
+ Community leaders have the right and responsibility to remove, edit, or reject
+ comments, commits, code, wiki edits, issues, and other contributions that are
+ not aligned to this Code of Conduct, and will communicate reasons for moderation
+ decisions when appropriate.
+
+ ## Scope
+
+ This Code of Conduct applies within all community spaces, and also applies when
+ an individual is officially representing the community in public spaces.
+ Examples of representing our community include using an official e-mail address,
+ posting via an official social media account, or acting as an appointed
+ representative at an online or offline event.
+
+ ## Enforcement
+
+ Instances of abusive, harassing, or otherwise unacceptable behavior may be
+ reported to the community leaders responsible for enforcement at
+ feedback@huggingface.co.
+ All complaints will be reviewed and investigated promptly and fairly.
+
+ All community leaders are obligated to respect the privacy and security of the
+ reporter of any incident.
+
+ ## Enforcement Guidelines
+
+ Community leaders will follow these Community Impact Guidelines in determining
+ the consequences for any action they deem in violation of this Code of Conduct:
+
+ ### 1. Correction
+
+ **Community Impact**: Use of inappropriate language or other behavior deemed
+ unprofessional or unwelcome in the community.
+
+ **Consequence**: A private, written warning from community leaders, providing
+ clarity around the nature of the violation and an explanation of why the
+ behavior was inappropriate. A public apology may be requested.
+
+ ### 2. Warning
+
+ **Community Impact**: A violation through a single incident or series of
+ actions.
+
+ **Consequence**: A warning with consequences for continued behavior. No
+ interaction with the people involved, including unsolicited interaction with
+ those enforcing the Code of Conduct, for a specified period. This
+ includes avoiding interactions in community spaces and external channels
+ like social media. Violating these terms may lead to a temporary or permanent
+ ban.
+
+ ### 3. Temporary Ban
+
+ **Community Impact**: A serious violation of community standards, including
+ sustained inappropriate behavior.
+
+ **Consequence**: A temporary ban from any sort of interaction or public
+ communication with the community for a specified period of time. No public or
+ private interaction with the people involved, including unsolicited interaction
+ with those enforcing the Code of Conduct, is allowed during this period.
+ Violating these terms may lead to a permanent ban.
+
+ ### 4. Permanent Ban
+
+ **Community Impact**: Demonstrating a pattern of violation of community
+ standards, including sustained inappropriate behavior, harassment of an
+ individual, or aggression toward or disparagement of classes of individuals.
+
+ **Consequence**: A permanent ban from any public interaction within the
+ community.
+
+ ## Attribution
+
+ This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+ version 2.1, available at
+ [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
+
+ Community Impact Guidelines were inspired by
+ [Mozilla's code of conduct enforcement ladder][Mozilla CoC].
+
+ For answers to common questions about this code of conduct, see the FAQ at
+ [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
+ [https://www.contributor-covenant.org/translations][translations].
+
+ [homepage]: https://www.contributor-covenant.org
+ [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
+ [Mozilla CoC]: https://github.com/mozilla/diversity
+ [FAQ]: https://www.contributor-covenant.org/faq
+ [translations]: https://www.contributor-covenant.org/translations
pytorch-image-models/CONTRIBUTING.md ADDED
@@ -0,0 +1,106 @@
+ *This guideline is very much a work-in-progress.*
+
+ Contributions to `timm` for code, documentation, tests are more than welcome!
+
+ There haven't been any formal guidelines to date so please bear with me, and feel free to add to this guide.
+
+ # Coding style
+
+ Code linting and auto-format (black) are not currently in place but open to consideration. In the meantime, the style to follow is (mostly) aligned with Google's guide: https://google.github.io/styleguide/pyguide.html.
+
+ A few specific differences from Google style (or black):
+ 1. Line length is 120 char. Going over is okay in some cases (e.g. I prefer not to break URL across lines).
+ 2. Hanging indents are always preferred, please avoid aligning arguments with closing brackets or braces.
+
+ Example, from Google guide, but this is a NO here:
+ ```
+ # Aligned with opening delimiter.
+ foo = long_function_name(var_one, var_two,
+                          var_three, var_four)
+ meal = (spam,
+         beans)
+
+ # Aligned with opening delimiter in a dictionary.
+ foo = {
+     'long_dictionary_key': value1 +
+                            value2,
+     ...
+ }
+ ```
+ This is YES:
+
+ ```
+ # 4-space hanging indent; nothing on first line,
+ # closing parenthesis on a new line.
+ foo = long_function_name(
+     var_one, var_two, var_three,
+     var_four
+ )
+ meal = (
+     spam,
+     beans,
+ )
+
+ # 4-space hanging indent in a dictionary.
+ foo = {
+     'long_dictionary_key':
+         long_dictionary_value,
+     ...
+ }
+ ```
+
+ When there is discrepancy in a given source file (there are many origins for various bits of code and not all have been updated to what I consider current goal), please follow the style in a given file.
+
+ In general, if you add new code, formatting it with black using the following options should result in a style that is compatible with the rest of the code base:
+
+ ```
+ black --skip-string-normalization --line-length 120 <path-to-file>
+ ```
+
+ Avoid formatting code that is unrelated to your PR though.
+
+ PR with pure formatting / style fixes will be accepted but only in isolation from functional changes, best to ask before starting such a change.
+
+ # Documentation
+
+ As with code style, docstring style is based on the Google guide: https://google.github.io/styleguide/pyguide.html
+
+ The goal for the code is to eventually move to have all major functions and `__init__` methods use PEP484 type annotations.
+
+ When type annotations are used for a function, as per the Google pyguide, they should **NOT** be duplicated in the docstrings, please leave annotations as the one source of truth re typing.
+
+ There are a LOT of gaps in current documentation relative to the functionality in timm, please, document away!
+
+ # Installation
+
+ Create a Python virtual environment using Python 3.10. Inside the environment, install `torch` and `torchvision` using the instructions matching your system as listed on the [PyTorch website](https://pytorch.org/).
+
+ Then install the remaining dependencies:
+
+ ```
+ python -m pip install -r requirements.txt
+ python -m pip install -r requirements-dev.txt  # for testing
+ python -m pip install -e .
+ ```
+
+ ## Unit tests
+
+ Run the tests using:
+
+ ```
+ pytest tests/
+ ```
+
+ Since the whole test suite takes a lot of time to run locally (a few hours), you may want to select a subset of tests relating to the changes you made by using the `-k` option of [`pytest`](https://docs.pytest.org/en/7.1.x/example/markers.html#using-k-expr-to-select-tests-based-on-their-name). Moreover, running tests in parallel (in this example 4 processes) with the `-n` option may help:
+
+ ```
+ pytest -k "substring-to-match" -n 4 tests/
+ ```
+
+ ## Building documentation
+
+ Please refer to [this document](https://github.com/huggingface/pytorch-image-models/tree/main/hfdocs).
+
+ # Questions
+
+ If you have any questions about contribution, where / how to contribute, please ask in the [Discussions](https://github.com/huggingface/pytorch-image-models/discussions/categories/contributing) (there is a `Contributing` topic).
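As a companion to the docstring and typing guidance in `CONTRIBUTING.md` above (PEP484 annotations as the single source of truth, Google-style docstrings, hanging indents), here is a small sketch of what a conforming function might look like; the function itself is hypothetical and not part of this commit:

```
import torch


def count_parameters(model: torch.nn.Module, trainable_only: bool = True) -> int:
    """Count the parameters of a model.

    Args:
        model: Model whose parameters are counted; the type lives in the
            annotation above and is not repeated here.
        trainable_only: If True, count only parameters with requires_grad set.

    Returns:
        Total number of (optionally trainable) parameters.
    """
    return sum(
        p.numel() for p in model.parameters()
        if not trainable_only or p.requires_grad
    )
```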
pytorch-image-models/LICENSE ADDED
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2019 Ross Wightman
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
@@ -0,0 +1,3 @@
 
+ include timm/models/_pruned/*.txt
+ include timm/data/_info/*.txt
+ include timm/data/_info/*.json
pytorch-image-models/README.md ADDED
@@ -0,0 +1,626 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # PyTorch Image Models
2
+ - [What's New](#whats-new)
3
+ - [Introduction](#introduction)
4
+ - [Models](#models)
5
+ - [Features](#features)
6
+ - [Results](#results)
7
+ - [Getting Started (Documentation)](#getting-started-documentation)
8
+ - [Train, Validation, Inference Scripts](#train-validation-inference-scripts)
9
+ - [Awesome PyTorch Resources](#awesome-pytorch-resources)
10
+ - [Licenses](#licenses)
11
+ - [Citing](#citing)
12
+
13
+ ## What's New
14
+
15
+ ## Nov 28, 2024
16
+ * More optimizers
17
+ * Add MARS optimizer (https://arxiv.org/abs/2411.10438, https://github.com/AGI-Arena/MARS)
18
+ * Add LaProp optimizer (https://arxiv.org/abs/2002.04839, https://github.com/Z-T-WANG/LaProp-Optimizer)
19
+ * Add masking from 'Cautious Optimizers' (https://arxiv.org/abs/2411.16085, https://github.com/kyleliang919/C-Optim) to Adafactor, Adafactor Big Vision, AdamW (legacy), Adopt, Lamb, LaProp, Lion, NadamW, RMSPropTF, SGDW
20
+ * Cleanup some docstrings and type annotations re optimizers and factory
21
+ * Add MobileNet-V4 Conv Medium models pretrained on in12k and fine-tuned in1k @ 384x384
22
+ * https://huggingface.co/timm/mobilenetv4_conv_medium.e250_r384_in12k_ft_in1k
23
+ * https://huggingface.co/timm/mobilenetv4_conv_medium.e250_r384_in12k
24
+ * https://huggingface.co/timm/mobilenetv4_conv_medium.e180_ad_r384_in12k
25
+ * https://huggingface.co/timm/mobilenetv4_conv_medium.e180_r384_in12k
26
+ * Add small cs3darknet, quite good for the speed
27
+ * https://huggingface.co/timm/cs3darknet_focus_s.ra4_e3600_r256_in1k
28
+
29
+ ## Nov 12, 2024
30
+ * Optimizer factory refactor
31
+ * New factory works by registering optimizers using an OptimInfo dataclass w/ some key traits
32
+ * Add `list_optimizers`, `get_optimizer_class`, `get_optimizer_info` to reworked `create_optimizer_v2` fn to explore optimizers, get info or class
33
+ * deprecate `optim.optim_factory`, move fns to `optim/_optim_factory.py` and `optim/_param_groups.py` and encourage import via `timm.optim`
34
+ * Add Adopt (https://github.com/iShohei220/adopt) optimizer
35
+ * Add 'Big Vision' variant of Adafactor (https://github.com/google-research/big_vision/blob/main/big_vision/optax.py) optimizer
36
+ * Fix original Adafactor to pick better factorization dims for convolutions
37
+ * Tweak LAMB optimizer with some improvements in torch.where functionality since original, refactor clipping a bit
38
+ * dynamic img size support in vit, deit, eva improved to support resize from non-square patch grids, thanks https://github.com/wojtke
39
+ *
40
+ ## Oct 31, 2024
41
+ Add a set of new very well trained ResNet & ResNet-V2 18/34 (basic block) weights. See https://huggingface.co/blog/rwightman/resnet-trick-or-treat
42
+
43
+ ## Oct 19, 2024
44
+ * Cleanup torch amp usage to avoid cuda specific calls, merge support for Ascend (NPU) devices from [MengqingCao](https://github.com/MengqingCao) that should work now in PyTorch 2.5 w/ new device extension autoloading feature. Tested Intel Arc (XPU) in Pytorch 2.5 too and it (mostly) worked.
45
+
46
+ ## Oct 16, 2024
47
+ * Fix error on importing from deprecated path `timm.models.registry`, increased priority of existing deprecation warnings to be visible
48
+ * Port weights of InternViT-300M (https://huggingface.co/OpenGVLab/InternViT-300M-448px) to `timm` as `vit_intern300m_patch14_448`
49
+
50
+ ### Oct 14, 2024
51
+ * Pre-activation (ResNetV2) version of 18/18d/34/34d ResNet model defs added by request (weights pending)
52
+ * Release 1.0.10
53
+
54
+ ### Oct 11, 2024
55
+ * MambaOut (https://github.com/yuweihao/MambaOut) model & weights added. A cheeky take on SSM vision models w/o the SSM (essentially ConvNeXt w/ gating). A mix of original weights + custom variations & weights.
56
+
57
+ |model |img_size|top1 |top5 |param_count|
58
+ |---------------------------------------------------------------------------------------------------------------------|--------|------|------|-----------|
59
+ |[mambaout_base_plus_rw.sw_e150_r384_in12k_ft_in1k](http://huggingface.co/timm/mambaout_base_plus_rw.sw_e150_r384_in12k_ft_in1k)|384 |87.506|98.428|101.66 |
60
+ |[mambaout_base_plus_rw.sw_e150_in12k_ft_in1k](http://huggingface.co/timm/mambaout_base_plus_rw.sw_e150_in12k_ft_in1k)|288 |86.912|98.236|101.66 |
61
+ |[mambaout_base_plus_rw.sw_e150_in12k_ft_in1k](http://huggingface.co/timm/mambaout_base_plus_rw.sw_e150_in12k_ft_in1k)|224 |86.632|98.156|101.66 |
62
+ |[mambaout_base_tall_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_tall_rw.sw_e500_in1k) |288 |84.974|97.332|86.48 |
63
+ |[mambaout_base_wide_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_wide_rw.sw_e500_in1k) |288 |84.962|97.208|94.45 |
64
+ |[mambaout_base_short_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_short_rw.sw_e500_in1k) |288 |84.832|97.27 |88.83 |
65
+ |[mambaout_base.in1k](http://huggingface.co/timm/mambaout_base.in1k) |288 |84.72 |96.93 |84.81 |
66
+ |[mambaout_small_rw.sw_e450_in1k](http://huggingface.co/timm/mambaout_small_rw.sw_e450_in1k) |288 |84.598|97.098|48.5 |
67
+ |[mambaout_small.in1k](http://huggingface.co/timm/mambaout_small.in1k) |288 |84.5 |96.974|48.49 |
68
+ |[mambaout_base_wide_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_wide_rw.sw_e500_in1k) |224 |84.454|96.864|94.45 |
69
+ |[mambaout_base_tall_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_tall_rw.sw_e500_in1k) |224 |84.434|96.958|86.48 |
70
+ |[mambaout_base_short_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_short_rw.sw_e500_in1k) |224 |84.362|96.952|88.83 |
71
+ |[mambaout_base.in1k](http://huggingface.co/timm/mambaout_base.in1k) |224 |84.168|96.68 |84.81 |
72
+ |[mambaout_small.in1k](http://huggingface.co/timm/mambaout_small.in1k) |224 |84.086|96.63 |48.49 |
73
+ |[mambaout_small_rw.sw_e450_in1k](http://huggingface.co/timm/mambaout_small_rw.sw_e450_in1k) |224 |84.024|96.752|48.5 |
74
+ |[mambaout_tiny.in1k](http://huggingface.co/timm/mambaout_tiny.in1k) |288 |83.448|96.538|26.55 |
75
+ |[mambaout_tiny.in1k](http://huggingface.co/timm/mambaout_tiny.in1k) |224 |82.736|96.1 |26.55 |
76
+ |[mambaout_kobe.in1k](http://huggingface.co/timm/mambaout_kobe.in1k) |288 |81.054|95.718|9.14 |
77
+ |[mambaout_kobe.in1k](http://huggingface.co/timm/mambaout_kobe.in1k) |224 |79.986|94.986|9.14 |
78
+ |[mambaout_femto.in1k](http://huggingface.co/timm/mambaout_femto.in1k) |288 |79.848|95.14 |7.3 |
79
+ |[mambaout_femto.in1k](http://huggingface.co/timm/mambaout_femto.in1k) |224 |78.87 |94.408|7.3 |
80
+
81
+ * SigLIP SO400M ViT fine-tunes on ImageNet-1k @ 378x378, added 378x378 option for existing SigLIP 384x384 models
82
+ * [vit_so400m_patch14_siglip_378.webli_ft_in1k](https://huggingface.co/timm/vit_so400m_patch14_siglip_378.webli_ft_in1k) - 89.42 top-1
83
+ * [vit_so400m_patch14_siglip_gap_378.webli_ft_in1k](https://huggingface.co/timm/vit_so400m_patch14_siglip_gap_378.webli_ft_in1k) - 89.03
84
+ * SigLIP SO400M ViT encoder from recent multi-lingual (i18n) variant, patch16 @ 256x256 (https://huggingface.co/timm/ViT-SO400M-16-SigLIP-i18n-256). OpenCLIP update pending.
85
+ * Add two ConvNeXt 'Zepto' models & weights (one w/ overlapped stem and one w/ patch stem). Uses RMSNorm, smaller than previous 'Atto', 2.2M params.
86
+ * [convnext_zepto_rms_ols.ra4_e3600_r224_in1k](https://huggingface.co/timm/convnext_zepto_rms_ols.ra4_e3600_r224_in1k) - 73.20 top-1 @ 224
87
+ * [convnext_zepto_rms.ra4_e3600_r224_in1k](https://huggingface.co/timm/convnext_zepto_rms.ra4_e3600_r224_in1k) - 72.81 @ 224
88
+
89
+ ### Sept 2024
90
+ * Add a suite of tiny test models for improved unit tests and niche low-resource applications (https://huggingface.co/blog/rwightman/timm-tiny-test)
91
+ * Add MobileNetV4-Conv-Small (0.5x) model (https://huggingface.co/posts/rwightman/793053396198664)
92
+ * [mobilenetv4_conv_small_050.e3000_r224_in1k](http://hf.co/timm/mobilenetv4_conv_small_050.e3000_r224_in1k) - 65.81 top-1 @ 256, 64.76 @ 224
93
+ * Add MobileNetV3-Large variants trained with MNV4 Small recipe
94
+ * [mobilenetv3_large_150d.ra4_e3600_r256_in1k](http://hf.co/timm/mobilenetv3_large_150d.ra4_e3600_r256_in1k) - 81.81 @ 320, 80.94 @ 256
95
+ * [mobilenetv3_large_100.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv3_large_100.ra4_e3600_r224_in1k) - 77.16 @ 256, 76.31 @ 224
96
+
97
+
98
+ ### Aug 21, 2024
99
+ * Updated SBB ViT models trained on ImageNet-12k and fine-tuned on ImageNet-1k, challenging quite a number of much larger, slower models
100
+
101
+ | model | top1 | top5 | param_count | img_size |
102
+ | -------------------------------------------------- | ------ | ------ | ----------- | -------- |
103
+ | [vit_mediumd_patch16_reg4_gap_384.sbb2_e200_in12k_ft_in1k](https://huggingface.co/timm/vit_mediumd_patch16_reg4_gap_384.sbb2_e200_in12k_ft_in1k) | 87.438 | 98.256 | 64.11 | 384 |
104
+ | [vit_mediumd_patch16_reg4_gap_256.sbb2_e200_in12k_ft_in1k](https://huggingface.co/timm/vit_mediumd_patch16_reg4_gap_256.sbb2_e200_in12k_ft_in1k) | 86.608 | 97.934 | 64.11 | 256 |
105
+ | [vit_betwixt_patch16_reg4_gap_384.sbb2_e200_in12k_ft_in1k](https://huggingface.co/timm/vit_betwixt_patch16_reg4_gap_384.sbb2_e200_in12k_ft_in1k) | 86.594 | 98.02 | 60.4 | 384 |
106
+ | [vit_betwixt_patch16_reg4_gap_256.sbb2_e200_in12k_ft_in1k](https://huggingface.co/timm/vit_betwixt_patch16_reg4_gap_256.sbb2_e200_in12k_ft_in1k) | 85.734 | 97.61 | 60.4 | 256 |
107
+ * MobileNet-V1 1.25, EfficientNet-B1, & ResNet50-D weights w/ MNV4 baseline challenge recipe
108
+
109
+ | model | top1 | top5 | param_count | img_size |
110
+ |--------------------------------------------------------------------------------------------------------------------------|--------|--------|-------------|----------|
111
+ | [resnet50d.ra4_e3600_r224_in1k](http://hf.co/timm/resnet50d.ra4_e3600_r224_in1k) | 81.838 | 95.922 | 25.58 | 288 |
112
+ | [efficientnet_b1.ra4_e3600_r240_in1k](http://hf.co/timm/efficientnet_b1.ra4_e3600_r240_in1k) | 81.440 | 95.700 | 7.79 | 288 |
113
+ | [resnet50d.ra4_e3600_r224_in1k](http://hf.co/timm/resnet50d.ra4_e3600_r224_in1k) | 80.952 | 95.384 | 25.58 | 224 |
114
+ | [efficientnet_b1.ra4_e3600_r240_in1k](http://hf.co/timm/efficientnet_b1.ra4_e3600_r240_in1k) | 80.406 | 95.152 | 7.79 | 240 |
115
+ | [mobilenetv1_125.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_125.ra4_e3600_r224_in1k) | 77.600 | 93.804 | 6.27 | 256 |
116
+ | [mobilenetv1_125.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_125.ra4_e3600_r224_in1k) | 76.924 | 93.234 | 6.27 | 224 |
117
+
118
+ * Add SAM2 (HieraDet) backbone arch & weight loading support
119
+ * Add Hiera Small weights trained w/ abswin pos embed on in12k & fine-tuned on 1k
120
+
121
+ |model |top1 |top5 |param_count|
122
+ |---------------------------------|------|------|-----------|
123
+ |hiera_small_abswin_256.sbb2_e200_in12k_ft_in1k |84.912|97.260|35.01 |
124
+ |hiera_small_abswin_256.sbb2_pd_e200_in12k_ft_in1k |84.560|97.106|35.01 |
125
+
126
+ ### Aug 8, 2024
127
+ * Add RDNet ('DenseNets Reloaded', https://arxiv.org/abs/2403.19588), thanks [Donghyun Kim](https://github.com/dhkim0225)
128
+
129
+ ### July 28, 2024
130
+ * Add `mobilenet_edgetpu_v2_m` weights w/ `ra4` mnv4-small based recipe. 80.1% top-1 @ 224 and 80.7 @ 256.
131
+ * Release 1.0.8
132
+
133
+ ### July 26, 2024
134
+ * More MobileNet-v4 weights, ImageNet-12k pretrain w/ fine-tunes, and anti-aliased ConvLarge models
135
+
136
+ | model |top1 |top1_err|top5 |top5_err|param_count|img_size|
137
+ |--------------------------------------------------------------------------------------------------|------|--------|------|--------|-----------|--------|
138
+ | [mobilenetv4_conv_aa_large.e230_r448_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e230_r448_in12k_ft_in1k)|84.99 |15.01 |97.294|2.706 |32.59 |544 |
139
+ | [mobilenetv4_conv_aa_large.e230_r384_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e230_r384_in12k_ft_in1k)|84.772|15.228 |97.344|2.656 |32.59 |480 |
140
+ | [mobilenetv4_conv_aa_large.e230_r448_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e230_r448_in12k_ft_in1k)|84.64 |15.36 |97.114|2.886 |32.59 |448 |
141
+ | [mobilenetv4_conv_aa_large.e230_r384_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e230_r384_in12k_ft_in1k)|84.314|15.686 |97.102|2.898 |32.59 |384 |
142
+ | [mobilenetv4_conv_aa_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e600_r384_in1k) |83.824|16.176 |96.734|3.266 |32.59 |480 |
143
+ | [mobilenetv4_conv_aa_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e600_r384_in1k) |83.244|16.756 |96.392|3.608 |32.59 |384 |
144
+ | [mobilenetv4_hybrid_medium.e200_r256_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.e200_r256_in12k_ft_in1k)|82.99 |17.01 |96.67 |3.33 |11.07 |320 |
145
+ | [mobilenetv4_hybrid_medium.e200_r256_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.e200_r256_in12k_ft_in1k)|82.364|17.636 |96.256|3.744 |11.07 |256 |
146
+
147
+ * Impressive MobileNet-V1 and EfficientNet-B0 baseline challenges (https://huggingface.co/blog/rwightman/mobilenet-baselines)
148
+
149
+ | model |top1 |top1_err|top5 |top5_err|param_count|img_size|
150
+ |--------------------------------------------------------------------------------------------------|------|--------|------|--------|-----------|--------|
151
+ | [efficientnet_b0.ra4_e3600_r224_in1k](http://hf.co/timm/efficientnet_b0.ra4_e3600_r224_in1k) |79.364|20.636 |94.754|5.246 |5.29 |256 |
152
+ | [efficientnet_b0.ra4_e3600_r224_in1k](http://hf.co/timm/efficientnet_b0.ra4_e3600_r224_in1k) |78.584|21.416 |94.338|5.662 |5.29 |224 |
153
+ | [mobilenetv1_100h.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_100h.ra4_e3600_r224_in1k) |76.596|23.404 |93.272|6.728 |5.28 |256 |
154
+ | [mobilenetv1_100.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_100.ra4_e3600_r224_in1k) |76.094|23.906 |93.004|6.996 |4.23 |256 |
155
+ | [mobilenetv1_100h.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_100h.ra4_e3600_r224_in1k) |75.662|24.338 |92.504|7.496 |5.28 |224 |
156
+ | [mobilenetv1_100.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_100.ra4_e3600_r224_in1k) |75.382|24.618 |92.312|7.688 |4.23 |224 |
157
+
158
+ * Prototype of `set_input_size()` added to vit and swin v1/v2 models to allow changing image size, patch size, and window size after model creation (see the sketch at the end of this section's notes).
159
+ * Improved support in swin for different size handling; in addition to `set_input_size`, `always_partition` and `strict_img_size` args have been added to `__init__` to allow more flexible input size constraints
160
+ * Fix out of order indices info for intermediate 'Getter' feature wrapper, check out of range indices for same.
161
+ * Add several `tiny` < .5M param models for testing that are actually trained on ImageNet-1k
162
+
163
+ |model |top1 |top1_err|top5 |top5_err|param_count|img_size|crop_pct|
164
+ |----------------------------|------|--------|------|--------|-----------|--------|--------|
165
+ |test_efficientnet.r160_in1k |47.156|52.844 |71.726|28.274 |0.36 |192 |1.0 |
166
+ |test_byobnet.r160_in1k |46.698|53.302 |71.674|28.326 |0.46 |192 |1.0 |
167
+ |test_efficientnet.r160_in1k |46.426|53.574 |70.928|29.072 |0.36 |160 |0.875 |
168
+ |test_byobnet.r160_in1k |45.378|54.622 |70.572|29.428 |0.46 |160 |0.875 |
169
+ |test_vit.r160_in1k|42.0 |58.0 |68.664|31.336 |0.37 |192 |1.0 |
170
+ |test_vit.r160_in1k|40.822|59.178 |67.212|32.788 |0.37 |160 |0.875 |
171
+
172
+ * Fix vit reg token init, thanks [Promisery](https://github.com/Promisery)
173
+ * Other misc fixes
174
+
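+ A minimal sketch of the `set_input_size()` prototype mentioned above; the keyword name used below (`img_size=`) is an assumption based on the note, not a finalized API, so check the model class for the exact signature:
+ 
+ ```python
+ import torch
+ import timm
+ 
+ # assumed usage of the set_input_size() prototype on a ViT; swin v1/v2 variants
+ # additionally accept window-size related arguments per the note above
+ model = timm.create_model('vit_base_patch16_224', pretrained=False)
+ model.set_input_size(img_size=384)  # re-derive pos embed / patch grid for the new size
+ out = model(torch.randn(1, 3, 384, 384))
+ print(out.shape)  # expected: torch.Size([1, 1000])
+ ```
+ 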
175
+ ### June 24, 2024
176
+ * 3 more MobileNetV4 hybrid weights with different MQA weight init scheme
177
+
178
+ | model |top1 |top1_err|top5 |top5_err|param_count|img_size|
179
+ |--------------------------------------------------------------------------------------------------|------|--------|------|--------|-----------|--------|
180
+ | [mobilenetv4_hybrid_large.ix_e600_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_large.ix_e600_r384_in1k) |84.356|15.644 |96.892 |3.108 |37.76 |448 |
181
+ | [mobilenetv4_hybrid_large.ix_e600_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_large.ix_e600_r384_in1k) |83.990|16.010 |96.702 |3.298 |37.76 |384 |
182
+ | [mobilenetv4_hybrid_medium.ix_e550_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.ix_e550_r384_in1k) |83.394|16.606 |96.760|3.240 |11.07 |448 |
183
+ | [mobilenetv4_hybrid_medium.ix_e550_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.ix_e550_r384_in1k) |82.968|17.032 |96.474|3.526 |11.07 |384 |
184
+ | [mobilenetv4_hybrid_medium.ix_e550_r256_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.ix_e550_r256_in1k) |82.492|17.508 |96.278|3.722 |11.07 |320 |
185
+ | [mobilenetv4_hybrid_medium.ix_e550_r256_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.ix_e550_r256_in1k) |81.446|18.554 |95.704|4.296 |11.07 |256 |
186
+ * florence2 weight loading in DaViT model
187
+
188
+ ### June 12, 2024
189
+ * MobileNetV4 models and initial set of `timm` trained weights added (a quick usage sketch follows at the end of this section):
190
+
191
+ | model |top1 |top1_err|top5 |top5_err|param_count|img_size|
192
+ |--------------------------------------------------------------------------------------------------|------|--------|------|--------|-----------|--------|
193
+ | [mobilenetv4_hybrid_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_large.e600_r384_in1k) |84.266|15.734 |96.936 |3.064 |37.76 |448 |
194
+ | [mobilenetv4_hybrid_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_large.e600_r384_in1k) |83.800|16.200 |96.770 |3.230 |37.76 |384 |
195
+ | [mobilenetv4_conv_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_conv_large.e600_r384_in1k) |83.392|16.608 |96.622 |3.378 |32.59 |448 |
196
+ | [mobilenetv4_conv_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_conv_large.e600_r384_in1k) |82.952|17.048 |96.266 |3.734 |32.59 |384 |
197
+ | [mobilenetv4_conv_large.e500_r256_in1k](http://hf.co/timm/mobilenetv4_conv_large.e500_r256_in1k) |82.674|17.326 |96.31 |3.69 |32.59 |320 |
198
+ | [mobilenetv4_conv_large.e500_r256_in1k](http://hf.co/timm/mobilenetv4_conv_large.e500_r256_in1k) |81.862|18.138 |95.69 |4.31 |32.59 |256 |
199
+ | [mobilenetv4_hybrid_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.e500_r224_in1k) |81.276|18.724 |95.742|4.258 |11.07 |256 |
200
+ | [mobilenetv4_conv_medium.e500_r256_in1k](http://hf.co/timm/mobilenetv4_conv_medium.e500_r256_in1k) |80.858|19.142 |95.768|4.232 |9.72 |320 |
201
+ | [mobilenetv4_hybrid_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.e500_r224_in1k) |80.442|19.558 |95.38 |4.62 |11.07 |224 |
202
+ | [mobilenetv4_conv_blur_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_conv_blur_medium.e500_r224_in1k) |80.142|19.858 |95.298|4.702 |9.72 |256 |
203
+ | [mobilenetv4_conv_medium.e500_r256_in1k](http://hf.co/timm/mobilenetv4_conv_medium.e500_r256_in1k) |79.928|20.072 |95.184|4.816 |9.72 |256 |
204
+ | [mobilenetv4_conv_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_conv_medium.e500_r224_in1k) |79.808|20.192 |95.186|4.814 |9.72 |256 |
205
+ | [mobilenetv4_conv_blur_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_conv_blur_medium.e500_r224_in1k) |79.438|20.562 |94.932|5.068 |9.72 |224 |
206
+ | [mobilenetv4_conv_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_conv_medium.e500_r224_in1k) |79.094|20.906 |94.77 |5.23 |9.72 |224 |
207
+ | [mobilenetv4_conv_small.e2400_r224_in1k](http://hf.co/timm/mobilenetv4_conv_small.e2400_r224_in1k) |74.616|25.384 |92.072|7.928 |3.77 |256 |
208
+ | [mobilenetv4_conv_small.e1200_r224_in1k](http://hf.co/timm/mobilenetv4_conv_small.e1200_r224_in1k) |74.292|25.708 |92.116|7.884 |3.77 |256 |
209
+ | [mobilenetv4_conv_small.e2400_r224_in1k](http://hf.co/timm/mobilenetv4_conv_small.e2400_r224_in1k) |73.756|26.244 |91.422|8.578 |3.77 |224 |
210
+ | [mobilenetv4_conv_small.e1200_r224_in1k](http://hf.co/timm/mobilenetv4_conv_small.e1200_r224_in1k) |73.454|26.546 |91.34 |8.66 |3.77 |224 |
211
+
212
+ * Apple MobileCLIP (https://arxiv.org/pdf/2311.17049, FastViT and ViT-B) image tower model support & weights added (part of OpenCLIP support).
213
+ * ViTamin (https://arxiv.org/abs/2404.02132) CLIP image tower model & weights added (part of OpenCLIP support).
214
+ * OpenAI CLIP Modified ResNet image tower modelling & weight support (via ByobNet). Refactor AttentionPool2d.
215
+
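+ A quick sketch of trying one of the MobileNetV4 weights listed above; the model/tag name comes from the table, and the eval transform is derived from the weight's pretrained config (input size, mean/std, crop pct):
+ 
+ ```python
+ import torch
+ import timm
+ from timm.data import resolve_data_config, create_transform
+ 
+ m = timm.create_model('mobilenetv4_conv_small.e2400_r224_in1k', pretrained=True).eval()
+ cfg = resolve_data_config({}, model=m)   # pulls input size / mean / std from the weight config
+ transform = create_transform(**cfg)      # eval transform matching the pretrained weights
+ with torch.no_grad():
+     logits = m(torch.randn(1, *cfg['input_size']))
+ print(logits.shape)  # torch.Size([1, 1000])
+ ```
+ 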
216
+ ### May 14, 2024
217
+ * Support loading PaliGemma jax weights into SigLIP ViT models with average pooling.
218
+ * Add Hiera models from Meta (https://github.com/facebookresearch/hiera).
219
+ * Add `normalize=` flag for transforms, returning a non-normalized torch.Tensor with the original dtype (for `chug`); a sketch follows below
220
+ * Version 1.0.3 release
221
+
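+ A hedged sketch of the `normalize=` transform flag noted above; it is assumed here to be accepted by `timm.data.create_transform`, skipping the mean/std `Normalize` step and returning a tensor in the original dtype:
+ 
+ ```python
+ import timm
+ from timm.data import resolve_data_config, create_transform
+ 
+ model = timm.create_model('resnet50', pretrained=False)
+ cfg = resolve_data_config({}, model=model)
+ # normalize=False is an assumption based on the changelog note above
+ transform = create_transform(**cfg, normalize=False)
+ # x = transform(pil_image)  # -> torch.Tensor without mean/std normalization applied
+ ```
+ 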
222
+ ### May 11, 2024
223
+ * `Searching for Better ViT Baselines (For the GPU Poor)` weights and vit variants released. Exploring model shapes between Tiny and Base.
224
+
225
+ | model | top1 | top5 | param_count | img_size |
226
+ | -------------------------------------------------- | ------ | ------ | ----------- | -------- |
227
+ | [vit_mediumd_patch16_reg4_gap_256.sbb_in12k_ft_in1k](https://huggingface.co/timm/vit_mediumd_patch16_reg4_gap_256.sbb_in12k_ft_in1k) | 86.202 | 97.874 | 64.11 | 256 |
228
+ | [vit_betwixt_patch16_reg4_gap_256.sbb_in12k_ft_in1k](https://huggingface.co/timm/vit_betwixt_patch16_reg4_gap_256.sbb_in12k_ft_in1k) | 85.418 | 97.48 | 60.4 | 256 |
229
+ | [vit_mediumd_patch16_rope_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_mediumd_patch16_rope_reg1_gap_256.sbb_in1k) | 84.322 | 96.812 | 63.95 | 256 |
230
+ | [vit_betwixt_patch16_rope_reg4_gap_256.sbb_in1k](https://huggingface.co/timm/vit_betwixt_patch16_rope_reg4_gap_256.sbb_in1k) | 83.906 | 96.684 | 60.23 | 256 |
231
+ | [vit_base_patch16_rope_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_base_patch16_rope_reg1_gap_256.sbb_in1k) | 83.866 | 96.67 | 86.43 | 256 |
232
+ | [vit_medium_patch16_rope_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_medium_patch16_rope_reg1_gap_256.sbb_in1k) | 83.81 | 96.824 | 38.74 | 256 |
233
+ | [vit_betwixt_patch16_reg4_gap_256.sbb_in1k](https://huggingface.co/timm/vit_betwixt_patch16_reg4_gap_256.sbb_in1k) | 83.706 | 96.616 | 60.4 | 256 |
234
+ | [vit_betwixt_patch16_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_betwixt_patch16_reg1_gap_256.sbb_in1k) | 83.628 | 96.544 | 60.4 | 256 |
235
+ | [vit_medium_patch16_reg4_gap_256.sbb_in1k](https://huggingface.co/timm/vit_medium_patch16_reg4_gap_256.sbb_in1k) | 83.47 | 96.622 | 38.88 | 256 |
236
+ | [vit_medium_patch16_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_medium_patch16_reg1_gap_256.sbb_in1k) | 83.462 | 96.548 | 38.88 | 256 |
237
+ | [vit_little_patch16_reg4_gap_256.sbb_in1k](https://huggingface.co/timm/vit_little_patch16_reg4_gap_256.sbb_in1k) | 82.514 | 96.262 | 22.52 | 256 |
238
+ | [vit_wee_patch16_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_wee_patch16_reg1_gap_256.sbb_in1k) | 80.256 | 95.360 | 13.42 | 256 |
239
+ | [vit_pwee_patch16_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_pwee_patch16_reg1_gap_256.sbb_in1k) | 80.072 | 95.136 | 15.25 | 256 |
240
+ | [vit_mediumd_patch16_reg4_gap_256.sbb_in12k](https://huggingface.co/timm/vit_mediumd_patch16_reg4_gap_256.sbb_in12k) | N/A | N/A | 64.11 | 256 |
241
+ | [vit_betwixt_patch16_reg4_gap_256.sbb_in12k](https://huggingface.co/timm/vit_betwixt_patch16_reg4_gap_256.sbb_in12k) | N/A | N/A | 60.4 | 256 |
242
+
243
+ * AttentionExtract helper added to extract attention maps from `timm` models. See example in https://github.com/huggingface/pytorch-image-models/discussions/1232#discussioncomment-9320949 (a hedged sketch also follows these notes)
244
+ * `forward_intermediates()` API refined and added to more models including some ConvNets that have other extraction methods.
245
+ * 1017 of 1047 model architectures support `features_only=True` feature extraction. Remaining architectures can be supported based on priority requests.
246
+ * Remove torch.jit.script annotated functions, including old JIT activations. They conflict with dynamo, and dynamo does a much better job when used.
247
+
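+ A hedged sketch of the `AttentionExtract` helper noted above; the import path and the return structure (assumed to be a dict of attention tensors keyed by module name) should be checked against the linked discussion:
+ 
+ ```python
+ import torch
+ import timm
+ from timm.utils import AttentionExtract  # assumed import location
+ 
+ model = timm.create_model('vit_base_patch16_224', pretrained=False).eval()
+ extractor = AttentionExtract(model)  # wraps the model and captures attention outputs
+ with torch.no_grad():
+     maps = extractor(torch.randn(1, 3, 224, 224))
+ for name, attn in maps.items():
+     print(name, tuple(attn.shape))
+ ```
+ 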
248
+ ### April 11, 2024
249
+ * Prepping for a long overdue 1.0 release, things have been stable for a while now.
250
+ * Significant feature that's been missing for a while: `features_only=True` support for ViT models with flat hidden states or non-std module layouts (so far covering `'vit_*', 'twins_*', 'deit*', 'beit*', 'mvitv2*', 'eva*', 'samvit_*', 'flexivit*'`)
251
+ * Above feature support achieved through a new `forward_intermediates()` API that can be used with a feature wrapping module or directly.
252
+ ```python
253
+ model = timm.create_model('vit_base_patch16_224')
254
+ final_feat, intermediates = model.forward_intermediates(input)
255
+ output = model.forward_head(final_feat) # pooling + classifier head
256
+
257
+ print(final_feat.shape)
258
+ torch.Size([2, 197, 768])
259
+
260
+ for f in intermediates:
261
+ print(f.shape)
262
+ torch.Size([2, 768, 14, 14])
263
+ torch.Size([2, 768, 14, 14])
264
+ torch.Size([2, 768, 14, 14])
265
+ torch.Size([2, 768, 14, 14])
266
+ torch.Size([2, 768, 14, 14])
267
+ torch.Size([2, 768, 14, 14])
268
+ torch.Size([2, 768, 14, 14])
269
+ torch.Size([2, 768, 14, 14])
270
+ torch.Size([2, 768, 14, 14])
271
+ torch.Size([2, 768, 14, 14])
272
+ torch.Size([2, 768, 14, 14])
273
+ torch.Size([2, 768, 14, 14])
274
+
275
+ print(output.shape)
276
+ torch.Size([2, 1000])
277
+ ```
278
+
279
+ ```python
280
+ model = timm.create_model('eva02_base_patch16_clip_224', pretrained=True, img_size=512, features_only=True, out_indices=(-3, -2,))
281
+ output = model(torch.randn(2, 3, 512, 512))
282
+
283
+ for o in output:
284
+ print(o.shape)
285
+ torch.Size([2, 768, 32, 32])
286
+ torch.Size([2, 768, 32, 32])
287
+ ```
288
+ * TinyCLIP vision tower weights added, thx [Thien Tran](https://github.com/gau-nernst)
289
+
290
+ ### Feb 19, 2024
291
+ * Next-ViT models added. Adapted from https://github.com/bytedance/Next-ViT
292
+ * HGNet and PP-HGNetV2 models added. Adapted from https://github.com/PaddlePaddle/PaddleClas by [SeeFun](https://github.com/seefun)
293
+ * Removed setup.py, moved to pyproject.toml based build supported by PDM
294
+ * Add updated model EMA impl using _for_each for less overhead
295
+ * Support device args in train script for non-GPU devices
296
+ * Other misc fixes and small additions
297
+ * Min supported Python version increased to 3.8
298
+ * Release 0.9.16
299
+
300
+ ### Jan 8, 2024
301
+ Datasets & transform refactoring
302
+ * HuggingFace streaming (iterable) dataset support (`--dataset hfids:org/dataset`)
303
+ * Webdataset wrapper tweaks for improved split info fetching, can auto fetch splits from supported HF hub webdataset
304
+ * Tested HF `datasets` and webdataset wrapper streaming from HF hub with recent `timm` ImageNet uploads to https://huggingface.co/timm
305
+ * Make input & target column/field keys consistent across datasets and pass via args
306
+ * Full monochrome support when using e.g. `--input-size 1 224 224` or `--in-chans 1`, sets PIL image conversion appropriately in dataset
307
+ * Improved several alternate crop & resize transforms (ResizeKeepRatio, RandomCropOrPad, etc) for use in PixParse document AI project
308
+ * Add SimCLR style color jitter prob along with grayscale and gaussian blur options to augmentations and args
309
+ * Allow train without validation set (`--val-split ''`) in train script
310
+ * Add `--bce-sum` (sum over class dim) and `--bce-pos-weight` (positive weighting) args for training as they're common BCE loss tweaks I was often hard coding
311
+
312
+ ### Nov 23, 2023
313
+ * Added EfficientViT-Large models, thanks [SeeFun](https://github.com/seefun)
314
+ * Fix Python 3.7 compat, will be dropping support for it soon
315
+ * Other misc fixes
316
+ * Release 0.9.12
317
+
318
+ ### Nov 20, 2023
319
+ * Added significant flexibility for Hugging Face Hub based timm models via `model_args` config entry. `model_args` will be passed as kwargs through to models on creation (a short sketch follows below).
320
+ * See example at https://huggingface.co/gaunernst/vit_base_patch16_1024_128.audiomae_as2m_ft_as20k/blob/main/config.json
321
+ * Usage: https://github.com/huggingface/pytorch-image-models/discussions/2035
322
+ * Updated imagenet eval and test set csv files with latest models
323
+ * `vision_transformer.py` typing and doc cleanup by [Laureηt](https://github.com/Laurent2916)
324
+ * 0.9.11 release
325
+
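+ A short sketch of loading a Hub model whose `config.json` carries a `model_args` entry (the repo name is the real example linked above); those entries are forwarded as kwargs to the model constructor on creation:
+ 
+ ```python
+ import timm
+ 
+ # kwargs under `model_args` in the hub config (e.g. img_size overrides) are passed
+ # through to the architecture when it is instantiated
+ model = timm.create_model(
+     'hf-hub:gaunernst/vit_base_patch16_1024_128.audiomae_as2m_ft_as20k',
+     pretrained=True,
+ )
+ ```
+ 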
326
+ ### Nov 3, 2023
327
+ * [DFN (Data Filtering Networks)](https://huggingface.co/papers/2309.17425) and [MetaCLIP](https://huggingface.co/papers/2309.16671) ViT weights added
328
+ * DINOv2 'register' ViT model weights added (https://huggingface.co/papers/2309.16588, https://huggingface.co/papers/2304.07193)
329
+ * Add `quickgelu` ViT variants for OpenAI, DFN, MetaCLIP weights that use it (less efficient)
330
+ * Improved typing added to ResNet, MobileNet-v3 thanks to [Aryan](https://github.com/a-r-r-o-w)
331
+ * ImageNet-12k fine-tuned (from LAION-2B CLIP) `convnext_xxlarge`
332
+ * 0.9.9 release
333
+
334
+ ### Oct 20, 2023
335
+ * [SigLIP](https://huggingface.co/papers/2303.15343) image tower weights supported in `vision_transformer.py`.
336
+ * Great potential for fine-tune and downstream feature use.
337
+ * Experimental 'register' support in vit models as per [Vision Transformers Need Registers](https://huggingface.co/papers/2309.16588)
338
+ * Updated RepViT with new weight release. Thanks [wangao](https://github.com/jameslahm)
339
+ * Add patch resizing support (on pretrained weight load) to Swin models
340
+ * 0.9.8 release pending
341
+
342
+ ### Sep 1, 2023
343
+ * TinyViT added by [SeeFun](https://github.com/seefun)
344
+ * Fix EfficientViT (MIT) to use torch.autocast so it works back to PT 1.10
345
+ * 0.9.7 release
346
+
347
+ ## Introduction
348
+
349
+ Py**T**orch **Im**age **M**odels (`timm`) is a collection of image models, layers, utilities, optimizers, schedulers, data-loaders / augmentations, and reference training / validation scripts that aim to pull together a wide variety of SOTA models with ability to reproduce ImageNet training results.
350
+
351
+ The work of many others is present here. I've tried to make sure all source material is acknowledged via links to github, arxiv papers, etc in the README, documentation, and code docstrings. Please let me know if I missed anything.
352
+
353
+ ## Features
354
+
355
+ ### Models
356
+
357
+ All model architecture families include variants with pretrained weights. There are specific model variants without any weights; that is NOT a bug. Help training new or better weights is always appreciated.
358
+
359
+ * Aggregating Nested Transformers - https://arxiv.org/abs/2105.12723
360
+ * BEiT - https://arxiv.org/abs/2106.08254
361
+ * Big Transfer ResNetV2 (BiT) - https://arxiv.org/abs/1912.11370
362
+ * Bottleneck Transformers - https://arxiv.org/abs/2101.11605
363
+ * CaiT (Class-Attention in Image Transformers) - https://arxiv.org/abs/2103.17239
364
+ * CoaT (Co-Scale Conv-Attentional Image Transformers) - https://arxiv.org/abs/2104.06399
365
+ * CoAtNet (Convolution and Attention) - https://arxiv.org/abs/2106.04803
366
+ * ConvNeXt - https://arxiv.org/abs/2201.03545
367
+ * ConvNeXt-V2 - http://arxiv.org/abs/2301.00808
368
+ * ConViT (Soft Convolutional Inductive Biases Vision Transformers) - https://arxiv.org/abs/2103.10697
369
+ * CspNet (Cross-Stage Partial Networks) - https://arxiv.org/abs/1911.11929
370
+ * DeiT - https://arxiv.org/abs/2012.12877
371
+ * DeiT-III - https://arxiv.org/pdf/2204.07118.pdf
372
+ * DenseNet - https://arxiv.org/abs/1608.06993
373
+ * DLA - https://arxiv.org/abs/1707.06484
374
+ * DPN (Dual-Path Network) - https://arxiv.org/abs/1707.01629
375
+ * EdgeNeXt - https://arxiv.org/abs/2206.10589
376
+ * EfficientFormer - https://arxiv.org/abs/2206.01191
377
+ * EfficientNet (MBConvNet Family)
378
+ * EfficientNet NoisyStudent (B0-B7, L2) - https://arxiv.org/abs/1911.04252
379
+ * EfficientNet AdvProp (B0-B8) - https://arxiv.org/abs/1911.09665
380
+ * EfficientNet (B0-B7) - https://arxiv.org/abs/1905.11946
381
+ * EfficientNet-EdgeTPU (S, M, L) - https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html
382
+ * EfficientNet V2 - https://arxiv.org/abs/2104.00298
383
+ * FBNet-C - https://arxiv.org/abs/1812.03443
384
+ * MixNet - https://arxiv.org/abs/1907.09595
385
+ * MNASNet B1, A1 (Squeeze-Excite), and Small - https://arxiv.org/abs/1807.11626
386
+ * MobileNet-V2 - https://arxiv.org/abs/1801.04381
387
+ * Single-Path NAS - https://arxiv.org/abs/1904.02877
388
+ * TinyNet - https://arxiv.org/abs/2010.14819
389
+ * EfficientViT (MIT) - https://arxiv.org/abs/2205.14756
390
+ * EfficientViT (MSRA) - https://arxiv.org/abs/2305.07027
391
+ * EVA - https://arxiv.org/abs/2211.07636
392
+ * EVA-02 - https://arxiv.org/abs/2303.11331
393
+ * FastViT - https://arxiv.org/abs/2303.14189
394
+ * FlexiViT - https://arxiv.org/abs/2212.08013
395
+ * FocalNet (Focal Modulation Networks) - https://arxiv.org/abs/2203.11926
396
+ * GCViT (Global Context Vision Transformer) - https://arxiv.org/abs/2206.09959
397
+ * GhostNet - https://arxiv.org/abs/1911.11907
398
+ * GhostNet-V2 - https://arxiv.org/abs/2211.12905
399
+ * gMLP - https://arxiv.org/abs/2105.08050
400
+ * GPU-Efficient Networks - https://arxiv.org/abs/2006.14090
401
+ * Halo Nets - https://arxiv.org/abs/2103.12731
402
+ * HGNet / HGNet-V2 - TBD
403
+ * HRNet - https://arxiv.org/abs/1908.07919
404
+ * InceptionNeXt - https://arxiv.org/abs/2303.16900
405
+ * Inception-V3 - https://arxiv.org/abs/1512.00567
406
+ * Inception-ResNet-V2 and Inception-V4 - https://arxiv.org/abs/1602.07261
407
+ * Lambda Networks - https://arxiv.org/abs/2102.08602
408
+ * LeViT (Vision Transformer in ConvNet's Clothing) - https://arxiv.org/abs/2104.01136
409
+ * MambaOut - https://arxiv.org/abs/2405.07992
410
+ * MaxViT (Multi-Axis Vision Transformer) - https://arxiv.org/abs/2204.01697
411
+ * MetaFormer (PoolFormer-v2, ConvFormer, CAFormer) - https://arxiv.org/abs/2210.13452
412
+ * MLP-Mixer - https://arxiv.org/abs/2105.01601
413
+ * MobileCLIP - https://arxiv.org/abs/2311.17049
414
+ * MobileNet-V3 (MBConvNet w/ Efficient Head) - https://arxiv.org/abs/1905.02244
415
+ * FBNet-V3 - https://arxiv.org/abs/2006.02049
416
+ * HardCoRe-NAS - https://arxiv.org/abs/2102.11646
417
+ * LCNet - https://arxiv.org/abs/2109.15099
418
+ * MobileNetV4 - https://arxiv.org/abs/2404.10518
419
+ * MobileOne - https://arxiv.org/abs/2206.04040
420
+ * MobileViT - https://arxiv.org/abs/2110.02178
421
+ * MobileViT-V2 - https://arxiv.org/abs/2206.02680
422
+ * MViT-V2 (Improved Multiscale Vision Transformer) - https://arxiv.org/abs/2112.01526
423
+ * NASNet-A - https://arxiv.org/abs/1707.07012
424
+ * NesT - https://arxiv.org/abs/2105.12723
425
+ * Next-ViT - https://arxiv.org/abs/2207.05501
426
+ * NFNet-F - https://arxiv.org/abs/2102.06171
427
+ * NF-RegNet / NF-ResNet - https://arxiv.org/abs/2101.08692
428
+ * PNasNet - https://arxiv.org/abs/1712.00559
429
+ * PoolFormer (MetaFormer) - https://arxiv.org/abs/2111.11418
430
+ * Pooling-based Vision Transformer (PiT) - https://arxiv.org/abs/2103.16302
431
+ * PVT-V2 (Improved Pyramid Vision Transformer) - https://arxiv.org/abs/2106.13797
432
+ * RDNet (DenseNets Reloaded) - https://arxiv.org/abs/2403.19588
433
+ * RegNet - https://arxiv.org/abs/2003.13678
434
+ * RegNetZ - https://arxiv.org/abs/2103.06877
435
+ * RepVGG - https://arxiv.org/abs/2101.03697
436
+ * RepGhostNet - https://arxiv.org/abs/2211.06088
437
+ * RepViT - https://arxiv.org/abs/2307.09283
438
+ * ResMLP - https://arxiv.org/abs/2105.03404
439
+ * ResNet/ResNeXt
440
+ * ResNet (v1b/v1.5) - https://arxiv.org/abs/1512.03385
441
+ * ResNeXt - https://arxiv.org/abs/1611.05431
442
+ * 'Bag of Tricks' / Gluon C, D, E, S variations - https://arxiv.org/abs/1812.01187
443
+ * Weakly-supervised (WSL) Instagram pretrained / ImageNet tuned ResNeXt101 - https://arxiv.org/abs/1805.00932
444
+ * Semi-supervised (SSL) / Semi-weakly Supervised (SWSL) ResNet/ResNeXts - https://arxiv.org/abs/1905.00546
445
+ * ECA-Net (ECAResNet) - https://arxiv.org/abs/1910.03151v4
446
+ * Squeeze-and-Excitation Networks (SEResNet) - https://arxiv.org/abs/1709.01507
447
+ * ResNet-RS - https://arxiv.org/abs/2103.07579
448
+ * Res2Net - https://arxiv.org/abs/1904.01169
449
+ * ResNeSt - https://arxiv.org/abs/2004.08955
450
+ * ReXNet - https://arxiv.org/abs/2007.00992
451
+ * SelecSLS - https://arxiv.org/abs/1907.00837
452
+ * Selective Kernel Networks - https://arxiv.org/abs/1903.06586
453
+ * Sequencer2D - https://arxiv.org/abs/2205.01972
454
+ * Swin S3 (AutoFormerV2) - https://arxiv.org/abs/2111.14725
455
+ * Swin Transformer - https://arxiv.org/abs/2103.14030
456
+ * Swin Transformer V2 - https://arxiv.org/abs/2111.09883
457
+ * Transformer-iN-Transformer (TNT) - https://arxiv.org/abs/2103.00112
458
+ * TResNet - https://arxiv.org/abs/2003.13630
459
+ * Twins (Spatial Attention in Vision Transformers) - https://arxiv.org/pdf/2104.13840.pdf
460
+ * Visformer - https://arxiv.org/abs/2104.12533
461
+ * Vision Transformer - https://arxiv.org/abs/2010.11929
462
+ * ViTamin - https://arxiv.org/abs/2404.02132
463
+ * VOLO (Vision Outlooker) - https://arxiv.org/abs/2106.13112
464
+ * VovNet V2 and V1 - https://arxiv.org/abs/1911.06667
465
+ * Xception - https://arxiv.org/abs/1610.02357
466
+ * Xception (Modified Aligned, Gluon) - https://arxiv.org/abs/1802.02611
467
+ * Xception (Modified Aligned, TF) - https://arxiv.org/abs/1802.02611
468
+ * XCiT (Cross-Covariance Image Transformers) - https://arxiv.org/abs/2106.09681
469
+
470
+ ### Optimizers
471
+ To see full list of optimizers w/ descriptions: `timm.optim.list_optimizers(with_description=True)`
472
+
473
+ Included optimizers available via the `timm.optim.create_optimizer_v2` factory method (a usage sketch follows the list below):
474
+ * `adabelief` an implementation of AdaBelief adapted from https://github.com/juntang-zhuang/Adabelief-Optimizer - https://arxiv.org/abs/2010.07468
475
+ * `adafactor` adapted from [FAIRSeq impl](https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py) - https://arxiv.org/abs/1804.04235
476
+ * `adafactorbv` adapted from [Big Vision](https://github.com/google-research/big_vision/blob/main/big_vision/optax.py) - https://arxiv.org/abs/2106.04560
477
+ * `adahessian` by [David Samuel](https://github.com/davda54/ada-hessian) - https://arxiv.org/abs/2006.00719
478
+ * `adamp` and `sgdp` by [Naver ClovAI](https://github.com/clovaai) - https://arxiv.org/abs/2006.08217
479
+ * `adan` an implementation of Adan adapted from https://github.com/sail-sg/Adan - https://arxiv.org/abs/2208.06677
480
+ * `adopt` ADOPT adapted from https://github.com/iShohei220/adopt - https://arxiv.org/abs/2411.02853
481
+ * `lamb` an implementation of Lamb and LambC (w/ trust-clipping) cleaned up and modified to support use with XLA - https://arxiv.org/abs/1904.00962
482
+ * `laprop` optimizer from https://github.com/Z-T-WANG/LaProp-Optimizer - https://arxiv.org/abs/2002.04839
483
+ * `lars` an implementation of LARS and LARC (w/ trust-clipping) - https://arxiv.org/abs/1708.03888
484
+ * `lion` an implementation of Lion adapted from https://github.com/google/automl/tree/master/lion - https://arxiv.org/abs/2302.06675
485
+ * `lookahead` adapted from impl by [Liam](https://github.com/alphadl/lookahead.pytorch) - https://arxiv.org/abs/1907.08610
486
+ * `madgrad` an implementation of MADGRAD adapted from https://github.com/facebookresearch/madgrad - https://arxiv.org/abs/2101.11075
487
+ * `mars` MARS optimizer from https://github.com/AGI-Arena/MARS - https://arxiv.org/abs/2411.10438
488
+ * `nadam` an implementation of Adam w/ Nesterov momentum
489
+ * `nadamw` an implementation of AdamW (Adam w/ decoupled weight-decay) w/ Nesterov momentum. A simplified impl based on https://github.com/mlcommons/algorithmic-efficiency
490
+ * `novograd` by [Masashi Kimura](https://github.com/convergence-lab/novograd) - https://arxiv.org/abs/1905.11286
491
+ * `radam` by [Liyuan Liu](https://github.com/LiyuanLucasLiu/RAdam) - https://arxiv.org/abs/1908.03265
492
+ * `rmsprop_tf` adapted from PyTorch RMSProp by myself. Reproduces much improved Tensorflow RMSProp behaviour
493
+ * `sgdw` an implementation of SGD w/ decoupled weight-decay
494
+ * `fused<name>` optimizers by name with [NVIDIA Apex](https://github.com/NVIDIA/apex/tree/master/apex/optimizers) installed
495
+ * `bnb<name>` optimizers by name with [BitsAndBytes](https://github.com/TimDettmers/bitsandbytes) installed
496
+ * `cadamw`, `clion`, and more 'Cautious' optimizers from https://github.com/kyleliang919/C-Optim - https://arxiv.org/abs/2411.16085
497
+ * `adam`, `adamw`, `rmsprop`, `adadelta`, `adagrad`, and `sgd` pass through to `torch.optim` implementations
498
+
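+ A minimal sketch of the optimizer factory; the learning rate and weight decay values are illustrative assumptions, not recommendations:
+ 
+ ```python
+ import timm
+ import timm.optim
+ 
+ # list available optimizer names (with_description=True pairs each name with a short blurb)
+ for entry in timm.optim.list_optimizers(with_description=True)[:5]:
+     print(entry)
+ 
+ # create an optimizer for a model via the factory; the string name selects the implementation
+ model = timm.create_model('resnet50')
+ optimizer = timm.optim.create_optimizer_v2(model, opt='adamw', lr=1e-3, weight_decay=0.05)
+ ```
+ 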
499
+ ### Augmentations
500
+ * Random Erasing from [Zhun Zhong](https://github.com/zhunzhong07/Random-Erasing/blob/master/transforms.py) - https://arxiv.org/abs/1708.04896
501
+ * Mixup - https://arxiv.org/abs/1710.09412
502
+ * CutMix - https://arxiv.org/abs/1905.04899
503
+ * AutoAugment (https://arxiv.org/abs/1805.09501) and RandAugment (https://arxiv.org/abs/1909.13719) ImageNet configurations modeled after impl for EfficientNet training (https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py)
504
+ * AugMix w/ JSD loss, JSD w/ clean + augmented mixing support works with AutoAugment and RandAugment as well - https://arxiv.org/abs/1912.02781
505
+ * SplitBatchNorm - allows splitting batch norm layers between clean and augmented (auxiliary batch norm) data
506
+
507
+ ### Regularization
508
+ * DropPath aka "Stochastic Depth" - https://arxiv.org/abs/1603.09382
509
+ * DropBlock - https://arxiv.org/abs/1810.12890
510
+ * Blur Pooling - https://arxiv.org/abs/1904.11486
511
+
512
+ ### Other
513
+
514
+ Several (less common) features that I often utilize in my projects are included. Many of their additions are the reason why I maintain my own set of models, instead of using others' via PIP:
515
+
516
+ * All models have a common default configuration interface and API for
517
+ * accessing/changing the classifier - `get_classifier` and `reset_classifier`
518
+ * doing a forward pass on just the features - `forward_features` (see [documentation](https://huggingface.co/docs/timm/feature_extraction))
519
+ * these make it easy to write consistent network wrappers that work with any of the models
520
+ * All models support multi-scale feature map extraction (feature pyramids) via create_model (see [documentation](https://huggingface.co/docs/timm/feature_extraction)); a short sketch follows this feature list
521
+ * `create_model(name, features_only=True, out_indices=..., output_stride=...)`
522
+ * `out_indices` creation arg specifies which feature maps to return; these indices are 0-based and generally correspond to the `C(i + 1)` feature level.
523
+ * `output_stride` creation arg controls output stride of the network by using dilated convolutions. Most networks are stride 32 by default. Not all networks support this.
524
+ * feature map channel counts, reduction level (stride) can be queried AFTER model creation via the `.feature_info` member
525
+ * All models have a consistent pretrained weight loader that adapts the last linear layer if necessary, and converts 3-channel input weights to 1-channel if desired
526
+ * High performance [reference training, validation, and inference scripts](https://huggingface.co/docs/timm/training_script) that work in several process/GPU modes:
527
+ * NVIDIA DDP w/ a single GPU per process, multiple processes with APEX present (AMP mixed-precision optional)
528
+ * PyTorch DistributedDataParallel w/ multi-gpu, single process (AMP disabled as it crashes when enabled)
529
+ * PyTorch w/ single GPU single process (AMP optional)
530
+ * A dynamic global pool implementation that allows selecting from average pooling, max pooling, average + max, or concat([average, max]) at model creation. All global pooling is adaptive average by default and compatible with pretrained weights.
531
+ * A 'Test Time Pool' wrapper that can wrap any of the included models and usually provides improved performance doing inference with input images larger than the training size. Idea adapted from the original DPN implementation when I ported it (https://github.com/cypw/DPNs)
532
+ * Learning rate schedulers
533
+ * Ideas adopted from
534
+ * [AllenNLP schedulers](https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers)
535
+ * [FAIRseq lr_scheduler](https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler)
536
+ * SGDR: Stochastic Gradient Descent with Warm Restarts (https://arxiv.org/abs/1608.03983)
537
+ * Schedulers include `step`, `cosine` w/ restarts, `tanh` w/ restarts, `plateau`
538
+ * Space-to-Depth by [mrT23](https://github.com/mrT23/TResNet/blob/master/src/models/tresnet/layers/space_to_depth.py) (https://arxiv.org/abs/1801.04590) -- original paper?
539
+ * Adaptive Gradient Clipping (https://arxiv.org/abs/2102.06171, https://github.com/deepmind/deepmind-research/tree/master/nfnets)
540
+ * An extensive selection of channel and/or spatial attention modules:
541
+ * Bottleneck Transformer - https://arxiv.org/abs/2101.11605
542
+ * CBAM - https://arxiv.org/abs/1807.06521
543
+ * Effective Squeeze-Excitation (ESE) - https://arxiv.org/abs/1911.06667
544
+ * Efficient Channel Attention (ECA) - https://arxiv.org/abs/1910.03151
545
+ * Gather-Excite (GE) - https://arxiv.org/abs/1810.12348
546
+ * Global Context (GC) - https://arxiv.org/abs/1904.11492
547
+ * Halo - https://arxiv.org/abs/2103.12731
548
+ * Involution - https://arxiv.org/abs/2103.06255
549
+ * Lambda Layer - https://arxiv.org/abs/2102.08602
550
+ * Non-Local (NL) - https://arxiv.org/abs/1711.07971
551
+ * Squeeze-and-Excitation (SE) - https://arxiv.org/abs/1709.01507
552
+ * Selective Kernel (SK) - https://arxiv.org/abs/1903.06586
553
+ * Split (SPLAT) - https://arxiv.org/abs/2004.08955
554
+ * Shifted Window (SWIN) - https://arxiv.org/abs/2103.14030
555
+
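+ A minimal sketch of the feature extraction interface described above (model name and indices are illustrative):
+ 
+ ```python
+ import torch
+ import timm
+ 
+ # multi-scale feature maps via create_model; out_indices picks pyramid levels and
+ # output_stride=16 uses dilated convs in the final stage (supported by most ConvNets)
+ m = timm.create_model('resnet50', features_only=True, out_indices=(1, 2, 3, 4), output_stride=16)
+ feats = m(torch.randn(1, 3, 224, 224))
+ print(m.feature_info.channels())   # channel count of each returned feature map
+ print(m.feature_info.reduction())  # stride / reduction of each returned feature map
+ for f in feats:
+     print(tuple(f.shape))
+ ```
+ 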
556
+ ## Results
557
+
558
+ Model validation results can be found in the [results tables](results/README.md)
559
+
560
+ ## Getting Started (Documentation)
561
+
562
+ The official documentation can be found at https://huggingface.co/docs/hub/timm. Documentation contributions are welcome.
563
+
564
+ [Getting Started with PyTorch Image Models (timm): A Practitioner’s Guide](https://towardsdatascience.com/getting-started-with-pytorch-image-models-timm-a-practitioners-guide-4e77b4bf9055) by [Chris Hughes](https://github.com/Chris-hughes10) is an extensive blog post covering many aspects of `timm` in detail.
565
+
566
+ [timmdocs](http://timm.fast.ai/) is an alternate set of documentation for `timm`. A big thanks to [Aman Arora](https://github.com/amaarora) for his efforts creating timmdocs.
567
+
568
+ [paperswithcode](https://paperswithcode.com/lib/timm) is a good resource for browsing the models within `timm`.
569
+
570
+ ## Train, Validation, Inference Scripts
571
+
572
+ The root folder of the repository contains reference train, validation, and inference scripts that work with the included models and other features of this repository. They are adaptable for other datasets and use cases with a little hacking. See [documentation](https://huggingface.co/docs/timm/training_script).
573
+
574
+ ## Awesome PyTorch Resources
575
+
576
+ One of the greatest assets of PyTorch is the community and their contributions. A few of my favourite resources that pair well with the models and components here are listed below.
577
+
578
+ ### Object Detection, Instance and Semantic Segmentation
579
+ * Detectron2 - https://github.com/facebookresearch/detectron2
580
+ * Segmentation Models (Semantic) - https://github.com/qubvel/segmentation_models.pytorch
581
+ * EfficientDet (Obj Det, Semantic soon) - https://github.com/rwightman/efficientdet-pytorch
582
+
583
+ ### Computer Vision / Image Augmentation
584
+ * Albumentations - https://github.com/albumentations-team/albumentations
585
+ * Kornia - https://github.com/kornia/kornia
586
+
587
+ ### Knowledge Distillation
588
+ * RepDistiller - https://github.com/HobbitLong/RepDistiller
589
+ * torchdistill - https://github.com/yoshitomo-matsubara/torchdistill
590
+
591
+ ### Metric Learning
592
+ * PyTorch Metric Learning - https://github.com/KevinMusgrave/pytorch-metric-learning
593
+
594
+ ### Training / Frameworks
595
+ * fastai - https://github.com/fastai/fastai
596
+
597
+ ## Licenses
598
+
599
+ ### Code
600
+ The code here is licensed Apache 2.0. I've taken care to make sure any third party code included or adapted has compatible (permissive) licenses such as MIT, BSD, etc. I've made an effort to avoid any GPL / LGPL conflicts. That said, it is your responsibility to ensure you comply with licenses here and conditions of any dependent licenses. Where applicable, I've linked the sources/references for various components in docstrings. If you think I've missed anything please create an issue.
601
+
602
+ ### Pretrained Weights
603
+ So far all of the pretrained weights available here are pretrained on ImageNet with a select few that have some additional pretraining (see extra note below). ImageNet was released for non-commercial research purposes only (https://image-net.org/download). It's not clear what the implications of that are for the use of pretrained weights from that dataset. Any models I have trained with ImageNet are done for research purposes and one should assume that the original dataset license applies to the weights. It's best to seek legal advice if you intend to use the pretrained weights in a commercial product.
604
+
605
+ #### Pretrained on more than ImageNet
606
+ Several weights included or references here were pretrained with proprietary datasets that I do not have access to. These include the Facebook WSL, SSL, SWSL ResNe(Xt) and the Google Noisy Student EfficientNet models. The Facebook models have an explicit non-commercial license (CC-BY-NC 4.0, https://github.com/facebookresearch/semi-supervised-ImageNet1K-models, https://github.com/facebookresearch/WSL-Images). The Google models do not appear to have any restriction beyond the Apache 2.0 license (and ImageNet concerns). In either case, you should contact Facebook or Google with any questions.
607
+
608
+ ## Citing
609
+
610
+ ### BibTeX
611
+
612
+ ```bibtex
613
+ @misc{rw2019timm,
614
+ author = {Ross Wightman},
615
+ title = {PyTorch Image Models},
616
+ year = {2019},
617
+ publisher = {GitHub},
618
+ journal = {GitHub repository},
619
+ doi = {10.5281/zenodo.4414861},
620
+ howpublished = {\url{https://github.com/rwightman/pytorch-image-models}}
621
+ }
622
+ ```
623
+
624
+ ### Latest DOI
625
+
626
+ [![DOI](https://zenodo.org/badge/168799526.svg)](https://zenodo.org/badge/latestdoi/168799526)
pytorch-image-models/UPGRADING.md ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Upgrading from previous versions
2
+
3
+ I generally try to maintain code interface and especially model weight compatibility across many `timm` versions. Sometimes there are exceptions.
4
+
5
+ ## Checkpoint remapping
6
+
7
+ Pretrained weight remapping is handled by `checkpoint_filter_fn` in a model implementation module. This remaps old pretrained checkpoints to new, and also 3rd party (original) checkpoints to `timm` format if the model was modified when brought into `timm`.
8
+
9
+ The `checkpoint_filter_fn` is automatically called when loading pretrained weights via `pretrained=True`, but it can also be called manually by passing the current model instance and the old state dict directly to the fn, as in the sketch below.
10
+
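+ A minimal sketch of calling a filter fn while loading (the checkpoint path and model name below are placeholders):
+ 
+ ```python
+ import timm
+ from timm.models import load_checkpoint
+ from timm.models.swin_transformer_v2 import checkpoint_filter_fn
+ 
+ # remap an older / original-format checkpoint into the current timm layout while loading
+ model = timm.create_model('swinv2_tiny_window8_256')
+ load_checkpoint(model, 'old_swinv2_checkpoint.pth', filter_fn=checkpoint_filter_fn)
+ ```
+ 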
11
+ ## Upgrading from 0.6 and earlier
12
+
13
+ Many changes were made since the 0.6.x stable releases. They were previewed in 0.8.x dev releases but not everyone transitioned.
14
+ * `timm.models.layers` moved to `timm.layers`:
15
+ * `from timm.models.layers import name` will still work via deprecation mapping (but please transition to `timm.layers`).
16
+ * `import timm.models.layers.module` or `from timm.models.layers.module import name` needs to be changed now.
17
+ * Builder, helper, and non-model modules in `timm.models` have a `_` prefix added, i.e. `timm.models.helpers` -> `timm.models._helpers`. There are temporary deprecation mapping files, but those will be removed.
18
+ * All models now support `architecture.pretrained_tag` naming (ex `resnet50.rsb_a1`).
19
+ * The pretrained_tag is the specific weight variant (different head) for the architecture.
20
+ * Using only `architecture` defaults to the first weights in the default_cfgs for that model architecture.
21
+ * In adding pretrained tags, many model names that existed to differentiate were renamed to use the tag (ex: `vit_base_patch16_224_in21k` -> `vit_base_patch16_224.augreg_in21k`). There are deprecation mappings for these.
22
+ * A number of models had their checkpoints remapped to match architecture changes needed to better support `features_only=True`; there are `checkpoint_filter_fn` methods in any model module that was remapped. These can be passed to `timm.models.load_checkpoint(..., filter_fn=timm.models.swin_transformer_v2.checkpoint_filter_fn)` to remap your existing checkpoint.
23
+ * The Hugging Face Hub (https://huggingface.co/timm) is now the primary source for `timm` weights. Model cards include links to papers, original source, and license.
24
+ * Previous 0.6.x can be cloned from the [0.6.x](https://github.com/rwightman/pytorch-image-models/tree/0.6.x) branch or installed via pip with a pinned version.
pytorch-image-models/avg_checkpoints.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """ Checkpoint Averaging Script
3
+
4
+ This script averages all model weights for checkpoints in specified path that match
5
+ the specified filter wildcard. All checkpoints must be from the exact same model.
6
+
7
+ For any hope of decent results, the checkpoints should be from the same or child
8
+ (via resumes) training session. This can be viewed as similar to maintaining running
9
+ EMA (exponential moving average) of the model weights or performing SWA (stochastic
10
+ weight averaging), but post-training.
11
+
12
+ Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
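+ 
+ Example usage (paths and the wildcard below are placeholders, not defaults):
+ ./avg_checkpoints.py --input ./output/train/session --filter 'checkpoint-*.pth.tar' -n 5 --output ./averaged.pth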
13
+ """
14
+ import torch
15
+ import argparse
16
+ import os
17
+ import glob
18
+ import hashlib
19
+ from timm.models import load_state_dict
20
+ try:
21
+ import safetensors.torch
22
+ _has_safetensors = True
23
+ except ImportError:
24
+ _has_safetensors = False
25
+
26
+ DEFAULT_OUTPUT = "./averaged.pth"
27
+ DEFAULT_SAFE_OUTPUT = "./averaged.safetensors"
28
+
29
+ parser = argparse.ArgumentParser(description='PyTorch Checkpoint Averager')
30
+ parser.add_argument('--input', default='', type=str, metavar='PATH',
31
+ help='path to base input folder containing checkpoints')
32
+ parser.add_argument('--filter', default='*.pth.tar', type=str, metavar='WILDCARD',
33
+ help='checkpoint filter (path wildcard)')
34
+ parser.add_argument('--output', default=DEFAULT_OUTPUT, type=str, metavar='PATH',
35
+ help=f'Output filename. Defaults to {DEFAULT_SAFE_OUTPUT} when passing --safetensors.')
36
+ parser.add_argument('--no-use-ema', dest='no_use_ema', action='store_true',
37
+ help='Force not using ema version of weights (if present)')
38
+ parser.add_argument('--no-sort', dest='no_sort', action='store_true',
39
+ help='Do not sort and select by checkpoint metric, also makes "n" argument irrelevant')
40
+ parser.add_argument('-n', type=int, default=10, metavar='N',
41
+ help='Number of checkpoints to average')
42
+ parser.add_argument('--safetensors', action='store_true',
43
+ help='Save weights using safetensors instead of the default torch way (pickle).')
44
+
45
+
46
+ def checkpoint_metric(checkpoint_path):
47
+ if not checkpoint_path or not os.path.isfile(checkpoint_path):
48
+ return {}
49
+ print("=> Extracting metric from checkpoint '{}'".format(checkpoint_path))
50
+ checkpoint = torch.load(checkpoint_path, map_location='cpu')
51
+ metric = None
52
+ if 'metric' in checkpoint:
53
+ metric = checkpoint['metric']
54
+ elif 'metrics' in checkpoint and 'metric_name' in checkpoint:
55
+ metrics = checkpoint['metrics']
56
+ print(metrics)
57
+ metric = metrics[checkpoint['metric_name']]
58
+ return metric
59
+
60
+
61
+ def main():
62
+ args = parser.parse_args()
63
+ # by default use the EMA weights (if present)
64
+ args.use_ema = not args.no_use_ema
65
+ # by default sort by checkpoint metric (if present) and avg top n checkpoints
66
+ args.sort = not args.no_sort
67
+
68
+ if args.safetensors and args.output == DEFAULT_OUTPUT:
69
+ # Default path changes if using safetensors
70
+ args.output = DEFAULT_SAFE_OUTPUT
71
+
72
+ output, output_ext = os.path.splitext(args.output)
73
+ if not output_ext:
74
+ output_ext = ('.safetensors' if args.safetensors else '.pth')
75
+ output = output + output_ext
76
+
77
+ if args.safetensors and not output_ext == ".safetensors":
78
+ print(
79
+ "Warning: saving weights as safetensors but output file extension is not "
80
+ f"set to '.safetensors': {args.output}"
81
+ )
82
+
83
+ if os.path.exists(output):
84
+ print("Error: Output filename ({}) already exists.".format(output))
85
+ exit(1)
86
+
87
+ pattern = args.input
88
+ if not args.input.endswith(os.path.sep) and not args.filter.startswith(os.path.sep):
89
+ pattern += os.path.sep
90
+ pattern += args.filter
91
+ checkpoints = glob.glob(pattern, recursive=True)
92
+
93
+ if args.sort:
94
+ checkpoint_metrics = []
95
+ for c in checkpoints:
96
+ metric = checkpoint_metric(c)
97
+ if metric is not None:
98
+ checkpoint_metrics.append((metric, c))
99
+ checkpoint_metrics = list(sorted(checkpoint_metrics))
100
+ checkpoint_metrics = checkpoint_metrics[-args.n:]
101
+ if checkpoint_metrics:
102
+ print("Selected checkpoints:")
103
+ [print(m, c) for m, c in checkpoint_metrics]
104
+ avg_checkpoints = [c for m, c in checkpoint_metrics]
105
+ else:
106
+ avg_checkpoints = checkpoints
107
+ if avg_checkpoints:
108
+ print("Selected checkpoints:")
109
+ [print(c) for c in checkpoints]
110
+
111
+ if not avg_checkpoints:
112
+ print('Error: No checkpoints found to average.')
113
+ exit(1)
114
+
115
+ avg_state_dict = {}
116
+ avg_counts = {}
117
+ for c in avg_checkpoints:
118
+ new_state_dict = load_state_dict(c, args.use_ema)
119
+ if not new_state_dict:
120
+ print(f"Error: Checkpoint ({c}) doesn't exist")
121
+ continue
122
+ for k, v in new_state_dict.items():
123
+ if k not in avg_state_dict:
124
+ avg_state_dict[k] = v.clone().to(dtype=torch.float64)
125
+ avg_counts[k] = 1
126
+ else:
127
+ avg_state_dict[k] += v.to(dtype=torch.float64)
128
+ avg_counts[k] += 1
129
+
130
+ for k, v in avg_state_dict.items():
131
+ v.div_(avg_counts[k])
132
+
133
+ # float32 overflow seems unlikely based on weights seen to date, but who knows
134
+ float32_info = torch.finfo(torch.float32)
135
+ final_state_dict = {}
136
+ for k, v in avg_state_dict.items():
137
+ v = v.clamp(float32_info.min, float32_info.max)
138
+ final_state_dict[k] = v.to(dtype=torch.float32)
139
+
140
+ if args.safetensors:
141
+ assert _has_safetensors, "`pip install safetensors` to use .safetensors"
142
+ safetensors.torch.save_file(final_state_dict, output)
143
+ else:
144
+ torch.save(final_state_dict, output)
145
+
146
+ with open(output, 'rb') as f:
147
+ sha_hash = hashlib.sha256(f.read()).hexdigest()
148
+ print(f"=> Saved state_dict to '{output}, SHA256: {sha_hash}'")
149
+
150
+
151
+ if __name__ == '__main__':
152
+ main()
pytorch-image-models/benchmark.py ADDED
@@ -0,0 +1,699 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """ Model Benchmark Script
3
+
4
+ An inference and train step benchmark script for timm models.
5
+
6
+ Hacked together by Ross Wightman (https://github.com/rwightman)
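+ 
+ Example usage (model name and flag values below are illustrative):
+ python benchmark.py --model resnet50 --bench inference --batch-size 128 --amp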
7
+ """
8
+ import argparse
9
+ import csv
10
+ import json
11
+ import logging
12
+ import time
13
+ from collections import OrderedDict
14
+ from contextlib import suppress
15
+ from functools import partial
16
+
17
+ import torch
18
+ import torch.nn as nn
19
+ import torch.nn.parallel
20
+
21
+ from timm.data import resolve_data_config
22
+ from timm.layers import set_fast_norm
23
+ from timm.models import create_model, is_model, list_models
24
+ from timm.optim import create_optimizer_v2
25
+ from timm.utils import setup_default_logging, set_jit_fuser, decay_batch_step, check_batch_size_retry, ParseKwargs,\
26
+ reparameterize_model
27
+
28
+ has_apex = False
29
+ try:
30
+ from apex import amp
31
+ has_apex = True
32
+ except ImportError:
33
+ pass
34
+
35
+ try:
36
+ from deepspeed.profiling.flops_profiler import get_model_profile
37
+ has_deepspeed_profiling = True
38
+ except ImportError as e:
39
+ has_deepspeed_profiling = False
40
+
41
+ try:
42
+ from fvcore.nn import FlopCountAnalysis, flop_count_str, ActivationCountAnalysis
43
+ has_fvcore_profiling = True
44
+ except ImportError as e:
45
+ FlopCountAnalysis = None
46
+ has_fvcore_profiling = False
47
+
48
+ try:
49
+ from functorch.compile import memory_efficient_fusion
50
+ has_functorch = True
51
+ except ImportError as e:
52
+ has_functorch = False
53
+
54
+ has_compile = hasattr(torch, 'compile')
55
+
56
+ if torch.cuda.is_available():
57
+ torch.backends.cuda.matmul.allow_tf32 = True
58
+ torch.backends.cudnn.benchmark = True
59
+ _logger = logging.getLogger('validate')
60
+
61
+
62
+ parser = argparse.ArgumentParser(description='PyTorch Benchmark')
63
+
64
+ # benchmark specific args
65
+ parser.add_argument('--model-list', metavar='NAME', default='',
66
+ help='txt file based list of model names to benchmark')
67
+ parser.add_argument('--bench', default='both', type=str,
68
+ help="Benchmark mode. One of 'inference', 'train', 'both'. Defaults to 'both'")
69
+ parser.add_argument('--detail', action='store_true', default=False,
70
+ help='Provide train fwd/bwd/opt breakdown detail if True. Defaults to False')
71
+ parser.add_argument('--no-retry', action='store_true', default=False,
72
+ help='Do not decay batch size and retry on error.')
73
+ parser.add_argument('--results-file', default='', type=str,
74
+ help='Output csv file for validation results (summary)')
75
+ parser.add_argument('--results-format', default='csv', type=str,
76
+ help='Format for results file one of (csv, json) (default: csv).')
77
+ parser.add_argument('--num-warm-iter', default=10, type=int,
78
+ help='Number of warmup iterations (default: 10)')
79
+ parser.add_argument('--num-bench-iter', default=40, type=int,
80
+ help='Number of benchmark iterations (default: 40)')
81
+ parser.add_argument('--device', default='cuda', type=str,
82
+ help="device to run benchmark on")
83
+
84
+ # common inference / train args
85
+ parser.add_argument('--model', '-m', metavar='NAME', default='resnet50',
86
+ help='model architecture (default: resnet50)')
87
+ parser.add_argument('-b', '--batch-size', default=256, type=int,
88
+ metavar='N', help='mini-batch size (default: 256)')
89
+ parser.add_argument('--img-size', default=None, type=int,
90
+ metavar='N', help='Input image dimension, uses model default if empty')
91
+ parser.add_argument('--input-size', default=None, nargs=3, type=int,
92
+ metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
93
+ parser.add_argument('--use-train-size', action='store_true', default=False,
94
+ help='Run inference at train size, not test-input-size if it exists.')
95
+ parser.add_argument('--num-classes', type=int, default=None,
96
+ help='Number classes in dataset')
97
+ parser.add_argument('--gp', default=None, type=str, metavar='POOL',
98
+ help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
99
+ parser.add_argument('--channels-last', action='store_true', default=False,
100
+ help='Use channels_last memory layout')
101
+ parser.add_argument('--grad-checkpointing', action='store_true', default=False,
102
+ help='Enable gradient checkpointing through model blocks/stages')
103
+ parser.add_argument('--amp', action='store_true', default=False,
104
+ help='use PyTorch Native AMP for mixed precision training. Overrides --precision arg.')
105
+ parser.add_argument('--amp-dtype', default='float16', type=str,
106
+ help='lower precision AMP dtype (default: float16). Overrides --precision arg if args.amp True.')
107
+ parser.add_argument('--precision', default='float32', type=str,
108
+ help='Numeric precision. One of (amp, float32, float16, bfloat16, tf32)')
109
+ parser.add_argument('--fuser', default='', type=str,
110
+ help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')")
111
+ parser.add_argument('--fast-norm', default=False, action='store_true',
112
+ help='enable experimental fast-norm')
113
+ parser.add_argument('--reparam', default=False, action='store_true',
114
+ help='Reparameterize model')
115
+ parser.add_argument('--model-kwargs', nargs='*', default={}, action=ParseKwargs)
116
+ parser.add_argument('--torchcompile-mode', type=str, default=None,
117
+ help="torch.compile mode (default: None).")
118
+
119
+ # codegen (model compilation) options
120
+ scripting_group = parser.add_mutually_exclusive_group()
121
+ scripting_group.add_argument('--torchscript', dest='torchscript', action='store_true',
122
+ help='convert model torchscript for inference')
123
+ scripting_group.add_argument('--torchcompile', nargs='?', type=str, default=None, const='inductor',
124
+ help="Enable compilation w/ specified backend (default: inductor).")
125
+ scripting_group.add_argument('--aot-autograd', default=False, action='store_true',
126
+ help="Enable AOT Autograd optimization.")
127
+
128
+ # train optimizer parameters
129
+ parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',
130
+ help='Optimizer (default: "sgd")')
131
+ parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON',
132
+ help='Optimizer Epsilon (default: None, use opt default)')
133
+ parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
134
+ help='Optimizer Betas (default: None, use opt default)')
135
+ parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
136
+ help='Optimizer momentum (default: 0.9)')
137
+ parser.add_argument('--weight-decay', type=float, default=0.0001,
138
+ help='weight decay (default: 0.0001)')
139
+ parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
140
+ help='Clip gradient norm (default: None, no clipping)')
141
+ parser.add_argument('--clip-mode', type=str, default='norm',
142
+ help='Gradient clipping mode. One of ("norm", "value", "agc")')
143
+
144
+
145
+ # model regularization / loss params that impact model or loss fn
146
+ parser.add_argument('--smoothing', type=float, default=0.1,
147
+ help='Label smoothing (default: 0.1)')
148
+ parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
149
+ help='Dropout rate (default: 0.)')
150
+ parser.add_argument('--drop-path', type=float, default=None, metavar='PCT',
151
+ help='Drop path rate (default: None)')
152
+ parser.add_argument('--drop-block', type=float, default=None, metavar='PCT',
153
+ help='Drop block rate (default: None)')
154
+
155
+
156
+ def timestamp(sync=False):
157
+ return time.perf_counter()
158
+
159
+
160
+ def cuda_timestamp(sync=False, device=None):
161
+ if sync:
162
+ torch.cuda.synchronize(device=device)
163
+ return time.perf_counter()
164
+
165
+
166
+ def count_params(model: nn.Module):
167
+ return sum([m.numel() for m in model.parameters()])
168
+
169
+
170
+ def resolve_precision(precision: str):
171
+ assert precision in ('amp', 'amp_bfloat16', 'float16', 'bfloat16', 'float32')
172
+ amp_dtype = None # amp disabled
173
+ model_dtype = torch.float32
174
+ data_dtype = torch.float32
175
+ if precision == 'amp':
176
+ amp_dtype = torch.float16
177
+ elif precision == 'amp_bfloat16':
178
+ amp_dtype = torch.bfloat16
179
+ elif precision == 'float16':
180
+ model_dtype = torch.float16
181
+ data_dtype = torch.float16
182
+ elif precision == 'bfloat16':
183
+ model_dtype = torch.bfloat16
184
+ data_dtype = torch.bfloat16
185
+ return amp_dtype, model_dtype, data_dtype
186
+
187
+
188
+ def profile_deepspeed(model, input_size=(3, 224, 224), batch_size=1, detailed=False):
189
+ _, macs, _ = get_model_profile(
190
+ model=model,
191
+ input_shape=(batch_size,) + input_size, # input shape/resolution
192
+ print_profile=detailed, # prints the model graph with the measured profile attached to each module
193
+ detailed=detailed, # print the detailed profile
194
+ warm_up=10, # the number of warm-ups before measuring the time of each module
195
+ as_string=False, # print raw numbers (e.g. 1000) or as human-readable strings (e.g. 1k)
196
+ output_file=None, # path to the output file. If None, the profiler prints to stdout.
197
+ ignore_modules=None) # the list of modules to ignore in the profiling
198
+ return macs, 0 # no activation count in DS
199
+
200
+
201
+ def profile_fvcore(model, input_size=(3, 224, 224), batch_size=1, detailed=False, force_cpu=False):
202
+ if force_cpu:
203
+ model = model.to('cpu')
204
+ device, dtype = next(model.parameters()).device, next(model.parameters()).dtype
205
+ example_input = torch.ones((batch_size,) + input_size, device=device, dtype=dtype)
206
+ fca = FlopCountAnalysis(model, example_input)
207
+ aca = ActivationCountAnalysis(model, example_input)
208
+ if detailed:
209
+ fcs = flop_count_str(fca)
210
+ print(fcs)
211
+ return fca.total(), aca.total()
212
+
213
+
214
+ class BenchmarkRunner:
215
+ def __init__(
216
+ self,
217
+ model_name,
218
+ detail=False,
219
+ device='cuda',
220
+ torchscript=False,
221
+ torchcompile=None,
222
+ torchcompile_mode=None,
223
+ aot_autograd=False,
224
+ reparam=False,
225
+ precision='float32',
226
+ fuser='',
227
+ num_warm_iter=10,
228
+ num_bench_iter=50,
229
+ use_train_size=False,
230
+ **kwargs
231
+ ):
232
+ self.model_name = model_name
233
+ self.detail = detail
234
+ self.device = device
235
+ self.amp_dtype, self.model_dtype, self.data_dtype = resolve_precision(precision)
236
+ self.channels_last = kwargs.pop('channels_last', False)
237
+ if self.amp_dtype is not None:
238
+ self.amp_autocast = partial(torch.amp.autocast, device_type=device, dtype=self.amp_dtype)
239
+ else:
240
+ self.amp_autocast = suppress
241
+
242
+ if fuser:
243
+ set_jit_fuser(fuser)
244
+ self.model = create_model(
245
+ model_name,
246
+ num_classes=kwargs.pop('num_classes', None),
247
+ in_chans=3,
248
+ global_pool=kwargs.pop('gp', 'fast'),
249
+ scriptable=torchscript,
250
+ drop_rate=kwargs.pop('drop', 0.),
251
+ drop_path_rate=kwargs.pop('drop_path', None),
252
+ drop_block_rate=kwargs.pop('drop_block', None),
253
+ **kwargs.pop('model_kwargs', {}),
254
+ )
255
+ if reparam:
256
+ self.model = reparameterize_model(self.model)
257
+ self.model.to(
258
+ device=self.device,
259
+ dtype=self.model_dtype,
260
+ memory_format=torch.channels_last if self.channels_last else None,
261
+ )
262
+ self.num_classes = self.model.num_classes
263
+ self.param_count = count_params(self.model)
264
+ _logger.info('Model %s created, param count: %d' % (model_name, self.param_count))
265
+
266
+ data_config = resolve_data_config(kwargs, model=self.model, use_test_size=not use_train_size)
267
+ self.input_size = data_config['input_size']
268
+ self.batch_size = kwargs.pop('batch_size', 256)
269
+
270
+ self.compiled = False
271
+ if torchscript:
272
+ self.model = torch.jit.script(self.model)
273
+ self.compiled = True
274
+ elif torchcompile:
275
+ assert has_compile, 'A version of torch w/ torch.compile() is required, possibly a nightly.'
276
+ torch._dynamo.reset()
277
+ self.model = torch.compile(self.model, backend=torchcompile, mode=torchcompile_mode)
278
+ self.compiled = True
279
+ elif aot_autograd:
280
+ assert has_functorch, "functorch is needed for --aot-autograd"
281
+ self.model = memory_efficient_fusion(self.model)
282
+ self.compiled = True
283
+
284
+ self.example_inputs = None
285
+ self.num_warm_iter = num_warm_iter
286
+ self.num_bench_iter = num_bench_iter
287
+ self.log_freq = num_bench_iter // 5
288
+ if 'cuda' in self.device:
289
+ self.time_fn = partial(cuda_timestamp, device=self.device)
290
+ else:
291
+ self.time_fn = timestamp
292
+
293
+ def _init_input(self):
294
+ self.example_inputs = torch.randn(
295
+ (self.batch_size,) + self.input_size, device=self.device, dtype=self.data_dtype)
296
+ if self.channels_last:
297
+ self.example_inputs = self.example_inputs.contiguous(memory_format=torch.channels_last)
298
+
299
+
300
+ class InferenceBenchmarkRunner(BenchmarkRunner):
301
+
302
+ def __init__(
303
+ self,
304
+ model_name,
305
+ device='cuda',
306
+ torchscript=False,
307
+ **kwargs
308
+ ):
309
+ super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs)
310
+ self.model.eval()
311
+
312
+ def run(self):
313
+ def _step():
314
+ t_step_start = self.time_fn()
315
+ with self.amp_autocast():
316
+ output = self.model(self.example_inputs)
317
+ t_step_end = self.time_fn(True)
318
+ return t_step_end - t_step_start
319
+
320
+ _logger.info(
321
+ f'Running inference benchmark on {self.model_name} for {self.num_bench_iter} steps w/ '
322
+ f'input size {self.input_size} and batch size {self.batch_size}.')
323
+
324
+ with torch.no_grad():
325
+ self._init_input()
326
+
327
+ for _ in range(self.num_warm_iter):
328
+ _step()
329
+
330
+ total_step = 0.
331
+ num_samples = 0
332
+ t_run_start = self.time_fn()
333
+ for i in range(self.num_bench_iter):
334
+ delta_fwd = _step()
335
+ total_step += delta_fwd
336
+ num_samples += self.batch_size
337
+ num_steps = i + 1
338
+ if num_steps % self.log_freq == 0:
339
+ _logger.info(
340
+ f"Infer [{num_steps}/{self.num_bench_iter}]."
341
+ f" {num_samples / total_step:0.2f} samples/sec."
342
+ f" {1000 * total_step / num_steps:0.3f} ms/step.")
343
+ t_run_end = self.time_fn(True)
344
+ t_run_elapsed = t_run_end - t_run_start
345
+
346
+ results = dict(
347
+ samples_per_sec=round(num_samples / t_run_elapsed, 2),
348
+ step_time=round(1000 * total_step / self.num_bench_iter, 3),
349
+ batch_size=self.batch_size,
350
+ img_size=self.input_size[-1],
351
+ param_count=round(self.param_count / 1e6, 2),
352
+ )
353
+
354
+ retries = 0 if self.compiled else 2 # skip profiling if model is scripted or compiled
355
+ while retries:
356
+ retries -= 1
357
+ try:
358
+ if has_deepspeed_profiling:
359
+ macs, _ = profile_deepspeed(self.model, self.input_size)
360
+ results['gmacs'] = round(macs / 1e9, 2)
361
+ elif has_fvcore_profiling:
362
+ macs, activations = profile_fvcore(self.model, self.input_size, force_cpu=not retries)
363
+ results['gmacs'] = round(macs / 1e9, 2)
364
+ results['macts'] = round(activations / 1e6, 2)
365
+ except RuntimeError as e:
366
+ pass
367
+
368
+ _logger.info(
369
+ f"Inference benchmark of {self.model_name} done. "
370
+ f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/step")
371
+
372
+ return results
373
+
374
+
375
+ class TrainBenchmarkRunner(BenchmarkRunner):
376
+
377
+ def __init__(
378
+ self,
379
+ model_name,
380
+ device='cuda',
381
+ torchscript=False,
382
+ **kwargs
383
+ ):
384
+ super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs)
385
+ self.model.train()
386
+
387
+ self.loss = nn.CrossEntropyLoss().to(self.device)
388
+ self.target_shape = tuple()
389
+
390
+ self.optimizer = create_optimizer_v2(
391
+ self.model,
392
+ opt=kwargs.pop('opt', 'sgd'),
393
+ lr=kwargs.pop('lr', 1e-4))
394
+
395
+ if kwargs.pop('grad_checkpointing', False):
396
+ self.model.set_grad_checkpointing()
397
+
398
+ def _gen_target(self, batch_size):
399
+ return torch.empty(
400
+ (batch_size,) + self.target_shape, device=self.device, dtype=torch.long).random_(self.num_classes)
401
+
402
+ def run(self):
403
+ def _step(detail=False):
404
+ self.optimizer.zero_grad() # can this be ignored?
405
+ t_start = self.time_fn()
406
+ t_fwd_end = t_start
407
+ t_bwd_end = t_start
408
+ with self.amp_autocast():
409
+ output = self.model(self.example_inputs)
410
+ if isinstance(output, tuple):
411
+ output = output[0]
412
+ if detail:
413
+ t_fwd_end = self.time_fn(True)
414
+ target = self._gen_target(output.shape[0])
415
+ self.loss(output, target).backward()
416
+ if detail:
417
+ t_bwd_end = self.time_fn(True)
418
+ self.optimizer.step()
419
+ t_end = self.time_fn(True)
420
+ if detail:
421
+ delta_fwd = t_fwd_end - t_start
422
+ delta_bwd = t_bwd_end - t_fwd_end
423
+ delta_opt = t_end - t_bwd_end
424
+ return delta_fwd, delta_bwd, delta_opt
425
+ else:
426
+ delta_step = t_end - t_start
427
+ return delta_step
428
+
429
+ _logger.info(
430
+ f'Running train benchmark on {self.model_name} for {self.num_bench_iter} steps w/ '
431
+ f'input size {self.input_size} and batch size {self.batch_size}.')
432
+
433
+ self._init_input()
434
+
435
+ for _ in range(self.num_warm_iter):
436
+ _step()
437
+
438
+ t_run_start = self.time_fn()
439
+ if self.detail:
440
+ total_fwd = 0.
441
+ total_bwd = 0.
442
+ total_opt = 0.
443
+ num_samples = 0
444
+ for i in range(self.num_bench_iter):
445
+ delta_fwd, delta_bwd, delta_opt = _step(True)
446
+ num_samples += self.batch_size
447
+ total_fwd += delta_fwd
448
+ total_bwd += delta_bwd
449
+ total_opt += delta_opt
450
+ num_steps = (i + 1)
451
+ if num_steps % self.log_freq == 0:
452
+ total_step = total_fwd + total_bwd + total_opt
453
+ _logger.info(
454
+ f"Train [{num_steps}/{self.num_bench_iter}]."
455
+ f" {num_samples / total_step:0.2f} samples/sec."
456
+ f" {1000 * total_fwd / num_steps:0.3f} ms/step fwd,"
457
+ f" {1000 * total_bwd / num_steps:0.3f} ms/step bwd,"
458
+ f" {1000 * total_opt / num_steps:0.3f} ms/step opt."
459
+ )
460
+ total_step = total_fwd + total_bwd + total_opt
461
+ t_run_elapsed = self.time_fn() - t_run_start
462
+ results = dict(
463
+ samples_per_sec=round(num_samples / t_run_elapsed, 2),
464
+ step_time=round(1000 * total_step / self.num_bench_iter, 3),
465
+ fwd_time=round(1000 * total_fwd / self.num_bench_iter, 3),
466
+ bwd_time=round(1000 * total_bwd / self.num_bench_iter, 3),
467
+ opt_time=round(1000 * total_opt / self.num_bench_iter, 3),
468
+ batch_size=self.batch_size,
469
+ img_size=self.input_size[-1],
470
+ param_count=round(self.param_count / 1e6, 2),
471
+ )
472
+ else:
473
+ total_step = 0.
474
+ num_samples = 0
475
+ for i in range(self.num_bench_iter):
476
+ delta_step = _step(False)
477
+ num_samples += self.batch_size
478
+ total_step += delta_step
479
+ num_steps = (i + 1)
480
+ if num_steps % self.log_freq == 0:
481
+ _logger.info(
482
+ f"Train [{num_steps}/{self.num_bench_iter}]."
483
+ f" {num_samples / total_step:0.2f} samples/sec."
484
+ f" {1000 * total_step / num_steps:0.3f} ms/step.")
485
+ t_run_elapsed = self.time_fn() - t_run_start
486
+ results = dict(
487
+ samples_per_sec=round(num_samples / t_run_elapsed, 2),
488
+ step_time=round(1000 * total_step / self.num_bench_iter, 3),
489
+ batch_size=self.batch_size,
490
+ img_size=self.input_size[-1],
491
+ param_count=round(self.param_count / 1e6, 2),
492
+ )
493
+
494
+ _logger.info(
495
+ f"Train benchmark of {self.model_name} done. "
496
+ f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/step")
497
+
498
+ return results
499
+
500
+
501
+ class ProfileRunner(BenchmarkRunner):
502
+
503
+ def __init__(self, model_name, device='cuda', profiler='', **kwargs):
504
+ super().__init__(model_name=model_name, device=device, **kwargs)
505
+ if not profiler:
506
+ if has_deepspeed_profiling:
507
+ profiler = 'deepspeed'
508
+ elif has_fvcore_profiling:
509
+ profiler = 'fvcore'
510
+ assert profiler, "One of deepspeed or fvcore needs to be installed for profiling to work."
511
+ self.profiler = profiler
512
+ self.model.eval()
513
+
514
+ def run(self):
515
+ _logger.info(
516
+ f'Running profiler on {self.model_name} w/ '
517
+ f'input size {self.input_size} and batch size {self.batch_size}.')
518
+
519
+ macs = 0
520
+ activations = 0
521
+ if self.profiler == 'deepspeed':
522
+ macs, _ = profile_deepspeed(self.model, self.input_size, batch_size=self.batch_size, detailed=True)
523
+ elif self.profiler == 'fvcore':
524
+ macs, activations = profile_fvcore(self.model, self.input_size, batch_size=self.batch_size, detailed=True)
525
+
526
+ results = dict(
527
+ gmacs=round(macs / 1e9, 2),
528
+ macts=round(activations / 1e6, 2),
529
+ batch_size=self.batch_size,
530
+ img_size=self.input_size[-1],
531
+ param_count=round(self.param_count / 1e6, 2),
532
+ )
533
+
534
+ _logger.info(
535
+ f"Profile of {self.model_name} done. "
536
+ f"{results['gmacs']:.2f} GMACs, {results['param_count']:.2f} M params.")
537
+
538
+ return results
539
+
540
+
541
+ def _try_run(
542
+ model_name,
543
+ bench_fn,
544
+ bench_kwargs,
545
+ initial_batch_size,
546
+ no_batch_size_retry=False
547
+ ):
548
+ batch_size = initial_batch_size
549
+ results = dict()
550
+ error_str = 'Unknown'
551
+ while batch_size:
552
+ try:
553
+ torch.cuda.empty_cache()
554
+ bench = bench_fn(model_name=model_name, batch_size=batch_size, **bench_kwargs)
555
+ results = bench.run()
556
+ return results
557
+ except RuntimeError as e:
558
+ error_str = str(e)
559
+ _logger.error(f'"{error_str}" while running benchmark.')
560
+ if not check_batch_size_retry(error_str):
561
+ _logger.error(f'Unrecoverable error encountered while benchmarking {model_name}, skipping.')
562
+ break
563
+ if no_batch_size_retry:
564
+ break
565
+ batch_size = decay_batch_step(batch_size)
566
+ _logger.warning(f'Reducing batch size to {batch_size} for retry.')
567
+ results['error'] = error_str
568
+ return results
569
+
570
+
571
+ def benchmark(args):
572
+ if args.amp:
573
+ _logger.warning("Overriding precision to 'amp' since --amp flag set.")
574
+ args.precision = 'amp' if args.amp_dtype == 'float16' else '_'.join(['amp', args.amp_dtype])
575
+ _logger.info(f'Benchmarking in {args.precision} precision. '
576
+ f'{"NHWC" if args.channels_last else "NCHW"} layout. '
577
+ f'torchscript {"enabled" if args.torchscript else "disabled"}')
578
+
579
+ bench_kwargs = vars(args).copy()
580
+ bench_kwargs.pop('amp')
581
+ model = bench_kwargs.pop('model')
582
+ batch_size = bench_kwargs.pop('batch_size')
583
+
584
+ bench_fns = (InferenceBenchmarkRunner,)
585
+ prefixes = ('infer',)
586
+ if args.bench == 'both':
587
+ bench_fns = (
588
+ InferenceBenchmarkRunner,
589
+ TrainBenchmarkRunner
590
+ )
591
+ prefixes = ('infer', 'train')
592
+ elif args.bench == 'train':
593
+ bench_fns = TrainBenchmarkRunner,
594
+ prefixes = 'train',
595
+ elif args.bench.startswith('profile'):
596
+ # specific profiler used if included in bench mode string, otherwise default to deepspeed, fallback to fvcore
597
+ if 'deepspeed' in args.bench:
598
+ assert has_deepspeed_profiling, "deepspeed must be installed to use deepspeed flop counter"
599
+ bench_kwargs['profiler'] = 'deepspeed'
600
+ elif 'fvcore' in args.bench:
601
+ assert has_fvcore_profiling, "fvcore must be installed to use fvcore flop counter"
602
+ bench_kwargs['profiler'] = 'fvcore'
603
+ bench_fns = ProfileRunner,
604
+ batch_size = 1
605
+
606
+ model_results = OrderedDict(model=model)
607
+ for prefix, bench_fn in zip(prefixes, bench_fns):
608
+ run_results = _try_run(
609
+ model,
610
+ bench_fn,
611
+ bench_kwargs=bench_kwargs,
612
+ initial_batch_size=batch_size,
613
+ no_batch_size_retry=args.no_retry,
614
+ )
615
+ if prefix and 'error' not in run_results:
616
+ run_results = {'_'.join([prefix, k]): v for k, v in run_results.items()}
617
+ model_results.update(run_results)
618
+ if 'error' in run_results:
619
+ break
620
+ if 'error' not in model_results:
621
+ param_count = model_results.pop('infer_param_count', model_results.pop('train_param_count', 0))
622
+ model_results.setdefault('param_count', param_count)
623
+ model_results.pop('train_param_count', 0)
624
+ return model_results
625
+
626
+
627
+ def main():
628
+ setup_default_logging()
629
+ args = parser.parse_args()
630
+ model_cfgs = []
631
+ model_names = []
632
+
633
+ if args.fast_norm:
634
+ set_fast_norm()
635
+
636
+ if args.model_list:
637
+ args.model = ''
638
+ with open(args.model_list) as f:
639
+ model_names = [line.rstrip() for line in f]
640
+ model_cfgs = [(n, None) for n in model_names]
641
+ elif args.model == 'all':
642
+ # validate all models in a list of names with pretrained checkpoints
643
+ args.pretrained = True
644
+ model_names = list_models(pretrained=True, exclude_filters=['*in21k'])
645
+ model_cfgs = [(n, None) for n in model_names]
646
+ elif not is_model(args.model):
647
+ # model name doesn't exist, try as wildcard filter
648
+ model_names = list_models(args.model)
649
+ model_cfgs = [(n, None) for n in model_names]
650
+
651
+ if len(model_cfgs):
652
+ _logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names)))
653
+ results = []
654
+ try:
655
+ for m, _ in model_cfgs:
656
+ if not m:
657
+ continue
658
+ args.model = m
659
+ r = benchmark(args)
660
+ if r:
661
+ results.append(r)
662
+ time.sleep(10)
663
+ except KeyboardInterrupt as e:
664
+ pass
665
+ sort_key = 'infer_samples_per_sec'
666
+ if 'train' in args.bench:
667
+ sort_key = 'train_samples_per_sec'
668
+ elif 'profile' in args.bench:
669
+ sort_key = 'infer_gmacs'
670
+ results = filter(lambda x: sort_key in x, results)
671
+ results = sorted(results, key=lambda x: x[sort_key], reverse=True)
672
+ else:
673
+ results = benchmark(args)
674
+
675
+ if args.results_file:
676
+ write_results(args.results_file, results, format=args.results_format)
677
+
678
+ # output results in JSON to stdout w/ delimiter for runner script
679
+ print(f'--result\n{json.dumps(results, indent=4)}')
680
+
681
+
682
+ def write_results(results_file, results, format='csv'):
683
+ with open(results_file, mode='w') as cf:
684
+ if format == 'json':
685
+ json.dump(results, cf, indent=4)
686
+ else:
687
+ if not isinstance(results, (list, tuple)):
688
+ results = [results]
689
+ if not results:
690
+ return
691
+ dw = csv.DictWriter(cf, fieldnames=results[0].keys())
692
+ dw.writeheader()
693
+ for r in results:
694
+ dw.writerow(r)
695
+ cf.flush()
696
+
697
+
698
+ if __name__ == '__main__':
699
+ main()
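As a hedged illustration of the classes above, the benchmark runners can also be driven programmatically rather than via the CLI. A minimal sketch, assuming `benchmark.py` is importable from the working directory and a CUDA device is available; the model name and settings are illustrative only:

```python
# Minimal programmatic sketch of InferenceBenchmarkRunner (defined above).
# Assumes benchmark.py is on the import path and CUDA is available; the model
# name and settings below are illustrative, not a recommended configuration.
from benchmark import InferenceBenchmarkRunner

runner = InferenceBenchmarkRunner(
    model_name='resnet50',   # any timm model name
    batch_size=64,           # consumed from kwargs by BenchmarkRunner
    precision='amp',         # resolved by resolve_precision() above
    device='cuda',
)
results = runner.run()
print(results['samples_per_sec'], 'samples/sec,', results['step_time'], 'ms/step')
```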
pytorch-image-models/bulk_runner.py ADDED
@@ -0,0 +1,244 @@
1
+ #!/usr/bin/env python3
2
+ """ Bulk Model Script Runner
3
+
4
+ Run validation or benchmark script in separate process for each model
5
+
6
+ Benchmark all 'vit*' models:
7
+ python bulk_runner.py --model-list 'vit*' --results-file vit_bench.csv benchmark.py --amp -b 512
8
+
9
+ Validate all models:
10
+ python bulk_runner.py --model-list all --results-file val.csv --pretrained validate.py --data-dir /imagenet/validation/ --amp -b 512 --retry
11
+
12
+ Hacked together by Ross Wightman (https://github.com/rwightman)
13
+ """
14
+ import argparse
15
+ import os
16
+ import sys
17
+ import csv
18
+ import json
19
+ import subprocess
20
+ import time
21
+ from typing import Callable, List, Tuple, Union
22
+
23
+
24
+ from timm.models import is_model, list_models, get_pretrained_cfg, get_arch_pretrained_cfgs
25
+
26
+
27
+ parser = argparse.ArgumentParser(description='Per-model process launcher')
28
+
29
+ # model and results args
30
+ parser.add_argument(
31
+ '--model-list', metavar='NAME', default='',
32
+ help='txt file based list of model names to benchmark')
33
+ parser.add_argument(
34
+ '--results-file', default='', type=str, metavar='FILENAME',
35
+ help='Output csv file for validation results (summary)')
36
+ parser.add_argument(
37
+ '--sort-key', default='', type=str, metavar='COL',
38
+ help='Specify sort key for results csv')
39
+ parser.add_argument(
40
+ "--pretrained", action='store_true',
41
+ help="only run models with pretrained weights")
42
+
43
+ parser.add_argument(
44
+ "--delay",
45
+ type=float,
46
+ default=0,
47
+ help="Interval, in seconds, to delay between model invocations.",
48
+ )
49
+ parser.add_argument(
50
+ "--start_method", type=str, default="spawn", choices=["spawn", "fork", "forkserver"],
51
+ help="Multiprocessing start method to use when creating workers.",
52
+ )
53
+ parser.add_argument(
54
+ "--no_python",
55
+ help="Skip prepending the script with 'python' - just execute it directly. Useful "
56
+ "when the script is not a Python script.",
57
+ )
58
+ parser.add_argument(
59
+ "-m",
60
+ "--module",
61
+ help="Change each process to interpret the launch script as a Python module, executing "
62
+ "with the same behavior as 'python -m'.",
63
+ )
64
+
65
+ # positional
66
+ parser.add_argument(
67
+ "script", type=str,
68
+ help="Full path to the program/script to be launched for each model config.",
69
+ )
70
+ parser.add_argument("script_args", nargs=argparse.REMAINDER)
71
+
72
+
73
+ def cmd_from_args(args) -> Tuple[Union[Callable, str], List[str]]:
74
+ # If ``args`` not passed, defaults to ``sys.argv[:1]``
75
+ with_python = not args.no_python
76
+ cmd: Union[Callable, str]
77
+ cmd_args = []
78
+ if with_python:
79
+ cmd = os.getenv("PYTHON_EXEC", sys.executable)
80
+ cmd_args.append("-u")
81
+ if args.module:
82
+ cmd_args.append("-m")
83
+ cmd_args.append(args.script)
84
+ else:
85
+ if args.module:
86
+ raise ValueError(
87
+ "Don't use both the '--no_python' flag"
88
+ " and the '--module' flag at the same time."
89
+ )
90
+ cmd = args.script
91
+ cmd_args.extend(args.script_args)
92
+
93
+ return cmd, cmd_args
94
+
95
+
96
+ def _get_model_cfgs(
97
+ model_names,
98
+ num_classes=None,
99
+ expand_train_test=False,
100
+ include_crop=True,
101
+ expand_arch=False,
102
+ ):
103
+ model_cfgs = set()
104
+
105
+ for name in model_names:
106
+ if expand_arch:
107
+ pt_cfgs = get_arch_pretrained_cfgs(name).values()
108
+ else:
109
+ pt_cfg = get_pretrained_cfg(name)
110
+ pt_cfgs = [pt_cfg] if pt_cfg is not None else []
111
+
112
+ for cfg in pt_cfgs:
113
+ if cfg.input_size is None:
114
+ continue
115
+ if num_classes is not None and getattr(cfg, 'num_classes', 0) != num_classes:
116
+ continue
117
+
118
+ # Add main configuration
119
+ size = cfg.input_size[-1]
120
+ if include_crop:
121
+ model_cfgs.add((name, size, cfg.crop_pct))
122
+ else:
123
+ model_cfgs.add((name, size))
124
+
125
+ # Add test configuration if required
126
+ if expand_train_test and cfg.test_input_size is not None:
127
+ test_size = cfg.test_input_size[-1]
128
+ if include_crop:
129
+ test_crop = cfg.test_crop_pct or cfg.crop_pct
130
+ model_cfgs.add((name, test_size, test_crop))
131
+ else:
132
+ model_cfgs.add((name, test_size))
133
+
134
+ # Format the output
135
+ if include_crop:
136
+ return [(n, {'img-size': r, 'crop-pct': cp}) for n, r, cp in sorted(model_cfgs)]
137
+ else:
138
+ return [(n, {'img-size': r}) for n, r in sorted(model_cfgs)]
139
+
140
+
141
+ def main():
142
+ args = parser.parse_args()
143
+ cmd, cmd_args = cmd_from_args(args)
144
+
145
+ model_cfgs = []
146
+ if args.model_list == 'all':
147
+ model_names = list_models(
148
+ pretrained=args.pretrained, # only include models w/ pretrained checkpoints if set
149
+ )
150
+ model_cfgs = [(n, None) for n in model_names]
151
+ elif args.model_list == 'all_in1k':
152
+ model_names = list_models(pretrained=True)
153
+ model_cfgs = _get_model_cfgs(model_names, num_classes=1000, expand_train_test=True)
154
+ elif args.model_list == 'all_res':
155
+ model_names = list_models()
156
+ model_cfgs = _get_model_cfgs(model_names, expand_train_test=True, include_crop=False, expand_arch=True)
157
+ elif not is_model(args.model_list):
158
+ # model name doesn't exist, try as wildcard filter
159
+ model_names = list_models(args.model_list)
160
+ model_cfgs = [(n, None) for n in model_names]
161
+
162
+ if not model_cfgs and os.path.exists(args.model_list):
163
+ with open(args.model_list) as f:
164
+ model_names = [line.rstrip() for line in f]
165
+ model_cfgs = _get_model_cfgs(
166
+ model_names,
167
+ #num_classes=1000,
168
+ expand_train_test=True,
169
+ #include_crop=False,
170
+ )
171
+
172
+ if len(model_cfgs):
173
+ results_file = args.results_file or './results.csv'
174
+ results = []
175
+ errors = []
176
+ model_strings = '\n'.join([f'{x[0]}, {x[1]}' for x in model_cfgs])
177
+ print(f"Running script on these models:\n {model_strings}")
178
+ if not args.sort_key:
179
+ if 'benchmark' in args.script:
180
+ if any(['train' in a for a in args.script_args]):
181
+ sort_key = 'train_samples_per_sec'
182
+ else:
183
+ sort_key = 'infer_samples_per_sec'
184
+ else:
185
+ sort_key = 'top1'
186
+ else:
187
+ sort_key = args.sort_key
188
+ print(f'Script: {args.script}, Args: {args.script_args}, Sort key: {sort_key}')
189
+
190
+ try:
191
+ for m, ax in model_cfgs:
192
+ if not m:
193
+ continue
194
+ args_str = (cmd, *[str(e) for e in cmd_args], '--model', m)
195
+ if ax is not None:
196
+ extra_args = [(f'--{k}', str(v)) for k, v in ax.items()]
197
+ extra_args = [i for t in extra_args for i in t]
198
+ args_str += tuple(extra_args)
199
+ try:
200
+ o = subprocess.check_output(args=args_str).decode('utf-8').split('--result')[-1]
201
+ r = json.loads(o)
202
+ results.append(r)
203
+ except Exception as e:
204
+ # FIXME batch_size retry loop is currently done in either validation.py or benchmark.py
205
+ # for further robustness (but more overhead), we may want to manage that by looping here...
206
+ errors.append(dict(model=m, error=str(e)))
207
+ if args.delay:
208
+ time.sleep(args.delay)
209
+ except KeyboardInterrupt as e:
210
+ pass
211
+
212
+ errors.extend(list(filter(lambda x: 'error' in x, results)))
213
+ if errors:
214
+ print(f'{len(errors)} models had errors during run.')
215
+ for e in errors:
216
+ if 'model' in e:
217
+ print(f"\t {e['model']} ({e.get('error', 'Unknown')})")
218
+ else:
219
+ print(e)
220
+
221
+ results = list(filter(lambda x: 'error' not in x, results))
222
+
223
+ no_sortkey = list(filter(lambda x: sort_key not in x, results))
224
+ if no_sortkey:
225
+ print(f'{len(no_sortkey)} results missing sort key, skipping sort.')
226
+ else:
227
+ results = sorted(results, key=lambda x: x[sort_key], reverse=True)
228
+
229
+ if len(results):
230
+ print(f'{len(results)} models run successfully. Saving results to {results_file}.')
231
+ write_results(results_file, results)
232
+
233
+
234
+ def write_results(results_file, results):
235
+ with open(results_file, mode='w') as cf:
236
+ dw = csv.DictWriter(cf, fieldnames=results[0].keys())
237
+ dw.writeheader()
238
+ for r in results:
239
+ dw.writerow(r)
240
+ cf.flush()
241
+
242
+
243
+ if __name__ == '__main__':
244
+ main()
pytorch-image-models/clean_checkpoint.py ADDED
@@ -0,0 +1,115 @@
1
+ #!/usr/bin/env python3
2
+ """ Checkpoint Cleaning Script
3
+
4
+ Takes training checkpoints with GPU tensors, optimizer state, extra dict keys, etc.
5
+ and outputs a CPU tensor checkpoint with only the `state_dict` along with SHA256
6
+ calculation for model zoo compatibility.
7
+
8
+ Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
9
+ """
10
+ import torch
11
+ import argparse
12
+ import os
13
+ import hashlib
14
+ import shutil
15
+ import tempfile
16
+ from timm.models import load_state_dict
17
+ try:
18
+ import safetensors.torch
19
+ _has_safetensors = True
20
+ except ImportError:
21
+ _has_safetensors = False
22
+
23
+ parser = argparse.ArgumentParser(description='PyTorch Checkpoint Cleaner')
24
+ parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
25
+ help='path to latest checkpoint (default: none)')
26
+ parser.add_argument('--output', default='', type=str, metavar='PATH',
27
+ help='output path')
28
+ parser.add_argument('--no-use-ema', dest='no_use_ema', action='store_true',
29
+ help='use ema version of weights if present')
30
+ parser.add_argument('--no-hash', dest='no_hash', action='store_true',
31
+ help='no hash in output filename')
32
+ parser.add_argument('--clean-aux-bn', dest='clean_aux_bn', action='store_true',
33
+ help='remove auxiliary batch norm layers (from SplitBN training) from checkpoint')
34
+ parser.add_argument('--safetensors', action='store_true',
35
+ help='Save weights using safetensors instead of the default torch way (pickle).')
36
+
37
+
38
+ def main():
39
+ args = parser.parse_args()
40
+
41
+ if os.path.exists(args.output):
42
+ print("Error: Output filename ({}) already exists.".format(args.output))
43
+ exit(1)
44
+
45
+ clean_checkpoint(
46
+ args.checkpoint,
47
+ args.output,
48
+ not args.no_use_ema,
49
+ args.no_hash,
50
+ args.clean_aux_bn,
51
+ safe_serialization=args.safetensors,
52
+ )
53
+
54
+
55
+ def clean_checkpoint(
56
+ checkpoint,
57
+ output,
58
+ use_ema=True,
59
+ no_hash=False,
60
+ clean_aux_bn=False,
61
+ safe_serialization: bool=False,
62
+ ):
63
+ # Load an existing checkpoint to CPU, strip everything but the state_dict and re-save
64
+ if checkpoint and os.path.isfile(checkpoint):
65
+ print("=> Loading checkpoint '{}'".format(checkpoint))
66
+ state_dict = load_state_dict(checkpoint, use_ema=use_ema)
67
+ new_state_dict = {}
68
+ for k, v in state_dict.items():
69
+ if clean_aux_bn and 'aux_bn' in k:
70
+ # If all aux_bn keys are removed, the SplitBN layers will end up as normal and
71
+ # load with the unmodified model using BatchNorm2d.
72
+ continue
73
+ name = k[7:] if k.startswith('module.') else k
74
+ new_state_dict[name] = v
75
+ print("=> Loaded state_dict from '{}'".format(checkpoint))
76
+
77
+ ext = ''
78
+ if output:
79
+ checkpoint_root, checkpoint_base = os.path.split(output)
80
+ checkpoint_base, ext = os.path.splitext(checkpoint_base)
81
+ else:
82
+ checkpoint_root = ''
83
+ checkpoint_base = os.path.split(checkpoint)[1]
84
+ checkpoint_base = os.path.splitext(checkpoint_base)[0]
85
+
86
+ temp_filename = '__' + checkpoint_base
87
+ if safe_serialization:
88
+ assert _has_safetensors, "`pip install safetensors` to use .safetensors"
89
+ safetensors.torch.save_file(new_state_dict, temp_filename)
90
+ else:
91
+ torch.save(new_state_dict, temp_filename)
92
+
93
+ with open(temp_filename, 'rb') as f:
94
+ sha_hash = hashlib.sha256(f.read()).hexdigest()
95
+
96
+ if ext:
97
+ final_ext = ext
98
+ else:
99
+ final_ext = ('.safetensors' if safe_serialization else '.pth')
100
+
101
+ if no_hash:
102
+ final_filename = checkpoint_base + final_ext
103
+ else:
104
+ final_filename = '-'.join([checkpoint_base, sha_hash[:8]]) + final_ext
105
+
106
+ shutil.move(temp_filename, os.path.join(checkpoint_root, final_filename))
107
+ print("=> Saved state_dict to '{}, SHA256: {}'".format(final_filename, sha_hash))
108
+ return final_filename
109
+ else:
110
+ print("Error: Checkpoint ({}) doesn't exist".format(checkpoint))
111
+ return ''
112
+
113
+
114
+ if __name__ == '__main__':
115
+ main()
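The cleaning routine above can also be called directly from Python instead of via the CLI. A minimal sketch, assuming `clean_checkpoint.py` is importable and using placeholder paths:

```python
# Hedged sketch of calling clean_checkpoint() (defined above) programmatically.
# The checkpoint and output paths are illustrative placeholders.
from clean_checkpoint import clean_checkpoint

final_name = clean_checkpoint(
    checkpoint='output/train/model_best.pth.tar',  # training checkpoint w/ optimizer state, EMA, etc.
    output='resnet50_clean.pth',                   # desired output name (extension is preserved)
    use_ema=True,                                  # prefer EMA weights when present
    no_hash=False,                                 # append first 8 chars of the SHA256 to the filename
)
print(final_name)  # e.g. 'resnet50_clean-<sha256 prefix>.pth'
```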
pytorch-image-models/convert/convert_from_mxnet.py ADDED
@@ -0,0 +1,107 @@
1
+ import argparse
2
+ import hashlib
3
+ import os
4
+
5
+ import mxnet as mx
6
+ import gluoncv
7
+ import torch
8
+ from timm import create_model
9
+
10
+ parser = argparse.ArgumentParser(description='Convert from MXNet')
11
+ parser.add_argument('--model', default='all', type=str, metavar='MODEL',
12
+ help='Name of model to train (default: "all")')
13
+
14
+
15
+ def convert(mxnet_name, torch_name):
16
+ # download and load the pre-trained model
17
+ net = gluoncv.model_zoo.get_model(mxnet_name, pretrained=True)
18
+
19
+ # create corresponding torch model
20
+ torch_net = create_model(torch_name)
21
+
22
+ mxp = [(k, v) for k, v in net.collect_params().items() if 'running' not in k]
23
+ torchp = list(torch_net.named_parameters())
24
+ torch_params = {}
25
+
26
+ # convert parameters
27
+ # NOTE: we are relying on the fact that the order of parameters
28
+ # are usually exactly the same between these models, thus no key name mapping
29
+ # is necessary. Asserts will trip if this is not the case.
30
+ for (tn, tv), (mn, mv) in zip(torchp, mxp):
31
+ m_split = mn.split('_')
32
+ t_split = tn.split('.')
33
+ print(t_split, m_split)
34
+ print(tv.shape, mv.shape)
35
+
36
+ # ensure ordering of BN params match since their sizes are not specific
37
+ if m_split[-1] == 'gamma':
38
+ assert t_split[-1] == 'weight'
39
+ if m_split[-1] == 'beta':
40
+ assert t_split[-1] == 'bias'
41
+
42
+ # ensure shapes match
43
+ assert all(t == m for t, m in zip(tv.shape, mv.shape))
44
+
45
+ torch_tensor = torch.from_numpy(mv.data().asnumpy())
46
+ torch_params[tn] = torch_tensor
47
+
48
+ # convert buffers (batch norm running stats)
49
+ mxb = [(k, v) for k, v in net.collect_params().items() if any(x in k for x in ['running_mean', 'running_var'])]
50
+ torchb = [(k, v) for k, v in torch_net.named_buffers() if 'num_batches' not in k]
51
+ for (tn, tv), (mn, mv) in zip(torchb, mxb):
52
+ print(tn, mn)
53
+ print(tv.shape, mv.shape)
54
+
55
+ # ensure ordering of BN params match since their sizes are not specific
56
+ if 'running_var' in tn:
57
+ assert 'running_var' in mn
58
+ if 'running_mean' in tn:
59
+ assert 'running_mean' in mn
60
+
61
+ torch_tensor = torch.from_numpy(mv.data().asnumpy())
62
+ torch_params[tn] = torch_tensor
63
+
64
+ torch_net.load_state_dict(torch_params)
65
+ torch_filename = './%s.pth' % torch_name
66
+ torch.save(torch_net.state_dict(), torch_filename)
67
+ with open(torch_filename, 'rb') as f:
68
+ sha_hash = hashlib.sha256(f.read()).hexdigest()
69
+ final_filename = os.path.splitext(torch_filename)[0] + '-' + sha_hash[:8] + '.pth'
70
+ os.rename(torch_filename, final_filename)
71
+ print("=> Saved converted model to '{}, SHA256: {}'".format(final_filename, sha_hash))
72
+
73
+
74
+ def map_mx_to_torch_model(mx_name):
75
+ torch_name = mx_name.lower()
76
+ if torch_name.startswith('se_'):
77
+ torch_name = torch_name.replace('se_', 'se')
78
+ elif torch_name.startswith('senet_'):
79
+ torch_name = torch_name.replace('senet_', 'senet')
80
+ elif torch_name.startswith('inceptionv3'):
81
+ torch_name = torch_name.replace('inceptionv3', 'inception_v3')
82
+ torch_name = 'gluon_' + torch_name
83
+ return torch_name
84
+
85
+
86
+ ALL = ['resnet18_v1b', 'resnet34_v1b', 'resnet50_v1b', 'resnet101_v1b', 'resnet152_v1b',
87
+ 'resnet50_v1c', 'resnet101_v1c', 'resnet152_v1c', 'resnet50_v1d', 'resnet101_v1d', 'resnet152_v1d',
88
+ #'resnet50_v1e', 'resnet101_v1e', 'resnet152_v1e',
89
+ 'resnet50_v1s', 'resnet101_v1s', 'resnet152_v1s', 'resnext50_32x4d', 'resnext101_32x4d', 'resnext101_64x4d',
90
+ 'se_resnext50_32x4d', 'se_resnext101_32x4d', 'se_resnext101_64x4d', 'senet_154', 'inceptionv3']
91
+
92
+
93
+ def main():
94
+ args = parser.parse_args()
95
+
96
+ if not args.model or args.model == 'all':
97
+ for mx_model in ALL:
98
+ torch_model = map_mx_to_torch_model(mx_model)
99
+ convert(mx_model, torch_model)
100
+ else:
101
+ mx_model = args.model
102
+ torch_model = map_mx_to_torch_model(mx_model)
103
+ convert(mx_model, torch_model)
104
+
105
+
106
+ if __name__ == '__main__':
107
+ main()
pytorch-image-models/convert/convert_nest_flax.py ADDED
@@ -0,0 +1,109 @@
1
+ """
2
+ Convert weights from https://github.com/google-research/nested-transformer
3
+ NOTE: You'll need https://github.com/google/CommonLoopUtils, not included in requirements.txt
4
+ """
5
+
6
+ import sys
7
+
8
+ import numpy as np
9
+ import torch
10
+
11
+ from clu import checkpoint
12
+
13
+
14
+ arch_depths = {
15
+ 'nest_base': [2, 2, 20],
16
+ 'nest_small': [2, 2, 20],
17
+ 'nest_tiny': [2, 2, 8],
18
+ }
19
+
20
+
21
+ def convert_nest(checkpoint_path, arch):
22
+ """
23
+ Expects path to checkpoint which is a dir containing 4 files like in each of these folders
24
+ - https://console.cloud.google.com/storage/browser/gresearch/nest-checkpoints
25
+ `arch` is needed to select the per-level depths (see `arch_depths` above) used when mapping keys
26
+ Returns a state dict that can be used with `torch.nn.Module.load_state_dict`
27
+ Hint: Follow timm.models.nest.Nest.__init__ and
28
+ https://github.com/google-research/nested-transformer/blob/main/models/nest_net.py
29
+ """
30
+ assert arch in ['nest_base', 'nest_small', 'nest_tiny'], "Your `arch` is not supported"
31
+
32
+ flax_dict = checkpoint.load_state_dict(checkpoint_path)['optimizer']['target']
33
+ state_dict = {}
34
+
35
+ # Patch embedding
36
+ state_dict['patch_embed.proj.weight'] = torch.tensor(
37
+ flax_dict['PatchEmbedding_0']['Conv_0']['kernel']).permute(3, 2, 0, 1)
38
+ state_dict['patch_embed.proj.bias'] = torch.tensor(flax_dict['PatchEmbedding_0']['Conv_0']['bias'])
39
+
40
+ # Positional embeddings
41
+ posemb_keys = [k for k in flax_dict.keys() if k.startswith('PositionEmbedding')]
42
+ for i, k in enumerate(posemb_keys):
43
+ state_dict[f'levels.{i}.pos_embed'] = torch.tensor(flax_dict[k]['pos_embedding'])
44
+
45
+ # Transformer encoders
46
+ depths = arch_depths[arch]
47
+ for level in range(len(depths)):
48
+ for layer in range(depths[level]):
49
+ global_layer_ix = sum(depths[:level]) + layer
50
+ # Norms
51
+ for i in range(2):
52
+ state_dict[f'levels.{level}.transformer_encoder.{layer}.norm{i+1}.weight'] = torch.tensor(
53
+ flax_dict[f'EncoderNDBlock_{global_layer_ix}'][f'LayerNorm_{i}']['scale'])
54
+ state_dict[f'levels.{level}.transformer_encoder.{layer}.norm{i+1}.bias'] = torch.tensor(
55
+ flax_dict[f'EncoderNDBlock_{global_layer_ix}'][f'LayerNorm_{i}']['bias'])
56
+ # Attention qkv
57
+ w_q = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_0']['kernel']
58
+ w_kv = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_1']['kernel']
59
+ # Pay attention to dims here (maybe get pen and paper)
60
+ w_kv = np.concatenate(np.split(w_kv, 2, -1), 1)
61
+ w_qkv = np.concatenate([w_q, w_kv], 1)
62
+ state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.qkv.weight'] = torch.tensor(w_qkv).flatten(1).permute(1,0)
63
+ b_q = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_0']['bias']
64
+ b_kv = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_1']['bias']
65
+ # Pay attention to dims here (maybe get pen and paper)
66
+ b_kv = np.concatenate(np.split(b_kv, 2, -1), 0)
67
+ b_qkv = np.concatenate([b_q, b_kv], 0)
68
+ state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.qkv.bias'] = torch.tensor(b_qkv).reshape(-1)
69
+ # Attention proj
70
+ w_proj = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['proj_kernel']
71
+ w_proj = torch.tensor(w_proj).permute(2, 1, 0).flatten(1)
72
+ state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.proj.weight'] = w_proj
73
+ state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.proj.bias'] = torch.tensor(
74
+ flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['bias'])
75
+ # MLP
76
+ for i in range(2):
77
+ state_dict[f'levels.{level}.transformer_encoder.{layer}.mlp.fc{i+1}.weight'] = torch.tensor(
78
+ flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MlpBlock_0'][f'Dense_{i}']['kernel']).permute(1, 0)
79
+ state_dict[f'levels.{level}.transformer_encoder.{layer}.mlp.fc{i+1}.bias'] = torch.tensor(
80
+ flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MlpBlock_0'][f'Dense_{i}']['bias'])
81
+
82
+ # Block aggregations (ConvPool)
83
+ for level in range(1, len(depths)):
84
+ # Convs
85
+ state_dict[f'levels.{level}.pool.conv.weight'] = torch.tensor(
86
+ flax_dict[f'ConvPool_{level-1}']['Conv_0']['kernel']).permute(3, 2, 0, 1)
87
+ state_dict[f'levels.{level}.pool.conv.bias'] = torch.tensor(
88
+ flax_dict[f'ConvPool_{level-1}']['Conv_0']['bias'])
89
+ # Norms
90
+ state_dict[f'levels.{level}.pool.norm.weight'] = torch.tensor(
91
+ flax_dict[f'ConvPool_{level-1}']['LayerNorm_0']['scale'])
92
+ state_dict[f'levels.{level}.pool.norm.bias'] = torch.tensor(
93
+ flax_dict[f'ConvPool_{level-1}']['LayerNorm_0']['bias'])
94
+
95
+ # Final norm
96
+ state_dict[f'norm.weight'] = torch.tensor(flax_dict['LayerNorm_0']['scale'])
97
+ state_dict[f'norm.bias'] = torch.tensor(flax_dict['LayerNorm_0']['bias'])
98
+
99
+ # Classifier
100
+ state_dict['head.weight'] = torch.tensor(flax_dict['Dense_0']['kernel']).permute(1, 0)
101
+ state_dict['head.bias'] = torch.tensor(flax_dict['Dense_0']['bias'])
102
+
103
+ return state_dict
104
+
105
+
106
+ if __name__ == '__main__':
107
+ variant = sys.argv[1] # base, small, or tiny
108
+ state_dict = convert_nest(f'./nest-{variant[0]}_imagenet', f'nest_{variant}')
109
+ torch.save(state_dict, f'./jx_nest_{variant}.pth')
pytorch-image-models/distributed_train.sh ADDED
@@ -0,0 +1,5 @@
1
+ #!/bin/bash
2
+ NUM_PROC=$1
3
+ shift
4
+ torchrun --nproc_per_node=$NUM_PROC train.py "$@"
5
+
pytorch-image-models/hfdocs/README.md ADDED
@@ -0,0 +1,14 @@
1
+ # Hugging Face Timm Docs
2
+
3
+ ## Getting Started
4
+
5
+ ```
6
+ pip install git+https://github.com/huggingface/doc-builder.git@main#egg=hf-doc-builder
7
+ pip install watchdog black
8
+ ```
9
+
10
+ ## Preview the Docs Locally
11
+
12
+ ```
13
+ doc-builder preview timm hfdocs/source
14
+ ```
pytorch-image-models/hfdocs/source/_toctree.yml ADDED
@@ -0,0 +1,162 @@
1
+ - sections:
2
+ - local: index
3
+ title: Home
4
+ - local: quickstart
5
+ title: Quickstart
6
+ - local: installation
7
+ title: Installation
8
+ - local: changes
9
+ title: Changelog
10
+ title: Get started
11
+ - sections:
12
+ - local: feature_extraction
13
+ title: Using Pretrained Models as Feature Extractors
14
+ - local: training_script
15
+ title: Training With The Official Training Script
16
+ - local: hf_hub
17
+ title: Share and Load Models from the 🤗 Hugging Face Hub
18
+ title: Tutorials
19
+ - sections:
20
+ - local: models
21
+ title: Model Summaries
22
+ - local: results
23
+ title: Results
24
+ - local: models/adversarial-inception-v3
25
+ title: Adversarial Inception v3
26
+ - local: models/advprop
27
+ title: AdvProp (EfficientNet)
28
+ - local: models/big-transfer
29
+ title: Big Transfer (BiT)
30
+ - local: models/csp-darknet
31
+ title: CSP-DarkNet
32
+ - local: models/csp-resnet
33
+ title: CSP-ResNet
34
+ - local: models/csp-resnext
35
+ title: CSP-ResNeXt
36
+ - local: models/densenet
37
+ title: DenseNet
38
+ - local: models/dla
39
+ title: Deep Layer Aggregation
40
+ - local: models/dpn
41
+ title: Dual Path Network (DPN)
42
+ - local: models/ecaresnet
43
+ title: ECA-ResNet
44
+ - local: models/efficientnet
45
+ title: EfficientNet
46
+ - local: models/efficientnet-pruned
47
+ title: EfficientNet (Knapsack Pruned)
48
+ - local: models/ensemble-adversarial
49
+ title: Ensemble Adversarial Inception ResNet v2
50
+ - local: models/ese-vovnet
51
+ title: ESE-VoVNet
52
+ - local: models/fbnet
53
+ title: FBNet
54
+ - local: models/gloun-inception-v3
55
+ title: (Gluon) Inception v3
56
+ - local: models/gloun-resnet
57
+ title: (Gluon) ResNet
58
+ - local: models/gloun-resnext
59
+ title: (Gluon) ResNeXt
60
+ - local: models/gloun-senet
61
+ title: (Gluon) SENet
62
+ - local: models/gloun-seresnext
63
+ title: (Gluon) SE-ResNeXt
64
+ - local: models/gloun-xception
65
+ title: (Gluon) Xception
66
+ - local: models/hrnet
67
+ title: HRNet
68
+ - local: models/ig-resnext
69
+ title: Instagram ResNeXt WSL
70
+ - local: models/inception-resnet-v2
71
+ title: Inception ResNet v2
72
+ - local: models/inception-v3
73
+ title: Inception v3
74
+ - local: models/inception-v4
75
+ title: Inception v4
76
+ - local: models/legacy-se-resnet
77
+ title: (Legacy) SE-ResNet
78
+ - local: models/legacy-se-resnext
79
+ title: (Legacy) SE-ResNeXt
80
+ - local: models/legacy-senet
81
+ title: (Legacy) SENet
82
+ - local: models/mixnet
83
+ title: MixNet
84
+ - local: models/mnasnet
85
+ title: MnasNet
86
+ - local: models/mobilenet-v2
87
+ title: MobileNet v2
88
+ - local: models/mobilenet-v3
89
+ title: MobileNet v3
90
+ - local: models/nasnet
91
+ title: NASNet
92
+ - local: models/noisy-student
93
+ title: Noisy Student (EfficientNet)
94
+ - local: models/pnasnet
95
+ title: PNASNet
96
+ - local: models/regnetx
97
+ title: RegNetX
98
+ - local: models/regnety
99
+ title: RegNetY
100
+ - local: models/res2net
101
+ title: Res2Net
102
+ - local: models/res2next
103
+ title: Res2NeXt
104
+ - local: models/resnest
105
+ title: ResNeSt
106
+ - local: models/resnet
107
+ title: ResNet
108
+ - local: models/resnet-d
109
+ title: ResNet-D
110
+ - local: models/resnext
111
+ title: ResNeXt
112
+ - local: models/rexnet
113
+ title: RexNet
114
+ - local: models/se-resnet
115
+ title: SE-ResNet
116
+ - local: models/selecsls
117
+ title: SelecSLS
118
+ - local: models/seresnext
119
+ title: SE-ResNeXt
120
+ - local: models/skresnet
121
+ title: SK-ResNet
122
+ - local: models/skresnext
123
+ title: SK-ResNeXt
124
+ - local: models/spnasnet
125
+ title: SPNASNet
126
+ - local: models/ssl-resnet
127
+ title: SSL ResNet
128
+ - local: models/swsl-resnet
129
+ title: SWSL ResNet
130
+ - local: models/swsl-resnext
131
+ title: SWSL ResNeXt
132
+ - local: models/tf-efficientnet
133
+ title: (Tensorflow) EfficientNet
134
+ - local: models/tf-efficientnet-condconv
135
+ title: (Tensorflow) EfficientNet CondConv
136
+ - local: models/tf-efficientnet-lite
137
+ title: (Tensorflow) EfficientNet Lite
138
+ - local: models/tf-inception-v3
139
+ title: (Tensorflow) Inception v3
140
+ - local: models/tf-mixnet
141
+ title: (Tensorflow) MixNet
142
+ - local: models/tf-mobilenet-v3
143
+ title: (Tensorflow) MobileNet v3
144
+ - local: models/tresnet
145
+ title: TResNet
146
+ - local: models/wide-resnet
147
+ title: Wide ResNet
148
+ - local: models/xception
149
+ title: Xception
150
+ title: Model Pages
151
+ isExpanded: false
152
+ - sections:
153
+ - local: reference/models
154
+ title: Models
155
+ - local: reference/data
156
+ title: Data
157
+ - local: reference/optimizers
158
+ title: Optimizers
159
+ - local: reference/schedulers
160
+ title: Learning Rate Schedulers
161
+ title: Reference
162
+
pytorch-image-models/hfdocs/source/changes.mdx ADDED
@@ -0,0 +1,1080 @@
1
+ # Changelog
2
+
3
+ ### Aug 8, 2024
4
+ * Add RDNet ('DenseNets Reloaded', https://arxiv.org/abs/2403.19588), thanks [Donghyun Kim](https://github.com/dhkim0225)
5
+
6
+ ### July 28, 2024
7
+ * Add `mobilenet_edgetpu_v2_m` weights w/ `ra4` mnv4-small based recipe. 80.1% top-1 @ 224 and 80.7 @ 256.
8
+ * Release 1.0.8
9
+
10
+ ### July 26, 2024
11
+ * More MobileNet-v4 weights, ImageNet-12k pretrain w/ fine-tunes, and anti-aliased ConvLarge models
12
+
13
+ | model |top1 |top1_err|top5 |top5_err|param_count|img_size|
14
+ |--------------------------------------------------------------------------------------------------|------|--------|------|--------|-----------|--------|
15
+ | [mobilenetv4_conv_aa_large.e230_r448_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e230_r448_in12k_ft_in1k)|84.99 |15.01 |97.294|2.706 |32.59 |544 |
16
+ | [mobilenetv4_conv_aa_large.e230_r384_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e230_r384_in12k_ft_in1k)|84.772|15.228 |97.344|2.656 |32.59 |480 |
17
+ | [mobilenetv4_conv_aa_large.e230_r448_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e230_r448_in12k_ft_in1k)|84.64 |15.36 |97.114|2.886 |32.59 |448 |
18
+ | [mobilenetv4_conv_aa_large.e230_r384_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e230_r384_in12k_ft_in1k)|84.314|15.686 |97.102|2.898 |32.59 |384 |
19
+ | [mobilenetv4_conv_aa_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e600_r384_in1k) |83.824|16.176 |96.734|3.266 |32.59 |480 |
20
+ | [mobilenetv4_conv_aa_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e600_r384_in1k) |83.244|16.756 |96.392|3.608 |32.59 |384 |
21
+ | [mobilenetv4_hybrid_medium.e200_r256_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.e200_r256_in12k_ft_in1k)|82.99 |17.01 |96.67 |3.33 |11.07 |320 |
22
+ | [mobilenetv4_hybrid_medium.e200_r256_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.e200_r256_in12k_ft_in1k)|82.364|17.636 |96.256|3.744 |11.07 |256 |
23
+
24
+ * Impressive MobileNet-V1 and EfficientNet-B0 baseline challenges (https://huggingface.co/blog/rwightman/mobilenet-baselines)
25
+
26
+ | model |top1 |top1_err|top5 |top5_err|param_count|img_size|
27
+ |--------------------------------------------------------------------------------------------------|------|--------|------|--------|-----------|--------|
28
+ | [efficientnet_b0.ra4_e3600_r224_in1k](http://hf.co/timm/efficientnet_b0.ra4_e3600_r224_in1k) |79.364|20.636 |94.754|5.246 |5.29 |256 |
29
+ | [efficientnet_b0.ra4_e3600_r224_in1k](http://hf.co/timm/efficientnet_b0.ra4_e3600_r224_in1k) |78.584|21.416 |94.338|5.662 |5.29 |224 |
30
+ | [mobilenetv1_100h.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_100h.ra4_e3600_r224_in1k) |76.596|23.404 |93.272|6.728 |5.28 |256 |
31
+ | [mobilenetv1_100.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_100.ra4_e3600_r224_in1k) |76.094|23.906 |93.004|6.996 |4.23 |256 |
32
+ | [mobilenetv1_100h.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_100h.ra4_e3600_r224_in1k) |75.662|24.338 |92.504|7.496 |5.28 |224 |
33
+ | [mobilenetv1_100.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_100.ra4_e3600_r224_in1k) |75.382|24.618 |92.312|7.688 |4.23 |224 |
34
+
35
+ * Prototype of `set_input_size()` added to vit and swin v1/v2 models to allow changing image size, patch size, window size after model creation (see the usage sketch below).
36
+ * Improved support in swin for different input size handling; in addition to `set_input_size`, `always_partition` and `strict_img_size` args have been added to `__init__` to allow more flexible input size constraints
37
+ * Fix out of order indices info for intermediate 'Getter' feature wrapper, check out-of-range indices for same.
38
+ * Add several `tiny` < .5M param models for testing that are actually trained on ImageNet-1k
39
+
40
+ |model |top1 |top1_err|top5 |top5_err|param_count|img_size|crop_pct|
41
+ |----------------------------|------|--------|------|--------|-----------|--------|--------|
42
+ |test_efficientnet.r160_in1k |47.156|52.844 |71.726|28.274 |0.36 |192 |1.0 |
43
+ |test_byobnet.r160_in1k |46.698|53.302 |71.674|28.326 |0.46 |192 |1.0 |
44
+ |test_efficientnet.r160_in1k |46.426|53.574 |70.928|29.072 |0.36 |160 |0.875 |
45
+ |test_byobnet.r160_in1k |45.378|54.622 |70.572|29.428 |0.46 |160 |0.875 |
46
+ |test_vit.r160_in1k|42.0 |58.0 |68.664|31.336 |0.37 |192 |1.0 |
47
+ |test_vit.r160_in1k|40.822|59.178 |67.212|32.788 |0.37 |160 |0.875 |
48
+
49
+ * Fix vit reg token init, thanks [Promisery](https://github.com/Promisery)
50
+ * Other misc fixes
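As a hedged illustration of the `set_input_size()` prototype mentioned in the notes above, a minimal sketch assuming the method accepts `img_size` (and optionally `patch_size`) keyword arguments as described; exact signatures may differ between vit and swin variants:

```python
# Hedged sketch of the set_input_size() prototype described above; the keyword
# arguments follow the changelog description and may differ in the actual API.
import torch
import timm

model = timm.create_model('vit_base_patch16_224', pretrained=False)
model.set_input_size(img_size=384)  # re-resolve patch/pos embedding for 384x384 input after creation

x = torch.randn(1, 3, 384, 384)
print(model(x).shape)  # expected: torch.Size([1, 1000])
```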
51
+
52
+ ### June 24, 2024
53
+ * 3 more MobileNetV4 hybrid weights with different MQA weight init scheme
54
+
55
+ | model |top1 |top1_err|top5 |top5_err|param_count|img_size|
56
+ |--------------------------------------------------------------------------------------------------|------|--------|------|--------|-----------|--------|
57
+ | [mobilenetv4_hybrid_large.ix_e600_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_large.ix_e600_r384_in1k) |84.356|15.644 |96.892 |3.108 |37.76 |448 |
58
+ | [mobilenetv4_hybrid_large.ix_e600_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_large.ix_e600_r384_in1k) |83.990|16.010 |96.702 |3.298 |37.76 |384 |
59
+ | [mobilenetv4_hybrid_medium.ix_e550_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.ix_e550_r384_in1k) |83.394|16.606 |96.760|3.240 |11.07 |448 |
60
+ | [mobilenetv4_hybrid_medium.ix_e550_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.ix_e550_r384_in1k) |82.968|17.032 |96.474|3.526 |11.07 |384 |
61
+ | [mobilenetv4_hybrid_medium.ix_e550_r256_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.ix_e550_r256_in1k) |82.492|17.508 |96.278|3.722 |11.07 |320 |
62
+ | [mobilenetv4_hybrid_medium.ix_e550_r256_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.ix_e550_r256_in1k) |81.446|18.554 |95.704|4.296 |11.07 |256 |
63
+ * florence2 weight loading in DaViT model
64
+
65
+ ### June 12, 2024
66
+ * MobileNetV4 models and initial set of `timm` trained weights added:
67
+
68
+ | model |top1 |top1_err|top5 |top5_err|param_count|img_size|
69
+ |--------------------------------------------------------------------------------------------------|------|--------|------|--------|-----------|--------|
70
+ | [mobilenetv4_hybrid_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_large.e600_r384_in1k) |84.266|15.734 |96.936 |3.064 |37.76 |448 |
71
+ | [mobilenetv4_hybrid_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_large.e600_r384_in1k) |83.800|16.200 |96.770 |3.230 |37.76 |384 |
72
+ | [mobilenetv4_conv_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_conv_large.e600_r384_in1k) |83.392|16.608 |96.622 |3.378 |32.59 |448 |
73
+ | [mobilenetv4_conv_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_conv_large.e600_r384_in1k) |82.952|17.048 |96.266 |3.734 |32.59 |384 |
74
+ | [mobilenetv4_conv_large.e500_r256_in1k](http://hf.co/timm/mobilenetv4_conv_large.e500_r256_in1k) |82.674|17.326 |96.31 |3.69 |32.59 |320 |
75
+ | [mobilenetv4_conv_large.e500_r256_in1k](http://hf.co/timm/mobilenetv4_conv_large.e500_r256_in1k) |81.862|18.138 |95.69 |4.31 |32.59 |256 |
76
+ | [mobilenetv4_hybrid_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.e500_r224_in1k) |81.276|18.724 |95.742|4.258 |11.07 |256 |
77
+ | [mobilenetv4_conv_medium.e500_r256_in1k](http://hf.co/timm/mobilenetv4_conv_medium.e500_r256_in1k) |80.858|19.142 |95.768|4.232 |9.72 |320 |
78
+ | [mobilenetv4_hybrid_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.e500_r224_in1k) |80.442|19.558 |95.38 |4.62 |11.07 |224 |
79
+ | [mobilenetv4_conv_blur_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_conv_blur_medium.e500_r224_in1k) |80.142|19.858 |95.298|4.702 |9.72 |256 |
80
+ | [mobilenetv4_conv_medium.e500_r256_in1k](http://hf.co/timm/mobilenetv4_conv_medium.e500_r256_in1k) |79.928|20.072 |95.184|4.816 |9.72 |256 |
81
+ | [mobilenetv4_conv_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_conv_medium.e500_r224_in1k) |79.808|20.192 |95.186|4.814 |9.72 |256 |
82
+ | [mobilenetv4_conv_blur_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_conv_blur_medium.e500_r224_in1k) |79.438|20.562 |94.932|5.068 |9.72 |224 |
83
+ | [mobilenetv4_conv_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_conv_medium.e500_r224_in1k) |79.094|20.906 |94.77 |5.23 |9.72 |224 |
84
+ | [mobilenetv4_conv_small.e2400_r224_in1k](http://hf.co/timm/mobilenetv4_conv_small.e2400_r224_in1k) |74.616|25.384 |92.072|7.928 |3.77 |256 |
85
+ | [mobilenetv4_conv_small.e1200_r224_in1k](http://hf.co/timm/mobilenetv4_conv_small.e1200_r224_in1k) |74.292|25.708 |92.116|7.884 |3.77 |256 |
86
+ | [mobilenetv4_conv_small.e2400_r224_in1k](http://hf.co/timm/mobilenetv4_conv_small.e2400_r224_in1k) |73.756|26.244 |91.422|8.578 |3.77 |224 |
87
+ | [mobilenetv4_conv_small.e1200_r224_in1k](http://hf.co/timm/mobilenetv4_conv_small.e1200_r224_in1k) |73.454|26.546 |91.34 |8.66 |3.77 |224 |
88
+
89
+ * Apple MobileCLIP (https://arxiv.org/pdf/2311.17049, FastViT and ViT-B) image tower model support & weights added (part of OpenCLIP support).
90
+ * ViTamin (https://arxiv.org/abs/2404.02132) CLIP image tower model & weights added (part of OpenCLIP support).
91
+ * OpenAI CLIP Modified ResNet image tower modelling & weight support (via ByobNet). Refactor AttentionPool2d.
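+
+ * For reference, a minimal usage sketch for one of the MobileNetV4 weights in the table above (any other listed name works the same way; preprocessing is resolved from the weight's pretrained config):
+
+ ```python
+ import timm
+ import torch
+
+ model = timm.create_model('mobilenetv4_conv_small.e2400_r224_in1k', pretrained=True).eval()
+
+ # eval preprocessing resolved from the pretrained config of the selected weights
+ data_cfg = timm.data.resolve_model_data_config(model)
+ transform = timm.data.create_transform(**data_cfg, is_training=False)
+
+ with torch.no_grad():
+     out = model(torch.randn(1, *data_cfg['input_size']))
+ print(out.shape)  # torch.Size([1, 1000])
+ ```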
92
+
93
+ ### May 14, 2024
94
+ * Support loading PaliGemma jax weights into SigLIP ViT models with average pooling.
95
+ * Add Hiera models from Meta (https://github.com/facebookresearch/hiera).
96
+ * Add `normalize=` flag for transforms, return non-normalized torch.Tensor with original dtype (for `chug`)
97
+ * Version 1.0.3 release
98
+
99
+ ### May 11, 2024
100
+ * `Searching for Better ViT Baselines (For the GPU Poor)` weights and vit variants released. Exploring model shapes between Tiny and Base.
101
+
102
+ | model | top1 | top5 | param_count | img_size |
103
+ | -------------------------------------------------- | ------ | ------ | ----------- | -------- |
104
+ | [vit_mediumd_patch16_reg4_gap_256.sbb_in12k_ft_in1k](https://huggingface.co/timm/vit_mediumd_patch16_reg4_gap_256.sbb_in12k_ft_in1k) | 86.202 | 97.874 | 64.11 | 256 |
105
+ | [vit_betwixt_patch16_reg4_gap_256.sbb_in12k_ft_in1k](https://huggingface.co/timm/vit_betwixt_patch16_reg4_gap_256.sbb_in12k_ft_in1k) | 85.418 | 97.48 | 60.4 | 256 |
106
+ | [vit_mediumd_patch16_rope_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_mediumd_patch16_rope_reg1_gap_256.sbb_in1k) | 84.322 | 96.812 | 63.95 | 256 |
107
+ | [vit_betwixt_patch16_rope_reg4_gap_256.sbb_in1k](https://huggingface.co/timm/vit_betwixt_patch16_rope_reg4_gap_256.sbb_in1k) | 83.906 | 96.684 | 60.23 | 256 |
108
+ | [vit_base_patch16_rope_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_base_patch16_rope_reg1_gap_256.sbb_in1k) | 83.866 | 96.67 | 86.43 | 256 |
109
+ | [vit_medium_patch16_rope_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_medium_patch16_rope_reg1_gap_256.sbb_in1k) | 83.81 | 96.824 | 38.74 | 256 |
110
+ | [vit_betwixt_patch16_reg4_gap_256.sbb_in1k](https://huggingface.co/timm/vit_betwixt_patch16_reg4_gap_256.sbb_in1k) | 83.706 | 96.616 | 60.4 | 256 |
111
+ | [vit_betwixt_patch16_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_betwixt_patch16_reg1_gap_256.sbb_in1k) | 83.628 | 96.544 | 60.4 | 256 |
112
+ | [vit_medium_patch16_reg4_gap_256.sbb_in1k](https://huggingface.co/timm/vit_medium_patch16_reg4_gap_256.sbb_in1k) | 83.47 | 96.622 | 38.88 | 256 |
113
+ | [vit_medium_patch16_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_medium_patch16_reg1_gap_256.sbb_in1k) | 83.462 | 96.548 | 38.88 | 256 |
114
+ | [vit_little_patch16_reg4_gap_256.sbb_in1k](https://huggingface.co/timm/vit_little_patch16_reg4_gap_256.sbb_in1k) | 82.514 | 96.262 | 22.52 | 256 |
115
+ | [vit_wee_patch16_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_wee_patch16_reg1_gap_256.sbb_in1k) | 80.256 | 95.360 | 13.42 | 256 |
116
+ | [vit_pwee_patch16_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_pwee_patch16_reg1_gap_256.sbb_in1k) | 80.072 | 95.136 | 15.25 | 256 |
117
+ | [vit_mediumd_patch16_reg4_gap_256.sbb_in12k](https://huggingface.co/timm/vit_mediumd_patch16_reg4_gap_256.sbb_in12k) | N/A | N/A | 64.11 | 256 |
118
+ | [vit_betwixt_patch16_reg4_gap_256.sbb_in12k](https://huggingface.co/timm/vit_betwixt_patch16_reg4_gap_256.sbb_in12k) | N/A | N/A | 60.4 | 256 |
119
+
120
+ * AttentionExtract helper added to extract attention maps from `timm` models. See example in https://github.com/huggingface/pytorch-image-models/discussions/1232#discussioncomment-9320949
121
+ * `forward_intermediates()` API refined and added to more models including some ConvNets that have other extraction methods.
122
+ * 1017 of 1047 model architectures support `features_only=True` feature extraction. Remaining 34 architectures can be supported based on priority requests.
123
+ * Remove torch.jit.script annotated functions including old JIT activations. These conflict with dynamo, and dynamo does a much better job when used.
124
+
125
+ ### April 11, 2024
126
+ * Prepping for a long overdue 1.0 release, things have been stable for a while now.
127
+ * Significant feature that's been missing for a while, `features_only=True` support for ViT models with flat hidden states or non-std module layouts (so far covering `'vit_*', 'twins_*', 'deit*', 'beit*', 'mvitv2*', 'eva*', 'samvit_*', 'flexivit*'`)
128
+ * Above feature support achieved through a new `forward_intermediates()` API that can be used with a feature wrapping module or directly.
129
+ ```python
130
+ model = timm.create_model('vit_base_patch16_224')
131
+ final_feat, intermediates = model.forward_intermediates(input)
132
+ output = model.forward_head(final_feat) # pooling + classifier head
133
+
134
+ print(final_feat.shape)
135
+ torch.Size([2, 197, 768])
136
+
137
+ for f in intermediates:
138
+     print(f.shape)
139
+ torch.Size([2, 768, 14, 14])
140
+ torch.Size([2, 768, 14, 14])
141
+ torch.Size([2, 768, 14, 14])
142
+ torch.Size([2, 768, 14, 14])
143
+ torch.Size([2, 768, 14, 14])
144
+ torch.Size([2, 768, 14, 14])
145
+ torch.Size([2, 768, 14, 14])
146
+ torch.Size([2, 768, 14, 14])
147
+ torch.Size([2, 768, 14, 14])
148
+ torch.Size([2, 768, 14, 14])
149
+ torch.Size([2, 768, 14, 14])
150
+ torch.Size([2, 768, 14, 14])
151
+
152
+ print(output.shape)
153
+ torch.Size([2, 1000])
154
+ ```
155
+
156
+ ```python
157
+ model = timm.create_model('eva02_base_patch16_clip_224', pretrained=True, img_size=512, features_only=True, out_indices=(-3, -2,))
158
+ output = model(torch.randn(2, 3, 512, 512))
159
+
160
+ for o in output:
161
+     print(o.shape)
162
+ torch.Size([2, 768, 32, 32])
163
+ torch.Size([2, 768, 32, 32])
164
+ ```
165
+ * TinyCLIP vision tower weights added, thx [Thien Tran](https://github.com/gau-nernst)
166
+
167
+ ### Feb 19, 2024
168
+ * Next-ViT models added. Adapted from https://github.com/bytedance/Next-ViT
169
+ * HGNet and PP-HGNetV2 models added. Adapted from https://github.com/PaddlePaddle/PaddleClas by [SeeFun](https://github.com/seefun)
170
+ * Removed setup.py, moved to pyproject.toml based build supported by PDM
171
+ * Add updated model EMA impl using _for_each for less overhead
172
+ * Support device args in train script for non GPU devices
173
+ * Other misc fixes and small additions
174
+ * Min supported Python version increased to 3.8
175
+ * Release 0.9.16
176
+
177
+ ### Jan 8, 2024
178
+ Datasets & transform refactoring
179
+ * HuggingFace streaming (iterable) dataset support (`--dataset hfids:org/dataset`)
180
+ * Webdataset wrapper tweaks for improved split info fetching, can auto fetch splits from supported HF hub webdataset
181
+ * Tested HF `datasets` and webdataset wrapper streaming from HF hub with recent `timm` ImageNet uploads to https://huggingface.co/timm
182
+ * Make input & target column/field keys consistent across datasets and pass via args
183
+ * Full monochrome support when using e.g. `--input-size 1 224 224` or `--in-chans 1`, sets PIL image conversion appropriately in dataset
184
+ * Improved several alternate crop & resize transforms (ResizeKeepRatio, RandomCropOrPad, etc) for use in PixParse document AI project
185
+ * Add SimCLR style color jitter prob along with grayscale and gaussian blur options to augmentations and args
186
+ * Allow train without validation set (`--val-split ''`) in train script
187
+ * Add `--bce-sum` (sum over class dim) and `--bce-pos-weight` (positive weighting) args for training as they're common BCE loss tweaks I was often hard coding
188
+
189
+ ### Nov 23, 2023
190
+ * Added EfficientViT-Large models, thanks [SeeFun](https://github.com/seefun)
191
+ * Fix Python 3.7 compat, will be dropping support for it soon
192
+ * Other misc fixes
193
+ * Release 0.9.12
194
+
195
+ ### Nov 20, 2023
196
+ * Added significant flexibility for Hugging Face Hub based timm models via `model_args` config entry. `model_args` will be passed as kwargs through to models on creation (usage sketch at the end of this section).
197
+ * See example at https://huggingface.co/gaunernst/vit_base_patch16_1024_128.audiomae_as2m_ft_as20k/blob/main/config.json
198
+ * Usage: https://github.com/huggingface/pytorch-image-models/discussions/2035
199
+ * Updated imagenet eval and test set csv files with latest models
200
+ * `vision_transformer.py` typing and doc cleanup by [Laureηt](https://github.com/Laurent2916)
201
+ * 0.9.11 release
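+
+ * A minimal sketch of the `model_args` passthrough using the linked audiomae checkpoint as the example (any hub model with a `model_args` entry in its `config.json` behaves the same way):
+
+ ```python
+ import timm
+
+ # kwargs from the hub config's `model_args` entry are forwarded to the model constructor
+ model = timm.create_model(
+     'hf-hub:gaunernst/vit_base_patch16_1024_128.audiomae_as2m_ft_as20k',
+     pretrained=True,
+ )
+ print(model.pretrained_cfg['input_size'])  # inspect the resolved pretrained config
+ ```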
202
+
203
+ ### Nov 3, 2023
204
+ * [DFN (Data Filtering Networks)](https://huggingface.co/papers/2309.17425) and [MetaCLIP](https://huggingface.co/papers/2309.16671) ViT weights added
205
+ * DINOv2 'register' ViT model weights added (https://huggingface.co/papers/2309.16588, https://huggingface.co/papers/2304.07193)
206
+ * Add `quickgelu` ViT variants for OpenAI, DFN, MetaCLIP weights that use it (less efficient)
207
+ * Improved typing added to ResNet, MobileNet-v3 thanks to [Aryan](https://github.com/a-r-r-o-w)
208
+ * ImageNet-12k fine-tuned (from LAION-2B CLIP) `convnext_xxlarge`
209
+ * 0.9.9 release
210
+
211
+ ### Oct 20, 2023
212
+ * [SigLIP](https://huggingface.co/papers/2303.15343) image tower weights supported in `vision_transformer.py`.
213
+ * Great potential for fine-tune and downstream feature use.
214
+ * Experimental 'register' support in vit models as per [Vision Transformers Need Registers](https://huggingface.co/papers/2309.16588)
215
+ * Updated RepViT with new weight release. Thanks [wangao](https://github.com/jameslahm)
216
+ * Add patch resizing support (on pretrained weight load) to Swin models
217
+ * 0.9.8 release pending
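+
+ * e.g. grabbing one of the SigLIP image towers above as a headless feature backbone (a sketch; model name as added in this release, check the hub for the available siglip tags):
+
+ ```python
+ import timm
+ import torch
+
+ # num_classes=0 drops the classifier and returns pooled features
+ backbone = timm.create_model('vit_base_patch16_siglip_224', pretrained=True, num_classes=0)
+ feats = backbone(torch.randn(1, 3, 224, 224))
+ print(feats.shape)
+ ```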
218
+
219
+ ### Sep 1, 2023
220
+ * TinyViT added by [SeeFun](https://github.com/seefun)
221
+ * Fix EfficientViT (MIT) to use torch.autocast so it works back to PT 1.10
222
+ * 0.9.7 release
223
+
224
+ ### Aug 28, 2023
225
+ * Add dynamic img size support to models in `vision_transformer.py`, `vision_transformer_hybrid.py`, `deit.py`, and `eva.py` w/o breaking backward compat.
226
+ * Add `dynamic_img_size=True` to args at model creation time to allow changing the grid size (interpolate abs and/or ROPE pos embed each forward pass).
227
+ * Add `dynamic_img_pad=True` to allow image sizes that aren't divisible by patch size (pad bottom right to patch size each forward pass).
228
+ * Enabling either dynamic mode will break FX tracing unless the PatchEmbed module is added as a leaf.
229
+ * Existing method of resizing position embedding by passing different `img_size` (interpolate pretrained embed weights once) on creation still works.
230
+ * Existing method of changing `patch_size` (resize pretrained patch_embed weights once) on creation still works.
231
+ * Example validation cmd `python validate.py --data-dir /imagenet --model vit_base_patch16_224 --amp --amp-dtype bfloat16 --img-size 255 --crop-pct 1.0 --model-kwargs dynamic_img_size=True dynamic_img_pad=True`
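+
+ * Rough Python equivalent of the dynamic flags above (the same kwargs the cmd passes via `--model-kwargs`):
+
+ ```python
+ import timm
+ import torch
+
+ model = timm.create_model(
+     'vit_base_patch16_224',
+     pretrained=True,
+     dynamic_img_size=True,  # interpolate pos embed to match each input's grid
+     dynamic_img_pad=True,   # pad inputs whose H/W aren't divisible by patch size
+ ).eval()
+
+ for size in (224, 255, 320):
+     out = model(torch.randn(1, 3, size, size))
+     print(size, out.shape)  # classifier output stays [1, 1000] at every size
+ ```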
232
+
233
+ ### Aug 25, 2023
234
+ * Many new models since last release
235
+ * FastViT - https://arxiv.org/abs/2303.14189
236
+ * MobileOne - https://arxiv.org/abs/2206.04040
237
+ * InceptionNeXt - https://arxiv.org/abs/2303.16900
238
+ * RepGhostNet - https://arxiv.org/abs/2211.06088 (thanks https://github.com/ChengpengChen)
239
+ * GhostNetV2 - https://arxiv.org/abs/2211.12905 (thanks https://github.com/yehuitang)
240
+ * EfficientViT (MSRA) - https://arxiv.org/abs/2305.07027 (thanks https://github.com/seefun)
241
+ * EfficientViT (MIT) - https://arxiv.org/abs/2205.14756 (thanks https://github.com/seefun)
242
+ * Add `--reparam` arg to `benchmark.py`, `onnx_export.py`, and `validate.py` to trigger layer reparameterization / fusion for models with any one of `reparameterize()`, `switch_to_deploy()` or `fuse()`
243
+ * Including FastViT, MobileOne, RepGhostNet, EfficientViT (MSRA), RepViT, RepVGG, and LeViT
244
+ * Preparing 0.9.6 'back to school' release
245
+
246
+ ### Aug 11, 2023
247
+ * Swin, MaxViT, CoAtNet, and BEiT models support resizing of image/window size on creation with adaptation of pretrained weights
248
+ * Example validation cmd to test w/ non-square resize `python validate.py --data-dir /imagenet --model swin_base_patch4_window7_224.ms_in22k_ft_in1k --amp --amp-dtype bfloat16 --input-size 3 256 320 --model-kwargs window_size=8,10 img_size=256,320`
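+
+ * Rough Python equivalent of the `--model-kwargs` in the cmd above:
+
+ ```python
+ import timm
+
+ # resize window / image size on creation, pretrained weights adapted to match
+ model = timm.create_model(
+     'swin_base_patch4_window7_224.ms_in22k_ft_in1k',
+     pretrained=True,
+     img_size=(256, 320),
+     window_size=(8, 10),
+ )
+ ```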
249
+
250
+ ### Aug 3, 2023
251
+ * Add GluonCV weights for HRNet w18_small and w18_small_v2. Converted by [SeeFun](https://github.com/seefun)
252
+ * Fix `selecsls*` model naming regression
253
+ * Patch and position embedding for ViT/EVA works for bfloat16/float16 weights on load (or activations for on-the-fly resize)
254
+ * v0.9.5 release prep
255
+
256
+ ### July 27, 2023
257
+ * Added timm trained `seresnextaa201d_32x8d.sw_in12k_ft_in1k_384` weights (and `.sw_in12k` pretrain) with 87.3% top-1 on ImageNet-1k, best ImageNet ResNet family model I'm aware of.
258
+ * RepViT model and weights (https://arxiv.org/abs/2307.09283) added by [wangao](https://github.com/jameslahm)
259
+ * I-JEPA ViT feature weights (no classifier) added by [SeeFun](https://github.com/seefun)
260
+ * SAM-ViT (segment anything) feature weights (no classifier) added by [SeeFun](https://github.com/seefun)
261
+ * Add support for alternative feat extraction methods and -ve indices to EfficientNet
262
+ * Add NAdamW optimizer
263
+ * Misc fixes
264
+
265
+ ### May 11, 2023
266
+ * `timm` 0.9 released, transition from 0.8.xdev releases
267
+
268
+ ### May 10, 2023
269
+ * Hugging Face Hub downloading is now default, 1132 models on https://huggingface.co/timm, 1163 weights in `timm`
270
+ * DINOv2 vit feature backbone weights added thanks to [Leng Yue](https://github.com/leng-yue)
271
+ * FB MAE vit feature backbone weights added
272
+ * OpenCLIP DataComp-XL L/14 feat backbone weights added
273
+ * MetaFormer (poolformer-v2, caformer, convformer, updated poolformer (v1)) w/ weights added by [Fredo Guan](https://github.com/fffffgggg54)
274
+ * Experimental `get_intermediate_layers` function on vit/deit models for grabbing hidden states (inspired by DINO impl). This is WIP and may change significantly... feedback welcome. A usage sketch follows at the end of this list.
275
+ * Model creation throws error if `pretrained=True` and no weights exist (instead of continuing with random initialization)
276
+ * Fix regression with inception / nasnet TF sourced weights with 1001 classes in original classifiers
277
+ * bitsandbytes (https://github.com/TimDettmers/bitsandbytes) optimizers added to factory, use `bnb` prefix, ie `bnbadam8bit`
278
+ * Misc cleanup and fixes
279
+ * Final testing before switching to a 0.9 and bringing `timm` out of pre-release state
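+
+ * A quick sketch of the experimental `get_intermediate_layers` call mentioned above (API may still change):
+
+ ```python
+ import timm
+ import torch
+
+ model = timm.create_model('vit_base_patch16_224', pretrained=False).eval()
+ # DINO-style: grab hidden states from the last 4 blocks
+ hidden = model.get_intermediate_layers(torch.randn(1, 3, 224, 224), n=4)
+ for h in hidden:
+     print(h.shape)
+ ```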
280
+
281
+ ### April 27, 2023
282
+ * 97% of `timm` models uploaded to HF Hub and almost all updated to support multi-weight pretrained configs
283
+ * Minor cleanup and refactoring of another batch of models as multi-weight added. More fused_attn (F.sdpa) and features_only support, and torchscript fixes.
284
+
285
+ ### April 21, 2023
286
+ * Gradient accumulation support added to train script and tested (`--grad-accum-steps`), thanks [Taeksang Kim](https://github.com/voidbag)
287
+ * More weights on HF Hub (cspnet, cait, volo, xcit, tresnet, hardcorenas, densenet, dpn, vovnet, xception_aligned)
288
+ * Added `--head-init-scale` and `--head-init-bias` to train.py to scale classifier head and set fixed bias for fine-tune
289
+ * Remove all InplaceABN (`inplace_abn`) use, replaced use in tresnet with standard BatchNorm (modified weights accordingly).
290
+
291
+ ### April 12, 2023
292
+ * Add ONNX export script, validate script, helpers that I've had kicking around for a long time. Tweak 'same' padding for better export w/ recent ONNX + pytorch.
293
+ * Refactor dropout args for vit and vit-like models, separate drop_rate into `drop_rate` (classifier dropout), `proj_drop_rate` (block mlp / out projections), `pos_drop_rate` (position embedding drop), `attn_drop_rate` (attention dropout). Also add patch dropout (FLIP) to vit and eva models.
294
+ * Add fused F.scaled_dot_product_attention support to more vit models, add env var (TIMM_FUSED_ATTN) to control, and a config interface to enable/disable
295
+ * Add EVA-CLIP backbones w/ image tower weights, all the way up to 4B param 'enormous' model, and 336x336 OpenAI ViT mode that was missed.
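+
+ * A sketch of the separated dropout args described above (dropout values are illustrative only):
+
+ ```python
+ import timm
+
+ model = timm.create_model(
+     'vit_base_patch16_224',
+     drop_rate=0.1,        # classifier / head dropout
+     proj_drop_rate=0.05,  # block mlp / out projection dropout
+     pos_drop_rate=0.05,   # position embedding dropout
+     attn_drop_rate=0.0,   # attention dropout
+     patch_drop_rate=0.3,  # FLIP-style patch dropout
+ )
+ ```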
296
+
297
+ ### April 5, 2023
298
+ * ALL ResNet models pushed to Hugging Face Hub with multi-weight support
299
+ * All past `timm` trained weights added with recipe based tags to differentiate
300
+ * All ResNet strikes back A1/A2/A3 (seed 0) and R50 example B/C1/C2/D weights available
301
+ * Add torchvision v2 recipe weights to existing torchvision originals
302
+ * See comparison table in https://huggingface.co/timm/seresnextaa101d_32x8d.sw_in12k_ft_in1k_288#model-comparison
303
+ * New ImageNet-12k + ImageNet-1k fine-tunes available for a few anti-aliased ResNet models
304
+ * `resnetaa50d.sw_in12k_ft_in1k` - 81.7 @ 224, 82.6 @ 288
305
+ * `resnetaa101d.sw_in12k_ft_in1k` - 83.5 @ 224, 84.1 @ 288
306
+ * `seresnextaa101d_32x8d.sw_in12k_ft_in1k` - 86.0 @ 224, 86.5 @ 288
307
+ * `seresnextaa101d_32x8d.sw_in12k_ft_in1k_288` - 86.5 @ 288, 86.7 @ 320
308
+
309
+ ### March 31, 2023
310
+ * Add first ConvNext-XXLarge CLIP -> IN-1k fine-tune and IN-12k intermediate fine-tunes for convnext-base/large CLIP models.
311
+
312
+ | model |top1 |top5 |img_size|param_count|gmacs |macts |
313
+ |----------------------------------------------------------------------------------------------------------------------|------|------|--------|-----------|------|------|
314
+ | [convnext_xxlarge.clip_laion2b_soup_ft_in1k](https://huggingface.co/timm/convnext_xxlarge.clip_laion2b_soup_ft_in1k) |88.612|98.704|256 |846.47 |198.09|124.45|
315
+ | convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_384 |88.312|98.578|384 |200.13 |101.11|126.74|
316
+ | convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_320 |87.968|98.47 |320 |200.13 |70.21 |88.02 |
317
+ | convnext_base.clip_laion2b_augreg_ft_in12k_in1k_384 |87.138|98.212|384 |88.59 |45.21 |84.49 |
318
+ | convnext_base.clip_laion2b_augreg_ft_in12k_in1k |86.344|97.97 |256 |88.59 |20.09 |37.55 |
319
+
320
+ * Add EVA-02 MIM pretrained and fine-tuned weights, push to HF hub and update model cards for all EVA models. First model over 90% top-1 (99% top-5)! Check out the original code & weights at https://github.com/baaivision/EVA for more details on their work blending MIM, CLIP w/ many model, dataset, and train recipe tweaks.
321
+
322
+ | model |top1 |top5 |param_count|img_size|
323
+ |----------------------------------------------------|------|------|-----------|--------|
324
+ | [eva02_large_patch14_448.mim_m38m_ft_in22k_in1k](https://huggingface.co/timm/eva02_large_patch14_448.mim_m38m_ft_in22k_in1k) |90.054|99.042|305.08 |448 |
325
+ | eva02_large_patch14_448.mim_in22k_ft_in22k_in1k |89.946|99.01 |305.08 |448 |
326
+ | eva_giant_patch14_560.m30m_ft_in22k_in1k |89.792|98.992|1014.45 |560 |
327
+ | eva02_large_patch14_448.mim_in22k_ft_in1k |89.626|98.954|305.08 |448 |
328
+ | eva02_large_patch14_448.mim_m38m_ft_in1k |89.57 |98.918|305.08 |448 |
329
+ | eva_giant_patch14_336.m30m_ft_in22k_in1k |89.56 |98.956|1013.01 |336 |
330
+ | eva_giant_patch14_336.clip_ft_in1k |89.466|98.82 |1013.01 |336 |
331
+ | eva_large_patch14_336.in22k_ft_in22k_in1k |89.214|98.854|304.53 |336 |
332
+ | eva_giant_patch14_224.clip_ft_in1k |88.882|98.678|1012.56 |224 |
333
+ | eva02_base_patch14_448.mim_in22k_ft_in22k_in1k |88.692|98.722|87.12 |448 |
334
+ | eva_large_patch14_336.in22k_ft_in1k |88.652|98.722|304.53 |336 |
335
+ | eva_large_patch14_196.in22k_ft_in22k_in1k |88.592|98.656|304.14 |196 |
336
+ | eva02_base_patch14_448.mim_in22k_ft_in1k |88.23 |98.564|87.12 |448 |
337
+ | eva_large_patch14_196.in22k_ft_in1k |87.934|98.504|304.14 |196 |
338
+ | eva02_small_patch14_336.mim_in22k_ft_in1k |85.74 |97.614|22.13 |336 |
339
+ | eva02_tiny_patch14_336.mim_in22k_ft_in1k |80.658|95.524|5.76 |336 |
340
+
341
+ * Multi-weight and HF hub for DeiT and MLP-Mixer based models
342
+
343
+ ### March 22, 2023
344
+ * More weights pushed to HF hub along with multi-weight support, including: `regnet.py`, `rexnet.py`, `byobnet.py`, `resnetv2.py`, `swin_transformer.py`, `swin_transformer_v2.py`, `swin_transformer_v2_cr.py`
345
+ * Swin Transformer models support feature extraction (NCHW feat maps for `swinv2_cr_*`, and NHWC for all others) and spatial embedding outputs.
346
+ * FocalNet (from https://github.com/microsoft/FocalNet) models and weights added with significant refactoring, feature extraction, no fixed resolution / sizing constraint
347
+ * RegNet weights increased with HF hub push, SWAG, SEER, and torchvision v2 weights. SEER is pretty poor wrt performance for model size, but possibly useful.
348
+ * More ImageNet-12k pretrained and 1k fine-tuned `timm` weights:
349
+ * `rexnetr_200.sw_in12k_ft_in1k` - 82.6 @ 224, 83.2 @ 288
350
+ * `rexnetr_300.sw_in12k_ft_in1k` - 84.0 @ 224, 84.5 @ 288
351
+ * `regnety_120.sw_in12k_ft_in1k` - 85.0 @ 224, 85.4 @ 288
352
+ * `regnety_160.lion_in12k_ft_in1k` - 85.6 @ 224, 86.0 @ 288
353
+ * `regnety_160.sw_in12k_ft_in1k` - 85.6 @ 224, 86.0 @ 288 (compare to SWAG PT + 1k FT this is same BUT much lower res, blows SEER FT away)
354
+ * Model name deprecation + remapping functionality added (a milestone for bringing 0.8.x out of pre-release). Mappings being added...
355
+ * Minor bug fixes and improvements.
356
+
357
+ ### Feb 26, 2023
358
+ * Add ConvNeXt-XXLarge CLIP pretrained image tower weights for fine-tune & features (fine-tuning TBD) -- see [model card](https://huggingface.co/laion/CLIP-convnext_xxlarge-laion2B-s34B-b82K-augreg-soup)
359
+ * Update `convnext_xxlarge` default LayerNorm eps to 1e-5 (for CLIP weights, improved stability)
360
+ * 0.8.15dev0
361
+
362
+ ### Feb 20, 2023
363
+ * Add 320x320 `convnext_large_mlp.clip_laion2b_ft_320` and `convnext_large_mlp.clip_laion2b_ft_soup_320` CLIP image tower weights for features & fine-tune
364
+ * 0.8.13dev0 pypi release for latest changes w/ move to huggingface org
365
+
366
+ ### Feb 16, 2023
367
+ * `safetensor` checkpoint support added
368
+ * Add ideas from 'Scaling Vision Transformers to 22 B Params' (https://arxiv.org/abs/2302.05442) -- qk norm, RmsNorm, parallel block
369
+ * Add F.scaled_dot_product_attention support (PyTorch 2.0 only) to `vit_*`, `vit_relpos*`, `coatnet` / `maxxvit` (to start)
370
+ * Lion optimizer (w/ multi-tensor option) added (https://arxiv.org/abs/2302.06675)
371
+ * gradient checkpointing works with `features_only=True`
372
+
373
+ ### Feb 7, 2023
374
+ * New inference benchmark numbers added in [results](results/) folder.
375
+ * Add convnext LAION CLIP trained weights and initial set of in1k fine-tunes
376
+ * `convnext_base.clip_laion2b_augreg_ft_in1k` - 86.2% @ 256x256
377
+ * `convnext_base.clip_laiona_augreg_ft_in1k_384` - 86.5% @ 384x384
378
+ * `convnext_large_mlp.clip_laion2b_augreg_ft_in1k` - 87.3% @ 256x256
379
+ * `convnext_large_mlp.clip_laion2b_augreg_ft_in1k_384` - 87.9% @ 384x384
380
+ * Add DaViT models. Supports `features_only=True`. Adapted from https://github.com/dingmyu/davit by [Fredo](https://github.com/fffffgggg54).
381
+ * Use a common NormMlpClassifierHead across MaxViT, ConvNeXt, DaViT
382
+ * Add EfficientFormer-V2 model, update EfficientFormer, and refactor LeViT (closely related architectures). Weights on HF hub.
383
+ * New EfficientFormer-V2 arch, significant refactor from original at (https://github.com/snap-research/EfficientFormer). Supports `features_only=True`.
384
+ * Minor updates to EfficientFormer.
385
+ * Refactor LeViT models to stages, add `features_only=True` support to new `conv` variants, weight remap required.
386
+ * Move ImageNet meta-data (synsets, indices) from `/results` to [`timm/data/_info`](timm/data/_info/).
387
+ * Add ImageNetInfo / DatasetInfo classes to provide labelling for various ImageNet classifier layouts in `timm`
388
+ * Update `inference.py` to use, try: `python inference.py --data-dir /folder/to/images --model convnext_small.in12k --label-type detail --topk 5`
389
+ * Ready for 0.8.10 pypi pre-release (final testing).
390
+
391
+ ### Jan 20, 2023
392
+ * Add two convnext 12k -> 1k fine-tunes at 384x384
393
+ * `convnext_tiny.in12k_ft_in1k_384` - 85.1 @ 384
394
+ * `convnext_small.in12k_ft_in1k_384` - 86.2 @ 384
395
+
396
+ * Push all MaxxViT weights to HF hub, and add new ImageNet-12k -> 1k fine-tunes for `rw` base MaxViT and CoAtNet 1/2 models
397
+
398
+ |model |top1 |top5 |samples / sec |Params (M) |GMAC |Act (M)|
399
+ |------------------------------------------------------------------------------------------------------------------------|----:|----:|--------------:|--------------:|-----:|------:|
400
+ |[maxvit_xlarge_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) |88.53|98.64| 21.76| 475.77|534.14|1413.22|
401
+ |[maxvit_xlarge_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) |88.32|98.54| 42.53| 475.32|292.78| 668.76|
402
+ |[maxvit_base_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) |88.20|98.53| 50.87| 119.88|138.02| 703.99|
403
+ |[maxvit_large_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) |88.04|98.40| 36.42| 212.33|244.75| 942.15|
404
+ |[maxvit_large_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) |87.98|98.56| 71.75| 212.03|132.55| 445.84|
405
+ |[maxvit_base_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) |87.92|98.54| 104.71| 119.65| 73.80| 332.90|
406
+ |[maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.81|98.37| 106.55| 116.14| 70.97| 318.95|
407
+ |[maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.47|98.37| 149.49| 116.09| 72.98| 213.74|
408
+ |[coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k) |87.39|98.31| 160.80| 73.88| 47.69| 209.43|
409
+ |[maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.89|98.02| 375.86| 116.14| 23.15| 92.64|
410
+ |[maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.64|98.02| 501.03| 116.09| 24.20| 62.77|
411
+ |[maxvit_base_tf_512.in1k](https://huggingface.co/timm/maxvit_base_tf_512.in1k) |86.60|97.92| 50.75| 119.88|138.02| 703.99|
412
+ |[coatnet_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_2_rw_224.sw_in12k_ft_in1k) |86.57|97.89| 631.88| 73.87| 15.09| 49.22|
413
+ |[maxvit_large_tf_512.in1k](https://huggingface.co/timm/maxvit_large_tf_512.in1k) |86.52|97.88| 36.04| 212.33|244.75| 942.15|
414
+ |[coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k) |86.49|97.90| 620.58| 73.88| 15.18| 54.78|
415
+ |[maxvit_base_tf_384.in1k](https://huggingface.co/timm/maxvit_base_tf_384.in1k) |86.29|97.80| 101.09| 119.65| 73.80| 332.90|
416
+ |[maxvit_large_tf_384.in1k](https://huggingface.co/timm/maxvit_large_tf_384.in1k) |86.23|97.69| 70.56| 212.03|132.55| 445.84|
417
+ |[maxvit_small_tf_512.in1k](https://huggingface.co/timm/maxvit_small_tf_512.in1k) |86.10|97.76| 88.63| 69.13| 67.26| 383.77|
418
+ |[maxvit_tiny_tf_512.in1k](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) |85.67|97.58| 144.25| 31.05| 33.49| 257.59|
419
+ |[maxvit_small_tf_384.in1k](https://huggingface.co/timm/maxvit_small_tf_384.in1k) |85.54|97.46| 188.35| 69.02| 35.87| 183.65|
420
+ |[maxvit_tiny_tf_384.in1k](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) |85.11|97.38| 293.46| 30.98| 17.53| 123.42|
421
+ |[maxvit_large_tf_224.in1k](https://huggingface.co/timm/maxvit_large_tf_224.in1k) |84.93|96.97| 247.71| 211.79| 43.68| 127.35|
422
+ |[coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k) |84.90|96.96| 1025.45| 41.72| 8.11| 40.13|
423
+ |[maxvit_base_tf_224.in1k](https://huggingface.co/timm/maxvit_base_tf_224.in1k) |84.85|96.99| 358.25| 119.47| 24.04| 95.01|
424
+ |[maxxvit_rmlp_small_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_small_rw_256.sw_in1k) |84.63|97.06| 575.53| 66.01| 14.67| 58.38|
425
+ |[coatnet_rmlp_2_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in1k) |84.61|96.74| 625.81| 73.88| 15.18| 54.78|
426
+ |[maxvit_rmlp_small_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_small_rw_224.sw_in1k) |84.49|96.76| 693.82| 64.90| 10.75| 49.30|
427
+ |[maxvit_small_tf_224.in1k](https://huggingface.co/timm/maxvit_small_tf_224.in1k) |84.43|96.83| 647.96| 68.93| 11.66| 53.17|
428
+ |[maxvit_rmlp_tiny_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_tiny_rw_256.sw_in1k) |84.23|96.78| 807.21| 29.15| 6.77| 46.92|
429
+ |[coatnet_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_1_rw_224.sw_in1k) |83.62|96.38| 989.59| 41.72| 8.04| 34.60|
430
+ |[maxvit_tiny_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_tiny_rw_224.sw_in1k) |83.50|96.50| 1100.53| 29.06| 5.11| 33.11|
431
+ |[maxvit_tiny_tf_224.in1k](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) |83.41|96.59| 1004.94| 30.92| 5.60| 35.78|
432
+ |[coatnet_rmlp_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw_224.sw_in1k) |83.36|96.45| 1093.03| 41.69| 7.85| 35.47|
433
+ |[maxxvitv2_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvitv2_nano_rw_256.sw_in1k) |83.11|96.33| 1276.88| 23.70| 6.26| 23.05|
434
+ |[maxxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_nano_rw_256.sw_in1k) |83.03|96.34| 1341.24| 16.78| 4.37| 26.05|
435
+ |[maxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_nano_rw_256.sw_in1k) |82.96|96.26| 1283.24| 15.50| 4.47| 31.92|
436
+ |[maxvit_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_nano_rw_256.sw_in1k) |82.93|96.23| 1218.17| 15.45| 4.46| 30.28|
437
+ |[coatnet_bn_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_bn_0_rw_224.sw_in1k) |82.39|96.19| 1600.14| 27.44| 4.67| 22.04|
438
+ |[coatnet_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_0_rw_224.sw_in1k) |82.39|95.84| 1831.21| 27.44| 4.43| 18.73|
439
+ |[coatnet_rmlp_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_nano_rw_224.sw_in1k) |82.05|95.87| 2109.09| 15.15| 2.62| 20.34|
440
+ |[coatnext_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnext_nano_rw_224.sw_in1k) |81.95|95.92| 2525.52| 14.70| 2.47| 12.80|
441
+ |[coatnet_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_nano_rw_224.sw_in1k) |81.70|95.64| 2344.52| 15.14| 2.41| 15.41|
442
+ |[maxvit_rmlp_pico_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_pico_rw_256.sw_in1k) |80.53|95.21| 1594.71| 7.52| 1.85| 24.86|
443
+
444
+ ### Jan 11, 2023
445
+ * Update ConvNeXt ImageNet-12k pretrain series w/ two new fine-tuned weights (and pre FT `.in12k` tags)
446
+ * `convnext_nano.in12k_ft_in1k` - 82.3 @ 224, 82.9 @ 288 (previously released)
447
+ * `convnext_tiny.in12k_ft_in1k` - 84.2 @ 224, 84.5 @ 288
448
+ * `convnext_small.in12k_ft_in1k` - 85.2 @ 224, 85.3 @ 288
449
+
450
+ ### Jan 6, 2023
451
+ * Finally got around to adding `--model-kwargs` and `--opt-kwargs` to scripts to pass through rare args directly to model classes from cmd line
452
+ * `train.py --data-dir /imagenet --model resnet50 --amp --model-kwargs output_stride=16 act_layer=silu`
453
+ * `train.py --data-dir /imagenet --model vit_base_patch16_clip_224 --img-size 240 --amp --model-kwargs img_size=240 patch_size=12`
454
+ * Cleanup some popular models to better support arg passthrough / merge with model configs, more to go.
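+
+ * The same passthrough works directly in Python; a rough equivalent of the first cmd above:
+
+ ```python
+ import timm
+
+ # rough equivalent of `--model-kwargs output_stride=16 act_layer=silu`
+ model = timm.create_model('resnet50', output_stride=16, act_layer='silu')
+ ```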
455
+
456
+ ### Jan 5, 2023
457
+ * ConvNeXt-V2 models and weights added to existing `convnext.py`
458
+ * Paper: [ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders](http://arxiv.org/abs/2301.00808)
459
+ * Reference impl: https://github.com/facebookresearch/ConvNeXt-V2 (NOTE: weights currently CC-BY-NC)
460
461
+ ### Dec 23, 2022 🎄☃
462
+ * Add FlexiViT models and weights from https://github.com/google-research/big_vision (check out paper at https://arxiv.org/abs/2212.08013)
463
+ * NOTE currently resizing is static on model creation, on-the-fly dynamic / train patch size sampling is a WIP
464
+ * Many more models updated to multi-weight and downloadable via HF hub now (convnext, efficientnet, mobilenet, vision_transformer*, beit)
465
+ * More model pretrained tags and adjustments, some model names changed (working on deprecation translations, consider main branch DEV branch right now, use 0.6.x for stable use)
466
+ * More ImageNet-12k (subset of 22k) pretrain models popping up:
467
+ * `efficientnet_b5.in12k_ft_in1k` - 85.9 @ 448x448
468
+ * `vit_medium_patch16_gap_384.in12k_ft_in1k` - 85.5 @ 384x384
469
+ * `vit_medium_patch16_gap_256.in12k_ft_in1k` - 84.5 @ 256x256
470
+ * `convnext_nano.in12k_ft_in1k` - 82.9 @ 288x288
471
+
472
+ ### Dec 8, 2022
473
+ * Add 'EVA l' to `vision_transformer.py`, MAE style ViT-L/14 MIM pretrain w/ EVA-CLIP targets, FT on ImageNet-1k (w/ ImageNet-22k intermediate for some)
474
+ * original source: https://github.com/baaivision/EVA
475
+
476
+ | model | top1 | param_count | gmac | macts | hub |
477
+ |:------------------------------------------|-----:|------------:|------:|------:|:----------------------------------------|
478
+ | eva_large_patch14_336.in22k_ft_in22k_in1k | 89.2 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/BAAI/EVA) |
479
+ | eva_large_patch14_336.in22k_ft_in1k | 88.7 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/BAAI/EVA) |
480
+ | eva_large_patch14_196.in22k_ft_in22k_in1k | 88.6 | 304.1 | 61.6 | 63.5 | [link](https://huggingface.co/BAAI/EVA) |
481
+ | eva_large_patch14_196.in22k_ft_in1k | 87.9 | 304.1 | 61.6 | 63.5 | [link](https://huggingface.co/BAAI/EVA) |
482
+
483
+ ### Dec 6, 2022
484
+ * Add 'EVA g', BEiT style ViT-g/14 model weights w/ both MIM pretrain and CLIP pretrain to `beit.py`.
485
+ * original source: https://github.com/baaivision/EVA
486
+ * paper: https://arxiv.org/abs/2211.07636
487
+
488
+ | model | top1 | param_count | gmac | macts | hub |
489
+ |:-----------------------------------------|-------:|--------------:|-------:|--------:|:----------------------------------------|
490
+ | eva_giant_patch14_560.m30m_ft_in22k_in1k | 89.8 | 1014.4 | 1906.8 | 2577.2 | [link](https://huggingface.co/BAAI/EVA) |
491
+ | eva_giant_patch14_336.m30m_ft_in22k_in1k | 89.6 | 1013 | 620.6 | 550.7 | [link](https://huggingface.co/BAAI/EVA) |
492
+ | eva_giant_patch14_336.clip_ft_in1k | 89.4 | 1013 | 620.6 | 550.7 | [link](https://huggingface.co/BAAI/EVA) |
493
+ | eva_giant_patch14_224.clip_ft_in1k | 89.1 | 1012.6 | 267.2 | 192.6 | [link](https://huggingface.co/BAAI/EVA) |
494
+
495
+ ### Dec 5, 2022
496
+
497
+ * Pre-release (`0.8.0dev0`) of multi-weight support (`model_arch.pretrained_tag`). Install with `pip install --pre timm`
498
+ * vision_transformer, maxvit, convnext are the first three model impl w/ support
499
+ * model names are changing with this (previous _21k, etc. fn will merge), still sorting out deprecation handling
500
+ * bugs are likely, but I need feedback so please try it out
501
+ * if stability is needed, please use 0.6.x pypi releases or clone from [0.6.x branch](https://github.com/rwightman/pytorch-image-models/tree/0.6.x)
502
+ * Support for PyTorch 2.0 compile is added in train/validate/inference/benchmark, use `--torchcompile` argument
503
+ * Inference script allows more control over output, select k for top-class index + prob json, csv or parquet output
504
+ * Add a full set of fine-tuned CLIP image tower weights from both LAION-2B and original OpenAI CLIP models
505
+
506
+ | model | top1 | param_count | gmac | macts | hub |
507
+ |:-------------------------------------------------|-------:|--------------:|-------:|--------:|:-------------------------------------------------------------------------------------|
508
+ | vit_huge_patch14_clip_336.laion2b_ft_in12k_in1k | 88.6 | 632.5 | 391 | 407.5 | [link](https://huggingface.co/timm/vit_huge_patch14_clip_336.laion2b_ft_in12k_in1k) |
509
+ | vit_large_patch14_clip_336.openai_ft_in12k_in1k | 88.3 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/timm/vit_large_patch14_clip_336.openai_ft_in12k_in1k) |
510
+ | vit_huge_patch14_clip_224.laion2b_ft_in12k_in1k | 88.2 | 632 | 167.4 | 139.4 | [link](https://huggingface.co/timm/vit_huge_patch14_clip_224.laion2b_ft_in12k_in1k) |
511
+ | vit_large_patch14_clip_336.laion2b_ft_in12k_in1k | 88.2 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/timm/vit_large_patch14_clip_336.laion2b_ft_in12k_in1k) |
512
+ | vit_large_patch14_clip_224.openai_ft_in12k_in1k | 88.2 | 304.2 | 81.1 | 88.8 | [link](https://huggingface.co/timm/vit_large_patch14_clip_224.openai_ft_in12k_in1k) |
513
+ | vit_large_patch14_clip_224.laion2b_ft_in12k_in1k | 87.9 | 304.2 | 81.1 | 88.8 | [link](https://huggingface.co/timm/vit_large_patch14_clip_224.laion2b_ft_in12k_in1k) |
514
+ | vit_large_patch14_clip_224.openai_ft_in1k | 87.9 | 304.2 | 81.1 | 88.8 | [link](https://huggingface.co/timm/vit_large_patch14_clip_224.openai_ft_in1k) |
515
+ | vit_large_patch14_clip_336.laion2b_ft_in1k | 87.9 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/timm/vit_large_patch14_clip_336.laion2b_ft_in1k) |
516
+ | vit_huge_patch14_clip_224.laion2b_ft_in1k | 87.6 | 632 | 167.4 | 139.4 | [link](https://huggingface.co/timm/vit_huge_patch14_clip_224.laion2b_ft_in1k) |
517
+ | vit_large_patch14_clip_224.laion2b_ft_in1k | 87.3 | 304.2 | 81.1 | 88.8 | [link](https://huggingface.co/timm/vit_large_patch14_clip_224.laion2b_ft_in1k) |
518
+ | vit_base_patch16_clip_384.laion2b_ft_in12k_in1k | 87.2 | 86.9 | 55.5 | 101.6 | [link](https://huggingface.co/timm/vit_base_patch16_clip_384.laion2b_ft_in12k_in1k) |
519
+ | vit_base_patch16_clip_384.openai_ft_in12k_in1k | 87 | 86.9 | 55.5 | 101.6 | [link](https://huggingface.co/timm/vit_base_patch16_clip_384.openai_ft_in12k_in1k) |
520
+ | vit_base_patch16_clip_384.laion2b_ft_in1k | 86.6 | 86.9 | 55.5 | 101.6 | [link](https://huggingface.co/timm/vit_base_patch16_clip_384.laion2b_ft_in1k) |
521
+ | vit_base_patch16_clip_384.openai_ft_in1k | 86.2 | 86.9 | 55.5 | 101.6 | [link](https://huggingface.co/timm/vit_base_patch16_clip_384.openai_ft_in1k) |
522
+ | vit_base_patch16_clip_224.laion2b_ft_in12k_in1k | 86.2 | 86.6 | 17.6 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch16_clip_224.laion2b_ft_in12k_in1k) |
523
+ | vit_base_patch16_clip_224.openai_ft_in12k_in1k | 85.9 | 86.6 | 17.6 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch16_clip_224.openai_ft_in12k_in1k) |
524
+ | vit_base_patch32_clip_448.laion2b_ft_in12k_in1k | 85.8 | 88.3 | 17.9 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch32_clip_448.laion2b_ft_in12k_in1k) |
525
+ | vit_base_patch16_clip_224.laion2b_ft_in1k | 85.5 | 86.6 | 17.6 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch16_clip_224.laion2b_ft_in1k) |
526
+ | vit_base_patch32_clip_384.laion2b_ft_in12k_in1k | 85.4 | 88.3 | 13.1 | 16.5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_384.laion2b_ft_in12k_in1k) |
527
+ | vit_base_patch16_clip_224.openai_ft_in1k | 85.3 | 86.6 | 17.6 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch16_clip_224.openai_ft_in1k) |
528
+ | vit_base_patch32_clip_384.openai_ft_in12k_in1k | 85.2 | 88.3 | 13.1 | 16.5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_384.openai_ft_in12k_in1k) |
529
+ | vit_base_patch32_clip_224.laion2b_ft_in12k_in1k | 83.3 | 88.2 | 4.4 | 5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_224.laion2b_ft_in12k_in1k) |
530
+ | vit_base_patch32_clip_224.laion2b_ft_in1k | 82.6 | 88.2 | 4.4 | 5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_224.laion2b_ft_in1k) |
531
+ | vit_base_patch32_clip_224.openai_ft_in1k | 81.9 | 88.2 | 4.4 | 5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_224.openai_ft_in1k) |
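+
+ * Any row above can be selected by its full `model_arch.pretrained_tag` name, e.g.:
+
+ ```python
+ import timm
+
+ # pick a specific pretrained tag; omitting the tag selects the default weights for the arch
+ model = timm.create_model('vit_base_patch16_clip_384.laion2b_ft_in1k', pretrained=True)
+ ```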
532
+
533
+ * Port of MaxViT Tensorflow Weights from official impl at https://github.com/google-research/maxvit
534
+ * There were larger than expected drops for the upscaled 384/512 in21k fine-tune weights, possibly a detail missing, but the 21k FT weights did seem sensitive to small preprocessing differences
535
+
536
+ | model | top1 | param_count | gmac | macts | hub |
537
+ |:-----------------------------------|-------:|--------------:|-------:|--------:|:-----------------------------------------------------------------------|
538
+ | maxvit_xlarge_tf_512.in21k_ft_in1k | 88.5 | 475.8 | 534.1 | 1413.2 | [link](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) |
539
+ | maxvit_xlarge_tf_384.in21k_ft_in1k | 88.3 | 475.3 | 292.8 | 668.8 | [link](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) |
540
+ | maxvit_base_tf_512.in21k_ft_in1k | 88.2 | 119.9 | 138 | 704 | [link](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) |
541
+ | maxvit_large_tf_512.in21k_ft_in1k | 88 | 212.3 | 244.8 | 942.2 | [link](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) |
542
+ | maxvit_large_tf_384.in21k_ft_in1k | 88 | 212 | 132.6 | 445.8 | [link](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) |
543
+ | maxvit_base_tf_384.in21k_ft_in1k | 87.9 | 119.6 | 73.8 | 332.9 | [link](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) |
544
+ | maxvit_base_tf_512.in1k | 86.6 | 119.9 | 138 | 704 | [link](https://huggingface.co/timm/maxvit_base_tf_512.in1k) |
545
+ | maxvit_large_tf_512.in1k | 86.5 | 212.3 | 244.8 | 942.2 | [link](https://huggingface.co/timm/maxvit_large_tf_512.in1k) |
546
+ | maxvit_base_tf_384.in1k | 86.3 | 119.6 | 73.8 | 332.9 | [link](https://huggingface.co/timm/maxvit_base_tf_384.in1k) |
547
+ | maxvit_large_tf_384.in1k | 86.2 | 212 | 132.6 | 445.8 | [link](https://huggingface.co/timm/maxvit_large_tf_384.in1k) |
548
+ | maxvit_small_tf_512.in1k | 86.1 | 69.1 | 67.3 | 383.8 | [link](https://huggingface.co/timm/maxvit_small_tf_512.in1k) |
549
+ | maxvit_tiny_tf_512.in1k | 85.7 | 31 | 33.5 | 257.6 | [link](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) |
550
+ | maxvit_small_tf_384.in1k | 85.5 | 69 | 35.9 | 183.6 | [link](https://huggingface.co/timm/maxvit_small_tf_384.in1k) |
551
+ | maxvit_tiny_tf_384.in1k | 85.1 | 31 | 17.5 | 123.4 | [link](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) |
552
+ | maxvit_large_tf_224.in1k | 84.9 | 211.8 | 43.7 | 127.4 | [link](https://huggingface.co/timm/maxvit_large_tf_224.in1k) |
553
+ | maxvit_base_tf_224.in1k | 84.9 | 119.5 | 24 | 95 | [link](https://huggingface.co/timm/maxvit_base_tf_224.in1k) |
554
+ | maxvit_small_tf_224.in1k | 84.4 | 68.9 | 11.7 | 53.2 | [link](https://huggingface.co/timm/maxvit_small_tf_224.in1k) |
555
+ | maxvit_tiny_tf_224.in1k | 83.4 | 30.9 | 5.6 | 35.8 | [link](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) |
556
+
557
+ ### Oct 15, 2022
558
+ * Train and validation script enhancements
559
+ * Non-GPU (ie CPU) device support
560
+ * SLURM compatibility for train script
561
+ * HF datasets support (via ReaderHfds)
562
+ * TFDS/WDS dataloading improvements (sample padding/wrap for distributed use fixed wrt sample count estimate)
563
+ * in_chans !=3 support for scripts / loader
564
+ * Adan optimizer
565
+ * Can enable per-step LR scheduling via args
566
+ * Dataset 'parsers' renamed to 'readers', more descriptive of purpose
567
+ * AMP args changed, APEX via `--amp-impl apex`, bfloat16 supported via `--amp-dtype bfloat16`
568
+ * main branch switched to 0.7.x version, 0.6.x forked for stable release of weight-only adds
569
+ * master -> main branch rename
570
+
571
+ ### Oct 10, 2022
572
+ * More weights in `maxxvit` series, incl first ConvNeXt block based `coatnext` and `maxxvit` experiments:
573
+ * `coatnext_nano_rw_224` - 82.0 @ 224 (G) -- (uses ConvNeXt conv block, no BatchNorm)
574
+ * `maxxvit_rmlp_nano_rw_256` - 83.0 @ 256, 83.7 @ 320 (G) (uses ConvNeXt conv block, no BN)
575
+ * `maxvit_rmlp_small_rw_224` - 84.5 @ 224, 85.1 @ 320 (G)
576
+ * `maxxvit_rmlp_small_rw_256` - 84.6 @ 256, 84.9 @ 288 (G) -- could be trained better, hparams need tuning (uses ConvNeXt block, no BN)
577
+ * `coatnet_rmlp_2_rw_224` - 84.6 @ 224, 85 @ 320 (T)
578
+ * NOTE: official MaxVit weights (in1k) have been released at https://github.com/google-research/maxvit -- some extra work is needed to port and adapt since my impl was created independently of theirs and has a few small differences + the whole TF same padding fun.
579
+
580
+ ### Sept 23, 2022
581
+ * LAION-2B CLIP image towers supported as pretrained backbones for fine-tune or features (no classifier)
582
+ * vit_base_patch32_224_clip_laion2b
583
+ * vit_large_patch14_224_clip_laion2b
584
+ * vit_huge_patch14_224_clip_laion2b
585
+ * vit_giant_patch14_224_clip_laion2b
586
+
587
+ ### Sept 7, 2022
588
+ * Hugging Face [`timm` docs](https://huggingface.co/docs/hub/timm) home now exists, look for more here in the future
589
+ * Add BEiT-v2 weights for base and large 224x224 models from https://github.com/microsoft/unilm/tree/master/beit2
590
+ * Add more weights in `maxxvit` series incl a `pico` (7.5M params, 1.9 GMACs), two `tiny` variants:
591
+ * `maxvit_rmlp_pico_rw_256` - 80.5 @ 256, 81.3 @ 320 (T)
592
+ * `maxvit_tiny_rw_224` - 83.5 @ 224 (G)
593
+ * `maxvit_rmlp_tiny_rw_256` - 84.2 @ 256, 84.8 @ 320 (T)
594
+
595
+ ### Aug 29, 2022
596
+ * MaxVit window size scales with img_size by default. Add new RelPosMlp MaxViT weight that leverages this:
597
+ * `maxvit_rmlp_nano_rw_256` - 83.0 @ 256, 83.6 @ 320 (T)
598
+
599
+ ### Aug 26, 2022
600
+ * CoAtNet (https://arxiv.org/abs/2106.04803) and MaxVit (https://arxiv.org/abs/2204.01697) `timm` original models
601
+ * both found in [`maxxvit.py`](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/maxxvit.py) model def, contains numerous experiments outside scope of original papers
602
+ * an unfinished Tensorflow version from MaxVit authors can be found https://github.com/google-research/maxvit
603
+ * Initial CoAtNet and MaxVit timm pretrained weights (working on more):
604
+ * `coatnet_nano_rw_224` - 81.7 @ 224 (T)
605
+ * `coatnet_rmlp_nano_rw_224` - 82.0 @ 224, 82.8 @ 320 (T)
606
+ * `coatnet_0_rw_224` - 82.4 (T) -- NOTE timm '0' coatnets have 2 more 3rd stage blocks
607
+ * `coatnet_bn_0_rw_224` - 82.4 (T)
608
+ * `maxvit_nano_rw_256` - 82.9 @ 256 (T)
609
+ * `coatnet_rmlp_1_rw_224` - 83.4 @ 224, 84 @ 320 (T)
610
+ * `coatnet_1_rw_224` - 83.6 @ 224 (G)
611
+ * (T) = TPU trained with `bits_and_tpu` branch training code, (G) = GPU trained
612
+ * GCVit (weights adapted from https://github.com/NVlabs/GCVit, code 100% `timm` re-write for license purposes)
613
+ * MViT-V2 (multi-scale vit, adapted from https://github.com/facebookresearch/mvit)
614
+ * EfficientFormer (adapted from https://github.com/snap-research/EfficientFormer)
615
+ * PyramidVisionTransformer-V2 (adapted from https://github.com/whai362/PVT)
616
+ * 'Fast Norm' support for LayerNorm and GroupNorm that avoids float32 upcast w/ AMP (uses APEX LN if available for further boost)
617
+
618
+ ### Aug 15, 2022
619
+ * ConvNeXt atto weights added
620
+ * `convnext_atto` - 75.7 @ 224, 77.0 @ 288
621
+ * `convnext_atto_ols` - 75.9 @ 224, 77.2 @ 288
622
+
623
+ ### Aug 5, 2022
624
+ * More custom ConvNeXt smaller model defs with weights
625
+ * `convnext_femto` - 77.5 @ 224, 78.7 @ 288
626
+ * `convnext_femto_ols` - 77.9 @ 224, 78.9 @ 288
627
+ * `convnext_pico` - 79.5 @ 224, 80.4 @ 288
628
+ * `convnext_pico_ols` - 79.5 @ 224, 80.5 @ 288
629
+ * `convnext_nano_ols` - 80.9 @ 224, 81.6 @ 288
630
+ * Updated EdgeNeXt to improve ONNX export, add new base variant and weights from original (https://github.com/mmaaz60/EdgeNeXt)
631
+
632
+ ### July 28, 2022
633
+ * Add freshly minted DeiT-III Medium (width=512, depth=12, num_heads=8) model weights. Thanks [Hugo Touvron](https://github.com/TouvronHugo)!
634
+
635
+ ### July 27, 2022
636
+ * All runtime benchmark and validation result csv files are finally up-to-date!
637
+ * A few more weights & model defs added:
638
+ * `darknetaa53` - 79.8 @ 256, 80.5 @ 288
639
+ * `convnext_nano` - 80.8 @ 224, 81.5 @ 288
640
+ * `cs3sedarknet_l` - 81.2 @ 256, 81.8 @ 288
641
+ * `cs3darknet_x` - 81.8 @ 256, 82.2 @ 288
642
+ * `cs3sedarknet_x` - 82.2 @ 256, 82.7 @ 288
643
+ * `cs3edgenet_x` - 82.2 @ 256, 82.7 @ 288
644
+ * `cs3se_edgenet_x` - 82.8 @ 256, 83.5 @ 320
645
+ * `cs3*` weights above all trained on TPU w/ `bits_and_tpu` branch. Thanks to TRC program!
646
+ * Add output_stride=8 and 16 support to ConvNeXt (dilation)
647
+ * Fixed deit3 models not being able to resize pos_emb
648
+ * Version 0.6.7 PyPi release (w/ above bug fixes and new weights since 0.6.5)
649
+
650
+ ### July 8, 2022
651
+ More models, more fixes
652
+ * Official research models (w/ weights) added:
653
+ * EdgeNeXt from (https://github.com/mmaaz60/EdgeNeXt)
654
+ * MobileViT-V2 from (https://github.com/apple/ml-cvnets)
655
+ * DeiT III (Revenge of the ViT) from (https://github.com/facebookresearch/deit)
656
+ * My own models:
657
+ * Small `ResNet` defs added by request with 1 block repeats for both basic and bottleneck (resnet10 and resnet14)
658
+ * `CspNet` refactored with dataclass config, simplified CrossStage3 (`cs3`) option. These are closer to YOLO-v5+ backbone defs.
659
+ * More relative position vit fiddling. Two `srelpos` (shared relative position) models trained, and a medium w/ class token.
660
+ * Add an alternate downsample mode to EdgeNeXt and train a `small` model. Better than original small, but not their new USI trained weights.
661
+ * My own model weight results (all ImageNet-1k training)
662
+ * `resnet10t` - 66.5 @ 176, 68.3 @ 224
663
+ * `resnet14t` - 71.3 @ 176, 72.3 @ 224
664
+ * `resnetaa50` - 80.6 @ 224 , 81.6 @ 288
665
+ * `darknet53` - 80.0 @ 256, 80.5 @ 288
666
+ * `cs3darknet_m` - 77.0 @ 256, 77.6 @ 288
667
+ * `cs3darknet_focus_m` - 76.7 @ 256, 77.3 @ 288
668
+ * `cs3darknet_l` - 80.4 @ 256, 80.9 @ 288
669
+ * `cs3darknet_focus_l` - 80.3 @ 256, 80.9 @ 288
670
+ * `vit_srelpos_small_patch16_224` - 81.1 @ 224, 82.1 @ 320
671
+ * `vit_srelpos_medium_patch16_224` - 82.3 @ 224, 83.1 @ 320
672
+ * `vit_relpos_small_patch16_cls_224` - 82.6 @ 224, 83.6 @ 320
673
+ * `edgenext_small_rw` - 79.6 @ 224, 80.4 @ 320
674
+ * `cs3`, `darknet`, and `vit_*relpos` weights above all trained on TPU thanks to TRC program! Rest trained on overheating GPUs.
675
+ * Hugging Face Hub support fixes verified, demo notebook TBA
676
+ * Pretrained weights / configs can be loaded externally (ie from local disk) w/ support for head adaptation.
677
+ * Add support to change image extensions scanned by `timm` datasets/readers. See (https://github.com/rwightman/pytorch-image-models/pull/1274#issuecomment-1178303103)
678
+ * Default ConvNeXt LayerNorm impl to use `F.layer_norm(x.permute(0, 2, 3, 1), ...).permute(0, 3, 1, 2)` via `LayerNorm2d` in all cases.
679
+ * a bit slower than previous custom impl on some hardware (ie Ampere w/ CL), but overall fewer regressions across wider HW / PyTorch version ranges.
680
+ * previous impl exists as `LayerNormExp2d` in `models/layers/norm.py`
681
+ * Numerous bug fixes
682
+ * Currently testing for imminent PyPi 0.6.x release
683
+ * LeViT pretraining of larger models still a WIP, they don't train well / easily without distillation. Time to add distill support (finally)?
684
+ * ImageNet-22k weight training + finetune ongoing, work on multi-weight support (slowly) chugging along (there are a LOT of weights, sigh) ...
685
+
686
+ ### May 13, 2022
687
+ * Official Swin-V2 models and weights added from (https://github.com/microsoft/Swin-Transformer). Cleaned up to support torchscript.
688
+ * Some refactoring for existing `timm` Swin-V2-CR impl, will likely do a bit more to bring parts closer to official and decide whether to merge some aspects.
689
+ * More Vision Transformer relative position / residual post-norm experiments (all trained on TPU thanks to TRC program)
690
+ * `vit_relpos_small_patch16_224` - 81.5 @ 224, 82.5 @ 320 -- rel pos, layer scale, no class token, avg pool
691
+ * `vit_relpos_medium_patch16_rpn_224` - 82.3 @ 224, 83.1 @ 320 -- rel pos + res-post-norm, no class token, avg pool
692
+ * `vit_relpos_medium_patch16_224` - 82.5 @ 224, 83.3 @ 320 -- rel pos, layer scale, no class token, avg pool
693
+ * `vit_relpos_base_patch16_gapcls_224` - 82.8 @ 224, 83.9 @ 320 -- rel pos, layer scale, class token, avg pool (by mistake)
694
+ * Bring 512 dim, 8-head 'medium' ViT model variant back to life (after using in a pre DeiT 'small' model for first ViT impl back in 2020)
695
+ * Add ViT relative position support for switching btw existing impl and some additions in official Swin-V2 impl for future trials
696
+ * Sequencer2D impl (https://arxiv.org/abs/2205.01972), added via PR from author (https://github.com/okojoalg)
697
+
698
+ ### May 2, 2022
699
+ * Vision Transformer experiments adding Relative Position (Swin-V2 log-coord) (`vision_transformer_relpos.py`) and Residual Post-Norm branches (from Swin-V2) (`vision_transformer*.py`)
700
+ * `vit_relpos_base_patch32_plus_rpn_256` - 79.5 @ 256, 80.6 @ 320 -- rel pos + extended width + res-post-norm, no class token, avg pool
701
+ * `vit_relpos_base_patch16_224` - 82.5 @ 224, 83.6 @ 320 -- rel pos, layer scale, no class token, avg pool
702
+ * `vit_base_patch16_rpn_224` - 82.3 @ 224 -- rel pos + res-post-norm, no class token, avg pool
703
+ * Vision Transformer refactor to remove representation layer that was only used in initial vit and rarely used since with newer pretrain (ie `How to Train Your ViT`)
704
+ * `vit_*` models support removal of class token, use of global average pool, use of fc_norm (ala beit, mae).
705
+
706
+ ### April 22, 2022
707
+ * `timm` models are now officially supported in [fast.ai](https://www.fast.ai/)! Just in time for the new Practical Deep Learning course. `timmdocs` documentation link updated to [timm.fast.ai](http://timm.fast.ai/).
708
+ * Two more model weights added in the TPU trained [series](https://github.com/rwightman/pytorch-image-models/releases/tag/v0.1-tpu-weights). Some In22k pretrain still in progress.
709
+ * `seresnext101d_32x8d` - 83.69 @ 224, 84.35 @ 288
710
+ * `seresnextaa101d_32x8d` (anti-aliased w/ AvgPool2d) - 83.85 @ 224, 84.57 @ 288
711
+
712
+ ### March 23, 2022
713
+ * Add `ParallelBlock` and `LayerScale` option to base vit models to support model configs in [Three things everyone should know about ViT](https://arxiv.org/abs/2203.09795)
714
+ * `convnext_tiny_hnf` (head norm first) weights trained with (close to) A2 recipe, 82.2% top-1, could do better with more epochs.
715
+
716
+ ### March 21, 2022
717
+ * Merge `norm_norm_norm`. **IMPORTANT** this update for a coming 0.6.x release will likely de-stabilize the master branch for a while. Branch [`0.5.x`](https://github.com/rwightman/pytorch-image-models/tree/0.5.x) or a previous 0.5.x release can be used if stability is required.
718
+ * Significant weights update (all TPU trained) as described in this [release](https://github.com/rwightman/pytorch-image-models/releases/tag/v0.1-tpu-weights)
719
+ * `regnety_040` - 82.3 @ 224, 82.96 @ 288
720
+ * `regnety_064` - 83.0 @ 224, 83.65 @ 288
721
+ * `regnety_080` - 83.17 @ 224, 83.86 @ 288
722
+ * `regnetv_040` - 82.44 @ 224, 83.18 @ 288 (timm pre-act)
723
+ * `regnetv_064` - 83.1 @ 224, 83.71 @ 288 (timm pre-act)
724
+ * `regnetz_040` - 83.67 @ 256, 84.25 @ 320
725
+ * `regnetz_040h` - 83.77 @ 256, 84.5 @ 320 (w/ extra fc in head)
726
+ * `resnetv2_50d_gn` - 80.8 @ 224, 81.96 @ 288 (pre-act GroupNorm)
727
+ * `resnetv2_50d_evos` 80.77 @ 224, 82.04 @ 288 (pre-act EvoNormS)
728
+ * `regnetz_c16_evos` - 81.9 @ 256, 82.64 @ 320 (EvoNormS)
729
+ * `regnetz_d8_evos` - 83.42 @ 256, 84.04 @ 320 (EvoNormS)
730
+ * `xception41p` - 82 @ 299 (timm pre-act)
731
+ * `xception65` - 83.17 @ 299
732
+ * `xception65p` - 83.14 @ 299 (timm pre-act)
733
+ * `resnext101_64x4d` - 82.46 @ 224, 83.16 @ 288
734
+ * `seresnext101_32x8d` - 83.57 @ 224, 84.270 @ 288
735
+ * `resnetrs200` - 83.85 @ 256, 84.44 @ 320
736
+ * HuggingFace hub support fixed w/ initial groundwork for allowing alternative 'config sources' for pretrained model definitions and weights (generic local file / remote url support soon)
737
+ * SwinTransformer-V2 implementation added. Submitted by [Christoph Reich](https://github.com/ChristophReich1996). Training experiments and model changes by myself are ongoing so expect compat breaks.
738
+ * Swin-S3 (AutoFormerV2) models / weights added from https://github.com/microsoft/Cream/tree/main/AutoFormerV2
739
+ * MobileViT models w/ weights adapted from https://github.com/apple/ml-cvnets
740
+ * PoolFormer models w/ weights adapted from https://github.com/sail-sg/poolformer
741
+ * VOLO models w/ weights adapted from https://github.com/sail-sg/volo
742
+ * Significant work experimenting with non-BatchNorm norm layers such as EvoNorm, FilterResponseNorm, GroupNorm, etc
743
+ * Enhanced support for alternate norm + act ('NormAct') layers added to a number of models, esp EfficientNet/MobileNetV3, RegNet, and aligned Xception
744
+ * Grouped conv support added to EfficientNet family
745
+ * Add 'group matching' API to all models to allow grouping model parameters for application of 'layer-wise' LR decay, lr scale added to LR scheduler
746
+ * Gradient checkpointing support added to many models (a usage sketch follows this list)
747
+ * `forward_head(x, pre_logits=False)` fn added to all models to allow separate calls of `forward_features` + `forward_head`
748
+ * All vision transformer and vision MLP models updated to return non-pooled / non-token selected features from `forward_features`; for consistency with CNN models, token selection or pooling is now applied in `forward_head`
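+
+ A minimal sketch of the gradient checkpointing toggle mentioned above, assuming a model that implements `set_grad_checkpointing` (most recent `timm` models do):
+
+ ```py
+ >>> import timm
+ >>> model = timm.create_model('convnext_tiny', pretrained=True)
+ >>> model.set_grad_checkpointing(True)  # trade extra compute for lower activation memory during training
+ ```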
749
+
750
+ ### Feb 2, 2022
751
+ * [Chris Hughes](https://github.com/Chris-hughes10) posted an exhaustive run through of `timm` on his blog yesterday. Well worth a read. [Getting Started with PyTorch Image Models (timm): A Practitioner’s Guide](https://towardsdatascience.com/getting-started-with-pytorch-image-models-timm-a-practitioners-guide-4e77b4bf9055)
752
+ * I'm currently prepping to merge the `norm_norm_norm` branch back to master (ver 0.6.x) in the next week or so.
753
+ * The changes are more extensive than usual and may destabilize and break some model API use (aiming for full backwards compat). So, beware `pip install git+https://github.com/rwightman/pytorch-image-models` installs!
754
+ * `0.5.x` releases and a `0.5.x` branch will remain stable with a cherry pick or two until dust clears. Recommend sticking to pypi install for a bit if you want stable.
755
+
756
+ ### Jan 14, 2022
757
+ * Version 0.5.4 w/ release to be pushed to pypi. It's been a while since last pypi update and riskier changes will be merged to main branch soon....
758
+ * Add ConvNeXT models w/ weights from official impl (https://github.com/facebookresearch/ConvNeXt), a few perf tweaks, compatible with timm features
759
+ * Tried training a few small (~1.8-3M param) / mobile optimized models, a few are good so far, more on the way...
760
+ * `mnasnet_small` - 65.6 top-1
761
+ * `mobilenetv2_050` - 65.9
762
+ * `lcnet_100/075/050` - 72.1 / 68.8 / 63.1
763
+ * `semnasnet_075` - 73
764
+ * `fbnetv3_b/d/g` - 79.1 / 79.7 / 82.0
765
+ * TinyNet models added by [rsomani95](https://github.com/rsomani95)
766
+ * LCNet added via MobileNetV3 architecture
767
+
768
+ ### Jan 5, 2023
769
+ * ConvNeXt-V2 models and weights added to existing `convnext.py`
770
+ * Paper: [ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders](http://arxiv.org/abs/2301.00808)
771
+ * Reference impl: https://github.com/facebookresearch/ConvNeXt-V2 (NOTE: weights currently CC-BY-NC)
772
+
773
+ ### Dec 23, 2022 🎄☃
774
+ * Add FlexiViT models and weights from https://github.com/google-research/big_vision (check out paper at https://arxiv.org/abs/2212.08013)
775
+ * NOTE currently resizing is static on model creation, on-the-fly dynamic / train patch size sampling is a WIP
776
+ * Many more models updated to multi-weight and downloadable via HF hub now (convnext, efficientnet, mobilenet, vision_transformer*, beit)
777
+ * More model pretrained tags and adjustments, some model names changed (working on deprecation translations; consider the main branch a DEV branch right now, use 0.6.x for stable use)
778
+ * More ImageNet-12k (subset of 22k) pretrain models popping up (see the loading sketch after this list):
779
+ * `efficientnet_b5.in12k_ft_in1k` - 85.9 @ 448x448
780
+ * `vit_medium_patch16_gap_384.in12k_ft_in1k` - 85.5 @ 384x384
781
+ * `vit_medium_patch16_gap_256.in12k_ft_in1k` - 84.5 @ 256x256
782
+ * `convnext_nano.in12k_ft_in1k` - 82.9 @ 288x288
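+
+ A minimal sketch of loading one of the tagged weights above by its `model_name.pretrained_tag`, assuming a `timm` version with multi-weight support:
+
+ ```py
+ >>> import timm
+ >>> # the pretrained tag follows the architecture name after a '.'
+ >>> model = timm.create_model('convnext_nano.in12k_ft_in1k', pretrained=True).eval()
+ ```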
783
+
784
+ ### Dec 8, 2022
785
+ * Add 'EVA l' to `vision_transformer.py`, MAE style ViT-L/14 MIM pretrain w/ EVA-CLIP targets, FT on ImageNet-1k (w/ ImageNet-22k intermediate for some)
786
+ * original source: https://github.com/baaivision/EVA
787
+
788
+ | model | top1 | param_count | gmac | macts | hub |
789
+ |:------------------------------------------|-----:|------------:|------:|------:|:----------------------------------------|
790
+ | eva_large_patch14_336.in22k_ft_in22k_in1k | 89.2 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/BAAI/EVA) |
791
+ | eva_large_patch14_336.in22k_ft_in1k | 88.7 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/BAAI/EVA) |
792
+ | eva_large_patch14_196.in22k_ft_in22k_in1k | 88.6 | 304.1 | 61.6 | 63.5 | [link](https://huggingface.co/BAAI/EVA) |
793
+ | eva_large_patch14_196.in22k_ft_in1k | 87.9 | 304.1 | 61.6 | 63.5 | [link](https://huggingface.co/BAAI/EVA) |
794
+
795
+ ### Dec 6, 2022
796
+ * Add 'EVA g', BEiT style ViT-g/14 model weights w/ both MIM pretrain and CLIP pretrain to `beit.py`.
797
+ * original source: https://github.com/baaivision/EVA
798
+ * paper: https://arxiv.org/abs/2211.07636
799
+
800
+ | model | top1 | param_count | gmac | macts | hub |
801
+ |:-----------------------------------------|-------:|--------------:|-------:|--------:|:----------------------------------------|
802
+ | eva_giant_patch14_560.m30m_ft_in22k_in1k | 89.8 | 1014.4 | 1906.8 | 2577.2 | [link](https://huggingface.co/BAAI/EVA) |
803
+ | eva_giant_patch14_336.m30m_ft_in22k_in1k | 89.6 | 1013 | 620.6 | 550.7 | [link](https://huggingface.co/BAAI/EVA) |
804
+ | eva_giant_patch14_336.clip_ft_in1k | 89.4 | 1013 | 620.6 | 550.7 | [link](https://huggingface.co/BAAI/EVA) |
805
+ | eva_giant_patch14_224.clip_ft_in1k | 89.1 | 1012.6 | 267.2 | 192.6 | [link](https://huggingface.co/BAAI/EVA) |
806
+
807
+ ### Dec 5, 2022
808
+
809
+ * Pre-release (`0.8.0dev0`) of multi-weight support (`model_arch.pretrained_tag`). Install with `pip install --pre timm`
810
+ * vision_transformer, maxvit, convnext are the first three model impl w/ support
811
+ * model names are changing with this (previous _21k, etc. fn will merge), still sorting out deprecation handling
812
+ * bugs are likely, but I need feedback so please try it out
813
+ * if stability is needed, please use 0.6.x pypi releases or clone from [0.6.x branch](https://github.com/rwightman/pytorch-image-models/tree/0.6.x)
814
+ * Support for PyTorch 2.0 compile is added in train/validate/inference/benchmark, use `--torchcompile` argument
815
+ * Inference script allows more control over output, select k for top-class index + prob json, csv or parquet output
816
+ * Add a full set of fine-tuned CLIP image tower weights from both LAION-2B and original OpenAI CLIP models
817
+
818
+ | model | top1 | param_count | gmac | macts | hub |
819
+ |:-------------------------------------------------|-------:|--------------:|-------:|--------:|:-------------------------------------------------------------------------------------|
820
+ | vit_huge_patch14_clip_336.laion2b_ft_in12k_in1k | 88.6 | 632.5 | 391 | 407.5 | [link](https://huggingface.co/timm/vit_huge_patch14_clip_336.laion2b_ft_in12k_in1k) |
821
+ | vit_large_patch14_clip_336.openai_ft_in12k_in1k | 88.3 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/timm/vit_large_patch14_clip_336.openai_ft_in12k_in1k) |
822
+ | vit_huge_patch14_clip_224.laion2b_ft_in12k_in1k | 88.2 | 632 | 167.4 | 139.4 | [link](https://huggingface.co/timm/vit_huge_patch14_clip_224.laion2b_ft_in12k_in1k) |
823
+ | vit_large_patch14_clip_336.laion2b_ft_in12k_in1k | 88.2 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/timm/vit_large_patch14_clip_336.laion2b_ft_in12k_in1k) |
824
+ | vit_large_patch14_clip_224.openai_ft_in12k_in1k | 88.2 | 304.2 | 81.1 | 88.8 | [link](https://huggingface.co/timm/vit_large_patch14_clip_224.openai_ft_in12k_in1k) |
825
+ | vit_large_patch14_clip_224.laion2b_ft_in12k_in1k | 87.9 | 304.2 | 81.1 | 88.8 | [link](https://huggingface.co/timm/vit_large_patch14_clip_224.laion2b_ft_in12k_in1k) |
826
+ | vit_large_patch14_clip_224.openai_ft_in1k | 87.9 | 304.2 | 81.1 | 88.8 | [link](https://huggingface.co/timm/vit_large_patch14_clip_224.openai_ft_in1k) |
827
+ | vit_large_patch14_clip_336.laion2b_ft_in1k | 87.9 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/timm/vit_large_patch14_clip_336.laion2b_ft_in1k) |
828
+ | vit_huge_patch14_clip_224.laion2b_ft_in1k | 87.6 | 632 | 167.4 | 139.4 | [link](https://huggingface.co/timm/vit_huge_patch14_clip_224.laion2b_ft_in1k) |
829
+ | vit_large_patch14_clip_224.laion2b_ft_in1k | 87.3 | 304.2 | 81.1 | 88.8 | [link](https://huggingface.co/timm/vit_large_patch14_clip_224.laion2b_ft_in1k) |
830
+ | vit_base_patch16_clip_384.laion2b_ft_in12k_in1k | 87.2 | 86.9 | 55.5 | 101.6 | [link](https://huggingface.co/timm/vit_base_patch16_clip_384.laion2b_ft_in12k_in1k) |
831
+ | vit_base_patch16_clip_384.openai_ft_in12k_in1k | 87 | 86.9 | 55.5 | 101.6 | [link](https://huggingface.co/timm/vit_base_patch16_clip_384.openai_ft_in12k_in1k) |
832
+ | vit_base_patch16_clip_384.laion2b_ft_in1k | 86.6 | 86.9 | 55.5 | 101.6 | [link](https://huggingface.co/timm/vit_base_patch16_clip_384.laion2b_ft_in1k) |
833
+ | vit_base_patch16_clip_384.openai_ft_in1k | 86.2 | 86.9 | 55.5 | 101.6 | [link](https://huggingface.co/timm/vit_base_patch16_clip_384.openai_ft_in1k) |
834
+ | vit_base_patch16_clip_224.laion2b_ft_in12k_in1k | 86.2 | 86.6 | 17.6 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch16_clip_224.laion2b_ft_in12k_in1k) |
835
+ | vit_base_patch16_clip_224.openai_ft_in12k_in1k | 85.9 | 86.6 | 17.6 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch16_clip_224.openai_ft_in12k_in1k) |
836
+ | vit_base_patch32_clip_448.laion2b_ft_in12k_in1k | 85.8 | 88.3 | 17.9 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch32_clip_448.laion2b_ft_in12k_in1k) |
837
+ | vit_base_patch16_clip_224.laion2b_ft_in1k | 85.5 | 86.6 | 17.6 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch16_clip_224.laion2b_ft_in1k) |
838
+ | vit_base_patch32_clip_384.laion2b_ft_in12k_in1k | 85.4 | 88.3 | 13.1 | 16.5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_384.laion2b_ft_in12k_in1k) |
839
+ | vit_base_patch16_clip_224.openai_ft_in1k | 85.3 | 86.6 | 17.6 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch16_clip_224.openai_ft_in1k) |
840
+ | vit_base_patch32_clip_384.openai_ft_in12k_in1k | 85.2 | 88.3 | 13.1 | 16.5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_384.openai_ft_in12k_in1k) |
841
+ | vit_base_patch32_clip_224.laion2b_ft_in12k_in1k | 83.3 | 88.2 | 4.4 | 5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_224.laion2b_ft_in12k_in1k) |
842
+ | vit_base_patch32_clip_224.laion2b_ft_in1k | 82.6 | 88.2 | 4.4 | 5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_224.laion2b_ft_in1k) |
843
+ | vit_base_patch32_clip_224.openai_ft_in1k | 81.9 | 88.2 | 4.4 | 5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_224.openai_ft_in1k) |
844
+
845
+ * Port of MaxViT Tensorflow Weights from official impl at https://github.com/google-research/maxvit
846
+ * There were larger than expected drops for the upscaled 384/512 in21k fine-tune weights, possibly a detail missing, but the 21k FT did seem sensitive to small preprocessing differences
847
+
848
+ | model | top1 | param_count | gmac | macts | hub |
849
+ |:-----------------------------------|-------:|--------------:|-------:|--------:|:-----------------------------------------------------------------------|
850
+ | maxvit_xlarge_tf_512.in21k_ft_in1k | 88.5 | 475.8 | 534.1 | 1413.2 | [link](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) |
851
+ | maxvit_xlarge_tf_384.in21k_ft_in1k | 88.3 | 475.3 | 292.8 | 668.8 | [link](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) |
852
+ | maxvit_base_tf_512.in21k_ft_in1k | 88.2 | 119.9 | 138 | 704 | [link](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) |
853
+ | maxvit_large_tf_512.in21k_ft_in1k | 88 | 212.3 | 244.8 | 942.2 | [link](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) |
854
+ | maxvit_large_tf_384.in21k_ft_in1k | 88 | 212 | 132.6 | 445.8 | [link](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) |
855
+ | maxvit_base_tf_384.in21k_ft_in1k | 87.9 | 119.6 | 73.8 | 332.9 | [link](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) |
856
+ | maxvit_base_tf_512.in1k | 86.6 | 119.9 | 138 | 704 | [link](https://huggingface.co/timm/maxvit_base_tf_512.in1k) |
857
+ | maxvit_large_tf_512.in1k | 86.5 | 212.3 | 244.8 | 942.2 | [link](https://huggingface.co/timm/maxvit_large_tf_512.in1k) |
858
+ | maxvit_base_tf_384.in1k | 86.3 | 119.6 | 73.8 | 332.9 | [link](https://huggingface.co/timm/maxvit_base_tf_384.in1k) |
859
+ | maxvit_large_tf_384.in1k | 86.2 | 212 | 132.6 | 445.8 | [link](https://huggingface.co/timm/maxvit_large_tf_384.in1k) |
860
+ | maxvit_small_tf_512.in1k | 86.1 | 69.1 | 67.3 | 383.8 | [link](https://huggingface.co/timm/maxvit_small_tf_512.in1k) |
861
+ | maxvit_tiny_tf_512.in1k | 85.7 | 31 | 33.5 | 257.6 | [link](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) |
862
+ | maxvit_small_tf_384.in1k | 85.5 | 69 | 35.9 | 183.6 | [link](https://huggingface.co/timm/maxvit_small_tf_384.in1k) |
863
+ | maxvit_tiny_tf_384.in1k | 85.1 | 31 | 17.5 | 123.4 | [link](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) |
864
+ | maxvit_large_tf_224.in1k | 84.9 | 211.8 | 43.7 | 127.4 | [link](https://huggingface.co/timm/maxvit_large_tf_224.in1k) |
865
+ | maxvit_base_tf_224.in1k | 84.9 | 119.5 | 24 | 95 | [link](https://huggingface.co/timm/maxvit_base_tf_224.in1k) |
866
+ | maxvit_small_tf_224.in1k | 84.4 | 68.9 | 11.7 | 53.2 | [link](https://huggingface.co/timm/maxvit_small_tf_224.in1k) |
867
+ | maxvit_tiny_tf_224.in1k | 83.4 | 30.9 | 5.6 | 35.8 | [link](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) |
868
+
869
+ ### Oct 15, 2022
870
+ * Train and validation script enhancements
871
+ * Non-GPU (ie CPU) device support
872
+ * SLURM compatibility for train script
873
+ * HF datasets support (via ReaderHfds)
874
+ * TFDS/WDS dataloading improvements (sample padding/wrap for distributed use fixed wrt sample count estimate)
875
+ * in_chans !=3 support for scripts / loader
876
+ * Adan optimizer
877
+ * Can enable per-step LR scheduling via args
878
+ * Dataset 'parsers' renamed to 'readers', more descriptive of purpose
879
+ * AMP args changed, APEX via `--amp-impl apex`, bfloat16 supported via `--amp-dtype bfloat16`
880
+ * main branch switched to 0.7.x version, 0.6.x forked for stable release of weight-only adds
881
+ * master -> main branch rename
882
+
883
+ ### Oct 10, 2022
884
+ * More weights in `maxxvit` series, incl first ConvNeXt block based `coatnext` and `maxxvit` experiments:
885
+ * `coatnext_nano_rw_224` - 82.0 @ 224 (G) -- (uses ConvNeXt conv block, no BatchNorm)
886
+ * `maxxvit_rmlp_nano_rw_256` - 83.0 @ 256, 83.7 @ 320 (G) (uses ConvNeXt conv block, no BN)
887
+ * `maxvit_rmlp_small_rw_224` - 84.5 @ 224, 85.1 @ 320 (G)
888
+ * `maxxvit_rmlp_small_rw_256` - 84.6 @ 256, 84.9 @ 288 (G) -- could be trained better, hparams need tuning (uses ConvNeXt block, no BN)
889
+ * `coatnet_rmlp_2_rw_224` - 84.6 @ 224, 85 @ 320 (T)
890
+ * NOTE: official MaxVit weights (in1k) have been released at https://github.com/google-research/maxvit -- some extra work is needed to port and adapt since my impl was created independently of theirs and has a few small differences + the whole TF same padding fun.
891
+
892
+ ### Sept 23, 2022
893
+ * LAION-2B CLIP image towers supported as pretrained backbones for fine-tune or features (no classifier)
894
+ * vit_base_patch32_224_clip_laion2b
895
+ * vit_large_patch14_224_clip_laion2b
896
+ * vit_huge_patch14_224_clip_laion2b
897
+ * vit_giant_patch14_224_clip_laion2b
898
+
899
+ ### Sept 7, 2022
900
+ * Hugging Face [`timm` docs](https://huggingface.co/docs/hub/timm) home now exists, look for more here in the future
901
+ * Add BEiT-v2 weights for base and large 224x224 models from https://github.com/microsoft/unilm/tree/master/beit2
902
+ * Add more weights in `maxxvit` series incl a `pico` (7.5M params, 1.9 GMACs), two `tiny` variants:
903
+ * `maxvit_rmlp_pico_rw_256` - 80.5 @ 256, 81.3 @ 320 (T)
904
+ * `maxvit_tiny_rw_224` - 83.5 @ 224 (G)
905
+ * `maxvit_rmlp_tiny_rw_256` - 84.2 @ 256, 84.8 @ 320 (T)
906
+
907
+ ### Aug 29, 2022
908
+ * MaxVit window size scales with img_size by default. Add new RelPosMlp MaxViT weight that leverages this:
909
+ * `maxvit_rmlp_nano_rw_256` - 83.0 @ 256, 83.6 @ 320 (T)
910
+
911
+ ### Aug 26, 2022
912
+ * CoAtNet (https://arxiv.org/abs/2106.04803) and MaxVit (https://arxiv.org/abs/2204.01697) `timm` original models
913
+ * both found in [`maxxvit.py`](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/maxxvit.py) model def, contains numerous experiments outside scope of original papers
914
+ * an unfinished Tensorflow version from MaxVit authors can be found at https://github.com/google-research/maxvit
915
+ * Initial CoAtNet and MaxVit timm pretrained weights (working on more):
916
+ * `coatnet_nano_rw_224` - 81.7 @ 224 (T)
917
+ * `coatnet_rmlp_nano_rw_224` - 82.0 @ 224, 82.8 @ 320 (T)
918
+ * `coatnet_0_rw_224` - 82.4 (T) -- NOTE timm '0' coatnets have 2 more 3rd stage blocks
919
+ * `coatnet_bn_0_rw_224` - 82.4 (T)
920
+ * `maxvit_nano_rw_256` - 82.9 @ 256 (T)
921
+ * `coatnet_rmlp_1_rw_224` - 83.4 @ 224, 84 @ 320 (T)
922
+ * `coatnet_1_rw_224` - 83.6 @ 224 (G)
923
+ * (T) = TPU trained with `bits_and_tpu` branch training code, (G) = GPU trained
924
+ * GCVit (weights adapted from https://github.com/NVlabs/GCVit, code 100% `timm` re-write for license purposes)
925
+ * MViT-V2 (multi-scale vit, adapted from https://github.com/facebookresearch/mvit)
926
+ * EfficientFormer (adapted from https://github.com/snap-research/EfficientFormer)
927
+ * PyramidVisionTransformer-V2 (adapted from https://github.com/whai362/PVT)
928
+ * 'Fast Norm' support for LayerNorm and GroupNorm that avoids float32 upcast w/ AMP (uses APEX LN if available for further boost)
929
+
930
+
931
+ ### Aug 15, 2022
932
+ * ConvNeXt atto weights added
933
+ * `convnext_atto` - 75.7 @ 224, 77.0 @ 288
934
+ * `convnext_atto_ols` - 75.9 @ 224, 77.2 @ 288
935
+
936
+ ### Aug 5, 2022
937
+ * More custom ConvNeXt smaller model defs with weights
938
+ * `convnext_femto` - 77.5 @ 224, 78.7 @ 288
939
+ * `convnext_femto_ols` - 77.9 @ 224, 78.9 @ 288
940
+ * `convnext_pico` - 79.5 @ 224, 80.4 @ 288
941
+ * `convnext_pico_ols` - 79.5 @ 224, 80.5 @ 288
942
+ * `convnext_nano_ols` - 80.9 @ 224, 81.6 @ 288
943
+ * Updated EdgeNeXt to improve ONNX export, add new base variant and weights from original (https://github.com/mmaaz60/EdgeNeXt)
944
+
945
+ ### July 28, 2022
946
+ * Add freshly minted DeiT-III Medium (width=512, depth=12, num_heads=8) model weights. Thanks [Hugo Touvron](https://github.com/TouvronHugo)!
947
+
948
+ ### July 27, 2022
949
+ * All runtime benchmark and validation result csv files are up-to-date!
950
+ * A few more weights & model defs added:
951
+ * `darknetaa53` - 79.8 @ 256, 80.5 @ 288
952
+ * `convnext_nano` - 80.8 @ 224, 81.5 @ 288
953
+ * `cs3sedarknet_l` - 81.2 @ 256, 81.8 @ 288
954
+ * `cs3darknet_x` - 81.8 @ 256, 82.2 @ 288
955
+ * `cs3sedarknet_x` - 82.2 @ 256, 82.7 @ 288
956
+ * `cs3edgenet_x` - 82.2 @ 256, 82.7 @ 288
957
+ * `cs3se_edgenet_x` - 82.8 @ 256, 83.5 @ 320
958
+ * `cs3*` weights above all trained on TPU w/ `bits_and_tpu` branch. Thanks to TRC program!
959
+ * Add output_stride=8 and 16 support to ConvNeXt (dilation)
960
+ * Fixed deit3 models not being able to resize pos_emb
961
+ * Version 0.6.7 PyPi release (w/ above bug fixes and new weights since 0.6.5)
962
+
963
+ ### July 8, 2022
964
+ More models, more fixes
965
+ * Official research models (w/ weights) added:
966
+ * EdgeNeXt from (https://github.com/mmaaz60/EdgeNeXt)
967
+ * MobileViT-V2 from (https://github.com/apple/ml-cvnets)
968
+ * DeiT III (Revenge of the ViT) from (https://github.com/facebookresearch/deit)
969
+ * My own models:
970
+ * Small `ResNet` defs added by request with 1 block repeats for both basic and bottleneck (resnet10 and resnet14)
971
+ * `CspNet` refactored with dataclass config, simplified CrossStage3 (`cs3`) option. These are closer to YOLO-v5+ backbone defs.
972
+ * More relative position vit fiddling. Two `srelpos` (shared relative position) models trained, and a medium w/ class token.
973
+ * Add an alternate downsample mode to EdgeNeXt and train a `small` model. Better than original small, but not their new USI trained weights.
974
+ * My own model weight results (all ImageNet-1k training)
975
+ * `resnet10t` - 66.5 @ 176, 68.3 @ 224
976
+ * `resnet14t` - 71.3 @ 176, 72.3 @ 224
977
+ * `resnetaa50` - 80.6 @ 224, 81.6 @ 288
978
+ * `darknet53` - 80.0 @ 256, 80.5 @ 288
979
+ * `cs3darknet_m` - 77.0 @ 256, 77.6 @ 288
980
+ * `cs3darknet_focus_m` - 76.7 @ 256, 77.3 @ 288
981
+ * `cs3darknet_l` - 80.4 @ 256, 80.9 @ 288
982
+ * `cs3darknet_focus_l` - 80.3 @ 256, 80.9 @ 288
983
+ * `vit_srelpos_small_patch16_224` - 81.1 @ 224, 82.1 @ 320
984
+ * `vit_srelpos_medium_patch16_224` - 82.3 @ 224, 83.1 @ 320
985
+ * `vit_relpos_small_patch16_cls_224` - 82.6 @ 224, 83.6 @ 320
986
+ * `edgenext_small_rw` - 79.6 @ 224, 80.4 @ 320
987
+ * `cs3`, `darknet`, and `vit_*relpos` weights above all trained on TPU thanks to TRC program! Rest trained on overheating GPUs.
988
+ * Hugging Face Hub support fixes verified, demo notebook TBA
989
+ * Pretrained weights / configs can be loaded externally (ie from local disk) w/ support for head adaptation.
990
+ * Add support to change image extensions scanned by `timm` datasets/parsers. See (https://github.com/rwightman/pytorch-image-models/pull/1274#issuecomment-1178303103)
991
+ * Default ConvNeXt LayerNorm impl to use `F.layer_norm(x.permute(0, 2, 3, 1), ...).permute(0, 3, 1, 2)` via `LayerNorm2d` in all cases.
992
+ * a bit slower than previous custom impl on some hardware (ie Ampere w/ CL), but overall fewer regressions across wider HW / PyTorch version ranges.
993
+ * previous impl exists as `LayerNormExp2d` in `models/layers/norm.py`
994
+ * Numerous bug fixes
995
+ * Currently testing for imminent PyPi 0.6.x release
996
+ * LeViT pretraining of larger models still a WIP, they don't train well / easily without distillation. Time to add distill support (finally)?
997
+ * ImageNet-22k weight training + finetune ongoing, work on multi-weight support (slowly) chugging along (there are a LOT of weights, sigh) ...
998
+
999
+ ### May 13, 2022
1000
+ * Official Swin-V2 models and weights added from (https://github.com/microsoft/Swin-Transformer). Cleaned up to support torchscript.
1001
+ * Some refactoring for existing `timm` Swin-V2-CR impl, will likely do a bit more to bring parts closer to official and decide whether to merge some aspects.
1002
+ * More Vision Transformer relative position / residual post-norm experiments (all trained on TPU thanks to TRC program)
1003
+ * `vit_relpos_small_patch16_224` - 81.5 @ 224, 82.5 @ 320 -- rel pos, layer scale, no class token, avg pool
1004
+ * `vit_relpos_medium_patch16_rpn_224` - 82.3 @ 224, 83.1 @ 320 -- rel pos + res-post-norm, no class token, avg pool
1005
+ * `vit_relpos_medium_patch16_224` - 82.5 @ 224, 83.3 @ 320 -- rel pos, layer scale, no class token, avg pool
1006
+ * `vit_relpos_base_patch16_gapcls_224` - 82.8 @ 224, 83.9 @ 320 -- rel pos, layer scale, class token, avg pool (by mistake)
1007
+ * Bring 512 dim, 8-head 'medium' ViT model variant back to life (after using in a pre DeiT 'small' model for first ViT impl back in 2020)
1008
+ * Add ViT relative position support for switching btw existing impl and some additions in official Swin-V2 impl for future trials
1009
+ * Sequencer2D impl (https://arxiv.org/abs/2205.01972), added via PR from author (https://github.com/okojoalg)
1010
+
1011
+ ### May 2, 2022
1012
+ * Vision Transformer experiments adding Relative Position (Swin-V2 log-coord) (`vision_transformer_relpos.py`) and Residual Post-Norm branches (from Swin-V2) (`vision_transformer*.py`)
1013
+ * `vit_relpos_base_patch32_plus_rpn_256` - 79.5 @ 256, 80.6 @ 320 -- rel pos + extended width + res-post-norm, no class token, avg pool
1014
+ * `vit_relpos_base_patch16_224` - 82.5 @ 224, 83.6 @ 320 -- rel pos, layer scale, no class token, avg pool
1015
+ * `vit_base_patch16_rpn_224` - 82.3 @ 224 -- rel pos + res-post-norm, no class token, avg pool
1016
+ * Vision Transformer refactor to remove representation layer that was only used in initial vit and rarely used since with newer pretrain (ie `How to Train Your ViT`)
1017
+ * `vit_*` models support removal of class token, use of global average pool, use of fc_norm (ala beit, mae).
1018
+
1019
+ ### April 22, 2022
1020
+ * `timm` models are now officially supported in [fast.ai](https://www.fast.ai/)! Just in time for the new Practical Deep Learning course. `timmdocs` documentation link updated to [timm.fast.ai](http://timm.fast.ai/).
1021
+ * Two more model weights added in the TPU trained [series](https://github.com/rwightman/pytorch-image-models/releases/tag/v0.1-tpu-weights). Some In22k pretrain still in progress.
1022
+ * `seresnext101d_32x8d` - 83.69 @ 224, 84.35 @ 288
1023
+ * `seresnextaa101d_32x8d` (anti-aliased w/ AvgPool2d) - 83.85 @ 224, 84.57 @ 288
1024
+
1025
+ ### March 23, 2022
1026
+ * Add `ParallelBlock` and `LayerScale` option to base vit models to support model configs in [Three things everyone should know about ViT](https://arxiv.org/abs/2203.09795)
1027
+ * `convnext_tiny_hnf` (head norm first) weights trained with (close to) A2 recipe, 82.2% top-1, could do better with more epochs.
1028
+
1029
+ ### March 21, 2022
1030
+ * Merge `norm_norm_norm`. **IMPORTANT** this update for a coming 0.6.x release will likely de-stabilize the master branch for a while. Branch [`0.5.x`](https://github.com/rwightman/pytorch-image-models/tree/0.5.x) or a previous 0.5.x release can be used if stability is required.
1031
+ * Significant weights update (all TPU trained) as described in this [release](https://github.com/rwightman/pytorch-image-models/releases/tag/v0.1-tpu-weights)
1032
+ * `regnety_040` - 82.3 @ 224, 82.96 @ 288
1033
+ * `regnety_064` - 83.0 @ 224, 83.65 @ 288
1034
+ * `regnety_080` - 83.17 @ 224, 83.86 @ 288
1035
+ * `regnetv_040` - 82.44 @ 224, 83.18 @ 288 (timm pre-act)
1036
+ * `regnetv_064` - 83.1 @ 224, 83.71 @ 288 (timm pre-act)
1037
+ * `regnetz_040` - 83.67 @ 256, 84.25 @ 320
1038
+ * `regnetz_040h` - 83.77 @ 256, 84.5 @ 320 (w/ extra fc in head)
1039
+ * `resnetv2_50d_gn` - 80.8 @ 224, 81.96 @ 288 (pre-act GroupNorm)
1040
+ * `resnetv2_50d_evos` 80.77 @ 224, 82.04 @ 288 (pre-act EvoNormS)
1041
+ * `regnetz_c16_evos` - 81.9 @ 256, 82.64 @ 320 (EvoNormS)
1042
+ * `regnetz_d8_evos` - 83.42 @ 256, 84.04 @ 320 (EvoNormS)
1043
+ * `xception41p` - 82 @ 299 (timm pre-act)
1044
+ * `xception65` - 83.17 @ 299
1045
+ * `xception65p` - 83.14 @ 299 (timm pre-act)
1046
+ * `resnext101_64x4d` - 82.46 @ 224, 83.16 @ 288
1047
+ * `seresnext101_32x8d` - 83.57 @ 224, 84.270 @ 288
1048
+ * `resnetrs200` - 83.85 @ 256, 84.44 @ 320
1049
+ * HuggingFace hub support fixed w/ initial groundwork for allowing alternative 'config sources' for pretrained model definitions and weights (generic local file / remote url support soon)
1050
+ * SwinTransformer-V2 implementation added. Submitted by [Christoph Reich](https://github.com/ChristophReich1996). Training experiments and model changes by myself are ongoing so expect compat breaks.
1051
+ * Swin-S3 (AutoFormerV2) models / weights added from https://github.com/microsoft/Cream/tree/main/AutoFormerV2
1052
+ * MobileViT models w/ weights adapted from https://github.com/apple/ml-cvnets
1053
+ * PoolFormer models w/ weights adapted from https://github.com/sail-sg/poolformer
1054
+ * VOLO models w/ weights adapted from https://github.com/sail-sg/volo
1055
+ * Significant work experimenting with non-BatchNorm norm layers such as EvoNorm, FilterResponseNorm, GroupNorm, etc
1056
+ * Enhanced support for alternate norm + act ('NormAct') layers added to a number of models, esp EfficientNet/MobileNetV3, RegNet, and aligned Xception
1057
+ * Grouped conv support added to EfficientNet family
1058
+ * Add 'group matching' API to all models to allow grouping model parameters for application of 'layer-wise' LR decay, lr scale added to LR scheduler
1059
+ * Gradient checkpointing support added to many models
1060
+ * `forward_head(x, pre_logits=False)` fn added to all models to allow separate calls of `forward_features` + `forward_head`
1061
+ * All vision transformer and vision MLP models updated to return non-pooled / non-token selected features from `forward_features`; for consistency with CNN models, token selection or pooling is now applied in `forward_head`
1062
+
1063
+ ### Feb 2, 2022
1064
+ * [Chris Hughes](https://github.com/Chris-hughes10) posted an exhaustive run through of `timm` on his blog yesterday. Well worth a read. [Getting Started with PyTorch Image Models (timm): A Practitioner’s Guide](https://towardsdatascience.com/getting-started-with-pytorch-image-models-timm-a-practitioners-guide-4e77b4bf9055)
1065
+ * I'm currently prepping to merge the `norm_norm_norm` branch back to master (ver 0.6.x) in the next week or so.
1066
+ * The changes are more extensive than usual and may destabilize and break some model API use (aiming for full backwards compat). So, beware `pip install git+https://github.com/rwightman/pytorch-image-models` installs!
1067
+ * `0.5.x` releases and a `0.5.x` branch will remain stable with a cherry pick or two until dust clears. Recommend sticking to pypi install for a bit if you want stable.
1068
+
1069
+ ### Jan 14, 2022
1070
+ * Version 0.5.4 w/ release to be pushed to pypi. It's been a while since last pypi update and riskier changes will be merged to main branch soon....
1071
+ * Add ConvNeXT models w/ weights from official impl (https://github.com/facebookresearch/ConvNeXt), a few perf tweaks, compatible with timm features
1072
+ * Tried training a few small (~1.8-3M param) / mobile optimized models, a few are good so far, more on the way...
1073
+ * `mnasnet_small` - 65.6 top-1
1074
+ * `mobilenetv2_050` - 65.9
1075
+ * `lcnet_100/075/050` - 72.1 / 68.8 / 63.1
1076
+ * `semnasnet_075` - 73
1077
+ * `fbnetv3_b/d/g` - 79.1 / 79.7 / 82.0
1078
+ * TinyNet models added by [rsomani95](https://github.com/rsomani95)
1079
+ * LCNet added via MobileNetV3 architecture
1080
+
pytorch-image-models/hfdocs/source/feature_extraction.mdx ADDED
@@ -0,0 +1,273 @@
1
+ # Feature Extraction
2
+
3
+ All of the models in `timm` have consistent mechanisms for obtaining various types of features from the model for tasks besides classification.
4
+
5
+ ## Penultimate Layer Features (Pre-Classifier Features)
6
+
7
+ The features from the penultimate model layer can be obtained in several ways without requiring model surgery (although feel free to do surgery). One must first decide if they want pooled or un-pooled features.
8
+
9
+ ### Unpooled
10
+
11
+ There are three ways to obtain unpooled features. The final, unpooled features are sometimes referred to as the last hidden state. In `timm` this is up to and including the final normalization layer (in e.g. ViT style models) but does not include pooling / class token selection and final post-pooling layers.
12
+
13
+ Without modifying the network, one can call `model.forward_features(input)` on any model instead of the usual `model(input)`. This will bypass the head classifier and global pooling of the network.
14
+
15
+ If one wants to explicitly modify the network to return unpooled features, they can either create the model without a classifier and pooling, or remove it later. Both paths remove the parameters associated with the classifier from the network.
16
+
17
+ #### forward_features()
18
+
19
+ ```py
20
+ >>> import torch
21
+ >>> import timm
22
+ >>> m = timm.create_model('xception41', pretrained=True)
23
+ >>> o = m(torch.randn(2, 3, 299, 299))
24
+ >>> print(f'Original shape: {o.shape}')
25
+ >>> o = m.forward_features(torch.randn(2, 3, 299, 299))
26
+ >>> print(f'Unpooled shape: {o.shape}')
27
+ ```
28
+
29
+ Output:
30
+
31
+ ```text
32
+ Original shape: torch.Size([2, 1000])
33
+ Unpooled shape: torch.Size([2, 2048, 10, 10])
34
+ ```
35
+
36
+ #### Create with no classifier and pooling
37
+
38
+ ```py
39
+ >>> import torch
40
+ >>> import timm
41
+ >>> m = timm.create_model('resnet50', pretrained=True, num_classes=0, global_pool='')
42
+ >>> o = m(torch.randn(2, 3, 224, 224))
43
+ >>> print(f'Unpooled shape: {o.shape}')
44
+ ```
45
+
46
+ Output:
47
+
48
+ ```text
49
+ Unpooled shape: torch.Size([2, 2048, 7, 7])
50
+ ```
51
+
52
+ #### Remove it later
53
+
54
+ ```py
55
+ >>> import torch
56
+ >>> import timm
57
+ >>> m = timm.create_model('densenet121', pretrained=True)
58
+ >>> o = m(torch.randn(2, 3, 224, 224))
59
+ >>> print(f'Original shape: {o.shape}')
60
+ >>> m.reset_classifier(0, '')
61
+ >>> o = m(torch.randn(2, 3, 224, 224))
62
+ >>> print(f'Unpooled shape: {o.shape}')
63
+ ```
64
+
65
+ Output:
66
+
67
+ ```text
68
+ Original shape: torch.Size([2, 1000])
69
+ Unpooled shape: torch.Size([2, 1024, 7, 7])
70
+ ```
71
+
72
+ #### Chaining unpooled output to classifier
73
+
74
+ The last hidden state can be fed back into the head of the model using the `forward_head()` function.
75
+
76
+ ```py
77
+ >>> model = timm.create_model('vit_medium_patch16_reg1_gap_256', pretrained=True)
78
+ >>> output = model.forward_features(torch.randn(2,3,256,256))
79
+ >>> print('Unpooled output shape:', output.shape)
80
+ >>> classified = model.forward_head(output)
81
+ >>> print('Classification output shape:', classified.shape)
82
+ ```
83
+
84
+ Output:
85
+
86
+ ```text
87
+ Unpooled output shape: torch.Size([2, 257, 512])
88
+ Classification output shape: torch.Size([2, 1000])
89
+ ```
90
+
91
+ ### Pooled
92
+
93
+ To modify the network to return pooled features, one can use `forward_features()` and pool/flatten the result themselves, or modify the network like above but keep pooling intact.
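+
+ For the first option, a minimal sketch (assuming a CNN that returns NCHW feature maps; ViT-style models return NLC token sequences and would be pooled over the token dimension instead):
+
+ ```py
+ >>> import torch
+ >>> import timm
+ >>> m = timm.create_model('resnet50', pretrained=True)
+ >>> feats = m.forward_features(torch.randn(2, 3, 224, 224))  # [2, 2048, 7, 7]
+ >>> pooled = feats.mean(dim=(2, 3))  # global average pool over H, W
+ >>> print(f'Pooled shape: {pooled.shape}')
+ ```
+
+ Output:
+
+ ```text
+ Pooled shape: torch.Size([2, 2048])
+ ```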
94
+
95
+ #### Create with no classifier
96
+
97
+ ```py
98
+ >>> import torch
99
+ >>> import timm
100
+ >>> m = timm.create_model('resnet50', pretrained=True, num_classes=0)
101
+ >>> o = m(torch.randn(2, 3, 224, 224))
102
+ >>> print(f'Pooled shape: {o.shape}')
103
+ ```
104
+
105
+ Output:
106
+
107
+ ```text
108
+ Pooled shape: torch.Size([2, 2048])
109
+ ```
110
+
111
+ #### Remove it later
112
+
113
+ ```py
114
+ >>> import torch
115
+ >>> import timm
116
+ >>> m = timm.create_model('ese_vovnet19b_dw', pretrained=True)
117
+ >>> o = m(torch.randn(2, 3, 224, 224))
118
+ >>> print(f'Original shape: {o.shape}')
119
+ >>> m.reset_classifier(0)
120
+ >>> o = m(torch.randn(2, 3, 224, 224))
121
+ >>> print(f'Pooled shape: {o.shape}')
122
+ ```
123
+
124
+ Output:
125
+
126
+ ```text
127
+ Original shape: torch.Size([2, 1000])
128
+ Pooled shape: torch.Size([2, 1024])
129
+ ```
130
+
131
+
132
+ ## Multi-scale Feature Maps (Feature Pyramid)
133
+
134
+ Object detection, segmentation, keypoint, and a variety of dense pixel tasks require access to feature maps from the backbone network at multiple scales. This is often done by modifying the original classification network. Since each network varies quite a bit in structure, it's not uncommon to see only a few backbones supported in any given obj detection or segmentation library.
135
+
136
+ `timm` allows a consistent interface for creating any of the included models as feature backbones that output feature maps for selected levels.
137
+
138
+ A feature backbone can be created by adding the argument `features_only=True` to any `create_model` call. By default most models with a feature hierarchy will output up to 5 features, up to a reduction of 32. However, this varies per model; some models have fewer hierarchy levels, and some (like ViT) have a larger number of non-hierarchical feature maps and default to outputting the last 3. The `out_indices` arg can be passed to `create_model` to specify which features you want.
139
+
140
+ ### Create a feature map extraction model
141
+
142
+ ```py
143
+ >>> import torch
144
+ >>> import timm
145
+ >>> m = timm.create_model('resnest26d', features_only=True, pretrained=True)
146
+ >>> o = m(torch.randn(2, 3, 224, 224))
147
+ >>> for x in o:
148
+ ... print(x.shape)
149
+ ```
150
+
151
+ Output:
152
+
153
+ ```text
154
+ torch.Size([2, 64, 112, 112])
155
+ torch.Size([2, 256, 56, 56])
156
+ torch.Size([2, 512, 28, 28])
157
+ torch.Size([2, 1024, 14, 14])
158
+ torch.Size([2, 2048, 7, 7])
159
+ ```
160
+
161
+ ### Query the feature information
162
+
163
+ After a feature backbone has been created, it can be queried to provide channel or resolution reduction information to the downstream heads without requiring static config or hardcoded constants. The `.feature_info` attribute is a class encapsulating the information about the feature extraction points.
164
+
165
+ ```py
166
+ >>> import torch
167
+ >>> import timm
168
+ >>> m = timm.create_model('regnety_032', features_only=True, pretrained=True)
169
+ >>> print(f'Feature channels: {m.feature_info.channels()}')
170
+ >>> o = m(torch.randn(2, 3, 224, 224))
171
+ >>> for x in o:
172
+ ... print(x.shape)
173
+ ```
174
+
175
+ Output:
176
+
177
+ ```text
178
+ Feature channels: [32, 72, 216, 576, 1512]
179
+ torch.Size([2, 32, 112, 112])
180
+ torch.Size([2, 72, 56, 56])
181
+ torch.Size([2, 216, 28, 28])
182
+ torch.Size([2, 576, 14, 14])
183
+ torch.Size([2, 1512, 7, 7])
184
+ ```
185
+
186
+ ### Select specific feature levels or limit the stride
187
+
188
+ There are two additional creation arguments impacting the output features.
189
+
190
+ * `out_indices` selects which indices to output
191
+ * `output_stride` limits the feature output stride of the network (also works in classification mode BTW)
192
+
193
+ #### Output index selection
194
+
195
+ The `out_indices` argument is supported by all models, but not all models have the same index to feature stride mapping. Look at the code or check `feature_info` to compare. The out indices generally correspond to the `C(i+1)th` feature level (a `2^(i+1)` reduction). For most convnet models, index 0 is the stride 2 features, and index 4 is stride 32. For many ViT or ViT-Conv hybrids there may be many (or all) feature maps of the same shape, or a combination of hierarchical and non-hierarchical feature maps. It is best to look at the `feature_info` attribute to see the number of features, their corresponding channel counts and reduction levels.
196
+
197
+ `out_indices` supports negative indexing, which makes it easy to get the last, penultimate, etc. feature map. `out_indices=(-2,)` would return the penultimate feature map for any model.
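+
+ For example, a short sketch using the `resnest26d` backbone from earlier; given its feature hierarchy shown above, the penultimate map has 1024 channels at a reduction of 16:
+
+ ```py
+ >>> import torch
+ >>> import timm
+ >>> m = timm.create_model('resnest26d', features_only=True, out_indices=(-2,), pretrained=True)
+ >>> o = m(torch.randn(2, 3, 224, 224))
+ >>> print(o[0].shape)  # only the penultimate feature map is returned
+ ```
+
+ Output:
+
+ ```text
+ torch.Size([2, 1024, 14, 14])
+ ```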
198
+
199
+ #### Output stride (feature map dilation)
200
+
201
+ `output_stride` is achieved by converting layers to use dilated convolutions. Doing so is not always straightforward; some networks only support `output_stride=32`.
202
+
203
+ ```py
204
+ >>> import torch
205
+ >>> import timm
206
+ >>> m = timm.create_model('ecaresnet101d', features_only=True, output_stride=8, out_indices=(2, 4), pretrained=True)
207
+ >>> print(f'Feature channels: {m.feature_info.channels()}')
208
+ >>> print(f'Feature reduction: {m.feature_info.reduction()}')
209
+ >>> o = m(torch.randn(2, 3, 320, 320))
210
+ >>> for x in o:
211
+ ... print(x.shape)
212
+ ```
213
+
214
+ Output:
215
+
216
+ ```text
217
+ Feature channels: [512, 2048]
218
+ Feature reduction: [8, 8]
219
+ torch.Size([2, 512, 40, 40])
220
+ torch.Size([2, 2048, 40, 40])
221
+ ```
222
+
223
+ ## Flexible intermediate feature map extraction
224
+
225
+ In addition to using `features_only` with the model factory, many models support a `forward_intermediates()` method which provides a flexible mechanism for extracting both the intermediate feature maps and the last hidden state (which can be chained to the head). Additionally, this method supports model-specific features such as returning class or distill prefix tokens for some models.
226
+
227
+ Accompanying the `forward_intermediates` function is a `prune_intermediate_layers` function that allows one to prune layers from the model, including the head, final norm, and/or trailing blocks/stages that are not needed.
228
+
229
+ An `indices` argument is used for both `forward_intermediates()` and `prune_intermediate_layers()` to select the features to return or layers to remove. As with the `out_indices` for `features_only` API, `indices` is model specific and selects which intermediates are returned.
230
+
231
+ In non-hierarchical block based models such as ViT the indices correspond to the blocks, in models with hierarchical stages they usually correspond to the output of the stem + each hierarchical stage. Both positive (from the start), and negative (relative to the end) indexing works, and `None` is used to return all intermediates.
232
+
233
+ The `prune_intermediate_layers()` call returns an indices variable, as negative indices must be converted to absolute (positive) indices when the model is trimmed.
234
+
235
+ ```py
236
+ model = timm.create_model('vit_medium_patch16_reg1_gap_256', pretrained=True)
237
+ output, intermediates = model.forward_intermediates(torch.randn(2,3,256,256))
238
+ for i, o in enumerate(intermediates):
239
+ print(f'Feat index: {i}, shape: {o.shape}')
240
+ ```
241
+
242
+ ```text
243
+ Feat index: 0, shape: torch.Size([2, 512, 16, 16])
244
+ Feat index: 1, shape: torch.Size([2, 512, 16, 16])
245
+ Feat index: 2, shape: torch.Size([2, 512, 16, 16])
246
+ Feat index: 3, shape: torch.Size([2, 512, 16, 16])
247
+ Feat index: 4, shape: torch.Size([2, 512, 16, 16])
248
+ Feat index: 5, shape: torch.Size([2, 512, 16, 16])
249
+ Feat index: 6, shape: torch.Size([2, 512, 16, 16])
250
+ Feat index: 7, shape: torch.Size([2, 512, 16, 16])
251
+ Feat index: 8, shape: torch.Size([2, 512, 16, 16])
252
+ Feat index: 9, shape: torch.Size([2, 512, 16, 16])
253
+ Feat index: 10, shape: torch.Size([2, 512, 16, 16])
254
+ Feat index: 11, shape: torch.Size([2, 512, 16, 16])
255
+ ```
256
+
257
+ ```py
258
+ model = timm.create_model('vit_medium_patch16_reg1_gap_256', pretrained=True)
259
+ print('Original params:', sum([p.numel() for p in model.parameters()]))
260
+
261
+ indices = model.prune_intermediate_layers(indices=(-2,), prune_head=True, prune_norm=True) # prune head, norm, last block
262
+ print('Pruned params:', sum([p.numel() for p in model.parameters()]))
263
+
264
+ intermediates = model.forward_intermediates(torch.randn(2,3,256,256), indices=indices, intermediates_only=True) # return penultimate intermediate
265
+ for o in intermediates:
266
+ print(f'Feat shape: {o.shape}')
267
+ ```
268
+
269
+ ```text
270
+ Original params: 38880232
271
+ Pruned params: 35212800
272
+ Feat shape: torch.Size([2, 512, 16, 16])
273
+ ```
pytorch-image-models/hfdocs/source/hf_hub.mdx ADDED
@@ -0,0 +1,54 @@
1
+ # Sharing and Loading Models From the Hugging Face Hub
2
+
3
+ The `timm` library has a built-in integration with the Hugging Face Hub, making it easy to share and load models from the 🤗 Hub.
4
+
5
+ In this short guide, we'll see how to:
6
+ 1. Share a `timm` model on the Hub
7
+ 2. Load that model back from the Hub
8
+
9
+ ## Authenticating
10
+
11
+ First, you'll need to make sure you have the `huggingface_hub` package installed.
12
+
13
+ ```bash
14
+ pip install huggingface_hub
15
+ ```
16
+
17
+ Then, you'll need to authenticate yourself. You can do this by running the following command:
18
+
19
+ ```bash
20
+ huggingface-cli login
21
+ ```
22
+
23
+ Or, if you're using a notebook, you can use the `notebook_login` helper:
24
+
25
+ ```py
26
+ >>> from huggingface_hub import notebook_login
27
+ >>> notebook_login()
28
+ ```
29
+
30
+ ## Sharing a Model
31
+
32
+ ```py
33
+ >>> import timm
34
+ >>> model = timm.create_model('resnet18', pretrained=True, num_classes=4)
35
+ ```
36
+
37
+ Here is where you would normally train or fine-tune the model. We'll skip that for the sake of this tutorial.
38
+
39
+ Let's pretend we've now fine-tuned the model. The next step would be to push it to the Hub! We can do this with the `timm.models.hub.push_to_hf_hub` function.
40
+
41
+ ```py
42
+ >>> model_cfg = dict(label_names=['a', 'b', 'c', 'd'])
43
+ >>> timm.models.push_to_hf_hub(model, 'resnet18-random', model_config=model_cfg)
44
+ ```
45
+
46
+ Running the above would push the model to `<your-username>/resnet18-random` on the Hub. You can now share this model with your friends, or use it in your own code!
47
+
48
+ ## Loading a Model
49
+
50
+ Loading a model from the Hub is as simple as calling `timm.create_model` with the `pretrained` argument set to the name of the model you want to load. In this case, we'll use [`nateraw/resnet18-random`](https://huggingface.co/nateraw/resnet18-random), which is the model we just pushed to the Hub.
51
+
52
+ ```py
53
+ >>> model_reloaded = timm.create_model('hf_hub:nateraw/resnet18-random', pretrained=True)
54
+ ```
pytorch-image-models/hfdocs/source/index.mdx ADDED
@@ -0,0 +1,22 @@
1
+ # timm
2
+
3
+ <img class="float-left !m-0 !border-0 !dark:border-0 !shadow-none !max-w-lg w-[150px]" src="https://huggingface.co/front/thumbnails/docs/timm.png"/>
4
+
5
+ `timm` is a library containing SOTA computer vision models, layers, utilities, optimizers, schedulers, data-loaders, augmentations, and training/evaluation scripts.
6
+
7
+ It comes packaged with >700 pretrained models, and is designed to be flexible and easy to use.
8
+
9
+ Read the [quick start guide](quickstart) to get up and running with the `timm` library. You will learn how to load, discover, and use pretrained models included in the library.
10
+
11
+ <div class="mt-10">
12
+ <div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5">
13
+ <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./feature_extraction"
14
+ ><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Tutorials</div>
15
+ <p class="text-gray-700">Learn the basics and become familiar with timm. Start here if you are using timm for the first time!</p>
16
+ </a>
17
+ <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./reference/models"
18
+ ><div class="w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Reference</div>
19
+ <p class="text-gray-700">Technical descriptions of how timm classes and methods work.</p>
20
+ </a>
21
+ </div>
22
+ </div>
pytorch-image-models/hfdocs/source/installation.mdx ADDED
@@ -0,0 +1,74 @@
1
+ # Installation
2
+
3
+ Before you start, you'll need to set up your environment and install the appropriate packages. `timm` is tested on **Python 3+**.
4
+
5
+ ## Virtual Environment
6
+
7
+ You should install `timm` in a [virtual environment](https://docs.python.org/3/library/venv.html) to keep things tidy and avoid dependency conflicts.
8
+
9
+ 1. Create and navigate to your project directory:
10
+
11
+ ```bash
12
+ mkdir ~/my-project
13
+ cd ~/my-project
14
+ ```
15
+
16
+ 2. Start a virtual environment inside your directory:
17
+
18
+ ```bash
19
+ python -m venv .env
20
+ ```
21
+
22
+ 3. Activate and deactivate the virtual environment with the following commands:
23
+
24
+ ```bash
25
+ # Activate the virtual environment
26
+ source .env/bin/activate
27
+
28
+ # Deactivate the virtual environment
29
+ source .env/bin/deactivate
30
+ ```
31
+
32
+ Once you've created your virtual environment, you can install `timm` in it.
33
+
34
+ ## Using pip
35
+
36
+ The most straightforward way to install `timm` is with pip:
37
+
38
+ ```bash
39
+ pip install timm
40
+ ```
41
+
42
+ Alternatively, you can install `timm` from GitHub directly to get the latest, bleeding-edge version:
43
+
44
+ ```bash
45
+ pip install git+https://github.com/rwightman/pytorch-image-models.git
46
+ ```
47
+
48
+ Run the following command to check if `timm` has been properly installed:
49
+
50
+ ```bash
51
+ python -c "from timm import list_models; print(list_models(pretrained=True)[:5])"
52
+ ```
53
+
54
+ This command lists the first five pretrained models available in `timm` (which are sorted alphabetically). You should see the following output:
55
+
56
+ ```python
57
+ ['adv_inception_v3', 'bat_resnext26ts', 'beit_base_patch16_224', 'beit_base_patch16_224_in22k', 'beit_base_patch16_384']
58
+ ```
59
+
60
+ ## From Source
61
+
62
+ Building `timm` from source lets you make changes to the code base. To install from the source, clone the repository and install with the following commands:
63
+
64
+ ```bash
65
+ git clone https://github.com/rwightman/pytorch-image-models.git
66
+ cd pytorch-image-models
67
+ pip install -e .
68
+ ```
69
+
70
+ Again, you can check if `timm` was properly installed with the following command:
71
+
72
+ ```bash
73
+ python -c "from timm import list_models; print(list_models(pretrained=True)[:5])"
74
+ ```
pytorch-image-models/hfdocs/source/models.mdx ADDED
@@ -0,0 +1,230 @@
1
+ # Model Summaries
2
+
3
+ The model architectures included come from a wide variety of sources. Sources, including papers, original impl ("reference code") that I rewrote / adapted, and PyTorch impl that I leveraged directly ("code") are listed below.
4
+
5
+ Most included models have pretrained weights. The weights are either:
6
+
7
+ 1. from their original sources
8
+ 2. ported by myself from their original impl in a different framework (e.g. Tensorflow models)
9
+ 3. trained from scratch using the included training script
10
+
11
+ The validation results for the pretrained weights are [here](results)
12
+
13
+ A more exciting view (with pretty pictures) of the models within `timm` can be found at [paperswithcode](https://paperswithcode.com/lib/timm).
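+
+ Every architecture below is created through the same factory interface; a quick sketch using a couple of the model names from this page (any listed architecture can be substituted):
+
+ ```py
+ >>> import timm
+ >>> print(timm.list_models('dpn*'))  # discover variants of an architecture by wildcard
+ >>> model = timm.create_model('densenet121', pretrained=True)
+ ```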
14
+
15
+ ## Big Transfer ResNetV2 (BiT)
16
+
17
+ * Implementation: [resnetv2.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnetv2.py)
18
+ * Paper: `Big Transfer (BiT): General Visual Representation Learning` - https://arxiv.org/abs/1912.11370
19
+ * Reference code: https://github.com/google-research/big_transfer
20
+
21
+ ## Cross-Stage Partial Networks
22
+
23
+ * Implementation: [cspnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/cspnet.py)
24
+ * Paper: `CSPNet: A New Backbone that can Enhance Learning Capability of CNN` - https://arxiv.org/abs/1911.11929
25
+ * Reference impl: https://github.com/WongKinYiu/CrossStagePartialNetworks
26
+
27
+ ## DenseNet
28
+
29
+ * Implementation: [densenet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/densenet.py)
30
+ * Paper: `Densely Connected Convolutional Networks` - https://arxiv.org/abs/1608.06993
31
+ * Code: https://github.com/pytorch/vision/tree/master/torchvision/models
32
+
33
+ ## DLA
34
+
35
+ * Implementation: [dla.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/dla.py)
36
+ * Paper: `Deep Layer Aggregation` - https://arxiv.org/abs/1707.06484
37
+ * Code: https://github.com/ucbdrive/dla
38
+
39
+ ## Dual-Path Networks
40
+
41
+ * Implementation: [dpn.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/dpn.py)
42
+ * Paper: `Dual Path Networks` - https://arxiv.org/abs/1707.01629
43
+ * My PyTorch code: https://github.com/rwightman/pytorch-dpn-pretrained
44
+ * Reference code: https://github.com/cypw/DPNs
45
+
46
+ ## GPU-Efficient Networks
47
+
48
+ * Implementation: [byobnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/byobnet.py)
49
+ * Paper: `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
50
+ * Reference code: https://github.com/idstcv/GPU-Efficient-Networks
51
+
52
+ ## HRNet
53
+
54
+ * Implementation: [hrnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/hrnet.py)
55
+ * Paper: `Deep High-Resolution Representation Learning for Visual Recognition` - https://arxiv.org/abs/1908.07919
56
+ * Code: https://github.com/HRNet/HRNet-Image-Classification
57
+
58
+ ## Inception-V3
59
+
60
+ * Implementation: [inception_v3.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_v3.py)
61
+ * Paper: `Rethinking the Inception Architecture for Computer Vision` - https://arxiv.org/abs/1512.00567
62
+ * Code: https://github.com/pytorch/vision/tree/master/torchvision/models
63
+
64
+ ## Inception-V4
65
+
66
+ * Implementation: [inception_v4.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_v4.py)
67
+ * Paper: `Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning` - https://arxiv.org/abs/1602.07261
68
+ * Code: https://github.com/Cadene/pretrained-models.pytorch
69
+ * Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets
70
+
71
+ ## Inception-ResNet-V2
72
+
73
+ * Implementation: [inception_resnet_v2.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_resnet_v2.py)
74
+ * Paper: `Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning` - https://arxiv.org/abs/1602.07261
75
+ * Code: https://github.com/Cadene/pretrained-models.pytorch
76
+ * Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets
77
+
78
+ ## NASNet-A
79
+
80
+ * Implementation: [nasnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/nasnet.py)
81
+ * Papers: `Learning Transferable Architectures for Scalable Image Recognition` - https://arxiv.org/abs/1707.07012
82
+ * Code: https://github.com/Cadene/pretrained-models.pytorch
83
+ * Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/nasnet
84
+
85
+ ## PNasNet-5
86
+
87
+ * Implementation: [pnasnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/pnasnet.py)
88
+ * Papers: `Progressive Neural Architecture Search` - https://arxiv.org/abs/1712.00559
89
+ * Code: https://github.com/Cadene/pretrained-models.pytorch
90
+ * Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/nasnet
91
+
92
+ ## EfficientNet
93
+
94
+ * Implementation: [efficientnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/efficientnet.py)
95
+ * Papers:
96
+ * EfficientNet NoisyStudent (B0-B7, L2) - https://arxiv.org/abs/1911.04252
97
+ * EfficientNet AdvProp (B0-B8) - https://arxiv.org/abs/1911.09665
98
+ * EfficientNet (B0-B7) - https://arxiv.org/abs/1905.11946
99
+ * EfficientNet-EdgeTPU (S, M, L) - https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html
100
+ * MixNet - https://arxiv.org/abs/1907.09595
101
+ * MNASNet B1, A1 (Squeeze-Excite), and Small - https://arxiv.org/abs/1807.11626
102
+ * MobileNet-V2 - https://arxiv.org/abs/1801.04381
103
+ * FBNet-C - https://arxiv.org/abs/1812.03443
104
+ * Single-Path NAS - https://arxiv.org/abs/1904.02877
105
+ * My PyTorch code: https://github.com/rwightman/gen-efficientnet-pytorch
106
+ * Reference code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
107
+
108
+ ## MobileNet-V3
109
+
110
+ * Implementation: [mobilenetv3.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mobilenetv3.py)
111
+ * Paper: `Searching for MobileNetV3` - https://arxiv.org/abs/1905.02244
112
+ * Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet
113
+
114
+ ## RegNet
115
+
116
+ * Implementation: [regnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/regnet.py)
117
+ * Paper: `Designing Network Design Spaces` - https://arxiv.org/abs/2003.13678
118
+ * Reference code: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py
119
+
120
+ ## RepVGG
121
+
122
+ * Implementation: [byobnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/byobnet.py)
123
+ * Paper: `RepVGG: Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
124
+ * Reference code: https://github.com/DingXiaoH/RepVGG
125
+
126
+ ## ResNet, ResNeXt
127
+
128
+ * Implementation: [resnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnet.py)
129
+
130
+ * ResNet (V1B)
131
+ * Paper: `Deep Residual Learning for Image Recognition` - https://arxiv.org/abs/1512.03385
132
+ * Code: https://github.com/pytorch/vision/tree/master/torchvision/models
133
+ * ResNeXt
134
+ * Paper: `Aggregated Residual Transformations for Deep Neural Networks` - https://arxiv.org/abs/1611.05431
135
+ * Code: https://github.com/pytorch/vision/tree/master/torchvision/models
136
+ * 'Bag of Tricks' / Gluon C, D, E, S ResNet variants
137
+ * Paper: `Bag of Tricks for Image Classification with CNNs` - https://arxiv.org/abs/1812.01187
138
+ * Code: https://github.com/dmlc/gluon-cv/blob/master/gluoncv/model_zoo/resnetv1b.py
139
+ * Instagram pretrained / ImageNet tuned ResNeXt101
140
+ * Paper: `Exploring the Limits of Weakly Supervised Pretraining` - https://arxiv.org/abs/1805.00932
141
+ * Weights: https://pytorch.org/hub/facebookresearch_WSL-Images_resnext (NOTE: CC BY-NC 4.0 License, NOT commercial friendly)
142
+ * Semi-supervised (SSL) / Semi-weakly Supervised (SWSL) ResNet and ResNeXts
143
+ * Paper: `Billion-scale semi-supervised learning for image classification` - https://arxiv.org/abs/1905.00546
144
+ * Weights: https://github.com/facebookresearch/semi-supervised-ImageNet1K-models (NOTE: CC BY-NC 4.0 License, NOT commercial friendly)
145
+ * Squeeze-and-Excitation Networks
146
+ * Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507
147
+ * Code: Added to the ResNet base; this is the current version going forward, and the old `senet.py` is being deprecated
148
+ * ECAResNet (ECA-Net)
149
+ * Paper: `ECA-Net: Efficient Channel Attention for Deep CNN` - https://arxiv.org/abs/1910.03151v4
150
+ * Code: Added to ResNet base, ECA module contributed by @VRandme, reference https://github.com/BangguWu/ECANet
151
+
152
+ ## Res2Net
153
+
154
+ * Implementation: [res2net.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/res2net.py)
155
+ * Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169
156
+ * Code: https://github.com/gasvn/Res2Net
157
+
158
+ ## ResNeSt
159
+
160
+ * Implementation: [resnest.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnest.py)
161
+ * Paper: `ResNeSt: Split-Attention Networks` - https://arxiv.org/abs/2004.08955
162
+ * Code: https://github.com/zhanghang1989/ResNeSt
163
+
164
+ ## ReXNet
165
+
166
+ * Implementation: [rexnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/rexnet.py)
167
+ * Paper: `ReXNet: Diminishing Representational Bottleneck on CNN` - https://arxiv.org/abs/2007.00992
168
+ * Code: https://github.com/clovaai/rexnet
169
+
170
+ ## Selective-Kernel Networks
171
+
172
+ * Implementation: [sknet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/sknet.py)
173
+ * Paper: `Selective-Kernel Networks` - https://arxiv.org/abs/1903.06586
174
+ * Code: https://github.com/implus/SKNet, https://github.com/clovaai/assembled-cnn
175
+
176
+ ## SelecSLS
177
+
178
+ * Implementation: [selecsls.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/selecsls.py)
179
+ * Paper: `XNect: Real-time Multi-Person 3D Motion Capture with a Single RGB Camera` - https://arxiv.org/abs/1907.00837
180
+ * Code: https://github.com/mehtadushy/SelecSLS-Pytorch
181
+
182
+ ## Squeeze-and-Excitation Networks
183
+
184
+ * Implementation: [senet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/senet.py)
185
+ NOTE: I am deprecating this version of the networks; the new ones are part of `resnet.py`
186
+
187
+ * Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507
188
+ * Code: https://github.com/Cadene/pretrained-models.pytorch
189
+
190
+ ## TResNet
191
+
192
+ * Implementation: [tresnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/tresnet.py)
193
+ * Paper: `TResNet: High Performance GPU-Dedicated Architecture` - https://arxiv.org/abs/2003.13630
194
+ * Code: https://github.com/mrT23/TResNet
195
+
196
+ ## VGG
197
+
198
+ * Implementation: [vgg.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vgg.py)
199
+ * Paper: `Very Deep Convolutional Networks For Large-Scale Image Recognition` - https://arxiv.org/pdf/1409.1556.pdf
200
+ * Reference code: https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
201
+
202
+ ## Vision Transformer
203
+
204
+ * Implementation: [vision_transformer.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py)
205
+ * Paper: `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - https://arxiv.org/abs/2010.11929
206
+ * Reference code and pretrained weights: https://github.com/google-research/vision_transformer
207
+
208
+ ## VovNet V2 and V1
209
+
210
+ * Implementation: [vovnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vovnet.py)
211
+ * Paper: `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667
212
+ * Reference code: https://github.com/youngwanLEE/vovnet-detectron2
213
+
214
+ ## Xception
215
+
216
+ * Implementation: [xception.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/xception.py)
217
+ * Paper: `Xception: Deep Learning with Depthwise Separable Convolutions` - https://arxiv.org/abs/1610.02357
218
+ * Code: https://github.com/Cadene/pretrained-models.pytorch
219
+
220
+ ## Xception (Modified Aligned, Gluon)
221
+
222
+ * Implementation: [gluon_xception.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/gluon_xception.py)
223
+ * Paper: `Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation` - https://arxiv.org/abs/1802.02611
224
+ * Reference code: https://github.com/dmlc/gluon-cv/tree/master/gluoncv/model_zoo, https://github.com/jfzhang95/pytorch-deeplab-xception/
225
+
226
+ ## Xception (Modified Aligned, TF)
227
+
228
+ * Implementation: [xception_aligned.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/xception_aligned.py)
229
+ * Paper: `Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation` - https://arxiv.org/abs/1802.02611
230
+ * Reference code: https://github.com/tensorflow/models/tree/master/research/deeplab
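+
+ All of the architectures above are registered under model names that can be passed to `timm`'s factory functions. As a quick, illustrative sketch (the variant names here are only examples; see the per-model pages for the full lists):
+
+ ```py
+ >>> import timm
+ >>> timm.list_models('*efficientnet*')[:5]  # discover registered variant names by wildcard
+ >>> model = timm.create_model('resnet50', pretrained=False)  # build any listed architecture by name
+ ```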
pytorch-image-models/hfdocs/source/models/adversarial-inception-v3.mdx ADDED
@@ -0,0 +1,165 @@
1
+ # Adversarial Inception v3
2
+
3
+ **Inception v3** is a convolutional neural network architecture from the Inception family that makes several improvements, including using [Label Smoothing](https://paperswithcode.com/method/label-smoothing), factorized 7 x 7 convolutions, and the use of an [auxiliary classifier](https://paperswithcode.com/method/auxiliary-classifier) to propagate label information lower down the network (along with the use of batch normalization for layers in the side head). The key building block is an [Inception Module](https://paperswithcode.com/method/inception-v3-module).
4
+
5
+ This particular model was trained for the study of adversarial examples (adversarial training).
6
+
7
+ The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models).
8
+
9
+ ## How do I use this model on an image?
10
+
11
+ To load a pretrained model:
12
+
13
+ ```py
14
+ >>> import timm
15
+ >>> model = timm.create_model('adv_inception_v3', pretrained=True)
16
+ >>> model.eval()
17
+ ```
18
+
19
+ To load and preprocess the image:
20
+
21
+ ```py
22
+ >>> import urllib
23
+ >>> from PIL import Image
24
+ >>> from timm.data import resolve_data_config
25
+ >>> from timm.data.transforms_factory import create_transform
26
+
27
+ >>> config = resolve_data_config({}, model=model)
28
+ >>> transform = create_transform(**config)
29
+
30
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
31
+ >>> urllib.request.urlretrieve(url, filename)
32
+ >>> img = Image.open(filename).convert('RGB')
33
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
34
+ ```
35
+
36
+ To get the model predictions:
37
+
38
+ ```py
39
+ >>> import torch
40
+ >>> with torch.no_grad():
41
+ ... out = model(tensor)
42
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
43
+ >>> print(probabilities.shape)
44
+ >>> # prints: torch.Size([1000])
45
+ ```
46
+
47
+ To get the top-5 predictions class names:
48
+
49
+ ```py
50
+ >>> # Get imagenet class mappings
51
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
52
+ >>> urllib.request.urlretrieve(url, filename)
53
+ >>> with open("imagenet_classes.txt", "r") as f:
54
+ ... categories = [s.strip() for s in f.readlines()]
55
+
56
+ >>> # Print top categories per image
57
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
58
+ >>> for i in range(top5_prob.size(0)):
59
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
60
+ >>> # prints class names and probabilities like:
61
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
62
+ ```
63
+
64
+ Replace the model name with the variant you want to use, e.g. `adv_inception_v3`. You can find the IDs in the model summaries at the top of this page.
65
+
66
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
67
+
68
+ ## How do I finetune this model?
69
+
70
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
71
+
72
+ ```py
73
+ >>> model = timm.create_model('adv_inception_v3', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
74
+ ```
75
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
76
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
77
+
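+ If you prefer to write the loop yourself, a minimal sketch might look like the following (here `train_dataset` stands in for your own `torch.utils.data.Dataset`, and `model` is the re-headed model created above):
+
+ ```py
+ >>> import torch
+ >>> from torch.utils.data import DataLoader
+
+ >>> loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
+ >>> optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
+ >>> criterion = torch.nn.CrossEntropyLoss()
+
+ >>> model.train()
+ >>> for images, targets in loader:
+ ...     optimizer.zero_grad()
+ ...     loss = criterion(model(images), targets)  # forward pass returns class logits
+ ...     loss.backward()
+ ...     optimizer.step()
+ ```
+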
78
+ ## How do I train this model?
79
+
80
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
81
+
82
+ ## Citation
83
+
84
+ ```BibTeX
85
+ @article{DBLP:journals/corr/abs-1804-00097,
86
+ author = {Alexey Kurakin and
87
+ Ian J. Goodfellow and
88
+ Samy Bengio and
89
+ Yinpeng Dong and
90
+ Fangzhou Liao and
91
+ Ming Liang and
92
+ Tianyu Pang and
93
+ Jun Zhu and
94
+ Xiaolin Hu and
95
+ Cihang Xie and
96
+ Jianyu Wang and
97
+ Zhishuai Zhang and
98
+ Zhou Ren and
99
+ Alan L. Yuille and
100
+ Sangxia Huang and
101
+ Yao Zhao and
102
+ Yuzhe Zhao and
103
+ Zhonglin Han and
104
+ Junjiajia Long and
105
+ Yerkebulan Berdibekov and
106
+ Takuya Akiba and
107
+ Seiya Tokui and
108
+ Motoki Abe},
109
+ title = {Adversarial Attacks and Defences Competition},
110
+ journal = {CoRR},
111
+ volume = {abs/1804.00097},
112
+ year = {2018},
113
+ url = {http://arxiv.org/abs/1804.00097},
114
+ archivePrefix = {arXiv},
115
+ eprint = {1804.00097},
116
+ timestamp = {Thu, 31 Oct 2019 16:31:22 +0100},
117
+ biburl = {https://dblp.org/rec/journals/corr/abs-1804-00097.bib},
118
+ bibsource = {dblp computer science bibliography, https://dblp.org}
119
+ }
120
+ ```
121
+
122
+ <!--
123
+ Type: model-index
124
+ Collections:
125
+ - Name: Adversarial Inception v3
126
+ Paper:
127
+ Title: Adversarial Attacks and Defences Competition
128
+ URL: https://paperswithcode.com/paper/adversarial-attacks-and-defences-competition
129
+ Models:
130
+ - Name: adv_inception_v3
131
+ In Collection: Adversarial Inception v3
132
+ Metadata:
133
+ FLOPs: 7352418880
134
+ Parameters: 23830000
135
+ File Size: 95549439
136
+ Architecture:
137
+ - 1x1 Convolution
138
+ - Auxiliary Classifier
139
+ - Average Pooling
140
+ - Average Pooling
141
+ - Batch Normalization
142
+ - Convolution
143
+ - Dense Connections
144
+ - Dropout
145
+ - Inception-v3 Module
146
+ - Max Pooling
147
+ - ReLU
148
+ - Softmax
149
+ Tasks:
150
+ - Image Classification
151
+ Training Data:
152
+ - ImageNet
153
+ ID: adv_inception_v3
154
+ Crop Pct: '0.875'
155
+ Image Size: '299'
156
+ Interpolation: bicubic
157
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/inception_v3.py#L456
158
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/adv_inception_v3-9e27bd63.pth
159
+ Results:
160
+ - Task: Image Classification
161
+ Dataset: ImageNet
162
+ Metrics:
163
+ Top 1 Accuracy: 77.58%
164
+ Top 5 Accuracy: 93.74%
165
+ -->
pytorch-image-models/hfdocs/source/models/advprop.mdx ADDED
@@ -0,0 +1,524 @@
1
+ # AdvProp (EfficientNet)
2
+
3
+ **AdvProp** is an adversarial training scheme which treats adversarial examples as additional training examples in order to prevent overfitting. Key to the method is the use of a separate auxiliary batch norm for adversarial examples, as they have different underlying distributions from normal examples.
4
+
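+ The split can be sketched as two batch-norm layers that share all surrounding convolutional weights, with the auxiliary branch seeing only adversarial mini-batches (an illustrative sketch, not the exact timm/TPU implementation):
+
+ ```py
+ >>> import torch
+ >>> import torch.nn as nn
+
+ >>> class DualBatchNorm2d(nn.Module):
+ ...     """Main BN for clean examples, auxiliary BN for adversarial ones (illustrative)."""
+ ...     def __init__(self, num_features):
+ ...         super().__init__()
+ ...         self.bn_clean = nn.BatchNorm2d(num_features)
+ ...         self.bn_adv = nn.BatchNorm2d(num_features)
+ ...     def forward(self, x, adversarial=False):
+ ...         return self.bn_adv(x) if adversarial else self.bn_clean(x)
+
+ >>> bn = DualBatchNorm2d(32)
+ >>> out_clean = bn(torch.randn(8, 32, 56, 56), adversarial=False)  # clean statistics
+ >>> out_adv = bn(torch.randn(8, 32, 56, 56), adversarial=True)     # auxiliary statistics
+ ```
+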
5
+ The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu).
6
+
7
+ ## How do I use this model on an image?
8
+
9
+ To load a pretrained model:
10
+
11
+ ```py
12
+ >>> import timm
13
+ >>> model = timm.create_model('tf_efficientnet_b0_ap', pretrained=True)
14
+ >>> model.eval()
15
+ ```
16
+
17
+ To load and preprocess the image:
18
+
19
+ ```py
20
+ >>> import urllib
21
+ >>> from PIL import Image
22
+ >>> from timm.data import resolve_data_config
23
+ >>> from timm.data.transforms_factory import create_transform
24
+
25
+ >>> config = resolve_data_config({}, model=model)
26
+ >>> transform = create_transform(**config)
27
+
28
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
29
+ >>> urllib.request.urlretrieve(url, filename)
30
+ >>> img = Image.open(filename).convert('RGB')
31
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
32
+ ```
33
+
34
+ To get the model predictions:
35
+
36
+ ```py
37
+ >>> import torch
38
+ >>> with torch.no_grad():
39
+ ... out = model(tensor)
40
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
41
+ >>> print(probabilities.shape)
42
+ >>> # prints: torch.Size([1000])
43
+ ```
44
+
45
+ To get the top-5 predictions class names:
46
+
47
+ ```py
48
+ >>> # Get imagenet class mappings
49
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
50
+ >>> urllib.request.urlretrieve(url, filename)
51
+ >>> with open("imagenet_classes.txt", "r") as f:
52
+ ... categories = [s.strip() for s in f.readlines()]
53
+
54
+ >>> # Print top categories per image
55
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
56
+ >>> for i in range(top5_prob.size(0)):
57
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
58
+ >>> # prints class names and probabilities like:
59
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
60
+ ```
61
+
62
+ Replace the model name with the variant you want to use, e.g. `tf_efficientnet_b0_ap`. You can find the IDs in the model summaries at the top of this page.
63
+
64
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
65
+
66
+ ## How do I finetune this model?
67
+
68
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
69
+
70
+ ```py
71
+ >>> model = timm.create_model('tf_efficientnet_b0_ap', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
72
+ ```
73
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
74
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
75
+
76
+ ## How do I train this model?
77
+
78
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
79
+
80
+ ## Citation
81
+
82
+ ```BibTeX
83
+ @misc{xie2020adversarial,
84
+ title={Adversarial Examples Improve Image Recognition},
85
+ author={Cihang Xie and Mingxing Tan and Boqing Gong and Jiang Wang and Alan Yuille and Quoc V. Le},
86
+ year={2020},
87
+ eprint={1911.09665},
88
+ archivePrefix={arXiv},
89
+ primaryClass={cs.CV}
90
+ }
91
+ ```
92
+
93
+ <!--
94
+ Type: model-index
95
+ Collections:
96
+ - Name: AdvProp
97
+ Paper:
98
+ Title: Adversarial Examples Improve Image Recognition
99
+ URL: https://paperswithcode.com/paper/adversarial-examples-improve-image
100
+ Models:
101
+ - Name: tf_efficientnet_b0_ap
102
+ In Collection: AdvProp
103
+ Metadata:
104
+ FLOPs: 488688572
105
+ Parameters: 5290000
106
+ File Size: 21385973
107
+ Architecture:
108
+ - 1x1 Convolution
109
+ - Average Pooling
110
+ - Batch Normalization
111
+ - Convolution
112
+ - Dense Connections
113
+ - Dropout
114
+ - Inverted Residual Block
115
+ - Squeeze-and-Excitation Block
116
+ - Swish
117
+ Tasks:
118
+ - Image Classification
119
+ Training Techniques:
120
+ - AdvProp
121
+ - AutoAugment
122
+ - Label Smoothing
123
+ - RMSProp
124
+ - Stochastic Depth
125
+ - Weight Decay
126
+ Training Data:
127
+ - ImageNet
128
+ ID: tf_efficientnet_b0_ap
129
+ LR: 0.256
130
+ Epochs: 350
131
+ Crop Pct: '0.875'
132
+ Momentum: 0.9
133
+ Batch Size: 2048
134
+ Image Size: '224'
135
+ Weight Decay: 1.0e-05
136
+ Interpolation: bicubic
137
+ RMSProp Decay: 0.9
138
+ Label Smoothing: 0.1
139
+ BatchNorm Momentum: 0.99
140
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1334
141
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ap-f262efe1.pth
142
+ Results:
143
+ - Task: Image Classification
144
+ Dataset: ImageNet
145
+ Metrics:
146
+ Top 1 Accuracy: 77.1%
147
+ Top 5 Accuracy: 93.26%
148
+ - Name: tf_efficientnet_b1_ap
149
+ In Collection: AdvProp
150
+ Metadata:
151
+ FLOPs: 883633200
152
+ Parameters: 7790000
153
+ File Size: 31515350
154
+ Architecture:
155
+ - 1x1 Convolution
156
+ - Average Pooling
157
+ - Batch Normalization
158
+ - Convolution
159
+ - Dense Connections
160
+ - Dropout
161
+ - Inverted Residual Block
162
+ - Squeeze-and-Excitation Block
163
+ - Swish
164
+ Tasks:
165
+ - Image Classification
166
+ Training Techniques:
167
+ - AdvProp
168
+ - AutoAugment
169
+ - Label Smoothing
170
+ - RMSProp
171
+ - Stochastic Depth
172
+ - Weight Decay
173
+ Training Data:
174
+ - ImageNet
175
+ ID: tf_efficientnet_b1_ap
176
+ LR: 0.256
177
+ Epochs: 350
178
+ Crop Pct: '0.882'
179
+ Momentum: 0.9
180
+ Batch Size: 2048
181
+ Image Size: '240'
182
+ Weight Decay: 1.0e-05
183
+ Interpolation: bicubic
184
+ RMSProp Decay: 0.9
185
+ Label Smoothing: 0.1
186
+ BatchNorm Momentum: 0.99
187
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1344
188
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ap-44ef0a3d.pth
189
+ Results:
190
+ - Task: Image Classification
191
+ Dataset: ImageNet
192
+ Metrics:
193
+ Top 1 Accuracy: 79.28%
194
+ Top 5 Accuracy: 94.3%
195
+ - Name: tf_efficientnet_b2_ap
196
+ In Collection: AdvProp
197
+ Metadata:
198
+ FLOPs: 1234321170
199
+ Parameters: 9110000
200
+ File Size: 36800745
201
+ Architecture:
202
+ - 1x1 Convolution
203
+ - Average Pooling
204
+ - Batch Normalization
205
+ - Convolution
206
+ - Dense Connections
207
+ - Dropout
208
+ - Inverted Residual Block
209
+ - Squeeze-and-Excitation Block
210
+ - Swish
211
+ Tasks:
212
+ - Image Classification
213
+ Training Techniques:
214
+ - AdvProp
215
+ - AutoAugment
216
+ - Label Smoothing
217
+ - RMSProp
218
+ - Stochastic Depth
219
+ - Weight Decay
220
+ Training Data:
221
+ - ImageNet
222
+ ID: tf_efficientnet_b2_ap
223
+ LR: 0.256
224
+ Epochs: 350
225
+ Crop Pct: '0.89'
226
+ Momentum: 0.9
227
+ Batch Size: 2048
228
+ Image Size: '260'
229
+ Weight Decay: 1.0e-05
230
+ Interpolation: bicubic
231
+ RMSProp Decay: 0.9
232
+ Label Smoothing: 0.1
233
+ BatchNorm Momentum: 0.99
234
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1354
235
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ap-2f8e7636.pth
236
+ Results:
237
+ - Task: Image Classification
238
+ Dataset: ImageNet
239
+ Metrics:
240
+ Top 1 Accuracy: 80.3%
241
+ Top 5 Accuracy: 95.03%
242
+ - Name: tf_efficientnet_b3_ap
243
+ In Collection: AdvProp
244
+ Metadata:
245
+ FLOPs: 2275247568
246
+ Parameters: 12230000
247
+ File Size: 49384538
248
+ Architecture:
249
+ - 1x1 Convolution
250
+ - Average Pooling
251
+ - Batch Normalization
252
+ - Convolution
253
+ - Dense Connections
254
+ - Dropout
255
+ - Inverted Residual Block
256
+ - Squeeze-and-Excitation Block
257
+ - Swish
258
+ Tasks:
259
+ - Image Classification
260
+ Training Techniques:
261
+ - AdvProp
262
+ - AutoAugment
263
+ - Label Smoothing
264
+ - RMSProp
265
+ - Stochastic Depth
266
+ - Weight Decay
267
+ Training Data:
268
+ - ImageNet
269
+ ID: tf_efficientnet_b3_ap
270
+ LR: 0.256
271
+ Epochs: 350
272
+ Crop Pct: '0.904'
273
+ Momentum: 0.9
274
+ Batch Size: 2048
275
+ Image Size: '300'
276
+ Weight Decay: 1.0e-05
277
+ Interpolation: bicubic
278
+ RMSProp Decay: 0.9
279
+ Label Smoothing: 0.1
280
+ BatchNorm Momentum: 0.99
281
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1364
282
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ap-aad25bdd.pth
283
+ Results:
284
+ - Task: Image Classification
285
+ Dataset: ImageNet
286
+ Metrics:
287
+ Top 1 Accuracy: 81.82%
288
+ Top 5 Accuracy: 95.62%
289
+ - Name: tf_efficientnet_b4_ap
290
+ In Collection: AdvProp
291
+ Metadata:
292
+ FLOPs: 5749638672
293
+ Parameters: 19340000
294
+ File Size: 77993585
295
+ Architecture:
296
+ - 1x1 Convolution
297
+ - Average Pooling
298
+ - Batch Normalization
299
+ - Convolution
300
+ - Dense Connections
301
+ - Dropout
302
+ - Inverted Residual Block
303
+ - Squeeze-and-Excitation Block
304
+ - Swish
305
+ Tasks:
306
+ - Image Classification
307
+ Training Techniques:
308
+ - AdvProp
309
+ - AutoAugment
310
+ - Label Smoothing
311
+ - RMSProp
312
+ - Stochastic Depth
313
+ - Weight Decay
314
+ Training Data:
315
+ - ImageNet
316
+ ID: tf_efficientnet_b4_ap
317
+ LR: 0.256
318
+ Epochs: 350
319
+ Crop Pct: '0.922'
320
+ Momentum: 0.9
321
+ Batch Size: 2048
322
+ Image Size: '380'
323
+ Weight Decay: 1.0e-05
324
+ Interpolation: bicubic
325
+ RMSProp Decay: 0.9
326
+ Label Smoothing: 0.1
327
+ BatchNorm Momentum: 0.99
328
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1374
329
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ap-dedb23e6.pth
330
+ Results:
331
+ - Task: Image Classification
332
+ Dataset: ImageNet
333
+ Metrics:
334
+ Top 1 Accuracy: 83.26%
335
+ Top 5 Accuracy: 96.39%
336
+ - Name: tf_efficientnet_b5_ap
337
+ In Collection: AdvProp
338
+ Metadata:
339
+ FLOPs: 13176501888
340
+ Parameters: 30390000
341
+ File Size: 122403150
342
+ Architecture:
343
+ - 1x1 Convolution
344
+ - Average Pooling
345
+ - Batch Normalization
346
+ - Convolution
347
+ - Dense Connections
348
+ - Dropout
349
+ - Inverted Residual Block
350
+ - Squeeze-and-Excitation Block
351
+ - Swish
352
+ Tasks:
353
+ - Image Classification
354
+ Training Techniques:
355
+ - AdvProp
356
+ - AutoAugment
357
+ - Label Smoothing
358
+ - RMSProp
359
+ - Stochastic Depth
360
+ - Weight Decay
361
+ Training Data:
362
+ - ImageNet
363
+ ID: tf_efficientnet_b5_ap
364
+ LR: 0.256
365
+ Epochs: 350
366
+ Crop Pct: '0.934'
367
+ Momentum: 0.9
368
+ Batch Size: 2048
369
+ Image Size: '456'
370
+ Weight Decay: 1.0e-05
371
+ Interpolation: bicubic
372
+ RMSProp Decay: 0.9
373
+ Label Smoothing: 0.1
374
+ BatchNorm Momentum: 0.99
375
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1384
376
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ap-9e82fae8.pth
377
+ Results:
378
+ - Task: Image Classification
379
+ Dataset: ImageNet
380
+ Metrics:
381
+ Top 1 Accuracy: 84.25%
382
+ Top 5 Accuracy: 96.97%
383
+ - Name: tf_efficientnet_b6_ap
384
+ In Collection: AdvProp
385
+ Metadata:
386
+ FLOPs: 24180518488
387
+ Parameters: 43040000
388
+ File Size: 173237466
389
+ Architecture:
390
+ - 1x1 Convolution
391
+ - Average Pooling
392
+ - Batch Normalization
393
+ - Convolution
394
+ - Dense Connections
395
+ - Dropout
396
+ - Inverted Residual Block
397
+ - Squeeze-and-Excitation Block
398
+ - Swish
399
+ Tasks:
400
+ - Image Classification
401
+ Training Techniques:
402
+ - AdvProp
403
+ - AutoAugment
404
+ - Label Smoothing
405
+ - RMSProp
406
+ - Stochastic Depth
407
+ - Weight Decay
408
+ Training Data:
409
+ - ImageNet
410
+ ID: tf_efficientnet_b6_ap
411
+ LR: 0.256
412
+ Epochs: 350
413
+ Crop Pct: '0.942'
414
+ Momentum: 0.9
415
+ Batch Size: 2048
416
+ Image Size: '528'
417
+ Weight Decay: 1.0e-05
418
+ Interpolation: bicubic
419
+ RMSProp Decay: 0.9
420
+ Label Smoothing: 0.1
421
+ BatchNorm Momentum: 0.99
422
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1394
423
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ap-4ffb161f.pth
424
+ Results:
425
+ - Task: Image Classification
426
+ Dataset: ImageNet
427
+ Metrics:
428
+ Top 1 Accuracy: 84.79%
429
+ Top 5 Accuracy: 97.14%
430
+ - Name: tf_efficientnet_b7_ap
431
+ In Collection: AdvProp
432
+ Metadata:
433
+ FLOPs: 48205304880
434
+ Parameters: 66349999
435
+ File Size: 266850607
436
+ Architecture:
437
+ - 1x1 Convolution
438
+ - Average Pooling
439
+ - Batch Normalization
440
+ - Convolution
441
+ - Dense Connections
442
+ - Dropout
443
+ - Inverted Residual Block
444
+ - Squeeze-and-Excitation Block
445
+ - Swish
446
+ Tasks:
447
+ - Image Classification
448
+ Training Techniques:
449
+ - AdvProp
450
+ - AutoAugment
451
+ - Label Smoothing
452
+ - RMSProp
453
+ - Stochastic Depth
454
+ - Weight Decay
455
+ Training Data:
456
+ - ImageNet
457
+ ID: tf_efficientnet_b7_ap
458
+ LR: 0.256
459
+ Epochs: 350
460
+ Crop Pct: '0.949'
461
+ Momentum: 0.9
462
+ Batch Size: 2048
463
+ Image Size: '600'
464
+ Weight Decay: 1.0e-05
465
+ Interpolation: bicubic
466
+ RMSProp Decay: 0.9
467
+ Label Smoothing: 0.1
468
+ BatchNorm Momentum: 0.99
469
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1405
470
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ap-ddb28fec.pth
471
+ Results:
472
+ - Task: Image Classification
473
+ Dataset: ImageNet
474
+ Metrics:
475
+ Top 1 Accuracy: 85.12%
476
+ Top 5 Accuracy: 97.25%
477
+ - Name: tf_efficientnet_b8_ap
478
+ In Collection: AdvProp
479
+ Metadata:
480
+ FLOPs: 80962956270
481
+ Parameters: 87410000
482
+ File Size: 351412563
483
+ Architecture:
484
+ - 1x1 Convolution
485
+ - Average Pooling
486
+ - Batch Normalization
487
+ - Convolution
488
+ - Dense Connections
489
+ - Dropout
490
+ - Inverted Residual Block
491
+ - Squeeze-and-Excitation Block
492
+ - Swish
493
+ Tasks:
494
+ - Image Classification
495
+ Training Techniques:
496
+ - AdvProp
497
+ - AutoAugment
498
+ - Label Smoothing
499
+ - RMSProp
500
+ - Stochastic Depth
501
+ - Weight Decay
502
+ Training Data:
503
+ - ImageNet
504
+ ID: tf_efficientnet_b8_ap
505
+ LR: 0.128
506
+ Epochs: 350
507
+ Crop Pct: '0.954'
508
+ Momentum: 0.9
509
+ Batch Size: 2048
510
+ Image Size: '672'
511
+ Weight Decay: 1.0e-05
512
+ Interpolation: bicubic
513
+ RMSProp Decay: 0.9
514
+ Label Smoothing: 0.1
515
+ BatchNorm Momentum: 0.99
516
+ Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1416
517
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ap-00e169fa.pth
518
+ Results:
519
+ - Task: Image Classification
520
+ Dataset: ImageNet
521
+ Metrics:
522
+ Top 1 Accuracy: 85.37%
523
+ Top 5 Accuracy: 97.3%
524
+ -->
pytorch-image-models/hfdocs/source/models/big-transfer.mdx ADDED
@@ -0,0 +1,362 @@
1
+ # Big Transfer (BiT)
2
+
3
+ **Big Transfer (BiT)** is a pretraining recipe that pre-trains on a large supervised source dataset and then fine-tunes the weights on the target task. The models are pre-trained on the JFT-300M dataset; the finetuned models contained in this collection are finetuned on ImageNet.
4
+
5
+ ## How do I use this model on an image?
6
+
7
+ To load a pretrained model:
8
+
9
+ ```py
10
+ >>> import timm
11
+ >>> model = timm.create_model('resnetv2_101x1_bitm', pretrained=True)
12
+ >>> model.eval()
13
+ ```
14
+
15
+ To load and preprocess the image:
16
+
17
+ ```py
18
+ >>> import urllib
19
+ >>> from PIL import Image
20
+ >>> from timm.data import resolve_data_config
21
+ >>> from timm.data.transforms_factory import create_transform
22
+
23
+ >>> config = resolve_data_config({}, model=model)
24
+ >>> transform = create_transform(**config)
25
+
26
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
27
+ >>> urllib.request.urlretrieve(url, filename)
28
+ >>> img = Image.open(filename).convert('RGB')
29
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
30
+ ```
31
+
32
+ To get the model predictions:
33
+
34
+ ```py
35
+ >>> import torch
36
+ >>> with torch.no_grad():
37
+ ... out = model(tensor)
38
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
39
+ >>> print(probabilities.shape)
40
+ >>> # prints: torch.Size([1000])
41
+ ```
42
+
43
+ To get the top-5 predictions class names:
44
+
45
+ ```py
46
+ >>> # Get imagenet class mappings
47
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
48
+ >>> urllib.request.urlretrieve(url, filename)
49
+ >>> with open("imagenet_classes.txt", "r") as f:
50
+ ... categories = [s.strip() for s in f.readlines()]
51
+
52
+ >>> # Print top categories per image
53
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
54
+ >>> for i in range(top5_prob.size(0)):
55
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
56
+ >>> # prints class names and probabilities like:
57
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
58
+ ```
59
+
60
+ Replace the model name with the variant you want to use, e.g. `resnetv2_101x1_bitm`. You can find the IDs in the model summaries at the top of this page.
61
+
62
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
63
+
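+ For example, a minimal sketch of grabbing intermediate feature maps (reusing `tensor` from above; see the linked docs for the full options):
+
+ ```py
+ >>> feat_model = timm.create_model('resnetv2_101x1_bitm', pretrained=True, features_only=True)
+ >>> features = feat_model(tensor)   # a list of feature maps at increasing strides
+ >>> [f.shape for f in features]
+ ```
+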
64
+ ## How do I finetune this model?
65
+
66
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
67
+
68
+ ```py
69
+ >>> model = timm.create_model('resnetv2_101x1_bitm', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
70
+ ```
71
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
72
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
73
+
74
+ ## How do I train this model?
75
+
76
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
77
+
78
+ ## Citation
79
+
80
+ ```BibTeX
81
+ @misc{kolesnikov2020big,
82
+ title={Big Transfer (BiT): General Visual Representation Learning},
83
+ author={Alexander Kolesnikov and Lucas Beyer and Xiaohua Zhai and Joan Puigcerver and Jessica Yung and Sylvain Gelly and Neil Houlsby},
84
+ year={2020},
85
+ eprint={1912.11370},
86
+ archivePrefix={arXiv},
87
+ primaryClass={cs.CV}
88
+ }
89
+ ```
90
+
91
+ <!--
92
+ Type: model-index
93
+ Collections:
94
+ - Name: Big Transfer
95
+ Paper:
96
+ Title: 'Big Transfer (BiT): General Visual Representation Learning'
97
+ URL: https://paperswithcode.com/paper/large-scale-learning-of-general-visual
98
+ Models:
99
+ - Name: resnetv2_101x1_bitm
100
+ In Collection: Big Transfer
101
+ Metadata:
102
+ FLOPs: 5330896
103
+ Parameters: 44540000
104
+ File Size: 178256468
105
+ Architecture:
106
+ - 1x1 Convolution
107
+ - Bottleneck Residual Block
108
+ - Convolution
109
+ - Global Average Pooling
110
+ - Group Normalization
111
+ - Max Pooling
112
+ - ReLU
113
+ - Residual Block
114
+ - Residual Connection
115
+ - Softmax
116
+ - Weight Standardization
117
+ Tasks:
118
+ - Image Classification
119
+ Training Techniques:
120
+ - Mixup
121
+ - SGD with Momentum
122
+ - Weight Decay
123
+ Training Data:
124
+ - ImageNet
125
+ - JFT-300M
126
+ Training Resources: Cloud TPUv3-512
127
+ ID: resnetv2_101x1_bitm
128
+ LR: 0.03
129
+ Epochs: 90
130
+ Layers: 101
131
+ Crop Pct: '1.0'
132
+ Momentum: 0.9
133
+ Batch Size: 4096
134
+ Image Size: '480'
135
+ Weight Decay: 0.0001
136
+ Interpolation: bilinear
137
+ Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L444
138
+ Weights: https://storage.googleapis.com/bit_models/BiT-M-R101x1-ILSVRC2012.npz
139
+ Results:
140
+ - Task: Image Classification
141
+ Dataset: ImageNet
142
+ Metrics:
143
+ Top 1 Accuracy: 82.21%
144
+ Top 5 Accuracy: 96.47%
145
+ - Name: resnetv2_101x3_bitm
146
+ In Collection: Big Transfer
147
+ Metadata:
148
+ FLOPs: 15988688
149
+ Parameters: 387930000
150
+ File Size: 1551830100
151
+ Architecture:
152
+ - 1x1 Convolution
153
+ - Bottleneck Residual Block
154
+ - Convolution
155
+ - Global Average Pooling
156
+ - Group Normalization
157
+ - Max Pooling
158
+ - ReLU
159
+ - Residual Block
160
+ - Residual Connection
161
+ - Softmax
162
+ - Weight Standardization
163
+ Tasks:
164
+ - Image Classification
165
+ Training Techniques:
166
+ - Mixup
167
+ - SGD with Momentum
168
+ - Weight Decay
169
+ Training Data:
170
+ - ImageNet
171
+ - JFT-300M
172
+ Training Resources: Cloud TPUv3-512
173
+ ID: resnetv2_101x3_bitm
174
+ LR: 0.03
175
+ Epochs: 90
176
+ Layers: 101
177
+ Crop Pct: '1.0'
178
+ Momentum: 0.9
179
+ Batch Size: 4096
180
+ Image Size: '480'
181
+ Weight Decay: 0.0001
182
+ Interpolation: bilinear
183
+ Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L451
184
+ Weights: https://storage.googleapis.com/bit_models/BiT-M-R101x3-ILSVRC2012.npz
185
+ Results:
186
+ - Task: Image Classification
187
+ Dataset: ImageNet
188
+ Metrics:
189
+ Top 1 Accuracy: 84.38%
190
+ Top 5 Accuracy: 97.37%
191
+ - Name: resnetv2_152x2_bitm
192
+ In Collection: Big Transfer
193
+ Metadata:
194
+ FLOPs: 10659792
195
+ Parameters: 236340000
196
+ File Size: 945476668
197
+ Architecture:
198
+ - 1x1 Convolution
199
+ - Bottleneck Residual Block
200
+ - Convolution
201
+ - Global Average Pooling
202
+ - Group Normalization
203
+ - Max Pooling
204
+ - ReLU
205
+ - Residual Block
206
+ - Residual Connection
207
+ - Softmax
208
+ - Weight Standardization
209
+ Tasks:
210
+ - Image Classification
211
+ Training Techniques:
212
+ - Mixup
213
+ - SGD with Momentum
214
+ - Weight Decay
215
+ Training Data:
216
+ - ImageNet
217
+ - JFT-300M
218
+ ID: resnetv2_152x2_bitm
219
+ Crop Pct: '1.0'
220
+ Image Size: '480'
221
+ Interpolation: bilinear
222
+ Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L458
223
+ Weights: https://storage.googleapis.com/bit_models/BiT-M-R152x2-ILSVRC2012.npz
224
+ Results:
225
+ - Task: Image Classification
226
+ Dataset: ImageNet
227
+ Metrics:
228
+ Top 1 Accuracy: 84.4%
229
+ Top 5 Accuracy: 97.43%
230
+ - Name: resnetv2_152x4_bitm
231
+ In Collection: Big Transfer
232
+ Metadata:
233
+ FLOPs: 21317584
234
+ Parameters: 936530000
235
+ File Size: 3746270104
236
+ Architecture:
237
+ - 1x1 Convolution
238
+ - Bottleneck Residual Block
239
+ - Convolution
240
+ - Global Average Pooling
241
+ - Group Normalization
242
+ - Max Pooling
243
+ - ReLU
244
+ - Residual Block
245
+ - Residual Connection
246
+ - Softmax
247
+ - Weight Standardization
248
+ Tasks:
249
+ - Image Classification
250
+ Training Techniques:
251
+ - Mixup
252
+ - SGD with Momentum
253
+ - Weight Decay
254
+ Training Data:
255
+ - ImageNet
256
+ - JFT-300M
257
+ Training Resources: Cloud TPUv3-512
258
+ ID: resnetv2_152x4_bitm
259
+ Crop Pct: '1.0'
260
+ Image Size: '480'
261
+ Interpolation: bilinear
262
+ Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L465
263
+ Weights: https://storage.googleapis.com/bit_models/BiT-M-R152x4-ILSVRC2012.npz
264
+ Results:
265
+ - Task: Image Classification
266
+ Dataset: ImageNet
267
+ Metrics:
268
+ Top 1 Accuracy: 84.95%
269
+ Top 5 Accuracy: 97.45%
270
+ - Name: resnetv2_50x1_bitm
271
+ In Collection: Big Transfer
272
+ Metadata:
273
+ FLOPs: 5330896
274
+ Parameters: 25550000
275
+ File Size: 102242668
276
+ Architecture:
277
+ - 1x1 Convolution
278
+ - Bottleneck Residual Block
279
+ - Convolution
280
+ - Global Average Pooling
281
+ - Group Normalization
282
+ - Max Pooling
283
+ - ReLU
284
+ - Residual Block
285
+ - Residual Connection
286
+ - Softmax
287
+ - Weight Standardization
288
+ Tasks:
289
+ - Image Classification
290
+ Training Techniques:
291
+ - Mixup
292
+ - SGD with Momentum
293
+ - Weight Decay
294
+ Training Data:
295
+ - ImageNet
296
+ - JFT-300M
297
+ Training Resources: Cloud TPUv3-512
298
+ ID: resnetv2_50x1_bitm
299
+ LR: 0.03
300
+ Epochs: 90
301
+ Layers: 50
302
+ Crop Pct: '1.0'
303
+ Momentum: 0.9
304
+ Batch Size: 4096
305
+ Image Size: '480'
306
+ Weight Decay: 0.0001
307
+ Interpolation: bilinear
308
+ Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L430
309
+ Weights: https://storage.googleapis.com/bit_models/BiT-M-R50x1-ILSVRC2012.npz
310
+ Results:
311
+ - Task: Image Classification
312
+ Dataset: ImageNet
313
+ Metrics:
314
+ Top 1 Accuracy: 80.19%
315
+ Top 5 Accuracy: 95.63%
316
+ - Name: resnetv2_50x3_bitm
317
+ In Collection: Big Transfer
318
+ Metadata:
319
+ FLOPs: 15988688
320
+ Parameters: 217320000
321
+ File Size: 869321580
322
+ Architecture:
323
+ - 1x1 Convolution
324
+ - Bottleneck Residual Block
325
+ - Convolution
326
+ - Global Average Pooling
327
+ - Group Normalization
328
+ - Max Pooling
329
+ - ReLU
330
+ - Residual Block
331
+ - Residual Connection
332
+ - Softmax
333
+ - Weight Standardization
334
+ Tasks:
335
+ - Image Classification
336
+ Training Techniques:
337
+ - Mixup
338
+ - SGD with Momentum
339
+ - Weight Decay
340
+ Training Data:
341
+ - ImageNet
342
+ - JFT-300M
343
+ Training Resources: Cloud TPUv3-512
344
+ ID: resnetv2_50x3_bitm
345
+ LR: 0.03
346
+ Epochs: 90
347
+ Layers: 50
348
+ Crop Pct: '1.0'
349
+ Momentum: 0.9
350
+ Batch Size: 4096
351
+ Image Size: '480'
352
+ Weight Decay: 0.0001
353
+ Interpolation: bilinear
354
+ Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L437
355
+ Weights: https://storage.googleapis.com/bit_models/BiT-M-R50x3-ILSVRC2012.npz
356
+ Results:
357
+ - Task: Image Classification
358
+ Dataset: ImageNet
359
+ Metrics:
360
+ Top 1 Accuracy: 83.75%
361
+ Top 5 Accuracy: 97.12%
362
+ -->
pytorch-image-models/hfdocs/source/models/csp-darknet.mdx ADDED
@@ -0,0 +1,148 @@
1
+ # CSP-DarkNet
2
+
3
+ **CSPDarknet53** is a convolutional neural network and backbone for object detection that uses [DarkNet-53](https://paperswithcode.com/method/darknet-53). It employs a CSPNet strategy to partition the feature map of the base layer into two parts and then merge them through a cross-stage hierarchy. This split-and-merge strategy allows more gradient flow through the network.
4
+
5
+ This CNN is used as the backbone for [YOLOv4](https://paperswithcode.com/method/yolov4).
6
+
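+ The split-and-merge pattern itself is simple; a minimal, illustrative sketch (not timm's exact `cspnet.py` implementation) looks like this:
+
+ ```py
+ >>> import torch
+ >>> import torch.nn as nn
+
+ >>> class CrossStagePartial(nn.Module):
+ ...     """Split the stage input, transform only one part, then merge (illustrative)."""
+ ...     def __init__(self, channels):
+ ...         super().__init__()
+ ...         half = channels // 2
+ ...         self.blocks = nn.Sequential(  # stands in for the stage's residual/dense blocks
+ ...             nn.Conv2d(half, half, 3, padding=1), nn.BatchNorm2d(half), nn.ReLU(inplace=True))
+ ...         self.transition = nn.Conv2d(channels, channels, 1)
+ ...     def forward(self, x):
+ ...         part1, part2 = x.chunk(2, dim=1)   # partition the base layer's feature map
+ ...         part2 = self.blocks(part2)         # only this part flows through the stage
+ ...         return self.transition(torch.cat([part1, part2], dim=1))  # cross-stage merge
+
+ >>> out = CrossStagePartial(64)(torch.randn(1, 64, 56, 56))
+ >>> out.shape  # torch.Size([1, 64, 56, 56])
+ ```
+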
7
+ ## How do I use this model on an image?
8
+
9
+ To load a pretrained model:
10
+
11
+ ```py
12
+ >>> import timm
13
+ >>> model = timm.create_model('cspdarknet53', pretrained=True)
14
+ >>> model.eval()
15
+ ```
16
+
17
+ To load and preprocess the image:
18
+
19
+ ```py
20
+ >>> import urllib
21
+ >>> from PIL import Image
22
+ >>> from timm.data import resolve_data_config
23
+ >>> from timm.data.transforms_factory import create_transform
24
+
25
+ >>> config = resolve_data_config({}, model=model)
26
+ >>> transform = create_transform(**config)
27
+
28
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
29
+ >>> urllib.request.urlretrieve(url, filename)
30
+ >>> img = Image.open(filename).convert('RGB')
31
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
32
+ ```
33
+
34
+ To get the model predictions:
35
+
36
+ ```py
37
+ >>> import torch
38
+ >>> with torch.no_grad():
39
+ ... out = model(tensor)
40
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
41
+ >>> print(probabilities.shape)
42
+ >>> # prints: torch.Size([1000])
43
+ ```
44
+
45
+ To get the top-5 predictions class names:
46
+
47
+ ```py
48
+ >>> # Get imagenet class mappings
49
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
50
+ >>> urllib.request.urlretrieve(url, filename)
51
+ >>> with open("imagenet_classes.txt", "r") as f:
52
+ ... categories = [s.strip() for s in f.readlines()]
53
+
54
+ >>> # Print top categories per image
55
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
56
+ >>> for i in range(top5_prob.size(0)):
57
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
58
+ >>> # prints class names and probabilities like:
59
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
60
+ ```
61
+
62
+ Replace the model name with the variant you want to use, e.g. `cspdarknet53`. You can find the IDs in the model summaries at the top of this page.
63
+
64
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
65
+
66
+ ## How do I finetune this model?
67
+
68
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
69
+
70
+ ```py
71
+ >>> model = timm.create_model('cspdarknet53', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
72
+ ```
73
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
74
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
75
+
76
+ ## How do I train this model?
77
+
78
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
79
+
80
+ ## Citation
81
+
82
+ ```BibTeX
83
+ @misc{bochkovskiy2020yolov4,
84
+ title={YOLOv4: Optimal Speed and Accuracy of Object Detection},
85
+ author={Alexey Bochkovskiy and Chien-Yao Wang and Hong-Yuan Mark Liao},
86
+ year={2020},
87
+ eprint={2004.10934},
88
+ archivePrefix={arXiv},
89
+ primaryClass={cs.CV}
90
+ }
91
+ ```
92
+
93
+ <!--
94
+ Type: model-index
95
+ Collections:
96
+ - Name: CSP DarkNet
97
+ Paper:
98
+ Title: 'YOLOv4: Optimal Speed and Accuracy of Object Detection'
99
+ URL: https://paperswithcode.com/paper/yolov4-optimal-speed-and-accuracy-of-object
100
+ Models:
101
+ - Name: cspdarknet53
102
+ In Collection: CSP DarkNet
103
+ Metadata:
104
+ FLOPs: 8545018880
105
+ Parameters: 27640000
106
+ File Size: 110775135
107
+ Architecture:
108
+ - 1x1 Convolution
109
+ - Batch Normalization
110
+ - Convolution
111
+ - Global Average Pooling
112
+ - Mish
113
+ - Residual Connection
114
+ - Softmax
115
+ Tasks:
116
+ - Image Classification
117
+ Training Techniques:
118
+ - CutMix
119
+ - Label Smoothing
120
+ - Mosaic
121
+ - Polynomial Learning Rate Decay
122
+ - SGD with Momentum
123
+ - Self-Adversarial Training
124
+ - Weight Decay
125
+ Training Data:
126
+ - ImageNet
127
+ Training Resources: 1x NVIDIA RTX 2070 GPU
128
+ ID: cspdarknet53
129
+ LR: 0.1
130
+ Layers: 53
131
+ Crop Pct: '0.887'
132
+ Momentum: 0.9
133
+ Batch Size: 128
134
+ Image Size: '256'
135
+ Warmup Steps: 1000
136
+ Weight Decay: 0.0005
137
+ Interpolation: bilinear
138
+ Training Steps: 8000000
139
+ FPS (GPU RTX 2070): 66
140
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/cspnet.py#L441
141
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspdarknet53_ra_256-d05c7c21.pth
142
+ Results:
143
+ - Task: Image Classification
144
+ Dataset: ImageNet
145
+ Metrics:
146
+ Top 1 Accuracy: 80.05%
147
+ Top 5 Accuracy: 95.09%
148
+ -->
pytorch-image-models/hfdocs/source/models/csp-resnet.mdx ADDED
@@ -0,0 +1,143 @@
1
+ # CSP-ResNet
2
+
3
+ **CSPResNet** is a convolutional neural network where we apply the Cross Stage Partial Network (CSPNet) approach to [ResNet](https://paperswithcode.com/method/resnet). The CSPNet partitions the feature map of the base layer into two parts and then merges them through a cross-stage hierarchy. The use of a split and merge strategy allows for more gradient flow through the network.
4
+
5
+ ## How do I use this model on an image?
6
+
7
+ To load a pretrained model:
8
+
9
+ ```py
10
+ >>> import timm
11
+ >>> model = timm.create_model('cspresnet50', pretrained=True)
12
+ >>> model.eval()
13
+ ```
14
+
15
+ To load and preprocess the image:
16
+
17
+ ```py
18
+ >>> import urllib
19
+ >>> from PIL import Image
20
+ >>> from timm.data import resolve_data_config
21
+ >>> from timm.data.transforms_factory import create_transform
22
+
23
+ >>> config = resolve_data_config({}, model=model)
24
+ >>> transform = create_transform(**config)
25
+
26
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
27
+ >>> urllib.request.urlretrieve(url, filename)
28
+ >>> img = Image.open(filename).convert('RGB')
29
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
30
+ ```
31
+
32
+ To get the model predictions:
33
+
34
+ ```py
35
+ >>> import torch
36
+ >>> with torch.no_grad():
37
+ ... out = model(tensor)
38
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
39
+ >>> print(probabilities.shape)
40
+ >>> # prints: torch.Size([1000])
41
+ ```
42
+
43
+ To get the top-5 predictions class names:
44
+
45
+ ```py
46
+ >>> # Get imagenet class mappings
47
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
48
+ >>> urllib.request.urlretrieve(url, filename)
49
+ >>> with open("imagenet_classes.txt", "r") as f:
50
+ ... categories = [s.strip() for s in f.readlines()]
51
+
52
+ >>> # Print top categories per image
53
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
54
+ >>> for i in range(top5_prob.size(0)):
55
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
56
+ >>> # prints class names and probabilities like:
57
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
58
+ ```
59
+
60
+ Replace the model name with the variant you want to use, e.g. `cspresnet50`. You can find the IDs in the model summaries at the top of this page.
61
+
62
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
63
+
64
+ ## How do I finetune this model?
65
+
66
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
67
+
68
+ ```py
69
+ >>> model = timm.create_model('cspresnet50', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
70
+ ```
71
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
72
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
73
+
74
+ ## How do I train this model?
75
+
76
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
77
+
78
+ ## Citation
79
+
80
+ ```BibTeX
81
+ @misc{wang2019cspnet,
82
+ title={CSPNet: A New Backbone that can Enhance Learning Capability of CNN},
83
+ author={Chien-Yao Wang and Hong-Yuan Mark Liao and I-Hau Yeh and Yueh-Hua Wu and Ping-Yang Chen and Jun-Wei Hsieh},
84
+ year={2019},
85
+ eprint={1911.11929},
86
+ archivePrefix={arXiv},
87
+ primaryClass={cs.CV}
88
+ }
89
+ ```
90
+
91
+ <!--
92
+ Type: model-index
93
+ Collections:
94
+ - Name: CSP ResNet
95
+ Paper:
96
+ Title: 'CSPNet: A New Backbone that can Enhance Learning Capability of CNN'
97
+ URL: https://paperswithcode.com/paper/cspnet-a-new-backbone-that-can-enhance
98
+ Models:
99
+ - Name: cspresnet50
100
+ In Collection: CSP ResNet
101
+ Metadata:
102
+ FLOPs: 5924992000
103
+ Parameters: 21620000
104
+ File Size: 86679303
105
+ Architecture:
106
+ - 1x1 Convolution
107
+ - Batch Normalization
108
+ - Bottleneck Residual Block
109
+ - Convolution
110
+ - Global Average Pooling
111
+ - Max Pooling
112
+ - ReLU
113
+ - Residual Block
114
+ - Residual Connection
115
+ - Softmax
116
+ Tasks:
117
+ - Image Classification
118
+ Training Techniques:
119
+ - Label Smoothing
120
+ - Polynomial Learning Rate Decay
121
+ - SGD with Momentum
122
+ - Weight Decay
123
+ Training Data:
124
+ - ImageNet
125
+ ID: cspresnet50
126
+ LR: 0.1
127
+ Layers: 50
128
+ Crop Pct: '0.887'
129
+ Momentum: 0.9
130
+ Batch Size: 128
131
+ Image Size: '256'
132
+ Weight Decay: 0.005
133
+ Interpolation: bilinear
134
+ Training Steps: 8000000
135
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/cspnet.py#L415
136
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnet50_ra-d3e8d487.pth
137
+ Results:
138
+ - Task: Image Classification
139
+ Dataset: ImageNet
140
+ Metrics:
141
+ Top 1 Accuracy: 79.57%
142
+ Top 5 Accuracy: 94.71%
143
+ -->
pytorch-image-models/hfdocs/source/models/csp-resnext.mdx ADDED
@@ -0,0 +1,144 @@
1
+ # CSP-ResNeXt
2
+
3
+ **CSPResNeXt** is a convolutional neural network where we apply the Cross Stage Partial Network (CSPNet) approach to [ResNeXt](https://paperswithcode.com/method/resnext). The CSPNet partitions the feature map of the base layer into two parts and then merges them through a cross-stage hierarchy. The use of a split and merge strategy allows for more gradient flow through the network.
4
+
5
+ ## How do I use this model on an image?
6
+
7
+ To load a pretrained model:
8
+
9
+ ```py
10
+ >>> import timm
11
+ >>> model = timm.create_model('cspresnext50', pretrained=True)
12
+ >>> model.eval()
13
+ ```
14
+
15
+ To load and preprocess the image:
16
+
17
+ ```py
18
+ >>> import urllib
19
+ >>> from PIL import Image
20
+ >>> from timm.data import resolve_data_config
21
+ >>> from timm.data.transforms_factory import create_transform
22
+
23
+ >>> config = resolve_data_config({}, model=model)
24
+ >>> transform = create_transform(**config)
25
+
26
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
27
+ >>> urllib.request.urlretrieve(url, filename)
28
+ >>> img = Image.open(filename).convert('RGB')
29
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
30
+ ```
31
+
32
+ To get the model predictions:
33
+
34
+ ```py
35
+ >>> import torch
36
+ >>> with torch.no_grad():
37
+ ... out = model(tensor)
38
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
39
+ >>> print(probabilities.shape)
40
+ >>> # prints: torch.Size([1000])
41
+ ```
42
+
43
+ To get the top-5 predictions class names:
44
+
45
+ ```py
46
+ >>> # Get imagenet class mappings
47
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
48
+ >>> urllib.request.urlretrieve(url, filename)
49
+ >>> with open("imagenet_classes.txt", "r") as f:
50
+ ... categories = [s.strip() for s in f.readlines()]
51
+
52
+ >>> # Print top categories per image
53
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
54
+ >>> for i in range(top5_prob.size(0)):
55
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
56
+ >>> # prints class names and probabilities like:
57
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
58
+ ```
59
+
60
+ Replace the model name with the variant you want to use, e.g. `cspresnext50`. You can find the IDs in the model summaries at the top of this page.
61
+
62
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
63
+
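+ As a quick illustration of what the feature-extraction examples cover, `timm` models such as this one can be created with `features_only=True` so that they return a list of intermediate feature maps instead of classification logits; the exact number and shapes of the maps depend on the model:
+
+ ```py
+ >>> import torch
+ >>> feature_model = timm.create_model('cspresnext50', pretrained=True, features_only=True)
+ >>> with torch.no_grad():
+ ...     features = feature_model(torch.randn(1, 3, 224, 224))
+ >>> print([f.shape for f in features])
+ ```
+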
64
+ ## How do I finetune this model?
65
+
66
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
67
+
68
+ ```py
69
+ >>> model = timm.create_model('cspresnext50', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
70
+ ```
71
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
72
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
73
+
74
+ ## How do I train this model?
75
+
76
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
77
+
78
+ ## Citation
79
+
80
+ ```BibTeX
81
+ @misc{wang2019cspnet,
82
+ title={CSPNet: A New Backbone that can Enhance Learning Capability of CNN},
83
+ author={Chien-Yao Wang and Hong-Yuan Mark Liao and I-Hau Yeh and Yueh-Hua Wu and Ping-Yang Chen and Jun-Wei Hsieh},
84
+ year={2019},
85
+ eprint={1911.11929},
86
+ archivePrefix={arXiv},
87
+ primaryClass={cs.CV}
88
+ }
89
+ ```
90
+
91
+ <!--
92
+ Type: model-index
93
+ Collections:
94
+ - Name: CSP ResNeXt
95
+ Paper:
96
+ Title: 'CSPNet: A New Backbone that can Enhance Learning Capability of CNN'
97
+ URL: https://paperswithcode.com/paper/cspnet-a-new-backbone-that-can-enhance
98
+ Models:
99
+ - Name: cspresnext50
100
+ In Collection: CSP ResNeXt
101
+ Metadata:
102
+ FLOPs: 3962945536
103
+ Parameters: 20570000
104
+ File Size: 82562887
105
+ Architecture:
106
+ - 1x1 Convolution
107
+ - Batch Normalization
108
+ - Convolution
109
+ - Global Average Pooling
110
+ - Grouped Convolution
111
+ - Max Pooling
112
+ - ReLU
113
+ - ResNeXt Block
114
+ - Residual Connection
115
+ - Softmax
116
+ Tasks:
117
+ - Image Classification
118
+ Training Techniques:
119
+ - Label Smoothing
120
+ - Polynomial Learning Rate Decay
121
+ - SGD with Momentum
122
+ - Weight Decay
123
+ Training Data:
124
+ - ImageNet
125
+ Training Resources: 1x GPU
126
+ ID: cspresnext50
127
+ LR: 0.1
128
+ Layers: 50
129
+ Crop Pct: '0.875'
130
+ Momentum: 0.9
131
+ Batch Size: 128
132
+ Image Size: '224'
133
+ Weight Decay: 0.005
134
+ Interpolation: bilinear
135
+ Training Steps: 8000000
136
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/cspnet.py#L430
137
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnext50_ra_224-648b4713.pth
138
+ Results:
139
+ - Task: Image Classification
140
+ Dataset: ImageNet
141
+ Metrics:
142
+ Top 1 Accuracy: 80.05%
143
+ Top 5 Accuracy: 94.94%
144
+ -->
pytorch-image-models/hfdocs/source/models/densenet.mdx ADDED
@@ -0,0 +1,372 @@
1
+ # DenseNet
2
+
3
+ **DenseNet** is a type of convolutional neural network that utilises dense connections between layers, through [Dense Blocks](http://www.paperswithcode.com/method/dense-block), where we connect *all layers* (with matching feature-map sizes) directly with each other. To preserve the feed-forward nature, each layer obtains additional inputs from all preceding layers and passes on its own feature-maps to all subsequent layers.
4
+
5
+ The **DenseNet Blur** variant in this collection by Ross Wightman employs [Blur Pooling](http://www.paperswithcode.com/method/blur-pooling).
6
+
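+ As a rough sketch of what dense connectivity means (not the actual `timm` `densenet.py` code, and with made-up channel counts), each layer in a dense block consumes the concatenation of every earlier feature map and adds a fixed number of new channels, the growth rate:
+
+ ```py
+ import torch
+ import torch.nn as nn
+
+ class ToyDenseBlock(nn.Module):
+     """Every layer sees the concatenation of all preceding feature maps and
+     contributes `growth_rate` new channels of its own."""
+     def __init__(self, in_channels, growth_rate=32, num_layers=4):
+         super().__init__()
+         self.layers = nn.ModuleList([
+             nn.Sequential(
+                 nn.BatchNorm2d(in_channels + i * growth_rate),
+                 nn.ReLU(inplace=True),
+                 nn.Conv2d(in_channels + i * growth_rate, growth_rate, 3, padding=1, bias=False),
+             ) for i in range(num_layers)
+         ])
+
+     def forward(self, x):
+         features = [x]
+         for layer in self.layers:
+             features.append(layer(torch.cat(features, dim=1)))  # dense connection
+         return torch.cat(features, dim=1)
+
+ print(ToyDenseBlock(64)(torch.randn(1, 64, 56, 56)).shape)  # torch.Size([1, 192, 56, 56])
+ ```
+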
7
+ ## How do I use this model on an image?
8
+
9
+ To load a pretrained model:
10
+
11
+ ```py
12
+ >>> import timm
13
+ >>> model = timm.create_model('densenet121', pretrained=True)
14
+ >>> model.eval()
15
+ ```
16
+
17
+ To load and preprocess the image:
18
+
19
+ ```py
20
+ >>> import urllib
21
+ >>> from PIL import Image
22
+ >>> from timm.data import resolve_data_config
23
+ >>> from timm.data.transforms_factory import create_transform
24
+
25
+ >>> config = resolve_data_config({}, model=model)
26
+ >>> transform = create_transform(**config)
27
+
28
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
29
+ >>> urllib.request.urlretrieve(url, filename)
30
+ >>> img = Image.open(filename).convert('RGB')
31
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
32
+ ```
33
+
34
+ To get the model predictions:
35
+
36
+ ```py
37
+ >>> import torch
38
+ >>> with torch.no_grad():
39
+ ... out = model(tensor)
40
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
41
+ >>> print(probabilities.shape)
42
+ >>> # prints: torch.Size([1000])
43
+ ```
44
+
45
+ To get the top-5 predictions class names:
46
+
47
+ ```py
48
+ >>> # Get imagenet class mappings
49
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
50
+ >>> urllib.request.urlretrieve(url, filename)
51
+ >>> with open("imagenet_classes.txt", "r") as f:
52
+ ... categories = [s.strip() for s in f.readlines()]
53
+
54
+ >>> # Print top categories per image
55
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
56
+ >>> for i in range(top5_prob.size(0)):
57
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
58
+ >>> # prints class names and probabilities like:
59
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
60
+ ```
61
+
62
+ Replace the model name with the variant you want to use, e.g. `densenet121`. You can find the IDs in the model summaries at the top of this page.
63
+
64
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
65
+
66
+ ## How do I finetune this model?
67
+
68
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
69
+
70
+ ```py
71
+ >>> model = timm.create_model('densenet121', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
72
+ ```
73
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
74
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
75
+
76
+ ## How do I train this model?
77
+
78
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
79
+
80
+ ## Citation
81
+
82
+ ```BibTeX
83
+ @article{DBLP:journals/corr/HuangLW16a,
84
+ author = {Gao Huang and
85
+ Zhuang Liu and
86
+ Kilian Q. Weinberger},
87
+ title = {Densely Connected Convolutional Networks},
88
+ journal = {CoRR},
89
+ volume = {abs/1608.06993},
90
+ year = {2016},
91
+ url = {http://arxiv.org/abs/1608.06993},
92
+ archivePrefix = {arXiv},
93
+ eprint = {1608.06993},
94
+ timestamp = {Mon, 10 Sep 2018 15:49:32 +0200},
95
+ biburl = {https://dblp.org/rec/journals/corr/HuangLW16a.bib},
96
+ bibsource = {dblp computer science bibliography, https://dblp.org}
97
+ }
98
+ ```
99
+
100
+ ```BibTeX
101
+ @misc{rw2019timm,
102
+ author = {Ross Wightman},
103
+ title = {PyTorch Image Models},
104
+ year = {2019},
105
+ publisher = {GitHub},
106
+ journal = {GitHub repository},
107
+ doi = {10.5281/zenodo.4414861},
108
+ howpublished = {\url{https://github.com/rwightman/pytorch-image-models}}
109
+ }
110
+ ```
111
+
112
+ <!--
113
+ Type: model-index
114
+ Collections:
115
+ - Name: DenseNet
116
+ Paper:
117
+ Title: Densely Connected Convolutional Networks
118
+ URL: https://paperswithcode.com/paper/densely-connected-convolutional-networks
119
+ Models:
120
+ - Name: densenet121
121
+ In Collection: DenseNet
122
+ Metadata:
123
+ FLOPs: 3641843200
124
+ Parameters: 7980000
125
+ File Size: 32376726
126
+ Architecture:
127
+ - 1x1 Convolution
128
+ - Average Pooling
129
+ - Batch Normalization
130
+ - Convolution
131
+ - Dense Block
132
+ - Dense Connections
133
+ - Dropout
134
+ - Max Pooling
135
+ - ReLU
136
+ - Softmax
137
+ Tasks:
138
+ - Image Classification
139
+ Training Techniques:
140
+ - Kaiming Initialization
141
+ - Nesterov Accelerated Gradient
142
+ - Weight Decay
143
+ Training Data:
144
+ - ImageNet
145
+ ID: densenet121
146
+ LR: 0.1
147
+ Epochs: 90
148
+ Layers: 121
149
+ Dropout: 0.2
150
+ Crop Pct: '0.875'
151
+ Momentum: 0.9
152
+ Batch Size: 256
153
+ Image Size: '224'
154
+ Weight Decay: 0.0001
155
+ Interpolation: bicubic
156
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/densenet.py#L295
157
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/densenet121_ra-50efcf5c.pth
158
+ Results:
159
+ - Task: Image Classification
160
+ Dataset: ImageNet
161
+ Metrics:
162
+ Top 1 Accuracy: 75.56%
163
+ Top 5 Accuracy: 92.65%
164
+ - Name: densenet161
165
+ In Collection: DenseNet
166
+ Metadata:
167
+ FLOPs: 9931959264
168
+ Parameters: 28680000
169
+ File Size: 115730790
170
+ Architecture:
171
+ - 1x1 Convolution
172
+ - Average Pooling
173
+ - Batch Normalization
174
+ - Convolution
175
+ - Dense Block
176
+ - Dense Connections
177
+ - Dropout
178
+ - Max Pooling
179
+ - ReLU
180
+ - Softmax
181
+ Tasks:
182
+ - Image Classification
183
+ Training Techniques:
184
+ - Kaiming Initialization
185
+ - Nesterov Accelerated Gradient
186
+ - Weight Decay
187
+ Training Data:
188
+ - ImageNet
189
+ ID: densenet161
190
+ LR: 0.1
191
+ Epochs: 90
192
+ Layers: 161
193
+ Dropout: 0.2
194
+ Crop Pct: '0.875'
195
+ Momentum: 0.9
196
+ Batch Size: 256
197
+ Image Size: '224'
198
+ Weight Decay: 0.0001
199
+ Interpolation: bicubic
200
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/densenet.py#L347
201
+ Weights: https://download.pytorch.org/models/densenet161-8d451a50.pth
202
+ Results:
203
+ - Task: Image Classification
204
+ Dataset: ImageNet
205
+ Metrics:
206
+ Top 1 Accuracy: 77.36%
207
+ Top 5 Accuracy: 93.63%
208
+ - Name: densenet169
209
+ In Collection: DenseNet
210
+ Metadata:
211
+ FLOPs: 4316945792
212
+ Parameters: 14150000
213
+ File Size: 57365526
214
+ Architecture:
215
+ - 1x1 Convolution
216
+ - Average Pooling
217
+ - Batch Normalization
218
+ - Convolution
219
+ - Dense Block
220
+ - Dense Connections
221
+ - Dropout
222
+ - Max Pooling
223
+ - ReLU
224
+ - Softmax
225
+ Tasks:
226
+ - Image Classification
227
+ Training Techniques:
228
+ - Kaiming Initialization
229
+ - Nesterov Accelerated Gradient
230
+ - Weight Decay
231
+ Training Data:
232
+ - ImageNet
233
+ ID: densenet169
234
+ LR: 0.1
235
+ Epochs: 90
236
+ Layers: 169
237
+ Dropout: 0.2
238
+ Crop Pct: '0.875'
239
+ Momentum: 0.9
240
+ Batch Size: 256
241
+ Image Size: '224'
242
+ Weight Decay: 0.0001
243
+ Interpolation: bicubic
244
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/densenet.py#L327
245
+ Weights: https://download.pytorch.org/models/densenet169-b2777c0a.pth
246
+ Results:
247
+ - Task: Image Classification
248
+ Dataset: ImageNet
249
+ Metrics:
250
+ Top 1 Accuracy: 75.9%
251
+ Top 5 Accuracy: 93.02%
252
+ - Name: densenet201
253
+ In Collection: DenseNet
254
+ Metadata:
255
+ FLOPs: 5514321024
256
+ Parameters: 20010000
257
+ File Size: 81131730
258
+ Architecture:
259
+ - 1x1 Convolution
260
+ - Average Pooling
261
+ - Batch Normalization
262
+ - Convolution
263
+ - Dense Block
264
+ - Dense Connections
265
+ - Dropout
266
+ - Max Pooling
267
+ - ReLU
268
+ - Softmax
269
+ Tasks:
270
+ - Image Classification
271
+ Training Techniques:
272
+ - Kaiming Initialization
273
+ - Nesterov Accelerated Gradient
274
+ - Weight Decay
275
+ Training Data:
276
+ - ImageNet
277
+ ID: densenet201
278
+ LR: 0.1
279
+ Epochs: 90
280
+ Layers: 201
281
+ Dropout: 0.2
282
+ Crop Pct: '0.875'
283
+ Momentum: 0.9
284
+ Batch Size: 256
285
+ Image Size: '224'
286
+ Weight Decay: 0.0001
287
+ Interpolation: bicubic
288
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/densenet.py#L337
289
+ Weights: https://download.pytorch.org/models/densenet201-c1103571.pth
290
+ Results:
291
+ - Task: Image Classification
292
+ Dataset: ImageNet
293
+ Metrics:
294
+ Top 1 Accuracy: 77.29%
295
+ Top 5 Accuracy: 93.48%
296
+ - Name: densenetblur121d
297
+ In Collection: DenseNet
298
+ Metadata:
299
+ FLOPs: 3947812864
300
+ Parameters: 8000000
301
+ File Size: 32456500
302
+ Architecture:
303
+ - 1x1 Convolution
304
+ - Batch Normalization
305
+ - Blur Pooling
306
+ - Convolution
307
+ - Dense Block
308
+ - Dense Connections
309
+ - Dropout
310
+ - Max Pooling
311
+ - ReLU
312
+ - Softmax
313
+ Tasks:
314
+ - Image Classification
315
+ Training Data:
316
+ - ImageNet
317
+ ID: densenetblur121d
318
+ Crop Pct: '0.875'
319
+ Image Size: '224'
320
+ Interpolation: bicubic
321
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/densenet.py#L305
322
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/densenetblur121d_ra-100dcfbc.pth
323
+ Results:
324
+ - Task: Image Classification
325
+ Dataset: ImageNet
326
+ Metrics:
327
+ Top 1 Accuracy: 76.59%
328
+ Top 5 Accuracy: 93.2%
329
+ - Name: tv_densenet121
330
+ In Collection: DenseNet
331
+ Metadata:
332
+ FLOPs: 3641843200
333
+ Parameters: 7980000
334
+ File Size: 32342954
335
+ Architecture:
336
+ - 1x1 Convolution
337
+ - Average Pooling
338
+ - Batch Normalization
339
+ - Convolution
340
+ - Dense Block
341
+ - Dense Connections
342
+ - Dropout
343
+ - Max Pooling
344
+ - ReLU
345
+ - Softmax
346
+ Tasks:
347
+ - Image Classification
348
+ Training Techniques:
349
+ - SGD with Momentum
350
+ - Weight Decay
351
+ Training Data:
352
+ - ImageNet
353
+ ID: tv_densenet121
354
+ LR: 0.1
355
+ Epochs: 90
356
+ Crop Pct: '0.875'
357
+ LR Gamma: 0.1
358
+ Momentum: 0.9
359
+ Batch Size: 32
360
+ Image Size: '224'
361
+ LR Step Size: 30
362
+ Weight Decay: 0.0001
363
+ Interpolation: bicubic
364
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/densenet.py#L379
365
+ Weights: https://download.pytorch.org/models/densenet121-a639ec97.pth
366
+ Results:
367
+ - Task: Image Classification
368
+ Dataset: ImageNet
369
+ Metrics:
370
+ Top 1 Accuracy: 74.74%
371
+ Top 5 Accuracy: 92.15%
372
+ -->
pytorch-image-models/hfdocs/source/models/dla.mdx ADDED
@@ -0,0 +1,612 @@
1
+ # Deep Layer Aggregation
2
+
3
+ Extending “shallow” skip connections, **Deep Layer Aggregation (DLA)** incorporates more depth and sharing. The authors introduce two structures for deep layer aggregation: iterative deep aggregation (IDA) and hierarchical deep aggregation (HDA). These structures are expressed through an architectural framework, independent of the choice of backbone, for compatibility with current and future networks.
4
+
5
+ IDA focuses on fusing resolutions and scales while HDA focuses on merging features from all modules and channels. IDA follows the base hierarchy to refine resolution and aggregate scale stage-by-stage. HDA assembles its own hierarchy of tree-structured connections that cross and merge stages to aggregate different levels of representation.
6
+
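+ The sketch below illustrates the iterative (IDA-style) idea only: features from progressively deeper stages are folded into a single output by repeated aggregation nodes. It is not the `timm` `dla.py` implementation, and the stage shapes and node design are invented for the example:
+
+ ```py
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ class AggregationNode(nn.Module):
+     """Toy aggregation node: resize the shallower feature, concatenate, project."""
+     def __init__(self, shallow_ch, deep_ch, out_ch):
+         super().__init__()
+         self.proj = nn.Sequential(
+             nn.Conv2d(shallow_ch + deep_ch, out_ch, 1, bias=False),
+             nn.BatchNorm2d(out_ch),
+             nn.ReLU(inplace=True),
+         )
+
+     def forward(self, shallow, deep):
+         shallow = F.adaptive_avg_pool2d(shallow, deep.shape[-2:])  # match resolution
+         return self.proj(torch.cat([shallow, deep], dim=1))
+
+ # pretend stage outputs at decreasing resolution / increasing width
+ stages = [torch.randn(1, c, s, s) for c, s in [(64, 56), (128, 28), (256, 14)]]
+
+ # iterative deep aggregation: fold the stages together, shallow to deep
+ out = stages[0]
+ for deep in stages[1:]:
+     out = AggregationNode(out.shape[1], deep.shape[1], deep.shape[1])(out, deep)
+ print(out.shape)  # torch.Size([1, 256, 14, 14])
+ ```
+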
7
+ ## How do I use this model on an image?
8
+
9
+ To load a pretrained model:
10
+
11
+ ```py
12
+ >>> import timm
13
+ >>> model = timm.create_model('dla102', pretrained=True)
14
+ >>> model.eval()
15
+ ```
16
+
17
+ To load and preprocess the image:
18
+
19
+ ```py
20
+ >>> import urllib
21
+ >>> from PIL import Image
22
+ >>> from timm.data import resolve_data_config
23
+ >>> from timm.data.transforms_factory import create_transform
24
+
25
+ >>> config = resolve_data_config({}, model=model)
26
+ >>> transform = create_transform(**config)
27
+
28
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
29
+ >>> urllib.request.urlretrieve(url, filename)
30
+ >>> img = Image.open(filename).convert('RGB')
31
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
32
+ ```
33
+
34
+ To get the model predictions:
35
+
36
+ ```py
37
+ >>> import torch
38
+ >>> with torch.no_grad():
39
+ ... out = model(tensor)
40
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
41
+ >>> print(probabilities.shape)
42
+ >>> # prints: torch.Size([1000])
43
+ ```
44
+
45
+ To get the top-5 predictions class names:
46
+
47
+ ```py
48
+ >>> # Get imagenet class mappings
49
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
50
+ >>> urllib.request.urlretrieve(url, filename)
51
+ >>> with open("imagenet_classes.txt", "r") as f:
52
+ ... categories = [s.strip() for s in f.readlines()]
53
+
54
+ >>> # Print top categories per image
55
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
56
+ >>> for i in range(top5_prob.size(0)):
57
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
58
+ >>> # prints class names and probabilities like:
59
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
60
+ ```
61
+
62
+ Replace the model name with the variant you want to use, e.g. `dla102`. You can find the IDs in the model summaries at the top of this page.
63
+
64
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
65
+
66
+ ## How do I finetune this model?
67
+
68
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
69
+
70
+ ```py
71
+ >>> model = timm.create_model('dla102', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
72
+ ```
73
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
74
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
75
+
76
+ ## How do I train this model?
77
+
78
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
79
+
80
+ ## Citation
81
+
82
+ ```BibTeX
83
+ @misc{yu2019deep,
84
+ title={Deep Layer Aggregation},
85
+ author={Fisher Yu and Dequan Wang and Evan Shelhamer and Trevor Darrell},
86
+ year={2019},
87
+ eprint={1707.06484},
88
+ archivePrefix={arXiv},
89
+ primaryClass={cs.CV}
90
+ }
91
+ ```
92
+
93
+ <!--
94
+ Type: model-index
95
+ Collections:
96
+ - Name: DLA
97
+ Paper:
98
+ Title: Deep Layer Aggregation
99
+ URL: https://paperswithcode.com/paper/deep-layer-aggregation
100
+ Models:
101
+ - Name: dla102
102
+ In Collection: DLA
103
+ Metadata:
104
+ FLOPs: 7192952808
105
+ Parameters: 33270000
106
+ File Size: 135290579
107
+ Architecture:
108
+ - 1x1 Convolution
109
+ - Batch Normalization
110
+ - Convolution
111
+ - DLA Bottleneck Residual Block
112
+ - DLA Residual Block
113
+ - Global Average Pooling
114
+ - Max Pooling
115
+ - ReLU
116
+ - Residual Block
117
+ - Residual Connection
118
+ - Softmax
119
+ Tasks:
120
+ - Image Classification
121
+ Training Techniques:
122
+ - SGD with Momentum
123
+ - Weight Decay
124
+ Training Data:
125
+ - ImageNet
126
+ Training Resources: 8x GPUs
127
+ ID: dla102
128
+ LR: 0.1
129
+ Epochs: 120
130
+ Layers: 102
131
+ Crop Pct: '0.875'
132
+ Momentum: 0.9
133
+ Batch Size: 256
134
+ Image Size: '224'
135
+ Weight Decay: 0.0001
136
+ Interpolation: bilinear
137
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L410
138
+ Weights: http://dl.yf.io/dla/models/imagenet/dla102-d94d9790.pth
139
+ Results:
140
+ - Task: Image Classification
141
+ Dataset: ImageNet
142
+ Metrics:
143
+ Top 1 Accuracy: 78.03%
144
+ Top 5 Accuracy: 93.95%
145
+ - Name: dla102x
146
+ In Collection: DLA
147
+ Metadata:
148
+ FLOPs: 5886821352
149
+ Parameters: 26310000
150
+ File Size: 107552695
151
+ Architecture:
152
+ - 1x1 Convolution
153
+ - Batch Normalization
154
+ - Convolution
155
+ - DLA Bottleneck Residual Block
156
+ - DLA Residual Block
157
+ - Global Average Pooling
158
+ - Max Pooling
159
+ - ReLU
160
+ - Residual Block
161
+ - Residual Connection
162
+ - Softmax
163
+ Tasks:
164
+ - Image Classification
165
+ Training Techniques:
166
+ - SGD with Momentum
167
+ - Weight Decay
168
+ Training Data:
169
+ - ImageNet
170
+ Training Resources: 8x GPUs
171
+ ID: dla102x
172
+ LR: 0.1
173
+ Epochs: 120
174
+ Layers: 102
175
+ Crop Pct: '0.875'
176
+ Momentum: 0.9
177
+ Batch Size: 256
178
+ Image Size: '224'
179
+ Weight Decay: 0.0001
180
+ Interpolation: bilinear
181
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L418
182
+ Weights: http://dl.yf.io/dla/models/imagenet/dla102x-ad62be81.pth
183
+ Results:
184
+ - Task: Image Classification
185
+ Dataset: ImageNet
186
+ Metrics:
187
+ Top 1 Accuracy: 78.51%
188
+ Top 5 Accuracy: 94.23%
189
+ - Name: dla102x2
190
+ In Collection: DLA
191
+ Metadata:
192
+ FLOPs: 9343847400
193
+ Parameters: 41280000
194
+ File Size: 167645295
195
+ Architecture:
196
+ - 1x1 Convolution
197
+ - Batch Normalization
198
+ - Convolution
199
+ - DLA Bottleneck Residual Block
200
+ - DLA Residual Block
201
+ - Global Average Pooling
202
+ - Max Pooling
203
+ - ReLU
204
+ - Residual Block
205
+ - Residual Connection
206
+ - Softmax
207
+ Tasks:
208
+ - Image Classification
209
+ Training Techniques:
210
+ - SGD with Momentum
211
+ - Weight Decay
212
+ Training Data:
213
+ - ImageNet
214
+ Training Resources: 8x GPUs
215
+ ID: dla102x2
216
+ LR: 0.1
217
+ Epochs: 120
218
+ Layers: 102
219
+ Crop Pct: '0.875'
220
+ Momentum: 0.9
221
+ Batch Size: 256
222
+ Image Size: '224'
223
+ Weight Decay: 0.0001
224
+ Interpolation: bilinear
225
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L426
226
+ Weights: http://dl.yf.io/dla/models/imagenet/dla102x2-262837b6.pth
227
+ Results:
228
+ - Task: Image Classification
229
+ Dataset: ImageNet
230
+ Metrics:
231
+ Top 1 Accuracy: 79.44%
232
+ Top 5 Accuracy: 94.65%
233
+ - Name: dla169
234
+ In Collection: DLA
235
+ Metadata:
236
+ FLOPs: 11598004200
237
+ Parameters: 53390000
238
+ File Size: 216547113
239
+ Architecture:
240
+ - 1x1 Convolution
241
+ - Batch Normalization
242
+ - Convolution
243
+ - DLA Bottleneck Residual Block
244
+ - DLA Residual Block
245
+ - Global Average Pooling
246
+ - Max Pooling
247
+ - ReLU
248
+ - Residual Block
249
+ - Residual Connection
250
+ - Softmax
251
+ Tasks:
252
+ - Image Classification
253
+ Training Techniques:
254
+ - SGD with Momentum
255
+ - Weight Decay
256
+ Training Data:
257
+ - ImageNet
258
+ Training Resources: 8x GPUs
259
+ ID: dla169
260
+ LR: 0.1
261
+ Epochs: 120
262
+ Layers: 169
263
+ Crop Pct: '0.875'
264
+ Momentum: 0.9
265
+ Batch Size: 256
266
+ Image Size: '224'
267
+ Weight Decay: 0.0001
268
+ Interpolation: bilinear
269
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L434
270
+ Weights: http://dl.yf.io/dla/models/imagenet/dla169-0914e092.pth
271
+ Results:
272
+ - Task: Image Classification
273
+ Dataset: ImageNet
274
+ Metrics:
275
+ Top 1 Accuracy: 78.69%
276
+ Top 5 Accuracy: 94.33%
277
+ - Name: dla34
278
+ In Collection: DLA
279
+ Metadata:
280
+ FLOPs: 3070105576
281
+ Parameters: 15740000
282
+ File Size: 63228658
283
+ Architecture:
284
+ - 1x1 Convolution
285
+ - Batch Normalization
286
+ - Convolution
287
+ - DLA Bottleneck Residual Block
288
+ - DLA Residual Block
289
+ - Global Average Pooling
290
+ - Max Pooling
291
+ - ReLU
292
+ - Residual Block
293
+ - Residual Connection
294
+ - Softmax
295
+ Tasks:
296
+ - Image Classification
297
+ Training Techniques:
298
+ - SGD with Momentum
299
+ - Weight Decay
300
+ Training Data:
301
+ - ImageNet
302
+ ID: dla34
303
+ LR: 0.1
304
+ Epochs: 120
305
+ Layers: 32
306
+ Crop Pct: '0.875'
307
+ Momentum: 0.9
308
+ Batch Size: 256
309
+ Image Size: '224'
310
+ Weight Decay: 0.0001
311
+ Interpolation: bilinear
312
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L362
313
+ Weights: http://dl.yf.io/dla/models/imagenet/dla34-ba72cf86.pth
314
+ Results:
315
+ - Task: Image Classification
316
+ Dataset: ImageNet
317
+ Metrics:
318
+ Top 1 Accuracy: 74.62%
319
+ Top 5 Accuracy: 92.06%
320
+ - Name: dla46_c
321
+ In Collection: DLA
322
+ Metadata:
323
+ FLOPs: 583277288
324
+ Parameters: 1300000
325
+ File Size: 5307963
326
+ Architecture:
327
+ - 1x1 Convolution
328
+ - Batch Normalization
329
+ - Convolution
330
+ - DLA Bottleneck Residual Block
331
+ - DLA Residual Block
332
+ - Global Average Pooling
333
+ - Max Pooling
334
+ - ReLU
335
+ - Residual Block
336
+ - Residual Connection
337
+ - Softmax
338
+ Tasks:
339
+ - Image Classification
340
+ Training Techniques:
341
+ - SGD with Momentum
342
+ - Weight Decay
343
+ Training Data:
344
+ - ImageNet
345
+ ID: dla46_c
346
+ LR: 0.1
347
+ Epochs: 120
348
+ Layers: 46
349
+ Crop Pct: '0.875'
350
+ Momentum: 0.9
351
+ Batch Size: 256
352
+ Image Size: '224'
353
+ Weight Decay: 0.0001
354
+ Interpolation: bilinear
355
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L369
356
+ Weights: http://dl.yf.io/dla/models/imagenet/dla46_c-2bfd52c3.pth
357
+ Results:
358
+ - Task: Image Classification
359
+ Dataset: ImageNet
360
+ Metrics:
361
+ Top 1 Accuracy: 64.87%
362
+ Top 5 Accuracy: 86.29%
363
+ - Name: dla46x_c
364
+ In Collection: DLA
365
+ Metadata:
366
+ FLOPs: 544052200
367
+ Parameters: 1070000
368
+ File Size: 4387641
369
+ Architecture:
370
+ - 1x1 Convolution
371
+ - Batch Normalization
372
+ - Convolution
373
+ - DLA Bottleneck Residual Block
374
+ - DLA Residual Block
375
+ - Global Average Pooling
376
+ - Max Pooling
377
+ - ReLU
378
+ - Residual Block
379
+ - Residual Connection
380
+ - Softmax
381
+ Tasks:
382
+ - Image Classification
383
+ Training Techniques:
384
+ - SGD with Momentum
385
+ - Weight Decay
386
+ Training Data:
387
+ - ImageNet
388
+ ID: dla46x_c
389
+ LR: 0.1
390
+ Epochs: 120
391
+ Layers: 46
392
+ Crop Pct: '0.875'
393
+ Momentum: 0.9
394
+ Batch Size: 256
395
+ Image Size: '224'
396
+ Weight Decay: 0.0001
397
+ Interpolation: bilinear
398
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L378
399
+ Weights: http://dl.yf.io/dla/models/imagenet/dla46x_c-d761bae7.pth
400
+ Results:
401
+ - Task: Image Classification
402
+ Dataset: ImageNet
403
+ Metrics:
404
+ Top 1 Accuracy: 65.98%
405
+ Top 5 Accuracy: 86.99%
406
+ - Name: dla60
407
+ In Collection: DLA
408
+ Metadata:
409
+ FLOPs: 4256251880
410
+ Parameters: 22040000
411
+ File Size: 89560235
412
+ Architecture:
413
+ - 1x1 Convolution
414
+ - Batch Normalization
415
+ - Convolution
416
+ - DLA Bottleneck Residual Block
417
+ - DLA Residual Block
418
+ - Global Average Pooling
419
+ - Max Pooling
420
+ - ReLU
421
+ - Residual Block
422
+ - Residual Connection
423
+ - Softmax
424
+ Tasks:
425
+ - Image Classification
426
+ Training Techniques:
427
+ - SGD with Momentum
428
+ - Weight Decay
429
+ Training Data:
430
+ - ImageNet
431
+ ID: dla60
432
+ LR: 0.1
433
+ Epochs: 120
434
+ Layers: 60
435
+ Dropout: 0.2
436
+ Crop Pct: '0.875'
437
+ Momentum: 0.9
438
+ Batch Size: 256
439
+ Image Size: '224'
440
+ Weight Decay: 0.0001
441
+ Interpolation: bilinear
442
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L394
443
+ Weights: http://dl.yf.io/dla/models/imagenet/dla60-24839fc4.pth
444
+ Results:
445
+ - Task: Image Classification
446
+ Dataset: ImageNet
447
+ Metrics:
448
+ Top 1 Accuracy: 77.04%
449
+ Top 5 Accuracy: 93.32%
450
+ - Name: dla60_res2net
451
+ In Collection: DLA
452
+ Metadata:
453
+ FLOPs: 4147578504
454
+ Parameters: 20850000
455
+ File Size: 84886593
456
+ Architecture:
457
+ - 1x1 Convolution
458
+ - Batch Normalization
459
+ - Convolution
460
+ - DLA Bottleneck Residual Block
461
+ - DLA Residual Block
462
+ - Global Average Pooling
463
+ - Max Pooling
464
+ - ReLU
465
+ - Residual Block
466
+ - Residual Connection
467
+ - Softmax
468
+ Tasks:
469
+ - Image Classification
470
+ Training Techniques:
471
+ - SGD with Momentum
472
+ - Weight Decay
473
+ Training Data:
474
+ - ImageNet
475
+ ID: dla60_res2net
476
+ Layers: 60
477
+ Crop Pct: '0.875'
478
+ Image Size: '224'
479
+ Interpolation: bilinear
480
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L346
481
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net_dla60_4s-d88db7f9.pth
482
+ Results:
483
+ - Task: Image Classification
484
+ Dataset: ImageNet
485
+ Metrics:
486
+ Top 1 Accuracy: 78.46%
487
+ Top 5 Accuracy: 94.21%
488
+ - Name: dla60_res2next
489
+ In Collection: DLA
490
+ Metadata:
491
+ FLOPs: 3485335272
492
+ Parameters: 17030000
493
+ File Size: 69639245
494
+ Architecture:
495
+ - 1x1 Convolution
496
+ - Batch Normalization
497
+ - Convolution
498
+ - DLA Bottleneck Residual Block
499
+ - DLA Residual Block
500
+ - Global Average Pooling
501
+ - Max Pooling
502
+ - ReLU
503
+ - Residual Block
504
+ - Residual Connection
505
+ - Softmax
506
+ Tasks:
507
+ - Image Classification
508
+ Training Techniques:
509
+ - SGD with Momentum
510
+ - Weight Decay
511
+ Training Data:
512
+ - ImageNet
513
+ ID: dla60_res2next
514
+ Layers: 60
515
+ Crop Pct: '0.875'
516
+ Image Size: '224'
517
+ Interpolation: bilinear
518
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L354
519
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next_dla60_4s-d327927b.pth
520
+ Results:
521
+ - Task: Image Classification
522
+ Dataset: ImageNet
523
+ Metrics:
524
+ Top 1 Accuracy: 78.44%
525
+ Top 5 Accuracy: 94.16%
526
+ - Name: dla60x
527
+ In Collection: DLA
528
+ Metadata:
529
+ FLOPs: 3544204264
530
+ Parameters: 17350000
531
+ File Size: 70883139
532
+ Architecture:
533
+ - 1x1 Convolution
534
+ - Batch Normalization
535
+ - Convolution
536
+ - DLA Bottleneck Residual Block
537
+ - DLA Residual Block
538
+ - Global Average Pooling
539
+ - Max Pooling
540
+ - ReLU
541
+ - Residual Block
542
+ - Residual Connection
543
+ - Softmax
544
+ Tasks:
545
+ - Image Classification
546
+ Training Techniques:
547
+ - SGD with Momentum
548
+ - Weight Decay
549
+ Training Data:
550
+ - ImageNet
551
+ ID: dla60x
552
+ LR: 0.1
553
+ Epochs: 120
554
+ Layers: 60
555
+ Crop Pct: '0.875'
556
+ Momentum: 0.9
557
+ Batch Size: 256
558
+ Image Size: '224'
559
+ Weight Decay: 0.0001
560
+ Interpolation: bilinear
561
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L402
562
+ Weights: http://dl.yf.io/dla/models/imagenet/dla60x-d15cacda.pth
563
+ Results:
564
+ - Task: Image Classification
565
+ Dataset: ImageNet
566
+ Metrics:
567
+ Top 1 Accuracy: 78.25%
568
+ Top 5 Accuracy: 94.02%
569
+ - Name: dla60x_c
570
+ In Collection: DLA
571
+ Metadata:
572
+ FLOPs: 593325032
573
+ Parameters: 1320000
574
+ File Size: 5454396
575
+ Architecture:
576
+ - 1x1 Convolution
577
+ - Batch Normalization
578
+ - Convolution
579
+ - DLA Bottleneck Residual Block
580
+ - DLA Residual Block
581
+ - Global Average Pooling
582
+ - Max Pooling
583
+ - ReLU
584
+ - Residual Block
585
+ - Residual Connection
586
+ - Softmax
587
+ Tasks:
588
+ - Image Classification
589
+ Training Techniques:
590
+ - SGD with Momentum
591
+ - Weight Decay
592
+ Training Data:
593
+ - ImageNet
594
+ ID: dla60x_c
595
+ LR: 0.1
596
+ Epochs: 120
597
+ Layers: 60
598
+ Crop Pct: '0.875'
599
+ Momentum: 0.9
600
+ Batch Size: 256
601
+ Image Size: '224'
602
+ Weight Decay: 0.0001
603
+ Interpolation: bilinear
604
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L386
605
+ Weights: http://dl.yf.io/dla/models/imagenet/dla60x_c-b870c45c.pth
606
+ Results:
607
+ - Task: Image Classification
608
+ Dataset: ImageNet
609
+ Metrics:
610
+ Top 1 Accuracy: 67.91%
611
+ Top 5 Accuracy: 88.42%
612
+ -->
pytorch-image-models/hfdocs/source/models/dpn.mdx ADDED
@@ -0,0 +1,323 @@
1
+ # Dual Path Network (DPN)
2
+
3
+ A **Dual Path Network (DPN)** is a convolutional neural network which presents a new topology of connection paths internally. The intuition is that [ResNets](https://paperswithcode.com/method/resnet) enable feature re-use while DenseNets enable new feature exploration, and both are important for learning good representations. To enjoy the benefits from both path topologies, Dual Path Networks share common features while maintaining the flexibility to explore new features through dual path architectures.
4
+
5
+ The principal building block is a [DPN Block](https://paperswithcode.com/method/dpn-block).
6
+
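+ The toy block below sketches the dual-path idea only (it is not the `timm` `dpn.py` block, and the channel split is invented for the example): one slice of each block's output is added back residually, while the rest is concatenated so the densely connected path keeps growing:
+
+ ```py
+ import torch
+ import torch.nn as nn
+
+ class ToyDualPathBlock(nn.Module):
+     """One slice of the output is added back (ResNet-style feature re-use),
+     the other is concatenated (DenseNet-style new-feature exploration)."""
+     def __init__(self, in_channels, res_channels=32, dense_channels=16):
+         super().__init__()
+         self.body = nn.Sequential(
+             nn.Conv2d(in_channels, res_channels + dense_channels, 3, padding=1, bias=False),
+             nn.BatchNorm2d(res_channels + dense_channels),
+             nn.ReLU(inplace=True),
+         )
+         self.res_channels = res_channels
+
+     def forward(self, x):
+         out = self.body(x)
+         res, dense = out[:, :self.res_channels], out[:, self.res_channels:]
+         residual = x[:, :self.res_channels] + res       # addition path
+         return torch.cat([residual, x[:, self.res_channels:], dense], dim=1)  # concat path grows
+
+ print(ToyDualPathBlock(64)(torch.randn(1, 64, 56, 56)).shape)  # torch.Size([1, 80, 56, 56])
+ ```
+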
7
+ ## How do I use this model on an image?
8
+
9
+ To load a pretrained model:
10
+
11
+ ```py
12
+ >>> import timm
13
+ >>> model = timm.create_model('dpn107', pretrained=True)
14
+ >>> model.eval()
15
+ ```
16
+
17
+ To load and preprocess the image:
18
+
19
+ ```py
20
+ >>> import urllib
21
+ >>> from PIL import Image
22
+ >>> from timm.data import resolve_data_config
23
+ >>> from timm.data.transforms_factory import create_transform
24
+
25
+ >>> config = resolve_data_config({}, model=model)
26
+ >>> transform = create_transform(**config)
27
+
28
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
29
+ >>> urllib.request.urlretrieve(url, filename)
30
+ >>> img = Image.open(filename).convert('RGB')
31
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
32
+ ```
33
+
34
+ To get the model predictions:
35
+
36
+ ```py
37
+ >>> import torch
38
+ >>> with torch.no_grad():
39
+ ... out = model(tensor)
40
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
41
+ >>> print(probabilities.shape)
42
+ >>> # prints: torch.Size([1000])
43
+ ```
44
+
45
+ To get the top-5 predictions class names:
46
+
47
+ ```py
48
+ >>> # Get imagenet class mappings
49
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
50
+ >>> urllib.request.urlretrieve(url, filename)
51
+ >>> with open("imagenet_classes.txt", "r") as f:
52
+ ... categories = [s.strip() for s in f.readlines()]
53
+
54
+ >>> # Print top categories per image
55
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
56
+ >>> for i in range(top5_prob.size(0)):
57
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
58
+ >>> # prints class names and probabilities like:
59
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
60
+ ```
61
+
62
+ Replace the model name with the variant you want to use, e.g. `dpn107`. You can find the IDs in the model summaries at the top of this page.
63
+
64
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
65
+
66
+ ## How do I finetune this model?
67
+
68
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
69
+
70
+ ```py
71
+ >>> model = timm.create_model('dpn107', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
72
+ ```
73
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
74
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
75
+
76
+ ## How do I train this model?
77
+
78
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
79
+
80
+ ## Citation
81
+
82
+ ```BibTeX
83
+ @misc{chen2017dual,
84
+ title={Dual Path Networks},
85
+ author={Yunpeng Chen and Jianan Li and Huaxin Xiao and Xiaojie Jin and Shuicheng Yan and Jiashi Feng},
86
+ year={2017},
87
+ eprint={1707.01629},
88
+ archivePrefix={arXiv},
89
+ primaryClass={cs.CV}
90
+ }
91
+ ```
92
+
93
+ <!--
94
+ Type: model-index
95
+ Collections:
96
+ - Name: DPN
97
+ Paper:
98
+ Title: Dual Path Networks
99
+ URL: https://paperswithcode.com/paper/dual-path-networks
100
+ Models:
101
+ - Name: dpn107
102
+ In Collection: DPN
103
+ Metadata:
104
+ FLOPs: 23524280296
105
+ Parameters: 86920000
106
+ File Size: 348612331
107
+ Architecture:
108
+ - Batch Normalization
109
+ - Convolution
110
+ - DPN Block
111
+ - Dense Connections
112
+ - Global Average Pooling
113
+ - Max Pooling
114
+ - Softmax
115
+ Tasks:
116
+ - Image Classification
117
+ Training Techniques:
118
+ - SGD with Momentum
119
+ - Weight Decay
120
+ Training Data:
121
+ - ImageNet
122
+ Training Resources: 40x K80 GPUs
123
+ ID: dpn107
124
+ LR: 0.316
125
+ Layers: 107
126
+ Crop Pct: '0.875'
127
+ Batch Size: 1280
128
+ Image Size: '224'
129
+ Interpolation: bicubic
130
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dpn.py#L310
131
+ Weights: https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn107_extra-1ac7121e2.pth
132
+ Results:
133
+ - Task: Image Classification
134
+ Dataset: ImageNet
135
+ Metrics:
136
+ Top 1 Accuracy: 80.16%
137
+ Top 5 Accuracy: 94.91%
138
+ - Name: dpn131
139
+ In Collection: DPN
140
+ Metadata:
141
+ FLOPs: 20586274792
142
+ Parameters: 79250000
143
+ File Size: 318016207
144
+ Architecture:
145
+ - Batch Normalization
146
+ - Convolution
147
+ - DPN Block
148
+ - Dense Connections
149
+ - Global Average Pooling
150
+ - Max Pooling
151
+ - Softmax
152
+ Tasks:
153
+ - Image Classification
154
+ Training Techniques:
155
+ - SGD with Momentum
156
+ - Weight Decay
157
+ Training Data:
158
+ - ImageNet
159
+ Training Resources: 40x K80 GPUs
160
+ ID: dpn131
161
+ LR: 0.316
162
+ Layers: 131
163
+ Crop Pct: '0.875'
164
+ Batch Size: 960
165
+ Image Size: '224'
166
+ Interpolation: bicubic
167
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dpn.py#L302
168
+ Weights: https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn131-71dfe43e0.pth
169
+ Results:
170
+ - Task: Image Classification
171
+ Dataset: ImageNet
172
+ Metrics:
173
+ Top 1 Accuracy: 79.83%
174
+ Top 5 Accuracy: 94.71%
175
+ - Name: dpn68
176
+ In Collection: DPN
177
+ Metadata:
178
+ FLOPs: 2990567880
179
+ Parameters: 12610000
180
+ File Size: 50761994
181
+ Architecture:
182
+ - Batch Normalization
183
+ - Convolution
184
+ - DPN Block
185
+ - Dense Connections
186
+ - Global Average Pooling
187
+ - Max Pooling
188
+ - Softmax
189
+ Tasks:
190
+ - Image Classification
191
+ Training Techniques:
192
+ - SGD with Momentum
193
+ - Weight Decay
194
+ Training Data:
195
+ - ImageNet
196
+ Training Resources: 40x K80 GPUs
197
+ ID: dpn68
198
+ LR: 0.316
199
+ Layers: 68
200
+ Crop Pct: '0.875'
201
+ Batch Size: 1280
202
+ Image Size: '224'
203
+ Interpolation: bicubic
204
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dpn.py#L270
205
+ Weights: https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn68-66bebafa7.pth
206
+ Results:
207
+ - Task: Image Classification
208
+ Dataset: ImageNet
209
+ Metrics:
210
+ Top 1 Accuracy: 76.31%
211
+ Top 5 Accuracy: 92.97%
212
+ - Name: dpn68b
213
+ In Collection: DPN
214
+ Metadata:
215
+ FLOPs: 2990567880
216
+ Parameters: 12610000
217
+ File Size: 50781025
218
+ Architecture:
219
+ - Batch Normalization
220
+ - Convolution
221
+ - DPN Block
222
+ - Dense Connections
223
+ - Global Average Pooling
224
+ - Max Pooling
225
+ - Softmax
226
+ Tasks:
227
+ - Image Classification
228
+ Training Techniques:
229
+ - SGD with Momentum
230
+ - Weight Decay
231
+ Training Data:
232
+ - ImageNet
233
+ Training Resources: 40x K80 GPUs
234
+ ID: dpn68b
235
+ LR: 0.316
236
+ Layers: 68
237
+ Crop Pct: '0.875'
238
+ Batch Size: 1280
239
+ Image Size: '224'
240
+ Interpolation: bicubic
241
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dpn.py#L278
242
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/dpn68b_ra-a31ca160.pth
243
+ Results:
244
+ - Task: Image Classification
245
+ Dataset: ImageNet
246
+ Metrics:
247
+ Top 1 Accuracy: 79.21%
248
+ Top 5 Accuracy: 94.42%
249
+ - Name: dpn92
250
+ In Collection: DPN
251
+ Metadata:
252
+ FLOPs: 8357659624
253
+ Parameters: 37670000
254
+ File Size: 151248422
255
+ Architecture:
256
+ - Batch Normalization
257
+ - Convolution
258
+ - DPN Block
259
+ - Dense Connections
260
+ - Global Average Pooling
261
+ - Max Pooling
262
+ - Softmax
263
+ Tasks:
264
+ - Image Classification
265
+ Training Techniques:
266
+ - SGD with Momentum
267
+ - Weight Decay
268
+ Training Data:
269
+ - ImageNet
270
+ Training Resources: 40x K80 GPUs
271
+ ID: dpn92
272
+ LR: 0.316
273
+ Layers: 92
274
+ Crop Pct: '0.875'
275
+ Batch Size: 1280
276
+ Image Size: '224'
277
+ Interpolation: bicubic
278
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dpn.py#L286
279
+ Weights: https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn92_extra-b040e4a9b.pth
280
+ Results:
281
+ - Task: Image Classification
282
+ Dataset: ImageNet
283
+ Metrics:
284
+ Top 1 Accuracy: 79.99%
285
+ Top 5 Accuracy: 94.84%
286
+ - Name: dpn98
287
+ In Collection: DPN
288
+ Metadata:
289
+ FLOPs: 15003675112
290
+ Parameters: 61570000
291
+ File Size: 247021307
292
+ Architecture:
293
+ - Batch Normalization
294
+ - Convolution
295
+ - DPN Block
296
+ - Dense Connections
297
+ - Global Average Pooling
298
+ - Max Pooling
299
+ - Softmax
300
+ Tasks:
301
+ - Image Classification
302
+ Training Techniques:
303
+ - SGD with Momentum
304
+ - Weight Decay
305
+ Training Data:
306
+ - ImageNet
307
+ Training Resources: 40x K80 GPUs
308
+ ID: dpn98
309
+ LR: 0.4
310
+ Layers: 98
311
+ Crop Pct: '0.875'
312
+ Batch Size: 1280
313
+ Image Size: '224'
314
+ Interpolation: bicubic
315
+ Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dpn.py#L294
316
+ Weights: https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn98-5b90dec4d.pth
317
+ Results:
318
+ - Task: Image Classification
319
+ Dataset: ImageNet
320
+ Metrics:
321
+ Top 1 Accuracy: 79.65%
322
+ Top 5 Accuracy: 94.61%
323
+ -->
pytorch-image-models/hfdocs/source/models/ecaresnet.mdx ADDED
@@ -0,0 +1,303 @@
1
+ # ECA-ResNet
2
+
3
+ An **ECA ResNet** is a variant on a [ResNet](https://paperswithcode.com/method/resnet) that utilises an [Efficient Channel Attention module](https://paperswithcode.com/method/efficient-channel-attention). Efficient Channel Attention is an architectural unit based on [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) that reduces model complexity without dimensionality reduction.
4
+
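+ The module below is a minimal sketch of the Efficient Channel Attention idea, simplified relative to the real `timm` layer (in practice the 1D kernel size is derived adaptively from the channel count rather than fixed):
+
+ ```py
+ import torch
+ import torch.nn as nn
+
+ class ECA(nn.Module):
+     """Global average pool, a small 1D convolution across channels, then a
+     sigmoid gate; channel attention with no dimensionality reduction."""
+     def __init__(self, kernel_size=3):
+         super().__init__()
+         self.conv = nn.Conv1d(1, 1, kernel_size, padding=kernel_size // 2, bias=False)
+
+     def forward(self, x):
+         y = x.mean(dim=(2, 3)).unsqueeze(1)          # (N, C, H, W) -> (N, 1, C)
+         y = torch.sigmoid(self.conv(y))              # per-channel weights
+         return x * y.transpose(1, 2).unsqueeze(-1)   # rescale the input channels
+
+ print(ECA()(torch.randn(2, 64, 32, 32)).shape)  # torch.Size([2, 64, 32, 32])
+ ```
+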
5
+ ## How do I use this model on an image?
6
+
7
+ To load a pretrained model:
8
+
9
+ ```py
10
+ >>> import timm
11
+ >>> model = timm.create_model('ecaresnet101d', pretrained=True)
12
+ >>> model.eval()
13
+ ```
14
+
15
+ To load and preprocess the image:
16
+
17
+ ```py
18
+ >>> import urllib
19
+ >>> from PIL import Image
20
+ >>> from timm.data import resolve_data_config
21
+ >>> from timm.data.transforms_factory import create_transform
22
+
23
+ >>> config = resolve_data_config({}, model=model)
24
+ >>> transform = create_transform(**config)
25
+
26
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
27
+ >>> urllib.request.urlretrieve(url, filename)
28
+ >>> img = Image.open(filename).convert('RGB')
29
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
30
+ ```
31
+
32
+ To get the model predictions:
33
+
34
+ ```py
35
+ >>> import torch
36
+ >>> with torch.no_grad():
37
+ ... out = model(tensor)
38
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
39
+ >>> print(probabilities.shape)
40
+ >>> # prints: torch.Size([1000])
41
+ ```
42
+
43
+ To get the top-5 predictions class names:
44
+
45
+ ```py
46
+ >>> # Get imagenet class mappings
47
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
48
+ >>> urllib.request.urlretrieve(url, filename)
49
+ >>> with open("imagenet_classes.txt", "r") as f:
50
+ ... categories = [s.strip() for s in f.readlines()]
51
+
52
+ >>> # Print top categories per image
53
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
54
+ >>> for i in range(top5_prob.size(0)):
55
+ ... print(categories[top5_catid[i]], top5_prob[i].item())
56
+ >>> # prints class names and probabilities like:
57
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
58
+ ```
59
+
60
+ Replace the model name with the variant you want to use, e.g. `ecaresnet101d`. You can find the IDs in the model summaries at the top of this page.
61
+
62
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
63
+
64
+ ## How do I finetune this model?
65
+
66
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
67
+
68
+ ```py
69
+ >>> model = timm.create_model('ecaresnet101d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
70
+ ```
71
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
72
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
73
+
74
+ ## How do I train this model?
75
+
76
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
77
+
78
+ ## Citation
79
+
80
+ ```BibTeX
81
+ @misc{wang2020ecanet,
82
+ title={ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks},
83
+ author={Qilong Wang and Banggu Wu and Pengfei Zhu and Peihua Li and Wangmeng Zuo and Qinghua Hu},
84
+ year={2020},
85
+ eprint={1910.03151},
86
+ archivePrefix={arXiv},
87
+ primaryClass={cs.CV}
88
+ }
89
+ ```
90
+
91
+ <!--
92
+ Type: model-index
93
+ Collections:
94
+ - Name: ECAResNet
95
+ Paper:
96
+ Title: 'ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks'
97
+ URL: https://paperswithcode.com/paper/eca-net-efficient-channel-attention-for-deep
98
+ Models:
99
+ - Name: ecaresnet101d
100
+ In Collection: ECAResNet
101
+ Metadata:
102
+ FLOPs: 10377193728
103
+ Parameters: 44570000
104
+ File Size: 178815067
105
+ Architecture:
106
+ - 1x1 Convolution
107
+ - Batch Normalization
108
+ - Bottleneck Residual Block
109
+ - Convolution
110
+ - Efficient Channel Attention
111
+ - Global Average Pooling
112
+ - Max Pooling
113
+ - ReLU
114
+ - Residual Block
115
+ - Residual Connection
116
+ - Softmax
117
+ - Squeeze-and-Excitation Block
118
+ Tasks:
119
+ - Image Classification
120
+ Training Techniques:
121
+ - SGD with Momentum
122
+ - Weight Decay
123
+ Training Data:
124
+ - ImageNet
125
+ Training Resources: 4x RTX 2080Ti GPUs
126
+ ID: ecaresnet101d
127
+ LR: 0.1
128
+ Epochs: 100
129
+ Layers: 101
130
+ Crop Pct: '0.875'
131
+ Batch Size: 256
132
+ Image Size: '224'
133
+ Weight Decay: 0.0001
134
+ Interpolation: bicubic
135
+ Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1087
136
+ Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNet101D_281c5844.pth
137
+ Results:
138
+ - Task: Image Classification
139
+ Dataset: ImageNet
140
+ Metrics:
141
+ Top 1 Accuracy: 82.18%
142
+ Top 5 Accuracy: 96.06%
143
+ - Name: ecaresnet101d_pruned
144
+ In Collection: ECAResNet
145
+ Metadata:
146
+ FLOPs: 4463972081
147
+ Parameters: 24880000
148
+ File Size: 99852736
149
+ Architecture:
150
+ - 1x1 Convolution
151
+ - Batch Normalization
152
+ - Bottleneck Residual Block
153
+ - Convolution
154
+ - Efficient Channel Attention
155
+ - Global Average Pooling
+ - Max Pooling
+ - ReLU
+ - Residual Block
+ - Residual Connection
+ - Softmax
+ - Squeeze-and-Excitation Block
+ Tasks:
+ - Image Classification
+ Training Techniques:
+ - SGD with Momentum
+ - Weight Decay
+ Training Data:
+ - ImageNet
+ ID: ecaresnet101d_pruned
+ Layers: 101
+ Crop Pct: '0.875'
+ Image Size: '224'
+ Interpolation: bicubic
+ Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1097
+ Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45610/outputs/ECAResNet101D_P_75a3370e.pth
+ Results:
+ - Task: Image Classification
+ Dataset: ImageNet
+ Metrics:
+ Top 1 Accuracy: 80.82%
+ Top 5 Accuracy: 95.64%
+ - Name: ecaresnet50d
+ In Collection: ECAResNet
+ Metadata:
+ FLOPs: 5591090432
+ Parameters: 25580000
+ File Size: 102579290
+ Architecture:
+ - 1x1 Convolution
+ - Batch Normalization
+ - Bottleneck Residual Block
+ - Convolution
+ - Efficient Channel Attention
+ - Global Average Pooling
+ - Max Pooling
+ - ReLU
+ - Residual Block
+ - Residual Connection
+ - Softmax
+ - Squeeze-and-Excitation Block
+ Tasks:
+ - Image Classification
+ Training Techniques:
+ - SGD with Momentum
+ - Weight Decay
+ Training Data:
+ - ImageNet
+ Training Resources: 4x RTX 2080Ti GPUs
+ ID: ecaresnet50d
+ LR: 0.1
+ Epochs: 100
+ Layers: 50
+ Crop Pct: '0.875'
+ Batch Size: 256
+ Image Size: '224'
+ Weight Decay: 0.0001
+ Interpolation: bicubic
+ Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1045
+ Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNet50D_833caf58.pth
+ Results:
+ - Task: Image Classification
+ Dataset: ImageNet
+ Metrics:
+ Top 1 Accuracy: 80.61%
+ Top 5 Accuracy: 95.31%
+ - Name: ecaresnet50d_pruned
+ In Collection: ECAResNet
+ Metadata:
+ FLOPs: 3250730657
+ Parameters: 19940000
+ File Size: 79990436
+ Architecture:
+ - 1x1 Convolution
+ - Batch Normalization
+ - Bottleneck Residual Block
+ - Convolution
+ - Efficient Channel Attention
+ - Global Average Pooling
+ - Max Pooling
+ - ReLU
+ - Residual Block
+ - Residual Connection
+ - Softmax
+ - Squeeze-and-Excitation Block
+ Tasks:
+ - Image Classification
+ Training Techniques:
+ - SGD with Momentum
+ - Weight Decay
+ Training Data:
+ - ImageNet
+ ID: ecaresnet50d_pruned
+ Layers: 50
+ Crop Pct: '0.875'
+ Image Size: '224'
+ Interpolation: bicubic
+ Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1055
+ Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45899/outputs/ECAResNet50D_P_9c67f710.pth
+ Results:
+ - Task: Image Classification
+ Dataset: ImageNet
+ Metrics:
+ Top 1 Accuracy: 79.71%
+ Top 5 Accuracy: 94.88%
+ - Name: ecaresnetlight
+ In Collection: ECAResNet
+ Metadata:
+ FLOPs: 5276118784
+ Parameters: 30160000
+ File Size: 120956612
+ Architecture:
+ - 1x1 Convolution
+ - Batch Normalization
+ - Bottleneck Residual Block
+ - Convolution
+ - Efficient Channel Attention
+ - Global Average Pooling
+ - Max Pooling
+ - ReLU
+ - Residual Block
+ - Residual Connection
+ - Softmax
+ - Squeeze-and-Excitation Block
+ Tasks:
+ - Image Classification
+ Training Techniques:
+ - SGD with Momentum
+ - Weight Decay
+ Training Data:
+ - ImageNet
+ ID: ecaresnetlight
+ Crop Pct: '0.875'
+ Image Size: '224'
+ Interpolation: bicubic
+ Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1077
+ Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNetLight_4f34b35b.pth
+ Results:
+ - Task: Image Classification
+ Dataset: ImageNet
+ Metrics:
+ Top 1 Accuracy: 80.46%
+ Top 5 Accuracy: 95.25%
+ -->
pytorch-image-models/hfdocs/source/models/efficientnet-pruned.mdx ADDED
@@ -0,0 +1,212 @@
+ # EfficientNet (Knapsack Pruned)
+
+ **EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice, which scales these factors arbitrarily, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use \\( 2^N \\) times more computational resources, then we can simply increase the network depth by \\( \alpha ^ N \\), width by \\( \beta ^ N \\), and image size by \\( \gamma ^ N \\), where \\( \alpha, \beta, \gamma \\) are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient \\( \phi \\) to uniformly scale network width, depth, and resolution in a principled way.
+
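+ As described in the EfficientNet paper, the compound coefficient \\( \phi \\) controls how much extra compute to spend, with the base coefficients found by grid search for EfficientNet-B0 being \\( \alpha=1.2, \beta=1.1, \gamma=1.15 \\):
+
+ \\[ d = \alpha^{\phi}, \quad w = \beta^{\phi}, \quad r = \gamma^{\phi}, \quad \text{s.t. } \alpha \cdot \beta^{2} \cdot \gamma^{2} \approx 2, \; \alpha \geq 1, \beta \geq 1, \gamma \geq 1 \\]
+
+ Here \\( d \\), \\( w \\) and \\( r \\) are the depth, width and resolution multipliers, so total FLOPs grow by roughly \\( 2^{\phi} \\).
+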
+ The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image.
+
+ The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2), in addition to [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block).
+
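+ As a rough, self-contained sketch of what such a block looks like (an illustration only, not timm's actual implementation in `timm/models/efficientnet.py`; channel counts and the squeeze-and-excitation reduction are simplified):
+
+ ```py
+ import torch
+ import torch.nn as nn
+
+ class SqueezeExcite(nn.Module):
+     """Squeeze (global average pool) then excite (gated channel re-weighting)."""
+     def __init__(self, channels, rd_ratio=0.25):
+         super().__init__()
+         rd_channels = max(1, int(channels * rd_ratio))
+         self.fc1 = nn.Conv2d(channels, rd_channels, 1)
+         self.fc2 = nn.Conv2d(rd_channels, channels, 1)
+
+     def forward(self, x):
+         s = x.mean((2, 3), keepdim=True)                       # squeeze
+         s = torch.sigmoid(self.fc2(torch.relu(self.fc1(s))))   # excite
+         return x * s
+
+ class MBConv(nn.Module):
+     """Inverted bottleneck: 1x1 expand -> 3x3 depthwise -> SE -> 1x1 project."""
+     def __init__(self, in_ch, out_ch, expand_ratio=6, stride=1):
+         super().__init__()
+         mid_ch = in_ch * expand_ratio
+         self.use_residual = stride == 1 and in_ch == out_ch
+         self.block = nn.Sequential(
+             nn.Conv2d(in_ch, mid_ch, 1, bias=False),           # expansion
+             nn.BatchNorm2d(mid_ch), nn.SiLU(),                 # SiLU == Swish
+             nn.Conv2d(mid_ch, mid_ch, 3, stride, 1, groups=mid_ch, bias=False),  # depthwise
+             nn.BatchNorm2d(mid_ch), nn.SiLU(),
+             SqueezeExcite(mid_ch),
+             nn.Conv2d(mid_ch, out_ch, 1, bias=False),          # linear projection
+             nn.BatchNorm2d(out_ch),
+         )
+
+     def forward(self, x):
+         out = self.block(x)
+         return x + out if self.use_residual else out
+
+ print(MBConv(16, 16)(torch.randn(1, 16, 32, 32)).shape)       # torch.Size([1, 16, 32, 32])
+ ```
+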
+ This collection consists of pruned EfficientNet models.
+
+ ## How do I use this model on an image?
+
+ To load a pretrained model:
+
+ ```py
+ >>> import timm
+ >>> model = timm.create_model('efficientnet_b1_pruned', pretrained=True)
+ >>> model.eval()
+ ```
+
+ To load and preprocess the image:
+
+ ```py
+ >>> import urllib
+ >>> from PIL import Image
+ >>> from timm.data import resolve_data_config
+ >>> from timm.data.transforms_factory import create_transform
+
+ >>> config = resolve_data_config({}, model=model)
+ >>> transform = create_transform(**config)
+
+ >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
+ >>> urllib.request.urlretrieve(url, filename)
+ >>> img = Image.open(filename).convert('RGB')
+ >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
+ ```
+
+ To get the model predictions:
+
+ ```py
+ >>> import torch
+ >>> with torch.no_grad():
+ ...     out = model(tensor)
+ >>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
+ >>> print(probabilities.shape)
+ >>> # prints: torch.Size([1000])
+ ```
+
+ To get the top-5 predicted class names:
+
+ ```py
+ >>> # Get imagenet class mappings
+ >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
+ >>> urllib.request.urlretrieve(url, filename)
+ >>> with open("imagenet_classes.txt", "r") as f:
+ ...     categories = [s.strip() for s in f.readlines()]
+
+ >>> # Print top categories per image
+ >>> top5_prob, top5_catid = torch.topk(probabilities, 5)
+ >>> for i in range(top5_prob.size(0)):
+ ...     print(categories[top5_catid[i]], top5_prob[i].item())
+ >>> # prints class names and probabilities like:
+ >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
+ ```
+
+ Replace the model name with the variant you want to use, e.g. `efficientnet_b1_pruned`. You can find the IDs in the model summaries at the top of this page.
+
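+ If you are unsure which pruned variants your installed `timm` version ships, you can also list them with a wildcard; the exact result depends on your `timm` version:
+
+ ```py
+ >>> import timm
+ >>> timm.list_models('efficientnet*pruned*', pretrained=True)
+ >>> # e.g. ['efficientnet_b1_pruned', 'efficientnet_b2_pruned', 'efficientnet_b3_pruned']
+ ```
+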
+ To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
+
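+ As a quick taste of what the linked examples cover, most timm models, including this one, can also be created as a feature backbone with `features_only=True`; the number and shapes of the returned feature maps depend on the model and the input size you choose:
+
+ ```py
+ >>> import timm, torch
+ >>> feature_model = timm.create_model('efficientnet_b1_pruned', pretrained=True, features_only=True)
+ >>> features = feature_model(torch.randn(1, 3, 240, 240))
+ >>> [f.shape for f in features]  # one feature map per network stage
+ ```
+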
+ ## How do I finetune this model?
+
+ You can finetune any of the pre-trained models just by changing the classifier (the last layer).
+
+ ```py
+ >>> model = timm.create_model('efficientnet_b1_pruned', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
+ ```
+
+ To finetune on your own dataset, you have to write a training loop or adapt [timm's training
+ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
+
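+ If you write your own loop, a minimal sketch could look like the following; `train_loader` (yielding `(images, labels)` batches) and `NUM_FINETUNE_CLASSES` are placeholders you define yourself, not part of timm:
+
+ ```py
+ >>> import torch
+ >>> model = timm.create_model('efficientnet_b1_pruned', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
+ >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)
+ >>> criterion = torch.nn.CrossEntropyLoss()
+ >>> model.train()
+ >>> for images, labels in train_loader:  # your own DataLoader
+ ...     optimizer.zero_grad()
+ ...     loss = criterion(model(images), labels)
+ ...     loss.backward()
+ ...     optimizer.step()
+ ```
+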
+ ## How do I train this model?
+
+ You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+
+ ## Citation
+
+ ```BibTeX
+ @misc{tan2020efficientnet,
+       title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks},
+       author={Mingxing Tan and Quoc V. Le},
+       year={2020},
+       eprint={1905.11946},
+       archivePrefix={arXiv},
+       primaryClass={cs.LG}
+ }
+ ```
+
+ ```BibTeX
+ @misc{aflalo2020knapsack,
+       title={Knapsack Pruning with Inner Distillation},
+       author={Yonathan Aflalo and Asaf Noy and Ming Lin and Itamar Friedman and Lihi Zelnik},
+       year={2020},
+       eprint={2002.08258},
+       archivePrefix={arXiv},
+       primaryClass={cs.LG}
+ }
+ ```
+
+ <!--
+ Type: model-index
+ Collections:
+ - Name: EfficientNet Pruned
+ Paper:
+ Title: Knapsack Pruning with Inner Distillation
+ URL: https://paperswithcode.com/paper/knapsack-pruning-with-inner-distillation
+ Models:
+ - Name: efficientnet_b1_pruned
+ In Collection: EfficientNet Pruned
+ Metadata:
+ FLOPs: 489653114
+ Parameters: 6330000
+ File Size: 25595162
+ Architecture:
+ - 1x1 Convolution
+ - Average Pooling
+ - Batch Normalization
+ - Convolution
+ - Dense Connections
+ - Dropout
+ - Inverted Residual Block
+ - Squeeze-and-Excitation Block
+ - Swish
+ Tasks:
+ - Image Classification
+ Training Data:
+ - ImageNet
+ ID: efficientnet_b1_pruned
+ Crop Pct: '0.882'
+ Image Size: '240'
+ Interpolation: bicubic
+ Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1208
+ Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb1_pruned_9ebb3fe6.pth
+ Results:
+ - Task: Image Classification
+ Dataset: ImageNet
+ Metrics:
+ Top 1 Accuracy: 78.25%
+ Top 5 Accuracy: 93.84%
+ - Name: efficientnet_b2_pruned
+ In Collection: EfficientNet Pruned
+ Metadata:
+ FLOPs: 878133915
+ Parameters: 8310000
+ File Size: 33555005
+ Architecture:
+ - 1x1 Convolution
+ - Average Pooling
+ - Batch Normalization
+ - Convolution
+ - Dense Connections
+ - Dropout
+ - Inverted Residual Block
+ - Squeeze-and-Excitation Block
+ - Swish
+ Tasks:
+ - Image Classification
+ Training Data:
+ - ImageNet
+ ID: efficientnet_b2_pruned
+ Crop Pct: '0.89'
+ Image Size: '260'
+ Interpolation: bicubic
+ Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1219
+ Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb2_pruned_203f55bc.pth
+ Results:
+ - Task: Image Classification
+ Dataset: ImageNet
+ Metrics:
+ Top 1 Accuracy: 79.91%
+ Top 5 Accuracy: 94.86%
+ - Name: efficientnet_b3_pruned
+ In Collection: EfficientNet Pruned
+ Metadata:
+ FLOPs: 1239590641
+ Parameters: 9860000
+ File Size: 39770812
+ Architecture:
+ - 1x1 Convolution
+ - Average Pooling
+ - Batch Normalization
+ - Convolution
+ - Dense Connections
+ - Dropout
+ - Inverted Residual Block
+ - Squeeze-and-Excitation Block
+ - Swish
+ Tasks:
+ - Image Classification
+ Training Data:
+ - ImageNet
+ ID: efficientnet_b3_pruned
+ Crop Pct: '0.904'
+ Image Size: '300'
+ Interpolation: bicubic
+ Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1230
+ Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb3_pruned_5abcc29f.pth
+ Results:
+ - Task: Image Classification
+ Dataset: ImageNet
+ Metrics:
+ Top 1 Accuracy: 80.86%
+ Top 5 Accuracy: 95.24%
+ -->