meg (HF staff) committed on
Commit 2c8d22f
1 Parent(s): 99aa18d

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. pytorch-image-models/timm/__pycache__/version.cpython-39.pyc +0 -0
  2. pytorch-image-models/timm/models/__pycache__/_efficientnet_builder.cpython-39.pyc +0 -0
  3. pytorch-image-models/timm/models/__pycache__/_features.cpython-39.pyc +0 -0
  4. pytorch-image-models/timm/models/__pycache__/_manipulate.cpython-39.pyc +0 -0
  5. pytorch-image-models/timm/models/__pycache__/_pretrained.cpython-39.pyc +0 -0
  6. pytorch-image-models/timm/models/__pycache__/_prune.cpython-39.pyc +0 -0
  7. pytorch-image-models/timm/models/__pycache__/_registry.cpython-39.pyc +0 -0
  8. pytorch-image-models/timm/models/__pycache__/deit.cpython-39.pyc +0 -0
  9. pytorch-image-models/timm/models/__pycache__/dla.cpython-39.pyc +0 -0
  10. pytorch-image-models/timm/models/__pycache__/dpn.cpython-39.pyc +0 -0
  11. pytorch-image-models/timm/models/__pycache__/efficientformer.cpython-39.pyc +0 -0
  12. pytorch-image-models/timm/models/__pycache__/efficientformer_v2.cpython-39.pyc +0 -0
  13. pytorch-image-models/timm/models/__pycache__/efficientvit_mit.cpython-39.pyc +0 -0
  14. pytorch-image-models/timm/models/__pycache__/efficientvit_msra.cpython-39.pyc +0 -0
  15. pytorch-image-models/timm/models/__pycache__/eva.cpython-39.pyc +0 -0
  16. pytorch-image-models/timm/models/__pycache__/fastvit.cpython-39.pyc +0 -0
  17. pytorch-image-models/timm/models/__pycache__/focalnet.cpython-39.pyc +0 -0
  18. pytorch-image-models/timm/models/__pycache__/hgnet.cpython-39.pyc +0 -0
  19. pytorch-image-models/timm/models/__pycache__/hiera.cpython-39.pyc +0 -0
  20. pytorch-image-models/timm/models/__pycache__/hieradet_sam2.cpython-39.pyc +0 -0
  21. pytorch-image-models/timm/models/__pycache__/inception_next.cpython-39.pyc +0 -0
  22. pytorch-image-models/timm/models/__pycache__/inception_resnet_v2.cpython-39.pyc +0 -0
  23. pytorch-image-models/timm/models/__pycache__/inception_v3.cpython-39.pyc +0 -0
  24. pytorch-image-models/timm/models/__pycache__/inception_v4.cpython-39.pyc +0 -0
  25. pytorch-image-models/timm/models/__pycache__/levit.cpython-39.pyc +0 -0
  26. pytorch-image-models/timm/models/__pycache__/mobilevit.cpython-39.pyc +0 -0
  27. pytorch-image-models/timm/models/__pycache__/mvitv2.cpython-39.pyc +0 -0
  28. pytorch-image-models/timm/models/__pycache__/nasnet.cpython-39.pyc +0 -0
  29. pytorch-image-models/timm/models/__pycache__/nest.cpython-39.pyc +0 -0
  30. pytorch-image-models/timm/models/__pycache__/nextvit.cpython-39.pyc +0 -0
  31. pytorch-image-models/timm/models/__pycache__/resnetv2.cpython-39.pyc +0 -0
  32. pytorch-image-models/timm/models/__pycache__/rexnet.cpython-39.pyc +0 -0
  33. pytorch-image-models/timm/models/__pycache__/selecsls.cpython-39.pyc +0 -0
  34. pytorch-image-models/timm/models/__pycache__/swin_transformer_v2_cr.cpython-39.pyc +0 -0
  35. pytorch-image-models/timm/models/__pycache__/tnt.cpython-39.pyc +0 -0
  36. pytorch-image-models/timm/models/__pycache__/tresnet.cpython-39.pyc +0 -0
  37. pytorch-image-models/timm/models/__pycache__/twins.cpython-39.pyc +0 -0
  38. pytorch-image-models/timm/models/__pycache__/vgg.cpython-39.pyc +0 -0
  39. pytorch-image-models/timm/models/__pycache__/vision_transformer_hybrid.cpython-39.pyc +0 -0
  40. pytorch-image-models/timm/models/__pycache__/vision_transformer_relpos.cpython-39.pyc +0 -0
  41. pytorch-image-models/timm/models/__pycache__/vision_transformer_sam.cpython-39.pyc +0 -0
  42. pytorch-image-models/timm/models/__pycache__/volo.cpython-39.pyc +0 -0
  43. pytorch-image-models/timm/models/__pycache__/vovnet.cpython-39.pyc +0 -0
  44. pytorch-image-models/timm/optim/__init__.py +34 -0
  45. pytorch-image-models/timm/optim/__pycache__/adabelief.cpython-39.pyc +0 -0
  46. pytorch-image-models/timm/optim/__pycache__/nadam.cpython-39.pyc +0 -0
  47. pytorch-image-models/timm/optim/_optim_factory.py +1081 -0
  48. pytorch-image-models/timm/optim/_param_groups.py +131 -0
  49. pytorch-image-models/timm/optim/adabelief.py +218 -0
  50. pytorch-image-models/timm/optim/adafactor.py +229 -0
pytorch-image-models/timm/__pycache__/version.cpython-39.pyc ADDED
Binary file (159 Bytes). View file
 
pytorch-image-models/timm/models/__pycache__/_efficientnet_builder.cpython-39.pyc ADDED
Binary file (15 kB). View file
 
pytorch-image-models/timm/models/__pycache__/_features.cpython-39.pyc ADDED
Binary file (18.1 kB). View file
 
pytorch-image-models/timm/models/__pycache__/_manipulate.cpython-39.pyc ADDED
Binary file (7.72 kB). View file
 
pytorch-image-models/timm/models/__pycache__/_pretrained.cpython-39.pyc ADDED
Binary file (3.39 kB). View file
 
pytorch-image-models/timm/models/__pycache__/_prune.cpython-39.pyc ADDED
Binary file (2.92 kB). View file
 
pytorch-image-models/timm/models/__pycache__/_registry.cpython-39.pyc ADDED
Binary file (10.8 kB). View file
 
pytorch-image-models/timm/models/__pycache__/deit.cpython-39.pyc ADDED
Binary file (14.8 kB). View file
 
pytorch-image-models/timm/models/__pycache__/dla.cpython-39.pyc ADDED
Binary file (14.4 kB). View file
 
pytorch-image-models/timm/models/__pycache__/dpn.cpython-39.pyc ADDED
Binary file (11.1 kB). View file
 
pytorch-image-models/timm/models/__pycache__/efficientformer.cpython-39.pyc ADDED
Binary file (19.4 kB). View file
 
pytorch-image-models/timm/models/__pycache__/efficientformer_v2.cpython-39.pyc ADDED
Binary file (20.5 kB). View file
 
pytorch-image-models/timm/models/__pycache__/efficientvit_mit.cpython-39.pyc ADDED
Binary file (23.2 kB). View file
 
pytorch-image-models/timm/models/__pycache__/efficientvit_msra.cpython-39.pyc ADDED
Binary file (18.8 kB). View file
 
pytorch-image-models/timm/models/__pycache__/eva.cpython-39.pyc ADDED
Binary file (30.1 kB). View file
 
pytorch-image-models/timm/models/__pycache__/fastvit.cpython-39.pyc ADDED
Binary file (40.5 kB). View file
 
pytorch-image-models/timm/models/__pycache__/focalnet.cpython-39.pyc ADDED
Binary file (19.9 kB). View file
 
pytorch-image-models/timm/models/__pycache__/hgnet.cpython-39.pyc ADDED
Binary file (18.1 kB). View file
 
pytorch-image-models/timm/models/__pycache__/hiera.cpython-39.pyc ADDED
Binary file (27 kB). View file
 
pytorch-image-models/timm/models/__pycache__/hieradet_sam2.cpython-39.pyc ADDED
Binary file (18.5 kB). View file
 
pytorch-image-models/timm/models/__pycache__/inception_next.cpython-39.pyc ADDED
Binary file (13 kB). View file
 
pytorch-image-models/timm/models/__pycache__/inception_resnet_v2.cpython-39.pyc ADDED
Binary file (11.3 kB). View file
 
pytorch-image-models/timm/models/__pycache__/inception_v3.cpython-39.pyc ADDED
Binary file (13.6 kB). View file
 
pytorch-image-models/timm/models/__pycache__/inception_v4.cpython-39.pyc ADDED
Binary file (10.6 kB). View file
 
pytorch-image-models/timm/models/__pycache__/levit.cpython-39.pyc ADDED
Binary file (28.7 kB). View file
 
pytorch-image-models/timm/models/__pycache__/mobilevit.cpython-39.pyc ADDED
Binary file (18 kB). View file
 
pytorch-image-models/timm/models/__pycache__/mvitv2.cpython-39.pyc ADDED
Binary file (28.3 kB). View file
 
pytorch-image-models/timm/models/__pycache__/nasnet.cpython-39.pyc ADDED
Binary file (16.1 kB). View file
 
pytorch-image-models/timm/models/__pycache__/nest.cpython-39.pyc ADDED
Binary file (18.5 kB). View file
 
pytorch-image-models/timm/models/__pycache__/nextvit.cpython-39.pyc ADDED
Binary file (17.7 kB). View file
 
pytorch-image-models/timm/models/__pycache__/resnetv2.cpython-39.pyc ADDED
Binary file (25.2 kB). View file
 
pytorch-image-models/timm/models/__pycache__/rexnet.cpython-39.pyc ADDED
Binary file (10.4 kB). View file
 
pytorch-image-models/timm/models/__pycache__/selecsls.cpython-39.pyc ADDED
Binary file (10.8 kB). View file
 
pytorch-image-models/timm/models/__pycache__/swin_transformer_v2_cr.cpython-39.pyc ADDED
Binary file (37.1 kB). View file
 
pytorch-image-models/timm/models/__pycache__/tnt.cpython-39.pyc ADDED
Binary file (11 kB). View file
 
pytorch-image-models/timm/models/__pycache__/tresnet.cpython-39.pyc ADDED
Binary file (10.7 kB). View file
 
pytorch-image-models/timm/models/__pycache__/twins.cpython-39.pyc ADDED
Binary file (17.9 kB). View file
 
pytorch-image-models/timm/models/__pycache__/vgg.cpython-39.pyc ADDED
Binary file (10.1 kB). View file
 
pytorch-image-models/timm/models/__pycache__/vision_transformer_hybrid.cpython-39.pyc ADDED
Binary file (13.8 kB). View file
 
pytorch-image-models/timm/models/__pycache__/vision_transformer_relpos.cpython-39.pyc ADDED
Binary file (23.3 kB). View file
 
pytorch-image-models/timm/models/__pycache__/vision_transformer_sam.cpython-39.pyc ADDED
Binary file (21.9 kB). View file
 
pytorch-image-models/timm/models/__pycache__/volo.cpython-39.pyc ADDED
Binary file (24.9 kB). View file
 
pytorch-image-models/timm/models/__pycache__/vovnet.cpython-39.pyc ADDED
Binary file (11.8 kB). View file
 
pytorch-image-models/timm/optim/__init__.py ADDED
@@ -0,0 +1,34 @@
+ from .adabelief import AdaBelief
+ from .adafactor import Adafactor
+ from .adafactor_bv import AdafactorBigVision
+ from .adahessian import Adahessian
+ from .adamp import AdamP
+ from .adamw import AdamWLegacy
+ from .adan import Adan
+ from .adopt import Adopt
+ from .lamb import Lamb
+ from .laprop import LaProp
+ from .lars import Lars
+ from .lion import Lion
+ from .lookahead import Lookahead
+ from .madgrad import MADGRAD
+ from .mars import Mars
+ from .nadam import NAdamLegacy
+ from .nadamw import NAdamW
+ from .nvnovograd import NvNovoGrad
+ from .radam import RAdamLegacy
+ from .rmsprop_tf import RMSpropTF
+ from .sgdp import SGDP
+ from .sgdw import SGDW
+
+ # bring common torch.optim Optimizers into timm.optim namespace for consistency
+ from torch.optim import Adadelta, Adagrad, Adamax, Adam, AdamW, RMSprop, SGD
+ try:
+     # in case any very old torch versions being used
+     from torch.optim import NAdam, RAdam
+ except ImportError:
+     pass
+
+ from ._optim_factory import list_optimizers, get_optimizer_class, get_optimizer_info, OptimInfo, OptimizerRegistry, \
+     create_optimizer_v2, create_optimizer, optimizer_kwargs
+ from ._param_groups import param_groups_layer_decay, param_groups_weight_decay, auto_group_layers
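For orientation, a minimal usage sketch (not part of the commit) of the API this `__init__.py` re-exports; it assumes `timm` with these files is installed, and the `nn.Linear` model is a stand-in for illustration only:

import torch.nn as nn
from timm.optim import create_optimizer_v2, list_optimizers

model = nn.Linear(10, 2)  # toy model, illustrative only

# list registered optimizer names matching a wildcard pattern
print(list_optimizers('adam*'))

# build an optimizer through the factory; 1d params (bias/norm) skip weight decay by default
optimizer = create_optimizer_v2(model, 'adamw', lr=1e-3, weight_decay=0.05)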
pytorch-image-models/timm/optim/__pycache__/adabelief.cpython-39.pyc ADDED
Binary file (6.53 kB). View file
 
pytorch-image-models/timm/optim/__pycache__/nadam.cpython-39.pyc ADDED
Binary file (3.33 kB). View file
 
pytorch-image-models/timm/optim/_optim_factory.py ADDED
@@ -0,0 +1,1081 @@
1
+ """ Optimizer Factory w/ custom Weight Decay & Layer Decay support
2
+
3
+ Hacked together by / Copyright 2021 Ross Wightman
4
+ """
5
+ import logging
6
+ from dataclasses import dataclass
7
+ from functools import partial
8
+ from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union
9
+ from fnmatch import fnmatch
10
+ import importlib
11
+
12
+ import torch
13
+ import torch.nn as nn
14
+ import torch.optim
15
+
16
+ from ._param_groups import param_groups_layer_decay, param_groups_weight_decay
17
+ from ._types import ParamsT, OptimType, OptimizerCallable
18
+ from .adabelief import AdaBelief
19
+ from .adafactor import Adafactor
20
+ from .adafactor_bv import AdafactorBigVision
21
+ from .adahessian import Adahessian
22
+ from .adamp import AdamP
23
+ from .adamw import AdamWLegacy
24
+ from .adan import Adan
25
+ from .adopt import Adopt
26
+ from .lamb import Lamb
27
+ from .laprop import LaProp
28
+ from .lars import Lars
29
+ from .lion import Lion
30
+ from .lookahead import Lookahead
31
+ from .madgrad import MADGRAD
32
+ from .mars import Mars
33
+ from .nadam import NAdamLegacy
34
+ from .nadamw import NAdamW
35
+ from .nvnovograd import NvNovoGrad
36
+ from .radam import RAdamLegacy
37
+ from .rmsprop_tf import RMSpropTF
38
+ from .sgdp import SGDP
39
+ from .sgdw import SGDW
40
+
41
+ _logger = logging.getLogger(__name__)
42
+
43
+
44
+ def _import_class(class_string: str) -> Type:
45
+ """Dynamically import a class from a string."""
46
+ try:
47
+ module_name, class_name = class_string.rsplit(".", 1)
48
+ module = importlib.import_module(module_name)
49
+ return getattr(module, class_name)
50
+ except (ImportError, AttributeError) as e:
51
+ raise ImportError(f"Could not import {class_string}: {e}")
52
+
53
+
54
+
55
+ @dataclass(frozen=True)
56
+ class OptimInfo:
57
+ """Immutable configuration for an optimizer.
58
+
59
+ Attributes:
60
+ name: Unique identifier for the optimizer
61
+ opt_class: The optimizer class
62
+ description: Brief description of the optimizer's characteristics and behavior
63
+ has_eps: Whether the optimizer accepts epsilon parameter
64
+ has_momentum: Whether the optimizer accepts momentum parameter
65
+ has_betas: Whether the optimizer accepts a tuple of beta parameters
66
+ num_betas: number of betas in tuple (valid IFF has_betas = True)
67
+ defaults: Optional default parameters for the optimizer
68
+ """
69
+ name: str
70
+ opt_class: Union[str, OptimType]
71
+ description: str = ''
72
+ has_eps: bool = True
73
+ has_momentum: bool = False
74
+ has_betas: bool = False
75
+ num_betas: int = 2
76
+ second_order: bool = False
77
+ defaults: Optional[Dict[str, Any]] = None
78
+
79
+
80
+ class OptimizerRegistry:
81
+ """Registry managing optimizer configurations and instantiation.
82
+
83
+ This class provides a central registry for optimizer configurations and handles
84
+ their instantiation with appropriate parameter groups and settings.
85
+ """
86
+
87
+ def __init__(self) -> None:
88
+ self._optimizers: Dict[str, OptimInfo] = {}
89
+ self._foreach_defaults: Set[str] = {'lion'}
90
+
91
+ def register(self, info: OptimInfo) -> None:
92
+ """Register an optimizer configuration.
93
+
94
+ Args:
95
+ info: The OptimInfo configuration containing name, type and description
96
+ """
97
+ name = info.name.lower()
98
+ if name in self._optimizers:
99
+ _logger.warning(f'Optimizer {name} already registered, overwriting')
100
+ self._optimizers[name] = info
101
+
102
+ def register_alias(self, alias: str, target: str) -> None:
103
+ """Register an alias for an existing optimizer.
104
+
105
+ Args:
106
+ alias: The alias name
107
+ target: The target optimizer name
108
+
109
+ Raises:
110
+ KeyError: If target optimizer doesn't exist
111
+ """
112
+ target = target.lower()
113
+ if target not in self._optimizers:
114
+ raise KeyError(f'Cannot create alias for non-existent optimizer {target}')
115
+ self._optimizers[alias.lower()] = self._optimizers[target]
116
+
117
+ def register_foreach_default(self, name: str) -> None:
118
+ """Register an optimizer as defaulting to foreach=True."""
119
+ self._foreach_defaults.add(name.lower())
120
+
121
+ def list_optimizers(
122
+ self,
123
+ filter: Union[str, List[str]] = '',
124
+ exclude_filters: Optional[List[str]] = None,
125
+ with_description: bool = False
126
+ ) -> List[Union[str, Tuple[str, str]]]:
127
+ """List available optimizer names, optionally filtered.
128
+
129
+ Args:
130
+ filter: Wildcard style filter string (e.g., 'adam*')
131
+ exclude_filters: Optional list of wildcard patterns to exclude
132
+ with_description: If True, return tuples of (name, description)
133
+
134
+ Returns:
135
+ List of either optimizer names or (name, description) tuples
136
+ """
137
+ names = sorted(self._optimizers.keys())
138
+
139
+ if filter:
140
+ if isinstance(filter, str):
141
+ filters = [filter]
142
+ else:
143
+ filters = filter
144
+ filtered_names = set()
145
+ for f in filters:
146
+ filtered_names.update(n for n in names if fnmatch(n, f))
147
+ names = sorted(filtered_names)
148
+
149
+ if exclude_filters:
150
+ for exclude_filter in exclude_filters:
151
+ names = [n for n in names if not fnmatch(n, exclude_filter)]
152
+
153
+ if with_description:
154
+ return [(name, self._optimizers[name].description) for name in names]
155
+
156
+ return names
157
+
158
+ def get_optimizer_info(self, name: str) -> OptimInfo:
159
+ """Get the OptimInfo for an optimizer.
160
+
161
+ Args:
162
+ name: Name of the optimizer
163
+
164
+ Returns:
165
+ OptimInfo configuration
166
+
167
+ Raises:
168
+ ValueError: If optimizer is not found
169
+ """
170
+ name = name.lower()
171
+ if name not in self._optimizers:
172
+ raise ValueError(f'Optimizer {name} not found in registry')
173
+ return self._optimizers[name]
174
+
175
+ def get_optimizer_class(
176
+ self,
177
+ name_or_info: Union[str, OptimInfo],
178
+ bind_defaults: bool = True,
179
+ ) -> Union[OptimType, OptimizerCallable]:
180
+ """Get the optimizer class with any default arguments applied.
181
+
182
+ This allows direct instantiation of optimizers with their default configs
183
+ without going through the full factory.
184
+
185
+ Args:
186
+ name_or_info: Name of the optimizer
187
+ bind_defaults: Bind default arguments to optimizer class via `partial` before returning
188
+
189
+ Returns:
190
+ Optimizer class or partial with defaults applied
191
+
192
+ Raises:
193
+ ValueError: If optimizer not found
194
+ """
195
+ if isinstance(name_or_info, str):
196
+ opt_info = self.get_optimizer_info(name_or_info)
197
+ else:
198
+ assert isinstance(name_or_info, OptimInfo)
199
+ opt_info = name_or_info
200
+
201
+ if isinstance(opt_info.opt_class, str):
202
+ # Special handling for APEX and BNB optimizers
203
+ if opt_info.opt_class.startswith('apex.'):
204
+ assert torch.cuda.is_available(), 'CUDA required for APEX optimizers'
205
+ try:
206
+ opt_class = _import_class(opt_info.opt_class)
207
+ except ImportError as e:
208
+ raise ImportError('APEX optimizers require apex to be installed') from e
209
+ elif opt_info.opt_class.startswith('bitsandbytes.'):
210
+ assert torch.cuda.is_available(), 'CUDA required for bitsandbytes optimizers'
211
+ try:
212
+ opt_class = _import_class(opt_info.opt_class)
213
+ except ImportError as e:
214
+ raise ImportError('bitsandbytes optimizers require bitsandbytes to be installed') from e
215
+ else:
216
+ opt_class = _import_class(opt_info.opt_class)
217
+ else:
218
+ opt_class = opt_info.opt_class
219
+
220
+ # Return class or partial with defaults
221
+ if bind_defaults and opt_info.defaults:
222
+ opt_class = partial(opt_class, **opt_info.defaults)
223
+
224
+ return opt_class
225
+
226
+ def create_optimizer(
227
+ self,
228
+ model_or_params: Union[nn.Module, ParamsT],
229
+ opt: str,
230
+ lr: Optional[float] = None,
231
+ weight_decay: float = 0.,
232
+ momentum: float = 0.9,
233
+ foreach: Optional[bool] = None,
234
+ weight_decay_exclude_1d: bool = True,
235
+ layer_decay: Optional[float] = None,
236
+ param_group_fn: Optional[Callable[[nn.Module], ParamsT]] = None,
237
+ **kwargs: Any,
238
+ ) -> torch.optim.Optimizer:
239
+ """Create an optimizer instance.
240
+
241
+ Args:
242
+ model_or_params: Model or parameters to optimize
243
+ opt: Name of optimizer to create
244
+ lr: Learning rate
245
+ weight_decay: Weight decay factor
246
+ momentum: Momentum factor for applicable optimizers
247
+ foreach: Enable/disable foreach operation
248
+ weight_decay_exclude_1d: Whether to skip weight decay for 1d params (biases and norm affine)
249
+ layer_decay: Layer-wise learning rate decay
250
+ param_group_fn: Optional custom parameter grouping function
251
+ **kwargs: Additional optimizer-specific arguments
252
+
253
+ Returns:
254
+ Configured optimizer instance
255
+
256
+ Raises:
257
+ ValueError: If optimizer not found or configuration invalid
258
+ """
259
+
260
+ # Get parameters to optimize
261
+ if isinstance(model_or_params, nn.Module):
262
+ # Extract parameters from a nn.Module, build param groups w/ weight-decay and/or layer-decay applied
263
+ no_weight_decay = getattr(model_or_params, 'no_weight_decay', lambda: set())()
264
+
265
+ if param_group_fn:
266
+ # run custom fn to generate param groups from nn.Module
267
+ params = param_group_fn(model_or_params)
268
+ elif layer_decay is not None:
269
+ params = param_groups_layer_decay(
270
+ model_or_params,
271
+ weight_decay=weight_decay,
272
+ layer_decay=layer_decay,
273
+ no_weight_decay_list=no_weight_decay,
274
+ weight_decay_exclude_1d=weight_decay_exclude_1d,
275
+ )
276
+ weight_decay = 0.
277
+ elif weight_decay and weight_decay_exclude_1d:
278
+ params = param_groups_weight_decay(
279
+ model_or_params,
280
+ weight_decay=weight_decay,
281
+ no_weight_decay_list=no_weight_decay,
282
+ )
283
+ weight_decay = 0.
284
+ else:
285
+ params = model_or_params.parameters()
286
+ else:
287
+ # pass parameters / parameter groups through to optimizer
288
+ params = model_or_params
289
+
290
+ # Parse optimizer name
291
+ opt_split = opt.lower().split('_')
292
+ opt_name = opt_split[-1]
293
+ use_lookahead = opt_split[0] == 'lookahead' if len(opt_split) > 1 else False
294
+
295
+ opt_info = self.get_optimizer_info(opt_name)
296
+
297
+ # Build optimizer arguments
298
+ opt_args: Dict[str, Any] = {'weight_decay': weight_decay, **kwargs}
299
+
300
+ # Add LR to args, if None optimizer default is used, some optimizers manage LR internally if None.
301
+ if lr is not None:
302
+ opt_args['lr'] = lr
303
+
304
+ # Apply optimizer-specific settings
305
+ if opt_info.defaults:
306
+ for k, v in opt_info.defaults.items():
307
+ opt_args.setdefault(k, v)
308
+
309
+ # timm has always defaulted momentum to 0.9 if optimizer supports momentum, keep for backward compat.
310
+ if opt_info.has_momentum:
311
+ opt_args.setdefault('momentum', momentum)
312
+
313
+ # Remove commonly used kwargs that aren't always supported
314
+ if not opt_info.has_eps:
315
+ opt_args.pop('eps', None)
316
+ if not opt_info.has_betas:
317
+ opt_args.pop('betas', None)
318
+
319
+ if foreach is not None:
320
+ # Explicitly activate or deactivate multi-tensor foreach impl.
321
+ # Not all optimizers support this, and those that do usually default to using
322
+ # multi-tensor impl if foreach is left as default 'None' and can be enabled.
323
+ opt_args.setdefault('foreach', foreach)
324
+
325
+ # Create optimizer
326
+ opt_class = self.get_optimizer_class(opt_info, bind_defaults=False)
327
+ optimizer = opt_class(params, **opt_args)
328
+
329
+ # Apply Lookahead if requested
330
+ if use_lookahead:
331
+ optimizer = Lookahead(optimizer)
332
+
333
+ return optimizer
334
+
335
+
336
+ def _register_sgd_variants(registry: OptimizerRegistry) -> None:
337
+ """Register SGD-based optimizers"""
338
+ sgd_optimizers = [
339
+ OptimInfo(
340
+ name='sgd',
341
+ opt_class=torch.optim.SGD,
342
+ description='torch.Optim Stochastic Gradient Descent (SGD) with Nesterov momentum',
343
+ has_eps=False,
344
+ has_momentum=True,
345
+ defaults={'nesterov': True}
346
+ ),
347
+ OptimInfo(
348
+ name='momentum',
349
+ opt_class=torch.optim.SGD,
350
+ description='torch.Optim Stochastic Gradient Descent (SGD) with classical momentum',
351
+ has_eps=False,
352
+ has_momentum=True,
353
+ defaults={'nesterov': False}
354
+ ),
355
+ OptimInfo(
356
+ name='sgdp',
357
+ opt_class=SGDP,
358
+ description='SGD with built-in projection to unit norm sphere',
359
+ has_momentum=True,
360
+ defaults={'nesterov': True}
361
+ ),
362
+ OptimInfo(
363
+ name='sgdw',
364
+ opt_class=SGDW,
365
+ description='SGD with decoupled weight decay and Nesterov momentum',
366
+ has_eps=False,
367
+ has_momentum=True,
368
+ defaults={'nesterov': True}
369
+ ),
370
+ ]
371
+ for opt in sgd_optimizers:
372
+ registry.register(opt)
373
+
374
+
375
+ def _register_adam_variants(registry: OptimizerRegistry) -> None:
376
+ """Register Adam-based optimizers"""
377
+ adam_optimizers = [
378
+ OptimInfo(
379
+ name='adam',
380
+ opt_class=torch.optim.Adam,
381
+ description='torch.optim.Adam, Adaptive Moment Estimation',
382
+ has_betas=True
383
+ ),
384
+ OptimInfo(
385
+ name='adamw',
386
+ opt_class=torch.optim.AdamW,
387
+ description='torch.optim.AdamW, Adam with decoupled weight decay',
388
+ has_betas=True
389
+ ),
390
+ OptimInfo(
391
+ name='adamwlegacy',
392
+ opt_class=AdamWLegacy,
393
+ description='legacy impl of AdamW that pre-dates inclusion to torch.optim',
394
+ has_betas=True
395
+ ),
396
+ OptimInfo(
397
+ name='adamp',
398
+ opt_class=AdamP,
399
+ description='Adam with built-in projection to unit norm sphere',
400
+ has_betas=True,
401
+ defaults={'wd_ratio': 0.01, 'nesterov': True}
402
+ ),
403
+ OptimInfo(
404
+ name='nadam',
405
+ opt_class=torch.optim.NAdam,
406
+ description='torch.optim.NAdam, Adam with Nesterov momentum',
407
+ has_betas=True
408
+ ),
409
+ OptimInfo(
410
+ name='nadamlegacy',
411
+ opt_class=NAdamLegacy,
412
+ description='legacy impl of NAdam that pre-dates inclusion in torch.optim',
413
+ has_betas=True
414
+ ),
415
+ OptimInfo(
416
+ name='nadamw',
417
+ opt_class=NAdamW,
418
+ description='Adam with Nesterov momentum and decoupled weight decay, mlcommons/algorithmic-efficiency impl',
419
+ has_betas=True
420
+ ),
421
+ OptimInfo(
422
+ name='radam',
423
+ opt_class=torch.optim.RAdam,
424
+ description='torch.optim.RAdam, Rectified Adam with variance adaptation',
425
+ has_betas=True
426
+ ),
427
+ OptimInfo(
428
+ name='radamlegacy',
429
+ opt_class=RAdamLegacy,
430
+ description='legacy impl of RAdam that predates inclusion in torch.optim',
431
+ has_betas=True
432
+ ),
433
+ OptimInfo(
434
+ name='radamw',
435
+ opt_class=torch.optim.RAdam,
436
+ description='torch.optim.RAdamW, Rectified Adam with variance adaptation and decoupled weight decay',
437
+ has_betas=True,
438
+ defaults={'decoupled_weight_decay': True}
439
+ ),
440
+ OptimInfo(
441
+ name='adamax',
442
+ opt_class=torch.optim.Adamax,
443
+ description='torch.optim.Adamax, Adam with infinity norm for more stable updates',
444
+ has_betas=True
445
+ ),
446
+ OptimInfo(
447
+ name='adafactor',
448
+ opt_class=Adafactor,
449
+ description='Memory-efficient implementation of Adam with factored gradients',
450
+ ),
451
+ OptimInfo(
452
+ name='adafactorbv',
453
+ opt_class=AdafactorBigVision,
454
+ description='Big Vision variant of Adafactor with factored gradients, half precision momentum',
455
+ ),
456
+ OptimInfo(
457
+ name='adopt',
458
+ opt_class=Adopt,
459
+ description='Modified Adam that can converge with any β2 with the optimal rate',
460
+ ),
461
+ OptimInfo(
462
+ name='adoptw',
463
+ opt_class=Adopt,
464
+ description='Modified AdamW (decoupled decay) that can converge with any β2 with the optimal rate',
465
+ defaults={'decoupled': True}
466
+ ),
467
+ ]
468
+ for opt in adam_optimizers:
469
+ registry.register(opt)
470
+
471
+
472
+ def _register_lamb_lars(registry: OptimizerRegistry) -> None:
473
+ """Register LAMB and LARS variants"""
474
+ lamb_lars_optimizers = [
475
+ OptimInfo(
476
+ name='lamb',
477
+ opt_class=Lamb,
478
+ description='Layer-wise Adaptive Moments for batch optimization',
479
+ has_betas=True
480
+ ),
481
+ OptimInfo(
482
+ name='lambc',
483
+ opt_class=Lamb,
484
+ description='LAMB with trust ratio clipping for stability',
485
+ has_betas=True,
486
+ defaults={'trust_clip': True}
487
+ ),
488
+ OptimInfo(
489
+ name='lars',
490
+ opt_class=Lars,
491
+ description='Layer-wise Adaptive Rate Scaling',
492
+ has_momentum=True
493
+ ),
494
+ OptimInfo(
495
+ name='larc',
496
+ opt_class=Lars,
497
+ description='LARS with trust ratio clipping for stability',
498
+ has_momentum=True,
499
+ defaults={'trust_clip': True}
500
+ ),
501
+ OptimInfo(
502
+ name='nlars',
503
+ opt_class=Lars,
504
+ description='LARS with Nesterov momentum',
505
+ has_momentum=True,
506
+ defaults={'nesterov': True}
507
+ ),
508
+ OptimInfo(
509
+ name='nlarc',
510
+ opt_class=Lars,
511
+ description='LARS with Nesterov momentum & trust ratio clipping',
512
+ has_momentum=True,
513
+ defaults={'nesterov': True, 'trust_clip': True}
514
+ ),
515
+ ]
516
+ for opt in lamb_lars_optimizers:
517
+ registry.register(opt)
518
+
519
+
520
+ def _register_cautious_optimizers(registry: OptimizerRegistry) -> None:
521
+ cautious_optimizers = [
522
+ OptimInfo(
523
+ name='cadafactor',
524
+ opt_class=Adafactor,
525
+ description='Cautious Adafactor',
526
+ defaults={'caution': True}
527
+ ),
528
+ OptimInfo(
529
+ name='cadafactorbv',
530
+ opt_class=AdafactorBigVision,
531
+ description='Cautious Big Vision Adafactor',
532
+ defaults={'caution': True}
533
+ ),
534
+ OptimInfo(
535
+ name='cadamw',
536
+ opt_class=AdamWLegacy,
537
+ description='Cautious AdamW',
538
+ has_betas=True,
539
+ defaults={'caution': True}
540
+ ),
541
+ OptimInfo(
542
+ name='cadopt',
543
+ opt_class=Adopt,
544
+ description='Cautious Adopt',
545
+ defaults={'caution': True}
546
+ ),
547
+ OptimInfo(
548
+ name='cadoptw',
549
+ opt_class=Adopt,
550
+ description='Cautious AdoptW (decoupled decay)',
551
+ defaults={'decoupled': True, 'caution': True}
552
+ ),
553
+ OptimInfo(
554
+ name='clamb',
555
+ opt_class=Lamb,
556
+ description='Cautious LAMB',
557
+ has_betas=True,
558
+ defaults={'caution': True}
559
+ ),
560
+ OptimInfo(
561
+ name='claprop',
562
+ opt_class=LaProp,
563
+ description='Cautious LaProp',
564
+ has_betas=True,
565
+ defaults={'caution': True}
566
+ ),
567
+ OptimInfo(
568
+ name='clion',
569
+ opt_class=Lion,
570
+ description='Cautious Lion',
571
+ has_eps=False,
572
+ has_betas=True,
573
+ defaults = {'caution': True}
574
+ ),
575
+ OptimInfo(
576
+ name='cmars',
577
+ opt_class=Mars,
578
+ description='Cautious MARS',
579
+ has_betas=True,
580
+ defaults={'caution': True}
581
+ ),
582
+ OptimInfo(
583
+ name='cnadamw',
584
+ opt_class=NAdamW,
585
+ description='Cautious NAdamW',
586
+ has_betas=True,
587
+ defaults={'caution': True}
588
+ ),
589
+ OptimInfo(
590
+ name='crmsproptf',
591
+ opt_class=RMSpropTF,
592
+ description='Cautious TensorFlow-style RMSprop',
593
+ has_momentum=True,
594
+ defaults={'alpha': 0.9, 'caution': True}
595
+ ),
596
+ OptimInfo(
597
+ name='csgdw',
598
+ opt_class=SGDW,
599
+ description='Cautious SGD with decoupled weight decay and Nesterov momentum',
600
+ has_eps=False,
601
+ has_momentum=True,
602
+ defaults={'nesterov': True, 'caution': True}
603
+ ),
604
+ ]
605
+ for opt in cautious_optimizers:
606
+ registry.register(opt)
607
+
608
+ def _register_other_optimizers(registry: OptimizerRegistry) -> None:
609
+ """Register miscellaneous optimizers"""
610
+ other_optimizers = [
611
+ OptimInfo(
612
+ name='adabelief',
613
+ opt_class=AdaBelief,
614
+ description='Adapts learning rate based on gradient prediction error',
615
+ has_betas=True,
616
+ defaults={'rectify': False}
617
+ ),
618
+ OptimInfo(
619
+ name='radabelief',
620
+ opt_class=AdaBelief,
621
+ description='Rectified AdaBelief with variance adaptation',
622
+ has_betas=True,
623
+ defaults={'rectify': True}
624
+ ),
625
+ OptimInfo(
626
+ name='adadelta',
627
+ opt_class=torch.optim.Adadelta,
628
+ description='torch.optim.Adadelta, Adapts learning rates based on running windows of gradients'
629
+ ),
630
+ OptimInfo(
631
+ name='adagrad',
632
+ opt_class=torch.optim.Adagrad,
633
+ description='torch.optim.Adagrad, Adapts learning rates using cumulative squared gradients',
634
+ defaults={'eps': 1e-8}
635
+ ),
636
+ OptimInfo(
637
+ name='adan',
638
+ opt_class=Adan,
639
+ description='Adaptive Nesterov Momentum Algorithm',
640
+ defaults={'no_prox': False},
641
+ has_betas=True,
642
+ num_betas=3
643
+ ),
644
+ OptimInfo(
645
+ name='adanw',
646
+ opt_class=Adan,
647
+ description='Adaptive Nesterov Momentum with decoupled weight decay',
648
+ defaults={'no_prox': True},
649
+ has_betas=True,
650
+ num_betas=3
651
+ ),
652
+ OptimInfo(
653
+ name='adahessian',
654
+ opt_class=Adahessian,
655
+ description='An Adaptive Second Order Optimizer',
656
+ has_betas=True,
657
+ second_order=True,
658
+ ),
659
+ OptimInfo(
660
+ name='laprop',
661
+ opt_class=LaProp,
662
+ description='Separating Momentum and Adaptivity in Adam',
663
+ has_betas=True,
664
+ ),
665
+ OptimInfo(
666
+ name='lion',
667
+ opt_class=Lion,
668
+ description='Evolved Sign Momentum optimizer for improved convergence',
669
+ has_eps=False,
670
+ has_betas=True
671
+ ),
672
+ OptimInfo(
673
+ name='madgrad',
674
+ opt_class=MADGRAD,
675
+ description='Momentum-based Adaptive gradient method',
676
+ has_momentum=True
677
+ ),
678
+ OptimInfo(
679
+ name='madgradw',
680
+ opt_class=MADGRAD,
681
+ description='MADGRAD with decoupled weight decay',
682
+ has_momentum=True,
683
+ defaults={'decoupled_decay': True}
684
+ ),
685
+ OptimInfo(
686
+ name='mars',
687
+ opt_class=Mars,
688
+ description='Unleashing the Power of Variance Reduction for Training Large Models',
689
+ has_betas=True,
690
+ ),
691
+ OptimInfo(
692
+ name='novograd',
693
+ opt_class=NvNovoGrad,
694
+ description='Normalized Adam with L2 norm gradient normalization',
695
+ has_betas=True
696
+ ),
697
+ OptimInfo(
698
+ name='rmsprop',
699
+ opt_class=torch.optim.RMSprop,
700
+ description='torch.optim.RMSprop, Root Mean Square Propagation',
701
+ has_momentum=True,
702
+ defaults={'alpha': 0.9}
703
+ ),
704
+ OptimInfo(
705
+ name='rmsproptf',
706
+ opt_class=RMSpropTF,
707
+ description='TensorFlow-style RMSprop implementation, Root Mean Square Propagation',
708
+ has_momentum=True,
709
+ defaults={'alpha': 0.9}
710
+ ),
711
+ ]
712
+ for opt in other_optimizers:
713
+ registry.register(opt)
714
+ registry.register_foreach_default('lion')
715
+
716
+
717
+ def _register_apex_optimizers(registry: OptimizerRegistry) -> None:
718
+ """Register APEX optimizers (lazy import)"""
719
+ apex_optimizers = [
720
+ OptimInfo(
721
+ name='fusedsgd',
722
+ opt_class='apex.optimizers.FusedSGD',
723
+ description='NVIDIA APEX fused SGD implementation for faster training',
724
+ has_eps=False,
725
+ has_momentum=True,
726
+ defaults={'nesterov': True}
727
+ ),
728
+ OptimInfo(
729
+ name='fusedadam',
730
+ opt_class='apex.optimizers.FusedAdam',
731
+ description='NVIDIA APEX fused Adam implementation',
732
+ has_betas=True,
733
+ defaults={'adam_w_mode': False}
734
+ ),
735
+ OptimInfo(
736
+ name='fusedadamw',
737
+ opt_class='apex.optimizers.FusedAdam',
738
+ description='NVIDIA APEX fused AdamW implementation',
739
+ has_betas=True,
740
+ defaults={'adam_w_mode': True}
741
+ ),
742
+ OptimInfo(
743
+ name='fusedlamb',
744
+ opt_class='apex.optimizers.FusedLAMB',
745
+ description='NVIDIA APEX fused LAMB implementation',
746
+ has_betas=True
747
+ ),
748
+ OptimInfo(
749
+ name='fusednovograd',
750
+ opt_class='apex.optimizers.FusedNovoGrad',
751
+ description='NVIDIA APEX fused NovoGrad implementation',
752
+ has_betas=True,
753
+ defaults={'betas': (0.95, 0.98)}
754
+ ),
755
+ ]
756
+ for opt in apex_optimizers:
757
+ registry.register(opt)
758
+
759
+
760
+ def _register_bnb_optimizers(registry: OptimizerRegistry) -> None:
761
+ """Register bitsandbytes optimizers (lazy import)"""
762
+ bnb_optimizers = [
763
+ OptimInfo(
764
+ name='bnbsgd',
765
+ opt_class='bitsandbytes.optim.SGD',
766
+ description='bitsandbytes SGD',
767
+ has_eps=False,
768
+ has_momentum=True,
769
+ defaults={'nesterov': True}
770
+ ),
771
+ OptimInfo(
772
+ name='bnbsgd8bit',
773
+ opt_class='bitsandbytes.optim.SGD8bit',
774
+ description='bitsandbytes 8-bit SGD with dynamic quantization',
775
+ has_eps=False,
776
+ has_momentum=True,
777
+ defaults={'nesterov': True}
778
+ ),
779
+ OptimInfo(
780
+ name='bnbadam',
781
+ opt_class='bitsandbytes.optim.Adam',
782
+ description='bitsandbytes Adam',
783
+ has_betas=True
784
+ ),
785
+ OptimInfo(
786
+ name='bnbadam8bit',
787
+ opt_class='bitsandbytes.optim.Adam',
788
+ description='bitsandbytes 8-bit Adam with dynamic quantization',
789
+ has_betas=True
790
+ ),
791
+ OptimInfo(
792
+ name='bnbadamw',
793
+ opt_class='bitsandbytes.optim.AdamW',
794
+ description='bitsandbytes AdamW',
795
+ has_betas=True
796
+ ),
797
+ OptimInfo(
798
+ name='bnbadamw8bit',
799
+ opt_class='bitsandbytes.optim.AdamW',
800
+ description='bitsandbytes 8-bit AdamW with dynamic quantization',
801
+ has_betas=True
802
+ ),
803
+ OptimInfo(
804
+ 'bnblion',
805
+ 'bitsandbytes.optim.Lion',
806
+ description='bitsandbytes Lion',
807
+ has_eps=False,
808
+ has_betas=True
809
+ ),
810
+ OptimInfo(
811
+ 'bnblion8bit',
812
+ 'bitsandbytes.optim.Lion8bit',
813
+ description='bitsandbytes 8-bit Lion with dynamic quantization',
814
+ has_eps=False,
815
+ has_betas=True
816
+ ),
817
+ OptimInfo(
818
+ 'bnbademamix',
819
+ 'bitsandbytes.optim.AdEMAMix',
820
+ description='bitsandbytes AdEMAMix',
821
+ has_betas=True,
822
+ num_betas=3,
823
+ ),
824
+ OptimInfo(
825
+ 'bnbademamix8bit',
826
+ 'bitsandbytes.optim.AdEMAMix8bit',
827
+ description='bitsandbytes 8-bit AdEMAMix with dynamic quantization',
828
+ has_betas=True,
829
+ num_betas=3,
830
+ ),
831
+ ]
832
+ for opt in bnb_optimizers:
833
+ registry.register(opt)
834
+
835
+
836
+ default_registry = OptimizerRegistry()
837
+
838
+ def _register_default_optimizers() -> None:
839
+ """Register all default optimizers to the global registry."""
840
+ # Register all optimizer groups
841
+ _register_sgd_variants(default_registry)
842
+ _register_adam_variants(default_registry)
843
+ _register_lamb_lars(default_registry)
844
+ _register_other_optimizers(default_registry)
845
+ _register_apex_optimizers(default_registry)
846
+ _register_bnb_optimizers(default_registry)
847
+ _register_cautious_optimizers(default_registry)
848
+
849
+ # Register aliases
850
+ default_registry.register_alias('nesterov', 'sgd')
851
+ default_registry.register_alias('nesterovw', 'sgdw')
852
+
853
+
854
+ # Initialize default registry
855
+ _register_default_optimizers()
856
+
857
+ # Public API
858
+
859
+ def list_optimizers(
860
+ filter: Union[str, List[str]] = '',
861
+ exclude_filters: Optional[List[str]] = None,
862
+ with_description: bool = False,
863
+ ) -> List[Union[str, Tuple[str, str]]]:
864
+ """List available optimizer names, optionally filtered.
865
+
866
+ List all registered optimizers, with optional filtering using wildcard patterns.
867
+ Optimizers can be filtered using include and exclude patterns, and can optionally
868
+ return descriptions with each optimizer name.
869
+
870
+ Args:
871
+ filter: Wildcard style filter string or list of filter strings
872
+ (e.g., 'adam*' for all Adam variants, or ['adam*', '*8bit'] for
873
+ Adam variants and 8-bit optimizers). Empty string means no filtering.
874
+ exclude_filters: Optional list of wildcard patterns to exclude. For example,
875
+ ['*8bit', 'fused*'] would exclude 8-bit and fused implementations.
876
+ with_description: If True, returns tuples of (name, description) instead of
877
+ just names. Descriptions provide brief explanations of optimizer characteristics.
878
+
879
+ Returns:
880
+ If with_description is False:
881
+ List of optimizer names as strings (e.g., ['adam', 'adamw', ...])
882
+ If with_description is True:
883
+ List of tuples of (name, description) (e.g., [('adam', 'Adaptive Moment...'), ...])
884
+
885
+ Examples:
886
+ >>> list_optimizers()
887
+ ['adam', 'adamw', 'sgd', ...]
888
+
889
+ >>> list_optimizers(['la*', 'nla*']) # List lamb & lars
890
+ ['lamb', 'lambc', 'larc', 'lars', 'nlarc', 'nlars']
891
+
892
+ >>> list_optimizers('*adam*', exclude_filters=['bnb*', 'fused*']) # Exclude bnb & apex adam optimizers
893
+ ['adam', 'adamax', 'adamp', 'adamw', 'nadam', 'nadamw', 'radam']
894
+
895
+ >>> list_optimizers(with_description=True) # Get descriptions
896
+ [('adabelief', 'Adapts learning rate based on gradient prediction error'),
897
+ ('adadelta', 'torch.optim Adadelta, Adapts learning rates based on running windows of gradients'),
898
+ ('adafactor', 'Memory-efficient implementation of Adam with factored gradients'),
899
+ ...]
900
+ """
901
+ return default_registry.list_optimizers(filter, exclude_filters, with_description)
902
+
903
+
904
+ def get_optimizer_info(name: str) -> OptimInfo:
905
+ """Get the OptimInfo for an optimizer.
906
+
907
+ Args:
908
+ name: Name of the optimizer
909
+
910
+ Returns:
911
+ OptimInfo configuration
912
+
913
+ Raises:
914
+ ValueError: If optimizer is not found
915
+ """
916
+ return default_registry.get_optimizer_info(name)
917
+
918
+
919
+ def get_optimizer_class(
920
+ name: str,
921
+ bind_defaults: bool = True,
922
+ ) -> Union[OptimType, OptimizerCallable]:
923
+ """Get optimizer class by name with option to bind default arguments.
924
+
925
+ Retrieves the optimizer class or a partial function with default arguments bound.
926
+ This allows direct instantiation of optimizers with their default configurations
927
+ without going through the full factory.
928
+
929
+ Args:
930
+ name: Name of the optimizer to retrieve (e.g., 'adam', 'sgd')
931
+ bind_defaults: If True, returns a partial function with default arguments from OptimInfo bound.
932
+ If False, returns the raw optimizer class.
933
+
934
+ Returns:
935
+ If bind_defaults is False:
936
+ The optimizer class (e.g., torch.optim.Adam)
937
+ If bind_defaults is True:
938
+ A partial function with default arguments bound
939
+
940
+ Raises:
941
+ ValueError: If optimizer name is not found in registry
942
+
943
+ Examples:
944
+ >>> # Get SGD with nesterov momentum default
945
+ >>> SGD = get_optimizer_class('sgd') # nesterov=True bound
946
+ >>> opt = SGD(model.parameters(), lr=0.1, momentum=0.9)
947
+
948
+ >>> # Get raw optimizer class
949
+ >>> SGD = get_optimizer_class('sgd', bind_defaults=False)
950
+ >>> opt = SGD(model.parameters(), lr=1e-3, momentum=0.9)
951
+
952
+ """
953
+ return default_registry.get_optimizer_class(name, bind_defaults=bind_defaults)
954
+
955
+
956
+ def create_optimizer_v2(
957
+ model_or_params: Union[nn.Module, ParamsT],
958
+ opt: str = 'sgd',
959
+ lr: Optional[float] = None,
960
+ weight_decay: float = 0.,
961
+ momentum: float = 0.9,
962
+ foreach: Optional[bool] = None,
963
+ filter_bias_and_bn: bool = True,
964
+ layer_decay: Optional[float] = None,
965
+ param_group_fn: Optional[Callable[[nn.Module], ParamsT]] = None,
966
+ **kwargs: Any,
967
+ ) -> torch.optim.Optimizer:
968
+ """Create an optimizer instance via timm registry.
969
+
970
+ Creates and configures an optimizer with appropriate parameter groups and settings.
971
+ Supports automatic parameter group creation for weight decay and layer-wise learning
972
+ rates, as well as custom parameter grouping.
973
+
974
+ Args:
975
+ model_or_params: A PyTorch model or an iterable of parameters/parameter groups.
976
+ If a model is provided, parameters will be automatically extracted and grouped
977
+ based on the other arguments.
978
+ opt: Name of the optimizer to create (e.g., 'adam', 'adamw', 'sgd').
979
+ Use list_optimizers() to see available options.
980
+ lr: Learning rate. If None, will use the optimizer's default.
981
+ weight_decay: Weight decay factor. Will be used to create param groups if model_or_params is a model.
982
+ momentum: Momentum factor for optimizers that support it. Only used if the
983
+ chosen optimizer accepts a momentum parameter.
984
+ foreach: Enable/disable foreach (multi-tensor) implementation if available.
985
+ If None, will use optimizer-specific defaults.
986
+ filter_bias_and_bn: If True, bias, norm layer parameters (all 1d params) will not have
987
+ weight decay applied. Only used when model_or_params is a model and
988
+ weight_decay > 0.
989
+ layer_decay: Optional layer-wise learning rate decay factor. If provided,
990
+ learning rates will be scaled by layer_decay^(max_depth - layer_depth).
991
+ Only used when model_or_params is a model.
992
+ param_group_fn: Optional function to create custom parameter groups.
993
+ If provided, other parameter grouping options will be ignored.
994
+ **kwargs: Additional optimizer-specific arguments (e.g., betas for Adam).
995
+
996
+ Returns:
997
+ Configured optimizer instance.
998
+
999
+ Examples:
1000
+ >>> # Basic usage with a model
1001
+ >>> optimizer = create_optimizer_v2(model, 'adamw', lr=1e-3)
1002
+
1003
+ >>> # SGD with momentum and weight decay
1004
+ >>> optimizer = create_optimizer_v2(
1005
+ ... model, 'sgd', lr=0.1, momentum=0.9, weight_decay=1e-4
1006
+ ... )
1007
+
1008
+ >>> # Adam with layer-wise learning rate decay
1009
+ >>> optimizer = create_optimizer_v2(
1010
+ ... model, 'adam', lr=1e-3, layer_decay=0.7
1011
+ ... )
1012
+
1013
+ >>> # Custom parameter groups
1014
+ >>> def group_fn(model):
1015
+ ... return [
1016
+ ... {'params': model.backbone.parameters(), 'lr': 1e-4},
1017
+ ... {'params': model.head.parameters(), 'lr': 1e-3}
1018
+ ... ]
1019
+ >>> optimizer = create_optimizer_v2(
1020
+ ... model, 'sgd', param_group_fn=group_fn
1021
+ ... )
1022
+
1023
+ Note:
1024
+ Parameter group handling precedence:
1025
+ 1. If param_group_fn is provided, it will be used exclusively
1026
+ 2. If layer_decay is provided, layer-wise groups will be created
1027
+ 3. If weight_decay > 0 and filter_bias_and_bn is True, weight decay groups will be created
1028
+ 4. Otherwise, all parameters will be in a single group
1029
+ """
1030
+
1031
+ return default_registry.create_optimizer(
1032
+ model_or_params,
1033
+ opt=opt,
1034
+ lr=lr,
1035
+ weight_decay=weight_decay,
1036
+ momentum=momentum,
1037
+ foreach=foreach,
1038
+ weight_decay_exclude_1d=filter_bias_and_bn,
1039
+ layer_decay=layer_decay,
1040
+ param_group_fn=param_group_fn,
1041
+ **kwargs
1042
+ )
1043
+
1044
+
1045
+ def optimizer_kwargs(cfg):
1046
+ """ cfg/argparse to kwargs helper
1047
+ Convert optimizer args in argparse args or cfg like object to keyword args for updated create fn.
1048
+ """
1049
+ kwargs = dict(
1050
+ opt=cfg.opt,
1051
+ lr=cfg.lr,
1052
+ weight_decay=cfg.weight_decay,
1053
+ momentum=cfg.momentum,
1054
+ )
1055
+ if getattr(cfg, 'opt_eps', None) is not None:
1056
+ kwargs['eps'] = cfg.opt_eps
1057
+ if getattr(cfg, 'opt_betas', None) is not None:
1058
+ kwargs['betas'] = cfg.opt_betas
1059
+ if getattr(cfg, 'layer_decay', None) is not None:
1060
+ kwargs['layer_decay'] = cfg.layer_decay
1061
+ if getattr(cfg, 'opt_args', None) is not None:
1062
+ kwargs.update(cfg.opt_args)
1063
+ if getattr(cfg, 'opt_foreach', None) is not None:
1064
+ kwargs['foreach'] = cfg.opt_foreach
1065
+ return kwargs
1066
+
1067
+
1068
+ def create_optimizer(
1069
+ args,
1070
+ model: Union[nn.Module, ParamsT],
1071
+ filter_bias_and_bn: bool = True,
1072
+ ) -> torch.optim.Optimizer:
1073
+ """ Legacy optimizer factory for backwards compatibility.
1074
+ NOTE: Use create_optimizer_v2 for new code.
1075
+ """
1076
+ return create_optimizer_v2(
1077
+ model,
1078
+ **optimizer_kwargs(cfg=args),
1079
+ filter_bias_and_bn=filter_bias_and_bn,
1080
+ )
1081
+
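The file above wires everything through `OptimizerRegistry`. As a rough, hedged sketch using only classes and signatures defined in this diff (the name `'plainsgd'` and the toy model are illustrative assumptions), registering and instantiating an optimizer through a standalone registry looks roughly like:

import torch
import torch.nn as nn
from timm.optim import OptimInfo, OptimizerRegistry

registry = OptimizerRegistry()
registry.register(OptimInfo(
    name='plainsgd',                     # hypothetical custom name, illustrative only
    opt_class=torch.optim.SGD,
    description='vanilla torch.optim.SGD without Nesterov',
    has_eps=False,
    has_momentum=True,
))

model = nn.Linear(10, 2)  # stand-in model
# weight decay is moved into param groups (1d params excluded) before the optimizer is built
opt = registry.create_optimizer(model, 'plainsgd', lr=0.1, momentum=0.9, weight_decay=1e-4)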
pytorch-image-models/timm/optim/_param_groups.py ADDED
@@ -0,0 +1,131 @@
+ import logging
+ from itertools import islice
+ from typing import Collection, Optional
+
+ from torch import nn as nn
+
+ from timm.models import group_parameters
+
+
+ _logger = logging.getLogger(__name__)
+
+
+ def param_groups_weight_decay(
+         model: nn.Module,
+         weight_decay: float = 1e-5,
+         no_weight_decay_list: Collection[str] = (),
+ ):
+     no_weight_decay_list = set(no_weight_decay_list)
+     decay = []
+     no_decay = []
+     for name, param in model.named_parameters():
+         if not param.requires_grad:
+             continue
+
+         if param.ndim <= 1 or name.endswith(".bias") or name in no_weight_decay_list:
+             no_decay.append(param)
+         else:
+             decay.append(param)
+
+     return [
+         {'params': no_decay, 'weight_decay': 0.},
+         {'params': decay, 'weight_decay': weight_decay}]
+
+
+ def _group(it, size):
+     it = iter(it)
+     return iter(lambda: tuple(islice(it, size)), ())
+
+
+ def auto_group_layers(model, layers_per_group=12, num_groups=None):
+     def _in_head(n, hp):
+         if not hp:
+             return True
+         elif isinstance(hp, (tuple, list)):
+             return any([n.startswith(hpi) for hpi in hp])
+         else:
+             return n.startswith(hp)
+
+     head_prefix = getattr(model, 'pretrained_cfg', {}).get('classifier', None)
+     names_trunk = []
+     names_head = []
+     for n, _ in model.named_parameters():
+         names_head.append(n) if _in_head(n, head_prefix) else names_trunk.append(n)
+
+     # group non-head layers
+     num_trunk_layers = len(names_trunk)
+     if num_groups is not None:
+         layers_per_group = -(num_trunk_layers // -num_groups)
+     names_trunk = list(_group(names_trunk, layers_per_group))
+
+     num_trunk_groups = len(names_trunk)
+     layer_map = {n: i for i, l in enumerate(names_trunk) for n in l}
+     layer_map.update({n: num_trunk_groups for n in names_head})
+     return layer_map
+
+ _layer_map = auto_group_layers  # backward compat
+
+
+ def param_groups_layer_decay(
+         model: nn.Module,
+         weight_decay: float = 0.05,
+         no_weight_decay_list: Collection[str] = (),
+         weight_decay_exclude_1d: bool = True,
+         layer_decay: float = .75,
+         end_layer_decay: Optional[float] = None,
+         verbose: bool = False,
+ ):
+     """
+     Parameter groups for layer-wise lr decay & weight decay
+     Based on BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L58
+     """
+     no_weight_decay_list = set(no_weight_decay_list)
+     param_group_names = {}  # NOTE for debugging
+     param_groups = {}
+
+     if hasattr(model, 'group_matcher'):
+         # FIXME interface needs more work
+         layer_map = group_parameters(model, model.group_matcher(coarse=False), reverse=True)
+     else:
+         # fallback
+         layer_map = auto_group_layers(model)
+     num_layers = max(layer_map.values()) + 1
+     layer_max = num_layers - 1
+     layer_scales = list(layer_decay ** (layer_max - i) for i in range(num_layers))
+
+     for name, param in model.named_parameters():
+         if not param.requires_grad:
+             continue
+
+         # no decay: all 1D parameters and model specific ones
+         if (weight_decay_exclude_1d and param.ndim <= 1) or name in no_weight_decay_list:
+             g_decay = "no_decay"
+             this_decay = 0.
+         else:
+             g_decay = "decay"
+             this_decay = weight_decay
+
+         layer_id = layer_map.get(name, layer_max)
+         group_name = "layer_%d_%s" % (layer_id, g_decay)
+
+         if group_name not in param_groups:
+             this_scale = layer_scales[layer_id]
+             param_group_names[group_name] = {
+                 "lr_scale": this_scale,
+                 "weight_decay": this_decay,
+                 "param_names": [],
+             }
+             param_groups[group_name] = {
+                 "lr_scale": this_scale,
+                 "weight_decay": this_decay,
+                 "params": [],
+             }
+
+         param_group_names[group_name]["param_names"].append(name)
+         param_groups[group_name]["params"].append(param)
+
+     if verbose:
+         import json
+         _logger.info("parameter groups: \n%s" % json.dumps(param_group_names, indent=2))
+
+     return list(param_groups.values())
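A small, hedged illustration of the weight-decay grouping helper above; the tiny `nn.Sequential` model is a placeholder assumption, in practice a timm model is passed:

import torch.nn as nn
from timm.optim import param_groups_weight_decay

model = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8), nn.Linear(8, 2))

# two groups come back: 1d params (biases, norm weights) with weight_decay=0,
# everything else with the requested decay
groups = param_groups_weight_decay(model, weight_decay=0.05)
for g in groups:
    print(g['weight_decay'], len(g['params']))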
pytorch-image-models/timm/optim/adabelief.py ADDED
@@ -0,0 +1,218 @@
1
+ import math
2
+ import torch
3
+ from torch.optim.optimizer import Optimizer
4
+
5
+
6
+ class AdaBelief(Optimizer):
7
+ r"""Implements AdaBelief algorithm. Modified from Adam in PyTorch
8
+
9
+ Arguments:
10
+ params (iterable): iterable of parameters to optimize or dicts defining
11
+ parameter groups
12
+ lr (float, optional): learning rate (default: 1e-3)
13
+ betas (Tuple[float, float], optional): coefficients used for computing
14
+ running averages of gradient and its square (default: (0.9, 0.999))
15
+ eps (float, optional): term added to the denominator to improve
16
+ numerical stability (default: 1e-16)
17
+ weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
18
+ amsgrad (boolean, optional): whether to use the AMSGrad variant of this
19
+ algorithm from the paper `On the Convergence of Adam and Beyond`_
20
+ (default: False)
21
+ decoupled_decay (boolean, optional): (default: True) If set as True, then
22
+ the optimizer uses decoupled weight decay as in AdamW
23
+ fixed_decay (boolean, optional): (default: False) This is used when weight_decouple
24
+ is set as True.
25
+ When fixed_decay == True, the weight decay is performed as
26
+ $W_{new} = W_{old} - W_{old} \times decay$.
27
+ When fixed_decay == False, the weight decay is performed as
28
+ $W_{new} = W_{old} - W_{old} \times decay \times lr$. Note that in this case, the
29
+ weight decay ratio decreases with learning rate (lr).
30
+ rectify (boolean, optional): (default: True) If set as True, then perform the rectified
31
+ update similar to RAdam
32
+ degenerated_to_sgd (boolean, optional) (default:True) If set as True, then perform SGD update
33
+ when variance of gradient is high
34
+ reference: AdaBelief Optimizer, adapting stepsizes by the belief in observed gradients, NeurIPS 2020
35
+
36
+ For a complete table of recommended hyperparameters, see https://github.com/juntang-zhuang/Adabelief-Optimizer'
37
+ For example train/args for EfficientNet see these gists
38
+ - link to train_scipt: https://gist.github.com/juntang-zhuang/0a501dd51c02278d952cf159bc233037
39
+ - link to args.yaml: https://gist.github.com/juntang-zhuang/517ce3c27022b908bb93f78e4f786dc3
40
+ """
+
+     def __init__(
+             self,
+             params,
+             lr=1e-3,
+             betas=(0.9, 0.999),
+             eps=1e-16,
+             weight_decay=0,
+             amsgrad=False,
+             decoupled_decay=True,
+             fixed_decay=False,
+             rectify=True,
+             degenerated_to_sgd=True,
+     ):
+         if not 0.0 <= lr:
+             raise ValueError("Invalid learning rate: {}".format(lr))
+         if not 0.0 <= eps:
+             raise ValueError("Invalid epsilon value: {}".format(eps))
+         if not 0.0 <= betas[0] < 1.0:
+             raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
+         if not 0.0 <= betas[1] < 1.0:
+             raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
+
+         if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict):
+             for param in params:
+                 if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]):
+                     param['buffer'] = [[None, None, None] for _ in range(10)]
+
+         defaults = dict(
+             lr=lr,
+             betas=betas,
+             eps=eps,
+             weight_decay=weight_decay,
+             amsgrad=amsgrad,
+             degenerated_to_sgd=degenerated_to_sgd,
+             decoupled_decay=decoupled_decay,
+             rectify=rectify,
+             fixed_decay=fixed_decay,
+             buffer=[[None, None, None] for _ in range(10)]
+         )
+         super(AdaBelief, self).__init__(params, defaults)
+
+     def __setstate__(self, state):
+         super(AdaBelief, self).__setstate__(state)
+         for group in self.param_groups:
+             group.setdefault('amsgrad', False)
+
+     @torch.no_grad()
+     def reset(self):
+         for group in self.param_groups:
+             for p in group['params']:
+                 state = self.state[p]
+                 amsgrad = group['amsgrad']
+
+                 # State initialization
+                 state['step'] = 0
+                 # Exponential moving average of gradient values
+                 state['exp_avg'] = torch.zeros_like(p)
+
+                 # Exponential moving average of squared gradient values
+                 state['exp_avg_var'] = torch.zeros_like(p)
+                 if amsgrad:
+                     # Maintains max of all exp. moving avg. of sq. grad. values
+                     state['max_exp_avg_var'] = torch.zeros_like(p)
+
+     @torch.no_grad()
+     def step(self, closure=None):
+         """Performs a single optimization step.
+         Arguments:
+             closure (callable, optional): A closure that reevaluates the model
+                 and returns the loss.
+         """
+         loss = None
+         if closure is not None:
+             with torch.enable_grad():
+                 loss = closure()
+
+         for group in self.param_groups:
+             for p in group['params']:
+                 if p.grad is None:
+                     continue
+                 grad = p.grad
+                 if grad.dtype in {torch.float16, torch.bfloat16}:
+                     grad = grad.float()
+                 if grad.is_sparse:
+                     raise RuntimeError(
+                         'AdaBelief does not support sparse gradients, please consider SparseAdam instead')
+
+                 p_fp32 = p
+                 if p.dtype in {torch.float16, torch.bfloat16}:
+                     p_fp32 = p_fp32.float()
+
+                 amsgrad = group['amsgrad']
+                 beta1, beta2 = group['betas']
+                 state = self.state[p]
+                 # State initialization
+                 if len(state) == 0:
+                     state['step'] = 0
+                     # Exponential moving average of gradient values
+                     state['exp_avg'] = torch.zeros_like(p_fp32)
+                     # Exponential moving average of squared gradient values
+                     state['exp_avg_var'] = torch.zeros_like(p_fp32)
+                     if amsgrad:
+                         # Maintains max of all exp. moving avg. of sq. grad. values
+                         state['max_exp_avg_var'] = torch.zeros_like(p_fp32)
+
+                 # perform weight decay, check if decoupled weight decay
+                 if group['decoupled_decay']:
+                     if not group['fixed_decay']:
+                         p_fp32.mul_(1.0 - group['lr'] * group['weight_decay'])
+                     else:
+                         p_fp32.mul_(1.0 - group['weight_decay'])
+                 else:
+                     if group['weight_decay'] != 0:
+                         grad.add_(p_fp32, alpha=group['weight_decay'])
+
+                 # get current state variable
+                 exp_avg, exp_avg_var = state['exp_avg'], state['exp_avg_var']
+
+                 state['step'] += 1
+                 bias_correction1 = 1 - beta1 ** state['step']
+                 bias_correction2 = 1 - beta2 ** state['step']
+
+                 # Update first and second moment running average
+                 exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
+                 grad_residual = grad - exp_avg
+                 exp_avg_var.mul_(beta2).addcmul_(grad_residual, grad_residual, value=1 - beta2)
+
+                 if amsgrad:
+                     max_exp_avg_var = state['max_exp_avg_var']
+                     # Maintains the maximum of all 2nd moment running avg. till now
+                     torch.max(max_exp_avg_var, exp_avg_var.add_(group['eps']), out=max_exp_avg_var)
+
+                     # Use the max. for normalizing running avg. of gradient
+                     denom = (max_exp_avg_var.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
+                 else:
+                     denom = (exp_avg_var.add_(group['eps']).sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
+
+                 # update
+                 if not group['rectify']:
+                     # Default update
+                     step_size = group['lr'] / bias_correction1
+                     p_fp32.addcdiv_(exp_avg, denom, value=-step_size)
+                 else:
+                     # Rectified update, forked from RAdam
+                     buffered = group['buffer'][int(state['step'] % 10)]
+                     if state['step'] == buffered[0]:
+                         num_sma, step_size = buffered[1], buffered[2]
+                     else:
+                         buffered[0] = state['step']
+                         beta2_t = beta2 ** state['step']
+                         num_sma_max = 2 / (1 - beta2) - 1
+                         num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
+                         buffered[1] = num_sma
+
+                         # more conservative since it's an approximated value
+                         if num_sma >= 5:
+                             step_size = math.sqrt(
+                                 (1 - beta2_t) *
+                                 (num_sma - 4) / (num_sma_max - 4) *
+                                 (num_sma - 2) / num_sma *
+                                 num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step'])
+                         elif group['degenerated_to_sgd']:
+                             step_size = 1.0 / (1 - beta1 ** state['step'])
+                         else:
+                             step_size = -1
+                         buffered[2] = step_size
+
+                     if num_sma >= 5:
+                         denom = exp_avg_var.sqrt().add_(group['eps'])
+                         p_fp32.addcdiv_(exp_avg, denom, value=-step_size * group['lr'])
+                     elif step_size > 0:
+                         p_fp32.add_(exp_avg, alpha=-step_size * group['lr'])
+
+                 if p.dtype in {torch.float16, torch.bfloat16}:
+                     p.copy_(p_fp32)
+
+         return loss
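
A minimal usage sketch for the AdaBelief optimizer defined in the file above (assumptions: the class is importable via the module path `timm.optim.adabelief` added here; the toy model and batch are placeholders for illustration only):

import torch
import torch.nn.functional as F
from timm.optim.adabelief import AdaBelief  # module path follows the file added above

model = torch.nn.Linear(10, 2)  # placeholder model
# Defaults give decoupled (AdamW-style) weight decay and the RAdam-style rectified update.
optimizer = AdaBelief(model.parameters(), lr=1e-3, weight_decay=1e-2)

x, y = torch.randn(4, 10), torch.randint(0, 2, (4,))  # placeholder batch
loss = F.cross_entropy(model(x), y)
loss.backward()
optimizer.step()        # second moment is tracked on the "belief" residual (grad - exp_avg)
optimizer.zero_grad()
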
pytorch-image-models/timm/optim/adafactor.py ADDED
@@ -0,0 +1,229 @@
+ """ Adafactor Optimizer
+
+ Lifted from https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py
+
+ Modified by Ross Wightman to fix some issues with factorization dims for non nn.Linear layers
+
+ Original header/copyright below.
+ """
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ #
+ # This source code is licensed under the MIT license found in the
+ # LICENSE file in the root directory of this source tree.
+ import math
+ from typing import Optional, Tuple
+
+ import torch
+
+ from ._types import ParamsT
+
+
+ class Adafactor(torch.optim.Optimizer):
+     """Implements Adafactor algorithm.
+
+     This implementation is based on: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost`
+     (see https://arxiv.org/abs/1804.04235)
+
+     Note that this optimizer internally adjusts the learning rate depending on the
+     *scale_parameter*, *relative_step* and *warmup_init* options.
+
+     To use a manual (external) learning rate schedule you should set `scale_parameter=False` and
+     `relative_step=False`.
+
+     Args:
+         params: iterable of parameters to optimize or dicts defining parameter groups
+         lr: external learning rate
+         eps: regularization constant applied to the squared gradient
+         eps_scale: regularization constant used for the parameter scale
+         clip_threshold: threshold of root-mean-square of final gradient update
+         decay_rate: coefficient used to compute running averages of square gradient
+         betas: optional (beta1, beta2) tuple for compatibility; only beta1, the coefficient for the running average of the gradient, is used
+         weight_decay: weight decay
+         scale_parameter: if True, learning rate is scaled by root-mean-square of parameter
+         warmup_init: time-dependent learning rate computation depends on whether warm-up initialization is being used
+     """
+
+     def __init__(
+             self,
+             params: ParamsT,
+             lr: Optional[float] = None,
+             eps: float = 1e-30,
+             eps_scale: float = 1e-3,
+             clip_threshold: float = 1.0,
+             decay_rate: float = -0.8,
+             betas: Optional[Tuple[float, float]] = None,
+             weight_decay: float = 0.0,
+             scale_parameter: bool = True,
+             warmup_init: bool = False,
+             min_dim_size_to_factor: int = 16,
+             caution: bool = False,
+     ):
+         relative_step = not lr
+         if warmup_init and not relative_step:
+             raise ValueError('warmup_init requires relative_step=True')
+
+         beta1 = None if betas is None else betas[0]  # make it compat with standard betas arg
+         defaults = dict(
+             lr=lr,
+             eps=eps,
+             eps_scale=eps_scale,
+             clip_threshold=clip_threshold,
+             decay_rate=decay_rate,
+             beta1=beta1,
+             weight_decay=weight_decay,
+             scale_parameter=scale_parameter,
+             relative_step=relative_step,
+             warmup_init=warmup_init,
+             min_dim_size_to_factor=min_dim_size_to_factor,
+             caution=caution,
+         )
+         super(Adafactor, self).__init__(params, defaults)
+
+     def __setstate__(self, state):
+         super().__setstate__(state)
+         for group in self.param_groups:
+             group.setdefault('caution', False)
+             group.setdefault('min_dim_size_to_factor', 16)
+
+     @staticmethod
+     def _get_lr(param_group, param_state):
+         if param_group['relative_step']:
+             min_step = 1e-6 * param_state['step'] if param_group['warmup_init'] else 1e-2
+             lr_t = min(min_step, 1.0 / math.sqrt(param_state['step']))
+             param_scale = 1.0
+             if param_group['scale_parameter']:
+                 param_scale = max(param_group['eps_scale'], param_state['RMS'])
+             param_group['lr'] = lr_t * param_scale
+         return param_group['lr']
+
+     @staticmethod
+     def _get_options(param_group, param_shape, min_size_to_factor=16):
+         use_first_moment = param_group['beta1'] is not None
+         factored = None
+         ndim = len(param_shape)
+         # Use a simple heuristic to pick factorization row & col, note other PyTorch impl tend to
+         # always use -2, -1 BUT this will not pick correct dims for convolutions. This is a simple
+         # approach that should work in most cases, compare to the slightly more involved approach
+         # in AdafactorBigVision that sorts dims by size, please report if wrong dims chosen.
+         if ndim > 2 and param_shape[0] > min_size_to_factor and param_shape[1] > min_size_to_factor:
+             # nD convs in torch are ND + 2 dim weights with leading in/out chs
+             factored = 0, 1
+         elif ndim >= 2 and param_shape[-2] > min_size_to_factor and param_shape[-1] > min_size_to_factor:
+             # if the criteria above didn't match, test trailing dims for eligibility as per original impl
+             factored = ndim - 2, ndim - 1
+
+         return factored, use_first_moment
+
+     @staticmethod
+     def _rms(tensor):
+         return tensor.norm(2) / (tensor.numel() ** 0.5)
+
+     def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col, dim_col, dim_row):
+         # from our dim heuristic, always dim_col < dim_row, so col reduction dim for factored row = dim_col
+         r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=dim_col, keepdim=True)).rsqrt_().unsqueeze(dim_row)
+         c_factor = exp_avg_sq_col.unsqueeze(dim_col).rsqrt()
+         return torch.mul(r_factor, c_factor)
+
+     @torch.no_grad()
+     def step(self, closure=None):
+         """Performs a single optimization step.
+         Arguments:
+             closure (callable, optional): A closure that reevaluates the model and returns the loss.
+         """
+         loss = None
+         if closure is not None:
+             with torch.enable_grad():
+                 loss = closure()
+
+         for group in self.param_groups:
+             for p in group['params']:
+                 if p.grad is None:
+                     continue
+                 grad = p.grad
+                 if grad.dtype in {torch.float16, torch.bfloat16}:
+                     grad = grad.float()
+                 if grad.is_sparse:
+                     raise RuntimeError('Adafactor does not support sparse gradients.')
+
+                 state = self.state[p]
+
+                 factored_dims, use_first_moment = self._get_options(
+                     group,
+                     grad.shape,
+                     min_size_to_factor=group['min_dim_size_to_factor'],
+                 )
+                 # State Initialization
+                 if len(state) == 0:
+                     state['step'] = 0
+
+                     if use_first_moment:
+                         # Exponential moving average of gradient values
+                         state['exp_avg'] = torch.zeros_like(grad)
+                     if factored_dims is not None:
+                         dim_col, dim_row = factored_dims
+                         def _remove_dim(shape, dim):
+                             return shape[:dim] + shape[dim + 1:]
+                         state['exp_avg_sq_row'] = torch.zeros(_remove_dim(grad.shape, dim_row)).to(grad)
+                         state['exp_avg_sq_col'] = torch.zeros(_remove_dim(grad.shape, dim_col)).to(grad)
+                     else:
+                         state['exp_avg_sq'] = torch.zeros_like(grad)
+
+                     state['RMS'] = 0
+                 else:
+                     if use_first_moment:
+                         state['exp_avg'] = state['exp_avg'].to(grad)
+                     if factored_dims is not None:
+                         state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad)
+                         state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad)
+                     else:
+                         state['exp_avg_sq'] = state['exp_avg_sq'].to(grad)
+
+                 p_fp32 = p
+                 if p.dtype in {torch.float16, torch.bfloat16}:
+                     p_fp32 = p_fp32.float()
+
+                 state['step'] += 1
+                 state['RMS'] = self._rms(p_fp32)
+                 lr_t = self._get_lr(group, state)
+
+                 beta2t = 1.0 - math.pow(state['step'], group['decay_rate'])
+                 update = grad ** 2 + group['eps']
+                 if factored_dims is not None:
+                     dim_col, dim_row = factored_dims
+                     exp_avg_sq_row = state['exp_avg_sq_row']
+                     exp_avg_sq_col = state['exp_avg_sq_col']
+
+                     exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=dim_row), alpha=1.0 - beta2t)
+                     exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=dim_col), alpha=1.0 - beta2t)
+
+                     # Approximation of exponential moving average of square of gradient
+                     update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col, dim_col, dim_row)
+                     update.mul_(grad)
+                 else:
+                     exp_avg_sq = state['exp_avg_sq']
+
+                     exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t)
+                     update = exp_avg_sq.rsqrt().mul_(grad)
+
+                 update.div_((self._rms(update) / group['clip_threshold']).clamp_(min=1.0))
+                 update.mul_(lr_t)
+
+                 if use_first_moment:
+                     exp_avg = state['exp_avg']
+                     exp_avg.mul_(group['beta1']).add_(update, alpha=1 - group['beta1'])
+                     if group['caution']:
+                         # Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
+                         mask = (exp_avg * grad > 0).to(grad.dtype)
+                         mask.div_(mask.mean().clamp_(min=1e-3))
+                         update = exp_avg * mask
+                     else:
+                         update = exp_avg
+
+                 if group['weight_decay'] != 0:
+                     p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * lr_t)
+
+                 p_fp32.add_(-update)
+                 if p.dtype in {torch.float16, torch.bfloat16}:
+                     p.copy_(p_fp32)
+
+         return loss
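
A minimal usage sketch for the Adafactor optimizer defined in the file above (assumptions: the class is importable via the module path `timm.optim.adafactor` added here; the toy model and batch are placeholders for illustration only). With the default lr=None the relative-step schedule is used; passing an explicit lr sets relative_step=False, so the external value is applied directly, matching the docstring note on manual learning rate schedules.

import torch
import torch.nn.functional as F
from timm.optim.adafactor import Adafactor  # module path follows the file added above

model = torch.nn.Linear(64, 64)  # placeholder model; both weight dims exceed min_dim_size_to_factor=16,
                                 # so the squared-gradient state is stored as factored row/col vectors
optimizer = Adafactor(model.parameters(), lr=1e-3, weight_decay=1e-2)  # explicit lr disables relative_step

x, y = torch.randn(4, 64), torch.randint(0, 64, (4,))  # placeholder batch
loss = F.cross_entropy(model(x), y)
loss.backward()
optimizer.step()
optimizer.zero_grad()

Per the _get_options heuristic above, a 4D conv weight such as shape (256, 128, 3, 3) is factored along the leading in/out channel dims (0, 1), while a 1D bias falls back to the unfactored second moment.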