Upload configuration_deepseek.py

Adding configuration_deepseek.py should fix the issue with AutoConfig failing to load this repo with `trust_remote_code=True`.
```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("mlx-community/DeepSeek-R1-4bit", trust_remote_code=True)
print(config)
```
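For context: with `trust_remote_code=True`, `AutoConfig` reads the `auto_map` entry in the repo's `config.json` and then tries to download the Python module it names — `configuration_deepseek.py` here, which the repo lacked (the traceback below goes through exactly this path). A minimal sketch to inspect that mapping, using only the standard `hf_hub_download` API:

```python
import json

from huggingface_hub import hf_hub_download

# Fetch only config.json and print the auto_map that AutoConfig follows.
cfg_path = hf_hub_download("mlx-community/DeepSeek-R1-4bit", "config.json")
with open(cfg_path) as f:
    auto_map = json.load(f).get("auto_map")
print(auto_map)
# Expected to reference configuration_deepseek.DeepseekV3Config,
# i.e. the module this commit adds.
```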
The `AutoConfig.from_pretrained` call above failed with:

```
OSError: mlx-community/DeepSeek-R1-4bit does not appear to have a file named configuration_deepseek.py. Checkout 'https://huggingface.co/mlx-community/DeepSeek-R1-4bit/tree/main' for available files.
```

Full traceback:

```
---------------------------------------------------------------------------
HTTPError Traceback (most recent call last)
File ~/Desktop/gitlab/oura-phase1/.venv/lib/python3.11/site-packages/huggingface_hub/utils/_http.py:406, in hf_raise_for_status(response, endpoint_name)
405 try:
--> 406 response.raise_for_status()
407 except HTTPError as e:
File ~/Desktop/gitlab/oura-phase1/.venv/lib/python3.11/site-packages/requests/models.py:1024, in Response.raise_for_status(self)
1023 if http_error_msg:
-> 1024 raise HTTPError(http_error_msg, response=self)
HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/mlx-community/DeepSeek-R1-4bit/resolve/main/configuration_deepseek.py
The above exception was the direct cause of the following exception:
EntryNotFoundError Traceback (most recent call last)
File ~/Desktop/gitlab/oura-phase1/.venv/lib/python3.11/site-packages/transformers/utils/hub.py:403, in cached_file(path_or_repo_id, filename, cache_dir, force_download, resume_download, proxies, token, revision, local_files_only, subfolder, repo_type, user_agent, _raise_exceptions_for_gated_repo, _raise_exceptions_for_missing_entries, _raise_exceptions_for_connection_errors, _commit_hash, **deprecated_kwargs)
401 try:
402 # Load from URL or cache if already cached
--> 403 resolved_file = hf_hub_download(
404 path_or_repo_id,
405 filename,
406 subfolder=None if len(subfolder) == 0 else subfolder,
407 repo_type=repo_type,
408 revision=revision,
409 cache_dir=cache_dir,
410 user_agent=user_agent,
411 force_download=force_download,
412 proxies=proxies,
413 resume_download=resume_download,
414 token=token,
415 local_files_only=local_files_only,
416 )
417 except GatedRepoError as e:
File ~/Desktop/gitlab/oura-phase1/.venv/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py:114, in validate_hf_hub_args.<locals>._inner_fn(*args, **kwargs)
112 kwargs = smoothly_deprecate_use_auth_token(fn_name=fn.__name__, has_token=has_token, kwargs=kwargs)
--> 114 return fn(*args, **kwargs)
File ~/Desktop/gitlab/oura-phase1/.venv/lib/python3.11/site-packages/huggingface_hub/file_download.py:860, in hf_hub_download(repo_id, filename, subfolder, repo_type, revision, library_name, library_version, cache_dir, local_dir, user_agent, force_download, proxies, etag_timeout, token, local_files_only, headers, endpoint, resume_download, force_filename, local_dir_use_symlinks)
859 else:
--> 860 return _hf_hub_download_to_cache_dir(
861 # Destination
862 cache_dir=cache_dir,
863 # File info
864 repo_id=repo_id,
865 filename=filename,
866 repo_type=repo_type,
867 revision=revision,
868 # HTTP info
869 endpoint=endpoint,
870 etag_timeout=etag_timeout,
871 headers=hf_headers,
872 proxies=proxies,
873 token=token,
874 # Additional options
875 local_files_only=local_files_only,
876 force_download=force_download,
877 )
File ~/Desktop/gitlab/oura-phase1/.venv/lib/python3.11/site-packages/huggingface_hub/file_download.py:923, in _hf_hub_download_to_cache_dir(cache_dir, repo_id, filename, repo_type, revision, endpoint, etag_timeout, headers, proxies, token, local_files_only, force_download)
921 # Try to get metadata (etag, commit_hash, url, size) from the server.
922 # If we can't, a HEAD request error is returned.
--> 923 (url_to_download, etag, commit_hash, expected_size, head_call_error) = _get_metadata_or_catch_error(
924 repo_id=repo_id,
925 filename=filename,
926 repo_type=repo_type,
927 revision=revision,
928 endpoint=endpoint,
929 proxies=proxies,
930 etag_timeout=etag_timeout,
931 headers=headers,
932 token=token,
933 local_files_only=local_files_only,
934 storage_folder=storage_folder,
935 relative_filename=relative_filename,
936 )
938 # etag can be None for several reasons:
939 # 1. we passed local_files_only.
940 # 2. we don't have a connection
(...)
946 # If the specified revision is a commit hash, look inside "snapshots".
947 # If the specified revision is a branch or tag, look inside "refs".
File ~/Desktop/gitlab/oura-phase1/.venv/lib/python3.11/site-packages/huggingface_hub/file_download.py:1374, in _get_metadata_or_catch_error(repo_id, filename, repo_type, revision, endpoint, proxies, etag_timeout, headers, token, local_files_only, relative_filename, storage_folder)
1373 try:
-> 1374 metadata = get_hf_file_metadata(
1375 url=url, proxies=proxies, timeout=etag_timeout, headers=headers, token=token
1376 )
1377 except EntryNotFoundError as http_error:
File ~/Desktop/gitlab/oura-phase1/.venv/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py:114, in validate_hf_hub_args.<locals>._inner_fn(*args, **kwargs)
112 kwargs = smoothly_deprecate_use_auth_token(fn_name=fn.__name__, has_token=has_token, kwargs=kwargs)
--> 114 return fn(*args, **kwargs)
File ~/Desktop/gitlab/oura-phase1/.venv/lib/python3.11/site-packages/huggingface_hub/file_download.py:1294, in get_hf_file_metadata(url, token, proxies, timeout, library_name, library_version, user_agent, headers)
1293 # Retrieve metadata
-> 1294 r = _request_wrapper(
1295 method="HEAD",
1296 url=url,
1297 headers=hf_headers,
1298 allow_redirects=False,
1299 follow_relative_redirects=True,
1300 proxies=proxies,
1301 timeout=timeout,
1302 )
1303 hf_raise_for_status(r)
File ~/Desktop/gitlab/oura-phase1/.venv/lib/python3.11/site-packages/huggingface_hub/file_download.py:278, in _request_wrapper(method, url, follow_relative_redirects, **params)
277 if follow_relative_redirects:
--> 278 response = _request_wrapper(
279 method=method,
280 url=url,
281 follow_relative_redirects=False,
282 **params,
283 )
285 # If redirection, we redirect only relative paths.
286 # This is useful in case of a renamed repository.
File ~/Desktop/gitlab/oura-phase1/.venv/lib/python3.11/site-packages/huggingface_hub/file_download.py:302, in _request_wrapper(method, url, follow_relative_redirects, **params)
301 response = get_session().request(method=method, url=url, **params)
--> 302 hf_raise_for_status(response)
303 return response
File ~/Desktop/gitlab/oura-phase1/.venv/lib/python3.11/site-packages/huggingface_hub/utils/_http.py:417, in hf_raise_for_status(response, endpoint_name)
416 message = f"{response.status_code} Client Error." + "\n\n" + f"Entry Not Found for url: {response.url}."
--> 417 raise _format(EntryNotFoundError, message, response) from e
419 elif error_code == "GatedRepo":
EntryNotFoundError: 404 Client Error. (Request ID: Root=1-679beb26-1fc737e519d50def733753f2;4d93fdf8-c9f1-42d4-a232-a8984aa545de)
Entry Not Found for url: https://huggingface.co/mlx-community/DeepSeek-R1-4bit/resolve/main/configuration_deepseek.py.
The above exception was the direct cause of the following exception:
OSError Traceback (most recent call last)
Cell In[22], line 4
1 from transformers import AutoConfig
3 # config = AutoConfig.from_pretrained("mlx-community/DeepSeek-V3-4bit", trust_remote_code=True)
----> 4 config = AutoConfig.from_pretrained("mlx-community/DeepSeek-R1-4bit", trust_remote_code=True)
6 print(config)
File ~/Desktop/gitlab/oura-phase1/.venv/lib/python3.11/site-packages/transformers/models/auto/configuration_auto.py:1063, in AutoConfig.from_pretrained(cls, pretrained_model_name_or_path, **kwargs)
1061 if has_remote_code and trust_remote_code:
1062 class_ref = config_dict["auto_map"]["AutoConfig"]
-> 1063 config_class = get_class_from_dynamic_module(
1064 class_ref, pretrained_model_name_or_path, code_revision=code_revision, **kwargs
1065 )
1066 if os.path.isdir(pretrained_model_name_or_path):
1067 config_class.register_for_auto_class()
File ~/Desktop/gitlab/oura-phase1/.venv/lib/python3.11/site-packages/transformers/dynamic_module_utils.py:541, in get_class_from_dynamic_module(class_reference, pretrained_model_name_or_path, cache_dir, force_download, resume_download, proxies, token, revision, local_files_only, repo_type, code_revision, **kwargs)
539 code_revision = revision
540 # And lastly we get the class inside our newly created module
--> 541 final_module = get_cached_module_file(
542 repo_id,
543 module_file + ".py",
544 cache_dir=cache_dir,
545 force_download=force_download,
546 resume_download=resume_download,
547 proxies=proxies,
548 token=token,
549 revision=code_revision,
550 local_files_only=local_files_only,
551 repo_type=repo_type,
552 )
553 return get_class_in_module(class_name, final_module, force_reload=force_download)
File ~/Desktop/gitlab/oura-phase1/.venv/lib/python3.11/site-packages/transformers/dynamic_module_utils.py:345, in get_cached_module_file(pretrained_model_name_or_path, module_file, cache_dir, force_download, resume_download, proxies, token, revision, local_files_only, repo_type, _commit_hash, **deprecated_kwargs)
342 new_files = []
343 try:
344 # Load from URL or cache i…
```
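Until this commit landed, one workaround was to assemble the config locally. A sketch under the assumption that the upstream `deepseek-ai/DeepSeek-V3` repo ships the same `configuration_deepseek.py` (the scratch directory name is arbitrary):

```python
from pathlib import Path

from huggingface_hub import hf_hub_download
from transformers import AutoConfig

work = Path("deepseek-r1-4bit-config")  # hypothetical scratch directory
work.mkdir(exist_ok=True)

# Pull the quantized repo's config.json plus the missing remote-code module
# (assumed here to be available in the upstream deepseek-ai/DeepSeek-V3 repo).
hf_hub_download("mlx-community/DeepSeek-R1-4bit", "config.json", local_dir=work)
hf_hub_download("deepseek-ai/DeepSeek-V3", "configuration_deepseek.py", local_dir=work)

# Loading from a local directory resolves configuration_deepseek.py on disk
# instead of fetching it from the Hub.
config = AutoConfig.from_pretrained(work, trust_remote_code=True)
print(config.model_type)  # deepseek_v3
```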
configuration_deepseek.py (+210 −0):
````python
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)

DEEPSEEK_PRETRAINED_CONFIG_ARCHIVE_MAP = {}

class DeepseekV3Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`DeepseekV3Model`]. It is used to instantiate a DeepSeek
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the DeepSeek-V3.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 129280):
            Vocabulary size of the DeepSeek model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`DeepseekV3Model`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        moe_intermediate_size (`int`, *optional*, defaults to 1407):
            Dimension of the MoE representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_nextn_predict_layers (`int`, *optional*, defaults to 1):
            Number of nextn predict layers in the DeepSeekV3 Model.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        n_shared_experts (`int`, *optional*, defaults to None):
            Number of shared experts, None means dense model.
        n_routed_experts (`int`, *optional*, defaults to None):
            Number of routed experts, None means dense model.
        routed_scaling_factor (`float`, *optional*, defaults to 1.0):
            Scaling factor for routed experts.
        topk_method (`str`, *optional*, defaults to `greedy`):
            Topk method used in routed gate.
        n_group (`int`, *optional*, defaults to None):
            Number of groups for routed experts.
        topk_group (`int`, *optional*, defaults to None):
            Number of selected groups for each token (for each token, ensuring the selected experts is only within `topk_group` groups).
        num_experts_per_tok (`int`, *optional*, defaults to None):
            Number of selected experts, None means dense model.
        moe_layer_freq (`int`, *optional*, defaults to 1):
            The frequency of the MoE layer: one expert layer for every `moe_layer_freq - 1` dense layers.
        first_k_dense_replace (`int`, *optional*, defaults to 0):
            Number of dense layers in shallow layers (embed->dense->dense->...->dense->moe->moe...->lm_head).
                                                      \--k dense layers--/
        norm_topk_prob (`bool`, *optional*, defaults to False):
            Whether to normalize the weights of the routed experts.
        scoring_func (`str`, *optional*, defaults to 'softmax'):
            Method of computing expert weights.
        aux_loss_alpha (`float`, *optional*, defaults to 0.001):
            Auxiliary loss weight coefficient.
        seq_aux (`bool`, *optional*, defaults to True):
            Whether to compute the auxiliary loss for each individual sample.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 1):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.
        pretraining_tp (`int`, *optional*, defaults to 1):
            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
            document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
            necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
            issue](https://github.com/pytorch/pytorch/issues/76232).
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
            `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
            `max_position_embeddings` to the expected new maximum.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.

    ```python
    >>> from transformers import DeepseekV3Model, DeepseekV3Config

    >>> # Initializing a Deepseek-V3 style configuration
    >>> configuration = DeepseekV3Config()

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "deepseek_v3"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=129280,
        hidden_size=7168,
        intermediate_size=18432,
        moe_intermediate_size=2048,
        num_hidden_layers=61,
        num_nextn_predict_layers=1,
        num_attention_heads=128,
        num_key_value_heads=128,
        n_shared_experts=1,
        n_routed_experts=256,
        ep_size=1,
        routed_scaling_factor=2.5,
        kv_lora_rank=512,
        q_lora_rank=1536,
        qk_rope_head_dim=64,
        v_head_dim=128,
        qk_nope_head_dim=128,
        topk_method='noaux_tc',
        n_group=8,
        topk_group=4,
        num_experts_per_tok=8,
        moe_layer_freq=1,
        first_k_dense_replace=3,
        norm_topk_prob=True,
        scoring_func='sigmoid',
        aux_loss_alpha=0.001,
        seq_aux=True,
        hidden_act="silu",
        max_position_embeddings=4096,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=None,
        bos_token_id=0,
        eos_token_id=1,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        attention_bias=False,
        attention_dropout=0.0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.moe_intermediate_size = moe_intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_nextn_predict_layers = num_nextn_predict_layers
        self.num_attention_heads = num_attention_heads
        self.n_shared_experts = n_shared_experts
        self.n_routed_experts = n_routed_experts
        self.ep_size = ep_size
        self.routed_scaling_factor = routed_scaling_factor
        self.kv_lora_rank = kv_lora_rank
        self.q_lora_rank = q_lora_rank
        self.qk_rope_head_dim = qk_rope_head_dim
        self.v_head_dim = v_head_dim
        self.qk_nope_head_dim = qk_nope_head_dim
        self.topk_method = topk_method
        self.n_group = n_group
        self.topk_group = topk_group
        self.num_experts_per_tok = num_experts_per_tok
        self.moe_layer_freq = moe_layer_freq
        self.first_k_dense_replace = first_k_dense_replace
        self.norm_topk_prob = norm_topk_prob
        self.scoring_func = scoring_func
        self.aux_loss_alpha = aux_loss_alpha
        self.seq_aux = seq_aux
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
````
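As a quick sanity check that the uploaded module imports cleanly and carries the DeepSeek-V3 defaults from the signature above — a hypothetical smoke test run from the directory containing the file, not part of this commit:

```python
from configuration_deepseek import DeepseekV3Config

# Instantiate with defaults and confirm a few values from the __init__ signature.
cfg = DeepseekV3Config()
assert cfg.model_type == "deepseek_v3"
print(cfg.hidden_size, cfg.num_hidden_layers, cfg.n_routed_experts)
# 7168 61 256
```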