Add files using upload-large-folder tool
- .gitattributes +1 -0
- added_tokens.json +0 -1
- config.json +3 -3
- generation_config.json +2 -2
- model-00001-of-00004.safetensors +2 -2
- model-00002-of-00004.safetensors +2 -2
- model-00003-of-00004.safetensors +2 -2
- model-00004-of-00004.safetensors +2 -2
- model.safetensors.index.json +137 -137
- special_tokens_map.json +1 -1
- tokenizer.json +0 -0
- tokenizer_config.json +3 -10
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
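Note: the new rule routes tokenizer.json through Git LFS, so a checkout without LFS smudging sees a small pointer file, like the safetensors pointers further down. A minimal Python sketch for detecting such a pointer; the path is assumed to be relative to the repo root:

def is_lfs_pointer(path: str) -> bool:
    # LFS pointers are tiny text files that begin with the spec line.
    with open(path, "rb") as f:
        head = f.read(64)
    return head.startswith(b"version https://git-lfs.github.com/spec/v1")

print(is_lfs_pointer("tokenizer.json"))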
added_tokens.json
CHANGED
@@ -1,7 +1,6 @@
 {
   "</tool_call>": 151658,
   "<tool_call>": 151657,
-  "<|PAD_TOKEN|>": 151665,
   "<|box_end|>": 151649,
   "<|box_start|>": 151648,
   "<|endoftext|>": 151643,
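Note: this removes the stray <|PAD_TOKEN|> entry (id 151665); the other files in this commit repoint padding at <|vision_pad|> (id 151654). A sketch for verifying the change after pulling the repo; "org/model" is a placeholder, since the page does not name the repository, and the expected outputs are assumptions based on the diffs:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("org/model")   # placeholder repo id
print(tok.pad_token, tok.pad_token_id)             # expected: <|vision_pad|> 151654
print(tok.convert_tokens_to_ids("<|PAD_TOKEN|>"))  # expected: None, the token is gone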
config.json
CHANGED
@@ -4,7 +4,6 @@
     "Qwen2ForCausalLM"
   ],
   "attention_dropout": 0.0,
-  "bos_token_id": 151643,
   "eos_token_id": 151645,
   "hidden_act": "silu",
   "hidden_size": 3584,
@@ -16,13 +15,14 @@
   "num_attention_heads": 28,
   "num_hidden_layers": 28,
   "num_key_value_heads": 4,
-  "pad_token_id":
+  "pad_token_id": 151654,
   "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
   "rope_theta": 1000000.0,
   "sliding_window": null,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.
+  "transformers_version": "4.49.0.dev0",
   "unsloth_fixed": true,
   "use_cache": true,
   "use_sliding_window": false,
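Note: the config edits pin pad_token_id to 151654, write rope_scaling out explicitly as null, and drop bos_token_id from the file. A sketch of what this looks like once loaded; "org/model" is again a placeholder repo id:

from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("org/model")  # placeholder repo id
print(cfg.pad_token_id)  # 151654 after this commit
print(cfg.rope_scaling)  # None; the key now appears explicitly in config.json
# bos_token_id no longer appears in the file; any key the JSON omits falls
# back to the Qwen2Config class default when the config is instantiated.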
generation_config.json
CHANGED
@@ -6,10 +6,10 @@
     151643
   ],
   "max_length": 32768,
-  "pad_token_id":
+  "pad_token_id": 151654,
   "repetition_penalty": 1.05,
   "temperature": 0.7,
   "top_k": 20,
   "top_p": 0.8,
-  "transformers_version": "4.
+  "transformers_version": "4.49.0.dev0"
 }
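Note: generation_config.json receives the same pad id and a concrete transformers version. These defaults flow into generate() unless overridden per call; a sketch with a placeholder repo id:

from transformers import GenerationConfig

gen = GenerationConfig.from_pretrained("org/model")  # placeholder repo id
print(gen.pad_token_id)                       # 151654
print(gen.temperature, gen.top_k, gen.top_p)  # 0.7 20 0.8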
model-00001-of-00004.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:9ff3b270cb6b8ce8a059a43678cebaefb7aa2a3b587aadc5a4e2f6c1e4d13a28
+size 4877660776
model-00002-of-00004.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:854698ca4d7581b9fd40f5da9176b010422194ec26cab3abd907ff21e477d1ca
+size 4932751008
model-00003-of-00004.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:6295baeb13806b95509f86175dd3a298bf39630071a4dc2b2d31441c9230b011
+size 4330865200
model-00004-of-00004.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:06006972c3be88e8a44fe21cfe2b0472b130780c781a741f8f90f1fe5ba3aae2
+size 1089994880
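Note: each pointer file records the sha256 and byte size of the real shard stored in LFS, which makes downloads easy to verify. A sketch checking shard 4 of 4 against the values above; the file is assumed to sit in the current directory:

import hashlib

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            h.update(block)
    return h.hexdigest()

expected = "06006972c3be88e8a44fe21cfe2b0472b130780c781a741f8f90f1fe5ba3aae2"
assert sha256_of("model-00004-of-00004.safetensors") == expected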
model.safetensors.index.json
CHANGED
@@ -77,11 +77,11 @@
   "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
   "model.layers.13.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
   "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
-  "model.layers.14.input_layernorm.weight": "model-
-  "model.layers.14.mlp.down_proj.weight": "model-
+  "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+  "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
   "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
-  "model.layers.14.mlp.up_proj.weight": "model-
-  "model.layers.14.post_attention_layernorm.weight": "model-
+  "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+  "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
   "model.layers.14.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
   "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
   "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
@@ -89,54 +89,54 @@
   "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
   "model.layers.14.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
   "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
-  "model.layers.15.input_layernorm.weight": "model-
-  "model.layers.15.mlp.down_proj.weight": "model-
-  "model.layers.15.mlp.gate_proj.weight": "model-
-  "model.layers.15.mlp.up_proj.weight": "model-
-  "model.layers.15.post_attention_layernorm.weight": "model-
-  "model.layers.15.self_attn.k_proj.bias": "model-
-  "model.layers.15.self_attn.k_proj.weight": "model-
-  "model.layers.15.self_attn.o_proj.weight": "model-
-  "model.layers.15.self_attn.q_proj.bias": "model-
-  "model.layers.15.self_attn.q_proj.weight": "model-
-  "model.layers.15.self_attn.v_proj.bias": "model-
-  "model.layers.15.self_attn.v_proj.weight": "model-
-  "model.layers.16.input_layernorm.weight": "model-
-  "model.layers.16.mlp.down_proj.weight": "model-
-  "model.layers.16.mlp.gate_proj.weight": "model-
-  "model.layers.16.mlp.up_proj.weight": "model-
-  "model.layers.16.post_attention_layernorm.weight": "model-
-  "model.layers.16.self_attn.k_proj.bias": "model-
-  "model.layers.16.self_attn.k_proj.weight": "model-
-  "model.layers.16.self_attn.o_proj.weight": "model-
-  "model.layers.16.self_attn.q_proj.bias": "model-
-  "model.layers.16.self_attn.q_proj.weight": "model-
-  "model.layers.16.self_attn.v_proj.bias": "model-
-  "model.layers.16.self_attn.v_proj.weight": "model-
-  "model.layers.17.input_layernorm.weight": "model-
-  "model.layers.17.mlp.down_proj.weight": "model-
-  "model.layers.17.mlp.gate_proj.weight": "model-
-  "model.layers.17.mlp.up_proj.weight": "model-
-  "model.layers.17.post_attention_layernorm.weight": "model-
-  "model.layers.17.self_attn.k_proj.bias": "model-
-  "model.layers.17.self_attn.k_proj.weight": "model-
-  "model.layers.17.self_attn.o_proj.weight": "model-
-  "model.layers.17.self_attn.q_proj.bias": "model-
-  "model.layers.17.self_attn.q_proj.weight": "model-
-  "model.layers.17.self_attn.v_proj.bias": "model-
-  "model.layers.17.self_attn.v_proj.weight": "model-
+  "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
+  "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+  "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+  "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+  "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+  "model.layers.15.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+  "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+  "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+  "model.layers.15.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+  "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+  "model.layers.15.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+  "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+  "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
+  "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+  "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+  "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+  "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+  "model.layers.16.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+  "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+  "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+  "model.layers.16.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+  "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+  "model.layers.16.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+  "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+  "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
+  "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+  "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+  "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+  "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+  "model.layers.17.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+  "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+  "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+  "model.layers.17.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+  "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+  "model.layers.17.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+  "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
   "model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
   "model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-  "model.layers.18.mlp.gate_proj.weight": "model-
-  "model.layers.18.mlp.up_proj.weight": "model-
+  "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+  "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
   "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-  "model.layers.18.self_attn.k_proj.bias": "model-
-  "model.layers.18.self_attn.k_proj.weight": "model-
-  "model.layers.18.self_attn.o_proj.weight": "model-
-  "model.layers.18.self_attn.q_proj.bias": "model-
-  "model.layers.18.self_attn.q_proj.weight": "model-
-  "model.layers.18.self_attn.v_proj.bias": "model-
-  "model.layers.18.self_attn.v_proj.weight": "model-
+  "model.layers.18.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+  "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+  "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+  "model.layers.18.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+  "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+  "model.layers.18.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+  "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
   "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
   "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
   "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
@@ -185,11 +185,11 @@
   "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
   "model.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
   "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
-  "model.layers.22.input_layernorm.weight": "model-
-  "model.layers.22.mlp.down_proj.weight": "model-
+  "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+  "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
   "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
   "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
-  "model.layers.22.post_attention_layernorm.weight": "model-
+  "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
   "model.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
   "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
   "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
@@ -197,66 +197,66 @@
   "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
   "model.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
   "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
-  "model.layers.23.input_layernorm.weight": "model-
-  "model.layers.23.mlp.down_proj.weight": "model-
-  "model.layers.23.mlp.gate_proj.weight": "model-
-  "model.layers.23.mlp.up_proj.weight": "model-
-  "model.layers.23.post_attention_layernorm.weight": "model-
-  "model.layers.23.self_attn.k_proj.bias": "model-
-  "model.layers.23.self_attn.k_proj.weight": "model-
-  "model.layers.23.self_attn.o_proj.weight": "model-
-  "model.layers.23.self_attn.q_proj.bias": "model-
-  "model.layers.23.self_attn.q_proj.weight": "model-
-  "model.layers.23.self_attn.v_proj.bias": "model-
-  "model.layers.23.self_attn.v_proj.weight": "model-
-  "model.layers.24.input_layernorm.weight": "model-
-  "model.layers.24.mlp.down_proj.weight": "model-
-  "model.layers.24.mlp.gate_proj.weight": "model-
-  "model.layers.24.mlp.up_proj.weight": "model-
-  "model.layers.24.post_attention_layernorm.weight": "model-
-  "model.layers.24.self_attn.k_proj.bias": "model-
-  "model.layers.24.self_attn.k_proj.weight": "model-
-  "model.layers.24.self_attn.o_proj.weight": "model-
-  "model.layers.24.self_attn.q_proj.bias": "model-
-  "model.layers.24.self_attn.q_proj.weight": "model-
-  "model.layers.24.self_attn.v_proj.bias": "model-
-  "model.layers.24.self_attn.v_proj.weight": "model-
-  "model.layers.25.input_layernorm.weight": "model-
-  "model.layers.25.mlp.down_proj.weight": "model-
-  "model.layers.25.mlp.gate_proj.weight": "model-
-  "model.layers.25.mlp.up_proj.weight": "model-
-  "model.layers.25.post_attention_layernorm.weight": "model-
-  "model.layers.25.self_attn.k_proj.bias": "model-
-  "model.layers.25.self_attn.k_proj.weight": "model-
-  "model.layers.25.self_attn.o_proj.weight": "model-
-  "model.layers.25.self_attn.q_proj.bias": "model-
-  "model.layers.25.self_attn.q_proj.weight": "model-
-  "model.layers.25.self_attn.v_proj.bias": "model-
-  "model.layers.25.self_attn.v_proj.weight": "model-
-  "model.layers.26.input_layernorm.weight": "model-
-  "model.layers.26.mlp.down_proj.weight": "model-
-  "model.layers.26.mlp.gate_proj.weight": "model-
-  "model.layers.26.mlp.up_proj.weight": "model-
-  "model.layers.26.post_attention_layernorm.weight": "model-
-  "model.layers.26.self_attn.k_proj.bias": "model-
-  "model.layers.26.self_attn.k_proj.weight": "model-
-  "model.layers.26.self_attn.o_proj.weight": "model-
-  "model.layers.26.self_attn.q_proj.bias": "model-
-  "model.layers.26.self_attn.q_proj.weight": "model-
-  "model.layers.26.self_attn.v_proj.bias": "model-
-  "model.layers.26.self_attn.v_proj.weight": "model-
-  "model.layers.27.input_layernorm.weight": "model-
-  "model.layers.27.mlp.down_proj.weight": "model-
-  "model.layers.27.mlp.gate_proj.weight": "model-
-  "model.layers.27.mlp.up_proj.weight": "model-
-  "model.layers.27.post_attention_layernorm.weight": "model-
-  "model.layers.27.self_attn.k_proj.bias": "model-
-  "model.layers.27.self_attn.k_proj.weight": "model-
-  "model.layers.27.self_attn.o_proj.weight": "model-
-  "model.layers.27.self_attn.q_proj.bias": "model-
-  "model.layers.27.self_attn.q_proj.weight": "model-
-  "model.layers.27.self_attn.v_proj.bias": "model-
-  "model.layers.27.self_attn.v_proj.weight": "model-
+  "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+  "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+  "model.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+  "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+  "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+  "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
+  "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+  "model.layers.24.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+  "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.24.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+  "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.24.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+  "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
+  "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+  "model.layers.25.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+  "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.25.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+  "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.25.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+  "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
+  "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+  "model.layers.26.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+  "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.26.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+  "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.26.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+  "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
+  "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+  "model.layers.27.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+  "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.27.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+  "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+  "model.layers.27.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+  "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
   "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
   "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
   "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
@@ -293,11 +293,11 @@
   "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
   "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
   "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
-  "model.layers.6.input_layernorm.weight": "model-
-  "model.layers.6.mlp.down_proj.weight": "model-
-  "model.layers.6.mlp.gate_proj.weight": "model-
-  "model.layers.6.mlp.up_proj.weight": "model-
-  "model.layers.6.post_attention_layernorm.weight": "model-
+  "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
+  "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+  "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+  "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+  "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
   "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
   "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
   "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
@@ -305,30 +305,30 @@
   "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
   "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
   "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
-  "model.layers.7.input_layernorm.weight": "model-
-  "model.layers.7.mlp.down_proj.weight": "model-
-  "model.layers.7.mlp.gate_proj.weight": "model-
-  "model.layers.7.mlp.up_proj.weight": "model-
-  "model.layers.7.post_attention_layernorm.weight": "model-
-  "model.layers.7.self_attn.k_proj.bias": "model-
-  "model.layers.7.self_attn.k_proj.weight": "model-
-  "model.layers.7.self_attn.o_proj.weight": "model-
-  "model.layers.7.self_attn.q_proj.bias": "model-
-  "model.layers.7.self_attn.q_proj.weight": "model-
-  "model.layers.7.self_attn.v_proj.bias": "model-
-  "model.layers.7.self_attn.v_proj.weight": "model-
+  "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
+  "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+  "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+  "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+  "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+  "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+  "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+  "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+  "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+  "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+  "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+  "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
   "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
   "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
   "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
   "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
   "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
-  "model.layers.8.self_attn.k_proj.bias": "model-
-  "model.layers.8.self_attn.k_proj.weight": "model-
-  "model.layers.8.self_attn.o_proj.weight": "model-
-  "model.layers.8.self_attn.q_proj.bias": "model-
-  "model.layers.8.self_attn.q_proj.weight": "model-
-  "model.layers.8.self_attn.v_proj.bias": "model-
-  "model.layers.8.self_attn.v_proj.weight": "model-
+  "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+  "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+  "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+  "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+  "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+  "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+  "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
   "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
   "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
   "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
@@ -341,6 +341,6 @@
   "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
   "model.layers.9.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
   "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
-  "model.norm.weight": "model-
+  "model.norm.weight": "model-00003-of-00004.safetensors"
  }
 }
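Note: the weight_map in this index tells loaders which shard holds each tensor; the 137 changed lines above re-home whole blocks of layers (for example, layers 15-17 now resolve to shard 2 and layers 23-27 to shard 3). A sketch of reading the map directly, assuming the file is local:

import json

with open("model.safetensors.index.json") as f:
    index = json.load(f)

weight_map = index["weight_map"]
print(weight_map["model.layers.15.mlp.up_proj.weight"])  # model-00002-of-00004.safetensors
print(weight_map["model.norm.weight"])                   # model-00003-of-00004.safetensors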
special_tokens_map.json
CHANGED
@@ -22,7 +22,7 @@
     "single_word": false
   },
   "pad_token": {
-    "content": "<|
+    "content": "<|vision_pad|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
tokenizer.json
CHANGED
The diff for this file is too large to render. See raw diff.
tokenizer_config.json
CHANGED
@@ -177,14 +177,6 @@
       "rstrip": false,
       "single_word": false,
       "special": false
-    },
-    "151665": {
-      "content": "<|PAD_TOKEN|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
     }
   },
   "additional_special_tokens": [
@@ -207,8 +199,9 @@
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|im_end|>",
   "errors": "replace",
-  "
-  "
+  "extra_special_tokens": {},
+  "model_max_length": 32768,
+  "pad_token": "<|vision_pad|>",
   "padding_side": "left",
   "split_special_tokens": false,
   "tokenizer_class": "Qwen2Tokenizer",
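Note: tokenizer_config.json now declares the pad token, a model_max_length of 32768, and an empty extra_special_tokens map, alongside the existing left padding_side. A sketch of the resulting batch behavior; "org/model" is a placeholder repo id:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("org/model")  # placeholder repo id
batch = tok(["hi", "a somewhat longer input"], padding=True)
# padding_side is "left", so the shorter row is front-padded with id 151654.
print(batch["input_ids"][0])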