atsuki-yamaguchi committed on
Commit 6d814d0
1 Parent(s): fec4482

Upload folder using huggingface_hub

README.md CHANGED
@@ -1,35 +1,21 @@
 ---
-license: mit
-language:
-- sw
+library_name: peft
 ---
-BLOOM-7B LAPT + Heuristics Swahili
-===
+## Training procedure
 
-## How to use
-```python
-from peft import AutoPeftModelForCausalLM
-from transformers import AutoTokenizer
-
-model = AutoPeftModelForCausalLM.from_pretrained(
-    "atsuki-yamaguchi/bloom-7b1-heuristics-sw"
-)
-tokenizer = AutoTokenizer.from_pretrained(
-    "atsuki-yamaguchi/bloom-7b1-heuristics-sw"
-)
-```
-
-## Citation
-```
-@article{yamaguchi2024empirical,
-  title={An Empirical Study on Cross-lingual Vocabulary Adaptation for Efficient Generative {LLM} Inference},
-  author={Atsuki Yamaguchi and Aline Villavicencio and Nikolaos Aletras},
-  journal={ArXiv},
-  year={2024},
-  volume={abs/2402.10712},
-  url={https://arxiv.org/abs/2402.10712}
-}
-```
-
-## Link
-For more details, please visit https://github.com/gucci-j/llm-cva
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: True
+- load_in_4bit: False
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: fp4
+- bnb_4bit_use_double_quant: False
+- bnb_4bit_compute_dtype: float32
+
+### Framework versions
+
+- PEFT 0.5.0
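For reference, the quantization settings listed in the new README map onto a `transformers.BitsAndBytesConfig` roughly as follows (a minimal sketch, not part of this commit):

```python
# Sketch only: rebuilds the bitsandbytes config listed in the README above.
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,
    load_in_4bit=False,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="fp4",
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype="float32",
)
```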
 
adapter_config.json CHANGED
@@ -1,26 +1 @@
-{
-  "auto_mapping": null,
-  "base_model_name_or_path": "atsuki-yamaguchi/bloom-7b1-heuristics-sw",
-  "bias": "none",
-  "fan_in_fan_out": false,
-  "inference_mode": true,
-  "init_lora_weights": true,
-  "layers_pattern": null,
-  "layers_to_transform": null,
-  "lora_alpha": 32,
-  "lora_dropout": 0.05,
-  "modules_to_save": [
-    "lm_head",
-    "word_embeddings"
-  ],
-  "peft_type": "LORA",
-  "r": 8,
-  "revision": null,
-  "target_modules": [
-    "query_key_value",
-    "dense",
-    "dense_h_to_4h",
-    "dense_4h_to_h"
-  ],
-  "task_type": "CAUSAL_LM"
-}
+{"auto_mapping": null, "base_model_name_or_path": "atsuki-yamaguchi/bloom-7b1-heuristics-sw", "bias": "none", "fan_in_fan_out": false, "inference_mode": true, "init_lora_weights": true, "layers_pattern": null, "layers_to_transform": null, "lora_alpha": 32, "lora_dropout": 0.05, "modules_to_save": ["lm_head", "word_embeddings"], "peft_type": "LORA", "r": 8, "revision": null, "target_modules": ["query_key_value", "dense", "dense_h_to_4h", "dense_4h_to_h"], "task_type": "CAUSAL_LM"}
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/mnt/parscratch/users/acp23ay/private/models/bloom-7b1-sw-heuristics",
+  "_name_or_path": "bigscience/bloom-7b1",
   "apply_residual_connection_post_layernorm": false,
   "architectures": [
     "BloomForCausalLM"
@@ -24,9 +24,9 @@
   "skip_bias_add": true,
   "skip_bias_add_qkv": false,
   "slow_but_exact": false,
-  "torch_dtype": "float32",
+  "torch_dtype": "float64",
   "transformers_version": "4.35.0.dev0",
   "unk_token_id": 0,
   "use_cache": true,
-  "vocab_size": 50257
+  "vocab_size": 50272
 }
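A quick way to confirm the updated `config.json` fields after pulling this commit (illustrative sketch; assumes the repo id used in the README):

```python
# Sketch only: verify the config fields changed by this commit.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("atsuki-yamaguchi/bloom-7b1-heuristics-sw")
print(config.vocab_size)   # expected: 50272 after this commit
print(config.torch_dtype)  # expected: torch.float64 after this commit
```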
model-00001-of-00006.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3d424f582ffd6793354451f3f84f84bd770e095ad471c24ddf59a6d2a9ccad11
-size 4851080624
+oid sha256:bbbdaf7cb3c5b5df1aef0162a6f4c95cd0d6fe93e9f9c9ccf0275e64647371a2
+size 4868970592
model-00002-of-00006.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:546520806fb5c5389bae0242e872fb032d7547023eb1eb857ee4872449fd1ec9
-size 4833124624
+oid sha256:f1d5a252d4336b06dedcb2d031bb21eedc918994e80819af7c5e73bc60446a27
+size 4833124608
model-00003-of-00006.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f70c4f6d76f1e48d3735097bccaf6f07757f28658fe79592f9237d873978d183
+oid sha256:2e9d83dd1018b5a9ee4a1f3b10e8c3ea32faaa3f55bcdb3814adfdba784c94c9
 size 4833124696
model-00004-of-00006.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2c7daedc0cb40d527e3f936031549e1cab23d44e64ec810fe3cbae43815b56f1
+oid sha256:0fe1166e3469b78ab4b657c338e4f82f38d6aa47ccb7bde5b5a4ae9307f1a5b9
 size 4833124696
model-00005-of-00006.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9fbf13230adb7dd5491565178fa219c4c9b894c5e82adaf54a38f579b6091fb3
+oid sha256:32245f63fd874a4df104af2f04dd0348b0e08d978b4d74b185d4b25e11444059
 size 4833124696
model-00006-of-00006.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1abde6fda778b8596cdfb64d6b7e3ea352af724de0ee5fc9e85e09884a1e4986
-size 805520752
+oid sha256:bd4598f40e5289859ddf6987e63341b0466be2390844d049be7a3e8233af23e4
+size 1611041536
model.safetensors.index.json CHANGED
@@ -1,6 +1,6 @@
 {
   "metadata": {
-    "total_size": 24989057024
+    "total_size": 25812467712
   },
   "weight_map": {
     "transformer.h.0.input_layernorm.bias": "model-00001-of-00006.safetensors",
@@ -29,18 +29,18 @@
     "transformer.h.1.self_attention.query_key_value.weight": "model-00001-of-00006.safetensors",
     "transformer.h.10.input_layernorm.bias": "model-00002-of-00006.safetensors",
     "transformer.h.10.input_layernorm.weight": "model-00002-of-00006.safetensors",
-    "transformer.h.10.mlp.dense_4h_to_h.bias": "model-00002-of-00006.safetensors",
-    "transformer.h.10.mlp.dense_4h_to_h.weight": "model-00002-of-00006.safetensors",
-    "transformer.h.10.mlp.dense_h_to_4h.bias": "model-00002-of-00006.safetensors",
-    "transformer.h.10.mlp.dense_h_to_4h.weight": "model-00002-of-00006.safetensors",
-    "transformer.h.10.post_attention_layernorm.bias": "model-00002-of-00006.safetensors",
-    "transformer.h.10.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
-    "transformer.h.10.self_attention.dense.bias": "model-00002-of-00006.safetensors",
-    "transformer.h.10.self_attention.dense.weight": "model-00002-of-00006.safetensors",
-    "transformer.h.10.self_attention.query_key_value.bias": "model-00002-of-00006.safetensors",
-    "transformer.h.10.self_attention.query_key_value.weight": "model-00002-of-00006.safetensors",
-    "transformer.h.11.input_layernorm.bias": "model-00002-of-00006.safetensors",
-    "transformer.h.11.input_layernorm.weight": "model-00002-of-00006.safetensors",
+    "transformer.h.10.mlp.dense_4h_to_h.bias": "model-00003-of-00006.safetensors",
+    "transformer.h.10.mlp.dense_4h_to_h.weight": "model-00003-of-00006.safetensors",
+    "transformer.h.10.mlp.dense_h_to_4h.bias": "model-00003-of-00006.safetensors",
+    "transformer.h.10.mlp.dense_h_to_4h.weight": "model-00003-of-00006.safetensors",
+    "transformer.h.10.post_attention_layernorm.bias": "model-00003-of-00006.safetensors",
+    "transformer.h.10.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+    "transformer.h.10.self_attention.dense.bias": "model-00003-of-00006.safetensors",
+    "transformer.h.10.self_attention.dense.weight": "model-00003-of-00006.safetensors",
+    "transformer.h.10.self_attention.query_key_value.bias": "model-00003-of-00006.safetensors",
+    "transformer.h.10.self_attention.query_key_value.weight": "model-00003-of-00006.safetensors",
+    "transformer.h.11.input_layernorm.bias": "model-00003-of-00006.safetensors",
+    "transformer.h.11.input_layernorm.weight": "model-00003-of-00006.safetensors",
     "transformer.h.11.mlp.dense_4h_to_h.bias": "model-00003-of-00006.safetensors",
     "transformer.h.11.mlp.dense_4h_to_h.weight": "model-00003-of-00006.safetensors",
     "transformer.h.11.mlp.dense_h_to_4h.bias": "model-00003-of-00006.safetensors",
@@ -101,18 +101,18 @@
     "transformer.h.15.self_attention.query_key_value.weight": "model-00003-of-00006.safetensors",
     "transformer.h.16.input_layernorm.bias": "model-00003-of-00006.safetensors",
     "transformer.h.16.input_layernorm.weight": "model-00003-of-00006.safetensors",
-    "transformer.h.16.mlp.dense_4h_to_h.bias": "model-00003-of-00006.safetensors",
-    "transformer.h.16.mlp.dense_4h_to_h.weight": "model-00003-of-00006.safetensors",
-    "transformer.h.16.mlp.dense_h_to_4h.bias": "model-00003-of-00006.safetensors",
-    "transformer.h.16.mlp.dense_h_to_4h.weight": "model-00003-of-00006.safetensors",
-    "transformer.h.16.post_attention_layernorm.bias": "model-00003-of-00006.safetensors",
-    "transformer.h.16.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
-    "transformer.h.16.self_attention.dense.bias": "model-00003-of-00006.safetensors",
-    "transformer.h.16.self_attention.dense.weight": "model-00003-of-00006.safetensors",
-    "transformer.h.16.self_attention.query_key_value.bias": "model-00003-of-00006.safetensors",
-    "transformer.h.16.self_attention.query_key_value.weight": "model-00003-of-00006.safetensors",
-    "transformer.h.17.input_layernorm.bias": "model-00003-of-00006.safetensors",
-    "transformer.h.17.input_layernorm.weight": "model-00003-of-00006.safetensors",
+    "transformer.h.16.mlp.dense_4h_to_h.bias": "model-00004-of-00006.safetensors",
+    "transformer.h.16.mlp.dense_4h_to_h.weight": "model-00004-of-00006.safetensors",
+    "transformer.h.16.mlp.dense_h_to_4h.bias": "model-00004-of-00006.safetensors",
+    "transformer.h.16.mlp.dense_h_to_4h.weight": "model-00004-of-00006.safetensors",
+    "transformer.h.16.post_attention_layernorm.bias": "model-00004-of-00006.safetensors",
+    "transformer.h.16.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+    "transformer.h.16.self_attention.dense.bias": "model-00004-of-00006.safetensors",
+    "transformer.h.16.self_attention.dense.weight": "model-00004-of-00006.safetensors",
+    "transformer.h.16.self_attention.query_key_value.bias": "model-00004-of-00006.safetensors",
+    "transformer.h.16.self_attention.query_key_value.weight": "model-00004-of-00006.safetensors",
+    "transformer.h.17.input_layernorm.bias": "model-00004-of-00006.safetensors",
+    "transformer.h.17.input_layernorm.weight": "model-00004-of-00006.safetensors",
     "transformer.h.17.mlp.dense_4h_to_h.bias": "model-00004-of-00006.safetensors",
     "transformer.h.17.mlp.dense_4h_to_h.weight": "model-00004-of-00006.safetensors",
     "transformer.h.17.mlp.dense_h_to_4h.bias": "model-00004-of-00006.safetensors",
@@ -185,18 +185,18 @@
     "transformer.h.21.self_attention.query_key_value.weight": "model-00004-of-00006.safetensors",
     "transformer.h.22.input_layernorm.bias": "model-00004-of-00006.safetensors",
     "transformer.h.22.input_layernorm.weight": "model-00004-of-00006.safetensors",
-    "transformer.h.22.mlp.dense_4h_to_h.bias": "model-00004-of-00006.safetensors",
-    "transformer.h.22.mlp.dense_4h_to_h.weight": "model-00004-of-00006.safetensors",
-    "transformer.h.22.mlp.dense_h_to_4h.bias": "model-00004-of-00006.safetensors",
-    "transformer.h.22.mlp.dense_h_to_4h.weight": "model-00004-of-00006.safetensors",
-    "transformer.h.22.post_attention_layernorm.bias": "model-00004-of-00006.safetensors",
-    "transformer.h.22.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
-    "transformer.h.22.self_attention.dense.bias": "model-00004-of-00006.safetensors",
-    "transformer.h.22.self_attention.dense.weight": "model-00004-of-00006.safetensors",
-    "transformer.h.22.self_attention.query_key_value.bias": "model-00004-of-00006.safetensors",
-    "transformer.h.22.self_attention.query_key_value.weight": "model-00004-of-00006.safetensors",
-    "transformer.h.23.input_layernorm.bias": "model-00004-of-00006.safetensors",
-    "transformer.h.23.input_layernorm.weight": "model-00004-of-00006.safetensors",
+    "transformer.h.22.mlp.dense_4h_to_h.bias": "model-00005-of-00006.safetensors",
+    "transformer.h.22.mlp.dense_4h_to_h.weight": "model-00005-of-00006.safetensors",
+    "transformer.h.22.mlp.dense_h_to_4h.bias": "model-00005-of-00006.safetensors",
+    "transformer.h.22.mlp.dense_h_to_4h.weight": "model-00005-of-00006.safetensors",
+    "transformer.h.22.post_attention_layernorm.bias": "model-00005-of-00006.safetensors",
+    "transformer.h.22.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+    "transformer.h.22.self_attention.dense.bias": "model-00005-of-00006.safetensors",
+    "transformer.h.22.self_attention.dense.weight": "model-00005-of-00006.safetensors",
+    "transformer.h.22.self_attention.query_key_value.bias": "model-00005-of-00006.safetensors",
+    "transformer.h.22.self_attention.query_key_value.weight": "model-00005-of-00006.safetensors",
+    "transformer.h.23.input_layernorm.bias": "model-00005-of-00006.safetensors",
+    "transformer.h.23.input_layernorm.weight": "model-00005-of-00006.safetensors",
     "transformer.h.23.mlp.dense_4h_to_h.bias": "model-00005-of-00006.safetensors",
     "transformer.h.23.mlp.dense_4h_to_h.weight": "model-00005-of-00006.safetensors",
     "transformer.h.23.mlp.dense_h_to_4h.bias": "model-00005-of-00006.safetensors",
@@ -257,18 +257,18 @@
     "transformer.h.27.self_attention.query_key_value.weight": "model-00005-of-00006.safetensors",
     "transformer.h.28.input_layernorm.bias": "model-00005-of-00006.safetensors",
     "transformer.h.28.input_layernorm.weight": "model-00005-of-00006.safetensors",
-    "transformer.h.28.mlp.dense_4h_to_h.bias": "model-00005-of-00006.safetensors",
-    "transformer.h.28.mlp.dense_4h_to_h.weight": "model-00005-of-00006.safetensors",
-    "transformer.h.28.mlp.dense_h_to_4h.bias": "model-00005-of-00006.safetensors",
-    "transformer.h.28.mlp.dense_h_to_4h.weight": "model-00005-of-00006.safetensors",
-    "transformer.h.28.post_attention_layernorm.bias": "model-00005-of-00006.safetensors",
-    "transformer.h.28.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
-    "transformer.h.28.self_attention.dense.bias": "model-00005-of-00006.safetensors",
-    "transformer.h.28.self_attention.dense.weight": "model-00005-of-00006.safetensors",
-    "transformer.h.28.self_attention.query_key_value.bias": "model-00005-of-00006.safetensors",
-    "transformer.h.28.self_attention.query_key_value.weight": "model-00005-of-00006.safetensors",
-    "transformer.h.29.input_layernorm.bias": "model-00005-of-00006.safetensors",
-    "transformer.h.29.input_layernorm.weight": "model-00005-of-00006.safetensors",
+    "transformer.h.28.mlp.dense_4h_to_h.bias": "model-00006-of-00006.safetensors",
+    "transformer.h.28.mlp.dense_4h_to_h.weight": "model-00006-of-00006.safetensors",
+    "transformer.h.28.mlp.dense_h_to_4h.bias": "model-00006-of-00006.safetensors",
+    "transformer.h.28.mlp.dense_h_to_4h.weight": "model-00006-of-00006.safetensors",
+    "transformer.h.28.post_attention_layernorm.bias": "model-00006-of-00006.safetensors",
+    "transformer.h.28.post_attention_layernorm.weight": "model-00006-of-00006.safetensors",
+    "transformer.h.28.self_attention.dense.bias": "model-00006-of-00006.safetensors",
+    "transformer.h.28.self_attention.dense.weight": "model-00006-of-00006.safetensors",
+    "transformer.h.28.self_attention.query_key_value.bias": "model-00006-of-00006.safetensors",
+    "transformer.h.28.self_attention.query_key_value.weight": "model-00006-of-00006.safetensors",
+    "transformer.h.29.input_layernorm.bias": "model-00006-of-00006.safetensors",
+    "transformer.h.29.input_layernorm.weight": "model-00006-of-00006.safetensors",
     "transformer.h.29.mlp.dense_4h_to_h.bias": "model-00006-of-00006.safetensors",
     "transformer.h.29.mlp.dense_4h_to_h.weight": "model-00006-of-00006.safetensors",
     "transformer.h.29.mlp.dense_h_to_4h.bias": "model-00006-of-00006.safetensors",
@@ -293,18 +293,18 @@
     "transformer.h.3.self_attention.query_key_value.weight": "model-00001-of-00006.safetensors",
     "transformer.h.4.input_layernorm.bias": "model-00001-of-00006.safetensors",
     "transformer.h.4.input_layernorm.weight": "model-00001-of-00006.safetensors",
-    "transformer.h.4.mlp.dense_4h_to_h.bias": "model-00001-of-00006.safetensors",
-    "transformer.h.4.mlp.dense_4h_to_h.weight": "model-00001-of-00006.safetensors",
-    "transformer.h.4.mlp.dense_h_to_4h.bias": "model-00001-of-00006.safetensors",
-    "transformer.h.4.mlp.dense_h_to_4h.weight": "model-00001-of-00006.safetensors",
-    "transformer.h.4.post_attention_layernorm.bias": "model-00001-of-00006.safetensors",
-    "transformer.h.4.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
-    "transformer.h.4.self_attention.dense.bias": "model-00001-of-00006.safetensors",
-    "transformer.h.4.self_attention.dense.weight": "model-00001-of-00006.safetensors",
-    "transformer.h.4.self_attention.query_key_value.bias": "model-00001-of-00006.safetensors",
-    "transformer.h.4.self_attention.query_key_value.weight": "model-00001-of-00006.safetensors",
-    "transformer.h.5.input_layernorm.bias": "model-00001-of-00006.safetensors",
-    "transformer.h.5.input_layernorm.weight": "model-00001-of-00006.safetensors",
+    "transformer.h.4.mlp.dense_4h_to_h.bias": "model-00002-of-00006.safetensors",
+    "transformer.h.4.mlp.dense_4h_to_h.weight": "model-00002-of-00006.safetensors",
+    "transformer.h.4.mlp.dense_h_to_4h.bias": "model-00002-of-00006.safetensors",
+    "transformer.h.4.mlp.dense_h_to_4h.weight": "model-00002-of-00006.safetensors",
+    "transformer.h.4.post_attention_layernorm.bias": "model-00002-of-00006.safetensors",
+    "transformer.h.4.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+    "transformer.h.4.self_attention.dense.bias": "model-00002-of-00006.safetensors",
+    "transformer.h.4.self_attention.dense.weight": "model-00002-of-00006.safetensors",
+    "transformer.h.4.self_attention.query_key_value.bias": "model-00002-of-00006.safetensors",
+    "transformer.h.4.self_attention.query_key_value.weight": "model-00002-of-00006.safetensors",
+    "transformer.h.5.input_layernorm.bias": "model-00002-of-00006.safetensors",
+    "transformer.h.5.input_layernorm.weight": "model-00002-of-00006.safetensors",
     "transformer.h.5.mlp.dense_4h_to_h.bias": "model-00002-of-00006.safetensors",
     "transformer.h.5.mlp.dense_4h_to_h.weight": "model-00002-of-00006.safetensors",
     "transformer.h.5.mlp.dense_h_to_4h.bias": "model-00002-of-00006.safetensors",
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:312c6b6fa9ed61d6c725d48f6dbe5063bf167f4dc44061bea0ba657785df0cdd
+size 856821242
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fa466d51c4a031fb7e93c5159f17265ab7f9924c4f28f9a1b9b9db9f15a2be1f
+size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0d403cd2fa0f47b387bc486ed7a0a0523b4d4d12118efa47d66edfd3fa514ef0
+size 1064
special_tokens_map.json CHANGED
@@ -1,23 +1,5 @@
 {
-  "bos_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "unk_token": "<|endoftext|>"
 }
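The simplified special-tokens map can be sanity-checked by loading the tokenizer (illustrative sketch; assumes the tokenizer files live in this repo):

```python
# Sketch only: all three special tokens resolve to "<|endoftext|>" per the new map.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("atsuki-yamaguchi/bloom-7b1-heuristics-sw")
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.unk_token)
```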
trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b09d1e7bdf41765b9d642ee35ad1141b8783fd94d5cbbf3601de6efa4d68ff3
+size 4664