lewtun (HF staff) committed
Commit 4d8424c
1 parent: 17e11f7

Add HuggingFaceH4/starcoderplus-ift-v4.1 checkpoint

README.md ADDED
@@ -0,0 +1,66 @@
+ ---
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: starcoderplus-ift-v41
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # starcoderplus-ift-v41
+
+ This model is a fine-tuned version of [bigcode-data/starcoderplus](https://huggingface.co/bigcode-data/starcoderplus) on an unspecified dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 1.4720
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 4
+ - eval_batch_size: 4
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - gradient_accumulation_steps: 8
+ - total_train_batch_size: 256
+ - total_eval_batch_size: 32
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.03
+ - num_epochs: 6
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 1.5321 | 0.98 | 15 | 1.2856 |
+ | 1.2071 | 1.97 | 30 | 1.2620 |
+ | 1.0162 | 2.95 | 45 | 1.2853 |
+ | 0.8484 | 4.0 | 61 | 1.3274 |
+ | 0.6981 | 4.98 | 76 | 1.3994 |
+ | 0.5668 | 5.9 | 90 | 1.4720 |
+
+
+ ### Framework versions
+
+ - Transformers 4.28.1
+ - Pytorch 2.0.1+cu118
+ - Datasets 2.12.0
+ - Tokenizers 0.13.3
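Below is a minimal inference sketch (not part of this commit) showing how a checkpoint like this one could be loaded and prompted with the chat tokens it adds (`<|system|>`, `<|user|>`, `<|assistant|>`, `<|end|>`). The repo id is taken from the commit message, and the prompt layout and generation settings are assumptions based on dialogue_template.json below.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed repo id, taken from the commit message; adjust to wherever the checkpoint lives.
model_id = "HuggingFaceH4/starcoderplus-ift-v4.1"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.float16, device_map="auto"
)

# Prompt layout assumed from dialogue_template.json: role token, newline, text, <|end|>.
prompt = (
    "<|system|>\n\n<|end|>\n"
    "<|user|>\nWrite a Python function that reverses a string.\n<|end|>\n"
    "<|assistant|>\n"
)

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(
    **inputs,
    max_new_tokens=128,
    eos_token_id=tokenizer.convert_tokens_to_ids("<|end|>"),
)
# Decode only the newly generated continuation, not the prompt.
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```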
added_tokens.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "<|assistant|>": 49154,
+ "<|end|>": 49155,
+ "<|system|>": 49152,
+ "<|user|>": 49153
+ }
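These four ids sit directly after StarCoder's 49152 base tokens, which is why config.json below declares `vocab_size: 49156`. As a rough sketch of how such tokens are typically registered before fine-tuning (hypothetical code, not part of this commit, with the base model path taken from the README):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "bigcode-data/starcoderplus"  # base checkpoint referenced in the README
tokenizer = AutoTokenizer.from_pretrained(base_id)
tokenizer.add_special_tokens(
    {"additional_special_tokens": ["<|system|>", "<|user|>", "<|assistant|>", "<|end|>"]}
)

model = AutoModelForCausalLM.from_pretrained(base_id)
# Grow the input/output embedding matrices to cover the four new ids (49152-49155).
model.resize_token_embeddings(len(tokenizer))
```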
all_results.json ADDED
@@ -0,0 +1,14 @@
+ {
+ "epoch": 5.9,
+ "eval_loss": 1.4719988107681274,
+ "eval_runtime": 5.2039,
+ "eval_samples": 202,
+ "eval_samples_per_second": 38.817,
+ "eval_steps_per_second": 1.345,
+ "perplexity": 4.35793713301621,
+ "train_loss": 0.9728257921006944,
+ "train_runtime": 2307.7787,
+ "train_samples": 3888,
+ "train_samples_per_second": 10.108,
+ "train_steps_per_second": 0.039
+ }
config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "_name_or_path": "/fsx/h4/checkpoints/starcoderplus-ift",
+ "activation_function": "gelu",
+ "architectures": [
+ "GPTBigCodeForCausalLM"
+ ],
+ "attention_softmax_in_fp32": true,
+ "attn_pdrop": 0.1,
+ "bos_token_id": 0,
+ "embd_pdrop": 0.1,
+ "eos_token_id": 0,
+ "inference_runner": 0,
+ "initializer_range": 0.02,
+ "layer_norm_epsilon": 1e-05,
+ "max_batch_size": null,
+ "max_sequence_length": null,
+ "model_type": "gpt_bigcode",
+ "multi_query": true,
+ "n_embd": 6144,
+ "n_head": 48,
+ "n_inner": 24576,
+ "n_layer": 40,
+ "n_positions": 8192,
+ "pad_key_length": true,
+ "pre_allocate_kv_cache": false,
+ "resid_pdrop": 0.1,
+ "scale_attention_softmax_in_fp32": true,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "torch_dtype": "float16",
+ "transformers_version": "4.28.1",
+ "use_cache": true,
+ "validate_runner_input": true,
+ "vocab_size": 49156
+ }
dialogue_template.json ADDED
@@ -0,0 +1,12 @@
+ {
+ "system": "",
+ "messages": null,
+ "system_token": "<|system|>",
+ "system_format": "standard",
+ "user_token": "<|user|>",
+ "assistant_token": "<|assistant|>",
+ "end_token": "<|end|>",
+ "mid_str": "\n",
+ "end_str": "\n",
+ "extra_end_text": ""
+ }
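The exact prompt-building code lives in the training repo rather than in this commit, but as an illustrative sketch under that caveat, the fields above can be rendered into a prompt string roughly as follows:

```python
from typing import Dict, List

# Template fields copied from dialogue_template.json above.
TEMPLATE = {
    "system": "",
    "system_token": "<|system|>",
    "user_token": "<|user|>",
    "assistant_token": "<|assistant|>",
    "end_token": "<|end|>",
    "mid_str": "\n",
    "end_str": "\n",
}

def render_prompt(messages: List[Dict[str, str]]) -> str:
    """Render a chat into the <|system|>/<|user|>/<|assistant|> format (assumed layout)."""
    t = TEMPLATE
    parts = [t["system_token"] + t["mid_str"] + t["system"] + t["end_token"] + t["end_str"]]
    for msg in messages:
        role_token = t["user_token"] if msg["role"] == "user" else t["assistant_token"]
        parts.append(role_token + t["mid_str"] + msg["content"] + t["end_token"] + t["end_str"])
    # Leave the assistant turn open so the model generates the reply.
    parts.append(t["assistant_token"] + t["mid_str"])
    return "".join(parts)

print(render_prompt([{"role": "user", "content": "Write a haiku about code."}]))
```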
eval_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 5.9,
+ "eval_loss": 1.4719988107681274,
+ "eval_runtime": 5.2039,
+ "eval_samples": 202,
+ "eval_samples_per_second": 38.817,
+ "eval_steps_per_second": 1.345,
+ "perplexity": 4.35793713301621
+ }
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 0,
+ "eos_token_id": 0,
+ "transformers_version": "4.28.1"
+ }
handler.py ADDED
@@ -0,0 +1,49 @@
+ from typing import Any, Dict, List
+
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ from peft import PeftConfig, PeftModel
+
+
+ class EndpointHandler:
+     def __init__(self, path=""):
+         # load tokenizer and model from path
+         self.tokenizer = AutoTokenizer.from_pretrained(path)
+         try:
+             config = PeftConfig.from_pretrained(path)
+             model = AutoModelForCausalLM.from_pretrained(
+                 config.base_model_name_or_path,
+                 return_dict=True,
+                 load_in_8bit=True,
+                 device_map="auto",
+                 torch_dtype=torch.float16,
+                 trust_remote_code=True,
+             )
+             model.resize_token_embeddings(len(self.tokenizer))
+             model = PeftModel.from_pretrained(model, path)
+         except Exception:
+             # not a PEFT checkpoint: load the full model directly from path
+             model = AutoModelForCausalLM.from_pretrained(
+                 path, device_map="auto", load_in_8bit=True, torch_dtype=torch.float16, trust_remote_code=True
+             )
+         self.model = model
+         self.device = "cuda" if torch.cuda.is_available() else "cpu"
+
+     def __call__(self, data: Dict[str, Any]) -> List[Dict[str, str]]:
+         # process input
+         inputs = data.pop("inputs", data)
+         parameters = data.pop("parameters", None)
+
+         # preprocess
+         inputs = self.tokenizer(inputs, return_tensors="pt").to(self.device)
+
+         # pass inputs with all kwargs in data
+         if parameters is not None:
+             outputs = self.model.generate(**inputs, **parameters)
+         else:
+             outputs = self.model.generate(**inputs)
+
+         # postprocess the prediction
+         prediction = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
+         return [{"generated_text": prediction}]
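For a local smoke test (hypothetical, not part of the commit), the handler can be called with the same `inputs`/`parameters` payload shape that the Inference Endpoints runtime would send to `__call__`:

```python
# Hypothetical local test; assumes the repo has been cloned to ./starcoderplus-ift-v4.1,
# that handler.py is importable from the working directory, and that a GPU plus
# bitsandbytes are available for the 8-bit load.
from handler import EndpointHandler

handler = EndpointHandler(path="./starcoderplus-ift-v4.1")
result = handler(
    {
        "inputs": "<|user|>\nExplain what a generator is in Python.\n<|end|>\n<|assistant|>\n",
        "parameters": {"max_new_tokens": 64, "do_sample": False},
    }
)
print(result[0]["generated_text"])
```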
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model-00001-of-00004.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a89629dc523d03530fded1baeb98a27df40876a39a432154324a1b8547e7e9f1
+ size 9958034229
pytorch_model-00002-of-00004.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a74b574439dcd841d6434196cbf227d34a580a6d190cfc8dc20f6b4c9e2d653c
+ size 9857383847
pytorch_model-00003-of-00004.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:79d2764fedd8a64518187295c810eea9743ff7b70aaab915fefaa576c12aeaa6
+ size 9857383847
pytorch_model-00004-of-00004.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:322b505787c3d18c5ac9b9fca91f913dcf6e5b5f4c449f1115cc378bfc02aef9
+ size 1966357413
pytorch_model.bin.index.json ADDED
@@ -0,0 +1,492 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 31638990848
4
+ },
5
+ "weight_map": {
6
+ "lm_head.weight": "pytorch_model-00004-of-00004.bin",
7
+ "transformer.h.0.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
8
+ "transformer.h.0.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
9
+ "transformer.h.0.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
10
+ "transformer.h.0.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
11
+ "transformer.h.0.ln_1.bias": "pytorch_model-00001-of-00004.bin",
12
+ "transformer.h.0.ln_1.weight": "pytorch_model-00001-of-00004.bin",
13
+ "transformer.h.0.ln_2.bias": "pytorch_model-00001-of-00004.bin",
14
+ "transformer.h.0.ln_2.weight": "pytorch_model-00001-of-00004.bin",
15
+ "transformer.h.0.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
16
+ "transformer.h.0.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
17
+ "transformer.h.0.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
18
+ "transformer.h.0.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
19
+ "transformer.h.1.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
20
+ "transformer.h.1.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
21
+ "transformer.h.1.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
22
+ "transformer.h.1.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
23
+ "transformer.h.1.ln_1.bias": "pytorch_model-00001-of-00004.bin",
24
+ "transformer.h.1.ln_1.weight": "pytorch_model-00001-of-00004.bin",
25
+ "transformer.h.1.ln_2.bias": "pytorch_model-00001-of-00004.bin",
26
+ "transformer.h.1.ln_2.weight": "pytorch_model-00001-of-00004.bin",
27
+ "transformer.h.1.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
28
+ "transformer.h.1.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
29
+ "transformer.h.1.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
30
+ "transformer.h.1.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
31
+ "transformer.h.10.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
32
+ "transformer.h.10.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
33
+ "transformer.h.10.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
34
+ "transformer.h.10.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
35
+ "transformer.h.10.ln_1.bias": "pytorch_model-00001-of-00004.bin",
36
+ "transformer.h.10.ln_1.weight": "pytorch_model-00001-of-00004.bin",
37
+ "transformer.h.10.ln_2.bias": "pytorch_model-00001-of-00004.bin",
38
+ "transformer.h.10.ln_2.weight": "pytorch_model-00001-of-00004.bin",
39
+ "transformer.h.10.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
40
+ "transformer.h.10.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
41
+ "transformer.h.10.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
42
+ "transformer.h.10.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
43
+ "transformer.h.11.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
44
+ "transformer.h.11.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
45
+ "transformer.h.11.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
46
+ "transformer.h.11.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
47
+ "transformer.h.11.ln_1.bias": "pytorch_model-00001-of-00004.bin",
48
+ "transformer.h.11.ln_1.weight": "pytorch_model-00001-of-00004.bin",
49
+ "transformer.h.11.ln_2.bias": "pytorch_model-00001-of-00004.bin",
50
+ "transformer.h.11.ln_2.weight": "pytorch_model-00001-of-00004.bin",
51
+ "transformer.h.11.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
52
+ "transformer.h.11.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
53
+ "transformer.h.11.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
54
+ "transformer.h.11.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
55
+ "transformer.h.12.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
56
+ "transformer.h.12.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
57
+ "transformer.h.12.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
58
+ "transformer.h.12.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
59
+ "transformer.h.12.ln_1.bias": "pytorch_model-00001-of-00004.bin",
60
+ "transformer.h.12.ln_1.weight": "pytorch_model-00001-of-00004.bin",
61
+ "transformer.h.12.ln_2.bias": "pytorch_model-00001-of-00004.bin",
62
+ "transformer.h.12.ln_2.weight": "pytorch_model-00001-of-00004.bin",
63
+ "transformer.h.12.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
64
+ "transformer.h.12.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
65
+ "transformer.h.12.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
66
+ "transformer.h.12.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
67
+ "transformer.h.13.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
68
+ "transformer.h.13.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
69
+ "transformer.h.13.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
70
+ "transformer.h.13.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
71
+ "transformer.h.13.ln_1.bias": "pytorch_model-00002-of-00004.bin",
72
+ "transformer.h.13.ln_1.weight": "pytorch_model-00002-of-00004.bin",
73
+ "transformer.h.13.ln_2.bias": "pytorch_model-00002-of-00004.bin",
74
+ "transformer.h.13.ln_2.weight": "pytorch_model-00002-of-00004.bin",
75
+ "transformer.h.13.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
76
+ "transformer.h.13.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
77
+ "transformer.h.13.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
78
+ "transformer.h.13.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
79
+ "transformer.h.14.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
80
+ "transformer.h.14.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
81
+ "transformer.h.14.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
82
+ "transformer.h.14.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
83
+ "transformer.h.14.ln_1.bias": "pytorch_model-00002-of-00004.bin",
84
+ "transformer.h.14.ln_1.weight": "pytorch_model-00002-of-00004.bin",
85
+ "transformer.h.14.ln_2.bias": "pytorch_model-00002-of-00004.bin",
86
+ "transformer.h.14.ln_2.weight": "pytorch_model-00002-of-00004.bin",
87
+ "transformer.h.14.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
88
+ "transformer.h.14.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
89
+ "transformer.h.14.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
90
+ "transformer.h.14.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
91
+ "transformer.h.15.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
92
+ "transformer.h.15.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
93
+ "transformer.h.15.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
94
+ "transformer.h.15.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
95
+ "transformer.h.15.ln_1.bias": "pytorch_model-00002-of-00004.bin",
96
+ "transformer.h.15.ln_1.weight": "pytorch_model-00002-of-00004.bin",
97
+ "transformer.h.15.ln_2.bias": "pytorch_model-00002-of-00004.bin",
98
+ "transformer.h.15.ln_2.weight": "pytorch_model-00002-of-00004.bin",
99
+ "transformer.h.15.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
100
+ "transformer.h.15.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
101
+ "transformer.h.15.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
102
+ "transformer.h.15.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
103
+ "transformer.h.16.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
104
+ "transformer.h.16.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
105
+ "transformer.h.16.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
106
+ "transformer.h.16.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
107
+ "transformer.h.16.ln_1.bias": "pytorch_model-00002-of-00004.bin",
108
+ "transformer.h.16.ln_1.weight": "pytorch_model-00002-of-00004.bin",
109
+ "transformer.h.16.ln_2.bias": "pytorch_model-00002-of-00004.bin",
110
+ "transformer.h.16.ln_2.weight": "pytorch_model-00002-of-00004.bin",
111
+ "transformer.h.16.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
112
+ "transformer.h.16.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
113
+ "transformer.h.16.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
114
+ "transformer.h.16.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
115
+ "transformer.h.17.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
116
+ "transformer.h.17.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
117
+ "transformer.h.17.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
118
+ "transformer.h.17.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
119
+ "transformer.h.17.ln_1.bias": "pytorch_model-00002-of-00004.bin",
120
+ "transformer.h.17.ln_1.weight": "pytorch_model-00002-of-00004.bin",
121
+ "transformer.h.17.ln_2.bias": "pytorch_model-00002-of-00004.bin",
122
+ "transformer.h.17.ln_2.weight": "pytorch_model-00002-of-00004.bin",
123
+ "transformer.h.17.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
124
+ "transformer.h.17.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
125
+ "transformer.h.17.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
126
+ "transformer.h.17.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
127
+ "transformer.h.18.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
128
+ "transformer.h.18.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
129
+ "transformer.h.18.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
130
+ "transformer.h.18.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
131
+ "transformer.h.18.ln_1.bias": "pytorch_model-00002-of-00004.bin",
132
+ "transformer.h.18.ln_1.weight": "pytorch_model-00002-of-00004.bin",
133
+ "transformer.h.18.ln_2.bias": "pytorch_model-00002-of-00004.bin",
134
+ "transformer.h.18.ln_2.weight": "pytorch_model-00002-of-00004.bin",
135
+ "transformer.h.18.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
136
+ "transformer.h.18.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
137
+ "transformer.h.18.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
138
+ "transformer.h.18.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
139
+ "transformer.h.19.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
140
+ "transformer.h.19.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
141
+ "transformer.h.19.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
142
+ "transformer.h.19.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
143
+ "transformer.h.19.ln_1.bias": "pytorch_model-00002-of-00004.bin",
144
+ "transformer.h.19.ln_1.weight": "pytorch_model-00002-of-00004.bin",
145
+ "transformer.h.19.ln_2.bias": "pytorch_model-00002-of-00004.bin",
146
+ "transformer.h.19.ln_2.weight": "pytorch_model-00002-of-00004.bin",
147
+ "transformer.h.19.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
148
+ "transformer.h.19.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
149
+ "transformer.h.19.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
150
+ "transformer.h.19.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
151
+ "transformer.h.2.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
152
+ "transformer.h.2.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
153
+ "transformer.h.2.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
154
+ "transformer.h.2.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
155
+ "transformer.h.2.ln_1.bias": "pytorch_model-00001-of-00004.bin",
156
+ "transformer.h.2.ln_1.weight": "pytorch_model-00001-of-00004.bin",
157
+ "transformer.h.2.ln_2.bias": "pytorch_model-00001-of-00004.bin",
158
+ "transformer.h.2.ln_2.weight": "pytorch_model-00001-of-00004.bin",
159
+ "transformer.h.2.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
160
+ "transformer.h.2.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
161
+ "transformer.h.2.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
162
+ "transformer.h.2.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
163
+ "transformer.h.20.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
164
+ "transformer.h.20.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
165
+ "transformer.h.20.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
166
+ "transformer.h.20.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
167
+ "transformer.h.20.ln_1.bias": "pytorch_model-00002-of-00004.bin",
168
+ "transformer.h.20.ln_1.weight": "pytorch_model-00002-of-00004.bin",
169
+ "transformer.h.20.ln_2.bias": "pytorch_model-00002-of-00004.bin",
170
+ "transformer.h.20.ln_2.weight": "pytorch_model-00002-of-00004.bin",
171
+ "transformer.h.20.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
172
+ "transformer.h.20.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
173
+ "transformer.h.20.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
174
+ "transformer.h.20.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
175
+ "transformer.h.21.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
176
+ "transformer.h.21.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
177
+ "transformer.h.21.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
178
+ "transformer.h.21.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
179
+ "transformer.h.21.ln_1.bias": "pytorch_model-00002-of-00004.bin",
180
+ "transformer.h.21.ln_1.weight": "pytorch_model-00002-of-00004.bin",
181
+ "transformer.h.21.ln_2.bias": "pytorch_model-00002-of-00004.bin",
182
+ "transformer.h.21.ln_2.weight": "pytorch_model-00002-of-00004.bin",
183
+ "transformer.h.21.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
184
+ "transformer.h.21.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
185
+ "transformer.h.21.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
186
+ "transformer.h.21.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
187
+ "transformer.h.22.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
188
+ "transformer.h.22.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
189
+ "transformer.h.22.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
190
+ "transformer.h.22.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
191
+ "transformer.h.22.ln_1.bias": "pytorch_model-00002-of-00004.bin",
192
+ "transformer.h.22.ln_1.weight": "pytorch_model-00002-of-00004.bin",
193
+ "transformer.h.22.ln_2.bias": "pytorch_model-00002-of-00004.bin",
194
+ "transformer.h.22.ln_2.weight": "pytorch_model-00002-of-00004.bin",
195
+ "transformer.h.22.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
196
+ "transformer.h.22.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
197
+ "transformer.h.22.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
198
+ "transformer.h.22.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
199
+ "transformer.h.23.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
200
+ "transformer.h.23.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
201
+ "transformer.h.23.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
202
+ "transformer.h.23.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
203
+ "transformer.h.23.ln_1.bias": "pytorch_model-00002-of-00004.bin",
204
+ "transformer.h.23.ln_1.weight": "pytorch_model-00002-of-00004.bin",
205
+ "transformer.h.23.ln_2.bias": "pytorch_model-00002-of-00004.bin",
206
+ "transformer.h.23.ln_2.weight": "pytorch_model-00002-of-00004.bin",
207
+ "transformer.h.23.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
208
+ "transformer.h.23.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
209
+ "transformer.h.23.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
210
+ "transformer.h.23.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
211
+ "transformer.h.24.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
212
+ "transformer.h.24.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
213
+ "transformer.h.24.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
214
+ "transformer.h.24.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
215
+ "transformer.h.24.ln_1.bias": "pytorch_model-00002-of-00004.bin",
216
+ "transformer.h.24.ln_1.weight": "pytorch_model-00002-of-00004.bin",
217
+ "transformer.h.24.ln_2.bias": "pytorch_model-00002-of-00004.bin",
218
+ "transformer.h.24.ln_2.weight": "pytorch_model-00002-of-00004.bin",
219
+ "transformer.h.24.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
220
+ "transformer.h.24.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
221
+ "transformer.h.24.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
222
+ "transformer.h.24.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
223
+ "transformer.h.25.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
224
+ "transformer.h.25.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
225
+ "transformer.h.25.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
226
+ "transformer.h.25.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
227
+ "transformer.h.25.ln_1.bias": "pytorch_model-00002-of-00004.bin",
228
+ "transformer.h.25.ln_1.weight": "pytorch_model-00002-of-00004.bin",
229
+ "transformer.h.25.ln_2.bias": "pytorch_model-00002-of-00004.bin",
230
+ "transformer.h.25.ln_2.weight": "pytorch_model-00002-of-00004.bin",
231
+ "transformer.h.25.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
232
+ "transformer.h.25.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
233
+ "transformer.h.25.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
234
+ "transformer.h.25.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
235
+ "transformer.h.26.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
236
+ "transformer.h.26.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
237
+ "transformer.h.26.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
238
+ "transformer.h.26.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
239
+ "transformer.h.26.ln_1.bias": "pytorch_model-00003-of-00004.bin",
240
+ "transformer.h.26.ln_1.weight": "pytorch_model-00003-of-00004.bin",
241
+ "transformer.h.26.ln_2.bias": "pytorch_model-00003-of-00004.bin",
242
+ "transformer.h.26.ln_2.weight": "pytorch_model-00003-of-00004.bin",
243
+ "transformer.h.26.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
244
+ "transformer.h.26.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
245
+ "transformer.h.26.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
246
+ "transformer.h.26.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
247
+ "transformer.h.27.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
248
+ "transformer.h.27.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
249
+ "transformer.h.27.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
250
+ "transformer.h.27.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
251
+ "transformer.h.27.ln_1.bias": "pytorch_model-00003-of-00004.bin",
252
+ "transformer.h.27.ln_1.weight": "pytorch_model-00003-of-00004.bin",
253
+ "transformer.h.27.ln_2.bias": "pytorch_model-00003-of-00004.bin",
254
+ "transformer.h.27.ln_2.weight": "pytorch_model-00003-of-00004.bin",
255
+ "transformer.h.27.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
256
+ "transformer.h.27.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
257
+ "transformer.h.27.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
258
+ "transformer.h.27.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
259
+ "transformer.h.28.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
260
+ "transformer.h.28.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
261
+ "transformer.h.28.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
262
+ "transformer.h.28.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
263
+ "transformer.h.28.ln_1.bias": "pytorch_model-00003-of-00004.bin",
264
+ "transformer.h.28.ln_1.weight": "pytorch_model-00003-of-00004.bin",
265
+ "transformer.h.28.ln_2.bias": "pytorch_model-00003-of-00004.bin",
266
+ "transformer.h.28.ln_2.weight": "pytorch_model-00003-of-00004.bin",
267
+ "transformer.h.28.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
268
+ "transformer.h.28.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
269
+ "transformer.h.28.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
270
+ "transformer.h.28.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
271
+ "transformer.h.29.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
272
+ "transformer.h.29.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
273
+ "transformer.h.29.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
274
+ "transformer.h.29.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
275
+ "transformer.h.29.ln_1.bias": "pytorch_model-00003-of-00004.bin",
276
+ "transformer.h.29.ln_1.weight": "pytorch_model-00003-of-00004.bin",
277
+ "transformer.h.29.ln_2.bias": "pytorch_model-00003-of-00004.bin",
278
+ "transformer.h.29.ln_2.weight": "pytorch_model-00003-of-00004.bin",
279
+ "transformer.h.29.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
280
+ "transformer.h.29.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
281
+ "transformer.h.29.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
282
+ "transformer.h.29.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
283
+ "transformer.h.3.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
284
+ "transformer.h.3.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
285
+ "transformer.h.3.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
286
+ "transformer.h.3.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
287
+ "transformer.h.3.ln_1.bias": "pytorch_model-00001-of-00004.bin",
288
+ "transformer.h.3.ln_1.weight": "pytorch_model-00001-of-00004.bin",
289
+ "transformer.h.3.ln_2.bias": "pytorch_model-00001-of-00004.bin",
290
+ "transformer.h.3.ln_2.weight": "pytorch_model-00001-of-00004.bin",
291
+ "transformer.h.3.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
292
+ "transformer.h.3.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
293
+ "transformer.h.3.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
294
+ "transformer.h.3.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
295
+ "transformer.h.30.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
296
+ "transformer.h.30.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
297
+ "transformer.h.30.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
298
+ "transformer.h.30.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
299
+ "transformer.h.30.ln_1.bias": "pytorch_model-00003-of-00004.bin",
300
+ "transformer.h.30.ln_1.weight": "pytorch_model-00003-of-00004.bin",
301
+ "transformer.h.30.ln_2.bias": "pytorch_model-00003-of-00004.bin",
302
+ "transformer.h.30.ln_2.weight": "pytorch_model-00003-of-00004.bin",
303
+ "transformer.h.30.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
304
+ "transformer.h.30.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
305
+ "transformer.h.30.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
306
+ "transformer.h.30.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
307
+ "transformer.h.31.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
308
+ "transformer.h.31.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
309
+ "transformer.h.31.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
310
+ "transformer.h.31.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
311
+ "transformer.h.31.ln_1.bias": "pytorch_model-00003-of-00004.bin",
312
+ "transformer.h.31.ln_1.weight": "pytorch_model-00003-of-00004.bin",
313
+ "transformer.h.31.ln_2.bias": "pytorch_model-00003-of-00004.bin",
314
+ "transformer.h.31.ln_2.weight": "pytorch_model-00003-of-00004.bin",
315
+ "transformer.h.31.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
316
+ "transformer.h.31.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
317
+ "transformer.h.31.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
318
+ "transformer.h.31.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
319
+ "transformer.h.32.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
320
+ "transformer.h.32.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
321
+ "transformer.h.32.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
322
+ "transformer.h.32.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
323
+ "transformer.h.32.ln_1.bias": "pytorch_model-00003-of-00004.bin",
324
+ "transformer.h.32.ln_1.weight": "pytorch_model-00003-of-00004.bin",
325
+ "transformer.h.32.ln_2.bias": "pytorch_model-00003-of-00004.bin",
326
+ "transformer.h.32.ln_2.weight": "pytorch_model-00003-of-00004.bin",
327
+ "transformer.h.32.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
328
+ "transformer.h.32.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
329
+ "transformer.h.32.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
330
+ "transformer.h.32.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
331
+ "transformer.h.33.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
332
+ "transformer.h.33.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
333
+ "transformer.h.33.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
334
+ "transformer.h.33.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
335
+ "transformer.h.33.ln_1.bias": "pytorch_model-00003-of-00004.bin",
336
+ "transformer.h.33.ln_1.weight": "pytorch_model-00003-of-00004.bin",
337
+ "transformer.h.33.ln_2.bias": "pytorch_model-00003-of-00004.bin",
338
+ "transformer.h.33.ln_2.weight": "pytorch_model-00003-of-00004.bin",
339
+ "transformer.h.33.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
340
+ "transformer.h.33.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
341
+ "transformer.h.33.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
342
+ "transformer.h.33.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
343
+ "transformer.h.34.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
344
+ "transformer.h.34.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
345
+ "transformer.h.34.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
346
+ "transformer.h.34.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
347
+ "transformer.h.34.ln_1.bias": "pytorch_model-00003-of-00004.bin",
348
+ "transformer.h.34.ln_1.weight": "pytorch_model-00003-of-00004.bin",
349
+ "transformer.h.34.ln_2.bias": "pytorch_model-00003-of-00004.bin",
350
+ "transformer.h.34.ln_2.weight": "pytorch_model-00003-of-00004.bin",
351
+ "transformer.h.34.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
352
+ "transformer.h.34.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
353
+ "transformer.h.34.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
354
+ "transformer.h.34.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
355
+ "transformer.h.35.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
356
+ "transformer.h.35.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
357
+ "transformer.h.35.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
358
+ "transformer.h.35.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
359
+ "transformer.h.35.ln_1.bias": "pytorch_model-00003-of-00004.bin",
360
+ "transformer.h.35.ln_1.weight": "pytorch_model-00003-of-00004.bin",
361
+ "transformer.h.35.ln_2.bias": "pytorch_model-00003-of-00004.bin",
362
+ "transformer.h.35.ln_2.weight": "pytorch_model-00003-of-00004.bin",
363
+ "transformer.h.35.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
364
+ "transformer.h.35.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
365
+ "transformer.h.35.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
366
+ "transformer.h.35.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
367
+ "transformer.h.36.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
368
+ "transformer.h.36.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
369
+ "transformer.h.36.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
370
+ "transformer.h.36.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
371
+ "transformer.h.36.ln_1.bias": "pytorch_model-00003-of-00004.bin",
372
+ "transformer.h.36.ln_1.weight": "pytorch_model-00003-of-00004.bin",
373
+ "transformer.h.36.ln_2.bias": "pytorch_model-00003-of-00004.bin",
374
+ "transformer.h.36.ln_2.weight": "pytorch_model-00003-of-00004.bin",
375
+ "transformer.h.36.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
376
+ "transformer.h.36.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
377
+ "transformer.h.36.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
378
+ "transformer.h.36.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
379
+ "transformer.h.37.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
380
+ "transformer.h.37.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
381
+ "transformer.h.37.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
382
+ "transformer.h.37.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
383
+ "transformer.h.37.ln_1.bias": "pytorch_model-00003-of-00004.bin",
384
+ "transformer.h.37.ln_1.weight": "pytorch_model-00003-of-00004.bin",
385
+ "transformer.h.37.ln_2.bias": "pytorch_model-00003-of-00004.bin",
386
+ "transformer.h.37.ln_2.weight": "pytorch_model-00003-of-00004.bin",
387
+ "transformer.h.37.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
388
+ "transformer.h.37.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
389
+ "transformer.h.37.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
390
+ "transformer.h.37.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
391
+ "transformer.h.38.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
392
+ "transformer.h.38.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
393
+ "transformer.h.38.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
394
+ "transformer.h.38.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
395
+ "transformer.h.38.ln_1.bias": "pytorch_model-00003-of-00004.bin",
396
+ "transformer.h.38.ln_1.weight": "pytorch_model-00003-of-00004.bin",
397
+ "transformer.h.38.ln_2.bias": "pytorch_model-00003-of-00004.bin",
398
+ "transformer.h.38.ln_2.weight": "pytorch_model-00003-of-00004.bin",
399
+ "transformer.h.38.mlp.c_fc.bias": "pytorch_model-00004-of-00004.bin",
400
+ "transformer.h.38.mlp.c_fc.weight": "pytorch_model-00004-of-00004.bin",
401
+ "transformer.h.38.mlp.c_proj.bias": "pytorch_model-00004-of-00004.bin",
402
+ "transformer.h.38.mlp.c_proj.weight": "pytorch_model-00004-of-00004.bin",
403
+ "transformer.h.39.attn.c_attn.bias": "pytorch_model-00004-of-00004.bin",
404
+ "transformer.h.39.attn.c_attn.weight": "pytorch_model-00004-of-00004.bin",
405
+ "transformer.h.39.attn.c_proj.bias": "pytorch_model-00004-of-00004.bin",
406
+ "transformer.h.39.attn.c_proj.weight": "pytorch_model-00004-of-00004.bin",
407
+ "transformer.h.39.ln_1.bias": "pytorch_model-00004-of-00004.bin",
408
+ "transformer.h.39.ln_1.weight": "pytorch_model-00004-of-00004.bin",
409
+ "transformer.h.39.ln_2.bias": "pytorch_model-00004-of-00004.bin",
410
+ "transformer.h.39.ln_2.weight": "pytorch_model-00004-of-00004.bin",
411
+ "transformer.h.39.mlp.c_fc.bias": "pytorch_model-00004-of-00004.bin",
412
+ "transformer.h.39.mlp.c_fc.weight": "pytorch_model-00004-of-00004.bin",
413
+ "transformer.h.39.mlp.c_proj.bias": "pytorch_model-00004-of-00004.bin",
414
+ "transformer.h.39.mlp.c_proj.weight": "pytorch_model-00004-of-00004.bin",
415
+ "transformer.h.4.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
416
+ "transformer.h.4.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
417
+ "transformer.h.4.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
418
+ "transformer.h.4.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
419
+ "transformer.h.4.ln_1.bias": "pytorch_model-00001-of-00004.bin",
420
+ "transformer.h.4.ln_1.weight": "pytorch_model-00001-of-00004.bin",
421
+ "transformer.h.4.ln_2.bias": "pytorch_model-00001-of-00004.bin",
422
+ "transformer.h.4.ln_2.weight": "pytorch_model-00001-of-00004.bin",
423
+ "transformer.h.4.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
424
+ "transformer.h.4.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
425
+ "transformer.h.4.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
426
+ "transformer.h.4.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
427
+ "transformer.h.5.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
428
+ "transformer.h.5.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
429
+ "transformer.h.5.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
430
+ "transformer.h.5.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
431
+ "transformer.h.5.ln_1.bias": "pytorch_model-00001-of-00004.bin",
432
+ "transformer.h.5.ln_1.weight": "pytorch_model-00001-of-00004.bin",
433
+ "transformer.h.5.ln_2.bias": "pytorch_model-00001-of-00004.bin",
434
+ "transformer.h.5.ln_2.weight": "pytorch_model-00001-of-00004.bin",
435
+ "transformer.h.5.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
436
+ "transformer.h.5.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
437
+ "transformer.h.5.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
438
+ "transformer.h.5.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
439
+ "transformer.h.6.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
440
+ "transformer.h.6.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
441
+ "transformer.h.6.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
442
+ "transformer.h.6.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
443
+ "transformer.h.6.ln_1.bias": "pytorch_model-00001-of-00004.bin",
444
+ "transformer.h.6.ln_1.weight": "pytorch_model-00001-of-00004.bin",
445
+ "transformer.h.6.ln_2.bias": "pytorch_model-00001-of-00004.bin",
446
+ "transformer.h.6.ln_2.weight": "pytorch_model-00001-of-00004.bin",
447
+ "transformer.h.6.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
448
+ "transformer.h.6.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
449
+ "transformer.h.6.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
450
+ "transformer.h.6.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
451
+ "transformer.h.7.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
452
+ "transformer.h.7.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
453
+ "transformer.h.7.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
454
+ "transformer.h.7.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
455
+ "transformer.h.7.ln_1.bias": "pytorch_model-00001-of-00004.bin",
456
+ "transformer.h.7.ln_1.weight": "pytorch_model-00001-of-00004.bin",
457
+ "transformer.h.7.ln_2.bias": "pytorch_model-00001-of-00004.bin",
458
+ "transformer.h.7.ln_2.weight": "pytorch_model-00001-of-00004.bin",
459
+ "transformer.h.7.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
460
+ "transformer.h.7.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
461
+ "transformer.h.7.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
462
+ "transformer.h.7.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
463
+ "transformer.h.8.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
464
+ "transformer.h.8.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
465
+ "transformer.h.8.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
466
+ "transformer.h.8.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
467
+ "transformer.h.8.ln_1.bias": "pytorch_model-00001-of-00004.bin",
468
+ "transformer.h.8.ln_1.weight": "pytorch_model-00001-of-00004.bin",
469
+ "transformer.h.8.ln_2.bias": "pytorch_model-00001-of-00004.bin",
470
+ "transformer.h.8.ln_2.weight": "pytorch_model-00001-of-00004.bin",
471
+ "transformer.h.8.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
472
+ "transformer.h.8.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
473
+ "transformer.h.8.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
474
+ "transformer.h.8.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
475
+ "transformer.h.9.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
476
+ "transformer.h.9.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
477
+ "transformer.h.9.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
478
+ "transformer.h.9.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
479
+ "transformer.h.9.ln_1.bias": "pytorch_model-00001-of-00004.bin",
480
+ "transformer.h.9.ln_1.weight": "pytorch_model-00001-of-00004.bin",
481
+ "transformer.h.9.ln_2.bias": "pytorch_model-00001-of-00004.bin",
482
+ "transformer.h.9.ln_2.weight": "pytorch_model-00001-of-00004.bin",
483
+ "transformer.h.9.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
484
+ "transformer.h.9.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
485
+ "transformer.h.9.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
486
+ "transformer.h.9.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
487
+ "transformer.ln_f.bias": "pytorch_model-00004-of-00004.bin",
488
+ "transformer.ln_f.weight": "pytorch_model-00004-of-00004.bin",
489
+ "transformer.wpe.weight": "pytorch_model-00001-of-00004.bin",
490
+ "transformer.wte.weight": "pytorch_model-00001-of-00004.bin"
491
+ }
492
+ }
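The index above maps each parameter tensor to one of the four ~10 GB shards, and `from_pretrained` reads it to reassemble the full model automatically, so no manual stitching is required. A short sketch (the local directory is an assumption; the file names come from this commit):

```python
import json

from transformers import AutoModelForCausalLM

# Inspect which shard holds a given tensor (purely illustrative).
with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)
print(index["weight_map"]["transformer.h.0.attn.c_attn.weight"])  # pytorch_model-00001-of-00004.bin

# from_pretrained consumes the same index and loads all four shards transparently.
model = AutoModelForCausalLM.from_pretrained("./starcoderplus-ift-v4.1", torch_dtype="auto")
```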
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ transformers==4.28.1
+ accelerate>=0.16.0
+ bitsandbytes
+ sentencepiece
+ git+https://github.com/huggingface/peft.git@632997d1fb776c3cf05d8c2537ac9a98a7ce9435
runs/Jun03_19-41-09_ip-26-0-150-31/1685821571.9023504/events.out.tfevents.1685821571.ip-26-0-150-31.3019211.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb69a98243f145661744cf9b0f841612871d0dd8353921269d781900c5b44966
+ size 6340
runs/Jun03_19-41-09_ip-26-0-150-31/events.out.tfevents.1685821571.ip-26-0-150-31.3019211.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7b05d4d5bd3cb96e284db3c0efa120ba2e0c19c883a78412afa043418b84fcc
+ size 8463
runs/Jun03_19-41-09_ip-26-0-150-31/events.out.tfevents.1685823884.ip-26-0-150-31.3019211.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4a5c376bbecf308e9a64d9242aa95641f34ca58e827ce94004246a209e3bd37
+ size 354
special_tokens_map.json ADDED
@@ -0,0 +1,11 @@
+ {
+ "additional_special_tokens": [
+ "<|system|>",
+ "<|user|>",
+ "<|assistant|>",
+ "<|end|>"
+ ],
+ "bos_token": "<|endoftext|>",
+ "eos_token": "<|endoftext|>",
+ "unk_token": "<|endoftext|>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "add_prefix_space": false,
+ "additional_special_tokens": [
+ "<|endoftext|>",
+ "<fim_prefix>",
+ "<fim_middle>",
+ "<fim_suffix>",
+ "<fim_pad>",
+ "<filename>",
+ "<gh_stars>",
+ "<issue_start>",
+ "<issue_comment>",
+ "<issue_closed>",
+ "<jupyter_start>",
+ "<jupyter_text>",
+ "<jupyter_code>",
+ "<jupyter_output>",
+ "<empty_output>",
+ "<commit_before>",
+ "<commit_msg>",
+ "<commit_after>",
+ "<reponame>"
+ ],
+ "bos_token": "<|endoftext|>",
+ "clean_up_tokenization_spaces": true,
+ "eos_token": "<|endoftext|>",
+ "model_max_length": 1000000000000000019884624838656,
+ "tokenizer_class": "GPT2Tokenizer",
+ "unk_token": "<|endoftext|>",
+ "vocab_size": 49152
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 5.9,
+ "train_loss": 0.9728257921006944,
+ "train_runtime": 2307.7787,
+ "train_samples": 3888,
+ "train_samples_per_second": 10.108,
+ "train_steps_per_second": 0.039
+ }
trainer_state.json ADDED
@@ -0,0 +1,145 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 5.901639344262295,
5
+ "global_step": 90,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 0.07,
12
+ "learning_rate": 0.0,
13
+ "loss": 1.8309,
14
+ "step": 1
15
+ },
16
+ {
17
+ "epoch": 0.52,
18
+ "learning_rate": 2e-05,
19
+ "loss": 1.5321,
20
+ "step": 8
21
+ },
22
+ {
23
+ "epoch": 0.98,
24
+ "eval_loss": 1.2855817079544067,
25
+ "eval_runtime": 6.6367,
26
+ "eval_samples_per_second": 30.437,
27
+ "eval_steps_per_second": 1.055,
28
+ "step": 15
29
+ },
30
+ {
31
+ "epoch": 1.05,
32
+ "learning_rate": 2e-05,
33
+ "loss": 1.35,
34
+ "step": 16
35
+ },
36
+ {
37
+ "epoch": 1.57,
38
+ "learning_rate": 2e-05,
39
+ "loss": 1.2071,
40
+ "step": 24
41
+ },
42
+ {
43
+ "epoch": 1.97,
44
+ "eval_loss": 1.2619894742965698,
45
+ "eval_runtime": 5.1554,
46
+ "eval_samples_per_second": 39.183,
47
+ "eval_steps_per_second": 1.358,
48
+ "step": 30
49
+ },
50
+ {
51
+ "epoch": 2.1,
52
+ "learning_rate": 2e-05,
53
+ "loss": 1.1502,
54
+ "step": 32
55
+ },
56
+ {
57
+ "epoch": 2.62,
58
+ "learning_rate": 2e-05,
59
+ "loss": 1.0162,
60
+ "step": 40
61
+ },
62
+ {
63
+ "epoch": 2.95,
64
+ "eval_loss": 1.285272240638733,
65
+ "eval_runtime": 5.1992,
66
+ "eval_samples_per_second": 38.852,
67
+ "eval_steps_per_second": 1.346,
68
+ "step": 45
69
+ },
70
+ {
71
+ "epoch": 3.15,
72
+ "learning_rate": 2e-05,
73
+ "loss": 0.9511,
74
+ "step": 48
75
+ },
76
+ {
77
+ "epoch": 3.67,
78
+ "learning_rate": 2e-05,
79
+ "loss": 0.8484,
80
+ "step": 56
81
+ },
82
+ {
83
+ "epoch": 4.0,
84
+ "eval_loss": 1.3274288177490234,
85
+ "eval_runtime": 5.1899,
86
+ "eval_samples_per_second": 38.922,
87
+ "eval_steps_per_second": 1.349,
88
+ "step": 61
89
+ },
90
+ {
91
+ "epoch": 4.2,
92
+ "learning_rate": 2e-05,
93
+ "loss": 0.7971,
94
+ "step": 64
95
+ },
96
+ {
97
+ "epoch": 4.72,
98
+ "learning_rate": 2e-05,
99
+ "loss": 0.6981,
100
+ "step": 72
101
+ },
102
+ {
103
+ "epoch": 4.98,
104
+ "eval_loss": 1.3993656635284424,
105
+ "eval_runtime": 5.213,
106
+ "eval_samples_per_second": 38.749,
107
+ "eval_steps_per_second": 1.343,
108
+ "step": 76
109
+ },
110
+ {
111
+ "epoch": 5.25,
112
+ "learning_rate": 2e-05,
113
+ "loss": 0.6462,
114
+ "step": 80
115
+ },
116
+ {
117
+ "epoch": 5.77,
118
+ "learning_rate": 2e-05,
119
+ "loss": 0.5668,
120
+ "step": 88
121
+ },
122
+ {
123
+ "epoch": 5.9,
124
+ "eval_loss": 1.4719988107681274,
125
+ "eval_runtime": 5.1996,
126
+ "eval_samples_per_second": 38.849,
127
+ "eval_steps_per_second": 1.346,
128
+ "step": 90
129
+ },
130
+ {
131
+ "epoch": 5.9,
132
+ "step": 90,
133
+ "total_flos": 383994839433216.0,
134
+ "train_loss": 0.9728257921006944,
135
+ "train_runtime": 2307.7787,
136
+ "train_samples_per_second": 10.108,
137
+ "train_steps_per_second": 0.039
138
+ }
139
+ ],
140
+ "max_steps": 90,
141
+ "num_train_epochs": 6,
142
+ "total_flos": 383994839433216.0,
143
+ "trial_name": null,
144
+ "trial_params": null
145
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d758c20cdca5c51a4915cd92b74a547ca1b5077aaf48d7b685529d240356f725
+ size 4987
vocab.json ADDED
The diff for this file is too large to render. See raw diff