Globaly committed on
Commit 5380b78
Parent: 64f6b9e

Upload 33 files

README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - autotrain
+ - text-generation
+ widget:
+ - text: "I love AutoTrain because "
+ ---
+
+ # Model Trained Using AutoTrain
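Since the auto-generated card above is minimal, here is a hypothetical usage sketch (not part of the commit). It assumes `transformers` and `peft` are installed, takes the base model name from adapter_config.json below, and the adapter repo id from the `repo_id` field of training_params.json at the bottom of this commit.

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "marianbasti/Llama-2-13b-fp16-alpaca-spanish"  # from adapter_config.json
adapter_id = "Globaly/Globaly-1-es-classes-164k"         # repo_id in training_params.json

# Load the fp16 base model, then attach the LoRA adapter uploaded in this commit.
tokenizer = AutoTokenizer.from_pretrained(adapter_id)
model = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.float16, device_map="auto")
model = PeftModel.from_pretrained(model, adapter_id)

# The widget prompt from the card above.
inputs = tokenizer("I love AutoTrain because ", return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```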
adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "marianbasti/Llama-2-13b-fp16-alpaca-spanish",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 16,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
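The same adapter configuration expressed through peft's API, as a sketch (values mirror the JSON above). With r=16 on the q_proj/v_proj projections of all 40 layers of a 13B model (hidden size 5120), the adapter holds 16 × (5120 + 5120) × 2 matrices × 40 layers ≈ 13.1M parameters, which at 4 bytes each is ≈52.4 MB, consistent with the adapter_model.safetensors size below.

```python
from peft import LoraConfig

lora_config = LoraConfig(
    r=16,                                 # LoRA rank
    lora_alpha=32,                        # scaling factor
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "v_proj"],  # attention query/value projections only
    task_type="CAUSAL_LM",
)
```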
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dce189b34d89fbf9392d4698ff51ced25615abb2e403a146a0c54234af865427
+ size 52450328
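This is a Git LFS pointer rather than the weights themselves: `oid` is the SHA-256 of the actual blob and `size` its byte count (≈52 MB here). A minimal integrity check after downloading the real file, assuming a local copy at the path shown:

```python
import hashlib

def sha256_of(path: str) -> str:
    # Stream the file in 1 MiB chunks so large blobs don't load into memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "dce189b34d89fbf9392d4698ff51ced25615abb2e403a146a0c54234af865427"
assert sha256_of("adapter_model.safetensors") == expected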
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+ "<PAD>": 32000
+ }
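The single entry records the padding token added on top of Llama's 32,000-token vocabulary (ids 0-31999), so it lands at id 32000. A sketch of how such a mapping typically arises, assuming the base tokenizer really has a 32,000-token vocab:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("marianbasti/Llama-2-13b-fp16-alpaca-spanish")
num_added = tok.add_special_tokens({"pad_token": "<PAD>"})
assert num_added == 1 and tok.pad_token_id == 32000

# The model's embedding matrix must then grow to match:
# model.resize_token_embeddings(len(tok))
```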
checkpoint-3340/README.md ADDED
@@ -0,0 +1,34 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float16
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float16
+ ### Framework versions
+
+ - PEFT 0.5.0
+
+ - PEFT 0.5.0
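(The config block appears twice in the file above; PEFT's auto-generated cards appear to append it on each save.) Expressed as a `transformers` quantization config, the same 4-bit NF4 setup looks like this sketch:

```python
import torch
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",            # NormalFloat4 quantization
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float16,
)
# Passed to the base model as:
# AutoModelForCausalLM.from_pretrained(base_id, quantization_config=bnb_config)
```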
checkpoint-3340/adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "marianbasti/Llama-2-13b-fp16-alpaca-spanish",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 16,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-3340/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5eadb286b59879fe0474cdb3163a47aa2e5b8e95c91e6abd7c7483efd24647f5
+ size 52486922
checkpoint-3340/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dce189b34d89fbf9392d4698ff51ced25615abb2e403a146a0c54234af865427
+ size 52450328
checkpoint-3340/added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+ "<PAD>": 32000
+ }
checkpoint-3340/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f4a464fa2ce0ce023162e5c90e3149235b98125e471308e6fc52556ea97ced0
+ size 104992698
checkpoint-3340/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:049c26b844b79121ddd8379f7f69194e63f6fbf6aa007eeac0c66f17eebb8893
+ size 888
checkpoint-3340/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a45b5200eb5d1e3874ca13fda4d09590b1326ad1cc1c68cfa0836a1be3e07272
+ size 15984
checkpoint-3340/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d88eaa9e286c62e50fa0dd353c03997f7476f78cabd601f8cb4de46d0905d327
+ size 15984
checkpoint-3340/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c9029d48e8b2a739e44642e8ae8a48b967a298e8b030f01f60247690243324f0
+ size 15984
checkpoint-3340/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08fb799831880a2159454e9f071bd1bbecbbb981d65bc0414d0cbfb341c27f82
+ size 15984
checkpoint-3340/rng_state_4.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ac4cccc5c13a2d5800af39c5919aed6333c38268c9e2d26b2a18051d4124267
+ size 15984
checkpoint-3340/rng_state_5.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6820a8cf5d7180fe33e6819a43446d15dd9cd975f38d542ab86d662e2c11f24
+ size 15984
checkpoint-3340/rng_state_6.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4441442c9da6aceb195971c05849aaffa22571fec062e0a85a47c8ec7dd25be3
+ size 15984
checkpoint-3340/rng_state_7.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:955fdc02966583437ee2c9b35f773e75116b72583c7344578a3299901051990b
+ size 15984
checkpoint-3340/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7bd17184dc42eb074641c289b765f6e8d763b78481bf17046d86f587e908fa76
+ size 1064
checkpoint-3340/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<PAD>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
checkpoint-3340/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-3340/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
checkpoint-3340/tokenizer_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "32000": {
+ "content": "<PAD>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": true,
+ "model_max_length": 1024,
+ "pad_token": "<PAD>",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false
+ }
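A quick way to confirm these settings is to load the tokenizer straight from the checkpoint directory (a sketch; local path assumed, and `AutoTokenizer` may return the fast variant):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-3340")
assert tok.model_max_length == 1024
assert tok.pad_token == "<PAD>" and tok.unk_token == "<unk>"
```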
checkpoint-3340/trainer_state.json ADDED
@@ -0,0 +1,1021 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 3.9964104098115465,
+ "eval_steps": 500,
+ "global_step": 3340,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.02,
+ "learning_rate": 1.1976047904191617e-05,
+ "loss": 1.194,
+ "step": 20
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 2.3952095808383234e-05,
+ "loss": 1.0484,
+ "step": 40
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 3.592814371257485e-05,
+ "loss": 0.8359,
+ "step": 60
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 4.790419161676647e-05,
+ "loss": 0.6286,
+ "step": 80
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 5.988023952095808e-05,
+ "loss": 0.4686,
+ "step": 100
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 7.18562874251497e-05,
+ "loss": 0.3526,
+ "step": 120
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 8.383233532934131e-05,
+ "loss": 0.2483,
+ "step": 140
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 9.580838323353294e-05,
+ "loss": 0.1862,
+ "step": 160
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 0.00010778443113772456,
+ "loss": 0.1524,
+ "step": 180
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 0.00011976047904191617,
+ "loss": 0.1389,
+ "step": 200
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 0.0001317365269461078,
+ "loss": 0.1361,
+ "step": 220
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 0.0001437125748502994,
+ "loss": 0.1286,
+ "step": 240
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 0.00015568862275449103,
+ "loss": 0.1239,
+ "step": 260
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 0.00016766467065868263,
+ "loss": 0.1205,
+ "step": 280
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 0.00017964071856287425,
+ "loss": 0.1206,
+ "step": 300
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 0.00019161676646706587,
+ "loss": 0.1166,
+ "step": 320
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 0.0001996007984031936,
+ "loss": 0.1174,
+ "step": 340
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 0.000198270126413839,
+ "loss": 0.115,
+ "step": 360
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 0.00019693945442448438,
+ "loss": 0.1158,
+ "step": 380
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 0.00019560878243512974,
+ "loss": 0.1157,
+ "step": 400
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 0.00019427811044577512,
+ "loss": 0.114,
+ "step": 420
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 0.00019294743845642048,
+ "loss": 0.1115,
+ "step": 440
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 0.00019161676646706587,
+ "loss": 0.1152,
+ "step": 460
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 0.00019028609447771126,
+ "loss": 0.1107,
+ "step": 480
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 0.00018895542248835662,
+ "loss": 0.1117,
+ "step": 500
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 0.000187624750499002,
+ "loss": 0.1096,
+ "step": 520
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 0.00018629407850964737,
+ "loss": 0.1108,
+ "step": 540
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 0.00018496340652029275,
+ "loss": 0.1103,
+ "step": 560
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 0.00018363273453093811,
+ "loss": 0.106,
+ "step": 580
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 0.0001823020625415835,
+ "loss": 0.1066,
+ "step": 600
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 0.0001809713905522289,
+ "loss": 0.1078,
+ "step": 620
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 0.00017964071856287425,
+ "loss": 0.1074,
+ "step": 640
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 0.00017831004657351964,
+ "loss": 0.1047,
+ "step": 660
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 0.000176979374584165,
+ "loss": 0.1051,
+ "step": 680
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 0.00017564870259481038,
+ "loss": 0.1052,
+ "step": 700
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 0.00017431803060545577,
+ "loss": 0.1068,
+ "step": 720
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 0.00017298735861610113,
+ "loss": 0.1081,
+ "step": 740
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 0.00017165668662674652,
+ "loss": 0.1069,
+ "step": 760
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 0.00017032601463739188,
+ "loss": 0.1072,
+ "step": 780
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 0.00016899534264803727,
+ "loss": 0.1057,
+ "step": 800
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 0.00016766467065868263,
+ "loss": 0.1057,
+ "step": 820
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 0.00016633399866932801,
+ "loss": 0.1072,
+ "step": 840
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 0.0001650033266799734,
+ "loss": 0.1054,
+ "step": 860
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 0.00016367265469061876,
+ "loss": 0.1064,
+ "step": 880
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 0.00016234198270126415,
+ "loss": 0.1069,
+ "step": 900
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 0.0001610113107119095,
+ "loss": 0.1062,
+ "step": 920
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 0.0001596806387225549,
+ "loss": 0.1033,
+ "step": 940
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 0.00015834996673320028,
+ "loss": 0.1022,
+ "step": 960
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 0.00015701929474384565,
+ "loss": 0.1023,
+ "step": 980
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 0.00015568862275449103,
+ "loss": 0.1024,
+ "step": 1000
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 0.0001543579507651364,
+ "loss": 0.1008,
+ "step": 1020
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 0.00015302727877578178,
+ "loss": 0.1011,
+ "step": 1040
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 0.00015169660678642714,
+ "loss": 0.1035,
+ "step": 1060
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 0.00015036593479707253,
+ "loss": 0.1012,
+ "step": 1080
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 0.00014903526280771792,
+ "loss": 0.1004,
+ "step": 1100
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 0.00014770459081836328,
+ "loss": 0.0996,
+ "step": 1120
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 0.00014637391882900866,
+ "loss": 0.102,
+ "step": 1140
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 0.00014504324683965402,
+ "loss": 0.0976,
+ "step": 1160
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 0.0001437125748502994,
+ "loss": 0.1024,
+ "step": 1180
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 0.0001423819028609448,
+ "loss": 0.1006,
+ "step": 1200
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 0.00014105123087159016,
+ "loss": 0.101,
+ "step": 1220
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 0.00013972055888223555,
+ "loss": 0.102,
+ "step": 1240
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 0.0001383898868928809,
+ "loss": 0.1018,
+ "step": 1260
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 0.0001370592149035263,
+ "loss": 0.0999,
+ "step": 1280
+ },
+ {
+ "epoch": 1.56,
+ "learning_rate": 0.00013572854291417165,
+ "loss": 0.1021,
+ "step": 1300
+ },
+ {
+ "epoch": 1.58,
+ "learning_rate": 0.00013439787092481704,
+ "loss": 0.0996,
+ "step": 1320
+ },
+ {
+ "epoch": 1.6,
+ "learning_rate": 0.00013306719893546243,
+ "loss": 0.1005,
+ "step": 1340
+ },
+ {
+ "epoch": 1.63,
+ "learning_rate": 0.0001317365269461078,
+ "loss": 0.0993,
+ "step": 1360
+ },
+ {
+ "epoch": 1.65,
+ "learning_rate": 0.00013040585495675318,
+ "loss": 0.0999,
+ "step": 1380
+ },
+ {
+ "epoch": 1.68,
+ "learning_rate": 0.00012907518296739854,
+ "loss": 0.1011,
+ "step": 1400
+ },
+ {
+ "epoch": 1.7,
+ "learning_rate": 0.00012774451097804392,
+ "loss": 0.0968,
+ "step": 1420
+ },
+ {
+ "epoch": 1.72,
+ "learning_rate": 0.0001264138389886893,
+ "loss": 0.0983,
+ "step": 1440
+ },
+ {
+ "epoch": 1.75,
+ "learning_rate": 0.00012508316699933467,
+ "loss": 0.0987,
+ "step": 1460
+ },
+ {
+ "epoch": 1.77,
+ "learning_rate": 0.00012375249500998006,
+ "loss": 0.0984,
+ "step": 1480
+ },
+ {
+ "epoch": 1.79,
+ "learning_rate": 0.00012242182302062542,
+ "loss": 0.0956,
+ "step": 1500
+ },
+ {
+ "epoch": 1.82,
+ "learning_rate": 0.0001210911510312708,
+ "loss": 0.0971,
+ "step": 1520
+ },
+ {
+ "epoch": 1.84,
+ "learning_rate": 0.00011976047904191617,
+ "loss": 0.0986,
+ "step": 1540
+ },
+ {
+ "epoch": 1.87,
+ "learning_rate": 0.00011842980705256155,
+ "loss": 0.0995,
+ "step": 1560
+ },
+ {
+ "epoch": 1.89,
+ "learning_rate": 0.00011709913506320693,
+ "loss": 0.0989,
+ "step": 1580
+ },
+ {
+ "epoch": 1.91,
+ "learning_rate": 0.0001157684630738523,
+ "loss": 0.1006,
+ "step": 1600
+ },
+ {
+ "epoch": 1.94,
+ "learning_rate": 0.00011443779108449768,
+ "loss": 0.0982,
+ "step": 1620
+ },
+ {
+ "epoch": 1.96,
+ "learning_rate": 0.00011310711909514305,
+ "loss": 0.0982,
+ "step": 1640
+ },
+ {
+ "epoch": 1.99,
+ "learning_rate": 0.00011177644710578842,
+ "loss": 0.0998,
+ "step": 1660
+ },
+ {
+ "epoch": 2.01,
+ "learning_rate": 0.00011044577511643381,
+ "loss": 0.0986,
+ "step": 1680
+ },
+ {
+ "epoch": 2.03,
+ "learning_rate": 0.00010911510312707917,
+ "loss": 0.0996,
+ "step": 1700
+ },
+ {
+ "epoch": 2.06,
+ "learning_rate": 0.00010778443113772456,
+ "loss": 0.0994,
+ "step": 1720
+ },
+ {
+ "epoch": 2.08,
+ "learning_rate": 0.00010645375914836992,
+ "loss": 0.0999,
+ "step": 1740
+ },
+ {
+ "epoch": 2.11,
+ "learning_rate": 0.0001051230871590153,
+ "loss": 0.1006,
+ "step": 1760
+ },
+ {
+ "epoch": 2.13,
+ "learning_rate": 0.00010379241516966068,
+ "loss": 0.0954,
+ "step": 1780
+ },
+ {
+ "epoch": 2.15,
+ "learning_rate": 0.00010246174318030605,
+ "loss": 0.0958,
+ "step": 1800
+ },
+ {
+ "epoch": 2.18,
+ "learning_rate": 0.00010113107119095144,
+ "loss": 0.0969,
+ "step": 1820
+ },
+ {
+ "epoch": 2.2,
+ "learning_rate": 9.98003992015968e-05,
+ "loss": 0.0949,
+ "step": 1840
+ },
+ {
+ "epoch": 2.23,
+ "learning_rate": 9.846972721224219e-05,
+ "loss": 0.0957,
+ "step": 1860
+ },
+ {
+ "epoch": 2.25,
+ "learning_rate": 9.713905522288756e-05,
+ "loss": 0.0951,
+ "step": 1880
+ },
+ {
+ "epoch": 2.27,
+ "learning_rate": 9.580838323353294e-05,
+ "loss": 0.0966,
+ "step": 1900
+ },
+ {
+ "epoch": 2.3,
+ "learning_rate": 9.447771124417831e-05,
+ "loss": 0.0946,
+ "step": 1920
+ },
+ {
+ "epoch": 2.32,
+ "learning_rate": 9.314703925482368e-05,
+ "loss": 0.0947,
+ "step": 1940
+ },
+ {
+ "epoch": 2.35,
+ "learning_rate": 9.181636726546906e-05,
+ "loss": 0.0947,
+ "step": 1960
+ },
+ {
+ "epoch": 2.37,
+ "learning_rate": 9.048569527611444e-05,
+ "loss": 0.0961,
+ "step": 1980
+ },
+ {
+ "epoch": 2.39,
+ "learning_rate": 8.915502328675982e-05,
+ "loss": 0.0926,
+ "step": 2000
+ },
+ {
+ "epoch": 2.42,
+ "learning_rate": 8.782435129740519e-05,
+ "loss": 0.0972,
+ "step": 2020
+ },
+ {
+ "epoch": 2.44,
+ "learning_rate": 8.649367930805057e-05,
+ "loss": 0.095,
+ "step": 2040
+ },
+ {
+ "epoch": 2.46,
+ "learning_rate": 8.516300731869594e-05,
+ "loss": 0.0969,
+ "step": 2060
+ },
+ {
+ "epoch": 2.49,
+ "learning_rate": 8.383233532934131e-05,
+ "loss": 0.0964,
+ "step": 2080
+ },
+ {
+ "epoch": 2.51,
+ "learning_rate": 8.25016633399867e-05,
+ "loss": 0.0944,
+ "step": 2100
+ },
+ {
+ "epoch": 2.54,
+ "learning_rate": 8.117099135063207e-05,
+ "loss": 0.097,
+ "step": 2120
+ },
+ {
+ "epoch": 2.56,
+ "learning_rate": 7.984031936127745e-05,
+ "loss": 0.0955,
+ "step": 2140
+ },
+ {
+ "epoch": 2.58,
+ "learning_rate": 7.850964737192282e-05,
+ "loss": 0.0957,
+ "step": 2160
+ },
+ {
+ "epoch": 2.61,
+ "learning_rate": 7.71789753825682e-05,
+ "loss": 0.095,
+ "step": 2180
+ },
+ {
+ "epoch": 2.63,
+ "learning_rate": 7.584830339321357e-05,
+ "loss": 0.0941,
+ "step": 2200
+ },
+ {
+ "epoch": 2.66,
+ "learning_rate": 7.451763140385896e-05,
+ "loss": 0.096,
+ "step": 2220
+ },
+ {
+ "epoch": 2.68,
+ "learning_rate": 7.318695941450433e-05,
+ "loss": 0.0942,
+ "step": 2240
+ },
+ {
+ "epoch": 2.7,
+ "learning_rate": 7.18562874251497e-05,
+ "loss": 0.0936,
+ "step": 2260
+ },
+ {
+ "epoch": 2.73,
+ "learning_rate": 7.052561543579508e-05,
+ "loss": 0.0928,
+ "step": 2280
+ },
+ {
+ "epoch": 2.75,
+ "learning_rate": 6.919494344644045e-05,
+ "loss": 0.0948,
+ "step": 2300
+ },
+ {
+ "epoch": 2.78,
+ "learning_rate": 6.786427145708583e-05,
+ "loss": 0.0937,
+ "step": 2320
+ },
+ {
+ "epoch": 2.8,
+ "learning_rate": 6.653359946773121e-05,
+ "loss": 0.0902,
+ "step": 2340
+ },
+ {
+ "epoch": 2.82,
+ "learning_rate": 6.520292747837659e-05,
+ "loss": 0.093,
+ "step": 2360
+ },
+ {
+ "epoch": 2.85,
+ "learning_rate": 6.387225548902196e-05,
+ "loss": 0.0936,
+ "step": 2380
+ },
+ {
+ "epoch": 2.87,
+ "learning_rate": 6.254158349966734e-05,
+ "loss": 0.0958,
+ "step": 2400
+ },
+ {
+ "epoch": 2.9,
+ "learning_rate": 6.121091151031271e-05,
+ "loss": 0.0948,
+ "step": 2420
+ },
+ {
+ "epoch": 2.92,
+ "learning_rate": 5.988023952095808e-05,
+ "loss": 0.0965,
+ "step": 2440
+ },
+ {
+ "epoch": 2.94,
+ "learning_rate": 5.8549567531603464e-05,
+ "loss": 0.0932,
+ "step": 2460
+ },
+ {
+ "epoch": 2.97,
+ "learning_rate": 5.721889554224884e-05,
+ "loss": 0.0941,
+ "step": 2480
+ },
+ {
+ "epoch": 2.99,
+ "learning_rate": 5.588822355289421e-05,
+ "loss": 0.0957,
+ "step": 2500
+ },
+ {
+ "epoch": 3.02,
+ "learning_rate": 5.4557551563539585e-05,
+ "loss": 0.0943,
+ "step": 2520
+ },
+ {
+ "epoch": 3.04,
+ "learning_rate": 5.322687957418496e-05,
+ "loss": 0.0954,
+ "step": 2540
+ },
+ {
+ "epoch": 3.06,
+ "learning_rate": 5.189620758483034e-05,
+ "loss": 0.095,
+ "step": 2560
+ },
+ {
+ "epoch": 3.09,
+ "learning_rate": 5.056553559547572e-05,
+ "loss": 0.096,
+ "step": 2580
+ },
+ {
+ "epoch": 3.11,
+ "learning_rate": 4.9234863606121094e-05,
+ "loss": 0.0954,
+ "step": 2600
+ },
+ {
+ "epoch": 3.13,
+ "learning_rate": 4.790419161676647e-05,
+ "loss": 0.0913,
+ "step": 2620
+ },
+ {
+ "epoch": 3.16,
+ "learning_rate": 4.657351962741184e-05,
+ "loss": 0.092,
+ "step": 2640
+ },
+ {
+ "epoch": 3.18,
+ "learning_rate": 4.524284763805722e-05,
+ "loss": 0.0933,
+ "step": 2660
+ },
+ {
+ "epoch": 3.21,
+ "learning_rate": 4.3912175648702596e-05,
+ "loss": 0.0915,
+ "step": 2680
+ },
+ {
+ "epoch": 3.23,
+ "learning_rate": 4.258150365934797e-05,
+ "loss": 0.0903,
+ "step": 2700
+ },
+ {
+ "epoch": 3.25,
+ "learning_rate": 4.125083166999335e-05,
+ "loss": 0.0924,
+ "step": 2720
+ },
+ {
+ "epoch": 3.28,
+ "learning_rate": 3.9920159680638724e-05,
+ "loss": 0.0926,
+ "step": 2740
+ },
+ {
+ "epoch": 3.3,
+ "learning_rate": 3.85894876912841e-05,
+ "loss": 0.0923,
+ "step": 2760
+ },
+ {
+ "epoch": 3.33,
+ "learning_rate": 3.725881570192948e-05,
+ "loss": 0.0899,
+ "step": 2780
+ },
+ {
+ "epoch": 3.35,
+ "learning_rate": 3.592814371257485e-05,
+ "loss": 0.0922,
+ "step": 2800
+ },
+ {
+ "epoch": 3.37,
+ "learning_rate": 3.4597471723220226e-05,
+ "loss": 0.0905,
+ "step": 2820
+ },
+ {
+ "epoch": 3.4,
+ "learning_rate": 3.326679973386561e-05,
+ "loss": 0.0897,
+ "step": 2840
+ },
+ {
+ "epoch": 3.42,
+ "learning_rate": 3.193612774451098e-05,
+ "loss": 0.0923,
+ "step": 2860
+ },
+ {
+ "epoch": 3.45,
+ "learning_rate": 3.0605455755156355e-05,
+ "loss": 0.0922,
+ "step": 2880
+ },
+ {
+ "epoch": 3.47,
+ "learning_rate": 2.9274783765801732e-05,
+ "loss": 0.0939,
+ "step": 2900
+ },
+ {
+ "epoch": 3.49,
+ "learning_rate": 2.7944111776447106e-05,
+ "loss": 0.0931,
+ "step": 2920
+ },
+ {
+ "epoch": 3.52,
+ "learning_rate": 2.661343978709248e-05,
+ "loss": 0.0898,
+ "step": 2940
+ },
+ {
+ "epoch": 3.54,
+ "learning_rate": 2.528276779773786e-05,
+ "loss": 0.0943,
+ "step": 2960
+ },
+ {
+ "epoch": 3.57,
+ "learning_rate": 2.3952095808383234e-05,
+ "loss": 0.0932,
+ "step": 2980
+ },
+ {
+ "epoch": 3.59,
+ "learning_rate": 2.2687957418496342e-05,
+ "loss": 0.0909,
+ "step": 3000
+ },
+ {
+ "epoch": 3.61,
+ "learning_rate": 2.135728542914172e-05,
+ "loss": 0.0926,
+ "step": 3020
+ },
+ {
+ "epoch": 3.64,
+ "learning_rate": 2.0026613439787093e-05,
+ "loss": 0.0902,
+ "step": 3040
+ },
+ {
+ "epoch": 3.66,
+ "learning_rate": 1.869594145043247e-05,
+ "loss": 0.0931,
+ "step": 3060
+ },
+ {
+ "epoch": 3.69,
+ "learning_rate": 1.7365269461077845e-05,
+ "loss": 0.0892,
+ "step": 3080
+ },
+ {
+ "epoch": 3.71,
+ "learning_rate": 1.603459747172322e-05,
+ "loss": 0.0912,
+ "step": 3100
+ },
+ {
+ "epoch": 3.73,
+ "learning_rate": 1.4703925482368597e-05,
+ "loss": 0.0897,
+ "step": 3120
+ },
+ {
+ "epoch": 3.76,
+ "learning_rate": 1.3373253493013973e-05,
+ "loss": 0.0912,
+ "step": 3140
+ },
+ {
+ "epoch": 3.78,
+ "learning_rate": 1.2042581503659348e-05,
+ "loss": 0.0918,
+ "step": 3160
+ },
+ {
+ "epoch": 3.8,
+ "learning_rate": 1.0711909514304724e-05,
+ "loss": 0.0861,
+ "step": 3180
+ },
+ {
+ "epoch": 3.83,
+ "learning_rate": 9.3812375249501e-06,
+ "loss": 0.091,
+ "step": 3200
+ },
+ {
+ "epoch": 3.85,
+ "learning_rate": 8.050565535595477e-06,
+ "loss": 0.0904,
+ "step": 3220
+ },
+ {
+ "epoch": 3.88,
+ "learning_rate": 6.719893546240852e-06,
+ "loss": 0.093,
+ "step": 3240
+ },
+ {
+ "epoch": 3.9,
+ "learning_rate": 5.3892215568862275e-06,
+ "loss": 0.0928,
+ "step": 3260
+ },
+ {
+ "epoch": 3.92,
+ "learning_rate": 4.058549567531603e-06,
+ "loss": 0.092,
+ "step": 3280
+ },
+ {
+ "epoch": 3.95,
+ "learning_rate": 2.7278775781769794e-06,
+ "loss": 0.0905,
+ "step": 3300
+ },
+ {
+ "epoch": 3.97,
+ "learning_rate": 1.3972055888223554e-06,
+ "loss": 0.0913,
+ "step": 3320
+ },
+ {
+ "epoch": 4.0,
+ "learning_rate": 6.65335994677312e-08,
+ "loss": 0.0927,
+ "step": 3340
+ }
+ ],
+ "logging_steps": 20,
+ "max_steps": 3340,
+ "num_train_epochs": 4,
+ "save_steps": 500,
+ "total_flos": 1.6896316557662093e+19,
+ "trial_name": null,
+ "trial_params": null
+ }
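The log records learning rate and loss every 20 steps (`logging_steps`: 20) across 3,340 steps ≈ 4 epochs; loss falls from 1.194 to ≈0.09. A sketch for inspecting the trajectory, plus a cross-check that the logged rates match the linear scheduler with 10% warmup from training_params.json below (local path assumed):

```python
import json

state = json.load(open("checkpoint-3340/trainer_state.json"))
log = state["log_history"]
print(log[0]["loss"], log[-1]["loss"])  # 1.194 -> 0.0927

# lr = 2e-4 with warmup_ratio = 0.1 over max_steps = 3340 gives 334 warmup
# steps, so the logged lr at step 20 should be 2e-4 * 20 / 334.
print(2e-4 * 20 / 334)  # ~1.1976e-05, matching the first entry above
```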
checkpoint-3340/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6785de9e2a1661ee757764f6a2b91cb3f0ae7ea7ad5287ef1583b7684e89c61a
+ size 4536
runs/Dec08_16-11-26_globaly/events.out.tfevents.1702073498.globaly.10646.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d89378dcd9dbb6a810097d00a00cda5a3bf41ccd66c1d94c6ffd42df50164a6b
+ size 31182
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<PAD>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "32000": {
+ "content": "<PAD>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": true,
+ "model_max_length": 1024,
+ "pad_token": "<PAD>",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6785de9e2a1661ee757764f6a2b91cb3f0ae7ea7ad5287ef1583b7684e89c61a
+ size 4536
training_params.json ADDED
@@ -0,0 +1 @@
+ {"model": "marianbasti/Llama-2-13b-fp16-alpaca-spanish", "data_path": "Globaly/class-165k", "project_name": "gl_autotrain", "train_split": "train", "valid_split": null, "text_column": "text", "rejected_text_column": "rejected", "lr": 0.0002, "epochs": 4, "batch_size": 2, "warmup_ratio": 0.1, "gradient_accumulation": 4, "optimizer": "adamw_torch", "scheduler": "linear", "weight_decay": 0.01, "max_grad_norm": 1.0, "seed": 42, "add_eos_token": false, "block_size": 1024, "use_peft": true, "lora_r": 16, "lora_alpha": 32, "lora_dropout": 0.05, "logging_steps": 20, "evaluation_strategy": "epoch", "save_total_limit": 1, "save_strategy": "epoch", "auto_find_batch_size": false, "fp16": true, "push_to_hub": true, "use_int8": false, "model_max_length": 1024, "repo_id": "Globaly/Globaly-1-es-classes-164k", "use_int4": true, "trainer": "default", "target_modules": null, "merge_adapter": false, "username": null, "use_flash_attention_2": false, "log": "tensorboard", "disable_gradient_checkpointing": false}