abcdabcd987 committed
Commit 2f45d94 · unverified · 1 Parent(s): 551b27d
README.md CHANGED
@@ -1,3 +1,39 @@
  ---
  license: apache-2.0
+ base_model: meta-llama/Llama-2-7b-hf
+ datasets:
+ - GEM/viggo
+ language:
+ - en
+ pipeline_tag: text2text-generation
+ tags:
+ - punica
+ - llama-factory
+ - lora
+ - generated_from_trainer
  ---
+
+ * Base Model: [Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf)
+ * LoRA target: `q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj`
+ * LoRA rank: 16
+
+ See <https://github.com/punica-ai/punica/tree/master/examples/finetune>
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 32
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - num_epochs: 4.0
+
+ ### Framework versions
+
+ - Transformers 4.34.1
+ - Pytorch 2.2.0.dev20230911+cu121
+ - Datasets 2.14.4
+ - Tokenizers 0.14.1
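A minimal sketch of loading this adapter with the `peft` library for Hugging Face-style inference (this is an assumption for illustration only; the README's own serving path is punica, and `"path/to/this-adapter"` is a placeholder for this repository's id or a local clone):

```python
# Sketch only: standard PEFT loading of the LoRA adapter described above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-hf",  # base model named in the README
    torch_dtype=torch.float16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")

# Placeholder path; point it at this repo or a local checkout of the adapter files.
model = PeftModel.from_pretrained(base, "path/to/this-adapter")
model.eval()

prompt = "<ViGGO meaning representation here>"  # GEM/viggo maps MRs to natural-language text
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```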
adapter_config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "meta-llama/Llama-2-7b-hf",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 16.0,
+   "lora_dropout": 0.1,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "k_proj",
+     "q_proj",
+     "down_proj",
+     "up_proj",
+     "o_proj",
+     "gate_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
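For reference, a hedged sketch of the `peft.LoraConfig` that corresponds to the JSON above (constructed here purely for illustration; the values are copied from `adapter_config.json`):

```python
from peft import LoraConfig

# Mirrors adapter_config.json: rank-16 LoRA on all attention and MLP projections.
lora_config = LoraConfig(
    r=16,
    lora_alpha=16.0,
    lora_dropout=0.1,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
    ],
)
```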
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a84fffc0cce7f7b82caceb8fe13a7a4d455001624f981701c0d90689a598ef98
+ size 160069834
all_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "epoch": 4.0,
+   "train_loss": 0.7563483387231826,
+   "train_runtime": 1265.7157,
+   "train_samples_per_second": 16.127,
+   "train_steps_per_second": 0.506
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "</s>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,40 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": false,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "</s>",
+   "padding_side": "right",
+   "sp_model_kwargs": {},
+   "split_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": true
+ }
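A quick usage sketch for the tokenizer files above (the local directory name is a placeholder; the point is that this config pins `pad_token` to `</s>` with right-side padding):

```python
from transformers import AutoTokenizer

# Placeholder path: a local clone of this repo, or its hub id.
tok = AutoTokenizer.from_pretrained("path/to/this-repo")
print(tok.pad_token, tok.padding_side)  # expected: </s> right

batch = tok(
    ["first example", "a somewhat longer second example"],
    padding=True,
    return_tensors="pt",
)
print(batch["input_ids"].shape)
```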
train_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "epoch": 4.0,
+   "train_loss": 0.7563483387231826,
+   "train_runtime": 1265.7157,
+   "train_samples_per_second": 16.127,
+   "train_steps_per_second": 0.506
+ }
trainer_log.jsonl ADDED
@@ -0,0 +1,65 @@
+ {"current_steps": 10, "total_steps": 640, "loss": 1.692, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.996988640512931e-05, "epoch": 0.06, "percentage": 1.56, "elapsed_time": "0:00:20", "remaining_time": "0:21:52"}
+ {"current_steps": 20, "total_steps": 640, "loss": 1.2657, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.987961816680492e-05, "epoch": 0.12, "percentage": 3.12, "elapsed_time": "0:00:41", "remaining_time": "0:21:15"}
+ {"current_steps": 30, "total_steps": 640, "loss": 1.055, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.972941274911953e-05, "epoch": 0.19, "percentage": 4.69, "elapsed_time": "0:01:01", "remaining_time": "0:20:42"}
+ {"current_steps": 40, "total_steps": 640, "loss": 0.9921, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.951963201008076e-05, "epoch": 0.25, "percentage": 6.25, "elapsed_time": "0:01:21", "remaining_time": "0:20:17"}
+ {"current_steps": 50, "total_steps": 640, "loss": 0.9406, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9250781329863606e-05, "epoch": 0.31, "percentage": 7.81, "elapsed_time": "0:01:40", "remaining_time": "0:19:46"}
+ {"current_steps": 60, "total_steps": 640, "loss": 0.8524, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.892350839330522e-05, "epoch": 0.38, "percentage": 9.38, "elapsed_time": "0:02:00", "remaining_time": "0:19:25"}
+ {"current_steps": 70, "total_steps": 640, "loss": 0.8515, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.853860162957552e-05, "epoch": 0.44, "percentage": 10.94, "elapsed_time": "0:02:20", "remaining_time": "0:19:01"}
+ {"current_steps": 80, "total_steps": 640, "loss": 0.8268, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.8096988312782174e-05, "epoch": 0.5, "percentage": 12.5, "elapsed_time": "0:02:40", "remaining_time": "0:18:41"}
+ {"current_steps": 90, "total_steps": 640, "loss": 0.8694, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.759973232808609e-05, "epoch": 0.56, "percentage": 14.06, "elapsed_time": "0:02:59", "remaining_time": "0:18:17"}
+ {"current_steps": 100, "total_steps": 640, "loss": 0.8054, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.7048031608708876e-05, "epoch": 0.62, "percentage": 15.62, "elapsed_time": "0:03:18", "remaining_time": "0:17:53"}
+ {"current_steps": 110, "total_steps": 640, "loss": 0.7789, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.6443215250006806e-05, "epoch": 0.69, "percentage": 17.19, "elapsed_time": "0:03:38", "remaining_time": "0:17:32"}
+ {"current_steps": 120, "total_steps": 640, "loss": 0.8142, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.5786740307563636e-05, "epoch": 0.75, "percentage": 18.75, "elapsed_time": "0:03:58", "remaining_time": "0:17:12"}
+ {"current_steps": 130, "total_steps": 640, "loss": 0.7686, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.508018828701612e-05, "epoch": 0.81, "percentage": 20.31, "elapsed_time": "0:04:17", "remaining_time": "0:16:49"}
+ {"current_steps": 140, "total_steps": 640, "loss": 0.7904, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.4325261334068426e-05, "epoch": 0.88, "percentage": 21.88, "elapsed_time": "0:04:37", "remaining_time": "0:16:30"}
+ {"current_steps": 150, "total_steps": 640, "loss": 0.8099, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.352377813387398e-05, "epoch": 0.94, "percentage": 23.44, "elapsed_time": "0:04:57", "remaining_time": "0:16:11"}
+ {"current_steps": 160, "total_steps": 640, "loss": 0.8211, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.267766952966369e-05, "epoch": 1.0, "percentage": 25.0, "elapsed_time": "0:05:15", "remaining_time": "0:15:47"}
+ {"current_steps": 170, "total_steps": 640, "loss": 0.7577, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.178897387117546e-05, "epoch": 1.06, "percentage": 26.56, "elapsed_time": "0:05:34", "remaining_time": "0:15:26"}
+ {"current_steps": 180, "total_steps": 640, "loss": 0.778, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.085983210409114e-05, "epoch": 1.12, "percentage": 28.12, "elapsed_time": "0:05:54", "remaining_time": "0:15:07"}
+ {"current_steps": 190, "total_steps": 640, "loss": 0.752, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.9892482612310836e-05, "epoch": 1.19, "percentage": 29.69, "elapsed_time": "0:06:14", "remaining_time": "0:14:47"}
+ {"current_steps": 200, "total_steps": 640, "loss": 0.7247, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.888925582549006e-05, "epoch": 1.25, "percentage": 31.25, "elapsed_time": "0:06:34", "remaining_time": "0:14:27"}
+ {"current_steps": 210, "total_steps": 640, "loss": 0.7593, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.785256860483054e-05, "epoch": 1.31, "percentage": 32.81, "elapsed_time": "0:06:55", "remaining_time": "0:14:10"}
+ {"current_steps": 220, "total_steps": 640, "loss": 0.7217, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.678491842064995e-05, "epoch": 1.38, "percentage": 34.38, "elapsed_time": "0:07:14", "remaining_time": "0:13:49"}
+ {"current_steps": 230, "total_steps": 640, "loss": 0.7509, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.568887733575706e-05, "epoch": 1.44, "percentage": 35.94, "elapsed_time": "0:07:34", "remaining_time": "0:13:29"}
+ {"current_steps": 240, "total_steps": 640, "loss": 0.7367, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.456708580912725e-05, "epoch": 1.5, "percentage": 37.5, "elapsed_time": "0:07:54", "remaining_time": "0:13:11"}
+ {"current_steps": 250, "total_steps": 640, "loss": 0.7168, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.34222463348055e-05, "epoch": 1.56, "percentage": 39.06, "elapsed_time": "0:08:14", "remaining_time": "0:12:51"}
+ {"current_steps": 260, "total_steps": 640, "loss": 0.7567, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.225711693136156e-05, "epoch": 1.62, "percentage": 40.62, "elapsed_time": "0:08:34", "remaining_time": "0:12:31"}
+ {"current_steps": 270, "total_steps": 640, "loss": 0.7534, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.10745044975816e-05, "epoch": 1.69, "percentage": 42.19, "elapsed_time": "0:08:53", "remaining_time": "0:12:11"}
+ {"current_steps": 280, "total_steps": 640, "loss": 0.7681, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.9877258050403212e-05, "epoch": 1.75, "percentage": 43.75, "elapsed_time": "0:09:13", "remaining_time": "0:11:51"}
+ {"current_steps": 290, "total_steps": 640, "loss": 0.7094, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.8668261861384045e-05, "epoch": 1.81, "percentage": 45.31, "elapsed_time": "0:09:32", "remaining_time": "0:11:30"}
+ {"current_steps": 300, "total_steps": 640, "loss": 0.7504, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.7450428508239024e-05, "epoch": 1.88, "percentage": 46.88, "elapsed_time": "0:09:51", "remaining_time": "0:11:10"}
+ {"current_steps": 310, "total_steps": 640, "loss": 0.715, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.6226691858185454e-05, "epoch": 1.94, "percentage": 48.44, "elapsed_time": "0:10:11", "remaining_time": "0:10:51"}
+ {"current_steps": 320, "total_steps": 640, "loss": 0.7322, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.5e-05, "epoch": 2.0, "percentage": 50.0, "elapsed_time": "0:10:30", "remaining_time": "0:10:30"}
+ {"current_steps": 330, "total_steps": 640, "loss": 0.7124, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.3773308141814552e-05, "epoch": 2.06, "percentage": 51.56, "elapsed_time": "0:10:50", "remaining_time": "0:10:10"}
+ {"current_steps": 340, "total_steps": 640, "loss": 0.6626, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.2549571491760986e-05, "epoch": 2.12, "percentage": 53.12, "elapsed_time": "0:11:10", "remaining_time": "0:09:51"}
+ {"current_steps": 350, "total_steps": 640, "loss": 0.7094, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.1331738138615958e-05, "epoch": 2.19, "percentage": 54.69, "elapsed_time": "0:11:29", "remaining_time": "0:09:31"}
+ {"current_steps": 360, "total_steps": 640, "loss": 0.7211, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.0122741949596797e-05, "epoch": 2.25, "percentage": 56.25, "elapsed_time": "0:11:49", "remaining_time": "0:09:11"}
+ {"current_steps": 370, "total_steps": 640, "loss": 0.7176, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.8925495502418406e-05, "epoch": 2.31, "percentage": 57.81, "elapsed_time": "0:12:09", "remaining_time": "0:08:52"}
+ {"current_steps": 380, "total_steps": 640, "loss": 0.7019, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.7742883068638447e-05, "epoch": 2.38, "percentage": 59.38, "elapsed_time": "0:12:28", "remaining_time": "0:08:32"}
+ {"current_steps": 390, "total_steps": 640, "loss": 0.6754, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.65777536651945e-05, "epoch": 2.44, "percentage": 60.94, "elapsed_time": "0:12:48", "remaining_time": "0:08:12"}
+ {"current_steps": 400, "total_steps": 640, "loss": 0.6761, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.5432914190872757e-05, "epoch": 2.5, "percentage": 62.5, "elapsed_time": "0:13:08", "remaining_time": "0:07:53"}
+ {"current_steps": 410, "total_steps": 640, "loss": 0.6527, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.4311122664242954e-05, "epoch": 2.56, "percentage": 64.06, "elapsed_time": "0:13:29", "remaining_time": "0:07:33"}
+ {"current_steps": 420, "total_steps": 640, "loss": 0.678, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.3215081579350058e-05, "epoch": 2.62, "percentage": 65.62, "elapsed_time": "0:13:48", "remaining_time": "0:07:14"}
+ {"current_steps": 430, "total_steps": 640, "loss": 0.6991, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.2147431395169459e-05, "epoch": 2.69, "percentage": 67.19, "elapsed_time": "0:14:08", "remaining_time": "0:06:54"}
+ {"current_steps": 440, "total_steps": 640, "loss": 0.6507, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.1110744174509952e-05, "epoch": 2.75, "percentage": 68.75, "elapsed_time": "0:14:28", "remaining_time": "0:06:34"}
+ {"current_steps": 450, "total_steps": 640, "loss": 0.7174, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0107517387689166e-05, "epoch": 2.81, "percentage": 70.31, "elapsed_time": "0:14:48", "remaining_time": "0:06:15"}
+ {"current_steps": 460, "total_steps": 640, "loss": 0.6967, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.140167895908867e-06, "epoch": 2.88, "percentage": 71.88, "elapsed_time": "0:15:08", "remaining_time": "0:05:55"}
+ {"current_steps": 470, "total_steps": 640, "loss": 0.6837, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.211026128824539e-06, "epoch": 2.94, "percentage": 73.44, "elapsed_time": "0:15:27", "remaining_time": "0:05:35"}
+ {"current_steps": 480, "total_steps": 640, "loss": 0.6855, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.3223304703363135e-06, "epoch": 3.0, "percentage": 75.0, "elapsed_time": "0:15:46", "remaining_time": "0:05:15"}
+ {"current_steps": 490, "total_steps": 640, "loss": 0.6642, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.476221866126029e-06, "epoch": 3.06, "percentage": 76.56, "elapsed_time": "0:16:06", "remaining_time": "0:04:55"}
+ {"current_steps": 500, "total_steps": 640, "loss": 0.6427, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.674738665931575e-06, "epoch": 3.12, "percentage": 78.12, "elapsed_time": "0:16:26", "remaining_time": "0:04:36"}
+ {"current_steps": 510, "total_steps": 640, "loss": 0.6512, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.91981171298388e-06, "epoch": 3.19, "percentage": 79.69, "elapsed_time": "0:16:46", "remaining_time": "0:04:16"}
+ {"current_steps": 520, "total_steps": 640, "loss": 0.677, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.213259692436367e-06, "epoch": 3.25, "percentage": 81.25, "elapsed_time": "0:17:05", "remaining_time": "0:03:56"}
+ {"current_steps": 530, "total_steps": 640, "loss": 0.6549, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.5567847499932e-06, "epoch": 3.31, "percentage": 82.81, "elapsed_time": "0:17:26", "remaining_time": "0:03:37"}
+ {"current_steps": 540, "total_steps": 640, "loss": 0.668, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.9519683912911266e-06, "epoch": 3.38, "percentage": 84.38, "elapsed_time": "0:17:46", "remaining_time": "0:03:17"}
+ {"current_steps": 550, "total_steps": 640, "loss": 0.6617, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.4002676719139166e-06, "epoch": 3.44, "percentage": 85.94, "elapsed_time": "0:18:05", "remaining_time": "0:02:57"}
+ {"current_steps": 560, "total_steps": 640, "loss": 0.6543, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.9030116872178316e-06, "epoch": 3.5, "percentage": 87.5, "elapsed_time": "0:18:24", "remaining_time": "0:02:37"}
+ {"current_steps": 570, "total_steps": 640, "loss": 0.6546, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.4613983704244826e-06, "epoch": 3.56, "percentage": 89.06, "elapsed_time": "0:18:44", "remaining_time": "0:02:18"}
+ {"current_steps": 580, "total_steps": 640, "loss": 0.6451, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0764916066947794e-06, "epoch": 3.62, "percentage": 90.62, "elapsed_time": "0:19:04", "remaining_time": "0:01:58"}
+ {"current_steps": 590, "total_steps": 640, "loss": 0.6726, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.492186701364007e-07, "epoch": 3.69, "percentage": 92.19, "elapsed_time": "0:19:24", "remaining_time": "0:01:38"}
+ {"current_steps": 600, "total_steps": 640, "loss": 0.6451, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.803679899192392e-07, "epoch": 3.75, "percentage": 93.75, "elapsed_time": "0:19:44", "remaining_time": "0:01:18"}
+ {"current_steps": 610, "total_steps": 640, "loss": 0.6687, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.705872508804747e-07, "epoch": 3.81, "percentage": 95.31, "elapsed_time": "0:20:03", "remaining_time": "0:00:59"}
+ {"current_steps": 620, "total_steps": 640, "loss": 0.644, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.2038183319507955e-07, "epoch": 3.88, "percentage": 96.88, "elapsed_time": "0:20:23", "remaining_time": "0:00:39"}
+ {"current_steps": 630, "total_steps": 640, "loss": 0.6766, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.011359487068987e-08, "epoch": 3.94, "percentage": 98.44, "elapsed_time": "0:20:43", "remaining_time": "0:00:19"}
+ {"current_steps": 640, "total_steps": 640, "loss": 0.6681, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.0, "epoch": 4.0, "percentage": 100.0, "elapsed_time": "0:21:02", "remaining_time": "0:00:00"}
+ {"current_steps": 640, "total_steps": 640, "loss": null, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 4.0, "percentage": 100.0, "elapsed_time": "0:21:02", "remaining_time": "0:00:00"}
trainer_state.json ADDED
@@ -0,0 +1,412 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 4.0,
+   "eval_steps": 500,
+   "global_step": 640,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.06,
+       "learning_rate": 4.996988640512931e-05,
+       "loss": 1.692,
+       "step": 10
+     },
+     {
+       "epoch": 0.12,
+       "learning_rate": 4.987961816680492e-05,
+       "loss": 1.2657,
+       "step": 20
+     },
+     {
+       "epoch": 0.19,
+       "learning_rate": 4.972941274911953e-05,
+       "loss": 1.055,
+       "step": 30
+     },
+     {
+       "epoch": 0.25,
+       "learning_rate": 4.951963201008076e-05,
+       "loss": 0.9921,
+       "step": 40
+     },
+     {
+       "epoch": 0.31,
+       "learning_rate": 4.9250781329863606e-05,
+       "loss": 0.9406,
+       "step": 50
+     },
+     {
+       "epoch": 0.38,
+       "learning_rate": 4.892350839330522e-05,
+       "loss": 0.8524,
+       "step": 60
+     },
+     {
+       "epoch": 0.44,
+       "learning_rate": 4.853860162957552e-05,
+       "loss": 0.8515,
+       "step": 70
+     },
+     {
+       "epoch": 0.5,
+       "learning_rate": 4.8096988312782174e-05,
+       "loss": 0.8268,
+       "step": 80
+     },
+     {
+       "epoch": 0.56,
+       "learning_rate": 4.759973232808609e-05,
+       "loss": 0.8694,
+       "step": 90
+     },
+     {
+       "epoch": 0.62,
+       "learning_rate": 4.7048031608708876e-05,
+       "loss": 0.8054,
+       "step": 100
+     },
+     {
+       "epoch": 0.69,
+       "learning_rate": 4.6443215250006806e-05,
+       "loss": 0.7789,
+       "step": 110
+     },
+     {
+       "epoch": 0.75,
+       "learning_rate": 4.5786740307563636e-05,
+       "loss": 0.8142,
+       "step": 120
+     },
+     {
+       "epoch": 0.81,
+       "learning_rate": 4.508018828701612e-05,
+       "loss": 0.7686,
+       "step": 130
+     },
+     {
+       "epoch": 0.88,
+       "learning_rate": 4.4325261334068426e-05,
+       "loss": 0.7904,
+       "step": 140
+     },
+     {
+       "epoch": 0.94,
+       "learning_rate": 4.352377813387398e-05,
+       "loss": 0.8099,
+       "step": 150
+     },
+     {
+       "epoch": 1.0,
+       "learning_rate": 4.267766952966369e-05,
+       "loss": 0.8211,
+       "step": 160
+     },
+     {
+       "epoch": 1.06,
+       "learning_rate": 4.178897387117546e-05,
+       "loss": 0.7577,
+       "step": 170
+     },
+     {
+       "epoch": 1.12,
+       "learning_rate": 4.085983210409114e-05,
+       "loss": 0.778,
+       "step": 180
+     },
+     {
+       "epoch": 1.19,
+       "learning_rate": 3.9892482612310836e-05,
+       "loss": 0.752,
+       "step": 190
+     },
+     {
+       "epoch": 1.25,
+       "learning_rate": 3.888925582549006e-05,
+       "loss": 0.7247,
+       "step": 200
+     },
+     {
+       "epoch": 1.31,
+       "learning_rate": 3.785256860483054e-05,
+       "loss": 0.7593,
+       "step": 210
+     },
+     {
+       "epoch": 1.38,
+       "learning_rate": 3.678491842064995e-05,
+       "loss": 0.7217,
+       "step": 220
+     },
+     {
+       "epoch": 1.44,
+       "learning_rate": 3.568887733575706e-05,
+       "loss": 0.7509,
+       "step": 230
+     },
+     {
+       "epoch": 1.5,
+       "learning_rate": 3.456708580912725e-05,
+       "loss": 0.7367,
+       "step": 240
+     },
+     {
+       "epoch": 1.56,
+       "learning_rate": 3.34222463348055e-05,
+       "loss": 0.7168,
+       "step": 250
+     },
+     {
+       "epoch": 1.62,
+       "learning_rate": 3.225711693136156e-05,
+       "loss": 0.7567,
+       "step": 260
+     },
+     {
+       "epoch": 1.69,
+       "learning_rate": 3.10745044975816e-05,
+       "loss": 0.7534,
+       "step": 270
+     },
+     {
+       "epoch": 1.75,
+       "learning_rate": 2.9877258050403212e-05,
+       "loss": 0.7681,
+       "step": 280
+     },
+     {
+       "epoch": 1.81,
+       "learning_rate": 2.8668261861384045e-05,
+       "loss": 0.7094,
+       "step": 290
+     },
+     {
+       "epoch": 1.88,
+       "learning_rate": 2.7450428508239024e-05,
+       "loss": 0.7504,
+       "step": 300
+     },
+     {
+       "epoch": 1.94,
+       "learning_rate": 2.6226691858185454e-05,
+       "loss": 0.715,
+       "step": 310
+     },
+     {
+       "epoch": 2.0,
+       "learning_rate": 2.5e-05,
+       "loss": 0.7322,
+       "step": 320
+     },
+     {
+       "epoch": 2.06,
+       "learning_rate": 2.3773308141814552e-05,
+       "loss": 0.7124,
+       "step": 330
+     },
+     {
+       "epoch": 2.12,
+       "learning_rate": 2.2549571491760986e-05,
+       "loss": 0.6626,
+       "step": 340
+     },
+     {
+       "epoch": 2.19,
+       "learning_rate": 2.1331738138615958e-05,
+       "loss": 0.7094,
+       "step": 350
+     },
+     {
+       "epoch": 2.25,
+       "learning_rate": 2.0122741949596797e-05,
+       "loss": 0.7211,
+       "step": 360
+     },
+     {
+       "epoch": 2.31,
+       "learning_rate": 1.8925495502418406e-05,
+       "loss": 0.7176,
+       "step": 370
+     },
+     {
+       "epoch": 2.38,
+       "learning_rate": 1.7742883068638447e-05,
+       "loss": 0.7019,
+       "step": 380
+     },
+     {
+       "epoch": 2.44,
+       "learning_rate": 1.65777536651945e-05,
+       "loss": 0.6754,
+       "step": 390
+     },
+     {
+       "epoch": 2.5,
+       "learning_rate": 1.5432914190872757e-05,
+       "loss": 0.6761,
+       "step": 400
+     },
+     {
+       "epoch": 2.56,
+       "learning_rate": 1.4311122664242954e-05,
+       "loss": 0.6527,
+       "step": 410
+     },
+     {
+       "epoch": 2.62,
+       "learning_rate": 1.3215081579350058e-05,
+       "loss": 0.678,
+       "step": 420
+     },
+     {
+       "epoch": 2.69,
+       "learning_rate": 1.2147431395169459e-05,
+       "loss": 0.6991,
+       "step": 430
+     },
+     {
+       "epoch": 2.75,
+       "learning_rate": 1.1110744174509952e-05,
+       "loss": 0.6507,
+       "step": 440
+     },
+     {
+       "epoch": 2.81,
+       "learning_rate": 1.0107517387689166e-05,
+       "loss": 0.7174,
+       "step": 450
+     },
+     {
+       "epoch": 2.88,
+       "learning_rate": 9.140167895908867e-06,
+       "loss": 0.6967,
+       "step": 460
+     },
+     {
+       "epoch": 2.94,
+       "learning_rate": 8.211026128824539e-06,
+       "loss": 0.6837,
+       "step": 470
+     },
+     {
+       "epoch": 3.0,
+       "learning_rate": 7.3223304703363135e-06,
+       "loss": 0.6855,
+       "step": 480
+     },
+     {
+       "epoch": 3.06,
+       "learning_rate": 6.476221866126029e-06,
+       "loss": 0.6642,
+       "step": 490
+     },
+     {
+       "epoch": 3.12,
+       "learning_rate": 5.674738665931575e-06,
+       "loss": 0.6427,
+       "step": 500
+     },
+     {
+       "epoch": 3.19,
+       "learning_rate": 4.91981171298388e-06,
+       "loss": 0.6512,
+       "step": 510
+     },
+     {
+       "epoch": 3.25,
+       "learning_rate": 4.213259692436367e-06,
+       "loss": 0.677,
+       "step": 520
+     },
+     {
+       "epoch": 3.31,
+       "learning_rate": 3.5567847499932e-06,
+       "loss": 0.6549,
+       "step": 530
+     },
+     {
+       "epoch": 3.38,
+       "learning_rate": 2.9519683912911266e-06,
+       "loss": 0.668,
+       "step": 540
+     },
+     {
+       "epoch": 3.44,
+       "learning_rate": 2.4002676719139166e-06,
+       "loss": 0.6617,
+       "step": 550
+     },
+     {
+       "epoch": 3.5,
+       "learning_rate": 1.9030116872178316e-06,
+       "loss": 0.6543,
+       "step": 560
+     },
+     {
+       "epoch": 3.56,
+       "learning_rate": 1.4613983704244826e-06,
+       "loss": 0.6546,
+       "step": 570
+     },
+     {
+       "epoch": 3.62,
+       "learning_rate": 1.0764916066947794e-06,
+       "loss": 0.6451,
+       "step": 580
+     },
+     {
+       "epoch": 3.69,
+       "learning_rate": 7.492186701364007e-07,
+       "loss": 0.6726,
+       "step": 590
+     },
+     {
+       "epoch": 3.75,
+       "learning_rate": 4.803679899192392e-07,
+       "loss": 0.6451,
+       "step": 600
+     },
+     {
+       "epoch": 3.81,
+       "learning_rate": 2.705872508804747e-07,
+       "loss": 0.6687,
+       "step": 610
+     },
+     {
+       "epoch": 3.88,
+       "learning_rate": 1.2038183319507955e-07,
+       "loss": 0.644,
+       "step": 620
+     },
+     {
+       "epoch": 3.94,
+       "learning_rate": 3.011359487068987e-08,
+       "loss": 0.6766,
+       "step": 630
+     },
+     {
+       "epoch": 4.0,
+       "learning_rate": 0.0,
+       "loss": 0.6681,
+       "step": 640
+     },
+     {
+       "epoch": 4.0,
+       "step": 640,
+       "total_flos": 1.3862359554185626e+17,
+       "train_loss": 0.7563483387231826,
+       "train_runtime": 1265.7157,
+       "train_samples_per_second": 16.127,
+       "train_steps_per_second": 0.506
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 640,
+   "num_train_epochs": 4,
+   "save_steps": 200,
+   "total_flos": 1.3862359554185626e+17,
+   "trial_name": null,
+   "trial_params": null
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8964ebf0d40688644816d8995a9f6e8d6c6496768317c70d553f9ab98f59e673
+ size 4664
training_loss.png ADDED
viggo-r16.punica.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:21c5ede53e3b3015eb9fc6d4f861baa9b5aa8fd551e11fbe2fe9d0fa67c855d2
+ size 159912654