akhilfau commited on
Commit
b0af228
·
verified ·
1 Parent(s): 1f70c01

Fine-tuned smolLM2-360M with LoRA on camel-ai/physics

Browse files
README.md ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: peft
3
+ license: apache-2.0
4
+ base_model: HuggingFaceTB/SmolLM2-360M
5
+ tags:
6
+ - generated_from_trainer
7
+ model-index:
8
+ - name: fine-tuned-smolLM2-360M-with-LoRA-on-camel-ai-physics
9
+ results: []
10
+ ---
11
+
12
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
13
+ should probably proofread and complete it, then remove this comment. -->
14
+
15
+ # fine-tuned-smolLM2-360M-with-LoRA-on-camel-ai-physics
16
+
17
+ This model is a fine-tuned version of [HuggingFaceTB/SmolLM2-360M](https://huggingface.co/HuggingFaceTB/SmolLM2-360M) on the [camel-ai/physics](https://huggingface.co/datasets/camel-ai/physics) dataset.
18
+ It achieves the following results on the evaluation set:
19
+ - Loss: 0.7558
20
+
21
+ ## Model description
22
+
23
+ More information needed
24
+
25
+ ## Intended uses & limitations
26
+
27
+ More information needed
28
+
29
+ ## Training and evaluation data
30
+
31
+ More information needed
32
+
33
+ ## Training procedure
34
+
35
+ ### Training hyperparameters
36
+
37
+ The following hyperparameters were used during training:
38
+ - learning_rate: 0.0005
39
+ - train_batch_size: 4
40
+ - eval_batch_size: 4
41
+ - seed: 42
42
+ - optimizer: AdamW (torch implementation) with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
43
+ - lr_scheduler_type: cosine
44
+ - num_epochs: 8
45
+
46
+ ### Training results
47
+
48
+ | Training Loss | Epoch | Step | Validation Loss |
49
+ |:-------------:|:-----:|:-----:|:---------------:|
50
+ | 0.7878 | 1.0 | 4000 | 0.8105 |
51
+ | 0.7877 | 2.0 | 8000 | 0.7847 |
52
+ | 0.7671 | 3.0 | 12000 | 0.7703 |
53
+ | 0.7233 | 4.0 | 16000 | 0.7630 |
54
+ | 0.7043 | 5.0 | 20000 | 0.7583 |
55
+ | 0.6809 | 6.0 | 24000 | 0.7562 |
56
+ | 0.6795 | 7.0 | 28000 | 0.7553 |
57
+ | 0.6832 | 8.0 | 32000 | 0.7558 |
58
+
59
+
60
+ ### Framework versions
61
+
62
+ - PEFT 0.13.2
63
+ - Transformers 4.46.2
64
+ - Pytorch 2.4.1+cu121
65
+ - Datasets 3.1.0
66
+ - Tokenizers 0.20.3
adapter_config.json ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "HuggingFaceTB/SmolLM2-360M",
5
+ "bias": "none",
6
+ "fan_in_fan_out": false,
7
+ "inference_mode": true,
8
+ "init_lora_weights": true,
9
+ "layer_replication": null,
10
+ "layers_pattern": null,
11
+ "layers_to_transform": null,
12
+ "loftq_config": {},
13
+ "lora_alpha": 32,
14
+ "lora_dropout": 0.1,
15
+ "megatron_config": null,
16
+ "megatron_core": "megatron.core",
17
+ "modules_to_save": null,
18
+ "peft_type": "LORA",
19
+ "r": 16,
20
+ "rank_pattern": {},
21
+ "revision": null,
22
+ "target_modules": [
23
+ "q_proj",
24
+ "v_proj"
25
+ ],
26
+ "task_type": "CAUSAL_LM",
27
+ "use_dora": false,
28
+ "use_rslora": false
29
+ }
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4cebedf212d8ce92fd69558e99aa4d71ceaea1946f2a638874f80ed5cfee6b93
3
+ size 6570456
config.json ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "LlamaForCausalLM"
4
+ ],
5
+ "attention_bias": false,
6
+ "attention_dropout": 0.0,
7
+ "bos_token_id": 0,
8
+ "eos_token_id": 0,
9
+ "hidden_act": "silu",
10
+ "hidden_size": 576,
11
+ "initializer_range": 0.041666666666666664,
12
+ "intermediate_size": 1536,
13
+ "is_llama_config": true,
14
+ "max_position_embeddings": 8192,
15
+ "model_type": "llama",
16
+ "num_attention_heads": 9,
17
+ "num_hidden_layers": 30,
18
+ "num_key_value_heads": 3,
19
+ "pretraining_tp": 1,
20
+ "rms_norm_eps": 1e-05,
21
+ "rope_interleaved": false,
22
+ "rope_scaling": null,
23
+ "rope_theta": 100000,
24
+ "tie_word_embeddings": true,
25
+ "torch_dtype": "bfloat16",
26
+ "transformers_version": "4.40.1",
27
+ "use_cache": true,
28
+ "vocab_size": 49152
29
+ }
optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:db65933592695bde73750222b24de7fa5d29400d68697842430776a26199c967
3
+ size 13214842
rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df6ef5f513fcf0bd842259d31d74b21a1559ce1cfff1f8a93aafa828ebe97a6d
3
+ size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:43763b2d379592a176dbd01e86a23eb5cd34a442cd18e3777a60081b417051ca
3
+ size 1064
trainer_state.json ADDED
@@ -0,0 +1,2329 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 8.0,
5
+ "eval_steps": 500,
6
+ "global_step": 32000,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.025,
13
+ "grad_norm": 0.15203669667243958,
14
+ "learning_rate": 0.000499987952239832,
15
+ "loss": 1.1745,
16
+ "step": 100
17
+ },
18
+ {
19
+ "epoch": 0.05,
20
+ "grad_norm": 0.14502686262130737,
21
+ "learning_rate": 0.0004999518101205162,
22
+ "loss": 0.907,
23
+ "step": 200
24
+ },
25
+ {
26
+ "epoch": 0.075,
27
+ "grad_norm": 0.1642378270626068,
28
+ "learning_rate": 0.0004998915771255053,
29
+ "loss": 0.8858,
30
+ "step": 300
31
+ },
32
+ {
33
+ "epoch": 0.1,
34
+ "grad_norm": 0.15131665766239166,
35
+ "learning_rate": 0.0004998072590601808,
36
+ "loss": 0.8573,
37
+ "step": 400
38
+ },
39
+ {
40
+ "epoch": 0.125,
41
+ "grad_norm": 0.17200812697410583,
42
+ "learning_rate": 0.0004996988640512931,
43
+ "loss": 0.8833,
44
+ "step": 500
45
+ },
46
+ {
47
+ "epoch": 0.15,
48
+ "grad_norm": 0.14832615852355957,
49
+ "learning_rate": 0.000499566402546179,
50
+ "loss": 0.8689,
51
+ "step": 600
52
+ },
53
+ {
54
+ "epoch": 0.175,
55
+ "grad_norm": 0.18304194509983063,
56
+ "learning_rate": 0.0004994098873117539,
57
+ "loss": 0.8837,
58
+ "step": 700
59
+ },
60
+ {
61
+ "epoch": 0.2,
62
+ "grad_norm": 0.16831472516059875,
63
+ "learning_rate": 0.000499229333433282,
64
+ "loss": 0.8673,
65
+ "step": 800
66
+ },
67
+ {
68
+ "epoch": 0.225,
69
+ "grad_norm": 0.17680132389068604,
70
+ "learning_rate": 0.0004990247583129218,
71
+ "loss": 0.8764,
72
+ "step": 900
73
+ },
74
+ {
75
+ "epoch": 0.25,
76
+ "grad_norm": 0.15322180092334747,
77
+ "learning_rate": 0.0004987961816680492,
78
+ "loss": 0.8304,
79
+ "step": 1000
80
+ },
81
+ {
82
+ "epoch": 0.275,
83
+ "grad_norm": 0.16825291514396667,
84
+ "learning_rate": 0.0004985436255293571,
85
+ "loss": 0.8547,
86
+ "step": 1100
87
+ },
88
+ {
89
+ "epoch": 0.3,
90
+ "grad_norm": 0.18319116532802582,
91
+ "learning_rate": 0.0004982671142387316,
92
+ "loss": 0.849,
93
+ "step": 1200
94
+ },
95
+ {
96
+ "epoch": 0.325,
97
+ "grad_norm": 0.18427041172981262,
98
+ "learning_rate": 0.0004979666744469065,
99
+ "loss": 0.8537,
100
+ "step": 1300
101
+ },
102
+ {
103
+ "epoch": 0.35,
104
+ "grad_norm": 0.1925463080406189,
105
+ "learning_rate": 0.0004976423351108943,
106
+ "loss": 0.8416,
107
+ "step": 1400
108
+ },
109
+ {
110
+ "epoch": 0.375,
111
+ "grad_norm": 0.15338857471942902,
112
+ "learning_rate": 0.0004972941274911952,
113
+ "loss": 0.8214,
114
+ "step": 1500
115
+ },
116
+ {
117
+ "epoch": 0.4,
118
+ "grad_norm": 0.16722452640533447,
119
+ "learning_rate": 0.0004969220851487844,
120
+ "loss": 0.8348,
121
+ "step": 1600
122
+ },
123
+ {
124
+ "epoch": 0.425,
125
+ "grad_norm": 0.1711965799331665,
126
+ "learning_rate": 0.0004965262439418772,
127
+ "loss": 0.846,
128
+ "step": 1700
129
+ },
130
+ {
131
+ "epoch": 0.45,
132
+ "grad_norm": 0.19129395484924316,
133
+ "learning_rate": 0.0004961066420224729,
134
+ "loss": 0.852,
135
+ "step": 1800
136
+ },
137
+ {
138
+ "epoch": 0.475,
139
+ "grad_norm": 0.17632591724395752,
140
+ "learning_rate": 0.000495663319832678,
141
+ "loss": 0.8424,
142
+ "step": 1900
143
+ },
144
+ {
145
+ "epoch": 0.5,
146
+ "grad_norm": 0.16157850623130798,
147
+ "learning_rate": 0.0004951963201008077,
148
+ "loss": 0.8428,
149
+ "step": 2000
150
+ },
151
+ {
152
+ "epoch": 0.525,
153
+ "grad_norm": 0.20878523588180542,
154
+ "learning_rate": 0.0004947056878372681,
155
+ "loss": 0.8362,
156
+ "step": 2100
157
+ },
158
+ {
159
+ "epoch": 0.55,
160
+ "grad_norm": 0.22794917225837708,
161
+ "learning_rate": 0.0004941914703302181,
162
+ "loss": 0.829,
163
+ "step": 2200
164
+ },
165
+ {
166
+ "epoch": 0.575,
167
+ "grad_norm": 0.21379908919334412,
168
+ "learning_rate": 0.0004936537171410112,
169
+ "loss": 0.8569,
170
+ "step": 2300
171
+ },
172
+ {
173
+ "epoch": 0.6,
174
+ "grad_norm": 0.18703673779964447,
175
+ "learning_rate": 0.0004930924800994192,
176
+ "loss": 0.829,
177
+ "step": 2400
178
+ },
179
+ {
180
+ "epoch": 0.625,
181
+ "grad_norm": 0.1804085075855255,
182
+ "learning_rate": 0.000492507813298636,
183
+ "loss": 0.8453,
184
+ "step": 2500
185
+ },
186
+ {
187
+ "epoch": 0.65,
188
+ "grad_norm": 0.18967971205711365,
189
+ "learning_rate": 0.0004918997730900649,
190
+ "loss": 0.8465,
191
+ "step": 2600
192
+ },
193
+ {
194
+ "epoch": 0.675,
195
+ "grad_norm": 0.1845272034406662,
196
+ "learning_rate": 0.0004912684180778869,
197
+ "loss": 0.829,
198
+ "step": 2700
199
+ },
200
+ {
201
+ "epoch": 0.7,
202
+ "grad_norm": 0.21101725101470947,
203
+ "learning_rate": 0.0004906138091134118,
204
+ "loss": 0.8145,
205
+ "step": 2800
206
+ },
207
+ {
208
+ "epoch": 0.725,
209
+ "grad_norm": 0.21849285066127777,
210
+ "learning_rate": 0.0004899360092892143,
211
+ "loss": 0.8293,
212
+ "step": 2900
213
+ },
214
+ {
215
+ "epoch": 0.75,
216
+ "grad_norm": 0.2142125815153122,
217
+ "learning_rate": 0.0004892350839330522,
218
+ "loss": 0.8229,
219
+ "step": 3000
220
+ },
221
+ {
222
+ "epoch": 0.775,
223
+ "grad_norm": 0.24221359193325043,
224
+ "learning_rate": 0.0004885111006015701,
225
+ "loss": 0.8163,
226
+ "step": 3100
227
+ },
228
+ {
229
+ "epoch": 0.8,
230
+ "grad_norm": 0.1932164877653122,
231
+ "learning_rate": 0.0004877641290737884,
232
+ "loss": 0.8137,
233
+ "step": 3200
234
+ },
235
+ {
236
+ "epoch": 0.825,
237
+ "grad_norm": 0.1931457221508026,
238
+ "learning_rate": 0.0004869942413443776,
239
+ "loss": 0.8096,
240
+ "step": 3300
241
+ },
242
+ {
243
+ "epoch": 0.85,
244
+ "grad_norm": 0.20192202925682068,
245
+ "learning_rate": 0.00048620151161671955,
246
+ "loss": 0.8232,
247
+ "step": 3400
248
+ },
249
+ {
250
+ "epoch": 0.875,
251
+ "grad_norm": 0.21490222215652466,
252
+ "learning_rate": 0.0004853860162957552,
253
+ "loss": 0.8225,
254
+ "step": 3500
255
+ },
256
+ {
257
+ "epoch": 0.9,
258
+ "grad_norm": 0.2179287225008011,
259
+ "learning_rate": 0.0004845478339806211,
260
+ "loss": 0.8056,
261
+ "step": 3600
262
+ },
263
+ {
264
+ "epoch": 0.925,
265
+ "grad_norm": 0.24655170738697052,
266
+ "learning_rate": 0.0004836870454570731,
267
+ "loss": 0.8215,
268
+ "step": 3700
269
+ },
270
+ {
271
+ "epoch": 0.95,
272
+ "grad_norm": 0.20784540474414825,
273
+ "learning_rate": 0.00048280373368970086,
274
+ "loss": 0.8029,
275
+ "step": 3800
276
+ },
277
+ {
278
+ "epoch": 0.975,
279
+ "grad_norm": 0.22836102545261383,
280
+ "learning_rate": 0.000481897983813931,
281
+ "loss": 0.8205,
282
+ "step": 3900
283
+ },
284
+ {
285
+ "epoch": 1.0,
286
+ "grad_norm": 0.2156483232975006,
287
+ "learning_rate": 0.0004809698831278217,
288
+ "loss": 0.7878,
289
+ "step": 4000
290
+ },
291
+ {
292
+ "epoch": 1.0,
293
+ "eval_loss": 0.8105162382125854,
294
+ "eval_runtime": 184.0402,
295
+ "eval_samples_per_second": 21.734,
296
+ "eval_steps_per_second": 5.434,
297
+ "step": 4000
298
+ },
299
+ {
300
+ "epoch": 1.025,
301
+ "grad_norm": 0.2116573601961136,
302
+ "learning_rate": 0.00048001952108364876,
303
+ "loss": 0.7884,
304
+ "step": 4100
305
+ },
306
+ {
307
+ "epoch": 1.05,
308
+ "grad_norm": 0.21137504279613495,
309
+ "learning_rate": 0.00047904698927928404,
310
+ "loss": 0.787,
311
+ "step": 4200
312
+ },
313
+ {
314
+ "epoch": 1.075,
315
+ "grad_norm": 0.21804390847682953,
316
+ "learning_rate": 0.0004780523814493669,
317
+ "loss": 0.7977,
318
+ "step": 4300
319
+ },
320
+ {
321
+ "epoch": 1.1,
322
+ "grad_norm": 0.21372774243354797,
323
+ "learning_rate": 0.00047703579345627036,
324
+ "loss": 0.7866,
325
+ "step": 4400
326
+ },
327
+ {
328
+ "epoch": 1.125,
329
+ "grad_norm": 0.24784786999225616,
330
+ "learning_rate": 0.0004759973232808609,
331
+ "loss": 0.7764,
332
+ "step": 4500
333
+ },
334
+ {
335
+ "epoch": 1.15,
336
+ "grad_norm": 0.2134639024734497,
337
+ "learning_rate": 0.0004749370710130554,
338
+ "loss": 0.8029,
339
+ "step": 4600
340
+ },
341
+ {
342
+ "epoch": 1.175,
343
+ "grad_norm": 0.21301183104515076,
344
+ "learning_rate": 0.0004738551388421742,
345
+ "loss": 0.7936,
346
+ "step": 4700
347
+ },
348
+ {
349
+ "epoch": 1.2,
350
+ "grad_norm": 0.21645288169384003,
351
+ "learning_rate": 0.00047275163104709196,
352
+ "loss": 0.7835,
353
+ "step": 4800
354
+ },
355
+ {
356
+ "epoch": 1.225,
357
+ "grad_norm": 0.1981271356344223,
358
+ "learning_rate": 0.00047162665398618666,
359
+ "loss": 0.7951,
360
+ "step": 4900
361
+ },
362
+ {
363
+ "epoch": 1.25,
364
+ "grad_norm": 0.24540849030017853,
365
+ "learning_rate": 0.00047048031608708875,
366
+ "loss": 0.7797,
367
+ "step": 5000
368
+ },
369
+ {
370
+ "epoch": 1.275,
371
+ "grad_norm": 0.22519993782043457,
372
+ "learning_rate": 0.00046931272783623106,
373
+ "loss": 0.789,
374
+ "step": 5100
375
+ },
376
+ {
377
+ "epoch": 1.3,
378
+ "grad_norm": 0.2160399854183197,
379
+ "learning_rate": 0.0004681240017681993,
380
+ "loss": 0.8028,
381
+ "step": 5200
382
+ },
383
+ {
384
+ "epoch": 1.325,
385
+ "grad_norm": 0.20990680158138275,
386
+ "learning_rate": 0.00046691425245488607,
387
+ "loss": 0.7795,
388
+ "step": 5300
389
+ },
390
+ {
391
+ "epoch": 1.35,
392
+ "grad_norm": 0.23152625560760498,
393
+ "learning_rate": 0.00046568359649444796,
394
+ "loss": 0.7862,
395
+ "step": 5400
396
+ },
397
+ {
398
+ "epoch": 1.375,
399
+ "grad_norm": 0.27092477679252625,
400
+ "learning_rate": 0.00046443215250006805,
401
+ "loss": 0.7895,
402
+ "step": 5500
403
+ },
404
+ {
405
+ "epoch": 1.4,
406
+ "grad_norm": 0.2427772879600525,
407
+ "learning_rate": 0.00046316004108852305,
408
+ "loss": 0.8032,
409
+ "step": 5600
410
+ },
411
+ {
412
+ "epoch": 1.425,
413
+ "grad_norm": 0.24519051611423492,
414
+ "learning_rate": 0.0004618673848685586,
415
+ "loss": 0.7728,
416
+ "step": 5700
417
+ },
418
+ {
419
+ "epoch": 1.45,
420
+ "grad_norm": 0.2481563240289688,
421
+ "learning_rate": 0.0004605543084290716,
422
+ "loss": 0.7886,
423
+ "step": 5800
424
+ },
425
+ {
426
+ "epoch": 1.475,
427
+ "grad_norm": 0.24766775965690613,
428
+ "learning_rate": 0.0004592209383271023,
429
+ "loss": 0.7931,
430
+ "step": 5900
431
+ },
432
+ {
433
+ "epoch": 1.5,
434
+ "grad_norm": 0.23152729868888855,
435
+ "learning_rate": 0.00045786740307563633,
436
+ "loss": 0.7922,
437
+ "step": 6000
438
+ },
439
+ {
440
+ "epoch": 1.525,
441
+ "grad_norm": 0.2345590442419052,
442
+ "learning_rate": 0.0004564938331312183,
443
+ "loss": 0.767,
444
+ "step": 6100
445
+ },
446
+ {
447
+ "epoch": 1.55,
448
+ "grad_norm": 0.20987170934677124,
449
+ "learning_rate": 0.0004551003608813784,
450
+ "loss": 0.7873,
451
+ "step": 6200
452
+ },
453
+ {
454
+ "epoch": 1.575,
455
+ "grad_norm": 0.2418334037065506,
456
+ "learning_rate": 0.00045368712063187237,
457
+ "loss": 0.7911,
458
+ "step": 6300
459
+ },
460
+ {
461
+ "epoch": 1.6,
462
+ "grad_norm": 0.2788698673248291,
463
+ "learning_rate": 0.0004522542485937369,
464
+ "loss": 0.7535,
465
+ "step": 6400
466
+ },
467
+ {
468
+ "epoch": 1.625,
469
+ "grad_norm": 0.243035227060318,
470
+ "learning_rate": 0.0004508018828701612,
471
+ "loss": 0.773,
472
+ "step": 6500
473
+ },
474
+ {
475
+ "epoch": 1.65,
476
+ "grad_norm": 0.2505854070186615,
477
+ "learning_rate": 0.0004493301634431768,
478
+ "loss": 0.7726,
479
+ "step": 6600
480
+ },
481
+ {
482
+ "epoch": 1.675,
483
+ "grad_norm": 0.24240297079086304,
484
+ "learning_rate": 0.00044783923216016507,
485
+ "loss": 0.7776,
486
+ "step": 6700
487
+ },
488
+ {
489
+ "epoch": 1.7,
490
+ "grad_norm": 0.2779521048069,
491
+ "learning_rate": 0.0004463292327201862,
492
+ "loss": 0.7712,
493
+ "step": 6800
494
+ },
495
+ {
496
+ "epoch": 1.725,
497
+ "grad_norm": 0.25853052735328674,
498
+ "learning_rate": 0.00044480031066012916,
499
+ "loss": 0.7953,
500
+ "step": 6900
501
+ },
502
+ {
503
+ "epoch": 1.75,
504
+ "grad_norm": 0.2403198778629303,
505
+ "learning_rate": 0.0004432526133406842,
506
+ "loss": 0.7722,
507
+ "step": 7000
508
+ },
509
+ {
510
+ "epoch": 1.775,
511
+ "grad_norm": 0.2707219421863556,
512
+ "learning_rate": 0.00044168628993214036,
513
+ "loss": 0.7526,
514
+ "step": 7100
515
+ },
516
+ {
517
+ "epoch": 1.8,
518
+ "grad_norm": 0.24813227355480194,
519
+ "learning_rate": 0.0004401014914000078,
520
+ "loss": 0.7582,
521
+ "step": 7200
522
+ },
523
+ {
524
+ "epoch": 1.825,
525
+ "grad_norm": 0.24337643384933472,
526
+ "learning_rate": 0.00043849837049046735,
527
+ "loss": 0.7777,
528
+ "step": 7300
529
+ },
530
+ {
531
+ "epoch": 1.85,
532
+ "grad_norm": 0.24402017891407013,
533
+ "learning_rate": 0.00043687708171564923,
534
+ "loss": 0.7738,
535
+ "step": 7400
536
+ },
537
+ {
538
+ "epoch": 1.875,
539
+ "grad_norm": 0.2371804267168045,
540
+ "learning_rate": 0.0004352377813387398,
541
+ "loss": 0.7778,
542
+ "step": 7500
543
+ },
544
+ {
545
+ "epoch": 1.9,
546
+ "grad_norm": 0.2974371016025543,
547
+ "learning_rate": 0.0004335806273589214,
548
+ "loss": 0.7907,
549
+ "step": 7600
550
+ },
551
+ {
552
+ "epoch": 1.925,
553
+ "grad_norm": 0.2702704966068268,
554
+ "learning_rate": 0.00043190577949614375,
555
+ "loss": 0.784,
556
+ "step": 7700
557
+ },
558
+ {
559
+ "epoch": 1.95,
560
+ "grad_norm": 0.24683596193790436,
561
+ "learning_rate": 0.0004302133991757297,
562
+ "loss": 0.7663,
563
+ "step": 7800
564
+ },
565
+ {
566
+ "epoch": 1.975,
567
+ "grad_norm": 0.26394063234329224,
568
+ "learning_rate": 0.00042850364951281707,
569
+ "loss": 0.7881,
570
+ "step": 7900
571
+ },
572
+ {
573
+ "epoch": 2.0,
574
+ "grad_norm": 0.2807358205318451,
575
+ "learning_rate": 0.00042677669529663686,
576
+ "loss": 0.7877,
577
+ "step": 8000
578
+ },
579
+ {
580
+ "epoch": 2.0,
581
+ "eval_loss": 0.7847200036048889,
582
+ "eval_runtime": 158.0469,
583
+ "eval_samples_per_second": 25.309,
584
+ "eval_steps_per_second": 6.327,
585
+ "step": 8000
586
+ },
587
+ {
588
+ "epoch": 2.025,
589
+ "grad_norm": 0.26467156410217285,
590
+ "learning_rate": 0.0004250327029746309,
591
+ "loss": 0.7632,
592
+ "step": 8100
593
+ },
594
+ {
595
+ "epoch": 2.05,
596
+ "grad_norm": 0.27014681696891785,
597
+ "learning_rate": 0.000423271840636409,
598
+ "loss": 0.7493,
599
+ "step": 8200
600
+ },
601
+ {
602
+ "epoch": 2.075,
603
+ "grad_norm": 0.27495408058166504,
604
+ "learning_rate": 0.00042149427799754817,
605
+ "loss": 0.7556,
606
+ "step": 8300
607
+ },
608
+ {
609
+ "epoch": 2.1,
610
+ "grad_norm": 0.2690233886241913,
611
+ "learning_rate": 0.00041970018638323546,
612
+ "loss": 0.7294,
613
+ "step": 8400
614
+ },
615
+ {
616
+ "epoch": 2.125,
617
+ "grad_norm": 0.25307732820510864,
618
+ "learning_rate": 0.00041788973871175465,
619
+ "loss": 0.7578,
620
+ "step": 8500
621
+ },
622
+ {
623
+ "epoch": 2.15,
624
+ "grad_norm": 0.2873128056526184,
625
+ "learning_rate": 0.00041606310947782046,
626
+ "loss": 0.7526,
627
+ "step": 8600
628
+ },
629
+ {
630
+ "epoch": 2.175,
631
+ "grad_norm": 0.3025578260421753,
632
+ "learning_rate": 0.00041422047473576033,
633
+ "loss": 0.7713,
634
+ "step": 8700
635
+ },
636
+ {
637
+ "epoch": 2.2,
638
+ "grad_norm": 0.2654635012149811,
639
+ "learning_rate": 0.0004123620120825459,
640
+ "loss": 0.7644,
641
+ "step": 8800
642
+ },
643
+ {
644
+ "epoch": 2.225,
645
+ "grad_norm": 0.2813340425491333,
646
+ "learning_rate": 0.00041048790064067577,
647
+ "loss": 0.7572,
648
+ "step": 8900
649
+ },
650
+ {
651
+ "epoch": 2.25,
652
+ "grad_norm": 0.240594744682312,
653
+ "learning_rate": 0.0004085983210409114,
654
+ "loss": 0.7511,
655
+ "step": 9000
656
+ },
657
+ {
658
+ "epoch": 2.275,
659
+ "grad_norm": 0.27496930956840515,
660
+ "learning_rate": 0.0004066934554048674,
661
+ "loss": 0.7511,
662
+ "step": 9100
663
+ },
664
+ {
665
+ "epoch": 2.3,
666
+ "grad_norm": 0.2850758135318756,
667
+ "learning_rate": 0.00040477348732745853,
668
+ "loss": 0.7536,
669
+ "step": 9200
670
+ },
671
+ {
672
+ "epoch": 2.325,
673
+ "grad_norm": 0.29253271222114563,
674
+ "learning_rate": 0.0004028386018592041,
675
+ "loss": 0.749,
676
+ "step": 9300
677
+ },
678
+ {
679
+ "epoch": 2.35,
680
+ "grad_norm": 0.3089846074581146,
681
+ "learning_rate": 0.0004008889854883929,
682
+ "loss": 0.7577,
683
+ "step": 9400
684
+ },
685
+ {
686
+ "epoch": 2.375,
687
+ "grad_norm": 0.25122740864753723,
688
+ "learning_rate": 0.0003989248261231084,
689
+ "loss": 0.7342,
690
+ "step": 9500
691
+ },
692
+ {
693
+ "epoch": 2.4,
694
+ "grad_norm": 0.33366382122039795,
695
+ "learning_rate": 0.0003969463130731183,
696
+ "loss": 0.7439,
697
+ "step": 9600
698
+ },
699
+ {
700
+ "epoch": 2.425,
701
+ "grad_norm": 0.25869670510292053,
702
+ "learning_rate": 0.00039495363703162843,
703
+ "loss": 0.7484,
704
+ "step": 9700
705
+ },
706
+ {
707
+ "epoch": 2.45,
708
+ "grad_norm": 0.27346816658973694,
709
+ "learning_rate": 0.000392946990056903,
710
+ "loss": 0.7844,
711
+ "step": 9800
712
+ },
713
+ {
714
+ "epoch": 2.475,
715
+ "grad_norm": 0.27572157979011536,
716
+ "learning_rate": 0.00039092656555375416,
717
+ "loss": 0.7656,
718
+ "step": 9900
719
+ },
720
+ {
721
+ "epoch": 2.5,
722
+ "grad_norm": 0.30408018827438354,
723
+ "learning_rate": 0.00038889255825490053,
724
+ "loss": 0.7482,
725
+ "step": 10000
726
+ },
727
+ {
728
+ "epoch": 2.525,
729
+ "grad_norm": 0.28552907705307007,
730
+ "learning_rate": 0.0003868451642021992,
731
+ "loss": 0.7464,
732
+ "step": 10100
733
+ },
734
+ {
735
+ "epoch": 2.55,
736
+ "grad_norm": 0.2961609661579132,
737
+ "learning_rate": 0.0003847845807277501,
738
+ "loss": 0.7447,
739
+ "step": 10200
740
+ },
741
+ {
742
+ "epoch": 2.575,
743
+ "grad_norm": 0.2863265573978424,
744
+ "learning_rate": 0.0003827110064348773,
745
+ "loss": 0.7538,
746
+ "step": 10300
747
+ },
748
+ {
749
+ "epoch": 2.6,
750
+ "grad_norm": 0.26066234707832336,
751
+ "learning_rate": 0.0003806246411789872,
752
+ "loss": 0.7476,
753
+ "step": 10400
754
+ },
755
+ {
756
+ "epoch": 2.625,
757
+ "grad_norm": 0.2573792338371277,
758
+ "learning_rate": 0.0003785256860483054,
759
+ "loss": 0.724,
760
+ "step": 10500
761
+ },
762
+ {
763
+ "epoch": 2.65,
764
+ "grad_norm": 0.28464919328689575,
765
+ "learning_rate": 0.0003764143433444962,
766
+ "loss": 0.7395,
767
+ "step": 10600
768
+ },
769
+ {
770
+ "epoch": 2.675,
771
+ "grad_norm": 0.26662376523017883,
772
+ "learning_rate": 0.0003742908165631636,
773
+ "loss": 0.7524,
774
+ "step": 10700
775
+ },
776
+ {
777
+ "epoch": 2.7,
778
+ "grad_norm": 0.25927239656448364,
779
+ "learning_rate": 0.0003721553103742388,
780
+ "loss": 0.776,
781
+ "step": 10800
782
+ },
783
+ {
784
+ "epoch": 2.725,
785
+ "grad_norm": 0.2786635160446167,
786
+ "learning_rate": 0.0003700080306022528,
787
+ "loss": 0.7392,
788
+ "step": 10900
789
+ },
790
+ {
791
+ "epoch": 2.75,
792
+ "grad_norm": 0.26539376378059387,
793
+ "learning_rate": 0.0003678491842064995,
794
+ "loss": 0.7495,
795
+ "step": 11000
796
+ },
797
+ {
798
+ "epoch": 2.775,
799
+ "grad_norm": 0.23436130583286285,
800
+ "learning_rate": 0.00036567897926108756,
801
+ "loss": 0.7241,
802
+ "step": 11100
803
+ },
804
+ {
805
+ "epoch": 2.8,
806
+ "grad_norm": 0.28717827796936035,
807
+ "learning_rate": 0.00036349762493488667,
808
+ "loss": 0.7249,
809
+ "step": 11200
810
+ },
811
+ {
812
+ "epoch": 2.825,
813
+ "grad_norm": 0.2903422713279724,
814
+ "learning_rate": 0.0003613053314713671,
815
+ "loss": 0.7514,
816
+ "step": 11300
817
+ },
818
+ {
819
+ "epoch": 2.85,
820
+ "grad_norm": 0.3125324845314026,
821
+ "learning_rate": 0.0003591023101683355,
822
+ "loss": 0.7689,
823
+ "step": 11400
824
+ },
825
+ {
826
+ "epoch": 2.875,
827
+ "grad_norm": 0.26884034276008606,
828
+ "learning_rate": 0.0003568887733575705,
829
+ "loss": 0.7708,
830
+ "step": 11500
831
+ },
832
+ {
833
+ "epoch": 2.9,
834
+ "grad_norm": 0.2495446503162384,
835
+ "learning_rate": 0.00035466493438435703,
836
+ "loss": 0.737,
837
+ "step": 11600
838
+ },
839
+ {
840
+ "epoch": 2.925,
841
+ "grad_norm": 0.2772550582885742,
842
+ "learning_rate": 0.0003524310075869239,
843
+ "loss": 0.7618,
844
+ "step": 11700
845
+ },
846
+ {
847
+ "epoch": 2.95,
848
+ "grad_norm": 0.23050114512443542,
849
+ "learning_rate": 0.0003501872082757852,
850
+ "loss": 0.7492,
851
+ "step": 11800
852
+ },
853
+ {
854
+ "epoch": 2.975,
855
+ "grad_norm": 0.3204950988292694,
856
+ "learning_rate": 0.000347933752712989,
857
+ "loss": 0.7456,
858
+ "step": 11900
859
+ },
860
+ {
861
+ "epoch": 3.0,
862
+ "grad_norm": 0.26798272132873535,
863
+ "learning_rate": 0.0003456708580912725,
864
+ "loss": 0.7671,
865
+ "step": 12000
866
+ },
867
+ {
868
+ "epoch": 3.0,
869
+ "eval_loss": 0.770311176776886,
870
+ "eval_runtime": 157.8536,
871
+ "eval_samples_per_second": 25.34,
872
+ "eval_steps_per_second": 6.335,
873
+ "step": 12000
874
+ },
875
+ {
876
+ "epoch": 3.025,
877
+ "grad_norm": 0.2797103822231293,
878
+ "learning_rate": 0.0003433987425131291,
879
+ "loss": 0.7356,
880
+ "step": 12100
881
+ },
882
+ {
883
+ "epoch": 3.05,
884
+ "grad_norm": 0.24729490280151367,
885
+ "learning_rate": 0.0003411176249697875,
886
+ "loss": 0.7274,
887
+ "step": 12200
888
+ },
889
+ {
890
+ "epoch": 3.075,
891
+ "grad_norm": 0.24234917759895325,
892
+ "learning_rate": 0.00033882772532010404,
893
+ "loss": 0.7187,
894
+ "step": 12300
895
+ },
896
+ {
897
+ "epoch": 3.1,
898
+ "grad_norm": 0.3106192648410797,
899
+ "learning_rate": 0.0003365292642693733,
900
+ "loss": 0.7131,
901
+ "step": 12400
902
+ },
903
+ {
904
+ "epoch": 3.125,
905
+ "grad_norm": 0.3047131597995758,
906
+ "learning_rate": 0.00033422246334805503,
907
+ "loss": 0.731,
908
+ "step": 12500
909
+ },
910
+ {
911
+ "epoch": 3.15,
912
+ "grad_norm": 0.2762167751789093,
913
+ "learning_rate": 0.0003319075448904234,
914
+ "loss": 0.7399,
915
+ "step": 12600
916
+ },
917
+ {
918
+ "epoch": 3.175,
919
+ "grad_norm": 0.32366228103637695,
920
+ "learning_rate": 0.00032958473201313745,
921
+ "loss": 0.7387,
922
+ "step": 12700
923
+ },
924
+ {
925
+ "epoch": 3.2,
926
+ "grad_norm": 0.2838393747806549,
927
+ "learning_rate": 0.00032725424859373687,
928
+ "loss": 0.7297,
929
+ "step": 12800
930
+ },
931
+ {
932
+ "epoch": 3.225,
933
+ "grad_norm": 0.3418114483356476,
934
+ "learning_rate": 0.00032491631924906416,
935
+ "loss": 0.7322,
936
+ "step": 12900
937
+ },
938
+ {
939
+ "epoch": 3.25,
940
+ "grad_norm": 0.31752169132232666,
941
+ "learning_rate": 0.00032257116931361555,
942
+ "loss": 0.7466,
943
+ "step": 13000
944
+ },
945
+ {
946
+ "epoch": 3.275,
947
+ "grad_norm": 0.28176289796829224,
948
+ "learning_rate": 0.00032021902481782304,
949
+ "loss": 0.7256,
950
+ "step": 13100
951
+ },
952
+ {
953
+ "epoch": 3.3,
954
+ "grad_norm": 0.3140346109867096,
955
+ "learning_rate": 0.00031786011246626855,
956
+ "loss": 0.7275,
957
+ "step": 13200
958
+ },
959
+ {
960
+ "epoch": 3.325,
961
+ "grad_norm": 0.3124103248119354,
962
+ "learning_rate": 0.0003154946596158343,
963
+ "loss": 0.7548,
964
+ "step": 13300
965
+ },
966
+ {
967
+ "epoch": 3.35,
968
+ "grad_norm": 0.27317100763320923,
969
+ "learning_rate": 0.0003131228942537895,
970
+ "loss": 0.7437,
971
+ "step": 13400
972
+ },
973
+ {
974
+ "epoch": 3.375,
975
+ "grad_norm": 0.3227018713951111,
976
+ "learning_rate": 0.000310745044975816,
977
+ "loss": 0.7265,
978
+ "step": 13500
979
+ },
980
+ {
981
+ "epoch": 3.4,
982
+ "grad_norm": 0.2868232727050781,
983
+ "learning_rate": 0.0003083613409639764,
984
+ "loss": 0.7197,
985
+ "step": 13600
986
+ },
987
+ {
988
+ "epoch": 3.425,
989
+ "grad_norm": 0.29342934489250183,
990
+ "learning_rate": 0.00030597201196462466,
991
+ "loss": 0.7073,
992
+ "step": 13700
993
+ },
994
+ {
995
+ "epoch": 3.45,
996
+ "grad_norm": 0.3376121520996094,
997
+ "learning_rate": 0.00030357728826626266,
998
+ "loss": 0.7346,
999
+ "step": 13800
1000
+ },
1001
+ {
1002
+ "epoch": 3.475,
1003
+ "grad_norm": 0.2940613031387329,
1004
+ "learning_rate": 0.00030117740067734495,
1005
+ "loss": 0.7463,
1006
+ "step": 13900
1007
+ },
1008
+ {
1009
+ "epoch": 3.5,
1010
+ "grad_norm": 0.2969967722892761,
1011
+ "learning_rate": 0.0002987725805040321,
1012
+ "loss": 0.7174,
1013
+ "step": 14000
1014
+ },
1015
+ {
1016
+ "epoch": 3.525,
1017
+ "grad_norm": 0.2882630527019501,
1018
+ "learning_rate": 0.0002963630595278977,
1019
+ "loss": 0.726,
1020
+ "step": 14100
1021
+ },
1022
+ {
1023
+ "epoch": 3.55,
1024
+ "grad_norm": 0.3352701663970947,
1025
+ "learning_rate": 0.0002939490699835887,
1026
+ "loss": 0.7177,
1027
+ "step": 14200
1028
+ },
1029
+ {
1030
+ "epoch": 3.575,
1031
+ "grad_norm": 0.28441768884658813,
1032
+ "learning_rate": 0.00029153084453644135,
1033
+ "loss": 0.7385,
1034
+ "step": 14300
1035
+ },
1036
+ {
1037
+ "epoch": 3.6,
1038
+ "grad_norm": 0.2801556885242462,
1039
+ "learning_rate": 0.00028910861626005774,
1040
+ "loss": 0.7242,
1041
+ "step": 14400
1042
+ },
1043
+ {
1044
+ "epoch": 3.625,
1045
+ "grad_norm": 0.3156888782978058,
1046
+ "learning_rate": 0.00028668261861384045,
1047
+ "loss": 0.7207,
1048
+ "step": 14500
1049
+ },
1050
+ {
1051
+ "epoch": 3.65,
1052
+ "grad_norm": 0.3086392879486084,
1053
+ "learning_rate": 0.00028425308542049207,
1054
+ "loss": 0.7337,
1055
+ "step": 14600
1056
+ },
1057
+ {
1058
+ "epoch": 3.675,
1059
+ "grad_norm": 0.312292218208313,
1060
+ "learning_rate": 0.0002818202508434783,
1061
+ "loss": 0.7469,
1062
+ "step": 14700
1063
+ },
1064
+ {
1065
+ "epoch": 3.7,
1066
+ "grad_norm": 0.32004839181900024,
1067
+ "learning_rate": 0.00027938434936445943,
1068
+ "loss": 0.7297,
1069
+ "step": 14800
1070
+ },
1071
+ {
1072
+ "epoch": 3.725,
1073
+ "grad_norm": 0.2992401719093323,
1074
+ "learning_rate": 0.00027694561576068985,
1075
+ "loss": 0.7379,
1076
+ "step": 14900
1077
+ },
1078
+ {
1079
+ "epoch": 3.75,
1080
+ "grad_norm": 0.31298381090164185,
1081
+ "learning_rate": 0.0002745042850823902,
1082
+ "loss": 0.742,
1083
+ "step": 15000
1084
+ },
1085
+ {
1086
+ "epoch": 3.775,
1087
+ "grad_norm": 0.3190864622592926,
1088
+ "learning_rate": 0.00027206059263009243,
1089
+ "loss": 0.715,
1090
+ "step": 15100
1091
+ },
1092
+ {
1093
+ "epoch": 3.8,
1094
+ "grad_norm": 0.30321431159973145,
1095
+ "learning_rate": 0.00026961477393196127,
1096
+ "loss": 0.7085,
1097
+ "step": 15200
1098
+ },
1099
+ {
1100
+ "epoch": 3.825,
1101
+ "grad_norm": 0.31598585844039917,
1102
+ "learning_rate": 0.0002671670647210934,
1103
+ "loss": 0.7456,
1104
+ "step": 15300
1105
+ },
1106
+ {
1107
+ "epoch": 3.85,
1108
+ "grad_norm": 0.3217307925224304,
1109
+ "learning_rate": 0.00026471770091279724,
1110
+ "loss": 0.7221,
1111
+ "step": 15400
1112
+ },
1113
+ {
1114
+ "epoch": 3.875,
1115
+ "grad_norm": 0.2786950469017029,
1116
+ "learning_rate": 0.00026226691858185456,
1117
+ "loss": 0.7297,
1118
+ "step": 15500
1119
+ },
1120
+ {
1121
+ "epoch": 3.9,
1122
+ "grad_norm": 0.29478856921195984,
1123
+ "learning_rate": 0.00025981495393976716,
1124
+ "loss": 0.7311,
1125
+ "step": 15600
1126
+ },
1127
+ {
1128
+ "epoch": 3.925,
1129
+ "grad_norm": 0.32220613956451416,
1130
+ "learning_rate": 0.00025736204331199084,
1131
+ "loss": 0.7376,
1132
+ "step": 15700
1133
+ },
1134
+ {
1135
+ "epoch": 3.95,
1136
+ "grad_norm": 0.296522319316864,
1137
+ "learning_rate": 0.00025490842311515704,
1138
+ "loss": 0.7263,
1139
+ "step": 15800
1140
+ },
1141
+ {
1142
+ "epoch": 3.975,
1143
+ "grad_norm": 0.32826748490333557,
1144
+ "learning_rate": 0.0002524543298342875,
1145
+ "loss": 0.7393,
1146
+ "step": 15900
1147
+ },
1148
+ {
1149
+ "epoch": 4.0,
1150
+ "grad_norm": 0.3822040259838104,
1151
+ "learning_rate": 0.00025,
1152
+ "loss": 0.7233,
1153
+ "step": 16000
1154
+ },
1155
+ {
1156
+ "epoch": 4.0,
1157
+ "eval_loss": 0.7630051970481873,
1158
+ "eval_runtime": 157.7404,
1159
+ "eval_samples_per_second": 25.358,
1160
+ "eval_steps_per_second": 6.34,
1161
+ "step": 16000
1162
+ },
1163
+ {
1164
+ "epoch": 4.025,
1165
+ "grad_norm": 0.33770787715911865,
1166
+ "learning_rate": 0.0002475456701657126,
1167
+ "loss": 0.7406,
1168
+ "step": 16100
1169
+ },
1170
+ {
1171
+ "epoch": 4.05,
1172
+ "grad_norm": 0.3054458200931549,
1173
+ "learning_rate": 0.00024509157688484297,
1174
+ "loss": 0.7034,
1175
+ "step": 16200
1176
+ },
1177
+ {
1178
+ "epoch": 4.075,
1179
+ "grad_norm": 0.3073260188102722,
1180
+ "learning_rate": 0.0002426379566880092,
1181
+ "loss": 0.7141,
1182
+ "step": 16300
1183
+ },
1184
+ {
1185
+ "epoch": 4.1,
1186
+ "grad_norm": 0.3053102195262909,
1187
+ "learning_rate": 0.00024018504606023293,
1188
+ "loss": 0.6981,
1189
+ "step": 16400
1190
+ },
1191
+ {
1192
+ "epoch": 4.125,
1193
+ "grad_norm": 0.33330512046813965,
1194
+ "learning_rate": 0.0002377330814181455,
1195
+ "loss": 0.7122,
1196
+ "step": 16500
1197
+ },
1198
+ {
1199
+ "epoch": 4.15,
1200
+ "grad_norm": 0.3163035213947296,
1201
+ "learning_rate": 0.00023528229908720272,
1202
+ "loss": 0.7145,
1203
+ "step": 16600
1204
+ },
1205
+ {
1206
+ "epoch": 4.175,
1207
+ "grad_norm": 0.3451402485370636,
1208
+ "learning_rate": 0.00023283293527890658,
1209
+ "loss": 0.7039,
1210
+ "step": 16700
1211
+ },
1212
+ {
1213
+ "epoch": 4.2,
1214
+ "grad_norm": 0.3371511399745941,
1215
+ "learning_rate": 0.0002303852260680388,
1216
+ "loss": 0.7078,
1217
+ "step": 16800
1218
+ },
1219
+ {
1220
+ "epoch": 4.225,
1221
+ "grad_norm": 0.29235777258872986,
1222
+ "learning_rate": 0.00022793940736990766,
1223
+ "loss": 0.6777,
1224
+ "step": 16900
1225
+ },
1226
+ {
1227
+ "epoch": 4.25,
1228
+ "grad_norm": 0.3207673728466034,
1229
+ "learning_rate": 0.00022549571491760985,
1230
+ "loss": 0.7235,
1231
+ "step": 17000
1232
+ },
1233
+ {
1234
+ "epoch": 4.275,
1235
+ "grad_norm": 0.2674269378185272,
1236
+ "learning_rate": 0.00022305438423931017,
1237
+ "loss": 0.693,
1238
+ "step": 17100
1239
+ },
1240
+ {
1241
+ "epoch": 4.3,
1242
+ "grad_norm": 0.3360178768634796,
1243
+ "learning_rate": 0.00022061565063554063,
1244
+ "loss": 0.7113,
1245
+ "step": 17200
1246
+ },
1247
+ {
1248
+ "epoch": 4.325,
1249
+ "grad_norm": 0.3393208682537079,
1250
+ "learning_rate": 0.00021817974915652172,
1251
+ "loss": 0.7128,
1252
+ "step": 17300
1253
+ },
1254
+ {
1255
+ "epoch": 4.35,
1256
+ "grad_norm": 0.3478228747844696,
1257
+ "learning_rate": 0.00021574691457950805,
1258
+ "loss": 0.7118,
1259
+ "step": 17400
1260
+ },
1261
+ {
1262
+ "epoch": 4.375,
1263
+ "grad_norm": 0.3498687446117401,
1264
+ "learning_rate": 0.00021331738138615958,
1265
+ "loss": 0.7389,
1266
+ "step": 17500
1267
+ },
1268
+ {
1269
+ "epoch": 4.4,
1270
+ "grad_norm": 0.2958277463912964,
1271
+ "learning_rate": 0.00021089138373994224,
1272
+ "loss": 0.723,
1273
+ "step": 17600
1274
+ },
1275
+ {
1276
+ "epoch": 4.425,
1277
+ "grad_norm": 0.3038991689682007,
1278
+ "learning_rate": 0.0002084691554635587,
1279
+ "loss": 0.7188,
1280
+ "step": 17700
1281
+ },
1282
+ {
1283
+ "epoch": 4.45,
1284
+ "grad_norm": 0.3416539132595062,
1285
+ "learning_rate": 0.00020605093001641137,
1286
+ "loss": 0.7004,
1287
+ "step": 17800
1288
+ },
1289
+ {
1290
+ "epoch": 4.475,
1291
+ "grad_norm": 0.32150790095329285,
1292
+ "learning_rate": 0.00020363694047210228,
1293
+ "loss": 0.7213,
1294
+ "step": 17900
1295
+ },
1296
+ {
1297
+ "epoch": 4.5,
1298
+ "grad_norm": 0.32991406321525574,
1299
+ "learning_rate": 0.00020122741949596797,
1300
+ "loss": 0.7045,
1301
+ "step": 18000
1302
+ },
1303
+ {
1304
+ "epoch": 4.525,
1305
+ "grad_norm": 0.315838098526001,
1306
+ "learning_rate": 0.00019882259932265512,
1307
+ "loss": 0.7382,
1308
+ "step": 18100
1309
+ },
1310
+ {
1311
+ "epoch": 4.55,
1312
+ "grad_norm": 0.35709577798843384,
1313
+ "learning_rate": 0.00019642271173373735,
1314
+ "loss": 0.6946,
1315
+ "step": 18200
1316
+ },
1317
+ {
1318
+ "epoch": 4.575,
1319
+ "grad_norm": 0.30055126547813416,
1320
+ "learning_rate": 0.00019402798803537538,
1321
+ "loss": 0.7453,
1322
+ "step": 18300
1323
+ },
1324
+ {
1325
+ "epoch": 4.6,
1326
+ "grad_norm": 0.33305642008781433,
1327
+ "learning_rate": 0.00019163865903602372,
1328
+ "loss": 0.708,
1329
+ "step": 18400
1330
+ },
1331
+ {
1332
+ "epoch": 4.625,
1333
+ "grad_norm": 0.32721662521362305,
1334
+ "learning_rate": 0.00018925495502418406,
1335
+ "loss": 0.7182,
1336
+ "step": 18500
1337
+ },
1338
+ {
1339
+ "epoch": 4.65,
1340
+ "grad_norm": 0.31406721472740173,
1341
+ "learning_rate": 0.00018687710574621051,
1342
+ "loss": 0.7342,
1343
+ "step": 18600
1344
+ },
1345
+ {
1346
+ "epoch": 4.675,
1347
+ "grad_norm": 0.3257578909397125,
1348
+ "learning_rate": 0.00018450534038416566,
1349
+ "loss": 0.6923,
1350
+ "step": 18700
1351
+ },
1352
+ {
1353
+ "epoch": 4.7,
1354
+ "grad_norm": 0.3608546555042267,
1355
+ "learning_rate": 0.00018213988753373146,
1356
+ "loss": 0.7099,
1357
+ "step": 18800
1358
+ },
1359
+ {
1360
+ "epoch": 4.725,
1361
+ "grad_norm": 0.34955260157585144,
1362
+ "learning_rate": 0.00017978097518217702,
1363
+ "loss": 0.7386,
1364
+ "step": 18900
1365
+ },
1366
+ {
1367
+ "epoch": 4.75,
1368
+ "grad_norm": 0.3003368675708771,
1369
+ "learning_rate": 0.00017742883068638446,
1370
+ "loss": 0.7253,
1371
+ "step": 19000
1372
+ },
1373
+ {
1374
+ "epoch": 4.775,
1375
+ "grad_norm": 0.3441944718360901,
1376
+ "learning_rate": 0.00017508368075093582,
1377
+ "loss": 0.689,
1378
+ "step": 19100
1379
+ },
1380
+ {
1381
+ "epoch": 4.8,
1382
+ "grad_norm": 0.3106822669506073,
1383
+ "learning_rate": 0.00017274575140626317,
1384
+ "loss": 0.7209,
1385
+ "step": 19200
1386
+ },
1387
+ {
1388
+ "epoch": 4.825,
1389
+ "grad_norm": 0.3345147669315338,
1390
+ "learning_rate": 0.0001704152679868626,
1391
+ "loss": 0.7361,
1392
+ "step": 19300
1393
+ },
1394
+ {
1395
+ "epoch": 4.85,
1396
+ "grad_norm": 0.3377486765384674,
1397
+ "learning_rate": 0.00016809245510957666,
1398
+ "loss": 0.7154,
1399
+ "step": 19400
1400
+ },
1401
+ {
1402
+ "epoch": 4.875,
1403
+ "grad_norm": 0.2919654846191406,
1404
+ "learning_rate": 0.000165777536651945,
1405
+ "loss": 0.717,
1406
+ "step": 19500
1407
+ },
1408
+ {
1409
+ "epoch": 4.9,
1410
+ "grad_norm": 0.32570144534111023,
1411
+ "learning_rate": 0.0001634707357306267,
1412
+ "loss": 0.718,
1413
+ "step": 19600
1414
+ },
1415
+ {
1416
+ "epoch": 4.925,
1417
+ "grad_norm": 0.3031199872493744,
1418
+ "learning_rate": 0.00016117227467989602,
1419
+ "loss": 0.7127,
1420
+ "step": 19700
1421
+ },
1422
+ {
1423
+ "epoch": 4.95,
1424
+ "grad_norm": 0.3258810043334961,
1425
+ "learning_rate": 0.0001588823750302126,
1426
+ "loss": 0.71,
1427
+ "step": 19800
1428
+ },
1429
+ {
1430
+ "epoch": 4.975,
1431
+ "grad_norm": 0.3082842230796814,
1432
+ "learning_rate": 0.00015660125748687094,
1433
+ "loss": 0.7096,
1434
+ "step": 19900
1435
+ },
1436
+ {
1437
+ "epoch": 5.0,
1438
+ "grad_norm": 0.3242599070072174,
1439
+ "learning_rate": 0.00015432914190872756,
1440
+ "loss": 0.7043,
1441
+ "step": 20000
1442
+ },
1443
+ {
1444
+ "epoch": 5.0,
1445
+ "eval_loss": 0.7582733035087585,
1446
+ "eval_runtime": 157.8194,
1447
+ "eval_samples_per_second": 25.345,
1448
+ "eval_steps_per_second": 6.336,
1449
+ "step": 20000
1450
+ },
1451
+ {
1452
+ "epoch": 5.025,
1453
+ "grad_norm": 0.33593127131462097,
1454
+ "learning_rate": 0.000152066247287011,
1455
+ "loss": 0.6958,
1456
+ "step": 20100
1457
+ },
1458
+ {
1459
+ "epoch": 5.05,
1460
+ "grad_norm": 0.3056102991104126,
1461
+ "learning_rate": 0.00014981279172421482,
1462
+ "loss": 0.7012,
1463
+ "step": 20200
1464
+ },
1465
+ {
1466
+ "epoch": 5.075,
1467
+ "grad_norm": 0.34994184970855713,
1468
+ "learning_rate": 0.00014756899241307614,
1469
+ "loss": 0.681,
1470
+ "step": 20300
1471
+ },
1472
+ {
1473
+ "epoch": 5.1,
1474
+ "grad_norm": 0.3363034427165985,
1475
+ "learning_rate": 0.00014533506561564306,
1476
+ "loss": 0.6864,
1477
+ "step": 20400
1478
+ },
1479
+ {
1480
+ "epoch": 5.125,
1481
+ "grad_norm": 0.33991631865501404,
1482
+ "learning_rate": 0.00014311122664242953,
1483
+ "loss": 0.6837,
1484
+ "step": 20500
1485
+ },
1486
+ {
1487
+ "epoch": 5.15,
1488
+ "grad_norm": 0.3321220576763153,
1489
+ "learning_rate": 0.00014089768983166444,
1490
+ "loss": 0.6938,
1491
+ "step": 20600
1492
+ },
1493
+ {
1494
+ "epoch": 5.175,
1495
+ "grad_norm": 0.33707287907600403,
1496
+ "learning_rate": 0.000138694668528633,
1497
+ "loss": 0.6866,
1498
+ "step": 20700
1499
+ },
1500
+ {
1501
+ "epoch": 5.2,
1502
+ "grad_norm": 0.36306506395339966,
1503
+ "learning_rate": 0.00013650237506511331,
1504
+ "loss": 0.7078,
1505
+ "step": 20800
1506
+ },
1507
+ {
1508
+ "epoch": 5.225,
1509
+ "grad_norm": 0.3246331512928009,
1510
+ "learning_rate": 0.0001343210207389125,
1511
+ "loss": 0.7157,
1512
+ "step": 20900
1513
+ },
1514
+ {
1515
+ "epoch": 5.25,
1516
+ "grad_norm": 0.2866950035095215,
1517
+ "learning_rate": 0.00013215081579350058,
1518
+ "loss": 0.7065,
1519
+ "step": 21000
1520
+ },
1521
+ {
1522
+ "epoch": 5.275,
1523
+ "grad_norm": 0.2862119972705841,
1524
+ "learning_rate": 0.00012999196939774722,
1525
+ "loss": 0.6948,
1526
+ "step": 21100
1527
+ },
1528
+ {
1529
+ "epoch": 5.3,
1530
+ "grad_norm": 0.3230210244655609,
1531
+ "learning_rate": 0.00012784468962576134,
1532
+ "loss": 0.7118,
1533
+ "step": 21200
1534
+ },
1535
+ {
1536
+ "epoch": 5.325,
1537
+ "grad_norm": 0.3498855233192444,
1538
+ "learning_rate": 0.00012570918343683636,
1539
+ "loss": 0.7203,
1540
+ "step": 21300
1541
+ },
1542
+ {
1543
+ "epoch": 5.35,
1544
+ "grad_norm": 0.34220677614212036,
1545
+ "learning_rate": 0.0001235856566555039,
1546
+ "loss": 0.7034,
1547
+ "step": 21400
1548
+ },
1549
+ {
1550
+ "epoch": 5.375,
1551
+ "grad_norm": 0.3399185538291931,
1552
+ "learning_rate": 0.0001214743139516946,
1553
+ "loss": 0.6889,
1554
+ "step": 21500
1555
+ },
1556
+ {
1557
+ "epoch": 5.4,
1558
+ "grad_norm": 0.3611920475959778,
1559
+ "learning_rate": 0.00011937535882101281,
1560
+ "loss": 0.7056,
1561
+ "step": 21600
1562
+ },
1563
+ {
1564
+ "epoch": 5.425,
1565
+ "grad_norm": 0.3285171091556549,
1566
+ "learning_rate": 0.00011728899356512265,
1567
+ "loss": 0.6799,
1568
+ "step": 21700
1569
+ },
1570
+ {
1571
+ "epoch": 5.45,
1572
+ "grad_norm": 0.385384738445282,
1573
+ "learning_rate": 0.00011521541927224994,
1574
+ "loss": 0.7142,
1575
+ "step": 21800
1576
+ },
1577
+ {
1578
+ "epoch": 5.475,
1579
+ "grad_norm": 0.3012756407260895,
1580
+ "learning_rate": 0.00011315483579780094,
1581
+ "loss": 0.6988,
1582
+ "step": 21900
1583
+ },
1584
+ {
1585
+ "epoch": 5.5,
1586
+ "grad_norm": 0.3784416615962982,
1587
+ "learning_rate": 0.00011110744174509952,
1588
+ "loss": 0.6883,
1589
+ "step": 22000
1590
+ },
1591
+ {
1592
+ "epoch": 5.525,
1593
+ "grad_norm": 0.3384094536304474,
1594
+ "learning_rate": 0.00010907343444624579,
1595
+ "loss": 0.7226,
1596
+ "step": 22100
1597
+ },
1598
+ {
1599
+ "epoch": 5.55,
1600
+ "grad_norm": 0.37700313329696655,
1601
+ "learning_rate": 0.00010705300994309697,
1602
+ "loss": 0.6975,
1603
+ "step": 22200
1604
+ },
1605
+ {
1606
+ "epoch": 5.575,
1607
+ "grad_norm": 0.3291476368904114,
1608
+ "learning_rate": 0.00010504636296837161,
1609
+ "loss": 0.7041,
1610
+ "step": 22300
1611
+ },
1612
+ {
1613
+ "epoch": 5.6,
1614
+ "grad_norm": 0.3541957139968872,
1615
+ "learning_rate": 0.00010305368692688174,
1616
+ "loss": 0.6819,
1617
+ "step": 22400
1618
+ },
1619
+ {
1620
+ "epoch": 5.625,
1621
+ "grad_norm": 0.31432709097862244,
1622
+ "learning_rate": 0.00010107517387689166,
1623
+ "loss": 0.7128,
1624
+ "step": 22500
1625
+ },
1626
+ {
1627
+ "epoch": 5.65,
1628
+ "grad_norm": 0.34442463517189026,
1629
+ "learning_rate": 9.911101451160715e-05,
1630
+ "loss": 0.7226,
1631
+ "step": 22600
1632
+ },
1633
+ {
1634
+ "epoch": 5.675,
1635
+ "grad_norm": 0.3558264672756195,
1636
+ "learning_rate": 9.716139814079594e-05,
1637
+ "loss": 0.7118,
1638
+ "step": 22700
1639
+ },
1640
+ {
1641
+ "epoch": 5.7,
1642
+ "grad_norm": 0.2919544577598572,
1643
+ "learning_rate": 9.522651267254148e-05,
1644
+ "loss": 0.7193,
1645
+ "step": 22800
1646
+ },
1647
+ {
1648
+ "epoch": 5.725,
1649
+ "grad_norm": 0.3444010019302368,
1650
+ "learning_rate": 9.330654459513265e-05,
1651
+ "loss": 0.6942,
1652
+ "step": 22900
1653
+ },
1654
+ {
1655
+ "epoch": 5.75,
1656
+ "grad_norm": 0.27795663475990295,
1657
+ "learning_rate": 9.140167895908866e-05,
1658
+ "loss": 0.7159,
1659
+ "step": 23000
1660
+ },
1661
+ {
1662
+ "epoch": 5.775,
1663
+ "grad_norm": 0.35614287853240967,
1664
+ "learning_rate": 8.951209935932425e-05,
1665
+ "loss": 0.6779,
1666
+ "step": 23100
1667
+ },
1668
+ {
1669
+ "epoch": 5.8,
1670
+ "grad_norm": 0.3566213548183441,
1671
+ "learning_rate": 8.763798791745412e-05,
1672
+ "loss": 0.7242,
1673
+ "step": 23200
1674
+ },
1675
+ {
1676
+ "epoch": 5.825,
1677
+ "grad_norm": 0.3220725953578949,
1678
+ "learning_rate": 8.577952526423969e-05,
1679
+ "loss": 0.6914,
1680
+ "step": 23300
1681
+ },
1682
+ {
1683
+ "epoch": 5.85,
1684
+ "grad_norm": 0.33407965302467346,
1685
+ "learning_rate": 8.393689052217964e-05,
1686
+ "loss": 0.7149,
1687
+ "step": 23400
1688
+ },
1689
+ {
1690
+ "epoch": 5.875,
1691
+ "grad_norm": 0.3270919919013977,
1692
+ "learning_rate": 8.211026128824539e-05,
1693
+ "loss": 0.7023,
1694
+ "step": 23500
1695
+ },
1696
+ {
1697
+ "epoch": 5.9,
1698
+ "grad_norm": 0.3497578799724579,
1699
+ "learning_rate": 8.029981361676455e-05,
1700
+ "loss": 0.7051,
1701
+ "step": 23600
1702
+ },
1703
+ {
1704
+ "epoch": 5.925,
1705
+ "grad_norm": 0.31495147943496704,
1706
+ "learning_rate": 7.850572200245185e-05,
1707
+ "loss": 0.7158,
1708
+ "step": 23700
1709
+ },
1710
+ {
1711
+ "epoch": 5.95,
1712
+ "grad_norm": 0.34137919545173645,
1713
+ "learning_rate": 7.672815936359106e-05,
1714
+ "loss": 0.7264,
1715
+ "step": 23800
1716
+ },
1717
+ {
1718
+ "epoch": 5.975,
1719
+ "grad_norm": 0.37350091338157654,
1720
+ "learning_rate": 7.496729702536912e-05,
1721
+ "loss": 0.6848,
1722
+ "step": 23900
1723
+ },
1724
+ {
1725
+ "epoch": 6.0,
1726
+ "grad_norm": 0.3614259362220764,
1727
+ "learning_rate": 7.322330470336314e-05,
1728
+ "loss": 0.6809,
1729
+ "step": 24000
1730
+ },
1731
+ {
1732
+ "epoch": 6.0,
1733
+ "eval_loss": 0.7561643123626709,
1734
+ "eval_runtime": 157.6906,
1735
+ "eval_samples_per_second": 25.366,
1736
+ "eval_steps_per_second": 6.342,
1737
+ "step": 24000
1738
+ },
1739
+ {
1740
+ "epoch": 6.025,
1741
+ "grad_norm": 0.3754175901412964,
1742
+ "learning_rate": 7.149635048718294e-05,
1743
+ "loss": 0.6724,
1744
+ "step": 24100
1745
+ },
1746
+ {
1747
+ "epoch": 6.05,
1748
+ "grad_norm": 0.3396029472351074,
1749
+ "learning_rate": 6.97866008242703e-05,
1750
+ "loss": 0.6737,
1751
+ "step": 24200
1752
+ },
1753
+ {
1754
+ "epoch": 6.075,
1755
+ "grad_norm": 0.3344869315624237,
1756
+ "learning_rate": 6.809422050385628e-05,
1757
+ "loss": 0.6948,
1758
+ "step": 24300
1759
+ },
1760
+ {
1761
+ "epoch": 6.1,
1762
+ "grad_norm": 0.34992533922195435,
1763
+ "learning_rate": 6.641937264107867e-05,
1764
+ "loss": 0.6685,
1765
+ "step": 24400
1766
+ },
1767
+ {
1768
+ "epoch": 6.125,
1769
+ "grad_norm": 0.35134392976760864,
1770
+ "learning_rate": 6.476221866126028e-05,
1771
+ "loss": 0.6895,
1772
+ "step": 24500
1773
+ },
1774
+ {
1775
+ "epoch": 6.15,
1776
+ "grad_norm": 0.3663840591907501,
1777
+ "learning_rate": 6.312291828435076e-05,
1778
+ "loss": 0.7168,
1779
+ "step": 24600
1780
+ },
1781
+ {
1782
+ "epoch": 6.175,
1783
+ "grad_norm": 0.38176533579826355,
1784
+ "learning_rate": 6.150162950953264e-05,
1785
+ "loss": 0.7008,
1786
+ "step": 24700
1787
+ },
1788
+ {
1789
+ "epoch": 6.2,
1790
+ "grad_norm": 0.36779487133026123,
1791
+ "learning_rate": 5.989850859999227e-05,
1792
+ "loss": 0.714,
1793
+ "step": 24800
1794
+ },
1795
+ {
1796
+ "epoch": 6.225,
1797
+ "grad_norm": 0.3299673795700073,
1798
+ "learning_rate": 5.831371006785963e-05,
1799
+ "loss": 0.6937,
1800
+ "step": 24900
1801
+ },
1802
+ {
1803
+ "epoch": 6.25,
1804
+ "grad_norm": 0.40928682684898376,
1805
+ "learning_rate": 5.6747386659315755e-05,
1806
+ "loss": 0.6806,
1807
+ "step": 25000
1808
+ },
1809
+ {
1810
+ "epoch": 6.275,
1811
+ "grad_norm": 0.3500606119632721,
1812
+ "learning_rate": 5.519968933987082e-05,
1813
+ "loss": 0.698,
1814
+ "step": 25100
1815
+ },
1816
+ {
1817
+ "epoch": 6.3,
1818
+ "grad_norm": 0.3423629403114319,
1819
+ "learning_rate": 5.367076727981382e-05,
1820
+ "loss": 0.6839,
1821
+ "step": 25200
1822
+ },
1823
+ {
1824
+ "epoch": 6.325,
1825
+ "grad_norm": 0.34447258710861206,
1826
+ "learning_rate": 5.216076783983492e-05,
1827
+ "loss": 0.7238,
1828
+ "step": 25300
1829
+ },
1830
+ {
1831
+ "epoch": 6.35,
1832
+ "grad_norm": 0.35269954800605774,
1833
+ "learning_rate": 5.066983655682325e-05,
1834
+ "loss": 0.7214,
1835
+ "step": 25400
1836
+ },
1837
+ {
1838
+ "epoch": 6.375,
1839
+ "grad_norm": 0.380842387676239,
1840
+ "learning_rate": 4.919811712983879e-05,
1841
+ "loss": 0.6966,
1842
+ "step": 25500
1843
+ },
1844
+ {
1845
+ "epoch": 6.4,
1846
+ "grad_norm": 0.3271864652633667,
1847
+ "learning_rate": 4.7745751406263163e-05,
1848
+ "loss": 0.6867,
1849
+ "step": 25600
1850
+ },
1851
+ {
1852
+ "epoch": 6.425,
1853
+ "grad_norm": 0.308631032705307,
1854
+ "learning_rate": 4.6312879368127645e-05,
1855
+ "loss": 0.7056,
1856
+ "step": 25700
1857
+ },
1858
+ {
1859
+ "epoch": 6.45,
1860
+ "grad_norm": 0.3455272912979126,
1861
+ "learning_rate": 4.4899639118621604e-05,
1862
+ "loss": 0.6774,
1863
+ "step": 25800
1864
+ },
1865
+ {
1866
+ "epoch": 6.475,
1867
+ "grad_norm": 0.3664863407611847,
1868
+ "learning_rate": 4.350616686878175e-05,
1869
+ "loss": 0.6826,
1870
+ "step": 25900
1871
+ },
1872
+ {
1873
+ "epoch": 6.5,
1874
+ "grad_norm": 0.3774823248386383,
1875
+ "learning_rate": 4.213259692436367e-05,
1876
+ "loss": 0.69,
1877
+ "step": 26000
1878
+ },
1879
+ {
1880
+ "epoch": 6.525,
1881
+ "grad_norm": 0.35024821758270264,
1882
+ "learning_rate": 4.077906167289766e-05,
1883
+ "loss": 0.7111,
1884
+ "step": 26100
1885
+ },
1886
+ {
1887
+ "epoch": 6.55,
1888
+ "grad_norm": 0.30343690514564514,
1889
+ "learning_rate": 3.944569157092839e-05,
1890
+ "loss": 0.687,
1891
+ "step": 26200
1892
+ },
1893
+ {
1894
+ "epoch": 6.575,
1895
+ "grad_norm": 0.30187878012657166,
1896
+ "learning_rate": 3.8132615131441396e-05,
1897
+ "loss": 0.7084,
1898
+ "step": 26300
1899
+ },
1900
+ {
1901
+ "epoch": 6.6,
1902
+ "grad_norm": 0.3257708251476288,
1903
+ "learning_rate": 3.6839958911476953e-05,
1904
+ "loss": 0.6622,
1905
+ "step": 26400
1906
+ },
1907
+ {
1908
+ "epoch": 6.625,
1909
+ "grad_norm": 0.328375905752182,
1910
+ "learning_rate": 3.5567847499932e-05,
1911
+ "loss": 0.7004,
1912
+ "step": 26500
1913
+ },
1914
+ {
1915
+ "epoch": 6.65,
1916
+ "grad_norm": 0.3516775667667389,
1917
+ "learning_rate": 3.431640350555204e-05,
1918
+ "loss": 0.7012,
1919
+ "step": 26600
1920
+ },
1921
+ {
1922
+ "epoch": 6.675,
1923
+ "grad_norm": 0.2901289463043213,
1924
+ "learning_rate": 3.308574754511404e-05,
1925
+ "loss": 0.6827,
1926
+ "step": 26700
1927
+ },
1928
+ {
1929
+ "epoch": 6.7,
1930
+ "grad_norm": 0.3217683732509613,
1931
+ "learning_rate": 3.187599823180071e-05,
1932
+ "loss": 0.6935,
1933
+ "step": 26800
1934
+ },
1935
+ {
1936
+ "epoch": 6.725,
1937
+ "grad_norm": 0.360307902097702,
1938
+ "learning_rate": 3.0687272163768986e-05,
1939
+ "loss": 0.6861,
1940
+ "step": 26900
1941
+ },
1942
+ {
1943
+ "epoch": 6.75,
1944
+ "grad_norm": 0.35294219851493835,
1945
+ "learning_rate": 2.9519683912911265e-05,
1946
+ "loss": 0.686,
1947
+ "step": 27000
1948
+ },
1949
+ {
1950
+ "epoch": 6.775,
1951
+ "grad_norm": 0.3497380316257477,
1952
+ "learning_rate": 2.8373346013813417e-05,
1953
+ "loss": 0.663,
1954
+ "step": 27100
1955
+ },
1956
+ {
1957
+ "epoch": 6.8,
1958
+ "grad_norm": 0.36016374826431274,
1959
+ "learning_rate": 2.7248368952908055e-05,
1960
+ "loss": 0.6904,
1961
+ "step": 27200
1962
+ },
1963
+ {
1964
+ "epoch": 6.825,
1965
+ "grad_norm": 0.37491482496261597,
1966
+ "learning_rate": 2.6144861157825773e-05,
1967
+ "loss": 0.6998,
1968
+ "step": 27300
1969
+ },
1970
+ {
1971
+ "epoch": 6.85,
1972
+ "grad_norm": 0.35341572761535645,
1973
+ "learning_rate": 2.5062928986944677e-05,
1974
+ "loss": 0.6909,
1975
+ "step": 27400
1976
+ },
1977
+ {
1978
+ "epoch": 6.875,
1979
+ "grad_norm": 0.34248894453048706,
1980
+ "learning_rate": 2.4002676719139166e-05,
1981
+ "loss": 0.699,
1982
+ "step": 27500
1983
+ },
1984
+ {
1985
+ "epoch": 6.9,
1986
+ "grad_norm": 0.3178793787956238,
1987
+ "learning_rate": 2.296420654372966e-05,
1988
+ "loss": 0.6879,
1989
+ "step": 27600
1990
+ },
1991
+ {
1992
+ "epoch": 6.925,
1993
+ "grad_norm": 0.3343910574913025,
1994
+ "learning_rate": 2.1947618550633096e-05,
1995
+ "loss": 0.6952,
1996
+ "step": 27700
1997
+ },
1998
+ {
1999
+ "epoch": 6.95,
2000
+ "grad_norm": 0.39180707931518555,
2001
+ "learning_rate": 2.0953010720716037e-05,
2002
+ "loss": 0.7093,
2003
+ "step": 27800
2004
+ },
2005
+ {
2006
+ "epoch": 6.975,
2007
+ "grad_norm": 0.3549463152885437,
2008
+ "learning_rate": 1.9980478916351297e-05,
2009
+ "loss": 0.7203,
2010
+ "step": 27900
2011
+ },
2012
+ {
2013
+ "epoch": 7.0,
2014
+ "grad_norm": 0.33500146865844727,
2015
+ "learning_rate": 1.9030116872178316e-05,
2016
+ "loss": 0.6795,
2017
+ "step": 28000
2018
+ },
2019
+ {
2020
+ "epoch": 7.0,
2021
+ "eval_loss": 0.7552515864372253,
2022
+ "eval_runtime": 157.5678,
2023
+ "eval_samples_per_second": 25.386,
2024
+ "eval_steps_per_second": 6.346,
2025
+ "step": 28000
2026
+ },
2027
+ {
2028
+ "epoch": 7.025,
2029
+ "grad_norm": 0.3765384256839752,
2030
+ "learning_rate": 1.8102016186068992e-05,
2031
+ "loss": 0.6917,
2032
+ "step": 28100
2033
+ },
2034
+ {
2035
+ "epoch": 7.05,
2036
+ "grad_norm": 0.32540133595466614,
2037
+ "learning_rate": 1.719626631029911e-05,
2038
+ "loss": 0.6983,
2039
+ "step": 28200
2040
+ },
2041
+ {
2042
+ "epoch": 7.075,
2043
+ "grad_norm": 0.3466598391532898,
2044
+ "learning_rate": 1.6312954542926888e-05,
2045
+ "loss": 0.6723,
2046
+ "step": 28300
2047
+ },
2048
+ {
2049
+ "epoch": 7.1,
2050
+ "grad_norm": 0.35120540857315063,
2051
+ "learning_rate": 1.5452166019378987e-05,
2052
+ "loss": 0.6802,
2053
+ "step": 28400
2054
+ },
2055
+ {
2056
+ "epoch": 7.125,
2057
+ "grad_norm": 0.3241645097732544,
2058
+ "learning_rate": 1.4613983704244827e-05,
2059
+ "loss": 0.6722,
2060
+ "step": 28500
2061
+ },
2062
+ {
2063
+ "epoch": 7.15,
2064
+ "grad_norm": 0.29869577288627625,
2065
+ "learning_rate": 1.3798488383280488e-05,
2066
+ "loss": 0.698,
2067
+ "step": 28600
2068
+ },
2069
+ {
2070
+ "epoch": 7.175,
2071
+ "grad_norm": 0.3423502743244171,
2072
+ "learning_rate": 1.3005758655622424e-05,
2073
+ "loss": 0.6757,
2074
+ "step": 28700
2075
+ },
2076
+ {
2077
+ "epoch": 7.2,
2078
+ "grad_norm": 0.31562918424606323,
2079
+ "learning_rate": 1.2235870926211617e-05,
2080
+ "loss": 0.7011,
2081
+ "step": 28800
2082
+ },
2083
+ {
2084
+ "epoch": 7.225,
2085
+ "grad_norm": 0.3869987726211548,
2086
+ "learning_rate": 1.1488899398429897e-05,
2087
+ "loss": 0.6994,
2088
+ "step": 28900
2089
+ },
2090
+ {
2091
+ "epoch": 7.25,
2092
+ "grad_norm": 0.34807926416397095,
2093
+ "learning_rate": 1.0764916066947795e-05,
2094
+ "loss": 0.6644,
2095
+ "step": 29000
2096
+ },
2097
+ {
2098
+ "epoch": 7.275,
2099
+ "grad_norm": 0.32184460759162903,
2100
+ "learning_rate": 1.0063990710785648e-05,
2101
+ "loss": 0.6944,
2102
+ "step": 29100
2103
+ },
2104
+ {
2105
+ "epoch": 7.3,
2106
+ "grad_norm": 0.33581066131591797,
2107
+ "learning_rate": 9.386190886588208e-06,
2108
+ "loss": 0.6889,
2109
+ "step": 29200
2110
+ },
2111
+ {
2112
+ "epoch": 7.325,
2113
+ "grad_norm": 0.30903181433677673,
2114
+ "learning_rate": 8.731581922113152e-06,
2115
+ "loss": 0.7131,
2116
+ "step": 29300
2117
+ },
2118
+ {
2119
+ "epoch": 7.35,
2120
+ "grad_norm": 0.3609547019004822,
2121
+ "learning_rate": 8.10022690993506e-06,
2122
+ "loss": 0.6729,
2123
+ "step": 29400
2124
+ },
2125
+ {
2126
+ "epoch": 7.375,
2127
+ "grad_norm": 0.35342085361480713,
2128
+ "learning_rate": 7.4921867013640064e-06,
2129
+ "loss": 0.6824,
2130
+ "step": 29500
2131
+ },
2132
+ {
2133
+ "epoch": 7.4,
2134
+ "grad_norm": 0.30028045177459717,
2135
+ "learning_rate": 6.907519900580861e-06,
2136
+ "loss": 0.7038,
2137
+ "step": 29600
2138
+ },
2139
+ {
2140
+ "epoch": 7.425,
2141
+ "grad_norm": 0.33283013105392456,
2142
+ "learning_rate": 6.34628285898875e-06,
2143
+ "loss": 0.6993,
2144
+ "step": 29700
2145
+ },
2146
+ {
2147
+ "epoch": 7.45,
2148
+ "grad_norm": 0.3742896318435669,
2149
+ "learning_rate": 5.808529669781903e-06,
2150
+ "loss": 0.6835,
2151
+ "step": 29800
2152
+ },
2153
+ {
2154
+ "epoch": 7.475,
2155
+ "grad_norm": 0.4153238534927368,
2156
+ "learning_rate": 5.294312162731935e-06,
2157
+ "loss": 0.6899,
2158
+ "step": 29900
2159
+ },
2160
+ {
2161
+ "epoch": 7.5,
2162
+ "grad_norm": 0.3647385835647583,
2163
+ "learning_rate": 4.803679899192393e-06,
2164
+ "loss": 0.6809,
2165
+ "step": 30000
2166
+ },
2167
+ {
2168
+ "epoch": 7.525,
2169
+ "grad_norm": 0.4097493886947632,
2170
+ "learning_rate": 4.336680167322055e-06,
2171
+ "loss": 0.6943,
2172
+ "step": 30100
2173
+ },
2174
+ {
2175
+ "epoch": 7.55,
2176
+ "grad_norm": 0.3686430752277374,
2177
+ "learning_rate": 3.893357977527101e-06,
2178
+ "loss": 0.6982,
2179
+ "step": 30200
2180
+ },
2181
+ {
2182
+ "epoch": 7.575,
2183
+ "grad_norm": 0.3367227613925934,
2184
+ "learning_rate": 3.4737560581228343e-06,
2185
+ "loss": 0.6926,
2186
+ "step": 30300
2187
+ },
2188
+ {
2189
+ "epoch": 7.6,
2190
+ "grad_norm": 0.33962807059288025,
2191
+ "learning_rate": 3.077914851215585e-06,
2192
+ "loss": 0.6978,
2193
+ "step": 30400
2194
+ },
2195
+ {
2196
+ "epoch": 7.625,
2197
+ "grad_norm": 0.3672046661376953,
2198
+ "learning_rate": 2.7058725088047465e-06,
2199
+ "loss": 0.6876,
2200
+ "step": 30500
2201
+ },
2202
+ {
2203
+ "epoch": 7.65,
2204
+ "grad_norm": 0.38116347789764404,
2205
+ "learning_rate": 2.357664889105687e-06,
2206
+ "loss": 0.7266,
2207
+ "step": 30600
2208
+ },
2209
+ {
2210
+ "epoch": 7.675,
2211
+ "grad_norm": 0.3807855248451233,
2212
+ "learning_rate": 2.0333255530934903e-06,
2213
+ "loss": 0.6838,
2214
+ "step": 30700
2215
+ },
2216
+ {
2217
+ "epoch": 7.7,
2218
+ "grad_norm": 0.3613605499267578,
2219
+ "learning_rate": 1.7328857612684267e-06,
2220
+ "loss": 0.6628,
2221
+ "step": 30800
2222
+ },
2223
+ {
2224
+ "epoch": 7.725,
2225
+ "grad_norm": 0.3573913276195526,
2226
+ "learning_rate": 1.4563744706429517e-06,
2227
+ "loss": 0.6657,
2228
+ "step": 30900
2229
+ },
2230
+ {
2231
+ "epoch": 7.75,
2232
+ "grad_norm": 0.378730833530426,
2233
+ "learning_rate": 1.2038183319507957e-06,
2234
+ "loss": 0.6951,
2235
+ "step": 31000
2236
+ },
2237
+ {
2238
+ "epoch": 7.775,
2239
+ "grad_norm": 0.35392504930496216,
2240
+ "learning_rate": 9.752416870782156e-07,
2241
+ "loss": 0.7021,
2242
+ "step": 31100
2243
+ },
2244
+ {
2245
+ "epoch": 7.8,
2246
+ "grad_norm": 0.37442269921302795,
2247
+ "learning_rate": 7.70666566718009e-07,
2248
+ "loss": 0.6924,
2249
+ "step": 31200
2250
+ },
2251
+ {
2252
+ "epoch": 7.825,
2253
+ "grad_norm": 0.37346911430358887,
2254
+ "learning_rate": 5.90112688246075e-07,
2255
+ "loss": 0.6956,
2256
+ "step": 31300
2257
+ },
2258
+ {
2259
+ "epoch": 7.85,
2260
+ "grad_norm": 0.3577325940132141,
2261
+ "learning_rate": 4.335974538210441e-07,
2262
+ "loss": 0.6897,
2263
+ "step": 31400
2264
+ },
2265
+ {
2266
+ "epoch": 7.875,
2267
+ "grad_norm": 0.38326212763786316,
2268
+ "learning_rate": 3.0113594870689873e-07,
2269
+ "loss": 0.6949,
2270
+ "step": 31500
2271
+ },
2272
+ {
2273
+ "epoch": 7.9,
2274
+ "grad_norm": 0.39763376116752625,
2275
+ "learning_rate": 1.9274093981927476e-07,
2276
+ "loss": 0.7033,
2277
+ "step": 31600
2278
+ },
2279
+ {
2280
+ "epoch": 7.925,
2281
+ "grad_norm": 0.3353131413459778,
2282
+ "learning_rate": 1.0842287449469579e-07,
2283
+ "loss": 0.6684,
2284
+ "step": 31700
2285
+ },
2286
+ {
2287
+ "epoch": 7.95,
2288
+ "grad_norm": 0.2801348865032196,
2289
+ "learning_rate": 4.818987948379538e-08,
2290
+ "loss": 0.6459,
2291
+ "step": 31800
2292
+ },
2293
+ {
2294
+ "epoch": 7.975,
2295
+ "grad_norm": 0.3617517352104187,
2296
+ "learning_rate": 1.2047760167999133e-08,
2297
+ "loss": 0.701,
2298
+ "step": 31900
2299
+ },
2300
+ {
2301
+ "epoch": 8.0,
2302
+ "grad_norm": 0.3444000482559204,
2303
+ "learning_rate": 0.0,
2304
+ "loss": 0.6832,
2305
+ "step": 32000
2306
+ }
2307
+ ],
2308
+ "logging_steps": 100,
2309
+ "max_steps": 32000,
2310
+ "num_input_tokens_seen": 0,
2311
+ "num_train_epochs": 8,
2312
+ "save_steps": 500,
2313
+ "stateful_callbacks": {
2314
+ "TrainerControl": {
2315
+ "args": {
2316
+ "should_epoch_stop": false,
2317
+ "should_evaluate": false,
2318
+ "should_log": false,
2319
+ "should_save": true,
2320
+ "should_training_stop": true
2321
+ },
2322
+ "attributes": {}
2323
+ }
2324
+ },
2325
+ "total_flos": 1.243638398976e+17,
2326
+ "train_batch_size": 4,
2327
+ "trial_name": null,
2328
+ "trial_params": null
2329
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b69cd899885bfc0cb450dc28f87485b205dcd158880eae5b06924f633493face
3
+ size 5240