hyungjoochae committed (verified)
Commit b2e1dc0 · Parent: bf8c0ad

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json ADDED
@@ -0,0 +1,24 @@
+{
+  "</tool_call>": 151658,
+  "<tool_call>": 151657,
+  "<|box_end|>": 151649,
+  "<|box_start|>": 151648,
+  "<|endoftext|>": 151643,
+  "<|file_sep|>": 151664,
+  "<|fim_middle|>": 151660,
+  "<|fim_pad|>": 151662,
+  "<|fim_prefix|>": 151659,
+  "<|fim_suffix|>": 151661,
+  "<|im_end|>": 151645,
+  "<|im_start|>": 151644,
+  "<|image_pad|>": 151655,
+  "<|object_ref_end|>": 151647,
+  "<|object_ref_start|>": 151646,
+  "<|quad_end|>": 151651,
+  "<|quad_start|>": 151650,
+  "<|repo_name|>": 151663,
+  "<|video_pad|>": 151656,
+  "<|vision_end|>": 151653,
+  "<|vision_pad|>": 151654,
+  "<|vision_start|>": 151652
+}
config.json ADDED
@@ -0,0 +1,28 @@
+{
+  "_name_or_path": "Qwen/Qwen2.5-7B-Instruct",
+  "architectures": [
+    "Qwen2ForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "eos_token_id": 151645,
+  "hidden_act": "silu",
+  "hidden_size": 3584,
+  "initializer_range": 0.02,
+  "intermediate_size": 18944,
+  "max_position_embeddings": 32768,
+  "max_window_layers": 28,
+  "model_type": "qwen2",
+  "num_attention_heads": 28,
+  "num_hidden_layers": 28,
+  "num_key_value_heads": 4,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 1000000.0,
+  "sliding_window": null,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float32",
+  "transformers_version": "4.48.1",
+  "use_cache": false,
+  "use_sliding_window": false,
+  "vocab_size": 151665
+}
generation_config.json ADDED
@@ -0,0 +1,14 @@
+{
+  "bos_token_id": 151643,
+  "do_sample": true,
+  "eos_token_id": [
+    151645,
+    151643
+  ],
+  "pad_token_id": 151643,
+  "repetition_penalty": 1.05,
+  "temperature": 0.7,
+  "top_k": 20,
+  "top_p": 0.8,
+  "transformers_version": "4.48.1"
+}
merges.txt ADDED
The diff for this file is too large to render.
 
model-00001-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5754ba8dc9e3ec07574a4d6845cacacf003db948a3d170461b0d940803bdbd69
+size 4970967152
model-00002-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6e48b431aba8b1389be80754e023bd9ce36d801629989e08dbdcb29f51811a8f
+size 4778622352
model-00003-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e6e5f242c711ae17c49e2b5964fd83b96370d6cff7c4467d536b71f6bfdff64
+size 4932743960
model-00004-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d10630ef463c768c0acfc0e2633b51ec710d7b880b4ddb611d76ba7b604df754
+size 4932743992
model-00005-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8f9054b852a705d8484da398099a95ea1a80be5aff7c17332ad9eaeac349bc2
+size 4998852296
model-00006-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee94475a0e01b811ad7cb56ef4c12d7ff4397fc97046fde8128a92fedda57b26
+size 3662865184
model-00007-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff184d13f7463a8b81f7240ea9a5054b0521d60b22d8a1e6e9cfadf19cb47ca8
+size 2174269568
model.safetensors.index.json ADDED
@@ -0,0 +1,346 @@
+{
+  "metadata": {
+    "total_size": 30451025920
+  },
+  "weight_map": {
+    "lm_head.weight": "model-00007-of-00007.safetensors",
+    "model.embed_tokens.weight": "model-00001-of-00007.safetensors",
+    "model.layers.0.input_layernorm.weight": "model-00001-of-00007.safetensors",
+    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00007.safetensors",
+    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00007.safetensors",
+    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00007.safetensors",
+    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00007.safetensors",
+    "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00007.safetensors",
+    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00007.safetensors",
+    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00007.safetensors",
+    "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00007.safetensors",
+    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00007.safetensors",
+    "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00007.safetensors",
+    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00007.safetensors",
+    "model.layers.1.input_layernorm.weight": "model-00001-of-00007.safetensors",
+    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00007.safetensors",
+    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00007.safetensors",
+    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00007.safetensors",
+    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00007.safetensors",
+    "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00007.safetensors",
+    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00007.safetensors",
+    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00007.safetensors",
+    "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00007.safetensors",
+    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00007.safetensors",
+    "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00007.safetensors",
+    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00007.safetensors",
+    "model.layers.10.input_layernorm.weight": "model-00003-of-00007.safetensors",
+    "model.layers.10.mlp.down_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.10.mlp.gate_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.10.mlp.up_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.10.post_attention_layernorm.weight": "model-00003-of-00007.safetensors",
+    "model.layers.10.self_attn.k_proj.bias": "model-00003-of-00007.safetensors",
+    "model.layers.10.self_attn.k_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.10.self_attn.o_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.10.self_attn.q_proj.bias": "model-00003-of-00007.safetensors",
+    "model.layers.10.self_attn.q_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.10.self_attn.v_proj.bias": "model-00003-of-00007.safetensors",
+    "model.layers.10.self_attn.v_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.11.input_layernorm.weight": "model-00003-of-00007.safetensors",
+    "model.layers.11.mlp.down_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.11.mlp.gate_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.11.mlp.up_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.11.post_attention_layernorm.weight": "model-00003-of-00007.safetensors",
+    "model.layers.11.self_attn.k_proj.bias": "model-00003-of-00007.safetensors",
+    "model.layers.11.self_attn.k_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.11.self_attn.o_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.11.self_attn.q_proj.bias": "model-00003-of-00007.safetensors",
+    "model.layers.11.self_attn.q_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.11.self_attn.v_proj.bias": "model-00003-of-00007.safetensors",
+    "model.layers.11.self_attn.v_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.12.input_layernorm.weight": "model-00003-of-00007.safetensors",
+    "model.layers.12.mlp.down_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.12.mlp.gate_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.12.mlp.up_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.12.post_attention_layernorm.weight": "model-00003-of-00007.safetensors",
+    "model.layers.12.self_attn.k_proj.bias": "model-00003-of-00007.safetensors",
+    "model.layers.12.self_attn.k_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.12.self_attn.o_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.12.self_attn.q_proj.bias": "model-00003-of-00007.safetensors",
+    "model.layers.12.self_attn.q_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.12.self_attn.v_proj.bias": "model-00003-of-00007.safetensors",
+    "model.layers.12.self_attn.v_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.13.input_layernorm.weight": "model-00004-of-00007.safetensors",
+    "model.layers.13.mlp.down_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.13.mlp.gate_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.13.mlp.up_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.13.post_attention_layernorm.weight": "model-00004-of-00007.safetensors",
+    "model.layers.13.self_attn.k_proj.bias": "model-00003-of-00007.safetensors",
+    "model.layers.13.self_attn.k_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.13.self_attn.o_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.13.self_attn.q_proj.bias": "model-00003-of-00007.safetensors",
+    "model.layers.13.self_attn.q_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.13.self_attn.v_proj.bias": "model-00003-of-00007.safetensors",
+    "model.layers.13.self_attn.v_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.14.input_layernorm.weight": "model-00004-of-00007.safetensors",
+    "model.layers.14.mlp.down_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.14.mlp.gate_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.14.mlp.up_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.14.post_attention_layernorm.weight": "model-00004-of-00007.safetensors",
+    "model.layers.14.self_attn.k_proj.bias": "model-00004-of-00007.safetensors",
+    "model.layers.14.self_attn.k_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.14.self_attn.o_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.14.self_attn.q_proj.bias": "model-00004-of-00007.safetensors",
+    "model.layers.14.self_attn.q_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.14.self_attn.v_proj.bias": "model-00004-of-00007.safetensors",
+    "model.layers.14.self_attn.v_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.15.input_layernorm.weight": "model-00004-of-00007.safetensors",
+    "model.layers.15.mlp.down_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.15.mlp.gate_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.15.mlp.up_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.15.post_attention_layernorm.weight": "model-00004-of-00007.safetensors",
+    "model.layers.15.self_attn.k_proj.bias": "model-00004-of-00007.safetensors",
+    "model.layers.15.self_attn.k_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.15.self_attn.o_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.15.self_attn.q_proj.bias": "model-00004-of-00007.safetensors",
+    "model.layers.15.self_attn.q_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.15.self_attn.v_proj.bias": "model-00004-of-00007.safetensors",
+    "model.layers.15.self_attn.v_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.16.input_layernorm.weight": "model-00004-of-00007.safetensors",
+    "model.layers.16.mlp.down_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.16.mlp.gate_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.16.mlp.up_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.16.post_attention_layernorm.weight": "model-00004-of-00007.safetensors",
+    "model.layers.16.self_attn.k_proj.bias": "model-00004-of-00007.safetensors",
+    "model.layers.16.self_attn.k_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.16.self_attn.o_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.16.self_attn.q_proj.bias": "model-00004-of-00007.safetensors",
+    "model.layers.16.self_attn.q_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.16.self_attn.v_proj.bias": "model-00004-of-00007.safetensors",
+    "model.layers.16.self_attn.v_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.17.input_layernorm.weight": "model-00004-of-00007.safetensors",
+    "model.layers.17.mlp.down_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.17.mlp.gate_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.17.mlp.up_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.17.post_attention_layernorm.weight": "model-00004-of-00007.safetensors",
+    "model.layers.17.self_attn.k_proj.bias": "model-00004-of-00007.safetensors",
+    "model.layers.17.self_attn.k_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.17.self_attn.o_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.17.self_attn.q_proj.bias": "model-00004-of-00007.safetensors",
+    "model.layers.17.self_attn.q_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.17.self_attn.v_proj.bias": "model-00004-of-00007.safetensors",
+    "model.layers.17.self_attn.v_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.18.input_layernorm.weight": "model-00005-of-00007.safetensors",
+    "model.layers.18.mlp.down_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.18.mlp.gate_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.18.mlp.up_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.18.post_attention_layernorm.weight": "model-00005-of-00007.safetensors",
+    "model.layers.18.self_attn.k_proj.bias": "model-00004-of-00007.safetensors",
+    "model.layers.18.self_attn.k_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.18.self_attn.o_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.18.self_attn.q_proj.bias": "model-00004-of-00007.safetensors",
+    "model.layers.18.self_attn.q_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.18.self_attn.v_proj.bias": "model-00004-of-00007.safetensors",
+    "model.layers.18.self_attn.v_proj.weight": "model-00004-of-00007.safetensors",
+    "model.layers.19.input_layernorm.weight": "model-00005-of-00007.safetensors",
+    "model.layers.19.mlp.down_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.19.mlp.gate_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.19.mlp.up_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.19.post_attention_layernorm.weight": "model-00005-of-00007.safetensors",
+    "model.layers.19.self_attn.k_proj.bias": "model-00005-of-00007.safetensors",
+    "model.layers.19.self_attn.k_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.19.self_attn.o_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.19.self_attn.q_proj.bias": "model-00005-of-00007.safetensors",
+    "model.layers.19.self_attn.q_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.19.self_attn.v_proj.bias": "model-00005-of-00007.safetensors",
+    "model.layers.19.self_attn.v_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.2.input_layernorm.weight": "model-00001-of-00007.safetensors",
+    "model.layers.2.mlp.down_proj.weight": "model-00001-of-00007.safetensors",
+    "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00007.safetensors",
+    "model.layers.2.mlp.up_proj.weight": "model-00001-of-00007.safetensors",
+    "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00007.safetensors",
+    "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00007.safetensors",
+    "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00007.safetensors",
+    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00007.safetensors",
+    "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00007.safetensors",
+    "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00007.safetensors",
+    "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00007.safetensors",
+    "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00007.safetensors",
+    "model.layers.20.input_layernorm.weight": "model-00005-of-00007.safetensors",
+    "model.layers.20.mlp.down_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.20.mlp.gate_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.20.mlp.up_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.20.post_attention_layernorm.weight": "model-00005-of-00007.safetensors",
+    "model.layers.20.self_attn.k_proj.bias": "model-00005-of-00007.safetensors",
+    "model.layers.20.self_attn.k_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.20.self_attn.o_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.20.self_attn.q_proj.bias": "model-00005-of-00007.safetensors",
+    "model.layers.20.self_attn.q_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.20.self_attn.v_proj.bias": "model-00005-of-00007.safetensors",
+    "model.layers.20.self_attn.v_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.21.input_layernorm.weight": "model-00005-of-00007.safetensors",
+    "model.layers.21.mlp.down_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.21.mlp.gate_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.21.mlp.up_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.21.post_attention_layernorm.weight": "model-00005-of-00007.safetensors",
+    "model.layers.21.self_attn.k_proj.bias": "model-00005-of-00007.safetensors",
+    "model.layers.21.self_attn.k_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.21.self_attn.o_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.21.self_attn.q_proj.bias": "model-00005-of-00007.safetensors",
+    "model.layers.21.self_attn.q_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.21.self_attn.v_proj.bias": "model-00005-of-00007.safetensors",
+    "model.layers.21.self_attn.v_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.22.input_layernorm.weight": "model-00005-of-00007.safetensors",
+    "model.layers.22.mlp.down_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.22.mlp.gate_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.22.mlp.up_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.22.post_attention_layernorm.weight": "model-00005-of-00007.safetensors",
+    "model.layers.22.self_attn.k_proj.bias": "model-00005-of-00007.safetensors",
+    "model.layers.22.self_attn.k_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.22.self_attn.o_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.22.self_attn.q_proj.bias": "model-00005-of-00007.safetensors",
+    "model.layers.22.self_attn.q_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.22.self_attn.v_proj.bias": "model-00005-of-00007.safetensors",
+    "model.layers.22.self_attn.v_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.23.input_layernorm.weight": "model-00005-of-00007.safetensors",
+    "model.layers.23.mlp.down_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.23.mlp.gate_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.23.mlp.up_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.23.post_attention_layernorm.weight": "model-00005-of-00007.safetensors",
+    "model.layers.23.self_attn.k_proj.bias": "model-00005-of-00007.safetensors",
+    "model.layers.23.self_attn.k_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.23.self_attn.o_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.23.self_attn.q_proj.bias": "model-00005-of-00007.safetensors",
+    "model.layers.23.self_attn.q_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.23.self_attn.v_proj.bias": "model-00005-of-00007.safetensors",
+    "model.layers.23.self_attn.v_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.24.input_layernorm.weight": "model-00006-of-00007.safetensors",
+    "model.layers.24.mlp.down_proj.weight": "model-00006-of-00007.safetensors",
+    "model.layers.24.mlp.gate_proj.weight": "model-00006-of-00007.safetensors",
+    "model.layers.24.mlp.up_proj.weight": "model-00006-of-00007.safetensors",
+    "model.layers.24.post_attention_layernorm.weight": "model-00006-of-00007.safetensors",
+    "model.layers.24.self_attn.k_proj.bias": "model-00005-of-00007.safetensors",
+    "model.layers.24.self_attn.k_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.24.self_attn.o_proj.weight": "model-00006-of-00007.safetensors",
+    "model.layers.24.self_attn.q_proj.bias": "model-00005-of-00007.safetensors",
+    "model.layers.24.self_attn.q_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.24.self_attn.v_proj.bias": "model-00005-of-00007.safetensors",
+    "model.layers.24.self_attn.v_proj.weight": "model-00005-of-00007.safetensors",
+    "model.layers.25.input_layernorm.weight": "model-00006-of-00007.safetensors",
+    "model.layers.25.mlp.down_proj.weight": "model-00006-of-00007.safetensors",
+    "model.layers.25.mlp.gate_proj.weight": "model-00006-of-00007.safetensors",
+    "model.layers.25.mlp.up_proj.weight": "model-00006-of-00007.safetensors",
+    "model.layers.25.post_attention_layernorm.weight": "model-00006-of-00007.safetensors",
+    "model.layers.25.self_attn.k_proj.bias": "model-00006-of-00007.safetensors",
+    "model.layers.25.self_attn.k_proj.weight": "model-00006-of-00007.safetensors",
+    "model.layers.25.self_attn.o_proj.weight": "model-00006-of-00007.safetensors",
+    "model.layers.25.self_attn.q_proj.bias": "model-00006-of-00007.safetensors",
+    "model.layers.25.self_attn.q_proj.weight": "model-00006-of-00007.safetensors",
+    "model.layers.25.self_attn.v_proj.bias": "model-00006-of-00007.safetensors",
+    "model.layers.25.self_attn.v_proj.weight": "model-00006-of-00007.safetensors",
+    "model.layers.26.input_layernorm.weight": "model-00006-of-00007.safetensors",
+    "model.layers.26.mlp.down_proj.weight": "model-00006-of-00007.safetensors",
+    "model.layers.26.mlp.gate_proj.weight": "model-00006-of-00007.safetensors",
+    "model.layers.26.mlp.up_proj.weight": "model-00006-of-00007.safetensors",
+    "model.layers.26.post_attention_layernorm.weight": "model-00006-of-00007.safetensors",
+    "model.layers.26.self_attn.k_proj.bias": "model-00006-of-00007.safetensors",
+    "model.layers.26.self_attn.k_proj.weight": "model-00006-of-00007.safetensors",
+    "model.layers.26.self_attn.o_proj.weight": "model-00006-of-00007.safetensors",
+    "model.layers.26.self_attn.q_proj.bias": "model-00006-of-00007.safetensors",
+    "model.layers.26.self_attn.q_proj.weight": "model-00006-of-00007.safetensors",
+    "model.layers.26.self_attn.v_proj.bias": "model-00006-of-00007.safetensors",
+    "model.layers.26.self_attn.v_proj.weight": "model-00006-of-00007.safetensors",
+    "model.layers.27.input_layernorm.weight": "model-00006-of-00007.safetensors",
+    "model.layers.27.mlp.down_proj.weight": "model-00006-of-00007.safetensors",
+    "model.layers.27.mlp.gate_proj.weight": "model-00006-of-00007.safetensors",
+    "model.layers.27.mlp.up_proj.weight": "model-00006-of-00007.safetensors",
+    "model.layers.27.post_attention_layernorm.weight": "model-00006-of-00007.safetensors",
+    "model.layers.27.self_attn.k_proj.bias": "model-00006-of-00007.safetensors",
+    "model.layers.27.self_attn.k_proj.weight": "model-00006-of-00007.safetensors",
+    "model.layers.27.self_attn.o_proj.weight": "model-00006-of-00007.safetensors",
+    "model.layers.27.self_attn.q_proj.bias": "model-00006-of-00007.safetensors",
+    "model.layers.27.self_attn.q_proj.weight": "model-00006-of-00007.safetensors",
+    "model.layers.27.self_attn.v_proj.bias": "model-00006-of-00007.safetensors",
+    "model.layers.27.self_attn.v_proj.weight": "model-00006-of-00007.safetensors",
+    "model.layers.3.input_layernorm.weight": "model-00002-of-00007.safetensors",
+    "model.layers.3.mlp.down_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.3.mlp.gate_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.3.mlp.up_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.3.post_attention_layernorm.weight": "model-00002-of-00007.safetensors",
+    "model.layers.3.self_attn.k_proj.bias": "model-00002-of-00007.safetensors",
+    "model.layers.3.self_attn.k_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.3.self_attn.o_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.3.self_attn.q_proj.bias": "model-00002-of-00007.safetensors",
+    "model.layers.3.self_attn.q_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.3.self_attn.v_proj.bias": "model-00002-of-00007.safetensors",
+    "model.layers.3.self_attn.v_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.4.input_layernorm.weight": "model-00002-of-00007.safetensors",
+    "model.layers.4.mlp.down_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.4.mlp.gate_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.4.mlp.up_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.4.post_attention_layernorm.weight": "model-00002-of-00007.safetensors",
+    "model.layers.4.self_attn.k_proj.bias": "model-00002-of-00007.safetensors",
+    "model.layers.4.self_attn.k_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.4.self_attn.o_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.4.self_attn.q_proj.bias": "model-00002-of-00007.safetensors",
+    "model.layers.4.self_attn.q_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.4.self_attn.v_proj.bias": "model-00002-of-00007.safetensors",
+    "model.layers.4.self_attn.v_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.5.input_layernorm.weight": "model-00002-of-00007.safetensors",
+    "model.layers.5.mlp.down_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.5.mlp.gate_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.5.mlp.up_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.5.post_attention_layernorm.weight": "model-00002-of-00007.safetensors",
+    "model.layers.5.self_attn.k_proj.bias": "model-00002-of-00007.safetensors",
+    "model.layers.5.self_attn.k_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.5.self_attn.o_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.5.self_attn.q_proj.bias": "model-00002-of-00007.safetensors",
+    "model.layers.5.self_attn.q_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.5.self_attn.v_proj.bias": "model-00002-of-00007.safetensors",
+    "model.layers.5.self_attn.v_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.6.input_layernorm.weight": "model-00002-of-00007.safetensors",
+    "model.layers.6.mlp.down_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.6.mlp.gate_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.6.mlp.up_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00007.safetensors",
+    "model.layers.6.self_attn.k_proj.bias": "model-00002-of-00007.safetensors",
+    "model.layers.6.self_attn.k_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.6.self_attn.o_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.6.self_attn.q_proj.bias": "model-00002-of-00007.safetensors",
+    "model.layers.6.self_attn.q_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.6.self_attn.v_proj.bias": "model-00002-of-00007.safetensors",
+    "model.layers.6.self_attn.v_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.7.input_layernorm.weight": "model-00002-of-00007.safetensors",
+    "model.layers.7.mlp.down_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.7.mlp.up_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00007.safetensors",
+    "model.layers.7.self_attn.k_proj.bias": "model-00002-of-00007.safetensors",
+    "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.7.self_attn.q_proj.bias": "model-00002-of-00007.safetensors",
+    "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.7.self_attn.v_proj.bias": "model-00002-of-00007.safetensors",
+    "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.8.input_layernorm.weight": "model-00003-of-00007.safetensors",
+    "model.layers.8.mlp.down_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.8.mlp.gate_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.8.mlp.up_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.8.post_attention_layernorm.weight": "model-00003-of-00007.safetensors",
+    "model.layers.8.self_attn.k_proj.bias": "model-00002-of-00007.safetensors",
+    "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.8.self_attn.q_proj.bias": "model-00002-of-00007.safetensors",
+    "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.8.self_attn.v_proj.bias": "model-00002-of-00007.safetensors",
+    "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00007.safetensors",
+    "model.layers.9.input_layernorm.weight": "model-00003-of-00007.safetensors",
+    "model.layers.9.mlp.down_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.9.mlp.gate_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.9.mlp.up_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.9.post_attention_layernorm.weight": "model-00003-of-00007.safetensors",
+    "model.layers.9.self_attn.k_proj.bias": "model-00003-of-00007.safetensors",
+    "model.layers.9.self_attn.k_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.9.self_attn.o_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.9.self_attn.q_proj.bias": "model-00003-of-00007.safetensors",
+    "model.layers.9.self_attn.q_proj.weight": "model-00003-of-00007.safetensors",
+    "model.layers.9.self_attn.v_proj.bias": "model-00003-of-00007.safetensors",
+    "model.layers.9.self_attn.v_proj.weight": "model-00003-of-00007.safetensors",
+    "model.norm.weight": "model-00006-of-00007.safetensors"
+  }
+}
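The index maps every parameter name to the shard that stores it; `transformers` reads it automatically when loading, but it is also useful on its own, e.g. to pull a single tensor without touching the other ~30 GB. A sketch, assuming the shards and index sit in the working directory and `safetensors` is installed:

```python
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.0.self_attn.q_proj.weight"
shard = index["weight_map"][name]  # -> "model-00001-of-00007.safetensors"

# safe_open memory-maps the shard, so only the requested tensor is read.
with safe_open(shard, framework="pt") as f:
    tensor = f.get_tensor(name)
print(tensor.shape)
```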
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+{
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>",
+    "<|object_ref_start|>",
+    "<|object_ref_end|>",
+    "<|box_start|>",
+    "<|box_end|>",
+    "<|quad_start|>",
+    "<|quad_end|>",
+    "<|vision_start|>",
+    "<|vision_end|>",
+    "<|vision_pad|>",
+    "<|image_pad|>",
+    "<|video_pad|>"
+  ],
+  "eos_token": {
+    "content": "<|im_end|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
+size 11421896
tokenizer_config.json ADDED
@@ -0,0 +1,208 @@
+{
+  "add_bos_token": false,
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "151643": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151644": {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151645": {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151646": {
+      "content": "<|object_ref_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151647": {
+      "content": "<|object_ref_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151648": {
+      "content": "<|box_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151649": {
+      "content": "<|box_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151650": {
+      "content": "<|quad_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151651": {
+      "content": "<|quad_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151652": {
+      "content": "<|vision_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151653": {
+      "content": "<|vision_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151654": {
+      "content": "<|vision_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151655": {
+      "content": "<|image_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151656": {
+      "content": "<|video_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151657": {
+      "content": "<tool_call>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151658": {
+      "content": "</tool_call>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151659": {
+      "content": "<|fim_prefix|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151660": {
+      "content": "<|fim_middle|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151661": {
+      "content": "<|fim_suffix|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151662": {
+      "content": "<|fim_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151663": {
+      "content": "<|repo_name|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151664": {
+      "content": "<|file_sep|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    }
+  },
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>",
+    "<|object_ref_start|>",
+    "<|object_ref_end|>",
+    "<|box_start|>",
+    "<|box_end|>",
+    "<|quad_start|>",
+    "<|quad_end|>",
+    "<|vision_start|>",
+    "<|vision_end|>",
+    "<|vision_pad|>",
+    "<|image_pad|>",
+    "<|video_pad|>"
+  ],
+  "bos_token": null,
+  "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|im_end|>",
+  "errors": "replace",
+  "extra_special_tokens": {},
+  "model_max_length": 131072,
+  "pad_token": "<|endoftext|>",
+  "split_special_tokens": false,
+  "tokenizer_class": "Qwen2Tokenizer",
+  "unk_token": null
+}
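The `chat_template` above is the standard Qwen2.5 ChatML template, including the tool-calling branches. A sketch of rendering a conversation through it, reusing the `tokenizer` from the loading sketch above:

```python
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is 2 + 2?"},
]
# add_generation_prompt=True appends the trailing "<|im_start|>assistant\n"
# so the model continues in the assistant role.
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
```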
trainer_state.json ADDED
@@ -0,0 +1,2678 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 4.937062937062937,
+  "eval_steps": 18,
+  "global_step": 355,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.013986013986013986,
+      "grad_norm": 6.746792793273926,
+      "learning_rate": 3.3333333333333335e-07,
+      "loss": 0.8294,
+      "step": 1
+    },
+    {
+      "epoch": 0.013986013986013986,
+      "eval_loss": 0.8744672536849976,
+      "eval_runtime": 36.967,
+      "eval_samples_per_second": 17.367,
+      "eval_steps_per_second": 2.191,
+      "step": 1
+    },
+    {
+      "epoch": 0.027972027972027972,
+      "grad_norm": 6.9825944900512695,
+      "learning_rate": 6.666666666666667e-07,
+      "loss": 0.8694,
+      "step": 2
+    },
+    {
+      "epoch": 0.04195804195804196,
+      "grad_norm": 7.01480770111084,
+      "learning_rate": 1.0000000000000002e-06,
+      "loss": 0.861,
+      "step": 3
+    },
+    {
+      "epoch": 0.055944055944055944,
+      "grad_norm": 7.156968593597412,
+      "learning_rate": 1.3333333333333334e-06,
+      "loss": 0.9027,
+      "step": 4
+    },
+    {
+      "epoch": 0.06993006993006994,
+      "grad_norm": 6.0878005027771,
+      "learning_rate": 1.6666666666666667e-06,
+      "loss": 0.8577,
+      "step": 5
+    },
+    {
+      "epoch": 0.08391608391608392,
+      "grad_norm": 5.853216648101807,
+      "learning_rate": 2.0000000000000003e-06,
+      "loss": 0.8168,
+      "step": 6
+    },
+    {
+      "epoch": 0.0979020979020979,
+      "grad_norm": 4.9973978996276855,
+      "learning_rate": 2.3333333333333336e-06,
+      "loss": 0.788,
+      "step": 7
+    },
+    {
+      "epoch": 0.11188811188811189,
+      "grad_norm": 4.611128330230713,
+      "learning_rate": 2.666666666666667e-06,
+      "loss": 0.7959,
+      "step": 8
+    },
+    {
+      "epoch": 0.1258741258741259,
+      "grad_norm": 3.1312103271484375,
+      "learning_rate": 3e-06,
+      "loss": 0.7374,
+      "step": 9
+    },
+    {
+      "epoch": 0.13986013986013987,
+      "grad_norm": 2.9217381477355957,
+      "learning_rate": 3.3333333333333333e-06,
+      "loss": 0.7329,
+      "step": 10
+    },
+    {
+      "epoch": 0.15384615384615385,
+      "grad_norm": 2.5225424766540527,
+      "learning_rate": 3.6666666666666666e-06,
+      "loss": 0.6905,
+      "step": 11
+    },
+    {
+      "epoch": 0.16783216783216784,
+      "grad_norm": 2.8658440113067627,
+      "learning_rate": 4.000000000000001e-06,
+      "loss": 0.702,
+      "step": 12
+    },
+    {
+      "epoch": 0.18181818181818182,
+      "grad_norm": 2.6459388732910156,
+      "learning_rate": 4.333333333333334e-06,
+      "loss": 0.6659,
+      "step": 13
+    },
+    {
+      "epoch": 0.1958041958041958,
+      "grad_norm": 2.4082329273223877,
+      "learning_rate": 4.666666666666667e-06,
+      "loss": 0.6732,
+      "step": 14
+    },
+    {
+      "epoch": 0.2097902097902098,
+      "grad_norm": 1.8969792127609253,
+      "learning_rate": 5e-06,
+      "loss": 0.626,
+      "step": 15
+    },
+    {
+      "epoch": 0.22377622377622378,
+      "grad_norm": 1.705984354019165,
+      "learning_rate": 5.333333333333334e-06,
+      "loss": 0.6357,
+      "step": 16
+    },
+    {
+      "epoch": 0.23776223776223776,
+      "grad_norm": 1.5265748500823975,
+      "learning_rate": 5.666666666666667e-06,
+      "loss": 0.6409,
+      "step": 17
+    },
+    {
+      "epoch": 0.2517482517482518,
+      "grad_norm": 1.3590223789215088,
+      "learning_rate": 6e-06,
+      "loss": 0.6128,
+      "step": 18
+    },
+    {
+      "epoch": 0.2517482517482518,
+      "eval_loss": 0.6171885132789612,
+      "eval_runtime": 35.4252,
+      "eval_samples_per_second": 18.123,
+      "eval_steps_per_second": 2.287,
+      "step": 18
+    },
+    {
+      "epoch": 0.26573426573426573,
+      "grad_norm": 1.3791933059692383,
+      "learning_rate": 6.333333333333333e-06,
+      "loss": 0.6181,
+      "step": 19
+    },
+    {
+      "epoch": 0.27972027972027974,
+      "grad_norm": 1.398863434791565,
+      "learning_rate": 6.666666666666667e-06,
+      "loss": 0.593,
+      "step": 20
+    },
+    {
+      "epoch": 0.2937062937062937,
+      "grad_norm": 1.1556097269058228,
+      "learning_rate": 7e-06,
+      "loss": 0.6274,
+      "step": 21
+    },
+    {
+      "epoch": 0.3076923076923077,
+      "grad_norm": 1.094146728515625,
+      "learning_rate": 7.333333333333333e-06,
+      "loss": 0.6113,
+      "step": 22
+    },
+    {
+      "epoch": 0.32167832167832167,
+      "grad_norm": 1.2191824913024902,
+      "learning_rate": 7.666666666666667e-06,
+      "loss": 0.6111,
+      "step": 23
+    },
+    {
+      "epoch": 0.3356643356643357,
+      "grad_norm": 0.9371815323829651,
+      "learning_rate": 8.000000000000001e-06,
+      "loss": 0.5895,
+      "step": 24
+    },
+    {
+      "epoch": 0.34965034965034963,
+      "grad_norm": 0.8173602223396301,
+      "learning_rate": 8.333333333333334e-06,
+      "loss": 0.6083,
+      "step": 25
+    },
+    {
+      "epoch": 0.36363636363636365,
+      "grad_norm": 1.0984693765640259,
+      "learning_rate": 8.666666666666668e-06,
+      "loss": 0.6072,
+      "step": 26
+    },
+    {
+      "epoch": 0.3776223776223776,
+      "grad_norm": 1.0279648303985596,
+      "learning_rate": 9e-06,
+      "loss": 0.6001,
+      "step": 27
+    },
+    {
+      "epoch": 0.3916083916083916,
+      "grad_norm": 0.9129611253738403,
+      "learning_rate": 9.333333333333334e-06,
+      "loss": 0.5644,
+      "step": 28
+    },
+    {
+      "epoch": 0.40559440559440557,
+      "grad_norm": 0.832744300365448,
+      "learning_rate": 9.666666666666667e-06,
+      "loss": 0.5716,
+      "step": 29
+    },
+    {
+      "epoch": 0.4195804195804196,
+      "grad_norm": 0.8230701684951782,
+      "learning_rate": 1e-05,
+      "loss": 0.59,
+      "step": 30
+    },
+    {
+      "epoch": 0.43356643356643354,
+      "grad_norm": 0.8343638181686401,
+      "learning_rate": 9.999766401714795e-06,
+      "loss": 0.5876,
+      "step": 31
+    },
+    {
+      "epoch": 0.44755244755244755,
+      "grad_norm": 0.7421298623085022,
+      "learning_rate": 9.999065628686439e-06,
+      "loss": 0.5959,
+      "step": 32
+    },
+    {
+      "epoch": 0.46153846153846156,
+      "grad_norm": 0.7471378445625305,
+      "learning_rate": 9.997897746394684e-06,
+      "loss": 0.5804,
+      "step": 33
+    },
+    {
+      "epoch": 0.4755244755244755,
+      "grad_norm": 0.8300222754478455,
+      "learning_rate": 9.996262863965651e-06,
+      "loss": 0.5726,
+      "step": 34
+    },
+    {
+      "epoch": 0.48951048951048953,
+      "grad_norm": 0.7753379940986633,
+      "learning_rate": 9.994161134161635e-06,
+      "loss": 0.6034,
+      "step": 35
+    },
+    {
+      "epoch": 0.5034965034965035,
+      "grad_norm": 0.8331146240234375,
+      "learning_rate": 9.991592753366822e-06,
+      "loss": 0.5953,
+      "step": 36
+    },
+    {
+      "epoch": 0.5034965034965035,
+      "eval_loss": 0.5805296897888184,
+      "eval_runtime": 35.0435,
+      "eval_samples_per_second": 18.32,
+      "eval_steps_per_second": 2.311,
+      "step": 36
+    },
+    {
+      "epoch": 0.5174825174825175,
+      "grad_norm": 0.7212592959403992,
+      "learning_rate": 9.988557961568956e-06,
+      "loss": 0.5639,
+      "step": 37
+    },
+    {
+      "epoch": 0.5314685314685315,
+      "grad_norm": 0.796295166015625,
+      "learning_rate": 9.985057042336898e-06,
+      "loss": 0.5771,
+      "step": 38
+    },
+    {
+      "epoch": 0.5454545454545454,
+      "grad_norm": 0.8607219457626343,
+      "learning_rate": 9.981090322794145e-06,
+      "loss": 0.5763,
+      "step": 39
+    },
+    {
+      "epoch": 0.5594405594405595,
+      "grad_norm": 0.861869215965271,
+      "learning_rate": 9.976658173588244e-06,
+      "loss": 0.5729,
+      "step": 40
+    },
+    {
+      "epoch": 0.5734265734265734,
+      "grad_norm": 0.7538414597511292,
+      "learning_rate": 9.97176100885618e-06,
+      "loss": 0.571,
+      "step": 41
+    },
+    {
+      "epoch": 0.5874125874125874,
+      "grad_norm": 0.7197255492210388,
+      "learning_rate": 9.966399286185666e-06,
+      "loss": 0.5421,
+      "step": 42
+    },
+    {
+      "epoch": 0.6013986013986014,
+      "grad_norm": 0.7522373199462891,
+      "learning_rate": 9.960573506572391e-06,
+      "loss": 0.5603,
+      "step": 43
+    },
+    {
+      "epoch": 0.6153846153846154,
+      "grad_norm": 0.8054993152618408,
+      "learning_rate": 9.954284214373204e-06,
+      "loss": 0.5723,
+      "step": 44
+    },
+    {
+      "epoch": 0.6293706293706294,
+      "grad_norm": 0.639057457447052,
+      "learning_rate": 9.947531997255256e-06,
+      "loss": 0.5483,
+      "step": 45
+    },
+    {
+      "epoch": 0.6433566433566433,
+      "grad_norm": 0.6742891073226929,
+      "learning_rate": 9.940317486141084e-06,
+      "loss": 0.5845,
+      "step": 46
+    },
+    {
+      "epoch": 0.6573426573426573,
+      "grad_norm": 0.6605424880981445,
+      "learning_rate": 9.932641355149655e-06,
+      "loss": 0.5639,
+      "step": 47
+    },
+    {
+      "epoch": 0.6713286713286714,
+      "grad_norm": 0.7080878019332886,
+      "learning_rate": 9.924504321533387e-06,
+      "loss": 0.5851,
+      "step": 48
+    },
+    {
+      "epoch": 0.6853146853146853,
+      "grad_norm": 0.6235523223876953,
+      "learning_rate": 9.915907145611117e-06,
+      "loss": 0.574,
+      "step": 49
+    },
+    {
+      "epoch": 0.6993006993006993,
+      "grad_norm": 0.6567375063896179,
+      "learning_rate": 9.906850630697068e-06,
+      "loss": 0.5705,
+      "step": 50
+    },
+    {
+      "epoch": 0.7132867132867133,
+      "grad_norm": 0.6011090278625488,
+      "learning_rate": 9.89733562302578e-06,
+      "loss": 0.574,
+      "step": 51
+    },
+    {
+      "epoch": 0.7272727272727273,
+      "grad_norm": 0.6043576002120972,
+      "learning_rate": 9.887363011673046e-06,
+      "loss": 0.5849,
+      "step": 52
+    },
+    {
+      "epoch": 0.7412587412587412,
+      "grad_norm": 0.7147118449211121,
+      "learning_rate": 9.876933728472826e-06,
+      "loss": 0.5584,
+      "step": 53
+    },
+    {
+      "epoch": 0.7552447552447552,
+      "grad_norm": 0.6480064392089844,
+      "learning_rate": 9.866048747930194e-06,
+      "loss": 0.5494,
+      "step": 54
+    },
+    {
+      "epoch": 0.7552447552447552,
+      "eval_loss": 0.5708758234977722,
+      "eval_runtime": 34.9921,
+      "eval_samples_per_second": 18.347,
+      "eval_steps_per_second": 2.315,
+      "step": 54
+    },
+    {
+      "epoch": 0.7692307692307693,
+      "grad_norm": 0.6563164591789246,
+      "learning_rate": 9.854709087130261e-06,
+      "loss": 0.5491,
+      "step": 55
+    },
+    {
+      "epoch": 0.7832167832167832,
+      "grad_norm": 0.6024691462516785,
+      "learning_rate": 9.842915805643156e-06,
+      "loss": 0.5589,
+      "step": 56
+    },
+    {
+      "epoch": 0.7972027972027972,
+      "grad_norm": 0.6186073422431946,
+      "learning_rate": 9.830670005425012e-06,
+      "loss": 0.5567,
+      "step": 57
+    },
+    {
+      "epoch": 0.8111888111888111,
+      "grad_norm": 0.6993715763092041,
+      "learning_rate": 9.817972830715003e-06,
+      "loss": 0.5534,
+      "step": 58
+    },
+    {
+      "epoch": 0.8251748251748252,
+      "grad_norm": 0.6327122449874878,
+      "learning_rate": 9.804825467928423e-06,
+      "loss": 0.5709,
+      "step": 59
+    },
+    {
+      "epoch": 0.8391608391608392,
+      "grad_norm": 0.6156756281852722,
+      "learning_rate": 9.791229145545832e-06,
+      "loss": 0.5445,
+      "step": 60
+    },
+    {
+      "epoch": 0.8531468531468531,
+      "grad_norm": 0.7704036235809326,
+      "learning_rate": 9.777185133998268e-06,
+      "loss": 0.5743,
+      "step": 61
+    },
+    {
+      "epoch": 0.8671328671328671,
+      "grad_norm": 0.5839553475379944,
+      "learning_rate": 9.76269474554854e-06,
+      "loss": 0.5536,
+      "step": 62
+    },
+    {
+      "epoch": 0.8811188811188811,
+      "grad_norm": 0.6872385144233704,
+      "learning_rate": 9.747759334168602e-06,
+      "loss": 0.5627,
+      "step": 63
+    },
+    {
+      "epoch": 0.8951048951048951,
+      "grad_norm": 0.663074791431427,
+      "learning_rate": 9.73238029541305e-06,
+      "loss": 0.5643,
+      "step": 64
+    },
+    {
+      "epoch": 0.9090909090909091,
+      "grad_norm": 0.7018933296203613,
+      "learning_rate": 9.716559066288716e-06,
+      "loss": 0.5729,
+      "step": 65
+    },
+    {
+      "epoch": 0.9230769230769231,
+      "grad_norm": 0.7574678659439087,
+      "learning_rate": 9.7002971251204e-06,
+      "loss": 0.5813,
+      "step": 66
+    },
+    {
+      "epoch": 0.9370629370629371,
+      "grad_norm": 0.6293357014656067,
+      "learning_rate": 9.683595991412725e-06,
+      "loss": 0.5819,
+      "step": 67
+    },
+    {
+      "epoch": 0.951048951048951,
+      "grad_norm": 0.6524381041526794,
+      "learning_rate": 9.666457225708175e-06,
+      "loss": 0.5856,
+      "step": 68
+    },
+    {
+      "epoch": 0.965034965034965,
+      "grad_norm": 0.8389201164245605,
+      "learning_rate": 9.648882429441258e-06,
+      "loss": 0.5587,
+      "step": 69
+    },
+    {
+      "epoch": 0.9790209790209791,
+      "grad_norm": 0.6339119672775269,
+      "learning_rate": 9.630873244788884e-06,
+      "loss": 0.5655,
+      "step": 70
+    },
+    {
+      "epoch": 0.993006993006993,
+ "grad_norm": 0.6689181923866272,
536
+ "learning_rate": 9.612431354516912e-06,
537
+ "loss": 0.574,
538
+ "step": 71
539
+ },
540
+ {
541
+ "epoch": 1.0,
542
+ "grad_norm": 0.7970519661903381,
543
+ "learning_rate": 9.593558481822923e-06,
544
+ "loss": 0.5541,
545
+ "step": 72
546
+ },
547
+ {
548
+ "epoch": 1.0,
549
+ "eval_loss": 0.5664608478546143,
550
+ "eval_runtime": 34.9634,
551
+ "eval_samples_per_second": 18.362,
552
+ "eval_steps_per_second": 2.317,
553
+ "step": 72
554
+ },
555
+ {
556
+ "epoch": 1.013986013986014,
557
+ "grad_norm": 0.6805382370948792,
558
+ "learning_rate": 9.574256390175192e-06,
559
+ "loss": 0.5175,
560
+ "step": 73
561
+ },
562
+ {
563
+ "epoch": 1.027972027972028,
564
+ "grad_norm": 0.6378044486045837,
565
+ "learning_rate": 9.554526883147926e-06,
566
+ "loss": 0.5323,
567
+ "step": 74
568
+ },
569
+ {
570
+ "epoch": 1.0419580419580419,
571
+ "grad_norm": 0.6296578645706177,
572
+ "learning_rate": 9.534371804252727e-06,
573
+ "loss": 0.5197,
574
+ "step": 75
575
+ },
576
+ {
577
+ "epoch": 1.055944055944056,
578
+ "grad_norm": 0.6116400361061096,
579
+ "learning_rate": 9.513793036766345e-06,
580
+ "loss": 0.504,
581
+ "step": 76
582
+ },
583
+ {
584
+ "epoch": 1.06993006993007,
585
+ "grad_norm": 0.6288114190101624,
586
+ "learning_rate": 9.492792503554695e-06,
587
+ "loss": 0.5314,
588
+ "step": 77
589
+ },
590
+ {
591
+ "epoch": 1.083916083916084,
592
+ "grad_norm": 0.6576322913169861,
593
+ "learning_rate": 9.4713721668932e-06,
594
+ "loss": 0.5437,
595
+ "step": 78
596
+ },
597
+ {
598
+ "epoch": 1.097902097902098,
599
+ "grad_norm": 0.5930177569389343,
600
+ "learning_rate": 9.44953402828342e-06,
601
+ "loss": 0.5213,
602
+ "step": 79
603
+ },
604
+ {
605
+ "epoch": 1.1118881118881119,
606
+ "grad_norm": 0.7437406778335571,
607
+ "learning_rate": 9.427280128266049e-06,
608
+ "loss": 0.5441,
609
+ "step": 80
610
+ },
611
+ {
612
+ "epoch": 1.1258741258741258,
613
+ "grad_norm": 0.7347025275230408,
614
+ "learning_rate": 9.404612546230244e-06,
615
+ "loss": 0.5078,
616
+ "step": 81
617
+ },
618
+ {
619
+ "epoch": 1.1398601398601398,
620
+ "grad_norm": 0.6133800148963928,
621
+ "learning_rate": 9.381533400219319e-06,
622
+ "loss": 0.5129,
623
+ "step": 82
624
+ },
625
+ {
626
+ "epoch": 1.1538461538461537,
627
+ "grad_norm": 0.8068645000457764,
628
+ "learning_rate": 9.358044846732848e-06,
629
+ "loss": 0.5252,
630
+ "step": 83
631
+ },
632
+ {
633
+ "epoch": 1.167832167832168,
634
+ "grad_norm": 0.7470645904541016,
635
+ "learning_rate": 9.334149080525154e-06,
636
+ "loss": 0.5251,
637
+ "step": 84
638
+ },
639
+ {
640
+ "epoch": 1.1818181818181819,
641
+ "grad_norm": 0.6085983514785767,
642
+ "learning_rate": 9.309848334400247e-06,
643
+ "loss": 0.5119,
644
+ "step": 85
645
+ },
646
+ {
647
+ "epoch": 1.1958041958041958,
648
+ "grad_norm": 0.6427562236785889,
649
+ "learning_rate": 9.285144879003173e-06,
650
+ "loss": 0.5327,
651
+ "step": 86
652
+ },
653
+ {
654
+ "epoch": 1.2097902097902098,
655
+ "grad_norm": 0.5992908477783203,
656
+ "learning_rate": 9.26004102260786e-06,
657
+ "loss": 0.5174,
658
+ "step": 87
659
+ },
660
+ {
661
+ "epoch": 1.2237762237762237,
662
+ "grad_norm": 0.6650605201721191,
663
+ "learning_rate": 9.23453911090143e-06,
664
+ "loss": 0.541,
665
+ "step": 88
666
+ },
667
+ {
668
+ "epoch": 1.2377622377622377,
669
+ "grad_norm": 0.6733765602111816,
670
+ "learning_rate": 9.208641526765024e-06,
671
+ "loss": 0.4968,
672
+ "step": 89
673
+ },
674
+ {
675
+ "epoch": 1.2517482517482517,
676
+ "grad_norm": 0.5896586775779724,
677
+ "learning_rate": 9.182350690051134e-06,
678
+ "loss": 0.5111,
679
+ "step": 90
680
+ },
681
+ {
682
+ "epoch": 1.2517482517482517,
683
+ "eval_loss": 0.5681217312812805,
684
+ "eval_runtime": 34.9547,
685
+ "eval_samples_per_second": 18.367,
686
+ "eval_steps_per_second": 2.317,
687
+ "step": 90
688
+ },
689
+ {
690
+ "epoch": 1.2657342657342658,
691
+ "grad_norm": 0.5879291892051697,
692
+ "learning_rate": 9.155669057357515e-06,
693
+ "loss": 0.5124,
694
+ "step": 91
695
+ },
696
+ {
697
+ "epoch": 1.2797202797202798,
698
+ "grad_norm": 0.6704349517822266,
699
+ "learning_rate": 9.12859912179762e-06,
700
+ "loss": 0.5264,
701
+ "step": 92
702
+ },
703
+ {
704
+ "epoch": 1.2937062937062938,
705
+ "grad_norm": 0.7005125284194946,
706
+ "learning_rate": 9.101143412767665e-06,
707
+ "loss": 0.5426,
708
+ "step": 93
709
+ },
710
+ {
711
+ "epoch": 1.3076923076923077,
712
+ "grad_norm": 0.5738447904586792,
713
+ "learning_rate": 9.073304495710267e-06,
714
+ "loss": 0.5057,
715
+ "step": 94
716
+ },
717
+ {
718
+ "epoch": 1.3216783216783217,
719
+ "grad_norm": 0.6039765477180481,
720
+ "learning_rate": 9.045084971874738e-06,
721
+ "loss": 0.5106,
722
+ "step": 95
723
+ },
724
+ {
725
+ "epoch": 1.3356643356643356,
726
+ "grad_norm": 0.6626608967781067,
727
+ "learning_rate": 9.016487478074032e-06,
728
+ "loss": 0.5231,
729
+ "step": 96
730
+ },
731
+ {
732
+ "epoch": 1.3496503496503496,
733
+ "grad_norm": 0.607319176197052,
734
+ "learning_rate": 8.987514686438353e-06,
735
+ "loss": 0.5373,
736
+ "step": 97
737
+ },
738
+ {
739
+ "epoch": 1.3636363636363638,
740
+ "grad_norm": 0.6294829249382019,
741
+ "learning_rate": 8.95816930416548e-06,
742
+ "loss": 0.5478,
743
+ "step": 98
744
+ },
745
+ {
746
+ "epoch": 1.3776223776223775,
747
+ "grad_norm": 0.5931101441383362,
748
+ "learning_rate": 8.928454073267801e-06,
749
+ "loss": 0.5183,
750
+ "step": 99
751
+ },
752
+ {
753
+ "epoch": 1.3916083916083917,
754
+ "grad_norm": 0.5525672435760498,
755
+ "learning_rate": 8.898371770316113e-06,
756
+ "loss": 0.5049,
757
+ "step": 100
758
+ },
759
+ {
760
+ "epoch": 1.4055944055944056,
761
+ "grad_norm": 0.5554185509681702,
762
+ "learning_rate": 8.867925206180166e-06,
763
+ "loss": 0.5329,
764
+ "step": 101
765
+ },
766
+ {
767
+ "epoch": 1.4195804195804196,
768
+ "grad_norm": 0.6104192137718201,
769
+ "learning_rate": 8.837117225766033e-06,
770
+ "loss": 0.5421,
771
+ "step": 102
772
+ },
773
+ {
774
+ "epoch": 1.4335664335664335,
775
+ "grad_norm": 0.5591093897819519,
776
+ "learning_rate": 8.805950707750268e-06,
777
+ "loss": 0.5434,
778
+ "step": 103
779
+ },
780
+ {
781
+ "epoch": 1.4475524475524475,
782
+ "grad_norm": 0.5589428544044495,
783
+ "learning_rate": 8.774428564310939e-06,
784
+ "loss": 0.5159,
785
+ "step": 104
786
+ },
787
+ {
788
+ "epoch": 1.4615384615384617,
789
+ "grad_norm": 0.580699622631073,
790
+ "learning_rate": 8.742553740855507e-06,
791
+ "loss": 0.5143,
792
+ "step": 105
793
+ },
794
+ {
795
+ "epoch": 1.4755244755244754,
796
+ "grad_norm": 0.6007757186889648,
797
+ "learning_rate": 8.710329215745612e-06,
798
+ "loss": 0.5066,
799
+ "step": 106
800
+ },
801
+ {
802
+ "epoch": 1.4895104895104896,
803
+ "grad_norm": 0.6713395118713379,
804
+ "learning_rate": 8.677758000018777e-06,
805
+ "loss": 0.5318,
806
+ "step": 107
807
+ },
808
+ {
809
+ "epoch": 1.5034965034965035,
810
+ "grad_norm": 0.5536379814147949,
811
+ "learning_rate": 8.644843137107058e-06,
812
+ "loss": 0.5159,
813
+ "step": 108
814
+ },
815
+ {
816
+ "epoch": 1.5034965034965035,
817
+ "eval_loss": 0.5661691427230835,
818
+ "eval_runtime": 35.3668,
819
+ "eval_samples_per_second": 18.153,
820
+ "eval_steps_per_second": 2.29,
821
+ "step": 108
822
+ },
823
+ {
824
+ "epoch": 1.5174825174825175,
825
+ "grad_norm": 0.645210325717926,
826
+ "learning_rate": 8.61158770255267e-06,
827
+ "loss": 0.5312,
828
+ "step": 109
829
+ },
830
+ {
831
+ "epoch": 1.5314685314685315,
832
+ "grad_norm": 0.601094126701355,
833
+ "learning_rate": 8.577994803720605e-06,
834
+ "loss": 0.5394,
835
+ "step": 110
836
+ },
837
+ {
838
+ "epoch": 1.5454545454545454,
839
+ "grad_norm": 0.5418203473091125,
840
+ "learning_rate": 8.544067579508292e-06,
841
+ "loss": 0.5264,
842
+ "step": 111
843
+ },
844
+ {
845
+ "epoch": 1.5594405594405596,
846
+ "grad_norm": 0.5513077974319458,
847
+ "learning_rate": 8.509809200052286e-06,
848
+ "loss": 0.5269,
849
+ "step": 112
850
+ },
851
+ {
852
+ "epoch": 1.5734265734265733,
853
+ "grad_norm": 0.6063372492790222,
854
+ "learning_rate": 8.475222866432065e-06,
855
+ "loss": 0.5199,
856
+ "step": 113
857
+ },
858
+ {
859
+ "epoch": 1.5874125874125875,
860
+ "grad_norm": 0.5637122988700867,
861
+ "learning_rate": 8.440311810370921e-06,
862
+ "loss": 0.5342,
863
+ "step": 114
864
+ },
865
+ {
866
+ "epoch": 1.6013986013986012,
867
+ "grad_norm": 0.5762498378753662,
868
+ "learning_rate": 8.405079293933986e-06,
869
+ "loss": 0.5419,
870
+ "step": 115
871
+ },
872
+ {
873
+ "epoch": 1.6153846153846154,
874
+ "grad_norm": 0.557772159576416,
875
+ "learning_rate": 8.36952860922343e-06,
876
+ "loss": 0.5217,
877
+ "step": 116
878
+ },
879
+ {
880
+ "epoch": 1.6293706293706294,
881
+ "grad_norm": 0.6382875442504883,
882
+ "learning_rate": 8.333663078070845e-06,
883
+ "loss": 0.5366,
884
+ "step": 117
885
+ },
886
+ {
887
+ "epoch": 1.6433566433566433,
888
+ "grad_norm": 0.5209150910377502,
889
+ "learning_rate": 8.297486051726864e-06,
890
+ "loss": 0.5087,
891
+ "step": 118
892
+ },
893
+ {
894
+ "epoch": 1.6573426573426573,
895
+ "grad_norm": 0.5415475964546204,
896
+ "learning_rate": 8.26100091054801e-06,
897
+ "loss": 0.5026,
898
+ "step": 119
899
+ },
900
+ {
901
+ "epoch": 1.6713286713286712,
902
+ "grad_norm": 0.6667906641960144,
903
+ "learning_rate": 8.224211063680854e-06,
904
+ "loss": 0.5224,
905
+ "step": 120
906
+ },
907
+ {
908
+ "epoch": 1.6853146853146854,
909
+ "grad_norm": 0.573965311050415,
910
+ "learning_rate": 8.18711994874345e-06,
911
+ "loss": 0.538,
912
+ "step": 121
913
+ },
914
+ {
915
+ "epoch": 1.6993006993006992,
916
+ "grad_norm": 0.6206014156341553,
917
+ "learning_rate": 8.149731031504136e-06,
918
+ "loss": 0.5161,
919
+ "step": 122
920
+ },
921
+ {
922
+ "epoch": 1.7132867132867133,
923
+ "grad_norm": 0.6324427127838135,
924
+ "learning_rate": 8.112047805557693e-06,
925
+ "loss": 0.5407,
926
+ "step": 123
927
+ },
928
+ {
929
+ "epoch": 1.7272727272727273,
930
+ "grad_norm": 0.5460613965988159,
931
+ "learning_rate": 8.074073791998907e-06,
932
+ "loss": 0.5238,
933
+ "step": 124
934
+ },
935
+ {
936
+ "epoch": 1.7412587412587412,
937
+ "grad_norm": 0.5684161186218262,
938
+ "learning_rate": 8.035812539093557e-06,
939
+ "loss": 0.5166,
940
+ "step": 125
941
+ },
942
+ {
943
+ "epoch": 1.7552447552447552,
944
+ "grad_norm": 0.6114190816879272,
945
+ "learning_rate": 7.997267621946871e-06,
946
+ "loss": 0.5212,
947
+ "step": 126
948
+ },
949
+ {
950
+ "epoch": 1.7552447552447552,
951
+ "eval_loss": 0.5644441843032837,
952
+ "eval_runtime": 34.8941,
953
+ "eval_samples_per_second": 18.399,
954
+ "eval_steps_per_second": 2.321,
955
+ "step": 126
956
+ },
957
+ {
958
+ "epoch": 1.7692307692307692,
959
+ "grad_norm": 0.5791452527046204,
960
+ "learning_rate": 7.958442642169469e-06,
961
+ "loss": 0.5219,
962
+ "step": 127
963
+ },
964
+ {
965
+ "epoch": 1.7832167832167833,
966
+ "grad_norm": 0.5814895033836365,
967
+ "learning_rate": 7.919341227540828e-06,
968
+ "loss": 0.5492,
969
+ "step": 128
970
+ },
971
+ {
972
+ "epoch": 1.797202797202797,
973
+ "grad_norm": 0.5562170147895813,
974
+ "learning_rate": 7.879967031670313e-06,
975
+ "loss": 0.5065,
976
+ "step": 129
977
+ },
978
+ {
979
+ "epoch": 1.8111888111888113,
980
+ "grad_norm": 0.5666476488113403,
981
+ "learning_rate": 7.84032373365578e-06,
982
+ "loss": 0.508,
983
+ "step": 130
984
+ },
985
+ {
986
+ "epoch": 1.8251748251748252,
987
+ "grad_norm": 0.6123917102813721,
988
+ "learning_rate": 7.800415037739802e-06,
989
+ "loss": 0.5245,
990
+ "step": 131
991
+ },
992
+ {
993
+ "epoch": 1.8391608391608392,
994
+ "grad_norm": 0.6137180924415588,
995
+ "learning_rate": 7.760244672963548e-06,
996
+ "loss": 0.5281,
997
+ "step": 132
998
+ },
999
+ {
1000
+ "epoch": 1.8531468531468531,
1001
+ "grad_norm": 0.5444206595420837,
1002
+ "learning_rate": 7.719816392818354e-06,
1003
+ "loss": 0.496,
1004
+ "step": 133
1005
+ },
1006
+ {
1007
+ "epoch": 1.867132867132867,
1008
+ "grad_norm": 0.5935954451560974,
1009
+ "learning_rate": 7.679133974894984e-06,
1010
+ "loss": 0.5164,
1011
+ "step": 134
1012
+ },
1013
+ {
1014
+ "epoch": 1.8811188811188813,
1015
+ "grad_norm": 0.568263828754425,
1016
+ "learning_rate": 7.638201220530664e-06,
1017
+ "loss": 0.509,
1018
+ "step": 135
1019
+ },
1020
+ {
1021
+ "epoch": 1.895104895104895,
1022
+ "grad_norm": 0.641503095626831,
1023
+ "learning_rate": 7.597021954453887e-06,
1024
+ "loss": 0.5389,
1025
+ "step": 136
1026
+ },
1027
+ {
1028
+ "epoch": 1.9090909090909092,
1029
+ "grad_norm": 0.5866712927818298,
1030
+ "learning_rate": 7.555600024427028e-06,
1031
+ "loss": 0.5163,
1032
+ "step": 137
1033
+ },
1034
+ {
1035
+ "epoch": 1.9230769230769231,
1036
+ "grad_norm": 0.559259831905365,
1037
+ "learning_rate": 7.513939300886816e-06,
1038
+ "loss": 0.5074,
1039
+ "step": 138
1040
+ },
1041
+ {
1042
+ "epoch": 1.937062937062937,
1043
+ "grad_norm": 0.5635555386543274,
1044
+ "learning_rate": 7.472043676582685e-06,
1045
+ "loss": 0.5184,
1046
+ "step": 139
1047
+ },
1048
+ {
1049
+ "epoch": 1.951048951048951,
1050
+ "grad_norm": 0.6236100196838379,
1051
+ "learning_rate": 7.42991706621303e-06,
1052
+ "loss": 0.5162,
1053
+ "step": 140
1054
+ },
1055
+ {
1056
+ "epoch": 1.965034965034965,
1057
+ "grad_norm": 0.60297691822052,
1058
+ "learning_rate": 7.387563406059433e-06,
1059
+ "loss": 0.5123,
1060
+ "step": 141
1061
+ },
1062
+ {
1063
+ "epoch": 1.9790209790209792,
1064
+ "grad_norm": 0.5734803080558777,
1065
+ "learning_rate": 7.344986653618844e-06,
1066
+ "loss": 0.5281,
1067
+ "step": 142
1068
+ },
1069
+ {
1070
+ "epoch": 1.993006993006993,
1071
+ "grad_norm": 0.561177134513855,
1072
+ "learning_rate": 7.302190787233808e-06,
1073
+ "loss": 0.5256,
1074
+ "step": 143
1075
+ },
1076
+ {
1077
+ "epoch": 2.0,
1078
+ "grad_norm": 0.6918484568595886,
1079
+ "learning_rate": 7.259179805720726e-06,
1080
+ "loss": 0.4956,
1081
+ "step": 144
1082
+ },
1083
+ {
1084
+ "epoch": 2.0,
1085
+ "eval_loss": 0.5634886622428894,
1086
+ "eval_runtime": 34.1505,
1087
+ "eval_samples_per_second": 18.799,
1088
+ "eval_steps_per_second": 2.372,
1089
+ "step": 144
1090
+ },
1091
+ {
1092
+ "epoch": 2.013986013986014,
1093
+ "grad_norm": 0.6467083096504211,
1094
+ "learning_rate": 7.215957727996208e-06,
1095
+ "loss": 0.4757,
1096
+ "step": 145
1097
+ },
1098
+ {
1099
+ "epoch": 2.027972027972028,
1100
+ "grad_norm": 0.628153920173645,
1101
+ "learning_rate": 7.17252859270155e-06,
1102
+ "loss": 0.4701,
1103
+ "step": 146
1104
+ },
1105
+ {
1106
+ "epoch": 2.041958041958042,
1107
+ "grad_norm": 0.6287585496902466,
1108
+ "learning_rate": 7.128896457825364e-06,
1109
+ "loss": 0.4334,
1110
+ "step": 147
1111
+ },
1112
+ {
1113
+ "epoch": 2.055944055944056,
1114
+ "grad_norm": 0.5704949498176575,
1115
+ "learning_rate": 7.085065400324407e-06,
1116
+ "loss": 0.4723,
1117
+ "step": 148
1118
+ },
1119
+ {
1120
+ "epoch": 2.06993006993007,
1121
+ "grad_norm": 0.6293634176254272,
1122
+ "learning_rate": 7.041039515742626e-06,
1123
+ "loss": 0.4875,
1124
+ "step": 149
1125
+ },
1126
+ {
1127
+ "epoch": 2.0839160839160837,
1128
+ "grad_norm": 0.7220337390899658,
1129
+ "learning_rate": 6.9968229178284775e-06,
1130
+ "loss": 0.4809,
1131
+ "step": 150
1132
+ },
1133
+ {
1134
+ "epoch": 2.097902097902098,
1135
+ "grad_norm": 0.5713090896606445,
1136
+ "learning_rate": 6.952419738150546e-06,
1137
+ "loss": 0.4998,
1138
+ "step": 151
1139
+ },
1140
+ {
1141
+ "epoch": 2.111888111888112,
1142
+ "grad_norm": 0.6713567972183228,
1143
+ "learning_rate": 6.9078341257114765e-06,
1144
+ "loss": 0.4837,
1145
+ "step": 152
1146
+ },
1147
+ {
1148
+ "epoch": 2.125874125874126,
1149
+ "grad_norm": 0.6542858481407166,
1150
+ "learning_rate": 6.863070246560319e-06,
1151
+ "loss": 0.4798,
1152
+ "step": 153
1153
+ },
1154
+ {
1155
+ "epoch": 2.13986013986014,
1156
+ "grad_norm": 0.5555688738822937,
1157
+ "learning_rate": 6.818132283403236e-06,
1158
+ "loss": 0.4593,
1159
+ "step": 154
1160
+ },
1161
+ {
1162
+ "epoch": 2.1538461538461537,
1163
+ "grad_norm": 0.5947204232215881,
1164
+ "learning_rate": 6.773024435212678e-06,
1165
+ "loss": 0.4831,
1166
+ "step": 155
1167
+ },
1168
+ {
1169
+ "epoch": 2.167832167832168,
1170
+ "grad_norm": 0.6230157613754272,
1171
+ "learning_rate": 6.7277509168350445e-06,
1172
+ "loss": 0.4634,
1173
+ "step": 156
1174
+ },
1175
+ {
1176
+ "epoch": 2.1818181818181817,
1177
+ "grad_norm": 0.5586286783218384,
1178
+ "learning_rate": 6.6823159585968355e-06,
1179
+ "loss": 0.4803,
1180
+ "step": 157
1181
+ },
1182
+ {
1183
+ "epoch": 2.195804195804196,
1184
+ "grad_norm": 0.5558333396911621,
1185
+ "learning_rate": 6.636723805909384e-06,
1186
+ "loss": 0.4734,
1187
+ "step": 158
1188
+ },
1189
+ {
1190
+ "epoch": 2.20979020979021,
1191
+ "grad_norm": 0.5960513949394226,
1192
+ "learning_rate": 6.590978718872166e-06,
1193
+ "loss": 0.4746,
1194
+ "step": 159
1195
+ },
1196
+ {
1197
+ "epoch": 2.2237762237762237,
1198
+ "grad_norm": 0.5779184103012085,
1199
+ "learning_rate": 6.545084971874738e-06,
1200
+ "loss": 0.4499,
1201
+ "step": 160
1202
+ },
1203
+ {
1204
+ "epoch": 2.237762237762238,
1205
+ "grad_norm": 0.5827864408493042,
1206
+ "learning_rate": 6.499046853197338e-06,
1207
+ "loss": 0.4826,
1208
+ "step": 161
1209
+ },
1210
+ {
1211
+ "epoch": 2.2517482517482517,
1212
+ "grad_norm": 0.6769295930862427,
1213
+ "learning_rate": 6.452868664610197e-06,
1214
+ "loss": 0.4797,
1215
+ "step": 162
1216
+ },
1217
+ {
1218
+ "epoch": 2.2517482517482517,
1219
+ "eval_loss": 0.5764052271842957,
1220
+ "eval_runtime": 34.051,
1221
+ "eval_samples_per_second": 18.854,
1222
+ "eval_steps_per_second": 2.379,
1223
+ "step": 162
1224
+ },
1225
+ {
1226
+ "epoch": 2.265734265734266,
1227
+ "grad_norm": 0.5850751996040344,
1228
+ "learning_rate": 6.406554720971583e-06,
1229
+ "loss": 0.4829,
1230
+ "step": 163
1231
+ },
1232
+ {
1233
+ "epoch": 2.2797202797202796,
1234
+ "grad_norm": 0.5925103425979614,
1235
+ "learning_rate": 6.3601093498246215e-06,
1236
+ "loss": 0.4936,
1237
+ "step": 164
1238
+ },
1239
+ {
1240
+ "epoch": 2.2937062937062938,
1241
+ "grad_norm": 0.5747277140617371,
1242
+ "learning_rate": 6.313536890992935e-06,
1243
+ "loss": 0.4686,
1244
+ "step": 165
1245
+ },
1246
+ {
1247
+ "epoch": 2.3076923076923075,
1248
+ "grad_norm": 0.6141413450241089,
1249
+ "learning_rate": 6.266841696175132e-06,
1250
+ "loss": 0.4659,
1251
+ "step": 166
1252
+ },
1253
+ {
1254
+ "epoch": 2.3216783216783217,
1255
+ "grad_norm": 0.5214844942092896,
1256
+ "learning_rate": 6.220028128538188e-06,
1257
+ "loss": 0.4714,
1258
+ "step": 167
1259
+ },
1260
+ {
1261
+ "epoch": 2.335664335664336,
1262
+ "grad_norm": 0.6260507106781006,
1263
+ "learning_rate": 6.173100562309751e-06,
1264
+ "loss": 0.4731,
1265
+ "step": 168
1266
+ },
1267
+ {
1268
+ "epoch": 2.3496503496503496,
1269
+ "grad_norm": 0.6246528625488281,
1270
+ "learning_rate": 6.1260633823694224e-06,
1271
+ "loss": 0.4575,
1272
+ "step": 169
1273
+ },
1274
+ {
1275
+ "epoch": 2.3636363636363638,
1276
+ "grad_norm": 0.5592030882835388,
1277
+ "learning_rate": 6.078920983839032e-06,
1278
+ "loss": 0.4293,
1279
+ "step": 170
1280
+ },
1281
+ {
1282
+ "epoch": 2.3776223776223775,
1283
+ "grad_norm": 0.5436908602714539,
1284
+ "learning_rate": 6.031677771671962e-06,
1285
+ "loss": 0.4821,
1286
+ "step": 171
1287
+ },
1288
+ {
1289
+ "epoch": 2.3916083916083917,
1290
+ "grad_norm": 0.5873638987541199,
1291
+ "learning_rate": 5.984338160241552e-06,
1292
+ "loss": 0.4755,
1293
+ "step": 172
1294
+ },
1295
+ {
1296
+ "epoch": 2.4055944055944054,
1297
+ "grad_norm": 0.6056978106498718,
1298
+ "learning_rate": 5.936906572928625e-06,
1299
+ "loss": 0.479,
1300
+ "step": 173
1301
+ },
1302
+ {
1303
+ "epoch": 2.4195804195804196,
1304
+ "grad_norm": 0.5452414751052856,
1305
+ "learning_rate": 5.889387441708162e-06,
1306
+ "loss": 0.4545,
1307
+ "step": 174
1308
+ },
1309
+ {
1310
+ "epoch": 2.4335664335664333,
1311
+ "grad_norm": 0.5708940625190735,
1312
+ "learning_rate": 5.841785206735192e-06,
1313
+ "loss": 0.4706,
1314
+ "step": 175
1315
+ },
1316
+ {
1317
+ "epoch": 2.4475524475524475,
1318
+ "grad_norm": 0.5819888114929199,
1319
+ "learning_rate": 5.794104315929904e-06,
1320
+ "loss": 0.4608,
1321
+ "step": 176
1322
+ },
1323
+ {
1324
+ "epoch": 2.4615384615384617,
1325
+ "grad_norm": 0.5468575358390808,
1326
+ "learning_rate": 5.746349224562021e-06,
1327
+ "loss": 0.4696,
1328
+ "step": 177
1329
+ },
1330
+ {
1331
+ "epoch": 2.4755244755244754,
1332
+ "grad_norm": 0.6171605587005615,
1333
+ "learning_rate": 5.698524394834531e-06,
1334
+ "loss": 0.4809,
1335
+ "step": 178
1336
+ },
1337
+ {
1338
+ "epoch": 2.4895104895104896,
1339
+ "grad_norm": 0.6046556234359741,
1340
+ "learning_rate": 5.650634295466717e-06,
1341
+ "loss": 0.4727,
1342
+ "step": 179
1343
+ },
1344
+ {
1345
+ "epoch": 2.5034965034965033,
1346
+ "grad_norm": 0.5517058968544006,
1347
+ "learning_rate": 5.6026834012766155e-06,
1348
+ "loss": 0.4728,
1349
+ "step": 180
1350
+ },
1351
+ {
1352
+ "epoch": 2.5034965034965033,
1353
+ "eval_loss": 0.5757314562797546,
1354
+ "eval_runtime": 34.5495,
1355
+ "eval_samples_per_second": 18.582,
1356
+ "eval_steps_per_second": 2.344,
1357
+ "step": 180
1358
+ },
1359
+ {
1360
+ "epoch": 2.5174825174825175,
1361
+ "grad_norm": 0.5916588306427002,
1362
+ "learning_rate": 5.554676192762891e-06,
1363
+ "loss": 0.4738,
1364
+ "step": 181
1365
+ },
1366
+ {
1367
+ "epoch": 2.5314685314685317,
1368
+ "grad_norm": 0.596782386302948,
1369
+ "learning_rate": 5.506617155686177e-06,
1370
+ "loss": 0.4725,
1371
+ "step": 182
1372
+ },
1373
+ {
1374
+ "epoch": 2.5454545454545454,
1375
+ "grad_norm": 0.5784814357757568,
1376
+ "learning_rate": 5.458510780649932e-06,
1377
+ "loss": 0.4743,
1378
+ "step": 183
1379
+ },
1380
+ {
1381
+ "epoch": 2.5594405594405596,
1382
+ "grad_norm": 0.5162186622619629,
1383
+ "learning_rate": 5.4103615626808426e-06,
1384
+ "loss": 0.4501,
1385
+ "step": 184
1386
+ },
1387
+ {
1388
+ "epoch": 2.5734265734265733,
1389
+ "grad_norm": 0.5629183053970337,
1390
+ "learning_rate": 5.362174000808813e-06,
1391
+ "loss": 0.4631,
1392
+ "step": 185
1393
+ },
1394
+ {
1395
+ "epoch": 2.5874125874125875,
1396
+ "grad_norm": 0.5455092191696167,
1397
+ "learning_rate": 5.3139525976465675e-06,
1398
+ "loss": 0.4839,
1399
+ "step": 186
1400
+ },
1401
+ {
1402
+ "epoch": 2.6013986013986012,
1403
+ "grad_norm": 0.6234388947486877,
1404
+ "learning_rate": 5.265701858968944e-06,
1405
+ "loss": 0.4729,
1406
+ "step": 187
1407
+ },
1408
+ {
1409
+ "epoch": 2.6153846153846154,
1410
+ "grad_norm": 0.5270193815231323,
1411
+ "learning_rate": 5.217426293291869e-06,
1412
+ "loss": 0.4767,
1413
+ "step": 188
1414
+ },
1415
+ {
1416
+ "epoch": 2.629370629370629,
1417
+ "grad_norm": 0.5291939973831177,
1418
+ "learning_rate": 5.169130411451083e-06,
1419
+ "loss": 0.4659,
1420
+ "step": 189
1421
+ },
1422
+ {
1423
+ "epoch": 2.6433566433566433,
1424
+ "grad_norm": 0.5210967063903809,
1425
+ "learning_rate": 5.120818726180662e-06,
1426
+ "loss": 0.4532,
1427
+ "step": 190
1428
+ },
1429
+ {
1430
+ "epoch": 2.6573426573426575,
1431
+ "grad_norm": 0.5697853565216064,
1432
+ "learning_rate": 5.072495751691338e-06,
1433
+ "loss": 0.4669,
1434
+ "step": 191
1435
+ },
1436
+ {
1437
+ "epoch": 2.6713286713286712,
1438
+ "grad_norm": 0.4967118203639984,
1439
+ "learning_rate": 5.024166003248703e-06,
1440
+ "loss": 0.4777,
1441
+ "step": 192
1442
+ },
1443
+ {
1444
+ "epoch": 2.6853146853146854,
1445
+ "grad_norm": 0.5514243245124817,
1446
+ "learning_rate": 4.9758339967512995e-06,
1447
+ "loss": 0.4689,
1448
+ "step": 193
1449
+ },
1450
+ {
1451
+ "epoch": 2.699300699300699,
1452
+ "grad_norm": 0.5476483702659607,
1453
+ "learning_rate": 4.927504248308663e-06,
1454
+ "loss": 0.4898,
1455
+ "step": 194
1456
+ },
1457
+ {
1458
+ "epoch": 2.7132867132867133,
1459
+ "grad_norm": 0.5073778033256531,
1460
+ "learning_rate": 4.87918127381934e-06,
1461
+ "loss": 0.4462,
1462
+ "step": 195
1463
+ },
1464
+ {
1465
+ "epoch": 2.7272727272727275,
1466
+ "grad_norm": 0.5061259865760803,
1467
+ "learning_rate": 4.830869588548918e-06,
1468
+ "loss": 0.4811,
1469
+ "step": 196
1470
+ },
1471
+ {
1472
+ "epoch": 2.7412587412587412,
1473
+ "grad_norm": 0.532632052898407,
1474
+ "learning_rate": 4.782573706708133e-06,
1475
+ "loss": 0.4514,
1476
+ "step": 197
1477
+ },
1478
+ {
1479
+ "epoch": 2.755244755244755,
1480
+ "grad_norm": 0.5079967379570007,
1481
+ "learning_rate": 4.734298141031057e-06,
1482
+ "loss": 0.4706,
1483
+ "step": 198
1484
+ },
1485
+ {
1486
+ "epoch": 2.755244755244755,
1487
+ "eval_loss": 0.5748186111450195,
1488
+ "eval_runtime": 34.6547,
1489
+ "eval_samples_per_second": 18.526,
1490
+ "eval_steps_per_second": 2.337,
1491
+ "step": 198
1492
+ },
1493
+ {
1494
+ "epoch": 2.769230769230769,
1495
+ "grad_norm": 0.5450592637062073,
1496
+ "learning_rate": 4.686047402353433e-06,
1497
+ "loss": 0.4717,
1498
+ "step": 199
1499
+ },
1500
+ {
1501
+ "epoch": 2.7832167832167833,
1502
+ "grad_norm": 0.4929758906364441,
1503
+ "learning_rate": 4.637825999191189e-06,
1504
+ "loss": 0.469,
1505
+ "step": 200
1506
+ },
1507
+ {
1508
+ "epoch": 2.797202797202797,
1509
+ "grad_norm": 0.514842689037323,
1510
+ "learning_rate": 4.589638437319157e-06,
1511
+ "loss": 0.4848,
1512
+ "step": 201
1513
+ },
1514
+ {
1515
+ "epoch": 2.8111888111888113,
1516
+ "grad_norm": 0.5259736776351929,
1517
+ "learning_rate": 4.541489219350069e-06,
1518
+ "loss": 0.4676,
1519
+ "step": 202
1520
+ },
1521
+ {
1522
+ "epoch": 2.825174825174825,
1523
+ "grad_norm": 0.571843683719635,
1524
+ "learning_rate": 4.493382844313826e-06,
1525
+ "loss": 0.482,
1526
+ "step": 203
1527
+ },
1528
+ {
1529
+ "epoch": 2.839160839160839,
1530
+ "grad_norm": 0.49216270446777344,
1531
+ "learning_rate": 4.445323807237112e-06,
1532
+ "loss": 0.479,
1533
+ "step": 204
1534
+ },
1535
+ {
1536
+ "epoch": 2.8531468531468533,
1537
+ "grad_norm": 0.5383098721504211,
1538
+ "learning_rate": 4.397316598723385e-06,
1539
+ "loss": 0.4517,
1540
+ "step": 205
1541
+ },
1542
+ {
1543
+ "epoch": 2.867132867132867,
1544
+ "grad_norm": 0.5011985898017883,
1545
+ "learning_rate": 4.349365704533285e-06,
1546
+ "loss": 0.4678,
1547
+ "step": 206
1548
+ },
1549
+ {
1550
+ "epoch": 2.8811188811188813,
1551
+ "grad_norm": 0.5291906595230103,
1552
+ "learning_rate": 4.301475605165471e-06,
1553
+ "loss": 0.4717,
1554
+ "step": 207
1555
+ },
1556
+ {
1557
+ "epoch": 2.895104895104895,
1558
+ "grad_norm": 0.5500873923301697,
1559
+ "learning_rate": 4.25365077543798e-06,
1560
+ "loss": 0.4572,
1561
+ "step": 208
1562
+ },
1563
+ {
1564
+ "epoch": 2.909090909090909,
1565
+ "grad_norm": 0.5690264105796814,
1566
+ "learning_rate": 4.205895684070099e-06,
1567
+ "loss": 0.4675,
1568
+ "step": 209
1569
+ },
1570
+ {
1571
+ "epoch": 2.9230769230769234,
1572
+ "grad_norm": 0.4746716022491455,
1573
+ "learning_rate": 4.158214793264808e-06,
1574
+ "loss": 0.4579,
1575
+ "step": 210
1576
+ },
1577
+ {
1578
+ "epoch": 2.937062937062937,
1579
+ "grad_norm": 0.5113067626953125,
1580
+ "learning_rate": 4.1106125582918385e-06,
1581
+ "loss": 0.5104,
1582
+ "step": 211
1583
+ },
1584
+ {
1585
+ "epoch": 2.951048951048951,
1586
+ "grad_norm": 0.5272907018661499,
1587
+ "learning_rate": 4.063093427071376e-06,
1588
+ "loss": 0.4532,
1589
+ "step": 212
1590
+ },
1591
+ {
1592
+ "epoch": 2.965034965034965,
1593
+ "grad_norm": 0.5059399008750916,
1594
+ "learning_rate": 4.01566183975845e-06,
1595
+ "loss": 0.4555,
1596
+ "step": 213
1597
+ },
1598
+ {
1599
+ "epoch": 2.979020979020979,
1600
+ "grad_norm": 0.4909096658229828,
1601
+ "learning_rate": 3.968322228328041e-06,
1602
+ "loss": 0.4785,
1603
+ "step": 214
1604
+ },
1605
+ {
1606
+ "epoch": 2.993006993006993,
1607
+ "grad_norm": 0.5192479491233826,
1608
+ "learning_rate": 3.92107901616097e-06,
1609
+ "loss": 0.4477,
1610
+ "step": 215
1611
+ },
1612
+ {
1613
+ "epoch": 3.0,
1614
+ "grad_norm": 0.7363195419311523,
1615
+ "learning_rate": 3.873936617630578e-06,
1616
+ "loss": 0.4927,
1617
+ "step": 216
1618
+ },
1619
+ {
1620
+ "epoch": 3.0,
1621
+ "eval_loss": 0.5740084052085876,
1622
+ "eval_runtime": 34.8551,
1623
+ "eval_samples_per_second": 18.419,
1624
+ "eval_steps_per_second": 2.324,
1625
+ "step": 216
1626
+ },
1627
+ {
1628
+ "epoch": 3.013986013986014,
1629
+ "grad_norm": 0.5987377762794495,
1630
+ "learning_rate": 3.82689943769025e-06,
1631
+ "loss": 0.4246,
1632
+ "step": 217
1633
+ },
1634
+ {
1635
+ "epoch": 3.027972027972028,
1636
+ "grad_norm": 0.589948832988739,
1637
+ "learning_rate": 3.779971871461813e-06,
1638
+ "loss": 0.4367,
1639
+ "step": 218
1640
+ },
1641
+ {
1642
+ "epoch": 3.041958041958042,
1643
+ "grad_norm": 0.5003005862236023,
1644
+ "learning_rate": 3.7331583038248688e-06,
1645
+ "loss": 0.4346,
1646
+ "step": 219
1647
+ },
1648
+ {
1649
+ "epoch": 3.055944055944056,
1650
+ "grad_norm": 0.528349757194519,
1651
+ "learning_rate": 3.6864631090070656e-06,
1652
+ "loss": 0.3993,
1653
+ "step": 220
1654
+ },
1655
+ {
1656
+ "epoch": 3.06993006993007,
1657
+ "grad_norm": 0.5285301208496094,
1658
+ "learning_rate": 3.639890650175379e-06,
1659
+ "loss": 0.419,
1660
+ "step": 221
1661
+ },
1662
+ {
1663
+ "epoch": 3.0839160839160837,
1664
+ "grad_norm": 0.5721102356910706,
1665
+ "learning_rate": 3.593445279028418e-06,
1666
+ "loss": 0.4328,
1667
+ "step": 222
1668
+ },
1669
+ {
1670
+ "epoch": 3.097902097902098,
1671
+ "grad_norm": 0.5271673202514648,
1672
+ "learning_rate": 3.5471313353898056e-06,
1673
+ "loss": 0.4252,
1674
+ "step": 223
1675
+ },
1676
+ {
1677
+ "epoch": 3.111888111888112,
1678
+ "grad_norm": 0.5354319214820862,
1679
+ "learning_rate": 3.5009531468026646e-06,
1680
+ "loss": 0.4367,
1681
+ "step": 224
1682
+ },
1683
+ {
1684
+ "epoch": 3.125874125874126,
1685
+ "grad_norm": 0.5849824547767639,
1686
+ "learning_rate": 3.4549150281252635e-06,
1687
+ "loss": 0.4263,
1688
+ "step": 225
1689
+ },
1690
+ {
1691
+ "epoch": 3.13986013986014,
1692
+ "grad_norm": 0.6300305128097534,
1693
+ "learning_rate": 3.409021281127835e-06,
1694
+ "loss": 0.4331,
1695
+ "step": 226
1696
+ },
1697
+ {
1698
+ "epoch": 3.1538461538461537,
1699
+ "grad_norm": 0.5985769033432007,
1700
+ "learning_rate": 3.3632761940906167e-06,
1701
+ "loss": 0.4316,
1702
+ "step": 227
1703
+ },
1704
+ {
1705
+ "epoch": 3.167832167832168,
1706
+ "grad_norm": 0.5028027296066284,
1707
+ "learning_rate": 3.3176840414031653e-06,
1708
+ "loss": 0.4243,
1709
+ "step": 228
1710
+ },
1711
+ {
1712
+ "epoch": 3.1818181818181817,
1713
+ "grad_norm": 0.5299258232116699,
1714
+ "learning_rate": 3.2722490831649568e-06,
1715
+ "loss": 0.4166,
1716
+ "step": 229
1717
+ },
1718
+ {
1719
+ "epoch": 3.195804195804196,
1720
+ "grad_norm": 0.5425248742103577,
1721
+ "learning_rate": 3.226975564787322e-06,
1722
+ "loss": 0.4389,
1723
+ "step": 230
1724
+ },
1725
+ {
1726
+ "epoch": 3.20979020979021,
1727
+ "grad_norm": 0.5929123759269714,
1728
+ "learning_rate": 3.181867716596765e-06,
1729
+ "loss": 0.4288,
1730
+ "step": 231
1731
+ },
1732
+ {
1733
+ "epoch": 3.2237762237762237,
1734
+ "grad_norm": 0.5462735891342163,
1735
+ "learning_rate": 3.1369297534396823e-06,
1736
+ "loss": 0.4434,
1737
+ "step": 232
1738
+ },
1739
+ {
1740
+ "epoch": 3.237762237762238,
1741
+ "grad_norm": 0.4862322211265564,
1742
+ "learning_rate": 3.092165874288525e-06,
1743
+ "loss": 0.4133,
1744
+ "step": 233
1745
+ },
1746
+ {
1747
+ "epoch": 3.2517482517482517,
1748
+ "grad_norm": 0.48885804414749146,
1749
+ "learning_rate": 3.0475802618494564e-06,
1750
+ "loss": 0.4426,
1751
+ "step": 234
1752
+ },
1753
+ {
1754
+ "epoch": 3.2517482517482517,
1755
+ "eval_loss": 0.5924859046936035,
1756
+ "eval_runtime": 34.7085,
1757
+ "eval_samples_per_second": 18.497,
1758
+ "eval_steps_per_second": 2.334,
1759
+ "step": 234
1760
+ },
1761
+ {
1762
+ "epoch": 3.265734265734266,
1763
+ "grad_norm": 0.4652189314365387,
1764
+ "learning_rate": 3.0031770821715233e-06,
1765
+ "loss": 0.4189,
1766
+ "step": 235
1767
+ },
1768
+ {
1769
+ "epoch": 3.2797202797202796,
1770
+ "grad_norm": 0.5389134883880615,
1771
+ "learning_rate": 2.9589604842573762e-06,
1772
+ "loss": 0.4226,
1773
+ "step": 236
1774
+ },
1775
+ {
1776
+ "epoch": 3.2937062937062938,
1777
+ "grad_norm": 0.507276177406311,
1778
+ "learning_rate": 2.914934599675594e-06,
1779
+ "loss": 0.4084,
1780
+ "step": 237
1781
+ },
1782
+ {
1783
+ "epoch": 3.3076923076923075,
1784
+ "grad_norm": 0.4876704216003418,
1785
+ "learning_rate": 2.871103542174637e-06,
1786
+ "loss": 0.4256,
1787
+ "step": 238
1788
+ },
1789
+ {
1790
+ "epoch": 3.3216783216783217,
1791
+ "grad_norm": 0.48441073298454285,
1792
+ "learning_rate": 2.827471407298451e-06,
1793
+ "loss": 0.4297,
1794
+ "step": 239
1795
+ },
1796
+ {
1797
+ "epoch": 3.335664335664336,
1798
+ "grad_norm": 0.4634881317615509,
1799
+ "learning_rate": 2.7840422720037943e-06,
1800
+ "loss": 0.4227,
1801
+ "step": 240
1802
+ },
1803
+ {
1804
+ "epoch": 3.3496503496503496,
1805
+ "grad_norm": 0.49520549178123474,
1806
+ "learning_rate": 2.7408201942792755e-06,
1807
+ "loss": 0.414,
1808
+ "step": 241
1809
+ },
1810
+ {
1811
+ "epoch": 3.3636363636363638,
1812
+ "grad_norm": 0.4892767369747162,
1813
+ "learning_rate": 2.697809212766195e-06,
1814
+ "loss": 0.4326,
1815
+ "step": 242
1816
+ },
1817
+ {
1818
+ "epoch": 3.3776223776223775,
1819
+ "grad_norm": 0.4968920052051544,
1820
+ "learning_rate": 2.655013346381158e-06,
1821
+ "loss": 0.4327,
1822
+ "step": 243
1823
+ },
1824
+ {
1825
+ "epoch": 3.3916083916083917,
1826
+ "grad_norm": 0.4823973476886749,
1827
+ "learning_rate": 2.612436593940568e-06,
1828
+ "loss": 0.4329,
1829
+ "step": 244
1830
+ },
1831
+ {
1832
+ "epoch": 3.4055944055944054,
1833
+ "grad_norm": 0.4838135540485382,
1834
+ "learning_rate": 2.57008293378697e-06,
1835
+ "loss": 0.4206,
1836
+ "step": 245
1837
+ },
1838
+ {
1839
+ "epoch": 3.4195804195804196,
1840
+ "grad_norm": 0.47422581911087036,
1841
+ "learning_rate": 2.5279563234173177e-06,
1842
+ "loss": 0.4336,
1843
+ "step": 246
1844
+ },
1845
+ {
1846
+ "epoch": 3.4335664335664333,
1847
+ "grad_norm": 0.4846055209636688,
1848
+ "learning_rate": 2.4860606991131857e-06,
1849
+ "loss": 0.4184,
1850
+ "step": 247
1851
+ },
1852
+ {
1853
+ "epoch": 3.4475524475524475,
1854
+ "grad_norm": 0.5305242538452148,
1855
+ "learning_rate": 2.444399975572974e-06,
1856
+ "loss": 0.4394,
1857
+ "step": 248
1858
+ },
1859
+ {
1860
+ "epoch": 3.4615384615384617,
1861
+ "grad_norm": 0.487332820892334,
1862
+ "learning_rate": 2.402978045546114e-06,
1863
+ "loss": 0.4033,
1864
+ "step": 249
1865
+ },
1866
+ {
1867
+ "epoch": 3.4755244755244754,
1868
+ "grad_norm": 0.4706343114376068,
1869
+ "learning_rate": 2.3617987794693358e-06,
1870
+ "loss": 0.4408,
1871
+ "step": 250
1872
+ },
1873
+ {
1874
+ "epoch": 3.4895104895104896,
1875
+ "grad_norm": 0.503103494644165,
1876
+ "learning_rate": 2.320866025105016e-06,
1877
+ "loss": 0.4166,
1878
+ "step": 251
1879
+ },
1880
+ {
1881
+ "epoch": 3.5034965034965033,
1882
+ "grad_norm": 0.5077600479125977,
1883
+ "learning_rate": 2.2801836071816476e-06,
1884
+ "loss": 0.4423,
1885
+ "step": 252
1886
+ },
1887
+ {
1888
+ "epoch": 3.5034965034965033,
1889
+ "eval_loss": 0.5952551364898682,
1890
+ "eval_runtime": 33.5546,
1891
+ "eval_samples_per_second": 19.133,
1892
+ "eval_steps_per_second": 2.414,
1893
+ "step": 252
1894
+ },
1895
+ {
1896
+ "epoch": 3.5174825174825175,
1897
+ "grad_norm": 0.48870253562927246,
1898
+ "learning_rate": 2.2397553270364546e-06,
1899
+ "loss": 0.4241,
1900
+ "step": 253
1901
+ },
1902
+ {
1903
+ "epoch": 3.5314685314685317,
1904
+ "grad_norm": 0.4966093897819519,
1905
+ "learning_rate": 2.1995849622602017e-06,
1906
+ "loss": 0.4396,
1907
+ "step": 254
1908
+ },
1909
+ {
1910
+ "epoch": 3.5454545454545454,
1911
+ "grad_norm": 0.4564977288246155,
1912
+ "learning_rate": 2.159676266344222e-06,
1913
+ "loss": 0.4223,
1914
+ "step": 255
1915
+ },
1916
+ {
1917
+ "epoch": 3.5594405594405596,
1918
+ "grad_norm": 0.46915507316589355,
1919
+ "learning_rate": 2.120032968329687e-06,
1920
+ "loss": 0.4283,
1921
+ "step": 256
1922
+ },
1923
+ {
1924
+ "epoch": 3.5734265734265733,
1925
+ "grad_norm": 0.49805694818496704,
1926
+ "learning_rate": 2.0806587724591725e-06,
1927
+ "loss": 0.4382,
1928
+ "step": 257
1929
+ },
1930
+ {
1931
+ "epoch": 3.5874125874125875,
1932
+ "grad_norm": 0.48657479882240295,
1933
+ "learning_rate": 2.0415573578305343e-06,
1934
+ "loss": 0.4378,
1935
+ "step": 258
1936
+ },
1937
+ {
1938
+ "epoch": 3.6013986013986012,
1939
+ "grad_norm": 0.46977299451828003,
1940
+ "learning_rate": 2.0027323780531312e-06,
1941
+ "loss": 0.4224,
1942
+ "step": 259
1943
+ },
1944
+ {
1945
+ "epoch": 3.6153846153846154,
1946
+ "grad_norm": 0.49343907833099365,
1947
+ "learning_rate": 1.9641874609064443e-06,
1948
+ "loss": 0.4088,
1949
+ "step": 260
1950
+ },
1951
+ {
1952
+ "epoch": 3.629370629370629,
1953
+ "grad_norm": 0.4801478385925293,
1954
+ "learning_rate": 1.9259262080010938e-06,
1955
+ "loss": 0.419,
1956
+ "step": 261
1957
+ },
1958
+ {
1959
+ "epoch": 3.6433566433566433,
1960
+ "grad_norm": 0.4632829427719116,
1961
+ "learning_rate": 1.887952194442309e-06,
1962
+ "loss": 0.4185,
1963
+ "step": 262
1964
+ },
1965
+ {
1966
+ "epoch": 3.6573426573426575,
1967
+ "grad_norm": 0.4722610414028168,
1968
+ "learning_rate": 1.8502689684958664e-06,
1969
+ "loss": 0.4223,
1970
+ "step": 263
1971
+ },
1972
+ {
1973
+ "epoch": 3.6713286713286712,
1974
+ "grad_norm": 0.46521317958831787,
1975
+ "learning_rate": 1.8128800512565514e-06,
1976
+ "loss": 0.4311,
1977
+ "step": 264
1978
+ },
1979
+ {
1980
+ "epoch": 3.6853146853146854,
1981
+ "grad_norm": 0.49360647797584534,
1982
+ "learning_rate": 1.7757889363191484e-06,
1983
+ "loss": 0.4336,
1984
+ "step": 265
1985
+ },
1986
+ {
1987
+ "epoch": 3.699300699300699,
1988
+ "grad_norm": 0.46490150690078735,
1989
+ "learning_rate": 1.738999089451991e-06,
1990
+ "loss": 0.41,
1991
+ "step": 266
1992
+ },
1993
+ {
1994
+ "epoch": 3.7132867132867133,
1995
+ "grad_norm": 0.47419989109039307,
1996
+ "learning_rate": 1.7025139482731385e-06,
1997
+ "loss": 0.4489,
1998
+ "step": 267
1999
+ },
2000
+ {
2001
+ "epoch": 3.7272727272727275,
2002
+ "grad_norm": 0.4471936821937561,
2003
+ "learning_rate": 1.6663369219291558e-06,
2004
+ "loss": 0.4075,
2005
+ "step": 268
2006
+ },
2007
+ {
2008
+ "epoch": 3.7412587412587412,
2009
+ "grad_norm": 0.4871998727321625,
2010
+ "learning_rate": 1.6304713907765713e-06,
2011
+ "loss": 0.4138,
2012
+ "step": 269
2013
+ },
2014
+ {
2015
+ "epoch": 3.755244755244755,
2016
+ "grad_norm": 0.4558921754360199,
2017
+ "learning_rate": 1.5949207060660138e-06,
2018
+ "loss": 0.4209,
2019
+ "step": 270
2020
+ },
2021
+ {
2022
+ "epoch": 3.755244755244755,
2023
+ "eval_loss": 0.5941651463508606,
2024
+ "eval_runtime": 34.8033,
2025
+ "eval_samples_per_second": 18.447,
2026
+ "eval_steps_per_second": 2.327,
2027
+ "step": 270
2028
+ },
2029
+ {
2030
+ "epoch": 3.769230769230769,
2031
+ "grad_norm": 0.43444135785102844,
2032
+ "learning_rate": 1.55968818962908e-06,
2033
+ "loss": 0.4186,
2034
+ "step": 271
2035
+ },
2036
+ {
2037
+ "epoch": 3.7832167832167833,
2038
+ "grad_norm": 0.47602659463882446,
2039
+ "learning_rate": 1.5247771335679372e-06,
2040
+ "loss": 0.4138,
2041
+ "step": 272
2042
+ },
2043
+ {
2044
+ "epoch": 3.797202797202797,
2045
+ "grad_norm": 0.4794568121433258,
2046
+ "learning_rate": 1.4901907999477167e-06,
2047
+ "loss": 0.4512,
2048
+ "step": 273
2049
+ },
2050
+ {
2051
+ "epoch": 3.8111888111888113,
2052
+ "grad_norm": 0.47370994091033936,
2053
+ "learning_rate": 1.4559324204917102e-06,
2054
+ "loss": 0.4446,
2055
+ "step": 274
2056
+ },
2057
+ {
2058
+ "epoch": 3.825174825174825,
2059
+ "grad_norm": 0.4493069052696228,
2060
+ "learning_rate": 1.4220051962793952e-06,
2061
+ "loss": 0.4316,
2062
+ "step": 275
2063
+ },
2064
+ {
2065
+ "epoch": 3.839160839160839,
2066
+ "grad_norm": 0.4439810812473297,
2067
+ "learning_rate": 1.3884122974473307e-06,
2068
+ "loss": 0.4276,
2069
+ "step": 276
2070
+ },
2071
+ {
2072
+ "epoch": 3.8531468531468533,
2073
+ "grad_norm": 0.44139519333839417,
2074
+ "learning_rate": 1.3551568628929434e-06,
2075
+ "loss": 0.427,
2076
+ "step": 277
2077
+ },
2078
+ {
2079
+ "epoch": 3.867132867132867,
2080
+ "grad_norm": 0.45054903626441956,
2081
+ "learning_rate": 1.3222419999812248e-06,
2082
+ "loss": 0.4356,
2083
+ "step": 278
2084
+ },
2085
+ {
2086
+ "epoch": 3.8811188811188813,
2087
+ "grad_norm": 0.44140151143074036,
2088
+ "learning_rate": 1.2896707842543898e-06,
2089
+ "loss": 0.4287,
2090
+ "step": 279
2091
+ },
2092
+ {
2093
+ "epoch": 3.895104895104895,
2094
+ "grad_norm": 0.4277818202972412,
2095
+ "learning_rate": 1.257446259144494e-06,
2096
+ "loss": 0.4298,
2097
+ "step": 280
2098
+ },
2099
+ {
2100
+ "epoch": 3.909090909090909,
2101
+ "grad_norm": 0.4403057098388672,
2102
+ "learning_rate": 1.225571435689062e-06,
2103
+ "loss": 0.4185,
2104
+ "step": 281
2105
+ },
2106
+ {
2107
+ "epoch": 3.9230769230769234,
2108
+ "grad_norm": 0.4724678099155426,
2109
+ "learning_rate": 1.1940492922497337e-06,
2110
+ "loss": 0.4465,
2111
+ "step": 282
2112
+ },
2113
+ {
2114
+ "epoch": 3.937062937062937,
2115
+ "grad_norm": 0.47128820419311523,
2116
+ "learning_rate": 1.1628827742339688e-06,
2117
+ "loss": 0.4126,
2118
+ "step": 283
2119
+ },
2120
+ {
2121
+ "epoch": 3.951048951048951,
2122
+ "grad_norm": 0.4331970512866974,
2123
+ "learning_rate": 1.1320747938198356e-06,
2124
+ "loss": 0.4105,
2125
+ "step": 284
2126
+ },
2127
+ {
2128
+ "epoch": 3.965034965034965,
2129
+ "grad_norm": 0.4537077844142914,
2130
+ "learning_rate": 1.1016282296838887e-06,
2131
+ "loss": 0.4257,
2132
+ "step": 285
2133
+ },
2134
+ {
2135
+ "epoch": 3.979020979020979,
2136
+ "grad_norm": 0.46981024742126465,
2137
+ "learning_rate": 1.0715459267321998e-06,
2138
+ "loss": 0.4336,
2139
+ "step": 286
2140
+ },
2141
+ {
2142
+ "epoch": 3.993006993006993,
2143
+ "grad_norm": 0.4497096538543701,
2144
+ "learning_rate": 1.0418306958345214e-06,
2145
+ "loss": 0.4326,
2146
+ "step": 287
2147
+ },
2148
+ {
2149
+ "epoch": 4.0,
2150
+ "grad_norm": 0.6176419258117676,
2151
+ "learning_rate": 1.0124853135616475e-06,
2152
+ "loss": 0.4261,
2153
+ "step": 288
2154
+ },
2155
+ {
2156
+ "epoch": 4.0,
2157
+ "eval_loss": 0.594137966632843,
2158
+ "eval_runtime": 35.3287,
2159
+ "eval_samples_per_second": 18.172,
2160
+ "eval_steps_per_second": 2.293,
2161
+ "step": 288
2162
+ },
2163
+ {
2164
+ "epoch": 4.013986013986014,
2165
+ "grad_norm": 0.48881927132606506,
2166
+ "learning_rate": 9.835125219259694e-07,
2167
+ "loss": 0.4126,
2168
+ "step": 289
2169
+ },
2170
+ {
2171
+ "epoch": 4.027972027972028,
2172
+ "grad_norm": 0.47744905948638916,
2173
+ "learning_rate": 9.549150281252633e-07,
2174
+ "loss": 0.3887,
2175
+ "step": 290
2176
+ },
2177
+ {
2178
+ "epoch": 4.041958041958042,
2179
+ "grad_norm": 0.4749980568885803,
2180
+ "learning_rate": 9.266955042897357e-07,
2181
+ "loss": 0.4085,
2182
+ "step": 291
2183
+ },
2184
+ {
2185
+ "epoch": 4.055944055944056,
2186
+ "grad_norm": 0.4653206169605255,
2187
+ "learning_rate": 8.988565872323362e-07,
2188
+ "loss": 0.3949,
2189
+ "step": 292
2190
+ },
2191
+ {
2192
+ "epoch": 4.06993006993007,
2193
+ "grad_norm": 0.44160446524620056,
2194
+ "learning_rate": 8.714008782023797e-07,
2195
+ "loss": 0.4049,
2196
+ "step": 293
2197
+ },
2198
+ {
2199
+ "epoch": 4.083916083916084,
2200
+ "grad_norm": 0.43797171115875244,
2201
+ "learning_rate": 8.443309426424862e-07,
2202
+ "loss": 0.4038,
2203
+ "step": 294
2204
+ },
2205
+ {
2206
+ "epoch": 4.0979020979020975,
2207
+ "grad_norm": 0.4569723904132843,
2208
+ "learning_rate": 8.176493099488664e-07,
2209
+ "loss": 0.3956,
2210
+ "step": 295
2211
+ },
2212
+ {
2213
+ "epoch": 4.111888111888112,
2214
+ "grad_norm": 0.47445249557495117,
2215
+ "learning_rate": 7.913584732349788e-07,
2216
+ "loss": 0.4107,
2217
+ "step": 296
2218
+ },
2219
+ {
2220
+ "epoch": 4.125874125874126,
2221
+ "grad_norm": 0.46384716033935547,
2222
+ "learning_rate": 7.654608890985709e-07,
2223
+ "loss": 0.3895,
2224
+ "step": 297
2225
+ },
2226
+ {
2227
+ "epoch": 4.13986013986014,
2228
+ "grad_norm": 0.47651711106300354,
2229
+ "learning_rate": 7.399589773921412e-07,
2230
+ "loss": 0.3859,
2231
+ "step": 298
2232
+ },
2233
+ {
2234
+ "epoch": 4.153846153846154,
2235
+ "grad_norm": 0.4623275697231293,
2236
+ "learning_rate": 7.148551209968279e-07,
2237
+ "loss": 0.394,
2238
+ "step": 299
2239
+ },
2240
+ {
2241
+ "epoch": 4.1678321678321675,
2242
+ "grad_norm": 0.4649985432624817,
2243
+ "learning_rate": 6.901516655997536e-07,
2244
+ "loss": 0.4108,
2245
+ "step": 300
2246
+ },
2247
+ {
2248
+ "epoch": 4.181818181818182,
2249
+ "grad_norm": 0.4691464304924011,
2250
+ "learning_rate": 6.658509194748463e-07,
2251
+ "loss": 0.3626,
2252
+ "step": 301
2253
+ },
2254
+ {
2255
+ "epoch": 4.195804195804196,
2256
+ "grad_norm": 0.48455217480659485,
2257
+ "learning_rate": 6.419551532671542e-07,
2258
+ "loss": 0.4172,
2259
+ "step": 302
2260
+ },
2261
+ {
2262
+ "epoch": 4.20979020979021,
2263
+ "grad_norm": 0.482030987739563,
2264
+ "learning_rate": 6.184665997806832e-07,
2265
+ "loss": 0.4038,
2266
+ "step": 303
2267
+ },
2268
+ {
2269
+ "epoch": 4.223776223776224,
2270
+ "grad_norm": 0.4398139715194702,
2271
+ "learning_rate": 5.953874537697573e-07,
2272
+ "loss": 0.4033,
2273
+ "step": 304
2274
+ },
2275
+ {
2276
+ "epoch": 4.2377622377622375,
2277
+ "grad_norm": 0.46925652027130127,
2278
+ "learning_rate": 5.727198717339511e-07,
2279
+ "loss": 0.4091,
2280
+ "step": 305
2281
+ },
2282
+ {
2283
+ "epoch": 4.251748251748252,
2284
+ "grad_norm": 0.46952134370803833,
2285
+ "learning_rate": 5.504659717165812e-07,
2286
+ "loss": 0.4111,
2287
+ "step": 306
2288
+ },
2289
+ {
2290
+ "epoch": 4.251748251748252,
2291
+ "eval_loss": 0.6070981025695801,
2292
+ "eval_runtime": 35.5097,
2293
+ "eval_samples_per_second": 18.08,
2294
+ "eval_steps_per_second": 2.281,
2295
+ "step": 306
2296
+ },
2297
+ {
2298
+ "epoch": 4.265734265734266,
2299
+ "grad_norm": 0.45535174012184143,
2300
+ "learning_rate": 5.286278331068018e-07,
2301
+ "loss": 0.4128,
2302
+ "step": 307
2303
+ },
2304
+ {
2305
+ "epoch": 4.27972027972028,
2306
+ "grad_norm": 0.4438033998012543,
2307
+ "learning_rate": 5.072074964453055e-07,
2308
+ "loss": 0.4052,
2309
+ "step": 308
2310
+ },
2311
+ {
2312
+ "epoch": 4.293706293706293,
2313
+ "grad_norm": 0.4887377917766571,
2314
+ "learning_rate": 4.862069632336558e-07,
2315
+ "loss": 0.3894,
2316
+ "step": 309
2317
+ },
2318
+ {
2319
+ "epoch": 4.3076923076923075,
2320
+ "grad_norm": 0.4616340100765228,
2321
+ "learning_rate": 4.6562819574727304e-07,
2322
+ "loss": 0.4242,
2323
+ "step": 310
2324
+ },
2325
+ {
2326
+ "epoch": 4.321678321678322,
2327
+ "grad_norm": 0.44037091732025146,
2328
+ "learning_rate": 4.454731168520754e-07,
2329
+ "loss": 0.4052,
2330
+ "step": 311
2331
+ },
2332
+ {
2333
+ "epoch": 4.335664335664336,
2334
+ "grad_norm": 0.4455097019672394,
2335
+ "learning_rate": 4.257436098248091e-07,
2336
+ "loss": 0.3882,
2337
+ "step": 312
2338
+ },
2339
+ {
2340
+ "epoch": 4.34965034965035,
2341
+ "grad_norm": 0.47457605600357056,
2342
+ "learning_rate": 4.064415181770787e-07,
2343
+ "loss": 0.4102,
2344
+ "step": 313
2345
+ },
2346
+ {
2347
+ "epoch": 4.363636363636363,
2348
+ "grad_norm": 0.4474296271800995,
2349
+ "learning_rate": 3.875686454830885e-07,
2350
+ "loss": 0.3866,
2351
+ "step": 314
2352
+ },
2353
+ {
2354
+ "epoch": 4.3776223776223775,
2355
+ "grad_norm": 0.44111815094947815,
2356
+ "learning_rate": 3.691267552111183e-07,
2357
+ "loss": 0.4091,
2358
+ "step": 315
2359
+ },
2360
+ {
2361
+ "epoch": 4.391608391608392,
2362
+ "grad_norm": 0.46066638827323914,
2363
+ "learning_rate": 3.511175705587433e-07,
2364
+ "loss": 0.422,
2365
+ "step": 316
2366
+ },
2367
+ {
2368
+ "epoch": 4.405594405594406,
2369
+ "grad_norm": 0.4345090389251709,
2370
+ "learning_rate": 3.3354277429182626e-07,
2371
+ "loss": 0.3882,
2372
+ "step": 317
2373
+ },
2374
+ {
2375
+ "epoch": 4.41958041958042,
2376
+ "grad_norm": 0.462768018245697,
2377
+ "learning_rate": 3.164040085872755e-07,
2378
+ "loss": 0.4125,
2379
+ "step": 318
2380
+ },
2381
+ {
2382
+ "epoch": 4.433566433566433,
2383
+ "grad_norm": 0.4575034976005554,
2384
+ "learning_rate": 2.997028748796016e-07,
2385
+ "loss": 0.4138,
2386
+ "step": 319
2387
+ },
2388
+ {
2389
+ "epoch": 4.4475524475524475,
2390
+ "grad_norm": 0.43728622794151306,
2391
+ "learning_rate": 2.834409337112842e-07,
2392
+ "loss": 0.4133,
2393
+ "step": 320
2394
+ },
2395
+ {
2396
+ "epoch": 4.461538461538462,
2397
+ "grad_norm": 0.4533195495605469,
2398
+ "learning_rate": 2.676197045869511e-07,
2399
+ "loss": 0.4067,
2400
+ "step": 321
2401
+ },
2402
+ {
2403
+ "epoch": 4.475524475524476,
2404
+ "grad_norm": 0.44842609763145447,
2405
+ "learning_rate": 2.522406658313997e-07,
2406
+ "loss": 0.4042,
2407
+ "step": 322
2408
+ },
2409
+ {
2410
+ "epoch": 4.489510489510489,
2411
+ "grad_norm": 0.4315699636936188,
2412
+ "learning_rate": 2.3730525445146146e-07,
2413
+ "loss": 0.3969,
2414
+ "step": 323
2415
+ },
2416
+ {
2417
+ "epoch": 4.503496503496503,
2418
+ "grad_norm": 0.43630900979042053,
2419
+ "learning_rate": 2.2281486600173207e-07,
2420
+ "loss": 0.3907,
2421
+ "step": 324
2422
+ },
2423
+ {
2424
+ "epoch": 4.503496503496503,
2425
+ "eval_loss": 0.6088654398918152,
2426
+ "eval_runtime": 35.0812,
2427
+ "eval_samples_per_second": 18.3,
2428
+ "eval_steps_per_second": 2.309,
2429
+ "step": 324
2430
+ },
2431
+ {
2432
+ "epoch": 4.5174825174825175,
2433
+ "grad_norm": 0.43661531805992126,
2434
+ "learning_rate": 2.0877085445416889e-07,
2435
+ "loss": 0.4079,
2436
+ "step": 325
2437
+ },
2438
+ {
2439
+ "epoch": 4.531468531468532,
2440
+ "grad_norm": 0.43984201550483704,
2441
+ "learning_rate": 1.9517453207157865e-07,
2442
+ "loss": 0.4071,
2443
+ "step": 326
2444
+ },
2445
+ {
2446
+ "epoch": 4.545454545454545,
2447
+ "grad_norm": 0.43304693698883057,
2448
+ "learning_rate": 1.8202716928499842e-07,
2449
+ "loss": 0.4,
2450
+ "step": 327
2451
+ },
2452
+ {
2453
+ "epoch": 4.559440559440559,
2454
+ "grad_norm": 0.44190627336502075,
2455
+ "learning_rate": 1.6932999457498823e-07,
2456
+ "loss": 0.3936,
2457
+ "step": 328
2458
+ },
2459
+ {
2460
+ "epoch": 4.573426573426573,
2461
+ "grad_norm": 0.46403783559799194,
2462
+ "learning_rate": 1.5708419435684463e-07,
2463
+ "loss": 0.4142,
2464
+ "step": 329
2465
+ },
2466
+ {
2467
+ "epoch": 4.5874125874125875,
2468
+ "grad_norm": 0.448397159576416,
2469
+ "learning_rate": 1.4529091286973994e-07,
2470
+ "loss": 0.411,
2471
+ "step": 330
2472
+ },
2473
+ {
2474
+ "epoch": 4.601398601398602,
2475
+ "grad_norm": 0.4263162910938263,
2476
+ "learning_rate": 1.3395125206980774e-07,
2477
+ "loss": 0.3991,
2478
+ "step": 331
2479
+ },
2480
+ {
2481
+ "epoch": 4.615384615384615,
2482
+ "grad_norm": 0.4367568790912628,
2483
+ "learning_rate": 1.230662715271741e-07,
2484
+ "loss": 0.4144,
2485
+ "step": 332
2486
+ },
2487
+ {
2488
+ "epoch": 4.629370629370629,
2489
+ "grad_norm": 0.4405047297477722,
2490
+ "learning_rate": 1.1263698832695513e-07,
2491
+ "loss": 0.3935,
2492
+ "step": 333
2493
+ },
2494
+ {
2495
+ "epoch": 4.643356643356643,
2496
+ "grad_norm": 0.4359452426433563,
2497
+ "learning_rate": 1.0266437697422026e-07,
2498
+ "loss": 0.3913,
2499
+ "step": 334
2500
+ },
2501
+ {
2502
+ "epoch": 4.6573426573426575,
2503
+ "grad_norm": 0.44500768184661865,
2504
+ "learning_rate": 9.314936930293283e-08,
2505
+ "loss": 0.4102,
2506
+ "step": 335
2507
+ },
2508
+ {
2509
+ "epoch": 4.671328671328672,
2510
+ "grad_norm": 0.46006131172180176,
2511
+ "learning_rate": 8.40928543888836e-08,
2512
+ "loss": 0.4138,
2513
+ "step": 336
2514
+ },
2515
+ {
2516
+ "epoch": 4.685314685314685,
2517
+ "grad_norm": 0.44435447454452515,
2518
+ "learning_rate": 7.549567846661388e-08,
2519
+ "loss": 0.4185,
2520
+ "step": 337
2521
+ },
2522
+ {
2523
+ "epoch": 4.699300699300699,
2524
+ "grad_norm": 0.43049922585487366,
2525
+ "learning_rate": 6.735864485034493e-08,
2526
+ "loss": 0.3946,
2527
+ "step": 338
2528
+ },
2529
+ {
2530
+ "epoch": 4.713286713286713,
2531
+ "grad_norm": 0.4270278513431549,
2532
+ "learning_rate": 5.968251385891744e-08,
2533
+ "loss": 0.3969,
2534
+ "step": 339
2535
+ },
2536
+ {
2537
+ "epoch": 4.7272727272727275,
2538
+ "grad_norm": 0.4480164647102356,
2539
+ "learning_rate": 5.246800274474439e-08,
2540
+ "loss": 0.4005,
2541
+ "step": 340
2542
+ },
2543
+ {
2544
+ "epoch": 4.741258741258742,
2545
+ "grad_norm": 0.4490266740322113,
2546
+ "learning_rate": 4.571578562679757e-08,
2547
+ "loss": 0.3884,
2548
+ "step": 341
2549
+ },
2550
+ {
2551
+ "epoch": 4.755244755244755,
2552
+ "grad_norm": 0.4623181223869324,
2553
+ "learning_rate": 3.9426493427611177e-08,
2554
+ "loss": 0.4169,
2555
+ "step": 342
2556
+ },
2557
+ {
2558
+ "epoch": 4.755244755244755,
2559
+ "eval_loss": 0.6084015965461731,
2560
+ "eval_runtime": 34.879,
2561
+ "eval_samples_per_second": 18.406,
2562
+ "eval_steps_per_second": 2.322,
2563
+ "step": 342
2564
+ },
2565
+ {
2566
+ "epoch": 4.769230769230769,
2567
+ "grad_norm": 0.4283956289291382,
2568
+ "learning_rate": 3.360071381433516e-08,
2569
+ "loss": 0.3969,
2570
+ "step": 343
2571
+ },
2572
+ {
2573
+ "epoch": 4.783216783216783,
2574
+ "grad_norm": 0.4356008470058441,
2575
+ "learning_rate": 2.823899114382078e-08,
2576
+ "loss": 0.4027,
2577
+ "step": 344
2578
+ },
2579
+ {
2580
+ "epoch": 4.7972027972027975,
2581
+ "grad_norm": 0.44547533988952637,
2582
+ "learning_rate": 2.3341826411756863e-08,
2583
+ "loss": 0.3987,
2584
+ "step": 345
2585
+ },
2586
+ {
2587
+ "epoch": 4.811188811188811,
2588
+ "grad_norm": 0.4299108386039734,
2589
+ "learning_rate": 1.8909677205856682e-08,
2590
+ "loss": 0.4017,
2591
+ "step": 346
2592
+ },
2593
+ {
2594
+ "epoch": 4.825174825174825,
2595
+ "grad_norm": 0.4200840890407562,
2596
+ "learning_rate": 1.494295766310161e-08,
2597
+ "loss": 0.3885,
2598
+ "step": 347
2599
+ },
2600
+ {
2601
+ "epoch": 4.839160839160839,
2602
+ "grad_norm": 0.43688181042671204,
2603
+ "learning_rate": 1.1442038431044856e-08,
2604
+ "loss": 0.4119,
2605
+ "step": 348
2606
+ },
2607
+ {
2608
+ "epoch": 4.853146853146853,
2609
+ "grad_norm": 0.4302099943161011,
2610
+ "learning_rate": 8.407246633178601e-09,
2611
+ "loss": 0.3843,
2612
+ "step": 349
2613
+ },
2614
+ {
2615
+ "epoch": 4.867132867132867,
2616
+ "grad_norm": 0.45412999391555786,
2617
+ "learning_rate": 5.838865838366792e-09,
2618
+ "loss": 0.4009,
2619
+ "step": 350
2620
+ },
2621
+ {
2622
+ "epoch": 4.881118881118881,
2623
+ "grad_norm": 0.43274399638175964,
2624
+ "learning_rate": 3.737136034349109e-09,
2625
+ "loss": 0.3951,
2626
+ "step": 351
2627
+ },
2628
+ {
2629
+ "epoch": 4.895104895104895,
2630
+ "grad_norm": 0.4244266450405121,
2631
+ "learning_rate": 2.102253605316684e-09,
2632
+ "loss": 0.4059,
2633
+ "step": 352
2634
+ },
2635
+ {
2636
+ "epoch": 4.909090909090909,
2637
+ "grad_norm": 0.4323265552520752,
2638
+ "learning_rate": 9.343713135623323e-10,
2639
+ "loss": 0.3963,
2640
+ "step": 353
2641
+ },
2642
+ {
2643
+ "epoch": 4.923076923076923,
2644
+ "grad_norm": 0.4487632215023041,
2645
+ "learning_rate": 2.335982852064156e-10,
2646
+ "loss": 0.3937,
2647
+ "step": 354
2648
+ },
2649
+ {
2650
+ "epoch": 4.937062937062937,
2651
+ "grad_norm": 0.4363052546977997,
2652
+ "learning_rate": 0.0,
2653
+ "loss": 0.405,
2654
+ "step": 355
2655
+ }
2656
+ ],
2657
+ "logging_steps": 1,
2658
+ "max_steps": 355,
2659
+ "num_input_tokens_seen": 0,
2660
+ "num_train_epochs": 5,
2661
+ "save_steps": 36,
2662
+ "stateful_callbacks": {
2663
+ "TrainerControl": {
2664
+ "args": {
2665
+ "should_epoch_stop": false,
2666
+ "should_evaluate": false,
2667
+ "should_log": false,
2668
+ "should_save": true,
2669
+ "should_training_stop": true
2670
+ },
2671
+ "attributes": {}
2672
+ }
2673
+ },
2674
+ "total_flos": 5.28345287429718e+17,
2675
+ "train_batch_size": 1,
2676
+ "trial_name": null,
2677
+ "trial_params": null
2678
+ }
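
The file above is standard Hugging Face Trainer state: entries carrying "loss", "grad_norm", and "learning_rate" are per-step training logs, while entries carrying "eval_loss" (written every 18 steps here) are periodic evaluations. A minimal sketch for separating the two series from a local copy of this file; the filename "trainer_state.json" and the printout format are illustrative assumptions, and the entries are assumed to live in the Trainer's standard "log_history" list:

```python
# Minimal sketch, assuming the file above is saved locally as
# "trainer_state.json". Training steps carry a "loss" key; periodic
# evaluations carry "eval_loss" instead.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

for e in eval_logs:
    print(f'step {e["step"]:3d}  epoch {e["epoch"]:.2f}  eval_loss {e["eval_loss"]:.4f}')
```

Read this way, the eval_loss trajectory bottoms out at 0.5635 at step 144 (epoch 2.0) and drifts upward afterwards, so with save_steps of 36 the step-144 checkpoint is the one this log would favor.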
vocab.json ADDED
The diff for this file is too large to render. See raw diff