blacksunfm committed
Commit 338b689 · verified · 1 Parent(s): 29758d6

Model save

README.md ADDED
@@ -0,0 +1,68 @@
+ ---
+ base_model: deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
+ library_name: transformers
+ model_name: DeepSeek-R1-Distill-Qwen-1.5B-GRPO
+ tags:
+ - generated_from_trainer
+ - trl
+ - grpo
+ licence: license
+ ---
+
+ # Model Card for DeepSeek-R1-Distill-Qwen-1.5B-GRPO
+
+ This model is a fine-tuned version of [deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B).
+ It has been trained using [TRL](https://github.com/huggingface/trl).
+
+ ## Quick start
+
+ ```python
+ from transformers import pipeline
+
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+ generator = pipeline("text-generation", model="blacksunfm/DeepSeek-R1-Distill-Qwen-1.5B-GRPO", device="cuda")
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
+ print(output["generated_text"])
+ ```
+
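+ For lower-level control, the same call can be sketched with `AutoModelForCausalLM` and the tokenizer's chat template; the sampling values below simply mirror the repository's defaults in `generation_config.json` (temperature 0.6, top-p 0.95). This is an illustrative sketch, not part of the training or evaluation code.
+
+ ```python
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ model_id = "blacksunfm/DeepSeek-R1-Distill-Qwen-1.5B-GRPO"
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16).to("cuda")
+
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+ messages = [{"role": "user", "content": question}]
+ input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
+
+ # Sampling settings follow generation_config.json: do_sample=True, temperature=0.6, top_p=0.95.
+ output_ids = model.generate(input_ids, max_new_tokens=128, do_sample=True, temperature=0.6, top_p=0.95)
+ print(tokenizer.decode(output_ids[0, input_ids.shape[-1]:], skip_special_tokens=True))
+ ```
+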
+ ## Training procedure
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/blacksunfm23-nwpu/huggingface/runs/hvn5cd05)
+
+ This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
+
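+ The training script and reward functions are not included in this commit; the snippet below is only a minimal sketch of how a GRPO run can be set up with TRL's `GRPOTrainer`. The dataset and length-based reward are illustrative placeholders taken from the TRL quickstart, not the accuracy/format/tag-count rewards logged in `trainer_state.json`.
+
+ ```python
+ from datasets import load_dataset
+ from trl import GRPOConfig, GRPOTrainer
+
+ # Placeholder prompt dataset; the actual run used its own data (311 training samples).
+ dataset = load_dataset("trl-lib/tldr", split="train")
+
+ # Toy reward: prefer completions close to 20 characters.
+ def reward_len(completions, **kwargs):
+     return [-abs(20 - len(completion)) for completion in completions]
+
+ training_args = GRPOConfig(output_dir="DeepSeek-R1-Distill-Qwen-1.5B-GRPO")
+ trainer = GRPOTrainer(
+     model="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+     reward_funcs=reward_len,
+     args=training_args,
+     train_dataset=dataset,
+ )
+ trainer.train()
+ ```
+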
+ ### Framework versions
+
+ - TRL: 0.16.0.dev0
+ - Transformers: 4.49.0
+ - Pytorch: 2.5.1
+ - Datasets: 3.3.2
+ - Tokenizers: 0.21.0
+
+ ## Citations
+
+ Cite GRPO as:
+
+ ```bibtex
+ @article{zhihong2024deepseekmath,
+     title        = {{DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models}},
+     author       = {Zhihong Shao and Peiyi Wang and Qihao Zhu and Runxin Xu and Junxiao Song and Mingchuan Zhang and Y. K. Li and Y. Wu and Daya Guo},
+     year         = 2024,
+     eprint       = {arXiv:2402.03300},
+ }
+ ```
+
+ Cite TRL as:
+
+ ```bibtex
+ @misc{vonwerra2022trl,
+     title        = {{TRL: Transformer Reinforcement Learning}},
+     author       = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
+     year         = 2020,
+     journal      = {GitHub repository},
+     publisher    = {GitHub},
+     howpublished = {\url{https://github.com/huggingface/trl}}
+ }
+ ```
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "total_flos": 0.0,
+     "train_loss": 0.13210956121238504,
+     "train_runtime": 6757.7515,
+     "train_samples": 311,
+     "train_samples_per_second": 0.046,
+     "train_steps_per_second": 0.011
+ }
generation_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "_from_model_config": true,
+     "bos_token_id": 151646,
+     "do_sample": true,
+     "eos_token_id": 151643,
+     "temperature": 0.6,
+     "top_p": 0.95,
+     "transformers_version": "4.49.0"
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "total_flos": 0.0,
+     "train_loss": 0.13210956121238504,
+     "train_runtime": 6757.7515,
+     "train_samples": 311,
+     "train_samples_per_second": 0.046,
+     "train_steps_per_second": 0.011
+ }
trainer_state.json ADDED
@@ -0,0 +1,1197 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.9903536977491961,
5
+ "eval_steps": 500,
6
+ "global_step": 77,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "clip_ratio": 0.0,
13
+ "completion_length": 1084.34375,
14
+ "epoch": 0.012861736334405145,
15
+ "grad_norm": 0.41294848918914795,
16
+ "kl": 0.0,
17
+ "learning_rate": 1.25e-07,
18
+ "loss": 0.053,
19
+ "reward": 1.54296875,
20
+ "reward_std": 0.11134403571486473,
21
+ "rewards/accuracy_reward": 1.0,
22
+ "rewards/format_reward": 0.0,
23
+ "rewards/tag_count_reward": 0.54296875,
24
+ "step": 1
25
+ },
26
+ {
27
+ "clip_ratio": 0.0,
28
+ "completion_length": 912.421875,
29
+ "epoch": 0.02572347266881029,
30
+ "grad_norm": 0.5866098403930664,
31
+ "kl": 0.0,
32
+ "learning_rate": 2.5e-07,
33
+ "loss": 0.1483,
34
+ "reward": 1.5625,
35
+ "reward_std": 0.15145022049546242,
36
+ "rewards/accuracy_reward": 1.0,
37
+ "rewards/format_reward": 0.0,
38
+ "rewards/tag_count_reward": 0.5625,
39
+ "step": 2
40
+ },
41
+ {
42
+ "clip_ratio": 0.0,
43
+ "completion_length": 1136.859375,
44
+ "epoch": 0.03858520900321544,
45
+ "grad_norm": 0.5072430372238159,
46
+ "kl": 0.00023818016052246094,
47
+ "learning_rate": 3.75e-07,
48
+ "loss": 0.117,
49
+ "reward": 1.57421875,
50
+ "reward_std": 0.1861907932907343,
51
+ "rewards/accuracy_reward": 1.0,
52
+ "rewards/format_reward": 0.0,
53
+ "rewards/tag_count_reward": 0.57421875,
54
+ "step": 3
55
+ },
56
+ {
57
+ "clip_ratio": 0.0,
58
+ "completion_length": 1155.015625,
59
+ "epoch": 0.05144694533762058,
60
+ "grad_norm": 0.4072016179561615,
61
+ "kl": 0.00027489662170410156,
62
+ "learning_rate": 5e-07,
63
+ "loss": 0.0995,
64
+ "reward": 1.5390625,
65
+ "reward_std": 0.10738485306501389,
66
+ "rewards/accuracy_reward": 1.0,
67
+ "rewards/format_reward": 0.0,
68
+ "rewards/tag_count_reward": 0.5390625,
69
+ "step": 4
70
+ },
71
+ {
72
+ "clip_ratio": 0.0,
73
+ "completion_length": 966.703125,
74
+ "epoch": 0.06430868167202572,
75
+ "grad_norm": 0.5740306377410889,
76
+ "kl": 0.0003867149353027344,
77
+ "learning_rate": 6.249999999999999e-07,
78
+ "loss": 0.1656,
79
+ "reward": 1.57421875,
80
+ "reward_std": 0.11065823957324028,
81
+ "rewards/accuracy_reward": 1.0,
82
+ "rewards/format_reward": 0.0,
83
+ "rewards/tag_count_reward": 0.57421875,
84
+ "step": 5
85
+ },
86
+ {
87
+ "clip_ratio": 0.0,
88
+ "completion_length": 1206.765625,
89
+ "epoch": 0.07717041800643087,
90
+ "grad_norm": 0.4439733028411865,
91
+ "kl": 0.00043845176696777344,
92
+ "learning_rate": 7.5e-07,
93
+ "loss": 0.0946,
94
+ "reward": 1.515625,
95
+ "reward_std": 0.08086910098791122,
96
+ "rewards/accuracy_reward": 1.0,
97
+ "rewards/format_reward": 0.0,
98
+ "rewards/tag_count_reward": 0.515625,
99
+ "step": 6
100
+ },
101
+ {
102
+ "clip_ratio": 0.0,
103
+ "completion_length": 1010.46875,
104
+ "epoch": 0.09003215434083602,
105
+ "grad_norm": 0.2982307970523834,
106
+ "kl": 0.00031566619873046875,
107
+ "learning_rate": 8.75e-07,
108
+ "loss": 0.0943,
109
+ "reward": 1.53125,
110
+ "reward_std": 0.09774631634354591,
111
+ "rewards/accuracy_reward": 1.0,
112
+ "rewards/format_reward": 0.0,
113
+ "rewards/tag_count_reward": 0.53125,
114
+ "step": 7
115
+ },
116
+ {
117
+ "clip_ratio": 0.0,
118
+ "completion_length": 1046.9375,
119
+ "epoch": 0.10289389067524116,
120
+ "grad_norm": 0.4670819938182831,
121
+ "kl": 0.00023698806762695312,
122
+ "learning_rate": 1e-06,
123
+ "loss": 0.1583,
124
+ "reward": 1.5390625,
125
+ "reward_std": 0.13101092725992203,
126
+ "rewards/accuracy_reward": 1.0,
127
+ "rewards/format_reward": 0.0,
128
+ "rewards/tag_count_reward": 0.5390625,
129
+ "step": 8
130
+ },
131
+ {
132
+ "clip_ratio": 0.0,
133
+ "completion_length": 966.28125,
134
+ "epoch": 0.1157556270096463,
135
+ "grad_norm": 0.40219369530677795,
136
+ "kl": 0.0002582073211669922,
137
+ "learning_rate": 9.995336531410273e-07,
138
+ "loss": 0.1739,
139
+ "reward": 1.546875,
140
+ "reward_std": 0.1388406604528427,
141
+ "rewards/accuracy_reward": 1.0,
142
+ "rewards/format_reward": 0.0,
143
+ "rewards/tag_count_reward": 0.546875,
144
+ "step": 9
145
+ },
146
+ {
147
+ "clip_ratio": 0.0,
148
+ "completion_length": 1078.390625,
149
+ "epoch": 0.12861736334405144,
150
+ "grad_norm": 0.291391521692276,
151
+ "kl": 0.00041747093200683594,
152
+ "learning_rate": 9.98135579139189e-07,
153
+ "loss": -0.0025,
154
+ "reward": 1.53125,
155
+ "reward_std": 0.04564354941248894,
156
+ "rewards/accuracy_reward": 1.0,
157
+ "rewards/format_reward": 0.0,
158
+ "rewards/tag_count_reward": 0.53125,
159
+ "step": 10
160
+ },
161
+ {
162
+ "clip_ratio": 0.0,
163
+ "completion_length": 1055.640625,
164
+ "epoch": 0.1414790996784566,
165
+ "grad_norm": 0.34915173053741455,
166
+ "kl": 0.00037789344787597656,
167
+ "learning_rate": 9.958086757163488e-07,
168
+ "loss": 0.0703,
169
+ "reward": 1.56640625,
170
+ "reward_std": 0.10219397768378258,
171
+ "rewards/accuracy_reward": 1.0,
172
+ "rewards/format_reward": 0.0,
173
+ "rewards/tag_count_reward": 0.56640625,
174
+ "step": 11
175
+ },
176
+ {
177
+ "clip_ratio": 0.0,
178
+ "completion_length": 1095.640625,
179
+ "epoch": 0.15434083601286175,
180
+ "grad_norm": 0.550199031829834,
181
+ "kl": 0.0002205371856689453,
182
+ "learning_rate": 9.92557765735184e-07,
183
+ "loss": 0.2093,
184
+ "reward": 1.58203125,
185
+ "reward_std": 0.19091684743762016,
186
+ "rewards/accuracy_reward": 1.0,
187
+ "rewards/format_reward": 0.0,
188
+ "rewards/tag_count_reward": 0.58203125,
189
+ "step": 12
190
+ },
191
+ {
192
+ "clip_ratio": 0.0,
193
+ "completion_length": 1095.921875,
194
+ "epoch": 0.16720257234726688,
195
+ "grad_norm": 0.4369826316833496,
196
+ "kl": 0.00036525726318359375,
197
+ "learning_rate": 9.883895872030657e-07,
198
+ "loss": 0.1704,
199
+ "reward": 1.52734375,
200
+ "reward_std": 0.09039457142353058,
201
+ "rewards/accuracy_reward": 1.0,
202
+ "rewards/format_reward": 0.0,
203
+ "rewards/tag_count_reward": 0.52734375,
204
+ "step": 13
205
+ },
206
+ {
207
+ "clip_ratio": 0.0,
208
+ "completion_length": 837.65625,
209
+ "epoch": 0.18006430868167203,
210
+ "grad_norm": 0.25077515840530396,
211
+ "kl": 0.0001766681671142578,
212
+ "learning_rate": 9.833127793065097e-07,
213
+ "loss": 0.0155,
214
+ "reward": 1.50390625,
215
+ "reward_std": 0.015625,
216
+ "rewards/accuracy_reward": 1.0,
217
+ "rewards/format_reward": 0.0,
218
+ "rewards/tag_count_reward": 0.50390625,
219
+ "step": 14
220
+ },
221
+ {
222
+ "clip_ratio": 0.0,
223
+ "completion_length": 1183.109375,
224
+ "epoch": 0.19292604501607716,
225
+ "grad_norm": 0.42581164836883545,
226
+ "kl": 0.00038433074951171875,
227
+ "learning_rate": 9.773378645051436e-07,
228
+ "loss": 0.1642,
229
+ "reward": 1.5,
230
+ "reward_std": 0.06994619965553284,
231
+ "rewards/accuracy_reward": 1.0,
232
+ "rewards/format_reward": 0.0,
233
+ "rewards/tag_count_reward": 0.5,
234
+ "step": 15
235
+ },
236
+ {
237
+ "clip_ratio": 0.0,
238
+ "completion_length": 1042.71875,
239
+ "epoch": 0.2057877813504823,
240
+ "grad_norm": 0.371883749961853,
241
+ "kl": 0.00041294097900390625,
242
+ "learning_rate": 9.704772267223019e-07,
243
+ "loss": 0.0703,
244
+ "reward": 1.54296875,
245
+ "reward_std": 0.08989579975605011,
246
+ "rewards/accuracy_reward": 1.0,
247
+ "rewards/format_reward": 0.0,
248
+ "rewards/tag_count_reward": 0.54296875,
249
+ "step": 16
250
+ },
251
+ {
252
+ "clip_ratio": 0.0,
253
+ "completion_length": 978.828125,
254
+ "epoch": 0.21864951768488747,
255
+ "grad_norm": 0.4985962212085724,
256
+ "kl": 0.00021839141845703125,
257
+ "learning_rate": 9.627450856774539e-07,
258
+ "loss": 0.079,
259
+ "reward": 1.5703125,
260
+ "reward_std": 0.11574668437242508,
261
+ "rewards/accuracy_reward": 1.0,
262
+ "rewards/format_reward": 0.0,
263
+ "rewards/tag_count_reward": 0.5703125,
264
+ "step": 17
265
+ },
266
+ {
267
+ "clip_ratio": 0.0,
268
+ "completion_length": 850.4375,
269
+ "epoch": 0.2315112540192926,
270
+ "grad_norm": 0.49231991171836853,
271
+ "kl": 0.0002932548522949219,
272
+ "learning_rate": 9.541574674136632e-07,
273
+ "loss": 0.0227,
274
+ "reward": 1.53125,
275
+ "reward_std": 0.0816391110420227,
276
+ "rewards/accuracy_reward": 1.0,
277
+ "rewards/format_reward": 0.0,
278
+ "rewards/tag_count_reward": 0.53125,
279
+ "step": 18
280
+ },
281
+ {
282
+ "clip_ratio": 0.0,
283
+ "completion_length": 1099.109375,
284
+ "epoch": 0.24437299035369775,
285
+ "grad_norm": 0.5843875408172607,
286
+ "kl": 0.0002193450927734375,
287
+ "learning_rate": 9.447321710811674e-07,
288
+ "loss": 0.1417,
289
+ "reward": 1.5390625,
290
+ "reward_std": 0.1500675454735756,
291
+ "rewards/accuracy_reward": 1.0,
292
+ "rewards/format_reward": 0.0,
293
+ "rewards/tag_count_reward": 0.5390625,
294
+ "step": 19
295
+ },
296
+ {
297
+ "clip_ratio": 0.0,
298
+ "completion_length": 1058.03125,
299
+ "epoch": 0.2572347266881029,
300
+ "grad_norm": 0.6623404026031494,
301
+ "kl": 0.0004153251647949219,
302
+ "learning_rate": 9.344887320459198e-07,
303
+ "loss": 0.1164,
304
+ "reward": 1.55078125,
305
+ "reward_std": 0.10405365750193596,
306
+ "rewards/accuracy_reward": 1.0,
307
+ "rewards/format_reward": 0.0,
308
+ "rewards/tag_count_reward": 0.55078125,
309
+ "step": 20
310
+ },
311
+ {
312
+ "clip_ratio": 0.0,
313
+ "completion_length": 820.734375,
314
+ "epoch": 0.27009646302250806,
315
+ "grad_norm": 0.6680777072906494,
316
+ "kl": 0.0003333091735839844,
317
+ "learning_rate": 9.234483813995613e-07,
318
+ "loss": 0.2424,
319
+ "reward": 1.55078125,
320
+ "reward_std": 0.174929304048419,
321
+ "rewards/accuracy_reward": 1.0,
322
+ "rewards/format_reward": 0.0,
323
+ "rewards/tag_count_reward": 0.55078125,
324
+ "step": 21
325
+ },
326
+ {
327
+ "clip_ratio": 0.0,
328
+ "completion_length": 1022.640625,
329
+ "epoch": 0.2829581993569132,
330
+ "grad_norm": 0.6333020329475403,
331
+ "kl": 0.0004470348358154297,
332
+ "learning_rate": 9.116340019547401e-07,
333
+ "loss": 0.0632,
334
+ "reward": 1.51171875,
335
+ "reward_std": 0.08957062661647797,
336
+ "rewards/accuracy_reward": 1.0,
337
+ "rewards/format_reward": 0.0,
338
+ "rewards/tag_count_reward": 0.51171875,
339
+ "step": 22
340
+ },
341
+ {
342
+ "clip_ratio": 0.0,
343
+ "completion_length": 1067.015625,
344
+ "epoch": 0.2958199356913183,
345
+ "grad_norm": 0.5886364579200745,
346
+ "kl": 0.0003254413604736328,
347
+ "learning_rate": 8.990700808169889e-07,
348
+ "loss": 0.1254,
349
+ "reward": 1.5703125,
350
+ "reward_std": 0.20152484998106956,
351
+ "rewards/accuracy_reward": 1.0,
352
+ "rewards/format_reward": 0.0,
353
+ "rewards/tag_count_reward": 0.5703125,
354
+ "step": 23
355
+ },
356
+ {
357
+ "clip_ratio": 0.0,
358
+ "completion_length": 1248.078125,
359
+ "epoch": 0.3086816720257235,
360
+ "grad_norm": 0.6907972693443298,
361
+ "kl": 0.0006000995635986328,
362
+ "learning_rate": 8.857826586314586e-07,
363
+ "loss": 0.0326,
364
+ "reward": 1.54296875,
365
+ "reward_std": 0.13257849216461182,
366
+ "rewards/accuracy_reward": 1.0,
367
+ "rewards/format_reward": 0.0,
368
+ "rewards/tag_count_reward": 0.54296875,
369
+ "step": 24
370
+ },
371
+ {
372
+ "clip_ratio": 0.0,
373
+ "completion_length": 1049.546875,
374
+ "epoch": 0.3215434083601286,
375
+ "grad_norm": 0.45920121669769287,
376
+ "kl": 0.0003612041473388672,
377
+ "learning_rate": 8.717992756097047e-07,
378
+ "loss": 0.0834,
379
+ "reward": 1.5546875,
380
+ "reward_std": 0.1335771605372429,
381
+ "rewards/accuracy_reward": 1.0,
382
+ "rewards/format_reward": 0.0,
383
+ "rewards/tag_count_reward": 0.5546875,
384
+ "step": 25
385
+ },
386
+ {
387
+ "clip_ratio": 0.0,
388
+ "completion_length": 912.46875,
389
+ "epoch": 0.33440514469453375,
390
+ "grad_norm": 0.336818665266037,
391
+ "kl": 0.00023627281188964844,
392
+ "learning_rate": 8.571489144483944e-07,
393
+ "loss": 0.04,
394
+ "reward": 1.51953125,
395
+ "reward_std": 0.06524410098791122,
396
+ "rewards/accuracy_reward": 1.0,
397
+ "rewards/format_reward": 0.0,
398
+ "rewards/tag_count_reward": 0.51953125,
399
+ "step": 26
400
+ },
401
+ {
402
+ "clip_ratio": 0.0,
403
+ "completion_length": 1066.484375,
404
+ "epoch": 0.34726688102893893,
405
+ "grad_norm": 0.39180704951286316,
406
+ "kl": 0.00023651123046875,
407
+ "learning_rate": 8.418619402582402e-07,
408
+ "loss": 0.1659,
409
+ "reward": 1.546875,
410
+ "reward_std": 0.12253788113594055,
411
+ "rewards/accuracy_reward": 1.0,
412
+ "rewards/format_reward": 0.0,
413
+ "rewards/tag_count_reward": 0.546875,
414
+ "step": 27
415
+ },
416
+ {
417
+ "clip_ratio": 0.0,
418
+ "completion_length": 1128.390625,
419
+ "epoch": 0.36012861736334406,
420
+ "grad_norm": 0.7383729815483093,
421
+ "kl": 0.0007467269897460938,
422
+ "learning_rate": 8.259700376276723e-07,
423
+ "loss": 0.15,
424
+ "reward": 1.578125,
425
+ "reward_std": 0.14458869770169258,
426
+ "rewards/accuracy_reward": 1.0,
427
+ "rewards/format_reward": 0.0,
428
+ "rewards/tag_count_reward": 0.578125,
429
+ "step": 28
430
+ },
431
+ {
432
+ "clip_ratio": 0.0,
433
+ "completion_length": 1031.703125,
434
+ "epoch": 0.3729903536977492,
435
+ "grad_norm": 0.730670154094696,
436
+ "kl": 0.0005695819854736328,
437
+ "learning_rate": 8.095061449516902e-07,
438
+ "loss": -0.0424,
439
+ "reward": 1.55859375,
440
+ "reward_std": 0.11572204157710075,
441
+ "rewards/accuracy_reward": 1.0,
442
+ "rewards/format_reward": 0.0,
443
+ "rewards/tag_count_reward": 0.55859375,
444
+ "step": 29
445
+ },
446
+ {
447
+ "clip_ratio": 0.0,
448
+ "completion_length": 1088.359375,
449
+ "epoch": 0.3858520900321543,
450
+ "grad_norm": 0.4401358366012573,
451
+ "kl": 0.0004668235778808594,
452
+ "learning_rate": 7.92504386162009e-07,
453
+ "loss": 0.0101,
454
+ "reward": 1.58984375,
455
+ "reward_std": 0.10618764162063599,
456
+ "rewards/accuracy_reward": 1.0,
457
+ "rewards/format_reward": 0.0,
458
+ "rewards/tag_count_reward": 0.58984375,
459
+ "step": 30
460
+ },
461
+ {
462
+ "clip_ratio": 0.0,
463
+ "completion_length": 970.046875,
464
+ "epoch": 0.3987138263665595,
465
+ "grad_norm": 0.57388836145401,
466
+ "kl": 0.00038433074951171875,
467
+ "learning_rate": 7.75e-07,
468
+ "loss": 0.0789,
469
+ "reward": 1.64453125,
470
+ "reward_std": 0.18705939501523972,
471
+ "rewards/accuracy_reward": 1.0,
472
+ "rewards/format_reward": 0.0,
473
+ "rewards/tag_count_reward": 0.64453125,
474
+ "step": 31
475
+ },
476
+ {
477
+ "clip_ratio": 0.0,
478
+ "completion_length": 852.6875,
479
+ "epoch": 0.4115755627009646,
480
+ "grad_norm": 0.5620797872543335,
481
+ "kl": 0.0002961158752441406,
482
+ "learning_rate": 7.570292669790184e-07,
483
+ "loss": 0.0866,
484
+ "reward": 1.6171875,
485
+ "reward_std": 0.21375522762537003,
486
+ "rewards/accuracy_reward": 1.0,
487
+ "rewards/format_reward": 0.0,
488
+ "rewards/tag_count_reward": 0.6171875,
489
+ "step": 32
490
+ },
491
+ {
492
+ "clip_ratio": 0.0,
493
+ "completion_length": 1189.84375,
494
+ "epoch": 0.42443729903536975,
495
+ "grad_norm": 0.4780595302581787,
496
+ "kl": 0.00036525726318359375,
497
+ "learning_rate": 7.38629434187499e-07,
498
+ "loss": -0.0424,
499
+ "reward": 1.5546875,
500
+ "reward_std": 0.10984911397099495,
501
+ "rewards/accuracy_reward": 1.0,
502
+ "rewards/format_reward": 0.0,
503
+ "rewards/tag_count_reward": 0.5546875,
504
+ "step": 33
505
+ },
506
+ {
507
+ "clip_ratio": 0.0,
508
+ "completion_length": 1082.984375,
509
+ "epoch": 0.43729903536977494,
510
+ "grad_norm": 0.6845836043357849,
511
+ "kl": 0.0010368824005126953,
512
+ "learning_rate": 7.198386380886764e-07,
513
+ "loss": 0.0489,
514
+ "reward": 1.57421875,
515
+ "reward_std": 0.15323764458298683,
516
+ "rewards/accuracy_reward": 1.0,
517
+ "rewards/format_reward": 0.0,
518
+ "rewards/tag_count_reward": 0.57421875,
519
+ "step": 34
520
+ },
521
+ {
522
+ "clip_ratio": 0.0,
523
+ "completion_length": 1034.34375,
524
+ "epoch": 0.45016077170418006,
525
+ "grad_norm": 0.46563801169395447,
526
+ "kl": 0.0003986358642578125,
527
+ "learning_rate": 7.006958254769437e-07,
528
+ "loss": 0.166,
529
+ "reward": 1.57421875,
530
+ "reward_std": 0.13553934916853905,
531
+ "rewards/accuracy_reward": 1.0,
532
+ "rewards/format_reward": 0.0,
533
+ "rewards/tag_count_reward": 0.57421875,
534
+ "step": 35
535
+ },
536
+ {
537
+ "clip_ratio": 0.0,
538
+ "completion_length": 918.84375,
539
+ "epoch": 0.4630225080385852,
540
+ "grad_norm": 0.5318493247032166,
541
+ "kl": 0.0004329681396484375,
542
+ "learning_rate": 6.812406727546712e-07,
543
+ "loss": 0.0854,
544
+ "reward": 1.5625,
545
+ "reward_std": 0.1329439952969551,
546
+ "rewards/accuracy_reward": 1.0,
547
+ "rewards/format_reward": 0.0,
548
+ "rewards/tag_count_reward": 0.5625,
549
+ "step": 36
550
+ },
551
+ {
552
+ "clip_ratio": 0.0,
553
+ "completion_length": 984.84375,
554
+ "epoch": 0.4758842443729904,
555
+ "grad_norm": 0.5245525240898132,
556
+ "kl": 0.00041675567626953125,
557
+ "learning_rate": 6.615135036968049e-07,
558
+ "loss": 0.1983,
559
+ "reward": 1.640625,
560
+ "reward_std": 0.18561063334345818,
561
+ "rewards/accuracy_reward": 1.0,
562
+ "rewards/format_reward": 0.015625,
563
+ "rewards/tag_count_reward": 0.625,
564
+ "step": 37
565
+ },
566
+ {
567
+ "clip_ratio": 0.0,
568
+ "completion_length": 948.4375,
569
+ "epoch": 0.4887459807073955,
570
+ "grad_norm": 0.617463231086731,
571
+ "kl": 0.0012621879577636719,
572
+ "learning_rate": 6.415552058736853e-07,
573
+ "loss": 0.1642,
574
+ "reward": 1.65234375,
575
+ "reward_std": 0.205689437687397,
576
+ "rewards/accuracy_reward": 1.0,
577
+ "rewards/format_reward": 0.0,
578
+ "rewards/tag_count_reward": 0.65234375,
579
+ "step": 38
580
+ },
581
+ {
582
+ "clip_ratio": 0.0,
583
+ "completion_length": 824.5,
584
+ "epoch": 0.5016077170418006,
585
+ "grad_norm": 0.6972614526748657,
586
+ "kl": 0.000629425048828125,
587
+ "learning_rate": 6.21407145905313e-07,
588
+ "loss": 0.1105,
589
+ "reward": 1.6484375,
590
+ "reward_std": 0.2235238291323185,
591
+ "rewards/accuracy_reward": 1.0,
592
+ "rewards/format_reward": 0.0,
593
+ "rewards/tag_count_reward": 0.6484375,
594
+ "step": 39
595
+ },
596
+ {
597
+ "clip_ratio": 0.0,
598
+ "completion_length": 1114.5,
599
+ "epoch": 0.5144694533762058,
600
+ "grad_norm": 0.5985956788063049,
601
+ "kl": 0.0006375312805175781,
602
+ "learning_rate": 6.011110837227137e-07,
603
+ "loss": 0.1203,
604
+ "reward": 1.6015625,
605
+ "reward_std": 0.17044181004166603,
606
+ "rewards/accuracy_reward": 1.0,
607
+ "rewards/format_reward": 0.0,
608
+ "rewards/tag_count_reward": 0.6015625,
609
+ "step": 40
610
+ },
611
+ {
612
+ "clip_ratio": 0.0,
613
+ "completion_length": 884.921875,
614
+ "epoch": 0.5273311897106109,
615
+ "grad_norm": 0.5041890740394592,
616
+ "kl": 0.000698089599609375,
617
+ "learning_rate": 5.80709086014102e-07,
618
+ "loss": 0.1753,
619
+ "reward": 1.609375,
620
+ "reward_std": 0.19224052503705025,
621
+ "rewards/accuracy_reward": 1.0,
622
+ "rewards/format_reward": 0.0,
623
+ "rewards/tag_count_reward": 0.609375,
624
+ "step": 41
625
+ },
626
+ {
627
+ "clip_ratio": 0.0,
628
+ "completion_length": 1000.265625,
629
+ "epoch": 0.5401929260450161,
630
+ "grad_norm": 0.5194684267044067,
631
+ "kl": 0.000476837158203125,
632
+ "learning_rate": 5.602434390352476e-07,
633
+ "loss": 0.1213,
634
+ "reward": 1.6015625,
635
+ "reward_std": 0.19315097481012344,
636
+ "rewards/accuracy_reward": 1.0,
637
+ "rewards/format_reward": 0.0,
638
+ "rewards/tag_count_reward": 0.6015625,
639
+ "step": 42
640
+ },
641
+ {
642
+ "clip_ratio": 0.0,
643
+ "completion_length": 973.03125,
644
+ "epoch": 0.5530546623794212,
645
+ "grad_norm": 0.6358352303504944,
646
+ "kl": 0.0004420280456542969,
647
+ "learning_rate": 5.397565609647524e-07,
648
+ "loss": 0.1533,
649
+ "reward": 1.671875,
650
+ "reward_std": 0.20554664731025696,
651
+ "rewards/accuracy_reward": 1.0,
652
+ "rewards/format_reward": 0.0,
653
+ "rewards/tag_count_reward": 0.671875,
654
+ "step": 43
655
+ },
656
+ {
657
+ "clip_ratio": 0.0,
658
+ "completion_length": 1015.921875,
659
+ "epoch": 0.5659163987138264,
660
+ "grad_norm": 0.5505901575088501,
661
+ "kl": 0.0008034706115722656,
662
+ "learning_rate": 5.192909139858981e-07,
663
+ "loss": 0.1629,
664
+ "reward": 1.59375,
665
+ "reward_std": 0.16546358913183212,
666
+ "rewards/accuracy_reward": 1.0,
667
+ "rewards/format_reward": 0.0,
668
+ "rewards/tag_count_reward": 0.59375,
669
+ "step": 44
670
+ },
671
+ {
672
+ "clip_ratio": 0.0,
673
+ "completion_length": 726.28125,
674
+ "epoch": 0.5787781350482315,
675
+ "grad_norm": 0.6235621571540833,
676
+ "kl": 0.0012774467468261719,
677
+ "learning_rate": 4.988889162772862e-07,
678
+ "loss": 0.2198,
679
+ "reward": 1.640625,
680
+ "reward_std": 0.19715734012424946,
681
+ "rewards/accuracy_reward": 1.0,
682
+ "rewards/format_reward": 0.0,
683
+ "rewards/tag_count_reward": 0.640625,
684
+ "step": 45
685
+ },
686
+ {
687
+ "clip_ratio": 0.0,
688
+ "completion_length": 1159.125,
689
+ "epoch": 0.5916398713826366,
690
+ "grad_norm": 0.5912997722625732,
691
+ "kl": 0.0010008811950683594,
692
+ "learning_rate": 4.785928540946868e-07,
693
+ "loss": 0.179,
694
+ "reward": 1.56640625,
695
+ "reward_std": 0.12528811395168304,
696
+ "rewards/accuracy_reward": 1.0,
697
+ "rewards/format_reward": 0.0,
698
+ "rewards/tag_count_reward": 0.56640625,
699
+ "step": 46
700
+ },
701
+ {
702
+ "clip_ratio": 0.0,
703
+ "completion_length": 1131.15625,
704
+ "epoch": 0.6045016077170418,
705
+ "grad_norm": 0.521943986415863,
706
+ "kl": 0.0008096694946289062,
707
+ "learning_rate": 4.584447941263149e-07,
708
+ "loss": 0.0605,
709
+ "reward": 1.58984375,
710
+ "reward_std": 0.12631270289421082,
711
+ "rewards/accuracy_reward": 1.0,
712
+ "rewards/format_reward": 0.0,
713
+ "rewards/tag_count_reward": 0.58984375,
714
+ "step": 47
715
+ },
716
+ {
717
+ "clip_ratio": 0.0,
718
+ "completion_length": 1118.828125,
719
+ "epoch": 0.617363344051447,
720
+ "grad_norm": 0.4915071725845337,
721
+ "kl": 0.0010766983032226562,
722
+ "learning_rate": 4.384864963031951e-07,
723
+ "loss": 0.2512,
724
+ "reward": 1.6484375,
725
+ "reward_std": 0.2036767303943634,
726
+ "rewards/accuracy_reward": 1.0,
727
+ "rewards/format_reward": 0.0,
728
+ "rewards/tag_count_reward": 0.6484375,
729
+ "step": 48
730
+ },
731
+ {
732
+ "clip_ratio": 0.0,
733
+ "completion_length": 1062.84375,
734
+ "epoch": 0.6302250803858521,
735
+ "grad_norm": 0.529114305973053,
736
+ "kl": 0.0007066726684570312,
737
+ "learning_rate": 4.187593272453288e-07,
738
+ "loss": 0.1496,
739
+ "reward": 1.60546875,
740
+ "reward_std": 0.15380773693323135,
741
+ "rewards/accuracy_reward": 1.0,
742
+ "rewards/format_reward": 0.0,
743
+ "rewards/tag_count_reward": 0.60546875,
744
+ "step": 49
745
+ },
746
+ {
747
+ "clip_ratio": 0.0,
748
+ "completion_length": 694.140625,
749
+ "epoch": 0.6430868167202572,
750
+ "grad_norm": 0.6105093359947205,
751
+ "kl": 0.0016994476318359375,
752
+ "learning_rate": 3.993041745230562e-07,
753
+ "loss": 0.3051,
754
+ "reward": 1.203125,
755
+ "reward_std": 0.2077712006866932,
756
+ "rewards/accuracy_reward": 0.5,
757
+ "rewards/format_reward": 0.0,
758
+ "rewards/tag_count_reward": 0.703125,
759
+ "step": 50
760
+ },
761
+ {
762
+ "clip_ratio": 0.0,
763
+ "completion_length": 982.140625,
764
+ "epoch": 0.6559485530546624,
765
+ "grad_norm": 0.540160596370697,
766
+ "kl": 0.0007996559143066406,
767
+ "learning_rate": 3.8016136191132354e-07,
768
+ "loss": 0.0934,
769
+ "reward": 1.609375,
770
+ "reward_std": 0.1588139720261097,
771
+ "rewards/accuracy_reward": 1.0,
772
+ "rewards/format_reward": 0.0,
773
+ "rewards/tag_count_reward": 0.609375,
774
+ "step": 51
775
+ },
776
+ {
777
+ "clip_ratio": 0.0,
778
+ "completion_length": 937.734375,
779
+ "epoch": 0.6688102893890675,
780
+ "grad_norm": 0.6522411108016968,
781
+ "kl": 0.0015106201171875,
782
+ "learning_rate": 3.6137056581250137e-07,
783
+ "loss": 0.1704,
784
+ "reward": 1.6171875,
785
+ "reward_std": 0.1773756481707096,
786
+ "rewards/accuracy_reward": 1.0,
787
+ "rewards/format_reward": 0.0,
788
+ "rewards/tag_count_reward": 0.6171875,
789
+ "step": 52
790
+ },
791
+ {
792
+ "clip_ratio": 0.0,
793
+ "completion_length": 831.9375,
794
+ "epoch": 0.6816720257234726,
795
+ "grad_norm": 0.6507072448730469,
796
+ "kl": 0.0011072158813476562,
797
+ "learning_rate": 3.4297073302098155e-07,
798
+ "loss": 0.1508,
799
+ "reward": 1.65625,
800
+ "reward_std": 0.1978173814713955,
801
+ "rewards/accuracy_reward": 1.0,
802
+ "rewards/format_reward": 0.0,
803
+ "rewards/tag_count_reward": 0.65625,
804
+ "step": 53
805
+ },
806
+ {
807
+ "clip_ratio": 0.0,
808
+ "completion_length": 1121.0625,
809
+ "epoch": 0.6945337620578779,
810
+ "grad_norm": 0.5701295137405396,
811
+ "kl": 0.000888824462890625,
812
+ "learning_rate": 3.250000000000001e-07,
813
+ "loss": 0.204,
814
+ "reward": 1.65234375,
815
+ "reward_std": 0.20623698085546494,
816
+ "rewards/accuracy_reward": 1.0,
817
+ "rewards/format_reward": 0.0,
818
+ "rewards/tag_count_reward": 0.65234375,
819
+ "step": 54
820
+ },
821
+ {
822
+ "clip_ratio": 0.0,
823
+ "completion_length": 880.375,
824
+ "epoch": 0.707395498392283,
825
+ "grad_norm": 0.7120354175567627,
826
+ "kl": 0.0023937225341796875,
827
+ "learning_rate": 3.0749561383799107e-07,
828
+ "loss": 0.1468,
829
+ "reward": 1.6640625,
830
+ "reward_std": 0.2058405578136444,
831
+ "rewards/accuracy_reward": 1.0,
832
+ "rewards/format_reward": 0.0,
833
+ "rewards/tag_count_reward": 0.6640625,
834
+ "step": 55
835
+ },
836
+ {
837
+ "clip_ratio": 0.0,
838
+ "completion_length": 1107.71875,
839
+ "epoch": 0.7202572347266881,
840
+ "grad_norm": 0.49433115124702454,
841
+ "kl": 0.0011415481567382812,
842
+ "learning_rate": 2.904938550483098e-07,
843
+ "loss": 0.2208,
844
+ "reward": 1.62890625,
845
+ "reward_std": 0.18008438870310783,
846
+ "rewards/accuracy_reward": 1.0,
847
+ "rewards/format_reward": 0.0,
848
+ "rewards/tag_count_reward": 0.62890625,
849
+ "step": 56
850
+ },
851
+ {
852
+ "clip_ratio": 0.0,
853
+ "completion_length": 1063.40625,
854
+ "epoch": 0.7331189710610932,
855
+ "grad_norm": 0.5648707151412964,
856
+ "kl": 0.0015516281127929688,
857
+ "learning_rate": 2.7402996237232757e-07,
858
+ "loss": 0.1323,
859
+ "reward": 1.58203125,
860
+ "reward_std": 0.1835322491824627,
861
+ "rewards/accuracy_reward": 1.0,
862
+ "rewards/format_reward": 0.0,
863
+ "rewards/tag_count_reward": 0.58203125,
864
+ "step": 57
865
+ },
866
+ {
867
+ "clip_ratio": 0.0,
868
+ "completion_length": 842.5625,
869
+ "epoch": 0.7459807073954984,
870
+ "grad_norm": 0.6252850890159607,
871
+ "kl": 0.0014705657958984375,
872
+ "learning_rate": 2.5813805974175984e-07,
873
+ "loss": 0.2248,
874
+ "reward": 1.625,
875
+ "reward_std": 0.18506385013461113,
876
+ "rewards/accuracy_reward": 1.0,
877
+ "rewards/format_reward": 0.0,
878
+ "rewards/tag_count_reward": 0.625,
879
+ "step": 58
880
+ },
881
+ {
882
+ "clip_ratio": 0.0,
883
+ "completion_length": 879.40625,
884
+ "epoch": 0.7588424437299035,
885
+ "grad_norm": 0.628139317035675,
886
+ "kl": 0.0014867782592773438,
887
+ "learning_rate": 2.4285108555160575e-07,
888
+ "loss": 0.0506,
889
+ "reward": 1.60546875,
890
+ "reward_std": 0.18587078154087067,
891
+ "rewards/accuracy_reward": 1.0,
892
+ "rewards/format_reward": 0.0,
893
+ "rewards/tag_count_reward": 0.60546875,
894
+ "step": 59
895
+ },
896
+ {
897
+ "clip_ratio": 0.0,
898
+ "completion_length": 1132.859375,
899
+ "epoch": 0.7717041800643086,
900
+ "grad_norm": 0.4942401647567749,
901
+ "kl": 0.00098419189453125,
902
+ "learning_rate": 2.2820072439029523e-07,
903
+ "loss": 0.1978,
904
+ "reward": 1.671875,
905
+ "reward_std": 0.2286946102976799,
906
+ "rewards/accuracy_reward": 1.0,
907
+ "rewards/format_reward": 0.0,
908
+ "rewards/tag_count_reward": 0.671875,
909
+ "step": 60
910
+ },
911
+ {
912
+ "clip_ratio": 0.0,
913
+ "completion_length": 1108.09375,
914
+ "epoch": 0.7845659163987139,
915
+ "grad_norm": 0.6833640336990356,
916
+ "kl": 0.0009565353393554688,
917
+ "learning_rate": 2.1421734136854153e-07,
918
+ "loss": 0.1842,
919
+ "reward": 1.625,
920
+ "reward_std": 0.20673153176903725,
921
+ "rewards/accuracy_reward": 1.0,
922
+ "rewards/format_reward": 0.0,
923
+ "rewards/tag_count_reward": 0.625,
924
+ "step": 61
925
+ },
926
+ {
927
+ "clip_ratio": 0.0,
928
+ "completion_length": 1164.8125,
929
+ "epoch": 0.797427652733119,
930
+ "grad_norm": 0.550803005695343,
931
+ "kl": 0.0009765625,
932
+ "learning_rate": 2.0092991918301106e-07,
933
+ "loss": 0.0963,
934
+ "reward": 1.578125,
935
+ "reward_std": 0.18822026252746582,
936
+ "rewards/accuracy_reward": 1.0,
937
+ "rewards/format_reward": 0.0,
938
+ "rewards/tag_count_reward": 0.578125,
939
+ "step": 62
940
+ },
941
+ {
942
+ "clip_ratio": 0.0,
943
+ "completion_length": 1222.84375,
944
+ "epoch": 0.8102893890675241,
945
+ "grad_norm": 0.47955459356307983,
946
+ "kl": 0.0008296966552734375,
947
+ "learning_rate": 1.883659980452598e-07,
948
+ "loss": 0.092,
949
+ "reward": 1.640625,
950
+ "reward_std": 0.1938280612230301,
951
+ "rewards/accuracy_reward": 1.0,
952
+ "rewards/format_reward": 0.0,
953
+ "rewards/tag_count_reward": 0.640625,
954
+ "step": 63
955
+ },
956
+ {
957
+ "clip_ratio": 0.0,
958
+ "completion_length": 1009.125,
959
+ "epoch": 0.8231511254019293,
960
+ "grad_norm": 0.6472100615501404,
961
+ "kl": 0.0012683868408203125,
962
+ "learning_rate": 1.765516186004387e-07,
963
+ "loss": 0.1924,
964
+ "reward": 1.671875,
965
+ "reward_std": 0.20133696496486664,
966
+ "rewards/accuracy_reward": 1.0,
967
+ "rewards/format_reward": 0.0,
968
+ "rewards/tag_count_reward": 0.671875,
969
+ "step": 64
970
+ },
971
+ {
972
+ "clip_ratio": 0.0,
973
+ "completion_length": 997.796875,
974
+ "epoch": 0.8360128617363344,
975
+ "grad_norm": 0.6434372067451477,
976
+ "kl": 0.00380706787109375,
977
+ "learning_rate": 1.6551126795408015e-07,
978
+ "loss": 0.2009,
979
+ "reward": 1.66796875,
980
+ "reward_std": 0.15900417417287827,
981
+ "rewards/accuracy_reward": 1.0,
982
+ "rewards/format_reward": 0.0,
983
+ "rewards/tag_count_reward": 0.66796875,
984
+ "step": 65
985
+ },
986
+ {
987
+ "clip_ratio": 0.0,
988
+ "completion_length": 942.125,
989
+ "epoch": 0.8488745980707395,
990
+ "grad_norm": 0.5874162912368774,
991
+ "kl": 0.0014162063598632812,
992
+ "learning_rate": 1.552678289188326e-07,
993
+ "loss": 0.2217,
994
+ "reward": 1.6484375,
995
+ "reward_std": 0.18080222606658936,
996
+ "rewards/accuracy_reward": 1.0,
997
+ "rewards/format_reward": 0.0,
998
+ "rewards/tag_count_reward": 0.6484375,
999
+ "step": 66
1000
+ },
1001
+ {
1002
+ "clip_ratio": 0.0,
1003
+ "completion_length": 847.015625,
1004
+ "epoch": 0.8617363344051447,
1005
+ "grad_norm": 0.6690056920051575,
1006
+ "kl": 0.0014781951904296875,
1007
+ "learning_rate": 1.4584253258633681e-07,
1008
+ "loss": 0.1697,
1009
+ "reward": 1.6484375,
1010
+ "reward_std": 0.20908700302243233,
1011
+ "rewards/accuracy_reward": 1.0,
1012
+ "rewards/format_reward": 0.0,
1013
+ "rewards/tag_count_reward": 0.6484375,
1014
+ "step": 67
1015
+ },
1016
+ {
1017
+ "clip_ratio": 0.0,
1018
+ "completion_length": 967.515625,
1019
+ "epoch": 0.8745980707395499,
1020
+ "grad_norm": 0.6969915628433228,
1021
+ "kl": 0.0023241043090820312,
1022
+ "learning_rate": 1.3725491432254623e-07,
1023
+ "loss": 0.0535,
1024
+ "reward": 1.64453125,
1025
+ "reward_std": 0.12684179469943047,
1026
+ "rewards/accuracy_reward": 1.0,
1027
+ "rewards/format_reward": 0.0,
1028
+ "rewards/tag_count_reward": 0.64453125,
1029
+ "step": 68
1030
+ },
1031
+ {
1032
+ "clip_ratio": 0.0,
1033
+ "completion_length": 1140.578125,
1034
+ "epoch": 0.887459807073955,
1035
+ "grad_norm": 0.5747631192207336,
1036
+ "kl": 0.0011663436889648438,
1037
+ "learning_rate": 1.2952277327769804e-07,
1038
+ "loss": 0.1446,
1039
+ "reward": 1.6171875,
1040
+ "reward_std": 0.1967878546565771,
1041
+ "rewards/accuracy_reward": 1.0,
1042
+ "rewards/format_reward": 0.0,
1043
+ "rewards/tag_count_reward": 0.6171875,
1044
+ "step": 69
1045
+ },
1046
+ {
1047
+ "clip_ratio": 0.0,
1048
+ "completion_length": 1219.296875,
1049
+ "epoch": 0.9003215434083601,
1050
+ "grad_norm": 0.5590356588363647,
1051
+ "kl": 0.0020923614501953125,
1052
+ "learning_rate": 1.2266213549485637e-07,
1053
+ "loss": 0.1291,
1054
+ "reward": 1.58984375,
1055
+ "reward_std": 0.16909249685704708,
1056
+ "rewards/accuracy_reward": 1.0,
1057
+ "rewards/format_reward": 0.0,
1058
+ "rewards/tag_count_reward": 0.58984375,
1059
+ "step": 70
1060
+ },
1061
+ {
1062
+ "clip_ratio": 0.0,
1063
+ "completion_length": 871.53125,
1064
+ "epoch": 0.9131832797427653,
1065
+ "grad_norm": 0.6128804087638855,
1066
+ "kl": 0.002285003662109375,
1067
+ "learning_rate": 1.166872206934904e-07,
1068
+ "loss": 0.2013,
1069
+ "reward": 1.66015625,
1070
+ "reward_std": 0.18546020612120628,
1071
+ "rewards/accuracy_reward": 1.0,
1072
+ "rewards/format_reward": 0.0,
1073
+ "rewards/tag_count_reward": 0.66015625,
1074
+ "step": 71
1075
+ },
1076
+ {
1077
+ "clip_ratio": 0.0,
1078
+ "completion_length": 804.84375,
1079
+ "epoch": 0.9260450160771704,
1080
+ "grad_norm": 0.6195582151412964,
1081
+ "kl": 0.00205230712890625,
1082
+ "learning_rate": 1.1161041279693445e-07,
1083
+ "loss": 0.3434,
1084
+ "reward": 1.71875,
1085
+ "reward_std": 0.2264367826282978,
1086
+ "rewards/accuracy_reward": 1.0,
1087
+ "rewards/format_reward": 0.0,
1088
+ "rewards/tag_count_reward": 0.71875,
1089
+ "step": 72
1090
+ },
1091
+ {
1092
+ "clip_ratio": 0.0,
1093
+ "completion_length": 1082.171875,
1094
+ "epoch": 0.9389067524115756,
1095
+ "grad_norm": 0.6192907691001892,
1096
+ "kl": 0.0029478073120117188,
1097
+ "learning_rate": 1.074422342648161e-07,
1098
+ "loss": 0.2337,
1099
+ "reward": 1.625,
1100
+ "reward_std": 0.2141621820628643,
1101
+ "rewards/accuracy_reward": 1.0,
1102
+ "rewards/format_reward": 0.0,
1103
+ "rewards/tag_count_reward": 0.625,
1104
+ "step": 73
1105
+ },
1106
+ {
1107
+ "clip_ratio": 0.0,
1108
+ "completion_length": 614.75,
1109
+ "epoch": 0.9517684887459807,
1110
+ "grad_norm": 0.75156170129776,
1111
+ "kl": 0.0022068023681640625,
1112
+ "learning_rate": 1.0419132428365116e-07,
1113
+ "loss": 0.1886,
1114
+ "reward": 1.796875,
1115
+ "reward_std": 0.18980605155229568,
1116
+ "rewards/accuracy_reward": 1.0,
1117
+ "rewards/format_reward": 0.0,
1118
+ "rewards/tag_count_reward": 0.796875,
1119
+ "step": 74
1120
+ },
1121
+ {
1122
+ "clip_ratio": 0.0,
1123
+ "completion_length": 1100.421875,
1124
+ "epoch": 0.9646302250803859,
1125
+ "grad_norm": 0.4935654401779175,
1126
+ "kl": 0.0010290145874023438,
1127
+ "learning_rate": 1.0186442086081092e-07,
1128
+ "loss": 0.0693,
1129
+ "reward": 1.59375,
1130
+ "reward_std": 0.19873745739459991,
1131
+ "rewards/accuracy_reward": 1.0,
1132
+ "rewards/format_reward": 0.0,
1133
+ "rewards/tag_count_reward": 0.59375,
1134
+ "step": 75
1135
+ },
1136
+ {
1137
+ "clip_ratio": 0.0,
1138
+ "completion_length": 1082.59375,
1139
+ "epoch": 0.977491961414791,
1140
+ "grad_norm": 0.4804929196834564,
1141
+ "kl": 0.0011501312255859375,
1142
+ "learning_rate": 1.004663468589726e-07,
1143
+ "loss": 0.1592,
1144
+ "reward": 1.64453125,
1145
+ "reward_std": 0.19965127855539322,
1146
+ "rewards/accuracy_reward": 1.0,
1147
+ "rewards/format_reward": 0.0,
1148
+ "rewards/tag_count_reward": 0.64453125,
1149
+ "step": 76
1150
+ },
1151
+ {
1152
+ "clip_ratio": 0.0,
1153
+ "completion_length": 972.4643249511719,
1154
+ "epoch": 0.9903536977491961,
1155
+ "grad_norm": 0.5502123236656189,
1156
+ "kl": 0.0015010833740234375,
1157
+ "learning_rate": 1e-07,
1158
+ "loss": 0.0737,
1159
+ "reward": 1.6640625,
1160
+ "reward_std": 0.212241068482399,
1161
+ "rewards/accuracy_reward": 1.0,
1162
+ "rewards/format_reward": 0.0,
1163
+ "rewards/tag_count_reward": 0.6640625,
1164
+ "step": 77
1165
+ },
1166
+ {
1167
+ "epoch": 0.9903536977491961,
1168
+ "step": 77,
1169
+ "total_flos": 0.0,
1170
+ "train_loss": 0.13210956121238504,
1171
+ "train_runtime": 6757.7515,
1172
+ "train_samples_per_second": 0.046,
1173
+ "train_steps_per_second": 0.011
1174
+ }
1175
+ ],
1176
+ "logging_steps": 1,
1177
+ "max_steps": 77,
1178
+ "num_input_tokens_seen": 0,
1179
+ "num_train_epochs": 1,
1180
+ "save_steps": 500,
1181
+ "stateful_callbacks": {
1182
+ "TrainerControl": {
1183
+ "args": {
1184
+ "should_epoch_stop": false,
1185
+ "should_evaluate": false,
1186
+ "should_log": false,
1187
+ "should_save": true,
1188
+ "should_training_stop": true
1189
+ },
1190
+ "attributes": {}
1191
+ }
1192
+ },
1193
+ "total_flos": 0.0,
1194
+ "train_batch_size": 16,
1195
+ "trial_name": null,
1196
+ "trial_params": null
1197
+ }