grpo_output_checkpoint_157 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 157,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"completion_length": 161.1875,
"epoch": 0.006389776357827476,
"grad_norm": 3.421875,
"kl": 0.0,
"learning_rate": 9.97863247863248e-07,
"loss": -0.0,
"reward": 0.8671875,
"reward_std": 0.061278700828552246,
"rewards/probe_reward_fn": 0.8671875,
"step": 1
},
{
"completion_length": 219.734375,
"epoch": 0.012779552715654952,
"grad_norm": 5.0625,
"kl": 0.001749292016029358,
"learning_rate": 9.957264957264956e-07,
"loss": 0.0001,
"reward": 0.5859375,
"reward_std": 0.20801588520407677,
"rewards/probe_reward_fn": 0.5859375,
"step": 2
},
{
"completion_length": 219.109375,
"epoch": 0.019169329073482427,
"grad_norm": 4.8125,
"kl": 0.003471970558166504,
"learning_rate": 9.935897435897436e-07,
"loss": 0.0001,
"reward": 0.625,
"reward_std": 0.11659536883234978,
"rewards/probe_reward_fn": 0.625,
"step": 3
},
{
"completion_length": 221.96875,
"epoch": 0.025559105431309903,
"grad_norm": 3.1875,
"kl": 0.001511693000793457,
"learning_rate": 9.914529914529915e-07,
"loss": 0.0001,
"reward": 0.78125,
"reward_std": 0.05444390885531902,
"rewards/probe_reward_fn": 0.78125,
"step": 4
},
{
"completion_length": 189.109375,
"epoch": 0.03194888178913738,
"grad_norm": 1.640625,
"kl": 0.003214411437511444,
"learning_rate": 9.893162393162393e-07,
"loss": 0.0001,
"reward": 0.7578125,
"reward_std": 0.06193945184350014,
"rewards/probe_reward_fn": 0.7578125,
"step": 5
},
{
"completion_length": 169.359375,
"epoch": 0.038338658146964855,
"grad_norm": 3.0625,
"kl": 0.004067838191986084,
"learning_rate": 9.871794871794872e-07,
"loss": 0.0002,
"reward": 0.65625,
"reward_std": 0.11964675411581993,
"rewards/probe_reward_fn": 0.65625,
"step": 6
},
{
"completion_length": 154.859375,
"epoch": 0.04472843450479233,
"grad_norm": 3.421875,
"kl": 0.0032278895378112793,
"learning_rate": 9.850427350427351e-07,
"loss": 0.0001,
"reward": 0.8203125,
"reward_std": 0.08424854651093483,
"rewards/probe_reward_fn": 0.8203125,
"step": 7
},
{
"completion_length": 167.796875,
"epoch": 0.051118210862619806,
"grad_norm": 1.8515625,
"kl": 0.003155946731567383,
"learning_rate": 9.829059829059829e-07,
"loss": 0.0001,
"reward": 0.8671875,
"reward_std": 0.04005437344312668,
"rewards/probe_reward_fn": 0.8671875,
"step": 8
},
{
"completion_length": 213.21875,
"epoch": 0.05750798722044728,
"grad_norm": 4.1875,
"kl": 0.004224300384521484,
"learning_rate": 9.807692307692306e-07,
"loss": 0.0002,
"reward": 0.7734375,
"reward_std": 0.13794121891260147,
"rewards/probe_reward_fn": 0.7734375,
"step": 9
},
{
"completion_length": 115.25,
"epoch": 0.06389776357827476,
"grad_norm": 3.375,
"kl": 0.0027216970920562744,
"learning_rate": 9.786324786324786e-07,
"loss": 0.0001,
"reward": 0.875,
"reward_std": 0.07884830981492996,
"rewards/probe_reward_fn": 0.875,
"step": 10
},
{
"completion_length": 115.5625,
"epoch": 0.07028753993610223,
"grad_norm": 3.5625,
"kl": 0.003567412495613098,
"learning_rate": 9.764957264957265e-07,
"loss": 0.0001,
"reward": 0.8515625,
"reward_std": 0.13964887335896492,
"rewards/probe_reward_fn": 0.8515625,
"step": 11
},
{
"completion_length": 149.234375,
"epoch": 0.07667731629392971,
"grad_norm": 4.09375,
"kl": 0.005543112754821777,
"learning_rate": 9.743589743589742e-07,
"loss": 0.0002,
"reward": 0.65625,
"reward_std": 0.13098980858922005,
"rewards/probe_reward_fn": 0.65625,
"step": 12
},
{
"completion_length": 208.171875,
"epoch": 0.08306709265175719,
"grad_norm": 4.03125,
"kl": 0.009611248970031738,
"learning_rate": 9.722222222222222e-07,
"loss": 0.0004,
"reward": 0.625,
"reward_std": 0.08337578736245632,
"rewards/probe_reward_fn": 0.625,
"step": 13
},
{
"completion_length": 185.8125,
"epoch": 0.08945686900958466,
"grad_norm": 3.0625,
"kl": 0.0038070380687713623,
"learning_rate": 9.700854700854701e-07,
"loss": 0.0002,
"reward": 0.9765625,
"reward_std": 0.051028965041041374,
"rewards/probe_reward_fn": 0.9765625,
"step": 14
},
{
"completion_length": 208.53125,
"epoch": 0.09584664536741214,
"grad_norm": 4.1875,
"kl": 0.007197260856628418,
"learning_rate": 9.679487179487179e-07,
"loss": 0.0003,
"reward": 0.7578125,
"reward_std": 0.07990914210677147,
"rewards/probe_reward_fn": 0.7578125,
"step": 15
},
{
"completion_length": 160.8125,
"epoch": 0.10223642172523961,
"grad_norm": 4.9375,
"kl": 0.0051407963037490845,
"learning_rate": 9.658119658119658e-07,
"loss": 0.0002,
"reward": 0.6640625,
"reward_std": 0.051028965041041374,
"rewards/probe_reward_fn": 0.6640625,
"step": 16
},
{
"completion_length": 165.078125,
"epoch": 0.10862619808306709,
"grad_norm": 2.796875,
"kl": 0.0035562217235565186,
"learning_rate": 9.636752136752138e-07,
"loss": 0.0001,
"reward": 0.8359375,
"reward_std": 0.08774714916944504,
"rewards/probe_reward_fn": 0.8359375,
"step": 17
},
{
"completion_length": 154.953125,
"epoch": 0.11501597444089456,
"grad_norm": 5.28125,
"kl": 0.0068502649664878845,
"learning_rate": 9.615384615384615e-07,
"loss": 0.0003,
"reward": 0.7265625,
"reward_std": 0.20028075203299522,
"rewards/probe_reward_fn": 0.7265625,
"step": 18
},
{
"completion_length": 175.65625,
"epoch": 0.12140575079872204,
"grad_norm": 3.5,
"kl": 0.00363941490650177,
"learning_rate": 9.594017094017094e-07,
"loss": 0.0001,
"reward": 0.828125,
"reward_std": 0.11230766586959362,
"rewards/probe_reward_fn": 0.828125,
"step": 19
},
{
"completion_length": 131.90625,
"epoch": 0.12779552715654952,
"grad_norm": 2.6875,
"kl": 0.00410085916519165,
"learning_rate": 9.572649572649572e-07,
"loss": 0.0002,
"reward": 0.8984375,
"reward_std": 0.06934264488518238,
"rewards/probe_reward_fn": 0.8984375,
"step": 20
},
{
"completion_length": 138.3125,
"epoch": 0.134185303514377,
"grad_norm": 1.9453125,
"kl": 0.002880275249481201,
"learning_rate": 9.551282051282051e-07,
"loss": 0.0001,
"reward": 0.8828125,
"reward_std": 0.022097086533904076,
"rewards/probe_reward_fn": 0.8828125,
"step": 21
},
{
"completion_length": 137.5,
"epoch": 0.14057507987220447,
"grad_norm": 4.0625,
"kl": 0.0054109953343868256,
"learning_rate": 9.529914529914528e-07,
"loss": 0.0002,
"reward": 0.828125,
"reward_std": 0.14738082326948643,
"rewards/probe_reward_fn": 0.828125,
"step": 22
},
{
"completion_length": 157.109375,
"epoch": 0.14696485623003194,
"grad_norm": 4.0,
"kl": 0.006090134382247925,
"learning_rate": 9.508547008547008e-07,
"loss": 0.0002,
"reward": 0.7890625,
"reward_std": 0.14359741657972336,
"rewards/probe_reward_fn": 0.7890625,
"step": 23
},
{
"completion_length": 159.046875,
"epoch": 0.15335463258785942,
"grad_norm": 4.21875,
"kl": 0.006067715585231781,
"learning_rate": 9.487179487179486e-07,
"loss": 0.0002,
"reward": 0.8359375,
"reward_std": 0.13485966622829437,
"rewards/probe_reward_fn": 0.8359375,
"step": 24
},
{
"completion_length": 155.390625,
"epoch": 0.1597444089456869,
"grad_norm": 2.875,
"kl": 0.002869546413421631,
"learning_rate": 9.465811965811965e-07,
"loss": 0.0001,
"reward": 0.859375,
"reward_std": 0.06859857402741909,
"rewards/probe_reward_fn": 0.859375,
"step": 25
},
{
"completion_length": 173.625,
"epoch": 0.16613418530351437,
"grad_norm": 4.09375,
"kl": 0.006188303232192993,
"learning_rate": 9.444444444444444e-07,
"loss": 0.0002,
"reward": 0.6953125,
"reward_std": 0.14246869646012783,
"rewards/probe_reward_fn": 0.6953125,
"step": 26
},
{
"completion_length": 185.5,
"epoch": 0.17252396166134185,
"grad_norm": 3.546875,
"kl": 0.004475951194763184,
"learning_rate": 9.423076923076923e-07,
"loss": 0.0002,
"reward": 0.8515625,
"reward_std": 0.0944982822984457,
"rewards/probe_reward_fn": 0.8515625,
"step": 27
},
{
"completion_length": 201.09375,
"epoch": 0.17891373801916932,
"grad_norm": 3.625,
"kl": 0.007355093955993652,
"learning_rate": 9.401709401709401e-07,
"loss": 0.0003,
"reward": 0.7421875,
"reward_std": 0.06629125960171223,
"rewards/probe_reward_fn": 0.7421875,
"step": 28
},
{
"completion_length": 124.5625,
"epoch": 0.1853035143769968,
"grad_norm": 4.84375,
"kl": 0.006694614887237549,
"learning_rate": 9.38034188034188e-07,
"loss": 0.0003,
"reward": 0.875,
"reward_std": 0.11548773944377899,
"rewards/probe_reward_fn": 0.875,
"step": 29
},
{
"completion_length": 153.90625,
"epoch": 0.19169329073482427,
"grad_norm": 6.375,
"kl": 0.009141117334365845,
"learning_rate": 9.358974358974359e-07,
"loss": 0.0004,
"reward": 0.75,
"reward_std": 0.18885356560349464,
"rewards/probe_reward_fn": 0.75,
"step": 30
},
{
"completion_length": 172.1875,
"epoch": 0.19808306709265175,
"grad_norm": 3.71875,
"kl": 0.0073334574699401855,
"learning_rate": 9.337606837606837e-07,
"loss": 0.0003,
"reward": 0.78125,
"reward_std": 0.09863808192312717,
"rewards/probe_reward_fn": 0.78125,
"step": 31
},
{
"completion_length": 199.734375,
"epoch": 0.20447284345047922,
"grad_norm": 3.984375,
"kl": 0.006796807050704956,
"learning_rate": 9.316239316239316e-07,
"loss": 0.0003,
"reward": 0.6015625,
"reward_std": 0.10865294747054577,
"rewards/probe_reward_fn": 0.6015625,
"step": 32
},
{
"completion_length": 209.609375,
"epoch": 0.2108626198083067,
"grad_norm": 4.1875,
"kl": 0.007922083139419556,
"learning_rate": 9.294871794871795e-07,
"loss": 0.0003,
"reward": 0.6875,
"reward_std": 0.12037160992622375,
"rewards/probe_reward_fn": 0.6875,
"step": 33
},
{
"completion_length": 220.046875,
"epoch": 0.21725239616613418,
"grad_norm": 2.625,
"kl": 0.008481711149215698,
"learning_rate": 9.273504273504273e-07,
"loss": 0.0003,
"reward": 0.4296875,
"reward_std": 0.08618971705436707,
"rewards/probe_reward_fn": 0.4296875,
"step": 34
},
{
"completion_length": 119.6875,
"epoch": 0.22364217252396165,
"grad_norm": 4.25,
"kl": 0.008033812046051025,
"learning_rate": 9.252136752136752e-07,
"loss": 0.0003,
"reward": 0.7890625,
"reward_std": 0.2384064383804798,
"rewards/probe_reward_fn": 0.7890625,
"step": 35
},
{
"completion_length": 203.25,
"epoch": 0.23003194888178913,
"grad_norm": 2.625,
"kl": 0.006860613822937012,
"learning_rate": 9.230769230769231e-07,
"loss": 0.0003,
"reward": 0.8828125,
"reward_std": 0.07959238067269325,
"rewards/probe_reward_fn": 0.8828125,
"step": 36
},
{
"completion_length": 158.484375,
"epoch": 0.2364217252396166,
"grad_norm": 3.859375,
"kl": 9.036064147949219e-05,
"learning_rate": 9.209401709401709e-07,
"loss": 0.0,
"reward": 0.9921875,
"reward_std": 0.022097086533904076,
"rewards/probe_reward_fn": 0.9921875,
"step": 37
},
{
"completion_length": 198.234375,
"epoch": 0.24281150159744408,
"grad_norm": 2.9375,
"kl": 0.0018144845962524414,
"learning_rate": 9.188034188034187e-07,
"loss": 0.0001,
"reward": 0.890625,
"reward_std": 0.04419417306780815,
"rewards/probe_reward_fn": 0.890625,
"step": 38
},
{
"completion_length": 202.8125,
"epoch": 0.24920127795527156,
"grad_norm": 4.78125,
"kl": 0.008247137069702148,
"learning_rate": 9.166666666666665e-07,
"loss": 0.0003,
"reward": 0.6796875,
"reward_std": 0.10889272205531597,
"rewards/probe_reward_fn": 0.6796875,
"step": 39
},
{
"completion_length": 203.265625,
"epoch": 0.25559105431309903,
"grad_norm": 5.75,
"kl": 0.01144362986087799,
"learning_rate": 9.145299145299145e-07,
"loss": 0.0005,
"reward": 0.8203125,
"reward_std": 0.2073613405227661,
"rewards/probe_reward_fn": 0.8203125,
"step": 40
},
{
"completion_length": 179.78125,
"epoch": 0.26198083067092653,
"grad_norm": 4.625,
"kl": 0.008382320404052734,
"learning_rate": 9.123931623931623e-07,
"loss": 0.0003,
"reward": 0.7578125,
"reward_std": 0.12019838578999043,
"rewards/probe_reward_fn": 0.7578125,
"step": 41
},
{
"completion_length": 195.546875,
"epoch": 0.268370607028754,
"grad_norm": 4.9375,
"kl": 0.009996116161346436,
"learning_rate": 9.102564102564102e-07,
"loss": 0.0004,
"reward": 0.7109375,
"reward_std": 0.22071710042655468,
"rewards/probe_reward_fn": 0.7109375,
"step": 42
},
{
"completion_length": 192.9375,
"epoch": 0.2747603833865815,
"grad_norm": 2.796875,
"kl": 0.003770500421524048,
"learning_rate": 9.081196581196581e-07,
"loss": 0.0002,
"reward": 0.8671875,
"reward_std": 0.10918501019477844,
"rewards/probe_reward_fn": 0.8671875,
"step": 43
},
{
"completion_length": 185.046875,
"epoch": 0.28115015974440893,
"grad_norm": 4.0625,
"kl": 0.0063970088958740234,
"learning_rate": 9.059829059829059e-07,
"loss": 0.0003,
"reward": 0.78125,
"reward_std": 0.12304248288273811,
"rewards/probe_reward_fn": 0.78125,
"step": 44
},
{
"completion_length": 208.75,
"epoch": 0.28753993610223644,
"grad_norm": 6.25,
"kl": 0.01544952392578125,
"learning_rate": 9.038461538461538e-07,
"loss": 0.0006,
"reward": 0.578125,
"reward_std": 0.21412582136690617,
"rewards/probe_reward_fn": 0.578125,
"step": 45
},
{
"completion_length": 122.625,
"epoch": 0.2939297124600639,
"grad_norm": 4.125,
"kl": 0.008356839418411255,
"learning_rate": 9.017094017094017e-07,
"loss": 0.0003,
"reward": 0.734375,
"reward_std": 0.12484738603234291,
"rewards/probe_reward_fn": 0.734375,
"step": 46
},
{
"completion_length": 171.875,
"epoch": 0.3003194888178914,
"grad_norm": 2.328125,
"kl": 0.00939759612083435,
"learning_rate": 8.995726495726496e-07,
"loss": 0.0004,
"reward": 0.9765625,
"reward_std": 0.046501487493515015,
"rewards/probe_reward_fn": 0.9765625,
"step": 47
},
{
"completion_length": 201.5,
"epoch": 0.30670926517571884,
"grad_norm": 5.46875,
"kl": 0.009128153324127197,
"learning_rate": 8.974358974358974e-07,
"loss": 0.0004,
"reward": 0.7109375,
"reward_std": 0.16392458602786064,
"rewards/probe_reward_fn": 0.7109375,
"step": 48
},
{
"completion_length": 186.421875,
"epoch": 0.31309904153354634,
"grad_norm": 3.984375,
"kl": 0.008066892623901367,
"learning_rate": 8.952991452991452e-07,
"loss": 0.0003,
"reward": 0.828125,
"reward_std": 0.1892017461359501,
"rewards/probe_reward_fn": 0.828125,
"step": 49
},
{
"completion_length": 206.140625,
"epoch": 0.3194888178913738,
"grad_norm": 5.3125,
"kl": 0.012567520141601562,
"learning_rate": 8.931623931623932e-07,
"loss": 0.0005,
"reward": 0.5625,
"reward_std": 0.16097761504352093,
"rewards/probe_reward_fn": 0.5625,
"step": 50
},
{
"completion_length": 211.78125,
"epoch": 0.3258785942492013,
"grad_norm": 3.53125,
"kl": 0.009197399020195007,
"learning_rate": 8.91025641025641e-07,
"loss": 0.0004,
"reward": 0.7421875,
"reward_std": 0.0946863554418087,
"rewards/probe_reward_fn": 0.7421875,
"step": 51
},
{
"completion_length": 162.046875,
"epoch": 0.33226837060702874,
"grad_norm": 3.5625,
"kl": 0.005607321858406067,
"learning_rate": 8.888888888888888e-07,
"loss": 0.0002,
"reward": 0.8125,
"reward_std": 0.09300297498703003,
"rewards/probe_reward_fn": 0.8125,
"step": 52
},
{
"completion_length": 167.359375,
"epoch": 0.33865814696485624,
"grad_norm": 7.65625,
"kl": 0.014207899570465088,
"learning_rate": 8.867521367521367e-07,
"loss": 0.0006,
"reward": 0.7109375,
"reward_std": 0.14246869646012783,
"rewards/probe_reward_fn": 0.7109375,
"step": 53
},
{
"completion_length": 151.296875,
"epoch": 0.3450479233226837,
"grad_norm": 5.75,
"kl": 0.015747159719467163,
"learning_rate": 8.846153846153846e-07,
"loss": 0.0006,
"reward": 0.6484375,
"reward_std": 0.13098490424454212,
"rewards/probe_reward_fn": 0.6484375,
"step": 54
},
{
"completion_length": 144.796875,
"epoch": 0.3514376996805112,
"grad_norm": 4.3125,
"kl": 0.009722955524921417,
"learning_rate": 8.824786324786324e-07,
"loss": 0.0004,
"reward": 0.859375,
"reward_std": 0.11844894476234913,
"rewards/probe_reward_fn": 0.859375,
"step": 55
},
{
"completion_length": 170.609375,
"epoch": 0.35782747603833864,
"grad_norm": 5.0,
"kl": 0.015789180994033813,
"learning_rate": 8.803418803418803e-07,
"loss": 0.0006,
"reward": 0.6796875,
"reward_std": 0.14819095470011234,
"rewards/probe_reward_fn": 0.6796875,
"step": 56
},
{
"completion_length": 194.546875,
"epoch": 0.36421725239616615,
"grad_norm": 3.296875,
"kl": 0.01312720775604248,
"learning_rate": 8.782051282051282e-07,
"loss": 0.0005,
"reward": 0.828125,
"reward_std": 0.04419417306780815,
"rewards/probe_reward_fn": 0.828125,
"step": 57
},
{
"completion_length": 194.921875,
"epoch": 0.3706070287539936,
"grad_norm": 3.421875,
"kl": 0.005645632743835449,
"learning_rate": 8.76068376068376e-07,
"loss": 0.0002,
"reward": 0.8515625,
"reward_std": 0.061278700828552246,
"rewards/probe_reward_fn": 0.8515625,
"step": 58
},
{
"completion_length": 199.765625,
"epoch": 0.3769968051118211,
"grad_norm": 2.953125,
"kl": 0.012275934219360352,
"learning_rate": 8.739316239316239e-07,
"loss": 0.0005,
"reward": 0.6171875,
"reward_std": 0.06629125960171223,
"rewards/probe_reward_fn": 0.6171875,
"step": 59
},
{
"completion_length": 200.015625,
"epoch": 0.38338658146964855,
"grad_norm": 6.125,
"kl": 0.014720819890499115,
"learning_rate": 8.717948717948718e-07,
"loss": 0.0006,
"reward": 0.71875,
"reward_std": 0.2209017463028431,
"rewards/probe_reward_fn": 0.71875,
"step": 60
},
{
"completion_length": 119.625,
"epoch": 0.38977635782747605,
"grad_norm": 3.65625,
"kl": 0.010319650173187256,
"learning_rate": 8.696581196581196e-07,
"loss": 0.0004,
"reward": 0.78125,
"reward_std": 0.10022296383976936,
"rewards/probe_reward_fn": 0.78125,
"step": 61
},
{
"completion_length": 147.203125,
"epoch": 0.3961661341853035,
"grad_norm": 3.953125,
"kl": 0.011631131172180176,
"learning_rate": 8.675213675213675e-07,
"loss": 0.0005,
"reward": 0.7734375,
"reward_std": 0.21041272580623627,
"rewards/probe_reward_fn": 0.7734375,
"step": 62
},
{
"completion_length": 116.34375,
"epoch": 0.402555910543131,
"grad_norm": 3.546875,
"kl": 0.010514132678508759,
"learning_rate": 8.653846153846154e-07,
"loss": 0.0004,
"reward": 0.9765625,
"reward_std": 0.03234682232141495,
"rewards/probe_reward_fn": 0.9765625,
"step": 63
},
{
"completion_length": 158.90625,
"epoch": 0.40894568690095845,
"grad_norm": 2.734375,
"kl": 0.008355051279067993,
"learning_rate": 8.632478632478633e-07,
"loss": 0.0003,
"reward": 0.875,
"reward_std": 0.0578637570142746,
"rewards/probe_reward_fn": 0.875,
"step": 64
},
{
"completion_length": 183.578125,
"epoch": 0.41533546325878595,
"grad_norm": 4.59375,
"kl": 0.0074748694896698,
"learning_rate": 8.611111111111111e-07,
"loss": 0.0003,
"reward": 0.890625,
"reward_std": 0.08337578736245632,
"rewards/probe_reward_fn": 0.890625,
"step": 65
},
{
"completion_length": 220.0,
"epoch": 0.4217252396166134,
"grad_norm": 4.75,
"kl": 0.015145301818847656,
"learning_rate": 8.589743589743588e-07,
"loss": 0.0006,
"reward": 0.796875,
"reward_std": 0.14902584999799728,
"rewards/probe_reward_fn": 0.796875,
"step": 66
},
{
"completion_length": 178.234375,
"epoch": 0.4281150159744409,
"grad_norm": 3.515625,
"kl": 0.013521432876586914,
"learning_rate": 8.568376068376068e-07,
"loss": 0.0005,
"reward": 0.8046875,
"reward_std": 0.08891239576041698,
"rewards/probe_reward_fn": 0.8046875,
"step": 67
},
{
"completion_length": 189.203125,
"epoch": 0.43450479233226835,
"grad_norm": 3.765625,
"kl": 0.01007544994354248,
"learning_rate": 8.547008547008546e-07,
"loss": 0.0004,
"reward": 0.8515625,
"reward_std": 0.10865294747054577,
"rewards/probe_reward_fn": 0.8515625,
"step": 68
},
{
"completion_length": 169.90625,
"epoch": 0.44089456869009586,
"grad_norm": 3.875,
"kl": 0.009205043315887451,
"learning_rate": 8.525641025641025e-07,
"loss": 0.0004,
"reward": 0.765625,
"reward_std": 0.12150033004581928,
"rewards/probe_reward_fn": 0.765625,
"step": 69
},
{
"completion_length": 208.515625,
"epoch": 0.4472843450479233,
"grad_norm": 5.625,
"kl": 0.012124300003051758,
"learning_rate": 8.504273504273504e-07,
"loss": 0.0005,
"reward": 0.7578125,
"reward_std": 0.1608732007443905,
"rewards/probe_reward_fn": 0.7578125,
"step": 70
},
{
"completion_length": 203.375,
"epoch": 0.4536741214057508,
"grad_norm": 2.703125,
"kl": 0.01857316493988037,
"learning_rate": 8.482905982905982e-07,
"loss": 0.0007,
"reward": 0.765625,
"reward_std": 0.0761774368584156,
"rewards/probe_reward_fn": 0.765625,
"step": 71
},
{
"completion_length": 190.03125,
"epoch": 0.46006389776357826,
"grad_norm": 3.65625,
"kl": 0.007373243570327759,
"learning_rate": 8.461538461538461e-07,
"loss": 0.0003,
"reward": 0.921875,
"reward_std": 0.05444390885531902,
"rewards/probe_reward_fn": 0.921875,
"step": 72
},
{
"completion_length": 199.296875,
"epoch": 0.46645367412140576,
"grad_norm": 3.46875,
"kl": 0.006693422794342041,
"learning_rate": 8.44017094017094e-07,
"loss": 0.0003,
"reward": 0.8203125,
"reward_std": 0.06898625195026398,
"rewards/probe_reward_fn": 0.8203125,
"step": 73
},
{
"completion_length": 225.765625,
"epoch": 0.4728434504792332,
"grad_norm": 3.90625,
"kl": 0.014077544212341309,
"learning_rate": 8.418803418803419e-07,
"loss": 0.0006,
"reward": 0.9296875,
"reward_std": 0.07996084354817867,
"rewards/probe_reward_fn": 0.9296875,
"step": 74
},
{
"completion_length": 111.0625,
"epoch": 0.4792332268370607,
"grad_norm": 3.609375,
"kl": 0.01001065969467163,
"learning_rate": 8.397435897435897e-07,
"loss": 0.0004,
"reward": 0.84375,
"reward_std": 0.06859857402741909,
"rewards/probe_reward_fn": 0.84375,
"step": 75
},
{
"completion_length": 183.484375,
"epoch": 0.48562300319488816,
"grad_norm": 4.21875,
"kl": 0.018206000328063965,
"learning_rate": 8.376068376068375e-07,
"loss": 0.0007,
"reward": 0.859375,
"reward_std": 0.16081063263118267,
"rewards/probe_reward_fn": 0.859375,
"step": 76
},
{
"completion_length": 134.984375,
"epoch": 0.49201277955271566,
"grad_norm": 3.53125,
"kl": 0.009386561810970306,
"learning_rate": 8.354700854700855e-07,
"loss": 0.0004,
"reward": 0.875,
"reward_std": 0.07884830981492996,
"rewards/probe_reward_fn": 0.875,
"step": 77
},
{
"completion_length": 175.953125,
"epoch": 0.4984025559105431,
"grad_norm": 3.953125,
"kl": 0.01347552239894867,
"learning_rate": 8.333333333333333e-07,
"loss": 0.0005,
"reward": 0.8671875,
"reward_std": 0.07996084354817867,
"rewards/probe_reward_fn": 0.8671875,
"step": 78
},
{
"completion_length": 194.671875,
"epoch": 0.5047923322683706,
"grad_norm": 2.859375,
"kl": 0.009766161441802979,
"learning_rate": 8.311965811965812e-07,
"loss": 0.0004,
"reward": 0.84375,
"reward_std": 0.08433220535516739,
"rewards/probe_reward_fn": 0.84375,
"step": 79
},
{
"completion_length": 192.046875,
"epoch": 0.5111821086261981,
"grad_norm": 4.5,
"kl": 0.01071232557296753,
"learning_rate": 8.290598290598291e-07,
"loss": 0.0004,
"reward": 0.75,
"reward_std": 0.09863808192312717,
"rewards/probe_reward_fn": 0.75,
"step": 80
},
{
"completion_length": 196.921875,
"epoch": 0.5175718849840255,
"grad_norm": 3.984375,
"kl": 0.010022461414337158,
"learning_rate": 8.269230769230768e-07,
"loss": 0.0004,
"reward": 0.796875,
"reward_std": 0.10958509147167206,
"rewards/probe_reward_fn": 0.796875,
"step": 81
},
{
"completion_length": 192.640625,
"epoch": 0.5239616613418531,
"grad_norm": 2.703125,
"kl": 0.01441425085067749,
"learning_rate": 8.247863247863247e-07,
"loss": 0.0006,
"reward": 0.8828125,
"reward_std": 0.0657544769346714,
"rewards/probe_reward_fn": 0.8828125,
"step": 82
},
{
"completion_length": 199.34375,
"epoch": 0.5303514376996805,
"grad_norm": 4.25,
"kl": 0.01363450288772583,
"learning_rate": 8.226495726495725e-07,
"loss": 0.0005,
"reward": 0.8359375,
"reward_std": 0.17274557799100876,
"rewards/probe_reward_fn": 0.8359375,
"step": 83
},
{
"completion_length": 247.59375,
"epoch": 0.536741214057508,
"grad_norm": 4.5,
"kl": 0.02956390380859375,
"learning_rate": 8.205128205128205e-07,
"loss": 0.0012,
"reward": 0.65625,
"reward_std": 0.1759442389011383,
"rewards/probe_reward_fn": 0.65625,
"step": 84
},
{
"completion_length": 182.96875,
"epoch": 0.5431309904153354,
"grad_norm": 2.9375,
"kl": 0.014686524868011475,
"learning_rate": 8.183760683760683e-07,
"loss": 0.0006,
"reward": 0.8984375,
"reward_std": 0.061278700828552246,
"rewards/probe_reward_fn": 0.8984375,
"step": 85
},
{
"completion_length": 192.84375,
"epoch": 0.549520766773163,
"grad_norm": 2.921875,
"kl": 0.006304048001766205,
"learning_rate": 8.162393162393162e-07,
"loss": 0.0003,
"reward": 0.9765625,
"reward_std": 0.03234682232141495,
"rewards/probe_reward_fn": 0.9765625,
"step": 86
},
{
"completion_length": 155.859375,
"epoch": 0.5559105431309904,
"grad_norm": 2.1875,
"kl": 0.018445342779159546,
"learning_rate": 8.141025641025641e-07,
"loss": 0.0007,
"reward": 0.8359375,
"reward_std": 0.046501487493515015,
"rewards/probe_reward_fn": 0.8359375,
"step": 87
},
{
"completion_length": 161.4375,
"epoch": 0.5623003194888179,
"grad_norm": 4.09375,
"kl": 0.015817761421203613,
"learning_rate": 8.119658119658119e-07,
"loss": 0.0006,
"reward": 0.75,
"reward_std": 0.13671206682920456,
"rewards/probe_reward_fn": 0.75,
"step": 88
},
{
"completion_length": 110.390625,
"epoch": 0.5686900958466453,
"grad_norm": 1.8359375,
"kl": 0.00672680139541626,
"learning_rate": 8.098290598290598e-07,
"loss": 0.0003,
"reward": 0.890625,
"reward_std": 0.0289318785071373,
"rewards/probe_reward_fn": 0.890625,
"step": 89
},
{
"completion_length": 190.71875,
"epoch": 0.5750798722044729,
"grad_norm": 6.375,
"kl": 0.019952058792114258,
"learning_rate": 8.076923076923077e-07,
"loss": 0.0008,
"reward": 0.828125,
"reward_std": 0.1651577204465866,
"rewards/probe_reward_fn": 0.828125,
"step": 90
},
{
"completion_length": 124.4375,
"epoch": 0.5814696485623003,
"grad_norm": 3.515625,
"kl": 0.008346617221832275,
"learning_rate": 8.055555555555556e-07,
"loss": 0.0003,
"reward": 0.9765625,
"reward_std": 0.051028965041041374,
"rewards/probe_reward_fn": 0.9765625,
"step": 91
},
{
"completion_length": 127.0625,
"epoch": 0.5878594249201278,
"grad_norm": 3.9375,
"kl": 0.015496894717216492,
"learning_rate": 8.034188034188034e-07,
"loss": 0.0006,
"reward": 0.8671875,
"reward_std": 0.11667902767658234,
"rewards/probe_reward_fn": 0.8671875,
"step": 92
},
{
"completion_length": 197.734375,
"epoch": 0.5942492012779552,
"grad_norm": 4.46875,
"kl": 0.021900296211242676,
"learning_rate": 8.012820512820512e-07,
"loss": 0.0009,
"reward": 0.6953125,
"reward_std": 0.09827452339231968,
"rewards/probe_reward_fn": 0.6953125,
"step": 93
},
{
"completion_length": 158.984375,
"epoch": 0.6006389776357828,
"grad_norm": 2.859375,
"kl": 0.017594188451766968,
"learning_rate": 7.991452991452992e-07,
"loss": 0.0007,
"reward": 0.84375,
"reward_std": 0.08065321296453476,
"rewards/probe_reward_fn": 0.84375,
"step": 94
},
{
"completion_length": 215.40625,
"epoch": 0.6070287539936102,
"grad_norm": 2.4375,
"kl": 0.01365518569946289,
"learning_rate": 7.97008547008547e-07,
"loss": 0.0005,
"reward": 0.875,
"reward_std": 0.08065321296453476,
"rewards/probe_reward_fn": 0.875,
"step": 95
},
{
"completion_length": 159.8125,
"epoch": 0.6134185303514377,
"grad_norm": 4.0,
"kl": 0.01733332872390747,
"learning_rate": 7.948717948717948e-07,
"loss": 0.0007,
"reward": 0.90625,
"reward_std": 0.0578637570142746,
"rewards/probe_reward_fn": 0.90625,
"step": 96
},
{
"completion_length": 246.46875,
"epoch": 0.6198083067092651,
"grad_norm": 4.59375,
"kl": 0.01923823356628418,
"learning_rate": 7.927350427350427e-07,
"loss": 0.0008,
"reward": 0.78125,
"reward_std": 0.09863808192312717,
"rewards/probe_reward_fn": 0.78125,
"step": 97
},
{
"completion_length": 242.421875,
"epoch": 0.6261980830670927,
"grad_norm": 4.28125,
"kl": 0.015945076942443848,
"learning_rate": 7.905982905982905e-07,
"loss": 0.0006,
"reward": 0.78125,
"reward_std": 0.20551892928779125,
"rewards/probe_reward_fn": 0.78125,
"step": 98
},
{
"completion_length": 189.609375,
"epoch": 0.6325878594249201,
"grad_norm": 6.1875,
"kl": 0.01627245545387268,
"learning_rate": 7.884615384615384e-07,
"loss": 0.0007,
"reward": 0.828125,
"reward_std": 0.14667173847556114,
"rewards/probe_reward_fn": 0.828125,
"step": 99
},
{
"completion_length": 181.765625,
"epoch": 0.6389776357827476,
"grad_norm": 3.625,
"kl": 0.021611660718917847,
"learning_rate": 7.863247863247862e-07,
"loss": 0.0009,
"reward": 0.7890625,
"reward_std": 0.1876232698559761,
"rewards/probe_reward_fn": 0.7890625,
"step": 100
},
{
"completion_length": 196.015625,
"epoch": 0.645367412140575,
"grad_norm": 6.0,
"kl": 0.021931931376457214,
"learning_rate": 7.841880341880342e-07,
"loss": 0.0009,
"reward": 0.7734375,
"reward_std": 0.21350038051605225,
"rewards/probe_reward_fn": 0.7734375,
"step": 101
},
{
"completion_length": 207.265625,
"epoch": 0.6517571884984026,
"grad_norm": 5.78125,
"kl": 0.02642202377319336,
"learning_rate": 7.82051282051282e-07,
"loss": 0.0011,
"reward": 0.6875,
"reward_std": 0.18506525456905365,
"rewards/probe_reward_fn": 0.6875,
"step": 102
},
{
"completion_length": 219.4375,
"epoch": 0.65814696485623,
"grad_norm": 4.6875,
"kl": 0.024353623390197754,
"learning_rate": 7.799145299145298e-07,
"loss": 0.001,
"reward": 0.71875,
"reward_std": 0.09959449991583824,
"rewards/probe_reward_fn": 0.71875,
"step": 103
},
{
"completion_length": 237.71875,
"epoch": 0.6645367412140575,
"grad_norm": 5.0,
"kl": 0.03186154365539551,
"learning_rate": 7.777777777777778e-07,
"loss": 0.0013,
"reward": 0.6484375,
"reward_std": 0.15538930520415306,
"rewards/probe_reward_fn": 0.6484375,
"step": 104
},
{
"completion_length": 157.625,
"epoch": 0.670926517571885,
"grad_norm": 7.25,
"kl": 0.025882452726364136,
"learning_rate": 7.756410256410256e-07,
"loss": 0.001,
"reward": 0.7265625,
"reward_std": 0.21599861234426498,
"rewards/probe_reward_fn": 0.7265625,
"step": 105
},
{
"completion_length": 141.78125,
"epoch": 0.6773162939297125,
"grad_norm": 2.5625,
"kl": 0.01080256700515747,
"learning_rate": 7.735042735042735e-07,
"loss": 0.0004,
"reward": 0.8203125,
"reward_std": 0.09704046696424484,
"rewards/probe_reward_fn": 0.8203125,
"step": 106
},
{
"completion_length": 83.640625,
"epoch": 0.6837060702875399,
"grad_norm": 0.021728515625,
"kl": 0.00013084709644317627,
"learning_rate": 7.713675213675214e-07,
"loss": 0.0,
"reward": 1.0,
"reward_std": 0.0,
"rewards/probe_reward_fn": 1.0,
"step": 107
},
{
"completion_length": 162.125,
"epoch": 0.6900958466453674,
"grad_norm": 5.3125,
"kl": 0.031369537115097046,
"learning_rate": 7.692307692307693e-07,
"loss": 0.0013,
"reward": 0.875,
"reward_std": 0.12430291995406151,
"rewards/probe_reward_fn": 0.875,
"step": 108
},
{
"completion_length": 192.1875,
"epoch": 0.6964856230031949,
"grad_norm": 6.03125,
"kl": 0.03304767608642578,
"learning_rate": 7.670940170940171e-07,
"loss": 0.0013,
"reward": 0.8203125,
"reward_std": 0.16236715391278267,
"rewards/probe_reward_fn": 0.8203125,
"step": 109
},
{
"completion_length": 186.875,
"epoch": 0.7028753993610224,
"grad_norm": 3.0,
"kl": 0.029998138546943665,
"learning_rate": 7.649572649572648e-07,
"loss": 0.0012,
"reward": 0.8046875,
"reward_std": 0.11265816539525986,
"rewards/probe_reward_fn": 0.8046875,
"step": 110
},
{
"completion_length": 176.046875,
"epoch": 0.7092651757188498,
"grad_norm": 4.125,
"kl": 0.026562809944152832,
"learning_rate": 7.628205128205128e-07,
"loss": 0.0011,
"reward": 0.8671875,
"reward_std": 0.10474801808595657,
"rewards/probe_reward_fn": 0.8671875,
"step": 111
},
{
"completion_length": 155.296875,
"epoch": 0.7156549520766773,
"grad_norm": 4.78125,
"kl": 0.043306052684783936,
"learning_rate": 7.606837606837606e-07,
"loss": 0.0017,
"reward": 0.8125,
"reward_std": 0.1158441323786974,
"rewards/probe_reward_fn": 0.8125,
"step": 112
},
{
"completion_length": 209.078125,
"epoch": 0.7220447284345048,
"grad_norm": 7.34375,
"kl": 0.04677651822566986,
"learning_rate": 7.585470085470084e-07,
"loss": 0.0019,
"reward": 0.9375,
"reward_std": 0.07425477169454098,
"rewards/probe_reward_fn": 0.9375,
"step": 113
},
{
"completion_length": 243.421875,
"epoch": 0.7284345047923323,
"grad_norm": 4.59375,
"kl": 0.05419015884399414,
"learning_rate": 7.564102564102564e-07,
"loss": 0.0022,
"reward": 0.7890625,
"reward_std": 0.1237865537405014,
"rewards/probe_reward_fn": 0.7890625,
"step": 114
},
{
"completion_length": 139.203125,
"epoch": 0.7348242811501597,
"grad_norm": 2.96875,
"kl": 0.027146384119987488,
"learning_rate": 7.542735042735042e-07,
"loss": 0.0011,
"reward": 0.7265625,
"reward_std": 0.07959238067269325,
"rewards/probe_reward_fn": 0.7265625,
"step": 115
},
{
"completion_length": 191.765625,
"epoch": 0.7412140575079872,
"grad_norm": 4.90625,
"kl": 0.05067324638366699,
"learning_rate": 7.521367521367521e-07,
"loss": 0.002,
"reward": 0.78125,
"reward_std": 0.17493948712944984,
"rewards/probe_reward_fn": 0.78125,
"step": 116
},
{
"completion_length": 208.484375,
"epoch": 0.7476038338658147,
"grad_norm": 5.46875,
"kl": 0.04659736156463623,
"learning_rate": 7.5e-07,
"loss": 0.0019,
"reward": 0.8125,
"reward_std": 0.11773985996842384,
"rewards/probe_reward_fn": 0.8125,
"step": 117
},
{
"completion_length": 176.3125,
"epoch": 0.7539936102236422,
"grad_norm": 4.65625,
"kl": 0.03570878505706787,
"learning_rate": 7.478632478632479e-07,
"loss": 0.0014,
"reward": 0.9453125,
"reward_std": 0.0521576851606369,
"rewards/probe_reward_fn": 0.9453125,
"step": 118
},
{
"completion_length": 221.015625,
"epoch": 0.7603833865814696,
"grad_norm": 5.90625,
"kl": 0.03645634651184082,
"learning_rate": 7.457264957264957e-07,
"loss": 0.0015,
"reward": 0.8515625,
"reward_std": 0.13281089812517166,
"rewards/probe_reward_fn": 0.8515625,
"step": 119
},
{
"completion_length": 241.609375,
"epoch": 0.7667731629392971,
"grad_norm": 7.4375,
"kl": 0.0670778751373291,
"learning_rate": 7.435897435897435e-07,
"loss": 0.0027,
"reward": 0.828125,
"reward_std": 0.16868516616523266,
"rewards/probe_reward_fn": 0.828125,
"step": 120
},
{
"completion_length": 192.53125,
"epoch": 0.7731629392971247,
"grad_norm": 4.75,
"kl": 0.08901721239089966,
"learning_rate": 7.414529914529915e-07,
"loss": 0.0036,
"reward": 0.65625,
"reward_std": 0.13671206682920456,
"rewards/probe_reward_fn": 0.65625,
"step": 121
},
{
"completion_length": 203.765625,
"epoch": 0.7795527156549521,
"grad_norm": 4.90625,
"kl": 0.03704953193664551,
"learning_rate": 7.393162393162393e-07,
"loss": 0.0015,
"reward": 0.90625,
"reward_std": 0.10844093933701515,
"rewards/probe_reward_fn": 0.90625,
"step": 122
},
{
"completion_length": 162.71875,
"epoch": 0.7859424920127795,
"grad_norm": 4.78125,
"kl": 0.059233278036117554,
"learning_rate": 7.371794871794872e-07,
"loss": 0.0024,
"reward": 0.78125,
"reward_std": 0.1444196179509163,
"rewards/probe_reward_fn": 0.78125,
"step": 123
},
{
"completion_length": 177.25,
"epoch": 0.792332268370607,
"grad_norm": 13.0625,
"kl": 0.07861459255218506,
"learning_rate": 7.350427350427351e-07,
"loss": 0.0031,
"reward": 0.8359375,
"reward_std": 0.1344047524034977,
"rewards/probe_reward_fn": 0.8359375,
"step": 124
},
{
"completion_length": 220.0625,
"epoch": 0.7987220447284346,
"grad_norm": 3.21875,
"kl": 0.03901100158691406,
"learning_rate": 7.329059829059828e-07,
"loss": 0.0016,
"reward": 0.9296875,
"reward_std": 0.06193945184350014,
"rewards/probe_reward_fn": 0.9296875,
"step": 125
},
{
"completion_length": 187.5625,
"epoch": 0.805111821086262,
"grad_norm": 6.5,
"kl": 0.06369009613990784,
"learning_rate": 7.307692307692307e-07,
"loss": 0.0025,
"reward": 0.7421875,
"reward_std": 0.265626123175025,
"rewards/probe_reward_fn": 0.7421875,
"step": 126
},
{
"completion_length": 217.328125,
"epoch": 0.8115015974440895,
"grad_norm": 4.84375,
"kl": 0.08575868606567383,
"learning_rate": 7.286324786324785e-07,
"loss": 0.0034,
"reward": 0.7265625,
"reward_std": 0.1623456198722124,
"rewards/probe_reward_fn": 0.7265625,
"step": 127
},
{
"completion_length": 201.703125,
"epoch": 0.8178913738019169,
"grad_norm": 3.953125,
"kl": 0.0716593861579895,
"learning_rate": 7.264957264957265e-07,
"loss": 0.0029,
"reward": 0.953125,
"reward_std": 0.06859857402741909,
"rewards/probe_reward_fn": 0.953125,
"step": 128
},
{
"completion_length": 157.78125,
"epoch": 0.8242811501597445,
"grad_norm": 4.46875,
"kl": 0.04764676094055176,
"learning_rate": 7.243589743589743e-07,
"loss": 0.0019,
"reward": 0.90625,
"reward_std": 0.05444390885531902,
"rewards/probe_reward_fn": 0.90625,
"step": 129
},
{
"completion_length": 160.796875,
"epoch": 0.8306709265175719,
"grad_norm": 3.5625,
"kl": 0.02914530038833618,
"learning_rate": 7.222222222222221e-07,
"loss": 0.0012,
"reward": 0.9765625,
"reward_std": 0.051028965041041374,
"rewards/probe_reward_fn": 0.9765625,
"step": 130
},
{
"completion_length": 197.859375,
"epoch": 0.8370607028753994,
"grad_norm": 7.9375,
"kl": 0.08538630604743958,
"learning_rate": 7.200854700854701e-07,
"loss": 0.0034,
"reward": 0.8125,
"reward_std": 0.10888781771063805,
"rewards/probe_reward_fn": 0.8125,
"step": 131
},
{
"completion_length": 189.015625,
"epoch": 0.8434504792332268,
"grad_norm": 6.4375,
"kl": 0.05089569091796875,
"learning_rate": 7.179487179487179e-07,
"loss": 0.002,
"reward": 0.8515625,
"reward_std": 0.13310656882822514,
"rewards/probe_reward_fn": 0.8515625,
"step": 132
},
{
"completion_length": 170.984375,
"epoch": 0.8498402555910544,
"grad_norm": 4.375,
"kl": 0.03627145290374756,
"learning_rate": 7.158119658119658e-07,
"loss": 0.0014,
"reward": 0.9453125,
"reward_std": 0.09827452339231968,
"rewards/probe_reward_fn": 0.9453125,
"step": 133
},
{
"completion_length": 187.84375,
"epoch": 0.8562300319488818,
"grad_norm": 5.3125,
"kl": 0.06550478935241699,
"learning_rate": 7.136752136752137e-07,
"loss": 0.0026,
"reward": 0.78125,
"reward_std": 0.18203496001660824,
"rewards/probe_reward_fn": 0.78125,
"step": 134
},
{
"completion_length": 197.6875,
"epoch": 0.8626198083067093,
"grad_norm": 2.765625,
"kl": 0.035640716552734375,
"learning_rate": 7.115384615384616e-07,
"loss": 0.0014,
"reward": 0.8984375,
"reward_std": 0.11733977869153023,
"rewards/probe_reward_fn": 0.8984375,
"step": 135
},
{
"completion_length": 245.390625,
"epoch": 0.8690095846645367,
"grad_norm": 5.875,
"kl": 0.06638574600219727,
"learning_rate": 7.094017094017094e-07,
"loss": 0.0027,
"reward": 0.8046875,
"reward_std": 0.1548035703599453,
"rewards/probe_reward_fn": 0.8046875,
"step": 136
},
{
"completion_length": 191.8125,
"epoch": 0.8753993610223643,
"grad_norm": 7.15625,
"kl": 0.04515576362609863,
"learning_rate": 7.072649572649572e-07,
"loss": 0.0018,
"reward": 0.8515625,
"reward_std": 0.12415501661598682,
"rewards/probe_reward_fn": 0.8515625,
"step": 137
},
{
"completion_length": 140.703125,
"epoch": 0.8817891373801917,
"grad_norm": 3.65625,
"kl": 0.03791502118110657,
"learning_rate": 7.051282051282052e-07,
"loss": 0.0015,
"reward": 0.9140625,
"reward_std": 0.046501487493515015,
"rewards/probe_reward_fn": 0.9140625,
"step": 138
},
{
"completion_length": 179.109375,
"epoch": 0.8881789137380192,
"grad_norm": 9.5625,
"kl": 0.07526063919067383,
"learning_rate": 7.029914529914529e-07,
"loss": 0.003,
"reward": 0.875,
"reward_std": 0.16333172656595707,
"rewards/probe_reward_fn": 0.875,
"step": 139
},
{
"completion_length": 169.671875,
"epoch": 0.8945686900958466,
"grad_norm": 0.34375,
"kl": 0.009998321533203125,
"learning_rate": 7.008547008547007e-07,
"loss": 0.0004,
"reward": 1.0,
"reward_std": 0.0,
"rewards/probe_reward_fn": 1.0,
"step": 140
},
{
"completion_length": 200.890625,
"epoch": 0.9009584664536742,
"grad_norm": 7.46875,
"kl": 0.08715200424194336,
"learning_rate": 6.987179487179487e-07,
"loss": 0.0035,
"reward": 0.7578125,
"reward_std": 0.20317899622023106,
"rewards/probe_reward_fn": 0.7578125,
"step": 141
},
{
"completion_length": 238.625,
"epoch": 0.9073482428115016,
"grad_norm": 5.5,
"kl": 0.0816185474395752,
"learning_rate": 6.965811965811965e-07,
"loss": 0.0033,
"reward": 0.8984375,
"reward_std": 0.10642929188907146,
"rewards/probe_reward_fn": 0.8984375,
"step": 142
},
{
"completion_length": 225.28125,
"epoch": 0.9137380191693291,
"grad_norm": 4.15625,
"kl": 0.06285196542739868,
"learning_rate": 6.944444444444444e-07,
"loss": 0.0025,
"reward": 0.8203125,
"reward_std": 0.09916213154792786,
"rewards/probe_reward_fn": 0.8203125,
"step": 143
},
{
"completion_length": 173.046875,
"epoch": 0.9201277955271565,
"grad_norm": 3.375,
"kl": 0.036841511726379395,
"learning_rate": 6.923076923076922e-07,
"loss": 0.0015,
"reward": 0.9609375,
"reward_std": 0.05550474114716053,
"rewards/probe_reward_fn": 0.9609375,
"step": 144
},
{
"completion_length": 179.515625,
"epoch": 0.9265175718849841,
"grad_norm": 4.03125,
"kl": 0.05916517972946167,
"learning_rate": 6.901709401709402e-07,
"loss": 0.0024,
"reward": 0.828125,
"reward_std": 0.12852637842297554,
"rewards/probe_reward_fn": 0.828125,
"step": 145
},
{
"completion_length": 166.390625,
"epoch": 0.9329073482428115,
"grad_norm": 4.0625,
"kl": 0.05105920135974884,
"learning_rate": 6.88034188034188e-07,
"loss": 0.002,
"reward": 0.8671875,
"reward_std": 0.1151000615209341,
"rewards/probe_reward_fn": 0.8671875,
"step": 146
},
{
"completion_length": 159.5625,
"epoch": 0.939297124600639,
"grad_norm": 3.828125,
"kl": 0.057680994272232056,
"learning_rate": 6.858974358974358e-07,
"loss": 0.0023,
"reward": 0.8046875,
"reward_std": 0.09545470029115677,
"rewards/probe_reward_fn": 0.8046875,
"step": 147
},
{
"completion_length": 155.140625,
"epoch": 0.9456869009584664,
"grad_norm": 3.15625,
"kl": 0.04469466209411621,
"learning_rate": 6.837606837606838e-07,
"loss": 0.0018,
"reward": 0.96875,
"reward_std": 0.07312605157494545,
"rewards/probe_reward_fn": 0.96875,
"step": 148
},
{
"completion_length": 168.5625,
"epoch": 0.952076677316294,
"grad_norm": 4.65625,
"kl": 0.0644528865814209,
"learning_rate": 6.816239316239316e-07,
"loss": 0.0026,
"reward": 0.9140625,
"reward_std": 0.14869756996631622,
"rewards/probe_reward_fn": 0.9140625,
"step": 149
},
{
"completion_length": 171.453125,
"epoch": 0.9584664536741214,
"grad_norm": 5.21875,
"kl": 0.04670530557632446,
"learning_rate": 6.794871794871795e-07,
"loss": 0.0019,
"reward": 0.8828125,
"reward_std": 0.1019018143415451,
"rewards/probe_reward_fn": 0.8828125,
"step": 150
},
{
"completion_length": 162.84375,
"epoch": 0.9648562300319489,
"grad_norm": 2.96875,
"kl": 0.04780060052871704,
"learning_rate": 6.773504273504274e-07,
"loss": 0.0019,
"reward": 0.9765625,
"reward_std": 0.046501487493515015,
"rewards/probe_reward_fn": 0.9765625,
"step": 151
},
{
"completion_length": 114.4375,
"epoch": 0.9712460063897763,
"grad_norm": 5.71875,
"kl": 0.02422526478767395,
"learning_rate": 6.752136752136752e-07,
"loss": 0.001,
"reward": 0.921875,
"reward_std": 0.07425477169454098,
"rewards/probe_reward_fn": 0.921875,
"step": 152
},
{
"completion_length": 165.25,
"epoch": 0.9776357827476039,
"grad_norm": 3.1875,
"kl": 0.05895336717367172,
"learning_rate": 6.730769230769231e-07,
"loss": 0.0024,
"reward": 0.984375,
"reward_std": 0.0289318785071373,
"rewards/probe_reward_fn": 0.984375,
"step": 153
},
{
"completion_length": 193.5,
"epoch": 0.9840255591054313,
"grad_norm": 3.359375,
"kl": 0.049422815442085266,
"learning_rate": 6.709401709401708e-07,
"loss": 0.002,
"reward": 0.8671875,
"reward_std": 0.051028965041041374,
"rewards/probe_reward_fn": 0.8671875,
"step": 154
},
{
"completion_length": 220.203125,
"epoch": 0.9904153354632588,
"grad_norm": 7.125,
"kl": 0.07123112678527832,
"learning_rate": 6.688034188034188e-07,
"loss": 0.0028,
"reward": 0.734375,
"reward_std": 0.1759442389011383,
"rewards/probe_reward_fn": 0.734375,
"step": 155
},
{
"completion_length": 165.265625,
"epoch": 0.9968051118210862,
"grad_norm": 4.4375,
"kl": 0.05888044834136963,
"learning_rate": 6.666666666666666e-07,
"loss": 0.0024,
"reward": 0.921875,
"reward_std": 0.0936255231499672,
"rewards/probe_reward_fn": 0.921875,
"step": 156
},
{
"completion_length": 183.09375,
"epoch": 1.0,
"grad_norm": 2.5,
"kl": 0.06583547592163086,
"learning_rate": 6.645299145299144e-07,
"loss": 0.0013,
"reward": 0.9375,
"reward_std": 0.06681530922651291,
"rewards/probe_reward_fn": 0.9375,
"step": 157
}
],
"logging_steps": 1,
"max_steps": 468,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
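
A minimal sketch, assuming the JSON above is saved locally as trainer_state.json and that matplotlib is installed, of how the log_history entries could be loaded and the reward / KL curves plotted. The field names ("step", "reward", "kl") are taken directly from the entries above; the input path and output filename are illustrative, not part of the checkpoint.

import json
import matplotlib.pyplot as plt

# Load the trainer state written by the GRPO run (local path assumed).
with open("trainer_state.json") as f:
    state = json.load(f)

log = state["log_history"]
steps = [entry["step"] for entry in log]
rewards = [entry["reward"] for entry in log]
kls = [entry["kl"] for entry in log]

# Plot the probe reward and the KL term over the 157 logged steps.
fig, (ax_reward, ax_kl) = plt.subplots(2, 1, sharex=True)
ax_reward.plot(steps, rewards)
ax_reward.set_ylabel("reward (probe_reward_fn)")
ax_kl.plot(steps, kls)
ax_kl.set_ylabel("kl")
ax_kl.set_xlabel("step")
fig.tight_layout()
fig.savefig("grpo_checkpoint_157_curves.png")  # illustrative output name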