{
"best_metric": 0.7143663763999939,
"best_model_checkpoint": "data/Llama-31-8B_task-2_120-samples_config-4/checkpoint-187",
"epoch": 40.90909090909091,
"eval_steps": 500,
"global_step": 225,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.18181818181818182,
"grad_norm": 0.7671908140182495,
"learning_rate": 1.3333333333333336e-07,
"loss": 1.1716,
"step": 1
},
{
"epoch": 0.36363636363636365,
"grad_norm": 0.6510481834411621,
"learning_rate": 2.666666666666667e-07,
"loss": 1.0331,
"step": 2
},
{
"epoch": 0.7272727272727273,
"grad_norm": 0.702520489692688,
"learning_rate": 5.333333333333335e-07,
"loss": 1.1544,
"step": 4
},
{
"epoch": 0.9090909090909091,
"eval_loss": 1.1236884593963623,
"eval_runtime": 24.3567,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 5
},
{
"epoch": 1.0909090909090908,
"grad_norm": 0.7136379480361938,
"learning_rate": 8.000000000000001e-07,
"loss": 1.0735,
"step": 6
},
{
"epoch": 1.4545454545454546,
"grad_norm": 0.7056083083152771,
"learning_rate": 1.066666666666667e-06,
"loss": 1.1336,
"step": 8
},
{
"epoch": 1.8181818181818183,
"grad_norm": 0.7233437895774841,
"learning_rate": 1.3333333333333334e-06,
"loss": 1.132,
"step": 10
},
{
"epoch": 2.0,
"eval_loss": 1.1193286180496216,
"eval_runtime": 24.3549,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 11
},
{
"epoch": 2.1818181818181817,
"grad_norm": 0.6547144055366516,
"learning_rate": 1.6000000000000001e-06,
"loss": 1.0473,
"step": 12
},
{
"epoch": 2.5454545454545454,
"grad_norm": 0.7151400446891785,
"learning_rate": 1.8666666666666669e-06,
"loss": 1.1478,
"step": 14
},
{
"epoch": 2.909090909090909,
"grad_norm": 0.7789710760116577,
"learning_rate": 2.133333333333334e-06,
"loss": 1.0689,
"step": 16
},
{
"epoch": 2.909090909090909,
"eval_loss": 1.1136311292648315,
"eval_runtime": 24.3553,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 16
},
{
"epoch": 3.2727272727272725,
"grad_norm": 0.6943601369857788,
"learning_rate": 2.4000000000000003e-06,
"loss": 1.1271,
"step": 18
},
{
"epoch": 3.6363636363636362,
"grad_norm": 0.691636860370636,
"learning_rate": 2.666666666666667e-06,
"loss": 1.0717,
"step": 20
},
{
"epoch": 4.0,
"grad_norm": 0.6316518187522888,
"learning_rate": 2.9333333333333338e-06,
"loss": 1.0956,
"step": 22
},
{
"epoch": 4.0,
"eval_loss": 1.1010679006576538,
"eval_runtime": 24.358,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 22
},
{
"epoch": 4.363636363636363,
"grad_norm": 0.7023854851722717,
"learning_rate": 3.2000000000000003e-06,
"loss": 1.1054,
"step": 24
},
{
"epoch": 4.7272727272727275,
"grad_norm": 0.7613805532455444,
"learning_rate": 3.4666666666666672e-06,
"loss": 1.1157,
"step": 26
},
{
"epoch": 4.909090909090909,
"eval_loss": 1.087128758430481,
"eval_runtime": 24.3561,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 27
},
{
"epoch": 5.090909090909091,
"grad_norm": 0.6336562633514404,
"learning_rate": 3.7333333333333337e-06,
"loss": 0.9922,
"step": 28
},
{
"epoch": 5.454545454545454,
"grad_norm": 0.7272669076919556,
"learning_rate": 4.000000000000001e-06,
"loss": 1.0947,
"step": 30
},
{
"epoch": 5.818181818181818,
"grad_norm": 0.7287123203277588,
"learning_rate": 4.266666666666668e-06,
"loss": 1.0778,
"step": 32
},
{
"epoch": 6.0,
"eval_loss": 1.063869833946228,
"eval_runtime": 24.3649,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 33
},
{
"epoch": 6.181818181818182,
"grad_norm": 0.6734246015548706,
"learning_rate": 4.533333333333334e-06,
"loss": 1.038,
"step": 34
},
{
"epoch": 6.545454545454545,
"grad_norm": 0.628820538520813,
"learning_rate": 4.800000000000001e-06,
"loss": 1.03,
"step": 36
},
{
"epoch": 6.909090909090909,
"grad_norm": 0.6829433441162109,
"learning_rate": 5.0666666666666676e-06,
"loss": 1.0458,
"step": 38
},
{
"epoch": 6.909090909090909,
"eval_loss": 1.039298176765442,
"eval_runtime": 24.363,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 38
},
{
"epoch": 7.2727272727272725,
"grad_norm": 0.684455931186676,
"learning_rate": 5.333333333333334e-06,
"loss": 1.0333,
"step": 40
},
{
"epoch": 7.636363636363637,
"grad_norm": 0.6354673504829407,
"learning_rate": 5.600000000000001e-06,
"loss": 0.9913,
"step": 42
},
{
"epoch": 8.0,
"grad_norm": 0.5571680665016174,
"learning_rate": 5.8666666666666675e-06,
"loss": 0.9854,
"step": 44
},
{
"epoch": 8.0,
"eval_loss": 1.0027111768722534,
"eval_runtime": 24.3512,
"eval_samples_per_second": 0.986,
"eval_steps_per_second": 0.986,
"step": 44
},
{
"epoch": 8.363636363636363,
"grad_norm": 0.5560793876647949,
"learning_rate": 6.133333333333334e-06,
"loss": 0.9371,
"step": 46
},
{
"epoch": 8.727272727272727,
"grad_norm": 0.6101187467575073,
"learning_rate": 6.4000000000000006e-06,
"loss": 0.9996,
"step": 48
},
{
"epoch": 8.909090909090908,
"eval_loss": 0.9696131348609924,
"eval_runtime": 24.3608,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 49
},
{
"epoch": 9.090909090909092,
"grad_norm": 0.5255017280578613,
"learning_rate": 6.666666666666667e-06,
"loss": 0.9569,
"step": 50
},
{
"epoch": 9.454545454545455,
"grad_norm": 0.5077732801437378,
"learning_rate": 6.9333333333333344e-06,
"loss": 0.9602,
"step": 52
},
{
"epoch": 9.818181818181818,
"grad_norm": 0.4885198771953583,
"learning_rate": 7.2000000000000005e-06,
"loss": 0.8991,
"step": 54
},
{
"epoch": 10.0,
"eval_loss": 0.9317263960838318,
"eval_runtime": 24.3522,
"eval_samples_per_second": 0.986,
"eval_steps_per_second": 0.986,
"step": 55
},
{
"epoch": 10.181818181818182,
"grad_norm": 0.42795872688293457,
"learning_rate": 7.4666666666666675e-06,
"loss": 0.9282,
"step": 56
},
{
"epoch": 10.545454545454545,
"grad_norm": 0.3717321455478668,
"learning_rate": 7.733333333333334e-06,
"loss": 0.9173,
"step": 58
},
{
"epoch": 10.909090909090908,
"grad_norm": 0.37956222891807556,
"learning_rate": 8.000000000000001e-06,
"loss": 0.8897,
"step": 60
},
{
"epoch": 10.909090909090908,
"eval_loss": 0.9051777720451355,
"eval_runtime": 24.3554,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 60
},
{
"epoch": 11.272727272727273,
"grad_norm": 0.32997962832450867,
"learning_rate": 8.266666666666667e-06,
"loss": 0.8511,
"step": 62
},
{
"epoch": 11.636363636363637,
"grad_norm": 0.3067737817764282,
"learning_rate": 8.533333333333335e-06,
"loss": 0.8714,
"step": 64
},
{
"epoch": 12.0,
"grad_norm": 0.3460928797721863,
"learning_rate": 8.8e-06,
"loss": 0.8711,
"step": 66
},
{
"epoch": 12.0,
"eval_loss": 0.878821074962616,
"eval_runtime": 24.3541,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 66
},
{
"epoch": 12.363636363636363,
"grad_norm": 0.29182004928588867,
"learning_rate": 9.066666666666667e-06,
"loss": 0.8274,
"step": 68
},
{
"epoch": 12.727272727272727,
"grad_norm": 0.31693214178085327,
"learning_rate": 9.333333333333334e-06,
"loss": 0.8809,
"step": 70
},
{
"epoch": 12.909090909090908,
"eval_loss": 0.8587873578071594,
"eval_runtime": 24.3566,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 71
},
{
"epoch": 13.090909090909092,
"grad_norm": 0.29670482873916626,
"learning_rate": 9.600000000000001e-06,
"loss": 0.8263,
"step": 72
},
{
"epoch": 13.454545454545455,
"grad_norm": 0.2604750096797943,
"learning_rate": 9.866666666666668e-06,
"loss": 0.8237,
"step": 74
},
{
"epoch": 13.818181818181818,
"grad_norm": 0.2680836021900177,
"learning_rate": 9.999945845889795e-06,
"loss": 0.7972,
"step": 76
},
{
"epoch": 14.0,
"eval_loss": 0.8367733955383301,
"eval_runtime": 24.3654,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 77
},
{
"epoch": 14.181818181818182,
"grad_norm": 0.2775014638900757,
"learning_rate": 9.999512620046523e-06,
"loss": 0.8129,
"step": 78
},
{
"epoch": 14.545454545454545,
"grad_norm": 0.24960099160671234,
"learning_rate": 9.99864620589731e-06,
"loss": 0.8163,
"step": 80
},
{
"epoch": 14.909090909090908,
"grad_norm": 0.2805323004722595,
"learning_rate": 9.99734667851357e-06,
"loss": 0.8156,
"step": 82
},
{
"epoch": 14.909090909090908,
"eval_loss": 0.8207753300666809,
"eval_runtime": 24.3571,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 82
},
{
"epoch": 15.272727272727273,
"grad_norm": 0.23699168860912323,
"learning_rate": 9.995614150494293e-06,
"loss": 0.7658,
"step": 84
},
{
"epoch": 15.636363636363637,
"grad_norm": 0.24089692533016205,
"learning_rate": 9.993448771956285e-06,
"loss": 0.8111,
"step": 86
},
{
"epoch": 16.0,
"grad_norm": 0.24843314290046692,
"learning_rate": 9.99085073052117e-06,
"loss": 0.7815,
"step": 88
},
{
"epoch": 16.0,
"eval_loss": 0.8057171702384949,
"eval_runtime": 24.3555,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 88
},
{
"epoch": 16.363636363636363,
"grad_norm": 0.2455184906721115,
"learning_rate": 9.987820251299121e-06,
"loss": 0.7832,
"step": 90
},
{
"epoch": 16.727272727272727,
"grad_norm": 0.23427604138851166,
"learning_rate": 9.984357596869369e-06,
"loss": 0.7492,
"step": 92
},
{
"epoch": 16.90909090909091,
"eval_loss": 0.7956312298774719,
"eval_runtime": 24.3597,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 93
},
{
"epoch": 17.09090909090909,
"grad_norm": 0.2534084618091583,
"learning_rate": 9.980463067257437e-06,
"loss": 0.774,
"step": 94
},
{
"epoch": 17.454545454545453,
"grad_norm": 0.2455044388771057,
"learning_rate": 9.976136999909156e-06,
"loss": 0.7431,
"step": 96
},
{
"epoch": 17.818181818181817,
"grad_norm": 0.22460192441940308,
"learning_rate": 9.971379769661422e-06,
"loss": 0.7587,
"step": 98
},
{
"epoch": 18.0,
"eval_loss": 0.7855010628700256,
"eval_runtime": 24.3575,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 99
},
{
"epoch": 18.181818181818183,
"grad_norm": 0.2319430559873581,
"learning_rate": 9.966191788709716e-06,
"loss": 0.7886,
"step": 100
},
{
"epoch": 18.545454545454547,
"grad_norm": 0.22566601634025574,
"learning_rate": 9.960573506572391e-06,
"loss": 0.7468,
"step": 102
},
{
"epoch": 18.90909090909091,
"grad_norm": 0.2197207510471344,
"learning_rate": 9.95452541005172e-06,
"loss": 0.7483,
"step": 104
},
{
"epoch": 18.90909090909091,
"eval_loss": 0.7779585719108582,
"eval_runtime": 24.351,
"eval_samples_per_second": 0.986,
"eval_steps_per_second": 0.986,
"step": 104
},
{
"epoch": 19.272727272727273,
"grad_norm": 0.25423386693000793,
"learning_rate": 9.948048023191728e-06,
"loss": 0.7066,
"step": 106
},
{
"epoch": 19.636363636363637,
"grad_norm": 0.27388209104537964,
"learning_rate": 9.941141907232766e-06,
"loss": 0.7644,
"step": 108
},
{
"epoch": 20.0,
"grad_norm": 0.21174727380275726,
"learning_rate": 9.933807660562898e-06,
"loss": 0.7296,
"step": 110
},
{
"epoch": 20.0,
"eval_loss": 0.7694642543792725,
"eval_runtime": 24.3585,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 110
},
{
"epoch": 20.363636363636363,
"grad_norm": 0.21564796566963196,
"learning_rate": 9.926045918666045e-06,
"loss": 0.7005,
"step": 112
},
{
"epoch": 20.727272727272727,
"grad_norm": 0.21516019105911255,
"learning_rate": 9.91785735406693e-06,
"loss": 0.7441,
"step": 114
},
{
"epoch": 20.90909090909091,
"eval_loss": 0.7629389762878418,
"eval_runtime": 24.3562,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 115
},
{
"epoch": 21.09090909090909,
"grad_norm": 0.25928786396980286,
"learning_rate": 9.909242676272797e-06,
"loss": 0.7448,
"step": 116
},
{
"epoch": 21.454545454545453,
"grad_norm": 0.22024983167648315,
"learning_rate": 9.90020263171194e-06,
"loss": 0.7277,
"step": 118
},
{
"epoch": 21.818181818181817,
"grad_norm": 0.22692517936229706,
"learning_rate": 9.890738003669029e-06,
"loss": 0.7176,
"step": 120
},
{
"epoch": 22.0,
"eval_loss": 0.7560660243034363,
"eval_runtime": 24.3539,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 121
},
{
"epoch": 22.181818181818183,
"grad_norm": 0.2096530646085739,
"learning_rate": 9.880849612217238e-06,
"loss": 0.6737,
"step": 122
},
{
"epoch": 22.545454545454547,
"grad_norm": 0.23917944729328156,
"learning_rate": 9.870538314147194e-06,
"loss": 0.7373,
"step": 124
},
{
"epoch": 22.90909090909091,
"grad_norm": 0.2294284701347351,
"learning_rate": 9.859805002892733e-06,
"loss": 0.7033,
"step": 126
},
{
"epoch": 22.90909090909091,
"eval_loss": 0.7507641911506653,
"eval_runtime": 24.3597,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 126
},
{
"epoch": 23.272727272727273,
"grad_norm": 0.22704288363456726,
"learning_rate": 9.84865060845349e-06,
"loss": 0.6932,
"step": 128
},
{
"epoch": 23.636363636363637,
"grad_norm": 0.2241230458021164,
"learning_rate": 9.83707609731432e-06,
"loss": 0.6907,
"step": 130
},
{
"epoch": 24.0,
"grad_norm": 0.2692793011665344,
"learning_rate": 9.825082472361558e-06,
"loss": 0.6906,
"step": 132
},
{
"epoch": 24.0,
"eval_loss": 0.7443225383758545,
"eval_runtime": 24.3494,
"eval_samples_per_second": 0.986,
"eval_steps_per_second": 0.986,
"step": 132
},
{
"epoch": 24.363636363636363,
"grad_norm": 0.2639993727207184,
"learning_rate": 9.812670772796113e-06,
"loss": 0.7003,
"step": 134
},
{
"epoch": 24.727272727272727,
"grad_norm": 0.2364564836025238,
"learning_rate": 9.799842074043438e-06,
"loss": 0.6954,
"step": 136
},
{
"epoch": 24.90909090909091,
"eval_loss": 0.7396196722984314,
"eval_runtime": 24.3588,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 137
},
{
"epoch": 25.09090909090909,
"grad_norm": 0.25801482796669006,
"learning_rate": 9.786597487660336e-06,
"loss": 0.6571,
"step": 138
},
{
"epoch": 25.454545454545453,
"grad_norm": 0.23688581585884094,
"learning_rate": 9.77293816123866e-06,
"loss": 0.6904,
"step": 140
},
{
"epoch": 25.818181818181817,
"grad_norm": 0.2453926056623459,
"learning_rate": 9.75886527830587e-06,
"loss": 0.6578,
"step": 142
},
{
"epoch": 26.0,
"eval_loss": 0.734368622303009,
"eval_runtime": 24.3632,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 143
},
{
"epoch": 26.181818181818183,
"grad_norm": 0.2570464611053467,
"learning_rate": 9.744380058222483e-06,
"loss": 0.6708,
"step": 144
},
{
"epoch": 26.545454545454547,
"grad_norm": 0.2480405867099762,
"learning_rate": 9.729483756076436e-06,
"loss": 0.6975,
"step": 146
},
{
"epoch": 26.90909090909091,
"grad_norm": 0.2474363148212433,
"learning_rate": 9.714177662574316e-06,
"loss": 0.6495,
"step": 148
},
{
"epoch": 26.90909090909091,
"eval_loss": 0.7309517860412598,
"eval_runtime": 24.351,
"eval_samples_per_second": 0.986,
"eval_steps_per_second": 0.986,
"step": 148
},
{
"epoch": 27.272727272727273,
"grad_norm": 0.2587747871875763,
"learning_rate": 9.698463103929542e-06,
"loss": 0.6405,
"step": 150
},
{
"epoch": 27.636363636363637,
"grad_norm": 0.2785438597202301,
"learning_rate": 9.682341441747446e-06,
"loss": 0.6908,
"step": 152
},
{
"epoch": 28.0,
"grad_norm": 0.25493577122688293,
"learning_rate": 9.665814072907293e-06,
"loss": 0.6391,
"step": 154
},
{
"epoch": 28.0,
"eval_loss": 0.7269113063812256,
"eval_runtime": 24.36,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 154
},
{
"epoch": 28.363636363636363,
"grad_norm": 0.25702783465385437,
"learning_rate": 9.648882429441258e-06,
"loss": 0.6446,
"step": 156
},
{
"epoch": 28.727272727272727,
"grad_norm": 0.2598085105419159,
"learning_rate": 9.63154797841033e-06,
"loss": 0.6442,
"step": 158
},
{
"epoch": 28.90909090909091,
"eval_loss": 0.723728358745575,
"eval_runtime": 24.3516,
"eval_samples_per_second": 0.986,
"eval_steps_per_second": 0.986,
"step": 159
},
{
"epoch": 29.09090909090909,
"grad_norm": 0.27804017066955566,
"learning_rate": 9.613812221777212e-06,
"loss": 0.6514,
"step": 160
},
{
"epoch": 29.454545454545453,
"grad_norm": 0.2686798572540283,
"learning_rate": 9.595676696276173e-06,
"loss": 0.6613,
"step": 162
},
{
"epoch": 29.818181818181817,
"grad_norm": 0.275974303483963,
"learning_rate": 9.577142973279896e-06,
"loss": 0.6268,
"step": 164
},
{
"epoch": 30.0,
"eval_loss": 0.7199351787567139,
"eval_runtime": 24.3608,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 165
},
{
"epoch": 30.181818181818183,
"grad_norm": 0.2688060700893402,
"learning_rate": 9.55821265866333e-06,
"loss": 0.6002,
"step": 166
},
{
"epoch": 30.545454545454547,
"grad_norm": 0.27310389280319214,
"learning_rate": 9.538887392664544e-06,
"loss": 0.6103,
"step": 168
},
{
"epoch": 30.90909090909091,
"grad_norm": 0.294357031583786,
"learning_rate": 9.519168849742603e-06,
"loss": 0.6536,
"step": 170
},
{
"epoch": 30.90909090909091,
"eval_loss": 0.71826171875,
"eval_runtime": 24.3503,
"eval_samples_per_second": 0.986,
"eval_steps_per_second": 0.986,
"step": 170
},
{
"epoch": 31.272727272727273,
"grad_norm": 0.2845761775970459,
"learning_rate": 9.499058738432492e-06,
"loss": 0.6384,
"step": 172
},
{
"epoch": 31.636363636363637,
"grad_norm": 0.30121341347694397,
"learning_rate": 9.478558801197065e-06,
"loss": 0.6191,
"step": 174
},
{
"epoch": 32.0,
"grad_norm": 0.2854570746421814,
"learning_rate": 9.457670814276083e-06,
"loss": 0.6092,
"step": 176
},
{
"epoch": 32.0,
"eval_loss": 0.7163482308387756,
"eval_runtime": 24.3583,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 176
},
{
"epoch": 32.36363636363637,
"grad_norm": 0.3030169904232025,
"learning_rate": 9.436396587532297e-06,
"loss": 0.5929,
"step": 178
},
{
"epoch": 32.72727272727273,
"grad_norm": 0.2960076928138733,
"learning_rate": 9.414737964294636e-06,
"loss": 0.621,
"step": 180
},
{
"epoch": 32.90909090909091,
"eval_loss": 0.7148606181144714,
"eval_runtime": 24.3579,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 181
},
{
"epoch": 33.09090909090909,
"grad_norm": 0.3322046995162964,
"learning_rate": 9.392696821198488e-06,
"loss": 0.6048,
"step": 182
},
{
"epoch": 33.45454545454545,
"grad_norm": 0.335716187953949,
"learning_rate": 9.370275068023097e-06,
"loss": 0.6233,
"step": 184
},
{
"epoch": 33.81818181818182,
"grad_norm": 0.3075459599494934,
"learning_rate": 9.347474647526095e-06,
"loss": 0.5823,
"step": 186
},
{
"epoch": 34.0,
"eval_loss": 0.7143663763999939,
"eval_runtime": 24.3571,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 187
},
{
"epoch": 34.18181818181818,
"grad_norm": 0.3422604203224182,
"learning_rate": 9.324297535275156e-06,
"loss": 0.5754,
"step": 188
},
{
"epoch": 34.54545454545455,
"grad_norm": 0.33866289258003235,
"learning_rate": 9.30074573947683e-06,
"loss": 0.5951,
"step": 190
},
{
"epoch": 34.90909090909091,
"grad_norm": 0.37743914127349854,
"learning_rate": 9.276821300802535e-06,
"loss": 0.5651,
"step": 192
},
{
"epoch": 34.90909090909091,
"eval_loss": 0.7156122326850891,
"eval_runtime": 24.3497,
"eval_samples_per_second": 0.986,
"eval_steps_per_second": 0.986,
"step": 192
},
{
"epoch": 35.27272727272727,
"grad_norm": 0.3403712511062622,
"learning_rate": 9.25252629221175e-06,
"loss": 0.5877,
"step": 194
},
{
"epoch": 35.63636363636363,
"grad_norm": 0.32227441668510437,
"learning_rate": 9.227862818772392e-06,
"loss": 0.5569,
"step": 196
},
{
"epoch": 36.0,
"grad_norm": 0.3595390021800995,
"learning_rate": 9.202833017478421e-06,
"loss": 0.5951,
"step": 198
},
{
"epoch": 36.0,
"eval_loss": 0.7163762450218201,
"eval_runtime": 24.3582,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 198
},
{
"epoch": 36.36363636363637,
"grad_norm": 0.3400909900665283,
"learning_rate": 9.177439057064684e-06,
"loss": 0.552,
"step": 200
},
{
"epoch": 36.72727272727273,
"grad_norm": 0.3983656167984009,
"learning_rate": 9.151683137818989e-06,
"loss": 0.5637,
"step": 202
},
{
"epoch": 36.90909090909091,
"eval_loss": 0.7194623947143555,
"eval_runtime": 24.3553,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 203
},
{
"epoch": 37.09090909090909,
"grad_norm": 0.4176684319972992,
"learning_rate": 9.125567491391476e-06,
"loss": 0.5684,
"step": 204
},
{
"epoch": 37.45454545454545,
"grad_norm": 0.3539072275161743,
"learning_rate": 9.099094380601244e-06,
"loss": 0.5392,
"step": 206
},
{
"epoch": 37.81818181818182,
"grad_norm": 0.40454044938087463,
"learning_rate": 9.072266099240286e-06,
"loss": 0.5669,
"step": 208
},
{
"epoch": 38.0,
"eval_loss": 0.7218795418739319,
"eval_runtime": 24.3543,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 209
},
{
"epoch": 38.18181818181818,
"grad_norm": 0.3737054765224457,
"learning_rate": 9.045084971874738e-06,
"loss": 0.5348,
"step": 210
},
{
"epoch": 38.54545454545455,
"grad_norm": 0.37676355242729187,
"learning_rate": 9.017553353643479e-06,
"loss": 0.5322,
"step": 212
},
{
"epoch": 38.90909090909091,
"grad_norm": 0.41781264543533325,
"learning_rate": 8.989673630054044e-06,
"loss": 0.5613,
"step": 214
},
{
"epoch": 38.90909090909091,
"eval_loss": 0.7278082966804504,
"eval_runtime": 24.3537,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 214
},
{
"epoch": 39.27272727272727,
"grad_norm": 0.41486313939094543,
"learning_rate": 8.961448216775955e-06,
"loss": 0.5221,
"step": 216
},
{
"epoch": 39.63636363636363,
"grad_norm": 0.40721336007118225,
"learning_rate": 8.932879559431392e-06,
"loss": 0.5181,
"step": 218
},
{
"epoch": 40.0,
"grad_norm": 0.46421483159065247,
"learning_rate": 8.903970133383297e-06,
"loss": 0.5156,
"step": 220
},
{
"epoch": 40.0,
"eval_loss": 0.7309436798095703,
"eval_runtime": 24.3653,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 220
},
{
"epoch": 40.36363636363637,
"grad_norm": 0.5447797179222107,
"learning_rate": 8.874722443520898e-06,
"loss": 0.5196,
"step": 222
},
{
"epoch": 40.72727272727273,
"grad_norm": 0.44560515880584717,
"learning_rate": 8.845139024042664e-06,
"loss": 0.5044,
"step": 224
},
{
"epoch": 40.90909090909091,
"eval_loss": 0.739515483379364,
"eval_runtime": 24.3585,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 225
},
{
"epoch": 40.90909090909091,
"step": 225,
"total_flos": 3.429455470966866e+17,
"train_loss": 0.7736989471647474,
"train_runtime": 12307.7975,
"train_samples_per_second": 1.072,
"train_steps_per_second": 0.061
}
],
"logging_steps": 2,
"max_steps": 750,
"num_input_tokens_seen": 0,
"num_train_epochs": 150,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 7,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.429455470966866e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}