{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9900497512437811,
"eval_steps": 35,
"global_step": 350,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.028429282160625444,
"grad_norm": 2.917722702026367,
"learning_rate": 1.4285714285714285e-05,
"loss": 1.3615,
"step": 5
},
{
"epoch": 0.05685856432125089,
"grad_norm": 2.65405011177063,
"learning_rate": 2.857142857142857e-05,
"loss": 1.2182,
"step": 10
},
{
"epoch": 0.08528784648187633,
"grad_norm": 4.0127272605896,
"learning_rate": 4.2857142857142856e-05,
"loss": 0.8944,
"step": 15
},
{
"epoch": 0.11371712864250177,
"grad_norm": 0.8177616000175476,
"learning_rate": 5.714285714285714e-05,
"loss": 0.512,
"step": 20
},
{
"epoch": 0.14214641080312723,
"grad_norm": 0.3800067901611328,
"learning_rate": 7.142857142857143e-05,
"loss": 0.3435,
"step": 25
},
{
"epoch": 0.17057569296375266,
"grad_norm": 0.3455258309841156,
"learning_rate": 8.571428571428571e-05,
"loss": 0.3027,
"step": 30
},
{
"epoch": 0.19900497512437812,
"grad_norm": 0.3401183485984802,
"learning_rate": 0.0001,
"loss": 0.3045,
"step": 35
},
{
"epoch": 0.19900497512437812,
"eval_loss": 0.2556740641593933,
"eval_runtime": 1374.6693,
"eval_samples_per_second": 1.819,
"eval_steps_per_second": 1.819,
"step": 35
},
{
"epoch": 0.22743425728500355,
"grad_norm": 0.3088541328907013,
"learning_rate": 9.993784606094612e-05,
"loss": 0.2799,
"step": 40
},
{
"epoch": 0.255863539445629,
"grad_norm": 0.2526684105396271,
"learning_rate": 9.975153876827008e-05,
"loss": 0.2779,
"step": 45
},
{
"epoch": 0.28429282160625446,
"grad_norm": 0.3618779182434082,
"learning_rate": 9.944154131125642e-05,
"loss": 0.2781,
"step": 50
},
{
"epoch": 0.31272210376687987,
"grad_norm": 0.32333430647850037,
"learning_rate": 9.900862439242719e-05,
"loss": 0.2595,
"step": 55
},
{
"epoch": 0.3411513859275053,
"grad_norm": 0.328197717666626,
"learning_rate": 9.84538643114539e-05,
"loss": 0.275,
"step": 60
},
{
"epoch": 0.3695806680881308,
"grad_norm": 0.28067663311958313,
"learning_rate": 9.777864028930705e-05,
"loss": 0.2333,
"step": 65
},
{
"epoch": 0.39800995024875624,
"grad_norm": 0.3941590487957001,
"learning_rate": 9.698463103929542e-05,
"loss": 0.2524,
"step": 70
},
{
"epoch": 0.39800995024875624,
"eval_loss": 0.23971079289913177,
"eval_runtime": 1374.4109,
"eval_samples_per_second": 1.819,
"eval_steps_per_second": 1.819,
"step": 70
},
{
"epoch": 0.42643923240938164,
"grad_norm": 0.2214515656232834,
"learning_rate": 9.607381059352038e-05,
"loss": 0.2585,
"step": 75
},
{
"epoch": 0.4548685145700071,
"grad_norm": 0.27422434091567993,
"learning_rate": 9.504844339512095e-05,
"loss": 0.2122,
"step": 80
},
{
"epoch": 0.48329779673063256,
"grad_norm": 0.28153732419013977,
"learning_rate": 9.391107866851143e-05,
"loss": 0.2397,
"step": 85
},
{
"epoch": 0.511727078891258,
"grad_norm": 0.2967371344566345,
"learning_rate": 9.266454408160779e-05,
"loss": 0.2192,
"step": 90
},
{
"epoch": 0.5401563610518835,
"grad_norm": 0.29277151823043823,
"learning_rate": 9.131193871579975e-05,
"loss": 0.234,
"step": 95
},
{
"epoch": 0.5685856432125089,
"grad_norm": 0.30613964796066284,
"learning_rate": 8.985662536114613e-05,
"loss": 0.2376,
"step": 100
},
{
"epoch": 0.5970149253731343,
"grad_norm": 0.27135342359542847,
"learning_rate": 8.83022221559489e-05,
"loss": 0.245,
"step": 105
},
{
"epoch": 0.5970149253731343,
"eval_loss": 0.23002392053604126,
"eval_runtime": 1375.9551,
"eval_samples_per_second": 1.817,
"eval_steps_per_second": 1.817,
"step": 105
},
{
"epoch": 0.6254442075337597,
"grad_norm": 0.23706920444965363,
"learning_rate": 8.665259359149132e-05,
"loss": 0.2193,
"step": 110
},
{
"epoch": 0.6538734896943852,
"grad_norm": 0.3904951214790344,
"learning_rate": 8.491184090430364e-05,
"loss": 0.231,
"step": 115
},
{
"epoch": 0.6823027718550106,
"grad_norm": 0.23460106551647186,
"learning_rate": 8.308429187984297e-05,
"loss": 0.229,
"step": 120
},
{
"epoch": 0.7107320540156361,
"grad_norm": 0.29704052209854126,
"learning_rate": 8.117449009293668e-05,
"loss": 0.2277,
"step": 125
},
{
"epoch": 0.7391613361762616,
"grad_norm": 0.3585428297519684,
"learning_rate": 7.91871836117395e-05,
"loss": 0.2356,
"step": 130
},
{
"epoch": 0.767590618336887,
"grad_norm": 0.3541390895843506,
"learning_rate": 7.712731319328798e-05,
"loss": 0.2521,
"step": 135
},
{
"epoch": 0.7960199004975125,
"grad_norm": 0.3026750087738037,
"learning_rate": 7.500000000000001e-05,
"loss": 0.2363,
"step": 140
},
{
"epoch": 0.7960199004975125,
"eval_loss": 0.2129843682050705,
"eval_runtime": 1375.37,
"eval_samples_per_second": 1.818,
"eval_steps_per_second": 1.818,
"step": 140
},
{
"epoch": 0.8244491826581379,
"grad_norm": 0.3274117410182953,
"learning_rate": 7.281053286765815e-05,
"loss": 0.2308,
"step": 145
},
{
"epoch": 0.8528784648187633,
"grad_norm": 0.2670952379703522,
"learning_rate": 7.056435515653059e-05,
"loss": 0.2456,
"step": 150
},
{
"epoch": 0.8813077469793887,
"grad_norm": 0.36056894063949585,
"learning_rate": 6.826705121831976e-05,
"loss": 0.249,
"step": 155
},
{
"epoch": 0.9097370291400142,
"grad_norm": 0.3259892761707306,
"learning_rate": 6.592433251258423e-05,
"loss": 0.2312,
"step": 160
},
{
"epoch": 0.9381663113006397,
"grad_norm": 0.39926743507385254,
"learning_rate": 6.354202340715026e-05,
"loss": 0.2157,
"step": 165
},
{
"epoch": 0.9665955934612651,
"grad_norm": 0.23705609142780304,
"learning_rate": 6.112604669781572e-05,
"loss": 0.2128,
"step": 170
},
{
"epoch": 0.9950248756218906,
"grad_norm": 0.27746665477752686,
"learning_rate": 5.868240888334653e-05,
"loss": 0.2383,
"step": 175
},
{
"epoch": 0.9950248756218906,
"eval_loss": 0.2247208058834076,
"eval_runtime": 1375.4951,
"eval_samples_per_second": 1.818,
"eval_steps_per_second": 1.818,
"step": 175
},
{
"epoch": 1.023454157782516,
"grad_norm": 0.2372225672006607,
"learning_rate": 5.621718523237427e-05,
"loss": 0.2187,
"step": 180
},
{
"epoch": 1.0518834399431414,
"grad_norm": 0.2701272964477539,
"learning_rate": 5.373650467932122e-05,
"loss": 0.2024,
"step": 185
},
{
"epoch": 1.080312722103767,
"grad_norm": 0.32787126302719116,
"learning_rate": 5.124653458690365e-05,
"loss": 0.221,
"step": 190
},
{
"epoch": 1.1087420042643923,
"grad_norm": 0.2735784351825714,
"learning_rate": 4.875346541309637e-05,
"loss": 0.199,
"step": 195
},
{
"epoch": 1.1371712864250179,
"grad_norm": 0.35804641246795654,
"learning_rate": 4.626349532067879e-05,
"loss": 0.2299,
"step": 200
},
{
"epoch": 1.1656005685856432,
"grad_norm": 0.24261139333248138,
"learning_rate": 4.378281476762576e-05,
"loss": 0.2131,
"step": 205
},
{
"epoch": 1.1940298507462686,
"grad_norm": 0.23894722759723663,
"learning_rate": 4.131759111665349e-05,
"loss": 0.2067,
"step": 210
},
{
"epoch": 1.1940298507462686,
"eval_loss": 0.20743593573570251,
"eval_runtime": 1374.8457,
"eval_samples_per_second": 1.818,
"eval_steps_per_second": 1.818,
"step": 210
},
{
"epoch": 1.2224591329068941,
"grad_norm": 0.26743921637535095,
"learning_rate": 3.887395330218429e-05,
"loss": 0.2012,
"step": 215
},
{
"epoch": 1.2508884150675195,
"grad_norm": 0.26504266262054443,
"learning_rate": 3.6457976592849754e-05,
"loss": 0.1927,
"step": 220
},
{
"epoch": 1.279317697228145,
"grad_norm": 0.45054301619529724,
"learning_rate": 3.4075667487415785e-05,
"loss": 0.2153,
"step": 225
},
{
"epoch": 1.3077469793887704,
"grad_norm": 0.2784373462200165,
"learning_rate": 3.173294878168025e-05,
"loss": 0.211,
"step": 230
},
{
"epoch": 1.336176261549396,
"grad_norm": 0.2335204929113388,
"learning_rate": 2.9435644843469436e-05,
"loss": 0.2156,
"step": 235
},
{
"epoch": 1.3646055437100213,
"grad_norm": 0.25453269481658936,
"learning_rate": 2.718946713234185e-05,
"loss": 0.2048,
"step": 240
},
{
"epoch": 1.3930348258706466,
"grad_norm": 0.2633484899997711,
"learning_rate": 2.500000000000001e-05,
"loss": 0.2277,
"step": 245
},
{
"epoch": 1.3930348258706466,
"eval_loss": 0.20937685668468475,
"eval_runtime": 1374.1515,
"eval_samples_per_second": 1.819,
"eval_steps_per_second": 1.819,
"step": 245
},
{
"epoch": 1.4214641080312722,
"grad_norm": 0.27278706431388855,
"learning_rate": 2.2872686806712035e-05,
"loss": 0.2073,
"step": 250
},
{
"epoch": 1.4498933901918978,
"grad_norm": 0.2642519772052765,
"learning_rate": 2.0812816388260518e-05,
"loss": 0.2007,
"step": 255
},
{
"epoch": 1.4783226723525231,
"grad_norm": 0.25261521339416504,
"learning_rate": 1.8825509907063327e-05,
"loss": 0.1877,
"step": 260
},
{
"epoch": 1.5067519545131485,
"grad_norm": 0.2179146111011505,
"learning_rate": 1.691570812015704e-05,
"loss": 0.1904,
"step": 265
},
{
"epoch": 1.535181236673774,
"grad_norm": 0.35709136724472046,
"learning_rate": 1.5088159095696363e-05,
"loss": 0.2162,
"step": 270
},
{
"epoch": 1.5636105188343994,
"grad_norm": 0.4146248996257782,
"learning_rate": 1.3347406408508695e-05,
"loss": 0.1985,
"step": 275
},
{
"epoch": 1.5920398009950247,
"grad_norm": 0.27512213587760925,
"learning_rate": 1.1697777844051105e-05,
"loss": 0.2057,
"step": 280
},
{
"epoch": 1.5920398009950247,
"eval_loss": 0.20821616053581238,
"eval_runtime": 1373.7304,
"eval_samples_per_second": 1.82,
"eval_steps_per_second": 1.82,
"step": 280
},
{
"epoch": 1.6204690831556503,
"grad_norm": 0.4084846079349518,
"learning_rate": 1.0143374638853891e-05,
"loss": 0.2139,
"step": 285
},
{
"epoch": 1.6488983653162759,
"grad_norm": 0.3040494918823242,
"learning_rate": 8.688061284200266e-06,
"loss": 0.2083,
"step": 290
},
{
"epoch": 1.6773276474769012,
"grad_norm": 0.28483131527900696,
"learning_rate": 7.33545591839222e-06,
"loss": 0.185,
"step": 295
},
{
"epoch": 1.7057569296375266,
"grad_norm": 0.2855469286441803,
"learning_rate": 6.088921331488568e-06,
"loss": 0.2131,
"step": 300
},
{
"epoch": 1.7341862117981521,
"grad_norm": 0.3371677100658417,
"learning_rate": 4.951556604879048e-06,
"loss": 0.2071,
"step": 305
},
{
"epoch": 1.7626154939587777,
"grad_norm": 0.26064884662628174,
"learning_rate": 3.9261894064796135e-06,
"loss": 0.1715,
"step": 310
},
{
"epoch": 1.7910447761194028,
"grad_norm": 0.31194818019866943,
"learning_rate": 3.0153689607045845e-06,
"loss": 0.1981,
"step": 315
},
{
"epoch": 1.7910447761194028,
"eval_loss": 0.20436535775661469,
"eval_runtime": 1372.7158,
"eval_samples_per_second": 1.821,
"eval_steps_per_second": 1.821,
"step": 315
},
{
"epoch": 1.8194740582800284,
"grad_norm": 0.23213765025138855,
"learning_rate": 2.221359710692961e-06,
"loss": 0.1939,
"step": 320
},
{
"epoch": 1.847903340440654,
"grad_norm": 0.3259161412715912,
"learning_rate": 1.5461356885461075e-06,
"loss": 0.2021,
"step": 325
},
{
"epoch": 1.8763326226012793,
"grad_norm": 0.3195630609989166,
"learning_rate": 9.913756075728087e-07,
"loss": 0.1947,
"step": 330
},
{
"epoch": 1.9047619047619047,
"grad_norm": 0.26338306069374084,
"learning_rate": 5.584586887435739e-07,
"loss": 0.2069,
"step": 335
},
{
"epoch": 1.9331911869225302,
"grad_norm": 0.29805904626846313,
"learning_rate": 2.4846123172992954e-07,
"loss": 0.2079,
"step": 340
},
{
"epoch": 1.9616204690831558,
"grad_norm": 0.300195574760437,
"learning_rate": 6.215393905388278e-08,
"loss": 0.2028,
"step": 345
},
{
"epoch": 1.9900497512437811,
"grad_norm": 0.2901870906352997,
"learning_rate": 0.0,
"loss": 0.2107,
"step": 350
},
{
"epoch": 1.9900497512437811,
"eval_loss": 0.20492638647556305,
"eval_runtime": 1373.2127,
"eval_samples_per_second": 1.821,
"eval_steps_per_second": 1.821,
"step": 350
},
{
"epoch": 1.9900497512437811,
"step": 350,
"total_flos": 9.377647750828524e+18,
"train_loss": 0.14908545851707458,
"train_runtime": 36655.577,
"train_samples_per_second": 1.228,
"train_steps_per_second": 0.01
}
],
"logging_steps": 5,
"max_steps": 350,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 35,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 9.377647750828524e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}