{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 922,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.010845986984815618,
"grad_norm": 5.733648324455126,
"learning_rate": 5.405405405405406e-07,
"loss": 0.9884,
"step": 10
},
{
"epoch": 0.021691973969631236,
"grad_norm": 3.143018703050177,
"learning_rate": 1.0810810810810812e-06,
"loss": 1.003,
"step": 20
},
{
"epoch": 0.03253796095444685,
"grad_norm": 1.95130960523758,
"learning_rate": 1.6216216216216219e-06,
"loss": 0.949,
"step": 30
},
{
"epoch": 0.04338394793926247,
"grad_norm": 1.309889103250474,
"learning_rate": 2.1621621621621623e-06,
"loss": 0.9141,
"step": 40
},
{
"epoch": 0.05422993492407809,
"grad_norm": 0.964663580942374,
"learning_rate": 2.702702702702703e-06,
"loss": 0.8705,
"step": 50
},
{
"epoch": 0.0650759219088937,
"grad_norm": 0.9285010836421135,
"learning_rate": 3.2432432432432437e-06,
"loss": 0.8527,
"step": 60
},
{
"epoch": 0.07592190889370933,
"grad_norm": 0.858798188360779,
"learning_rate": 3.7837837837837844e-06,
"loss": 0.8526,
"step": 70
},
{
"epoch": 0.08676789587852494,
"grad_norm": 0.9904029778258275,
"learning_rate": 4.324324324324325e-06,
"loss": 0.8533,
"step": 80
},
{
"epoch": 0.09761388286334056,
"grad_norm": 1.016007763190019,
"learning_rate": 4.864864864864866e-06,
"loss": 0.8337,
"step": 90
},
{
"epoch": 0.10845986984815618,
"grad_norm": 1.0144931809319673,
"learning_rate": 5.405405405405406e-06,
"loss": 0.8571,
"step": 100
},
{
"epoch": 0.1193058568329718,
"grad_norm": 1.0215217769302534,
"learning_rate": 5.945945945945947e-06,
"loss": 0.838,
"step": 110
},
{
"epoch": 0.1301518438177874,
"grad_norm": 0.9196298812766283,
"learning_rate": 6.486486486486487e-06,
"loss": 0.8642,
"step": 120
},
{
"epoch": 0.14099783080260303,
"grad_norm": 0.9071017283188243,
"learning_rate": 7.027027027027028e-06,
"loss": 0.8455,
"step": 130
},
{
"epoch": 0.15184381778741865,
"grad_norm": 0.8998523509417926,
"learning_rate": 7.567567567567569e-06,
"loss": 0.8173,
"step": 140
},
{
"epoch": 0.16268980477223427,
"grad_norm": 0.8967758141211395,
"learning_rate": 8.108108108108109e-06,
"loss": 0.8207,
"step": 150
},
{
"epoch": 0.1735357917570499,
"grad_norm": 0.854081117546703,
"learning_rate": 8.64864864864865e-06,
"loss": 0.826,
"step": 160
},
{
"epoch": 0.1843817787418655,
"grad_norm": 0.7791141573115371,
"learning_rate": 9.189189189189191e-06,
"loss": 0.8408,
"step": 170
},
{
"epoch": 0.19522776572668113,
"grad_norm": 0.931666128574369,
"learning_rate": 9.729729729729732e-06,
"loss": 0.8572,
"step": 180
},
{
"epoch": 0.20607375271149675,
"grad_norm": 1.0655645445027464,
"learning_rate": 9.999775878383519e-06,
"loss": 0.8449,
"step": 190
},
{
"epoch": 0.21691973969631237,
"grad_norm": 0.8552506900147497,
"learning_rate": 9.997983026003064e-06,
"loss": 0.8338,
"step": 200
},
{
"epoch": 0.227765726681128,
"grad_norm": 0.7811721176850553,
"learning_rate": 9.9943979641349e-06,
"loss": 0.8419,
"step": 210
},
{
"epoch": 0.2386117136659436,
"grad_norm": 0.9612589991038837,
"learning_rate": 9.989021978333996e-06,
"loss": 0.8447,
"step": 220
},
{
"epoch": 0.24945770065075923,
"grad_norm": 0.8713636930259827,
"learning_rate": 9.981856996356548e-06,
"loss": 0.826,
"step": 230
},
{
"epoch": 0.2603036876355748,
"grad_norm": 1.0230589449620575,
"learning_rate": 9.972905587468719e-06,
"loss": 0.8509,
"step": 240
},
{
"epoch": 0.27114967462039047,
"grad_norm": 0.9960895654836321,
"learning_rate": 9.962170961525338e-06,
"loss": 0.8278,
"step": 250
},
{
"epoch": 0.28199566160520606,
"grad_norm": 0.9708416426642364,
"learning_rate": 9.949656967818882e-06,
"loss": 0.8239,
"step": 260
},
{
"epoch": 0.2928416485900217,
"grad_norm": 0.9863073274043072,
"learning_rate": 9.935368093699171e-06,
"loss": 0.8729,
"step": 270
},
{
"epoch": 0.3036876355748373,
"grad_norm": 1.0619350079027392,
"learning_rate": 9.919309462964277e-06,
"loss": 0.8336,
"step": 280
},
{
"epoch": 0.31453362255965295,
"grad_norm": 0.9536874477902112,
"learning_rate": 9.901486834023182e-06,
"loss": 0.8371,
"step": 290
},
{
"epoch": 0.32537960954446854,
"grad_norm": 1.0060463939033413,
"learning_rate": 9.8819065978309e-06,
"loss": 0.864,
"step": 300
},
{
"epoch": 0.3362255965292842,
"grad_norm": 0.792063518682088,
"learning_rate": 9.860575775596767e-06,
"loss": 0.8313,
"step": 310
},
{
"epoch": 0.3470715835140998,
"grad_norm": 0.9546416116474393,
"learning_rate": 9.837502016266725e-06,
"loss": 0.8218,
"step": 320
},
{
"epoch": 0.3579175704989154,
"grad_norm": 0.9802614938135474,
"learning_rate": 9.812693593780515e-06,
"loss": 0.8721,
"step": 330
},
{
"epoch": 0.368763557483731,
"grad_norm": 0.854378979529178,
"learning_rate": 9.786159404104758e-06,
"loss": 0.8371,
"step": 340
},
{
"epoch": 0.3796095444685466,
"grad_norm": 0.8717109867216107,
"learning_rate": 9.757908962042968e-06,
"loss": 0.8339,
"step": 350
},
{
"epoch": 0.39045553145336226,
"grad_norm": 0.8877006786963313,
"learning_rate": 9.72795239782369e-06,
"loss": 0.8547,
"step": 360
},
{
"epoch": 0.40130151843817785,
"grad_norm": 1.0126192151398974,
"learning_rate": 9.696300453467922e-06,
"loss": 0.8438,
"step": 370
},
{
"epoch": 0.4121475054229935,
"grad_norm": 0.8577472807238208,
"learning_rate": 9.66296447893717e-06,
"loss": 0.872,
"step": 380
},
{
"epoch": 0.4229934924078091,
"grad_norm": 0.8412488678641884,
"learning_rate": 9.627956428063522e-06,
"loss": 0.8408,
"step": 390
},
{
"epoch": 0.43383947939262474,
"grad_norm": 0.7588179294196125,
"learning_rate": 9.59128885426314e-06,
"loss": 0.8451,
"step": 400
},
{
"epoch": 0.44468546637744033,
"grad_norm": 0.8703037224398377,
"learning_rate": 9.552974906034796e-06,
"loss": 0.8336,
"step": 410
},
{
"epoch": 0.455531453362256,
"grad_norm": 0.8699706833983841,
"learning_rate": 9.513028322244977e-06,
"loss": 0.8153,
"step": 420
},
{
"epoch": 0.46637744034707157,
"grad_norm": 0.847977363828918,
"learning_rate": 9.47146342720133e-06,
"loss": 0.857,
"step": 430
},
{
"epoch": 0.4772234273318872,
"grad_norm": 0.8984826481514769,
"learning_rate": 9.428295125516151e-06,
"loss": 0.8467,
"step": 440
},
{
"epoch": 0.4880694143167028,
"grad_norm": 0.8165556682574098,
"learning_rate": 9.383538896761787e-06,
"loss": 0.8311,
"step": 450
},
{
"epoch": 0.49891540130151846,
"grad_norm": 0.8007389807149831,
"learning_rate": 9.337210789919875e-06,
"loss": 0.8648,
"step": 460
},
{
"epoch": 0.5097613882863341,
"grad_norm": 0.769668675462935,
"learning_rate": 9.289327417626393e-06,
"loss": 0.8342,
"step": 470
},
{
"epoch": 0.5206073752711496,
"grad_norm": 0.9160701884545429,
"learning_rate": 9.239905950214587e-06,
"loss": 0.8509,
"step": 480
},
{
"epoch": 0.5314533622559653,
"grad_norm": 0.8467668226954682,
"learning_rate": 9.18896410955793e-06,
"loss": 0.8405,
"step": 490
},
{
"epoch": 0.5422993492407809,
"grad_norm": 0.8109237435952316,
"learning_rate": 9.136520162715288e-06,
"loss": 0.8454,
"step": 500
},
{
"epoch": 0.5422993492407809,
"eval_loss": 0.7866095304489136,
"eval_runtime": 2581.2899,
"eval_samples_per_second": 1.904,
"eval_steps_per_second": 0.476,
"step": 500
},
{
"epoch": 0.5531453362255966,
"grad_norm": 0.9798708544137009,
"learning_rate": 9.082592915380596e-06,
"loss": 0.8255,
"step": 510
},
{
"epoch": 0.5639913232104121,
"grad_norm": 0.9165811375712184,
"learning_rate": 9.027201705139406e-06,
"loss": 0.8663,
"step": 520
},
{
"epoch": 0.5748373101952278,
"grad_norm": 0.9060399071688227,
"learning_rate": 8.970366394534667e-06,
"loss": 0.8144,
"step": 530
},
{
"epoch": 0.5856832971800434,
"grad_norm": 0.8253353508928236,
"learning_rate": 8.912107363944297e-06,
"loss": 0.8129,
"step": 540
},
{
"epoch": 0.596529284164859,
"grad_norm": 0.8996220079581437,
"learning_rate": 8.852445504273056e-06,
"loss": 0.8493,
"step": 550
},
{
"epoch": 0.6073752711496746,
"grad_norm": 0.7975347083538386,
"learning_rate": 8.791402209461333e-06,
"loss": 0.8602,
"step": 560
},
{
"epoch": 0.6182212581344902,
"grad_norm": 0.7263963682022704,
"learning_rate": 8.728999368813591e-06,
"loss": 0.835,
"step": 570
},
{
"epoch": 0.6290672451193059,
"grad_norm": 0.9605105643436394,
"learning_rate": 8.665259359149132e-06,
"loss": 0.8362,
"step": 580
},
{
"epoch": 0.6399132321041214,
"grad_norm": 0.8209007974348012,
"learning_rate": 8.600205036778089e-06,
"loss": 0.8233,
"step": 590
},
{
"epoch": 0.6507592190889371,
"grad_norm": 0.911985371229915,
"learning_rate": 8.533859729305447e-06,
"loss": 0.8375,
"step": 600
},
{
"epoch": 0.6616052060737527,
"grad_norm": 0.6985325225275438,
"learning_rate": 8.466247227266091e-06,
"loss": 0.8225,
"step": 610
},
{
"epoch": 0.6724511930585684,
"grad_norm": 0.8132034730555108,
"learning_rate": 8.39739177559383e-06,
"loss": 0.836,
"step": 620
},
{
"epoch": 0.6832971800433839,
"grad_norm": 0.8360457612694335,
"learning_rate": 8.327318064927488e-06,
"loss": 0.8491,
"step": 630
},
{
"epoch": 0.6941431670281996,
"grad_norm": 0.8189142007610347,
"learning_rate": 8.256051222757188e-06,
"loss": 0.8486,
"step": 640
},
{
"epoch": 0.7049891540130152,
"grad_norm": 0.8530912616563548,
"learning_rate": 8.183616804413954e-06,
"loss": 0.8489,
"step": 650
},
{
"epoch": 0.7158351409978309,
"grad_norm": 0.9149414345864662,
"learning_rate": 8.110040783905924e-06,
"loss": 0.8244,
"step": 660
},
{
"epoch": 0.7266811279826464,
"grad_norm": 0.8342820136081186,
"learning_rate": 8.035349544604419e-06,
"loss": 0.8201,
"step": 670
},
{
"epoch": 0.737527114967462,
"grad_norm": 0.7652272869820805,
"learning_rate": 7.959569869783216e-06,
"loss": 0.8287,
"step": 680
},
{
"epoch": 0.7483731019522777,
"grad_norm": 0.8697789473135982,
"learning_rate": 7.882728933014431e-06,
"loss": 0.8565,
"step": 690
},
{
"epoch": 0.7592190889370932,
"grad_norm": 0.8289580942636415,
"learning_rate": 7.80485428842444e-06,
"loss": 0.8354,
"step": 700
},
{
"epoch": 0.7700650759219089,
"grad_norm": 0.82218666332152,
"learning_rate": 7.725973860813338e-06,
"loss": 0.8275,
"step": 710
},
{
"epoch": 0.7809110629067245,
"grad_norm": 0.8328912197470162,
"learning_rate": 7.646115935641488e-06,
"loss": 0.8554,
"step": 720
},
{
"epoch": 0.7917570498915402,
"grad_norm": 0.9144219376531081,
"learning_rate": 7.5653091488867215e-06,
"loss": 0.7935,
"step": 730
},
{
"epoch": 0.8026030368763557,
"grad_norm": 0.8432999710569549,
"learning_rate": 7.48358247677588e-06,
"loss": 0.8343,
"step": 740
},
{
"epoch": 0.8134490238611713,
"grad_norm": 0.9959358723449406,
"learning_rate": 7.400965225394316e-06,
"loss": 0.8215,
"step": 750
},
{
"epoch": 0.824295010845987,
"grad_norm": 0.7781247788376849,
"learning_rate": 7.31748702017713e-06,
"loss": 0.7865,
"step": 760
},
{
"epoch": 0.8351409978308026,
"grad_norm": 0.7268868727283686,
"learning_rate": 7.23317779528589e-06,
"loss": 0.8554,
"step": 770
},
{
"epoch": 0.8459869848156182,
"grad_norm": 0.8769959745106497,
"learning_rate": 7.14806778287464e-06,
"loss": 0.8556,
"step": 780
},
{
"epoch": 0.8568329718004338,
"grad_norm": 0.8083886562171313,
"learning_rate": 7.062187502249056e-06,
"loss": 0.8538,
"step": 790
},
{
"epoch": 0.8676789587852495,
"grad_norm": 0.8253588275102612,
"learning_rate": 6.975567748922639e-06,
"loss": 0.8483,
"step": 800
},
{
"epoch": 0.8785249457700651,
"grad_norm": 0.8419247557676373,
"learning_rate": 6.888239583573852e-06,
"loss": 0.8383,
"step": 810
},
{
"epoch": 0.8893709327548807,
"grad_norm": 0.8261807774132319,
"learning_rate": 6.8002343209081766e-06,
"loss": 0.8344,
"step": 820
},
{
"epoch": 0.9002169197396963,
"grad_norm": 0.9081092978343738,
"learning_rate": 6.711583518429093e-06,
"loss": 0.8614,
"step": 830
},
{
"epoch": 0.911062906724512,
"grad_norm": 0.8081110590736196,
"learning_rate": 6.622318965121972e-06,
"loss": 0.8283,
"step": 840
},
{
"epoch": 0.9219088937093276,
"grad_norm": 0.8961074992740756,
"learning_rate": 6.532472670054975e-06,
"loss": 0.8555,
"step": 850
},
{
"epoch": 0.9327548806941431,
"grad_norm": 0.855697485520701,
"learning_rate": 6.442076850901033e-06,
"loss": 0.805,
"step": 860
},
{
"epoch": 0.9436008676789588,
"grad_norm": 0.9715823055879019,
"learning_rate": 6.351163922385026e-06,
"loss": 0.8746,
"step": 870
},
{
"epoch": 0.9544468546637744,
"grad_norm": 0.8558421168141579,
"learning_rate": 6.259766484660297e-06,
"loss": 0.8194,
"step": 880
},
{
"epoch": 0.96529284164859,
"grad_norm": 0.9307662219253259,
"learning_rate": 6.1679173116186674e-06,
"loss": 0.8234,
"step": 890
},
{
"epoch": 0.9761388286334056,
"grad_norm": 0.8797281549707557,
"learning_rate": 6.075649339138174e-06,
"loss": 0.8336,
"step": 900
},
{
"epoch": 0.9869848156182213,
"grad_norm": 0.7892356935050042,
"learning_rate": 5.982995653272699e-06,
"loss": 0.8471,
"step": 910
},
{
"epoch": 0.9978308026030369,
"grad_norm": 0.7635682452713507,
"learning_rate": 5.8899894783877536e-06,
"loss": 0.8248,
"step": 920
}
],
"logging_steps": 10,
"max_steps": 1844,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 49624320344064.0,
"train_batch_size": 6,
"trial_name": null,
"trial_params": null
}