{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.2504472271914132,
"eval_steps": 500,
"global_step": 700,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0035778175313059034,
"grad_norm": 0.5825825929641724,
"learning_rate": 0.00019928443649373882,
"loss": 1.248,
"num_input_tokens_seen": 6646,
"step": 10
},
{
"epoch": 0.007155635062611807,
"grad_norm": 0.5380188822746277,
"learning_rate": 0.00019856887298747765,
"loss": 0.5478,
"num_input_tokens_seen": 13063,
"step": 20
},
{
"epoch": 0.01073345259391771,
"grad_norm": 0.3872911036014557,
"learning_rate": 0.00019785330948121648,
"loss": 0.5135,
"num_input_tokens_seen": 19512,
"step": 30
},
{
"epoch": 0.014311270125223614,
"grad_norm": 0.4991438686847687,
"learning_rate": 0.0001971377459749553,
"loss": 0.5092,
"num_input_tokens_seen": 26884,
"step": 40
},
{
"epoch": 0.017889087656529516,
"grad_norm": 0.6744784116744995,
"learning_rate": 0.0001964221824686941,
"loss": 0.4799,
"num_input_tokens_seen": 34831,
"step": 50
},
{
"epoch": 0.02146690518783542,
"grad_norm": 0.5413841009140015,
"learning_rate": 0.00019570661896243293,
"loss": 0.4738,
"num_input_tokens_seen": 40074,
"step": 60
},
{
"epoch": 0.025044722719141325,
"grad_norm": 0.33517029881477356,
"learning_rate": 0.00019499105545617174,
"loss": 0.4907,
"num_input_tokens_seen": 47194,
"step": 70
},
{
"epoch": 0.028622540250447227,
"grad_norm": 0.34275758266448975,
"learning_rate": 0.00019427549194991057,
"loss": 0.4642,
"num_input_tokens_seen": 53439,
"step": 80
},
{
"epoch": 0.03220035778175313,
"grad_norm": 0.4074145257472992,
"learning_rate": 0.00019355992844364938,
"loss": 0.4431,
"num_input_tokens_seen": 59366,
"step": 90
},
{
"epoch": 0.03577817531305903,
"grad_norm": 0.372760534286499,
"learning_rate": 0.0001928443649373882,
"loss": 0.4824,
"num_input_tokens_seen": 66414,
"step": 100
},
{
"epoch": 0.03935599284436494,
"grad_norm": 0.35169002413749695,
"learning_rate": 0.00019212880143112702,
"loss": 0.4863,
"num_input_tokens_seen": 73451,
"step": 110
},
{
"epoch": 0.04293381037567084,
"grad_norm": 0.4088020324707031,
"learning_rate": 0.00019141323792486585,
"loss": 0.4792,
"num_input_tokens_seen": 81934,
"step": 120
},
{
"epoch": 0.046511627906976744,
"grad_norm": 0.40062326192855835,
"learning_rate": 0.00019069767441860466,
"loss": 0.4607,
"num_input_tokens_seen": 88335,
"step": 130
},
{
"epoch": 0.05008944543828265,
"grad_norm": 0.5044320225715637,
"learning_rate": 0.0001899821109123435,
"loss": 0.456,
"num_input_tokens_seen": 96192,
"step": 140
},
{
"epoch": 0.05366726296958855,
"grad_norm": 0.4566495418548584,
"learning_rate": 0.0001892665474060823,
"loss": 0.429,
"num_input_tokens_seen": 101609,
"step": 150
},
{
"epoch": 0.057245080500894455,
"grad_norm": 0.4657338559627533,
"learning_rate": 0.0001885509838998211,
"loss": 0.4445,
"num_input_tokens_seen": 107467,
"step": 160
},
{
"epoch": 0.06082289803220036,
"grad_norm": 0.5721924304962158,
"learning_rate": 0.00018783542039355994,
"loss": 0.4304,
"num_input_tokens_seen": 113612,
"step": 170
},
{
"epoch": 0.06440071556350627,
"grad_norm": 0.2883516848087311,
"learning_rate": 0.00018711985688729877,
"loss": 0.4525,
"num_input_tokens_seen": 121416,
"step": 180
},
{
"epoch": 0.06797853309481217,
"grad_norm": 0.5061659216880798,
"learning_rate": 0.00018640429338103758,
"loss": 0.4439,
"num_input_tokens_seen": 128284,
"step": 190
},
{
"epoch": 0.07155635062611806,
"grad_norm": 0.3323754072189331,
"learning_rate": 0.00018568872987477638,
"loss": 0.4489,
"num_input_tokens_seen": 135571,
"step": 200
},
{
"epoch": 0.07513416815742398,
"grad_norm": 0.5354058742523193,
"learning_rate": 0.00018497316636851522,
"loss": 0.4634,
"num_input_tokens_seen": 141479,
"step": 210
},
{
"epoch": 0.07871198568872988,
"grad_norm": 0.4328760504722595,
"learning_rate": 0.00018425760286225405,
"loss": 0.4545,
"num_input_tokens_seen": 147677,
"step": 220
},
{
"epoch": 0.08228980322003578,
"grad_norm": 0.28675127029418945,
"learning_rate": 0.00018354203935599286,
"loss": 0.4814,
"num_input_tokens_seen": 154847,
"step": 230
},
{
"epoch": 0.08586762075134168,
"grad_norm": 0.31572216749191284,
"learning_rate": 0.00018282647584973166,
"loss": 0.446,
"num_input_tokens_seen": 162267,
"step": 240
},
{
"epoch": 0.08944543828264759,
"grad_norm": 0.360166996717453,
"learning_rate": 0.0001821109123434705,
"loss": 0.4549,
"num_input_tokens_seen": 168817,
"step": 250
},
{
"epoch": 0.09302325581395349,
"grad_norm": 0.342385470867157,
"learning_rate": 0.0001813953488372093,
"loss": 0.4297,
"num_input_tokens_seen": 174828,
"step": 260
},
{
"epoch": 0.09660107334525939,
"grad_norm": 0.37481924891471863,
"learning_rate": 0.00018067978533094814,
"loss": 0.4314,
"num_input_tokens_seen": 181578,
"step": 270
},
{
"epoch": 0.1001788908765653,
"grad_norm": 0.28545519709587097,
"learning_rate": 0.00017996422182468694,
"loss": 0.4332,
"num_input_tokens_seen": 187842,
"step": 280
},
{
"epoch": 0.1037567084078712,
"grad_norm": 0.38877248764038086,
"learning_rate": 0.00017924865831842578,
"loss": 0.4248,
"num_input_tokens_seen": 194651,
"step": 290
},
{
"epoch": 0.1073345259391771,
"grad_norm": 0.30087631940841675,
"learning_rate": 0.00017853309481216458,
"loss": 0.4405,
"num_input_tokens_seen": 202203,
"step": 300
},
{
"epoch": 0.11091234347048301,
"grad_norm": 0.33470776677131653,
"learning_rate": 0.00017781753130590342,
"loss": 0.4485,
"num_input_tokens_seen": 210364,
"step": 310
},
{
"epoch": 0.11449016100178891,
"grad_norm": 0.44205668568611145,
"learning_rate": 0.00017710196779964222,
"loss": 0.4431,
"num_input_tokens_seen": 217965,
"step": 320
},
{
"epoch": 0.11806797853309481,
"grad_norm": 0.39270082116127014,
"learning_rate": 0.00017638640429338106,
"loss": 0.4475,
"num_input_tokens_seen": 224249,
"step": 330
},
{
"epoch": 0.12164579606440072,
"grad_norm": 0.37138304114341736,
"learning_rate": 0.00017567084078711986,
"loss": 0.4139,
"num_input_tokens_seen": 230513,
"step": 340
},
{
"epoch": 0.1252236135957066,
"grad_norm": 0.32019296288490295,
"learning_rate": 0.00017495527728085867,
"loss": 0.4488,
"num_input_tokens_seen": 237103,
"step": 350
},
{
"epoch": 0.12880143112701253,
"grad_norm": 0.42748796939849854,
"learning_rate": 0.0001742397137745975,
"loss": 0.4059,
"num_input_tokens_seen": 242530,
"step": 360
},
{
"epoch": 0.13237924865831843,
"grad_norm": 0.39997342228889465,
"learning_rate": 0.00017352415026833634,
"loss": 0.4069,
"num_input_tokens_seen": 247524,
"step": 370
},
{
"epoch": 0.13595706618962433,
"grad_norm": 0.45402902364730835,
"learning_rate": 0.00017280858676207514,
"loss": 0.4354,
"num_input_tokens_seen": 252526,
"step": 380
},
{
"epoch": 0.13953488372093023,
"grad_norm": 0.500701904296875,
"learning_rate": 0.00017209302325581395,
"loss": 0.4421,
"num_input_tokens_seen": 258974,
"step": 390
},
{
"epoch": 0.14311270125223613,
"grad_norm": 0.2604714035987854,
"learning_rate": 0.00017137745974955278,
"loss": 0.4338,
"num_input_tokens_seen": 265411,
"step": 400
},
{
"epoch": 0.14669051878354203,
"grad_norm": 0.5467566251754761,
"learning_rate": 0.00017066189624329162,
"loss": 0.4336,
"num_input_tokens_seen": 271069,
"step": 410
},
{
"epoch": 0.15026833631484796,
"grad_norm": 0.4278429448604584,
"learning_rate": 0.00016994633273703042,
"loss": 0.4384,
"num_input_tokens_seen": 277913,
"step": 420
},
{
"epoch": 0.15384615384615385,
"grad_norm": 0.38349634408950806,
"learning_rate": 0.00016923076923076923,
"loss": 0.445,
"num_input_tokens_seen": 285726,
"step": 430
},
{
"epoch": 0.15742397137745975,
"grad_norm": 0.298753559589386,
"learning_rate": 0.00016851520572450806,
"loss": 0.411,
"num_input_tokens_seen": 293886,
"step": 440
},
{
"epoch": 0.16100178890876565,
"grad_norm": 0.4590730667114258,
"learning_rate": 0.00016779964221824687,
"loss": 0.4441,
"num_input_tokens_seen": 300933,
"step": 450
},
{
"epoch": 0.16457960644007155,
"grad_norm": 0.25614652037620544,
"learning_rate": 0.0001670840787119857,
"loss": 0.4201,
"num_input_tokens_seen": 306782,
"step": 460
},
{
"epoch": 0.16815742397137745,
"grad_norm": 0.3007524013519287,
"learning_rate": 0.0001663685152057245,
"loss": 0.444,
"num_input_tokens_seen": 314243,
"step": 470
},
{
"epoch": 0.17173524150268335,
"grad_norm": 0.4292968809604645,
"learning_rate": 0.00016565295169946334,
"loss": 0.4089,
"num_input_tokens_seen": 319432,
"step": 480
},
{
"epoch": 0.17531305903398928,
"grad_norm": 0.31139102578163147,
"learning_rate": 0.00016493738819320215,
"loss": 0.4244,
"num_input_tokens_seen": 325211,
"step": 490
},
{
"epoch": 0.17889087656529518,
"grad_norm": 0.34276363253593445,
"learning_rate": 0.00016422182468694098,
"loss": 0.4133,
"num_input_tokens_seen": 332100,
"step": 500
},
{
"epoch": 0.18246869409660108,
"grad_norm": 0.39435234665870667,
"learning_rate": 0.0001635062611806798,
"loss": 0.4428,
"num_input_tokens_seen": 337848,
"step": 510
},
{
"epoch": 0.18604651162790697,
"grad_norm": 0.2763209640979767,
"learning_rate": 0.00016279069767441862,
"loss": 0.4464,
"num_input_tokens_seen": 345759,
"step": 520
},
{
"epoch": 0.18962432915921287,
"grad_norm": 0.2753828763961792,
"learning_rate": 0.00016207513416815743,
"loss": 0.438,
"num_input_tokens_seen": 354078,
"step": 530
},
{
"epoch": 0.19320214669051877,
"grad_norm": 0.3228139579296112,
"learning_rate": 0.00016135957066189623,
"loss": 0.4314,
"num_input_tokens_seen": 360961,
"step": 540
},
{
"epoch": 0.1967799642218247,
"grad_norm": 0.30050089955329895,
"learning_rate": 0.00016064400715563507,
"loss": 0.4364,
"num_input_tokens_seen": 367083,
"step": 550
},
{
"epoch": 0.2003577817531306,
"grad_norm": 0.3418981432914734,
"learning_rate": 0.0001599284436493739,
"loss": 0.4329,
"num_input_tokens_seen": 373196,
"step": 560
},
{
"epoch": 0.2039355992844365,
"grad_norm": 0.36333030462265015,
"learning_rate": 0.0001592128801431127,
"loss": 0.4482,
"num_input_tokens_seen": 380449,
"step": 570
},
{
"epoch": 0.2075134168157424,
"grad_norm": 0.2979726195335388,
"learning_rate": 0.00015849731663685151,
"loss": 0.4258,
"num_input_tokens_seen": 386577,
"step": 580
},
{
"epoch": 0.2110912343470483,
"grad_norm": 0.2969113886356354,
"learning_rate": 0.00015778175313059035,
"loss": 0.433,
"num_input_tokens_seen": 392953,
"step": 590
},
{
"epoch": 0.2146690518783542,
"grad_norm": 0.4132014811038971,
"learning_rate": 0.00015706618962432918,
"loss": 0.4148,
"num_input_tokens_seen": 399368,
"step": 600
},
{
"epoch": 0.2182468694096601,
"grad_norm": 0.35863760113716125,
"learning_rate": 0.000156350626118068,
"loss": 0.4105,
"num_input_tokens_seen": 407343,
"step": 610
},
{
"epoch": 0.22182468694096602,
"grad_norm": 0.287056028842926,
"learning_rate": 0.0001556350626118068,
"loss": 0.4495,
"num_input_tokens_seen": 413867,
"step": 620
},
{
"epoch": 0.22540250447227192,
"grad_norm": 0.41710999608039856,
"learning_rate": 0.00015491949910554563,
"loss": 0.426,
"num_input_tokens_seen": 422712,
"step": 630
},
{
"epoch": 0.22898032200357782,
"grad_norm": 0.42847341299057007,
"learning_rate": 0.00015420393559928446,
"loss": 0.4163,
"num_input_tokens_seen": 428523,
"step": 640
},
{
"epoch": 0.23255813953488372,
"grad_norm": 0.35523882508277893,
"learning_rate": 0.00015348837209302327,
"loss": 0.4237,
"num_input_tokens_seen": 436283,
"step": 650
},
{
"epoch": 0.23613595706618962,
"grad_norm": 0.32238948345184326,
"learning_rate": 0.00015277280858676207,
"loss": 0.4527,
"num_input_tokens_seen": 444368,
"step": 660
},
{
"epoch": 0.23971377459749552,
"grad_norm": 0.3075689375400543,
"learning_rate": 0.0001520572450805009,
"loss": 0.4057,
"num_input_tokens_seen": 449919,
"step": 670
},
{
"epoch": 0.24329159212880144,
"grad_norm": 0.3819845914840698,
"learning_rate": 0.00015134168157423971,
"loss": 0.3951,
"num_input_tokens_seen": 455612,
"step": 680
},
{
"epoch": 0.24686940966010734,
"grad_norm": 0.3687816560268402,
"learning_rate": 0.00015062611806797855,
"loss": 0.4176,
"num_input_tokens_seen": 461846,
"step": 690
},
{
"epoch": 0.2504472271914132,
"grad_norm": 0.32070156931877136,
"learning_rate": 0.00014991055456171735,
"loss": 0.4111,
"num_input_tokens_seen": 468333,
"step": 700
}
],
"logging_steps": 10,
"max_steps": 2795,
"num_input_tokens_seen": 468333,
"num_train_epochs": 1,
"save_steps": 20,
"total_flos": 1.0531155302590464e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}