tinyllama-1.1b-mt-sft-qlora / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 1478,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 1.03125,
"learning_rate": 1.3513513513513515e-06,
"loss": 2.2722,
"step": 1
},
{
"epoch": 0.01,
"grad_norm": 1.140625,
"learning_rate": 6.7567567567567575e-06,
"loss": 2.2825,
"step": 5
},
{
"epoch": 0.01,
"grad_norm": 0.953125,
"learning_rate": 1.3513513513513515e-05,
"loss": 2.3023,
"step": 10
},
{
"epoch": 0.02,
"grad_norm": 0.73828125,
"learning_rate": 2.0270270270270273e-05,
"loss": 2.3044,
"step": 15
},
{
"epoch": 0.03,
"grad_norm": 0.439453125,
"learning_rate": 2.702702702702703e-05,
"loss": 2.2383,
"step": 20
},
{
"epoch": 0.03,
"grad_norm": 0.326171875,
"learning_rate": 3.3783783783783784e-05,
"loss": 2.2166,
"step": 25
},
{
"epoch": 0.04,
"grad_norm": 0.302734375,
"learning_rate": 4.0540540540540545e-05,
"loss": 2.1633,
"step": 30
},
{
"epoch": 0.05,
"grad_norm": 0.265625,
"learning_rate": 4.72972972972973e-05,
"loss": 2.1206,
"step": 35
},
{
"epoch": 0.05,
"grad_norm": 0.25,
"learning_rate": 5.405405405405406e-05,
"loss": 2.1314,
"step": 40
},
{
"epoch": 0.06,
"grad_norm": 0.2412109375,
"learning_rate": 6.0810810810810814e-05,
"loss": 2.0912,
"step": 45
},
{
"epoch": 0.07,
"grad_norm": 0.265625,
"learning_rate": 6.756756756756757e-05,
"loss": 2.0372,
"step": 50
},
{
"epoch": 0.07,
"grad_norm": 0.283203125,
"learning_rate": 7.432432432432433e-05,
"loss": 1.9804,
"step": 55
},
{
"epoch": 0.08,
"grad_norm": 0.6171875,
"learning_rate": 8.108108108108109e-05,
"loss": 1.9327,
"step": 60
},
{
"epoch": 0.09,
"grad_norm": 0.2119140625,
"learning_rate": 8.783783783783784e-05,
"loss": 1.889,
"step": 65
},
{
"epoch": 0.09,
"grad_norm": 0.2119140625,
"learning_rate": 9.45945945945946e-05,
"loss": 1.8393,
"step": 70
},
{
"epoch": 0.1,
"grad_norm": 0.2216796875,
"learning_rate": 0.00010135135135135136,
"loss": 1.8477,
"step": 75
},
{
"epoch": 0.11,
"grad_norm": 0.2138671875,
"learning_rate": 0.00010810810810810812,
"loss": 1.839,
"step": 80
},
{
"epoch": 0.12,
"grad_norm": 0.197265625,
"learning_rate": 0.00011486486486486487,
"loss": 1.8043,
"step": 85
},
{
"epoch": 0.12,
"grad_norm": 0.197265625,
"learning_rate": 0.00012162162162162163,
"loss": 1.8177,
"step": 90
},
{
"epoch": 0.13,
"grad_norm": 0.201171875,
"learning_rate": 0.0001283783783783784,
"loss": 1.7922,
"step": 95
},
{
"epoch": 0.14,
"grad_norm": 0.1962890625,
"learning_rate": 0.00013513513513513514,
"loss": 1.791,
"step": 100
},
{
"epoch": 0.14,
"grad_norm": 0.2001953125,
"learning_rate": 0.00014189189189189188,
"loss": 1.7794,
"step": 105
},
{
"epoch": 0.15,
"grad_norm": 0.203125,
"learning_rate": 0.00014864864864864866,
"loss": 1.7939,
"step": 110
},
{
"epoch": 0.16,
"grad_norm": 0.2041015625,
"learning_rate": 0.0001554054054054054,
"loss": 1.7654,
"step": 115
},
{
"epoch": 0.16,
"grad_norm": 0.205078125,
"learning_rate": 0.00016216216216216218,
"loss": 1.7436,
"step": 120
},
{
"epoch": 0.17,
"grad_norm": 0.2158203125,
"learning_rate": 0.00016891891891891893,
"loss": 1.7431,
"step": 125
},
{
"epoch": 0.18,
"grad_norm": 0.2080078125,
"learning_rate": 0.00017567567567567568,
"loss": 1.7665,
"step": 130
},
{
"epoch": 0.18,
"grad_norm": 0.22265625,
"learning_rate": 0.00018243243243243245,
"loss": 1.7625,
"step": 135
},
{
"epoch": 0.19,
"grad_norm": 0.220703125,
"learning_rate": 0.0001891891891891892,
"loss": 1.7485,
"step": 140
},
{
"epoch": 0.2,
"grad_norm": 0.220703125,
"learning_rate": 0.00019594594594594594,
"loss": 1.7564,
"step": 145
},
{
"epoch": 0.2,
"grad_norm": 0.220703125,
"learning_rate": 0.00019999888409903948,
"loss": 1.76,
"step": 150
},
{
"epoch": 0.21,
"grad_norm": 0.224609375,
"learning_rate": 0.0001999863304992469,
"loss": 1.7365,
"step": 155
},
{
"epoch": 0.22,
"grad_norm": 0.23046875,
"learning_rate": 0.00019995983018035278,
"loss": 1.7291,
"step": 160
},
{
"epoch": 0.22,
"grad_norm": 0.22265625,
"learning_rate": 0.00019991938683878746,
"loss": 1.7449,
"step": 165
},
{
"epoch": 0.23,
"grad_norm": 0.2177734375,
"learning_rate": 0.0001998650061158413,
"loss": 1.7379,
"step": 170
},
{
"epoch": 0.24,
"grad_norm": 0.208984375,
"learning_rate": 0.0001997966955968779,
"loss": 1.7308,
"step": 175
},
{
"epoch": 0.24,
"grad_norm": 0.234375,
"learning_rate": 0.00019971446481027591,
"loss": 1.7395,
"step": 180
},
{
"epoch": 0.25,
"grad_norm": 0.216796875,
"learning_rate": 0.00019961832522610005,
"loss": 1.7533,
"step": 185
},
{
"epoch": 0.26,
"grad_norm": 0.2255859375,
"learning_rate": 0.00019950829025450114,
"loss": 1.7004,
"step": 190
},
{
"epoch": 0.26,
"grad_norm": 0.216796875,
"learning_rate": 0.0001993843752438457,
"loss": 1.7046,
"step": 195
},
{
"epoch": 0.27,
"grad_norm": 0.22265625,
"learning_rate": 0.00019924659747857484,
"loss": 1.7147,
"step": 200
},
{
"epoch": 0.28,
"grad_norm": 0.2314453125,
"learning_rate": 0.00019909497617679348,
"loss": 1.7213,
"step": 205
},
{
"epoch": 0.28,
"grad_norm": 0.2265625,
"learning_rate": 0.00019892953248758967,
"loss": 1.7334,
"step": 210
},
{
"epoch": 0.29,
"grad_norm": 0.212890625,
"learning_rate": 0.00019875028948808455,
"loss": 1.7004,
"step": 215
},
{
"epoch": 0.3,
"grad_norm": 0.2197265625,
"learning_rate": 0.0001985572721802134,
"loss": 1.7027,
"step": 220
},
{
"epoch": 0.3,
"grad_norm": 0.234375,
"learning_rate": 0.00019835050748723824,
"loss": 1.7132,
"step": 225
},
{
"epoch": 0.31,
"grad_norm": 0.2158203125,
"learning_rate": 0.0001981300242499924,
"loss": 1.7003,
"step": 230
},
{
"epoch": 0.32,
"grad_norm": 0.22265625,
"learning_rate": 0.0001978958532228576,
"loss": 1.6947,
"step": 235
},
{
"epoch": 0.32,
"grad_norm": 0.2353515625,
"learning_rate": 0.00019764802706947421,
"loss": 1.7206,
"step": 240
},
{
"epoch": 0.33,
"grad_norm": 0.2275390625,
"learning_rate": 0.00019738658035818495,
"loss": 1.7172,
"step": 245
},
{
"epoch": 0.34,
"grad_norm": 0.216796875,
"learning_rate": 0.00019711154955721335,
"loss": 1.7092,
"step": 250
},
{
"epoch": 0.35,
"grad_norm": 0.240234375,
"learning_rate": 0.00019682297302957665,
"loss": 1.6991,
"step": 255
},
{
"epoch": 0.35,
"grad_norm": 0.2265625,
"learning_rate": 0.00019652089102773488,
"loss": 1.6896,
"step": 260
},
{
"epoch": 0.36,
"grad_norm": 0.2158203125,
"learning_rate": 0.00019620534568797607,
"loss": 1.7012,
"step": 265
},
{
"epoch": 0.37,
"grad_norm": 0.2177734375,
"learning_rate": 0.000195876381024539,
"loss": 1.6948,
"step": 270
},
{
"epoch": 0.37,
"grad_norm": 0.23828125,
"learning_rate": 0.00019553404292347356,
"loss": 1.7095,
"step": 275
},
{
"epoch": 0.38,
"grad_norm": 0.23046875,
"learning_rate": 0.00019517837913624048,
"loss": 1.7052,
"step": 280
},
{
"epoch": 0.39,
"grad_norm": 0.2236328125,
"learning_rate": 0.00019480943927305058,
"loss": 1.6961,
"step": 285
},
{
"epoch": 0.39,
"grad_norm": 0.2177734375,
"learning_rate": 0.00019442727479594484,
"loss": 1.7012,
"step": 290
},
{
"epoch": 0.4,
"grad_norm": 0.21484375,
"learning_rate": 0.00019403193901161613,
"loss": 1.6753,
"step": 295
},
{
"epoch": 0.41,
"grad_norm": 0.2314453125,
"learning_rate": 0.00019362348706397373,
"loss": 1.7024,
"step": 300
},
{
"epoch": 0.41,
"grad_norm": 0.220703125,
"learning_rate": 0.00019320197592645138,
"loss": 1.6797,
"step": 305
},
{
"epoch": 0.42,
"grad_norm": 0.2314453125,
"learning_rate": 0.00019276746439406047,
"loss": 1.6716,
"step": 310
},
{
"epoch": 0.43,
"grad_norm": 0.236328125,
"learning_rate": 0.00019232001307518867,
"loss": 1.6757,
"step": 315
},
{
"epoch": 0.43,
"grad_norm": 0.2275390625,
"learning_rate": 0.00019185968438314616,
"loss": 1.6809,
"step": 320
},
{
"epoch": 0.44,
"grad_norm": 0.2353515625,
"learning_rate": 0.00019138654252745968,
"loss": 1.6766,
"step": 325
},
{
"epoch": 0.45,
"grad_norm": 0.23828125,
"learning_rate": 0.00019090065350491626,
"loss": 1.6687,
"step": 330
},
{
"epoch": 0.45,
"grad_norm": 0.2294921875,
"learning_rate": 0.00019040208509035745,
"loss": 1.6548,
"step": 335
},
{
"epoch": 0.46,
"grad_norm": 0.2314453125,
"learning_rate": 0.00018989090682722585,
"loss": 1.6639,
"step": 340
},
{
"epoch": 0.47,
"grad_norm": 0.220703125,
"learning_rate": 0.00018936719001786453,
"loss": 1.6849,
"step": 345
},
{
"epoch": 0.47,
"grad_norm": 0.23046875,
"learning_rate": 0.00018883100771357157,
"loss": 1.6981,
"step": 350
},
{
"epoch": 0.48,
"grad_norm": 0.2236328125,
"learning_rate": 0.00018828243470441028,
"loss": 1.6958,
"step": 355
},
{
"epoch": 0.49,
"grad_norm": 0.2177734375,
"learning_rate": 0.00018772154750877696,
"loss": 1.6754,
"step": 360
},
{
"epoch": 0.49,
"grad_norm": 0.2275390625,
"learning_rate": 0.00018714842436272773,
"loss": 1.6649,
"step": 365
},
{
"epoch": 0.5,
"grad_norm": 0.228515625,
"learning_rate": 0.00018656314520906571,
"loss": 1.6376,
"step": 370
},
{
"epoch": 0.51,
"grad_norm": 0.244140625,
"learning_rate": 0.0001859657916861899,
"loss": 1.6803,
"step": 375
},
{
"epoch": 0.51,
"grad_norm": 0.224609375,
"learning_rate": 0.00018535644711670804,
"loss": 1.6787,
"step": 380
},
{
"epoch": 0.52,
"grad_norm": 0.22265625,
"learning_rate": 0.00018473519649581398,
"loss": 1.6717,
"step": 385
},
{
"epoch": 0.53,
"grad_norm": 0.240234375,
"learning_rate": 0.00018410212647943216,
"loss": 1.6846,
"step": 390
},
{
"epoch": 0.53,
"grad_norm": 0.2216796875,
"learning_rate": 0.00018345732537213027,
"loss": 1.6464,
"step": 395
},
{
"epoch": 0.54,
"grad_norm": 0.2353515625,
"learning_rate": 0.00018280088311480201,
"loss": 1.6409,
"step": 400
},
{
"epoch": 0.55,
"grad_norm": 0.234375,
"learning_rate": 0.0001821328912721215,
"loss": 1.6639,
"step": 405
},
{
"epoch": 0.55,
"grad_norm": 0.2265625,
"learning_rate": 0.00018145344301977127,
"loss": 1.6596,
"step": 410
},
{
"epoch": 0.56,
"grad_norm": 0.23046875,
"learning_rate": 0.00018076263313144568,
"loss": 1.6841,
"step": 415
},
{
"epoch": 0.57,
"grad_norm": 0.2275390625,
"learning_rate": 0.00018006055796563104,
"loss": 1.6552,
"step": 420
},
{
"epoch": 0.58,
"grad_norm": 0.23046875,
"learning_rate": 0.00017934731545216515,
"loss": 1.6723,
"step": 425
},
{
"epoch": 0.58,
"grad_norm": 0.22265625,
"learning_rate": 0.00017862300507857733,
"loss": 1.6813,
"step": 430
},
{
"epoch": 0.59,
"grad_norm": 0.220703125,
"learning_rate": 0.00017788772787621126,
"loss": 1.6615,
"step": 435
},
{
"epoch": 0.6,
"grad_norm": 0.255859375,
"learning_rate": 0.0001771415864061326,
"loss": 1.6444,
"step": 440
},
{
"epoch": 0.6,
"grad_norm": 0.236328125,
"learning_rate": 0.00017638468474482296,
"loss": 1.6697,
"step": 445
},
{
"epoch": 0.61,
"grad_norm": 0.234375,
"learning_rate": 0.0001756171284696629,
"loss": 1.6725,
"step": 450
},
{
"epoch": 0.62,
"grad_norm": 0.236328125,
"learning_rate": 0.00017483902464420506,
"loss": 1.6637,
"step": 455
},
{
"epoch": 0.62,
"grad_norm": 0.2392578125,
"learning_rate": 0.00017405048180324043,
"loss": 1.6495,
"step": 460
},
{
"epoch": 0.63,
"grad_norm": 0.234375,
"learning_rate": 0.00017325160993765932,
"loss": 1.6688,
"step": 465
},
{
"epoch": 0.64,
"grad_norm": 0.234375,
"learning_rate": 0.00017244252047910892,
"loss": 1.6561,
"step": 470
},
{
"epoch": 0.64,
"grad_norm": 0.2333984375,
"learning_rate": 0.0001716233262844502,
"loss": 1.6558,
"step": 475
},
{
"epoch": 0.65,
"grad_norm": 0.2373046875,
"learning_rate": 0.00017079414162001613,
"loss": 1.6518,
"step": 480
},
{
"epoch": 0.66,
"grad_norm": 0.2333984375,
"learning_rate": 0.00016995508214567276,
"loss": 1.6415,
"step": 485
},
{
"epoch": 0.66,
"grad_norm": 0.224609375,
"learning_rate": 0.00016910626489868649,
"loss": 1.6574,
"step": 490
},
{
"epoch": 0.67,
"grad_norm": 0.248046875,
"learning_rate": 0.00016824780827739887,
"loss": 1.6581,
"step": 495
},
{
"epoch": 0.68,
"grad_norm": 0.2333984375,
"learning_rate": 0.0001673798320247118,
"loss": 1.6623,
"step": 500
},
{
"epoch": 0.68,
"grad_norm": 0.232421875,
"learning_rate": 0.0001665024572113848,
"loss": 1.6696,
"step": 505
},
{
"epoch": 0.69,
"grad_norm": 0.228515625,
"learning_rate": 0.00016561580621914765,
"loss": 1.6404,
"step": 510
},
{
"epoch": 0.7,
"grad_norm": 0.2353515625,
"learning_rate": 0.00016472000272362935,
"loss": 1.667,
"step": 515
},
{
"epoch": 0.7,
"grad_norm": 0.240234375,
"learning_rate": 0.00016381517167710755,
"loss": 1.6439,
"step": 520
},
{
"epoch": 0.71,
"grad_norm": 0.23828125,
"learning_rate": 0.00016290143929107912,
"loss": 1.6346,
"step": 525
},
{
"epoch": 0.72,
"grad_norm": 0.2353515625,
"learning_rate": 0.00016197893301865549,
"loss": 1.6616,
"step": 530
},
{
"epoch": 0.72,
"grad_norm": 0.2470703125,
"learning_rate": 0.00016104778153678466,
"loss": 1.651,
"step": 535
},
{
"epoch": 0.73,
"grad_norm": 0.2421875,
"learning_rate": 0.00016010811472830252,
"loss": 1.629,
"step": 540
},
{
"epoch": 0.74,
"grad_norm": 0.234375,
"learning_rate": 0.0001591600636638161,
"loss": 1.6497,
"step": 545
},
{
"epoch": 0.74,
"grad_norm": 0.2353515625,
"learning_rate": 0.00015820376058342078,
"loss": 1.6533,
"step": 550
},
{
"epoch": 0.75,
"grad_norm": 0.2294921875,
"learning_rate": 0.00015723933887825493,
"loss": 1.6424,
"step": 555
},
{
"epoch": 0.76,
"grad_norm": 0.2470703125,
"learning_rate": 0.00015626693307189336,
"loss": 1.6379,
"step": 560
},
{
"epoch": 0.76,
"grad_norm": 0.234375,
"learning_rate": 0.0001552866788015834,
"loss": 1.6344,
"step": 565
},
{
"epoch": 0.77,
"grad_norm": 0.2373046875,
"learning_rate": 0.00015429871279932513,
"loss": 1.6492,
"step": 570
},
{
"epoch": 0.78,
"grad_norm": 0.2333984375,
"learning_rate": 0.0001533031728727994,
"loss": 1.6245,
"step": 575
},
{
"epoch": 0.78,
"grad_norm": 0.236328125,
"learning_rate": 0.00015230019788614526,
"loss": 1.6325,
"step": 580
},
{
"epoch": 0.79,
"grad_norm": 0.23828125,
"learning_rate": 0.00015128992774059063,
"loss": 1.6367,
"step": 585
},
{
"epoch": 0.8,
"grad_norm": 0.2353515625,
"learning_rate": 0.0001502725033549377,
"loss": 1.6269,
"step": 590
},
{
"epoch": 0.81,
"grad_norm": 0.236328125,
"learning_rate": 0.000149248066645907,
"loss": 1.6199,
"step": 595
},
{
"epoch": 0.81,
"grad_norm": 0.2421875,
"learning_rate": 0.00014821676050834166,
"loss": 1.6358,
"step": 600
},
{
"epoch": 0.82,
"grad_norm": 0.2373046875,
"learning_rate": 0.00014717872879527576,
"loss": 1.6359,
"step": 605
},
{
"epoch": 0.83,
"grad_norm": 0.2451171875,
"learning_rate": 0.0001461341162978688,
"loss": 1.6371,
"step": 610
},
{
"epoch": 0.83,
"grad_norm": 0.2373046875,
"learning_rate": 0.00014508306872520912,
"loss": 1.6464,
"step": 615
},
{
"epoch": 0.84,
"grad_norm": 0.2431640625,
"learning_rate": 0.0001440257326839897,
"loss": 1.648,
"step": 620
},
{
"epoch": 0.85,
"grad_norm": 0.2333984375,
"learning_rate": 0.00014296225565805853,
"loss": 1.625,
"step": 625
},
{
"epoch": 0.85,
"grad_norm": 0.2373046875,
"learning_rate": 0.00014189278598784647,
"loss": 1.6327,
"step": 630
},
{
"epoch": 0.86,
"grad_norm": 0.23828125,
"learning_rate": 0.000140817472849676,
"loss": 1.6369,
"step": 635
},
{
"epoch": 0.87,
"grad_norm": 0.2333984375,
"learning_rate": 0.00013973646623495305,
"loss": 1.6194,
"step": 640
},
{
"epoch": 0.87,
"grad_norm": 0.2373046875,
"learning_rate": 0.00013864991692924523,
"loss": 1.6493,
"step": 645
},
{
"epoch": 0.88,
"grad_norm": 0.2431640625,
"learning_rate": 0.00013755797649124944,
"loss": 1.6316,
"step": 650
},
{
"epoch": 0.89,
"grad_norm": 0.2412109375,
"learning_rate": 0.0001364607972316515,
"loss": 1.618,
"step": 655
},
{
"epoch": 0.89,
"grad_norm": 0.2421875,
"learning_rate": 0.00013535853219188063,
"loss": 1.6317,
"step": 660
},
{
"epoch": 0.9,
"grad_norm": 0.2431640625,
"learning_rate": 0.00013425133512276282,
"loss": 1.6146,
"step": 665
},
{
"epoch": 0.91,
"grad_norm": 0.244140625,
"learning_rate": 0.0001331393604630741,
"loss": 1.6347,
"step": 670
},
{
"epoch": 0.91,
"grad_norm": 0.23828125,
"learning_rate": 0.0001320227633179989,
"loss": 1.6241,
"step": 675
},
{
"epoch": 0.92,
"grad_norm": 0.23046875,
"learning_rate": 0.00013090169943749476,
"loss": 1.6324,
"step": 680
},
{
"epoch": 0.93,
"grad_norm": 0.24609375,
"learning_rate": 0.00012977632519456744,
"loss": 1.6373,
"step": 685
},
{
"epoch": 0.93,
"grad_norm": 0.240234375,
"learning_rate": 0.00012864679756345904,
"loss": 1.6453,
"step": 690
},
{
"epoch": 0.94,
"grad_norm": 0.251953125,
"learning_rate": 0.00012751327409775228,
"loss": 1.6447,
"step": 695
},
{
"epoch": 0.95,
"grad_norm": 0.259765625,
"learning_rate": 0.00012637591290839376,
"loss": 1.6283,
"step": 700
},
{
"epoch": 0.95,
"grad_norm": 0.234375,
"learning_rate": 0.00012523487264163997,
"loss": 1.625,
"step": 705
},
{
"epoch": 0.96,
"grad_norm": 0.2373046875,
"learning_rate": 0.00012409031245692797,
"loss": 1.6049,
"step": 710
},
{
"epoch": 0.97,
"grad_norm": 0.2470703125,
"learning_rate": 0.00012294239200467516,
"loss": 1.6418,
"step": 715
},
{
"epoch": 0.97,
"grad_norm": 0.25390625,
"learning_rate": 0.00012179127140400997,
"loss": 1.6229,
"step": 720
},
{
"epoch": 0.98,
"grad_norm": 0.2373046875,
"learning_rate": 0.0001206371112204376,
"loss": 1.6021,
"step": 725
},
{
"epoch": 0.99,
"grad_norm": 0.2451171875,
"learning_rate": 0.00011948007244344332,
"loss": 1.6184,
"step": 730
},
{
"epoch": 0.99,
"grad_norm": 0.2431640625,
"learning_rate": 0.00011832031646403654,
"loss": 1.639,
"step": 735
},
{
"epoch": 1.0,
"eval_loss": 1.7224379777908325,
"eval_runtime": 56.8026,
"eval_samples_per_second": 7.658,
"eval_steps_per_second": 0.968,
"step": 739
},
{
"epoch": 1.0,
"grad_norm": 0.2353515625,
"learning_rate": 0.00011715800505223918,
"loss": 1.5945,
"step": 740
},
{
"epoch": 1.01,
"grad_norm": 0.2373046875,
"learning_rate": 0.00011599330033452079,
"loss": 1.5651,
"step": 745
},
{
"epoch": 1.01,
"grad_norm": 0.2421875,
"learning_rate": 0.0001148263647711842,
"loss": 1.6011,
"step": 750
},
{
"epoch": 1.02,
"grad_norm": 0.244140625,
"learning_rate": 0.00011365736113370462,
"loss": 1.5838,
"step": 755
},
{
"epoch": 1.03,
"grad_norm": 0.25,
"learning_rate": 0.0001124864524820251,
"loss": 1.5881,
"step": 760
},
{
"epoch": 1.04,
"grad_norm": 0.25,
"learning_rate": 0.00011131380214181203,
"loss": 1.5717,
"step": 765
},
{
"epoch": 1.04,
"grad_norm": 0.25,
"learning_rate": 0.00011013957368167342,
"loss": 1.5866,
"step": 770
},
{
"epoch": 1.05,
"grad_norm": 0.25,
"learning_rate": 0.00010896393089034336,
"loss": 1.5516,
"step": 775
},
{
"epoch": 1.06,
"grad_norm": 0.244140625,
"learning_rate": 0.00010778703775383558,
"loss": 1.5718,
"step": 780
},
{
"epoch": 1.06,
"grad_norm": 0.25,
"learning_rate": 0.00010660905843256994,
"loss": 1.5438,
"step": 785
},
{
"epoch": 1.07,
"grad_norm": 0.255859375,
"learning_rate": 0.00010543015723847403,
"loss": 1.5686,
"step": 790
},
{
"epoch": 1.08,
"grad_norm": 0.25390625,
"learning_rate": 0.00010425049861206411,
"loss": 1.5921,
"step": 795
},
{
"epoch": 1.08,
"grad_norm": 0.259765625,
"learning_rate": 0.00010307024709950774,
"loss": 1.5782,
"step": 800
},
{
"epoch": 1.09,
"grad_norm": 0.25,
"learning_rate": 0.00010188956732967208,
"loss": 1.5615,
"step": 805
},
{
"epoch": 1.1,
"grad_norm": 0.2451171875,
"learning_rate": 0.00010070862399116015,
"loss": 1.5866,
"step": 810
},
{
"epoch": 1.1,
"grad_norm": 0.255859375,
"learning_rate": 9.952758180933934e-05,
"loss": 1.5762,
"step": 815
},
{
"epoch": 1.11,
"grad_norm": 0.2490234375,
"learning_rate": 9.834660552336415e-05,
"loss": 1.5756,
"step": 820
},
{
"epoch": 1.12,
"grad_norm": 0.244140625,
"learning_rate": 9.716585986319769e-05,
"loss": 1.5669,
"step": 825
},
{
"epoch": 1.12,
"grad_norm": 0.2578125,
"learning_rate": 9.598550952663383e-05,
"loss": 1.5809,
"step": 830
},
{
"epoch": 1.13,
"grad_norm": 0.263671875,
"learning_rate": 9.480571915632421e-05,
"loss": 1.5898,
"step": 835
},
{
"epoch": 1.14,
"grad_norm": 0.25,
"learning_rate": 9.362665331681294e-05,
"loss": 1.5958,
"step": 840
},
{
"epoch": 1.14,
"grad_norm": 0.25,
"learning_rate": 9.244847647158202e-05,
"loss": 1.579,
"step": 845
},
{
"epoch": 1.15,
"grad_norm": 0.2451171875,
"learning_rate": 9.127135296011101e-05,
"loss": 1.5814,
"step": 850
},
{
"epoch": 1.16,
"grad_norm": 0.24609375,
"learning_rate": 9.009544697495374e-05,
"loss": 1.5581,
"step": 855
},
{
"epoch": 1.16,
"grad_norm": 0.2470703125,
"learning_rate": 8.892092253883601e-05,
"loss": 1.5927,
"step": 860
},
{
"epoch": 1.17,
"grad_norm": 0.2578125,
"learning_rate": 8.77479434817764e-05,
"loss": 1.5679,
"step": 865
},
{
"epoch": 1.18,
"grad_norm": 0.263671875,
"learning_rate": 8.657667341823448e-05,
"loss": 1.5774,
"step": 870
},
{
"epoch": 1.18,
"grad_norm": 0.2578125,
"learning_rate": 8.540727572428854e-05,
"loss": 1.5766,
"step": 875
},
{
"epoch": 1.19,
"grad_norm": 0.25,
"learning_rate": 8.423991351484716e-05,
"loss": 1.5588,
"step": 880
},
{
"epoch": 1.2,
"grad_norm": 0.25390625,
"learning_rate": 8.307474962089677e-05,
"loss": 1.5613,
"step": 885
},
{
"epoch": 1.2,
"grad_norm": 0.2490234375,
"learning_rate": 8.191194656678904e-05,
"loss": 1.5778,
"step": 890
},
{
"epoch": 1.21,
"grad_norm": 0.265625,
"learning_rate": 8.07516665475708e-05,
"loss": 1.5695,
"step": 895
},
{
"epoch": 1.22,
"grad_norm": 0.267578125,
"learning_rate": 7.959407140636034e-05,
"loss": 1.5787,
"step": 900
},
{
"epoch": 1.22,
"grad_norm": 0.2578125,
"learning_rate": 7.843932261177224e-05,
"loss": 1.5562,
"step": 905
},
{
"epoch": 1.23,
"grad_norm": 0.24609375,
"learning_rate": 7.728758123539499e-05,
"loss": 1.582,
"step": 910
},
{
"epoch": 1.24,
"grad_norm": 0.2578125,
"learning_rate": 7.613900792932332e-05,
"loss": 1.5676,
"step": 915
},
{
"epoch": 1.24,
"grad_norm": 0.259765625,
"learning_rate": 7.499376290374994e-05,
"loss": 1.5715,
"step": 920
},
{
"epoch": 1.25,
"grad_norm": 0.271484375,
"learning_rate": 7.385200590461803e-05,
"loss": 1.5816,
"step": 925
},
{
"epoch": 1.26,
"grad_norm": 0.259765625,
"learning_rate": 7.271389619133908e-05,
"loss": 1.577,
"step": 930
},
{
"epoch": 1.27,
"grad_norm": 0.251953125,
"learning_rate": 7.157959251457823e-05,
"loss": 1.5808,
"step": 935
},
{
"epoch": 1.27,
"grad_norm": 0.259765625,
"learning_rate": 7.044925309411093e-05,
"loss": 1.5559,
"step": 940
},
{
"epoch": 1.28,
"grad_norm": 0.267578125,
"learning_rate": 6.932303559675329e-05,
"loss": 1.5794,
"step": 945
},
{
"epoch": 1.29,
"grad_norm": 0.2578125,
"learning_rate": 6.820109711436988e-05,
"loss": 1.5645,
"step": 950
},
{
"epoch": 1.29,
"grad_norm": 0.255859375,
"learning_rate": 6.708359414196133e-05,
"loss": 1.5781,
"step": 955
},
{
"epoch": 1.3,
"grad_norm": 0.25390625,
"learning_rate": 6.59706825558357e-05,
"loss": 1.5714,
"step": 960
},
{
"epoch": 1.31,
"grad_norm": 0.248046875,
"learning_rate": 6.486251759186572e-05,
"loss": 1.5562,
"step": 965
},
{
"epoch": 1.31,
"grad_norm": 0.251953125,
"learning_rate": 6.37592538238356e-05,
"loss": 1.564,
"step": 970
},
{
"epoch": 1.32,
"grad_norm": 0.267578125,
"learning_rate": 6.266104514187997e-05,
"loss": 1.5725,
"step": 975
},
{
"epoch": 1.33,
"grad_norm": 0.259765625,
"learning_rate": 6.156804473101851e-05,
"loss": 1.5794,
"step": 980
},
{
"epoch": 1.33,
"grad_norm": 0.2578125,
"learning_rate": 6.04804050497886e-05,
"loss": 1.5662,
"step": 985
},
{
"epoch": 1.34,
"grad_norm": 0.255859375,
"learning_rate": 5.93982778089796e-05,
"loss": 1.5867,
"step": 990
},
{
"epoch": 1.35,
"grad_norm": 0.259765625,
"learning_rate": 5.832181395047098e-05,
"loss": 1.5863,
"step": 995
},
{
"epoch": 1.35,
"grad_norm": 0.267578125,
"learning_rate": 5.7251163626178394e-05,
"loss": 1.5694,
"step": 1000
},
{
"epoch": 1.36,
"grad_norm": 0.25390625,
"learning_rate": 5.618647617710935e-05,
"loss": 1.5634,
"step": 1005
},
{
"epoch": 1.37,
"grad_norm": 0.2578125,
"learning_rate": 5.5127900112532106e-05,
"loss": 1.5633,
"step": 1010
},
{
"epoch": 1.37,
"grad_norm": 0.255859375,
"learning_rate": 5.407558308926083e-05,
"loss": 1.5736,
"step": 1015
},
{
"epoch": 1.38,
"grad_norm": 0.265625,
"learning_rate": 5.302967189105941e-05,
"loss": 1.5675,
"step": 1020
},
{
"epoch": 1.39,
"grad_norm": 0.251953125,
"learning_rate": 5.199031240816714e-05,
"loss": 1.5509,
"step": 1025
},
{
"epoch": 1.39,
"grad_norm": 0.267578125,
"learning_rate": 5.095764961694922e-05,
"loss": 1.5624,
"step": 1030
},
{
"epoch": 1.4,
"grad_norm": 0.2578125,
"learning_rate": 4.9931827559674115e-05,
"loss": 1.5605,
"step": 1035
},
{
"epoch": 1.41,
"grad_norm": 0.27734375,
"learning_rate": 4.8912989324422164e-05,
"loss": 1.559,
"step": 1040
},
{
"epoch": 1.41,
"grad_norm": 0.271484375,
"learning_rate": 4.790127702512634e-05,
"loss": 1.5778,
"step": 1045
},
{
"epoch": 1.42,
"grad_norm": 0.263671875,
"learning_rate": 4.689683178174964e-05,
"loss": 1.5892,
"step": 1050
},
{
"epoch": 1.43,
"grad_norm": 0.2578125,
"learning_rate": 4.589979370060037e-05,
"loss": 1.5488,
"step": 1055
},
{
"epoch": 1.43,
"grad_norm": 0.251953125,
"learning_rate": 4.491030185478976e-05,
"loss": 1.5906,
"step": 1060
},
{
"epoch": 1.44,
"grad_norm": 0.2578125,
"learning_rate": 4.392849426483274e-05,
"loss": 1.5782,
"step": 1065
},
{
"epoch": 1.45,
"grad_norm": 0.259765625,
"learning_rate": 4.295450787939622e-05,
"loss": 1.559,
"step": 1070
},
{
"epoch": 1.45,
"grad_norm": 0.26171875,
"learning_rate": 4.198847855619652e-05,
"loss": 1.5792,
"step": 1075
},
{
"epoch": 1.46,
"grad_norm": 0.2734375,
"learning_rate": 4.103054104304912e-05,
"loss": 1.5893,
"step": 1080
},
{
"epoch": 1.47,
"grad_norm": 0.26171875,
"learning_rate": 4.0080828959073255e-05,
"loss": 1.5663,
"step": 1085
},
{
"epoch": 1.47,
"grad_norm": 0.26171875,
"learning_rate": 3.913947477605378e-05,
"loss": 1.5665,
"step": 1090
},
{
"epoch": 1.48,
"grad_norm": 0.275390625,
"learning_rate": 3.82066097999632e-05,
"loss": 1.5795,
"step": 1095
},
{
"epoch": 1.49,
"grad_norm": 0.255859375,
"learning_rate": 3.7282364152646297e-05,
"loss": 1.5862,
"step": 1100
},
{
"epoch": 1.5,
"grad_norm": 0.26953125,
"learning_rate": 3.636686675367006e-05,
"loss": 1.5705,
"step": 1105
},
{
"epoch": 1.5,
"grad_norm": 0.26953125,
"learning_rate": 3.546024530234091e-05,
"loss": 1.5794,
"step": 1110
},
{
"epoch": 1.51,
"grad_norm": 0.2578125,
"learning_rate": 3.45626262598926e-05,
"loss": 1.5704,
"step": 1115
},
{
"epoch": 1.52,
"grad_norm": 0.259765625,
"learning_rate": 3.367413483184654e-05,
"loss": 1.5677,
"step": 1120
},
{
"epoch": 1.52,
"grad_norm": 0.2578125,
"learning_rate": 3.279489495054742e-05,
"loss": 1.5562,
"step": 1125
},
{
"epoch": 1.53,
"grad_norm": 0.251953125,
"learning_rate": 3.19250292578762e-05,
"loss": 1.5317,
"step": 1130
},
{
"epoch": 1.54,
"grad_norm": 0.265625,
"learning_rate": 3.106465908814342e-05,
"loss": 1.571,
"step": 1135
},
{
"epoch": 1.54,
"grad_norm": 0.26171875,
"learning_rate": 3.021390445116462e-05,
"loss": 1.5841,
"step": 1140
},
{
"epoch": 1.55,
"grad_norm": 0.26171875,
"learning_rate": 2.937288401552063e-05,
"loss": 1.5712,
"step": 1145
},
{
"epoch": 1.56,
"grad_norm": 0.25,
"learning_rate": 2.8541715092005094e-05,
"loss": 1.5708,
"step": 1150
},
{
"epoch": 1.56,
"grad_norm": 0.265625,
"learning_rate": 2.7720513617260856e-05,
"loss": 1.567,
"step": 1155
},
{
"epoch": 1.57,
"grad_norm": 0.310546875,
"learning_rate": 2.6909394137608868e-05,
"loss": 1.563,
"step": 1160
},
{
"epoch": 1.58,
"grad_norm": 0.26171875,
"learning_rate": 2.6108469793070156e-05,
"loss": 1.5789,
"step": 1165
},
{
"epoch": 1.58,
"grad_norm": 0.275390625,
"learning_rate": 2.5317852301584643e-05,
"loss": 1.565,
"step": 1170
},
{
"epoch": 1.59,
"grad_norm": 0.2578125,
"learning_rate": 2.4537651943427665e-05,
"loss": 1.5401,
"step": 1175
},
{
"epoch": 1.6,
"grad_norm": 0.267578125,
"learning_rate": 2.3767977545827845e-05,
"loss": 1.5669,
"step": 1180
},
{
"epoch": 1.6,
"grad_norm": 0.255859375,
"learning_rate": 2.300893646778681e-05,
"loss": 1.5491,
"step": 1185
},
{
"epoch": 1.61,
"grad_norm": 0.2578125,
"learning_rate": 2.226063458510428e-05,
"loss": 1.566,
"step": 1190
},
{
"epoch": 1.62,
"grad_norm": 0.259765625,
"learning_rate": 2.152317627560979e-05,
"loss": 1.5675,
"step": 1195
},
{
"epoch": 1.62,
"grad_norm": 0.255859375,
"learning_rate": 2.0796664404603416e-05,
"loss": 1.5486,
"step": 1200
},
{
"epoch": 1.63,
"grad_norm": 0.263671875,
"learning_rate": 2.008120031050753e-05,
"loss": 1.5786,
"step": 1205
},
{
"epoch": 1.64,
"grad_norm": 0.271484375,
"learning_rate": 1.9376883790731414e-05,
"loss": 1.5747,
"step": 1210
},
{
"epoch": 1.64,
"grad_norm": 0.263671875,
"learning_rate": 1.86838130877509e-05,
"loss": 1.5687,
"step": 1215
},
{
"epoch": 1.65,
"grad_norm": 0.2578125,
"learning_rate": 1.8002084875404934e-05,
"loss": 1.5754,
"step": 1220
},
{
"epoch": 1.66,
"grad_norm": 0.263671875,
"learning_rate": 1.7331794245410926e-05,
"loss": 1.5715,
"step": 1225
},
{
"epoch": 1.66,
"grad_norm": 0.26953125,
"learning_rate": 1.6673034694100655e-05,
"loss": 1.58,
"step": 1230
},
{
"epoch": 1.67,
"grad_norm": 0.2578125,
"learning_rate": 1.6025898109378967e-05,
"loss": 1.5952,
"step": 1235
},
{
"epoch": 1.68,
"grad_norm": 0.2578125,
"learning_rate": 1.5390474757906446e-05,
"loss": 1.5688,
"step": 1240
},
{
"epoch": 1.68,
"grad_norm": 0.26953125,
"learning_rate": 1.4766853272508786e-05,
"loss": 1.5698,
"step": 1245
},
{
"epoch": 1.69,
"grad_norm": 0.259765625,
"learning_rate": 1.415512063981339e-05,
"loss": 1.5674,
"step": 1250
},
{
"epoch": 1.7,
"grad_norm": 0.259765625,
"learning_rate": 1.3555362188116172e-05,
"loss": 1.5391,
"step": 1255
},
{
"epoch": 1.71,
"grad_norm": 0.271484375,
"learning_rate": 1.2967661575479317e-05,
"loss": 1.5762,
"step": 1260
},
{
"epoch": 1.71,
"grad_norm": 0.255859375,
"learning_rate": 1.2392100778062122e-05,
"loss": 1.5743,
"step": 1265
},
{
"epoch": 1.72,
"grad_norm": 0.26171875,
"learning_rate": 1.1828760078686562e-05,
"loss": 1.5867,
"step": 1270
},
{
"epoch": 1.73,
"grad_norm": 0.25390625,
"learning_rate": 1.1277718055638819e-05,
"loss": 1.5623,
"step": 1275
},
{
"epoch": 1.73,
"grad_norm": 0.267578125,
"learning_rate": 1.0739051571708736e-05,
"loss": 1.5839,
"step": 1280
},
{
"epoch": 1.74,
"grad_norm": 0.25390625,
"learning_rate": 1.0212835763468487e-05,
"loss": 1.5692,
"step": 1285
},
{
"epoch": 1.75,
"grad_norm": 0.2578125,
"learning_rate": 9.699144030792162e-06,
"loss": 1.5643,
"step": 1290
},
{
"epoch": 1.75,
"grad_norm": 0.26171875,
"learning_rate": 9.198048026617323e-06,
"loss": 1.5515,
"step": 1295
},
{
"epoch": 1.76,
"grad_norm": 0.259765625,
"learning_rate": 8.709617646950564e-06,
"loss": 1.5713,
"step": 1300
},
{
"epoch": 1.77,
"grad_norm": 0.251953125,
"learning_rate": 8.233921021117863e-06,
"loss": 1.5542,
"step": 1305
},
{
"epoch": 1.77,
"grad_norm": 0.263671875,
"learning_rate": 7.771024502261526e-06,
"loss": 1.5724,
"step": 1310
},
{
"epoch": 1.78,
"grad_norm": 0.265625,
"learning_rate": 7.320992658084891e-06,
"loss": 1.5839,
"step": 1315
},
{
"epoch": 1.79,
"grad_norm": 0.267578125,
"learning_rate": 6.8838882618459165e-06,
"loss": 1.5818,
"step": 1320
},
{
"epoch": 1.79,
"grad_norm": 0.259765625,
"learning_rate": 6.459772283601218e-06,
"loss": 1.5659,
"step": 1325
},
{
"epoch": 1.8,
"grad_norm": 0.271484375,
"learning_rate": 6.048703881701578e-06,
"loss": 1.5646,
"step": 1330
},
{
"epoch": 1.81,
"grad_norm": 0.263671875,
"learning_rate": 5.650740394540255e-06,
"loss": 1.5613,
"step": 1335
},
{
"epoch": 1.81,
"grad_norm": 0.265625,
"learning_rate": 5.265937332554849e-06,
"loss": 1.566,
"step": 1340
},
{
"epoch": 1.82,
"grad_norm": 0.263671875,
"learning_rate": 4.8943483704846475e-06,
"loss": 1.5748,
"step": 1345
},
{
"epoch": 1.83,
"grad_norm": 0.26953125,
"learning_rate": 4.5360253398834765e-06,
"loss": 1.5706,
"step": 1350
},
{
"epoch": 1.83,
"grad_norm": 0.263671875,
"learning_rate": 4.191018221890097e-06,
"loss": 1.5862,
"step": 1355
},
{
"epoch": 1.84,
"grad_norm": 0.265625,
"learning_rate": 3.859375140256371e-06,
"loss": 1.5878,
"step": 1360
},
{
"epoch": 1.85,
"grad_norm": 0.267578125,
"learning_rate": 3.5411423546348075e-06,
"loss": 1.562,
"step": 1365
},
{
"epoch": 1.85,
"grad_norm": 0.2734375,
"learning_rate": 3.2363642541258676e-06,
"loss": 1.5348,
"step": 1370
},
{
"epoch": 1.86,
"grad_norm": 0.259765625,
"learning_rate": 2.9450833510863307e-06,
"loss": 1.5829,
"step": 1375
},
{
"epoch": 1.87,
"grad_norm": 0.265625,
"learning_rate": 2.667340275199426e-06,
"loss": 1.5797,
"step": 1380
},
{
"epoch": 1.87,
"grad_norm": 0.267578125,
"learning_rate": 2.4031737678074985e-06,
"loss": 1.5903,
"step": 1385
},
{
"epoch": 1.88,
"grad_norm": 0.25390625,
"learning_rate": 2.15262067650821e-06,
"loss": 1.5676,
"step": 1390
},
{
"epoch": 1.89,
"grad_norm": 0.26171875,
"learning_rate": 1.9157159500146958e-06,
"loss": 1.5557,
"step": 1395
},
{
"epoch": 1.89,
"grad_norm": 0.27734375,
"learning_rate": 1.6924926332807956e-06,
"loss": 1.5778,
"step": 1400
},
{
"epoch": 1.9,
"grad_norm": 0.26171875,
"learning_rate": 1.4829818628916526e-06,
"loss": 1.5598,
"step": 1405
},
{
"epoch": 1.91,
"grad_norm": 0.255859375,
"learning_rate": 1.287212862720677e-06,
"loss": 1.5683,
"step": 1410
},
{
"epoch": 1.91,
"grad_norm": 0.255859375,
"learning_rate": 1.1052129398531507e-06,
"loss": 1.545,
"step": 1415
},
{
"epoch": 1.92,
"grad_norm": 0.25390625,
"learning_rate": 9.370074807772966e-07,
"loss": 1.5648,
"step": 1420
},
{
"epoch": 1.93,
"grad_norm": 0.255859375,
"learning_rate": 7.826199478431551e-07,
"loss": 1.5441,
"step": 1425
},
{
"epoch": 1.94,
"grad_norm": 0.263671875,
"learning_rate": 6.420718759900357e-07,
"loss": 1.572,
"step": 1430
},
{
"epoch": 1.94,
"grad_norm": 0.2578125,
"learning_rate": 5.153828697425422e-07,
"loss": 1.5409,
"step": 1435
},
{
"epoch": 1.95,
"grad_norm": 0.267578125,
"learning_rate": 4.025706004760932e-07,
"loss": 1.5713,
"step": 1440
},
{
"epoch": 1.96,
"grad_norm": 0.25390625,
"learning_rate": 3.0365080395200473e-07,
"loss": 1.5608,
"step": 1445
},
{
"epoch": 1.96,
"grad_norm": 0.25390625,
"learning_rate": 2.1863727812254653e-07,
"loss": 1.5541,
"step": 1450
},
{
"epoch": 1.97,
"grad_norm": 0.267578125,
"learning_rate": 1.4754188120631452e-07,
"loss": 1.5628,
"step": 1455
},
{
"epoch": 1.98,
"grad_norm": 0.251953125,
"learning_rate": 9.037453003418739e-08,
"loss": 1.5841,
"step": 1460
},
{
"epoch": 1.98,
"grad_norm": 0.26171875,
"learning_rate": 4.7143198666077524e-08,
"loss": 1.5847,
"step": 1465
},
{
"epoch": 1.99,
"grad_norm": 0.255859375,
"learning_rate": 1.7853917278631838e-08,
"loss": 1.5713,
"step": 1470
},
{
"epoch": 2.0,
"grad_norm": 0.263671875,
"learning_rate": 2.5107713241045993e-09,
"loss": 1.571,
"step": 1475
},
{
"epoch": 2.0,
"eval_loss": 1.7227909564971924,
"eval_runtime": 56.8169,
"eval_samples_per_second": 7.656,
"eval_steps_per_second": 0.968,
"step": 1478
},
{
"epoch": 2.0,
"step": 1478,
"total_flos": 1.521149702706299e+17,
"train_loss": 1.645228040073173,
"train_runtime": 5797.8463,
"train_samples_per_second": 2.039,
"train_steps_per_second": 0.255
}
],
"logging_steps": 5,
"max_steps": 1478,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 100,
"total_flos": 1.521149702706299e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
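
For readers who want to inspect this training log programmatically, here is a minimal sketch (not part of the original repository) of how one might load the JSON above and plot training loss against global step. It assumes the file is saved locally as `trainer_state.json` and that `matplotlib` is installed.

```python
# Sketch only: load trainer_state.json and plot the training-loss curve.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only per-step training entries; eval and summary entries have no "loss" key.
train_log = [entry for entry in state["log_history"] if "loss" in entry]

steps = [entry["step"] for entry in train_log]
losses = [entry["loss"] for entry in train_log]

plt.plot(steps, losses)
plt.xlabel("global step")
plt.ylabel("training loss")
plt.title("tinyllama-1.1b-mt-sft-qlora SFT loss")
plt.show()
```

The same filtering idea applies to the `eval_loss` entries (keyed by `"eval_loss"`) if you want to overlay the per-epoch evaluation points.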