{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 1294,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.015455950540958269,
"grad_norm": 0.357046514749527,
"learning_rate": 0.0003846153846153846,
"loss": 1.5383,
"step": 10
},
{
"epoch": 0.030911901081916538,
"grad_norm": 0.11198900640010834,
"learning_rate": 0.0007692307692307692,
"loss": 1.1091,
"step": 20
},
{
"epoch": 0.04636785162287481,
"grad_norm": 0.056582603603601456,
"learning_rate": 0.001153846153846154,
"loss": 0.7939,
"step": 30
},
{
"epoch": 0.061823802163833076,
"grad_norm": 0.0210476852953434,
"learning_rate": 0.0015384615384615385,
"loss": 0.6165,
"step": 40
},
{
"epoch": 0.07727975270479134,
"grad_norm": 0.012083015404641628,
"learning_rate": 0.0019230769230769232,
"loss": 0.5624,
"step": 50
},
{
"epoch": 0.09273570324574962,
"grad_norm": 0.008505144156515598,
"learning_rate": 0.002307692307692308,
"loss": 0.5269,
"step": 60
},
{
"epoch": 0.10819165378670788,
"grad_norm": 0.0063809980638325214,
"learning_rate": 0.0026923076923076926,
"loss": 0.5059,
"step": 70
},
{
"epoch": 0.12364760432766615,
"grad_norm": 0.005832094699144363,
"learning_rate": 0.0029950657894736842,
"loss": 0.5202,
"step": 80
},
{
"epoch": 0.1391035548686244,
"grad_norm": 0.004662094172090292,
"learning_rate": 0.0029703947368421055,
"loss": 0.5087,
"step": 90
},
{
"epoch": 0.1545595054095827,
"grad_norm": 0.004813206382095814,
"learning_rate": 0.0029457236842105267,
"loss": 0.4979,
"step": 100
},
{
"epoch": 0.17001545595054096,
"grad_norm": 0.003981301095336676,
"learning_rate": 0.0029210526315789475,
"loss": 0.4833,
"step": 110
},
{
"epoch": 0.18547140649149924,
"grad_norm": 0.0037942214403301477,
"learning_rate": 0.0028963815789473687,
"loss": 0.4842,
"step": 120
},
{
"epoch": 0.2009273570324575,
"grad_norm": 0.0041742450557649136,
"learning_rate": 0.0028717105263157895,
"loss": 0.4818,
"step": 130
},
{
"epoch": 0.21638330757341576,
"grad_norm": 0.005099099595099688,
"learning_rate": 0.0028470394736842108,
"loss": 0.4809,
"step": 140
},
{
"epoch": 0.23183925811437403,
"grad_norm": 0.0031047100201249123,
"learning_rate": 0.0028223684210526316,
"loss": 0.5016,
"step": 150
},
{
"epoch": 0.2472952086553323,
"grad_norm": 0.0036040199920535088,
"learning_rate": 0.002797697368421053,
"loss": 0.4775,
"step": 160
},
{
"epoch": 0.26275115919629055,
"grad_norm": 0.0033861789852380753,
"learning_rate": 0.0027730263157894736,
"loss": 0.4784,
"step": 170
},
{
"epoch": 0.2782071097372488,
"grad_norm": 0.003118926426395774,
"learning_rate": 0.002748355263157895,
"loss": 0.4962,
"step": 180
},
{
"epoch": 0.2936630602782071,
"grad_norm": 0.0035265563055872917,
"learning_rate": 0.002723684210526316,
"loss": 0.4829,
"step": 190
},
{
"epoch": 0.3091190108191654,
"grad_norm": 0.0035475995391607285,
"learning_rate": 0.002699013157894737,
"loss": 0.485,
"step": 200
},
{
"epoch": 0.32457496136012365,
"grad_norm": 0.0030264686793088913,
"learning_rate": 0.002674342105263158,
"loss": 0.4681,
"step": 210
},
{
"epoch": 0.3400309119010819,
"grad_norm": 0.0033854299690574408,
"learning_rate": 0.002649671052631579,
"loss": 0.4805,
"step": 220
},
{
"epoch": 0.3554868624420402,
"grad_norm": 0.0029569112230092287,
"learning_rate": 0.002625,
"loss": 0.4688,
"step": 230
},
{
"epoch": 0.37094281298299847,
"grad_norm": 0.0032272525131702423,
"learning_rate": 0.002600328947368421,
"loss": 0.4752,
"step": 240
},
{
"epoch": 0.38639876352395675,
"grad_norm": 0.003502602456137538,
"learning_rate": 0.002575657894736842,
"loss": 0.4699,
"step": 250
},
{
"epoch": 0.401854714064915,
"grad_norm": 0.0031522298231720924,
"learning_rate": 0.002550986842105263,
"loss": 0.4756,
"step": 260
},
{
"epoch": 0.41731066460587324,
"grad_norm": 0.003098264569416642,
"learning_rate": 0.0025263157894736842,
"loss": 0.4574,
"step": 270
},
{
"epoch": 0.4327666151468315,
"grad_norm": 0.0025676521472632885,
"learning_rate": 0.0025016447368421055,
"loss": 0.4779,
"step": 280
},
{
"epoch": 0.4482225656877898,
"grad_norm": 0.0034302272833883762,
"learning_rate": 0.0024769736842105263,
"loss": 0.4729,
"step": 290
},
{
"epoch": 0.46367851622874806,
"grad_norm": 0.003159865504130721,
"learning_rate": 0.0024523026315789475,
"loss": 0.4715,
"step": 300
},
{
"epoch": 0.47913446676970634,
"grad_norm": 0.003168923780322075,
"learning_rate": 0.0024276315789473683,
"loss": 0.4764,
"step": 310
},
{
"epoch": 0.4945904173106646,
"grad_norm": 0.0034859515726566315,
"learning_rate": 0.0024029605263157896,
"loss": 0.4652,
"step": 320
},
{
"epoch": 0.5100463678516228,
"grad_norm": 0.003067239187657833,
"learning_rate": 0.0023782894736842104,
"loss": 0.4648,
"step": 330
},
{
"epoch": 0.5255023183925811,
"grad_norm": 0.0032223982270807028,
"learning_rate": 0.0023536184210526316,
"loss": 0.4725,
"step": 340
},
{
"epoch": 0.5409582689335394,
"grad_norm": 0.0027090355288237333,
"learning_rate": 0.0023289473684210524,
"loss": 0.4704,
"step": 350
},
{
"epoch": 0.5564142194744977,
"grad_norm": 0.003484300570562482,
"learning_rate": 0.0023042763157894736,
"loss": 0.4616,
"step": 360
},
{
"epoch": 0.5718701700154559,
"grad_norm": 0.003339330432936549,
"learning_rate": 0.0022796052631578944,
"loss": 0.4665,
"step": 370
},
{
"epoch": 0.5873261205564142,
"grad_norm": 0.0029797593597322702,
"learning_rate": 0.002254934210526316,
"loss": 0.4573,
"step": 380
},
{
"epoch": 0.6027820710973725,
"grad_norm": 0.0030033981893211603,
"learning_rate": 0.002230263157894737,
"loss": 0.4618,
"step": 390
},
{
"epoch": 0.6182380216383307,
"grad_norm": 0.005113155115395784,
"learning_rate": 0.002205592105263158,
"loss": 0.4589,
"step": 400
},
{
"epoch": 0.633693972179289,
"grad_norm": 0.002975397277623415,
"learning_rate": 0.002180921052631579,
"loss": 0.4765,
"step": 410
},
{
"epoch": 0.6491499227202473,
"grad_norm": 0.004753004759550095,
"learning_rate": 0.00215625,
"loss": 0.4631,
"step": 420
},
{
"epoch": 0.6646058732612056,
"grad_norm": 0.003564928425475955,
"learning_rate": 0.002131578947368421,
"loss": 0.4488,
"step": 430
},
{
"epoch": 0.6800618238021638,
"grad_norm": 0.0032665496692061424,
"learning_rate": 0.0021069078947368422,
"loss": 0.457,
"step": 440
},
{
"epoch": 0.6955177743431221,
"grad_norm": 0.0030079709831625223,
"learning_rate": 0.002082236842105263,
"loss": 0.4667,
"step": 450
},
{
"epoch": 0.7109737248840804,
"grad_norm": 0.0025733078364282846,
"learning_rate": 0.0020575657894736843,
"loss": 0.4667,
"step": 460
},
{
"epoch": 0.7264296754250387,
"grad_norm": 0.00270587345585227,
"learning_rate": 0.0020328947368421055,
"loss": 0.4679,
"step": 470
},
{
"epoch": 0.7418856259659969,
"grad_norm": 0.00273908581584692,
"learning_rate": 0.0020082236842105263,
"loss": 0.4694,
"step": 480
},
{
"epoch": 0.7573415765069552,
"grad_norm": 0.002720112446695566,
"learning_rate": 0.0019835526315789475,
"loss": 0.4513,
"step": 490
},
{
"epoch": 0.7727975270479135,
"grad_norm": 0.0028910296969115734,
"learning_rate": 0.0019588815789473683,
"loss": 0.4592,
"step": 500
},
{
"epoch": 0.7882534775888718,
"grad_norm": 0.003034258494153619,
"learning_rate": 0.0019342105263157896,
"loss": 0.4611,
"step": 510
},
{
"epoch": 0.80370942812983,
"grad_norm": 0.0030727661214768887,
"learning_rate": 0.0019095394736842106,
"loss": 0.463,
"step": 520
},
{
"epoch": 0.8191653786707882,
"grad_norm": 0.0026562565471976995,
"learning_rate": 0.0018848684210526316,
"loss": 0.469,
"step": 530
},
{
"epoch": 0.8346213292117465,
"grad_norm": 0.002964869374409318,
"learning_rate": 0.0018601973684210526,
"loss": 0.462,
"step": 540
},
{
"epoch": 0.8500772797527048,
"grad_norm": 0.0029133500065654516,
"learning_rate": 0.0018355263157894736,
"loss": 0.4678,
"step": 550
},
{
"epoch": 0.865533230293663,
"grad_norm": 0.002890991512686014,
"learning_rate": 0.0018108552631578947,
"loss": 0.4646,
"step": 560
},
{
"epoch": 0.8809891808346213,
"grad_norm": 0.0025943962391465902,
"learning_rate": 0.001786184210526316,
"loss": 0.4645,
"step": 570
},
{
"epoch": 0.8964451313755796,
"grad_norm": 0.0027450553607195616,
"learning_rate": 0.001761513157894737,
"loss": 0.4691,
"step": 580
},
{
"epoch": 0.9119010819165378,
"grad_norm": 0.0027814917266368866,
"learning_rate": 0.001736842105263158,
"loss": 0.4549,
"step": 590
},
{
"epoch": 0.9273570324574961,
"grad_norm": 0.0030699821654707193,
"learning_rate": 0.001712171052631579,
"loss": 0.4694,
"step": 600
},
{
"epoch": 0.9428129829984544,
"grad_norm": 0.002675461582839489,
"learning_rate": 0.0016875,
"loss": 0.4593,
"step": 610
},
{
"epoch": 0.9582689335394127,
"grad_norm": 0.0026055267080664635,
"learning_rate": 0.001662828947368421,
"loss": 0.4681,
"step": 620
},
{
"epoch": 0.973724884080371,
"grad_norm": 0.002623435575515032,
"learning_rate": 0.001638157894736842,
"loss": 0.46,
"step": 630
},
{
"epoch": 0.9891808346213292,
"grad_norm": 0.0023981425911188126,
"learning_rate": 0.001613486842105263,
"loss": 0.4587,
"step": 640
},
{
"epoch": 1.0,
"eval_loss": 0.9165626764297485,
"eval_runtime": 1353.6255,
"eval_samples_per_second": 7.648,
"eval_steps_per_second": 0.478,
"step": 647
},
{
"epoch": 1.0046367851622875,
"grad_norm": 0.002703867619857192,
"learning_rate": 0.001588815789473684,
"loss": 0.4565,
"step": 650
},
{
"epoch": 1.0200927357032457,
"grad_norm": 0.002686214866116643,
"learning_rate": 0.0015641447368421055,
"loss": 0.4656,
"step": 660
},
{
"epoch": 1.035548686244204,
"grad_norm": 0.002944634296000004,
"learning_rate": 0.0015394736842105265,
"loss": 0.4609,
"step": 670
},
{
"epoch": 1.0510046367851622,
"grad_norm": 0.002919598249718547,
"learning_rate": 0.0015148026315789475,
"loss": 0.462,
"step": 680
},
{
"epoch": 1.0664605873261206,
"grad_norm": 0.0027975935954600573,
"learning_rate": 0.0014901315789473686,
"loss": 0.455,
"step": 690
},
{
"epoch": 1.0819165378670788,
"grad_norm": 0.002747561549767852,
"learning_rate": 0.0014654605263157896,
"loss": 0.4556,
"step": 700
},
{
"epoch": 1.0973724884080371,
"grad_norm": 0.0029210918582975864,
"learning_rate": 0.0014407894736842106,
"loss": 0.4486,
"step": 710
},
{
"epoch": 1.1128284389489953,
"grad_norm": 0.0026143298018723726,
"learning_rate": 0.0014161184210526316,
"loss": 0.4593,
"step": 720
},
{
"epoch": 1.1282843894899537,
"grad_norm": 0.002351797418668866,
"learning_rate": 0.0013914473684210526,
"loss": 0.4419,
"step": 730
},
{
"epoch": 1.1437403400309119,
"grad_norm": 0.0029544688295572996,
"learning_rate": 0.0013667763157894737,
"loss": 0.4535,
"step": 740
},
{
"epoch": 1.1591962905718702,
"grad_norm": 0.0025962782092392445,
"learning_rate": 0.0013421052631578947,
"loss": 0.4454,
"step": 750
},
{
"epoch": 1.1746522411128284,
"grad_norm": 0.0026020314544439316,
"learning_rate": 0.0013174342105263157,
"loss": 0.4569,
"step": 760
},
{
"epoch": 1.1901081916537868,
"grad_norm": 0.00269780564121902,
"learning_rate": 0.0012927631578947367,
"loss": 0.4657,
"step": 770
},
{
"epoch": 1.205564142194745,
"grad_norm": 0.0027555320411920547,
"learning_rate": 0.001268092105263158,
"loss": 0.4546,
"step": 780
},
{
"epoch": 1.2210200927357033,
"grad_norm": 0.002603672444820404,
"learning_rate": 0.001243421052631579,
"loss": 0.4547,
"step": 790
},
{
"epoch": 1.2364760432766615,
"grad_norm": 0.002504123141989112,
"learning_rate": 0.00121875,
"loss": 0.4627,
"step": 800
},
{
"epoch": 1.2519319938176197,
"grad_norm": 0.003078003181144595,
"learning_rate": 0.001194078947368421,
"loss": 0.4504,
"step": 810
},
{
"epoch": 1.267387944358578,
"grad_norm": 0.0032601715065538883,
"learning_rate": 0.001169407894736842,
"loss": 0.4508,
"step": 820
},
{
"epoch": 1.2828438948995364,
"grad_norm": 0.003114907769486308,
"learning_rate": 0.0011447368421052633,
"loss": 0.4448,
"step": 830
},
{
"epoch": 1.2982998454404946,
"grad_norm": 0.002611513016745448,
"learning_rate": 0.0011200657894736843,
"loss": 0.4611,
"step": 840
},
{
"epoch": 1.3137557959814528,
"grad_norm": 0.0028356926050037146,
"learning_rate": 0.0010953947368421053,
"loss": 0.4543,
"step": 850
},
{
"epoch": 1.3292117465224111,
"grad_norm": 0.0028607589192688465,
"learning_rate": 0.0010707236842105263,
"loss": 0.4729,
"step": 860
},
{
"epoch": 1.3446676970633695,
"grad_norm": 0.00289659365080297,
"learning_rate": 0.0010460526315789474,
"loss": 0.4567,
"step": 870
},
{
"epoch": 1.3601236476043277,
"grad_norm": 0.002599680330604315,
"learning_rate": 0.0010213815789473686,
"loss": 0.456,
"step": 880
},
{
"epoch": 1.3755795981452859,
"grad_norm": 0.0026973742060363293,
"learning_rate": 0.0009967105263157896,
"loss": 0.4552,
"step": 890
},
{
"epoch": 1.3910355486862442,
"grad_norm": 0.0025935762096196413,
"learning_rate": 0.0009720394736842105,
"loss": 0.4589,
"step": 900
},
{
"epoch": 1.4064914992272024,
"grad_norm": 0.0026942496187984943,
"learning_rate": 0.0009473684210526315,
"loss": 0.455,
"step": 910
},
{
"epoch": 1.4219474497681608,
"grad_norm": 0.0027507098857313395,
"learning_rate": 0.0009226973684210528,
"loss": 0.4627,
"step": 920
},
{
"epoch": 1.437403400309119,
"grad_norm": 0.0029431770090013742,
"learning_rate": 0.0008980263157894738,
"loss": 0.4557,
"step": 930
},
{
"epoch": 1.4528593508500773,
"grad_norm": 0.002638396341353655,
"learning_rate": 0.0008733552631578948,
"loss": 0.4546,
"step": 940
},
{
"epoch": 1.4683153013910355,
"grad_norm": 0.0026191219221800566,
"learning_rate": 0.0008486842105263158,
"loss": 0.4557,
"step": 950
},
{
"epoch": 1.4837712519319939,
"grad_norm": 0.002677230630069971,
"learning_rate": 0.0008240131578947368,
"loss": 0.4632,
"step": 960
},
{
"epoch": 1.499227202472952,
"grad_norm": 0.00285891885869205,
"learning_rate": 0.000799342105263158,
"loss": 0.4581,
"step": 970
},
{
"epoch": 1.5146831530139102,
"grad_norm": 0.0026907038409262896,
"learning_rate": 0.000774671052631579,
"loss": 0.4509,
"step": 980
},
{
"epoch": 1.5301391035548686,
"grad_norm": 0.0024703822564333677,
"learning_rate": 0.00075,
"loss": 0.4481,
"step": 990
},
{
"epoch": 1.545595054095827,
"grad_norm": 0.002853216603398323,
"learning_rate": 0.000725328947368421,
"loss": 0.4438,
"step": 1000
},
{
"epoch": 1.5610510046367851,
"grad_norm": 0.002754925051704049,
"learning_rate": 0.0007006578947368422,
"loss": 0.4656,
"step": 1010
},
{
"epoch": 1.5765069551777433,
"grad_norm": 0.002468267921358347,
"learning_rate": 0.0006759868421052632,
"loss": 0.4515,
"step": 1020
},
{
"epoch": 1.5919629057187017,
"grad_norm": 0.002553540049120784,
"learning_rate": 0.0006513157894736842,
"loss": 0.4506,
"step": 1030
},
{
"epoch": 1.60741885625966,
"grad_norm": 0.0030136853456497192,
"learning_rate": 0.0006266447368421052,
"loss": 0.4552,
"step": 1040
},
{
"epoch": 1.6228748068006182,
"grad_norm": 0.002497268607839942,
"learning_rate": 0.0006019736842105263,
"loss": 0.4506,
"step": 1050
},
{
"epoch": 1.6383307573415764,
"grad_norm": 0.002612602198496461,
"learning_rate": 0.0005773026315789474,
"loss": 0.4519,
"step": 1060
},
{
"epoch": 1.6537867078825348,
"grad_norm": 0.0023622452281415462,
"learning_rate": 0.0005526315789473684,
"loss": 0.4446,
"step": 1070
},
{
"epoch": 1.6692426584234932,
"grad_norm": 0.0023850842844694853,
"learning_rate": 0.0005279605263157895,
"loss": 0.4533,
"step": 1080
},
{
"epoch": 1.6846986089644513,
"grad_norm": 0.002770961495116353,
"learning_rate": 0.0005032894736842105,
"loss": 0.4523,
"step": 1090
},
{
"epoch": 1.7001545595054095,
"grad_norm": 0.002901398576796055,
"learning_rate": 0.0004786184210526316,
"loss": 0.4472,
"step": 1100
},
{
"epoch": 1.7156105100463679,
"grad_norm": 0.002840624190866947,
"learning_rate": 0.0004539473684210526,
"loss": 0.4496,
"step": 1110
},
{
"epoch": 1.7310664605873263,
"grad_norm": 0.002452421234920621,
"learning_rate": 0.00042927631578947365,
"loss": 0.4553,
"step": 1120
},
{
"epoch": 1.7465224111282844,
"grad_norm": 0.0025857773143798113,
"learning_rate": 0.0004046052631578948,
"loss": 0.4504,
"step": 1130
},
{
"epoch": 1.7619783616692426,
"grad_norm": 0.002501491457223892,
"learning_rate": 0.0003799342105263158,
"loss": 0.4479,
"step": 1140
},
{
"epoch": 1.7774343122102008,
"grad_norm": 0.002639561193063855,
"learning_rate": 0.0003552631578947368,
"loss": 0.4627,
"step": 1150
},
{
"epoch": 1.7928902627511591,
"grad_norm": 0.002627423033118248,
"learning_rate": 0.0003305921052631579,
"loss": 0.4496,
"step": 1160
},
{
"epoch": 1.8083462132921175,
"grad_norm": 0.0027368138544261456,
"learning_rate": 0.00030592105263157896,
"loss": 0.4567,
"step": 1170
},
{
"epoch": 1.8238021638330757,
"grad_norm": 0.0022819822188466787,
"learning_rate": 0.00028125000000000003,
"loss": 0.4546,
"step": 1180
},
{
"epoch": 1.8392581143740339,
"grad_norm": 0.0024920173455029726,
"learning_rate": 0.00025657894736842105,
"loss": 0.4571,
"step": 1190
},
{
"epoch": 1.8547140649149922,
"grad_norm": 0.002788729965686798,
"learning_rate": 0.00023190789473684213,
"loss": 0.4597,
"step": 1200
},
{
"epoch": 1.8701700154559506,
"grad_norm": 0.00281331455335021,
"learning_rate": 0.00020723684210526317,
"loss": 0.4561,
"step": 1210
},
{
"epoch": 1.8856259659969088,
"grad_norm": 0.0028766875620931387,
"learning_rate": 0.00018256578947368422,
"loss": 0.4626,
"step": 1220
},
{
"epoch": 1.901081916537867,
"grad_norm": 0.0027803743723779917,
"learning_rate": 0.00015789473684210527,
"loss": 0.4441,
"step": 1230
},
{
"epoch": 1.9165378670788253,
"grad_norm": 0.0023476784117519855,
"learning_rate": 0.0001332236842105263,
"loss": 0.4529,
"step": 1240
},
{
"epoch": 1.9319938176197837,
"grad_norm": 0.003161880187690258,
"learning_rate": 0.00010855263157894737,
"loss": 0.4489,
"step": 1250
},
{
"epoch": 1.947449768160742,
"grad_norm": 0.0026641101576387882,
"learning_rate": 8.388157894736842e-05,
"loss": 0.4608,
"step": 1260
},
{
"epoch": 1.9629057187017,
"grad_norm": 0.002320705447345972,
"learning_rate": 5.921052631578947e-05,
"loss": 0.4608,
"step": 1270
},
{
"epoch": 1.9783616692426584,
"grad_norm": 0.0025752289220690727,
"learning_rate": 3.4539473684210524e-05,
"loss": 0.4466,
"step": 1280
},
{
"epoch": 1.9938176197836168,
"grad_norm": 0.0026244991458952427,
"learning_rate": 9.868421052631579e-06,
"loss": 0.4555,
"step": 1290
}
],
"logging_steps": 10,
"max_steps": 1294,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.3257051724162499e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}