llama3-1_8b_baseline_dcft_oh_v3 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 1269,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02364066193853428,
"grad_norm": 1.5809403657913208,
"learning_rate": 5e-06,
"loss": 0.8881,
"step": 10
},
{
"epoch": 0.04728132387706856,
"grad_norm": 1.8624894618988037,
"learning_rate": 5e-06,
"loss": 0.7858,
"step": 20
},
{
"epoch": 0.07092198581560284,
"grad_norm": 2.2767162322998047,
"learning_rate": 5e-06,
"loss": 0.7546,
"step": 30
},
{
"epoch": 0.09456264775413711,
"grad_norm": 1.0326734781265259,
"learning_rate": 5e-06,
"loss": 0.7357,
"step": 40
},
{
"epoch": 0.1182033096926714,
"grad_norm": 1.808580756187439,
"learning_rate": 5e-06,
"loss": 0.7237,
"step": 50
},
{
"epoch": 0.14184397163120568,
"grad_norm": 0.6697216629981995,
"learning_rate": 5e-06,
"loss": 0.7205,
"step": 60
},
{
"epoch": 0.16548463356973994,
"grad_norm": 1.1802719831466675,
"learning_rate": 5e-06,
"loss": 0.7088,
"step": 70
},
{
"epoch": 0.18912529550827423,
"grad_norm": 0.6576663255691528,
"learning_rate": 5e-06,
"loss": 0.7001,
"step": 80
},
{
"epoch": 0.2127659574468085,
"grad_norm": 0.6217890977859497,
"learning_rate": 5e-06,
"loss": 0.6909,
"step": 90
},
{
"epoch": 0.2364066193853428,
"grad_norm": 0.6629725694656372,
"learning_rate": 5e-06,
"loss": 0.6971,
"step": 100
},
{
"epoch": 0.26004728132387706,
"grad_norm": 0.8252314329147339,
"learning_rate": 5e-06,
"loss": 0.6761,
"step": 110
},
{
"epoch": 0.28368794326241137,
"grad_norm": 0.6688860654830933,
"learning_rate": 5e-06,
"loss": 0.6834,
"step": 120
},
{
"epoch": 0.3073286052009456,
"grad_norm": 0.5039113759994507,
"learning_rate": 5e-06,
"loss": 0.6771,
"step": 130
},
{
"epoch": 0.3309692671394799,
"grad_norm": 0.5916517376899719,
"learning_rate": 5e-06,
"loss": 0.6793,
"step": 140
},
{
"epoch": 0.3546099290780142,
"grad_norm": 0.575602650642395,
"learning_rate": 5e-06,
"loss": 0.6831,
"step": 150
},
{
"epoch": 0.37825059101654845,
"grad_norm": 0.5347154140472412,
"learning_rate": 5e-06,
"loss": 0.6757,
"step": 160
},
{
"epoch": 0.40189125295508277,
"grad_norm": 0.6461175084114075,
"learning_rate": 5e-06,
"loss": 0.6727,
"step": 170
},
{
"epoch": 0.425531914893617,
"grad_norm": 0.6578442454338074,
"learning_rate": 5e-06,
"loss": 0.6707,
"step": 180
},
{
"epoch": 0.4491725768321513,
"grad_norm": 0.6522397398948669,
"learning_rate": 5e-06,
"loss": 0.6737,
"step": 190
},
{
"epoch": 0.4728132387706856,
"grad_norm": 0.7096086740493774,
"learning_rate": 5e-06,
"loss": 0.6648,
"step": 200
},
{
"epoch": 0.49645390070921985,
"grad_norm": 0.7721654772758484,
"learning_rate": 5e-06,
"loss": 0.6645,
"step": 210
},
{
"epoch": 0.5200945626477541,
"grad_norm": 0.9510824680328369,
"learning_rate": 5e-06,
"loss": 0.6656,
"step": 220
},
{
"epoch": 0.5437352245862884,
"grad_norm": 0.7571592926979065,
"learning_rate": 5e-06,
"loss": 0.6651,
"step": 230
},
{
"epoch": 0.5673758865248227,
"grad_norm": 0.6714581847190857,
"learning_rate": 5e-06,
"loss": 0.6573,
"step": 240
},
{
"epoch": 0.5910165484633569,
"grad_norm": 0.5642537474632263,
"learning_rate": 5e-06,
"loss": 0.6657,
"step": 250
},
{
"epoch": 0.6146572104018913,
"grad_norm": 0.6235966086387634,
"learning_rate": 5e-06,
"loss": 0.6539,
"step": 260
},
{
"epoch": 0.6382978723404256,
"grad_norm": 0.5237038731575012,
"learning_rate": 5e-06,
"loss": 0.6608,
"step": 270
},
{
"epoch": 0.6619385342789598,
"grad_norm": 0.5012194514274597,
"learning_rate": 5e-06,
"loss": 0.6621,
"step": 280
},
{
"epoch": 0.6855791962174941,
"grad_norm": 0.5406949520111084,
"learning_rate": 5e-06,
"loss": 0.6592,
"step": 290
},
{
"epoch": 0.7092198581560284,
"grad_norm": 0.5066617727279663,
"learning_rate": 5e-06,
"loss": 0.6532,
"step": 300
},
{
"epoch": 0.7328605200945626,
"grad_norm": 0.4942592680454254,
"learning_rate": 5e-06,
"loss": 0.6649,
"step": 310
},
{
"epoch": 0.7565011820330969,
"grad_norm": 0.4350595474243164,
"learning_rate": 5e-06,
"loss": 0.6584,
"step": 320
},
{
"epoch": 0.7801418439716312,
"grad_norm": 0.48511239886283875,
"learning_rate": 5e-06,
"loss": 0.6551,
"step": 330
},
{
"epoch": 0.8037825059101655,
"grad_norm": 0.5006919503211975,
"learning_rate": 5e-06,
"loss": 0.6511,
"step": 340
},
{
"epoch": 0.8274231678486997,
"grad_norm": 0.7380832433700562,
"learning_rate": 5e-06,
"loss": 0.6539,
"step": 350
},
{
"epoch": 0.851063829787234,
"grad_norm": 0.5691511034965515,
"learning_rate": 5e-06,
"loss": 0.6486,
"step": 360
},
{
"epoch": 0.8747044917257684,
"grad_norm": 0.542851448059082,
"learning_rate": 5e-06,
"loss": 0.6516,
"step": 370
},
{
"epoch": 0.8983451536643026,
"grad_norm": 0.6073257923126221,
"learning_rate": 5e-06,
"loss": 0.6566,
"step": 380
},
{
"epoch": 0.9219858156028369,
"grad_norm": 1.0101559162139893,
"learning_rate": 5e-06,
"loss": 0.646,
"step": 390
},
{
"epoch": 0.9456264775413712,
"grad_norm": 0.5881345868110657,
"learning_rate": 5e-06,
"loss": 0.6531,
"step": 400
},
{
"epoch": 0.9692671394799054,
"grad_norm": 0.5613337755203247,
"learning_rate": 5e-06,
"loss": 0.6466,
"step": 410
},
{
"epoch": 0.9929078014184397,
"grad_norm": 0.5331344604492188,
"learning_rate": 5e-06,
"loss": 0.6519,
"step": 420
},
{
"epoch": 1.0,
"eval_loss": 0.650029182434082,
"eval_runtime": 39.2092,
"eval_samples_per_second": 290.442,
"eval_steps_per_second": 1.148,
"step": 423
},
{
"epoch": 1.016548463356974,
"grad_norm": 0.6450212597846985,
"learning_rate": 5e-06,
"loss": 0.6193,
"step": 430
},
{
"epoch": 1.0401891252955082,
"grad_norm": 0.5882251858711243,
"learning_rate": 5e-06,
"loss": 0.6118,
"step": 440
},
{
"epoch": 1.0638297872340425,
"grad_norm": 0.5410990118980408,
"learning_rate": 5e-06,
"loss": 0.6112,
"step": 450
},
{
"epoch": 1.0874704491725768,
"grad_norm": 0.7050241827964783,
"learning_rate": 5e-06,
"loss": 0.6095,
"step": 460
},
{
"epoch": 1.1111111111111112,
"grad_norm": 0.6384491920471191,
"learning_rate": 5e-06,
"loss": 0.6167,
"step": 470
},
{
"epoch": 1.1347517730496455,
"grad_norm": 0.5661244988441467,
"learning_rate": 5e-06,
"loss": 0.6116,
"step": 480
},
{
"epoch": 1.1583924349881798,
"grad_norm": 0.4538739025592804,
"learning_rate": 5e-06,
"loss": 0.6191,
"step": 490
},
{
"epoch": 1.1820330969267139,
"grad_norm": 0.4373492896556854,
"learning_rate": 5e-06,
"loss": 0.612,
"step": 500
},
{
"epoch": 1.2056737588652482,
"grad_norm": 0.4394100606441498,
"learning_rate": 5e-06,
"loss": 0.6119,
"step": 510
},
{
"epoch": 1.2293144208037825,
"grad_norm": 0.45556795597076416,
"learning_rate": 5e-06,
"loss": 0.6078,
"step": 520
},
{
"epoch": 1.2529550827423168,
"grad_norm": 0.5348520278930664,
"learning_rate": 5e-06,
"loss": 0.6083,
"step": 530
},
{
"epoch": 1.2765957446808511,
"grad_norm": 0.548928439617157,
"learning_rate": 5e-06,
"loss": 0.6103,
"step": 540
},
{
"epoch": 1.3002364066193852,
"grad_norm": 0.5856196284294128,
"learning_rate": 5e-06,
"loss": 0.6121,
"step": 550
},
{
"epoch": 1.3238770685579198,
"grad_norm": 0.4891395568847656,
"learning_rate": 5e-06,
"loss": 0.6091,
"step": 560
},
{
"epoch": 1.3475177304964538,
"grad_norm": 0.45980435609817505,
"learning_rate": 5e-06,
"loss": 0.6144,
"step": 570
},
{
"epoch": 1.3711583924349882,
"grad_norm": 0.4417429566383362,
"learning_rate": 5e-06,
"loss": 0.6086,
"step": 580
},
{
"epoch": 1.3947990543735225,
"grad_norm": 0.48172110319137573,
"learning_rate": 5e-06,
"loss": 0.6084,
"step": 590
},
{
"epoch": 1.4184397163120568,
"grad_norm": 0.5505414009094238,
"learning_rate": 5e-06,
"loss": 0.609,
"step": 600
},
{
"epoch": 1.442080378250591,
"grad_norm": 0.48917776346206665,
"learning_rate": 5e-06,
"loss": 0.6079,
"step": 610
},
{
"epoch": 1.4657210401891252,
"grad_norm": 0.5522086024284363,
"learning_rate": 5e-06,
"loss": 0.6066,
"step": 620
},
{
"epoch": 1.4893617021276595,
"grad_norm": 0.4694415032863617,
"learning_rate": 5e-06,
"loss": 0.6168,
"step": 630
},
{
"epoch": 1.5130023640661938,
"grad_norm": 0.4723762273788452,
"learning_rate": 5e-06,
"loss": 0.6083,
"step": 640
},
{
"epoch": 1.5366430260047281,
"grad_norm": 0.4519807696342468,
"learning_rate": 5e-06,
"loss": 0.6108,
"step": 650
},
{
"epoch": 1.5602836879432624,
"grad_norm": 0.4712272584438324,
"learning_rate": 5e-06,
"loss": 0.6115,
"step": 660
},
{
"epoch": 1.5839243498817965,
"grad_norm": 0.4411340355873108,
"learning_rate": 5e-06,
"loss": 0.604,
"step": 670
},
{
"epoch": 1.607565011820331,
"grad_norm": 0.45366063714027405,
"learning_rate": 5e-06,
"loss": 0.6055,
"step": 680
},
{
"epoch": 1.6312056737588652,
"grad_norm": 0.5291415452957153,
"learning_rate": 5e-06,
"loss": 0.6089,
"step": 690
},
{
"epoch": 1.6548463356973995,
"grad_norm": 0.4964240491390228,
"learning_rate": 5e-06,
"loss": 0.6107,
"step": 700
},
{
"epoch": 1.6784869976359338,
"grad_norm": 0.4780677855014801,
"learning_rate": 5e-06,
"loss": 0.6086,
"step": 710
},
{
"epoch": 1.702127659574468,
"grad_norm": 0.5142894983291626,
"learning_rate": 5e-06,
"loss": 0.6028,
"step": 720
},
{
"epoch": 1.7257683215130024,
"grad_norm": 0.43228575587272644,
"learning_rate": 5e-06,
"loss": 0.6075,
"step": 730
},
{
"epoch": 1.7494089834515365,
"grad_norm": 0.46347859501838684,
"learning_rate": 5e-06,
"loss": 0.6123,
"step": 740
},
{
"epoch": 1.773049645390071,
"grad_norm": 0.5133971571922302,
"learning_rate": 5e-06,
"loss": 0.6033,
"step": 750
},
{
"epoch": 1.7966903073286051,
"grad_norm": 0.5592248439788818,
"learning_rate": 5e-06,
"loss": 0.603,
"step": 760
},
{
"epoch": 1.8203309692671394,
"grad_norm": 0.4413076639175415,
"learning_rate": 5e-06,
"loss": 0.6129,
"step": 770
},
{
"epoch": 1.8439716312056738,
"grad_norm": 0.5284017324447632,
"learning_rate": 5e-06,
"loss": 0.609,
"step": 780
},
{
"epoch": 1.867612293144208,
"grad_norm": 0.5193566083908081,
"learning_rate": 5e-06,
"loss": 0.6116,
"step": 790
},
{
"epoch": 1.8912529550827424,
"grad_norm": 0.4190606474876404,
"learning_rate": 5e-06,
"loss": 0.5997,
"step": 800
},
{
"epoch": 1.9148936170212765,
"grad_norm": 0.4558718800544739,
"learning_rate": 5e-06,
"loss": 0.5988,
"step": 810
},
{
"epoch": 1.938534278959811,
"grad_norm": 0.4878173768520355,
"learning_rate": 5e-06,
"loss": 0.6092,
"step": 820
},
{
"epoch": 1.962174940898345,
"grad_norm": 0.4382663667201996,
"learning_rate": 5e-06,
"loss": 0.6058,
"step": 830
},
{
"epoch": 1.9858156028368794,
"grad_norm": 0.49692317843437195,
"learning_rate": 5e-06,
"loss": 0.6088,
"step": 840
},
{
"epoch": 2.0,
"eval_loss": 0.6398401260375977,
"eval_runtime": 38.8232,
"eval_samples_per_second": 293.33,
"eval_steps_per_second": 1.159,
"step": 846
},
{
"epoch": 2.0094562647754137,
"grad_norm": 0.6104748249053955,
"learning_rate": 5e-06,
"loss": 0.5878,
"step": 850
},
{
"epoch": 2.033096926713948,
"grad_norm": 0.5593996644020081,
"learning_rate": 5e-06,
"loss": 0.5595,
"step": 860
},
{
"epoch": 2.0567375886524824,
"grad_norm": 0.5715161561965942,
"learning_rate": 5e-06,
"loss": 0.5664,
"step": 870
},
{
"epoch": 2.0803782505910164,
"grad_norm": 0.4689047932624817,
"learning_rate": 5e-06,
"loss": 0.5594,
"step": 880
},
{
"epoch": 2.104018912529551,
"grad_norm": 0.4636439383029938,
"learning_rate": 5e-06,
"loss": 0.5666,
"step": 890
},
{
"epoch": 2.127659574468085,
"grad_norm": 0.5165823101997375,
"learning_rate": 5e-06,
"loss": 0.5651,
"step": 900
},
{
"epoch": 2.1513002364066196,
"grad_norm": 0.4929666519165039,
"learning_rate": 5e-06,
"loss": 0.5712,
"step": 910
},
{
"epoch": 2.1749408983451537,
"grad_norm": 0.6087996363639832,
"learning_rate": 5e-06,
"loss": 0.5693,
"step": 920
},
{
"epoch": 2.198581560283688,
"grad_norm": 0.6086291074752808,
"learning_rate": 5e-06,
"loss": 0.569,
"step": 930
},
{
"epoch": 2.2222222222222223,
"grad_norm": 0.5552637577056885,
"learning_rate": 5e-06,
"loss": 0.565,
"step": 940
},
{
"epoch": 2.2458628841607564,
"grad_norm": 0.4776444435119629,
"learning_rate": 5e-06,
"loss": 0.5686,
"step": 950
},
{
"epoch": 2.269503546099291,
"grad_norm": 0.4616422653198242,
"learning_rate": 5e-06,
"loss": 0.567,
"step": 960
},
{
"epoch": 2.293144208037825,
"grad_norm": 0.5176410675048828,
"learning_rate": 5e-06,
"loss": 0.5755,
"step": 970
},
{
"epoch": 2.3167848699763596,
"grad_norm": 0.47728076577186584,
"learning_rate": 5e-06,
"loss": 0.566,
"step": 980
},
{
"epoch": 2.3404255319148937,
"grad_norm": 0.4709567725658417,
"learning_rate": 5e-06,
"loss": 0.5641,
"step": 990
},
{
"epoch": 2.3640661938534278,
"grad_norm": 0.7246980667114258,
"learning_rate": 5e-06,
"loss": 0.5632,
"step": 1000
},
{
"epoch": 2.3877068557919623,
"grad_norm": 0.5151357650756836,
"learning_rate": 5e-06,
"loss": 0.5652,
"step": 1010
},
{
"epoch": 2.4113475177304964,
"grad_norm": 0.46736642718315125,
"learning_rate": 5e-06,
"loss": 0.5739,
"step": 1020
},
{
"epoch": 2.434988179669031,
"grad_norm": 0.43881499767303467,
"learning_rate": 5e-06,
"loss": 0.5671,
"step": 1030
},
{
"epoch": 2.458628841607565,
"grad_norm": 0.4691537022590637,
"learning_rate": 5e-06,
"loss": 0.5691,
"step": 1040
},
{
"epoch": 2.482269503546099,
"grad_norm": 0.4675734043121338,
"learning_rate": 5e-06,
"loss": 0.5667,
"step": 1050
},
{
"epoch": 2.5059101654846336,
"grad_norm": 0.5383506417274475,
"learning_rate": 5e-06,
"loss": 0.5715,
"step": 1060
},
{
"epoch": 2.5295508274231677,
"grad_norm": 0.5362086892127991,
"learning_rate": 5e-06,
"loss": 0.5596,
"step": 1070
},
{
"epoch": 2.5531914893617023,
"grad_norm": 0.5002833008766174,
"learning_rate": 5e-06,
"loss": 0.5704,
"step": 1080
},
{
"epoch": 2.5768321513002364,
"grad_norm": 0.43481341004371643,
"learning_rate": 5e-06,
"loss": 0.5671,
"step": 1090
},
{
"epoch": 2.6004728132387704,
"grad_norm": 0.5383955836296082,
"learning_rate": 5e-06,
"loss": 0.566,
"step": 1100
},
{
"epoch": 2.624113475177305,
"grad_norm": 0.5254177451133728,
"learning_rate": 5e-06,
"loss": 0.5742,
"step": 1110
},
{
"epoch": 2.6477541371158395,
"grad_norm": 0.5556529760360718,
"learning_rate": 5e-06,
"loss": 0.569,
"step": 1120
},
{
"epoch": 2.6713947990543736,
"grad_norm": 0.7989609241485596,
"learning_rate": 5e-06,
"loss": 0.5718,
"step": 1130
},
{
"epoch": 2.6950354609929077,
"grad_norm": 0.4934903681278229,
"learning_rate": 5e-06,
"loss": 0.5675,
"step": 1140
},
{
"epoch": 2.7186761229314422,
"grad_norm": 0.47960445284843445,
"learning_rate": 5e-06,
"loss": 0.5699,
"step": 1150
},
{
"epoch": 2.7423167848699763,
"grad_norm": 0.462834894657135,
"learning_rate": 5e-06,
"loss": 0.5712,
"step": 1160
},
{
"epoch": 2.7659574468085104,
"grad_norm": 0.4646831452846527,
"learning_rate": 5e-06,
"loss": 0.5661,
"step": 1170
},
{
"epoch": 2.789598108747045,
"grad_norm": 0.4419684112071991,
"learning_rate": 5e-06,
"loss": 0.573,
"step": 1180
},
{
"epoch": 2.813238770685579,
"grad_norm": 0.512622058391571,
"learning_rate": 5e-06,
"loss": 0.5717,
"step": 1190
},
{
"epoch": 2.8368794326241136,
"grad_norm": 0.4194084107875824,
"learning_rate": 5e-06,
"loss": 0.5619,
"step": 1200
},
{
"epoch": 2.8605200945626477,
"grad_norm": 0.4545256793498993,
"learning_rate": 5e-06,
"loss": 0.5699,
"step": 1210
},
{
"epoch": 2.884160756501182,
"grad_norm": 0.4438764154911041,
"learning_rate": 5e-06,
"loss": 0.5725,
"step": 1220
},
{
"epoch": 2.9078014184397163,
"grad_norm": 0.4500187039375305,
"learning_rate": 5e-06,
"loss": 0.5685,
"step": 1230
},
{
"epoch": 2.9314420803782504,
"grad_norm": 0.5051234364509583,
"learning_rate": 5e-06,
"loss": 0.5696,
"step": 1240
},
{
"epoch": 2.955082742316785,
"grad_norm": 0.5184685587882996,
"learning_rate": 5e-06,
"loss": 0.5713,
"step": 1250
},
{
"epoch": 2.978723404255319,
"grad_norm": 0.5309048891067505,
"learning_rate": 5e-06,
"loss": 0.5839,
"step": 1260
},
{
"epoch": 3.0,
"eval_loss": 0.6423146724700928,
"eval_runtime": 39.1245,
"eval_samples_per_second": 291.071,
"eval_steps_per_second": 1.15,
"step": 1269
},
{
"epoch": 3.0,
"step": 1269,
"total_flos": 5.9918252490593665e+19,
"train_loss": 0.6196046063428418,
"train_runtime": 9031.2946,
"train_samples_per_second": 71.874,
"train_steps_per_second": 0.141
}
],
"logging_steps": 10,
"max_steps": 1269,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.9918252490593665e+19,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}