FuseChat-Llama-3.1-8B-Instruct / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.8063872255489022,
"eval_steps": 500,
"global_step": 404,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.001996007984031936,
"grad_norm": 29.364113674215965,
"learning_rate": 1.5686274509803922e-08,
"logits/chosen": -1.6221545934677124,
"logits/rejected": -1.6521912813186646,
"logps/chosen": -0.940773606300354,
"logps/rejected": -0.851032555103302,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.00998003992015968,
"grad_norm": 31.405892041917173,
"learning_rate": 7.843137254901961e-08,
"logits/chosen": -1.5107966661453247,
"logits/rejected": -1.3356468677520752,
"logps/chosen": -0.7684795260429382,
"logps/rejected": -0.8418906331062317,
"loss": 0.692,
"rewards/accuracies": 0.515625,
"rewards/chosen": 0.0024527055211365223,
"rewards/margins": 0.004762639291584492,
"rewards/rejected": -0.0023099337704479694,
"step": 5
},
{
"epoch": 0.01996007984031936,
"grad_norm": 32.33090024343072,
"learning_rate": 1.5686274509803921e-07,
"logits/chosen": -1.4846872091293335,
"logits/rejected": -1.3766502141952515,
"logps/chosen": -0.8155986070632935,
"logps/rejected": -0.9123713374137878,
"loss": 0.6918,
"rewards/accuracies": 0.550000011920929,
"rewards/chosen": -0.003020394593477249,
"rewards/margins": 0.00013434886932373047,
"rewards/rejected": -0.0031547434628009796,
"step": 10
},
{
"epoch": 0.029940119760479042,
"grad_norm": 28.485164448197256,
"learning_rate": 2.352941176470588e-07,
"logits/chosen": -1.5661466121673584,
"logits/rejected": -1.494202971458435,
"logps/chosen": -0.9453551173210144,
"logps/rejected": -1.0322425365447998,
"loss": 0.6913,
"rewards/accuracies": 0.4375,
"rewards/chosen": -0.0035083256661891937,
"rewards/margins": -0.0018780697137117386,
"rewards/rejected": -0.0016302559524774551,
"step": 15
},
{
"epoch": 0.03992015968063872,
"grad_norm": 38.07958741824287,
"learning_rate": 3.1372549019607843e-07,
"logits/chosen": -1.5159441232681274,
"logits/rejected": -1.506949782371521,
"logps/chosen": -1.148013710975647,
"logps/rejected": -1.2202709913253784,
"loss": 0.6887,
"rewards/accuracies": 0.4625000059604645,
"rewards/chosen": -0.04793555662035942,
"rewards/margins": 0.017127588391304016,
"rewards/rejected": -0.06506314128637314,
"step": 20
},
{
"epoch": 0.0499001996007984,
"grad_norm": 27.572568095419346,
"learning_rate": 3.92156862745098e-07,
"logits/chosen": -1.4518249034881592,
"logits/rejected": -1.3797534704208374,
"logps/chosen": -0.9855057001113892,
"logps/rejected": -1.0200985670089722,
"loss": 0.691,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.07226995378732681,
"rewards/margins": 0.020743293687701225,
"rewards/rejected": -0.09301324933767319,
"step": 25
},
{
"epoch": 0.059880239520958084,
"grad_norm": 31.06009539067789,
"learning_rate": 4.705882352941176e-07,
"logits/chosen": -1.499815583229065,
"logits/rejected": -1.5191256999969482,
"logps/chosen": -0.9442195892333984,
"logps/rejected": -1.075582504272461,
"loss": 0.6833,
"rewards/accuracies": 0.612500011920929,
"rewards/chosen": -0.08427385240793228,
"rewards/margins": 0.05866638571023941,
"rewards/rejected": -0.14294025301933289,
"step": 30
},
{
"epoch": 0.06986027944111776,
"grad_norm": 30.966967781625176,
"learning_rate": 5.490196078431373e-07,
"logits/chosen": -1.4179936647415161,
"logits/rejected": -1.3696165084838867,
"logps/chosen": -0.8713253140449524,
"logps/rejected": -0.910132884979248,
"loss": 0.6843,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -0.03732147440314293,
"rewards/margins": 0.025124484673142433,
"rewards/rejected": -0.06244595721364021,
"step": 35
},
{
"epoch": 0.07984031936127745,
"grad_norm": 24.405429771065002,
"learning_rate": 6.274509803921569e-07,
"logits/chosen": -1.500732660293579,
"logits/rejected": -1.4325664043426514,
"logps/chosen": -0.9898351430892944,
"logps/rejected": -1.0182645320892334,
"loss": 0.6711,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.010954104363918304,
"rewards/margins": 0.05013161152601242,
"rewards/rejected": -0.039177510887384415,
"step": 40
},
{
"epoch": 0.08982035928143713,
"grad_norm": 58.76778208087395,
"learning_rate": 7.058823529411765e-07,
"logits/chosen": -1.6222164630889893,
"logits/rejected": -1.4863214492797852,
"logps/chosen": -0.9788524508476257,
"logps/rejected": -1.1273555755615234,
"loss": 0.6701,
"rewards/accuracies": 0.5874999761581421,
"rewards/chosen": 0.04946264624595642,
"rewards/margins": 0.041922032833099365,
"rewards/rejected": 0.007540611084550619,
"step": 45
},
{
"epoch": 0.0998003992015968,
"grad_norm": 32.39288675133871,
"learning_rate": 7.84313725490196e-07,
"logits/chosen": -1.4457480907440186,
"logits/rejected": -1.4977794885635376,
"logps/chosen": -0.9463974237442017,
"logps/rejected": -1.2008764743804932,
"loss": 0.6473,
"rewards/accuracies": 0.7875000238418579,
"rewards/chosen": -0.026257414370775223,
"rewards/margins": 0.270720511674881,
"rewards/rejected": -0.2969779074192047,
"step": 50
},
{
"epoch": 0.10978043912175649,
"grad_norm": 25.562422423609192,
"learning_rate": 7.998440460161416e-07,
"logits/chosen": -1.6372053623199463,
"logits/rejected": -1.4895769357681274,
"logps/chosen": -0.9624001383781433,
"logps/rejected": -1.001164197921753,
"loss": 0.6452,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": -0.22816535830497742,
"rewards/margins": 0.20386116206645966,
"rewards/rejected": -0.4320264756679535,
"step": 55
},
{
"epoch": 0.11976047904191617,
"grad_norm": 24.2619415054151,
"learning_rate": 7.992106913713086e-07,
"logits/chosen": -1.4938465356826782,
"logits/rejected": -1.5313457250595093,
"logps/chosen": -1.0081576108932495,
"logps/rejected": -1.2644726037979126,
"loss": 0.6479,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.19145138561725616,
"rewards/margins": 0.5550881624221802,
"rewards/rejected": -0.7465395331382751,
"step": 60
},
{
"epoch": 0.12974051896207583,
"grad_norm": 53.09152893491566,
"learning_rate": 7.980909599927324e-07,
"logits/chosen": -1.6245317459106445,
"logits/rejected": -1.7302849292755127,
"logps/chosen": -0.9516083002090454,
"logps/rejected": -1.0930800437927246,
"loss": 0.6259,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -0.033558934926986694,
"rewards/margins": 0.2880553603172302,
"rewards/rejected": -0.3216143250465393,
"step": 65
},
{
"epoch": 0.13972055888223553,
"grad_norm": 22.54639277386895,
"learning_rate": 7.964862161006166e-07,
"logits/chosen": -1.6116857528686523,
"logits/rejected": -1.5303642749786377,
"logps/chosen": -0.9262536764144897,
"logps/rejected": -1.1940580606460571,
"loss": 0.5986,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.052712906152009964,
"rewards/margins": 0.4166509509086609,
"rewards/rejected": -0.3639380633831024,
"step": 70
},
{
"epoch": 0.1497005988023952,
"grad_norm": 42.93936100609094,
"learning_rate": 7.94398414828202e-07,
"logits/chosen": -1.7100902795791626,
"logits/rejected": -1.6730142831802368,
"logps/chosen": -0.9657354354858398,
"logps/rejected": -0.9953187108039856,
"loss": 0.5873,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -0.0733981654047966,
"rewards/margins": 0.29382631182670593,
"rewards/rejected": -0.36722445487976074,
"step": 75
},
{
"epoch": 0.1596806387225549,
"grad_norm": 33.085627858420565,
"learning_rate": 7.918300998397376e-07,
"logits/chosen": -1.5861061811447144,
"logits/rejected": -1.5445070266723633,
"logps/chosen": -0.9483486413955688,
"logps/rejected": -1.051520824432373,
"loss": 0.5927,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -0.18298718333244324,
"rewards/margins": 0.16031306982040405,
"rewards/rejected": -0.3433002233505249,
"step": 80
},
{
"epoch": 0.16966067864271456,
"grad_norm": 32.604735894329295,
"learning_rate": 7.887844002314185e-07,
"logits/chosen": -1.456621527671814,
"logits/rejected": -1.550982117652893,
"logps/chosen": -0.8949592709541321,
"logps/rejected": -1.0014525651931763,
"loss": 0.6049,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.37570685148239136,
"rewards/margins": 0.4748932421207428,
"rewards/rejected": -0.8506000638008118,
"step": 85
},
{
"epoch": 0.17964071856287425,
"grad_norm": 50.600905445142125,
"learning_rate": 7.852650267190632e-07,
"logits/chosen": -1.6303730010986328,
"logits/rejected": -1.609903335571289,
"logps/chosen": -1.0100046396255493,
"logps/rejected": -1.0304863452911377,
"loss": 0.5861,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -0.46551984548568726,
"rewards/margins": 0.5207703709602356,
"rewards/rejected": -0.9862901568412781,
"step": 90
},
{
"epoch": 0.18962075848303392,
"grad_norm": 46.677854954895324,
"learning_rate": 7.812762671171788e-07,
"logits/chosen": -1.8154528141021729,
"logits/rejected": -1.7442315816879272,
"logps/chosen": -1.245895504951477,
"logps/rejected": -1.381966233253479,
"loss": 0.5881,
"rewards/accuracies": 0.8125,
"rewards/chosen": -0.5333404541015625,
"rewards/margins": 0.9066336750984192,
"rewards/rejected": -1.439974069595337,
"step": 95
},
{
"epoch": 0.1996007984031936,
"grad_norm": 45.70458018922122,
"learning_rate": 7.768229811149186e-07,
"logits/chosen": -1.6239620447158813,
"logits/rejected": -1.7077280282974243,
"logps/chosen": -1.0647861957550049,
"logps/rejected": -1.128450632095337,
"loss": 0.5655,
"rewards/accuracies": 0.7875000238418579,
"rewards/chosen": 0.02701367810368538,
"rewards/margins": 0.667129635810852,
"rewards/rejected": -0.6401158571243286,
"step": 100
},
{
"epoch": 0.20958083832335328,
"grad_norm": 68.02288800224188,
"learning_rate": 7.719105943553005e-07,
"logits/chosen": -1.665117859840393,
"logits/rejected": -1.7087205648422241,
"logps/chosen": -0.872536301612854,
"logps/rejected": -1.0017918348312378,
"loss": 0.5936,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -0.5589737892150879,
"rewards/margins": 0.5613604784011841,
"rewards/rejected": -1.1203341484069824,
"step": 105
},
{
"epoch": 0.21956087824351297,
"grad_norm": 26.445396621363834,
"learning_rate": 7.665450918248958e-07,
"logits/chosen": -1.7527316808700562,
"logits/rejected": -1.701978087425232,
"logps/chosen": -0.9653258323669434,
"logps/rejected": -1.0634543895721436,
"loss": 0.5066,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -0.627923846244812,
"rewards/margins": 0.6550946235656738,
"rewards/rejected": -1.2830184698104858,
"step": 110
},
{
"epoch": 0.22954091816367264,
"grad_norm": 30.90025286425145,
"learning_rate": 7.607330105620455e-07,
"logits/chosen": -1.7857640981674194,
"logits/rejected": -1.668609619140625,
"logps/chosen": -1.0989948511123657,
"logps/rejected": -1.1228173971176147,
"loss": 0.5595,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -0.6360177993774414,
"rewards/margins": 0.5694562196731567,
"rewards/rejected": -1.2054741382598877,
"step": 115
},
{
"epoch": 0.23952095808383234,
"grad_norm": 40.011128656457046,
"learning_rate": 7.544814316924858e-07,
"logits/chosen": -1.7470954656600952,
"logits/rejected": -1.745476484298706,
"logps/chosen": -1.1192394495010376,
"logps/rejected": -1.2689249515533447,
"loss": 0.5494,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -1.0388853549957275,
"rewards/margins": 0.6685038805007935,
"rewards/rejected": -1.7073894739151,
"step": 120
},
{
"epoch": 0.249500998003992,
"grad_norm": 21.89811631617477,
"learning_rate": 7.477979718020876e-07,
"logits/chosen": -1.5766489505767822,
"logits/rejected": -1.4870827198028564,
"logps/chosen": -1.0617902278900146,
"logps/rejected": -1.2029839754104614,
"loss": 0.4894,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": -0.9320518374443054,
"rewards/margins": 0.8520485758781433,
"rewards/rejected": -1.7841002941131592,
"step": 125
},
{
"epoch": 0.25948103792415167,
"grad_norm": 32.50408891288284,
"learning_rate": 7.40690773657219e-07,
"logits/chosen": -1.6710205078125,
"logits/rejected": -1.6744377613067627,
"logps/chosen": -0.9914701581001282,
"logps/rejected": -1.1012578010559082,
"loss": 0.4908,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": -0.8213608860969543,
"rewards/margins": 0.6687670946121216,
"rewards/rejected": -1.4901279211044312,
"step": 130
},
{
"epoch": 0.2694610778443114,
"grad_norm": 52.90692812097415,
"learning_rate": 7.331684962840398e-07,
"logits/chosen": -1.5417057275772095,
"logits/rejected": -1.563847303390503,
"logps/chosen": -0.9478904008865356,
"logps/rejected": -1.1853077411651611,
"loss": 0.4906,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -0.5953829884529114,
"rewards/margins": 1.0272996425628662,
"rewards/rejected": -1.6226825714111328,
"step": 135
},
{
"epoch": 0.27944111776447106,
"grad_norm": 29.247151675001287,
"learning_rate": 7.25240304418811e-07,
"logits/chosen": -1.723270058631897,
"logits/rejected": -1.6206943988800049,
"logps/chosen": -1.0017354488372803,
"logps/rejected": -1.1501209735870361,
"loss": 0.4895,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": -0.7104414701461792,
"rewards/margins": 1.0606929063796997,
"rewards/rejected": -1.771134376525879,
"step": 140
},
{
"epoch": 0.2894211576846307,
"grad_norm": 23.202561617382436,
"learning_rate": 7.169158573420763e-07,
"logits/chosen": -1.7494615316390991,
"logits/rejected": -1.741146445274353,
"logps/chosen": -1.208905816078186,
"logps/rejected": -1.3130097389221191,
"loss": 0.4886,
"rewards/accuracies": 0.8125,
"rewards/chosen": -1.2182276248931885,
"rewards/margins": 1.148219347000122,
"rewards/rejected": -2.3664469718933105,
"step": 145
},
{
"epoch": 0.2994011976047904,
"grad_norm": 31.417521603405053,
"learning_rate": 7.082052971103157e-07,
"logits/chosen": -1.614556074142456,
"logits/rejected": -1.5841604471206665,
"logps/chosen": -1.156620979309082,
"logps/rejected": -1.2092293500900269,
"loss": 0.5008,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -1.330456256866455,
"rewards/margins": 0.7478895783424377,
"rewards/rejected": -2.078345775604248,
"step": 150
},
{
"epoch": 0.3093812375249501,
"grad_norm": 22.72615529430511,
"learning_rate": 6.991192361994127e-07,
"logits/chosen": -1.748228669166565,
"logits/rejected": -1.6922461986541748,
"logps/chosen": -1.078489065170288,
"logps/rejected": -1.3014332056045532,
"loss": 0.4438,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": -1.2569700479507446,
"rewards/margins": 1.386672854423523,
"rewards/rejected": -2.6436429023742676,
"step": 155
},
{
"epoch": 0.3193612774451098,
"grad_norm": 53.34962173479164,
"learning_rate": 6.896687445749871e-07,
"logits/chosen": -1.7363059520721436,
"logits/rejected": -1.7595733404159546,
"logps/chosen": -1.115309476852417,
"logps/rejected": -1.239806890487671,
"loss": 0.4622,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": -1.4574905633926392,
"rewards/margins": 1.336918830871582,
"rewards/rejected": -2.7944092750549316,
"step": 160
},
{
"epoch": 0.32934131736526945,
"grad_norm": 40.83000749193078,
"learning_rate": 6.798653362053462e-07,
"logits/chosen": -1.650973916053772,
"logits/rejected": -1.6539020538330078,
"logps/chosen": -1.1761391162872314,
"logps/rejected": -1.4524037837982178,
"loss": 0.4558,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": -1.311657190322876,
"rewards/margins": 1.8300256729125977,
"rewards/rejected": -3.1416831016540527,
"step": 165
},
{
"epoch": 0.3393213572854291,
"grad_norm": 28.447642135314243,
"learning_rate": 6.697209550334893e-07,
"logits/chosen": -1.7830873727798462,
"logits/rejected": -1.8306477069854736,
"logps/chosen": -1.1625844240188599,
"logps/rejected": -1.4296667575836182,
"loss": 0.4682,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": -1.1936951875686646,
"rewards/margins": 1.3315410614013672,
"rewards/rejected": -2.525236129760742,
"step": 170
},
{
"epoch": 0.34930139720558884,
"grad_norm": 42.529786752880625,
"learning_rate": 6.592479604252524e-07,
"logits/chosen": -1.6931123733520508,
"logits/rejected": -1.6376163959503174,
"logps/chosen": -1.1715461015701294,
"logps/rejected": -1.3761537075042725,
"loss": 0.4751,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": -1.1568340063095093,
"rewards/margins": 1.0651991367340088,
"rewards/rejected": -2.2220332622528076,
"step": 175
},
{
"epoch": 0.3592814371257485,
"grad_norm": 49.01740947758452,
"learning_rate": 6.484591121113241e-07,
"logits/chosen": -1.7936656475067139,
"logits/rejected": -1.7206109762191772,
"logps/chosen": -1.047082781791687,
"logps/rejected": -1.2329199314117432,
"loss": 0.4627,
"rewards/accuracies": 0.8125,
"rewards/chosen": -1.0403250455856323,
"rewards/margins": 1.458751916885376,
"rewards/rejected": -2.499077081680298,
"step": 180
},
{
"epoch": 0.36926147704590817,
"grad_norm": 33.35075365946081,
"learning_rate": 6.373675546414806e-07,
"logits/chosen": -1.7752662897109985,
"logits/rejected": -1.8046382665634155,
"logps/chosen": -1.105947494506836,
"logps/rejected": -1.4365394115447998,
"loss": 0.4533,
"rewards/accuracies": 0.8125,
"rewards/chosen": -1.559922456741333,
"rewards/margins": 1.3131450414657593,
"rewards/rejected": -2.8730673789978027,
"step": 185
},
{
"epoch": 0.37924151696606784,
"grad_norm": 33.28309177685594,
"learning_rate": 6.259868013699751e-07,
"logits/chosen": -1.7205069065093994,
"logits/rejected": -1.6720272302627563,
"logps/chosen": -1.1314589977264404,
"logps/rejected": -1.2337143421173096,
"loss": 0.4049,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -1.9102380275726318,
"rewards/margins": 1.2990434169769287,
"rewards/rejected": -3.2092814445495605,
"step": 190
},
{
"epoch": 0.38922155688622756,
"grad_norm": 22.448955038966368,
"learning_rate": 6.143307179915986e-07,
"logits/chosen": -1.63016676902771,
"logits/rejected": -1.6702827215194702,
"logps/chosen": -1.1162363290786743,
"logps/rejected": -1.364017128944397,
"loss": 0.3814,
"rewards/accuracies": 0.875,
"rewards/chosen": -1.8935344219207764,
"rewards/margins": 1.6944122314453125,
"rewards/rejected": -3.5879464149475098,
"step": 195
},
{
"epoch": 0.3992015968063872,
"grad_norm": 40.67962108560719,
"learning_rate": 6.024135056484655e-07,
"logits/chosen": -1.7706257104873657,
"logits/rejected": -1.7765319347381592,
"logps/chosen": -1.2081105709075928,
"logps/rejected": -1.5039583444595337,
"loss": 0.4225,
"rewards/accuracies": 0.875,
"rewards/chosen": -1.8676178455352783,
"rewards/margins": 2.3755626678466797,
"rewards/rejected": -4.243180274963379,
"step": 200
},
{
"epoch": 0.4091816367265469,
"grad_norm": 22.600562301516938,
"learning_rate": 5.902496836281101e-07,
"logits/chosen": -1.772716760635376,
"logits/rejected": -1.7146966457366943,
"logps/chosen": -1.1653116941452026,
"logps/rejected": -1.2957953214645386,
"loss": 0.353,
"rewards/accuracies": 0.8125,
"rewards/chosen": -1.8657941818237305,
"rewards/margins": 1.3124840259552002,
"rewards/rejected": -3.1782779693603516,
"step": 205
},
{
"epoch": 0.41916167664670656,
"grad_norm": 31.666375625024422,
"learning_rate": 5.778540716739709e-07,
"logits/chosen": -1.7554540634155273,
"logits/rejected": -1.8094005584716797,
"logps/chosen": -1.2131294012069702,
"logps/rejected": -1.54654860496521,
"loss": 0.3751,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": -1.522384524345398,
"rewards/margins": 2.483349561691284,
"rewards/rejected": -4.005733966827393,
"step": 210
},
{
"epoch": 0.4291417165668663,
"grad_norm": 23.231350885180493,
"learning_rate": 5.652417719298168e-07,
"logits/chosen": -1.7632023096084595,
"logits/rejected": -1.6775318384170532,
"logps/chosen": -1.254577875137329,
"logps/rejected": -1.4464714527130127,
"loss": 0.42,
"rewards/accuracies": 0.8125,
"rewards/chosen": -2.3697409629821777,
"rewards/margins": 1.543129324913025,
"rewards/rejected": -3.912869930267334,
"step": 215
},
{
"epoch": 0.43912175648702595,
"grad_norm": 75.06138455980248,
"learning_rate": 5.524281505401096e-07,
"logits/chosen": -1.7130699157714844,
"logits/rejected": -1.7215654850006104,
"logps/chosen": -1.1106432676315308,
"logps/rejected": -1.389941692352295,
"loss": 0.4627,
"rewards/accuracies": 0.8125,
"rewards/chosen": -2.0083611011505127,
"rewards/margins": 1.8821561336517334,
"rewards/rejected": -3.890516996383667,
"step": 220
},
{
"epoch": 0.4491017964071856,
"grad_norm": 38.6593639633161,
"learning_rate": 5.394288189287261e-07,
"logits/chosen": -1.787519097328186,
"logits/rejected": -1.7786083221435547,
"logps/chosen": -1.1749248504638672,
"logps/rejected": -1.4376263618469238,
"loss": 0.3756,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": -2.5106921195983887,
"rewards/margins": 2.196367025375366,
"rewards/rejected": -4.707058906555176,
"step": 225
},
{
"epoch": 0.4590818363273453,
"grad_norm": 78.42260836111647,
"learning_rate": 5.26259614778841e-07,
"logits/chosen": -1.642748475074768,
"logits/rejected": -1.6440712213516235,
"logps/chosen": -1.1253970861434937,
"logps/rejected": -1.2666562795639038,
"loss": 0.4147,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": -2.194728374481201,
"rewards/margins": 1.729097604751587,
"rewards/rejected": -3.923825740814209,
"step": 230
},
{
"epoch": 0.469061876247505,
"grad_norm": 29.199406660864277,
"learning_rate": 5.129365827371506e-07,
"logits/chosen": -1.8895553350448608,
"logits/rejected": -1.8804956674575806,
"logps/chosen": -1.1362788677215576,
"logps/rejected": -1.3920345306396484,
"loss": 0.373,
"rewards/accuracies": 0.8125,
"rewards/chosen": -1.8095152378082275,
"rewards/margins": 2.366717576980591,
"rewards/rejected": -4.17623233795166,
"step": 235
},
{
"epoch": 0.47904191616766467,
"grad_norm": 47.16394571127521,
"learning_rate": 4.99475954865942e-07,
"logits/chosen": -1.7921149730682373,
"logits/rejected": -1.7930948734283447,
"logps/chosen": -1.0978577136993408,
"logps/rejected": -1.2884962558746338,
"loss": 0.4121,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -2.472763776779175,
"rewards/margins": 1.3844527006149292,
"rewards/rejected": -3.8572165966033936,
"step": 240
},
{
"epoch": 0.48902195608782434,
"grad_norm": 49.94184345559498,
"learning_rate": 4.858941308668253e-07,
"logits/chosen": -1.7998384237289429,
"logits/rejected": -1.7332870960235596,
"logps/chosen": -1.2298054695129395,
"logps/rejected": -1.583311915397644,
"loss": 0.3966,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -2.484738349914551,
"rewards/margins": 2.6141140460968018,
"rewards/rejected": -5.09885311126709,
"step": 245
},
{
"epoch": 0.499001996007984,
"grad_norm": 24.230906422190216,
"learning_rate": 4.72207658100224e-07,
"logits/chosen": -1.8376038074493408,
"logits/rejected": -1.777984619140625,
"logps/chosen": -1.221793532371521,
"logps/rejected": -1.4775179624557495,
"loss": 0.355,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -2.4652364253997803,
"rewards/margins": 2.177865982055664,
"rewards/rejected": -4.643102169036865,
"step": 250
},
{
"epoch": 0.5089820359281437,
"grad_norm": 34.572626217381774,
"learning_rate": 4.5843321142496465e-07,
"logits/chosen": -1.7895371913909912,
"logits/rejected": -1.7715275287628174,
"logps/chosen": -1.2171119451522827,
"logps/rejected": -1.4471348524093628,
"loss": 0.3765,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": -2.290224075317383,
"rewards/margins": 1.525314211845398,
"rewards/rejected": -3.8155384063720703,
"step": 255
},
{
"epoch": 0.5189620758483033,
"grad_norm": 50.38823987307752,
"learning_rate": 4.445875728825301e-07,
"logits/chosen": -1.759508490562439,
"logits/rejected": -1.7566909790039062,
"logps/chosen": -1.2831090688705444,
"logps/rejected": -1.553682565689087,
"loss": 0.407,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": -2.9423370361328125,
"rewards/margins": 2.082186222076416,
"rewards/rejected": -5.024522304534912,
"step": 260
},
{
"epoch": 0.5289421157684631,
"grad_norm": 25.180525143347726,
"learning_rate": 4.3068761125072743e-07,
"logits/chosen": -1.7218453884124756,
"logits/rejected": -1.6865999698638916,
"logps/chosen": -1.2321453094482422,
"logps/rejected": -1.4395345449447632,
"loss": 0.378,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": -2.701998472213745,
"rewards/margins": 1.778617262840271,
"rewards/rejected": -4.480616092681885,
"step": 265
},
{
"epoch": 0.5389221556886228,
"grad_norm": 34.94821484127908,
"learning_rate": 4.167502614916798e-07,
"logits/chosen": -1.7490154504776,
"logits/rejected": -1.7060623168945312,
"logps/chosen": -1.1463005542755127,
"logps/rejected": -1.4383161067962646,
"loss": 0.3744,
"rewards/accuracies": 0.8125,
"rewards/chosen": -2.461625576019287,
"rewards/margins": 1.8262317180633545,
"rewards/rejected": -4.287858009338379,
"step": 270
},
{
"epoch": 0.5489021956087824,
"grad_norm": 28.174483103692058,
"learning_rate": 4.027925041191846e-07,
"logits/chosen": -1.6615409851074219,
"logits/rejected": -1.717336893081665,
"logps/chosen": -1.0789427757263184,
"logps/rejected": -1.291114091873169,
"loss": 0.3802,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -2.360185384750366,
"rewards/margins": 1.7126213312149048,
"rewards/rejected": -4.072806358337402,
"step": 275
},
{
"epoch": 0.5588822355289421,
"grad_norm": 65.52620829663537,
"learning_rate": 3.8883134451057253e-07,
"logits/chosen": -1.728354811668396,
"logits/rejected": -1.7853111028671265,
"logps/chosen": -1.1873013973236084,
"logps/rejected": -1.3926045894622803,
"loss": 0.3777,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": -2.1488184928894043,
"rewards/margins": 2.161184787750244,
"rewards/rejected": -4.31000280380249,
"step": 280
},
{
"epoch": 0.5688622754491018,
"grad_norm": 28.41327207057278,
"learning_rate": 3.7488379218827464e-07,
"logits/chosen": -1.8485610485076904,
"logits/rejected": -1.7869895696640015,
"logps/chosen": -1.1387436389923096,
"logps/rejected": -1.4143882989883423,
"loss": 0.3322,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": -2.3350417613983154,
"rewards/margins": 2.1998612880706787,
"rewards/rejected": -4.534903049468994,
"step": 285
},
{
"epoch": 0.5788423153692615,
"grad_norm": 54.15837971453539,
"learning_rate": 3.6096684009634026e-07,
"logits/chosen": -1.9337072372436523,
"logits/rejected": -1.8167577981948853,
"logps/chosen": -1.2900314331054688,
"logps/rejected": -1.5611326694488525,
"loss": 0.3949,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -2.437575101852417,
"rewards/margins": 2.304208278656006,
"rewards/rejected": -4.741783142089844,
"step": 290
},
{
"epoch": 0.5888223552894212,
"grad_norm": 30.875206028913684,
"learning_rate": 3.4709744389715095e-07,
"logits/chosen": -1.7289743423461914,
"logits/rejected": -1.7040287256240845,
"logps/chosen": -1.1819794178009033,
"logps/rejected": -1.3941266536712646,
"loss": 0.3588,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -2.6420161724090576,
"rewards/margins": 1.931423544883728,
"rewards/rejected": -4.573439598083496,
"step": 295
},
{
"epoch": 0.5988023952095808,
"grad_norm": 23.031518202722424,
"learning_rate": 3.332925013135591e-07,
"logits/chosen": -1.7638660669326782,
"logits/rejected": -1.8151865005493164,
"logps/chosen": -1.1131149530410767,
"logps/rejected": -1.3581509590148926,
"loss": 0.3593,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -2.243455171585083,
"rewards/margins": 1.8504345417022705,
"rewards/rejected": -4.0938897132873535,
"step": 300
},
{
"epoch": 0.6087824351297405,
"grad_norm": 52.883386307510996,
"learning_rate": 3.195688315416142e-07,
"logits/chosen": -1.9481338262557983,
"logits/rejected": -1.877634048461914,
"logps/chosen": -1.3473719358444214,
"logps/rejected": -1.5660619735717773,
"loss": 0.3312,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": -2.013882637023926,
"rewards/margins": 2.0933196544647217,
"rewards/rejected": -4.107202053070068,
"step": 305
},
{
"epoch": 0.6187624750499002,
"grad_norm": 40.31001639731489,
"learning_rate": 3.0594315475896397e-07,
"logits/chosen": -1.8702493906021118,
"logits/rejected": -1.8942811489105225,
"logps/chosen": -1.2539622783660889,
"logps/rejected": -1.5844415426254272,
"loss": 0.3233,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -2.855128049850464,
"rewards/margins": 2.254939556121826,
"rewards/rejected": -5.110067367553711,
"step": 310
},
{
"epoch": 0.6287425149700598,
"grad_norm": 34.57734687085236,
"learning_rate": 2.924320717538937e-07,
"logits/chosen": -1.9552714824676514,
"logits/rejected": -1.8983757495880127,
"logps/chosen": -1.235822319984436,
"logps/rejected": -1.557267427444458,
"loss": 0.3362,
"rewards/accuracies": 0.875,
"rewards/chosen": -2.798520803451538,
"rewards/margins": 2.358416795730591,
"rewards/rejected": -5.156937599182129,
"step": 315
},
{
"epoch": 0.6387225548902196,
"grad_norm": 22.131955927530566,
"learning_rate": 2.790520436998222e-07,
"logits/chosen": -1.8081642389297485,
"logits/rejected": -1.7835298776626587,
"logps/chosen": -1.2587134838104248,
"logps/rejected": -1.5615605115890503,
"loss": 0.3362,
"rewards/accuracies": 0.875,
"rewards/chosen": -2.6179587841033936,
"rewards/margins": 2.341552972793579,
"rewards/rejected": -4.959511756896973,
"step": 320
},
{
"epoch": 0.6487025948103793,
"grad_norm": 25.656535146073534,
"learning_rate": 2.6581937209989804e-07,
"logits/chosen": -1.857273817062378,
"logits/rejected": -1.8463045358657837,
"logps/chosen": -1.207121729850769,
"logps/rejected": -1.4980764389038086,
"loss": 0.2881,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -2.7167863845825195,
"rewards/margins": 2.327130079269409,
"rewards/rejected": -5.043917179107666,
"step": 325
},
{
"epoch": 0.6586826347305389,
"grad_norm": 80.80388415289183,
"learning_rate": 2.527501789261288e-07,
"logits/chosen": -1.7654087543487549,
"logits/rejected": -1.7988303899765015,
"logps/chosen": -1.2382586002349854,
"logps/rejected": -1.5011346340179443,
"loss": 0.3562,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": -2.9657859802246094,
"rewards/margins": 1.982195258140564,
"rewards/rejected": -4.947981357574463,
"step": 330
},
{
"epoch": 0.6686626746506986,
"grad_norm": 58.701745588033376,
"learning_rate": 2.398603869772421e-07,
"logits/chosen": -1.8312063217163086,
"logits/rejected": -1.906640648841858,
"logps/chosen": -1.2148807048797607,
"logps/rejected": -1.609410047531128,
"loss": 0.3771,
"rewards/accuracies": 0.875,
"rewards/chosen": -2.625844955444336,
"rewards/margins": 2.595080614089966,
"rewards/rejected": -5.220925807952881,
"step": 335
},
{
"epoch": 0.6786427145708582,
"grad_norm": 53.98596461549184,
"learning_rate": 2.27165700479207e-07,
"logits/chosen": -1.7870060205459595,
"logits/rejected": -1.8084951639175415,
"logps/chosen": -1.24245023727417,
"logps/rejected": -1.5464107990264893,
"loss": 0.3441,
"rewards/accuracies": 0.8125,
"rewards/chosen": -3.0009231567382812,
"rewards/margins": 2.439561605453491,
"rewards/rejected": -5.440484523773193,
"step": 340
},
{
"epoch": 0.688622754491018,
"grad_norm": 31.40057350192729,
"learning_rate": 2.146815859520554e-07,
"logits/chosen": -1.655821442604065,
"logits/rejected": -1.7065486907958984,
"logps/chosen": -1.1306984424591064,
"logps/rejected": -1.4800045490264893,
"loss": 0.3349,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": -2.8754384517669678,
"rewards/margins": 2.686849355697632,
"rewards/rejected": -5.562288284301758,
"step": 345
},
{
"epoch": 0.6986027944111777,
"grad_norm": 31.89392999133678,
"learning_rate": 2.0242325336630769e-07,
"logits/chosen": -1.770398497581482,
"logits/rejected": -1.7964344024658203,
"logps/chosen": -1.3358914852142334,
"logps/rejected": -1.713321328163147,
"loss": 0.3356,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -3.2399375438690186,
"rewards/margins": 2.4227969646453857,
"rewards/rejected": -5.662735462188721,
"step": 350
},
{
"epoch": 0.7085828343313373,
"grad_norm": 36.33203288290445,
"learning_rate": 1.9040563761196839e-07,
"logits/chosen": -1.8168394565582275,
"logits/rejected": -1.877682089805603,
"logps/chosen": -1.2325422763824463,
"logps/rejected": -1.7707788944244385,
"loss": 0.3245,
"rewards/accuracies": 0.875,
"rewards/chosen": -2.476655960083008,
"rewards/margins": 3.2025744915008545,
"rewards/rejected": -5.6792311668396,
"step": 355
},
{
"epoch": 0.718562874251497,
"grad_norm": 43.416666324732795,
"learning_rate": 1.786433803026624e-07,
"logits/chosen": -1.8330310583114624,
"logits/rejected": -1.8215529918670654,
"logps/chosen": -1.3045927286148071,
"logps/rejected": -1.523819088935852,
"loss": 0.3075,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": -3.1603009700775146,
"rewards/margins": 1.9945179224014282,
"rewards/rejected": -5.154818534851074,
"step": 360
},
{
"epoch": 0.7285429141716567,
"grad_norm": 41.308059452990165,
"learning_rate": 1.6715081193708426e-07,
"logits/chosen": -1.8217226266860962,
"logits/rejected": -1.7349847555160522,
"logps/chosen": -1.3033679723739624,
"logps/rejected": -1.5060852766036987,
"loss": 0.3159,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -3.0869052410125732,
"rewards/margins": 2.2355074882507324,
"rewards/rejected": -5.322413444519043,
"step": 365
},
{
"epoch": 0.7385229540918163,
"grad_norm": 42.21028449654826,
"learning_rate": 1.5594193443949295e-07,
"logits/chosen": -1.8006670475006104,
"logits/rejected": -1.7459598779678345,
"logps/chosen": -1.3234542608261108,
"logps/rejected": -1.5523171424865723,
"loss": 0.3351,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": -3.036769390106201,
"rewards/margins": 2.387256145477295,
"rewards/rejected": -5.424025535583496,
"step": 370
},
{
"epoch": 0.7485029940119761,
"grad_norm": 19.788273374335468,
"learning_rate": 1.450304041005241e-07,
"logits/chosen": -1.7887513637542725,
"logits/rejected": -1.7895513772964478,
"logps/chosen": -1.2865995168685913,
"logps/rejected": -1.5641953945159912,
"loss": 0.3256,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": -2.724120616912842,
"rewards/margins": 2.6693472862243652,
"rewards/rejected": -5.393467903137207,
"step": 375
},
{
"epoch": 0.7584830339321357,
"grad_norm": 22.398433956551006,
"learning_rate": 1.3442951493910334e-07,
"logits/chosen": -1.9577217102050781,
"logits/rejected": -1.888419508934021,
"logps/chosen": -1.1813005208969116,
"logps/rejected": -1.487975835800171,
"loss": 0.3656,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": -1.874016523361206,
"rewards/margins": 2.6297459602355957,
"rewards/rejected": -4.503762722015381,
"step": 380
},
{
"epoch": 0.7684630738522954,
"grad_norm": 25.43555535966853,
"learning_rate": 1.2415218250573207e-07,
"logits/chosen": -1.8119207620620728,
"logits/rejected": -1.808342695236206,
"logps/chosen": -1.3371237516403198,
"logps/rejected": -1.5443830490112305,
"loss": 0.3467,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": -2.7301838397979736,
"rewards/margins": 2.1141746044158936,
"rewards/rejected": -4.844358921051025,
"step": 385
},
{
"epoch": 0.7784431137724551,
"grad_norm": 42.565390920889676,
"learning_rate": 1.1421092814687874e-07,
"logits/chosen": -1.92118239402771,
"logits/rejected": -1.880392074584961,
"logps/chosen": -1.2233985662460327,
"logps/rejected": -1.5754501819610596,
"loss": 0.3129,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": -2.677239418029785,
"rewards/margins": 2.564099073410034,
"rewards/rejected": -5.24133825302124,
"step": 390
},
{
"epoch": 0.7884231536926147,
"grad_norm": 35.381985967589934,
"learning_rate": 1.0461786374964643e-07,
"logits/chosen": -2.013378620147705,
"logits/rejected": -1.9751570224761963,
"logps/chosen": -1.449527382850647,
"logps/rejected": -1.7529798746109009,
"loss": 0.2973,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": -3.0077102184295654,
"rewards/margins": 2.959484577178955,
"rewards/rejected": -5.9671950340271,
"step": 395
},
{
"epoch": 0.7984031936127745,
"grad_norm": 28.566925881122785,
"learning_rate": 9.538467698530536e-08,
"logits/chosen": -1.760108232498169,
"logits/rejected": -1.8218498229980469,
"logps/chosen": -1.2844475507736206,
"logps/rejected": -1.5693128108978271,
"loss": 0.299,
"rewards/accuracies": 0.875,
"rewards/chosen": -2.605151414871216,
"rewards/margins": 2.5104212760925293,
"rewards/rejected": -5.115572452545166,
"step": 400
}
],
"logging_steps": 5,
"max_steps": 501,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 101,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
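
A minimal sketch for reading the `log_history` recorded above: it loads the JSON and prints the step, DPO loss, reward margin, and preference accuracy for each logged step. The local filename `trainer_state.json` is an assumption; adjust the path to wherever the file was downloaded.

```python
# Minimal sketch (not part of the checkpoint): summarize the training curves
# stored in "log_history" of this trainer_state.json.
# Assumes the file has been saved locally as "trainer_state.json".
import json

with open("trainer_state.json") as f:
    state = json.load(f)

history = state["log_history"]

# Each logged entry carries the DPO-style metrics seen above.
print(f"{'step':>6} {'loss':>8} {'margin':>8} {'accuracy':>9}")
for entry in history:
    print(
        f"{entry['step']:>6} "
        f"{entry['loss']:>8.4f} "
        f"{entry['rewards/margins']:>8.3f} "
        f"{entry['rewards/accuracies']:>9.3f}"
    )
```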