PaperExtractGPT / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 20.0,
"global_step": 620,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.32,
"learning_rate": 0.0009993582535855264,
"loss": 0.7008,
"step": 10
},
{
"epoch": 0.65,
"learning_rate": 0.0009974346616959476,
"loss": 0.4261,
"step": 20
},
{
"epoch": 0.97,
"learning_rate": 0.0009942341621640557,
"loss": 0.3999,
"step": 30
},
{
"epoch": 1.29,
"learning_rate": 0.0009897649706262473,
"loss": 0.403,
"step": 40
},
{
"epoch": 1.61,
"learning_rate": 0.000984038559433102,
"loss": 0.3025,
"step": 50
},
{
"epoch": 1.94,
"learning_rate": 0.0009770696282000244,
"loss": 0.3243,
"step": 60
},
{
"epoch": 2.26,
"learning_rate": 0.0009688760660735403,
"loss": 0.1971,
"step": 70
},
{
"epoch": 2.58,
"learning_rate": 0.0009594789058101153,
"loss": 0.3054,
"step": 80
},
{
"epoch": 2.9,
"learning_rate": 0.0009489022697853709,
"loss": 0.3146,
"step": 90
},
{
"epoch": 3.23,
"learning_rate": 0.0009371733080722911,
"loss": 0.1914,
"step": 100
},
{
"epoch": 3.55,
"learning_rate": 0.0009243221287473755,
"loss": 0.2848,
"step": 110
},
{
"epoch": 3.87,
"learning_rate": 0.0009103817206036382,
"loss": 0.1316,
"step": 120
},
{
"epoch": 4.19,
"learning_rate": 0.0008953878684688492,
"loss": 0.3139,
"step": 130
},
{
"epoch": 4.52,
"learning_rate": 0.0008793790613463954,
"loss": 0.1033,
"step": 140
},
{
"epoch": 4.84,
"learning_rate": 0.00086239639361456,
"loss": 0.1794,
"step": 150
},
{
"epoch": 5.16,
"learning_rate": 0.0008444834595378434,
"loss": 0.1794,
"step": 160
},
{
"epoch": 5.48,
"learning_rate": 0.0008256862413611113,
"loss": 0.1136,
"step": 170
},
{
"epoch": 5.81,
"learning_rate": 0.0008060529912738315,
"loss": 0.1584,
"step": 180
},
{
"epoch": 6.13,
"learning_rate": 0.0007856341075473961,
"loss": 0.1127,
"step": 190
},
{
"epoch": 6.45,
"learning_rate": 0.0007644820051634812,
"loss": 0.1422,
"step": 200
},
{
"epoch": 6.77,
"learning_rate": 0.0007426509812655407,
"loss": 0.1001,
"step": 210
},
{
"epoch": 7.1,
"learning_rate": 0.0007201970757788173,
"loss": 0.0521,
"step": 220
},
{
"epoch": 7.42,
"learning_rate": 0.0006971779275566593,
"loss": 0.0499,
"step": 230
},
{
"epoch": 7.74,
"learning_rate": 0.0006736526264224101,
"loss": 0.1083,
"step": 240
},
{
"epoch": 8.06,
"learning_rate": 0.0006496815614866791,
"loss": 0.0762,
"step": 250
},
{
"epoch": 8.39,
"learning_rate": 0.0006253262661293602,
"loss": 0.0363,
"step": 260
},
{
"epoch": 8.71,
"learning_rate": 0.0006006492600443301,
"loss": 0.0455,
"step": 270
},
{
"epoch": 9.03,
"learning_rate": 0.0005757138887522884,
"loss": 0.0614,
"step": 280
},
{
"epoch": 9.35,
"learning_rate": 0.0005505841609937161,
"loss": 0.0279,
"step": 290
},
{
"epoch": 9.68,
"learning_rate": 0.0005253245844193564,
"loss": 0.0383,
"step": 300
},
{
"epoch": 10.0,
"learning_rate": 0.0005,
"loss": 0.0219,
"step": 310
},
{
"epoch": 10.32,
"learning_rate": 0.0004746754155806437,
"loss": 0.0069,
"step": 320
},
{
"epoch": 10.65,
"learning_rate": 0.000449415839006284,
"loss": 0.0399,
"step": 330
},
{
"epoch": 10.97,
"learning_rate": 0.00042428611124771184,
"loss": 0.0208,
"step": 340
},
{
"epoch": 11.29,
"learning_rate": 0.0003993507399556699,
"loss": 0.0109,
"step": 350
},
{
"epoch": 11.61,
"learning_rate": 0.00037467373387063967,
"loss": 0.0249,
"step": 360
},
{
"epoch": 11.94,
"learning_rate": 0.000350318438513321,
"loss": 0.0041,
"step": 370
},
{
"epoch": 12.26,
"learning_rate": 0.0003263473735775899,
"loss": 0.009,
"step": 380
},
{
"epoch": 12.58,
"learning_rate": 0.00030282207244334083,
"loss": 0.007,
"step": 390
},
{
"epoch": 12.9,
"learning_rate": 0.0002798029242211828,
"loss": 0.0089,
"step": 400
},
{
"epoch": 13.23,
"learning_rate": 0.0002573490187344596,
"loss": 0.0103,
"step": 410
},
{
"epoch": 13.55,
"learning_rate": 0.0002355179948365189,
"loss": 0.0054,
"step": 420
},
{
"epoch": 13.87,
"learning_rate": 0.00021436589245260373,
"loss": 0.0037,
"step": 430
},
{
"epoch": 14.19,
"learning_rate": 0.00019394700872616856,
"loss": 0.0047,
"step": 440
},
{
"epoch": 14.52,
"learning_rate": 0.00017431375863888898,
"loss": 0.0061,
"step": 450
},
{
"epoch": 14.84,
"learning_rate": 0.00015551654046215668,
"loss": 0.0043,
"step": 460
},
{
"epoch": 15.16,
"learning_rate": 0.00013760360638544013,
"loss": 0.0058,
"step": 470
},
{
"epoch": 15.48,
"learning_rate": 0.00012062093865360457,
"loss": 0.0029,
"step": 480
},
{
"epoch": 15.81,
"learning_rate": 0.00010461213153115079,
"loss": 0.0037,
"step": 490
},
{
"epoch": 16.13,
"learning_rate": 8.961827939636197e-05,
"loss": 0.004,
"step": 500
},
{
"epoch": 16.45,
"learning_rate": 7.567787125262449e-05,
"loss": 0.0048,
"step": 510
},
{
"epoch": 16.77,
"learning_rate": 6.282669192770896e-05,
"loss": 0.0036,
"step": 520
},
{
"epoch": 17.1,
"learning_rate": 5.109773021462921e-05,
"loss": 0.0018,
"step": 530
},
{
"epoch": 17.42,
"learning_rate": 4.05210941898847e-05,
"loss": 0.0039,
"step": 540
},
{
"epoch": 17.74,
"learning_rate": 3.112393392645985e-05,
"loss": 0.004,
"step": 550
},
{
"epoch": 18.06,
"learning_rate": 2.2930371799975592e-05,
"loss": 0.0038,
"step": 560
},
{
"epoch": 18.39,
"learning_rate": 1.596144056689791e-05,
"loss": 0.0027,
"step": 570
},
{
"epoch": 18.71,
"learning_rate": 1.0235029373752758e-05,
"loss": 0.0043,
"step": 580
},
{
"epoch": 19.03,
"learning_rate": 5.76583783594431e-06,
"loss": 0.0034,
"step": 590
},
{
"epoch": 19.35,
"learning_rate": 2.5653383040524227e-06,
"loss": 0.0034,
"step": 600
},
{
"epoch": 19.68,
"learning_rate": 6.417464144736207e-07,
"loss": 0.0037,
"step": 610
},
{
"epoch": 20.0,
"learning_rate": 0.0,
"loss": 0.0038,
"step": 620
},
{
"epoch": 20.0,
"step": 620,
"total_flos": 7.220647174835405e+16,
"train_loss": 0.09885127827404969,
"train_runtime": 1180.114,
"train_samples_per_second": 2.085,
"train_steps_per_second": 0.525
}
],
"max_steps": 620,
"num_train_epochs": 20,
"total_flos": 7.220647174835405e+16,
"trial_name": null,
"trial_params": null
}
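
For reference, below is a minimal sketch (not part of the uploaded files) of how the `log_history` above could be loaded and plotted. The local path `trainer_state.json` and the use of matplotlib are assumptions for illustration; the `learning_rate` values in the log trace a cosine decay from 1e-3 down to 0 over the 620 steps, which the second axis makes visible.

```python
import json

import matplotlib.pyplot as plt

# Assumed local path to the file shown above.
with open("trainer_state.json") as f:
    state = json.load(f)

# The last record in log_history is a run summary (train_runtime, total_flos,
# ...) with no per-step "loss" key, so filter on that.
logs = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in logs]
loss = [e["loss"] for e in logs]
lr = [e["learning_rate"] for e in logs]

fig, ax_loss = plt.subplots()
ax_loss.plot(steps, loss, label="training loss")
ax_loss.set_xlabel("global step")
ax_loss.set_ylabel("training loss")

# Second y-axis for the learning-rate schedule.
ax_lr = ax_loss.twinx()
ax_lr.plot(steps, lr, color="tab:orange", label="learning rate")
ax_lr.set_ylabel("learning rate")

fig.tight_layout()
plt.show()
```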