{
"best_metric": 7.017041206359863,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.011029614514972702,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00022059229029945402,
"grad_norm": 2.4293482303619385,
"learning_rate": 0.0001,
"loss": 7.7331,
"step": 1
},
{
"epoch": 0.00022059229029945402,
"eval_loss": 7.659153461456299,
"eval_runtime": 20.0543,
"eval_samples_per_second": 95.192,
"eval_steps_per_second": 47.621,
"step": 1
},
{
"epoch": 0.00044118458059890804,
"grad_norm": 2.4448964595794678,
"learning_rate": 0.0002,
"loss": 7.8817,
"step": 2
},
{
"epoch": 0.0006617768708983621,
"grad_norm": 2.475456953048706,
"learning_rate": 0.00019978589232386035,
"loss": 7.5521,
"step": 3
},
{
"epoch": 0.0008823691611978161,
"grad_norm": 2.3668322563171387,
"learning_rate": 0.00019914448613738106,
"loss": 7.3641,
"step": 4
},
{
"epoch": 0.00110296145149727,
"grad_norm": 2.320056200027466,
"learning_rate": 0.00019807852804032305,
"loss": 7.76,
"step": 5
},
{
"epoch": 0.0013235537417967242,
"grad_norm": 2.2553532123565674,
"learning_rate": 0.00019659258262890683,
"loss": 7.1233,
"step": 6
},
{
"epoch": 0.0015441460320961783,
"grad_norm": 2.367339611053467,
"learning_rate": 0.0001946930129495106,
"loss": 7.2181,
"step": 7
},
{
"epoch": 0.0017647383223956322,
"grad_norm": 2.3431711196899414,
"learning_rate": 0.0001923879532511287,
"loss": 7.353,
"step": 8
},
{
"epoch": 0.0019853306126950865,
"grad_norm": 2.190365791320801,
"learning_rate": 0.00018968727415326884,
"loss": 6.9617,
"step": 9
},
{
"epoch": 0.00220592290299454,
"grad_norm": 2.226897954940796,
"learning_rate": 0.00018660254037844388,
"loss": 6.9584,
"step": 10
},
{
"epoch": 0.0024265151932939943,
"grad_norm": 2.4230480194091797,
"learning_rate": 0.00018314696123025454,
"loss": 7.5979,
"step": 11
},
{
"epoch": 0.0026471074835934484,
"grad_norm": 2.0584659576416016,
"learning_rate": 0.00017933533402912354,
"loss": 6.6134,
"step": 12
},
{
"epoch": 0.0028676997738929025,
"grad_norm": 2.11950945854187,
"learning_rate": 0.00017518398074789775,
"loss": 6.9112,
"step": 13
},
{
"epoch": 0.0030882920641923566,
"grad_norm": 2.642418146133423,
"learning_rate": 0.00017071067811865476,
"loss": 7.426,
"step": 14
},
{
"epoch": 0.0033088843544918107,
"grad_norm": 2.08109974861145,
"learning_rate": 0.00016593458151000688,
"loss": 6.7795,
"step": 15
},
{
"epoch": 0.0035294766447912644,
"grad_norm": 2.413881540298462,
"learning_rate": 0.00016087614290087208,
"loss": 7.1208,
"step": 16
},
{
"epoch": 0.0037500689350907185,
"grad_norm": 2.323194980621338,
"learning_rate": 0.00015555702330196023,
"loss": 6.6449,
"step": 17
},
{
"epoch": 0.003970661225390173,
"grad_norm": 2.729285717010498,
"learning_rate": 0.00015000000000000001,
"loss": 7.2977,
"step": 18
},
{
"epoch": 0.004191253515689626,
"grad_norm": 2.5769810676574707,
"learning_rate": 0.00014422886902190014,
"loss": 7.3119,
"step": 19
},
{
"epoch": 0.00441184580598908,
"grad_norm": 2.8009188175201416,
"learning_rate": 0.000138268343236509,
"loss": 7.187,
"step": 20
},
{
"epoch": 0.0046324380962885344,
"grad_norm": 2.8663763999938965,
"learning_rate": 0.00013214394653031616,
"loss": 6.7693,
"step": 21
},
{
"epoch": 0.0048530303865879885,
"grad_norm": 2.748577833175659,
"learning_rate": 0.00012588190451025207,
"loss": 6.6481,
"step": 22
},
{
"epoch": 0.005073622676887443,
"grad_norm": 2.5603766441345215,
"learning_rate": 0.00011950903220161285,
"loss": 6.9973,
"step": 23
},
{
"epoch": 0.005294214967186897,
"grad_norm": 2.7109546661376953,
"learning_rate": 0.00011305261922200519,
"loss": 7.2752,
"step": 24
},
{
"epoch": 0.005514807257486351,
"grad_norm": 3.340872049331665,
"learning_rate": 0.00010654031292301432,
"loss": 7.5969,
"step": 25
},
{
"epoch": 0.005514807257486351,
"eval_loss": 7.113040924072266,
"eval_runtime": 20.0439,
"eval_samples_per_second": 95.241,
"eval_steps_per_second": 47.645,
"step": 25
},
{
"epoch": 0.005735399547785805,
"grad_norm": 2.8481411933898926,
"learning_rate": 0.0001,
"loss": 7.006,
"step": 26
},
{
"epoch": 0.005955991838085259,
"grad_norm": 3.5465781688690186,
"learning_rate": 9.345968707698569e-05,
"loss": 7.1405,
"step": 27
},
{
"epoch": 0.006176584128384713,
"grad_norm": 3.2511773109436035,
"learning_rate": 8.694738077799488e-05,
"loss": 7.0427,
"step": 28
},
{
"epoch": 0.006397176418684167,
"grad_norm": 3.1548283100128174,
"learning_rate": 8.049096779838719e-05,
"loss": 7.1469,
"step": 29
},
{
"epoch": 0.006617768708983621,
"grad_norm": 3.4234230518341064,
"learning_rate": 7.411809548974792e-05,
"loss": 7.2329,
"step": 30
},
{
"epoch": 0.0068383609992830755,
"grad_norm": 3.0600433349609375,
"learning_rate": 6.785605346968386e-05,
"loss": 6.6121,
"step": 31
},
{
"epoch": 0.007058953289582529,
"grad_norm": 3.1275887489318848,
"learning_rate": 6.173165676349103e-05,
"loss": 7.1327,
"step": 32
},
{
"epoch": 0.007279545579881983,
"grad_norm": 4.214815139770508,
"learning_rate": 5.577113097809989e-05,
"loss": 7.3558,
"step": 33
},
{
"epoch": 0.007500137870181437,
"grad_norm": 3.6205644607543945,
"learning_rate": 5.000000000000002e-05,
"loss": 7.1521,
"step": 34
},
{
"epoch": 0.007720730160480891,
"grad_norm": 4.0161452293396,
"learning_rate": 4.444297669803981e-05,
"loss": 6.7042,
"step": 35
},
{
"epoch": 0.007941322450780346,
"grad_norm": 5.151815414428711,
"learning_rate": 3.9123857099127936e-05,
"loss": 6.7373,
"step": 36
},
{
"epoch": 0.0081619147410798,
"grad_norm": 4.190551280975342,
"learning_rate": 3.406541848999312e-05,
"loss": 8.0788,
"step": 37
},
{
"epoch": 0.008382507031379252,
"grad_norm": 4.7776570320129395,
"learning_rate": 2.9289321881345254e-05,
"loss": 7.3549,
"step": 38
},
{
"epoch": 0.008603099321678707,
"grad_norm": 5.777076721191406,
"learning_rate": 2.4816019252102273e-05,
"loss": 6.666,
"step": 39
},
{
"epoch": 0.00882369161197816,
"grad_norm": 4.582779407501221,
"learning_rate": 2.0664665970876496e-05,
"loss": 6.011,
"step": 40
},
{
"epoch": 0.009044283902277616,
"grad_norm": 5.211250305175781,
"learning_rate": 1.6853038769745467e-05,
"loss": 6.9135,
"step": 41
},
{
"epoch": 0.009264876192577069,
"grad_norm": 5.360510349273682,
"learning_rate": 1.339745962155613e-05,
"loss": 7.4839,
"step": 42
},
{
"epoch": 0.009485468482876524,
"grad_norm": 6.11507511138916,
"learning_rate": 1.0312725846731175e-05,
"loss": 6.7759,
"step": 43
},
{
"epoch": 0.009706060773175977,
"grad_norm": 6.635857582092285,
"learning_rate": 7.612046748871327e-06,
"loss": 7.8113,
"step": 44
},
{
"epoch": 0.009926653063475432,
"grad_norm": 7.438478469848633,
"learning_rate": 5.306987050489442e-06,
"loss": 7.3099,
"step": 45
},
{
"epoch": 0.010147245353774885,
"grad_norm": 6.5027923583984375,
"learning_rate": 3.40741737109318e-06,
"loss": 7.0473,
"step": 46
},
{
"epoch": 0.01036783764407434,
"grad_norm": 5.755157947540283,
"learning_rate": 1.921471959676957e-06,
"loss": 7.2164,
"step": 47
},
{
"epoch": 0.010588429934373794,
"grad_norm": 10.475558280944824,
"learning_rate": 8.555138626189618e-07,
"loss": 6.7425,
"step": 48
},
{
"epoch": 0.010809022224673248,
"grad_norm": 8.64743423461914,
"learning_rate": 2.141076761396521e-07,
"loss": 7.6537,
"step": 49
},
{
"epoch": 0.011029614514972702,
"grad_norm": 9.511824607849121,
"learning_rate": 0.0,
"loss": 7.135,
"step": 50
},
{
"epoch": 0.011029614514972702,
"eval_loss": 7.017041206359863,
"eval_runtime": 19.9636,
"eval_samples_per_second": 95.624,
"eval_steps_per_second": 47.837,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 27149520076800.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}