{
"best_metric": 0.9738476872444153,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 2.8771929824561404,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.056140350877192984,
"grad_norm": 40.988529205322266,
"learning_rate": 5e-05,
"loss": 9.9807,
"step": 1
},
{
"epoch": 0.056140350877192984,
"eval_loss": 10.257285118103027,
"eval_runtime": 1.7596,
"eval_samples_per_second": 68.198,
"eval_steps_per_second": 8.525,
"step": 1
},
{
"epoch": 0.11228070175438597,
"grad_norm": 42.96883010864258,
"learning_rate": 0.0001,
"loss": 10.3023,
"step": 2
},
{
"epoch": 0.16842105263157894,
"grad_norm": 37.69118881225586,
"learning_rate": 9.989294616193017e-05,
"loss": 8.9005,
"step": 3
},
{
"epoch": 0.22456140350877193,
"grad_norm": 36.252384185791016,
"learning_rate": 9.957224306869053e-05,
"loss": 5.791,
"step": 4
},
{
"epoch": 0.2807017543859649,
"grad_norm": 30.784360885620117,
"learning_rate": 9.903926402016153e-05,
"loss": 3.559,
"step": 5
},
{
"epoch": 0.3368421052631579,
"grad_norm": 24.132333755493164,
"learning_rate": 9.829629131445342e-05,
"loss": 2.6449,
"step": 6
},
{
"epoch": 0.3929824561403509,
"grad_norm": 27.09242820739746,
"learning_rate": 9.73465064747553e-05,
"loss": 2.0326,
"step": 7
},
{
"epoch": 0.44912280701754387,
"grad_norm": 13.805081367492676,
"learning_rate": 9.619397662556435e-05,
"loss": 1.5471,
"step": 8
},
{
"epoch": 0.5052631578947369,
"grad_norm": 36.310142517089844,
"learning_rate": 9.484363707663442e-05,
"loss": 1.351,
"step": 9
},
{
"epoch": 0.5614035087719298,
"grad_norm": 15.280241966247559,
"learning_rate": 9.330127018922194e-05,
"loss": 1.1087,
"step": 10
},
{
"epoch": 0.6175438596491228,
"grad_norm": 8.321746826171875,
"learning_rate": 9.157348061512727e-05,
"loss": 1.0499,
"step": 11
},
{
"epoch": 0.6736842105263158,
"grad_norm": 11.641770362854004,
"learning_rate": 8.966766701456177e-05,
"loss": 1.0376,
"step": 12
},
{
"epoch": 0.7298245614035088,
"grad_norm": 15.036391258239746,
"learning_rate": 8.759199037394887e-05,
"loss": 1.1732,
"step": 13
},
{
"epoch": 0.7859649122807018,
"grad_norm": 10.391888618469238,
"learning_rate": 8.535533905932738e-05,
"loss": 1.0371,
"step": 14
},
{
"epoch": 0.8421052631578947,
"grad_norm": 2.5320839881896973,
"learning_rate": 8.296729075500344e-05,
"loss": 0.9961,
"step": 15
},
{
"epoch": 0.8982456140350877,
"grad_norm": 2.271365165710449,
"learning_rate": 8.043807145043604e-05,
"loss": 0.9803,
"step": 16
},
{
"epoch": 0.9543859649122807,
"grad_norm": 3.9986636638641357,
"learning_rate": 7.777851165098012e-05,
"loss": 0.9929,
"step": 17
},
{
"epoch": 1.0456140350877192,
"grad_norm": 12.490577697753906,
"learning_rate": 7.500000000000001e-05,
"loss": 2.1306,
"step": 18
},
{
"epoch": 1.1017543859649124,
"grad_norm": 4.3462982177734375,
"learning_rate": 7.211443451095007e-05,
"loss": 0.9834,
"step": 19
},
{
"epoch": 1.1578947368421053,
"grad_norm": 3.1821000576019287,
"learning_rate": 6.91341716182545e-05,
"loss": 0.975,
"step": 20
},
{
"epoch": 1.2140350877192982,
"grad_norm": 2.209925413131714,
"learning_rate": 6.607197326515808e-05,
"loss": 0.9703,
"step": 21
},
{
"epoch": 1.2701754385964912,
"grad_norm": 5.219216823577881,
"learning_rate": 6.294095225512603e-05,
"loss": 1.0513,
"step": 22
},
{
"epoch": 1.3263157894736843,
"grad_norm": 4.299022674560547,
"learning_rate": 5.9754516100806423e-05,
"loss": 0.9855,
"step": 23
},
{
"epoch": 1.3824561403508773,
"grad_norm": 2.6355159282684326,
"learning_rate": 5.6526309611002594e-05,
"loss": 0.9949,
"step": 24
},
{
"epoch": 1.4385964912280702,
"grad_norm": 2.5065155029296875,
"learning_rate": 5.327015646150716e-05,
"loss": 0.9883,
"step": 25
},
{
"epoch": 1.4385964912280702,
"eval_loss": 0.9844273924827576,
"eval_runtime": 1.7474,
"eval_samples_per_second": 68.672,
"eval_steps_per_second": 8.584,
"step": 25
},
{
"epoch": 1.4947368421052631,
"grad_norm": 2.8815419673919678,
"learning_rate": 5e-05,
"loss": 0.9999,
"step": 26
},
{
"epoch": 1.550877192982456,
"grad_norm": 1.6184970140457153,
"learning_rate": 4.6729843538492847e-05,
"loss": 0.9744,
"step": 27
},
{
"epoch": 1.6070175438596492,
"grad_norm": 1.700230598449707,
"learning_rate": 4.347369038899744e-05,
"loss": 0.9848,
"step": 28
},
{
"epoch": 1.663157894736842,
"grad_norm": 1.7157793045043945,
"learning_rate": 4.0245483899193595e-05,
"loss": 0.9861,
"step": 29
},
{
"epoch": 1.719298245614035,
"grad_norm": 1.1912931203842163,
"learning_rate": 3.705904774487396e-05,
"loss": 0.9686,
"step": 30
},
{
"epoch": 1.775438596491228,
"grad_norm": 1.9062882661819458,
"learning_rate": 3.392802673484193e-05,
"loss": 1.0133,
"step": 31
},
{
"epoch": 1.831578947368421,
"grad_norm": 1.5787826776504517,
"learning_rate": 3.086582838174551e-05,
"loss": 0.9736,
"step": 32
},
{
"epoch": 1.8877192982456141,
"grad_norm": 3.0003042221069336,
"learning_rate": 2.7885565489049946e-05,
"loss": 0.9805,
"step": 33
},
{
"epoch": 1.943859649122807,
"grad_norm": 1.2490260601043701,
"learning_rate": 2.500000000000001e-05,
"loss": 0.9748,
"step": 34
},
{
"epoch": 2.0350877192982457,
"grad_norm": 5.135227203369141,
"learning_rate": 2.2221488349019903e-05,
"loss": 1.8213,
"step": 35
},
{
"epoch": 2.0912280701754384,
"grad_norm": 1.504193663597107,
"learning_rate": 1.9561928549563968e-05,
"loss": 0.9428,
"step": 36
},
{
"epoch": 2.1473684210526316,
"grad_norm": 1.865723729133606,
"learning_rate": 1.703270924499656e-05,
"loss": 0.9708,
"step": 37
},
{
"epoch": 2.2035087719298247,
"grad_norm": 1.2437411546707153,
"learning_rate": 1.4644660940672627e-05,
"loss": 0.9747,
"step": 38
},
{
"epoch": 2.2596491228070175,
"grad_norm": 3.998439073562622,
"learning_rate": 1.2408009626051137e-05,
"loss": 1.0126,
"step": 39
},
{
"epoch": 2.3157894736842106,
"grad_norm": 1.3937028646469116,
"learning_rate": 1.0332332985438248e-05,
"loss": 0.9638,
"step": 40
},
{
"epoch": 2.3719298245614033,
"grad_norm": 2.2477951049804688,
"learning_rate": 8.426519384872733e-06,
"loss": 0.9713,
"step": 41
},
{
"epoch": 2.4280701754385965,
"grad_norm": 1.3327833414077759,
"learning_rate": 6.698729810778065e-06,
"loss": 0.9563,
"step": 42
},
{
"epoch": 2.4842105263157896,
"grad_norm": 1.937410593032837,
"learning_rate": 5.156362923365588e-06,
"loss": 0.9716,
"step": 43
},
{
"epoch": 2.5403508771929824,
"grad_norm": 2.531614065170288,
"learning_rate": 3.8060233744356633e-06,
"loss": 0.9813,
"step": 44
},
{
"epoch": 2.5964912280701755,
"grad_norm": 1.3599098920822144,
"learning_rate": 2.653493525244721e-06,
"loss": 0.9793,
"step": 45
},
{
"epoch": 2.6526315789473687,
"grad_norm": 1.3307596445083618,
"learning_rate": 1.70370868554659e-06,
"loss": 0.9686,
"step": 46
},
{
"epoch": 2.7087719298245614,
"grad_norm": 1.9670759439468384,
"learning_rate": 9.607359798384785e-07,
"loss": 0.955,
"step": 47
},
{
"epoch": 2.7649122807017545,
"grad_norm": 3.397104024887085,
"learning_rate": 4.277569313094809e-07,
"loss": 1.0066,
"step": 48
},
{
"epoch": 2.8210526315789473,
"grad_norm": 1.5758522748947144,
"learning_rate": 1.0705383806982606e-07,
"loss": 0.9656,
"step": 49
},
{
"epoch": 2.8771929824561404,
"grad_norm": 1.1156949996948242,
"learning_rate": 0.0,
"loss": 0.9643,
"step": 50
},
{
"epoch": 2.8771929824561404,
"eval_loss": 0.9738476872444153,
"eval_runtime": 1.7164,
"eval_samples_per_second": 69.914,
"eval_steps_per_second": 8.739,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.95300912644096e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}