{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.0012437810945273632,
"eval_steps": 1000,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 0,
"loss": 1.5524,
"step": 1
},
{
"epoch": 0.0,
"learning_rate": 0.0,
"loss": 2.1402,
"step": 2
},
{
"epoch": 0.0,
"learning_rate": 0.0,
"loss": 1.664,
"step": 3
},
{
"epoch": 0.0,
"learning_rate": 8.613531161467861e-05,
"loss": 1.7923,
"step": 4
},
{
"epoch": 0.0,
"learning_rate": 0.00013652123889719707,
"loss": 1.8119,
"step": 5
},
{
"epoch": 0.0,
"learning_rate": 0.00017227062322935723,
"loss": 1.6156,
"step": 6
},
{
"epoch": 0.0,
"learning_rate": 0.0002,
"loss": 1.4321,
"step": 7
},
{
"epoch": 0.0,
"learning_rate": 0.0002,
"loss": 1.4738,
"step": 8
},
{
"epoch": 0.0,
"learning_rate": 0.00019789473684210526,
"loss": 1.7251,
"step": 9
},
{
"epoch": 0.0,
"learning_rate": 0.00019578947368421054,
"loss": 1.6262,
"step": 10
},
{
"epoch": 0.0,
"learning_rate": 0.0001936842105263158,
"loss": 1.4947,
"step": 11
},
{
"epoch": 0.0,
"learning_rate": 0.00019157894736842104,
"loss": 1.8006,
"step": 12
},
{
"epoch": 0.0,
"learning_rate": 0.00018947368421052632,
"loss": 1.6487,
"step": 13
},
{
"epoch": 0.0,
"learning_rate": 0.0001873684210526316,
"loss": 1.7926,
"step": 14
},
{
"epoch": 0.0,
"learning_rate": 0.00018526315789473685,
"loss": 1.5979,
"step": 15
},
{
"epoch": 0.0,
"learning_rate": 0.0001831578947368421,
"loss": 1.6008,
"step": 16
},
{
"epoch": 0.0,
"learning_rate": 0.00018105263157894739,
"loss": 1.7569,
"step": 17
},
{
"epoch": 0.0,
"learning_rate": 0.00017894736842105264,
"loss": 1.7647,
"step": 18
},
{
"epoch": 0.0,
"learning_rate": 0.0001768421052631579,
"loss": 1.7386,
"step": 19
},
{
"epoch": 0.0,
"learning_rate": 0.00017473684210526317,
"loss": 1.5272,
"step": 20
},
{
"epoch": 0.0,
"learning_rate": 0.00017263157894736842,
"loss": 1.8591,
"step": 21
},
{
"epoch": 0.0,
"learning_rate": 0.0001705263157894737,
"loss": 1.5613,
"step": 22
},
{
"epoch": 0.0,
"learning_rate": 0.00016842105263157895,
"loss": 1.3797,
"step": 23
},
{
"epoch": 0.0,
"learning_rate": 0.00016631578947368423,
"loss": 1.3594,
"step": 24
},
{
"epoch": 0.0,
"learning_rate": 0.00016421052631578948,
"loss": 1.6027,
"step": 25
},
{
"epoch": 0.0,
"learning_rate": 0.00016210526315789473,
"loss": 1.5134,
"step": 26
},
{
"epoch": 0.0,
"learning_rate": 0.00016,
"loss": 1.7149,
"step": 27
},
{
"epoch": 0.0,
"learning_rate": 0.00015789473684210527,
"loss": 1.8614,
"step": 28
},
{
"epoch": 0.0,
"learning_rate": 0.00015578947368421052,
"loss": 1.4819,
"step": 29
},
{
"epoch": 0.0,
"learning_rate": 0.0001536842105263158,
"loss": 1.623,
"step": 30
},
{
"epoch": 0.0,
"learning_rate": 0.00015157894736842108,
"loss": 1.7829,
"step": 31
},
{
"epoch": 0.0,
"learning_rate": 0.00014947368421052633,
"loss": 1.4878,
"step": 32
},
{
"epoch": 0.0,
"learning_rate": 0.00014736842105263158,
"loss": 1.7675,
"step": 33
},
{
"epoch": 0.0,
"learning_rate": 0.00014526315789473686,
"loss": 1.6652,
"step": 34
},
{
"epoch": 0.0,
"learning_rate": 0.0001431578947368421,
"loss": 1.4829,
"step": 35
},
{
"epoch": 0.0,
"learning_rate": 0.00014105263157894736,
"loss": 1.6084,
"step": 36
},
{
"epoch": 0.0,
"learning_rate": 0.00013894736842105264,
"loss": 1.5299,
"step": 37
},
{
"epoch": 0.0,
"learning_rate": 0.0001368421052631579,
"loss": 1.5337,
"step": 38
},
{
"epoch": 0.0,
"learning_rate": 0.00013473684210526317,
"loss": 1.4584,
"step": 39
},
{
"epoch": 0.0,
"learning_rate": 0.00013263157894736842,
"loss": 1.5648,
"step": 40
},
{
"epoch": 0.0,
"learning_rate": 0.0001305263157894737,
"loss": 1.6003,
"step": 41
},
{
"epoch": 0.0,
"learning_rate": 0.00012842105263157895,
"loss": 1.5679,
"step": 42
},
{
"epoch": 0.0,
"learning_rate": 0.0001263157894736842,
"loss": 1.425,
"step": 43
},
{
"epoch": 0.0,
"learning_rate": 0.00012421052631578949,
"loss": 1.2781,
"step": 44
},
{
"epoch": 0.0,
"learning_rate": 0.00012210526315789474,
"loss": 1.7008,
"step": 45
},
{
"epoch": 0.0,
"learning_rate": 0.00012,
"loss": 1.4367,
"step": 46
},
{
"epoch": 0.0,
"learning_rate": 0.00011789473684210525,
"loss": 1.4628,
"step": 47
},
{
"epoch": 0.0,
"learning_rate": 0.00011578947368421053,
"loss": 1.6098,
"step": 48
},
{
"epoch": 0.0,
"learning_rate": 0.0001136842105263158,
"loss": 1.4618,
"step": 49
},
{
"epoch": 0.0,
"learning_rate": 0.00011157894736842105,
"loss": 1.3719,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 100,
"num_train_epochs": 1,
"save_steps": 50,
"total_flos": 3065802843488256.0,
"trial_name": null,
"trial_params": null
}