{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 6.0,
  "eval_steps": 500,
  "global_step": 1908,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.9968553459119497,
      "grad_norm": 0.6318477988243103,
      "learning_rate": 1.6677148846960168e-05,
      "loss": 0.3859,
      "step": 317
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.5580645161290323,
      "eval_loss": 0.19784556329250336,
      "eval_runtime": 1.3657,
      "eval_samples_per_second": 2269.886,
      "eval_steps_per_second": 47.594,
      "step": 318
    },
    {
      "epoch": 1.9937106918238994,
      "grad_norm": 0.5229398012161255,
      "learning_rate": 1.3354297693920338e-05,
      "loss": 0.1669,
      "step": 634
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8090322580645162,
      "eval_loss": 0.1015559658408165,
      "eval_runtime": 1.5656,
      "eval_samples_per_second": 1980.027,
      "eval_steps_per_second": 41.517,
      "step": 636
    },
    {
      "epoch": 2.990566037735849,
      "grad_norm": 0.4804494082927704,
      "learning_rate": 1.0031446540880504e-05,
      "loss": 0.1087,
      "step": 951
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8664516129032258,
      "eval_loss": 0.07101532071828842,
      "eval_runtime": 1.397,
      "eval_samples_per_second": 2218.976,
      "eval_steps_per_second": 46.527,
      "step": 954
    },
    {
      "epoch": 3.9874213836477987,
      "grad_norm": 0.43196460604667664,
      "learning_rate": 6.708595387840672e-06,
      "loss": 0.0856,
      "step": 1268
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8864516129032258,
      "eval_loss": 0.057509638369083405,
      "eval_runtime": 1.4059,
      "eval_samples_per_second": 2205.034,
      "eval_steps_per_second": 46.235,
      "step": 1272
    },
    {
      "epoch": 4.984276729559748,
      "grad_norm": 0.34330040216445923,
      "learning_rate": 3.385744234800839e-06,
      "loss": 0.0741,
      "step": 1585
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.8916129032258064,
      "eval_loss": 0.05096952244639397,
      "eval_runtime": 1.5943,
      "eval_samples_per_second": 1944.38,
      "eval_steps_per_second": 40.769,
      "step": 1590
    },
    {
      "epoch": 5.981132075471698,
      "grad_norm": 0.41723889112472534,
      "learning_rate": 6.289308176100629e-08,
      "loss": 0.0689,
      "step": 1902
    }
  ],
  "logging_steps": 317,
  "max_steps": 1908,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 1000000000.0,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 495439677408900.0,
  "train_batch_size": 48,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.8996260811262161,
    "num_train_epochs": 6,
    "temperature": 17
  }
}
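
The JSON above is the trainer state that the Hugging Face Transformers Trainer writes during a hyperparameter-search trial ("is_hyper_param_search" is true; "trial_params" holds the sampled values, and the alpha/temperature pair suggests a distillation-style objective, though the training script is not part of this file). As a minimal sketch, the state can be loaded and summarized with standard-library Python; the file name "trainer_state.json" follows the usual Transformers convention and is an assumption about where this content is stored.

import json

# Minimal sketch: load the trainer state shown above and summarize it.
# The path is an assumption; point it at the checkpoint directory in use.
with open("trainer_state.json") as f:
    state = json.load(f)

# Entries containing "eval_accuracy" are the end-of-epoch evaluation
# records; the remaining entries are the periodic training-loss logs
# emitted every `logging_steps` (317) optimizer steps.
for record in state["log_history"]:
    if "eval_accuracy" in record:
        print(
            f"epoch {record['epoch']:.0f}: "
            f"accuracy={record['eval_accuracy']:.4f}, "
            f"loss={record['eval_loss']:.4f}"
        )

# Hyperparameters sampled for this trial.
print("trial params:", state["trial_params"])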