{
  "best_metric": 0.9139908256880734,
  "best_model_checkpoint": "bert-base-uncased-finetuned-sst2/run-5/checkpoint-633",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 633,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9002293577981652,
      "eval_loss": 0.24879497289657593,
      "eval_runtime": 2.3485,
      "eval_samples_per_second": 371.303,
      "eval_steps_per_second": 23.419,
      "step": 211
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9059633027522935,
      "eval_loss": 0.2516523599624634,
      "eval_runtime": 2.3894,
      "eval_samples_per_second": 364.945,
      "eval_steps_per_second": 23.018,
      "step": 422
    },
    {
      "epoch": 2.37,
      "grad_norm": 0.7729095220565796,
      "learning_rate": 2.651478911079213e-05,
      "loss": 0.2165,
      "step": 500
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9139908256880734,
      "eval_loss": 0.3265123963356018,
      "eval_runtime": 2.3971,
      "eval_samples_per_second": 363.775,
      "eval_steps_per_second": 22.944,
      "step": 633
    }
  ],
  "logging_steps": 500,
  "max_steps": 844,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "total_flos": 335486123357640.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": {
    "learning_rate": 6.50537267718272e-05,
    "num_train_epochs": 4,
    "per_device_train_batch_size": 32,
    "seed": 2
  }
}