HW3 / checkpoint-2500 / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 2500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08,
      "learning_rate": 0.0002,
      "loss": 12.1569,
      "step": 100
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.0002,
      "loss": 0.0485,
      "step": 200
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.0002,
      "loss": 5.1593,
      "step": 300
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "step": 400
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.0002,
      "loss": 0.116,
      "step": 500
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.0002,
      "loss": 0.2684,
      "step": 600
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.0002,
      "loss": 0.0943,
      "step": 700
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.0002,
      "loss": 29.8504,
      "step": 800
    },
    {
      "epoch": 0.72,
      "learning_rate": 0.0002,
      "loss": 0.0173,
      "step": 900
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.0002,
      "loss": 2.6532,
      "step": 1000
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.0002,
      "loss": 0.6314,
      "step": 1100
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.0002,
      "loss": 0.0475,
      "step": 1200
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "step": 1300
    },
    {
      "epoch": 1.12,
      "learning_rate": 0.0002,
      "loss": 0.9073,
      "step": 1400
    },
    {
      "epoch": 1.2,
      "learning_rate": 0.0002,
      "loss": 0.0704,
      "step": 1500
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.0002,
      "loss": 21.957,
      "step": 1600
    },
    {
      "epoch": 1.36,
      "learning_rate": 0.0002,
      "loss": 1247.1927,
      "step": 1700
    },
    {
      "epoch": 1.44,
      "learning_rate": 0.0002,
      "loss": 0.0146,
      "step": 1800
    },
    {
      "epoch": 1.52,
      "learning_rate": 0.0002,
      "loss": 0.6174,
      "step": 1900
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.0002,
      "loss": 1.1367,
      "step": 2000
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.0002,
      "loss": 280.2191,
      "step": 2100
    },
    {
      "epoch": 1.76,
      "learning_rate": 0.0002,
      "loss": 3.5996,
      "step": 2200
    },
    {
      "epoch": 1.84,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "step": 2300
    },
    {
      "epoch": 1.92,
      "learning_rate": 0.0002,
      "loss": 89.4102,
      "step": 2400
    },
    {
      "epoch": 2.0,
      "learning_rate": 0.0002,
      "loss": 3.0508,
      "step": 2500
    }
  ],
  "logging_steps": 100,
  "max_steps": 2500,
  "num_train_epochs": 2,
  "save_steps": 500,
  "total_flos": 2.38468349952e+17,
  "trial_name": null,
  "trial_params": null
}