flan-t5-small-keywords / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 1000.0,
"global_step": 25000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.2,
"grad_norm": 3.1640737056732178,
"learning_rate": 4.9e-05,
"loss": 2.212,
"step": 500
},
{
"epoch": 0.4,
"grad_norm": 23.492929458618164,
"learning_rate": 4.8e-05,
"loss": 2.0008,
"step": 1000
},
{
"epoch": 0.6,
"grad_norm": 2.86879563331604,
"learning_rate": 4.7e-05,
"loss": 1.9454,
"step": 1500
},
{
"epoch": 0.8,
"grad_norm": 2.5470571517944336,
"learning_rate": 4.600000000000001e-05,
"loss": 1.9098,
"step": 2000
},
{
"epoch": 1.0,
"grad_norm": 3.299417018890381,
"learning_rate": 4.5e-05,
"loss": 1.8775,
"step": 2500
},
{
"epoch": 1.2,
"grad_norm": 3.0483977794647217,
"learning_rate": 4.4000000000000006e-05,
"loss": 1.8398,
"step": 3000
},
{
"epoch": 1.4,
"grad_norm": 3.0028903484344482,
"learning_rate": 4.3e-05,
"loss": 1.8147,
"step": 3500
},
{
"epoch": 1.6,
"grad_norm": 2.972844123840332,
"learning_rate": 4.2e-05,
"loss": 1.8035,
"step": 4000
},
{
"epoch": 1.8,
"grad_norm": 2.82574200630188,
"learning_rate": 4.1e-05,
"loss": 1.7974,
"step": 4500
},
{
"epoch": 2.0,
"grad_norm": 2.6531100273132324,
"learning_rate": 4e-05,
"loss": 1.7818,
"step": 5000
},
{
"epoch": 2.2,
"grad_norm": 3.6078109741210938,
"learning_rate": 3.9000000000000006e-05,
"loss": 1.7492,
"step": 5500
},
{
"epoch": 2.4,
"grad_norm": 2.573765516281128,
"learning_rate": 3.8e-05,
"loss": 1.7536,
"step": 6000
},
{
"epoch": 2.6,
"grad_norm": 3.07545804977417,
"learning_rate": 3.7e-05,
"loss": 1.7341,
"step": 6500
},
{
"epoch": 2.8,
"grad_norm": 2.4871368408203125,
"learning_rate": 3.6e-05,
"loss": 1.7362,
"step": 7000
},
{
"epoch": 3.0,
"grad_norm": 2.881721019744873,
"learning_rate": 3.5e-05,
"loss": 1.7426,
"step": 7500
},
{
"epoch": 3.2,
"grad_norm": 3.6232352256774902,
"learning_rate": 3.4000000000000007e-05,
"loss": 1.6958,
"step": 8000
},
{
"epoch": 3.4,
"grad_norm": 2.4172329902648926,
"learning_rate": 3.3e-05,
"loss": 1.6961,
"step": 8500
},
{
"epoch": 3.6,
"grad_norm": 2.5114290714263916,
"learning_rate": 3.2000000000000005e-05,
"loss": 1.6961,
"step": 9000
},
{
"epoch": 3.8,
"grad_norm": 2.7974891662597656,
"learning_rate": 3.1e-05,
"loss": 1.6831,
"step": 9500
},
{
"epoch": 4.0,
"grad_norm": 2.511326551437378,
"learning_rate": 3e-05,
"loss": 1.7117,
"step": 10000
},
{
"epoch": 4.2,
"grad_norm": 2.3756415843963623,
"learning_rate": 2.9e-05,
"loss": 1.6684,
"step": 10500
},
{
"epoch": 4.4,
"grad_norm": 2.2897322177886963,
"learning_rate": 2.8000000000000003e-05,
"loss": 1.6826,
"step": 11000
},
{
"epoch": 4.6,
"grad_norm": 2.7029635906219482,
"learning_rate": 2.7000000000000002e-05,
"loss": 1.6649,
"step": 11500
},
{
"epoch": 4.8,
"grad_norm": 2.453059673309326,
"learning_rate": 2.6000000000000002e-05,
"loss": 1.6488,
"step": 12000
},
{
"epoch": 5.0,
"grad_norm": 2.173687219619751,
"learning_rate": 2.5e-05,
"loss": 1.652,
"step": 12500
},
{
"epoch": 5.2,
"grad_norm": 2.9122655391693115,
"learning_rate": 2.4e-05,
"loss": 1.6354,
"step": 13000
},
{
"epoch": 5.4,
"grad_norm": 2.8792638778686523,
"learning_rate": 2.3000000000000003e-05,
"loss": 1.6423,
"step": 13500
},
{
"epoch": 5.6,
"grad_norm": 3.0389626026153564,
"learning_rate": 2.2000000000000003e-05,
"loss": 1.6591,
"step": 14000
},
{
"epoch": 5.8,
"grad_norm": 2.6412346363067627,
"learning_rate": 2.1e-05,
"loss": 1.6365,
"step": 14500
},
{
"epoch": 6.0,
"grad_norm": 2.6655163764953613,
"learning_rate": 2e-05,
"loss": 1.6133,
"step": 15000
},
{
"epoch": 6.2,
"grad_norm": 2.6776795387268066,
"learning_rate": 1.9e-05,
"loss": 1.603,
"step": 15500
},
{
"epoch": 6.4,
"grad_norm": 3.4206550121307373,
"learning_rate": 1.8e-05,
"loss": 1.6142,
"step": 16000
},
{
"epoch": 6.6,
"grad_norm": 3.59487247467041,
"learning_rate": 1.7000000000000003e-05,
"loss": 1.6193,
"step": 16500
},
{
"epoch": 6.8,
"grad_norm": 2.707260847091675,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.618,
"step": 17000
},
{
"epoch": 7.0,
"grad_norm": 2.4932329654693604,
"learning_rate": 1.5e-05,
"loss": 1.6179,
"step": 17500
},
{
"epoch": 7.2,
"grad_norm": 2.7114312648773193,
"learning_rate": 1.4000000000000001e-05,
"loss": 1.6064,
"step": 18000
},
{
"epoch": 7.4,
"grad_norm": 2.9900901317596436,
"learning_rate": 1.3000000000000001e-05,
"loss": 1.6079,
"step": 18500
},
{
"epoch": 7.6,
"grad_norm": 2.5069007873535156,
"learning_rate": 1.2e-05,
"loss": 1.5917,
"step": 19000
},
{
"epoch": 7.8,
"grad_norm": 2.816830635070801,
"learning_rate": 1.1000000000000001e-05,
"loss": 1.6159,
"step": 19500
},
{
"epoch": 8.0,
"grad_norm": 2.500258445739746,
"learning_rate": 1e-05,
"loss": 1.586,
"step": 20000
},
{
"epoch": 8.2,
"grad_norm": 2.392134189605713,
"learning_rate": 9e-06,
"loss": 1.5948,
"step": 20500
},
{
"epoch": 8.4,
"grad_norm": 2.19275164604187,
"learning_rate": 8.000000000000001e-06,
"loss": 1.5904,
"step": 21000
},
{
"epoch": 8.6,
"grad_norm": 2.1356213092803955,
"learning_rate": 7.000000000000001e-06,
"loss": 1.5772,
"step": 21500
},
{
"epoch": 8.8,
"grad_norm": 2.0939230918884277,
"learning_rate": 6e-06,
"loss": 1.604,
"step": 22000
},
{
"epoch": 9.0,
"grad_norm": 2.8503177165985107,
"learning_rate": 5e-06,
"loss": 1.5878,
"step": 22500
},
{
"epoch": 9.2,
"grad_norm": 2.6212220191955566,
"learning_rate": 4.000000000000001e-06,
"loss": 1.5893,
"step": 23000
},
{
"epoch": 9.4,
"grad_norm": 2.9165279865264893,
"learning_rate": 3e-06,
"loss": 1.5796,
"step": 23500
},
{
"epoch": 9.6,
"grad_norm": 2.8491413593292236,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.5832,
"step": 24000
},
{
"epoch": 9.8,
"grad_norm": 3.690595865249634,
"learning_rate": 1.0000000000000002e-06,
"loss": 1.5872,
"step": 24500
},
{
"epoch": 10.0,
"grad_norm": 3.193439245223999,
"learning_rate": 0.0,
"loss": 1.5781,
"step": 25000
},
{
"epoch": 10.0,
"step": 25000,
"total_flos": 3.2973681787109376e+16,
"train_loss": 1.6927287036132812,
"train_runtime": 2178.3738,
"train_samples_per_second": 91.812,
"train_steps_per_second": 11.476
}
],
"logging_steps": 500,
"max_steps": 25000,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.2973681787109376e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
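
The log above records only the training loss and the learning-rate schedule (linear decay from an implied initial rate of 5e-05 down to 0 over 25,000 steps, batch size 8, logging and checkpointing every 500 steps). Below is a minimal sketch of how one might inspect this file offline, assuming Python with matplotlib installed; the input filename `trainer_state.json` matches this file, while the output path `training_curves.png` is just an illustrative choice, not part of the original training setup.

```python
import json

import matplotlib.pyplot as plt

# Load the state file written by the Hugging Face Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-step logging entries; the final summary entry
# reports "train_loss"/"train_runtime" instead of "loss" and is skipped.
logs = [entry for entry in state["log_history"] if "loss" in entry]

steps = [entry["step"] for entry in logs]
losses = [entry["loss"] for entry in logs]
lrs = [entry["learning_rate"] for entry in logs]

print(f"final logged training loss: {losses[-1]:.4f} at step {steps[-1]}")

# Plot the loss curve and the learning-rate schedule side by side.
fig, (ax_loss, ax_lr) = plt.subplots(1, 2, figsize=(10, 4))
ax_loss.plot(steps, losses)
ax_loss.set_xlabel("step")
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, lrs)
ax_lr.set_xlabel("step")
ax_lr.set_ylabel("learning rate")
fig.tight_layout()
fig.savefig("training_curves.png")
```

On this state, the script would report a final logged training loss of 1.5781 at step 25,000; the mean loss over the whole run, as recorded in the summary entry, is about 1.69.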