granite-3.1-3b-a800m-t1 / trainer_state.json
{
"best_metric": 0.6242462992668152,
"best_model_checkpoint": "saves/granite-3.1-3b-a800m-instruct\\checkpoint-400",
"epoch": 0.6676403087836428,
"eval_steps": 100,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01669100771959107,
"grad_norm": 2.9202163219451904,
"learning_rate": 0.0004,
"loss": 1.278,
"step": 10
},
{
"epoch": 0.03338201543918214,
"grad_norm": 4.189798355102539,
"learning_rate": 0.0004,
"loss": 0.9626,
"step": 20
},
{
"epoch": 0.05007302315877321,
"grad_norm": 2.869903087615967,
"learning_rate": 0.0004,
"loss": 0.993,
"step": 30
},
{
"epoch": 0.06676403087836429,
"grad_norm": 2.438555955886841,
"learning_rate": 0.0004,
"loss": 0.8561,
"step": 40
},
{
"epoch": 0.08345503859795535,
"grad_norm": 2.5792276859283447,
"learning_rate": 0.0004,
"loss": 1.008,
"step": 50
},
{
"epoch": 0.10014604631754642,
"grad_norm": 2.982292890548706,
"learning_rate": 0.0004,
"loss": 0.7934,
"step": 60
},
{
"epoch": 0.1168370540371375,
"grad_norm": 1.8241057395935059,
"learning_rate": 0.0004,
"loss": 0.7811,
"step": 70
},
{
"epoch": 0.13352806175672857,
"grad_norm": 1.7634323835372925,
"learning_rate": 0.0004,
"loss": 0.8709,
"step": 80
},
{
"epoch": 0.15021906947631963,
"grad_norm": 2.9324874877929688,
"learning_rate": 0.0004,
"loss": 0.8429,
"step": 90
},
{
"epoch": 0.1669100771959107,
"grad_norm": 1.6204209327697754,
"learning_rate": 0.0004,
"loss": 0.7855,
"step": 100
},
{
"epoch": 0.1669100771959107,
"eval_loss": 0.6945974230766296,
"eval_runtime": 172.0416,
"eval_samples_per_second": 6.196,
"eval_steps_per_second": 3.098,
"step": 100
},
{
"epoch": 0.18360108491550178,
"grad_norm": 3.2142117023468018,
"learning_rate": 0.0004,
"loss": 0.802,
"step": 110
},
{
"epoch": 0.20029209263509284,
"grad_norm": 2.2638301849365234,
"learning_rate": 0.0004,
"loss": 0.7556,
"step": 120
},
{
"epoch": 0.2169831003546839,
"grad_norm": 1.815718412399292,
"learning_rate": 0.0004,
"loss": 0.8419,
"step": 130
},
{
"epoch": 0.233674108074275,
"grad_norm": 2.0672075748443604,
"learning_rate": 0.0004,
"loss": 0.9097,
"step": 140
},
{
"epoch": 0.25036511579386606,
"grad_norm": 1.7800904512405396,
"learning_rate": 0.0004,
"loss": 0.8887,
"step": 150
},
{
"epoch": 0.26705612351345714,
"grad_norm": 2.7289271354675293,
"learning_rate": 0.0004,
"loss": 0.8078,
"step": 160
},
{
"epoch": 0.2837471312330482,
"grad_norm": 1.8588060140609741,
"learning_rate": 0.0004,
"loss": 0.824,
"step": 170
},
{
"epoch": 0.30043813895263927,
"grad_norm": 3.1786084175109863,
"learning_rate": 0.0004,
"loss": 0.7997,
"step": 180
},
{
"epoch": 0.31712914667223036,
"grad_norm": 1.992241382598877,
"learning_rate": 0.0004,
"loss": 0.7627,
"step": 190
},
{
"epoch": 0.3338201543918214,
"grad_norm": 1.9906195402145386,
"learning_rate": 0.0004,
"loss": 0.7525,
"step": 200
},
{
"epoch": 0.3338201543918214,
"eval_loss": 0.6729084849357605,
"eval_runtime": 168.6462,
"eval_samples_per_second": 6.321,
"eval_steps_per_second": 3.16,
"step": 200
},
{
"epoch": 0.3505111621114125,
"grad_norm": 1.408159613609314,
"learning_rate": 0.0004,
"loss": 0.803,
"step": 210
},
{
"epoch": 0.36720216983100357,
"grad_norm": 2.2278130054473877,
"learning_rate": 0.0004,
"loss": 0.8844,
"step": 220
},
{
"epoch": 0.3838931775505946,
"grad_norm": 2.3945512771606445,
"learning_rate": 0.0004,
"loss": 0.9442,
"step": 230
},
{
"epoch": 0.4005841852701857,
"grad_norm": 1.1758439540863037,
"learning_rate": 0.0004,
"loss": 0.8379,
"step": 240
},
{
"epoch": 0.4172751929897768,
"grad_norm": 2.483109951019287,
"learning_rate": 0.0004,
"loss": 0.7718,
"step": 250
},
{
"epoch": 0.4339662007093678,
"grad_norm": 1.4695591926574707,
"learning_rate": 0.0004,
"loss": 0.7664,
"step": 260
},
{
"epoch": 0.4506572084289589,
"grad_norm": 1.5021220445632935,
"learning_rate": 0.0004,
"loss": 0.92,
"step": 270
},
{
"epoch": 0.46734821614855,
"grad_norm": 1.426329255104065,
"learning_rate": 0.0004,
"loss": 0.9022,
"step": 280
},
{
"epoch": 0.484039223868141,
"grad_norm": 1.4195940494537354,
"learning_rate": 0.0004,
"loss": 0.7857,
"step": 290
},
{
"epoch": 0.5007302315877321,
"grad_norm": 1.7680014371871948,
"learning_rate": 0.0004,
"loss": 0.8381,
"step": 300
},
{
"epoch": 0.5007302315877321,
"eval_loss": 0.6377778649330139,
"eval_runtime": 168.5375,
"eval_samples_per_second": 6.325,
"eval_steps_per_second": 3.163,
"step": 300
},
{
"epoch": 0.5174212393073232,
"grad_norm": 1.8080698251724243,
"learning_rate": 0.0004,
"loss": 0.8346,
"step": 310
},
{
"epoch": 0.5341122470269143,
"grad_norm": 7.522341728210449,
"learning_rate": 0.0004,
"loss": 0.7861,
"step": 320
},
{
"epoch": 0.5508032547465053,
"grad_norm": 1.397352933883667,
"learning_rate": 0.0004,
"loss": 0.8113,
"step": 330
},
{
"epoch": 0.5674942624660964,
"grad_norm": 1.4032018184661865,
"learning_rate": 0.0004,
"loss": 0.7936,
"step": 340
},
{
"epoch": 0.5841852701856874,
"grad_norm": 1.6604522466659546,
"learning_rate": 0.0004,
"loss": 0.7278,
"step": 350
},
{
"epoch": 0.6008762779052785,
"grad_norm": 1.8743423223495483,
"learning_rate": 0.0004,
"loss": 0.8652,
"step": 360
},
{
"epoch": 0.6175672856248696,
"grad_norm": 3.0662145614624023,
"learning_rate": 0.0004,
"loss": 0.8746,
"step": 370
},
{
"epoch": 0.6342582933444607,
"grad_norm": 1.4264730215072632,
"learning_rate": 0.0004,
"loss": 0.7431,
"step": 380
},
{
"epoch": 0.6509493010640517,
"grad_norm": 1.9815279245376587,
"learning_rate": 0.0004,
"loss": 0.828,
"step": 390
},
{
"epoch": 0.6676403087836428,
"grad_norm": 1.3689788579940796,
"learning_rate": 0.0004,
"loss": 0.8564,
"step": 400
},
{
"epoch": 0.6676403087836428,
"eval_loss": 0.6242462992668152,
"eval_runtime": 168.5455,
"eval_samples_per_second": 6.325,
"eval_steps_per_second": 3.162,
"step": 400
}
],
"logging_steps": 10,
"max_steps": 10000,
"num_input_tokens_seen": 0,
"num_train_epochs": 17,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.420426751907922e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
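
The JSON above is a Hugging Face Transformers trainer_state.json: log_history interleaves training-loss entries (every logging_steps = 10 steps) with evaluation entries (every eval_steps = 100 steps), while best_metric and best_model_checkpoint record the lowest eval_loss seen so far. Below is a minimal sketch of how one might separate and inspect the two series, assuming the file has been downloaded locally as trainer_state.json (the local path is an assumption, not part of the upload):

import json

# Load the trainer state exported by the Hugging Face Trainer.
# "trainer_state.json" is an assumed local path to the file shown above.
with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

# Training-loss entries carry "loss"; evaluation entries carry "eval_loss".
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"{len(train_log)} training-loss points, "
      f"first {train_log[0]['loss']}, last {train_log[-1]['loss']}")

for entry in eval_log:
    print(f"step {entry['step']:>4}  eval_loss {entry['eval_loss']:.4f}")

# Best checkpoint recorded by the Trainer (lowest eval_loss so far).
print("best_metric:", state["best_metric"])
print("best_model_checkpoint:", state["best_model_checkpoint"])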