viggo-llama2-7b-lora-16 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.0,
"eval_steps": 500,
"global_step": 640,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06,
"learning_rate": 4.996988640512931e-05,
"loss": 1.692,
"step": 10
},
{
"epoch": 0.12,
"learning_rate": 4.987961816680492e-05,
"loss": 1.2657,
"step": 20
},
{
"epoch": 0.19,
"learning_rate": 4.972941274911953e-05,
"loss": 1.055,
"step": 30
},
{
"epoch": 0.25,
"learning_rate": 4.951963201008076e-05,
"loss": 0.9921,
"step": 40
},
{
"epoch": 0.31,
"learning_rate": 4.9250781329863606e-05,
"loss": 0.9406,
"step": 50
},
{
"epoch": 0.38,
"learning_rate": 4.892350839330522e-05,
"loss": 0.8524,
"step": 60
},
{
"epoch": 0.44,
"learning_rate": 4.853860162957552e-05,
"loss": 0.8515,
"step": 70
},
{
"epoch": 0.5,
"learning_rate": 4.8096988312782174e-05,
"loss": 0.8268,
"step": 80
},
{
"epoch": 0.56,
"learning_rate": 4.759973232808609e-05,
"loss": 0.8694,
"step": 90
},
{
"epoch": 0.62,
"learning_rate": 4.7048031608708876e-05,
"loss": 0.8054,
"step": 100
},
{
"epoch": 0.69,
"learning_rate": 4.6443215250006806e-05,
"loss": 0.7789,
"step": 110
},
{
"epoch": 0.75,
"learning_rate": 4.5786740307563636e-05,
"loss": 0.8142,
"step": 120
},
{
"epoch": 0.81,
"learning_rate": 4.508018828701612e-05,
"loss": 0.7686,
"step": 130
},
{
"epoch": 0.88,
"learning_rate": 4.4325261334068426e-05,
"loss": 0.7904,
"step": 140
},
{
"epoch": 0.94,
"learning_rate": 4.352377813387398e-05,
"loss": 0.8099,
"step": 150
},
{
"epoch": 1.0,
"learning_rate": 4.267766952966369e-05,
"loss": 0.8211,
"step": 160
},
{
"epoch": 1.06,
"learning_rate": 4.178897387117546e-05,
"loss": 0.7577,
"step": 170
},
{
"epoch": 1.12,
"learning_rate": 4.085983210409114e-05,
"loss": 0.778,
"step": 180
},
{
"epoch": 1.19,
"learning_rate": 3.9892482612310836e-05,
"loss": 0.752,
"step": 190
},
{
"epoch": 1.25,
"learning_rate": 3.888925582549006e-05,
"loss": 0.7247,
"step": 200
},
{
"epoch": 1.31,
"learning_rate": 3.785256860483054e-05,
"loss": 0.7593,
"step": 210
},
{
"epoch": 1.38,
"learning_rate": 3.678491842064995e-05,
"loss": 0.7217,
"step": 220
},
{
"epoch": 1.44,
"learning_rate": 3.568887733575706e-05,
"loss": 0.7509,
"step": 230
},
{
"epoch": 1.5,
"learning_rate": 3.456708580912725e-05,
"loss": 0.7367,
"step": 240
},
{
"epoch": 1.56,
"learning_rate": 3.34222463348055e-05,
"loss": 0.7168,
"step": 250
},
{
"epoch": 1.62,
"learning_rate": 3.225711693136156e-05,
"loss": 0.7567,
"step": 260
},
{
"epoch": 1.69,
"learning_rate": 3.10745044975816e-05,
"loss": 0.7534,
"step": 270
},
{
"epoch": 1.75,
"learning_rate": 2.9877258050403212e-05,
"loss": 0.7681,
"step": 280
},
{
"epoch": 1.81,
"learning_rate": 2.8668261861384045e-05,
"loss": 0.7094,
"step": 290
},
{
"epoch": 1.88,
"learning_rate": 2.7450428508239024e-05,
"loss": 0.7504,
"step": 300
},
{
"epoch": 1.94,
"learning_rate": 2.6226691858185454e-05,
"loss": 0.715,
"step": 310
},
{
"epoch": 2.0,
"learning_rate": 2.5e-05,
"loss": 0.7322,
"step": 320
},
{
"epoch": 2.06,
"learning_rate": 2.3773308141814552e-05,
"loss": 0.7124,
"step": 330
},
{
"epoch": 2.12,
"learning_rate": 2.2549571491760986e-05,
"loss": 0.6626,
"step": 340
},
{
"epoch": 2.19,
"learning_rate": 2.1331738138615958e-05,
"loss": 0.7094,
"step": 350
},
{
"epoch": 2.25,
"learning_rate": 2.0122741949596797e-05,
"loss": 0.7211,
"step": 360
},
{
"epoch": 2.31,
"learning_rate": 1.8925495502418406e-05,
"loss": 0.7176,
"step": 370
},
{
"epoch": 2.38,
"learning_rate": 1.7742883068638447e-05,
"loss": 0.7019,
"step": 380
},
{
"epoch": 2.44,
"learning_rate": 1.65777536651945e-05,
"loss": 0.6754,
"step": 390
},
{
"epoch": 2.5,
"learning_rate": 1.5432914190872757e-05,
"loss": 0.6761,
"step": 400
},
{
"epoch": 2.56,
"learning_rate": 1.4311122664242954e-05,
"loss": 0.6527,
"step": 410
},
{
"epoch": 2.62,
"learning_rate": 1.3215081579350058e-05,
"loss": 0.678,
"step": 420
},
{
"epoch": 2.69,
"learning_rate": 1.2147431395169459e-05,
"loss": 0.6991,
"step": 430
},
{
"epoch": 2.75,
"learning_rate": 1.1110744174509952e-05,
"loss": 0.6507,
"step": 440
},
{
"epoch": 2.81,
"learning_rate": 1.0107517387689166e-05,
"loss": 0.7174,
"step": 450
},
{
"epoch": 2.88,
"learning_rate": 9.140167895908867e-06,
"loss": 0.6967,
"step": 460
},
{
"epoch": 2.94,
"learning_rate": 8.211026128824539e-06,
"loss": 0.6837,
"step": 470
},
{
"epoch": 3.0,
"learning_rate": 7.3223304703363135e-06,
"loss": 0.6855,
"step": 480
},
{
"epoch": 3.06,
"learning_rate": 6.476221866126029e-06,
"loss": 0.6642,
"step": 490
},
{
"epoch": 3.12,
"learning_rate": 5.674738665931575e-06,
"loss": 0.6427,
"step": 500
},
{
"epoch": 3.19,
"learning_rate": 4.91981171298388e-06,
"loss": 0.6512,
"step": 510
},
{
"epoch": 3.25,
"learning_rate": 4.213259692436367e-06,
"loss": 0.677,
"step": 520
},
{
"epoch": 3.31,
"learning_rate": 3.5567847499932e-06,
"loss": 0.6549,
"step": 530
},
{
"epoch": 3.38,
"learning_rate": 2.9519683912911266e-06,
"loss": 0.668,
"step": 540
},
{
"epoch": 3.44,
"learning_rate": 2.4002676719139166e-06,
"loss": 0.6617,
"step": 550
},
{
"epoch": 3.5,
"learning_rate": 1.9030116872178316e-06,
"loss": 0.6543,
"step": 560
},
{
"epoch": 3.56,
"learning_rate": 1.4613983704244826e-06,
"loss": 0.6546,
"step": 570
},
{
"epoch": 3.62,
"learning_rate": 1.0764916066947794e-06,
"loss": 0.6451,
"step": 580
},
{
"epoch": 3.69,
"learning_rate": 7.492186701364007e-07,
"loss": 0.6726,
"step": 590
},
{
"epoch": 3.75,
"learning_rate": 4.803679899192392e-07,
"loss": 0.6451,
"step": 600
},
{
"epoch": 3.81,
"learning_rate": 2.705872508804747e-07,
"loss": 0.6687,
"step": 610
},
{
"epoch": 3.88,
"learning_rate": 1.2038183319507955e-07,
"loss": 0.644,
"step": 620
},
{
"epoch": 3.94,
"learning_rate": 3.011359487068987e-08,
"loss": 0.6766,
"step": 630
},
{
"epoch": 4.0,
"learning_rate": 0.0,
"loss": 0.6681,
"step": 640
},
{
"epoch": 4.0,
"step": 640,
"total_flos": 1.3862359554185626e+17,
"train_loss": 0.7563483387231826,
"train_runtime": 1265.7157,
"train_samples_per_second": 16.127,
"train_steps_per_second": 0.506
}
],
"logging_steps": 10,
"max_steps": 640,
"num_train_epochs": 4,
"save_steps": 200,
"total_flos": 1.3862359554185626e+17,
"trial_name": null,
"trial_params": null
}
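The JSON above is the standard state file the Hugging Face `Trainer` writes alongside checkpoints: `log_history` holds one entry per logging step (here every 10 steps, per `logging_steps`) with the training loss and the decaying learning rate, and a final summary entry with `train_loss`, `train_runtime`, and throughput. As a minimal sketch of how such a file can be inspected, the snippet below loads it and plots the loss and learning-rate curves; the file name `trainer_state.json` and the use of matplotlib are assumptions, not part of the original artifact.

```python
import json

import matplotlib.pyplot as plt

# Load the trainer state written by the Hugging Face Trainer
# (assumed to be saved as trainer_state.json in the current directory).
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-step log entries that carry a "loss" key;
# the final summary entry (train_loss, train_runtime, ...) does not.
logs = [entry for entry in state["log_history"] if "loss" in entry]

steps = [entry["step"] for entry in logs]
losses = [entry["loss"] for entry in logs]
lrs = [entry["learning_rate"] for entry in logs]

# Plot the training-loss curve next to the learning-rate schedule
# (a cosine-style decay from ~5e-5 down to 0 over 640 steps in this run).
fig, (ax_loss, ax_lr) = plt.subplots(1, 2, figsize=(10, 4))
ax_loss.plot(steps, losses)
ax_loss.set_xlabel("step")
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, lrs)
ax_lr.set_xlabel("step")
ax_lr.set_ylabel("learning rate")
fig.tight_layout()
plt.show()
```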