mistral-mm / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0848329048843186,
"eval_steps": 400,
"global_step": 1200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06,
"learning_rate": 0.00019903980796159233,
"loss": 1.2412,
"step": 25
},
{
"epoch": 0.13,
"learning_rate": 0.00019803960792158433,
"loss": 1.2101,
"step": 50
},
{
"epoch": 0.19,
"learning_rate": 0.00019703940788157634,
"loss": 1.1308,
"step": 75
},
{
"epoch": 0.26,
"learning_rate": 0.00019603920784156832,
"loss": 1.1847,
"step": 100
},
{
"epoch": 0.32,
"learning_rate": 0.00019503900780156033,
"loss": 1.183,
"step": 125
},
{
"epoch": 0.39,
"learning_rate": 0.0001940388077615523,
"loss": 1.1129,
"step": 150
},
{
"epoch": 0.45,
"learning_rate": 0.00019303860772154432,
"loss": 1.2248,
"step": 175
},
{
"epoch": 0.51,
"learning_rate": 0.00019203840768153632,
"loss": 1.1082,
"step": 200
},
{
"epoch": 0.58,
"learning_rate": 0.0001910382076415283,
"loss": 1.2025,
"step": 225
},
{
"epoch": 0.64,
"learning_rate": 0.0001900380076015203,
"loss": 1.2208,
"step": 250
},
{
"epoch": 0.71,
"learning_rate": 0.0001890378075615123,
"loss": 1.1124,
"step": 275
},
{
"epoch": 0.77,
"learning_rate": 0.0001880376075215043,
"loss": 1.0934,
"step": 300
},
{
"epoch": 0.84,
"learning_rate": 0.00018703740748149633,
"loss": 1.2967,
"step": 325
},
{
"epoch": 0.9,
"learning_rate": 0.0001860372074414883,
"loss": 1.1463,
"step": 350
},
{
"epoch": 0.96,
"learning_rate": 0.00018503700740148032,
"loss": 1.1978,
"step": 375
},
{
"epoch": 1.03,
"learning_rate": 0.0001840368073614723,
"loss": 1.0147,
"step": 400
},
{
"epoch": 1.03,
"eval_loss": 0.8289958238601685,
"eval_runtime": 1938.2766,
"eval_samples_per_second": 0.401,
"eval_steps_per_second": 0.051,
"step": 400
},
{
"epoch": 1.09,
"learning_rate": 0.0001830366073214643,
"loss": 0.7315,
"step": 425
},
{
"epoch": 1.16,
"learning_rate": 0.0001820364072814563,
"loss": 0.8464,
"step": 450
},
{
"epoch": 1.22,
"learning_rate": 0.0001810362072414483,
"loss": 0.7709,
"step": 475
},
{
"epoch": 1.29,
"learning_rate": 0.0001800360072014403,
"loss": 0.8396,
"step": 500
},
{
"epoch": 1.35,
"learning_rate": 0.00017903580716143228,
"loss": 0.7598,
"step": 525
},
{
"epoch": 1.41,
"learning_rate": 0.00017803560712142429,
"loss": 0.7829,
"step": 550
},
{
"epoch": 1.48,
"learning_rate": 0.0001770354070814163,
"loss": 0.8044,
"step": 575
},
{
"epoch": 1.54,
"learning_rate": 0.0001760352070414083,
"loss": 0.7746,
"step": 600
},
{
"epoch": 1.61,
"learning_rate": 0.0001750350070014003,
"loss": 0.8287,
"step": 625
},
{
"epoch": 1.67,
"learning_rate": 0.0001740348069613923,
"loss": 0.7134,
"step": 650
},
{
"epoch": 1.74,
"learning_rate": 0.0001730346069213843,
"loss": 0.7763,
"step": 675
},
{
"epoch": 1.8,
"learning_rate": 0.00017203440688137627,
"loss": 0.8097,
"step": 700
},
{
"epoch": 1.86,
"learning_rate": 0.00017103420684136828,
"loss": 0.8152,
"step": 725
},
{
"epoch": 1.93,
"learning_rate": 0.0001700340068013603,
"loss": 0.7876,
"step": 750
},
{
"epoch": 1.99,
"learning_rate": 0.00016903380676135227,
"loss": 0.7261,
"step": 775
},
{
"epoch": 2.06,
"learning_rate": 0.00016803360672134428,
"loss": 0.3176,
"step": 800
},
{
"epoch": 2.06,
"eval_loss": 0.2893330454826355,
"eval_runtime": 1936.9393,
"eval_samples_per_second": 0.401,
"eval_steps_per_second": 0.051,
"step": 800
},
{
"epoch": 2.12,
"learning_rate": 0.00016703340668133626,
"loss": 0.3089,
"step": 825
},
{
"epoch": 2.19,
"learning_rate": 0.00016603320664132826,
"loss": 0.3265,
"step": 850
},
{
"epoch": 2.25,
"learning_rate": 0.00016503300660132027,
"loss": 0.2873,
"step": 875
},
{
"epoch": 2.31,
"learning_rate": 0.00016403280656131228,
"loss": 0.3553,
"step": 900
},
{
"epoch": 2.38,
"learning_rate": 0.00016303260652130428,
"loss": 0.3184,
"step": 925
},
{
"epoch": 2.44,
"learning_rate": 0.00016203240648129626,
"loss": 0.2688,
"step": 950
},
{
"epoch": 2.51,
"learning_rate": 0.00016103220644128827,
"loss": 0.2998,
"step": 975
},
{
"epoch": 2.57,
"learning_rate": 0.00016003200640128028,
"loss": 0.283,
"step": 1000
},
{
"epoch": 2.63,
"learning_rate": 0.00015903180636127226,
"loss": 0.313,
"step": 1025
},
{
"epoch": 2.7,
"learning_rate": 0.00015803160632126427,
"loss": 0.3122,
"step": 1050
},
{
"epoch": 2.76,
"learning_rate": 0.00015703140628125625,
"loss": 0.2782,
"step": 1075
},
{
"epoch": 2.83,
"learning_rate": 0.00015603120624124825,
"loss": 0.2886,
"step": 1100
},
{
"epoch": 2.89,
"learning_rate": 0.00015503100620124026,
"loss": 0.2898,
"step": 1125
},
{
"epoch": 2.96,
"learning_rate": 0.00015403080616123224,
"loss": 0.2956,
"step": 1150
},
{
"epoch": 3.02,
"learning_rate": 0.00015303060612122427,
"loss": 0.2609,
"step": 1175
},
{
"epoch": 3.08,
"learning_rate": 0.00015203040608121625,
"loss": 0.115,
"step": 1200
},
{
"epoch": 3.08,
"eval_loss": 0.10693752020597458,
"eval_runtime": 1938.4879,
"eval_samples_per_second": 0.401,
"eval_steps_per_second": 0.051,
"step": 1200
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 13,
"save_steps": 400,
"total_flos": 2.0361775954431836e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
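The `log_history` entries above follow the schema written by the Hugging Face `Trainer`: each record carries `epoch` and `step` plus either training metrics (`loss`, `learning_rate`) or evaluation metrics (`eval_loss`, `eval_runtime`, ...). A minimal sketch of reading this file and plotting the curves, assuming it has been downloaded locally as `trainer_state.json` and that `matplotlib` is installed:

```python
import json

import matplotlib.pyplot as plt

# Load the trainer state exported by the Hugging Face Trainer.
# The path is an assumption; point it at wherever the file lives locally.
with open("trainer_state.json") as f:
    state = json.load(f)

# Split the log history into training records and evaluation records.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

# Plot training loss per logged step and overlay the eval checkpoints.
plt.plot([e["step"] for e in train_logs],
         [e["loss"] for e in train_logs], label="train loss")
plt.plot([e["step"] for e in eval_logs],
         [e["eval_loss"] for e in eval_logs], "o", label="eval loss")
plt.xlabel("global step")
plt.ylabel("loss")
plt.legend()
plt.show()
```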