{
"best_metric": 0.1676628440618515,
"best_model_checkpoint": "saves/Qwen1.5-1.8B/WordProblem/checkpoint-9000",
"epoch": 0.9999725884707108,
"eval_steps": 1500,
"global_step": 9120,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03289383514706285,
"grad_norm": 4.8125,
"learning_rate": 3e-05,
"loss": 0.7143,
"step": 300
},
{
"epoch": 0.0657876702941257,
"grad_norm": 3.5,
"learning_rate": 4.998339850669331e-05,
"loss": 0.2219,
"step": 600
},
{
"epoch": 0.09868150544118856,
"grad_norm": 4.40625,
"learning_rate": 4.9734816848192624e-05,
"loss": 0.2074,
"step": 900
},
{
"epoch": 0.1315753405882514,
"grad_norm": 4.03125,
"learning_rate": 4.9190839785031474e-05,
"loss": 0.1906,
"step": 1200
},
{
"epoch": 0.16446917573531428,
"grad_norm": 3.78125,
"learning_rate": 4.835796376008569e-05,
"loss": 0.1923,
"step": 1500
},
{
"epoch": 0.16446917573531428,
"eval_loss": 0.1849033087491989,
"eval_runtime": 79.6037,
"eval_samples_per_second": 37.423,
"eval_steps_per_second": 9.359,
"step": 1500
},
{
"epoch": 0.19736301088237712,
"grad_norm": 4.15625,
"learning_rate": 4.7246135390382216e-05,
"loss": 0.1839,
"step": 1800
},
{
"epoch": 0.23025684602944,
"grad_norm": 4.5,
"learning_rate": 4.586863267968384e-05,
"loss": 0.1938,
"step": 2100
},
{
"epoch": 0.2631506811765028,
"grad_norm": 3.1875,
"learning_rate": 4.4241906446007296e-05,
"loss": 0.1863,
"step": 2400
},
{
"epoch": 0.2960445163235657,
"grad_norm": 2.71875,
"learning_rate": 4.238538385782601e-05,
"loss": 0.1797,
"step": 2700
},
{
"epoch": 0.32893835147062855,
"grad_norm": 3.65625,
"learning_rate": 4.032123642522486e-05,
"loss": 0.176,
"step": 3000
},
{
"epoch": 0.32893835147062855,
"eval_loss": 0.1760552078485489,
"eval_runtime": 79.6333,
"eval_samples_per_second": 37.409,
"eval_steps_per_second": 9.355,
"step": 3000
},
{
"epoch": 0.3618321866176914,
"grad_norm": 4.0,
"learning_rate": 3.8074115216771435e-05,
"loss": 0.1791,
"step": 3300
},
{
"epoch": 0.39472602176475424,
"grad_norm": 3.90625,
"learning_rate": 3.567085646427478e-05,
"loss": 0.1808,
"step": 3600
},
{
"epoch": 0.4276198569118171,
"grad_norm": 3.421875,
"learning_rate": 3.3140161071244915e-05,
"loss": 0.1805,
"step": 3900
},
{
"epoch": 0.46051369205888,
"grad_norm": 2.640625,
"learning_rate": 3.05122518525215e-05,
"loss": 0.1738,
"step": 4200
},
{
"epoch": 0.49340752720594283,
"grad_norm": 4.5,
"learning_rate": 2.781851259848554e-05,
"loss": 0.1736,
"step": 4500
},
{
"epoch": 0.49340752720594283,
"eval_loss": 0.17090687155723572,
"eval_runtime": 79.6329,
"eval_samples_per_second": 37.409,
"eval_steps_per_second": 9.355,
"step": 4500
},
{
"epoch": 0.5263013623530056,
"grad_norm": 3.578125,
"learning_rate": 2.509111327432736e-05,
"loss": 0.1709,
"step": 4800
},
{
"epoch": 0.5591951975000685,
"grad_norm": 3.515625,
"learning_rate": 2.236262583042668e-05,
"loss": 0.1775,
"step": 5100
},
{
"epoch": 0.5920890326471314,
"grad_norm": 4.9375,
"learning_rate": 1.966563521202681e-05,
"loss": 0.1759,
"step": 5400
},
{
"epoch": 0.6249828677941942,
"grad_norm": 3.640625,
"learning_rate": 1.7032350213717874e-05,
"loss": 0.1754,
"step": 5700
},
{
"epoch": 0.6578767029412571,
"grad_norm": 3.578125,
"learning_rate": 1.4494218826096939e-05,
"loss": 0.1688,
"step": 6000
},
{
"epoch": 0.6578767029412571,
"eval_loss": 0.16823573410511017,
"eval_runtime": 79.6163,
"eval_samples_per_second": 37.417,
"eval_steps_per_second": 9.357,
"step": 6000
},
{
"epoch": 0.6907705380883199,
"grad_norm": 3.6875,
"learning_rate": 1.2081552668325321e-05,
"loss": 0.1707,
"step": 6300
},
{
"epoch": 0.7236643732353828,
"grad_norm": 4.0625,
"learning_rate": 9.82316499179518e-06,
"loss": 0.171,
"step": 6600
},
{
"epoch": 0.7565582083824457,
"grad_norm": 3.984375,
"learning_rate": 7.74602657804425e-06,
"loss": 0.1702,
"step": 6900
},
{
"epoch": 0.7894520435295085,
"grad_norm": 3.421875,
"learning_rate": 5.874943640356082e-06,
"loss": 0.1718,
"step": 7200
},
{
"epoch": 0.8223458786765714,
"grad_norm": 4.1875,
"learning_rate": 4.232261575703861e-06,
"loss": 0.1689,
"step": 7500
},
{
"epoch": 0.8223458786765714,
"eval_loss": 0.16773280501365662,
"eval_runtime": 79.6198,
"eval_samples_per_second": 37.415,
"eval_steps_per_second": 9.357,
"step": 7500
},
{
"epoch": 0.8552397138236342,
"grad_norm": 2.703125,
"learning_rate": 2.83759810497852e-06,
"loss": 0.1692,
"step": 7800
},
{
"epoch": 0.8881335489706971,
"grad_norm": 4.0625,
"learning_rate": 1.70760898847247e-06,
"loss": 0.1787,
"step": 8100
},
{
"epoch": 0.92102738411776,
"grad_norm": 4.71875,
"learning_rate": 8.557891145603042e-07,
"loss": 0.1733,
"step": 8400
},
{
"epoch": 0.9539212192648228,
"grad_norm": 3.6875,
"learning_rate": 2.923113370737779e-07,
"loss": 0.1741,
"step": 8700
},
{
"epoch": 0.9868150544118857,
"grad_norm": 4.3125,
"learning_rate": 2.3904986054812396e-08,
"loss": 0.168,
"step": 9000
},
{
"epoch": 0.9868150544118857,
"eval_loss": 0.1676628440618515,
"eval_runtime": 79.7085,
"eval_samples_per_second": 37.374,
"eval_steps_per_second": 9.347,
"step": 9000
},
{
"epoch": 0.9999725884707108,
"step": 9120,
"total_flos": 3.6976201313039155e+17,
"train_loss": 0.19690959160788019,
"train_runtime": 20589.0374,
"train_samples_per_second": 7.087,
"train_steps_per_second": 0.443
}
],
"logging_steps": 300,
"max_steps": 9120,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 3000,
"total_flos": 3.6976201313039155e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}