{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.03904724716907458,
  "eval_steps": 13,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0007809449433814916,
      "grad_norm": 175.4751434326172,
      "learning_rate": 1e-05,
      "loss": 36.1201,
      "step": 1
    },
    {
      "epoch": 0.0007809449433814916,
      "eval_loss": 3.8799195289611816,
      "eval_runtime": 315.5656,
      "eval_samples_per_second": 6.835,
      "eval_steps_per_second": 3.419,
      "step": 1
    },
    {
      "epoch": 0.0015618898867629833,
      "grad_norm": 238.4190673828125,
      "learning_rate": 2e-05,
      "loss": 50.8572,
      "step": 2
    },
    {
      "epoch": 0.0023428348301444747,
      "grad_norm": 363.2473449707031,
      "learning_rate": 3e-05,
      "loss": 69.5408,
      "step": 3
    },
    {
      "epoch": 0.0031237797735259665,
      "grad_norm": 166.27882385253906,
      "learning_rate": 4e-05,
      "loss": 36.4834,
      "step": 4
    },
    {
      "epoch": 0.003904724716907458,
      "grad_norm": 206.5617218017578,
      "learning_rate": 5e-05,
      "loss": 43.0769,
      "step": 5
    },
    {
      "epoch": 0.004685669660288949,
      "grad_norm": 192.99514770507812,
      "learning_rate": 6e-05,
      "loss": 57.0914,
      "step": 6
    },
    {
      "epoch": 0.005466614603670442,
      "grad_norm": 187.74777221679688,
      "learning_rate": 7e-05,
      "loss": 41.0429,
      "step": 7
    },
    {
      "epoch": 0.006247559547051933,
      "grad_norm": 242.54119873046875,
      "learning_rate": 8e-05,
      "loss": 50.7796,
      "step": 8
    },
    {
      "epoch": 0.007028504490433424,
      "grad_norm": 88.46190643310547,
      "learning_rate": 9e-05,
      "loss": 23.8709,
      "step": 9
    },
    {
      "epoch": 0.007809449433814916,
      "grad_norm": 102.14177703857422,
      "learning_rate": 0.0001,
      "loss": 22.4249,
      "step": 10
    },
    {
      "epoch": 0.008590394377196407,
      "grad_norm": 71.25434875488281,
      "learning_rate": 9.98458666866564e-05,
      "loss": 20.7456,
      "step": 11
    },
    {
      "epoch": 0.009371339320577899,
      "grad_norm": 97.2330551147461,
      "learning_rate": 9.938441702975689e-05,
      "loss": 20.3148,
      "step": 12
    },
    {
      "epoch": 0.01015228426395939,
      "grad_norm": 71.13471221923828,
      "learning_rate": 9.861849601988383e-05,
      "loss": 17.7955,
      "step": 13
    },
    {
      "epoch": 0.01015228426395939,
      "eval_loss": 1.2057011127471924,
      "eval_runtime": 300.8171,
      "eval_samples_per_second": 7.17,
      "eval_steps_per_second": 3.587,
      "step": 13
    },
    {
      "epoch": 0.010933229207340883,
      "grad_norm": 70.08533477783203,
      "learning_rate": 9.755282581475769e-05,
      "loss": 17.0262,
      "step": 14
    },
    {
      "epoch": 0.011714174150722375,
      "grad_norm": 69.94759368896484,
      "learning_rate": 9.619397662556435e-05,
      "loss": 18.1821,
      "step": 15
    },
    {
      "epoch": 0.012495119094103866,
      "grad_norm": 58.61181640625,
      "learning_rate": 9.45503262094184e-05,
      "loss": 17.1924,
      "step": 16
    },
    {
      "epoch": 0.013276064037485357,
      "grad_norm": 54.452816009521484,
      "learning_rate": 9.263200821770461e-05,
      "loss": 14.6744,
      "step": 17
    },
    {
      "epoch": 0.014057008980866849,
      "grad_norm": 67.27265930175781,
      "learning_rate": 9.045084971874738e-05,
      "loss": 17.592,
      "step": 18
    },
    {
      "epoch": 0.01483795392424834,
      "grad_norm": 51.187679290771484,
      "learning_rate": 8.802029828000156e-05,
      "loss": 20.2565,
      "step": 19
    },
    {
      "epoch": 0.015618898867629832,
      "grad_norm": 45.95616912841797,
      "learning_rate": 8.535533905932738e-05,
      "loss": 16.9602,
      "step": 20
    },
    {
      "epoch": 0.016399843811011325,
      "grad_norm": 46.94184112548828,
      "learning_rate": 8.247240241650918e-05,
      "loss": 16.319,
      "step": 21
    },
    {
      "epoch": 0.017180788754392814,
      "grad_norm": 43.87798309326172,
      "learning_rate": 7.938926261462366e-05,
      "loss": 12.6792,
      "step": 22
    },
    {
      "epoch": 0.017961733697774308,
      "grad_norm": 51.43169403076172,
      "learning_rate": 7.612492823579745e-05,
      "loss": 16.1705,
      "step": 23
    },
    {
      "epoch": 0.018742678641155797,
      "grad_norm": 47.817901611328125,
      "learning_rate": 7.269952498697734e-05,
      "loss": 11.841,
      "step": 24
    },
    {
      "epoch": 0.01952362358453729,
      "grad_norm": 56.87557601928711,
      "learning_rate": 6.91341716182545e-05,
      "loss": 17.5764,
      "step": 25
    },
    {
      "epoch": 0.02030456852791878,
      "grad_norm": 45.88191223144531,
      "learning_rate": 6.545084971874738e-05,
      "loss": 13.4572,
      "step": 26
    },
    {
      "epoch": 0.02030456852791878,
      "eval_loss": 0.8506395816802979,
      "eval_runtime": 300.4403,
      "eval_samples_per_second": 7.179,
      "eval_steps_per_second": 3.591,
      "step": 26
    },
    {
      "epoch": 0.021085513471300273,
      "grad_norm": 44.15971374511719,
      "learning_rate": 6.167226819279528e-05,
      "loss": 12.4669,
      "step": 27
    },
    {
      "epoch": 0.021866458414681766,
      "grad_norm": 70.013916015625,
      "learning_rate": 5.782172325201155e-05,
      "loss": 12.9177,
      "step": 28
    },
    {
      "epoch": 0.022647403358063256,
      "grad_norm": 59.45874786376953,
      "learning_rate": 5.392295478639225e-05,
      "loss": 13.9473,
      "step": 29
    },
    {
      "epoch": 0.02342834830144475,
      "grad_norm": 49.43613815307617,
      "learning_rate": 5e-05,
      "loss": 13.1336,
      "step": 30
    },
    {
      "epoch": 0.02420929324482624,
      "grad_norm": 51.60984420776367,
      "learning_rate": 4.607704521360776e-05,
      "loss": 11.6511,
      "step": 31
    },
    {
      "epoch": 0.024990238188207732,
      "grad_norm": 39.98220443725586,
      "learning_rate": 4.2178276747988446e-05,
      "loss": 11.9204,
      "step": 32
    },
    {
      "epoch": 0.025771183131589222,
      "grad_norm": 53.87978744506836,
      "learning_rate": 3.832773180720475e-05,
      "loss": 14.3878,
      "step": 33
    },
    {
      "epoch": 0.026552128074970715,
      "grad_norm": 55.93902587890625,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 10.2647,
      "step": 34
    },
    {
      "epoch": 0.027333073018352205,
      "grad_norm": 45.10630798339844,
      "learning_rate": 3.086582838174551e-05,
      "loss": 12.807,
      "step": 35
    },
    {
      "epoch": 0.028114017961733698,
      "grad_norm": 57.13920593261719,
      "learning_rate": 2.7300475013022663e-05,
      "loss": 14.7782,
      "step": 36
    },
    {
      "epoch": 0.02889496290511519,
      "grad_norm": 39.96442413330078,
      "learning_rate": 2.3875071764202563e-05,
      "loss": 13.0561,
      "step": 37
    },
    {
      "epoch": 0.02967590784849668,
      "grad_norm": 38.73209762573242,
      "learning_rate": 2.061073738537635e-05,
      "loss": 11.2156,
      "step": 38
    },
    {
      "epoch": 0.030456852791878174,
      "grad_norm": 49.080196380615234,
      "learning_rate": 1.7527597583490822e-05,
      "loss": 12.6033,
      "step": 39
    },
    {
      "epoch": 0.030456852791878174,
      "eval_loss": 0.7845144271850586,
      "eval_runtime": 300.1758,
      "eval_samples_per_second": 7.186,
      "eval_steps_per_second": 3.595,
      "step": 39
    },
    {
      "epoch": 0.031237797735259663,
      "grad_norm": 47.26570510864258,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 12.213,
      "step": 40
    },
    {
      "epoch": 0.03201874267864115,
      "grad_norm": 37.84298324584961,
      "learning_rate": 1.1979701719998453e-05,
      "loss": 12.9798,
      "step": 41
    },
    {
      "epoch": 0.03279968762202265,
      "grad_norm": 37.72480773925781,
      "learning_rate": 9.549150281252633e-06,
      "loss": 10.6722,
      "step": 42
    },
    {
      "epoch": 0.03358063256540414,
      "grad_norm": 44.485225677490234,
      "learning_rate": 7.367991782295391e-06,
      "loss": 12.6417,
      "step": 43
    },
    {
      "epoch": 0.03436157750878563,
      "grad_norm": 36.807350158691406,
      "learning_rate": 5.449673790581611e-06,
      "loss": 11.1903,
      "step": 44
    },
    {
      "epoch": 0.035142522452167126,
      "grad_norm": 34.367584228515625,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 11.6601,
      "step": 45
    },
    {
      "epoch": 0.035923467395548615,
      "grad_norm": 48.056583404541016,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 13.341,
      "step": 46
    },
    {
      "epoch": 0.036704412338930105,
      "grad_norm": 46.177330017089844,
      "learning_rate": 1.3815039801161721e-06,
      "loss": 13.4865,
      "step": 47
    },
    {
      "epoch": 0.037485357282311595,
      "grad_norm": 54.51680374145508,
      "learning_rate": 6.15582970243117e-07,
      "loss": 12.0828,
      "step": 48
    },
    {
      "epoch": 0.03826630222569309,
      "grad_norm": 43.835418701171875,
      "learning_rate": 1.5413331334360182e-07,
      "loss": 12.4233,
      "step": 49
    },
    {
      "epoch": 0.03904724716907458,
      "grad_norm": 44.20928955078125,
      "learning_rate": 0.0,
      "loss": 13.5881,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 13,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.812517994725376e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}