{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.4024144869215292,
  "eval_steps": 5,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008048289738430584,
      "eval_loss": 0.38133421540260315,
      "eval_runtime": 33.5879,
      "eval_samples_per_second": 6.222,
      "eval_steps_per_second": 0.804,
      "step": 1
    },
    {
      "epoch": 0.02414486921529175,
      "grad_norm": 0.7796753644943237,
      "learning_rate": 3e-05,
      "loss": 0.325,
      "step": 3
    },
    {
      "epoch": 0.04024144869215292,
      "eval_loss": 0.21984921395778656,
      "eval_runtime": 32.9714,
      "eval_samples_per_second": 6.339,
      "eval_steps_per_second": 0.819,
      "step": 5
    },
    {
      "epoch": 0.0482897384305835,
      "grad_norm": 0.5462653040885925,
      "learning_rate": 6e-05,
      "loss": 0.2524,
      "step": 6
    },
    {
      "epoch": 0.07243460764587525,
      "grad_norm": 0.2789907455444336,
      "learning_rate": 9e-05,
      "loss": 0.094,
      "step": 9
    },
    {
      "epoch": 0.08048289738430583,
      "eval_loss": 0.04736727476119995,
      "eval_runtime": 32.9992,
      "eval_samples_per_second": 6.333,
      "eval_steps_per_second": 0.818,
      "step": 10
    },
    {
      "epoch": 0.096579476861167,
      "grad_norm": 0.2077365219593048,
      "learning_rate": 9.938441702975689e-05,
      "loss": 0.0431,
      "step": 12
    },
    {
      "epoch": 0.12072434607645875,
      "grad_norm": 0.2362423539161682,
      "learning_rate": 9.619397662556435e-05,
      "loss": 0.0303,
      "step": 15
    },
    {
      "epoch": 0.12072434607645875,
      "eval_loss": 0.022640502080321312,
      "eval_runtime": 32.9869,
      "eval_samples_per_second": 6.336,
      "eval_steps_per_second": 0.819,
      "step": 15
    },
    {
      "epoch": 0.1448692152917505,
      "grad_norm": 0.11892444640398026,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.0165,
      "step": 18
    },
    {
      "epoch": 0.16096579476861167,
      "eval_loss": 0.013070314191281796,
      "eval_runtime": 32.9925,
      "eval_samples_per_second": 6.335,
      "eval_steps_per_second": 0.818,
      "step": 20
    },
    {
      "epoch": 0.16901408450704225,
      "grad_norm": 0.07342974841594696,
      "learning_rate": 8.247240241650918e-05,
      "loss": 0.0177,
      "step": 21
    },
    {
      "epoch": 0.193158953722334,
      "grad_norm": 0.1093919426202774,
      "learning_rate": 7.269952498697734e-05,
      "loss": 0.013,
      "step": 24
    },
    {
      "epoch": 0.2012072434607646,
      "eval_loss": 0.010123283602297306,
      "eval_runtime": 33.0079,
      "eval_samples_per_second": 6.332,
      "eval_steps_per_second": 0.818,
      "step": 25
    },
    {
      "epoch": 0.21730382293762576,
      "grad_norm": 0.07795149087905884,
      "learning_rate": 6.167226819279528e-05,
      "loss": 0.0092,
      "step": 27
    },
    {
      "epoch": 0.2414486921529175,
      "grad_norm": 0.0602896623313427,
      "learning_rate": 5e-05,
      "loss": 0.009,
      "step": 30
    },
    {
      "epoch": 0.2414486921529175,
      "eval_loss": 0.008096303790807724,
      "eval_runtime": 33.0663,
      "eval_samples_per_second": 6.321,
      "eval_steps_per_second": 0.817,
      "step": 30
    },
    {
      "epoch": 0.2655935613682093,
      "grad_norm": 0.037129033356904984,
      "learning_rate": 3.832773180720475e-05,
      "loss": 0.0068,
      "step": 33
    },
    {
      "epoch": 0.28169014084507044,
      "eval_loss": 0.007025540340691805,
      "eval_runtime": 33.0263,
      "eval_samples_per_second": 6.328,
      "eval_steps_per_second": 0.818,
      "step": 35
    },
    {
      "epoch": 0.289738430583501,
      "grad_norm": 0.05079665035009384,
      "learning_rate": 2.7300475013022663e-05,
      "loss": 0.0047,
      "step": 36
    },
    {
      "epoch": 0.31388329979879276,
      "grad_norm": 0.053295738995075226,
      "learning_rate": 1.7527597583490822e-05,
      "loss": 0.0069,
      "step": 39
    },
    {
      "epoch": 0.32193158953722334,
      "eval_loss": 0.006381278857588768,
      "eval_runtime": 33.0314,
      "eval_samples_per_second": 6.327,
      "eval_steps_per_second": 0.817,
      "step": 40
    },
    {
      "epoch": 0.3380281690140845,
      "grad_norm": 0.06008715555071831,
      "learning_rate": 9.549150281252633e-06,
      "loss": 0.0047,
      "step": 42
    },
    {
      "epoch": 0.36217303822937624,
      "grad_norm": 0.05889170616865158,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.0063,
      "step": 45
    },
    {
      "epoch": 0.36217303822937624,
      "eval_loss": 0.0061826580204069614,
      "eval_runtime": 33.0232,
      "eval_samples_per_second": 6.329,
      "eval_steps_per_second": 0.818,
      "step": 45
    },
    {
      "epoch": 0.386317907444668,
      "grad_norm": 0.04322628304362297,
      "learning_rate": 6.15582970243117e-07,
      "loss": 0.0043,
      "step": 48
    },
    {
      "epoch": 0.4024144869215292,
      "eval_loss": 0.006113050039857626,
      "eval_runtime": 33.0447,
      "eval_samples_per_second": 6.325,
      "eval_steps_per_second": 0.817,
      "step": 50
    }
  ],
  "logging_steps": 3,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.497199726905262e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}