{
  "best_metric": 0.5362756848335266,
  "best_model_checkpoint": "/kaggle/working/results/checkpoint-13500",
  "epoch": 0.977340186780569,
  "eval_steps": 500,
  "global_step": 13500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03619778469557663,
      "grad_norm": 8.067161560058594,
      "learning_rate": 9.638022153044234e-05,
      "loss": 0.873,
      "step": 500
    },
    {
      "epoch": 0.03619778469557663,
      "eval_loss": 1.0014784336090088,
      "eval_runtime": 122.228,
      "eval_samples_per_second": 63.815,
      "eval_steps_per_second": 0.499,
      "step": 500
    },
    {
      "epoch": 0.07239556939115326,
      "grad_norm": 8.213248252868652,
      "learning_rate": 9.276044306088468e-05,
      "loss": 0.8261,
      "step": 1000
    },
    {
      "epoch": 0.07239556939115326,
      "eval_loss": 0.8797920346260071,
      "eval_runtime": 121.4743,
      "eval_samples_per_second": 64.211,
      "eval_steps_per_second": 0.502,
      "step": 1000
    },
    {
      "epoch": 0.10859335408672989,
      "grad_norm": 8.027277946472168,
      "learning_rate": 8.914066459132702e-05,
      "loss": 0.8686,
      "step": 1500
    },
    {
      "epoch": 0.10859335408672989,
      "eval_loss": 0.8256328105926514,
      "eval_runtime": 121.6337,
      "eval_samples_per_second": 64.127,
      "eval_steps_per_second": 0.502,
      "step": 1500
    },
    {
      "epoch": 0.1447911387823065,
      "grad_norm": 5.501945972442627,
      "learning_rate": 8.552088612176935e-05,
      "loss": 0.8266,
      "step": 2000
    },
    {
      "epoch": 0.1447911387823065,
      "eval_loss": 0.7719565033912659,
      "eval_runtime": 121.8754,
      "eval_samples_per_second": 64.0,
      "eval_steps_per_second": 0.501,
      "step": 2000
    },
    {
      "epoch": 0.18098892347788315,
      "grad_norm": 7.218145370483398,
      "learning_rate": 8.190110765221169e-05,
      "loss": 0.801,
      "step": 2500
    },
    {
      "epoch": 0.18098892347788315,
      "eval_loss": 0.766424834728241,
      "eval_runtime": 121.9461,
      "eval_samples_per_second": 63.963,
      "eval_steps_per_second": 0.5,
      "step": 2500
    },
    {
      "epoch": 0.21718670817345978,
      "grad_norm": 9.288457870483398,
      "learning_rate": 7.828132918265403e-05,
      "loss": 0.7709,
      "step": 3000
    },
    {
      "epoch": 0.21718670817345978,
      "eval_loss": 0.7332597970962524,
      "eval_runtime": 121.8362,
      "eval_samples_per_second": 64.02,
      "eval_steps_per_second": 0.501,
      "step": 3000
    },
    {
      "epoch": 0.2533844928690364,
      "grad_norm": 9.63444709777832,
      "learning_rate": 7.466155071309636e-05,
      "loss": 0.7401,
      "step": 3500
    },
    {
      "epoch": 0.2533844928690364,
      "eval_loss": 0.6940492987632751,
      "eval_runtime": 122.025,
      "eval_samples_per_second": 63.921,
      "eval_steps_per_second": 0.5,
      "step": 3500
    },
    {
      "epoch": 0.289582277564613,
      "grad_norm": 4.352470874786377,
      "learning_rate": 7.10417722435387e-05,
      "loss": 0.7217,
      "step": 4000
    },
    {
      "epoch": 0.289582277564613,
      "eval_loss": 0.7044554948806763,
      "eval_runtime": 122.039,
      "eval_samples_per_second": 63.914,
      "eval_steps_per_second": 0.5,
      "step": 4000
    },
    {
      "epoch": 0.3257800622601897,
      "grad_norm": 8.299939155578613,
      "learning_rate": 6.742199377398104e-05,
      "loss": 0.7149,
      "step": 4500
    },
    {
      "epoch": 0.3257800622601897,
      "eval_loss": 0.684947669506073,
      "eval_runtime": 122.0541,
      "eval_samples_per_second": 63.906,
      "eval_steps_per_second": 0.5,
      "step": 4500
    },
    {
      "epoch": 0.3619778469557663,
      "grad_norm": 5.032785415649414,
      "learning_rate": 6.380221530442338e-05,
      "loss": 0.6913,
      "step": 5000
    },
    {
      "epoch": 0.3619778469557663,
      "eval_loss": 0.6848333477973938,
      "eval_runtime": 122.0073,
      "eval_samples_per_second": 63.931,
      "eval_steps_per_second": 0.5,
      "step": 5000
    },
    {
      "epoch": 0.39817563165134295,
      "grad_norm": 7.25339412689209,
      "learning_rate": 6.0182436834865705e-05,
      "loss": 0.6769,
      "step": 5500
    },
    {
      "epoch": 0.39817563165134295,
      "eval_loss": 0.681818425655365,
      "eval_runtime": 121.9878,
      "eval_samples_per_second": 63.941,
      "eval_steps_per_second": 0.5,
      "step": 5500
    },
    {
      "epoch": 0.43437341634691956,
      "grad_norm": 4.095344066619873,
      "learning_rate": 5.656265836530804e-05,
      "loss": 0.6542,
      "step": 6000
    },
    {
      "epoch": 0.43437341634691956,
      "eval_loss": 0.647565484046936,
      "eval_runtime": 122.0947,
      "eval_samples_per_second": 63.885,
      "eval_steps_per_second": 0.5,
      "step": 6000
    },
    {
      "epoch": 0.4705712010424962,
      "grad_norm": 8.288174629211426,
      "learning_rate": 5.2942879895750386e-05,
      "loss": 0.6545,
      "step": 6500
    },
    {
      "epoch": 0.4705712010424962,
      "eval_loss": 0.6471512913703918,
      "eval_runtime": 122.0703,
      "eval_samples_per_second": 63.898,
      "eval_steps_per_second": 0.5,
      "step": 6500
    },
    {
      "epoch": 0.5067689857380728,
      "grad_norm": 5.419597625732422,
      "learning_rate": 4.9323101426192716e-05,
      "loss": 0.6421,
      "step": 7000
    },
    {
      "epoch": 0.5067689857380728,
      "eval_loss": 0.6336092948913574,
      "eval_runtime": 121.8453,
      "eval_samples_per_second": 64.016,
      "eval_steps_per_second": 0.501,
      "step": 7000
    },
    {
      "epoch": 0.5429667704336495,
      "grad_norm": 4.020125865936279,
      "learning_rate": 4.570332295663506e-05,
      "loss": 0.6354,
      "step": 7500
    },
    {
      "epoch": 0.5429667704336495,
      "eval_loss": 0.6177634596824646,
      "eval_runtime": 122.0498,
      "eval_samples_per_second": 63.908,
      "eval_steps_per_second": 0.5,
      "step": 7500
    },
    {
      "epoch": 0.579164555129226,
      "grad_norm": 11.914624214172363,
      "learning_rate": 4.208354448707739e-05,
      "loss": 0.637,
      "step": 8000
    },
    {
      "epoch": 0.579164555129226,
      "eval_loss": 0.60080486536026,
      "eval_runtime": 122.3284,
      "eval_samples_per_second": 63.763,
      "eval_steps_per_second": 0.499,
      "step": 8000
    },
    {
      "epoch": 0.6153623398248027,
      "grad_norm": 6.125587463378906,
      "learning_rate": 3.8463766017519734e-05,
      "loss": 0.6093,
      "step": 8500
    },
    {
      "epoch": 0.6153623398248027,
      "eval_loss": 0.6066346168518066,
      "eval_runtime": 122.3759,
      "eval_samples_per_second": 63.738,
      "eval_steps_per_second": 0.498,
      "step": 8500
    },
    {
      "epoch": 0.6515601245203794,
      "grad_norm": 6.668566703796387,
      "learning_rate": 3.4843987547962065e-05,
      "loss": 0.6107,
      "step": 9000
    },
    {
      "epoch": 0.6515601245203794,
      "eval_loss": 0.5826736688613892,
      "eval_runtime": 121.7463,
      "eval_samples_per_second": 64.068,
      "eval_steps_per_second": 0.501,
      "step": 9000
    },
    {
      "epoch": 0.687757909215956,
      "grad_norm": 5.720597267150879,
      "learning_rate": 3.12242090784044e-05,
      "loss": 0.5869,
      "step": 9500
    },
    {
      "epoch": 0.687757909215956,
      "eval_loss": 0.5872470736503601,
      "eval_runtime": 122.3512,
      "eval_samples_per_second": 63.751,
      "eval_steps_per_second": 0.499,
      "step": 9500
    },
    {
      "epoch": 0.7239556939115326,
      "grad_norm": 12.91817855834961,
      "learning_rate": 2.760443060884674e-05,
      "loss": 0.5907,
      "step": 10000
    },
    {
      "epoch": 0.7239556939115326,
      "eval_loss": 0.5741276741027832,
      "eval_runtime": 122.0844,
      "eval_samples_per_second": 63.89,
      "eval_steps_per_second": 0.5,
      "step": 10000
    },
    {
      "epoch": 0.7601534786071092,
      "grad_norm": 4.865262031555176,
      "learning_rate": 2.3984652139289076e-05,
      "loss": 0.591,
      "step": 10500
    },
    {
      "epoch": 0.7601534786071092,
      "eval_loss": 0.5652084350585938,
      "eval_runtime": 122.5281,
      "eval_samples_per_second": 63.659,
      "eval_steps_per_second": 0.498,
      "step": 10500
    },
    {
      "epoch": 0.7963512633026859,
      "grad_norm": 3.508721351623535,
      "learning_rate": 2.0364873669731413e-05,
      "loss": 0.5786,
      "step": 11000
    },
    {
      "epoch": 0.7963512633026859,
      "eval_loss": 0.5722984075546265,
      "eval_runtime": 122.2178,
      "eval_samples_per_second": 63.82,
      "eval_steps_per_second": 0.499,
      "step": 11000
    },
    {
      "epoch": 0.8325490479982625,
      "grad_norm": 5.5506272315979,
      "learning_rate": 1.674509520017375e-05,
      "loss": 0.5777,
      "step": 11500
    },
    {
      "epoch": 0.8325490479982625,
      "eval_loss": 0.5580069422721863,
      "eval_runtime": 122.1376,
      "eval_samples_per_second": 63.862,
      "eval_steps_per_second": 0.499,
      "step": 11500
    },
    {
      "epoch": 0.8687468326938391,
      "grad_norm": 5.747720241546631,
      "learning_rate": 1.3125316730616085e-05,
      "loss": 0.5837,
      "step": 12000
    },
    {
      "epoch": 0.8687468326938391,
      "eval_loss": 0.5526942014694214,
      "eval_runtime": 122.4022,
      "eval_samples_per_second": 63.724,
      "eval_steps_per_second": 0.498,
      "step": 12000
    },
    {
      "epoch": 0.9049446173894158,
      "grad_norm": 5.70701265335083,
      "learning_rate": 9.505538261058424e-06,
      "loss": 0.5565,
      "step": 12500
    },
    {
      "epoch": 0.9049446173894158,
      "eval_loss": 0.5394288301467896,
      "eval_runtime": 122.3157,
      "eval_samples_per_second": 63.769,
      "eval_steps_per_second": 0.499,
      "step": 12500
    },
    {
      "epoch": 0.9411424020849924,
      "grad_norm": 3.8808445930480957,
      "learning_rate": 5.885759791500761e-06,
      "loss": 0.5645,
      "step": 13000
    },
    {
      "epoch": 0.9411424020849924,
      "eval_loss": 0.5413234233856201,
      "eval_runtime": 122.1305,
      "eval_samples_per_second": 63.866,
      "eval_steps_per_second": 0.499,
      "step": 13000
    },
    {
      "epoch": 0.977340186780569,
      "grad_norm": 7.781164169311523,
      "learning_rate": 2.2659813219430974e-06,
      "loss": 0.5288,
      "step": 13500
    },
    {
      "epoch": 0.977340186780569,
      "eval_loss": 0.5362756848335266,
      "eval_runtime": 122.2122,
      "eval_samples_per_second": 63.823,
      "eval_steps_per_second": 0.499,
      "step": 13500
    }
  ],
  "logging_steps": 500,
  "max_steps": 13813,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.13688468946944e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}