{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 7.058997050147493,
  "eval_steps": 50,
  "global_step": 600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.11799410029498525,
      "grad_norm": 2.4879140853881836,
      "learning_rate": 0.0003,
      "loss": 1.8567,
      "step": 10
    },
    {
      "epoch": 0.2359882005899705,
      "grad_norm": 0.8311281204223633,
      "learning_rate": 0.0002963855421686747,
      "loss": 1.3245,
      "step": 20
    },
    {
      "epoch": 0.35398230088495575,
      "grad_norm": 0.6665307879447937,
      "learning_rate": 0.0002927710843373494,
      "loss": 1.1834,
      "step": 30
    },
    {
      "epoch": 0.471976401179941,
      "grad_norm": 0.5632417798042297,
      "learning_rate": 0.0002891566265060241,
      "loss": 1.0915,
      "step": 40
    },
    {
      "epoch": 0.5899705014749262,
      "grad_norm": 0.4381982684135437,
      "learning_rate": 0.00028554216867469873,
      "loss": 1.0438,
      "step": 50
    },
    {
      "epoch": 0.5899705014749262,
      "eval_loss": 1.0292344093322754,
      "eval_runtime": 20.4608,
      "eval_samples_per_second": 8.015,
      "eval_steps_per_second": 0.684,
      "step": 50
    },
    {
      "epoch": 0.7079646017699115,
      "grad_norm": 0.43256932497024536,
      "learning_rate": 0.0002819277108433735,
      "loss": 1.0156,
      "step": 60
    },
    {
      "epoch": 0.8259587020648967,
      "grad_norm": 0.3582414388656616,
      "learning_rate": 0.0002783132530120482,
      "loss": 0.9793,
      "step": 70
    },
    {
      "epoch": 0.943952802359882,
      "grad_norm": 0.3671708405017853,
      "learning_rate": 0.00027469879518072284,
      "loss": 0.9644,
      "step": 80
    },
    {
      "epoch": 1.0589970501474926,
      "grad_norm": 0.34048739075660706,
      "learning_rate": 0.0002710843373493976,
      "loss": 0.9173,
      "step": 90
    },
    {
      "epoch": 1.176991150442478,
      "grad_norm": 0.29820743203163147,
      "learning_rate": 0.00026746987951807225,
      "loss": 0.8913,
      "step": 100
    },
    {
      "epoch": 1.176991150442478,
      "eval_loss": 0.9288875460624695,
      "eval_runtime": 20.3929,
      "eval_samples_per_second": 8.042,
      "eval_steps_per_second": 0.687,
      "step": 100
    },
    {
      "epoch": 1.294985250737463,
      "grad_norm": 0.31813672184944153,
      "learning_rate": 0.00026385542168674695,
      "loss": 0.8801,
      "step": 110
    },
    {
      "epoch": 1.4129793510324484,
      "grad_norm": 0.2814970016479492,
      "learning_rate": 0.00026024096385542165,
      "loss": 0.875,
      "step": 120
    },
    {
      "epoch": 1.5309734513274336,
      "grad_norm": 0.2815490663051605,
      "learning_rate": 0.00025662650602409636,
      "loss": 0.8642,
      "step": 130
    },
    {
      "epoch": 1.648967551622419,
      "grad_norm": 0.2590789198875427,
      "learning_rate": 0.00025301204819277106,
      "loss": 0.8501,
      "step": 140
    },
    {
      "epoch": 1.7669616519174043,
      "grad_norm": 0.25773778557777405,
      "learning_rate": 0.00024939759036144576,
      "loss": 0.8487,
      "step": 150
    },
    {
      "epoch": 1.7669616519174043,
      "eval_loss": 0.8774459362030029,
      "eval_runtime": 20.4395,
      "eval_samples_per_second": 8.024,
      "eval_steps_per_second": 0.685,
      "step": 150
    },
    {
      "epoch": 1.8849557522123894,
      "grad_norm": 0.2629424035549164,
      "learning_rate": 0.00024578313253012046,
      "loss": 0.8427,
      "step": 160
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.2865707278251648,
      "learning_rate": 0.00024216867469879517,
      "loss": 0.8258,
      "step": 170
    },
    {
      "epoch": 2.117994100294985,
      "grad_norm": 0.27073633670806885,
      "learning_rate": 0.00023855421686746987,
      "loss": 0.7835,
      "step": 180
    },
    {
      "epoch": 2.2359882005899703,
      "grad_norm": 0.2692751884460449,
      "learning_rate": 0.00023493975903614455,
      "loss": 0.7804,
      "step": 190
    },
    {
      "epoch": 2.353982300884956,
      "grad_norm": 0.2548248767852783,
      "learning_rate": 0.00023132530120481928,
      "loss": 0.7756,
      "step": 200
    },
    {
      "epoch": 2.353982300884956,
      "eval_loss": 0.8462932109832764,
      "eval_runtime": 20.4134,
      "eval_samples_per_second": 8.034,
      "eval_steps_per_second": 0.686,
      "step": 200
    },
    {
      "epoch": 2.471976401179941,
      "grad_norm": 0.21938325464725494,
      "learning_rate": 0.00022771084337349395,
      "loss": 0.7695,
      "step": 210
    },
    {
      "epoch": 2.589970501474926,
      "grad_norm": 0.23754891753196716,
      "learning_rate": 0.00022409638554216866,
      "loss": 0.7771,
      "step": 220
    },
    {
      "epoch": 2.7079646017699117,
      "grad_norm": 0.2382248044013977,
      "learning_rate": 0.00022048192771084336,
      "loss": 0.7672,
      "step": 230
    },
    {
      "epoch": 2.825958702064897,
      "grad_norm": 0.2536454200744629,
      "learning_rate": 0.00021686746987951806,
      "loss": 0.7645,
      "step": 240
    },
    {
      "epoch": 2.943952802359882,
      "grad_norm": 0.21255475282669067,
      "learning_rate": 0.00021325301204819274,
      "loss": 0.7667,
      "step": 250
    },
    {
      "epoch": 2.943952802359882,
      "eval_loss": 0.8220995664596558,
      "eval_runtime": 20.3922,
      "eval_samples_per_second": 8.042,
      "eval_steps_per_second": 0.687,
      "step": 250
    },
    {
      "epoch": 3.0589970501474926,
      "grad_norm": 0.2579372227191925,
      "learning_rate": 0.00020963855421686747,
      "loss": 0.7418,
      "step": 260
    },
    {
      "epoch": 3.1769911504424777,
      "grad_norm": 0.2223750501871109,
      "learning_rate": 0.00020602409638554214,
      "loss": 0.7198,
      "step": 270
    },
    {
      "epoch": 3.2949852507374633,
      "grad_norm": 0.26154494285583496,
      "learning_rate": 0.00020240963855421685,
      "loss": 0.7188,
      "step": 280
    },
    {
      "epoch": 3.4129793510324484,
      "grad_norm": 0.2428540140390396,
      "learning_rate": 0.00019879518072289155,
      "loss": 0.7158,
      "step": 290
    },
    {
      "epoch": 3.5309734513274336,
      "grad_norm": 0.2056645303964615,
      "learning_rate": 0.00019518072289156625,
      "loss": 0.7103,
      "step": 300
    },
    {
      "epoch": 3.5309734513274336,
      "eval_loss": 0.8080394864082336,
      "eval_runtime": 20.4084,
      "eval_samples_per_second": 8.036,
      "eval_steps_per_second": 0.686,
      "step": 300
    },
    {
      "epoch": 3.6489675516224187,
      "grad_norm": 0.21578620374202728,
      "learning_rate": 0.00019156626506024093,
      "loss": 0.7133,
      "step": 310
    },
    {
      "epoch": 3.7669616519174043,
      "grad_norm": 0.21399493515491486,
      "learning_rate": 0.00018795180722891566,
      "loss": 0.7146,
      "step": 320
    },
    {
      "epoch": 3.8849557522123894,
      "grad_norm": 0.22075815498828888,
      "learning_rate": 0.00018433734939759034,
      "loss": 0.7183,
      "step": 330
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.2605201005935669,
      "learning_rate": 0.00018072289156626507,
      "loss": 0.7142,
      "step": 340
    },
    {
      "epoch": 4.117994100294985,
      "grad_norm": 0.21863675117492676,
      "learning_rate": 0.00017710843373493974,
      "loss": 0.6761,
      "step": 350
    },
    {
      "epoch": 4.117994100294985,
      "eval_loss": 0.8006919026374817,
      "eval_runtime": 20.379,
      "eval_samples_per_second": 8.047,
      "eval_steps_per_second": 0.687,
      "step": 350
    },
    {
      "epoch": 4.23598820058997,
      "grad_norm": 0.19717003405094147,
      "learning_rate": 0.00017349397590361444,
      "loss": 0.6741,
      "step": 360
    },
    {
      "epoch": 4.353982300884955,
      "grad_norm": 0.21431218087673187,
      "learning_rate": 0.00016987951807228915,
      "loss": 0.6767,
      "step": 370
    },
    {
      "epoch": 4.4719764011799406,
      "grad_norm": 0.21559002995491028,
      "learning_rate": 0.00016626506024096385,
      "loss": 0.6775,
      "step": 380
    },
    {
      "epoch": 4.589970501474927,
      "grad_norm": 0.22161749005317688,
      "learning_rate": 0.00016265060240963853,
      "loss": 0.6754,
      "step": 390
    },
    {
      "epoch": 4.707964601769912,
      "grad_norm": 0.23757950961589813,
      "learning_rate": 0.00015903614457831326,
      "loss": 0.6718,
      "step": 400
    },
    {
      "epoch": 4.707964601769912,
      "eval_loss": 0.7892524003982544,
      "eval_runtime": 20.3783,
      "eval_samples_per_second": 8.048,
      "eval_steps_per_second": 0.687,
      "step": 400
    },
    {
      "epoch": 4.825958702064897,
      "grad_norm": 0.2082790583372116,
      "learning_rate": 0.00015542168674698793,
      "loss": 0.6774,
      "step": 410
    },
    {
      "epoch": 4.943952802359882,
      "grad_norm": 0.22040750086307526,
      "learning_rate": 0.00015180722891566264,
      "loss": 0.6708,
      "step": 420
    },
    {
      "epoch": 5.058997050147493,
      "grad_norm": 0.20725864171981812,
      "learning_rate": 0.00014819277108433734,
      "loss": 0.6561,
      "step": 430
    },
    {
      "epoch": 5.176991150442478,
      "grad_norm": 0.20603080093860626,
      "learning_rate": 0.00014457831325301204,
      "loss": 0.642,
      "step": 440
    },
    {
      "epoch": 5.294985250737463,
      "grad_norm": 0.21384873986244202,
      "learning_rate": 0.00014096385542168674,
      "loss": 0.6424,
      "step": 450
    },
    {
      "epoch": 5.294985250737463,
      "eval_loss": 0.7870513200759888,
      "eval_runtime": 20.3804,
      "eval_samples_per_second": 8.047,
      "eval_steps_per_second": 0.687,
      "step": 450
    },
    {
      "epoch": 5.412979351032448,
      "grad_norm": 0.20905406773090363,
      "learning_rate": 0.00013734939759036142,
      "loss": 0.6388,
      "step": 460
    },
    {
      "epoch": 5.530973451327434,
      "grad_norm": 0.20094947516918182,
      "learning_rate": 0.00013373493975903612,
      "loss": 0.643,
      "step": 470
    },
    {
      "epoch": 5.648967551622419,
      "grad_norm": 0.19701030850410461,
      "learning_rate": 0.00013012048192771083,
      "loss": 0.639,
      "step": 480
    },
    {
      "epoch": 5.766961651917404,
      "grad_norm": 0.2031456083059311,
      "learning_rate": 0.00012650602409638553,
      "loss": 0.6417,
      "step": 490
    },
    {
      "epoch": 5.88495575221239,
      "grad_norm": 0.20272108912467957,
      "learning_rate": 0.00012289156626506023,
      "loss": 0.6374,
      "step": 500
    },
    {
      "epoch": 5.88495575221239,
      "eval_loss": 0.7786917686462402,
      "eval_runtime": 20.3582,
      "eval_samples_per_second": 8.056,
      "eval_steps_per_second": 0.688,
      "step": 500
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.2726292312145233,
      "learning_rate": 0.00011927710843373494,
      "loss": 0.6404,
      "step": 510
    },
    {
      "epoch": 6.117994100294985,
      "grad_norm": 0.19304881989955902,
      "learning_rate": 0.00011566265060240964,
      "loss": 0.6101,
      "step": 520
    },
    {
      "epoch": 6.23598820058997,
      "grad_norm": 0.18884815275669098,
      "learning_rate": 0.00011204819277108433,
      "loss": 0.6131,
      "step": 530
    },
    {
      "epoch": 6.353982300884955,
      "grad_norm": 0.19081687927246094,
      "learning_rate": 0.00010843373493975903,
      "loss": 0.6119,
      "step": 540
    },
    {
      "epoch": 6.4719764011799406,
      "grad_norm": 0.19593416154384613,
      "learning_rate": 0.00010481927710843373,
      "loss": 0.615,
      "step": 550
    },
    {
      "epoch": 6.4719764011799406,
      "eval_loss": 0.7764475345611572,
      "eval_runtime": 20.3753,
      "eval_samples_per_second": 8.049,
      "eval_steps_per_second": 0.687,
      "step": 550
    },
    {
      "epoch": 6.589970501474927,
      "grad_norm": 0.19110922515392303,
      "learning_rate": 0.00010120481927710842,
      "loss": 0.6135,
      "step": 560
    },
    {
      "epoch": 6.707964601769912,
      "grad_norm": 0.19444629549980164,
      "learning_rate": 9.759036144578313e-05,
      "loss": 0.6128,
      "step": 570
    },
    {
      "epoch": 6.825958702064897,
      "grad_norm": 0.19284790754318237,
      "learning_rate": 9.397590361445783e-05,
      "loss": 0.6126,
      "step": 580
    },
    {
      "epoch": 6.943952802359882,
      "grad_norm": 0.19341598451137543,
      "learning_rate": 9.036144578313253e-05,
      "loss": 0.6136,
      "step": 590
    },
    {
      "epoch": 7.058997050147493,
      "grad_norm": 0.18730440735816956,
      "learning_rate": 8.674698795180722e-05,
      "loss": 0.5993,
      "step": 600
    },
    {
      "epoch": 7.058997050147493,
      "eval_loss": 0.7759349942207336,
      "eval_runtime": 20.3649,
      "eval_samples_per_second": 8.053,
      "eval_steps_per_second": 0.687,
      "step": 600
    }
  ],
  "logging_steps": 10,
  "max_steps": 840,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0863014113211056e+19,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}