{ "best_metric": null, "best_model_checkpoint": null, "epoch": 2.92436974789916, "eval_steps": 8, "global_step": 87, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.03361344537815126, "grad_norm": 1.0234375, "learning_rate": 2e-05, "loss": 2.6905, "step": 1 }, { "epoch": 0.03361344537815126, "eval_loss": 2.729154109954834, "eval_runtime": 20.6566, "eval_samples_per_second": 2.421, "eval_steps_per_second": 0.339, "step": 1 }, { "epoch": 0.06722689075630252, "grad_norm": 1.0390625, "learning_rate": 4e-05, "loss": 2.687, "step": 2 }, { "epoch": 0.10084033613445378, "grad_norm": 1.1171875, "learning_rate": 6e-05, "loss": 2.8062, "step": 3 }, { "epoch": 0.13445378151260504, "grad_norm": 1.03125, "learning_rate": 8e-05, "loss": 2.6619, "step": 4 }, { "epoch": 0.16806722689075632, "grad_norm": 1.0, "learning_rate": 0.0001, "loss": 2.5939, "step": 5 }, { "epoch": 0.20168067226890757, "grad_norm": 0.95703125, "learning_rate": 0.00012, "loss": 2.6307, "step": 6 }, { "epoch": 0.23529411764705882, "grad_norm": 0.86328125, "learning_rate": 0.00014, "loss": 2.5751, "step": 7 }, { "epoch": 0.2689075630252101, "grad_norm": 0.82421875, "learning_rate": 0.00016, "loss": 2.4725, "step": 8 }, { "epoch": 0.2689075630252101, "eval_loss": 2.3971643447875977, "eval_runtime": 20.4746, "eval_samples_per_second": 2.442, "eval_steps_per_second": 0.342, "step": 8 }, { "epoch": 0.3025210084033613, "grad_norm": 0.6953125, "learning_rate": 0.00018, "loss": 2.2905, "step": 9 }, { "epoch": 0.33613445378151263, "grad_norm": 0.6015625, "learning_rate": 0.0002, "loss": 2.3883, "step": 10 }, { "epoch": 0.3697478991596639, "grad_norm": 0.53125, "learning_rate": 0.00019995608365087946, "loss": 2.3071, "step": 11 }, { "epoch": 0.40336134453781514, "grad_norm": 0.46484375, "learning_rate": 0.00019982437317643217, "loss": 2.0817, "step": 12 }, { "epoch": 0.4369747899159664, "grad_norm": 0.56640625, "learning_rate": 0.0001996049842615217, "loss": 2.0883, "step": 13 }, { "epoch": 0.47058823529411764, "grad_norm": 0.5, "learning_rate": 0.00019929810960135172, "loss": 2.1018, "step": 14 }, { "epoch": 0.5042016806722689, "grad_norm": 0.5390625, "learning_rate": 0.0001989040187322164, "loss": 2.1015, "step": 15 }, { "epoch": 0.5378151260504201, "grad_norm": 0.51953125, "learning_rate": 0.00019842305779475968, "loss": 1.9891, "step": 16 }, { "epoch": 0.5378151260504201, "eval_loss": 2.071833610534668, "eval_runtime": 20.5312, "eval_samples_per_second": 2.435, "eval_steps_per_second": 0.341, "step": 16 }, { "epoch": 0.5714285714285714, "grad_norm": 0.4921875, "learning_rate": 0.0001978556492299504, "loss": 2.1245, "step": 17 }, { "epoch": 0.6050420168067226, "grad_norm": 0.46875, "learning_rate": 0.0001972022914080411, "loss": 1.9485, "step": 18 }, { "epoch": 0.6386554621848739, "grad_norm": 0.4140625, "learning_rate": 0.00019646355819083589, "loss": 2.0621, "step": 19 }, { "epoch": 0.6722689075630253, "grad_norm": 0.41015625, "learning_rate": 0.00019564009842765225, "loss": 1.9076, "step": 20 }, { "epoch": 0.7058823529411765, "grad_norm": 0.4375, "learning_rate": 0.00019473263538541914, "loss": 1.8968, "step": 21 }, { "epoch": 0.7394957983193278, "grad_norm": 0.326171875, "learning_rate": 0.0001937419661134121, "loss": 1.8977, "step": 22 }, { "epoch": 0.773109243697479, "grad_norm": 0.314453125, "learning_rate": 0.00019266896074318334, "loss": 1.9926, "step": 23 }, { "epoch": 0.8067226890756303, "grad_norm": 0.27734375, "learning_rate": 0.00019151456172430183, 
"loss": 1.8345, "step": 24 }, { "epoch": 0.8067226890756303, "eval_loss": 1.932895302772522, "eval_runtime": 20.5148, "eval_samples_per_second": 2.437, "eval_steps_per_second": 0.341, "step": 24 }, { "epoch": 0.8403361344537815, "grad_norm": 0.291015625, "learning_rate": 0.00019027978299657436, "loss": 1.8168, "step": 25 }, { "epoch": 0.8739495798319328, "grad_norm": 0.29296875, "learning_rate": 0.00018896570909947475, "loss": 1.8794, "step": 26 }, { "epoch": 0.907563025210084, "grad_norm": 0.306640625, "learning_rate": 0.0001875734942195637, "loss": 1.9604, "step": 27 }, { "epoch": 0.9411764705882353, "grad_norm": 0.33984375, "learning_rate": 0.00018610436117673555, "loss": 1.824, "step": 28 }, { "epoch": 0.9747899159663865, "grad_norm": 0.365234375, "learning_rate": 0.0001845596003501826, "loss": 1.9186, "step": 29 }, { "epoch": 1.0084033613445378, "grad_norm": 0.40625, "learning_rate": 0.0001829405685450202, "loss": 1.9281, "step": 30 }, { "epoch": 1.0420168067226891, "grad_norm": 0.396484375, "learning_rate": 0.00018124868780056814, "loss": 1.833, "step": 31 }, { "epoch": 1.0756302521008403, "grad_norm": 0.28515625, "learning_rate": 0.00017948544414133534, "loss": 1.8088, "step": 32 }, { "epoch": 1.0756302521008403, "eval_loss": 1.8729643821716309, "eval_runtime": 20.5377, "eval_samples_per_second": 2.435, "eval_steps_per_second": 0.341, "step": 32 }, { "epoch": 1.1092436974789917, "grad_norm": 0.29296875, "learning_rate": 0.00017765238627180424, "loss": 1.8401, "step": 33 }, { "epoch": 1.1428571428571428, "grad_norm": 0.2451171875, "learning_rate": 0.00017575112421616202, "loss": 1.7736, "step": 34 }, { "epoch": 1.1764705882352942, "grad_norm": 0.2099609375, "learning_rate": 0.00017378332790417273, "loss": 1.7875, "step": 35 }, { "epoch": 1.2100840336134453, "grad_norm": 0.1767578125, "learning_rate": 0.00017175072570443312, "loss": 1.7743, "step": 36 }, { "epoch": 1.2436974789915967, "grad_norm": 0.1982421875, "learning_rate": 0.00016965510290629972, "loss": 1.8167, "step": 37 }, { "epoch": 1.2773109243697478, "grad_norm": 0.2119140625, "learning_rate": 0.00016749830015182107, "loss": 1.8317, "step": 38 }, { "epoch": 1.3109243697478992, "grad_norm": 0.1796875, "learning_rate": 0.00016528221181905217, "loss": 1.7498, "step": 39 }, { "epoch": 1.3445378151260505, "grad_norm": 0.197265625, "learning_rate": 0.00016300878435817113, "loss": 1.8183, "step": 40 }, { "epoch": 1.3445378151260505, "eval_loss": 1.8429957628250122, "eval_runtime": 20.5619, "eval_samples_per_second": 2.432, "eval_steps_per_second": 0.34, "step": 40 }, { "epoch": 1.3781512605042017, "grad_norm": 0.193359375, "learning_rate": 0.00016068001458185936, "loss": 1.7504, "step": 41 }, { "epoch": 1.4117647058823528, "grad_norm": 0.181640625, "learning_rate": 0.0001582979479114472, "loss": 1.7476, "step": 42 }, { "epoch": 1.4453781512605042, "grad_norm": 0.1787109375, "learning_rate": 0.00015586467658036524, "loss": 1.8082, "step": 43 }, { "epoch": 1.4789915966386555, "grad_norm": 0.1669921875, "learning_rate": 0.0001533823377964791, "loss": 1.6987, "step": 44 }, { "epoch": 1.5126050420168067, "grad_norm": 0.1728515625, "learning_rate": 0.00015085311186492206, "loss": 1.7417, "step": 45 }, { "epoch": 1.5462184873949578, "grad_norm": 0.1826171875, "learning_rate": 0.00014827922027307451, "loss": 1.779, "step": 46 }, { "epoch": 1.5798319327731094, "grad_norm": 0.1826171875, "learning_rate": 0.0001456629237393713, "loss": 1.7552, "step": 47 }, { "epoch": 1.6134453781512605, "grad_norm": 0.1767578125, "learning_rate": 
0.00014300652022765207, "loss": 1.8004, "step": 48 }, { "epoch": 1.6134453781512605, "eval_loss": 1.8263332843780518, "eval_runtime": 20.4305, "eval_samples_per_second": 2.447, "eval_steps_per_second": 0.343, "step": 48 }, { "epoch": 1.6470588235294117, "grad_norm": 0.19140625, "learning_rate": 0.00014031234292879725, "loss": 1.7857, "step": 49 }, { "epoch": 1.680672268907563, "grad_norm": 0.181640625, "learning_rate": 0.00013758275821142382, "loss": 1.7193, "step": 50 }, { "epoch": 1.7142857142857144, "grad_norm": 0.1611328125, "learning_rate": 0.0001348201635434399, "loss": 1.7734, "step": 51 }, { "epoch": 1.7478991596638656, "grad_norm": 0.169921875, "learning_rate": 0.00013202698538628376, "loss": 1.7644, "step": 52 }, { "epoch": 1.7815126050420167, "grad_norm": 0.171875, "learning_rate": 0.00012920567706369758, "loss": 1.8631, "step": 53 }, { "epoch": 1.815126050420168, "grad_norm": 0.1845703125, "learning_rate": 0.00012635871660690676, "loss": 1.7735, "step": 54 }, { "epoch": 1.8487394957983194, "grad_norm": 0.1689453125, "learning_rate": 0.00012348860457809838, "loss": 1.7956, "step": 55 }, { "epoch": 1.8823529411764706, "grad_norm": 0.1708984375, "learning_rate": 0.00012059786187410984, "loss": 1.7674, "step": 56 }, { "epoch": 1.8823529411764706, "eval_loss": 1.8166663646697998, "eval_runtime": 20.5178, "eval_samples_per_second": 2.437, "eval_steps_per_second": 0.341, "step": 56 }, { "epoch": 1.9159663865546217, "grad_norm": 0.169921875, "learning_rate": 0.0001176890275122573, "loss": 1.7228, "step": 57 }, { "epoch": 1.949579831932773, "grad_norm": 0.169921875, "learning_rate": 0.00011476465640024814, "loss": 1.7496, "step": 58 }, { "epoch": 1.9831932773109244, "grad_norm": 0.173828125, "learning_rate": 0.00011182731709213659, "loss": 1.6776, "step": 59 }, { "epoch": 2.0168067226890756, "grad_norm": 0.171875, "learning_rate": 0.00010887958953229349, "loss": 1.7346, "step": 60 }, { "epoch": 2.0504201680672267, "grad_norm": 0.1669921875, "learning_rate": 0.00010592406278937144, "loss": 1.8371, "step": 61 }, { "epoch": 2.0840336134453783, "grad_norm": 0.158203125, "learning_rate": 0.00010296333278225599, "loss": 1.7113, "step": 62 }, { "epoch": 2.1176470588235294, "grad_norm": 0.14453125, "learning_rate": 0.0001, "loss": 1.7299, "step": 63 }, { "epoch": 2.1512605042016806, "grad_norm": 0.154296875, "learning_rate": 9.703666721774402e-05, "loss": 1.7164, "step": 64 }, { "epoch": 2.1512605042016806, "eval_loss": 1.810401439666748, "eval_runtime": 20.5114, "eval_samples_per_second": 2.438, "eval_steps_per_second": 0.341, "step": 64 }, { "epoch": 2.184873949579832, "grad_norm": 0.1572265625, "learning_rate": 9.407593721062859e-05, "loss": 1.746, "step": 65 }, { "epoch": 2.2184873949579833, "grad_norm": 0.169921875, "learning_rate": 9.112041046770653e-05, "loss": 1.7507, "step": 66 }, { "epoch": 2.2521008403361344, "grad_norm": 0.158203125, "learning_rate": 8.817268290786343e-05, "loss": 1.6836, "step": 67 }, { "epoch": 2.2857142857142856, "grad_norm": 0.154296875, "learning_rate": 8.523534359975189e-05, "loss": 1.7432, "step": 68 }, { "epoch": 2.3193277310924367, "grad_norm": 0.16796875, "learning_rate": 8.231097248774274e-05, "loss": 1.7734, "step": 69 }, { "epoch": 2.3529411764705883, "grad_norm": 0.1591796875, "learning_rate": 7.940213812589018e-05, "loss": 1.6683, "step": 70 }, { "epoch": 2.3865546218487395, "grad_norm": 0.154296875, "learning_rate": 7.651139542190164e-05, "loss": 1.6743, "step": 71 }, { "epoch": 2.4201680672268906, "grad_norm": 0.1708984375, "learning_rate": 
7.364128339309326e-05, "loss": 1.6525, "step": 72 }, { "epoch": 2.4201680672268906, "eval_loss": 1.806851863861084, "eval_runtime": 20.4653, "eval_samples_per_second": 2.443, "eval_steps_per_second": 0.342, "step": 72 }, { "epoch": 2.453781512605042, "grad_norm": 0.16015625, "learning_rate": 7.079432293630244e-05, "loss": 1.708, "step": 73 }, { "epoch": 2.4873949579831933, "grad_norm": 0.16015625, "learning_rate": 6.797301461371625e-05, "loss": 1.6694, "step": 74 }, { "epoch": 2.5210084033613445, "grad_norm": 0.162109375, "learning_rate": 6.517983645656014e-05, "loss": 1.5762, "step": 75 }, { "epoch": 2.5546218487394956, "grad_norm": 0.1640625, "learning_rate": 6.24172417885762e-05, "loss": 1.7879, "step": 76 }, { "epoch": 2.588235294117647, "grad_norm": 0.1591796875, "learning_rate": 5.96876570712028e-05, "loss": 1.6566, "step": 77 }, { "epoch": 2.6218487394957983, "grad_norm": 0.16015625, "learning_rate": 5.699347977234799e-05, "loss": 1.686, "step": 78 }, { "epoch": 2.6554621848739495, "grad_norm": 0.158203125, "learning_rate": 5.43370762606287e-05, "loss": 1.7537, "step": 79 }, { "epoch": 2.689075630252101, "grad_norm": 0.162109375, "learning_rate": 5.172077972692553e-05, "loss": 1.7917, "step": 80 }, { "epoch": 2.689075630252101, "eval_loss": 1.805298924446106, "eval_runtime": 20.4745, "eval_samples_per_second": 2.442, "eval_steps_per_second": 0.342, "step": 80 }, { "epoch": 2.722689075630252, "grad_norm": 0.166015625, "learning_rate": 4.914688813507797e-05, "loss": 1.7791, "step": 81 }, { "epoch": 2.7563025210084033, "grad_norm": 0.1748046875, "learning_rate": 4.661766220352097e-05, "loss": 1.7805, "step": 82 }, { "epoch": 2.7899159663865545, "grad_norm": 0.1630859375, "learning_rate": 4.4135323419634766e-05, "loss": 1.6505, "step": 83 }, { "epoch": 2.8235294117647056, "grad_norm": 0.15625, "learning_rate": 4.170205208855281e-05, "loss": 1.6896, "step": 84 }, { "epoch": 2.857142857142857, "grad_norm": 0.1630859375, "learning_rate": 3.931998541814069e-05, "loss": 1.6374, "step": 85 }, { "epoch": 2.8907563025210083, "grad_norm": 0.1689453125, "learning_rate": 3.69912156418289e-05, "loss": 1.7444, "step": 86 }, { "epoch": 2.92436974789916, "grad_norm": 0.1494140625, "learning_rate": 3.471778818094785e-05, "loss": 1.7065, "step": 87 } ], "logging_steps": 1, "max_steps": 116, "num_input_tokens_seen": 0, "num_train_epochs": 4, "save_steps": 29, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 9933851341357056.0, "train_batch_size": 8, "trial_name": null, "trial_params": null }
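
The log above interleaves per-step training records (keyed by "loss" and "grad_norm") with periodic evaluation records (keyed by "eval_loss", emitted every 8 steps per "eval_steps"). A minimal sketch for turning it into loss curves follows, assuming the JSON is saved under the Trainer's usual file name "trainer_state.json"; the path, output file name, and plotting choices are illustrative, not part of the original log.

import json

import matplotlib.pyplot as plt

# Load the trainer state shown above (assumed saved as trainer_state.json).
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training and evaluation entries; split them by key.
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

# Plot train loss per step and eval loss at each evaluation step.
plt.plot(*zip(*train), label="train loss")
plt.plot(*zip(*evals), marker="o", label="eval loss")
plt.xlabel("global step")
plt.ylabel("loss")
plt.title(
    f"epoch {state['epoch']:.2f} of {state['num_train_epochs']}, "
    f"step {state['global_step']} of {state['max_steps']}"
)
plt.legend()
plt.savefig("loss_curves.png", dpi=150)

Run as-is, this would show the train loss falling from about 2.69 to about 1.7 and the eval loss flattening near 1.805 by step 80, matching the figures recorded in the log.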