{
  "best_metric": 1.1396163702011108,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.15273004963726614,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0030546009927453228,
      "grad_norm": 411.8155212402344,
      "learning_rate": 5e-05,
      "loss": 72.0418,
      "step": 1
    },
    {
      "epoch": 0.0030546009927453228,
      "eval_loss": 5.549919128417969,
      "eval_runtime": 56.6085,
      "eval_samples_per_second": 38.969,
      "eval_steps_per_second": 4.876,
      "step": 1
    },
    {
      "epoch": 0.0061092019854906456,
      "grad_norm": 517.7806396484375,
      "learning_rate": 0.0001,
      "loss": 77.2635,
      "step": 2
    },
    {
      "epoch": 0.009163802978235968,
      "grad_norm": 375.3448181152344,
      "learning_rate": 9.989294616193017e-05,
      "loss": 51.8792,
      "step": 3
    },
    {
      "epoch": 0.012218403970981291,
      "grad_norm": 1181.99462890625,
      "learning_rate": 9.957224306869053e-05,
      "loss": 76.3616,
      "step": 4
    },
    {
      "epoch": 0.015273004963726614,
      "grad_norm": 512.3447875976562,
      "learning_rate": 9.903926402016153e-05,
      "loss": 40.4179,
      "step": 5
    },
    {
      "epoch": 0.018327605956471937,
      "grad_norm": 190.83563232421875,
      "learning_rate": 9.829629131445342e-05,
      "loss": 34.7413,
      "step": 6
    },
    {
      "epoch": 0.021382206949217258,
      "grad_norm": 295.7945861816406,
      "learning_rate": 9.73465064747553e-05,
      "loss": 32.2792,
      "step": 7
    },
    {
      "epoch": 0.024436807941962582,
      "grad_norm": 154.5236053466797,
      "learning_rate": 9.619397662556435e-05,
      "loss": 28.1299,
      "step": 8
    },
    {
      "epoch": 0.027491408934707903,
      "grad_norm": 179.23910522460938,
      "learning_rate": 9.484363707663442e-05,
      "loss": 27.647,
      "step": 9
    },
    {
      "epoch": 0.030546009927453228,
      "grad_norm": 154.86285400390625,
      "learning_rate": 9.330127018922194e-05,
      "loss": 29.4737,
      "step": 10
    },
    {
      "epoch": 0.03360061092019855,
      "grad_norm": 205.29489135742188,
      "learning_rate": 9.157348061512727e-05,
      "loss": 31.0466,
      "step": 11
    },
    {
      "epoch": 0.03665521191294387,
      "grad_norm": 173.7435760498047,
      "learning_rate": 8.966766701456177e-05,
      "loss": 32.8035,
      "step": 12
    },
    {
      "epoch": 0.039709812905689194,
      "grad_norm": 177.4287567138672,
      "learning_rate": 8.759199037394887e-05,
      "loss": 35.287,
      "step": 13
    },
    {
      "epoch": 0.042764413898434515,
      "grad_norm": 132.8975372314453,
      "learning_rate": 8.535533905932738e-05,
      "loss": 28.0127,
      "step": 14
    },
    {
      "epoch": 0.045819014891179836,
      "grad_norm": 100.02606201171875,
      "learning_rate": 8.296729075500344e-05,
      "loss": 24.2048,
      "step": 15
    },
    {
      "epoch": 0.048873615883925164,
      "grad_norm": 144.72496032714844,
      "learning_rate": 8.043807145043604e-05,
      "loss": 24.6628,
      "step": 16
    },
    {
      "epoch": 0.051928216876670485,
      "grad_norm": 99.14219665527344,
      "learning_rate": 7.777851165098012e-05,
      "loss": 22.3685,
      "step": 17
    },
    {
      "epoch": 0.054982817869415807,
      "grad_norm": 91.8460922241211,
      "learning_rate": 7.500000000000001e-05,
      "loss": 20.5469,
      "step": 18
    },
    {
      "epoch": 0.05803741886216113,
      "grad_norm": 91.75062561035156,
      "learning_rate": 7.211443451095007e-05,
      "loss": 20.878,
      "step": 19
    },
    {
      "epoch": 0.061092019854906456,
      "grad_norm": 87.56897735595703,
      "learning_rate": 6.91341716182545e-05,
      "loss": 20.1802,
      "step": 20
    },
    {
      "epoch": 0.06414662084765177,
      "grad_norm": 111.00213623046875,
      "learning_rate": 6.607197326515808e-05,
      "loss": 23.1227,
      "step": 21
    },
    {
      "epoch": 0.0672012218403971,
      "grad_norm": 112.78516387939453,
      "learning_rate": 6.294095225512603e-05,
      "loss": 23.5927,
      "step": 22
    },
    {
      "epoch": 0.07025582283314243,
      "grad_norm": 120.71290588378906,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 24.7728,
      "step": 23
    },
    {
      "epoch": 0.07331042382588775,
      "grad_norm": 110.4635009765625,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 23.4427,
      "step": 24
    },
    {
      "epoch": 0.07636502481863307,
      "grad_norm": 152.16371154785156,
      "learning_rate": 5.327015646150716e-05,
      "loss": 30.3582,
      "step": 25
    },
    {
      "epoch": 0.07636502481863307,
      "eval_loss": 1.358454704284668,
      "eval_runtime": 56.6567,
      "eval_samples_per_second": 38.936,
      "eval_steps_per_second": 4.871,
      "step": 25
    },
    {
      "epoch": 0.07941962581137839,
      "grad_norm": 102.45426177978516,
      "learning_rate": 5e-05,
      "loss": 22.9259,
      "step": 26
    },
    {
      "epoch": 0.08247422680412371,
      "grad_norm": 90.15953826904297,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 21.9522,
      "step": 27
    },
    {
      "epoch": 0.08552882779686903,
      "grad_norm": 92.5013427734375,
      "learning_rate": 4.347369038899744e-05,
      "loss": 22.2064,
      "step": 28
    },
    {
      "epoch": 0.08858342878961435,
      "grad_norm": 72.16334533691406,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 18.7869,
      "step": 29
    },
    {
      "epoch": 0.09163802978235967,
      "grad_norm": 81.92129516601562,
      "learning_rate": 3.705904774487396e-05,
      "loss": 15.8735,
      "step": 30
    },
    {
      "epoch": 0.09469263077510501,
      "grad_norm": 101.18927764892578,
      "learning_rate": 3.392802673484193e-05,
      "loss": 19.6555,
      "step": 31
    },
    {
      "epoch": 0.09774723176785033,
      "grad_norm": 81.10553741455078,
      "learning_rate": 3.086582838174551e-05,
      "loss": 19.2793,
      "step": 32
    },
    {
      "epoch": 0.10080183276059565,
      "grad_norm": 72.74671936035156,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 18.6,
      "step": 33
    },
    {
      "epoch": 0.10385643375334097,
      "grad_norm": 86.57416534423828,
      "learning_rate": 2.500000000000001e-05,
      "loss": 18.3163,
      "step": 34
    },
    {
      "epoch": 0.10691103474608629,
      "grad_norm": 91.92635345458984,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 19.9557,
      "step": 35
    },
    {
      "epoch": 0.10996563573883161,
      "grad_norm": 120.62100982666016,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 20.4593,
      "step": 36
    },
    {
      "epoch": 0.11302023673157693,
      "grad_norm": 102.29236602783203,
      "learning_rate": 1.703270924499656e-05,
      "loss": 21.8171,
      "step": 37
    },
    {
      "epoch": 0.11607483772432226,
      "grad_norm": 94.89315032958984,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 23.6319,
      "step": 38
    },
    {
      "epoch": 0.11912943871706758,
      "grad_norm": 80.66189575195312,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 18.8452,
      "step": 39
    },
    {
      "epoch": 0.12218403970981291,
      "grad_norm": 89.5391616821289,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 20.5874,
      "step": 40
    },
    {
      "epoch": 0.12523864070255822,
      "grad_norm": 81.86558532714844,
      "learning_rate": 8.426519384872733e-06,
      "loss": 20.799,
      "step": 41
    },
    {
      "epoch": 0.12829324169530354,
      "grad_norm": 89.75981140136719,
      "learning_rate": 6.698729810778065e-06,
      "loss": 19.7842,
      "step": 42
    },
    {
      "epoch": 0.13134784268804886,
      "grad_norm": 89.26224517822266,
      "learning_rate": 5.156362923365588e-06,
      "loss": 17.2257,
      "step": 43
    },
    {
      "epoch": 0.1344024436807942,
      "grad_norm": 84.56063842773438,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 17.7931,
      "step": 44
    },
    {
      "epoch": 0.13745704467353953,
      "grad_norm": 77.53307342529297,
      "learning_rate": 2.653493525244721e-06,
      "loss": 19.046,
      "step": 45
    },
    {
      "epoch": 0.14051164566628485,
      "grad_norm": 71.67435455322266,
      "learning_rate": 1.70370868554659e-06,
      "loss": 16.1301,
      "step": 46
    },
    {
      "epoch": 0.14356624665903017,
      "grad_norm": 72.81458282470703,
      "learning_rate": 9.607359798384785e-07,
      "loss": 17.1189,
      "step": 47
    },
    {
      "epoch": 0.1466208476517755,
      "grad_norm": 105.62673950195312,
      "learning_rate": 4.277569313094809e-07,
      "loss": 17.1779,
      "step": 48
    },
    {
      "epoch": 0.14967544864452081,
      "grad_norm": 95.08515167236328,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 20.9969,
      "step": 49
    },
    {
      "epoch": 0.15273004963726614,
      "grad_norm": 113.62215423583984,
      "learning_rate": 0.0,
      "loss": 27.1015,
      "step": 50
    },
    {
      "epoch": 0.15273004963726614,
      "eval_loss": 1.1396163702011108,
      "eval_runtime": 56.656,
      "eval_samples_per_second": 38.937,
      "eval_steps_per_second": 4.872,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.658124417499136e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}