{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.001,
  "eval_steps": 500,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1e-05,
      "grad_norm": 1.4574347149106512,
      "learning_rate": 3e-06,
      "loss": 10.8576,
      "step": 1
    },
    {
      "epoch": 2e-05,
      "grad_norm": 1.4482443114713344,
      "learning_rate": 6e-06,
      "loss": 10.8575,
      "step": 2
    },
    {
      "epoch": 3e-05,
      "grad_norm": 1.4642003089092823,
      "learning_rate": 9e-06,
      "loss": 10.8576,
      "step": 3
    },
    {
      "epoch": 4e-05,
      "grad_norm": 1.4485584072048276,
      "learning_rate": 1.2e-05,
      "loss": 10.8569,
      "step": 4
    },
    {
      "epoch": 5e-05,
      "grad_norm": 1.4574321657516995,
      "learning_rate": 1.5e-05,
      "loss": 10.8527,
      "step": 5
    },
    {
      "epoch": 6e-05,
      "grad_norm": 1.4585537094370684,
      "learning_rate": 1.8e-05,
      "loss": 10.8518,
      "step": 6
    },
    {
      "epoch": 7e-05,
      "grad_norm": 1.4183780170798466,
      "learning_rate": 2.1000000000000002e-05,
      "loss": 10.8383,
      "step": 7
    },
    {
      "epoch": 8e-05,
      "grad_norm": 1.286272643239374,
      "learning_rate": 2.4e-05,
      "loss": 10.8119,
      "step": 8
    },
    {
      "epoch": 9e-05,
      "grad_norm": 1.246364249616181,
      "learning_rate": 2.7e-05,
      "loss": 10.8063,
      "step": 9
    },
    {
      "epoch": 0.0001,
      "grad_norm": 1.2300728857390288,
      "learning_rate": 3e-05,
      "loss": 10.7913,
      "step": 10
    },
    {
      "epoch": 0.00011,
      "grad_norm": 1.1639358472437353,
      "learning_rate": 3.2999999999999996e-05,
      "loss": 10.7756,
      "step": 11
    },
    {
      "epoch": 0.00012,
      "grad_norm": 1.1455069336623074,
      "learning_rate": 3.6e-05,
      "loss": 10.7622,
      "step": 12
    },
    {
      "epoch": 0.00013,
      "grad_norm": 1.104711768149659,
      "learning_rate": 3.9e-05,
      "loss": 10.7415,
      "step": 13
    },
    {
      "epoch": 0.00014,
      "grad_norm": 1.0916409179063882,
      "learning_rate": 4.2000000000000004e-05,
      "loss": 10.7296,
      "step": 14
    },
    {
      "epoch": 0.00015,
      "grad_norm": 1.0723152562842644,
      "learning_rate": 4.4999999999999996e-05,
      "loss": 10.7183,
      "step": 15
    },
    {
      "epoch": 0.00016,
      "grad_norm": 1.0297043497697835,
      "learning_rate": 4.8e-05,
      "loss": 10.7,
      "step": 16
    },
    {
      "epoch": 0.00017,
      "grad_norm": 0.9882210863590011,
      "learning_rate": 5.1000000000000006e-05,
      "loss": 10.6838,
      "step": 17
    },
    {
      "epoch": 0.00018,
      "grad_norm": 0.966791538351231,
      "learning_rate": 5.4e-05,
      "loss": 10.6659,
      "step": 18
    },
    {
      "epoch": 0.00019,
      "grad_norm": 0.9358769725124266,
      "learning_rate": 5.7e-05,
      "loss": 10.6509,
      "step": 19
    },
    {
      "epoch": 0.0002,
      "grad_norm": 0.9313245306892226,
      "learning_rate": 6e-05,
      "loss": 10.638,
      "step": 20
    },
    {
      "epoch": 0.00021,
      "grad_norm": 0.9056933112957348,
      "learning_rate": 6.3e-05,
      "loss": 10.6235,
      "step": 21
    },
    {
      "epoch": 0.00022,
      "grad_norm": 0.9013277325687112,
      "learning_rate": 6.599999999999999e-05,
      "loss": 10.6069,
      "step": 22
    },
    {
      "epoch": 0.00023,
      "grad_norm": 0.8982860560109955,
      "learning_rate": 6.9e-05,
      "loss": 10.5935,
      "step": 23
    },
    {
      "epoch": 0.00024,
      "grad_norm": 0.8941575476865218,
      "learning_rate": 7.2e-05,
      "loss": 10.5802,
      "step": 24
    },
    {
      "epoch": 0.00025,
      "grad_norm": 0.8955811690787305,
      "learning_rate": 7.500000000000001e-05,
      "loss": 10.5652,
      "step": 25
    },
    {
      "epoch": 0.00026,
      "grad_norm": 0.9005417636666915,
      "learning_rate": 7.8e-05,
      "loss": 10.5507,
      "step": 26
    },
    {
      "epoch": 0.00027,
      "grad_norm": 0.8929394290279239,
      "learning_rate": 8.1e-05,
      "loss": 10.5379,
      "step": 27
    },
    {
      "epoch": 0.00028,
      "grad_norm": 0.892893092343029,
      "learning_rate": 8.400000000000001e-05,
      "loss": 10.5234,
      "step": 28
    },
    {
      "epoch": 0.00029,
      "grad_norm": 0.8951984198637418,
      "learning_rate": 8.7e-05,
      "loss": 10.5078,
      "step": 29
    },
    {
      "epoch": 0.0003,
      "grad_norm": 0.901037080772758,
      "learning_rate": 8.999999999999999e-05,
      "loss": 10.4913,
      "step": 30
    },
    {
      "epoch": 0.00031,
      "grad_norm": 0.899757879512845,
      "learning_rate": 9.3e-05,
      "loss": 10.4759,
      "step": 31
    },
    {
      "epoch": 0.00032,
      "grad_norm": 0.8963415000423225,
      "learning_rate": 9.6e-05,
      "loss": 10.4612,
      "step": 32
    },
    {
      "epoch": 0.00033,
      "grad_norm": 0.8971468155029705,
      "learning_rate": 9.900000000000001e-05,
      "loss": 10.4429,
      "step": 33
    },
    {
      "epoch": 0.00034,
      "grad_norm": 0.8964368558533553,
      "learning_rate": 0.00010200000000000001,
      "loss": 10.426,
      "step": 34
    },
    {
      "epoch": 0.00035,
      "grad_norm": 0.9007340868108898,
      "learning_rate": 0.00010500000000000002,
      "loss": 10.4083,
      "step": 35
    },
    {
      "epoch": 0.00036,
      "grad_norm": 0.8977475222905443,
      "learning_rate": 0.000108,
      "loss": 10.3895,
      "step": 36
    },
    {
      "epoch": 0.00037,
      "grad_norm": 0.8929896912849768,
      "learning_rate": 0.000111,
      "loss": 10.3721,
      "step": 37
    },
    {
      "epoch": 0.00038,
      "grad_norm": 0.8925204062685723,
      "learning_rate": 0.000114,
      "loss": 10.3515,
      "step": 38
    },
    {
      "epoch": 0.00039,
      "grad_norm": 0.8947925451707294,
      "learning_rate": 0.000117,
      "loss": 10.3314,
      "step": 39
    },
    {
      "epoch": 0.0004,
      "grad_norm": 0.8990961452390619,
      "learning_rate": 0.00012,
      "loss": 10.3088,
      "step": 40
    },
    {
      "epoch": 0.00041,
      "grad_norm": 0.8951984584897338,
      "learning_rate": 0.000123,
      "loss": 10.2891,
      "step": 41
    },
    {
      "epoch": 0.00042,
      "grad_norm": 0.8942493459254965,
      "learning_rate": 0.000126,
      "loss": 10.2679,
      "step": 42
    },
    {
      "epoch": 0.00043,
      "grad_norm": 0.8983684110980745,
      "learning_rate": 0.000129,
      "loss": 10.243,
      "step": 43
    },
    {
      "epoch": 0.00044,
      "grad_norm": 0.8985869855625094,
      "learning_rate": 0.00013199999999999998,
      "loss": 10.2206,
      "step": 44
    },
    {
      "epoch": 0.00045,
      "grad_norm": 0.899962207774676,
      "learning_rate": 0.000135,
      "loss": 10.1962,
      "step": 45
    },
    {
      "epoch": 0.00046,
      "grad_norm": 0.8914153211826606,
      "learning_rate": 0.000138,
      "loss": 10.1735,
      "step": 46
    },
    {
      "epoch": 0.00047,
      "grad_norm": 0.8980761529388176,
      "learning_rate": 0.000141,
      "loss": 10.146,
      "step": 47
    },
    {
      "epoch": 0.00048,
      "grad_norm": 0.9007540011988344,
      "learning_rate": 0.000144,
      "loss": 10.1222,
      "step": 48
    },
    {
      "epoch": 0.00049,
      "grad_norm": 0.896344611975273,
      "learning_rate": 0.000147,
      "loss": 10.0974,
      "step": 49
    },
    {
      "epoch": 0.0005,
      "grad_norm": 0.9015948414141589,
      "learning_rate": 0.00015000000000000001,
      "loss": 10.0706,
      "step": 50
    },
    {
      "epoch": 0.00051,
      "grad_norm": 0.9016102183936469,
      "learning_rate": 0.000153,
      "loss": 10.0433,
      "step": 51
    },
    {
      "epoch": 0.00052,
      "grad_norm": 0.8993585803645682,
      "learning_rate": 0.000156,
      "loss": 10.0158,
      "step": 52
    },
    {
      "epoch": 0.00053,
      "grad_norm": 0.8901736410301992,
      "learning_rate": 0.000159,
      "loss": 9.9917,
      "step": 53
    },
    {
      "epoch": 0.00054,
      "grad_norm": 0.9083328832659304,
      "learning_rate": 0.000162,
      "loss": 9.9578,
      "step": 54
    },
    {
      "epoch": 0.00055,
      "grad_norm": 0.8975570632788401,
      "learning_rate": 0.000165,
      "loss": 9.9337,
      "step": 55
    },
    {
      "epoch": 0.00056,
      "grad_norm": 0.8924582131156604,
      "learning_rate": 0.00016800000000000002,
      "loss": 9.904,
      "step": 56
    },
    {
      "epoch": 0.00057,
      "grad_norm": 0.8937117244788118,
      "learning_rate": 0.000171,
      "loss": 9.8765,
      "step": 57
    },
    {
      "epoch": 0.00058,
      "grad_norm": 0.8912667557674427,
      "learning_rate": 0.000174,
      "loss": 9.8514,
      "step": 58
    },
    {
      "epoch": 0.00059,
      "grad_norm": 0.899445317105929,
      "learning_rate": 0.000177,
      "loss": 9.819,
      "step": 59
    },
    {
      "epoch": 0.0006,
      "grad_norm": 0.8827247644946434,
      "learning_rate": 0.00017999999999999998,
      "loss": 9.7927,
      "step": 60
    },
    {
      "epoch": 0.00061,
      "grad_norm": 0.8856006114384688,
      "learning_rate": 0.000183,
      "loss": 9.7645,
      "step": 61
    },
    {
      "epoch": 0.00062,
      "grad_norm": 0.8887861476206323,
      "learning_rate": 0.000186,
      "loss": 9.7329,
      "step": 62
    },
    {
      "epoch": 0.00063,
      "grad_norm": 0.8870948257177573,
      "learning_rate": 0.000189,
      "loss": 9.7044,
      "step": 63
    },
    {
      "epoch": 0.00064,
      "grad_norm": 0.8854465518366905,
      "learning_rate": 0.000192,
      "loss": 9.6741,
      "step": 64
    },
    {
      "epoch": 0.00065,
      "grad_norm": 0.8913964558604793,
      "learning_rate": 0.00019500000000000002,
      "loss": 9.6417,
      "step": 65
    },
    {
      "epoch": 0.00066,
      "grad_norm": 0.8860459843940278,
      "learning_rate": 0.00019800000000000002,
      "loss": 9.6181,
      "step": 66
    },
    {
      "epoch": 0.00067,
      "grad_norm": 0.8855602986146246,
      "learning_rate": 0.000201,
      "loss": 9.5886,
      "step": 67
    },
    {
      "epoch": 0.00068,
      "grad_norm": 0.89182613307124,
      "learning_rate": 0.00020400000000000003,
      "loss": 9.5554,
      "step": 68
    },
    {
      "epoch": 0.00069,
      "grad_norm": 0.8788147221752699,
      "learning_rate": 0.00020700000000000002,
      "loss": 9.5276,
      "step": 69
    },
    {
      "epoch": 0.0007,
      "grad_norm": 0.89013480384513,
      "learning_rate": 0.00021000000000000004,
      "loss": 9.4911,
      "step": 70
    },
    {
      "epoch": 0.00071,
      "grad_norm": 0.8832608390392153,
      "learning_rate": 0.00021299999999999997,
      "loss": 9.4637,
      "step": 71
    },
    {
      "epoch": 0.00072,
      "grad_norm": 0.8850551460752941,
      "learning_rate": 0.000216,
      "loss": 9.4371,
      "step": 72
    },
    {
      "epoch": 0.00073,
      "grad_norm": 0.8854430272945265,
      "learning_rate": 0.00021899999999999998,
      "loss": 9.4029,
      "step": 73
    },
    {
      "epoch": 0.00074,
      "grad_norm": 0.881388508551569,
      "learning_rate": 0.000222,
      "loss": 9.3766,
      "step": 74
    },
    {
      "epoch": 0.00075,
      "grad_norm": 0.8804734413831777,
      "learning_rate": 0.000225,
      "loss": 9.3413,
      "step": 75
    },
    {
      "epoch": 0.00076,
      "grad_norm": 0.891470306594604,
      "learning_rate": 0.000228,
      "loss": 9.3136,
      "step": 76
    },
    {
      "epoch": 0.00077,
      "grad_norm": 0.8835806398314079,
      "learning_rate": 0.000231,
      "loss": 9.2843,
      "step": 77
    },
    {
      "epoch": 0.00078,
      "grad_norm": 0.8778470434340718,
      "learning_rate": 0.000234,
      "loss": 9.2534,
      "step": 78
    },
    {
      "epoch": 0.00079,
      "grad_norm": 0.8849142747864956,
      "learning_rate": 0.00023700000000000001,
      "loss": 9.2174,
      "step": 79
    },
    {
      "epoch": 0.0008,
      "grad_norm": 0.8831859916332898,
      "learning_rate": 0.00024,
      "loss": 9.1835,
      "step": 80
    },
    {
      "epoch": 0.00081,
      "grad_norm": 0.8883671155155716,
      "learning_rate": 0.00024300000000000002,
      "loss": 9.1516,
      "step": 81
    },
    {
      "epoch": 0.00082,
      "grad_norm": 0.8919005998406658,
      "learning_rate": 0.000246,
      "loss": 9.1278,
      "step": 82
    },
    {
      "epoch": 0.00083,
      "grad_norm": 0.8904109612127737,
      "learning_rate": 0.00024900000000000004,
      "loss": 9.0926,
      "step": 83
    },
    {
      "epoch": 0.00084,
      "grad_norm": 0.8922919840670517,
      "learning_rate": 0.000252,
      "loss": 9.0627,
      "step": 84
    },
    {
      "epoch": 0.00085,
      "grad_norm": 0.8851770394662277,
      "learning_rate": 0.000255,
      "loss": 9.0389,
      "step": 85
    },
    {
      "epoch": 0.00086,
      "grad_norm": 0.892287423580011,
      "learning_rate": 0.000258,
      "loss": 9.0029,
      "step": 86
    },
    {
      "epoch": 0.00087,
      "grad_norm": 0.8819038715566697,
      "learning_rate": 0.000261,
      "loss": 8.9819,
      "step": 87
    },
    {
      "epoch": 0.00088,
      "grad_norm": 0.8825670273484024,
      "learning_rate": 0.00026399999999999997,
      "loss": 8.9452,
      "step": 88
    },
    {
      "epoch": 0.00089,
      "grad_norm": 0.87734677010751,
      "learning_rate": 0.000267,
      "loss": 8.92,
      "step": 89
    },
    {
      "epoch": 0.0009,
      "grad_norm": 0.8800982265590005,
      "learning_rate": 0.00027,
      "loss": 8.8935,
      "step": 90
    },
    {
      "epoch": 0.00091,
      "grad_norm": 0.8751738333241339,
      "learning_rate": 0.000273,
      "loss": 8.8635,
      "step": 91
    },
    {
      "epoch": 0.00092,
      "grad_norm": 0.8805112062017327,
      "learning_rate": 0.000276,
      "loss": 8.8313,
      "step": 92
    },
    {
      "epoch": 0.00093,
      "grad_norm": 0.8782110873043902,
      "learning_rate": 0.000279,
      "loss": 8.8018,
      "step": 93
    },
    {
      "epoch": 0.00094,
      "grad_norm": 0.8793675998346636,
      "learning_rate": 0.000282,
      "loss": 8.7769,
      "step": 94
    },
    {
      "epoch": 0.00095,
      "grad_norm": 0.8739437022686155,
      "learning_rate": 0.000285,
      "loss": 8.7468,
      "step": 95
    },
    {
      "epoch": 0.00096,
      "grad_norm": 0.8792093745975903,
      "learning_rate": 0.000288,
      "loss": 8.7137,
      "step": 96
    },
    {
      "epoch": 0.00097,
      "grad_norm": 0.8733827683228169,
      "learning_rate": 0.000291,
      "loss": 8.6904,
      "step": 97
    },
    {
      "epoch": 0.00098,
      "grad_norm": 0.8720585520116785,
      "learning_rate": 0.000294,
      "loss": 8.6531,
      "step": 98
    },
    {
      "epoch": 0.00099,
      "grad_norm": 0.8706078195741852,
      "learning_rate": 0.000297,
      "loss": 8.6333,
      "step": 99
    },
    {
      "epoch": 0.001,
      "grad_norm": 0.868963072522692,
      "learning_rate": 0.00030000000000000003,
      "loss": 8.6078,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3964364285542400.0,
  "train_batch_size": 256,
  "trial_name": null,
  "trial_params": null
}