{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.09995002498750624,
  "eval_steps": 500,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": 3.3340563149001086,
      "learning_rate": 0.0,
      "loss": 11.0,
      "step": 1
    },
    {
      "epoch": 0.0,
      "grad_norm": 2.398812329952019,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 10.125,
      "step": 2
    },
    {
      "epoch": 0.0,
      "grad_norm": 2.394322446895115,
      "learning_rate": 0.00011999999999999999,
      "loss": 10.1172,
      "step": 3
    },
    {
      "epoch": 0.0,
      "grad_norm": 1.9958816684399585,
      "learning_rate": 0.00017999999999999998,
      "loss": 9.875,
      "step": 4
    },
    {
      "epoch": 0.0,
      "grad_norm": 1.8270465897882062,
      "learning_rate": 0.00023999999999999998,
      "loss": 9.6641,
      "step": 5
    },
    {
      "epoch": 0.01,
      "grad_norm": 1.7854046471397795,
      "learning_rate": 0.0003,
      "loss": 9.4844,
      "step": 6
    },
    {
      "epoch": 0.01,
      "grad_norm": 1.719416749115252,
      "learning_rate": 0.00035999999999999997,
      "loss": 9.3281,
      "step": 7
    },
    {
      "epoch": 0.01,
      "grad_norm": 1.4637825746112274,
      "learning_rate": 0.00041999999999999996,
      "loss": 9.2109,
      "step": 8
    },
    {
      "epoch": 0.01,
      "grad_norm": 1.4393631015406718,
      "learning_rate": 0.00047999999999999996,
      "loss": 8.9453,
      "step": 9
    },
    {
      "epoch": 0.01,
      "grad_norm": 1.2936734586915988,
      "learning_rate": 0.00054,
      "loss": 8.7109,
      "step": 10
    },
    {
      "epoch": 0.01,
      "grad_norm": 1.0756922378227356,
      "learning_rate": 0.0005999986405514987,
      "loss": 8.4609,
      "step": 11
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.9277829127413892,
      "learning_rate": 0.0005999945622196846,
      "loss": 8.2344,
      "step": 12
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.8084581786682467,
      "learning_rate": 0.0005999877650456265,
      "loss": 8.125,
      "step": 13
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.7635084596900947,
      "learning_rate": 0.000599978249097772,
      "loss": 7.9766,
      "step": 14
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.9186699644247788,
      "learning_rate": 0.0005999660144719463,
      "loss": 7.8555,
      "step": 15
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.6609504256551479,
      "learning_rate": 0.0005999510612913519,
      "loss": 7.7734,
      "step": 16
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.7086232844782971,
      "learning_rate": 0.0005999333897065673,
      "loss": 7.7148,
      "step": 17
    },
    {
      "epoch": 0.02,
      "grad_norm": 16.38048851691348,
      "learning_rate": 0.0005999129998955453,
      "loss": 8.4844,
      "step": 18
    },
    {
      "epoch": 0.02,
      "grad_norm": 1.3057527590449889,
      "learning_rate": 0.0005998898920636111,
      "loss": 7.7539,
      "step": 19
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.6966048242948986,
      "learning_rate": 0.00059986406644346,
      "loss": 7.75,
      "step": 20
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.6348089115348993,
      "learning_rate": 0.0005998355232951559,
      "loss": 7.7031,
      "step": 21
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.7829163518610293,
      "learning_rate": 0.0005998042629061279,
      "loss": 7.6992,
      "step": 22
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.5900591778980369,
      "learning_rate": 0.0005997702855911678,
      "loss": 7.6016,
      "step": 23
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.4655170213064256,
      "learning_rate": 0.0005997335916924268,
      "loss": 7.5977,
      "step": 24
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.6287348258915756,
      "learning_rate": 0.0005996941815794121,
      "loss": 7.5586,
      "step": 25
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.6137321903884564,
      "learning_rate": 0.0005996520556489831,
      "loss": 7.5898,
      "step": 26
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.44962562710631065,
      "learning_rate": 0.0005996072143253473,
      "loss": 7.4336,
      "step": 27
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.46130046454703316,
      "learning_rate": 0.0005995596580600566,
      "loss": 7.4023,
      "step": 28
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.4686712675731326,
      "learning_rate": 0.0005995093873320018,
      "loss": 7.3789,
      "step": 29
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.4672147564288997,
      "learning_rate": 0.0005994564026474087,
      "loss": 7.3711,
      "step": 30
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.40408354581233474,
      "learning_rate": 0.0005994007045398324,
      "loss": 7.3672,
      "step": 31
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.46032146732584733,
      "learning_rate": 0.0005993422935701524,
      "loss": 7.3477,
      "step": 32
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.4765534634593268,
      "learning_rate": 0.0005992811703265664,
      "loss": 7.3555,
      "step": 33
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.46208489386235113,
      "learning_rate": 0.0005992173354245849,
      "loss": 7.3047,
      "step": 34
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.2956144524964961,
      "learning_rate": 0.0005991507895070244,
      "loss": 7.3125,
      "step": 35
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.4834645389868856,
      "learning_rate": 0.0005990815332440017,
      "loss": 7.207,
      "step": 36
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.4411831350968505,
      "learning_rate": 0.0005990095673329266,
      "loss": 7.1758,
      "step": 37
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.24809297748968667,
      "learning_rate": 0.0005989348924984951,
      "loss": 7.2188,
      "step": 38
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.39402988416840584,
      "learning_rate": 0.0005988575094926817,
      "loss": 7.1953,
      "step": 39
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.3868345222189167,
      "learning_rate": 0.0005987774190947328,
      "loss": 7.1641,
      "step": 40
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.3777261230135448,
      "learning_rate": 0.0005986946221111575,
      "loss": 7.1328,
      "step": 41
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.4687511444077827,
      "learning_rate": 0.0005986091193757206,
      "loss": 7.0898,
      "step": 42
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.34935796211612463,
      "learning_rate": 0.0005985209117494337,
      "loss": 7.1367,
      "step": 43
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.38764476686849886,
      "learning_rate": 0.0005984300001205466,
      "loss": 7.125,
      "step": 44
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.3956487898882936,
      "learning_rate": 0.0005983363854045386,
      "loss": 7.1094,
      "step": 45
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.31140257544677513,
      "learning_rate": 0.0005982400685441084,
      "loss": 7.0898,
      "step": 46
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.3664476570531787,
      "learning_rate": 0.0005981410505091662,
      "loss": 7.0664,
      "step": 47
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.31891741142945207,
      "learning_rate": 0.0005980393322968223,
      "loss": 7.0273,
      "step": 48
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.4533529037337155,
      "learning_rate": 0.0005979349149313778,
      "loss": 7.0586,
      "step": 49
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.30532331638835586,
      "learning_rate": 0.0005978277994643147,
      "loss": 7.0195,
      "step": 50
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.6501991746260075,
      "learning_rate": 0.0005977179869742844,
      "loss": 6.9648,
      "step": 51
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.43904455901717926,
      "learning_rate": 0.0005976054785670975,
      "loss": 6.9805,
      "step": 52
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.4826001598483571,
      "learning_rate": 0.0005974902753757124,
      "loss": 6.9297,
      "step": 53
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.2924998027034648,
      "learning_rate": 0.000597372378560224,
      "loss": 6.8984,
      "step": 54
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.4439033666380787,
      "learning_rate": 0.0005972517893078517,
      "loss": 6.8945,
      "step": 55
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.6135914255073411,
      "learning_rate": 0.0005971285088329284,
      "loss": 6.9727,
      "step": 56
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.5575686565598483,
      "learning_rate": 0.0005970025383768866,
      "loss": 6.9219,
      "step": 57
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.4820951675994578,
      "learning_rate": 0.0005968738792082478,
      "loss": 6.8516,
      "step": 58
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.40164190019465584,
      "learning_rate": 0.0005967425326226082,
      "loss": 6.7734,
      "step": 59
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.46129863945181293,
      "learning_rate": 0.0005966084999426265,
      "loss": 6.8125,
      "step": 60
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.33322355827118677,
      "learning_rate": 0.0005964717825180101,
      "loss": 6.7891,
      "step": 61
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.3847525153855558,
      "learning_rate": 0.0005963323817255024,
      "loss": 6.8242,
      "step": 62
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.3384433591375982,
      "learning_rate": 0.0005961902989688674,
      "loss": 6.707,
      "step": 63
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.3937003195165685,
      "learning_rate": 0.000596045535678877,
      "loss": 6.8203,
      "step": 64
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.35423488053528107,
      "learning_rate": 0.0005958980933132962,
      "loss": 6.7383,
      "step": 65
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.36005939745315396,
      "learning_rate": 0.0005957479733568675,
      "loss": 6.7109,
      "step": 66
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.3499278317706933,
      "learning_rate": 0.0005955951773212976,
      "loss": 6.7266,
      "step": 67
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.3708385192137018,
      "learning_rate": 0.0005954397067452407,
      "loss": 6.7617,
      "step": 68
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.3775657656205869,
      "learning_rate": 0.0005952815631942839,
      "loss": 6.7148,
      "step": 69
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.3040083750375816,
      "learning_rate": 0.0005951207482609307,
      "loss": 6.5938,
      "step": 70
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.3443020808841468,
      "learning_rate": 0.0005949572635645861,
      "loss": 6.6523,
      "step": 71
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.3520066316939,
      "learning_rate": 0.0005947911107515389,
      "loss": 6.6211,
      "step": 72
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.3739040572679613,
      "learning_rate": 0.0005946222914949462,
      "loss": 6.5547,
      "step": 73
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.34890731989025553,
      "learning_rate": 0.000594450807494816,
      "loss": 6.5859,
      "step": 74
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.40910932350136514,
      "learning_rate": 0.0005942766604779903,
      "loss": 6.5547,
      "step": 75
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.5698342865852906,
      "learning_rate": 0.0005940998521981274,
      "loss": 6.457,
      "step": 76
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.5179452709555474,
      "learning_rate": 0.0005939203844356852,
      "loss": 6.5547,
      "step": 77
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.5222512938673792,
      "learning_rate": 0.0005937382589979016,
      "loss": 6.5039,
      "step": 78
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.5682332793686307,
      "learning_rate": 0.0005935534777187781,
      "loss": 6.5547,
      "step": 79
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.3869287710460676,
      "learning_rate": 0.0005933660424590598,
      "loss": 6.5156,
      "step": 80
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.3078211032807607,
      "learning_rate": 0.000593175955106218,
      "loss": 6.4258,
      "step": 81
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.3611357511872241,
      "learning_rate": 0.00059298321757443,
      "loss": 6.4727,
      "step": 82
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.29633467844266953,
      "learning_rate": 0.0005927878318045608,
      "loss": 6.3281,
      "step": 83
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.3257574200776832,
      "learning_rate": 0.0005925897997641426,
      "loss": 6.3203,
      "step": 84
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.2824054533852328,
      "learning_rate": 0.0005923891234473562,
      "loss": 6.4062,
      "step": 85
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.3056199770204573,
      "learning_rate": 0.0005921858048750097,
      "loss": 6.3984,
      "step": 86
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.2966438824341908,
      "learning_rate": 0.000591979846094519,
      "loss": 6.3555,
      "step": 87
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.32782438676663733,
      "learning_rate": 0.0005917712491798866,
      "loss": 6.4023,
      "step": 88
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.3538316399620157,
      "learning_rate": 0.0005915600162316811,
      "loss": 6.2812,
      "step": 89
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.375858298192913,
      "learning_rate": 0.0005913461493770162,
      "loss": 6.3086,
      "step": 90
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.5189251339815161,
      "learning_rate": 0.0005911296507695284,
      "loss": 6.2812,
      "step": 91
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.6304909542669104,
      "learning_rate": 0.0005909105225893564,
      "loss": 6.2969,
      "step": 92
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.4655662819622591,
      "learning_rate": 0.0005906887670431187,
      "loss": 6.1953,
      "step": 93
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.39035390983920965,
      "learning_rate": 0.000590464386363891,
      "loss": 6.2617,
      "step": 94
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.4918417851770978,
      "learning_rate": 0.0005902373828111843,
      "loss": 6.2148,
      "step": 95
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.35670770889552555,
      "learning_rate": 0.0005900077586709219,
      "loss": 6.2461,
      "step": 96
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.4177985869939347,
      "learning_rate": 0.0005897755162554163,
      "loss": 6.1797,
      "step": 97
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.3742471130708234,
      "learning_rate": 0.000589540657903346,
      "loss": 6.1406,
      "step": 98
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.28627666723978284,
      "learning_rate": 0.0005893031859797322,
      "loss": 6.2031,
      "step": 99
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.32238563846046103,
      "learning_rate": 0.0005890631028759143,
      "loss": 6.0625,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 1000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}