{
  "best_metric": 1.9137802124023438,
  "best_model_checkpoint": "/home/datta0/models/lora_final/Qwen2-7B_pct_reverse_r16/checkpoint-368",
  "epoch": 0.9996779388083736,
  "eval_steps": 8,
  "global_step": 388,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0025764895330112722,
      "grad_norm": 6.036957740783691,
      "learning_rate": 1.25e-05,
      "loss": 2.0463,
      "step": 1
    },
    {
      "epoch": 0.010305958132045089,
      "grad_norm": 4.019509315490723,
      "learning_rate": 5e-05,
      "loss": 2.1076,
      "step": 4
    },
    {
      "epoch": 0.020611916264090178,
      "grad_norm": 10.257355690002441,
      "learning_rate": 0.0001,
      "loss": 2.0341,
      "step": 8
    },
    {
      "epoch": 0.020611916264090178,
      "eval_loss": 1.9528307914733887,
      "eval_runtime": 45.8734,
      "eval_samples_per_second": 5.341,
      "eval_steps_per_second": 2.681,
      "step": 8
    },
    {
      "epoch": 0.030917874396135265,
      "grad_norm": 2.879976272583008,
      "learning_rate": 9.997266286704631e-05,
      "loss": 2.0054,
      "step": 12
    },
    {
      "epoch": 0.041223832528180356,
      "grad_norm": 2.654456615447998,
      "learning_rate": 9.989068136093873e-05,
      "loss": 2.0078,
      "step": 16
    },
    {
      "epoch": 0.041223832528180356,
      "eval_loss": 1.9550031423568726,
      "eval_runtime": 44.7469,
      "eval_samples_per_second": 5.475,
      "eval_steps_per_second": 2.749,
      "step": 16
    },
    {
      "epoch": 0.05152979066022544,
      "grad_norm": 2.678391695022583,
      "learning_rate": 9.975414512725057e-05,
      "loss": 1.9626,
      "step": 20
    },
    {
      "epoch": 0.06183574879227053,
      "grad_norm": 3.189049005508423,
      "learning_rate": 9.956320346634876e-05,
      "loss": 2.0269,
      "step": 24
    },
    {
      "epoch": 0.06183574879227053,
      "eval_loss": 1.9356740713119507,
      "eval_runtime": 45.2011,
      "eval_samples_per_second": 5.42,
      "eval_steps_per_second": 2.721,
      "step": 24
    },
    {
      "epoch": 0.07214170692431562,
      "grad_norm": 2.709564447402954,
      "learning_rate": 9.931806517013612e-05,
      "loss": 2.0135,
      "step": 28
    },
    {
      "epoch": 0.08244766505636071,
      "grad_norm": 2.585526466369629,
      "learning_rate": 9.901899829374047e-05,
      "loss": 1.9472,
      "step": 32
    },
    {
      "epoch": 0.08244766505636071,
      "eval_loss": 1.9405081272125244,
      "eval_runtime": 81.9576,
      "eval_samples_per_second": 2.989,
      "eval_steps_per_second": 1.501,
      "step": 32
    },
    {
      "epoch": 0.0927536231884058,
      "grad_norm": 2.4268321990966797,
      "learning_rate": 9.86663298624003e-05,
      "loss": 2.0166,
      "step": 36
    },
    {
      "epoch": 0.10305958132045089,
      "grad_norm": 2.597174882888794,
      "learning_rate": 9.826044551386744e-05,
      "loss": 1.993,
      "step": 40
    },
    {
      "epoch": 0.10305958132045089,
      "eval_loss": 1.9380648136138916,
      "eval_runtime": 45.2717,
      "eval_samples_per_second": 5.412,
      "eval_steps_per_second": 2.717,
      "step": 40
    },
    {
      "epoch": 0.11336553945249597,
      "grad_norm": 2.8009908199310303,
      "learning_rate": 9.780178907671789e-05,
      "loss": 1.9178,
      "step": 44
    },
    {
      "epoch": 0.12367149758454106,
      "grad_norm": 2.5525169372558594,
      "learning_rate": 9.729086208503174e-05,
      "loss": 1.9936,
      "step": 48
    },
    {
      "epoch": 0.12367149758454106,
      "eval_loss": 1.9402122497558594,
      "eval_runtime": 43.9726,
      "eval_samples_per_second": 5.572,
      "eval_steps_per_second": 2.797,
      "step": 48
    },
    {
      "epoch": 0.13397745571658615,
      "grad_norm": 2.791377305984497,
      "learning_rate": 9.672822322997305e-05,
      "loss": 1.9291,
      "step": 52
    },
    {
      "epoch": 0.14428341384863125,
      "grad_norm": 2.8142764568328857,
      "learning_rate": 9.611448774886924e-05,
      "loss": 2.0043,
      "step": 56
    },
    {
      "epoch": 0.14428341384863125,
      "eval_loss": 1.9410146474838257,
      "eval_runtime": 44.3537,
      "eval_samples_per_second": 5.524,
      "eval_steps_per_second": 2.773,
      "step": 56
    },
    {
      "epoch": 0.15458937198067632,
      "grad_norm": 3.0118305683135986,
      "learning_rate": 9.545032675245813e-05,
      "loss": 1.9752,
      "step": 60
    },
    {
      "epoch": 0.16489533011272142,
      "grad_norm": 3.247464656829834,
      "learning_rate": 9.473646649103818e-05,
      "loss": 1.9356,
      "step": 64
    },
    {
      "epoch": 0.16489533011272142,
      "eval_loss": 1.9368629455566406,
      "eval_runtime": 44.9235,
      "eval_samples_per_second": 5.454,
      "eval_steps_per_second": 2.738,
      "step": 64
    },
    {
      "epoch": 0.1752012882447665,
      "grad_norm": 2.7969186305999756,
      "learning_rate": 9.397368756032445e-05,
      "loss": 2.0485,
      "step": 68
    },
    {
      "epoch": 0.1855072463768116,
      "grad_norm": 2.8225085735321045,
      "learning_rate": 9.316282404787871e-05,
      "loss": 1.9953,
      "step": 72
    },
    {
      "epoch": 0.1855072463768116,
      "eval_loss": 1.9395699501037598,
      "eval_runtime": 77.6661,
      "eval_samples_per_second": 3.155,
      "eval_steps_per_second": 1.584,
      "step": 72
    },
    {
      "epoch": 0.19581320450885667,
      "grad_norm": 2.9584035873413086,
      "learning_rate": 9.230476262104677e-05,
      "loss": 2.0198,
      "step": 76
    },
    {
      "epoch": 0.20611916264090177,
      "grad_norm": 2.4394288063049316,
      "learning_rate": 9.140044155740101e-05,
      "loss": 2.0184,
      "step": 80
    },
    {
      "epoch": 0.20611916264090177,
      "eval_loss": 1.940533995628357,
      "eval_runtime": 43.9748,
      "eval_samples_per_second": 5.571,
      "eval_steps_per_second": 2.797,
      "step": 80
    },
    {
      "epoch": 0.21642512077294687,
      "grad_norm": 2.9622068405151367,
      "learning_rate": 9.045084971874738e-05,
      "loss": 2.0708,
      "step": 84
    },
    {
      "epoch": 0.22673107890499195,
      "grad_norm": 3.059290647506714,
      "learning_rate": 8.945702546981969e-05,
      "loss": 1.995,
      "step": 88
    },
    {
      "epoch": 0.22673107890499195,
      "eval_loss": 1.940983772277832,
      "eval_runtime": 44.7809,
      "eval_samples_per_second": 5.471,
      "eval_steps_per_second": 2.747,
      "step": 88
    },
    {
      "epoch": 0.23703703703703705,
      "grad_norm": 2.5659518241882324,
      "learning_rate": 8.842005554284296e-05,
      "loss": 1.9141,
      "step": 92
    },
    {
      "epoch": 0.24734299516908212,
      "grad_norm": 2.770142078399658,
      "learning_rate": 8.73410738492077e-05,
      "loss": 1.9307,
      "step": 96
    },
    {
      "epoch": 0.24734299516908212,
      "eval_loss": 1.9407052993774414,
      "eval_runtime": 44.3926,
      "eval_samples_per_second": 5.519,
      "eval_steps_per_second": 2.771,
      "step": 96
    },
    {
      "epoch": 0.2576489533011272,
      "grad_norm": 2.419646978378296,
      "learning_rate": 8.622126023955446e-05,
      "loss": 2.0221,
      "step": 100
    },
    {
      "epoch": 0.2679549114331723,
      "grad_norm": 2.4743905067443848,
      "learning_rate": 8.506183921362443e-05,
      "loss": 2.0037,
      "step": 104
    },
    {
      "epoch": 0.2679549114331723,
      "eval_loss": 1.9413758516311646,
      "eval_runtime": 43.7595,
      "eval_samples_per_second": 5.599,
      "eval_steps_per_second": 2.811,
      "step": 104
    },
    {
      "epoch": 0.2782608695652174,
      "grad_norm": 2.4670968055725098,
      "learning_rate": 8.386407858128706e-05,
      "loss": 2.0279,
      "step": 108
    },
    {
      "epoch": 0.2885668276972625,
      "grad_norm": 2.8095762729644775,
      "learning_rate": 8.262928807620843e-05,
      "loss": 1.889,
      "step": 112
    },
    {
      "epoch": 0.2885668276972625,
      "eval_loss": 1.939664363861084,
      "eval_runtime": 79.1358,
      "eval_samples_per_second": 3.096,
      "eval_steps_per_second": 1.554,
      "step": 112
    },
    {
      "epoch": 0.29887278582930754,
      "grad_norm": 2.5172698497772217,
      "learning_rate": 8.135881792367686e-05,
      "loss": 2.0408,
      "step": 116
    },
    {
      "epoch": 0.30917874396135264,
      "grad_norm": 2.727034330368042,
      "learning_rate": 8.005405736415126e-05,
      "loss": 1.9455,
      "step": 120
    },
    {
      "epoch": 0.30917874396135264,
      "eval_loss": 1.9401391744613647,
      "eval_runtime": 44.1305,
      "eval_samples_per_second": 5.552,
      "eval_steps_per_second": 2.787,
      "step": 120
    },
    {
      "epoch": 0.31948470209339774,
      "grad_norm": 2.9831223487854004,
      "learning_rate": 7.871643313414718e-05,
      "loss": 2.032,
      "step": 124
    },
    {
      "epoch": 0.32979066022544284,
      "grad_norm": 2.715611457824707,
      "learning_rate": 7.734740790612136e-05,
      "loss": 1.9789,
      "step": 128
    },
    {
      "epoch": 0.32979066022544284,
      "eval_loss": 1.9438152313232422,
      "eval_runtime": 43.6559,
      "eval_samples_per_second": 5.612,
      "eval_steps_per_second": 2.817,
      "step": 128
    },
    {
      "epoch": 0.34009661835748795,
      "grad_norm": 2.9471704959869385,
      "learning_rate": 7.594847868906076e-05,
      "loss": 1.9841,
      "step": 132
    },
    {
      "epoch": 0.350402576489533,
      "grad_norm": 2.823962926864624,
      "learning_rate": 7.452117519152542e-05,
      "loss": 1.9642,
      "step": 136
    },
    {
      "epoch": 0.350402576489533,
      "eval_loss": 1.940843939781189,
      "eval_runtime": 45.2787,
      "eval_samples_per_second": 5.411,
      "eval_steps_per_second": 2.717,
      "step": 136
    },
    {
      "epoch": 0.3607085346215781,
      "grad_norm": 2.5701866149902344,
      "learning_rate": 7.30670581489344e-05,
      "loss": 1.928,
      "step": 140
    },
    {
      "epoch": 0.3710144927536232,
      "grad_norm": 2.5628817081451416,
      "learning_rate": 7.158771761692464e-05,
      "loss": 1.9387,
      "step": 144
    },
    {
      "epoch": 0.3710144927536232,
      "eval_loss": 1.9405008554458618,
      "eval_runtime": 77.1208,
      "eval_samples_per_second": 3.177,
      "eval_steps_per_second": 1.595,
      "step": 144
    },
    {
      "epoch": 0.3813204508856683,
      "grad_norm": 2.6887667179107666,
      "learning_rate": 7.008477123264848e-05,
      "loss": 1.9983,
      "step": 148
    },
    {
      "epoch": 0.39162640901771334,
      "grad_norm": 2.5755767822265625,
      "learning_rate": 6.855986244591104e-05,
      "loss": 2.0036,
      "step": 152
    },
    {
      "epoch": 0.39162640901771334,
      "eval_loss": 1.9393625259399414,
      "eval_runtime": 77.0654,
      "eval_samples_per_second": 3.179,
      "eval_steps_per_second": 1.596,
      "step": 152
    },
    {
      "epoch": 0.40193236714975844,
      "grad_norm": 2.772606134414673,
      "learning_rate": 6.701465872208216e-05,
      "loss": 1.9969,
      "step": 156
    },
    {
      "epoch": 0.41223832528180354,
      "grad_norm": 2.1044411659240723,
      "learning_rate": 6.545084971874738e-05,
      "loss": 2.0407,
      "step": 160
    },
    {
      "epoch": 0.41223832528180354,
      "eval_loss": 1.9393056631088257,
      "eval_runtime": 44.7521,
      "eval_samples_per_second": 5.475,
      "eval_steps_per_second": 2.748,
      "step": 160
    },
    {
      "epoch": 0.42254428341384864,
      "grad_norm": 2.5281455516815186,
      "learning_rate": 6.387014543809223e-05,
      "loss": 2.0383,
      "step": 164
    },
    {
      "epoch": 0.43285024154589374,
      "grad_norm": 2.8692080974578857,
      "learning_rate": 6.227427435703997e-05,
      "loss": 2.0519,
      "step": 168
    },
    {
      "epoch": 0.43285024154589374,
      "eval_loss": 1.9384679794311523,
      "eval_runtime": 44.1246,
      "eval_samples_per_second": 5.552,
      "eval_steps_per_second": 2.788,
      "step": 168
    },
    {
      "epoch": 0.4431561996779388,
      "grad_norm": 2.7248945236206055,
      "learning_rate": 6.066498153718735e-05,
      "loss": 1.9455,
      "step": 172
    },
    {
      "epoch": 0.4534621578099839,
      "grad_norm": 2.897691011428833,
      "learning_rate": 5.90440267166055e-05,
      "loss": 1.9361,
      "step": 176
    },
    {
      "epoch": 0.4534621578099839,
      "eval_loss": 1.939582347869873,
      "eval_runtime": 44.0913,
      "eval_samples_per_second": 5.557,
      "eval_steps_per_second": 2.79,
      "step": 176
    },
    {
      "epoch": 0.463768115942029,
      "grad_norm": 3.040682077407837,
      "learning_rate": 5.74131823855921e-05,
      "loss": 2.0384,
      "step": 180
    },
    {
      "epoch": 0.4740740740740741,
      "grad_norm": 2.464486598968506,
      "learning_rate": 5.577423184847932e-05,
      "loss": 1.9812,
      "step": 184
    },
    {
      "epoch": 0.4740740740740741,
      "eval_loss": 1.9403743743896484,
      "eval_runtime": 78.7558,
      "eval_samples_per_second": 3.111,
      "eval_steps_per_second": 1.562,
      "step": 184
    },
    {
      "epoch": 0.48438003220611914,
      "grad_norm": 2.2205276489257812,
      "learning_rate": 5.4128967273616625e-05,
      "loss": 1.9799,
      "step": 188
    },
    {
      "epoch": 0.49468599033816424,
      "grad_norm": 2.5809481143951416,
      "learning_rate": 5.247918773366112e-05,
      "loss": 1.9947,
      "step": 192
    },
    {
      "epoch": 0.49468599033816424,
      "eval_loss": 1.9381928443908691,
      "eval_runtime": 43.5951,
      "eval_samples_per_second": 5.62,
      "eval_steps_per_second": 2.821,
      "step": 192
    },
    {
      "epoch": 0.5049919484702093,
      "grad_norm": 2.8167452812194824,
      "learning_rate": 5.0826697238317935e-05,
      "loss": 1.9157,
      "step": 196
    },
    {
      "epoch": 0.5152979066022544,
      "grad_norm": 2.8858871459960938,
      "learning_rate": 4.917330276168208e-05,
      "loss": 1.9343,
      "step": 200
    },
    {
      "epoch": 0.5152979066022544,
      "eval_loss": 1.935346007347107,
      "eval_runtime": 44.0224,
      "eval_samples_per_second": 5.565,
      "eval_steps_per_second": 2.794,
      "step": 200
    },
    {
      "epoch": 0.5256038647342995,
      "grad_norm": 2.6140081882476807,
      "learning_rate": 4.7520812266338885e-05,
      "loss": 2.0486,
      "step": 204
    },
    {
      "epoch": 0.5359098228663446,
      "grad_norm": 2.546766757965088,
      "learning_rate": 4.5871032726383386e-05,
      "loss": 1.9707,
      "step": 208
    },
    {
      "epoch": 0.5359098228663446,
      "eval_loss": 1.9356623888015747,
      "eval_runtime": 44.3481,
      "eval_samples_per_second": 5.524,
      "eval_steps_per_second": 2.774,
      "step": 208
    },
    {
      "epoch": 0.5462157809983897,
      "grad_norm": 2.5402872562408447,
      "learning_rate": 4.4225768151520694e-05,
      "loss": 1.9849,
      "step": 212
    },
    {
      "epoch": 0.5565217391304348,
      "grad_norm": 2.748969078063965,
      "learning_rate": 4.2586817614407895e-05,
      "loss": 2.0131,
      "step": 216
    },
    {
      "epoch": 0.5565217391304348,
      "eval_loss": 1.935119867324829,
      "eval_runtime": 69.0378,
      "eval_samples_per_second": 3.549,
      "eval_steps_per_second": 1.782,
      "step": 216
    },
    {
      "epoch": 0.5668276972624798,
      "grad_norm": 2.5293121337890625,
      "learning_rate": 4.095597328339452e-05,
      "loss": 2.0378,
      "step": 220
    },
    {
      "epoch": 0.577133655394525,
      "grad_norm": 2.650944232940674,
      "learning_rate": 3.933501846281267e-05,
      "loss": 1.9416,
      "step": 224
    },
    {
      "epoch": 0.577133655394525,
      "eval_loss": 1.9310208559036255,
      "eval_runtime": 265.9368,
      "eval_samples_per_second": 0.921,
      "eval_steps_per_second": 0.463,
      "step": 224
    },
    {
      "epoch": 0.58743961352657,
      "grad_norm": 2.5562586784362793,
      "learning_rate": 3.772572564296005e-05,
      "loss": 2.0221,
      "step": 228
    },
    {
      "epoch": 0.5977455716586151,
      "grad_norm": 2.5279388427734375,
      "learning_rate": 3.612985456190778e-05,
      "loss": 1.9652,
      "step": 232
    },
    {
      "epoch": 0.5977455716586151,
      "eval_loss": 1.9351328611373901,
      "eval_runtime": 142.4015,
      "eval_samples_per_second": 1.72,
      "eval_steps_per_second": 0.864,
      "step": 232
    },
    {
      "epoch": 0.6080515297906602,
      "grad_norm": 2.3491861820220947,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 1.9731,
      "step": 236
    },
    {
      "epoch": 0.6183574879227053,
      "grad_norm": 2.6937315464019775,
      "learning_rate": 3.298534127791785e-05,
      "loss": 1.9156,
      "step": 240
    },
    {
      "epoch": 0.6183574879227053,
      "eval_loss": 1.9266250133514404,
      "eval_runtime": 138.6395,
      "eval_samples_per_second": 1.767,
      "eval_steps_per_second": 0.887,
      "step": 240
    },
    {
      "epoch": 0.6286634460547504,
      "grad_norm": 2.3568801879882812,
      "learning_rate": 3.144013755408895e-05,
      "loss": 1.9568,
      "step": 244
    },
    {
      "epoch": 0.6389694041867955,
      "grad_norm": 2.565239906311035,
      "learning_rate": 2.991522876735154e-05,
      "loss": 1.9405,
      "step": 248
    },
    {
      "epoch": 0.6389694041867955,
      "eval_loss": 1.9259775876998901,
      "eval_runtime": 134.4727,
      "eval_samples_per_second": 1.822,
      "eval_steps_per_second": 0.915,
      "step": 248
    },
    {
      "epoch": 0.6492753623188405,
      "grad_norm": 2.49657940864563,
      "learning_rate": 2.8412282383075363e-05,
      "loss": 1.9631,
      "step": 252
    },
    {
      "epoch": 0.6595813204508857,
      "grad_norm": 2.4308993816375732,
      "learning_rate": 2.693294185106562e-05,
      "loss": 1.9909,
      "step": 256
    },
    {
      "epoch": 0.6595813204508857,
      "eval_loss": 1.9249815940856934,
      "eval_runtime": 140.3469,
      "eval_samples_per_second": 1.746,
      "eval_steps_per_second": 0.876,
      "step": 256
    },
    {
      "epoch": 0.6698872785829307,
      "grad_norm": 2.4003243446350098,
      "learning_rate": 2.547882480847461e-05,
      "loss": 1.9771,
      "step": 260
    },
    {
      "epoch": 0.6801932367149759,
      "grad_norm": 2.451406955718994,
      "learning_rate": 2.405152131093926e-05,
      "loss": 1.9179,
      "step": 264
    },
    {
      "epoch": 0.6801932367149759,
      "eval_loss": 1.9231725931167603,
      "eval_runtime": 263.4422,
      "eval_samples_per_second": 0.93,
      "eval_steps_per_second": 0.467,
      "step": 264
    },
    {
      "epoch": 0.6904991948470209,
      "grad_norm": 2.5658252239227295,
      "learning_rate": 2.2652592093878666e-05,
      "loss": 2.0138,
      "step": 268
    },
    {
      "epoch": 0.700805152979066,
      "grad_norm": 2.2721314430236816,
      "learning_rate": 2.128356686585282e-05,
      "loss": 1.9877,
      "step": 272
    },
    {
      "epoch": 0.700805152979066,
      "eval_loss": 1.921659231185913,
      "eval_runtime": 137.9314,
      "eval_samples_per_second": 1.776,
      "eval_steps_per_second": 0.892,
      "step": 272
    },
    {
      "epoch": 0.7111111111111111,
      "grad_norm": 2.5887961387634277,
      "learning_rate": 1.9945942635848748e-05,
      "loss": 1.9245,
      "step": 276
    },
    {
      "epoch": 0.7214170692431562,
      "grad_norm": 2.9357643127441406,
      "learning_rate": 1.8641182076323148e-05,
      "loss": 1.8745,
      "step": 280
    },
    {
      "epoch": 0.7214170692431562,
      "eval_loss": 1.9206682443618774,
      "eval_runtime": 138.8928,
      "eval_samples_per_second": 1.764,
      "eval_steps_per_second": 0.886,
      "step": 280
    },
    {
      "epoch": 0.7317230273752013,
      "grad_norm": 2.6738474369049072,
      "learning_rate": 1.7370711923791567e-05,
      "loss": 1.9979,
      "step": 284
    },
    {
      "epoch": 0.7420289855072464,
      "grad_norm": 2.5297255516052246,
      "learning_rate": 1.6135921418712956e-05,
      "loss": 2.016,
      "step": 288
    },
    {
      "epoch": 0.7420289855072464,
      "eval_loss": 1.919450044631958,
      "eval_runtime": 139.2841,
      "eval_samples_per_second": 1.759,
      "eval_steps_per_second": 0.883,
      "step": 288
    },
    {
      "epoch": 0.7523349436392914,
      "grad_norm": 2.4732882976531982,
      "learning_rate": 1.4938160786375572e-05,
      "loss": 1.9837,
      "step": 292
    },
    {
      "epoch": 0.7626409017713366,
      "grad_norm": 2.3005917072296143,
      "learning_rate": 1.3778739760445552e-05,
      "loss": 1.9238,
      "step": 296
    },
    {
      "epoch": 0.7626409017713366,
      "eval_loss": 1.918469786643982,
      "eval_runtime": 131.8213,
      "eval_samples_per_second": 1.859,
      "eval_steps_per_second": 0.933,
      "step": 296
    },
    {
      "epoch": 0.7729468599033816,
      "grad_norm": 2.417323350906372,
      "learning_rate": 1.2658926150792322e-05,
      "loss": 1.9988,
      "step": 300
    },
    {
      "epoch": 0.7832528180354267,
      "grad_norm": 2.688710927963257,
      "learning_rate": 1.157994445715706e-05,
      "loss": 1.9414,
      "step": 304
    },
    {
      "epoch": 0.7832528180354267,
      "eval_loss": 1.9193025827407837,
      "eval_runtime": 257.4069,
      "eval_samples_per_second": 0.952,
      "eval_steps_per_second": 0.478,
      "step": 304
    },
    {
      "epoch": 0.7935587761674718,
      "grad_norm": 2.402606248855591,
      "learning_rate": 1.0542974530180327e-05,
      "loss": 1.9017,
      "step": 308
    },
    {
      "epoch": 0.8038647342995169,
      "grad_norm": 2.4248220920562744,
      "learning_rate": 9.549150281252633e-06,
      "loss": 1.9417,
      "step": 312
    },
    {
      "epoch": 0.8038647342995169,
      "eval_loss": 1.9172296524047852,
      "eval_runtime": 135.5526,
      "eval_samples_per_second": 1.807,
      "eval_steps_per_second": 0.907,
      "step": 312
    },
    {
      "epoch": 0.814170692431562,
      "grad_norm": 2.163961172103882,
      "learning_rate": 8.599558442598998e-06,
      "loss": 1.9761,
      "step": 316
    },
    {
      "epoch": 0.8244766505636071,
      "grad_norm": 2.284884452819824,
      "learning_rate": 7.695237378953223e-06,
      "loss": 1.9647,
      "step": 320
    },
    {
      "epoch": 0.8244766505636071,
      "eval_loss": 1.916910171508789,
      "eval_runtime": 140.6132,
      "eval_samples_per_second": 1.742,
      "eval_steps_per_second": 0.875,
      "step": 320
    },
    {
      "epoch": 0.8347826086956521,
      "grad_norm": 2.0887887477874756,
      "learning_rate": 6.837175952121306e-06,
      "loss": 2.0037,
      "step": 324
    },
    {
      "epoch": 0.8450885668276973,
      "grad_norm": 2.22497820854187,
      "learning_rate": 6.026312439675552e-06,
      "loss": 1.9704,
      "step": 328
    },
    {
      "epoch": 0.8450885668276973,
      "eval_loss": 1.91719388961792,
      "eval_runtime": 138.1524,
      "eval_samples_per_second": 1.773,
      "eval_steps_per_second": 0.89,
      "step": 328
    },
    {
      "epoch": 0.8553945249597423,
      "grad_norm": 2.5339810848236084,
      "learning_rate": 5.263533508961827e-06,
      "loss": 1.9185,
      "step": 332
    },
    {
      "epoch": 0.8657004830917875,
      "grad_norm": 2.198296308517456,
      "learning_rate": 4.549673247541875e-06,
      "loss": 1.9629,
      "step": 336
    },
    {
      "epoch": 0.8657004830917875,
      "eval_loss": 1.9157441854476929,
      "eval_runtime": 250.478,
      "eval_samples_per_second": 0.978,
      "eval_steps_per_second": 0.491,
      "step": 336
    },
    {
      "epoch": 0.8760064412238325,
      "grad_norm": 2.4720425605773926,
      "learning_rate": 3.885512251130763e-06,
      "loss": 1.9554,
      "step": 340
    },
    {
      "epoch": 0.8863123993558776,
      "grad_norm": 2.5910909175872803,
      "learning_rate": 3.271776770026963e-06,
      "loss": 1.9574,
      "step": 344
    },
    {
      "epoch": 0.8863123993558776,
      "eval_loss": 1.9149816036224365,
      "eval_runtime": 128.8874,
      "eval_samples_per_second": 1.901,
      "eval_steps_per_second": 0.954,
      "step": 344
    },
    {
      "epoch": 0.8966183574879227,
      "grad_norm": 2.317239284515381,
      "learning_rate": 2.7091379149682685e-06,
      "loss": 1.895,
      "step": 348
    },
    {
      "epoch": 0.9069243156199678,
      "grad_norm": 2.7753469944000244,
      "learning_rate": 2.1982109232821178e-06,
      "loss": 1.9278,
      "step": 352
    },
    {
      "epoch": 0.9069243156199678,
      "eval_loss": 1.9143210649490356,
      "eval_runtime": 135.8994,
      "eval_samples_per_second": 1.803,
      "eval_steps_per_second": 0.905,
      "step": 352
    },
    {
      "epoch": 0.9172302737520129,
      "grad_norm": 2.6023426055908203,
      "learning_rate": 1.7395544861325718e-06,
      "loss": 2.0422,
      "step": 356
    },
    {
      "epoch": 0.927536231884058,
      "grad_norm": 2.2456705570220947,
      "learning_rate": 1.333670137599713e-06,
      "loss": 2.0079,
      "step": 360
    },
    {
      "epoch": 0.927536231884058,
      "eval_loss": 1.9140007495880127,
      "eval_runtime": 139.4822,
      "eval_samples_per_second": 1.756,
      "eval_steps_per_second": 0.882,
      "step": 360
    },
    {
      "epoch": 0.937842190016103,
      "grad_norm": 2.511979103088379,
      "learning_rate": 9.810017062595322e-07,
      "loss": 1.9815,
      "step": 364
    },
    {
      "epoch": 0.9481481481481482,
      "grad_norm": 2.5906260013580322,
      "learning_rate": 6.819348298638839e-07,
      "loss": 1.9203,
      "step": 368
    },
    {
      "epoch": 0.9481481481481482,
      "eval_loss": 1.9137802124023438,
      "eval_runtime": 137.4266,
      "eval_samples_per_second": 1.783,
      "eval_steps_per_second": 0.895,
      "step": 368
    },
    {
      "epoch": 0.9584541062801932,
      "grad_norm": 2.399129867553711,
      "learning_rate": 4.367965336512403e-07,
      "loss": 1.8967,
      "step": 372
    },
    {
      "epoch": 0.9687600644122383,
      "grad_norm": 2.576765537261963,
      "learning_rate": 2.458548727494292e-07,
      "loss": 1.9834,
      "step": 376
    },
    {
      "epoch": 0.9687600644122383,
      "eval_loss": 1.9139444828033447,
      "eval_runtime": 253.9256,
      "eval_samples_per_second": 0.965,
      "eval_steps_per_second": 0.484,
      "step": 376
    },
    {
      "epoch": 0.9790660225442834,
      "grad_norm": 2.6049747467041016,
      "learning_rate": 1.0931863906127327e-07,
      "loss": 1.8785,
      "step": 380
    },
    {
      "epoch": 0.9893719806763285,
      "grad_norm": 2.822763204574585,
      "learning_rate": 2.7337132953697554e-08,
      "loss": 1.8809,
      "step": 384
    },
    {
      "epoch": 0.9893719806763285,
      "eval_loss": 1.9143255949020386,
      "eval_runtime": 136.0427,
      "eval_samples_per_second": 1.801,
      "eval_steps_per_second": 0.904,
      "step": 384
    },
    {
      "epoch": 0.9996779388083736,
      "grad_norm": 2.4256649017333984,
      "learning_rate": 0.0,
      "loss": 1.9735,
      "step": 388
    }
  ],
  "logging_steps": 4,
  "max_steps": 388,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 8,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.427475676604334e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}