{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.724309642372114,
  "eval_steps": 13,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.014486192847442281,
      "grad_norm": 1142.723876953125,
      "learning_rate": 5e-05,
      "loss": 366.0216,
      "step": 1
    },
    {
      "epoch": 0.014486192847442281,
      "eval_loss": 11.197284698486328,
      "eval_runtime": 41.2659,
      "eval_samples_per_second": 11.268,
      "eval_steps_per_second": 2.835,
      "step": 1
    },
    {
      "epoch": 0.028972385694884563,
      "grad_norm": 1126.6475830078125,
      "learning_rate": 0.0001,
      "loss": 347.0512,
      "step": 2
    },
    {
      "epoch": 0.04345857854232685,
      "grad_norm": 1244.7637939453125,
      "learning_rate": 9.989294616193017e-05,
      "loss": 333.4102,
      "step": 3
    },
    {
      "epoch": 0.057944771389769126,
      "grad_norm": 860.9097290039062,
      "learning_rate": 9.957224306869053e-05,
      "loss": 176.127,
      "step": 4
    },
    {
      "epoch": 0.0724309642372114,
      "grad_norm": 2220.339599609375,
      "learning_rate": 9.903926402016153e-05,
      "loss": 224.2955,
      "step": 5
    },
    {
      "epoch": 0.0869171570846537,
      "grad_norm": 2301.009033203125,
      "learning_rate": 9.829629131445342e-05,
      "loss": 208.5273,
      "step": 6
    },
    {
      "epoch": 0.10140334993209597,
      "grad_norm": 852.5401611328125,
      "learning_rate": 9.73465064747553e-05,
      "loss": 146.1845,
      "step": 7
    },
    {
      "epoch": 0.11588954277953825,
      "grad_norm": 379.1105651855469,
      "learning_rate": 9.619397662556435e-05,
      "loss": 109.2204,
      "step": 8
    },
    {
      "epoch": 0.13037573562698054,
      "grad_norm": 428.03802490234375,
      "learning_rate": 9.484363707663442e-05,
      "loss": 90.4438,
      "step": 9
    },
    {
      "epoch": 0.1448619284744228,
      "grad_norm": 516.3211669921875,
      "learning_rate": 9.330127018922194e-05,
      "loss": 93.6263,
      "step": 10
    },
    {
      "epoch": 0.1593481213218651,
      "grad_norm": 312.0606689453125,
      "learning_rate": 9.157348061512727e-05,
      "loss": 73.8311,
      "step": 11
    },
    {
      "epoch": 0.1738343141693074,
      "grad_norm": 232.24649047851562,
      "learning_rate": 8.966766701456177e-05,
      "loss": 62.2049,
      "step": 12
    },
    {
      "epoch": 0.18832050701674966,
      "grad_norm": 209.9749298095703,
      "learning_rate": 8.759199037394887e-05,
      "loss": 53.5616,
      "step": 13
    },
    {
      "epoch": 0.18832050701674966,
      "eval_loss": 1.432969570159912,
      "eval_runtime": 41.3466,
      "eval_samples_per_second": 11.246,
      "eval_steps_per_second": 2.83,
      "step": 13
    },
    {
      "epoch": 0.20280669986419195,
      "grad_norm": 227.82803344726562,
      "learning_rate": 8.535533905932738e-05,
      "loss": 45.4463,
      "step": 14
    },
    {
      "epoch": 0.2172928927116342,
      "grad_norm": 233.84213256835938,
      "learning_rate": 8.296729075500344e-05,
      "loss": 40.7913,
      "step": 15
    },
    {
      "epoch": 0.2317790855590765,
      "grad_norm": 241.74923706054688,
      "learning_rate": 8.043807145043604e-05,
      "loss": 36.8332,
      "step": 16
    },
    {
      "epoch": 0.2462652784065188,
      "grad_norm": 273.61724853515625,
      "learning_rate": 7.777851165098012e-05,
      "loss": 33.5054,
      "step": 17
    },
    {
      "epoch": 0.2607514712539611,
      "grad_norm": 336.7424011230469,
      "learning_rate": 7.500000000000001e-05,
      "loss": 26.3073,
      "step": 18
    },
    {
      "epoch": 0.2752376641014033,
      "grad_norm": 398.2518005371094,
      "learning_rate": 7.211443451095007e-05,
      "loss": 29.1703,
      "step": 19
    },
    {
      "epoch": 0.2897238569488456,
      "grad_norm": 263.69000244140625,
      "learning_rate": 6.91341716182545e-05,
      "loss": 25.753,
      "step": 20
    },
    {
      "epoch": 0.3042100497962879,
      "grad_norm": 140.5085906982422,
      "learning_rate": 6.607197326515808e-05,
      "loss": 24.2245,
      "step": 21
    },
    {
      "epoch": 0.3186962426437302,
      "grad_norm": 126.79087829589844,
      "learning_rate": 6.294095225512603e-05,
      "loss": 20.9214,
      "step": 22
    },
    {
      "epoch": 0.3331824354911725,
      "grad_norm": 126.19627380371094,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 19.3473,
      "step": 23
    },
    {
      "epoch": 0.3476686283386148,
      "grad_norm": 218.7227325439453,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 23.4087,
      "step": 24
    },
    {
      "epoch": 0.362154821186057,
      "grad_norm": 117.1246566772461,
      "learning_rate": 5.327015646150716e-05,
      "loss": 18.8893,
      "step": 25
    },
    {
      "epoch": 0.3766410140334993,
      "grad_norm": 319.6368103027344,
      "learning_rate": 5e-05,
      "loss": 21.5772,
      "step": 26
    },
    {
      "epoch": 0.3766410140334993,
      "eval_loss": 0.5946769714355469,
      "eval_runtime": 41.3369,
      "eval_samples_per_second": 11.249,
      "eval_steps_per_second": 2.83,
      "step": 26
    },
    {
      "epoch": 0.3911272068809416,
      "grad_norm": 149.88087463378906,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 21.5878,
      "step": 27
    },
    {
      "epoch": 0.4056133997283839,
      "grad_norm": 119.75304412841797,
      "learning_rate": 4.347369038899744e-05,
      "loss": 21.2073,
      "step": 28
    },
    {
      "epoch": 0.4200995925758262,
      "grad_norm": 320.1707763671875,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 17.1099,
      "step": 29
    },
    {
      "epoch": 0.4345857854232684,
      "grad_norm": 128.20896911621094,
      "learning_rate": 3.705904774487396e-05,
      "loss": 16.6537,
      "step": 30
    },
    {
      "epoch": 0.4490719782707107,
      "grad_norm": 181.88687133789062,
      "learning_rate": 3.392802673484193e-05,
      "loss": 19.7434,
      "step": 31
    },
    {
      "epoch": 0.463558171118153,
      "grad_norm": 182.82069396972656,
      "learning_rate": 3.086582838174551e-05,
      "loss": 20.6309,
      "step": 32
    },
    {
      "epoch": 0.4780443639655953,
      "grad_norm": 173.1058807373047,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 18.576,
      "step": 33
    },
    {
      "epoch": 0.4925305568130376,
      "grad_norm": 266.4740295410156,
      "learning_rate": 2.500000000000001e-05,
      "loss": 17.5184,
      "step": 34
    },
    {
      "epoch": 0.5070167496604798,
      "grad_norm": 405.75341796875,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 15.6519,
      "step": 35
    },
    {
      "epoch": 0.5215029425079222,
      "grad_norm": 356.82489013671875,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 17.717,
      "step": 36
    },
    {
      "epoch": 0.5359891353553644,
      "grad_norm": 91.6025619506836,
      "learning_rate": 1.703270924499656e-05,
      "loss": 17.8593,
      "step": 37
    },
    {
      "epoch": 0.5504753282028066,
      "grad_norm": 138.64764404296875,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 15.968,
      "step": 38
    },
    {
      "epoch": 0.564961521050249,
      "grad_norm": 87.49166107177734,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 15.2474,
      "step": 39
    },
    {
      "epoch": 0.564961521050249,
      "eval_loss": 0.500713586807251,
      "eval_runtime": 41.3261,
      "eval_samples_per_second": 11.252,
      "eval_steps_per_second": 2.831,
      "step": 39
    },
    {
      "epoch": 0.5794477138976912,
      "grad_norm": 100.62445068359375,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 17.5808,
      "step": 40
    },
    {
      "epoch": 0.5939339067451336,
      "grad_norm": 108.46626281738281,
      "learning_rate": 8.426519384872733e-06,
      "loss": 13.7374,
      "step": 41
    },
    {
      "epoch": 0.6084200995925758,
      "grad_norm": 85.36103820800781,
      "learning_rate": 6.698729810778065e-06,
      "loss": 17.013,
      "step": 42
    },
    {
      "epoch": 0.6229062924400182,
      "grad_norm": 83.11396789550781,
      "learning_rate": 5.156362923365588e-06,
      "loss": 13.9914,
      "step": 43
    },
    {
      "epoch": 0.6373924852874604,
      "grad_norm": 93.9541015625,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 16.1954,
      "step": 44
    },
    {
      "epoch": 0.6518786781349026,
      "grad_norm": 102.81454467773438,
      "learning_rate": 2.653493525244721e-06,
      "loss": 14.4745,
      "step": 45
    },
    {
      "epoch": 0.666364870982345,
      "grad_norm": 111.78718566894531,
      "learning_rate": 1.70370868554659e-06,
      "loss": 15.9655,
      "step": 46
    },
    {
      "epoch": 0.6808510638297872,
      "grad_norm": 186.56785583496094,
      "learning_rate": 9.607359798384785e-07,
      "loss": 14.6932,
      "step": 47
    },
    {
      "epoch": 0.6953372566772296,
      "grad_norm": 69.18374633789062,
      "learning_rate": 4.277569313094809e-07,
      "loss": 13.9869,
      "step": 48
    },
    {
      "epoch": 0.7098234495246718,
      "grad_norm": 95.9025650024414,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 16.5867,
      "step": 49
    },
    {
      "epoch": 0.724309642372114,
      "grad_norm": 103.1650390625,
      "learning_rate": 0.0,
      "loss": 13.8776,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 13,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.1250278137331712e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}