{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.14311270125223613,
  "eval_steps": 500,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0035778175313059034,
      "grad_norm": 0.5825825929641724,
      "learning_rate": 0.00019928443649373882,
      "loss": 1.248,
      "num_input_tokens_seen": 6646,
      "step": 10
    },
    {
      "epoch": 0.007155635062611807,
      "grad_norm": 0.5380188822746277,
      "learning_rate": 0.00019856887298747765,
      "loss": 0.5478,
      "num_input_tokens_seen": 13063,
      "step": 20
    },
    {
      "epoch": 0.01073345259391771,
      "grad_norm": 0.3872911036014557,
      "learning_rate": 0.00019785330948121648,
      "loss": 0.5135,
      "num_input_tokens_seen": 19512,
      "step": 30
    },
    {
      "epoch": 0.014311270125223614,
      "grad_norm": 0.4991438686847687,
      "learning_rate": 0.0001971377459749553,
      "loss": 0.5092,
      "num_input_tokens_seen": 26884,
      "step": 40
    },
    {
      "epoch": 0.017889087656529516,
      "grad_norm": 0.6744784116744995,
      "learning_rate": 0.0001964221824686941,
      "loss": 0.4799,
      "num_input_tokens_seen": 34831,
      "step": 50
    },
    {
      "epoch": 0.02146690518783542,
      "grad_norm": 0.5413841009140015,
      "learning_rate": 0.00019570661896243293,
      "loss": 0.4738,
      "num_input_tokens_seen": 40074,
      "step": 60
    },
    {
      "epoch": 0.025044722719141325,
      "grad_norm": 0.33517029881477356,
      "learning_rate": 0.00019499105545617174,
      "loss": 0.4907,
      "num_input_tokens_seen": 47194,
      "step": 70
    },
    {
      "epoch": 0.028622540250447227,
      "grad_norm": 0.34275758266448975,
      "learning_rate": 0.00019427549194991057,
      "loss": 0.4642,
      "num_input_tokens_seen": 53439,
      "step": 80
    },
    {
      "epoch": 0.03220035778175313,
      "grad_norm": 0.4074145257472992,
      "learning_rate": 0.00019355992844364938,
      "loss": 0.4431,
      "num_input_tokens_seen": 59366,
      "step": 90
    },
    {
      "epoch": 0.03577817531305903,
      "grad_norm": 0.372760534286499,
      "learning_rate": 0.0001928443649373882,
      "loss": 0.4824,
      "num_input_tokens_seen": 66414,
      "step": 100
    },
    {
      "epoch": 0.03935599284436494,
      "grad_norm": 0.35169002413749695,
      "learning_rate": 0.00019212880143112702,
      "loss": 0.4863,
      "num_input_tokens_seen": 73451,
      "step": 110
    },
    {
      "epoch": 0.04293381037567084,
      "grad_norm": 0.4088020324707031,
      "learning_rate": 0.00019141323792486585,
      "loss": 0.4792,
      "num_input_tokens_seen": 81934,
      "step": 120
    },
    {
      "epoch": 0.046511627906976744,
      "grad_norm": 0.40062326192855835,
      "learning_rate": 0.00019069767441860466,
      "loss": 0.4607,
      "num_input_tokens_seen": 88335,
      "step": 130
    },
    {
      "epoch": 0.05008944543828265,
      "grad_norm": 0.5044320225715637,
      "learning_rate": 0.0001899821109123435,
      "loss": 0.456,
      "num_input_tokens_seen": 96192,
      "step": 140
    },
    {
      "epoch": 0.05366726296958855,
      "grad_norm": 0.4566495418548584,
      "learning_rate": 0.0001892665474060823,
      "loss": 0.429,
      "num_input_tokens_seen": 101609,
      "step": 150
    },
    {
      "epoch": 0.057245080500894455,
      "grad_norm": 0.4657338559627533,
      "learning_rate": 0.0001885509838998211,
      "loss": 0.4445,
      "num_input_tokens_seen": 107467,
      "step": 160
    },
    {
      "epoch": 0.06082289803220036,
      "grad_norm": 0.5721924304962158,
      "learning_rate": 0.00018783542039355994,
      "loss": 0.4304,
      "num_input_tokens_seen": 113612,
      "step": 170
    },
    {
      "epoch": 0.06440071556350627,
      "grad_norm": 0.2883516848087311,
      "learning_rate": 0.00018711985688729877,
      "loss": 0.4525,
      "num_input_tokens_seen": 121416,
      "step": 180
    },
    {
      "epoch": 0.06797853309481217,
      "grad_norm": 0.5061659216880798,
      "learning_rate": 0.00018640429338103758,
      "loss": 0.4439,
      "num_input_tokens_seen": 128284,
      "step": 190
    },
    {
      "epoch": 0.07155635062611806,
      "grad_norm": 0.3323754072189331,
      "learning_rate": 0.00018568872987477638,
      "loss": 0.4489,
      "num_input_tokens_seen": 135571,
      "step": 200
    },
    {
      "epoch": 0.07513416815742398,
      "grad_norm": 0.5354058742523193,
      "learning_rate": 0.00018497316636851522,
      "loss": 0.4634,
      "num_input_tokens_seen": 141479,
      "step": 210
    },
    {
      "epoch": 0.07871198568872988,
      "grad_norm": 0.4328760504722595,
      "learning_rate": 0.00018425760286225405,
      "loss": 0.4545,
      "num_input_tokens_seen": 147677,
      "step": 220
    },
    {
      "epoch": 0.08228980322003578,
      "grad_norm": 0.28675127029418945,
      "learning_rate": 0.00018354203935599286,
      "loss": 0.4814,
      "num_input_tokens_seen": 154847,
      "step": 230
    },
    {
      "epoch": 0.08586762075134168,
      "grad_norm": 0.31572216749191284,
      "learning_rate": 0.00018282647584973166,
      "loss": 0.446,
      "num_input_tokens_seen": 162267,
      "step": 240
    },
    {
      "epoch": 0.08944543828264759,
      "grad_norm": 0.360166996717453,
      "learning_rate": 0.0001821109123434705,
      "loss": 0.4549,
      "num_input_tokens_seen": 168817,
      "step": 250
    },
    {
      "epoch": 0.09302325581395349,
      "grad_norm": 0.342385470867157,
      "learning_rate": 0.0001813953488372093,
      "loss": 0.4297,
      "num_input_tokens_seen": 174828,
      "step": 260
    },
    {
      "epoch": 0.09660107334525939,
      "grad_norm": 0.37481924891471863,
      "learning_rate": 0.00018067978533094814,
      "loss": 0.4314,
      "num_input_tokens_seen": 181578,
      "step": 270
    },
    {
      "epoch": 0.1001788908765653,
      "grad_norm": 0.28545519709587097,
      "learning_rate": 0.00017996422182468694,
      "loss": 0.4332,
      "num_input_tokens_seen": 187842,
      "step": 280
    },
    {
      "epoch": 0.1037567084078712,
      "grad_norm": 0.38877248764038086,
      "learning_rate": 0.00017924865831842578,
      "loss": 0.4248,
      "num_input_tokens_seen": 194651,
      "step": 290
    },
    {
      "epoch": 0.1073345259391771,
      "grad_norm": 0.30087631940841675,
      "learning_rate": 0.00017853309481216458,
      "loss": 0.4405,
      "num_input_tokens_seen": 202203,
      "step": 300
    },
    {
      "epoch": 0.11091234347048301,
      "grad_norm": 0.33470776677131653,
      "learning_rate": 0.00017781753130590342,
      "loss": 0.4485,
      "num_input_tokens_seen": 210364,
      "step": 310
    },
    {
      "epoch": 0.11449016100178891,
      "grad_norm": 0.44205668568611145,
      "learning_rate": 0.00017710196779964222,
      "loss": 0.4431,
      "num_input_tokens_seen": 217965,
      "step": 320
    },
    {
      "epoch": 0.11806797853309481,
      "grad_norm": 0.39270082116127014,
      "learning_rate": 0.00017638640429338106,
      "loss": 0.4475,
      "num_input_tokens_seen": 224249,
      "step": 330
    },
    {
      "epoch": 0.12164579606440072,
      "grad_norm": 0.37138304114341736,
      "learning_rate": 0.00017567084078711986,
      "loss": 0.4139,
      "num_input_tokens_seen": 230513,
      "step": 340
    },
    {
      "epoch": 0.1252236135957066,
      "grad_norm": 0.32019296288490295,
      "learning_rate": 0.00017495527728085867,
      "loss": 0.4488,
      "num_input_tokens_seen": 237103,
      "step": 350
    },
    {
      "epoch": 0.12880143112701253,
      "grad_norm": 0.42748796939849854,
      "learning_rate": 0.0001742397137745975,
      "loss": 0.4059,
      "num_input_tokens_seen": 242530,
      "step": 360
    },
    {
      "epoch": 0.13237924865831843,
      "grad_norm": 0.39997342228889465,
      "learning_rate": 0.00017352415026833634,
      "loss": 0.4069,
      "num_input_tokens_seen": 247524,
      "step": 370
    },
    {
      "epoch": 0.13595706618962433,
      "grad_norm": 0.45402902364730835,
      "learning_rate": 0.00017280858676207514,
      "loss": 0.4354,
      "num_input_tokens_seen": 252526,
      "step": 380
    },
    {
      "epoch": 0.13953488372093023,
      "grad_norm": 0.500701904296875,
      "learning_rate": 0.00017209302325581395,
      "loss": 0.4421,
      "num_input_tokens_seen": 258974,
      "step": 390
    },
    {
      "epoch": 0.14311270125223613,
      "grad_norm": 0.2604714035987854,
      "learning_rate": 0.00017137745974955278,
      "loss": 0.4338,
      "num_input_tokens_seen": 265411,
      "step": 400
    }
  ],
  "logging_steps": 10,
  "max_steps": 2795,
  "num_input_tokens_seen": 265411,
  "num_train_epochs": 1,
  "save_steps": 20,
  "total_flos": 5968156119717888.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}