{
  "best_metric": 12.451294898986816,
  "best_model_checkpoint": "miner_id_24/checkpoint-75",
  "epoch": 0.38860103626943004,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0051813471502590676,
      "grad_norm": 0.009733538143336773,
      "learning_rate": 4e-05,
      "loss": 12.4632,
      "step": 1
    },
    {
      "epoch": 0.0051813471502590676,
      "eval_loss": 12.459209442138672,
      "eval_runtime": 2.7206,
      "eval_samples_per_second": 30.141,
      "eval_steps_per_second": 15.07,
      "step": 1
    },
    {
      "epoch": 0.010362694300518135,
      "grad_norm": 0.00900011882185936,
      "learning_rate": 8e-05,
      "loss": 12.4665,
      "step": 2
    },
    {
      "epoch": 0.015544041450777202,
      "grad_norm": 0.010131618939340115,
      "learning_rate": 0.00012,
      "loss": 12.4591,
      "step": 3
    },
    {
      "epoch": 0.02072538860103627,
      "grad_norm": 0.014053963124752045,
      "learning_rate": 0.00016,
      "loss": 12.4641,
      "step": 4
    },
    {
      "epoch": 0.025906735751295335,
      "grad_norm": 0.013694445602595806,
      "learning_rate": 0.0002,
      "loss": 12.4626,
      "step": 5
    },
    {
      "epoch": 0.031088082901554404,
      "grad_norm": 0.012138248421251774,
      "learning_rate": 0.00019994532573409262,
      "loss": 12.4688,
      "step": 6
    },
    {
      "epoch": 0.03626943005181347,
      "grad_norm": 0.011199451982975006,
      "learning_rate": 0.00019978136272187747,
      "loss": 12.452,
      "step": 7
    },
    {
      "epoch": 0.04145077720207254,
      "grad_norm": 0.01216774433851242,
      "learning_rate": 0.00019950829025450114,
      "loss": 12.4551,
      "step": 8
    },
    {
      "epoch": 0.046632124352331605,
      "grad_norm": 0.010164499282836914,
      "learning_rate": 0.00019912640693269752,
      "loss": 12.4611,
      "step": 9
    },
    {
      "epoch": 0.05181347150259067,
      "grad_norm": 0.011890110559761524,
      "learning_rate": 0.00019863613034027224,
      "loss": 12.4661,
      "step": 10
    },
    {
      "epoch": 0.05699481865284974,
      "grad_norm": 0.011710001155734062,
      "learning_rate": 0.00019803799658748094,
      "loss": 12.4662,
      "step": 11
    },
    {
      "epoch": 0.06217616580310881,
      "grad_norm": 0.011335656978189945,
      "learning_rate": 0.0001973326597248006,
      "loss": 12.4509,
      "step": 12
    },
    {
      "epoch": 0.06735751295336788,
      "grad_norm": 0.011207724921405315,
      "learning_rate": 0.00019652089102773488,
      "loss": 12.461,
      "step": 13
    },
    {
      "epoch": 0.07253886010362694,
      "grad_norm": 0.012399866245687008,
      "learning_rate": 0.00019560357815343577,
      "loss": 12.4604,
      "step": 14
    },
    {
      "epoch": 0.07772020725388601,
      "grad_norm": 0.012533013708889484,
      "learning_rate": 0.00019458172417006347,
      "loss": 12.4634,
      "step": 15
    },
    {
      "epoch": 0.08290155440414508,
      "grad_norm": 0.012151951901614666,
      "learning_rate": 0.0001934564464599461,
      "loss": 12.4559,
      "step": 16
    },
    {
      "epoch": 0.08808290155440414,
      "grad_norm": 0.013444810174405575,
      "learning_rate": 0.00019222897549773848,
      "loss": 12.4702,
      "step": 17
    },
    {
      "epoch": 0.09326424870466321,
      "grad_norm": 0.015751156955957413,
      "learning_rate": 0.00019090065350491626,
      "loss": 12.4622,
      "step": 18
    },
    {
      "epoch": 0.09844559585492228,
      "grad_norm": 0.016734309494495392,
      "learning_rate": 0.00018947293298207635,
      "loss": 12.4618,
      "step": 19
    },
    {
      "epoch": 0.10362694300518134,
      "grad_norm": 0.01627177745103836,
      "learning_rate": 0.0001879473751206489,
      "loss": 12.4723,
      "step": 20
    },
    {
      "epoch": 0.10880829015544041,
      "grad_norm": 0.01785571686923504,
      "learning_rate": 0.00018632564809575742,
      "loss": 12.4617,
      "step": 21
    },
    {
      "epoch": 0.11398963730569948,
      "grad_norm": 0.01880309171974659,
      "learning_rate": 0.00018460952524209355,
      "loss": 12.4674,
      "step": 22
    },
    {
      "epoch": 0.11917098445595854,
      "grad_norm": 0.01608671247959137,
      "learning_rate": 0.00018280088311480201,
      "loss": 12.461,
      "step": 23
    },
    {
      "epoch": 0.12435233160621761,
      "grad_norm": 0.020015588030219078,
      "learning_rate": 0.00018090169943749476,
      "loss": 12.4573,
      "step": 24
    },
    {
      "epoch": 0.12953367875647667,
      "grad_norm": 0.018211856484413147,
      "learning_rate": 0.00017891405093963938,
      "loss": 12.4653,
      "step": 25
    },
    {
      "epoch": 0.12953367875647667,
      "eval_loss": 12.45799446105957,
      "eval_runtime": 2.7173,
      "eval_samples_per_second": 30.177,
      "eval_steps_per_second": 15.089,
      "step": 25
    },
    {
      "epoch": 0.13471502590673576,
      "grad_norm": 0.021804824471473694,
      "learning_rate": 0.00017684011108568592,
      "loss": 12.4638,
      "step": 26
    },
    {
      "epoch": 0.13989637305699482,
      "grad_norm": 0.021040290594100952,
      "learning_rate": 0.0001746821476984154,
      "loss": 12.4644,
      "step": 27
    },
    {
      "epoch": 0.14507772020725387,
      "grad_norm": 0.025091087445616722,
      "learning_rate": 0.00017244252047910892,
      "loss": 12.4708,
      "step": 28
    },
    {
      "epoch": 0.15025906735751296,
      "grad_norm": 0.024071145802736282,
      "learning_rate": 0.00017012367842724887,
      "loss": 12.4589,
      "step": 29
    },
    {
      "epoch": 0.15544041450777202,
      "grad_norm": 0.028311707079410553,
      "learning_rate": 0.00016772815716257412,
      "loss": 12.4677,
      "step": 30
    },
    {
      "epoch": 0.16062176165803108,
      "grad_norm": 0.026514964178204536,
      "learning_rate": 0.00016525857615241687,
      "loss": 12.4489,
      "step": 31
    },
    {
      "epoch": 0.16580310880829016,
      "grad_norm": 0.03242511302232742,
      "learning_rate": 0.0001627176358473537,
      "loss": 12.4624,
      "step": 32
    },
    {
      "epoch": 0.17098445595854922,
      "grad_norm": 0.03276370093226433,
      "learning_rate": 0.00016010811472830252,
      "loss": 12.461,
      "step": 33
    },
    {
      "epoch": 0.17616580310880828,
      "grad_norm": 0.031001713126897812,
      "learning_rate": 0.00015743286626829437,
      "loss": 12.4701,
      "step": 34
    },
    {
      "epoch": 0.18134715025906736,
      "grad_norm": 0.03384930640459061,
      "learning_rate": 0.00015469481581224272,
      "loss": 12.461,
      "step": 35
    },
    {
      "epoch": 0.18652849740932642,
      "grad_norm": 0.039669886231422424,
      "learning_rate": 0.00015189695737812152,
      "loss": 12.4614,
      "step": 36
    },
    {
      "epoch": 0.19170984455958548,
      "grad_norm": 0.03610478714108467,
      "learning_rate": 0.00014904235038305083,
      "loss": 12.4614,
      "step": 37
    },
    {
      "epoch": 0.19689119170984457,
      "grad_norm": 0.04812079295516014,
      "learning_rate": 0.0001461341162978688,
      "loss": 12.453,
      "step": 38
    },
    {
      "epoch": 0.20207253886010362,
      "grad_norm": 0.04061286896467209,
      "learning_rate": 0.00014317543523384928,
      "loss": 12.4646,
      "step": 39
    },
    {
      "epoch": 0.20725388601036268,
      "grad_norm": 0.048531003296375275,
      "learning_rate": 0.00014016954246529696,
      "loss": 12.4561,
      "step": 40
    },
    {
      "epoch": 0.21243523316062177,
      "grad_norm": 0.05337219685316086,
      "learning_rate": 0.00013711972489182208,
      "loss": 12.4593,
      "step": 41
    },
    {
      "epoch": 0.21761658031088082,
      "grad_norm": 0.04679669812321663,
      "learning_rate": 0.00013402931744416433,
      "loss": 12.4629,
      "step": 42
    },
    {
      "epoch": 0.22279792746113988,
      "grad_norm": 0.05289416015148163,
      "learning_rate": 0.00013090169943749476,
      "loss": 12.4662,
      "step": 43
    },
    {
      "epoch": 0.22797927461139897,
      "grad_norm": 0.06214183568954468,
      "learning_rate": 0.00012774029087618446,
      "loss": 12.4517,
      "step": 44
    },
    {
      "epoch": 0.23316062176165803,
      "grad_norm": 0.06562865525484085,
      "learning_rate": 0.00012454854871407994,
      "loss": 12.4603,
      "step": 45
    },
    {
      "epoch": 0.23834196891191708,
      "grad_norm": 0.06988850980997086,
      "learning_rate": 0.0001213299630743747,
      "loss": 12.4598,
      "step": 46
    },
    {
      "epoch": 0.24352331606217617,
      "grad_norm": 0.06310445815324783,
      "learning_rate": 0.000118088053433211,
      "loss": 12.4625,
      "step": 47
    },
    {
      "epoch": 0.24870466321243523,
      "grad_norm": 0.07609426230192184,
      "learning_rate": 0.0001148263647711842,
      "loss": 12.4652,
      "step": 48
    },
    {
      "epoch": 0.2538860103626943,
      "grad_norm": 0.05050159618258476,
      "learning_rate": 0.00011154846369695863,
      "loss": 12.4593,
      "step": 49
    },
    {
      "epoch": 0.25906735751295334,
      "grad_norm": 0.05446188151836395,
      "learning_rate": 0.00010825793454723325,
      "loss": 12.4652,
      "step": 50
    },
    {
      "epoch": 0.25906735751295334,
      "eval_loss": 12.454829216003418,
      "eval_runtime": 2.7166,
      "eval_samples_per_second": 30.184,
      "eval_steps_per_second": 15.092,
      "step": 50
    },
    {
      "epoch": 0.26424870466321243,
      "grad_norm": 0.05474114790558815,
      "learning_rate": 0.00010495837546732224,
      "loss": 12.4555,
      "step": 51
    },
    {
      "epoch": 0.2694300518134715,
      "grad_norm": 0.06194544583559036,
      "learning_rate": 0.00010165339447663587,
      "loss": 12.4549,
      "step": 52
    },
    {
      "epoch": 0.27461139896373055,
      "grad_norm": 0.07049047946929932,
      "learning_rate": 9.834660552336415e-05,
      "loss": 12.4587,
      "step": 53
    },
    {
      "epoch": 0.27979274611398963,
      "grad_norm": 0.06702786684036255,
      "learning_rate": 9.504162453267777e-05,
      "loss": 12.4568,
      "step": 54
    },
    {
      "epoch": 0.2849740932642487,
      "grad_norm": 0.06343399733304977,
      "learning_rate": 9.174206545276677e-05,
      "loss": 12.4554,
      "step": 55
    },
    {
      "epoch": 0.29015544041450775,
      "grad_norm": 0.06678463518619537,
      "learning_rate": 8.845153630304139e-05,
      "loss": 12.4561,
      "step": 56
    },
    {
      "epoch": 0.29533678756476683,
      "grad_norm": 0.07747721672058105,
      "learning_rate": 8.517363522881579e-05,
      "loss": 12.4523,
      "step": 57
    },
    {
      "epoch": 0.3005181347150259,
      "grad_norm": 0.07417742162942886,
      "learning_rate": 8.191194656678904e-05,
      "loss": 12.4628,
      "step": 58
    },
    {
      "epoch": 0.30569948186528495,
      "grad_norm": 0.07810303568840027,
      "learning_rate": 7.867003692562534e-05,
      "loss": 12.4572,
      "step": 59
    },
    {
      "epoch": 0.31088082901554404,
      "grad_norm": 0.08061844110488892,
      "learning_rate": 7.54514512859201e-05,
      "loss": 12.4548,
      "step": 60
    },
    {
      "epoch": 0.3160621761658031,
      "grad_norm": 0.0870494544506073,
      "learning_rate": 7.225970912381556e-05,
      "loss": 12.4503,
      "step": 61
    },
    {
      "epoch": 0.32124352331606215,
      "grad_norm": 0.08460117876529694,
      "learning_rate": 6.909830056250527e-05,
      "loss": 12.4587,
      "step": 62
    },
    {
      "epoch": 0.32642487046632124,
      "grad_norm": 0.08417250216007233,
      "learning_rate": 6.59706825558357e-05,
      "loss": 12.4613,
      "step": 63
    },
    {
      "epoch": 0.3316062176165803,
      "grad_norm": 0.09139107912778854,
      "learning_rate": 6.28802751081779e-05,
      "loss": 12.453,
      "step": 64
    },
    {
      "epoch": 0.33678756476683935,
      "grad_norm": 0.0929340347647667,
      "learning_rate": 5.983045753470308e-05,
      "loss": 12.4633,
      "step": 65
    },
    {
      "epoch": 0.34196891191709844,
      "grad_norm": 0.09280311316251755,
      "learning_rate": 5.6824564766150726e-05,
      "loss": 12.4553,
      "step": 66
    },
    {
      "epoch": 0.3471502590673575,
      "grad_norm": 0.08577651530504227,
      "learning_rate": 5.386588370213124e-05,
      "loss": 12.4622,
      "step": 67
    },
    {
      "epoch": 0.35233160621761656,
      "grad_norm": 0.10077881813049316,
      "learning_rate": 5.095764961694922e-05,
      "loss": 12.4606,
      "step": 68
    },
    {
      "epoch": 0.35751295336787564,
      "grad_norm": 0.09882701188325882,
      "learning_rate": 4.810304262187852e-05,
      "loss": 12.4532,
      "step": 69
    },
    {
      "epoch": 0.3626943005181347,
      "grad_norm": 0.10326765477657318,
      "learning_rate": 4.530518418775733e-05,
      "loss": 12.4606,
      "step": 70
    },
    {
      "epoch": 0.36787564766839376,
      "grad_norm": 0.101172536611557,
      "learning_rate": 4.256713373170564e-05,
      "loss": 12.4627,
      "step": 71
    },
    {
      "epoch": 0.37305699481865284,
      "grad_norm": 0.09795894473791122,
      "learning_rate": 3.9891885271697496e-05,
      "loss": 12.4615,
      "step": 72
    },
    {
      "epoch": 0.37823834196891193,
      "grad_norm": 0.10780131071805954,
      "learning_rate": 3.7282364152646297e-05,
      "loss": 12.4575,
      "step": 73
    },
    {
      "epoch": 0.38341968911917096,
      "grad_norm": 0.11272040009498596,
      "learning_rate": 3.4741423847583134e-05,
      "loss": 12.4538,
      "step": 74
    },
    {
      "epoch": 0.38860103626943004,
      "grad_norm": 0.10471823811531067,
      "learning_rate": 3.227184283742591e-05,
      "loss": 12.4525,
      "step": 75
    },
    {
      "epoch": 0.38860103626943004,
      "eval_loss": 12.451294898986816,
      "eval_runtime": 2.7116,
      "eval_samples_per_second": 30.241,
      "eval_steps_per_second": 15.12,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 92484403200.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}