{
  "best_metric": 8.905207633972168,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.03555634569657103,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0007111269139314207,
      "grad_norm": 4.391914367675781,
      "learning_rate": 5e-05,
      "loss": 10.2843,
      "step": 1
    },
    {
      "epoch": 0.0007111269139314207,
      "eval_loss": 10.709673881530762,
      "eval_runtime": 0.4892,
      "eval_samples_per_second": 102.216,
      "eval_steps_per_second": 26.576,
      "step": 1
    },
    {
      "epoch": 0.0014222538278628415,
      "grad_norm": 4.697262763977051,
      "learning_rate": 0.0001,
      "loss": 10.4354,
      "step": 2
    },
    {
      "epoch": 0.002133380741794262,
      "grad_norm": 4.8142218589782715,
      "learning_rate": 9.990365154573717e-05,
      "loss": 10.5401,
      "step": 3
    },
    {
      "epoch": 0.002844507655725683,
      "grad_norm": 4.4203386306762695,
      "learning_rate": 9.961501876182148e-05,
      "loss": 10.6847,
      "step": 4
    },
    {
      "epoch": 0.0035556345696571034,
      "grad_norm": 4.616575717926025,
      "learning_rate": 9.913533761814537e-05,
      "loss": 10.5382,
      "step": 5
    },
    {
      "epoch": 0.004266761483588524,
      "grad_norm": 4.453695297241211,
      "learning_rate": 9.846666218300807e-05,
      "loss": 10.493,
      "step": 6
    },
    {
      "epoch": 0.004977888397519945,
      "grad_norm": 4.64595365524292,
      "learning_rate": 9.761185582727977e-05,
      "loss": 10.4906,
      "step": 7
    },
    {
      "epoch": 0.005689015311451366,
      "grad_norm": 4.16403341293335,
      "learning_rate": 9.657457896300791e-05,
      "loss": 10.208,
      "step": 8
    },
    {
      "epoch": 0.006400142225382786,
      "grad_norm": 4.626234531402588,
      "learning_rate": 9.535927336897098e-05,
      "loss": 10.3911,
      "step": 9
    },
    {
      "epoch": 0.007111269139314207,
      "grad_norm": 4.048719882965088,
      "learning_rate": 9.397114317029975e-05,
      "loss": 10.3145,
      "step": 10
    },
    {
      "epoch": 0.007822396053245627,
      "grad_norm": 4.476191997528076,
      "learning_rate": 9.241613255361455e-05,
      "loss": 10.4335,
      "step": 11
    },
    {
      "epoch": 0.008533522967177049,
      "grad_norm": 4.421826362609863,
      "learning_rate": 9.070090031310558e-05,
      "loss": 10.2073,
      "step": 12
    },
    {
      "epoch": 0.00924464988110847,
      "grad_norm": 3.3022351264953613,
      "learning_rate": 8.883279133655399e-05,
      "loss": 9.3604,
      "step": 13
    },
    {
      "epoch": 0.00995577679503989,
      "grad_norm": 3.388110399246216,
      "learning_rate": 8.681980515339464e-05,
      "loss": 9.4965,
      "step": 14
    },
    {
      "epoch": 0.01066690370897131,
      "grad_norm": 3.273960590362549,
      "learning_rate": 8.467056167950311e-05,
      "loss": 9.4797,
      "step": 15
    },
    {
      "epoch": 0.011378030622902732,
      "grad_norm": 3.3199994564056396,
      "learning_rate": 8.239426430539243e-05,
      "loss": 9.495,
      "step": 16
    },
    {
      "epoch": 0.012089157536834151,
      "grad_norm": 3.0522372722625732,
      "learning_rate": 8.000066048588211e-05,
      "loss": 9.6073,
      "step": 17
    },
    {
      "epoch": 0.012800284450765573,
      "grad_norm": 2.893317222595215,
      "learning_rate": 7.75e-05,
      "loss": 9.8366,
      "step": 18
    },
    {
      "epoch": 0.013511411364696994,
      "grad_norm": 2.8836116790771484,
      "learning_rate": 7.490299105985507e-05,
      "loss": 9.3707,
      "step": 19
    },
    {
      "epoch": 0.014222538278628414,
      "grad_norm": 2.862773895263672,
      "learning_rate": 7.222075445642904e-05,
      "loss": 9.4298,
      "step": 20
    },
    {
      "epoch": 0.014933665192559835,
      "grad_norm": 2.72392201423645,
      "learning_rate": 6.946477593864228e-05,
      "loss": 9.7437,
      "step": 21
    },
    {
      "epoch": 0.015644792106491254,
      "grad_norm": 2.617037534713745,
      "learning_rate": 6.664685702961344e-05,
      "loss": 9.5262,
      "step": 22
    },
    {
      "epoch": 0.016355919020422677,
      "grad_norm": 2.5886077880859375,
      "learning_rate": 6.377906449072578e-05,
      "loss": 9.6284,
      "step": 23
    },
    {
      "epoch": 0.017067045934354097,
      "grad_norm": 2.6617650985717773,
      "learning_rate": 6.087367864990233e-05,
      "loss": 9.7558,
      "step": 24
    },
    {
      "epoch": 0.017778172848285517,
      "grad_norm": 2.827671766281128,
      "learning_rate": 5.794314081535644e-05,
      "loss": 9.7337,
      "step": 25
    },
    {
      "epoch": 0.017778172848285517,
      "eval_loss": 9.181326866149902,
      "eval_runtime": 0.4688,
      "eval_samples_per_second": 106.656,
      "eval_steps_per_second": 27.731,
      "step": 25
    },
    {
      "epoch": 0.01848929976221694,
      "grad_norm": 2.6871771812438965,
      "learning_rate": 5.500000000000001e-05,
      "loss": 8.7595,
      "step": 26
    },
    {
      "epoch": 0.01920042667614836,
      "grad_norm": 2.3026132583618164,
      "learning_rate": 5.205685918464356e-05,
      "loss": 9.0285,
      "step": 27
    },
    {
      "epoch": 0.01991155359007978,
      "grad_norm": 2.0922391414642334,
      "learning_rate": 4.912632135009769e-05,
      "loss": 9.0976,
      "step": 28
    },
    {
      "epoch": 0.0206226805040112,
      "grad_norm": 2.124882936477661,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 9.0691,
      "step": 29
    },
    {
      "epoch": 0.02133380741794262,
      "grad_norm": 2.0727596282958984,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 9.2239,
      "step": 30
    },
    {
      "epoch": 0.02204493433187404,
      "grad_norm": 1.893613576889038,
      "learning_rate": 4.053522406135775e-05,
      "loss": 9.3525,
      "step": 31
    },
    {
      "epoch": 0.022756061245805464,
      "grad_norm": 1.9661246538162231,
      "learning_rate": 3.777924554357096e-05,
      "loss": 9.2901,
      "step": 32
    },
    {
      "epoch": 0.023467188159736883,
      "grad_norm": 1.8640170097351074,
      "learning_rate": 3.509700894014496e-05,
      "loss": 9.332,
      "step": 33
    },
    {
      "epoch": 0.024178315073668303,
      "grad_norm": 1.9543989896774292,
      "learning_rate": 3.250000000000001e-05,
      "loss": 9.3554,
      "step": 34
    },
    {
      "epoch": 0.024889441987599726,
      "grad_norm": 1.9747636318206787,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 9.4425,
      "step": 35
    },
    {
      "epoch": 0.025600568901531146,
      "grad_norm": 2.04024600982666,
      "learning_rate": 2.760573569460757e-05,
      "loss": 9.7109,
      "step": 36
    },
    {
      "epoch": 0.026311695815462565,
      "grad_norm": 2.1174285411834717,
      "learning_rate": 2.53294383204969e-05,
      "loss": 9.3722,
      "step": 37
    },
    {
      "epoch": 0.027022822729393988,
      "grad_norm": 1.8444091081619263,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 8.786,
      "step": 38
    },
    {
      "epoch": 0.027733949643325408,
      "grad_norm": 1.920892596244812,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 8.958,
      "step": 39
    },
    {
      "epoch": 0.028445076557256827,
      "grad_norm": 1.868925929069519,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 8.7157,
      "step": 40
    },
    {
      "epoch": 0.02915620347118825,
      "grad_norm": 1.63834547996521,
      "learning_rate": 1.758386744638546e-05,
      "loss": 8.9606,
      "step": 41
    },
    {
      "epoch": 0.02986733038511967,
      "grad_norm": 1.5350797176361084,
      "learning_rate": 1.602885682970026e-05,
      "loss": 9.0381,
      "step": 42
    },
    {
      "epoch": 0.03057845729905109,
      "grad_norm": 1.5533965826034546,
      "learning_rate": 1.464072663102903e-05,
      "loss": 9.0667,
      "step": 43
    },
    {
      "epoch": 0.03128958421298251,
      "grad_norm": 1.6352142095565796,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 9.1235,
      "step": 44
    },
    {
      "epoch": 0.03200071112691393,
      "grad_norm": 1.6297922134399414,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 9.1851,
      "step": 45
    },
    {
      "epoch": 0.032711838040845355,
      "grad_norm": 1.6186994314193726,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 9.4025,
      "step": 46
    },
    {
      "epoch": 0.03342296495477677,
      "grad_norm": 1.5999581813812256,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 9.324,
      "step": 47
    },
    {
      "epoch": 0.034134091868708194,
      "grad_norm": 1.7737746238708496,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 9.326,
      "step": 48
    },
    {
      "epoch": 0.03484521878263962,
      "grad_norm": 1.9884766340255737,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 9.4523,
      "step": 49
    },
    {
      "epoch": 0.03555634569657103,
      "grad_norm": 2.1337687969207764,
      "learning_rate": 1e-05,
      "loss": 9.5157,
      "step": 50
    },
    {
      "epoch": 0.03555634569657103,
      "eval_loss": 8.905207633972168,
      "eval_runtime": 0.4716,
      "eval_samples_per_second": 106.032,
      "eval_steps_per_second": 27.568,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 232013797785600.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}