{ "best_metric": NaN, "best_model_checkpoint": "miner_id_24/checkpoint-25", "epoch": 0.006265860459287571, "eval_steps": 25, "global_step": 25, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0002506344183715029, "grad_norm": NaN, "learning_rate": 3.3333333333333335e-05, "loss": 0.0, "step": 1 }, { "epoch": 0.0002506344183715029, "eval_loss": NaN, "eval_runtime": 3.1097, "eval_samples_per_second": 16.079, "eval_steps_per_second": 4.181, "step": 1 }, { "epoch": 0.0005012688367430057, "grad_norm": NaN, "learning_rate": 6.666666666666667e-05, "loss": 0.0, "step": 2 }, { "epoch": 0.0007519032551145086, "grad_norm": NaN, "learning_rate": 0.0001, "loss": 0.0, "step": 3 }, { "epoch": 0.0010025376734860115, "grad_norm": NaN, "learning_rate": 9.99571699711836e-05, "loss": 0.0, "step": 4 }, { "epoch": 0.0012531720918575143, "grad_norm": NaN, "learning_rate": 9.982876141412856e-05, "loss": 0.0, "step": 5 }, { "epoch": 0.0015038065102290171, "grad_norm": NaN, "learning_rate": 9.961501876182148e-05, "loss": 0.0, "step": 6 }, { "epoch": 0.0017544409286005202, "grad_norm": NaN, "learning_rate": 9.931634888554937e-05, "loss": 0.0, "step": 7 }, { "epoch": 0.002005075346972023, "grad_norm": NaN, "learning_rate": 9.893332032039701e-05, "loss": 0.0, "step": 8 }, { "epoch": 0.002255709765343526, "grad_norm": NaN, "learning_rate": 9.846666218300807e-05, "loss": 0.0, "step": 9 }, { "epoch": 0.0025063441837150286, "grad_norm": NaN, "learning_rate": 9.791726278367022e-05, "loss": 0.0, "step": 10 }, { "epoch": 0.0027569786020865314, "grad_norm": NaN, "learning_rate": 9.728616793536588e-05, "loss": 0.0, "step": 11 }, { "epoch": 0.0030076130204580343, "grad_norm": NaN, "learning_rate": 9.657457896300791e-05, "loss": 0.0, "step": 12 }, { "epoch": 0.003258247438829537, "grad_norm": NaN, "learning_rate": 9.578385041664925e-05, "loss": 0.0, "step": 13 }, { "epoch": 0.0035088818572010403, "grad_norm": NaN, "learning_rate": 9.491548749301997e-05, "loss": 0.0, "step": 14 }, { "epoch": 0.003759516275572543, "grad_norm": NaN, "learning_rate": 9.397114317029975e-05, "loss": 0.0, "step": 15 }, { "epoch": 0.004010150693944046, "grad_norm": NaN, "learning_rate": 9.295261506157986e-05, "loss": 0.0, "step": 16 }, { "epoch": 0.004260785112315549, "grad_norm": NaN, "learning_rate": 9.186184199300464e-05, "loss": 0.0, "step": 17 }, { "epoch": 0.004511419530687052, "grad_norm": NaN, "learning_rate": 9.070090031310558e-05, "loss": 0.0, "step": 18 }, { "epoch": 0.004762053949058554, "grad_norm": NaN, "learning_rate": 8.947199994035401e-05, "loss": 0.0, "step": 19 }, { "epoch": 0.005012688367430057, "grad_norm": NaN, "learning_rate": 8.817748015645558e-05, "loss": 0.0, "step": 20 }, { "epoch": 0.00526332278580156, "grad_norm": NaN, "learning_rate": 8.681980515339464e-05, "loss": 0.0, "step": 21 }, { "epoch": 0.005513957204173063, "grad_norm": NaN, "learning_rate": 8.540155934270471e-05, "loss": 0.0, "step": 22 }, { "epoch": 0.005764591622544566, "grad_norm": NaN, "learning_rate": 8.392544243589427e-05, "loss": 0.0, "step": 23 }, { "epoch": 0.0060152260409160685, "grad_norm": NaN, "learning_rate": 8.239426430539243e-05, "loss": 0.0, "step": 24 }, { "epoch": 0.006265860459287571, "grad_norm": NaN, "learning_rate": 8.081093963579707e-05, "loss": 0.0, "step": 25 }, { "epoch": 0.006265860459287571, "eval_loss": NaN, "eval_runtime": 3.0769, "eval_samples_per_second": 16.25, "eval_steps_per_second": 4.225, "step": 25 } ], "logging_steps": 1, "max_steps": 75, 
"num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 25, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 1, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 1.114626154561536e+17, "train_batch_size": 1, "trial_name": null, "trial_params": null }