{
  "best_metric": 0.35955047607421875,
  "best_model_checkpoint": "/tmp/model/checkpoint-2025",
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 2025,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "learning_rate": 4.93583415597236e-06,
      "loss": 0.5941,
      "step": 100
    },
    {
      "epoch": 0.1,
      "learning_rate": 9.87166831194472e-06,
      "loss": 0.5625,
      "step": 200
    },
    {
      "epoch": 0.15,
      "learning_rate": 1.4807502467917079e-05,
      "loss": 0.5233,
      "step": 300
    },
    {
      "epoch": 0.2,
      "learning_rate": 1.974333662388944e-05,
      "loss": 0.443,
      "step": 400
    },
    {
      "epoch": 0.25,
      "learning_rate": 2.46791707798618e-05,
      "loss": 0.4222,
      "step": 500
    },
    {
      "epoch": 0.3,
      "learning_rate": 2.9615004935834158e-05,
      "loss": 0.4242,
      "step": 600
    },
    {
      "epoch": 0.35,
      "learning_rate": 3.455083909180652e-05,
      "loss": 0.4401,
      "step": 700
    },
    {
      "epoch": 0.4,
      "learning_rate": 3.948667324777888e-05,
      "loss": 0.3815,
      "step": 800
    },
    {
      "epoch": 0.44,
      "learning_rate": 4.4422507403751234e-05,
      "loss": 0.3833,
      "step": 900
    },
    {
      "epoch": 0.49,
      "learning_rate": 4.93583415597236e-05,
      "loss": 0.3968,
      "step": 1000
    },
    {
      "epoch": 0.54,
      "learning_rate": 4.952260755048288e-05,
      "loss": 0.4156,
      "step": 1100
    },
    {
      "epoch": 0.59,
      "learning_rate": 4.8973880597014924e-05,
      "loss": 0.4019,
      "step": 1200
    },
    {
      "epoch": 0.64,
      "learning_rate": 4.842515364354697e-05,
      "loss": 0.3999,
      "step": 1300
    },
    {
      "epoch": 0.69,
      "learning_rate": 4.7876426690079015e-05,
      "loss": 0.4215,
      "step": 1400
    },
    {
      "epoch": 0.74,
      "learning_rate": 4.7327699736611064e-05,
      "loss": 0.3855,
      "step": 1500
    },
    {
      "epoch": 0.79,
      "learning_rate": 4.677897278314311e-05,
      "loss": 0.3786,
      "step": 1600
    },
    {
      "epoch": 0.84,
      "learning_rate": 4.6230245829675155e-05,
      "loss": 0.415,
      "step": 1700
    },
    {
      "epoch": 0.89,
      "learning_rate": 4.5681518876207204e-05,
      "loss": 0.4079,
      "step": 1800
    },
    {
      "epoch": 0.94,
      "learning_rate": 4.5132791922739246e-05,
      "loss": 0.4044,
      "step": 1900
    },
    {
      "epoch": 0.99,
      "learning_rate": 4.4584064969271295e-05,
      "loss": 0.3671,
      "step": 2000
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.839,
      "eval_auc": 0.8849468102956475,
      "eval_f1": 0.5477528089887641,
      "eval_loss": 0.35955047607421875,
      "eval_precision": 0.8986175115207373,
      "eval_recall": 0.3939393939393939,
      "eval_runtime": 897.0094,
      "eval_samples_per_second": 4.459,
      "eval_steps_per_second": 0.279,
      "step": 2025
    }
  ],
  "logging_steps": 100,
  "max_steps": 10125,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 1065599774208000.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}