{
  "best_metric": 0.9949109414758269,
  "best_model_checkpoint": "dit-base-finetuned-rvlcdip-finetuned-custom-first/checkpoint-237",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 237,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.13,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 1.9058,
      "step": 10
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.166666666666667e-05,
      "loss": 1.6887,
      "step": 20
    },
    {
      "epoch": 0.38,
      "learning_rate": 4.8591549295774653e-05,
      "loss": 1.4773,
      "step": 30
    },
    {
      "epoch": 0.51,
      "learning_rate": 4.624413145539906e-05,
      "loss": 1.1597,
      "step": 40
    },
    {
      "epoch": 0.63,
      "learning_rate": 4.389671361502348e-05,
      "loss": 0.792,
      "step": 50
    },
    {
      "epoch": 0.76,
      "learning_rate": 4.154929577464789e-05,
      "loss": 0.5423,
      "step": 60
    },
    {
      "epoch": 0.89,
      "learning_rate": 3.9201877934272305e-05,
      "loss": 0.3686,
      "step": 70
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9745547073791349,
      "eval_loss": 0.2355559766292572,
      "eval_runtime": 120.4299,
      "eval_samples_per_second": 19.58,
      "eval_steps_per_second": 0.614,
      "step": 79
    },
    {
      "epoch": 1.01,
      "learning_rate": 3.6854460093896714e-05,
      "loss": 0.2791,
      "step": 80
    },
    {
      "epoch": 1.14,
      "learning_rate": 3.450704225352113e-05,
      "loss": 0.2155,
      "step": 90
    },
    {
      "epoch": 1.27,
      "learning_rate": 3.215962441314554e-05,
      "loss": 0.1837,
      "step": 100
    },
    {
      "epoch": 1.39,
      "learning_rate": 2.9812206572769952e-05,
      "loss": 0.1594,
      "step": 110
    },
    {
      "epoch": 1.52,
      "learning_rate": 2.746478873239437e-05,
      "loss": 0.1428,
      "step": 120
    },
    {
      "epoch": 1.65,
      "learning_rate": 2.511737089201878e-05,
      "loss": 0.1049,
      "step": 130
    },
    {
      "epoch": 1.77,
      "learning_rate": 2.2769953051643194e-05,
      "loss": 0.0977,
      "step": 140
    },
    {
      "epoch": 1.9,
      "learning_rate": 2.0422535211267607e-05,
      "loss": 0.0891,
      "step": 150
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9936386768447837,
      "eval_loss": 0.07920637726783752,
      "eval_runtime": 116.9591,
      "eval_samples_per_second": 20.161,
      "eval_steps_per_second": 0.633,
      "step": 158
    },
    {
      "epoch": 2.03,
      "learning_rate": 1.807511737089202e-05,
      "loss": 0.0908,
      "step": 160
    },
    {
      "epoch": 2.15,
      "learning_rate": 1.5727699530516433e-05,
      "loss": 0.0788,
      "step": 170
    },
    {
      "epoch": 2.28,
      "learning_rate": 1.3380281690140845e-05,
      "loss": 0.0866,
      "step": 180
    },
    {
      "epoch": 2.41,
      "learning_rate": 1.1032863849765258e-05,
      "loss": 0.0754,
      "step": 190
    },
    {
      "epoch": 2.53,
      "learning_rate": 8.685446009389673e-06,
      "loss": 0.0684,
      "step": 200
    },
    {
      "epoch": 2.66,
      "learning_rate": 6.338028169014085e-06,
      "loss": 0.0585,
      "step": 210
    },
    {
      "epoch": 2.78,
      "learning_rate": 3.990610328638498e-06,
      "loss": 0.0715,
      "step": 220
    },
    {
      "epoch": 2.91,
      "learning_rate": 1.643192488262911e-06,
      "loss": 0.0652,
      "step": 230
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9949109414758269,
      "eval_loss": 0.05667030066251755,
      "eval_runtime": 116.045,
      "eval_samples_per_second": 20.32,
      "eval_steps_per_second": 0.638,
      "step": 237
    },
    {
      "epoch": 3.0,
      "step": 237,
      "total_flos": 2.349294757953454e+18,
      "train_loss": 0.41549516755317334,
      "train_runtime": 2328.1847,
      "train_samples_per_second": 13.02,
      "train_steps_per_second": 0.102
    }
  ],
  "logging_steps": 10,
  "max_steps": 237,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 2.349294757953454e+18,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}