{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 14.818181818181818,
  "eval_steps": 500,
  "global_step": 135,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.18,
      "learning_rate": 2.3529411764705884e-05,
      "loss": 1.1098,
      "step": 2
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.705882352941177e-05,
      "loss": 1.1831,
      "step": 4
    },
    {
      "epoch": 0.55,
      "learning_rate": 7.058823529411765e-05,
      "loss": 1.1273,
      "step": 6
    },
    {
      "epoch": 0.73,
      "learning_rate": 9.411764705882353e-05,
      "loss": 1.1295,
      "step": 8
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.00011764705882352942,
      "loss": 1.0906,
      "step": 10
    },
    {
      "epoch": 1.27,
      "learning_rate": 0.0001411764705882353,
      "loss": 1.0239,
      "step": 12
    },
    {
      "epoch": 1.45,
      "learning_rate": 0.0001647058823529412,
      "loss": 0.9996,
      "step": 14
    },
    {
      "epoch": 1.64,
      "learning_rate": 0.00018823529411764707,
      "loss": 0.8502,
      "step": 16
    },
    {
      "epoch": 1.82,
      "learning_rate": 0.00019864864864864865,
      "loss": 0.8154,
      "step": 18
    },
    {
      "epoch": 2.18,
      "learning_rate": 0.00019594594594594594,
      "loss": 0.71,
      "step": 20
    },
    {
      "epoch": 2.36,
      "learning_rate": 0.00019324324324324326,
      "loss": 0.7255,
      "step": 22
    },
    {
      "epoch": 2.55,
      "learning_rate": 0.00019054054054054055,
      "loss": 0.6063,
      "step": 24
    },
    {
      "epoch": 2.73,
      "learning_rate": 0.00018783783783783784,
      "loss": 0.6039,
      "step": 26
    },
    {
      "epoch": 3.09,
      "learning_rate": 0.00018513513513513513,
      "loss": 0.5068,
      "step": 28
    },
    {
      "epoch": 3.27,
      "learning_rate": 0.00018243243243243245,
      "loss": 0.5053,
      "step": 30
    },
    {
      "epoch": 3.45,
      "learning_rate": 0.00017972972972972974,
      "loss": 0.4882,
      "step": 32
    },
    {
      "epoch": 3.64,
      "learning_rate": 0.00017702702702702703,
      "loss": 0.4442,
      "step": 34
    },
    {
      "epoch": 3.82,
      "learning_rate": 0.00017432432432432432,
      "loss": 0.5363,
      "step": 36
    },
    {
      "epoch": 4.18,
      "learning_rate": 0.00017162162162162164,
      "loss": 0.4253,
      "step": 38
    },
    {
      "epoch": 4.36,
      "learning_rate": 0.00016891891891891893,
      "loss": 0.408,
      "step": 40
    },
    {
      "epoch": 4.55,
      "learning_rate": 0.00016621621621621622,
      "loss": 0.3472,
      "step": 42
    },
    {
      "epoch": 4.73,
      "learning_rate": 0.0001635135135135135,
      "loss": 0.354,
      "step": 44
    },
    {
      "epoch": 5.09,
      "learning_rate": 0.00016081081081081083,
      "loss": 0.3762,
      "step": 46
    },
    {
      "epoch": 5.27,
      "learning_rate": 0.00015810810810810812,
      "loss": 0.3216,
      "step": 48
    },
    {
      "epoch": 5.45,
      "learning_rate": 0.0001554054054054054,
      "loss": 0.2651,
      "step": 50
    },
    {
      "epoch": 5.64,
      "learning_rate": 0.0001527027027027027,
      "loss": 0.2992,
      "step": 52
    },
    {
      "epoch": 5.82,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.282,
      "step": 54
    },
    {
      "epoch": 6.18,
      "learning_rate": 0.0001472972972972973,
      "loss": 0.239,
      "step": 56
    },
    {
      "epoch": 6.36,
      "learning_rate": 0.00014459459459459462,
      "loss": 0.2548,
      "step": 58
    },
    {
      "epoch": 6.55,
      "learning_rate": 0.00014189189189189188,
      "loss": 0.2259,
      "step": 60
    },
    {
      "epoch": 6.73,
      "learning_rate": 0.0001391891891891892,
      "loss": 0.2364,
      "step": 62
    },
    {
      "epoch": 7.09,
      "learning_rate": 0.0001364864864864865,
      "loss": 0.1785,
      "step": 64
    },
    {
      "epoch": 7.27,
      "learning_rate": 0.0001337837837837838,
      "loss": 0.1864,
      "step": 66
    },
    {
      "epoch": 7.45,
      "learning_rate": 0.00013108108108108107,
      "loss": 0.1786,
      "step": 68
    },
    {
      "epoch": 7.64,
      "learning_rate": 0.0001283783783783784,
      "loss": 0.1585,
      "step": 70
    },
    {
      "epoch": 7.82,
      "learning_rate": 0.00012567567567567568,
      "loss": 0.1964,
      "step": 72
    },
    {
      "epoch": 8.18,
      "learning_rate": 0.000122972972972973,
      "loss": 0.1177,
      "step": 74
    },
    {
      "epoch": 8.36,
      "learning_rate": 0.00012027027027027027,
      "loss": 0.1259,
      "step": 76
    },
    {
      "epoch": 8.55,
      "learning_rate": 0.00011756756756756758,
      "loss": 0.1448,
      "step": 78
    },
    {
      "epoch": 8.73,
      "learning_rate": 0.00011486486486486487,
      "loss": 0.1562,
      "step": 80
    },
    {
      "epoch": 9.09,
      "learning_rate": 0.00011216216216216217,
      "loss": 0.1193,
      "step": 82
    },
    {
      "epoch": 9.27,
      "learning_rate": 0.00010945945945945946,
      "loss": 0.1375,
      "step": 84
    },
    {
      "epoch": 9.45,
      "learning_rate": 0.00010675675675675677,
      "loss": 0.1109,
      "step": 86
    },
    {
      "epoch": 9.64,
      "learning_rate": 0.00010405405405405406,
      "loss": 0.0994,
      "step": 88
    },
    {
      "epoch": 9.82,
      "learning_rate": 0.00010135135135135136,
      "loss": 0.0918,
      "step": 90
    },
    {
      "epoch": 10.18,
      "learning_rate": 9.864864864864865e-05,
      "loss": 0.0779,
      "step": 92
    },
    {
      "epoch": 10.36,
      "learning_rate": 9.594594594594595e-05,
      "loss": 0.0748,
      "step": 94
    },
    {
      "epoch": 10.55,
      "learning_rate": 9.324324324324324e-05,
      "loss": 0.087,
      "step": 96
    },
    {
      "epoch": 10.73,
      "learning_rate": 9.054054054054055e-05,
      "loss": 0.0914,
      "step": 98
    },
    {
      "epoch": 11.09,
      "learning_rate": 8.783783783783784e-05,
      "loss": 0.0768,
      "step": 100
    },
    {
      "epoch": 11.27,
      "learning_rate": 8.513513513513514e-05,
      "loss": 0.0619,
      "step": 102
    },
    {
      "epoch": 11.45,
      "learning_rate": 8.243243243243243e-05,
      "loss": 0.0764,
      "step": 104
    },
    {
      "epoch": 11.64,
      "learning_rate": 7.972972972972974e-05,
      "loss": 0.0531,
      "step": 106
    },
    {
      "epoch": 11.82,
      "learning_rate": 7.702702702702703e-05,
      "loss": 0.0571,
      "step": 108
    },
    {
      "epoch": 12.18,
      "learning_rate": 7.432432432432433e-05,
      "loss": 0.0513,
      "step": 110
    },
    {
      "epoch": 12.36,
      "learning_rate": 7.162162162162162e-05,
      "loss": 0.0478,
      "step": 112
    },
    {
      "epoch": 12.55,
      "learning_rate": 6.891891891891892e-05,
      "loss": 0.0532,
      "step": 114
    },
    {
      "epoch": 12.73,
      "learning_rate": 6.621621621621621e-05,
      "loss": 0.0535,
      "step": 116
    },
    {
      "epoch": 13.09,
      "learning_rate": 6.351351351351352e-05,
      "loss": 0.0415,
      "step": 118
    },
    {
      "epoch": 13.27,
      "learning_rate": 6.0810810810810814e-05,
      "loss": 0.0449,
      "step": 120
    },
    {
      "epoch": 13.45,
      "learning_rate": 5.8108108108108105e-05,
      "loss": 0.0507,
      "step": 122
    },
    {
      "epoch": 13.64,
      "learning_rate": 5.540540540540541e-05,
      "loss": 0.0411,
      "step": 124
    },
    {
      "epoch": 13.82,
      "learning_rate": 5.27027027027027e-05,
      "loss": 0.0397,
      "step": 126
    },
    {
      "epoch": 14.18,
      "learning_rate": 5e-05,
      "loss": 0.0386,
      "step": 128
    },
    {
      "epoch": 14.36,
      "learning_rate": 4.72972972972973e-05,
      "loss": 0.0399,
      "step": 130
    },
    {
      "epoch": 14.55,
      "learning_rate": 4.4594594594594596e-05,
      "loss": 0.0332,
      "step": 132
    },
    {
      "epoch": 14.73,
      "learning_rate": 4.189189189189189e-05,
      "loss": 0.0376,
      "step": 134
    }
  ],
  "logging_steps": 2,
  "max_steps": 165,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 15,
  "save_steps": 500,
  "total_flos": 1.250373390336e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}