{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.10552975939214859,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005276487969607429,
"grad_norm": 0.6827425360679626,
"learning_rate": 2.9947235120303926e-05,
"loss": 2.207,
"step": 50
},
{
"epoch": 0.010552975939214858,
"grad_norm": 0.5957860350608826,
"learning_rate": 2.989447024060785e-05,
"loss": 0.2398,
"step": 100
},
{
"epoch": 0.015829463908822287,
"grad_norm": 0.46854081749916077,
"learning_rate": 2.9841705360911777e-05,
"loss": 0.0525,
"step": 150
},
{
"epoch": 0.021105951878429716,
"grad_norm": 0.10302311927080154,
"learning_rate": 2.9788940481215702e-05,
"loss": 0.0037,
"step": 200
},
{
"epoch": 0.026382439848037148,
"grad_norm": 0.01405419036746025,
"learning_rate": 2.9736175601519628e-05,
"loss": 0.001,
"step": 250
},
{
"epoch": 0.031658927817644573,
"grad_norm": 0.12790632247924805,
"learning_rate": 2.9683410721823553e-05,
"loss": 0.0007,
"step": 300
},
{
"epoch": 0.036935415787252006,
"grad_norm": 0.0026043676771223545,
"learning_rate": 2.9630645842127478e-05,
"loss": 0.0004,
"step": 350
},
{
"epoch": 0.04221190375685943,
"grad_norm": 0.030596332624554634,
"learning_rate": 2.9577880962431404e-05,
"loss": 0.0016,
"step": 400
},
{
"epoch": 0.047488391726466864,
"grad_norm": 0.04252925142645836,
"learning_rate": 2.952511608273533e-05,
"loss": 0.0008,
"step": 450
},
{
"epoch": 0.052764879696074296,
"grad_norm": 0.03828505799174309,
"learning_rate": 2.947235120303926e-05,
"loss": 0.0003,
"step": 500
},
{
"epoch": 0.05804136766568172,
"grad_norm": 0.006123501807451248,
"learning_rate": 2.9419586323343186e-05,
"loss": 0.0004,
"step": 550
},
{
"epoch": 0.06331785563528915,
"grad_norm": 0.0026047523133456707,
"learning_rate": 2.9366821443647112e-05,
"loss": 0.0002,
"step": 600
},
{
"epoch": 0.06859434360489658,
"grad_norm": 0.0018419412663206458,
"learning_rate": 2.9314056563951037e-05,
"loss": 0.0002,
"step": 650
},
{
"epoch": 0.07387083157450401,
"grad_norm": 0.03250932693481445,
"learning_rate": 2.9261291684254962e-05,
"loss": 0.0009,
"step": 700
},
{
"epoch": 0.07914731954411144,
"grad_norm": 0.001018165610730648,
"learning_rate": 2.9208526804558888e-05,
"loss": 0.0003,
"step": 750
},
{
"epoch": 0.08442380751371886,
"grad_norm": 0.0010480296332389116,
"learning_rate": 2.9155761924862813e-05,
"loss": 0.0002,
"step": 800
},
{
"epoch": 0.0897002954833263,
"grad_norm": 0.000887015659827739,
"learning_rate": 2.910299704516674e-05,
"loss": 0.0001,
"step": 850
},
{
"epoch": 0.09497678345293373,
"grad_norm": 0.02170419879257679,
"learning_rate": 2.9050232165470664e-05,
"loss": 0.0001,
"step": 900
},
{
"epoch": 0.10025327142254116,
"grad_norm": 0.02256314642727375,
"learning_rate": 2.899746728577459e-05,
"loss": 0.0002,
"step": 950
},
{
"epoch": 0.10552975939214859,
"grad_norm": 0.007039123214781284,
"learning_rate": 2.8944702406078515e-05,
"loss": 0.0001,
"step": 1000
}
],
"logging_steps": 50,
"max_steps": 28428,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 200,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 128,
"trial_name": null,
"trial_params": null
}