{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.7167381974248928,
"eval_steps": 500,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08583690987124463,
"grad_norm": 0.13883593678474426,
"learning_rate": 0.0001998960663781063,
"loss": 0.829,
"step": 100
},
{
"epoch": 0.17167381974248927,
"grad_norm": 0.13720104098320007,
"learning_rate": 0.00019906589321760313,
"loss": 0.648,
"step": 200
},
{
"epoch": 0.2575107296137339,
"grad_norm": 0.1670483946800232,
"learning_rate": 0.00019741244594178857,
"loss": 0.587,
"step": 300
},
{
"epoch": 0.34334763948497854,
"grad_norm": 0.16993218660354614,
"learning_rate": 0.00019494946530743566,
"loss": 0.5547,
"step": 400
},
{
"epoch": 0.4291845493562232,
"grad_norm": 0.16408105194568634,
"learning_rate": 0.00019169741959214142,
"loss": 0.5319,
"step": 500
},
{
"epoch": 0.5150214592274678,
"grad_norm": 0.1872694492340088,
"learning_rate": 0.0001876833344953899,
"loss": 0.5147,
"step": 600
},
{
"epoch": 0.6008583690987125,
"grad_norm": 0.18225158751010895,
"learning_rate": 0.0001829405685450202,
"loss": 0.5009,
"step": 700
},
{
"epoch": 0.6866952789699571,
"grad_norm": 0.1850147843360901,
"learning_rate": 0.00017750853587555535,
"loss": 0.4866,
"step": 800
},
{
"epoch": 0.7725321888412017,
"grad_norm": 0.20939144492149353,
"learning_rate": 0.00017143237868220553,
"loss": 0.4721,
"step": 900
},
{
"epoch": 0.8583690987124464,
"grad_norm": 0.21306931972503662,
"learning_rate": 0.00016476259207257407,
"loss": 0.4626,
"step": 1000
},
{
"epoch": 0.944206008583691,
"grad_norm": 0.13636501133441925,
"learning_rate": 0.0001575546044336872,
"loss": 0.4505,
"step": 1100
},
{
"epoch": 1.0300429184549356,
"grad_norm": 0.21951957046985626,
"learning_rate": 0.00014986831680165167,
"loss": 0.4371,
"step": 1200
},
{
"epoch": 1.1158798283261802,
"grad_norm": 0.183538556098938,
"learning_rate": 0.00014176760506194906,
"loss": 0.4241,
"step": 1300
},
{
"epoch": 1.201716738197425,
"grad_norm": 0.27745020389556885,
"learning_rate": 0.00013331978911726523,
"loss": 0.4247,
"step": 1400
},
{
"epoch": 1.2875536480686696,
"grad_norm": 0.16314753890037537,
"learning_rate": 0.00012459507343426653,
"loss": 0.4142,
"step": 1500
},
{
"epoch": 1.3733905579399142,
"grad_norm": 0.13504645228385925,
"learning_rate": 0.00011566596361858548,
"loss": 0.3982,
"step": 1600
},
{
"epoch": 1.4592274678111588,
"grad_norm": 0.21324139833450317,
"learning_rate": 0.0001066066638664925,
"loss": 0.4026,
"step": 1700
},
{
"epoch": 1.5450643776824036,
"grad_norm": 0.3690386116504669,
"learning_rate": 9.749246030065306e-05,
"loss": 0.3991,
"step": 1800
},
{
"epoch": 1.6309012875536482,
"grad_norm": 0.32246139645576477,
"learning_rate": 8.839909531467737e-05,
"loss": 0.3949,
"step": 1900
},
{
"epoch": 1.7167381974248928,
"grad_norm": 0.14840121567249298,
"learning_rate": 7.940213812589018e-05,
"loss": 0.3874,
"step": 2000
}
],
"logging_steps": 100,
"max_steps": 3495,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 7.37626356496466e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}