dimasik87 committed
Commit aabd682 · verified · 1 Parent(s): 136a646

Training in progress, step 20, checkpoint

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2d7dfd01f9f4b15c9b4e3334a2f577b9eb974b34c06e9eafdb6b2bbb4668d3be
+ oid sha256:8ae7109242cad5b517fbc68f69af9be79baceb0f9d918bf30c6a456aab36f73d
  size 320082106
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a098dffa38fa9c01aae74eebd50d55d442f41711e114eb99ba341d13be3779bf
+ oid sha256:a6150013c5a55351ef3c08536ad845f39a1b502b4c12c8f5e7721ae4addf788a
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:55cc6a3635d19619caf820a77458fa3cfe7756f7bb9d10678c62733cb46f36c0
+ oid sha256:1e2ed9259304616a8ecebc61c5d000777b2978635f7a705b8d7081c480ce0bde
  size 1064
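
The three binary files above (optimizer.pt, rng_state.pth, scheduler.pt) are tracked with Git LFS, so the diff only touches their pointer files: a spec version line, a sha256 oid, and the blob size in bytes. As a minimal sketch (not part of this commit; the local paths are hypothetical), a downloaded blob can be checked against its pointer like this:

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: str) -> dict:
    # A Git LFS pointer is a small key/value text file:
    # version <spec URL>, oid sha256:<hex digest>, size <bytes>.
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        if line.strip():
            key, _, value = line.partition(" ")
            fields[key] = value.strip()
    return fields

def verify_blob(pointer_path: str, blob_path: str) -> bool:
    # Compare a downloaded blob against the oid/size recorded in its pointer.
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].split(":", 1)[1]
    if Path(blob_path).stat().st_size != int(fields["size"]):
        return False
    digest = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Hypothetical paths: a saved copy of the pointer and the resolved blob.
print(verify_blob("optimizer.pt.pointer", "last-checkpoint/optimizer.pt"))
```
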
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.01288936627282492,
+ "epoch": 0.017185821697099892,
  "eval_steps": 5,
- "global_step": 15,
+ "global_step": 20,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -144,6 +144,49 @@
  "eval_samples_per_second": 9.439,
  "eval_steps_per_second": 4.719,
  "step": 15
+ },
+ {
+ "epoch": 0.013748657357679914,
+ "grad_norm": NaN,
+ "learning_rate": 6.909830056250527e-05,
+ "loss": 0.0,
+ "step": 16
+ },
+ {
+ "epoch": 0.014607948442534909,
+ "grad_norm": NaN,
+ "learning_rate": 4.12214747707527e-05,
+ "loss": 0.0,
+ "step": 17
+ },
+ {
+ "epoch": 0.015467239527389903,
+ "grad_norm": NaN,
+ "learning_rate": 1.9098300562505266e-05,
+ "loss": 0.0,
+ "step": 18
+ },
+ {
+ "epoch": 0.0163265306122449,
+ "grad_norm": NaN,
+ "learning_rate": 4.8943483704846475e-06,
+ "loss": 0.0,
+ "step": 19
+ },
+ {
+ "epoch": 0.017185821697099892,
+ "grad_norm": NaN,
+ "learning_rate": 0.0,
+ "loss": 0.0,
+ "step": 20
+ },
+ {
+ "epoch": 0.017185821697099892,
+ "eval_loss": NaN,
+ "eval_runtime": 51.9397,
+ "eval_samples_per_second": 9.434,
+ "eval_steps_per_second": 4.717,
+ "step": 20
  }
  ],
  "logging_steps": 1,
@@ -158,12 +201,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 9888459569233920.0,
+ "total_flos": 1.318461275897856e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null