kooff11 committed on
Commit 51bbf43
1 Parent(s): 50db80d

Training in progress, step 20, checkpoint

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:76d432bdfd75388e2f2bf97e41cbffbd758bca422a8064692ccd7123cc35f5da
+ oid sha256:b3b5bb327faeac62e7892e785eb64bce846806695269e440cb38927f5a0c61a3
  size 61226900
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0baaed0401940e242338532f7a22ba84c70eacc745fe31806e46807a8617d14a
+ oid sha256:6d85e3216c9d23ea0904884a218dc46d117c269eb9122a8a493a3c4d28bba42f
  size 14512
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e1b61f4c32eeed29fe12cabb7f194fe85997dd2fe781338dc8ff5784b7ccda29
+ oid sha256:63fabb8ebd45581c1e1dd8e5806076cd4945d840d40d3cdd5ee1127201d878a5
  size 14512
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ff2736979009751c0c6b0ddcc5f6544d6f723aa752b4798eab0b70fb76cf0083
+ oid sha256:532138a5ca880d8da393ae449e5715b2766def36b8838785ca08d07228b119b7
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.44077134986225897,
+  "epoch": 0.5876951331496786,
   "eval_steps": 5,
-  "global_step": 15,
+  "global_step": 20,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -144,6 +144,49 @@
       "eval_samples_per_second": 12.655,
       "eval_steps_per_second": 3.191,
       "step": 15
+    },
+    {
+      "epoch": 0.4701561065197429,
+      "grad_norm": NaN,
+      "learning_rate": 3.4549150281252636e-05,
+      "loss": 0.0,
+      "step": 16
+    },
+    {
+      "epoch": 0.4995408631772268,
+      "grad_norm": NaN,
+      "learning_rate": 2.061073738537635e-05,
+      "loss": 0.0,
+      "step": 17
+    },
+    {
+      "epoch": 0.5289256198347108,
+      "grad_norm": NaN,
+      "learning_rate": 9.549150281252633e-06,
+      "loss": 0.0,
+      "step": 18
+    },
+    {
+      "epoch": 0.5583103764921947,
+      "grad_norm": NaN,
+      "learning_rate": 2.4471741852423237e-06,
+      "loss": 0.0,
+      "step": 19
+    },
+    {
+      "epoch": 0.5876951331496786,
+      "grad_norm": NaN,
+      "learning_rate": 0.0,
+      "loss": 0.0,
+      "step": 20
+    },
+    {
+      "epoch": 0.5876951331496786,
+      "eval_loss": NaN,
+      "eval_runtime": 18.1756,
+      "eval_samples_per_second": 12.654,
+      "eval_steps_per_second": 3.191,
+      "step": 20
     }
   ],
   "logging_steps": 1,
@@ -158,12 +201,12 @@
       "should_evaluate": false,
       "should_log": false,
       "should_save": true,
-      "should_training_stop": false
+      "should_training_stop": true
     },
     "attributes": {}
   }
 },
-  "total_flos": 1.770634359078912e+17,
+  "total_flos": 2.360845812105216e+17,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null