Commit 774b3a4 (parent: ff5c437), committed by jssky

Training in progress, step 50, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:55e9827ef8cc91c97db840cad2f239b6b71d35f5ea90d086d5067d2b3deb1ede
+oid sha256:856364850102f377f0062d0e9c84143feb1c022737174642049a9cc55dd62ad9
 size 335604696
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:29b33acf0a973a143175747a962ad9fe67cf71076dd0a78608f7dffc398bdd9f
+oid sha256:af2d3b4aec821562c8ac54c21cb4c4d16607ac4310ff2299b825a59e8e7c3771
 size 671466706
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6d6cdd65c8165fde81d3663c9f84b702e4335d40d869ca61535d14ef3a2ce201
+oid sha256:ebbb161ad9fee5d3318aed5f3b97a5ddadad1c3e50981ab882c5e6f917156fb4
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ca6e44dab8ec5d376552b2a0bc9c569003e58c353795e6646ddfb7f18ee86910
+oid sha256:b4fa4cf79a90071f090174dd057b7e7b83416ae2530d86a9dd53b9de88603d60
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:94db75d9369a475d5cce29399e6c856e832925a56be67ed0e79f9db674f7da1b
+oid sha256:4ca3dfa99d10aab5d84fb8926336da17714b6566eb2b9c08b42a34806fb0a501
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:27a6f38089934418ed0e6718ed9d3f4491dfaf35c458aac37318721f735393b0
+oid sha256:11f33d26a340a5279c04e5dfb5a843a9bff561aea658bed7ee23187f7dee4f8a
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f37b2aa490ccb1598b01e14cda36e9081f7ce646deab4d3c2d03de0d2169a755
+oid sha256:b1df0528620c07325b8faa7567e59b0c1e86a1f1ee6af1245a69c6c0463fe4e2
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.8179048299789429,
-  "best_model_checkpoint": "miner_id_24/checkpoint-25",
-  "epoch": 0.06532745386248572,
+  "best_metric": 0.7647556662559509,
+  "best_model_checkpoint": "miner_id_24/checkpoint-50",
+  "epoch": 0.13065490772497143,
   "eval_steps": 25,
-  "global_step": 25,
+  "global_step": 50,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -198,6 +198,189 @@
       "eval_samples_per_second": 31.841,
       "eval_steps_per_second": 3.989,
       "step": 25
+    },
+    {
+      "epoch": 0.06794055201698514,
+      "grad_norm": 19.688880920410156,
+      "learning_rate": 5e-05,
+      "loss": 0.6132,
+      "step": 26
+    },
+    {
+      "epoch": 0.07055365017148457,
+      "grad_norm": 17.49786376953125,
+      "learning_rate": 4.6729843538492847e-05,
+      "loss": 0.727,
+      "step": 27
+    },
+    {
+      "epoch": 0.073166748325984,
+      "grad_norm": 11.093067169189453,
+      "learning_rate": 4.347369038899744e-05,
+      "loss": 0.7427,
+      "step": 28
+    },
+    {
+      "epoch": 0.07577984648048343,
+      "grad_norm": 9.140443801879883,
+      "learning_rate": 4.0245483899193595e-05,
+      "loss": 0.7637,
+      "step": 29
+    },
+    {
+      "epoch": 0.07839294463498285,
+      "grad_norm": 8.738513946533203,
+      "learning_rate": 3.705904774487396e-05,
+      "loss": 0.7804,
+      "step": 30
+    },
+    {
+      "epoch": 0.08100604278948229,
+      "grad_norm": 9.069628715515137,
+      "learning_rate": 3.392802673484193e-05,
+      "loss": 0.7579,
+      "step": 31
+    },
+    {
+      "epoch": 0.0836191409439817,
+      "grad_norm": 10.622302055358887,
+      "learning_rate": 3.086582838174551e-05,
+      "loss": 0.7312,
+      "step": 32
+    },
+    {
+      "epoch": 0.08623223909848114,
+      "grad_norm": 14.509358406066895,
+      "learning_rate": 2.7885565489049946e-05,
+      "loss": 0.7611,
+      "step": 33
+    },
+    {
+      "epoch": 0.08884533725298056,
+      "grad_norm": 21.554981231689453,
+      "learning_rate": 2.500000000000001e-05,
+      "loss": 0.8172,
+      "step": 34
+    },
+    {
+      "epoch": 0.09145843540748,
+      "grad_norm": 30.103410720825195,
+      "learning_rate": 2.2221488349019903e-05,
+      "loss": 0.8458,
+      "step": 35
+    },
+    {
+      "epoch": 0.09407153356197942,
+      "grad_norm": 27.34261131286621,
+      "learning_rate": 1.9561928549563968e-05,
+      "loss": 0.9454,
+      "step": 36
+    },
+    {
+      "epoch": 0.09668463171647886,
+      "grad_norm": 33.892723083496094,
+      "learning_rate": 1.703270924499656e-05,
+      "loss": 0.9864,
+      "step": 37
+    },
+    {
+      "epoch": 0.09929772987097828,
+      "grad_norm": 35.015785217285156,
+      "learning_rate": 1.4644660940672627e-05,
+      "loss": 0.7745,
+      "step": 38
+    },
+    {
+      "epoch": 0.10191082802547771,
+      "grad_norm": 6.347834587097168,
+      "learning_rate": 1.2408009626051137e-05,
+      "loss": 0.6495,
+      "step": 39
+    },
+    {
+      "epoch": 0.10452392617997713,
+      "grad_norm": 6.344296455383301,
+      "learning_rate": 1.0332332985438248e-05,
+      "loss": 0.6931,
+      "step": 40
+    },
+    {
+      "epoch": 0.10713702433447657,
+      "grad_norm": 6.151936054229736,
+      "learning_rate": 8.426519384872733e-06,
+      "loss": 0.7352,
+      "step": 41
+    },
+    {
+      "epoch": 0.10975012248897599,
+      "grad_norm": 6.7457661628723145,
+      "learning_rate": 6.698729810778065e-06,
+      "loss": 0.7606,
+      "step": 42
+    },
+    {
+      "epoch": 0.11236322064347543,
+      "grad_norm": 7.498371124267578,
+      "learning_rate": 5.156362923365588e-06,
+      "loss": 0.7645,
+      "step": 43
+    },
+    {
+      "epoch": 0.11497631879797485,
+      "grad_norm": 8.821335792541504,
+      "learning_rate": 3.8060233744356633e-06,
+      "loss": 0.7449,
+      "step": 44
+    },
+    {
+      "epoch": 0.11758941695247428,
+      "grad_norm": 11.02287483215332,
+      "learning_rate": 2.653493525244721e-06,
+      "loss": 0.722,
+      "step": 45
+    },
+    {
+      "epoch": 0.1202025151069737,
+      "grad_norm": 15.275715827941895,
+      "learning_rate": 1.70370868554659e-06,
+      "loss": 0.693,
+      "step": 46
+    },
+    {
+      "epoch": 0.12281561326147314,
+      "grad_norm": 22.680950164794922,
+      "learning_rate": 9.607359798384785e-07,
+      "loss": 0.7993,
+      "step": 47
+    },
+    {
+      "epoch": 0.12542871141597256,
+      "grad_norm": 31.55802345275879,
+      "learning_rate": 4.277569313094809e-07,
+      "loss": 0.8813,
+      "step": 48
+    },
+    {
+      "epoch": 0.12804180957047198,
+      "grad_norm": 43.83702850341797,
+      "learning_rate": 1.0705383806982606e-07,
+      "loss": 0.8113,
+      "step": 49
+    },
+    {
+      "epoch": 0.13065490772497143,
+      "grad_norm": 50.75833511352539,
+      "learning_rate": 0.0,
+      "loss": 1.0654,
+      "step": 50
+    },
+    {
+      "epoch": 0.13065490772497143,
+      "eval_loss": 0.7647556662559509,
+      "eval_runtime": 81.2616,
+      "eval_samples_per_second": 31.725,
+      "eval_steps_per_second": 3.975,
+      "step": 50
     }
   ],
   "logging_steps": 1,
@@ -221,12 +404,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 2.8626573248272794e+17,
+  "total_flos": 5.727082812528394e+17,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null