ToastyPigeon committed on
Commit a51e4b7 · verified · 1 Parent(s): 7c7c6dd

Training in progress, step 60, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d721a790ab5befbf35563f8e62addd5212b69733bcb986498e5a0351aa853ec5
+ oid sha256:d7e2cd2dbf78c54ea3f0f06d49fd1af5b70a3e03ac9e6c82cb1d62722b2593b6
  size 763470136
last-checkpoint/global_step60/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2c53be24f2027e3537527acf2c72f243ac372ef5296b0dd98173b072143e9c6
+ size 1152331664
last-checkpoint/global_step60/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8e36472fe9954e3846daa2b5787cdf7707e89dbfdcbaa5a418ae9f7535da170
+ size 1152331664
last-checkpoint/global_step60/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4adaf206d7d10a411a9d3c1e15268f653adc1bc3ff240cbdc2135eb6539aae95
+ size 348711830
last-checkpoint/global_step60/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d11c80e3f8f259262c79d66e22bf1689af07690bf14e79dfa593c4f9569e8101
+ size 348711830
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step30
+ global_step60
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bc6fc0294149580fee62daf248bea7739212ecaeb599c0033b32dc7931a338bd
+ oid sha256:13d47ce3a312fdef8e844802a1273de45b2a1adbfb4848f261f3e8b747e28be5
  size 14512
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8dea40414adacf961911c7a5d76d4ec3ebef19fb5ff8114b0e7620b37315974f
+ oid sha256:df4302bd5b366946b267764b57680418f02385ae358386a07ebe9d879f34434f
  size 14512
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5bf43260b366a202201117f03e76fc3c209bc010a3f554220c3436f4ffaab520
+ oid sha256:aad2d65943628ea6b328be0e7f3056643998d5e6f7a4812061e3d7f5ba16c03e
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.2003338898163606,
+ "epoch": 0.4006677796327212,
  "eval_steps": 30,
- "global_step": 30,
+ "global_step": 60,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -233,6 +233,224 @@
  "eval_samples_per_second": 0.314,
  "eval_steps_per_second": 0.157,
  "step": 30
+ },
+ {
+ "epoch": 0.20701168614357263,
+ "grad_norm": 0.851155993395187,
+ "learning_rate": 4.7547646794238276e-05,
+ "loss": 1.4104,
+ "step": 31
+ },
+ {
+ "epoch": 0.21368948247078465,
+ "grad_norm": 0.702549052055791,
+ "learning_rate": 4.731339946677661e-05,
+ "loss": 1.2011,
+ "step": 32
+ },
+ {
+ "epoch": 0.22036727879799667,
+ "grad_norm": 0.5817018666981087,
+ "learning_rate": 4.7069175496003146e-05,
+ "loss": 1.3173,
+ "step": 33
+ },
+ {
+ "epoch": 0.2270450751252087,
+ "grad_norm": 0.7972250982594367,
+ "learning_rate": 4.6815097856041986e-05,
+ "loss": 1.3874,
+ "step": 34
+ },
+ {
+ "epoch": 0.2337228714524207,
+ "grad_norm": 0.6350958813047525,
+ "learning_rate": 4.655129448263639e-05,
+ "loss": 1.3187,
+ "step": 35
+ },
+ {
+ "epoch": 0.24040066777963273,
+ "grad_norm": 0.6000120791629922,
+ "learning_rate": 4.627789820872931e-05,
+ "loss": 1.3125,
+ "step": 36
+ },
+ {
+ "epoch": 0.24707846410684475,
+ "grad_norm": 0.6710282971369416,
+ "learning_rate": 4.599504669757798e-05,
+ "loss": 1.3592,
+ "step": 37
+ },
+ {
+ "epoch": 0.25375626043405675,
+ "grad_norm": 0.6662171102756398,
+ "learning_rate": 4.570288237343632e-05,
+ "loss": 1.4417,
+ "step": 38
+ },
+ {
+ "epoch": 0.2604340567612688,
+ "grad_norm": 0.7577271123527769,
+ "learning_rate": 4.5401552349840075e-05,
+ "loss": 1.4234,
+ "step": 39
+ },
+ {
+ "epoch": 0.2671118530884808,
+ "grad_norm": 0.9609826007095287,
+ "learning_rate": 4.509120835553067e-05,
+ "loss": 1.3688,
+ "step": 40
+ },
+ {
+ "epoch": 0.27378964941569284,
+ "grad_norm": 0.6247028843961614,
+ "learning_rate": 4.4772006658055256e-05,
+ "loss": 1.2724,
+ "step": 41
+ },
+ {
+ "epoch": 0.28046744574290483,
+ "grad_norm": 0.6740316202461178,
+ "learning_rate": 4.444410798508125e-05,
+ "loss": 1.5187,
+ "step": 42
+ },
+ {
+ "epoch": 0.2871452420701169,
+ "grad_norm": 0.9376016164673258,
+ "learning_rate": 4.4107677443465165e-05,
+ "loss": 1.5201,
+ "step": 43
+ },
+ {
+ "epoch": 0.2938230383973289,
+ "grad_norm": 0.705877055805098,
+ "learning_rate": 4.3762884436116316e-05,
+ "loss": 1.1164,
+ "step": 44
+ },
+ {
+ "epoch": 0.3005008347245409,
+ "grad_norm": 0.7895949954130703,
+ "learning_rate": 4.340990257669732e-05,
+ "loss": 1.3134,
+ "step": 45
+ },
+ {
+ "epoch": 0.3071786310517529,
+ "grad_norm": 0.6076223678732244,
+ "learning_rate": 4.3048909602204455e-05,
+ "loss": 1.1509,
+ "step": 46
+ },
+ {
+ "epoch": 0.31385642737896496,
+ "grad_norm": 0.7837822618333515,
+ "learning_rate": 4.268008728347168e-05,
+ "loss": 1.2497,
+ "step": 47
+ },
+ {
+ "epoch": 0.32053422370617696,
+ "grad_norm": 1.006832496085628,
+ "learning_rate": 4.230362133364354e-05,
+ "loss": 1.3635,
+ "step": 48
+ },
+ {
+ "epoch": 0.327212020033389,
+ "grad_norm": 0.5534116180176328,
+ "learning_rate": 4.191970131466304e-05,
+ "loss": 1.4538,
+ "step": 49
+ },
+ {
+ "epoch": 0.333889816360601,
+ "grad_norm": 0.6876766572814221,
+ "learning_rate": 4.1528520541821506e-05,
+ "loss": 1.2173,
+ "step": 50
+ },
+ {
+ "epoch": 0.34056761268781305,
+ "grad_norm": 2.3546709740261758,
+ "learning_rate": 4.1130275986418446e-05,
+ "loss": 1.1366,
+ "step": 51
+ },
+ {
+ "epoch": 0.34724540901502504,
+ "grad_norm": 0.4460440639527036,
+ "learning_rate": 4.072516817658065e-05,
+ "loss": 1.3418,
+ "step": 52
+ },
+ {
+ "epoch": 0.35392320534223703,
+ "grad_norm": 0.8189390229013667,
+ "learning_rate": 4.031340109629017e-05,
+ "loss": 1.4627,
+ "step": 53
+ },
+ {
+ "epoch": 0.3606010016694491,
+ "grad_norm": 0.8084346324283328,
+ "learning_rate": 3.9895182082672314e-05,
+ "loss": 1.3497,
+ "step": 54
+ },
+ {
+ "epoch": 0.3672787979966611,
+ "grad_norm": 0.6522054532264335,
+ "learning_rate": 3.947072172159507e-05,
+ "loss": 1.0559,
+ "step": 55
+ },
+ {
+ "epoch": 0.3739565943238731,
+ "grad_norm": 0.6662230199377867,
+ "learning_rate": 3.904023374163289e-05,
+ "loss": 1.3204,
+ "step": 56
+ },
+ {
+ "epoch": 0.3806343906510851,
+ "grad_norm": 0.7851899246690316,
+ "learning_rate": 3.860393490644781e-05,
+ "loss": 1.2382,
+ "step": 57
+ },
+ {
+ "epoch": 0.38731218697829717,
+ "grad_norm": 0.5976895902002597,
+ "learning_rate": 3.816204490564247e-05,
+ "loss": 1.2345,
+ "step": 58
+ },
+ {
+ "epoch": 0.39398998330550916,
+ "grad_norm": 0.6548990303111994,
+ "learning_rate": 3.771478624413981e-05,
+ "loss": 1.3118,
+ "step": 59
+ },
+ {
+ "epoch": 0.4006677796327212,
+ "grad_norm": 0.6224272564021037,
+ "learning_rate": 3.7262384130145054e-05,
+ "loss": 1.179,
+ "step": 60
+ },
+ {
+ "epoch": 0.4006677796327212,
+ "eval_loss": 1.0072436332702637,
+ "eval_runtime": 318.9557,
+ "eval_samples_per_second": 0.314,
+ "eval_steps_per_second": 0.157,
+ "step": 60
  }
  ],
  "logging_steps": 1,
@@ -252,7 +470,7 @@
  "attributes": {}
  }
  },
- "total_flos": 3578661106089984.0,
+ "total_flos": 7157458711609344.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null