masatochi committed
Commit
88408fd
1 Parent(s): 34532ae

Training in progress, step 200, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9b0dee2372e0f18f2926fc6260eec903f5561fa89a64d712e0b7d8365d17c593
+oid sha256:7bd5721dcf900ef49f45f0ef10dbf11b456013923d0a2897f59b5787b3ae3720
 size 59827904
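
The checkpoint binaries in this commit are stored through Git LFS, so each diff only touches the three-line pointer file (spec version, `oid sha256:`, byte `size`); the object IDs are what change between the step-195 and step-200 checkpoints. Below is a minimal sketch, not part of the commit, of checking a pulled file against its pointer; the helper name `verify_lfs_pointer` and the 1 MiB chunk size are illustrative choices.

```python
import hashlib

def verify_lfs_pointer(pointer_text: str, binary_path: str) -> bool:
    """Compare a local file against the oid/size recorded in a Git LFS pointer."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    expected_oid = fields["oid"].split(":", 1)[1]   # drop the "sha256:" prefix
    expected_size = int(fields["size"])

    sha = hashlib.sha256()
    actual_size = 0
    with open(binary_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            sha.update(chunk)
            actual_size += len(chunk)
    return sha.hexdigest() == expected_oid and actual_size == expected_size

# Example with the new adapter pointer from this commit; returns True only
# after `git lfs pull` has replaced the pointer with the real binary.
pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:7bd5721dcf900ef49f45f0ef10dbf11b456013923d0a2897f59b5787b3ae3720\n"
    "size 59827904\n"
)
print(verify_lfs_pointer(pointer, "last-checkpoint/adapter_model.safetensors"))
```

Before `git lfs pull`, the checked-out path contains only the pointer text shown in the diff; after pulling, the same path holds the 59,827,904-byte binary whose SHA-256 digest should equal the pointer's `oid`.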
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3b975acebc204f6e79db6fa817f78ef570e8350fee5fb167ce316257a8482e30
+oid sha256:771ffe86212fa3fc844af729b623adbdc1713d63e8ef510a4f21862401e76313
 size 30875540
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e0dfa4c978e9e36a07b5616ff9458f53247c12dc4ecf4c596cc8b6cb0fe8c5e1
+oid sha256:da87c0fa21811b5f2090e7e71a5104bc8bb10adee2d7d157218e5f725fa79433
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a35430a05f2b9748f37dd11667a782564c85a35d840d60cbaddfa2c905ab7c0a
+oid sha256:ca9a25c72339c898b564e0c464a3f6fc75bbeec408008928b7ed05533156b98c
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0953603520997616,
+  "epoch": 0.09780548933308882,
   "eval_steps": 34,
-  "global_step": 195,
+  "global_step": 200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1420,6 +1420,41 @@
       "learning_rate": 4.2658237049655323e-07,
       "loss": 9.1892,
       "step": 195
+    },
+    {
+      "epoch": 0.09584937954642704,
+      "grad_norm": 1.5588723026491867e+19,
+      "learning_rate": 2.7308266142119785e-07,
+      "loss": 8.4245,
+      "step": 196
+    },
+    {
+      "epoch": 0.09633840699309248,
+      "grad_norm": Infinity,
+      "learning_rate": 1.5363960325660565e-07,
+      "loss": 10.2748,
+      "step": 197
+    },
+    {
+      "epoch": 0.09682743443975793,
+      "grad_norm": Infinity,
+      "learning_rate": 6.829398569770939e-08,
+      "loss": 8.9335,
+      "step": 198
+    },
+    {
+      "epoch": 0.09731646188642337,
+      "grad_norm": Infinity,
+      "learning_rate": 1.7074954194729044e-08,
+      "loss": 9.0168,
+      "step": 199
+    },
+    {
+      "epoch": 0.09780548933308882,
+      "grad_norm": Infinity,
+      "learning_rate": 0.0,
+      "loss": 7.8164,
+      "step": 200
     }
   ],
   "logging_steps": 1,
@@ -1434,12 +1469,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 4.2987353681166336e+17,
+  "total_flos": 4.408959351914496e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null