masatochi committed · verified
Commit 3464933 · 1 Parent(s): 5360803

Training in progress, step 200, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4b6961ec56fb4a44ef494b0d658571c48ac344664dc6fe4a951f2610f0f03b8f
+oid sha256:3727237ae3935b62ae996af32d303af34e4cd5253f63eb2d60627cc5ef912920
 size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:345c3d0745bb4c43f0fbe85fc7c3ff9f29987a091928463d46244addf1e119f1
+oid sha256:777bdf50a841d4b9781e0ab8e00624d257f1bc7472e451827de97f2d11a85d33
 size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:523bfe0a33577b4182bf5e3b1a37489a75b989d79cb5926278ae5a0647ffb1d8
+oid sha256:33c67ae2325c5a312a1fadb56ddccb9bf6f35a5c2e1dc133fc3f71609827e280
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a35430a05f2b9748f37dd11667a782564c85a35d840d60cbaddfa2c905ab7c0a
+oid sha256:ca9a25c72339c898b564e0c464a3f6fc75bbeec408008928b7ed05533156b98c
 size 1064
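
Each of the four checkpoint artifacts above is stored as a Git LFS pointer, so only the sha256 oid and the byte size change between commits. A minimal sketch for checking a downloaded blob against its pointer is below; the file paths are hypothetical placeholders, not part of this repository.

import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Compare a Git LFS pointer (version/oid/size lines) with the actual file."""
    fields = dict(
        line.split(" ", 1)
        for line in Path(pointer_path).read_text().splitlines()
        if line.strip()
    )
    expected_oid = fields["oid"].split(":", 1)[1].strip()   # "sha256:<hex>" -> "<hex>"
    expected_size = int(fields["size"])

    blob = Path(blob_path)
    actual_size = blob.stat().st_size
    actual_oid = hashlib.sha256(blob.read_bytes()).hexdigest()
    return actual_size == expected_size and actual_oid == expected_oid

# Hypothetical paths; adjust to wherever the pointer and checkpoint were downloaded.
print(verify_lfs_pointer("adapter_model.safetensors.pointer",
                         "last-checkpoint/adapter_model.safetensors"))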
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0953603520997616,
+  "epoch": 0.09780548933308882,
   "eval_steps": 34,
-  "global_step": 195,
+  "global_step": 200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1420,6 +1420,41 @@
       "learning_rate": 4.2658237049655323e-07,
       "loss": 1.0008,
       "step": 195
+    },
+    {
+      "epoch": 0.09584937954642704,
+      "grad_norm": 0.4474238157272339,
+      "learning_rate": 2.7308266142119785e-07,
+      "loss": 0.9659,
+      "step": 196
+    },
+    {
+      "epoch": 0.09633840699309248,
+      "grad_norm": 0.27518409490585327,
+      "learning_rate": 1.5363960325660565e-07,
+      "loss": 1.134,
+      "step": 197
+    },
+    {
+      "epoch": 0.09682743443975793,
+      "grad_norm": 0.4118005037307739,
+      "learning_rate": 6.829398569770939e-08,
+      "loss": 0.9985,
+      "step": 198
+    },
+    {
+      "epoch": 0.09731646188642337,
+      "grad_norm": 0.38307955861091614,
+      "learning_rate": 1.7074954194729044e-08,
+      "loss": 1.0373,
+      "step": 199
+    },
+    {
+      "epoch": 0.09780548933308882,
+      "grad_norm": 0.5424556136131287,
+      "learning_rate": 0.0,
+      "loss": 0.9834,
+      "step": 200
     }
   ],
   "logging_steps": 1,
@@ -1434,12 +1469,12 @@
       "should_evaluate": false,
       "should_log": false,
       "should_save": true,
-      "should_training_stop": false
+      "should_training_stop": true
     },
     "attributes": {}
   }
 },
-  "total_flos": 8.655960681322906e+17,
+  "total_flos": 8.877908391100416e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null