masatochi committed
Commit 02bd5f4
Parent: 6aeed42

Training in progress, step 195, checkpoint

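The commit message and the last-checkpoint/ paths are consistent with a transformers.Trainer run that syncs its latest checkpoint to the Hub while training a PEFT/LoRA adapter (the ~84 MB adapter_model.safetensors below holds adapter weights rather than full model weights). A minimal sketch of such a setup, assuming Hub checkpoint syncing and a save interval of 5 optimizer steps; model, dataset, and repo identifiers are placeholders, not taken from this repo:

# Sketch only (not the author's actual script): a Trainer configuration that would
# produce commits like this one. Only values visible in this diff are reused
# (per-device batch size 3, logging every step, a checkpoint every 5 steps).
from transformers import Trainer, TrainingArguments

args = TrainingArguments(
    output_dir="outputs",
    per_device_train_batch_size=3,      # "train_batch_size": 3 in trainer_state.json
    logging_steps=1,                    # "logging_steps": 1
    save_steps=5,                       # assumed from the step-190 -> step-195 cadence
    push_to_hub=True,                   # sync checkpoints to the Hub as commits
    hub_strategy="checkpoint",          # keep a single "last-checkpoint/" folder in the repo
    hub_model_id="username/repo-name",  # placeholder
)

trainer = Trainer(
    model=model,                  # placeholder: the PEFT/LoRA-wrapped model being trained
    args=args,
    train_dataset=train_dataset,  # placeholder
)
trainer.train()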
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a4a5c97dbf76ff33b307a5750999b650fdca855b6fccfb24cfb317d17b551db2
+oid sha256:4b6961ec56fb4a44ef494b0d658571c48ac344664dc6fe4a951f2610f0f03b8f
 size 83945296
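The binary checkpoint files are stored through Git LFS, so the diff only touches their pointer files: the tracked blob is addressed by its SHA-256 digest (the oid line), and only that digest changes between commits while the byte size stays the same. A small sketch of checking a locally downloaded copy of the file against the pointer's oid; the local path is a placeholder for wherever the resolved file lives:

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file and return its hex SHA-256, the same digest used as the LFS oid."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# New oid from this commit's pointer file for adapter_model.safetensors.
expected = "4b6961ec56fb4a44ef494b0d658571c48ac344664dc6fe4a951f2610f0f03b8f"
actual = sha256_of("last-checkpoint/adapter_model.safetensors")  # placeholder path
print("match:", actual == expected)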
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e14756a84c0c36abc832c0c104bd27a99e8fd2062b9726fdee254c458fe1f7b4
+oid sha256:345c3d0745bb4c43f0fbe85fc7c3ff9f29987a091928463d46244addf1e119f1
 size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bf11cdd535ea662ff270636f900a467408683d2b7101cb9dbc848f0681f26dea
+oid sha256:523bfe0a33577b4182bf5e3b1a37489a75b989d79cb5926278ae5a0647ffb1d8
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b289b438571df9d34409287e67864402aaad98d9ecdf87ccd44b9abb7f5b6982
+oid sha256:a35430a05f2b9748f37dd11667a782564c85a35d840d60cbaddfa2c905ab7c0a
 size 1064
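Together, these pointer files cover what Trainer writes for a resumable checkpoint: adapter weights, optimizer state, RNG state, and LR scheduler state, alongside the trainer_state.json updated below. A sketch of resuming from the pushed folder, assuming `trainer` has been rebuilt with the same model, data, and arguments as the original run; the repo id is a placeholder:

from huggingface_hub import snapshot_download

# Fetch only the committed checkpoint folder from the Hub, then hand it to an
# already-constructed Trainer so training continues from step 195.
local_dir = snapshot_download(
    repo_id="username/repo-name",          # placeholder
    allow_patterns=["last-checkpoint/*"],  # only the files in this commit
)
trainer.train(resume_from_checkpoint=f"{local_dir}/last-checkpoint")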
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.09291521486643438,
+  "epoch": 0.0953603520997616,
   "eval_steps": 34,
-  "global_step": 190,
+  "global_step": 195,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1385,6 +1385,41 @@
       "learning_rate": 1.7026900316098215e-06,
       "loss": 1.1048,
       "step": 190
+    },
+    {
+      "epoch": 0.09340424231309982,
+      "grad_norm": 0.3293691873550415,
+      "learning_rate": 1.3799252646597426e-06,
+      "loss": 1.1607,
+      "step": 191
+    },
+    {
+      "epoch": 0.09389326975976527,
+      "grad_norm": 0.28064343333244324,
+      "learning_rate": 1.0908391628854041e-06,
+      "loss": 0.9642,
+      "step": 192
+    },
+    {
+      "epoch": 0.09438229720643071,
+      "grad_norm": 0.4100506603717804,
+      "learning_rate": 8.355304489257254e-07,
+      "loss": 0.931,
+      "step": 193
+    },
+    {
+      "epoch": 0.09487132465309615,
+      "grad_norm": 0.298096626996994,
+      "learning_rate": 6.140863104726391e-07,
+      "loss": 0.9616,
+      "step": 194
+    },
+    {
+      "epoch": 0.0953603520997616,
+      "grad_norm": 0.26067015528678894,
+      "learning_rate": 4.2658237049655323e-07,
+      "loss": 1.0008,
+      "step": 195
     }
   ],
   "logging_steps": 1,
@@ -1404,7 +1439,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 8.434012971545395e+17,
+  "total_flos": 8.655960681322906e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null