bhuvanmdev committed
Commit 09c4d56 (verified) · 1 Parent(s): 3072a50

Training in progress, step 1580, checkpoint

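This commit has the shape of an automatic checkpoint push from the transformers Trainer: the last-checkpoint/ folder is overwritten on every save, so only the LFS object ids and trainer_state.json change from one push to the next. The sketch below shows training arguments consistent with the state recorded in this commit (logging_steps 10, save_steps 20, max_steps 2795, per-device batch size 1); the output directory, learning rate, and Hub settings are assumptions inferred from the diff, not recorded in it.

from transformers import TrainingArguments

# Arguments consistent with last-checkpoint/trainer_state.json in this commit.
# Everything not visible in the diff is an assumption and marked as such.
args = TrainingArguments(
    output_dir="out",                    # placeholder; the real path is not in the diff
    max_steps=2795,                      # may equally have been derived from num_train_epochs=1
    num_train_epochs=1,
    per_device_train_batch_size=1,
    logging_steps=10,
    save_steps=20,
    learning_rate=2e-4,                  # inferred from the logged LR values; not stated in the diff
    lr_scheduler_type="linear",          # inference, see the note after trainer_state.json below
    include_num_input_tokens_seen=True,  # the state tracks num_input_tokens_seen
    push_to_hub=True,                    # assumption: the checkpoint is mirrored to the Hub
    hub_strategy="checkpoint",           # keeps a single last-checkpoint/ folder, as seen here
)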
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:534add263f0ab1fce24fa011018eefdd2dcddfad7e6b3e3167b8b88e43d1d335
+oid sha256:f701220ec81716a32a4d508f6822b66b2074ec86047f8e8f056187bdc1ab5c09
 size 100697728
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:aae8e9e48314c3831c9e9b379cf12dda16febba948f23260c667cfcefdb811cd
+oid sha256:b7b80e2f0ca9bc62e7fcaa505129fa793f6189b638ee9cff07d5b5b21cf2d026
 size 201541754
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2490d49251dc1bfa591e0e3acdc7c7d22ca5de91839e3e61e572022e72d3d8d2
+oid sha256:05ad872338f15e42202b963c58b83ef6edf29994e347518dbc0914f0cdbb97b0
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4a759d59fcc4923d52863d19dcf59b3835205ae6c3c6035bc1e96d3a9ecd6ff0
+oid sha256:4510552f5f4688778b1947e191aa031f3e5576fcc244fe154f36949d3b59eca2
 size 1064
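Each of the four files above is stored as a Git LFS pointer rather than the binary itself: a pointer carries the spec version, a sha256 oid, and the object size in bytes, and in this commit only the oids change while the sizes stay identical, as expected when a checkpoint of the same shape is overwritten. As a rough illustration, a file pulled with git lfs pull can be checked against its pointer like this; the snippet is an aside, not part of the repository, and the local path simply mirrors the repository layout.

import hashlib
import os

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a locally pulled file against the oid/size fields of its LFS pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Values taken from the new adapter_model.safetensors pointer above.
print(verify_lfs_object(
    "last-checkpoint/adapter_model.safetensors",
    "f701220ec81716a32a4d508f6822b66b2074ec86047f8e8f056187bdc1ab5c09",
    100697728,
))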
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.5581395348837209,
+  "epoch": 0.5652951699463328,
   "eval_steps": 500,
-  "global_step": 1560,
+  "global_step": 1580,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1255,14 +1255,30 @@
       "loss": 0.3889,
       "num_input_tokens_seen": 1047255,
       "step": 1560
+    },
+    {
+      "epoch": 0.5617173524150268,
+      "grad_norm": 0.3687174916267395,
+      "learning_rate": 8.765652951699464e-05,
+      "loss": 0.3794,
+      "num_input_tokens_seen": 1054159,
+      "step": 1570
+    },
+    {
+      "epoch": 0.5652951699463328,
+      "grad_norm": 0.3228056728839874,
+      "learning_rate": 8.694096601073346e-05,
+      "loss": 0.4095,
+      "num_input_tokens_seen": 1060584,
+      "step": 1580
     }
   ],
   "logging_steps": 10,
   "max_steps": 2795,
-  "num_input_tokens_seen": 1047255,
+  "num_input_tokens_seen": 1060584,
   "num_train_epochs": 1,
   "save_steps": 20,
-  "total_flos": 2.354906668207104e+16,
+  "total_flos": 2.384878882214707e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null