bhuvanmdev committed
Commit 8766fab · verified · Parent: 443ba7e

Training in progress, step 2600, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fa10074e1d6ddc94d23153c9734747c8baf91d47d6dba6666b1ef724ab871f8c
+ oid sha256:0e0fe0e9d900d8fecd195e1a719802ba1ac5f49e274f580b73f4466c3f57dd5c
  size 100697728
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d9ab1eb9cd7c1ea28ecdf3adae9b1c04bcbc49f42a37845326b6ae05057c7199
+ oid sha256:8ed18925aeed2c70e87f6e85275b0e2e7c1d48fcea3988e938200f4125c88c46
  size 201541754
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:66d8839ff66cb4b737720f4f9d1c90cc3202ad969b799aa3c2659a6ea5ff1a82
+ oid sha256:a353eb9058a377bea13c7e9eeff43101bb2c4a8b11951234da6a710bb5053f86
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:42f27a730b6ee07b80f832c5a86ad2126ba907f4b4cf4aa2e99d8c5986c6b885
+ oid sha256:2e63d6cbad437bb2ff0881a0f3731290a112af9056e30047c433328ae16fc214
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
    "best_metric": null,
    "best_model_checkpoint": null,
-   "epoch": 0.9230769230769231,
+   "epoch": 0.9302325581395349,
    "eval_steps": 500,
-   "global_step": 2580,
+   "global_step": 2600,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
@@ -2071,14 +2071,30 @@
        "loss": 0.3949,
        "num_input_tokens_seen": 1748817,
        "step": 2580
+     },
+     {
+       "epoch": 0.9266547406082289,
+       "grad_norm": 0.31920483708381653,
+       "learning_rate": 1.4669051878354204e-05,
+       "loss": 0.3741,
+       "num_input_tokens_seen": 1754777,
+       "step": 2590
+     },
+     {
+       "epoch": 0.9302325581395349,
+       "grad_norm": 0.26824864745140076,
+       "learning_rate": 1.3953488372093024e-05,
+       "loss": 0.386,
+       "num_input_tokens_seen": 1762187,
+       "step": 2600
      }
    ],
    "logging_steps": 10,
    "max_steps": 2795,
-   "num_input_tokens_seen": 1748817,
+   "num_input_tokens_seen": 1762187,
    "num_train_epochs": 1,
    "save_steps": 20,
-   "total_flos": 3.932471857163674e+16,
+   "total_flos": 3.96253626569257e+16,
    "train_batch_size": 1,
    "trial_name": null,
    "trial_params": null