bhuvanmdev committed on
Commit 76ea806 · verified · 1 Parent(s): 8bd6128

Training in progress, step 1380, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b4c36ddd34e5f64d6b8d72f7535780e3421a848c056451338ad60b3bc23355ca
+oid sha256:6b616fe2eb021d20d4875636738be3ca7342fc6db866fd391eb53743dd565cb0
 size 100697728
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d8441702a5d48998055bebf8bf17f52c9868b1176bcfd106766c5daef35e2005
+oid sha256:5356f6af8453685dfcd2ab29084ffd177cb62b8eaee4a3289a76f99910495ddc
 size 201541754
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a125d9b0ccf3b1e15af9c78c7573f334a287768d58700d4bae393ced8e6486ba
+oid sha256:44f04742b905a4ba4f17b43b2fddc49db3e772b856badc745872559eec73cca5
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9822ffd7b1cdab49a87c2ee8fe2f970274c45f8492d363a6e7b14647c57922bd
+oid sha256:bc40c38146bda7179f03752fd8672ec7d4feef1519e7b61e194eda1f35f35156
 size 1064
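Each of the files above is stored as a Git LFS pointer: the repository tracks only the `oid sha256:` digest and the byte `size` of the blob, so a checkpoint update rewrites the digest while the size stays the same. A minimal sketch for checking that a locally downloaded blob matches its pointer (the local path is assumed; the digest is the updated oid shown above):

```python
import hashlib

def lfs_oid(path: str, chunk_size: int = 1 << 20) -> str:
    """Compute the SHA-256 digest that Git LFS records as the pointer's oid."""
    digest = hashlib.sha256()
    with open(path, "rb") as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Assumes a local copy of the checkpoint file from this commit.
print(lfs_oid("last-checkpoint/adapter_model.safetensors"))
# Expected, per the updated pointer above:
# 6b616fe2eb021d20d4875636738be3ca7342fc6db866fd391eb53743dd565cb0
```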
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.4865831842576029,
+  "epoch": 0.4937388193202147,
   "eval_steps": 500,
-  "global_step": 1360,
+  "global_step": 1380,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1095,14 +1095,30 @@
       "loss": 0.4049,
       "num_input_tokens_seen": 915272,
       "step": 1360
+    },
+    {
+      "epoch": 0.49016100178890876,
+      "grad_norm": 0.249292254447937,
+      "learning_rate": 0.00010196779964221825,
+      "loss": 0.4064,
+      "num_input_tokens_seen": 921729,
+      "step": 1370
+    },
+    {
+      "epoch": 0.4937388193202147,
+      "grad_norm": 0.2855275869369507,
+      "learning_rate": 0.00010125223613595707,
+      "loss": 0.4092,
+      "num_input_tokens_seen": 928645,
+      "step": 1380
     }
   ],
   "logging_steps": 10,
   "max_steps": 2795,
-  "num_input_tokens_seen": 915272,
+  "num_input_tokens_seen": 928645,
   "num_train_epochs": 1,
   "save_steps": 20,
-  "total_flos": 2.0581235095781376e+16,
+  "total_flos": 2.088194664047616e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null