bhuvanmdev committed
Commit 1526e60
1 Parent(s): 7c36eed
Training in progress, step 320, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:4d7c5c0bb66466d7e29226f80d5cc217cf4256670467b8a4081f5c23852a3af7
 size 100697728
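The adapter weights are tracked with Git LFS, so the diff above only changes the pointer file (version, oid sha256, size) rather than the ~100 MB safetensors blob itself. As a minimal sketch, a locally downloaded copy can be checked against the new pointer; the local path here is an assumption, not part of the commit:

import hashlib
import os

# Values taken from the new LFS pointer in this commit.
EXPECTED_SHA256 = "4d7c5c0bb66466d7e29226f80d5cc217cf4256670467b8a4081f5c23852a3af7"
EXPECTED_SIZE = 100697728
PATH = "last-checkpoint/adapter_model.safetensors"  # assumed local download path

def sha256_of(path, chunk_size=1 << 20):
    """Stream the file so large checkpoints do not need to fit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

assert os.path.getsize(PATH) == EXPECTED_SIZE, "size does not match the LFS pointer"
assert sha256_of(PATH) == EXPECTED_SHA256, "sha256 does not match the LFS pointer"
print("adapter_model.safetensors matches its LFS pointer")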
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:a553c2eb71e73f0c1eda48c6b9ee7a1eaaa3208af2a76b8e5dbafcd6476db69a
 size 201541754
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:7f2a2d99c0e8f2ac5f91854c3cc393c4031b8b2b320bd2130073e550304e4d8b
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:7ae66eeabf39a4e204f66b969f521166fbc3f93e43500ebb37520ec3747848cb
 size 1064
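Besides the adapter weights, the checkpoint carries optimizer.pt, scheduler.pt and rng_state.pth, which is what lets transformers' Trainer resume this run mid-epoch with optimizer momentum, learning-rate schedule and RNG streams intact. A minimal inspection sketch, assuming the checkpoint folder has been downloaded locally (the paths below are assumptions, not part of this commit):

import torch

# These files are written with torch.save by the Trainer, so they can be opened
# directly; weights_only=False because they are trusted local files containing
# more than bare tensors (param groups, numpy RNG state, ...).
optimizer_state = torch.load("last-checkpoint/optimizer.pt", map_location="cpu", weights_only=False)
scheduler_state = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load("last-checkpoint/rng_state.pth", map_location="cpu", weights_only=False)

print(optimizer_state.keys())    # typically "state" and "param_groups"
print(scheduler_state)           # LR scheduler state dict (e.g. last_epoch, _step_count)
print(list(rng_state.keys()))    # python / numpy / cpu (and cuda) RNG states

# To actually continue training, a Trainer configured like the original run would
# restore all of this via: trainer.train(resume_from_checkpoint="last-checkpoint")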
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.
+  "epoch": 0.11449016100178891,
   "eval_steps": 500,
-  "global_step":
+  "global_step": 320,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -247,14 +247,30 @@
       "loss": 0.4405,
       "num_input_tokens_seen": 202203,
       "step": 300
+    },
+    {
+      "epoch": 0.11091234347048301,
+      "grad_norm": 0.33470776677131653,
+      "learning_rate": 0.00017781753130590342,
+      "loss": 0.4485,
+      "num_input_tokens_seen": 210364,
+      "step": 310
+    },
+    {
+      "epoch": 0.11449016100178891,
+      "grad_norm": 0.44205668568611145,
+      "learning_rate": 0.00017710196779964222,
+      "loss": 0.4431,
+      "num_input_tokens_seen": 217965,
+      "step": 320
     }
   ],
   "logging_steps": 10,
   "max_steps": 2795,
-  "num_input_tokens_seen":
+  "num_input_tokens_seen": 217965,
   "num_train_epochs": 1,
   "save_steps": 20,
-  "total_flos":
+  "total_flos": 4901263130142720.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
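The trainer_state.json update matches the commit message: with logging_steps set to 10, two new log entries (steps 310 and 320) are appended, and the top-level bookkeeping (epoch, global_step, num_input_tokens_seen, total_flos) advances to step 320. A short sketch for reading the updated state back, with the epoch arithmetic made explicit; the local path is assumed, and "log_history" is the standard Trainer field holding these entries:

import json
import math

# Read the checkpoint's trainer state (path assumes a local download of last-checkpoint/).
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

latest = state["log_history"][-1]
print(latest)  # expected: the step-320 entry with loss 0.4431 and 217965 input tokens seen

# With num_train_epochs = 1, max_steps (2795) is one full epoch, so the reported
# epoch fraction lines up with global_step / max_steps (~0.11449 at step 320).
assert math.isclose(state["epoch"], state["global_step"] / state["max_steps"])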