bhuvanmdev committed
Commit: a2b0c71
Parent(s): abe460b
Training in progress, step 660, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:66333af72e1725806c8e221908e16e38b77dc29f7dce82420a71ceea2996f731
 size 100697728
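Each checkpoint binary in this commit is tracked with Git LFS, so the diff touches only the pointer file: the `oid sha256:` line (the content hash) and the byte `size`. As a minimal sketch, assuming the actual object has been fetched locally (e.g. via `git lfs pull`), one could check that the downloaded file matches the new pointer by hashing it; the path and expected hash below are taken from this commit's adapter_model.safetensors entry.

```python
import hashlib

# Expected hash: the new oid recorded in this commit's LFS pointer for the adapter weights.
EXPECTED = "66333af72e1725806c8e221908e16e38b77dc29f7dce82420a71ceea2996f731"

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in chunks and return its hex SHA-256 digest."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

if __name__ == "__main__":
    digest = sha256_of("last-checkpoint/adapter_model.safetensors")
    print("match" if digest == EXPECTED else f"mismatch: {digest}")
```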
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:4cf47a1463ac1ceb4c0c1e0ba9c8532a53101f3ec7c5d55cdea529cb461d992b
 size 201541754
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:fc6353f9c4c4d14c6900d066e87c1879de52f1aa09da8179a11a66235a737911
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:5d7053aaab2445f905f900c08b42128e5713d6d142ebe37c511ff095c7697e08
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.
+  "epoch": 0.23613595706618962,
   "eval_steps": 500,
-  "global_step":
+  "global_step": 660,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -519,14 +519,30 @@
       "loss": 0.4163,
       "num_input_tokens_seen": 428523,
       "step": 640
+    },
+    {
+      "epoch": 0.23255813953488372,
+      "grad_norm": 0.35523882508277893,
+      "learning_rate": 0.00015348837209302327,
+      "loss": 0.4237,
+      "num_input_tokens_seen": 436283,
+      "step": 650
+    },
+    {
+      "epoch": 0.23613595706618962,
+      "grad_norm": 0.32238948345184326,
+      "learning_rate": 0.00015277280858676207,
+      "loss": 0.4527,
+      "num_input_tokens_seen": 444368,
+      "step": 660
     }
   ],
   "logging_steps": 10,
   "max_steps": 2795,
-  "num_input_tokens_seen":
+  "num_input_tokens_seen": 444368,
   "num_train_epochs": 1,
   "save_steps": 20,
-  "total_flos":
+  "total_flos": 9992267082399744.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
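The trainer_state.json hunk adds two new log entries (steps 650 and 660) and updates the running totals for this checkpoint. As a minimal sketch, assuming the checkpoint directory from this commit is checked out locally, the resumed state can be cross-checked against the values in the diff using only the standard library:

```python
import json

# Hedged sketch: read the checkpoint's trainer state and compare it with this commit's diff.
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"])              # expected: 660
print(state["epoch"])                    # expected: ~0.23613595706618962
print(state["log_history"][-1]["loss"])  # last logged loss, 0.4527 at step 660
```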