bhuvanmdev committed
Commit 3b5c51c
Parent(s): f1191da
Training in progress, step 880, checkpoint
last-checkpoint/adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:10e278fe75c0875651454a628fb5081e25c9b4eddb980049aa53d473f22d3859
 size 100697728
last-checkpoint/optimizer.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:04ed5db3d6a74db1fc85886f2b7a625f5118fae9676ddbbe7eb972f86ea748ae
 size 201541754
last-checkpoint/rng_state.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:57d455a0341a2c5c31eede937d31408b4b0a31f087c401955643a52ffd75f76a
 size 14244
last-checkpoint/scheduler.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:cf5b23a851e345e8e4081674591dcebfd10a22b109ff1badb1c585816cbde3fd
 size 1064
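The four files above are Git LFS pointers: each records the spec version, a sha256 oid for the binary payload, and the payload size. In this commit only the oids change from the parent; the sizes stay the same. As a minimal sketch (the file paths are hypothetical), a downloaded payload can be checked against its pointer like this:

```python
import hashlib

def verify_lfs_pointer(pointer_path, payload_path):
    """Compare a downloaded payload against the oid/size in a Git LFS pointer file."""
    fields = {}
    with open(pointer_path, "r", encoding="utf-8") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    expected_oid = fields["oid"].split(":", 1)[1]  # strip the "sha256:" prefix
    expected_size = int(fields["size"])

    digest = hashlib.sha256()
    actual_size = 0
    with open(payload_path, "rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):
            digest.update(chunk)
            actual_size += len(chunk)
    return digest.hexdigest() == expected_oid and actual_size == expected_size

# Hypothetical usage:
# verify_lfs_pointer("adapter_model.safetensors.pointer", "adapter_model.safetensors")
```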
last-checkpoint/trainer_state.json
CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.
+  "epoch": 0.3148479427549195,
   "eval_steps": 500,
-  "global_step":
+  "global_step": 880,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -695,14 +695,30 @@
       "loss": 0.4365,
       "num_input_tokens_seen": 574126,
       "step": 860
+    },
+    {
+      "epoch": 0.3112701252236136,
+      "grad_norm": 0.31020838022232056,
+      "learning_rate": 0.00013774597495527728,
+      "loss": 0.4076,
+      "num_input_tokens_seen": 581039,
+      "step": 870
+    },
+    {
+      "epoch": 0.3148479427549195,
+      "grad_norm": 0.2839311361312866,
+      "learning_rate": 0.00013703041144901611,
+      "loss": 0.4139,
+      "num_input_tokens_seen": 587384,
+      "step": 880
     }
   ],
   "logging_steps": 10,
   "max_steps": 2795,
-  "num_input_tokens_seen":
+  "num_input_tokens_seen": 587384,
   "num_train_epochs": 1,
   "save_steps": 20,
-  "total_flos": 1.
+  "total_flos": 1.3208191876841472e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
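Since "num_train_epochs" is 1, the "epoch" value in trainer_state.json is simply global_step / max_steps. A quick, self-contained check of the numbers committed here (880 of 2795 steps):

```python
# Values taken directly from the trainer_state.json diff above.
global_step = 880
max_steps = 2795
epoch = 0.3148479427549195

# With num_train_epochs == 1, epoch is just the fraction of max_steps completed.
assert abs(global_step / max_steps - epoch) < 1e-12
print(f"{global_step}/{max_steps} steps = {global_step / max_steps:.2%} of the run")
```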