bhuvanmdev committed
Commit 2688e0e • 1 Parent(s): f2952db

Training in progress, step 1780, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:4dbb1b9bf8cecc82c992b1f0dd17c659790fe9b529483d08774f61e623e89591
 size 100697728
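The pointer above records only the new object id and byte size of the updated adapter weights; the blob itself lives in LFS storage. A minimal sketch, assuming the checkpoint has already been pulled into a local last-checkpoint/ directory, checks the downloaded file against the pointer:

```python
import hashlib
import os

# Assumed local path (e.g. after `git lfs pull` or a huggingface_hub download).
path = "last-checkpoint/adapter_model.safetensors"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

# oid and size taken from the LFS pointer in this commit.
assert os.path.getsize(path) == 100697728
assert h.hexdigest() == "4dbb1b9bf8cecc82c992b1f0dd17c659790fe9b529483d08774f61e623e89591"
print("adapter_model.safetensors matches its LFS pointer")
```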
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:832bb0c6b1189792a3803205e28282733c69357c7a98db906a5b6648c86412ed
 size 201541754
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:b16d7438102d976fb534815d7fb6c3fbd3eb0ad6701c9593c3dd315e6b65b9be
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:2e7b2fb94aba803f2a2d188b149647000a6898e962a195b1f1cc848db18fce06
 size 1064
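optimizer.pt, rng_state.pth and scheduler.pt carry the non-model state needed to resume training exactly at step 1780: optimizer statistics, RNG state and the learning-rate scheduler position. In practice `Trainer.train(resume_from_checkpoint=...)` reloads them automatically; a minimal inspection sketch, assuming the checkpoint directory is available locally:

```python
import torch

ckpt = "last-checkpoint"  # assumed local checkpoint directory

# Each file is torch-serialized state saved by the Trainer.
# (On recent PyTorch you may need weights_only=False to load pickled contents.)
optimizer_state = torch.load(f"{ckpt}/optimizer.pt", map_location="cpu")
scheduler_state = torch.load(f"{ckpt}/scheduler.pt", map_location="cpu")
rng_state = torch.load(f"{ckpt}/rng_state.pth", map_location="cpu")

# When resuming manually, the first two would be fed back via
# optimizer.load_state_dict(...) and lr_scheduler.load_state_dict(...).
print(type(optimizer_state).__name__,
      type(scheduler_state).__name__,
      type(rng_state).__name__)
```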
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.
+  "epoch": 0.6368515205724508,
   "eval_steps": 500,
-  "global_step":
+  "global_step": 1780,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1415,14 +1415,30 @@
       "loss": 0.3975,
       "num_input_tokens_seen": 1189185,
       "step": 1760
+    },
+    {
+      "epoch": 0.6332737030411449,
+      "grad_norm": 0.262329638004303,
+      "learning_rate": 7.334525939177103e-05,
+      "loss": 0.3841,
+      "num_input_tokens_seen": 1196883,
+      "step": 1770
+    },
+    {
+      "epoch": 0.6368515205724508,
+      "grad_norm": 0.35897818207740784,
+      "learning_rate": 7.262969588550985e-05,
+      "loss": 0.4004,
+      "num_input_tokens_seen": 1205284,
+      "step": 1780
     }
   ],
   "logging_steps": 10,
   "max_steps": 2795,
-  "num_input_tokens_seen":
+  "num_input_tokens_seen": 1205284,
   "num_train_epochs": 1,
   "save_steps": 20,
-  "total_flos": 2.
+  "total_flos": 2.710258082972467e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
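trainer_state.json is the one human-readable file in this checkpoint: the run advances from step 1760 to 1780, two log entries (steps 1770 and 1780) are appended, and the token and FLOP totals are updated. A small sketch, assuming the file sits in a local last-checkpoint/ directory, reads the state back and cross-checks the new values:

```python
import json

# Assumed local path to the checkpoint's trainer state.
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

# The two entries appended by this commit (steps 1770 and 1780);
# the log list is stored under "log_history".
for entry in state["log_history"][-2:]:
    print(entry["step"], entry["loss"], entry["learning_rate"],
          entry["num_input_tokens_seen"])

# With num_train_epochs = 1, the reported epoch is the fraction of
# max_steps completed: 1780 / 2795 ≈ 0.6368515, matching the diff above.
assert abs(state["epoch"] - state["global_step"] / state["max_steps"]) < 1e-6
```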