Training in progress, step 2500, checkpoint
- last-checkpoint/adapter_model.safetensors +1 -1
- last-checkpoint/global_step2499/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step2499/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step2499/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step2499/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step2499/zero_pp_rank_0_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/global_step2499/zero_pp_rank_1_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/global_step2499/zero_pp_rank_2_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/global_step2499/zero_pp_rank_3_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/latest +1 -1
- last-checkpoint/rng_state_0.pth +1 -1
- last-checkpoint/rng_state_1.pth +1 -1
- last-checkpoint/rng_state_2.pth +1 -1
- last-checkpoint/rng_state_3.pth +1 -1
- last-checkpoint/scheduler.pt +1 -1
- last-checkpoint/trainer_state.json +93 -4
last-checkpoint/adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:ad9c5a90f0316ee953467cae5ec9a849c3cc7b35324d075196476f5e2a6e857e
size 18516456
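The safetensors file above is stored with Git LFS, so the diff only shows the pointer (spec version, SHA-256 object id, byte size), not the weights themselves. A minimal sketch, assuming the repository has been cloned locally with LFS objects pulled, of inspecting the updated adapter:

# Minimal sketch: inspect the LoRA adapter this commit updates.
# The local path is an assumption; the file is the ~18 MB safetensors blob
# behind the LFS pointer shown in the diff above.
from safetensors.torch import load_file

tensors = load_file("last-checkpoint/adapter_model.safetensors")
print(f"{len(tensors)} tensors, "
      f"{sum(t.numel() for t in tensors.values())} LoRA parameters")
for name, t in list(tensors.items())[:5]:
    print(name, tuple(t.shape), t.dtype)

# Attaching the adapter to its base model would typically go through
# peft.PeftModel.from_pretrained(base_model, "last-checkpoint"), where
# base_model is the Qwen-VL variant this run fine-tunes (not named in this commit).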
last-checkpoint/global_step2499/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fcc3f5d47981eca52aac72c6f1923e8e0b1ac2c99f7de1fb71a1119a2fa16e3
+size 27700976
last-checkpoint/global_step2499/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89147f56d24767b4393162cef41e78d244f6f91fe989c692401bb8a24d7097f0
+size 27700976
last-checkpoint/global_step2499/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99ddc4d3b8340e811524cbed30fc7cc07ac1370e1b47a428e151bddffc516ac5
+size 27700976
last-checkpoint/global_step2499/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5997e47e1d70d8913e1277da8de8f8effff04d72f786314af60edca7b5e6f64
+size 27700976
last-checkpoint/global_step2499/zero_pp_rank_0_mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5439f58a29e0366a7f50646773fb60ae2a5bb18562e3e838d0c9c82494b2bccf
+size 411571
last-checkpoint/global_step2499/zero_pp_rank_1_mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4397da1199b3676b64dda73bd387563bfbc197a42301176b630448704ab76503
+size 411507
last-checkpoint/global_step2499/zero_pp_rank_2_mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5488ff43420c1760e1387d8e3f7bcb220f9b9293aebfa2a49ff4fb3705841c9c
+size 411507
last-checkpoint/global_step2499/zero_pp_rank_3_mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3836948132309c757f9c1b8d26443f387effa62d14f97840356a6b4d7ff376de
+size 411507
last-checkpoint/latest
CHANGED
@@ -1 +1 @@
-
+global_step2499
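The new global_step2499/ directory holds the DeepSpeed ZeRO shards for this step: one bf16 optimizer-state file and one model-state file per data-parallel rank (four ranks here), while the latest file records the tag DeepSpeed reads to locate them. A minimal sketch, assuming a local clone of the checkpoint, of resolving those shard files:

# Minimal sketch: resolve the DeepSpeed ZeRO shards this commit adds.
# The local path is an assumption; the layout (latest -> global_stepNNNN/) is from the diff.
from pathlib import Path

ckpt_dir = Path("last-checkpoint")               # hypothetical local path
tag = (ckpt_dir / "latest").read_text().strip()  # "global_step2499" per the diff above
shard_dir = ckpt_dir / tag

# One model-state and one bf16 optimizer-state shard per data-parallel rank.
for f in sorted(shard_dir.glob("*_states.pt")):
    print(f.name)

# Resuming training would normally go through the transformers Trainer, e.g.
# trainer.train(resume_from_checkpoint=str(ckpt_dir)), which hands this tag to DeepSpeed.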
last-checkpoint/rng_state_0.pth
CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:0362dfd92e8da01e4a0deedcbd1c493b8162d5d1d84d5a4c1cd210c556f2cf9b
size 15024
last-checkpoint/rng_state_1.pth
CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:e65c5adee1a22c5343e38495a6905880496fb22d5e3ec5b16b87aadb731969d2
size 15024
last-checkpoint/rng_state_2.pth
CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:f8d8858483b0c6944d55621cc2633469e3e0d04c48b6671eee92d4abab2352c2
size 15024
last-checkpoint/rng_state_3.pth
CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:ca42ef4f7a2f8c2285c4cf6cef585dcc0b132b21e8bb33d96d53b6db837f5e54
size 15024
last-checkpoint/scheduler.pt
CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:be1e9cd300c4f4c1fc9be3848ef7e995abd4a81c17c7a3b103813aaad4725565
size 1064
last-checkpoint/trainer_state.json
CHANGED
@@ -1,9 +1,9 @@
{
"best_metric": 0.6319106221199036,
"best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4_frozenVision/lora/sft/checkpoint-1600",
-"epoch": 1.
+"epoch": 1.2874066443471541,
"eval_steps": 50,
-"global_step":
+"global_step": 2500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
@@ -4368,11 +4368,100 @@
"eval_steps_per_second": 0.935,
"num_input_tokens_seen": 28649256,
"step": 2450
+},
+{
+"epoch": 1.2642286891578676,
+"grad_norm": 10.005201788297592,
+"learning_rate": 1.967458152065857e-05,
+"loss": 0.2664,
+"num_input_tokens_seen": 28707736,
+"step": 2455
+},
+{
+"epoch": 1.2668040175122328,
+"grad_norm": 4.744134155404128,
+"learning_rate": 1.9481612060899646e-05,
+"loss": 0.3692,
+"num_input_tokens_seen": 28766232,
+"step": 2460
+},
+{
+"epoch": 1.269379345866598,
+"grad_norm": 8.49200897563331,
+"learning_rate": 1.928936436551661e-05,
+"loss": 0.315,
+"num_input_tokens_seen": 28824688,
+"step": 2465
+},
+{
+"epoch": 1.271954674220963,
+"grad_norm": 5.112500789477909,
+"learning_rate": 1.9097842981195834e-05,
+"loss": 0.3536,
+"num_input_tokens_seen": 28883176,
+"step": 2470
+},
+{
+"epoch": 1.2745300025753283,
+"grad_norm": 4.93472430343828,
+"learning_rate": 1.8907052437446272e-05,
+"loss": 0.3143,
+"num_input_tokens_seen": 28941592,
+"step": 2475
+},
+{
+"epoch": 1.2771053309296936,
+"grad_norm": 4.6754631245280365,
+"learning_rate": 1.871699724649244e-05,
+"loss": 0.3114,
+"num_input_tokens_seen": 29000064,
+"step": 2480
+},
+{
+"epoch": 1.2796806592840588,
+"grad_norm": 7.198381813960669,
+"learning_rate": 1.8527681903167644e-05,
+"loss": 0.3327,
+"num_input_tokens_seen": 29058496,
+"step": 2485
+},
+{
+"epoch": 1.2822559876384239,
+"grad_norm": 9.221713217692685,
+"learning_rate": 1.833911088480767e-05,
+"loss": 0.2543,
+"num_input_tokens_seen": 29116992,
+"step": 2490
+},
+{
+"epoch": 1.284831315992789,
+"grad_norm": 8.499870267936974,
+"learning_rate": 1.8151288651144893e-05,
+"loss": 0.2854,
+"num_input_tokens_seen": 29175496,
+"step": 2495
+},
+{
+"epoch": 1.2874066443471541,
+"grad_norm": 4.289294450742717,
+"learning_rate": 1.796421964420285e-05,
+"loss": 0.3221,
+"num_input_tokens_seen": 29233968,
+"step": 2500
+},
+{
+"epoch": 1.2874066443471541,
+"eval_loss": 0.7222262620925903,
+"eval_runtime": 16.106,
+"eval_samples_per_second": 3.725,
+"eval_steps_per_second": 0.931,
+"num_input_tokens_seen": 29233968,
+"step": 2500
}
],
"logging_steps": 5,
"max_steps": 3400,
-"num_input_tokens_seen":
+"num_input_tokens_seen": 29233968,
"num_train_epochs": 2,
"save_steps": 50,
"stateful_callbacks": {
@@ -4387,7 +4476,7 @@
"attributes": {}
}
},
-"total_flos":
+"total_flos": 1641625797525504.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
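The entries added to trainer_state.json carry the per-step training losses for steps 2455 through 2500 and the evaluation result at step 2500 (eval_loss 0.722, against the best_metric of 0.632 from checkpoint-1600). A minimal sketch, assuming a local copy of the checkpoint, of pulling the loss curve out of the standard log_history field:

# Minimal sketch: read the training and eval losses out of trainer_state.json.
# The local path is an assumption; the field names match the entries in the diff above.
import json

with open("last-checkpoint/trainer_state.json") as fh:
    state = json.load(fh)

train_log = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_log = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print("last train point:", train_log[-1])   # (2500, 0.3221) for this checkpoint
print("last eval point:", eval_log[-1])     # (2500, 0.7222...) for this checkpoint
print("best eval loss so far:", state["best_metric"])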