Training in progress, step 2700, checkpoint
- last-checkpoint/adapter_model.safetensors +1 -1
- last-checkpoint/global_step2700/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step2700/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step2700/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step2700/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step2700/zero_pp_rank_0_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/global_step2700/zero_pp_rank_1_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/global_step2700/zero_pp_rank_2_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/global_step2700/zero_pp_rank_3_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/latest +1 -1
- last-checkpoint/rng_state_0.pth +1 -1
- last-checkpoint/rng_state_1.pth +1 -1
- last-checkpoint/rng_state_2.pth +1 -1
- last-checkpoint/rng_state_3.pth +1 -1
- last-checkpoint/scheduler.pt +1 -1
- last-checkpoint/trainer_state.json +93 -4
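
This commit saves the DeepSpeed ZeRO checkpoint written at global step 2700: the global_step2700/ files are per-rank optimizer and model-state shards (four ranks), alongside the LoRA adapter, per-rank RNG states, scheduler state and updated trainer state. A minimal sketch of resuming from such a checkpoint with the Hugging Face Trainer follows; the trainer setup and the local directory name last-checkpoint are assumptions, not part of this commit.

```python
# Minimal sketch, assuming an already-configured Trainer and that the
# checkpoint sits in ./last-checkpoint (both are assumptions, not part of
# this commit). resume_from_checkpoint restores the ZeRO shards, scheduler,
# RNG states and trainer_state.json before continuing past step 2700.
from transformers import Trainer

def resume_training(trainer: Trainer) -> None:
    trainer.train(resume_from_checkpoint="last-checkpoint")
```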
last-checkpoint/adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:c1f0a1299753b652961a95b591427c87ce3772807fe346f4fffff8a783b0e00e
 size 29034840
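
The updated adapter_model.safetensors (about 29 MB) holds the LoRA adapter weights for this SFT run. Below is a sketch of loading it with peft; the base model ID is an assumption inferred from the repository name, since the actual base checkpoint is not recorded in this commit.

```python
# Sketch only: attach the LoRA adapter saved in this checkpoint to a base
# model with peft. The base model ID below is an assumption inferred from
# the repository name, not something stated in this commit.
from transformers import AutoModelForVision2Seq
from peft import PeftModel

base = AutoModelForVision2Seq.from_pretrained("Qwen/Qwen2-VL-2B-Instruct")
model = PeftModel.from_pretrained(base, "last-checkpoint")  # reads adapter_model.safetensors
```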
last-checkpoint/global_step2700/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ed7e5e674036436364db26c2fb093df880e7161282ad80f1a9ffbcb593cdf0ca
+size 43429616
last-checkpoint/global_step2700/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:deea6f1bbc6224f48319a93c685b88a0c33c58aa2c719b774a866a5849d5a698
+size 43429616
last-checkpoint/global_step2700/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afb9716aa893ab3fed47090f22d1a7995c1108aa8ddc23cb2e60133fd05a2684
+size 43429616
last-checkpoint/global_step2700/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b7e86fb385461d99683ba831c08492bbcd24d12057a758987b90e59828f4cd89
+size 43429616
last-checkpoint/global_step2700/zero_pp_rank_0_mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:07653c959da414fae2d050ab7f94e043d9c9b24db42c193fdee0c381da235380
+size 637299
last-checkpoint/global_step2700/zero_pp_rank_1_mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d2cd6d33dfac1715cc7310636621c794fb2c4bc82ea9f78747ff361ddfc2d0bc
+size 637171
last-checkpoint/global_step2700/zero_pp_rank_2_mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8b22641256262b58b47ab78d3087c36e2ab93894e42dddb21a15d9010bf7b87
+size 637171
last-checkpoint/global_step2700/zero_pp_rank_3_mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f945c803427e31a4bfb7d69ec05db4391e922a7d4f5c9f65e457b4938ef0f5d
+size 637171
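
Each of the files above is stored as a Git LFS pointer: a version line, an oid sha256 line, and a size line. The sketch below checks a downloaded shard against the hash and size recorded in its pointer; the paths are placeholders.

```python
# Sketch: verify a downloaded checkpoint shard against the sha256 and byte
# size recorded in its Git LFS pointer file. Paths are placeholders.
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: str) -> dict:
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields  # keys: "version", "oid" (e.g. "sha256:..."), "size"

def verify_shard(pointer_path: str, blob_path: str) -> bool:
    meta = parse_lfs_pointer(pointer_path)
    data = Path(blob_path).read_bytes()
    digest = "sha256:" + hashlib.sha256(data).hexdigest()
    return len(data) == int(meta["size"]) and digest == meta["oid"]
```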
last-checkpoint/latest
CHANGED
@@ -1 +1 @@
-
+global_step2700
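
The latest file now names global_step2700, which is the tag DeepSpeed reads to locate the ZeRO shards listed above. If a consolidated fp32 state dict is ever needed outside of training, DeepSpeed's zero_to_fp32 utility can rebuild it from those shards; a sketch follows, assuming the checkpoint sits in ./last-checkpoint and an output path chosen here for illustration.

```python
# Sketch: consolidate the ZeRO-partitioned shards under global_step2700 into
# a single fp32 state dict. The utility reads last-checkpoint/latest to find
# the step tag; the output file name is an assumption.
import torch
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

state_dict = get_fp32_state_dict_from_zero_checkpoint("last-checkpoint")
torch.save(state_dict, "consolidated_fp32_step2700.pt")
```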
last-checkpoint/rng_state_0.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:9e599331812a34463d102d64a4034a0b702a893f362f752003aa577fe71dcc1d
 size 15024
last-checkpoint/rng_state_1.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:5ed431e5e71393a0174ad2fd492755f8c1142596f1af3bfe7827c1f8f815dd80
 size 15024
last-checkpoint/rng_state_2.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:a2e70789f26a9f56b6b779e87cb1a405615af81562a256e5afe579f40972e827
 size 15024
last-checkpoint/rng_state_3.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:7c8c18bc74d5211e761da269c814d7da0687633993838ec22e81ac939a14e91b
 size 15024
last-checkpoint/scheduler.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:375d770aab1911f1464e2da712c8440a47a0edbad49710a4097aa7592f9fdb3e
 size 1064
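
scheduler.pt and the four rng_state_*.pth files carry the learning-rate-scheduler state and per-rank RNG snapshots that let a resumed run continue deterministically. A sketch for inspecting them directly; the key names inside these pickles are typical for PyTorch but not guaranteed.

```python
# Sketch: peek at the scheduler and per-rank RNG states from this checkpoint.
# weights_only=False because these are plain pickled dicts; the key names
# noted in comments are typical, not guaranteed.
import torch

sched = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)
print(sched.get("last_epoch"), sched.get("_last_lr"))  # e.g. step count and current LR

rng0 = torch.load("last-checkpoint/rng_state_0.pth", map_location="cpu", weights_only=False)
print(list(rng0.keys()))  # usually python/numpy/cpu/cuda RNG states for rank 0
```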
last-checkpoint/trainer_state.json
CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.1869634985923767,
   "best_model_checkpoint": "saves/CADICA_qwenvl_direction_then_DetectAndClassify_scale6/lora/sft/checkpoint-2350",
-  "epoch": 0.
+  "epoch": 0.7976366322008862,
   "eval_steps": 50,
-  "global_step":
+  "global_step": 2700,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -4724,11 +4724,100 @@
       "eval_steps_per_second": 0.793,
       "num_input_tokens_seen": 27521088,
       "step": 2650
+    },
+    {
+      "epoch": 0.7843426883308715,
+      "grad_norm": 21.321277048653876,
+      "learning_rate": 7.127039815931322e-05,
+      "loss": 0.2047,
+      "num_input_tokens_seen": 27573512,
+      "step": 2655
+    },
+    {
+      "epoch": 0.7858197932053176,
+      "grad_norm": 17.20183228158753,
+      "learning_rate": 7.11598097586445e-05,
+      "loss": 0.2309,
+      "num_input_tokens_seen": 27625488,
+      "step": 2660
+    },
+    {
+      "epoch": 0.7872968980797637,
+      "grad_norm": 1.7315172011761586,
+      "learning_rate": 7.104909511876293e-05,
+      "loss": 0.2188,
+      "num_input_tokens_seen": 27677824,
+      "step": 2665
+    },
+    {
+      "epoch": 0.7887740029542097,
+      "grad_norm": 1.1907445578210916,
+      "learning_rate": 7.0938254900191e-05,
+      "loss": 0.2127,
+      "num_input_tokens_seen": 27730048,
+      "step": 2670
+    },
+    {
+      "epoch": 0.7902511078286558,
+      "grad_norm": 1.81826608908883,
+      "learning_rate": 7.082728976420032e-05,
+      "loss": 0.2534,
+      "num_input_tokens_seen": 27781512,
+      "step": 2675
+    },
+    {
+      "epoch": 0.7917282127031019,
+      "grad_norm": 1.2558672573153766,
+      "learning_rate": 7.071620037280779e-05,
+      "loss": 0.204,
+      "num_input_tokens_seen": 27833808,
+      "step": 2680
+    },
+    {
+      "epoch": 0.793205317577548,
+      "grad_norm": 16.755851819106958,
+      "learning_rate": 7.060498738877159e-05,
+      "loss": 0.2218,
+      "num_input_tokens_seen": 27886232,
+      "step": 2685
+    },
+    {
+      "epoch": 0.794682422451994,
+      "grad_norm": 1.6442173318750486,
+      "learning_rate": 7.049365147558727e-05,
+      "loss": 0.2157,
+      "num_input_tokens_seen": 27938696,
+      "step": 2690
+    },
+    {
+      "epoch": 0.7961595273264401,
+      "grad_norm": 20.30442824896465,
+      "learning_rate": 7.038219329748376e-05,
+      "loss": 0.2401,
+      "num_input_tokens_seen": 27990816,
+      "step": 2695
+    },
+    {
+      "epoch": 0.7976366322008862,
+      "grad_norm": 0.9565866694207389,
+      "learning_rate": 7.027061351941948e-05,
+      "loss": 0.2225,
+      "num_input_tokens_seen": 28042992,
+      "step": 2700
+    },
+    {
+      "epoch": 0.7976366322008862,
+      "eval_loss": 0.41549214720726013,
+      "eval_runtime": 18.9524,
+      "eval_samples_per_second": 3.166,
+      "eval_steps_per_second": 0.791,
+      "num_input_tokens_seen": 28042992,
+      "step": 2700
     }
   ],
   "logging_steps": 5,
   "max_steps": 6770,
-  "num_input_tokens_seen":
+  "num_input_tokens_seen": 28042992,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -4743,7 +4832,7 @@
       "attributes": {}
     }
   },
-  "total_flos":
+  "total_flos": 1850037045297152.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
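
trainer_state.json gains eleven new log_history entries: training loss every 5 steps from 2655 to 2700 plus the step-2700 evaluation (eval_loss 0.4155, versus the best eval of 0.1870 recorded at checkpoint-2350). A sketch for reading those curves back out of the file, assuming the checkpoint sits in ./last-checkpoint:

```python
# Sketch: read the training and eval loss curves out of trainer_state.json
# (the path assumes the checkpoint directory is ./last-checkpoint).
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
print("latest train loss:", train[-1])   # (2700, 0.2225)
print("latest eval loss:", evals[-1])    # (2700, 0.41549...)
print("best eval:", state["best_metric"], "at", state["best_model_checkpoint"])
```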