Training in progress, step 1700, checkpoint
- last-checkpoint/adapter_model.safetensors +1 -1
- last-checkpoint/global_step1700/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step1700/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step1700/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step1700/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step1700/zero_pp_rank_0_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/global_step1700/zero_pp_rank_1_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/global_step1700/zero_pp_rank_2_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/global_step1700/zero_pp_rank_3_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/latest +1 -1
- last-checkpoint/rng_state_0.pth +1 -1
- last-checkpoint/rng_state_1.pth +1 -1
- last-checkpoint/rng_state_2.pth +1 -1
- last-checkpoint/rng_state_3.pth +1 -1
- last-checkpoint/scheduler.pt +1 -1
- last-checkpoint/trainer_state.json +93 -4
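
The per-file diffs below are Git LFS pointer files (a version line, an "oid sha256:<hash>" line, and a "size <bytes>" line), not the binary payloads themselves. As a rough illustration only, a small Python sketch of how a locally fetched copy of one of these files could be checked against its committed pointer; the parsing helper and the local path are assumptions for the example, not part of this commit:

import hashlib
from pathlib import Path

def read_pointer(pointer_text: str) -> dict:
    # Parse the three-line Git LFS pointer format: version / oid / size.
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    return {"oid": fields["oid"].removeprefix("sha256:"), "size": int(fields["size"])}

def matches_pointer(file_path: Path, pointer: dict) -> bool:
    # True when the local file has the size and SHA-256 recorded in the pointer.
    data = file_path.read_bytes()
    return len(data) == pointer["size"] and hashlib.sha256(data).hexdigest() == pointer["oid"]

# Pointer values taken from the adapter_model.safetensors diff below.
pointer = read_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:ba9914360b2dead740bb4f8973f7876b182880fd878912194cb890333ef7f606\n"
    "size 18516456\n"
)
local_copy = Path("last-checkpoint/adapter_model.safetensors")  # hypothetical local clone with LFS objects fetched
if local_copy.exists():
    print(matches_pointer(local_copy, pointer))
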
last-checkpoint/adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:ba9914360b2dead740bb4f8973f7876b182880fd878912194cb890333ef7f606
 size 18516456
last-checkpoint/global_step1700/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f013102d025a0d93efddb53355cd46d133d540c05efd52903ac0e7df2b3a5a7
+size 27700976
last-checkpoint/global_step1700/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b67057336dd5fc1d2e096cb5948311380f08eab0b592030c98e1c23d06e4cf18
+size 27700976
last-checkpoint/global_step1700/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ebd4069f79d7af41073bdc6230a9f76d524e8139a7cbbcad75ec9bc78f29fb83
+size 27700976
last-checkpoint/global_step1700/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0900113e897cbbde7284a3e85c986e82d7ea60e287a384a35faa4c82fe9e5ca6
+size 27700976
last-checkpoint/global_step1700/zero_pp_rank_0_mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90ad7fce713cc4c6aac6ee2e3650f41ff5283d99963b83f91237e69637d182eb
+size 411571
last-checkpoint/global_step1700/zero_pp_rank_1_mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6513101107ca1980d33a5e4d6a1aa48fa89f014eb205a44405a520e8f4cf57ea
+size 411507
last-checkpoint/global_step1700/zero_pp_rank_2_mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0de1889354594f3a0eb87a002836aa15e9e2142ec0caadfd90235a3df462fae3
+size 411507
last-checkpoint/global_step1700/zero_pp_rank_3_mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9a67e6701e53e7386b0e2c7217ddd342c00b1cb8adc55c4fd155a01cf21c088
+size 411507
last-checkpoint/latest
CHANGED
@@ -1 +1 @@
-
+global_step1700
last-checkpoint/rng_state_0.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:9e7c17922709137dd2f358be87cc431f7959a56821e2d051582e81679e2d388e
 size 15024
last-checkpoint/rng_state_1.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:eaf0ec3cc431efdc1cb595b27e74fc020021d53f5c11850d7490a79bf42b71d6
 size 15024
last-checkpoint/rng_state_2.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:148fd0a663c121acf74d815964bc0d08d07065e8503b03adfd967fdaf63a6abc
 size 15024
last-checkpoint/rng_state_3.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:56589f6c34527656fe0dc752874cfc1460efbb5c0c3f874c4cd09d6415dd16c1
 size 15024
last-checkpoint/scheduler.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:dcafe96450339d5b6e4ff3a8d909c473a6afaab9bb34bc798805cded569190ca
 size 1064
last-checkpoint/trainer_state.json
CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.6319106221199036,
   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4_frozenVision/lora/sft/checkpoint-1600",
-  "epoch": 0.
+  "epoch": 0.8756116404841617,
   "eval_steps": 50,
-  "global_step":
+  "global_step": 1700,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2944,11 +2944,100 @@
       "eval_steps_per_second": 0.936,
       "num_input_tokens_seen": 19298848,
       "step": 1650
+    },
+    {
+      "epoch": 0.8524336852948751,
+      "grad_norm": 3.2430676664218496,
+      "learning_rate": 5.6305258170538676e-05,
+      "loss": 0.584,
+      "num_input_tokens_seen": 19357304,
+      "step": 1655
+    },
+    {
+      "epoch": 0.8550090136492403,
+      "grad_norm": 3.140559424454581,
+      "learning_rate": 5.606396838947988e-05,
+      "loss": 0.5544,
+      "num_input_tokens_seen": 19415800,
+      "step": 1660
+    },
+    {
+      "epoch": 0.8575843420036054,
+      "grad_norm": 3.993528386539066,
+      "learning_rate": 5.582253519467432e-05,
+      "loss": 0.6269,
+      "num_input_tokens_seen": 19474256,
+      "step": 1665
+    },
+    {
+      "epoch": 0.8601596703579707,
+      "grad_norm": 2.202747116085024,
+      "learning_rate": 5.558096429605263e-05,
+      "loss": 0.5073,
+      "num_input_tokens_seen": 19532736,
+      "step": 1670
+    },
+    {
+      "epoch": 0.8627349987123358,
+      "grad_norm": 4.4094334133851625,
+      "learning_rate": 5.533926140680221e-05,
+      "loss": 0.5319,
+      "num_input_tokens_seen": 19591184,
+      "step": 1675
+    },
+    {
+      "epoch": 0.865310327066701,
+      "grad_norm": 4.01821546567579,
+      "learning_rate": 5.509743224323203e-05,
+      "loss": 0.4525,
+      "num_input_tokens_seen": 19649656,
+      "step": 1680
+    },
+    {
+      "epoch": 0.8678856554210662,
+      "grad_norm": 5.3033277992950385,
+      "learning_rate": 5.485548252463749e-05,
+      "loss": 0.5276,
+      "num_input_tokens_seen": 19708144,
+      "step": 1685
+    },
+    {
+      "epoch": 0.8704609837754314,
+      "grad_norm": 5.124737819396939,
+      "learning_rate": 5.4613417973165106e-05,
+      "loss": 0.5482,
+      "num_input_tokens_seen": 19766592,
+      "step": 1690
+    },
+    {
+      "epoch": 0.8730363121297966,
+      "grad_norm": 3.47304956996904,
+      "learning_rate": 5.4371244313677225e-05,
+      "loss": 0.4656,
+      "num_input_tokens_seen": 19825064,
+      "step": 1695
+    },
+    {
+      "epoch": 0.8756116404841617,
+      "grad_norm": 6.394279811127835,
+      "learning_rate": 5.4128967273616625e-05,
+      "loss": 0.5701,
+      "num_input_tokens_seen": 19883504,
+      "step": 1700
+    },
+    {
+      "epoch": 0.8756116404841617,
+      "eval_loss": 0.7144017815589905,
+      "eval_runtime": 16.1358,
+      "eval_samples_per_second": 3.718,
+      "eval_steps_per_second": 0.93,
+      "num_input_tokens_seen": 19883504,
+      "step": 1700
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen":
+  "num_input_tokens_seen": 19883504,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -2963,7 +3052,7 @@
       "attributes": {}
     }
   },
-  "total_flos":
+  "total_flos": 1116505381011456.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
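
The entries added above are the per-step records that the Trainer writes into trainer_state.json. As a hedged sketch (it assumes those entries sit under the usual "log_history" key, which this diff only shows fragments of), the loss curves could be pulled back out of the checkpoint like this:

import json
from pathlib import Path

state_path = Path("last-checkpoint/trainer_state.json")  # path from this commit
if state_path.exists():
    state = json.loads(state_path.read_text())
    history = state.get("log_history", [])
    # Training steps carry "loss"; evaluation steps carry "eval_loss".
    train_loss = [(e["step"], e["loss"]) for e in history if "loss" in e]
    eval_loss = [(e["step"], e["eval_loss"]) for e in history if "eval_loss" in e]
    print("global_step:", state["global_step"], "best_metric:", state["best_metric"])
    if train_loss and eval_loss:
        print("latest train loss:", train_loss[-1])  # e.g. (1700, 0.5701) per the diff above
        print("latest eval loss:", eval_loss[-1])    # e.g. (1700, 0.7144...) per the diff above
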