ben81828 committed
Commit 6c47238 · verified · 1 Parent(s): 7805331

Training in progress, step 1750, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ba9914360b2dead740bb4f8973f7876b182880fd878912194cb890333ef7f606
+oid sha256:0ef73e2283ef0a8265cb8500b9ad3f8507559811b231f9ac4422b8beb4ea24ca
 size 18516456
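
Each of the pointer diffs in this commit is a Git LFS pointer (a version line, a sha256 oid, and a byte size); only the oid changes when the checkpoint is overwritten. A minimal sketch for checking a locally pulled file against its pointer's oid, assuming the repo has been cloned with LFS content fetched (the local path is illustrative):

import hashlib

def sha256_of(path: str) -> str:
    # Stream the file in 1 MiB chunks so large checkpoints are not read into memory at once.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# oid copied from the new pointer above; the path is an assumption about where the clone lives.
expected = "0ef73e2283ef0a8265cb8500b9ad3f8507559811b231f9ac4422b8beb4ea24ca"
print(sha256_of("last-checkpoint/adapter_model.safetensors") == expected)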
last-checkpoint/global_step1750/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e9fae63a7e44699fdfa04e2a64bd0cee94e2d47eef691bac08448532285e8a7
+size 27700976
last-checkpoint/global_step1750/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5583adcea1a5be6aef8847ec3fe33fca89339e049bea5960f733fa00fc47b3a
+size 27700976
last-checkpoint/global_step1750/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:942a54668d6e20d41b592b24432607bd4bc5d74a15c32e13e5395c88ee124f83
+size 27700976
last-checkpoint/global_step1750/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d2eb932b7b0a4a13c81baa3321767b78c6662a1534d360912037f0bbcf7dd28d
+size 27700976
last-checkpoint/global_step1750/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc762ab4c4fa09aee66d1cdc3bd9c18b0e089540ff771b477c3f14712574bd78
+size 411571
last-checkpoint/global_step1750/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7a5367ee82bfa4de533d053692b6361c80607824890bd86394bf93f0d1a7afc
+size 411507
last-checkpoint/global_step1750/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:42d996e234024b2b7cc1ed78e09085b9a5e4f18cfe3e6a9cdac547a6936e86ad
+size 411507
last-checkpoint/global_step1750/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f80240f90b65712451f890a49680db51de5c309d6547f38e60d8ee2aec311d7c
+size 411507
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step1700
+global_step1750
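
The global_step1750/ files added above are DeepSpeed ZeRO shards, i.e. per-rank bf16 optimizer states and per-rank model states for a 4-rank run, and last-checkpoint/latest now points at the global_step1750 tag. A minimal sketch for merging those shards into a single fp32 state dict, assuming DeepSpeed is installed and the checkpoint directory has been downloaded with the layout shown here (paths are illustrative):

# Consolidate the per-rank ZeRO shards into one fp32 state dict on CPU.
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# "last-checkpoint" must contain the "latest" file and the global_step1750/ directory.
state_dict = get_fp32_state_dict_from_zero_checkpoint(
    "last-checkpoint", tag="global_step1750"
)
print(f"{len(state_dict)} tensors, "
      f"{sum(t.numel() for t in state_dict.values()):,} parameters")

DeepSpeed checkpoints also typically ship a standalone zero_to_fp32.py helper alongside these shards that performs the same merge from the command line.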
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9e7c17922709137dd2f358be87cc431f7959a56821e2d051582e81679e2d388e
+oid sha256:222e5f04f66dfcca4efb2d648f5a480d56c2a07755d7a1bae8232d01d4b479ce
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:eaf0ec3cc431efdc1cb595b27e74fc020021d53f5c11850d7490a79bf42b71d6
+oid sha256:150a9cfa07bd33135b1b8b22033907e44137689de662dda0a482f3af84c5a1ff
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:148fd0a663c121acf74d815964bc0d08d07065e8503b03adfd967fdaf63a6abc
+oid sha256:c0c0d7619e94c90efa0c89a1f208db53b48c726519761710da6fbc31a80651d8
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:56589f6c34527656fe0dc752874cfc1460efbb5c0c3f874c4cd09d6415dd16c1
+oid sha256:aa66a4b2f749b1ec7e8868668dc670ff3f6d8df765a5249122f980ae5aae6a54
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dcafe96450339d5b6e4ff3a8d909c473a6afaab9bb34bc798805cded569190ca
+oid sha256:2a1c59a2217a7993f4a6258f4be38869d3dc42c6bc2349bde5d4858846c79cb4
 size 1064
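
The rng_state_{0..3}.pth files capture each rank's RNG state and scheduler.pt holds the learning-rate scheduler state, so resuming at step 1750 keeps the data order and LR schedule consistent. A minimal sketch for inspecting them locally, assuming PyTorch is installed and the files have been pulled (paths are illustrative; these are pickled dicts, so newer PyTorch may require weights_only=False):

import torch

# Both files are small torch pickles; load on CPU for inspection only.
sched = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)
rng0 = torch.load("last-checkpoint/rng_state_0.pth", map_location="cpu", weights_only=False)

print("scheduler state keys:", sorted(sched.keys()))
print("rng_state_0 keys:", sorted(rng0.keys()))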
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.6319106221199036,
   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4_frozenVision/lora/sft/checkpoint-1600",
-  "epoch": 0.8756116404841617,
+  "epoch": 0.9013649240278135,
   "eval_steps": 50,
-  "global_step": 1700,
+  "global_step": 1750,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -3033,11 +3033,100 @@
       "eval_steps_per_second": 0.93,
       "num_input_tokens_seen": 19883504,
       "step": 1700
+    },
+    {
+      "epoch": 0.8781869688385269,
+      "grad_norm": 4.527262723362309,
+      "learning_rate": 5.388659258287102e-05,
+      "loss": 0.5823,
+      "num_input_tokens_seen": 19942000,
+      "step": 1705
+    },
+    {
+      "epoch": 0.8807622971928921,
+      "grad_norm": 4.628112845411063,
+      "learning_rate": 5.364412597363759e-05,
+      "loss": 0.5446,
+      "num_input_tokens_seen": 20000440,
+      "step": 1710
+    },
+    {
+      "epoch": 0.8833376255472573,
+      "grad_norm": 6.077375809046342,
+      "learning_rate": 5.3401573180287426e-05,
+      "loss": 0.5769,
+      "num_input_tokens_seen": 20058920,
+      "step": 1715
+    },
+    {
+      "epoch": 0.8859129539016225,
+      "grad_norm": 6.492863688878202,
+      "learning_rate": 5.315893993922986e-05,
+      "loss": 0.5614,
+      "num_input_tokens_seen": 20117416,
+      "step": 1720
+    },
+    {
+      "epoch": 0.8884882822559876,
+      "grad_norm": 5.332057542240503,
+      "learning_rate": 5.29162319887768e-05,
+      "loss": 0.5215,
+      "num_input_tokens_seen": 20175936,
+      "step": 1725
+    },
+    {
+      "epoch": 0.8910636106103528,
+      "grad_norm": 3.8772752615113077,
+      "learning_rate": 5.26734550690071e-05,
+      "loss": 0.4968,
+      "num_input_tokens_seen": 20234368,
+      "step": 1730
+    },
+    {
+      "epoch": 0.893638938964718,
+      "grad_norm": 4.886426418731965,
+      "learning_rate": 5.243061492163073e-05,
+      "loss": 0.5029,
+      "num_input_tokens_seen": 20292856,
+      "step": 1735
+    },
+    {
+      "epoch": 0.8962142673190832,
+      "grad_norm": 4.031774194047053,
+      "learning_rate": 5.2187717289852955e-05,
+      "loss": 0.5249,
+      "num_input_tokens_seen": 20351272,
+      "step": 1740
+    },
+    {
+      "epoch": 0.8987895956734484,
+      "grad_norm": 5.344580011428224,
+      "learning_rate": 5.1944767918238624e-05,
+      "loss": 0.5801,
+      "num_input_tokens_seen": 20409744,
+      "step": 1745
+    },
+    {
+      "epoch": 0.9013649240278135,
+      "grad_norm": 3.923379435953565,
+      "learning_rate": 5.170177255257618e-05,
+      "loss": 0.546,
+      "num_input_tokens_seen": 20468200,
+      "step": 1750
+    },
+    {
+      "epoch": 0.9013649240278135,
+      "eval_loss": 0.672294020652771,
+      "eval_runtime": 16.0203,
+      "eval_samples_per_second": 3.745,
+      "eval_steps_per_second": 0.936,
+      "num_input_tokens_seen": 20468200,
+      "step": 1750
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen": 19883504,
+  "num_input_tokens_seen": 20468200,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -3052,7 +3141,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1116505381011456.0,
+  "total_flos": 1149340689891328.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null