ben81828 committed
Commit 3ac9f71 · verified · 1 Parent(s): 494fc7d

Training in progress, step 2950, checkpoint
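Note: every binary in this commit is tracked with Git LFS, so the diffs below only touch the small pointer files (a version line, a sha256 oid, and a byte size); the actual weights live in the LFS store. A minimal sketch of checking a downloaded blob against its pointer, with hypothetical local paths:

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: str) -> dict:
    # The pointer format is the three lines shown in the diffs below:
    # "version <url>", "oid sha256:<hex>", "size <bytes>".
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.strip().partition(" ")
        if key:
            fields[key] = value
    return fields

def verify_blob(pointer_path: str, blob_path: str) -> bool:
    # Compare the blob's byte size and sha256 digest to the pointer metadata.
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    data = Path(blob_path).read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

# Hypothetical usage:
# verify_blob("adapter_model.safetensors.pointer", "adapter_model.safetensors")
```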
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f649bc97ca842140ab28c612e0e561ebbde8139ce799f23e401fad5ec0bf673a
+ oid sha256:3b7a40fbb5d3ef8c199f94ef197e14510d714bce74ec2a360d2feaf74aeb1aee
  size 29034840
last-checkpoint/global_step2950/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9212095d1b2ad6dede71e3497e07f7008f864836ee652f7d721937f55416294
+ size 43429616
last-checkpoint/global_step2950/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1e820af5f58539147abfb7908d919cd87d04b274cdf0246b111d819a373d5df
+ size 43429616
last-checkpoint/global_step2950/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:29fa9402ebb7e280c65f2f1cbd5d0e9207a9da3e0d25f466996ebe2889a99a3d
+ size 43429616
last-checkpoint/global_step2950/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:602e6fda95ec4917ebd699b723c0a2bce3bd1b7ded81cd3fcb8bcb48477b055d
+ size 43429616
last-checkpoint/global_step2950/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:512575152585707305753bb11f9e58a6063a289cf07716493a20893fbd1142b9
+ size 637299
last-checkpoint/global_step2950/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bd557eb4b9edd0a1910792057338dca17083fa9a8f40f41e28c2c90ec22a93ae
+ size 637171
last-checkpoint/global_step2950/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dbd7c1168f13cf3fc959af479e28a5a467ab4dd67204cffc114ba3660c0517c0
+ size 637171
last-checkpoint/global_step2950/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06808762d99a725be346c2151fb638282bfb9e97c3c8bcd58aaef62a9f5b3815
+ size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step2900
+ global_step2950
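Note: the new global_step2950/ directory holds the DeepSpeed ZeRO shards for this checkpoint (bf16 optimizer partitions and model states for data-parallel ranks 0–3), and last-checkpoint/latest now names that tag so a resumed run continues from step 2950. A hedged sketch of consolidating the shards into a single fp32 state dict, assuming a DeepSpeed version that ships the zero_to_fp32 helpers; the output path is hypothetical:

```python
# Hedged sketch: consolidate ZeRO-partitioned checkpoint shards to fp32.
import torch
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# "last-checkpoint/latest" records the tag to load, here "global_step2950".
state_dict = get_fp32_state_dict_from_zero_checkpoint(
    "last-checkpoint", tag="global_step2950"
)
torch.save(state_dict, "consolidated_fp32_state_dict.bin")  # hypothetical output file
```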
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ce92cea831a04716b4b472f1dad1cc986b2021dee9aac057217f5d455b27ec42
+ oid sha256:bfc5d4e344535f1dd0ff5275633ec3d55bb6249e432442365ff24445d82ec35c
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3cddb73bbdf0f6f6a2c3182d70f7ad5d587353b164c08dd4f383b940d6b61e4e
+ oid sha256:8a898928042c09dc123c1025557279997043b7f607bc91ee2ff2d4b4d2b9f1ba
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b24b508e466beb446d37377d2a04757d3bc2b4230de3ac56b25a65d7753a74c1
+ oid sha256:6923d07d979aa78d66765208f598662fd5092b5227cd87920feedfb923fa375f
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b4c6a18a7de8b25b21673ba2ff7efbaaae00ec8c453c7975b467c1df87b87022
+ oid sha256:e9ba35b9b3c512fbe857d909557329ba47dbefe5f521014123c05901c32edb6d
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6207ec0a6d3415bd88090ab549215521abec8051b511fdc1a1cfc0484ab44197
+ oid sha256:40ec7601b82ddf00dad2cf9910c40a0b3836b366525ddd347dc523d4b95c6345
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
    "best_metric": 0.1869634985923767,
    "best_model_checkpoint": "saves/CADICA_qwenvl_direction_then_DetectAndClassify_scale6/lora/sft/checkpoint-2350",
-   "epoch": 0.8567208271787297,
+   "epoch": 0.8714918759231906,
    "eval_steps": 50,
-   "global_step": 2900,
+   "global_step": 2950,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
@@ -5169,11 +5169,100 @@
      "eval_steps_per_second": 0.784,
      "num_input_tokens_seen": 30118072,
      "step": 2900
+   },
+   {
+     "epoch": 0.8581979320531757,
+     "grad_norm": 1.2779653791980783,
+     "learning_rate": 6.559944544378072e-05,
+     "loss": 0.2241,
+     "num_input_tokens_seen": 30170248,
+     "step": 2905
+   },
+   {
+     "epoch": 0.8596750369276218,
+     "grad_norm": 1.6407050078852088,
+     "learning_rate": 6.548336798918411e-05,
+     "loss": 0.2298,
+     "num_input_tokens_seen": 30222016,
+     "step": 2910
+   },
+   {
+     "epoch": 0.8611521418020679,
+     "grad_norm": 1.5078723595666699,
+     "learning_rate": 6.536719816096935e-05,
+     "loss": 0.2396,
+     "num_input_tokens_seen": 30273312,
+     "step": 2915
+   },
+   {
+     "epoch": 0.8626292466765141,
+     "grad_norm": 1.852309531147588,
+     "learning_rate": 6.52509366522045e-05,
+     "loss": 0.2324,
+     "num_input_tokens_seen": 30324328,
+     "step": 2920
+   },
+   {
+     "epoch": 0.8641063515509602,
+     "grad_norm": 1.3789560520965807,
+     "learning_rate": 6.513458415650452e-05,
+     "loss": 0.2263,
+     "num_input_tokens_seen": 30376488,
+     "step": 2925
+   },
+   {
+     "epoch": 0.8655834564254062,
+     "grad_norm": 1.904793400524472,
+     "learning_rate": 6.501814136802725e-05,
+     "loss": 0.1734,
+     "num_input_tokens_seen": 30429504,
+     "step": 2930
+   },
+   {
+     "epoch": 0.8670605612998523,
+     "grad_norm": 1.15273615308065,
+     "learning_rate": 6.490160898146918e-05,
+     "loss": 0.2235,
+     "num_input_tokens_seen": 30480400,
+     "step": 2935
+   },
+   {
+     "epoch": 0.8685376661742984,
+     "grad_norm": 1.4070108528869274,
+     "learning_rate": 6.47849876920614e-05,
+     "loss": 0.2297,
+     "num_input_tokens_seen": 30531912,
+     "step": 2940
+   },
+   {
+     "epoch": 0.8700147710487445,
+     "grad_norm": 1.182373794273276,
+     "learning_rate": 6.46682781955653e-05,
+     "loss": 0.1764,
+     "num_input_tokens_seen": 30584688,
+     "step": 2945
+   },
+   {
+     "epoch": 0.8714918759231906,
+     "grad_norm": 17.302996120392294,
+     "learning_rate": 6.455148118826859e-05,
+     "loss": 0.1692,
+     "num_input_tokens_seen": 30637448,
+     "step": 2950
+   },
+   {
+     "epoch": 0.8714918759231906,
+     "eval_loss": 0.20344533026218414,
+     "eval_runtime": 18.8463,
+     "eval_samples_per_second": 3.184,
+     "eval_steps_per_second": 0.796,
+     "num_input_tokens_seen": 30637448,
+     "step": 2950
    }
  ],
  "logging_steps": 5,
  "max_steps": 6770,
- "num_input_tokens_seen": 30118072,
+ "num_input_tokens_seen": 30637448,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -5188,7 +5277,7 @@
      "attributes": {}
    }
  },
- "total_flos": 1986962888589312.0,
+ "total_flos": 2021200008314880.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null