ben81828 committed
Commit cbfa102 · verified · 1 Parent(s): e096aa4

Training in progress, step 2400, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e470a7312966ac506e60fed47092ca459f2f086beab59e5b25dca2bf59a21f73
+oid sha256:cdf5f54c0020018c5eca1b4c1055878f2c1e58bfa3f188c951ed0eed9b279f0d
 size 18516456
last-checkpoint/global_step2399/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0d7588db812bab425924f1d00f7d3253a69a0df1e0e50dd679f1dbd305f5584f
+size 27700976
last-checkpoint/global_step2399/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf88782f06694860d81f0d5c8069259cec602afb588601c5062edb6a03d763e4
+size 27700976
last-checkpoint/global_step2399/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:334ed4c0c6f60305b89445cb8541c3f0758999d4eeac7fcda188d84d85f23824
+size 27700976
last-checkpoint/global_step2399/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:093234ca5f7b5a8219f276f71d1f61b3d4a31e6e737be35f63a9a58a9ba01cde
+size 27700976
last-checkpoint/global_step2399/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7234e34a93ba129f360afd7e4671ea4ec12ed34e881f1d256d7ca7e6eb1beca0
+size 411571
last-checkpoint/global_step2399/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef284a886c71e441515bb4ddb38641d3db3759acda8d381b94eeabfbdb16dbae
+size 411507
last-checkpoint/global_step2399/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f00d47842911b16cee9844d402b0ee6a63b826625851472d046966cc48855138
+size 411507
last-checkpoint/global_step2399/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:105abd94c9ebf288806f56c08a1751a5530e581bf52c16a7a8d8e0c1b0c4e212
+size 411507
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step2349
+global_step2399
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:681faefe4cf303ca7f9bc3073b09b166da4f558d55bee3d5eee90ba5d83159bb
+oid sha256:6b8aff7a1897a7eaf48c78ea1f8115c061edfa2b6fa42280e2c1c58fe66b1f8a
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c0fb250f1d137fb55cef85743a342508178b4fe3a20c6793c82e279730ea280b
+oid sha256:1d73a7524f07999ef35d5d9b107dcc1678eae2ada841644e1bd00ec0734368c2
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:34a0d7cfaa34f7e3738b4ef4989d693ed7864fed3b2a44ef1b6892fdcf026bb9
+oid sha256:149a2ed30e88bf94d622f8d7693f382286a49ac536a3f63efc50cab63f6b9f39
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2a82aabe23bc62e289ef7d075c79f353bbc81286ec0f8964eabda4209d630e10
+oid sha256:18f12c1b5aae2b7d4bb968649839fc7ff1ce6131508baad4b633693b04cee910
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d71a7ba4d3f18f2d8b75b4d8773a5d2a13dce7aa34d7060fa3e4c5c241599a29
+oid sha256:33f198cd3e0b21f350098b16849fc1ced7c1d5261e89a1c503f4a0d3ce688f30
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.6319106221199036,
   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4_frozenVision/lora/sft/checkpoint-1600",
-  "epoch": 1.210146793716199,
+  "epoch": 1.2359000772598505,
   "eval_steps": 50,
-  "global_step": 2350,
+  "global_step": 2400,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -4190,11 +4190,100 @@
       "eval_steps_per_second": 0.937,
       "num_input_tokens_seen": 27479840,
       "step": 2350
+    },
+    {
+      "epoch": 1.212722122070564,
+      "grad_norm": 8.004400275953609,
+      "learning_rate": 2.3678391856132204e-05,
+      "loss": 0.352,
+      "num_input_tokens_seen": 27538344,
+      "step": 2355
+    },
+    {
+      "epoch": 1.2152974504249292,
+      "grad_norm": 8.385547193425513,
+      "learning_rate": 2.3471967400628513e-05,
+      "loss": 0.347,
+      "num_input_tokens_seen": 27596808,
+      "step": 2360
+    },
+    {
+      "epoch": 1.2178727787792945,
+      "grad_norm": 3.9234442237475435,
+      "learning_rate": 2.3266170337008398e-05,
+      "loss": 0.3667,
+      "num_input_tokens_seen": 27655272,
+      "step": 2365
+    },
+    {
+      "epoch": 1.2204481071336595,
+      "grad_norm": 6.584480429736488,
+      "learning_rate": 2.306100553240274e-05,
+      "loss": 0.3311,
+      "num_input_tokens_seen": 27713784,
+      "step": 2370
+    },
+    {
+      "epoch": 1.2230234354880247,
+      "grad_norm": 5.791637874835276,
+      "learning_rate": 2.2856477838989456e-05,
+      "loss": 0.2964,
+      "num_input_tokens_seen": 27772248,
+      "step": 2375
+    },
+    {
+      "epoch": 1.22559876384239,
+      "grad_norm": 5.663503226529594,
+      "learning_rate": 2.2652592093878666e-05,
+      "loss": 0.3683,
+      "num_input_tokens_seen": 27830704,
+      "step": 2380
+    },
+    {
+      "epoch": 1.228174092196755,
+      "grad_norm": 9.657080260273457,
+      "learning_rate": 2.244935311899829e-05,
+      "loss": 0.3819,
+      "num_input_tokens_seen": 27889160,
+      "step": 2385
+    },
+    {
+      "epoch": 1.2307494205511202,
+      "grad_norm": 4.757552901440964,
+      "learning_rate": 2.224676572098007e-05,
+      "loss": 0.3084,
+      "num_input_tokens_seen": 27947608,
+      "step": 2390
+    },
+    {
+      "epoch": 1.2333247489054855,
+      "grad_norm": 5.188072586185411,
+      "learning_rate": 2.2044834691045873e-05,
+      "loss": 0.4267,
+      "num_input_tokens_seen": 28006112,
+      "step": 2395
+    },
+    {
+      "epoch": 1.2359000772598505,
+      "grad_norm": 7.221389028269126,
+      "learning_rate": 2.184356480489432e-05,
+      "loss": 0.3486,
+      "num_input_tokens_seen": 28064552,
+      "step": 2400
+    },
+    {
+      "epoch": 1.2359000772598505,
+      "eval_loss": 0.7410638928413391,
+      "eval_runtime": 15.945,
+      "eval_samples_per_second": 3.763,
+      "eval_steps_per_second": 0.941,
+      "num_input_tokens_seen": 28064552,
+      "step": 2400
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen": 27479840,
+  "num_input_tokens_seen": 28064552,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -4209,7 +4298,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1543122079449088.0,
+  "total_flos": 1575955876020224.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null