ben81828 committed
Commit 21aa7ee · verified · 1 Parent(s): a2bb40c

Training in progress, step 750, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cb5f3724b5d59ea4e0a9371b33e20124137112f04ac962fd4bc5e834b083db05
+oid sha256:b80d0303f147dcd83f97c68d1de338e51e85f076089eb9eec3b8eff422c8bc34
 size 18516456
last-checkpoint/global_step750/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a10baa950729cba886265be9ae3297a797d1a563529b79fd37bbab7e23e50705
+size 27700976
last-checkpoint/global_step750/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:91eaca47d12ba0676ccade07b28eb590f19a3780366bbd8b3fa5eb4f6a38619e
+size 27700976
last-checkpoint/global_step750/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b306483a8da253ae83e769790477b1b2011fd346e91017da21e2c455f048aed8
+size 27700976
last-checkpoint/global_step750/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:29fcdb855034311c0f40a3de5596e146ab9bc1025d6d5bafe8a45b389db1d98c
+size 27700976
last-checkpoint/global_step750/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fa751255b9d484acd655459378bb8b72f91109c691d64bfc34761c4482ac4ac3
+size 411571
last-checkpoint/global_step750/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e94f3782d8f3cea94cdaad193aa9183d7002335cebd8149f92715121a02bb71e
+size 411507
last-checkpoint/global_step750/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf1d154884bdfe4eb65ceace27198db48fcf736453df0b71d12ed7d4b85531d7
+size 411507
last-checkpoint/global_step750/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e84cbb1188d1e661ae0a82169a8dafd393234eefdf44c3a3a11c248f9d54a08
+size 411507
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step700
+global_step750
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e7d74de51245105e1fbf57a6707ef3538b353952485508f6e2f8f74dc5d479d4
+oid sha256:66b4ef73f3603a1b91082ee108fa8299ebe45fb3cdeec7d0bdca1982af5bf07d
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0617c9eb6cf7df57b2e0bb53cfe17c05f0910de56fe5b14427fe39ab54a44782
+oid sha256:882eccb2a272cf97cd645050bd077c971e48e78584f717a1b1cc9b5f1c9326dc
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ed68a365057022897d9645ee60902a77102f43215dcdf2ddd5d3842b6a8446d8
+oid sha256:1626bac54f5806a604b7efdd749c5b65d63bbb40fc55c3744aae6130aa24f3de
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:63ebaa0c302cadbdfcd9f8ee2289e35ecf9c9fc8c9968fc0c05f100dac20c6b9
+oid sha256:40cf83ff997228172cf0b991f9d5209728ccf2f0a75841db5e31e647779a1ad2
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9089a2e73aa73e2c09752311bdfa67c1ed286ab83bb4bc61dbf851e8193bb593
+oid sha256:a2ccf9d8c4b5840071603429d56208abefd14e276e9351f1160a607485df78ae
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.8908902406692505,
   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4_frozenVision/lora/sft/checkpoint-300",
-  "epoch": 0.3605459696111254,
+  "epoch": 0.38629925315477726,
   "eval_steps": 50,
-  "global_step": 700,
+  "global_step": 750,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1253,11 +1253,100 @@
       "eval_steps_per_second": 0.928,
       "num_input_tokens_seen": 8187320,
       "step": 700
+    },
+    {
+      "epoch": 0.3631212979654906,
+      "grad_norm": 0.3215674690491891,
+      "learning_rate": 9.338209491537257e-05,
+      "loss": 0.8998,
+      "num_input_tokens_seen": 8245776,
+      "step": 705
+    },
+    {
+      "epoch": 0.3656966263198558,
+      "grad_norm": 0.36428692362396536,
+      "learning_rate": 9.326068709243727e-05,
+      "loss": 0.8999,
+      "num_input_tokens_seen": 8304280,
+      "step": 710
+    },
+    {
+      "epoch": 0.36827195467422097,
+      "grad_norm": 0.280459809393624,
+      "learning_rate": 9.313825614787177e-05,
+      "loss": 0.8983,
+      "num_input_tokens_seen": 8362728,
+      "step": 715
+    },
+    {
+      "epoch": 0.37084728302858616,
+      "grad_norm": 0.1819339731162554,
+      "learning_rate": 9.301480497718593e-05,
+      "loss": 0.892,
+      "num_input_tokens_seen": 8421224,
+      "step": 720
+    },
+    {
+      "epoch": 0.37342261138295135,
+      "grad_norm": 0.23784840563699303,
+      "learning_rate": 9.289033650001817e-05,
+      "loss": 0.9034,
+      "num_input_tokens_seen": 8479720,
+      "step": 725
+    },
+    {
+      "epoch": 0.3759979397373165,
+      "grad_norm": 0.24070744588741375,
+      "learning_rate": 9.276485366006634e-05,
+      "loss": 0.895,
+      "num_input_tokens_seen": 8538192,
+      "step": 730
+    },
+    {
+      "epoch": 0.3785732680916817,
+      "grad_norm": 0.24846723619231478,
+      "learning_rate": 9.263835942501807e-05,
+      "loss": 0.8973,
+      "num_input_tokens_seen": 8596664,
+      "step": 735
+    },
+    {
+      "epoch": 0.3811485964460469,
+      "grad_norm": 0.2601614440419362,
+      "learning_rate": 9.251085678648072e-05,
+      "loss": 0.8972,
+      "num_input_tokens_seen": 8655128,
+      "step": 740
+    },
+    {
+      "epoch": 0.38372392480041206,
+      "grad_norm": 0.30194733839751087,
+      "learning_rate": 9.238234875991046e-05,
+      "loss": 0.8987,
+      "num_input_tokens_seen": 8713624,
+      "step": 745
+    },
+    {
+      "epoch": 0.38629925315477726,
+      "grad_norm": 0.3015609177439829,
+      "learning_rate": 9.225283838454111e-05,
+      "loss": 0.9005,
+      "num_input_tokens_seen": 8772104,
+      "step": 750
+    },
+    {
+      "epoch": 0.38629925315477726,
+      "eval_loss": 0.8981761336326599,
+      "eval_runtime": 16.0177,
+      "eval_samples_per_second": 3.746,
+      "eval_steps_per_second": 0.936,
+      "num_input_tokens_seen": 8772104,
+      "step": 750
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen": 8187320,
+  "num_input_tokens_seen": 8772104,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -1272,7 +1361,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 459709441638400.0,
+  "total_flos": 492548696834048.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null