ben81828 committed (verified)
Commit 0a3045b · 1 Parent(s): 1818f01

Training in progress, step 800, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b80d0303f147dcd83f97c68d1de338e51e85f076089eb9eec3b8eff422c8bc34
+ oid sha256:618ddb5842b77a277ba2fb75499d245929924ad35e8899db3bc116e2707491d1
  size 18516456
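
The file updated above is the LoRA adapter weights themselves (~18 MB), not a full model. A minimal loading sketch, assuming the base model is a Qwen2-VL checkpoint (the base is not recorded in this commit) and that transformers and peft are installed; all names and paths below are assumptions:

```python
# Hypothetical usage sketch: attach the LoRA adapter saved in this checkpoint to
# its (assumed) Qwen2-VL base model. Model id and local paths are assumptions.
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration
from peft import PeftModel

base_id = "Qwen/Qwen2-VL-7B-Instruct"   # assumed base; check the run's training config
adapter_dir = "last-checkpoint"          # folder holding adapter_model.safetensors

base = Qwen2VLForConditionalGeneration.from_pretrained(base_id, torch_dtype="bfloat16")
model = PeftModel.from_pretrained(base, adapter_dir)  # loads the ~18 MB LoRA weights
processor = AutoProcessor.from_pretrained(base_id)
```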
last-checkpoint/global_step800/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e13bc46742e9ac6b8826aaf99222596ad1b3d3642e6e019519e69780060aa8cb
+ size 27700976
last-checkpoint/global_step800/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ebdaadb5ca1a803780a150d856fb183d325ff11b1fa39f812fd71d111080857d
+ size 27700976
last-checkpoint/global_step800/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c14ef3f1938255ceaa58d153907ddc24c7932c78f0b755a852b5df87f86a0e7
+ size 27700976
last-checkpoint/global_step800/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c8be0645c05ec7ac3928e44a3087fe670a26826c0c9ecd5a2c78c5e2a015ec6
+ size 27700976
last-checkpoint/global_step800/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8b3ecca13012e7c7b1a2ef21b153dca6f20a38d9a089a9aae9d9df28bdbd80e
+ size 411571
last-checkpoint/global_step800/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:080d4cee83c2987351a8f8936bb25b274b59e0bccfc2c831b3ccf7466a42f4ec
+ size 411507
last-checkpoint/global_step800/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a4822198856a96d2b689d013db79d5a9307c4e3bff2580679b724c54d055d3a
+ size 411507
last-checkpoint/global_step800/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:337cc0ab48878ede739aca0b4a98eff5a797126a290e7e34991a9f09dbbf1305
+ size 411507
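
The global_step800 directory added above holds the DeepSpeed ZeRO shards for this step: one optimizer-state file and one model-state file per data-parallel rank (ranks 0–3). To materialize a single fp32 state dict from these shards, DeepSpeed's zero_to_fp32 helper is the usual route; a sketch, with the checkpoint assumed to be downloaded locally:

```python
# Sketch (assumed local paths): consolidate the per-rank ZeRO shards under
# global_step800 into one fp32 state dict using DeepSpeed's helper.
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

state_dict = get_fp32_state_dict_from_zero_checkpoint(
    "last-checkpoint",      # checkpoint root containing global_step800/ and `latest`
    tag="global_step800",   # explicit tag; defaults to the one named in `latest`
)
print(sum(t.numel() for t in state_dict.values()), "fp32 parameters consolidated")
```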
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step750
+ global_step800
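
The `latest` marker tells DeepSpeed (and a resumed Trainer run) which global_step directory inside the checkpoint to reload; this commit moves it from global_step750 to global_step800. A small sketch of reading it, assuming the checkpoint is available locally:

```python
# Sketch: the `latest` file selects the ZeRO shard directory to resume from.
from pathlib import Path

ckpt_root = Path("last-checkpoint")                # assumed local path
tag = (ckpt_root / "latest").read_text().strip()   # "global_step800" after this commit
shards = sorted((ckpt_root / tag).glob("*_optim_states.pt"))
print(tag, "-", len(shards), "optimizer shards (one per data-parallel rank)")
```

Resuming with `Trainer.train(resume_from_checkpoint=...)` follows this same marker when restoring optimizer and scheduler state.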
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:66b4ef73f3603a1b91082ee108fa8299ebe45fb3cdeec7d0bdca1982af5bf07d
+ oid sha256:b4f003069486a57c6ac033f30cf4c4213eb6b7d659bab68a5a50fdb8da7c4118
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:882eccb2a272cf97cd645050bd077c971e48e78584f717a1b1cc9b5f1c9326dc
+ oid sha256:a016ef89b4392d083b2c15a7cf06a39bc61a759f648cf6dc03f1c32b89a526aa
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1626bac54f5806a604b7efdd749c5b65d63bbb40fc55c3744aae6130aa24f3de
+ oid sha256:9b56fe0893036dc052d18d90feba4328b90ea71561942150b07406ac3d7a700e
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:40cf83ff997228172cf0b991f9d5209728ccf2f0a75841db5e31e647779a1ad2
+ oid sha256:c0c203d12c2c308dab785ed672c9ca27fb6a2f72acd1e1552d1516c7b0006013
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a2ccf9d8c4b5840071603429d56208abefd14e276e9351f1160a607485df78ae
+ oid sha256:5d85b710a5709549c0b4daddcc052f2ed242a5d916ac9ca030c805e7ff501c88
  size 1064
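
scheduler.pt and the rng_state_*.pth files are small companions to the optimizer shards: the learning-rate scheduler's state_dict and each rank's RNG state, so a resumed run continues the same LR schedule and random stream. A sketch of inspecting them, with local paths assumed:

```python
# Sketch: inspect the scheduler and per-rank RNG state saved with this checkpoint.
# weights_only=False because these are plain pickled dicts, not just tensors.
import torch

sched = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)
rng = torch.load("last-checkpoint/rng_state_0.pth", map_location="cpu", weights_only=False)
print(sched)        # LR scheduler state (e.g. last_epoch, last recorded LRs)
print(list(rng))    # typically: python, numpy, cpu, cuda
```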
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.8908902406692505,
  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4_frozenVision/lora/sft/checkpoint-300",
- "epoch": 0.38629925315477726,
+ "epoch": 0.41205253669842906,
  "eval_steps": 50,
- "global_step": 750,
+ "global_step": 800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -1342,11 +1342,100 @@
  "eval_steps_per_second": 0.936,
  "num_input_tokens_seen": 8772104,
  "step": 750
+ },
+ {
+ "epoch": 0.3888745815091424,
+ "grad_norm": 0.44991480631292463,
+ "learning_rate": 9.21223287233121e-05,
+ "loss": 0.8973,
+ "num_input_tokens_seen": 8830568,
+ "step": 755
+ },
+ {
+ "epoch": 0.3914499098635076,
+ "grad_norm": 0.22570310903133853,
+ "learning_rate": 9.199082286279622e-05,
+ "loss": 0.8974,
+ "num_input_tokens_seen": 8889072,
+ "step": 760
+ },
+ {
+ "epoch": 0.3940252382178728,
+ "grad_norm": 0.22090133233732026,
+ "learning_rate": 9.185832391312644e-05,
+ "loss": 0.8985,
+ "num_input_tokens_seen": 8947568,
+ "step": 765
+ },
+ {
+ "epoch": 0.39660056657223797,
+ "grad_norm": 0.23738058530347297,
+ "learning_rate": 9.172483500792244e-05,
+ "loss": 0.8935,
+ "num_input_tokens_seen": 9006056,
+ "step": 770
+ },
+ {
+ "epoch": 0.39917589492660316,
+ "grad_norm": 0.41232659301572594,
+ "learning_rate": 9.159035930421658e-05,
+ "loss": 0.8985,
+ "num_input_tokens_seen": 9064592,
+ "step": 775
+ },
+ {
+ "epoch": 0.40175122328096835,
+ "grad_norm": 0.2004855543001356,
+ "learning_rate": 9.145489998237902e-05,
+ "loss": 0.9105,
+ "num_input_tokens_seen": 9123096,
+ "step": 780
+ },
+ {
+ "epoch": 0.4043265516353335,
+ "grad_norm": 0.16209487510237375,
+ "learning_rate": 9.131846024604274e-05,
+ "loss": 0.8925,
+ "num_input_tokens_seen": 9181576,
+ "step": 785
+ },
+ {
+ "epoch": 0.4069018799896987,
+ "grad_norm": 0.24319930530142153,
+ "learning_rate": 9.11810433220276e-05,
+ "loss": 0.8955,
+ "num_input_tokens_seen": 9240048,
+ "step": 790
+ },
+ {
+ "epoch": 0.40947720834406387,
+ "grad_norm": 0.24311562892750557,
+ "learning_rate": 9.104265246026415e-05,
+ "loss": 0.8986,
+ "num_input_tokens_seen": 9298528,
+ "step": 795
+ },
+ {
+ "epoch": 0.41205253669842906,
+ "grad_norm": 0.2891177185942039,
+ "learning_rate": 9.090329093371666e-05,
+ "loss": 0.8881,
+ "num_input_tokens_seen": 9357016,
+ "step": 800
+ },
+ {
+ "epoch": 0.41205253669842906,
+ "eval_loss": 0.8973079919815063,
+ "eval_runtime": 16.1396,
+ "eval_samples_per_second": 3.718,
+ "eval_steps_per_second": 0.929,
+ "num_input_tokens_seen": 9357016,
+ "step": 800
  }
  ],
  "logging_steps": 5,
  "max_steps": 3400,
- "num_input_tokens_seen": 8772104,
+ "num_input_tokens_seen": 9357016,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -1361,7 +1450,7 @@
  "attributes": {}
  }
  },
- "total_flos": 492548696834048.0,
+ "total_flos": 525396292665344.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null