ben81828 committed
Commit afd9df5 · verified · 1 Parent(s): 5b8ddb3

Training in progress, step 850, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f6287ca2ed8e9ca31735e5d987ece6f9b430689719f525a3a04c74ff709108ab
+ oid sha256:6e045152a73f87855427fb379b7010340bcb5b7bc986cabfaa3a74bd91d905f7
  size 29034840
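Each pointer above follows the Git LFS spec: a `version` line, the SHA-256 `oid` of the real payload, and its `size` in bytes; this hunk swaps the adapter weights for a new blob of the same size. A minimal sketch (Python, with a hypothetical `verify_lfs_pointer` helper) for checking a locally downloaded file against the pointer values shown in this diff:

```python
import hashlib
import os

def verify_lfs_pointer(local_path: str, expected_oid: str, expected_size: int) -> bool:
    """Hash the downloaded blob and compare it to the oid/size from the LFS pointer."""
    digest = hashlib.sha256()
    with open(local_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return (digest.hexdigest() == expected_oid
            and os.path.getsize(local_path) == expected_size)

# Values taken from the new pointer in the hunk above.
print(verify_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    "6e045152a73f87855427fb379b7010340bcb5b7bc986cabfaa3a74bd91d905f7",
    29034840,
))
```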
last-checkpoint/global_step850/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aed5addf75b4dcc2d56305d0370876497c8dabfb4d48e695446fce476aec812b
+ size 43429616
last-checkpoint/global_step850/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c1f82ff3ba505822d500c54af588282805a60dd32165278110e76128b42f988a
+ size 43429616
last-checkpoint/global_step850/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ff39496f99b859b31fae012b8e98000809ecb18114c736d7b6939b249d47cb8
+ size 43429616
last-checkpoint/global_step850/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ec4187a54aae0ea5e3a79e5b23f9839484130e81587192b2668b5b350aa889f
+ size 43429616
last-checkpoint/global_step850/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b18337b921f0aba380566f71c4857b67981734dc7975b4ed43e57711a412ef28
+ size 637299
last-checkpoint/global_step850/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d144742df08d3730450d7fb36709b9a81b06278b4025a5f5b7cf8306696ee8ed
+ size 637171
last-checkpoint/global_step850/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a1e27c63d553ea81d8a5f4f57f82e6fd2dab396822dedba339d93c3e67b54fd
+ size 637171
last-checkpoint/global_step850/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4777d80a9e8c2d0c23709b7630c4e2ffc82a5624bbdac8e3a2d838a612e80caa
+ size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step800
+ global_step850
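The `latest` file is DeepSpeed's checkpoint tag: it names the `global_step*` directory (now `global_step850`) that holds the ZeRO-partitioned optimizer and model-state shards added above. A minimal sketch, assuming `last-checkpoint/` has been downloaded locally and DeepSpeed is installed, of consolidating those shards into a single fp32 state dict using DeepSpeed's generic `zero_to_fp32` utility (not something shipped by this repo):

```python
import torch
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# With the default tag=None this reads last-checkpoint/latest ("global_step850")
# to locate the shard directory, then merges the zero_pp_rank_*_mp_rank_00 files
# from all four ranks into one full-precision state dict.
state_dict = get_fp32_state_dict_from_zero_checkpoint("last-checkpoint")

torch.save(state_dict, "consolidated_fp32.pt")  # or load it into the LoRA-wrapped model
```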
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b4f003069486a57c6ac033f30cf4c4213eb6b7d659bab68a5a50fdb8da7c4118
+ oid sha256:36c9044354f826de248840acaaec171f816609c147a664089731a0570deef948
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a016ef89b4392d083b2c15a7cf06a39bc61a759f648cf6dc03f1c32b89a526aa
+ oid sha256:54f4c4013326db4e7267b656aaf72b86570f8aeee91ad39242a416cf8b963191
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9b56fe0893036dc052d18d90feba4328b90ea71561942150b07406ac3d7a700e
+ oid sha256:43e910793831957d8685c316138c33eef8867edf60052477dc9ad6ec0c6da901
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c0c203d12c2c308dab785ed672c9ca27fb6a2f72acd1e1552d1516c7b0006013
+ oid sha256:7cfd0d71ef5a6c58d9f1d46851f4b1e699ca8a50ab3223cfb39668895cffeef2
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5d85b710a5709549c0b4daddcc052f2ed242a5d916ac9ca030c805e7ff501c88
+ oid sha256:3e24eaa8963cd872c048c2c655789d678b8c3fcd1c77ca0d663feee5857d2e34
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.0033526704646646976,
  "best_model_checkpoint": "saves/CADICA_qwenvl_direction_scale4/lora/sft/checkpoint-550",
- "epoch": 0.41205253669842906,
+ "epoch": 0.43780582024208087,
  "eval_steps": 50,
- "global_step": 800,
+ "global_step": 850,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -1431,11 +1431,100 @@
  "eval_steps_per_second": 0.776,
  "num_input_tokens_seen": 7987200,
  "step": 800
+ },
+ {
+ "epoch": 0.41462786505279425,
+ "grad_norm": 0.016984614729716928,
+ "learning_rate": 9.076296203830579e-05,
+ "loss": 0.0025,
+ "num_input_tokens_seen": 8037120,
+ "step": 805
+ },
+ {
+ "epoch": 0.4172031934071594,
+ "grad_norm": 8.87156169406028,
+ "learning_rate": 9.062166909283062e-05,
+ "loss": 0.0338,
+ "num_input_tokens_seen": 8087040,
+ "step": 810
+ },
+ {
+ "epoch": 0.4197785217615246,
+ "grad_norm": 0.07123492673307173,
+ "learning_rate": 9.047941543889014e-05,
+ "loss": 0.0505,
+ "num_input_tokens_seen": 8136960,
+ "step": 815
+ },
+ {
+ "epoch": 0.4223538501158898,
+ "grad_norm": 0.3486611879887143,
+ "learning_rate": 9.033620444080428e-05,
+ "loss": 0.0335,
+ "num_input_tokens_seen": 8186880,
+ "step": 820
+ },
+ {
+ "epoch": 0.42492917847025496,
+ "grad_norm": 0.2658900406741178,
+ "learning_rate": 9.019203948553422e-05,
+ "loss": 0.0196,
+ "num_input_tokens_seen": 8236800,
+ "step": 825
+ },
+ {
+ "epoch": 0.42750450682462016,
+ "grad_norm": 0.15327530188348007,
+ "learning_rate": 9.004692398260244e-05,
+ "loss": 0.018,
+ "num_input_tokens_seen": 8286720,
+ "step": 830
+ },
+ {
+ "epoch": 0.43007983517898535,
+ "grad_norm": 5.292901102191953,
+ "learning_rate": 8.9900861364012e-05,
+ "loss": 0.012,
+ "num_input_tokens_seen": 8336640,
+ "step": 835
+ },
+ {
+ "epoch": 0.4326551635333505,
+ "grad_norm": 3.2113529029102375,
+ "learning_rate": 8.975385508416532e-05,
+ "loss": 0.0062,
+ "num_input_tokens_seen": 8386560,
+ "step": 840
+ },
+ {
+ "epoch": 0.4352304918877157,
+ "grad_norm": 0.3685880212421972,
+ "learning_rate": 8.960590861978265e-05,
+ "loss": 0.0232,
+ "num_input_tokens_seen": 8436480,
+ "step": 845
+ },
+ {
+ "epoch": 0.43780582024208087,
+ "grad_norm": 1.4858234531184304,
+ "learning_rate": 8.945702546981969e-05,
+ "loss": 0.0306,
+ "num_input_tokens_seen": 8486400,
+ "step": 850
+ },
+ {
+ "epoch": 0.43780582024208087,
+ "eval_loss": 0.007198736071586609,
+ "eval_runtime": 19.7019,
+ "eval_samples_per_second": 3.045,
+ "eval_steps_per_second": 0.761,
+ "num_input_tokens_seen": 8486400,
+ "step": 850
  }
  ],
  "logging_steps": 5,
  "max_steps": 3400,
- "num_input_tokens_seen": 7987200,
+ "num_input_tokens_seen": 8486400,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -1450,7 +1539,7 @@
  "attributes": {}
  }
  },
- "total_flos": 526984501526528.0,
+ "total_flos": 559924481097728.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null