ben81828 committed (verified)
Commit abbaf0a
1 Parent(s): e0014b0

Training in progress, step 900, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6e045152a73f87855427fb379b7010340bcb5b7bc986cabfaa3a74bd91d905f7
+oid sha256:84586103a211c06663bd404e55c4d764d0a39628f772147e29017239dc0ff834
 size 29034840
last-checkpoint/global_step900/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3162d8665920a7039fb31f5653e692dfa159ad1c3ec79a43a4ad091c587bec42
+size 43429616
last-checkpoint/global_step900/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae6e4291d5589e06115045cc4eeb9a6e9f42c5707e975ee4573111e02947cbfb
+size 43429616
last-checkpoint/global_step900/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3415fbb0b88f14e91742873668a0b376d4c77e6fba3c87c920c0f41835ff0c55
+size 43429616
last-checkpoint/global_step900/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9dd5d41123de36f670814e5f1bba522d4f2c42320f1af5fb37fc1c7e6181839e
+size 43429616
last-checkpoint/global_step900/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7162c4e09532f9490922bccb0dbbcfe539972e0f3d49af32428a76030c053274
+size 637299
last-checkpoint/global_step900/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a1fabfaa7ff216589f1f45e0de64de68782d934b86fc832996ada7c681c3bd9
+size 637171
last-checkpoint/global_step900/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c68864aa3523e8a7232f1228e6e0dba67ac184874fa8332362ced37b3cbe041a
+size 637171
last-checkpoint/global_step900/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:834a420bd57a2a0fe552734d61c0bcc178574ff6e356d461ed6815bc79695c74
+size 637171
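Note: every binary in this checkpoint is committed as a Git LFS pointer file (the three version / oid / size lines shown in each hunk above), not as the raw tensor data. As an illustration only, here is a minimal Python sketch of reading such a pointer; the helper name is made up, and the path is just one of the files touched by this commit.

```python
# Minimal sketch: parse a Git LFS pointer file (version / oid / size lines).
# parse_lfs_pointer is a hypothetical helper written for this illustration.

def parse_lfs_pointer(path: str) -> dict:
    """Return the key/value pairs stored in a Git LFS pointer file."""
    fields = {}
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields

if __name__ == "__main__":
    ptr = parse_lfs_pointer("last-checkpoint/adapter_model.safetensors")
    # Expected output for this commit: oid "sha256:8458...", size "29034840"
    print(ptr.get("oid"), ptr.get("size"))
```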
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step850
+global_step900
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:36c9044354f826de248840acaaec171f816609c147a664089731a0570deef948
+oid sha256:b8f22ced19e790cc864cefe3b7c711d9ae631c44f95d42fb4829688cc3de0153
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:54f4c4013326db4e7267b656aaf72b86570f8aeee91ad39242a416cf8b963191
+oid sha256:3e0407513eba77d34cbf3adf0e59a58bd80716f4f00f414854253637e82be43d
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:43e910793831957d8685c316138c33eef8867edf60052477dc9ad6ec0c6da901
+oid sha256:6060636c023258ce9b965e244b8a58b4c99d5784dde4405b39737550ef50cd4f
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7cfd0d71ef5a6c58d9f1d46851f4b1e699ca8a50ab3223cfb39668895cffeef2
+oid sha256:c24ccdfdcde39cb2265c82c50c36ffdfcc670f757aba4bcf4bb0fdc6d1373c4c
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3e24eaa8963cd872c048c2c655789d678b8c3fcd1c77ca0d663feee5857d2e34
+oid sha256:feb4015894f59edc29c71bc4938b5d4ab98daad34a38a3d387b308b3b1d4b280
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.0033526704646646976,
   "best_model_checkpoint": "saves/CADICA_qwenvl_direction_scale4/lora/sft/checkpoint-550",
-  "epoch": 0.43780582024208087,
+  "epoch": 0.4635591037857327,
   "eval_steps": 50,
-  "global_step": 850,
+  "global_step": 900,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1520,11 +1520,100 @@
       "eval_steps_per_second": 0.761,
       "num_input_tokens_seen": 8486400,
       "step": 850
+    },
+    {
+      "epoch": 0.44038114859644606,
+      "grad_norm": 0.22602261311014887,
+      "learning_rate": 8.930720915538487e-05,
+      "loss": 0.0255,
+      "num_input_tokens_seen": 8536320,
+      "step": 855
+    },
+    {
+      "epoch": 0.44295647695081125,
+      "grad_norm": 0.1515497727795517,
+      "learning_rate": 8.915646321965614e-05,
+      "loss": 0.0267,
+      "num_input_tokens_seen": 8586240,
+      "step": 860
+    },
+    {
+      "epoch": 0.4455318053051764,
+      "grad_norm": 0.039053785243136956,
+      "learning_rate": 8.900479122779712e-05,
+      "loss": 0.0107,
+      "num_input_tokens_seen": 8636160,
+      "step": 865
+    },
+    {
+      "epoch": 0.4481071336595416,
+      "grad_norm": 0.42882717357803835,
+      "learning_rate": 8.885219676687277e-05,
+      "loss": 0.0277,
+      "num_input_tokens_seen": 8686080,
+      "step": 870
+    },
+    {
+      "epoch": 0.45068246201390677,
+      "grad_norm": 5.519940082555776,
+      "learning_rate": 8.869868344576459e-05,
+      "loss": 0.0175,
+      "num_input_tokens_seen": 8736000,
+      "step": 875
+    },
+    {
+      "epoch": 0.45325779036827196,
+      "grad_norm": 0.15983443395533375,
+      "learning_rate": 8.854425489508532e-05,
+      "loss": 0.0065,
+      "num_input_tokens_seen": 8785920,
+      "step": 880
+    },
+    {
+      "epoch": 0.45583311872263715,
+      "grad_norm": 0.42885948370195226,
+      "learning_rate": 8.838891476709288e-05,
+      "loss": 0.0476,
+      "num_input_tokens_seen": 8835840,
+      "step": 885
+    },
+    {
+      "epoch": 0.4584084470770023,
+      "grad_norm": 0.18209352469474613,
+      "learning_rate": 8.823266673560426e-05,
+      "loss": 0.018,
+      "num_input_tokens_seen": 8885760,
+      "step": 890
+    },
+    {
+      "epoch": 0.4609837754313675,
+      "grad_norm": 1.7895291298260116,
+      "learning_rate": 8.807551449590846e-05,
+      "loss": 0.0337,
+      "num_input_tokens_seen": 8935680,
+      "step": 895
+    },
+    {
+      "epoch": 0.4635591037857327,
+      "grad_norm": 0.11579050055375595,
+      "learning_rate": 8.791746176467907e-05,
+      "loss": 0.0063,
+      "num_input_tokens_seen": 8985600,
+      "step": 900
+    },
+    {
+      "epoch": 0.4635591037857327,
+      "eval_loss": 0.010749292559921741,
+      "eval_runtime": 19.2918,
+      "eval_samples_per_second": 3.11,
+      "eval_steps_per_second": 0.778,
+      "num_input_tokens_seen": 8985600,
+      "step": 900
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen": 8486400,
+  "num_input_tokens_seen": 8985600,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -1539,7 +1628,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 559924481097728.0,
+  "total_flos": 592864460668928.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null