ben81828 committed
Commit 39cfbd7 · verified · 1 Parent(s): 8c4d4c1

Training in progress, step 1000, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dfd440e7fad2202aa5aadeb518f98ac0d3f292864f45e1beccd3794320180905
+oid sha256:877d476d50606e6083f6c78d34e77669367b6c6b165e3fabd4766d94d783d5e9
 size 29034840
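
The checkpoint files in this commit are tracked with Git LFS, so the diff only shows the pointer fields (version, oid, size), not the binary payload. As a minimal sketch, assuming the adapter weights have already been pulled to the local path below (hypothetical), the file can be checked against the new pointer's sha256 oid like this:

```python
import hashlib

# Hypothetical local path; substitute wherever the LFS object was pulled to.
path = "last-checkpoint/adapter_model.safetensors"
expected_oid = "877d476d50606e6083f6c78d34e77669367b6c6b165e3fabd4766d94d783d5e9"

sha256 = hashlib.sha256()
with open(path, "rb") as f:
    # Hash in 1 MiB chunks so large checkpoint shards need not fit in memory.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

print("oid matches pointer:", sha256.hexdigest() == expected_oid)
```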
last-checkpoint/global_step1000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:28e7b2d318849eba02075de3f6c0a68896c793c307cd7375104a4424830347c0
+size 43429616
last-checkpoint/global_step1000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11e22f67cadaea4bd6ffab182ea3c8e8b6c3372fef259366b4713a96539339b9
+size 43429616
last-checkpoint/global_step1000/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92797db8527f92f37e5331d56f179d8bbba64b37ee5224f6425133506d544096
+size 43429616
last-checkpoint/global_step1000/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1ad06e43816bf277c18a1890fcf7d2a8a41fa28f579abff1f3982590aa3503f
+size 43429616
last-checkpoint/global_step1000/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:acd77f7915b0d991c0851d79075b241bd64c36eeb0d1e0e2f985a499c290a497
+size 637299
last-checkpoint/global_step1000/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b3f26bda9e2a2460f4f3cc5008eaa37999cec73f83483980476af2f03541f21
+size 637171
last-checkpoint/global_step1000/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7f0d6d4da9d606901f6d8359ce5b27f2560634e6ffb9fc47cc3a2d7bea3f975
+size 637171
last-checkpoint/global_step1000/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c1c7eda36ab2045cf9de55c87c4f12c530a7dab4d03922e901e1a447629a96e
+size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step950
+global_step1000
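
The bf16 optimizer shards and model-state shards added above are DeepSpeed ZeRO partitions (one per data-parallel rank), and the latest file now names the global_step1000 tag. A minimal sketch of consolidating those shards into a single fp32 state dict with DeepSpeed's bundled zero_to_fp32 utility, assuming deepspeed is installed and the checkpoint directory has been pulled locally, could look like:

```python
import torch
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# "last-checkpoint" contains the "latest" file, which points at global_step1000.
checkpoint_dir = "last-checkpoint"

# Merges the per-rank *_optim_states.pt / *_model_states.pt shards into one CPU state dict;
# with tag=None the utility follows whatever the "latest" file names.
state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir)

torch.save(state_dict, "consolidated_fp32_state_dict.pt")
print(f"consolidated {len(state_dict)} tensors")
```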
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7f2439da621f14c22b4f733e91bfc9de6b506d28d7b8d6f3eaca2e0b4f24c078
+oid sha256:7d73dfcc09cf3d6f08149535e03920234febc15f7e9a166987f3bc01ee871abf
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c9e3fb386557f376b8946af5b8c91f9418f374dddb2ad9da4868b1ef16778c32
+oid sha256:4396a64b6da4868d060d1e3c7c9ccb12c39d63bd0f7b146d2512400aff4c769c
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dc7774d06045635bece9e960378fdc6913bf7bbbc903444cc570d1ca6ac25645
+oid sha256:95877efc8fb5eb302819ee7effca4222569cdcfdebb9fa5d9846e68ed9e833fe
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d98c54a80a914fecf43d06ea81432499f46e70664f1d04651bf339163e30fa9e
+oid sha256:e9fa4f23377f00fdde731da68a8690098617a1fdd912e03cdaa8bde87c493179
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4d81b732d7d8d0c63d3b6f5f9a1de7f86b11b67cc06fc68b77724c2ae2e7a663
+oid sha256:83dcc54318a7a7b0b5194bfa96123dd05d073085f7b3d157d28697ffcfb5e8bb
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.4665524661540985,
   "best_model_checkpoint": "saves/CADICA_qwenvl_direction_then_DetectAndClassify_scale6/lora/sft/checkpoint-750",
-  "epoch": 0.28064992614475626,
+  "epoch": 0.29542097488921715,
   "eval_steps": 50,
-  "global_step": 950,
+  "global_step": 1000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1698,11 +1698,100 @@
       "eval_steps_per_second": 0.781,
       "num_input_tokens_seen": 9863168,
       "step": 950
+    },
+    {
+      "epoch": 0.2821270310192024,
+      "grad_norm": 3.2511615728403744,
+      "learning_rate": 9.775319981786445e-05,
+      "loss": 0.4393,
+      "num_input_tokens_seen": 9914672,
+      "step": 955
+    },
+    {
+      "epoch": 0.28360413589364847,
+      "grad_norm": 16.435101279621147,
+      "learning_rate": 9.771685903055277e-05,
+      "loss": 0.4355,
+      "num_input_tokens_seen": 9966736,
+      "step": 960
+    },
+    {
+      "epoch": 0.28508124076809455,
+      "grad_norm": 15.842537939054491,
+      "learning_rate": 9.768023356493864e-05,
+      "loss": 0.4459,
+      "num_input_tokens_seen": 10017984,
+      "step": 965
+    },
+    {
+      "epoch": 0.2865583456425406,
+      "grad_norm": 4.234230919149069,
+      "learning_rate": 9.764332363952927e-05,
+      "loss": 0.4774,
+      "num_input_tokens_seen": 10069520,
+      "step": 970
+    },
+    {
+      "epoch": 0.2880354505169867,
+      "grad_norm": 4.408868276054397,
+      "learning_rate": 9.760612947452884e-05,
+      "loss": 0.413,
+      "num_input_tokens_seen": 10122208,
+      "step": 975
+    },
+    {
+      "epoch": 0.2895125553914328,
+      "grad_norm": 18.46536438022927,
+      "learning_rate": 9.756865129183741e-05,
+      "loss": 0.5433,
+      "num_input_tokens_seen": 10173760,
+      "step": 980
+    },
+    {
+      "epoch": 0.29098966026587886,
+      "grad_norm": 10.416515634178488,
+      "learning_rate": 9.753088931504944e-05,
+      "loss": 0.4096,
+      "num_input_tokens_seen": 10224976,
+      "step": 985
+    },
+    {
+      "epoch": 0.29246676514032494,
+      "grad_norm": 8.959580527519506,
+      "learning_rate": 9.749284376945248e-05,
+      "loss": 0.3916,
+      "num_input_tokens_seen": 10276928,
+      "step": 990
+    },
+    {
+      "epoch": 0.29394387001477107,
+      "grad_norm": 4.106784187834887,
+      "learning_rate": 9.74545148820259e-05,
+      "loss": 0.3899,
+      "num_input_tokens_seen": 10328048,
+      "step": 995
+    },
+    {
+      "epoch": 0.29542097488921715,
+      "grad_norm": 7.661197997005464,
+      "learning_rate": 9.741590288143944e-05,
+      "loss": 0.4005,
+      "num_input_tokens_seen": 10379136,
+      "step": 1000
+    },
+    {
+      "epoch": 0.29542097488921715,
+      "eval_loss": 0.5501028299331665,
+      "eval_runtime": 19.0051,
+      "eval_samples_per_second": 3.157,
+      "eval_steps_per_second": 0.789,
+      "num_input_tokens_seen": 10379136,
+      "step": 1000
     }
   ],
   "logging_steps": 5,
   "max_steps": 6770,
-  "num_input_tokens_seen": 9863168,
+  "num_input_tokens_seen": 10379136,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -1717,7 +1806,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 650708688306176.0,
+  "total_flos": 684735085936640.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null