ben81828 committed (verified)
Commit a287859 · Parent: 9069b9e

Training in progress, step 1000, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ed912ae9f29651ceebc41713ab5b91988158a52f69ac82baf6102389d31521a7
+oid sha256:0a3ab6d644ac3cb3a77587a21e5db04187d88cc24b3b2be9b5e4e433e0add233
 size 29034840
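
These are Git LFS pointer files: only the SHA-256 object id changes, while the adapter payload stays at 29,034,840 bytes. A minimal sketch of checking a downloaded blob against its pointer is shown below; the file paths and the inlined pointer text are illustrative assumptions based on this diff, not part of the repository.

```python
# Sketch: verify a downloaded file against its Git LFS pointer.
# Paths and pointer text are assumptions taken from the diff above.
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_text: str) -> dict:
    """Parse the three-line 'version / oid / size' LFS pointer format."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    algo, digest = fields["oid"].split(":", 1)
    return {"version": fields["version"], "algo": algo, "oid": digest, "size": int(fields["size"])}

def verify_blob(blob_path: Path, pointer: dict) -> bool:
    data = blob_path.read_bytes()
    return (pointer["algo"] == "sha256"
            and len(data) == pointer["size"]
            and hashlib.sha256(data).hexdigest() == pointer["oid"])

# Example with the new pointer from this commit:
pointer = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:0a3ab6d644ac3cb3a77587a21e5db04187d88cc24b3b2be9b5e4e433e0add233\n"
    "size 29034840\n"
)
# verify_blob(Path("last-checkpoint/adapter_model.safetensors"), pointer)
```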
last-checkpoint/global_step1000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cdbcc35725e45469be0949f510a514f2e028ed30028d9cb2c1ddb1f85d456b1a
+size 43429616
last-checkpoint/global_step1000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ed51fb9f30458061f2533af682cfa867c19da4faffc8105ff0a58912fd9e4b13
+size 43429616
last-checkpoint/global_step1000/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dec2aa56fa99e9c1ca5ee2081de809a2adc28ba922af4cee07606ca0c69dd305
+size 43429616
last-checkpoint/global_step1000/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d47ed7db30cba6b3dea3185a3751767517ac753dbb5dcba531b1211c7b996ac
+size 43429616
last-checkpoint/global_step1000/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:acd77f7915b0d991c0851d79075b241bd64c36eeb0d1e0e2f985a499c290a497
+size 637299
last-checkpoint/global_step1000/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b3f26bda9e2a2460f4f3cc5008eaa37999cec73f83483980476af2f03541f21
+size 637171
last-checkpoint/global_step1000/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7f0d6d4da9d606901f6d8359ce5b27f2560634e6ffb9fc47cc3a2d7bea3f975
+size 637171
last-checkpoint/global_step1000/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c1c7eda36ab2045cf9de55c87c4f12c530a7dab4d03922e901e1a447629a96e
+size 637171
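
The eight files added under global_step1000/ are the DeepSpeed ZeRO shards for this step: one bf16 optimizer-state file and one model-states file per data-parallel rank, with a single model-parallel rank 00. The file-name pattern is taken from the paths above; a world size of 4 is an assumption inferred from the rank indices 0-3. A sketch of checking that a step directory contains the full set:

```python
# Sketch: enumerate the ZeRO shard files a 4-rank run would be expected to
# leave under a global_step directory. Pattern copied from the files added
# in this commit; world_size=4 is an assumption based on ranks 0-3 above.
from pathlib import Path

def expected_zero_shards(step_dir: Path, world_size: int = 4) -> list[Path]:
    names = []
    for rank in range(world_size):
        names.append(step_dir / f"bf16_zero_pp_rank_{rank}_mp_rank_00_optim_states.pt")
        names.append(step_dir / f"zero_pp_rank_{rank}_mp_rank_00_model_states.pt")
    return names

missing = [p for p in expected_zero_shards(Path("last-checkpoint/global_step1000")) if not p.exists()]
# An empty `missing` list means every shard needed to resume the 4-rank run is present.
```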
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step950
+global_step1000
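
The `latest` file is DeepSpeed's checkpoint tag: it names the most recent global_step directory, so advancing it from global_step950 to global_step1000 is what points a resumed run at the new shards. A minimal sketch of resolving the tag (the checkpoint root path is assumed for illustration):

```python
# Sketch: resolve the checkpoint directory that DeepSpeed's `latest` tag names.
from pathlib import Path

ckpt_root = Path("last-checkpoint")                 # assumed location of this checkpoint
tag = (ckpt_root / "latest").read_text().strip()    # "global_step1000" after this commit
step_dir = ckpt_root / tag
print(step_dir)                                     # last-checkpoint/global_step1000
```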
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7f2439da621f14c22b4f733e91bfc9de6b506d28d7b8d6f3eaca2e0b4f24c078
+oid sha256:7d73dfcc09cf3d6f08149535e03920234febc15f7e9a166987f3bc01ee871abf
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c9e3fb386557f376b8946af5b8c91f9418f374dddb2ad9da4868b1ef16778c32
+oid sha256:4396a64b6da4868d060d1e3c7c9ccb12c39d63bd0f7b146d2512400aff4c769c
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dc7774d06045635bece9e960378fdc6913bf7bbbc903444cc570d1ca6ac25645
+oid sha256:95877efc8fb5eb302819ee7effca4222569cdcfdebb9fa5d9846e68ed9e833fe
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d98c54a80a914fecf43d06ea81432499f46e70664f1d04651bf339163e30fa9e
+oid sha256:e9fa4f23377f00fdde731da68a8690098617a1fdd912e03cdaa8bde87c493179
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b9f224baf5bd2044314606c1d88f84cce32f1b37c43c15835b14e72f6a72a4fc
+oid sha256:bce7739c5bb5cf50e8f1c942e662e33e6aa589036d55e6fddd63bdf3171c1cae
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.0033526704646646976,
-  "best_model_checkpoint": "saves/CADICA_qwenvl_direction_scale4/lora/sft/checkpoint-550",
-  "epoch": 0.4893123873293845,
+  "best_metric": 0.0007357922149822116,
+  "best_model_checkpoint": "saves/CADICA_qwenvl_direction_scale4/lora/sft/checkpoint-1000",
+  "epoch": 0.5150656708730363,
   "eval_steps": 50,
-  "global_step": 950,
+  "global_step": 1000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1698,11 +1698,100 @@
       "eval_steps_per_second": 0.778,
       "num_input_tokens_seen": 9484800,
       "step": 950
+    },
+    {
+      "epoch": 0.49188771568374967,
+      "grad_norm": 3.7704976412587468,
+      "learning_rate": 8.612053338817581e-05,
+      "loss": 0.0393,
+      "num_input_tokens_seen": 9534720,
+      "step": 955
+    },
+    {
+      "epoch": 0.49446304403811486,
+      "grad_norm": 0.2205817340051924,
+      "learning_rate": 8.595197232161824e-05,
+      "loss": 0.0102,
+      "num_input_tokens_seen": 9584640,
+      "step": 960
+    },
+    {
+      "epoch": 0.49703837239248005,
+      "grad_norm": 1.9646554268692766,
+      "learning_rate": 8.578256098561275e-05,
+      "loss": 0.0087,
+      "num_input_tokens_seen": 9634560,
+      "step": 965
+    },
+    {
+      "epoch": 0.49961370074684525,
+      "grad_norm": 2.08637430640156,
+      "learning_rate": 8.561230338676239e-05,
+      "loss": 0.0097,
+      "num_input_tokens_seen": 9684480,
+      "step": 970
+    },
+    {
+      "epoch": 0.5021890291012104,
+      "grad_norm": 0.018874732349478156,
+      "learning_rate": 8.544120355168451e-05,
+      "loss": 0.0013,
+      "num_input_tokens_seen": 9734400,
+      "step": 975
+    },
+    {
+      "epoch": 0.5047643574555756,
+      "grad_norm": 0.007747713318690538,
+      "learning_rate": 8.526926552691544e-05,
+      "loss": 0.0268,
+      "num_input_tokens_seen": 9784320,
+      "step": 980
+    },
+    {
+      "epoch": 0.5073396858099408,
+      "grad_norm": 4.2765032076143585,
+      "learning_rate": 8.509649337881483e-05,
+      "loss": 0.0418,
+      "num_input_tokens_seen": 9834240,
+      "step": 985
+    },
+    {
+      "epoch": 0.509915014164306,
+      "grad_norm": 0.016725809588179666,
+      "learning_rate": 8.492289119346943e-05,
+      "loss": 0.0011,
+      "num_input_tokens_seen": 9884160,
+      "step": 990
+    },
+    {
+      "epoch": 0.5124903425186711,
+      "grad_norm": 1.5189362175317798,
+      "learning_rate": 8.474846307659658e-05,
+      "loss": 0.0058,
+      "num_input_tokens_seen": 9934080,
+      "step": 995
+    },
+    {
+      "epoch": 0.5150656708730363,
+      "grad_norm": 0.5873213891971129,
+      "learning_rate": 8.457321315344694e-05,
+      "loss": 0.0547,
+      "num_input_tokens_seen": 9984000,
+      "step": 1000
+    },
+    {
+      "epoch": 0.5150656708730363,
+      "eval_loss": 0.0007357922149822116,
+      "eval_runtime": 19.1915,
+      "eval_samples_per_second": 3.126,
+      "eval_steps_per_second": 0.782,
+      "num_input_tokens_seen": 9984000,
+      "step": 1000
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen": 9484800,
+  "num_input_tokens_seen": 9984000,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -1717,7 +1806,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 625804440240128.0,
+  "total_flos": 658744419811328.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null