ben81828 committed
Commit 16a6336 · verified · 1 parent: 404d3b1

Training in progress, step 1050, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0a3ab6d644ac3cb3a77587a21e5db04187d88cc24b3b2be9b5e4e433e0add233
+ oid sha256:4499fca7257a53d1a8959e9e138764d24facbc55103cd1d2e30b092c058fc6d9
  size 29034840
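
All of the binary files in this checkpoint are stored as Git LFS pointers, so the diff above only swaps the pointer's sha256 OID while the size stays at 29034840 bytes. Below is a minimal sketch for checking a locally downloaded object against such a pointer; the two local paths are assumptions for illustration, not part of the commit.

```python
# Minimal sketch: verify a downloaded LFS object against its pointer file.
# The two paths below are illustrative assumptions, not files in this commit.
import hashlib
from pathlib import Path

pointer_lines = Path("adapter_model.safetensors.pointer").read_text().splitlines()
fields = dict(line.split(" ", 1) for line in pointer_lines if " " in line)
expected_oid = fields["oid"].removeprefix("sha256:")
expected_size = int(fields["size"])

blob = Path("adapter_model.safetensors")
actual_size = blob.stat().st_size
actual_oid = hashlib.sha256(blob.read_bytes()).hexdigest()

print("size ok:", actual_size == expected_size)
print("oid  ok:", actual_oid == expected_oid)
```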
last-checkpoint/global_step1050/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1dd37b9bb3c7ca4a2e1ac547a76cd314772017b0eee2e58e2c86444e0dfd52ed
+ size 43429616
last-checkpoint/global_step1050/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:64f584e3a146e5d4835f3ddce8eac0097d6c92edb4539be3c5891333747fa4a2
+ size 43429616
last-checkpoint/global_step1050/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5db9da879469a496cdd9059d07e41a815553c093a28b1e5f757b50e9736d5819
+ size 43429616
last-checkpoint/global_step1050/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:49f55ceac858d890c28903f7fa52e3dd5e171674ec8b674e1c236daced759375
+ size 43429616
last-checkpoint/global_step1050/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5740907eeaf461a960ab6da055d43d89f41f6417899be566427552f76b967abe
+ size 637299
last-checkpoint/global_step1050/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:28dcc960a14c72ef47fb9722c4b98cf1679b58db19513a1020156883879227c7
+ size 637171
last-checkpoint/global_step1050/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:055280373e3009fe56853d9b9ef4c853cc09e485fa66a9bd20cba222d17c0bf5
+ size 637171
last-checkpoint/global_step1050/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:179e95a89c200a847bd6de0c47ca8b3d721ccf69a889371f83c8cb19695431d6
+ size 637171
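
The new global_step1050 directory holds the DeepSpeed ZeRO state partitioned across four data-parallel ranks: one bf16 optimizer-state shard and one model-state shard per rank. If a single consolidated fp32 state dict is ever needed outside DeepSpeed, something along the following lines should work; it is a sketch assuming DeepSpeed's zero_to_fp32 utilities are available, and the local "last-checkpoint" path is taken from the file names in this diff.

```python
# Minimal sketch: consolidate the per-rank ZeRO shards under last-checkpoint/
# into one fp32 state dict. Assumes a DeepSpeed install that provides the
# zero_to_fp32 helpers; paths follow the layout shown in this commit.
import torch
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# With no explicit tag, the helper reads the "latest" file and so picks up
# global_step1050 after this commit.
state_dict = get_fp32_state_dict_from_zero_checkpoint("last-checkpoint")
torch.save(state_dict, "consolidated_fp32.bin")
print(f"{len(state_dict)} tensors consolidated")
```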
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step1000
+ global_step1050
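
The latest file is the tag DeepSpeed consults to find the active checkpoint sub-directory, so after this commit it points at global_step1050. A small sketch that resolves the tag and lists the per-rank state shards through it (the "last-checkpoint" path is again taken from the file names in this diff):

```python
# Minimal sketch: read the DeepSpeed tag from "latest" and list the per-rank
# optimizer/model state shards stored under it.
from pathlib import Path

ckpt = Path("last-checkpoint")
tag = (ckpt / "latest").read_text().strip()   # "global_step1050" after this commit
for shard in sorted((ckpt / tag).glob("*_states.pt")):
    print(shard.name, shard.stat().st_size, "bytes")
```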
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7d73dfcc09cf3d6f08149535e03920234febc15f7e9a166987f3bc01ee871abf
+ oid sha256:67db742b8aa1744a8224bf2a1f79d89caff63b15f78a455d92bb666df82183ea
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4396a64b6da4868d060d1e3c7c9ccb12c39d63bd0f7b146d2512400aff4c769c
+ oid sha256:c0f9acb7e6f8bbfb305c3601c71eb6189af24942fab5f99046412c03bb10c3eb
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:95877efc8fb5eb302819ee7effca4222569cdcfdebb9fa5d9846e68ed9e833fe
+ oid sha256:713783338342f7486f6f186abd03c5963a0d22368f403efb2bf903ed083d2b64
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e9fa4f23377f00fdde731da68a8690098617a1fdd912e03cdaa8bde87c493179
+ oid sha256:83aaec0b5b7d8a2da4577075066cf434ce6e9feb9327edbea6677a2e51d76466
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bce7739c5bb5cf50e8f1c942e662e33e6aa589036d55e6fddd63bdf3171c1cae
+ oid sha256:ccde9da0d23fd32800f01283cdb6c677def9ab43edb7232a9c4c4e9101a14cc0
  size 1064
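
The rng_state_0..3.pth files carry the per-rank random-number-generator states and scheduler.pt carries the learning-rate scheduler state; updating them alongside the weights is what lets a resumed run continue deterministically from step 1050. A quick, hedged way to peek at what they contain (paths follow the file names in this diff; weights_only=False assumes a recent PyTorch where that argument exists, since these files hold plain Python/NumPy state rather than only tensors):

```python
# Minimal sketch: inspect the per-rank RNG state and the LR scheduler state.
import torch

rng = torch.load("last-checkpoint/rng_state_0.pth", map_location="cpu", weights_only=False)
sched = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)
print("rng state keys:", sorted(rng) if isinstance(rng, dict) else type(rng))
print("scheduler keys:", sorted(sched) if isinstance(sched, dict) else type(sched))
```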
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.0007357922149822116,
  "best_model_checkpoint": "saves/CADICA_qwenvl_direction_scale4/lora/sft/checkpoint-1000",
- "epoch": 0.5150656708730363,
+ "epoch": 0.5408189544166881,
  "eval_steps": 50,
- "global_step": 1000,
+ "global_step": 1050,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -1787,11 +1787,100 @@
  "eval_steps_per_second": 0.782,
  "num_input_tokens_seen": 9984000,
  "step": 1000
+ },
+ {
+ "epoch": 0.5176409992274015,
+ "grad_norm": 0.06909777020752475,
+ "learning_rate": 8.439714556870704e-05,
+ "loss": 0.0042,
+ "num_input_tokens_seen": 10033920,
+ "step": 1005
+ },
+ {
+ "epoch": 0.5202163275817667,
+ "grad_norm": 0.15306455110846962,
+ "learning_rate": 8.422026448640124e-05,
+ "loss": 0.0419,
+ "num_input_tokens_seen": 10083840,
+ "step": 1010
+ },
+ {
+ "epoch": 0.5227916559361319,
+ "grad_norm": 0.06864588320363613,
+ "learning_rate": 8.40425740897932e-05,
+ "loss": 0.0313,
+ "num_input_tokens_seen": 10133760,
+ "step": 1015
+ },
+ {
+ "epoch": 0.525366984290497,
+ "grad_norm": 0.041539288845711536,
+ "learning_rate": 8.386407858128706e-05,
+ "loss": 0.005,
+ "num_input_tokens_seen": 10183680,
+ "step": 1020
+ },
+ {
+ "epoch": 0.5279423126448622,
+ "grad_norm": 3.866462743446183,
+ "learning_rate": 8.368478218232787e-05,
+ "loss": 0.0171,
+ "num_input_tokens_seen": 10233600,
+ "step": 1025
+ },
+ {
+ "epoch": 0.5305176409992274,
+ "grad_norm": 0.27036285469370147,
+ "learning_rate": 8.350468913330192e-05,
+ "loss": 0.0019,
+ "num_input_tokens_seen": 10283520,
+ "step": 1030
+ },
+ {
+ "epoch": 0.5330929693535926,
+ "grad_norm": 0.12025617159059553,
+ "learning_rate": 8.33238036934364e-05,
+ "loss": 0.0256,
+ "num_input_tokens_seen": 10333440,
+ "step": 1035
+ },
+ {
+ "epoch": 0.5356682977079578,
+ "grad_norm": 0.013500323699360073,
+ "learning_rate": 8.31421301406986e-05,
+ "loss": 0.0019,
+ "num_input_tokens_seen": 10383360,
+ "step": 1040
+ },
+ {
+ "epoch": 0.5382436260623229,
+ "grad_norm": 0.10118623585384687,
+ "learning_rate": 8.29596727716949e-05,
+ "loss": 0.018,
+ "num_input_tokens_seen": 10433280,
+ "step": 1045
+ },
+ {
+ "epoch": 0.5408189544166881,
+ "grad_norm": 0.23565575572231393,
+ "learning_rate": 8.277643590156894e-05,
+ "loss": 0.0007,
+ "num_input_tokens_seen": 10483200,
+ "step": 1050
+ },
+ {
+ "epoch": 0.5408189544166881,
+ "eval_loss": 0.05677948147058487,
+ "eval_runtime": 19.0806,
+ "eval_samples_per_second": 3.145,
+ "eval_steps_per_second": 0.786,
+ "num_input_tokens_seen": 10483200,
+ "step": 1050
  }
  ],
  "logging_steps": 5,
  "max_steps": 3400,
- "num_input_tokens_seen": 9984000,
+ "num_input_tokens_seen": 10483200,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -1806,7 +1895,7 @@
  "attributes": {}
  }
  },
- "total_flos": 658744419811328.0,
+ "total_flos": 691684399382528.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null