ben81828 committed
Commit 23e2970
1 Parent(s): 1898f4a

Training in progress, step 1100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7a907044372c6e0d364bef10eddc568cb0dc263b2ee9cfe224809c3000cb0b54
+oid sha256:e47b9a2cfb5d827890098fe64e850aeaac9bcd5c19d855922f5746d06e17ed00
 size 18516456
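
The adapter weights are tracked with Git LFS, so only the pointer's sha256 and size appear in the diff. Below is a minimal sketch of how one might verify that a locally downloaded `adapter_model.safetensors` matches the new pointer; the expected hash and size are taken from this diff, while the helper function itself is illustrative and not part of the repository.

```python
import hashlib
from pathlib import Path

# Expected values from the updated LFS pointer in this commit.
EXPECTED_SHA256 = "e47b9a2cfb5d827890098fe64e850aeaac9bcd5c19d855922f5746d06e17ed00"
EXPECTED_SIZE = 18516456

def verify_lfs_object(path: str) -> bool:
    """Check that the downloaded file matches the LFS pointer's oid and size."""
    data = Path(path).read_bytes()
    return (
        len(data) == EXPECTED_SIZE
        and hashlib.sha256(data).hexdigest() == EXPECTED_SHA256
    )

if __name__ == "__main__":
    print(verify_lfs_object("last-checkpoint/adapter_model.safetensors"))
```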
last-checkpoint/global_step1100/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb8d76c18886c8ab16831ad77d620fbbcd417833f48486712dbde1107b5583f3
+size 27700976
last-checkpoint/global_step1100/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d4379c13db812765f56f289b4981653ea2ba9f9f6921812b5ea2a67950ba65f4
+size 27700976
last-checkpoint/global_step1100/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5aaae9b5f0c8b4dc6e09a322d7258de41c77452cb23081dee5c162cd3a238e77
+size 27700976
last-checkpoint/global_step1100/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:815b17e7f8a0ce04a26097aeee64e3e65fc4862240bcc1d28ee7f8d25a081e7b
+size 27700976
last-checkpoint/global_step1100/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:368fbb4161e1067a9968bb59d3601f3d9eb6f3bab5f932e4b49fc97de2b0a311
+size 411571
last-checkpoint/global_step1100/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bcbd3965e4a65d712a26fcd12c1ae310db2461b1a58537a8a10514bce45c4ae1
+size 411507
last-checkpoint/global_step1100/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31cee3c13358fcc7e8a75b09cf8f44b82125253654955e740e76e4111806633e
+size 411507
last-checkpoint/global_step1100/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:98867ee24d8986d407c6dd8e4d8117dd091c6272eecf26b96dcc5f96203ae387
+size 411507
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step1050
+global_step1100
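
The new `global_step1100` directory and the updated `latest` tag follow DeepSpeed's ZeRO checkpoint layout: one `bf16_zero_pp_rank_*_optim_states.pt` and one `zero_pp_rank_*_model_states.pt` shard per data-parallel rank (four ranks here), with `latest` naming the tag to resume from. A sketch, assuming DeepSpeed is installed, of consolidating the sharded trainable parameters into a single fp32 state dict; the output path is illustrative only.

```python
import torch
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# "last-checkpoint" contains the "latest" file, which points at global_step1100;
# leaving tag=None lets DeepSpeed read that tag automatically.
state_dict = get_fp32_state_dict_from_zero_checkpoint("last-checkpoint", tag=None)

# Illustrative output path, not part of this repository.
torch.save(state_dict, "consolidated_fp32_state_dict.pt")
```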
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:67db742b8aa1744a8224bf2a1f79d89caff63b15f78a455d92bb666df82183ea
+oid sha256:9d9fea52fb92cc51e76feeb2b139ce35723c0cb651da383e4f7eec2606ed6c2a
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c0f9acb7e6f8bbfb305c3601c71eb6189af24942fab5f99046412c03bb10c3eb
+oid sha256:1b5820ebfcc2e1cfe1ad2619a05ea9a484ff21635e13e386bf14abd302f2c0f7
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:713783338342f7486f6f186abd03c5963a0d22368f403efb2bf903ed083d2b64
+oid sha256:eb8d96a68e732fca41980516622a50990bbd3ee989e72076a35c8608d9b4d136
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:83aaec0b5b7d8a2da4577075066cf434ce6e9feb9327edbea6677a2e51d76466
+oid sha256:ce6f1db6d09f4d89a9b2bd8dc8eeb99f1fada2ec04376e23b5a7a13004994005
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ccde9da0d23fd32800f01283cdb6c677def9ab43edb7232a9c4c4e9101a14cc0
+oid sha256:1d0c7456eafeee3179566bb381c9153771d7e0f21738d2398944d053915d0651
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.8446129560470581,
-  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4_frozenVision/lora/sft/checkpoint-1050",
-  "epoch": 0.5408189544166881,
+  "best_metric": 0.7743102312088013,
+  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4_frozenVision/lora/sft/checkpoint-1100",
+  "epoch": 0.56657223796034,
   "eval_steps": 50,
-  "global_step": 1050,
+  "global_step": 1100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1876,11 +1876,100 @@
       "eval_steps_per_second": 0.935,
       "num_input_tokens_seen": 12281072,
       "step": 1050
+    },
+    {
+      "epoch": 0.5433942827710533,
+      "grad_norm": 0.5518554447099218,
+      "learning_rate": 8.259242386389973e-05,
+      "loss": 0.8602,
+      "num_input_tokens_seen": 12339544,
+      "step": 1055
+    },
+    {
+      "epoch": 0.5459696111254185,
+      "grad_norm": 0.7300911438509382,
+      "learning_rate": 8.240764101059912e-05,
+      "loss": 0.8615,
+      "num_input_tokens_seen": 12397992,
+      "step": 1060
+    },
+    {
+      "epoch": 0.5485449394797837,
+      "grad_norm": 0.7364983085887583,
+      "learning_rate": 8.222209171180883e-05,
+      "loss": 0.8732,
+      "num_input_tokens_seen": 12456480,
+      "step": 1065
+    },
+    {
+      "epoch": 0.5511202678341488,
+      "grad_norm": 0.4840408774949972,
+      "learning_rate": 8.203578035579715e-05,
+      "loss": 0.8691,
+      "num_input_tokens_seen": 12515000,
+      "step": 1070
+    },
+    {
+      "epoch": 0.553695596188514,
+      "grad_norm": 0.516278691776577,
+      "learning_rate": 8.184871134885513e-05,
+      "loss": 0.8544,
+      "num_input_tokens_seen": 12573504,
+      "step": 1075
+    },
+    {
+      "epoch": 0.5562709245428792,
+      "grad_norm": 0.8626943002609527,
+      "learning_rate": 8.166088911519235e-05,
+      "loss": 0.8501,
+      "num_input_tokens_seen": 12632008,
+      "step": 1080
+    },
+    {
+      "epoch": 0.5588462528972444,
+      "grad_norm": 0.7409465187036862,
+      "learning_rate": 8.147231809683236e-05,
+      "loss": 0.8646,
+      "num_input_tokens_seen": 12690520,
+      "step": 1085
+    },
+    {
+      "epoch": 0.5614215812516096,
+      "grad_norm": 0.5736639247313171,
+      "learning_rate": 8.128300275350756e-05,
+      "loss": 0.8327,
+      "num_input_tokens_seen": 12749032,
+      "step": 1090
+    },
+    {
+      "epoch": 0.5639969096059748,
+      "grad_norm": 0.7720514157947642,
+      "learning_rate": 8.109294756255375e-05,
+      "loss": 0.8218,
+      "num_input_tokens_seen": 12807504,
+      "step": 1095
+    },
+    {
+      "epoch": 0.56657223796034,
+      "grad_norm": 0.9129011996506371,
+      "learning_rate": 8.090215701880419e-05,
+      "loss": 0.8427,
+      "num_input_tokens_seen": 12865992,
+      "step": 1100
+    },
+    {
+      "epoch": 0.56657223796034,
+      "eval_loss": 0.7743102312088013,
+      "eval_runtime": 16.1034,
+      "eval_samples_per_second": 3.726,
+      "eval_steps_per_second": 0.931,
+      "num_input_tokens_seen": 12865992,
+      "step": 1100
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen": 12281072,
+  "num_input_tokens_seen": 12865992,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -1895,7 +1984,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 689597635100672.0,
+  "total_flos": 722443216093184.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null