ben81828 committed (verified)
Commit cdf4c4e · 1 Parent(s): a7329ba

Training in progress, step 1150, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f16f6078244d0994120e51d527f92194dad6fa70876b55e40b787da46626e91a
+ oid sha256:050dc1f1854d68a42fb308d9dd143ddc3d6fc0d86870ea1eac6bf90317df973a
  size 29034840
last-checkpoint/global_step1150/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e55a1500f8b0709fe979be0a0d2ee80da98483414a2d9cb6c031c4973ee192ef
+ size 43429616
last-checkpoint/global_step1150/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e0caf7ee7bd7ce7e7503ae05524992763bb3bb77347dbe068bcae95b94b3fcc2
+ size 43429616
last-checkpoint/global_step1150/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:58dcc807ed02b7212af3f3181a1663fa62c42f0666df545a96e79db27d9d71e7
+ size 43429616
last-checkpoint/global_step1150/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d4070d3a090f246cdf0d8dc238f5a3d89f86e7b8eeba2f18d6facbb9d474005
+ size 43429616
last-checkpoint/global_step1150/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa758cf6fa59b0c76382480e4cb3a02f4e3bcc7c5814e40e5eff7203544b973f
+ size 637299
last-checkpoint/global_step1150/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a739833690f638f8f3ea52ef250f6914f0d6cc7cf09d4ba85216fa45aed5b895
+ size 637171
last-checkpoint/global_step1150/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:64a9559916dc6f652d640305c67706ef9b17164de4b12a7f651920cf80c4d653
+ size 637171
last-checkpoint/global_step1150/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:94a93cc4afb60025355716cc53e16bf4936ae88fb37858092e9cfc5c70e1b9a9
+ size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step1100
+ global_step1150
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9d9fea52fb92cc51e76feeb2b139ce35723c0cb651da383e4f7eec2606ed6c2a
+ oid sha256:274dc3860ee0c7f4d5348f60910a4b568498c04adfefb89f905b1c78a82c1312
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1b5820ebfcc2e1cfe1ad2619a05ea9a484ff21635e13e386bf14abd302f2c0f7
+ oid sha256:e9aa441491b9ca89e796944520fa1db332a67c0a1a920be83edd2d96d741716d
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:eb8d96a68e732fca41980516622a50990bbd3ee989e72076a35c8608d9b4d136
+ oid sha256:c1ee3434533b24fb771504fa8cceb5c2ea25fe0de1641128feaceccc65afe6ed
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ce6f1db6d09f4d89a9b2bd8dc8eeb99f1fada2ec04376e23b5a7a13004994005
+ oid sha256:93b4a44be1335173d2e3120bd0d1e6346f3e832d8935752c70ce1e98f017fa87
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1d0c7456eafeee3179566bb381c9153771d7e0f21738d2398944d053915d0651
+ oid sha256:7d461c8d7517d4b88333bff7984fc3bfc149292198b04bbc18a49aee698ffb5c
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 0.0003656313638202846,
- "best_model_checkpoint": "saves/CADICA_qwenvl_direction_scale4/lora/sft/checkpoint-1100",
- "epoch": 0.56657223796034,
+ "best_metric": 1.5566551155643538e-05,
+ "best_model_checkpoint": "saves/CADICA_qwenvl_direction_scale4/lora/sft/checkpoint-1150",
+ "epoch": 0.5923255215039918,
  "eval_steps": 50,
- "global_step": 1100,
+ "global_step": 1150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -1965,11 +1965,100 @@
  "eval_steps_per_second": 0.777,
  "num_input_tokens_seen": 10982400,
  "step": 1100
+ },
+ {
+ "epoch": 0.5691475663147051,
+ "grad_norm": 0.020540774848059624,
+ "learning_rate": 8.07106356344834e-05,
+ "loss": 0.0005,
+ "num_input_tokens_seen": 11032320,
+ "step": 1105
+ },
+ {
+ "epoch": 0.5717228946690703,
+ "grad_norm": 0.049646985711380834,
+ "learning_rate": 8.051838793910038e-05,
+ "loss": 0.0008,
+ "num_input_tokens_seen": 11082240,
+ "step": 1110
+ },
+ {
+ "epoch": 0.5742982230234355,
+ "grad_norm": 0.02410302194894019,
+ "learning_rate": 8.032541847934146e-05,
+ "loss": 0.001,
+ "num_input_tokens_seen": 11132160,
+ "step": 1115
+ },
+ {
+ "epoch": 0.5768735513778007,
+ "grad_norm": 0.03404095257940449,
+ "learning_rate": 8.013173181896283e-05,
+ "loss": 0.0219,
+ "num_input_tokens_seen": 11182080,
+ "step": 1120
+ },
+ {
+ "epoch": 0.5794488797321659,
+ "grad_norm": 0.0005611895912019751,
+ "learning_rate": 7.993733253868256e-05,
+ "loss": 0.0,
+ "num_input_tokens_seen": 11232000,
+ "step": 1125
+ },
+ {
+ "epoch": 0.582024208086531,
+ "grad_norm": 0.03678453984585236,
+ "learning_rate": 7.974222523607236e-05,
+ "loss": 0.0,
+ "num_input_tokens_seen": 11281920,
+ "step": 1130
+ },
+ {
+ "epoch": 0.5845995364408962,
+ "grad_norm": 0.002820815423322981,
+ "learning_rate": 7.954641452544865e-05,
+ "loss": 0.0001,
+ "num_input_tokens_seen": 11331840,
+ "step": 1135
+ },
+ {
+ "epoch": 0.5871748647952614,
+ "grad_norm": 0.003003123169716893,
+ "learning_rate": 7.934990503776363e-05,
+ "loss": 0.0,
+ "num_input_tokens_seen": 11381760,
+ "step": 1140
+ },
+ {
+ "epoch": 0.5897501931496266,
+ "grad_norm": 0.020018776153065302,
+ "learning_rate": 7.915270142049566e-05,
+ "loss": 0.0001,
+ "num_input_tokens_seen": 11431680,
+ "step": 1145
+ },
+ {
+ "epoch": 0.5923255215039918,
+ "grad_norm": 0.0005407295497242103,
+ "learning_rate": 7.89548083375394e-05,
+ "loss": 0.0127,
+ "num_input_tokens_seen": 11481600,
+ "step": 1150
+ },
+ {
+ "epoch": 0.5923255215039918,
+ "eval_loss": 1.5566551155643538e-05,
+ "eval_runtime": 19.3425,
+ "eval_samples_per_second": 3.102,
+ "eval_steps_per_second": 0.775,
+ "num_input_tokens_seen": 11481600,
+ "step": 1150
  }
  ],
  "logging_steps": 5,
  "max_steps": 3400,
- "num_input_tokens_seen": 10982400,
+ "num_input_tokens_seen": 11481600,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -1984,7 +2073,7 @@
  "attributes": {}
  }
  },
- "total_flos": 724624378953728.0,
+ "total_flos": 757564358524928.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null