Commit d7d68ba (verified) · ben81828 committed · 1 parent: 060fc2a

Training in progress, step 2200, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d1c3e74f99d6e6f70e37f4de247287b17ec6f7280a9a5e1856338be701a8ce67
+ oid sha256:dc6d99739705e6763281ee09273da0b205242e8f372f5506b947c515878799f9
  size 29034840
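
The blocks in this diff are Git LFS pointer files, not the binary payloads themselves: each records the pointer-spec `version`, the blob's SHA-256 `oid`, and its `size` in bytes, so a new checkpoint changes only the `oid` while the adapter stays 29034840 bytes. A minimal sketch for checking that a locally downloaded blob matches its pointer (the local path is illustrative, not part of the commit):

```python
import hashlib
from pathlib import Path

def verify_lfs_pointer(blob_path: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a downloaded file against the oid/size recorded in its LFS pointer."""
    data = Path(blob_path).read_bytes()
    actual_oid = hashlib.sha256(data).hexdigest()
    return actual_oid == expected_oid and len(data) == expected_size

# Hypothetical local clone; oid/size copied from the pointer above.
ok = verify_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    "dc6d99739705e6763281ee09273da0b205242e8f372f5506b947c515878799f9",
    29034840,
)
print("pointer matches blob:", ok)
```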
last-checkpoint/global_step2200/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e4fe043b5cadc8517f11d478e72cd248b162e3a0d1d5114a0ae922964ce360aa
+ size 43429616
last-checkpoint/global_step2200/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d80d00d7800514d352dd987d59a09c74b7f731db7ab318cda5d99a6207425ae
+ size 43429616
last-checkpoint/global_step2200/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2c8478e0eacdd6121b67ce4d4c24aaf708a067b154a4317c0140f5adecb156b
+ size 43429616
last-checkpoint/global_step2200/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c43a286485090fd96465f49f9ad6a8761cf9c17b7fa07db6e5696433dec64cf8
+ size 43429616
last-checkpoint/global_step2200/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:03e85e9a8f15d009b7a2c0d66ad0f9ccf6f71ee8b946421a9e8140daf02469c2
+ size 637299
last-checkpoint/global_step2200/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1eda17816cb6a54c13e3632551cc61e833387382dba58c7f5746cfe31e437c7e
+ size 637171
last-checkpoint/global_step2200/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b39cdeaf4ab8e921200387e3671b7631731d65aa5eb68167423b11ff4bd888fb
+ size 637171
last-checkpoint/global_step2200/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2745094b0911b0ead6af362a14d1f5978c54c570c87d42bee5b6064a4cd37e41
+ size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step2150
+ global_step2200
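
The `latest` file is the DeepSpeed checkpoint tag: a one-line text file naming the newest `global_step*` directory, which now holds the four per-rank ZeRO optimizer and model state shards added above. A small sketch, assuming a local clone with the layout shown in this commit, that resolves the tag and lists the shard files (consolidating them into a single fp32 state dict would normally go through the `zero_to_fp32.py` helper that DeepSpeed typically drops next to the checkpoint):

```python
from pathlib import Path

checkpoint_dir = Path("last-checkpoint")  # hypothetical local path to this repo's checkpoint

# Read the tag written by DeepSpeed, e.g. "global_step2200".
tag = (checkpoint_dir / "latest").read_text().strip()
shard_dir = checkpoint_dir / tag

# Per-rank optimizer and model state shards saved by ZeRO (4 ranks in this run).
optim_shards = sorted(shard_dir.glob("bf16_zero_pp_rank_*_optim_states.pt"))
model_shards = sorted(shard_dir.glob("zero_pp_rank_*_model_states.pt"))
print(tag, "-", len(optim_shards), "optimizer shards,", len(model_shards), "model shards")
```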
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b105708e2c99c8661b46698b8ccc5799ac83c1f0fc6a30c2d41c9fbfb349d480
+ oid sha256:1c2f72d01585273766959f0cc9805fab753b53f20e581399855a293176ace988
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bc44cd2015d8c8fc2f109f07c797876873a52f478c57b0350b8a2cf5dcb17f25
+ oid sha256:3fd1ecda2bb159be37a2a23800e098324f5b0334e7189df47c343ca6cb7605a2
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9843dec201b5a542ebd69abfc596f99ad5a000cf81dab52c6a2c52a5b9224ea7
+ oid sha256:cf71c84ea2995fbc545b918d03f7f94c92293ca2e33343f177e6fd04531b7b19
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2296f0efda653dd4c7e861f5a867baa09d6d8bb50e57bc69af930268b40de9ef
+ oid sha256:72c53116f0f4c80841c24cd681d5fbd5a5992b259583a4cfb493f8f3e4544d82
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b804035c19d1fffbc2b920172de721087c798cb2e8bcad45f6808db2d808af75
+ oid sha256:01d30aec100967976eb875a41f6c605190fbb2f410e1523b990ce51daf9cd1a5
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
    "best_metric": 0.25809118151664734,
    "best_model_checkpoint": "saves/CADICA_qwenvl_direction_then_DetectAndClassify_scale6/lora/sft/checkpoint-2150",
-   "epoch": 0.6351550960118169,
+   "epoch": 0.6499261447562777,
    "eval_steps": 50,
-   "global_step": 2150,
+   "global_step": 2200,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
@@ -3834,11 +3834,100 @@
      "eval_steps_per_second": 0.777,
      "num_input_tokens_seen": 22332144,
      "step": 2150
+   },
+   {
+     "epoch": 0.6366322008862629,
+     "grad_norm": 48.075734297605706,
+     "learning_rate": 8.158198600908405e-05,
+     "loss": 0.2606,
+     "num_input_tokens_seen": 22383912,
+     "step": 2155
+   },
+   {
+     "epoch": 0.638109305760709,
+     "grad_norm": 12.349268472522956,
+     "learning_rate": 8.148721153369411e-05,
+     "loss": 0.2672,
+     "num_input_tokens_seen": 22435504,
+     "step": 2160
+   },
+   {
+     "epoch": 0.6395864106351551,
+     "grad_norm": 17.499211985691577,
+     "learning_rate": 8.139224920591598e-05,
+     "loss": 0.2771,
+     "num_input_tokens_seen": 22487696,
+     "step": 2165
+   },
+   {
+     "epoch": 0.6410635155096012,
+     "grad_norm": 7.176310226710563,
+     "learning_rate": 8.129709959229388e-05,
+     "loss": 0.3018,
+     "num_input_tokens_seen": 22539664,
+     "step": 2170
+   },
+   {
+     "epoch": 0.6425406203840472,
+     "grad_norm": 22.989487671947256,
+     "learning_rate": 8.120176326048949e-05,
+     "loss": 0.312,
+     "num_input_tokens_seen": 22592240,
+     "step": 2175
+   },
+   {
+     "epoch": 0.6440177252584933,
+     "grad_norm": 2.0595093158376825,
+     "learning_rate": 8.110624077927842e-05,
+     "loss": 0.2413,
+     "num_input_tokens_seen": 22643648,
+     "step": 2180
+   },
+   {
+     "epoch": 0.6454948301329394,
+     "grad_norm": 1.0826681349485223,
+     "learning_rate": 8.101053271854682e-05,
+     "loss": 0.2585,
+     "num_input_tokens_seen": 22695208,
+     "step": 2185
+   },
+   {
+     "epoch": 0.6469719350073855,
+     "grad_norm": 1.351248688875387,
+     "learning_rate": 8.091463964928801e-05,
+     "loss": 0.2621,
+     "num_input_tokens_seen": 22746896,
+     "step": 2190
+   },
+   {
+     "epoch": 0.6484490398818316,
+     "grad_norm": 7.190623982268162,
+     "learning_rate": 8.081856214359908e-05,
+     "loss": 0.324,
+     "num_input_tokens_seen": 22797936,
+     "step": 2195
+   },
+   {
+     "epoch": 0.6499261447562777,
+     "grad_norm": 0.9587990520200799,
+     "learning_rate": 8.072230077467748e-05,
+     "loss": 0.2662,
+     "num_input_tokens_seen": 22849552,
+     "step": 2200
+   },
+   {
+     "epoch": 0.6499261447562777,
+     "eval_loss": 0.48401138186454773,
+     "eval_runtime": 19.182,
+     "eval_samples_per_second": 3.128,
+     "eval_steps_per_second": 0.782,
+     "num_input_tokens_seen": 22849552,
+     "step": 2200
    }
  ],
  "logging_steps": 5,
  "max_steps": 6770,
- "num_input_tokens_seen": 22332144,
+ "num_input_tokens_seen": 22849552,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -3853,7 +3942,7 @@
      "attributes": {}
    }
  },
- "total_flos": 1473404374351872.0,
+ "total_flos": 1507548125986816.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null