ben81828 committed
Commit a1d6693 · verified · 1 Parent(s): defd805

Training in progress, step 3350, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:edfec1beb3478ee41909b5ce24ea198317f07af35edd0e3dbe2802570cfa4c2f
+ oid sha256:1b41c1be8b974e693981390b4f8ea3183b990f4c6cbeb85e09c310968df2c797
  size 29034840
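
The adapter_model.safetensors pointer above is the LoRA adapter saved with this checkpoint. A minimal sketch of loading it for inference with PEFT, assuming the adapter was trained against a Qwen2-VL-style base model (the base checkpoint id below is a placeholder, not taken from this repo) and that last-checkpoint/ has been fetched locally with its adapter_config.json and the real LFS payloads rather than the pointer files:

import torch
from transformers import AutoModelForVision2Seq, AutoProcessor
from peft import PeftModel

BASE_MODEL = "Qwen/Qwen2-VL-7B-Instruct"   # placeholder; substitute the actual base checkpoint
ADAPTER_DIR = "last-checkpoint"            # folder holding adapter_model.safetensors

# Load the frozen base model, then attach the LoRA weights from this checkpoint.
base = AutoModelForVision2Seq.from_pretrained(BASE_MODEL, torch_dtype=torch.bfloat16)
processor = AutoProcessor.from_pretrained(BASE_MODEL)
model = PeftModel.from_pretrained(base, ADAPTER_DIR)
model.eval()
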
last-checkpoint/global_step3350/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d20b600d06facb5d1a65de24264de7232da2e2cd6f969ab08631a24840a3405
+ size 43429616
last-checkpoint/global_step3350/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2b008e2b80435e1bd3bec146b2acf3573291d2d3f56e6e6243e3f4eb986ca65
+ size 43429616
last-checkpoint/global_step3350/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:41228dce70389c5be02f1d9fd4103e635a6d41918c34dc3d3e6a22a9d2fbdf3c
+ size 43429616
last-checkpoint/global_step3350/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd214d9e5759263f6575a06133cc7923a0629469860b9537d1a52db881455bc5
+ size 43429616
last-checkpoint/global_step3350/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:073795d432f58b231e7787ae71bffd4b21abaa4a026ff18ff2e2ed8893c36861
+ size 637299
last-checkpoint/global_step3350/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:643c6a357e2e8c148a6add3e575bb83cf59b2d675d40d1e16b4a4d241c96db90
+ size 637171
last-checkpoint/global_step3350/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2f145fc244b1bf5a757743dfcb4b470273c31ce1d3708ccf6edfdc0edfd08ec
+ size 637171
last-checkpoint/global_step3350/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61ca17421513fa3bb768f1adbd5c20c63c1ca5515293cf864bfe65f82c03da1f
+ size 637171
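
The global_step3350/ files added above are DeepSpeed ZeRO shards: one bf16 optimizer-state partition and one model-state file per data-parallel rank (four ranks here). A sketch of consolidating them into a single fp32 state dict with DeepSpeed's helper, assuming DeepSpeed is installed and the whole last-checkpoint/ tree (including the latest tag file) has been downloaded with the actual LFS contents:

from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# Reads last-checkpoint/latest to locate global_step3350 and merges the per-rank shards.
state_dict = get_fp32_state_dict_from_zero_checkpoint("last-checkpoint")
print(len(state_dict), "consolidated parameter tensors")
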
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step3300
+ global_step3350
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9d8a584abdea9bed1d2dc22d8c9034ba07ae85ad6668d6cd55747af816dcecc1
+ oid sha256:26d08ced4d0f6490515a22c9e9401cc7f71de8b6e2c1525e9c8dac221d4b80ab
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7862a59bb6480df16b96e840172772c30d9d3037819d18063df33360e83b04a2
+ oid sha256:182d3e4bee7983edebdece2816547628c5ac3e14f5b53eac54c08ad9cbfbfac7
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3b509c445a6a6ed6d0215c0e232a66489dc20e39b5202d274561183fd43ef3e5
+ oid sha256:674c98433de6d8eccea8d9f711e54aa6a237220bb981e22133b7735c14dc835f
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1cf1a6a26872b9918c1086ace4fcda88a76d9b6eef501300fd67bf0cc8946d81
+ oid sha256:4e6ca2e2b0a88474561faf5e462ebee1de20672630f977a22533f633eb059d6d
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ecf2871d286bb06cddbfaac7d28300a567a2a36a68cd74d9cb437d09165d13f9
+ oid sha256:df3be552cf2524f9ece2b6a286f0ce246d18d14d42f9b8c771a555e051bcee33
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.4145541489124298,
   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_detect_scale4/lora/sft/checkpoint-2350",
-  "epoch": 0.8498583569405099,
+  "epoch": 0.8627349987123358,
   "eval_steps": 50,
-  "global_step": 3300,
+  "global_step": 3350,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -5881,11 +5881,100 @@
       "eval_steps_per_second": 0.798,
       "num_input_tokens_seen": 34654480,
       "step": 3300
+    },
+    {
+      "epoch": 0.8511460211176926,
+      "grad_norm": 1.3186556509713656,
+      "learning_rate": 2.1329118524827662e-07,
+      "loss": 0.3195,
+      "num_input_tokens_seen": 34706600,
+      "step": 3305
+    },
+    {
+      "epoch": 0.8524336852948751,
+      "grad_norm": 5.6829477329970475,
+      "learning_rate": 1.9144427761286222e-07,
+      "loss": 0.2817,
+      "num_input_tokens_seen": 34759528,
+      "step": 3310
+    },
+    {
+      "epoch": 0.8537213494720577,
+      "grad_norm": 10.648820125115563,
+      "learning_rate": 1.7077534966650766e-07,
+      "loss": 0.3131,
+      "num_input_tokens_seen": 34811832,
+      "step": 3315
+    },
+    {
+      "epoch": 0.8550090136492403,
+      "grad_norm": 1.412998871252427,
+      "learning_rate": 1.51284890232406e-07,
+      "loss": 0.2926,
+      "num_input_tokens_seen": 34864696,
+      "step": 3320
+    },
+    {
+      "epoch": 0.8562966778264228,
+      "grad_norm": 2.3084307837646425,
+      "learning_rate": 1.3297336026280027e-07,
+      "loss": 0.2606,
+      "num_input_tokens_seen": 34917584,
+      "step": 3325
+    },
+    {
+      "epoch": 0.8575843420036054,
+      "grad_norm": 1.9582259889928806,
+      "learning_rate": 1.158411928280645e-07,
+      "loss": 0.3203,
+      "num_input_tokens_seen": 34969720,
+      "step": 3330
+    },
+    {
+      "epoch": 0.8588720061807881,
+      "grad_norm": 4.1936933517667825,
+      "learning_rate": 9.988879310649513e-08,
+      "loss": 0.3211,
+      "num_input_tokens_seen": 35021296,
+      "step": 3335
+    },
+    {
+      "epoch": 0.8601596703579707,
+      "grad_norm": 5.882661534247897,
+      "learning_rate": 8.511653837470212e-08,
+      "loss": 0.2923,
+      "num_input_tokens_seen": 35073120,
+      "step": 3340
+    },
+    {
+      "epoch": 0.8614473345351532,
+      "grad_norm": 6.708718647923996,
+      "learning_rate": 7.152477799867719e-08,
+      "loss": 0.289,
+      "num_input_tokens_seen": 35126296,
+      "step": 3345
+    },
+    {
+      "epoch": 0.8627349987123358,
+      "grad_norm": 2.5743366606144185,
+      "learning_rate": 5.911383342556143e-08,
+      "loss": 0.3253,
+      "num_input_tokens_seen": 35179104,
+      "step": 3350
+    },
+    {
+      "epoch": 0.8627349987123358,
+      "eval_loss": 0.4714648127555847,
+      "eval_runtime": 37.5951,
+      "eval_samples_per_second": 3.192,
+      "eval_steps_per_second": 0.798,
+      "num_input_tokens_seen": 35179104,
+      "step": 3350
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen": 34654480,
+  "num_input_tokens_seen": 35179104,
   "num_train_epochs": 1,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -5900,7 +5989,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2286412268568576.0,
+  "total_flos": 2321016547704832.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null