ben81828 committed (verified)
Commit: a6c390b
Parent: c2bdee9

Training in progress, step 3350, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:45971737e3e2100e724159295c32390e6a57ebf38af6100e1947303c16949ead
+oid sha256:d8815c16681432a11b5ac188380ad3a07078a835a470d216013977c67742fdcc
 size 18516456
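The two stanzas above are Git LFS pointers: only the SHA-256 digest and byte size of the LoRA adapter weights change between the old and new checkpoint. Below is a minimal sketch of verifying a locally downloaded adapter_model.safetensors against the new pointer; the local path is an assumption about where the LFS object was materialized.

```python
import hashlib
import os

# Assumed local path; adjust to wherever the checkpoint was downloaded.
ADAPTER_PATH = "last-checkpoint/adapter_model.safetensors"
EXPECTED_OID = "d8815c16681432a11b5ac188380ad3a07078a835a470d216013977c67742fdcc"
EXPECTED_SIZE = 18516456

def sha256_of(path: str) -> str:
    """Hash the file in 1 MiB chunks so large checkpoints stay out of memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

assert os.path.getsize(ADAPTER_PATH) == EXPECTED_SIZE, "size does not match the LFS pointer"
assert sha256_of(ADAPTER_PATH) == EXPECTED_OID, "sha256 does not match the LFS pointer"
```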
last-checkpoint/global_step3349/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9714545356a6630ff7f755f70b5be0c5e7edf5d539477ee88ea052d1f5598ebd
+size 27700976
last-checkpoint/global_step3349/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d1579920fea0ef33bc5b33ce6cf141616f20b0269e8a7bdd9f0a90f26f6a2c9
+size 27700976
last-checkpoint/global_step3349/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fd6db98ef211e2c56dd2d666af47364ceb1233cd2d1ee905f273134b0d08006
+size 27700976
last-checkpoint/global_step3349/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04701505f90deb228f105d51a68b8700254f1af158c358157a7f5164a6337094
+size 27700976
last-checkpoint/global_step3349/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fa9df0b0da29b86c93aae4c46944940ad56253950056266aac238ca870ad70eb
+size 411571
last-checkpoint/global_step3349/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:886be10efdee881980fd6ce46c1367af0920a4635a04619ca3555bda180eea9a
+size 411507
last-checkpoint/global_step3349/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ee14de80690392657e5287da53bf812aee796c74cd5fc6b8b0e06086a79e86e
+size 411507
last-checkpoint/global_step3349/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:24dbd4e75dd77a6428d2b7668112b6f9f43420d98041e84c360bd2332d8b128c
+size 411507
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step3299
+global_step3349
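The new global_step3349/ files are DeepSpeed ZeRO shards (one bf16 optimizer-states file and one model-states file per data-parallel rank, four ranks here), and latest is the tag file DeepSpeed consults when resuming. Below is a minimal sketch of merging the shards into a single fp32 state dict with DeepSpeed's bundled zero_to_fp32 utility, assuming the checkpoint has been downloaded to a local last-checkpoint/ directory.

```python
import torch
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# The directory passed here must contain the `latest` tag file and the
# global_step3349/ shard folder; the tag is read automatically when omitted.
state_dict = get_fp32_state_dict_from_zero_checkpoint("last-checkpoint")
torch.save(state_dict, "consolidated_fp32_state_dict.pt")
```

DeepSpeed also ships the same logic as a standalone zero_to_fp32.py script, which can be run against the checkpoint directory from the command line.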
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:26d08ced4d0f6490515a22c9e9401cc7f71de8b6e2c1525e9c8dac221d4b80ab
+oid sha256:abe9163f042a56ab41ea5c2436dff084d8a4a6358e7f4cb1f18e04cb69810300
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:182d3e4bee7983edebdece2816547628c5ac3e14f5b53eac54c08ad9cbfbfac7
+oid sha256:3c621ead8d06a0f1d00c5217cc2cfdc90c8c62fa1cb0da0986461ec51fd1766b
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:674c98433de6d8eccea8d9f711e54aa6a237220bb981e22133b7735c14dc835f
+oid sha256:eff8dbefa4ff395a5376144d756cbe824baaab98a892f200d30b7916c24d27cf
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4e6ca2e2b0a88474561faf5e462ebee1de20672630f977a22533f633eb059d6d
+oid sha256:dd67eb847a256b4f0de5857c5e8a43697485d1a0f6032004d0bc19149d77879c
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ecf2871d286bb06cddbfaac7d28300a567a2a36a68cd74d9cb437d09165d13f9
+oid sha256:df3be552cf2524f9ece2b6a286f0ce246d18d14d42f9b8c771a555e051bcee33
 size 1064
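rng_state_0.pth through rng_state_3.pth snapshot the per-rank random-number-generator states and scheduler.pt the learning-rate scheduler state, which is what lets a resumed run continue the schedule and data ordering exactly. A minimal sketch of inspecting the two files with PyTorch follows; the paths assume a local copy of last-checkpoint/.

```python
import torch

# weights_only=False is needed on recent PyTorch releases because these files
# pickle plain Python/NumPy objects, not just tensors.
sched_state = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load("last-checkpoint/rng_state_0.pth", map_location="cpu", weights_only=False)

print(sched_state)               # scheduler state_dict, e.g. last_epoch / base LRs
print(sorted(rng_state.keys()))  # per-library RNG snapshots saved by the Trainer
```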
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.6319106221199036,
   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4_frozenVision/lora/sft/checkpoint-1600",
-  "epoch": 1.6994591810455835,
+  "epoch": 1.725212464589235,
   "eval_steps": 50,
-  "global_step": 3300,
+  "global_step": 3350,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -5881,11 +5881,100 @@
       "eval_steps_per_second": 0.929,
       "num_input_tokens_seen": 38591128,
       "step": 3300
+    },
+    {
+      "epoch": 1.7020345093999485,
+      "grad_norm": 4.935833215525081,
+      "learning_rate": 2.1329118524827662e-07,
+      "loss": 0.2337,
+      "num_input_tokens_seen": 38649640,
+      "step": 3305
+    },
+    {
+      "epoch": 1.7046098377543135,
+      "grad_norm": 5.746920185244728,
+      "learning_rate": 1.9144427761286222e-07,
+      "loss": 0.215,
+      "num_input_tokens_seen": 38708112,
+      "step": 3310
+    },
+    {
+      "epoch": 1.7071851661086788,
+      "grad_norm": 6.501004359690972,
+      "learning_rate": 1.7077534966650766e-07,
+      "loss": 0.2871,
+      "num_input_tokens_seen": 38766624,
+      "step": 3315
+    },
+    {
+      "epoch": 1.709760494463044,
+      "grad_norm": 6.996403813160393,
+      "learning_rate": 1.51284890232406e-07,
+      "loss": 0.3478,
+      "num_input_tokens_seen": 38825104,
+      "step": 3320
+    },
+    {
+      "epoch": 1.7123358228174093,
+      "grad_norm": 5.178545190033401,
+      "learning_rate": 1.3297336026280027e-07,
+      "loss": 0.2055,
+      "num_input_tokens_seen": 38883560,
+      "step": 3325
+    },
+    {
+      "epoch": 1.7149111511717745,
+      "grad_norm": 6.686144266429449,
+      "learning_rate": 1.158411928280645e-07,
+      "loss": 0.2992,
+      "num_input_tokens_seen": 38942040,
+      "step": 3330
+    },
+    {
+      "epoch": 1.7174864795261395,
+      "grad_norm": 4.337439288142164,
+      "learning_rate": 9.988879310649513e-08,
+      "loss": 0.2302,
+      "num_input_tokens_seen": 39000488,
+      "step": 3335
+    },
+    {
+      "epoch": 1.7200618078805048,
+      "grad_norm": 6.5240260149211755,
+      "learning_rate": 8.511653837470212e-08,
+      "loss": 0.265,
+      "num_input_tokens_seen": 39058960,
+      "step": 3340
+    },
+    {
+      "epoch": 1.7226371362348698,
+      "grad_norm": 7.592689596688837,
+      "learning_rate": 7.152477799867719e-08,
+      "loss": 0.3147,
+      "num_input_tokens_seen": 39117416,
+      "step": 3345
+    },
+    {
+      "epoch": 1.725212464589235,
+      "grad_norm": 6.429413076205037,
+      "learning_rate": 5.911383342556143e-08,
+      "loss": 0.2674,
+      "num_input_tokens_seen": 39175888,
+      "step": 3350
+    },
+    {
+      "epoch": 1.725212464589235,
+      "eval_loss": 0.8666485548019409,
+      "eval_runtime": 16.1238,
+      "eval_samples_per_second": 3.721,
+      "eval_steps_per_second": 0.93,
+      "num_input_tokens_seen": 39175888,
+      "step": 3350
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen": 38591128,
+  "num_input_tokens_seen": 39175888,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -5900,7 +5989,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2167080547254272.0,
+  "total_flos": 2199919870083072.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null