ben81828 committed
Commit afbeda0 · verified · 1 Parent(s): 6910ee6

Training in progress, step 2700, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:827ad766e3a13984fdb182a9e1ff3663479a4a48e1ebea8bd0fa17625232d440
+ oid sha256:87c5131642e0134183eead6ee8a652dcec18d08a73880b46e923d3ba034a1f2c
  size 18516456
last-checkpoint/global_step2699/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:49c1953dd55a71d816604c9d88f5dfaf3de7cd1ff785816c4692c773827c2ad9
+ size 27700976
last-checkpoint/global_step2699/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d0a9c2679718849ab4b5d4b2f1d614cc1567d24c74158b6955041b79af75809
+ size 27700976
last-checkpoint/global_step2699/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:523ee62505a7c73d100c34d0a02ed8a16d6b35bf45179043c6a4cdeee2af7e77
+ size 27700976
last-checkpoint/global_step2699/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a9e9918d979a0f9cb8ee62ff3c09dca43ec4f7d49cdd8ad9a2db3d4dca84f22d
+ size 27700976
last-checkpoint/global_step2699/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7bc5e000dda8c9fd913e37f23f94be6c564e7be0729e32e4803afcd7fa6884f4
+ size 411571
last-checkpoint/global_step2699/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b602913ea9e3a7f86da6723379d18b2b0e40385954be7b0ae584fc9967b2fb4a
+ size 411507
last-checkpoint/global_step2699/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0485fe4c0ce08f8978f2d62d20e978fb3e99467ef8df597291bd1c294084aaae
+ size 411507
last-checkpoint/global_step2699/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8408e6b1fca8b179b6e186db6945579ebf2fb33f98daf209b41784641dad2483
+ size 411507
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step2649
+ global_step2699
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9e599331812a34463d102d64a4034a0b702a893f362f752003aa577fe71dcc1d
+ oid sha256:5d8d3c7739f9787ea797b86ff1b3a51f9e68197835ba3178915a8a77558f67fc
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5ed431e5e71393a0174ad2fd492755f8c1142596f1af3bfe7827c1f8f815dd80
+ oid sha256:a22a57799bc43e59db67d9a787ed73040020c5f35990602033f4dab1318787d7
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a2e70789f26a9f56b6b779e87cb1a405615af81562a256e5afe579f40972e827
+ oid sha256:29a624b936b77a04d6bfb6940acdd65a710bf39452e419e7ddb5c40fb2261072
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7c8c18bc74d5211e761da269c814d7da0687633993838ec22e81ac939a14e91b
+ oid sha256:3a79306817d4440cd621149537e8cf216b60f847fc6f9531a6147426aa02bb07
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bfcf85c736e61a53c653b2d1f3342ce104fa9dc3f2c57b7753ecb2c25635e267
+ oid sha256:5e5dcca4048a125fff8fd284657b0498882f3efcb97d36e331842fc3d6d7b6e6
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
    "best_metric": 0.6319106221199036,
    "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4_frozenVision/lora/sft/checkpoint-1600",
-   "epoch": 1.3646664949781098,
+   "epoch": 1.3904197785217616,
    "eval_steps": 50,
-   "global_step": 2650,
+   "global_step": 2700,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
@@ -4724,11 +4724,100 @@
      "eval_steps_per_second": 0.933,
      "num_input_tokens_seen": 30988344,
      "step": 2650
+   },
+   {
+     "epoch": 1.367241823332475,
+     "grad_norm": 5.445887797084714,
+     "learning_rate": 1.2562061892553473e-05,
+     "loss": 0.3207,
+     "num_input_tokens_seen": 31046848,
+     "step": 2655
+   },
+   {
+     "epoch": 1.36981715168684,
+     "grad_norm": 8.28343197617098,
+     "learning_rate": 1.2401330199290367e-05,
+     "loss": 0.3001,
+     "num_input_tokens_seen": 31105352,
+     "step": 2660
+   },
+   {
+     "epoch": 1.3723924800412053,
+     "grad_norm": 6.0349779847885054,
+     "learning_rate": 1.224148772011346e-05,
+     "loss": 0.2858,
+     "num_input_tokens_seen": 31163848,
+     "step": 2665
+   },
+   {
+     "epoch": 1.3749678083955703,
+     "grad_norm": 6.430225669948217,
+     "learning_rate": 1.2082538235320929e-05,
+     "loss": 0.2338,
+     "num_input_tokens_seen": 31222360,
+     "step": 2670
+   },
+   {
+     "epoch": 1.3775431367499356,
+     "grad_norm": 7.550675916086161,
+     "learning_rate": 1.1924485504091565e-05,
+     "loss": 0.2212,
+     "num_input_tokens_seen": 31280840,
+     "step": 2675
+   },
+   {
+     "epoch": 1.3801184651043008,
+     "grad_norm": 9.927835245980713,
+     "learning_rate": 1.1767333264395736e-05,
+     "loss": 0.3131,
+     "num_input_tokens_seen": 31339264,
+     "step": 2680
+   },
+   {
+     "epoch": 1.382693793458666,
+     "grad_norm": 6.940248775417007,
+     "learning_rate": 1.1611085232907132e-05,
+     "loss": 0.3616,
+     "num_input_tokens_seen": 31397744,
+     "step": 2685
+   },
+   {
+     "epoch": 1.385269121813031,
+     "grad_norm": 13.50108715364713,
+     "learning_rate": 1.14557451049147e-05,
+     "loss": 0.3153,
+     "num_input_tokens_seen": 31456240,
+     "step": 2690
+   },
+   {
+     "epoch": 1.3878444501673963,
+     "grad_norm": 5.379761157260886,
+     "learning_rate": 1.1301316554235397e-05,
+     "loss": 0.3044,
+     "num_input_tokens_seen": 31514744,
+     "step": 2695
+   },
+   {
+     "epoch": 1.3904197785217616,
+     "grad_norm": 6.480605347127299,
+     "learning_rate": 1.114780323312724e-05,
+     "loss": 0.3163,
+     "num_input_tokens_seen": 31573240,
+     "step": 2700
+   },
+   {
+     "epoch": 1.3904197785217616,
+     "eval_loss": 0.7473158240318298,
+     "eval_runtime": 16.166,
+     "eval_samples_per_second": 3.711,
+     "eval_steps_per_second": 0.928,
+     "num_input_tokens_seen": 31573240,
+     "step": 2700
    }
  ],
  "logging_steps": 5,
  "max_steps": 3400,
- "num_input_tokens_seen": 30988344,
+ "num_input_tokens_seen": 31573240,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -4743,7 +4832,7 @@
      "attributes": {}
    }
  },
- "total_flos": 1740143807954944.0,
+ "total_flos": 1772988198813696.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null