ben81828 committed
Commit bcd1446 · verified · 1 Parent(s): 0fb97e2

Training in progress, step 450, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8499cdded170833460860914d9976764a5d9563bba2689c4e8900b95bab5712a
+oid sha256:d6563ebf68bb7cb8cee897a19128b74ef4208c6802d855d4f6562f7c26273f70
 size 29034840
last-checkpoint/global_step450/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b55f4575188d91c76c82c6604d74d9072cb87f5a25c83a1422d18e9a3981296
+size 43429616
last-checkpoint/global_step450/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dfb4cf2ed3c3207ed357bfcf90b6cf77a8d8516195639f4f2f43f533a4d5baaf
+size 43429616
last-checkpoint/global_step450/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b6959253acb0958e24e4d551f262ea041f55f82cafaf798926ce915ed93b31e
+size 43429616
last-checkpoint/global_step450/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e2f83a95f144d26704aec59a04dedb11b8b66af0f86fe0761af35f1ea688f73
+size 43429616
last-checkpoint/global_step450/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b2c43b6581640ec9140a8e671944ff69d96a0284c4069ddbe5a73547523d050e
+size 637299
last-checkpoint/global_step450/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e7b078653231149a903bc00280c61fc902a6ebe6078af02817b5efd13e9ef2b
+size 637171
last-checkpoint/global_step450/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3014bb4485c273ec85ee6adab304455dcd75b99ca668f9c10e04f1ebc443fec8
+size 637171
last-checkpoint/global_step450/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f23e7a4eb8e027c3a1e9ed72ab3097610a2c4faa00790d4b003da7cdb06018e2
+size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step400
+global_step450
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:543ef05f530d40ee20b8d626b07a69b86597aca643e48897571062f973efe84f
+oid sha256:7dbc6521b0b64cb12d818506108fcf257a4089ca8a9b1e453776ed3e032e7176
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7a23f732e43838ce0398d2636885ac16badbb9bcbc04d1406069ba3027bc5ae0
+oid sha256:2b13e3da1b0679cab1bab94f893e385a9a224d3335b5a6f62602f33c2be88d03
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e10cce960e7068b051c05e35ed6160656be9091c63f13796ac2ed7e9c84e5a72
+oid sha256:6a24f0e0f117b5a8236e0d12594c0c358f41ef00068d4460002e95ad1cc3cb1c
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b6f6049e212b1df5cefc5d834afcd8cc052c73f1457449e9fe8a38d514f54078
+oid sha256:e46e4eab6c4a25d84ad36ddf1357401788adeeb6388c03cefa35a63b52ee7610
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:32762cec872a9c6d771fb0f76b3f72991fda55aee1494a130c6c6b449c48a001
+oid sha256:192829f095e6c906a86011515c9b1d243ad2d8f891793c9e500d2d433b84fc78
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.024433813989162445,
-  "best_model_checkpoint": "saves/CADICA_qwenvl_direction_scale4/lora/sft/checkpoint-400",
-  "epoch": 0.20602626834921453,
+  "best_metric": 0.006690301466733217,
+  "best_model_checkpoint": "saves/CADICA_qwenvl_direction_scale4/lora/sft/checkpoint-450",
+  "epoch": 0.23177955189286634,
   "eval_steps": 50,
-  "global_step": 400,
+  "global_step": 450,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -719,11 +719,100 @@
       "eval_steps_per_second": 0.797,
       "num_input_tokens_seen": 3993600,
       "step": 400
+    },
+    {
+      "epoch": 0.2086015967035797,
+      "grad_norm": 0.06416132017781931,
+      "learning_rate": 9.86995943049533e-05,
+      "loss": 0.0074,
+      "num_input_tokens_seen": 4043520,
+      "step": 405
+    },
+    {
+      "epoch": 0.2111769250579449,
+      "grad_norm": 0.04809215529224866,
+      "learning_rate": 9.864392335627117e-05,
+      "loss": 0.0375,
+      "num_input_tokens_seen": 4093440,
+      "step": 410
+    },
+    {
+      "epoch": 0.21375225341231008,
+      "grad_norm": 0.40943321947836153,
+      "learning_rate": 9.858710197162721e-05,
+      "loss": 0.0217,
+      "num_input_tokens_seen": 4143360,
+      "step": 415
+    },
+    {
+      "epoch": 0.21632758176667524,
+      "grad_norm": 0.08069011480339518,
+      "learning_rate": 9.852913149485556e-05,
+      "loss": 0.016,
+      "num_input_tokens_seen": 4193280,
+      "step": 420
+    },
+    {
+      "epoch": 0.21890291012104043,
+      "grad_norm": 1.806585526467194,
+      "learning_rate": 9.847001329696653e-05,
+      "loss": 0.0832,
+      "num_input_tokens_seen": 4243200,
+      "step": 425
+    },
+    {
+      "epoch": 0.22147823847540563,
+      "grad_norm": 3.122914329597603,
+      "learning_rate": 9.840974877611422e-05,
+      "loss": 0.0444,
+      "num_input_tokens_seen": 4293120,
+      "step": 430
+    },
+    {
+      "epoch": 0.2240535668297708,
+      "grad_norm": 1.0101510644418257,
+      "learning_rate": 9.834833935756344e-05,
+      "loss": 0.0465,
+      "num_input_tokens_seen": 4343040,
+      "step": 435
+    },
+    {
+      "epoch": 0.22662889518413598,
+      "grad_norm": 1.7770164472545809,
+      "learning_rate": 9.828578649365601e-05,
+      "loss": 0.0428,
+      "num_input_tokens_seen": 4392960,
+      "step": 440
+    },
+    {
+      "epoch": 0.22920422353850115,
+      "grad_norm": 0.9798524063329249,
+      "learning_rate": 9.822209166377635e-05,
+      "loss": 0.02,
+      "num_input_tokens_seen": 4442880,
+      "step": 445
+    },
+    {
+      "epoch": 0.23177955189286634,
+      "grad_norm": 0.22305429096692395,
+      "learning_rate": 9.815725637431662e-05,
+      "loss": 0.0157,
+      "num_input_tokens_seen": 4492800,
+      "step": 450
+    },
+    {
+      "epoch": 0.23177955189286634,
+      "eval_loss": 0.006690301466733217,
+      "eval_runtime": 18.7725,
+      "eval_samples_per_second": 3.196,
+      "eval_steps_per_second": 0.799,
+      "num_input_tokens_seen": 4492800,
+      "step": 450
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen": 3993600,
+  "num_input_tokens_seen": 4492800,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -738,7 +827,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 263464664956928.0,
+  "total_flos": 296404644528128.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null