ben81828 committed (verified)
Commit 55ebf06 · 1 Parent(s): 9a520fc

Training in progress, step 1600, checkpoint

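The changed and added files below are Git LFS pointer stubs: each checkpoint artifact is stored in the repository as a three-line text file (spec version, sha256 object id, byte size), while the actual weights and optimizer states live in LFS storage. As a rough illustration of that layout (not part of the commit itself), a pointer such as the adapter_model.safetensors one could be read with a short Python sketch:

# Minimal sketch: parse a Git LFS pointer file of the
# "version / oid sha256:<hex> / size <bytes>" form shown in this diff.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return {
        "version": fields.get("version"),
        "oid": fields.get("oid", "").removeprefix("sha256:"),  # bare hex digest
        "size": int(fields.get("size", "0")),                  # size in bytes
    }

# Example (path taken from the diff below):
# parse_lfs_pointer("last-checkpoint/adapter_model.safetensors")
# -> {"version": "https://git-lfs.github.com/spec/v1", "oid": "7346...", "size": 29034840}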
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:364d970c0898c9aa3378ce697d8c156c845bc89c410b474424ed0f6f28d86575
+ oid sha256:73467ea1ed97e925adf2917879e548677b9d96e9d65d80d0ea25c65250831ebe
  size 29034840
last-checkpoint/global_step1600/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db44c651d1205a4ffc6b64d1bda33a96f664bfb88dbb1ca8f4e70efc85551158
+ size 43429616
last-checkpoint/global_step1600/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9236952598c5fdc3e6cc6955a86a2d8e019c1947aab171d15d9134eed78171b
+ size 43429616
last-checkpoint/global_step1600/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5cb6a6e411c76e9260f78617d1be8fa0f2ea549caf0500e151fbf5e2cfdf405f
+ size 43429616
last-checkpoint/global_step1600/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a484aa770157fce9e258d8ab3eba9c016ae0889b35b3110f180f3d868e26a7d6
+ size 43429616
last-checkpoint/global_step1600/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d4afb4aaf2fe09b00f009636090ac8140a92a92061b0beb56860dbb1563a322
+ size 637299
last-checkpoint/global_step1600/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:99752500f7623dfbbb0b804e1c22e1cbd188635e5967f6ff430fde200bbb865f
+ size 637171
last-checkpoint/global_step1600/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf3c8ffb4a388a7c685af82e29e62c38dac41b19f00d659db4227bfe73f04b08
+ size 637171
last-checkpoint/global_step1600/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bab2075b924174a0c53aaeb6926f645d6f84f031f1c7a5a84a1e25330a267f11
+ size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step1550
+ global_step1600
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7f1e48a120d69830576f7b582aa6cc46f0ca41d30015a7a674eaec3dcdfc0f09
+ oid sha256:9279ed4b01716237e789d2631c1f29bc5d43c5633c014d4401de21b672c1b355
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4dbabb9273d3983e52a4a981b5f60f8c2e19da375765d05bb9f2caad284b9652
+ oid sha256:ca1990d68e57c70df5c56d395dd3f3befbe07b380521f4144677c20f6fe2a3eb
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:554ac925bb9c9ea292b7a41caac1cf75285511cf8aa440f37090891ee457a178
+ oid sha256:e0790066885525e1b9a9390a40ae27abd57abb47f031abface27890732f9e684
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5be5e00123fc0a321e41599b50e07be02f4c165504c601192e5c73f5f5437c30
+ oid sha256:1325a2034fe48ebad4f00ac8a2b32ab5c4c43c2497712169a8e3b1112363d916
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ff20f7ad4c3f084d2ca3cdf144a9c716a7288c4cbc23f49744772554732ab7ea
+ oid sha256:62282c037a983d19544e509ed880c9744baa4fc67a0800fdb043c257f3c8ac9a
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.28714123368263245,
  "best_model_checkpoint": "saves/CADICA_qwenvl_direction_then_DetectAndClassify_scale6/lora/sft/checkpoint-1550",
- "epoch": 0.45790251107828656,
+ "epoch": 0.4726735598227474,
  "eval_steps": 50,
- "global_step": 1550,
+ "global_step": 1600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -2766,11 +2766,100 @@
  "eval_steps_per_second": 0.787,
  "num_input_tokens_seen": 16091960,
  "step": 1550
+ },
+ {
+ "epoch": 0.45937961595273263,
+ "grad_norm": 2.40581004555412,
+ "learning_rate": 9.143472182578547e-05,
+ "loss": 0.3501,
+ "num_input_tokens_seen": 16143672,
+ "step": 1555
+ },
+ {
+ "epoch": 0.4608567208271787,
+ "grad_norm": 4.013146156526438,
+ "learning_rate": 9.136624370500554e-05,
+ "loss": 0.2684,
+ "num_input_tokens_seen": 16195776,
+ "step": 1560
+ },
+ {
+ "epoch": 0.4623338257016248,
+ "grad_norm": 11.075749009255755,
+ "learning_rate": 9.129751879363052e-05,
+ "loss": 0.3294,
+ "num_input_tokens_seen": 16247752,
+ "step": 1565
+ },
+ {
+ "epoch": 0.4638109305760709,
+ "grad_norm": 1.5049575714077101,
+ "learning_rate": 9.122854750167254e-05,
+ "loss": 0.2906,
+ "num_input_tokens_seen": 16300680,
+ "step": 1570
+ },
+ {
+ "epoch": 0.465288035450517,
+ "grad_norm": 4.448041589727679,
+ "learning_rate": 9.115933024061365e-05,
+ "loss": 0.3498,
+ "num_input_tokens_seen": 16352000,
+ "step": 1575
+ },
+ {
+ "epoch": 0.4667651403249631,
+ "grad_norm": 9.574179858800763,
+ "learning_rate": 9.108986742340331e-05,
+ "loss": 0.3262,
+ "num_input_tokens_seen": 16403784,
+ "step": 1580
+ },
+ {
+ "epoch": 0.46824224519940916,
+ "grad_norm": 5.326539404357799,
+ "learning_rate": 9.102015946445601e-05,
+ "loss": 0.318,
+ "num_input_tokens_seen": 16455080,
+ "step": 1585
+ },
+ {
+ "epoch": 0.46971935007385524,
+ "grad_norm": 17.72769299345548,
+ "learning_rate": 9.095020677964874e-05,
+ "loss": 0.3257,
+ "num_input_tokens_seen": 16507712,
+ "step": 1590
+ },
+ {
+ "epoch": 0.4711964549483013,
+ "grad_norm": 6.912966772855359,
+ "learning_rate": 9.08800097863185e-05,
+ "loss": 0.3253,
+ "num_input_tokens_seen": 16559392,
+ "step": 1595
+ },
+ {
+ "epoch": 0.4726735598227474,
+ "grad_norm": 3.3138006639125344,
+ "learning_rate": 9.080956890325985e-05,
+ "loss": 0.3879,
+ "num_input_tokens_seen": 16609960,
+ "step": 1600
+ },
+ {
+ "epoch": 0.4726735598227474,
+ "eval_loss": 0.3135533034801483,
+ "eval_runtime": 19.3013,
+ "eval_samples_per_second": 3.109,
+ "eval_steps_per_second": 0.777,
+ "num_input_tokens_seen": 16609960,
+ "step": 1600
  }
  ],
  "logging_steps": 5,
  "max_steps": 6770,
- "num_input_tokens_seen": 16091960,
+ "num_input_tokens_seen": 16609960,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -2785,7 +2874,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1061592778407936.0,
+ "total_flos": 1095801178423296.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null