ben81828 committed on
Commit f1f0c4d · verified · 1 Parent(s): b318390

Training in progress, step 1650, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:73467ea1ed97e925adf2917879e548677b9d96e9d65d80d0ea25c65250831ebe
+oid sha256:1a1114471ad61d66a88788a2005b7be221cdbe9e2fd3f43e9a2185e89b6fad6f
 size 29034840
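
The adapter weights themselves live in Git LFS, so the diff above only rewrites the three-line pointer file: the sha256 oid changes with each checkpoint while the payload size stays at 29034840 bytes. Below is a minimal sketch of reading such a pointer, assuming a checked-out pointer file at the path shown above and a hypothetical helper name `parse_lfs_pointer`:

```python
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    """Split a Git LFS pointer file ("key value" per line) into a dict."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

# Hypothetical usage; only meaningful while the file is still an LFS pointer
# (i.e. before `git lfs pull` replaces it with the 29034840-byte binary):
# info = parse_lfs_pointer("last-checkpoint/adapter_model.safetensors")
# print(info["oid"], info["size"])
```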
last-checkpoint/global_step1650/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5add4929f3324c01b1475ea9b18eccfde6d5e29582e4ea048971d60f47d99b0d
+size 43429616
last-checkpoint/global_step1650/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e859eadc0d763a55600f40d12de6bd95bd61cccbe158c79627dbe4cc63ceabb
+size 43429616
last-checkpoint/global_step1650/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:28f3696c6a828086b5118a7bd50fee1f0dbb10b579094d2cc5ae42f2a94ccadd
+size 43429616
last-checkpoint/global_step1650/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3ccf96f842c5cf5843174e479e5dcff218568e900811adafc4334b0a0b5b143
+size 43429616
last-checkpoint/global_step1650/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4c79466c87dc7a7bb2278fbdb456e69e6c6b0a01149fe0952fd362a1881b0c3
+size 637299
last-checkpoint/global_step1650/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c4009b72c3e7da18b4a07fc0d3d8de5ac6d846834d93ff9946ac1044932cdd63
+size 637171
last-checkpoint/global_step1650/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a7cde2a2fb17884fd88468a6046779089bf8060a77027c24f561b10b27cca30
+size 637171
last-checkpoint/global_step1650/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9f660827bea0980ccb09f606308a98aeb8817e9ee50af4a17e22e579fd1e42d
+size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step1600
+global_step1650
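
The `latest` file follows the DeepSpeed checkpoint convention: a single line naming the global-step directory that holds the current optimizer and model state shards, here advanced from global_step1600 to global_step1650. A small sketch for resolving it, assuming the last-checkpoint/ layout shown in this commit:

```python
from pathlib import Path

ckpt_dir = Path("last-checkpoint")               # layout assumed from this commit
tag = (ckpt_dir / "latest").read_text().strip()  # e.g. "global_step1650"
shard_dir = ckpt_dir / tag                       # holds the *_optim_states.pt / *_model_states.pt shards
print(tag, sorted(p.name for p in shard_dir.glob("*.pt")))
```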
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9279ed4b01716237e789d2631c1f29bc5d43c5633c014d4401de21b672c1b355
+oid sha256:a90384755f5b036b42b1a465b39dbf24a925a02c04294f9d684bc1de7f4db1e5
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ca1990d68e57c70df5c56d395dd3f3befbe07b380521f4144677c20f6fe2a3eb
+oid sha256:7621b41e55056218f97d5b32ae116de3304a677b9f27b6a62170d83a2bbff176
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e0790066885525e1b9a9390a40ae27abd57abb47f031abface27890732f9e684
+oid sha256:997e9debadfd125b5c8b66ee6dd79ced3d40d353ff9250475f3814fd950012a6
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1325a2034fe48ebad4f00ac8a2b32ab5c4c43c2497712169a8e3b1112363d916
+oid sha256:0f0256032419959580948d742425f66782bc8eb029126a091669a42c6ee0eba4
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:62282c037a983d19544e509ed880c9744baa4fc67a0800fdb043c257f3c8ac9a
+oid sha256:40442cc35519e6454a07ba94c151e6fd6557a2a4ddd9970fc5771865c93790aa
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.28714123368263245,
   "best_model_checkpoint": "saves/CADICA_qwenvl_direction_then_DetectAndClassify_scale6/lora/sft/checkpoint-1550",
-  "epoch": 0.4726735598227474,
+  "epoch": 0.4874446085672083,
   "eval_steps": 50,
-  "global_step": 1600,
+  "global_step": 1650,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2855,11 +2855,100 @@
       "eval_steps_per_second": 0.777,
       "num_input_tokens_seen": 16609960,
       "step": 1600
+    },
+    {
+      "epoch": 0.4741506646971935,
+      "grad_norm": 11.101854016846067,
+      "learning_rate": 9.07388845507224e-05,
+      "loss": 0.3644,
+      "num_input_tokens_seen": 16661440,
+      "step": 1605
+    },
+    {
+      "epoch": 0.4756277695716396,
+      "grad_norm": 12.638527847469565,
+      "learning_rate": 9.066795715040825e-05,
+      "loss": 0.2733,
+      "num_input_tokens_seen": 16714200,
+      "step": 1610
+    },
+    {
+      "epoch": 0.4771048744460857,
+      "grad_norm": 2.7966971298188694,
+      "learning_rate": 9.059678712546963e-05,
+      "loss": 0.3063,
+      "num_input_tokens_seen": 16766904,
+      "step": 1615
+    },
+    {
+      "epoch": 0.47858197932053176,
+      "grad_norm": 8.210735089414575,
+      "learning_rate": 9.052537490050614e-05,
+      "loss": 0.3769,
+      "num_input_tokens_seen": 16818168,
+      "step": 1620
+    },
+    {
+      "epoch": 0.48005908419497784,
+      "grad_norm": 8.942406690882898,
+      "learning_rate": 9.045372090156243e-05,
+      "loss": 0.3089,
+      "num_input_tokens_seen": 16869952,
+      "step": 1625
+    },
+    {
+      "epoch": 0.4815361890694239,
+      "grad_norm": 11.68569569670719,
+      "learning_rate": 9.038182555612551e-05,
+      "loss": 0.2953,
+      "num_input_tokens_seen": 16922608,
+      "step": 1630
+    },
+    {
+      "epoch": 0.48301329394387,
+      "grad_norm": 17.89504289247946,
+      "learning_rate": 9.030968929312231e-05,
+      "loss": 0.3286,
+      "num_input_tokens_seen": 16974824,
+      "step": 1635
+    },
+    {
+      "epoch": 0.4844903988183161,
+      "grad_norm": 1.1612341646192372,
+      "learning_rate": 9.023731254291705e-05,
+      "loss": 0.3552,
+      "num_input_tokens_seen": 17026088,
+      "step": 1640
+    },
+    {
+      "epoch": 0.4859675036927622,
+      "grad_norm": 4.624302477495485,
+      "learning_rate": 9.016469573730869e-05,
+      "loss": 0.326,
+      "num_input_tokens_seen": 17077904,
+      "step": 1645
+    },
+    {
+      "epoch": 0.4874446085672083,
+      "grad_norm": 1.126074328380494,
+      "learning_rate": 9.009183930952836e-05,
+      "loss": 0.2698,
+      "num_input_tokens_seen": 17130896,
+      "step": 1650
+    },
+    {
+      "epoch": 0.4874446085672083,
+      "eval_loss": 0.40201568603515625,
+      "eval_runtime": 19.4814,
+      "eval_samples_per_second": 3.08,
+      "eval_steps_per_second": 0.77,
+      "num_input_tokens_seen": 17130896,
+      "step": 1650
     }
   ],
   "logging_steps": 5,
   "max_steps": 6770,
-  "num_input_tokens_seen": 16609960,
+  "num_input_tokens_seen": 17130896,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -2874,7 +2963,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1095801178423296.0,
+  "total_flos": 1130167189372928.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null