ben81828 committed
Commit 9473378
1 Parent(s): 4571284

Training in progress, step 1600, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c433a8aa8f1650b5f131001712aa61d13d2d36f41df4d65d21c83dee89eb91ab
+oid sha256:f11513d52bcbabbebdae5c22382754c386d07651a25539e1cdcc0f4b4982a0f7
 size 18516456
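
Every file in this commit is stored as a Git LFS pointer: a three-line stub giving the spec version, the blob's sha256 oid, and its size in bytes, as in the diff above. As a minimal sketch, assuming the actual binary has been fetched locally, a pointer can be checked against the downloaded blob like this; parse_lfs_pointer and the local path are illustrative, not part of the repo:

```python
# Sketch: verify a downloaded blob against its Git LFS pointer (paths are illustrative).
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_text: str) -> tuple[str, int]:
    """Parse the three-line pointer format used throughout this commit."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    return fields["oid"].split(":", 1)[1], int(fields["size"])

def verify_blob(pointer_text: str, blob_path: Path) -> bool:
    """True if the local file matches the pointer's sha256 and size."""
    oid, size = parse_lfs_pointer(pointer_text)
    data = blob_path.read_bytes()
    return len(data) == size and hashlib.sha256(data).hexdigest() == oid

# The new adapter pointer from this commit, checked against a local download.
pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:f11513d52bcbabbebdae5c22382754c386d07651a25539e1cdcc0f4b4982a0f7
size 18516456"""
print(verify_blob(pointer, Path("last-checkpoint/adapter_model.safetensors")))
```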
last-checkpoint/global_step1600/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:46463053469e5c128d18b59faffc7aa119512da4469fc23c9640b1c123d5397f
+size 27700976
last-checkpoint/global_step1600/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f0004c0959e3595e03710b21e4bd4fa3ba2c02502ed83372000018de6f50e1d3
+size 27700976
last-checkpoint/global_step1600/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ee1ed10ee81a3036175bcbe2b80223be8b540d8636ada699171bfa75f66a1c2
+size 27700976
last-checkpoint/global_step1600/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b54a8cff3c72b28b47e16db62ced38ea1cdd159c1f590fed439d1fdc6f9878a
+size 27700976
last-checkpoint/global_step1600/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:26acda4de0790975ebc9d4f0c12f3aebbcbccfe2cfd61f8c85e0ea9e9d99950a
+size 411571
last-checkpoint/global_step1600/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:760fc496088881394b803189c6aa215917ad5a1bb62ddd42914e38a5d2948529
+size 411507
last-checkpoint/global_step1600/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cdf8aa6a9173571e234a2795a5345c292d9c46961b42871d2a86e31748f61f13
+size 411507
last-checkpoint/global_step1600/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ebdb6d3a5f2a383b8517fde2dce340179a2adc82a0a53d88e7375c7669fd5e7
+size 411507
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step1550
+global_step1600
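
The global_step1600 directory added above holds the DeepSpeed ZeRO shards for this step: one bf16 optimizer-state file and one model-states file per data-parallel rank (ranks 0-3), and the latest file now points at the global_step1600 tag. As a hedged sketch, assuming DeepSpeed is installed and the full last-checkpoint directory has been downloaded, the shards can be merged into a single fp32 state dict with DeepSpeed's zero_to_fp32 helper; nothing in this commit guarantees that setup, so treat the snippet as illustrative:

```python
# Sketch: consolidate the ZeRO-partitioned shards committed under global_step1600.
# Assumes `pip install deepspeed` and a complete local copy of last-checkpoint/.
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# The tag matches the `latest` file updated in this commit.
state_dict = get_fp32_state_dict_from_zero_checkpoint("last-checkpoint", tag="global_step1600")
print(f"consolidated {len(state_dict)} parameter tensors")
```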
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7f1e48a120d69830576f7b582aa6cc46f0ca41d30015a7a674eaec3dcdfc0f09
+oid sha256:9279ed4b01716237e789d2631c1f29bc5d43c5633c014d4401de21b672c1b355
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4dbabb9273d3983e52a4a981b5f60f8c2e19da375765d05bb9f2caad284b9652
+oid sha256:ca1990d68e57c70df5c56d395dd3f3befbe07b380521f4144677c20f6fe2a3eb
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:554ac925bb9c9ea292b7a41caac1cf75285511cf8aa440f37090891ee457a178
+oid sha256:e0790066885525e1b9a9390a40ae27abd57abb47f031abface27890732f9e684
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5be5e00123fc0a321e41599b50e07be02f4c165504c601192e5c73f5f5437c30
+oid sha256:1325a2034fe48ebad4f00ac8a2b32ab5c4c43c2497712169a8e3b1112363d916
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8eb069683e7f84aa36296476346fc663361d9b05ad7b09b71f22f44afdb0ea48
+oid sha256:f2487a6c511ed8055eb0842d87966b09ae8b62c1b4514727282ca413d6e9c4e2
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.632923424243927,
-  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4_frozenVision/lora/sft/checkpoint-1500",
-  "epoch": 0.7983517898532063,
+  "best_metric": 0.6319106221199036,
+  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4_frozenVision/lora/sft/checkpoint-1600",
+  "epoch": 0.8241050733968581,
   "eval_steps": 50,
-  "global_step": 1550,
+  "global_step": 1600,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2766,11 +2766,100 @@
       "eval_steps_per_second": 0.936,
       "num_input_tokens_seen": 18129304,
       "step": 1550
+    },
+    {
+      "epoch": 0.8009271182075715,
+      "grad_norm": 2.9735507158511987,
+      "learning_rate": 6.109217817775139e-05,
+      "loss": 0.5681,
+      "num_input_tokens_seen": 18187728,
+      "step": 1555
+    },
+    {
+      "epoch": 0.8035024465619367,
+      "grad_norm": 3.6620315644598778,
+      "learning_rate": 6.085494954896156e-05,
+      "loss": 0.6292,
+      "num_input_tokens_seen": 18246192,
+      "step": 1560
+    },
+    {
+      "epoch": 0.8060777749163018,
+      "grad_norm": 4.03631122919402,
+      "learning_rate": 6.061746419901388e-05,
+      "loss": 0.6512,
+      "num_input_tokens_seen": 18304632,
+      "step": 1565
+    },
+    {
+      "epoch": 0.808653103270667,
+      "grad_norm": 4.0040288177360805,
+      "learning_rate": 6.0379727744471936e-05,
+      "loss": 0.5476,
+      "num_input_tokens_seen": 18363136,
+      "step": 1570
+    },
+    {
+      "epoch": 0.8112284316250322,
+      "grad_norm": 3.9448861517599996,
+      "learning_rate": 6.014174580783794e-05,
+      "loss": 0.5632,
+      "num_input_tokens_seen": 18421592,
+      "step": 1575
+    },
+    {
+      "epoch": 0.8138037599793974,
+      "grad_norm": 3.8400680048739435,
+      "learning_rate": 5.990352401741981e-05,
+      "loss": 0.6225,
+      "num_input_tokens_seen": 18480104,
+      "step": 1580
+    },
+    {
+      "epoch": 0.8163790883337626,
+      "grad_norm": 2.7981339113543284,
+      "learning_rate": 5.9665068007197976e-05,
+      "loss": 0.5801,
+      "num_input_tokens_seen": 18538600,
+      "step": 1585
+    },
+    {
+      "epoch": 0.8189544166881277,
+      "grad_norm": 4.290843515697908,
+      "learning_rate": 5.94263834166923e-05,
+      "loss": 0.6364,
+      "num_input_tokens_seen": 18597104,
+      "step": 1590
+    },
+    {
+      "epoch": 0.8215297450424929,
+      "grad_norm": 3.9001572117535566,
+      "learning_rate": 5.918747589082853e-05,
+      "loss": 0.6088,
+      "num_input_tokens_seen": 18655584,
+      "step": 1595
+    },
+    {
+      "epoch": 0.8241050733968581,
+      "grad_norm": 3.5623412341260363,
+      "learning_rate": 5.8948351079804875e-05,
+      "loss": 0.6564,
+      "num_input_tokens_seen": 18714072,
+      "step": 1600
+    },
+    {
+      "epoch": 0.8241050733968581,
+      "eval_loss": 0.6319106221199036,
+      "eval_runtime": 16.0199,
+      "eval_samples_per_second": 3.745,
+      "eval_steps_per_second": 0.936,
+      "num_input_tokens_seen": 18714072,
+      "step": 1600
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen": 18129304,
+  "num_input_tokens_seen": 18714072,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -2785,7 +2874,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1017997776650240.0,
+  "total_flos": 1050836028358656.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null