ben81828 committed (verified)
Commit f280c1a · 1 Parent(s): a0de44e

Training in progress, step 2100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4709ac7890db563cedfc9593a73be6ffe73b6111dd9fd8fdc9d8017c84f30b92
+ oid sha256:a24a4c37b80664a17b4612dbf43f560521041adc8bc0cc8d0182857100b9274e
  size 29034840
last-checkpoint/global_step2100/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:31ed30ed84b974a6cf4cd074f31ea2b62dfbdf0a8cab4c7a836398205dee97cb
+ size 43429616
last-checkpoint/global_step2100/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b8f2daa244be2c27823ce1258727d687c9f82aae1ffed91aa74574e55eaf02eb
+ size 43429616
last-checkpoint/global_step2100/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:837eefbed5e44ee11d16a7a2d903a8c3aef22ba2989f963648f90d605d907e2e
+ size 43429616
last-checkpoint/global_step2100/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39bc0b0b4105b53ca3d3a240bbdde592787c13a1b9e96060d1c0cb6ebfb570b3
+ size 43429616
last-checkpoint/global_step2100/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eeda14a8789d1fd88f7eebe00675f2572359184439f7c4c60b672fbdb030c435
+ size 637299
last-checkpoint/global_step2100/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aec295f73f879f7574e82192c1ccc1329fd81288043991b95b358440fd8f90f7
+ size 637171
last-checkpoint/global_step2100/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:abae9bad6bb987038dcd73f0e19d994e45cae40e20cd055d436706a0b5fe6064
+ size 637171
last-checkpoint/global_step2100/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:86bfaf44e1fb94ad73e3fc28120b85b71fc1b7b472586a7f8b29d56ad2e71f3c
+ size 637171
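The eight `global_step2100/*.pt` files above are DeepSpeed ZeRO shards: one optimizer-state and one model-state file per data-parallel rank. A minimal sketch of merging them into a single fp32 state dict with DeepSpeed's `zero_to_fp32` utility, assuming a local copy of this checkpoint laid out as in this commit (a `last-checkpoint/` directory containing the `latest` file and the `global_step2100/` shards):

```python
# Minimal sketch (not part of this repo): consolidate the per-rank ZeRO shards
# written at this step into one fp32 state dict using DeepSpeed's helper.
# "last-checkpoint" and "global_step2100" are the paths added in this commit;
# adjust them to wherever the checkpoint is downloaded locally.
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

state_dict = get_fp32_state_dict_from_zero_checkpoint(
    "last-checkpoint",       # folder holding the "latest" file and global_step2100/
    tag="global_step2100",   # shard directory for this training step
)
print(f"consolidated {len(state_dict)} parameter tensors")
```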
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step2050
+ global_step2100
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f4a695de1db3382235d3f8ae213672491aa2fdc3ba3be96403a089077ad3c2bf
+ oid sha256:5d1ddd7e7b4dc44903837b0414e4659f8383cd8f16b41dd396d4eaf5b9829f79
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c2025b170fa1d4693537c2d73f89a6495c58940d033678742a74810c0154a6a7
+ oid sha256:eeddfb77fe4d3b495c4e08307767e08df90e96ef241c3eb80d5f75adec393e80
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0865c4d7d921b23a22c91c2f3b2c6cca03dae0eb27c43dee575c9602605c94d6
+ oid sha256:9e6260196fabb00061b1f1c8de6288382570dc14d02d2aa308050ca858880a97
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5f4ec19d9df4417359523e8cc4d27875614c1021ebcc6391b27632aa7897b7ea
+ oid sha256:ea023bc5b1def54e0c49389175c0fae812f5f764c502525ce775d993d5ab2c03
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4ce13f8f9c6f0eafa8c41a1ffb7cafd5f8d08bfe9e6dca6219533d00474bd826
+ oid sha256:d3830a1cbcf9a648c99ec3b1dc2ae21133bcdd817f5603e9cff209ca0e0afc75
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.28714123368263245,
  "best_model_checkpoint": "saves/CADICA_qwenvl_direction_then_DetectAndClassify_scale6/lora/sft/checkpoint-1550",
- "epoch": 0.6056129985228951,
+ "epoch": 0.620384047267356,
  "eval_steps": 50,
- "global_step": 2050,
+ "global_step": 2100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -3656,11 +3656,100 @@
  "eval_steps_per_second": 0.776,
  "num_input_tokens_seen": 21291664,
  "step": 2050
+ },
+ {
+ "epoch": 0.6070901033973413,
+ "grad_norm": 1.4664266327354816,
+ "learning_rate": 8.343716407399019e-05,
+ "loss": 0.2338,
+ "num_input_tokens_seen": 21344232,
+ "step": 2055
+ },
+ {
+ "epoch": 0.6085672082717873,
+ "grad_norm": 1.440796404258189,
+ "learning_rate": 8.334626387025197e-05,
+ "loss": 0.3026,
+ "num_input_tokens_seen": 21396160,
+ "step": 2060
+ },
+ {
+ "epoch": 0.6100443131462334,
+ "grad_norm": 3.112422612662143,
+ "learning_rate": 8.325516472303792e-05,
+ "loss": 0.2898,
+ "num_input_tokens_seen": 21448032,
+ "step": 2065
+ },
+ {
+ "epoch": 0.6115214180206795,
+ "grad_norm": 7.791629500233522,
+ "learning_rate": 8.316386717584463e-05,
+ "loss": 0.3265,
+ "num_input_tokens_seen": 21499144,
+ "step": 2070
+ },
+ {
+ "epoch": 0.6129985228951256,
+ "grad_norm": 7.664415048315268,
+ "learning_rate": 8.307237177335239e-05,
+ "loss": 0.2513,
+ "num_input_tokens_seen": 21551328,
+ "step": 2075
+ },
+ {
+ "epoch": 0.6144756277695717,
+ "grad_norm": 7.353575088308837,
+ "learning_rate": 8.298067906142182e-05,
+ "loss": 0.2864,
+ "num_input_tokens_seen": 21603800,
+ "step": 2080
+ },
+ {
+ "epoch": 0.6159527326440177,
+ "grad_norm": 6.877908422150131,
+ "learning_rate": 8.288878958709072e-05,
+ "loss": 0.243,
+ "num_input_tokens_seen": 21656480,
+ "step": 2085
+ },
+ {
+ "epoch": 0.6174298375184638,
+ "grad_norm": 7.551272364447902,
+ "learning_rate": 8.279670389857079e-05,
+ "loss": 0.2711,
+ "num_input_tokens_seen": 21708824,
+ "step": 2090
+ },
+ {
+ "epoch": 0.6189069423929099,
+ "grad_norm": 1.5109531594598573,
+ "learning_rate": 8.27044225452443e-05,
+ "loss": 0.2475,
+ "num_input_tokens_seen": 21760744,
+ "step": 2095
+ },
+ {
+ "epoch": 0.620384047267356,
+ "grad_norm": 10.98724867961312,
+ "learning_rate": 8.26119460776609e-05,
+ "loss": 0.1826,
+ "num_input_tokens_seen": 21813984,
+ "step": 2100
+ },
+ {
+ "epoch": 0.620384047267356,
+ "eval_loss": 0.5386325716972351,
+ "eval_runtime": 19.2817,
+ "eval_samples_per_second": 3.112,
+ "eval_steps_per_second": 0.778,
+ "num_input_tokens_seen": 21813984,
+ "step": 2100
  }
  ],
  "logging_steps": 5,
  "max_steps": 6770,
- "num_input_tokens_seen": 21291664,
+ "num_input_tokens_seen": 21813984,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -3675,7 +3764,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1404747848351744.0,
+ "total_flos": 1439243507597312.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null