ben81828 committed · verified
Commit ab4de7c · 1 Parent(s): 95f69c8

Training in progress, step 400, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7517520116a90df96714c96a11fa862e42a0dd35dffeff98e7454aeee1e7c1a9
+oid sha256:0775f6cac07758d2d22e3033dca03d974b5f0490a459fbe8445ae5bdd8abef6c
 size 29034840
last-checkpoint/global_step400/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9bccbcfbd67fd78914b10781b648f703521ea0c522577105403fa269823f28b0
+size 43429616
last-checkpoint/global_step400/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2579426f94f44a9e4d254d1b04482e54faf9cd65d885e91fc64af42098ccb2b
+size 43429616
last-checkpoint/global_step400/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:88dea00d03c8e7cfa9608fbe011e645ab0293529cd8a405b03cac3b7a457bbbf
+size 43429616
last-checkpoint/global_step400/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fdaec349df36ab8e309e3aee2c2a94d67b90848e8e7ac4db08c3cd5714b78825
+size 43429616
last-checkpoint/global_step400/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5156b71863558eb940883c2a53abc6ea0ecf35171e1c6ef6fd48a17669031953
+size 637299
last-checkpoint/global_step400/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c22d7604f5e9a963cc0beedf938177c8d8489af65df205ebac4c98a1b8ec451
+size 637171
last-checkpoint/global_step400/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3741599f9c6a30d8f169f2bdf2dbef5196058896ed7f387c615c11e68675c1bf
+size 637171
last-checkpoint/global_step400/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:945fe57eb537c5ffd3d2e0d80ed5dfc750e49073d3d12e111bfb6c6bd93acce2
+size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step350
+global_step400
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ee97cd82dba4d425fdd8dfdb88d4a43d0d4b1979b5c81ab4a24914fb00d4f332
+oid sha256:543ef05f530d40ee20b8d626b07a69b86597aca643e48897571062f973efe84f
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:91dad95440fb85dc4a31745642117165c1a72173b2e389679ea8c0b2b6fcd7e2
+oid sha256:7a23f732e43838ce0398d2636885ac16badbb9bcbc04d1406069ba3027bc5ae0
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:98698326b023c2af02c94f18726ce52c7f7a6fe290734dd7edbe99bc807fcfa0
+oid sha256:e10cce960e7068b051c05e35ed6160656be9091c63f13796ac2ed7e9c84e5a72
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:708e7c6b5bf8a327e688779ebc08830ce249928bcb1ff5c82b1b1d0bf6d2660b
+oid sha256:b6f6049e212b1df5cefc5d834afcd8cc052c73f1457449e9fe8a38d514f54078
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ff02966f5e9ff1944d3a06acaf420bd5bf127f85a91f6b1b2b1649b32fc1fd58
+oid sha256:32762cec872a9c6d771fb0f76b3f72991fda55aee1494a130c6c6b449c48a001
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.7044599056243896,
-  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_detect_scale4/lora/sft/checkpoint-350",
-  "epoch": 0.09013649240278135,
+  "best_metric": 0.6847750544548035,
+  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_detect_scale4/lora/sft/checkpoint-400",
+  "epoch": 0.10301313417460727,
   "eval_steps": 50,
-  "global_step": 350,
+  "global_step": 400,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -630,11 +630,100 @@
       "eval_steps_per_second": 0.784,
       "num_input_tokens_seen": 3673152,
       "step": 350
+    },
+    {
+      "epoch": 0.09142415657996394,
+      "grad_norm": 0.47530311368359207,
+      "learning_rate": 9.91927536630402e-05,
+      "loss": 0.6778,
+      "num_input_tokens_seen": 3725296,
+      "step": 355
+    },
+    {
+      "epoch": 0.09271182075714654,
+      "grad_norm": 0.38913022785688944,
+      "learning_rate": 9.91486549841951e-05,
+      "loss": 0.6857,
+      "num_input_tokens_seen": 3777552,
+      "step": 360
+    },
+    {
+      "epoch": 0.09399948493432912,
+      "grad_norm": 0.4834773141333328,
+      "learning_rate": 9.91033939324107e-05,
+      "loss": 0.7184,
+      "num_input_tokens_seen": 3830200,
+      "step": 365
+    },
+    {
+      "epoch": 0.09528714911151172,
+      "grad_norm": 0.5862045807150876,
+      "learning_rate": 9.905697157811761e-05,
+      "loss": 0.7196,
+      "num_input_tokens_seen": 3883200,
+      "step": 370
+    },
+    {
+      "epoch": 0.09657481328869431,
+      "grad_norm": 0.4576971522205563,
+      "learning_rate": 9.900938901921131e-05,
+      "loss": 0.6914,
+      "num_input_tokens_seen": 3935576,
+      "step": 375
+    },
+    {
+      "epoch": 0.0978624774658769,
+      "grad_norm": 0.49551517524520683,
+      "learning_rate": 9.896064738102635e-05,
+      "loss": 0.6681,
+      "num_input_tokens_seen": 3987624,
+      "step": 380
+    },
+    {
+      "epoch": 0.09915014164305949,
+      "grad_norm": 0.8198390819787913,
+      "learning_rate": 9.891074781630966e-05,
+      "loss": 0.6723,
+      "num_input_tokens_seen": 4039680,
+      "step": 385
+    },
+    {
+      "epoch": 0.10043780582024209,
+      "grad_norm": 0.7034626469978683,
+      "learning_rate": 9.885969150519331e-05,
+      "loss": 0.6498,
+      "num_input_tokens_seen": 4091216,
+      "step": 390
+    },
+    {
+      "epoch": 0.10172546999742467,
+      "grad_norm": 0.8838075623197742,
+      "learning_rate": 9.88074796551666e-05,
+      "loss": 0.7311,
+      "num_input_tokens_seen": 4144264,
+      "step": 395
+    },
+    {
+      "epoch": 0.10301313417460727,
+      "grad_norm": 0.7342758386202114,
+      "learning_rate": 9.875411350104744e-05,
+      "loss": 0.7089,
+      "num_input_tokens_seen": 4197072,
+      "step": 400
+    },
+    {
+      "epoch": 0.10301313417460727,
+      "eval_loss": 0.6847750544548035,
+      "eval_runtime": 37.9238,
+      "eval_samples_per_second": 3.164,
+      "eval_steps_per_second": 0.791,
+      "num_input_tokens_seen": 4197072,
+      "step": 400
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen": 3673152,
+  "num_input_tokens_seen": 4197072,
   "num_train_epochs": 1,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -649,7 +738,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 242291271794688.0,
+  "total_flos": 276835360047104.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null