ben81828 committed · verified
Commit 4086ddb · 1 Parent(s): 2a80523

Training in progress, step 600, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ad0a41144b9d8eea3b3f1de8e9e5e7c14c303c31098798928859d47cbd861a53
+ oid sha256:d1513791e52117a6d639f05fdddf98b812e9b7c86a4830e79581dc7ae45e41a8
  size 18516456
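
The three lines above are a Git LFS pointer rather than the adapter weights themselves: the ~18 MB safetensors blob is resolved on checkout and identified by its `oid`. Below is a minimal sketch (not part of this repository) for checking a downloaded copy against the new pointer; the local path is an assumption about where a resolved clone places the file.

```python
# Hypothetical verification script; the path assumes a resolved local clone.
import hashlib

EXPECTED_OID = "d1513791e52117a6d639f05fdddf98b812e9b7c86a4830e79581dc7ae45e41a8"

with open("last-checkpoint/adapter_model.safetensors", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

# The digest of the resolved blob should equal the oid in the LFS pointer.
assert digest == EXPECTED_OID, f"checksum mismatch: {digest}"
print("adapter_model.safetensors matches the LFS pointer")
```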
last-checkpoint/global_step600/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e001b716b7761d5f26afeeeca60792842fe9e83b9ffa87219c288d1151b43cea
+ size 27700976
last-checkpoint/global_step600/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c980c157626ebe0b6572e765fb0ca4e03fdf830e1af619ed547c028b1adfc05f
+ size 27700976
last-checkpoint/global_step600/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e5e4e4bafd0c2065fb0b393c899470e5c04f884ad18922da69ffd600b76e537
+ size 27700976
last-checkpoint/global_step600/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be505a41635be61a02fd592e1b7127e678696e4df49f6079916360dd93072365
+ size 27700976
last-checkpoint/global_step600/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b052900c51a35b486cf1d169e3f9bb43bb347a08993f04f7d2bc876d77a5b629
+ size 411571
last-checkpoint/global_step600/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02b726a6dc7d1f23f8e05626afada1a90c9c1803525016cb7066d9182d7a16c3
+ size 411507
last-checkpoint/global_step600/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ac220097d807b06f51202705dab924ba89f125ce707bbea38c4bed916e8188fa
+ size 411507
last-checkpoint/global_step600/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7eb0263e2815855153236c96d52c7c09d14d37a34a30e5fc2a8bd3e9bb7ee1d7
+ size 411507
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step550
+ global_step600
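
The files added under `global_step600/` are DeepSpeed ZeRO shards: one bf16 optimizer-state partition and one model-state file per data-parallel rank (ranks 0–3), with `latest` now naming that directory as the tag to resume from. A minimal sketch for consolidating the shards into a single fp32 state dict, assuming a resolved local clone and DeepSpeed's standard `zero_to_fp32` helper:

```python
# Sketch only: consolidate the ZeRO-partitioned checkpoint saved at step 600.
# "last-checkpoint" is an assumed local path; the tag matches the `latest` file.
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

state_dict = get_fp32_state_dict_from_zero_checkpoint(
    "last-checkpoint", tag="global_step600"
)
print(f"consolidated {len(state_dict)} trainable parameter tensors")
```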
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ae78313eb528c8d3695eebaf4de3539bd0a0bc6ee18c66af1ee183442f1758a0
+ oid sha256:a81e3916b1392c4c49afb171dee5415c15f5a5a5af8749b28195fcfa0596699c
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1b38031f60d9e88601d369ef46bcdcf2b5b03f2cb4ba93853bcb2328df7ebb7c
+ oid sha256:9a781038dd714b87b8adb1aac8dbc8217ceb607428a992133954ad522365236e
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f58092375c93d237cd0e3149aecfbf83e2acdae46279e07a32920d01cb507e64
+ oid sha256:9446c3db15f382a5546f13622787fc99392a5e0bc8a9ca2da1838de7ab621a37
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:83cd4bbff9962da7ec6787fcea8d65df7096917f9a5902e249ba7aee8887fe5f
+ oid sha256:1f11e7a6b3faa884fc23044e3772ff9dd72c257f02e121665061e2a03d518bd9
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6e8b881f6464ee76e192f8a5dbebbec89a38d087d3502270b9c7e6038613f3b2
+ oid sha256:b76b388bede074656df32b92902ac42b965557bfee0c930366af07d8382b1b4a
  size 1064
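
The `rng_state_{0..3}.pth` files hold the per-rank random-number-generator states and `scheduler.pt` the learning-rate-scheduler state, which together let training resume deterministically from step 600. A small inspection sketch, assuming local paths in a resolved clone; the exact keys depend on the Trainer version that wrote the files:

```python
# Sketch only: peek at the resumption state saved alongside the weights.
import torch

# weights_only=False because these files contain plain Python objects,
# not just tensors (assumed paths in a resolved local clone).
rng_state = torch.load("last-checkpoint/rng_state_0.pth", weights_only=False)
scheduler_state = torch.load("last-checkpoint/scheduler.pt", weights_only=False)

print(sorted(rng_state.keys()))            # e.g. CPU/CUDA/NumPy generator states
print(scheduler_state.get("last_epoch"))   # scheduler step counter, if present
```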
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
    "best_metric": 0.8908902406692505,
    "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4_frozenVision/lora/sft/checkpoint-300",
-   "epoch": 0.28328611898017,
+   "epoch": 0.3090394025238218,
    "eval_steps": 50,
-   "global_step": 550,
+   "global_step": 600,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
@@ -986,11 +986,100 @@
      "eval_steps_per_second": 0.883,
      "num_input_tokens_seen": 6432936,
      "step": 550
+   },
+   {
+     "epoch": 0.28586144733453517,
+     "grad_norm": 0.5334970266367584,
+     "learning_rate": 9.653522193117013e-05,
+     "loss": 0.8981,
+     "num_input_tokens_seen": 6491400,
+     "step": 555
+   },
+   {
+     "epoch": 0.28843677568890036,
+     "grad_norm": 0.33261202813259866,
+     "learning_rate": 9.644573182726035e-05,
+     "loss": 0.9041,
+     "num_input_tokens_seen": 6549872,
+     "step": 560
+   },
+   {
+     "epoch": 0.2910121040432655,
+     "grad_norm": 0.19122862132727417,
+     "learning_rate": 9.63551432749426e-05,
+     "loss": 0.9024,
+     "num_input_tokens_seen": 6608296,
+     "step": 565
+   },
+   {
+     "epoch": 0.2935874323976307,
+     "grad_norm": 0.27778009425329764,
+     "learning_rate": 9.626345841664953e-05,
+     "loss": 0.9002,
+     "num_input_tokens_seen": 6666768,
+     "step": 570
+   },
+   {
+     "epoch": 0.2961627607519959,
+     "grad_norm": 0.3065314332046026,
+     "learning_rate": 9.617067942074153e-05,
+     "loss": 0.9035,
+     "num_input_tokens_seen": 6725248,
+     "step": 575
+   },
+   {
+     "epoch": 0.29873808910636107,
+     "grad_norm": 0.24431496415058412,
+     "learning_rate": 9.607680848145558e-05,
+     "loss": 0.9019,
+     "num_input_tokens_seen": 6783680,
+     "step": 580
+   },
+   {
+     "epoch": 0.30131341746072626,
+     "grad_norm": 0.27088193021301504,
+     "learning_rate": 9.598184781885318e-05,
+     "loss": 0.9001,
+     "num_input_tokens_seen": 6842144,
+     "step": 585
+   },
+   {
+     "epoch": 0.3038887458150914,
+     "grad_norm": 0.33893098113605125,
+     "learning_rate": 9.588579967876806e-05,
+     "loss": 0.8961,
+     "num_input_tokens_seen": 6900656,
+     "step": 590
+   },
+   {
+     "epoch": 0.3064640741694566,
+     "grad_norm": 0.3038921833221806,
+     "learning_rate": 9.578866633275288e-05,
+     "loss": 0.9,
+     "num_input_tokens_seen": 6959128,
+     "step": 595
+   },
+   {
+     "epoch": 0.3090394025238218,
+     "grad_norm": 0.48929637235055645,
+     "learning_rate": 9.569045007802559e-05,
+     "loss": 0.9046,
+     "num_input_tokens_seen": 7017576,
+     "step": 600
+   },
+   {
+     "epoch": 0.3090394025238218,
+     "eval_loss": 0.9053278565406799,
+     "eval_runtime": 17.1218,
+     "eval_samples_per_second": 3.504,
+     "eval_steps_per_second": 0.876,
+     "num_input_tokens_seen": 7017576,
+     "step": 600
    }
  ],
  "logging_steps": 5,
  "max_steps": 3400,
- "num_input_tokens_seen": 6432936,
+ "num_input_tokens_seen": 7017576,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -1005,7 +1094,7 @@
    "attributes": {}
   }
  },
- "total_flos": 361192817164288.0,
+ "total_flos": 394023670644736.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null