ben81828 committed
Commit 62164c6 · verified · 1 Parent(s): fc25916

Training in progress, step 1200, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:847b02ea6b5dcacf2ba31391c0399f758f14d0815ccad3f7b70c82822ffabbaf
+oid sha256:39e915e79b012114953a1589b00c8900ef9e4855bec6bd8aa9a11edc484871cf
 size 18516456
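These weight files are tracked with Git LFS, so the diff only records a new sha256 and byte size rather than the tensor data itself. A minimal sketch, assuming a pointer file and the downloaded object are both available locally (paths and the helper name are hypothetical, not part of this commit), of checking an object against its pointer:

import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_path: str, object_path: str) -> bool:
    # Parse the three pointer lines: "version ...", "oid sha256:<hex>", "size <bytes>".
    fields = dict(line.split(" ", 1) for line in Path(pointer_path).read_text().splitlines() if line)
    expected_oid = fields["oid"].removeprefix("sha256:")
    data = Path(object_path).read_bytes()
    # The object matches when both the digest and the byte count agree with the pointer.
    return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == int(fields["size"])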
last-checkpoint/global_step1200/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0efb098c1694146a0e80a979cced9a620aed1aae2dc6be201be025df62aa8560
+size 27700976
last-checkpoint/global_step1200/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:138b961bc4780fb95a2c2c0be19299c9e15f089b5b0257206d15e151de3a79e7
+size 27700976
last-checkpoint/global_step1200/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ce61ce78c8129b1837242abef544068fc1b7402b9b5509d695b968c9d67e6b3
+size 27700976
last-checkpoint/global_step1200/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e357c3f771375bd330948d92edd3f7b22f7595fa97b82118d77c7c77aef77d65
+size 27700976
last-checkpoint/global_step1200/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a9e13644f376cff9c47ad54225b89c561303f550f5f81e911927275a74532578
+size 411571
last-checkpoint/global_step1200/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df7cd6c3ef468d7af13347f379bf79f473002553e17b730bd6803e1c5952b5e8
+size 411507
last-checkpoint/global_step1200/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9227fd633733be3bf14e37d1ea43270c97c1eede4ac8b497b846f4b82dcbede2
+size 411507
last-checkpoint/global_step1200/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e13309a3c326c80e7d577358f945ca86270023863ef39e39a8d67522dd8d0fed
+size 411507
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step1150
+global_step1200
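The latest file holds the checkpoint tag, i.e. the name of the global_step directory that a resume should load from. A minimal sketch, assuming a local copy of this checkpoint folder, of resolving it:

from pathlib import Path

ckpt = Path("last-checkpoint")                      # hypothetical local clone of this repo
tag = (ckpt / "latest").read_text().strip()         # "global_step1200" after this commit
shards = sorted((ckpt / tag).glob("*_states.pt"))   # per-rank optimizer/model state shards
print(tag, [p.name for p in shards])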
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:274dc3860ee0c7f4d5348f60910a4b568498c04adfefb89f905b1c78a82c1312
+oid sha256:a209a0c0025f9ce8e2beeba50c1f0828d5c34a2482310fcd0bf5fc24c2c67be2
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e9aa441491b9ca89e796944520fa1db332a67c0a1a920be83edd2d96d741716d
+oid sha256:a67fb929b8c51f9b1c6ff9f11366e57e55128a1d36df85a9d37a008b49017a75
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c1ee3434533b24fb771504fa8cceb5c2ea25fe0de1641128feaceccc65afe6ed
+oid sha256:1b9ef3b0c0978d0b611f4257c939f1c2c6f07e6227bfea6675532d285b0b64a7
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:93b4a44be1335173d2e3120bd0d1e6346f3e832d8935752c70ce1e98f017fa87
+oid sha256:0183d14c8ed52ee533139532e9bcf7bc34ec297a064845b35741cb501d92675f
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7d461c8d7517d4b88333bff7984fc3bfc149292198b04bbc18a49aee698ffb5c
+oid sha256:16e0cffc6b063574ed312ee2198c86d3dddf2450d0400f042fdc08bd22dcbc7f
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.7743102312088013,
-  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4_frozenVision/lora/sft/checkpoint-1100",
-  "epoch": 0.5923255215039918,
+  "best_metric": 0.7593821287155151,
+  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4_frozenVision/lora/sft/checkpoint-1200",
+  "epoch": 0.6180788050476436,
   "eval_steps": 50,
-  "global_step": 1150,
+  "global_step": 1200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2054,11 +2054,100 @@
       "eval_steps_per_second": 0.937,
       "num_input_tokens_seen": 13450720,
       "step": 1150
+    },
+    {
+      "epoch": 0.5949008498583569,
+      "grad_norm": 1.5827740829243289,
+      "learning_rate": 7.875623046909544e-05,
+      "loss": 0.8168,
+      "num_input_tokens_seen": 13509200,
+      "step": 1155
+    },
+    {
+      "epoch": 0.5974761782127221,
+      "grad_norm": 2.344942216339615,
+      "learning_rate": 7.855697251155967e-05,
+      "loss": 0.7749,
+      "num_input_tokens_seen": 13567656,
+      "step": 1160
+    },
+    {
+      "epoch": 0.6000515065670873,
+      "grad_norm": 2.7313469239045305,
+      "learning_rate": 7.835703917741212e-05,
+      "loss": 0.9132,
+      "num_input_tokens_seen": 13626136,
+      "step": 1165
+    },
+    {
+      "epoch": 0.6026268349214525,
+      "grad_norm": 0.7410043911446527,
+      "learning_rate": 7.81564351951057e-05,
+      "loss": 0.8308,
+      "num_input_tokens_seen": 13684608,
+      "step": 1170
+    },
+    {
+      "epoch": 0.6052021632758177,
+      "grad_norm": 0.5628590604115411,
+      "learning_rate": 7.795516530895414e-05,
+      "loss": 0.8011,
+      "num_input_tokens_seen": 13743080,
+      "step": 1175
+    },
+    {
+      "epoch": 0.6077774916301828,
+      "grad_norm": 1.2008934424824649,
+      "learning_rate": 7.775323427901993e-05,
+      "loss": 0.8309,
+      "num_input_tokens_seen": 13801552,
+      "step": 1180
+    },
+    {
+      "epoch": 0.610352819984548,
+      "grad_norm": 1.2914156288367256,
+      "learning_rate": 7.755064688100171e-05,
+      "loss": 0.8089,
+      "num_input_tokens_seen": 13860064,
+      "step": 1185
+    },
+    {
+      "epoch": 0.6129281483389132,
+      "grad_norm": 1.420806774436513,
+      "learning_rate": 7.734740790612136e-05,
+      "loss": 0.8089,
+      "num_input_tokens_seen": 13918552,
+      "step": 1190
+    },
+    {
+      "epoch": 0.6155034766932784,
+      "grad_norm": 0.8352922832465102,
+      "learning_rate": 7.714352216101055e-05,
+      "loss": 0.8511,
+      "num_input_tokens_seen": 13977056,
+      "step": 1195
+    },
+    {
+      "epoch": 0.6180788050476436,
+      "grad_norm": 0.6321587989106885,
+      "learning_rate": 7.693899446759727e-05,
+      "loss": 0.8061,
+      "num_input_tokens_seen": 14035544,
+      "step": 1200
+    },
+    {
+      "epoch": 0.6180788050476436,
+      "eval_loss": 0.7593821287155151,
+      "eval_runtime": 16.1368,
+      "eval_samples_per_second": 3.718,
+      "eval_steps_per_second": 0.93,
+      "num_input_tokens_seen": 14035544,
+      "step": 1200
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen": 13450720,
+  "num_input_tokens_seen": 14035544,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -2073,7 +2162,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 755277071908864.0,
+  "total_flos": 788117261910016.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null