ben81828 committed on
Commit 270d1e8 · verified · 1 Parent(s): 41d06c9

Training in progress, step 700, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3ae4ae60a26ee99e11a2e5ab6fb595036c6ed6a11a5c18815e41835fd861d1bc
+ oid sha256:cb5f3724b5d59ea4e0a9371b33e20124137112f04ac962fd4bc5e834b083db05
  size 18516456
last-checkpoint/global_step700/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:959a125ff18cd438ec4c80e2c5c95463a0446d6e8fccc4f0e5522dd9314fca8f
+ size 27700976
last-checkpoint/global_step700/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08682e5ba6f3c3889ec79756688eb2a1e839e2e0b2d8d9b28484ef9db0d1b829
+ size 27700976
last-checkpoint/global_step700/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b29ae75df4490c491c4301676ec1b8d1154fed1e0a211aa5943a6f16f309cd9
+ size 27700976
last-checkpoint/global_step700/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c9f755f55700d124eeb27776705e0e11433122a60b2aa0650a5420a931faeaf9
+ size 27700976
last-checkpoint/global_step700/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e217f19544c300bb2d3991b8a514f4a179cf76123f6edfd5999fba2e2243a4c8
+ size 411571
last-checkpoint/global_step700/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4368d7486d6127b13167c50cbc627b3993283a681daf72643efce836dd5d545a
+ size 411507
last-checkpoint/global_step700/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:297e66e1bbff69bd1001231f461fc0448f550fb8d4cbe1bafd584eac352ee3d0
+ size 411507
last-checkpoint/global_step700/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59cbd2781ce6dd216ab1f908839e903ed36677341205d2403154d114808faa0e
+ size 411507
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step650
+ global_step700
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8044e4c53158c210a17648ba8f2dc2d25a25bbfc55f686015542618eb652a33e
+ oid sha256:e7d74de51245105e1fbf57a6707ef3538b353952485508f6e2f8f74dc5d479d4
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4cd85d7fa425e7888c973f1c2985ac15ca21b5e6171fe140a401c2bc75ca46ff
+ oid sha256:0617c9eb6cf7df57b2e0bb53cfe17c05f0910de56fe5b14427fe39ab54a44782
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d7915667371a58f1598639e0d1c20a0c59c783c14580cd040a6631eb4ea2311e
+ oid sha256:ed68a365057022897d9645ee60902a77102f43215dcdf2ddd5d3842b6a8446d8
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:35dd78929ad7f0fbf37fdb1284e8edf0424350f6e6ce1cd5a3ee78979af3d3cb
+ oid sha256:63ebaa0c302cadbdfcd9f8ee2289e35ecf9c9fc8c9968fc0c05f100dac20c6b9
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8cd94ecf5c982ee0e060d3e07a575ce03dc3b0f289b5e32a1f65d3b6366a8a0e
+ oid sha256:9089a2e73aa73e2c09752311bdfa67c1ed286ab83bb4bc61dbf851e8193bb593
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.8908902406692505,
  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4_frozenVision/lora/sft/checkpoint-300",
- "epoch": 0.3347926860674736,
+ "epoch": 0.3605459696111254,
  "eval_steps": 50,
- "global_step": 650,
+ "global_step": 700,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -1164,11 +1164,100 @@
  "eval_steps_per_second": 0.929,
  "num_input_tokens_seen": 7602512,
  "step": 650
+ },
+ {
+ "epoch": 0.3373680144218388,
+ "grad_norm": 0.620349194493935,
+ "learning_rate": 9.453928183013385e-05,
+ "loss": 0.8929,
+ "num_input_tokens_seen": 7660968,
+ "step": 655
+ },
+ {
+ "epoch": 0.33994334277620397,
+ "grad_norm": 0.18611846349930314,
+ "learning_rate": 9.442825912005202e-05,
+ "loss": 0.9078,
+ "num_input_tokens_seen": 7719448,
+ "step": 660
+ },
+ {
+ "epoch": 0.34251867113056916,
+ "grad_norm": 0.4448289413172567,
+ "learning_rate": 9.431618567508933e-05,
+ "loss": 0.8963,
+ "num_input_tokens_seen": 7777928,
+ "step": 665
+ },
+ {
+ "epoch": 0.34509399948493436,
+ "grad_norm": 0.6187189362250411,
+ "learning_rate": 9.420306414579925e-05,
+ "loss": 0.9134,
+ "num_input_tokens_seen": 7836424,
+ "step": 670
+ },
+ {
+ "epoch": 0.3476693278392995,
+ "grad_norm": 0.35247743418537675,
+ "learning_rate": 9.408889720752266e-05,
+ "loss": 0.8984,
+ "num_input_tokens_seen": 7894904,
+ "step": 675
+ },
+ {
+ "epoch": 0.3502446561936647,
+ "grad_norm": 0.20652916455346712,
+ "learning_rate": 9.397368756032445e-05,
+ "loss": 0.8997,
+ "num_input_tokens_seen": 7953432,
+ "step": 680
+ },
+ {
+ "epoch": 0.3528199845480299,
+ "grad_norm": 0.4289996063998063,
+ "learning_rate": 9.385743792892982e-05,
+ "loss": 0.8926,
+ "num_input_tokens_seen": 8011888,
+ "step": 685
+ },
+ {
+ "epoch": 0.35539531290239507,
+ "grad_norm": 0.13764054506536547,
+ "learning_rate": 9.374015106265968e-05,
+ "loss": 0.9008,
+ "num_input_tokens_seen": 8070344,
+ "step": 690
+ },
+ {
+ "epoch": 0.35797064125676026,
+ "grad_norm": 0.22142459689499855,
+ "learning_rate": 9.362182973536569e-05,
+ "loss": 0.8986,
+ "num_input_tokens_seen": 8128816,
+ "step": 695
+ },
+ {
+ "epoch": 0.3605459696111254,
+ "grad_norm": 0.3234539650829873,
+ "learning_rate": 9.35024767453647e-05,
+ "loss": 0.8972,
+ "num_input_tokens_seen": 8187320,
+ "step": 700
+ },
+ {
+ "epoch": 0.3605459696111254,
+ "eval_loss": 0.9028835892677307,
+ "eval_runtime": 16.1635,
+ "eval_samples_per_second": 3.712,
+ "eval_steps_per_second": 0.928,
+ "num_input_tokens_seen": 8187320,
+ "step": 700
  }
  ],
  "logging_steps": 5,
  "max_steps": 3400,
- "num_input_tokens_seen": 7602512,
+ "num_input_tokens_seen": 8187320,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -1183,7 +1272,7 @@
  "attributes": {}
  }
  },
- "total_flos": 426870884270080.0,
+ "total_flos": 459709441638400.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null