ben81828 committed · verified
Commit 5a046b9 · 1 Parent(s): 77e1285

Training in progress, step 150, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:54617ace9be9da2e4e57ae55a0f5329011112b5c8868c9fffa532ea69e4c2d98
+oid sha256:e1db5746fd65250f2bf6817d2de9266703177b7d0c3decef80aefa9773e3ca7a
 size 29034840
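
Each changed entry above is a Git LFS pointer file rather than the binary payload itself: the tracked blob is identified only by its `sha256` oid and byte size, so this diff swaps the old adapter weights for the new step-150 weights without storing either in the repository. A minimal sketch of checking the oid against a locally pulled file (the path and expected oid below come from this diff; the check itself is illustrative and not part of the commit):

```python
import hashlib

# Illustrative path: the adapter weights after `git lfs pull` or a hub download.
path = "last-checkpoint/adapter_model.safetensors"
expected_oid = "e1db5746fd65250f2bf6817d2de9266703177b7d0c3decef80aefa9773e3ca7a"

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

print("pointer matches file:", sha.hexdigest() == expected_oid)
```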
last-checkpoint/global_step150/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e6af200421fe1d73ecd1d42ad1ed20ccf899c9f21305ebaefb140aada662a759
+size 43429616
last-checkpoint/global_step150/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be78431879fe26d398ef6cc8424c45ea0c0d43e6486b3290d03d1a823c1d996f
+size 43429616
last-checkpoint/global_step150/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61ebcbde783ef855f8a86fcbaaef684c07b0acc7cc73acc0425ea5522428c95d
+size 43429616
last-checkpoint/global_step150/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb9a0b3a0e13aef6722af4cbff4275803aab00e1349f0db43f5927fc6d027d43
+size 43429616
last-checkpoint/global_step150/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34f144f33b7ac763950c684990157dad4d1ee523ae43797b9a6b90a998a9d314
+size 637299
last-checkpoint/global_step150/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3e0249ea361e86670960dca0ca756a903dbb19568eef1a99e26ae12785a1d69
+size 637171
last-checkpoint/global_step150/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82afbf3944b11f68d9499cbaa773c1fcc29e1a0e9b4eb333a671ffa88a5222fe
+size 637171
last-checkpoint/global_step150/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a0caccf9e31a439cb573c5600a0e4625f1ff654d897f43ccfa9da9d8591fe0e
+size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step100
+global_step150
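
The `latest` tag now points at `global_step150`, the directory that holds the per-rank ZeRO shards added above: four `*_optim_states.pt` and four `*_model_states.pt` files, one pair per data-parallel rank. A minimal sketch of consolidating those shards into a single fp32 state dict with DeepSpeed's zero_to_fp32 utility, assuming DeepSpeed is installed and the full checkpoint directory has been pulled locally (this post-processing step is a common choice, not something the commit itself performs):

```python
# Sketch only: assumes DeepSpeed is installed and last-checkpoint/,
# including global_step150/, has been downloaded in full.
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

checkpoint_dir = "last-checkpoint"
with open(f"{checkpoint_dir}/latest") as f:
    tag = f.read().strip()  # -> "global_step150"

# Merge the four per-rank ZeRO partitions back into one fp32 state dict.
state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=tag)
print(f"consolidated {len(state_dict)} tensors from {tag}")
```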
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:308f94f9a5c24e1bad5c393d56ae7af7782600f4e791d9c6ac35b22fff2105b6
+oid sha256:70cc56408014c410353d4dd58ae9b03f4be043f5f800324f66fd8e20e99b840e
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b056f3c23cb32dc77a2ec9e7651e0b64e4440e21f0fdf969b86bfc56a1cbdf06
+oid sha256:49d1438e98cc9c53a6852464635ce62e9788e61eb3646b73e33813f487c4b6ae
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f3f8a05714bc528f4885a2816181652f2303b3e8150f89b56aaee6bec56aa520
+oid sha256:4388add9cec90932f8ff0100d27a0574d98e1bad52ff89d44e31967d2b4fbfde
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4f755bd3c330281961e5c03af9d10ce8c1e1678619d384f6f1fd5fd7dce2ff50
+oid sha256:a705d6dfaae4f2c1b4b2be6b25a6eb521ffae6fcba21cc1531e97b60037ed079
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4e451a9e086b06d7c667be8442b2115f5c088953bade0b625e61f2ce5c7fd404
+oid sha256:c587ef55825cdfdbac47c29c3bd8a2996263c16833a5bd6aaa2fb014bea1e9d1
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.18663176894187927,
-  "best_model_checkpoint": "saves/CADICA_qwenvl_direction_scale4/lora/sft/checkpoint-100",
-  "epoch": 0.05150656708730363,
+  "best_metric": 0.09665286540985107,
+  "best_model_checkpoint": "saves/CADICA_qwenvl_direction_scale4/lora/sft/checkpoint-150",
+  "epoch": 0.07725985063095545,
   "eval_steps": 50,
-  "global_step": 100,
+  "global_step": 150,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -185,11 +185,100 @@
       "eval_steps_per_second": 0.793,
       "num_input_tokens_seen": 998400,
       "step": 100
+    },
+    {
+      "epoch": 0.05408189544166881,
+      "grad_norm": 2.2181531422996716,
+      "learning_rate": 6.176470588235295e-05,
+      "loss": 0.168,
+      "num_input_tokens_seen": 1048320,
+      "step": 105
+    },
+    {
+      "epoch": 0.056657223796033995,
+      "grad_norm": 3.1829920225573236,
+      "learning_rate": 6.470588235294118e-05,
+      "loss": 0.0709,
+      "num_input_tokens_seen": 1098240,
+      "step": 110
+    },
+    {
+      "epoch": 0.05923255215039917,
+      "grad_norm": 4.337350477588576,
+      "learning_rate": 6.764705882352942e-05,
+      "loss": 0.1609,
+      "num_input_tokens_seen": 1148160,
+      "step": 115
+    },
+    {
+      "epoch": 0.06180788050476436,
+      "grad_norm": 2.1010046045637365,
+      "learning_rate": 7.058823529411765e-05,
+      "loss": 0.0354,
+      "num_input_tokens_seen": 1198080,
+      "step": 120
+    },
+    {
+      "epoch": 0.06438320885912954,
+      "grad_norm": 2.232308844812103,
+      "learning_rate": 7.352941176470589e-05,
+      "loss": 0.1133,
+      "num_input_tokens_seen": 1248000,
+      "step": 125
+    },
+    {
+      "epoch": 0.06695853721349472,
+      "grad_norm": 5.641631090993415,
+      "learning_rate": 7.647058823529411e-05,
+      "loss": 0.0867,
+      "num_input_tokens_seen": 1297920,
+      "step": 130
+    },
+    {
+      "epoch": 0.0695338655678599,
+      "grad_norm": 1.5031437609685787,
+      "learning_rate": 7.941176470588235e-05,
+      "loss": 0.1352,
+      "num_input_tokens_seen": 1347840,
+      "step": 135
+    },
+    {
+      "epoch": 0.07210919392222509,
+      "grad_norm": 3.2992644431188465,
+      "learning_rate": 8.23529411764706e-05,
+      "loss": 0.101,
+      "num_input_tokens_seen": 1397760,
+      "step": 140
+    },
+    {
+      "epoch": 0.07468452227659027,
+      "grad_norm": 3.494236832758233,
+      "learning_rate": 8.529411764705883e-05,
+      "loss": 0.0334,
+      "num_input_tokens_seen": 1447680,
+      "step": 145
+    },
+    {
+      "epoch": 0.07725985063095545,
+      "grad_norm": 0.0602113869322109,
+      "learning_rate": 8.823529411764706e-05,
+      "loss": 0.0667,
+      "num_input_tokens_seen": 1497600,
+      "step": 150
+    },
+    {
+      "epoch": 0.07725985063095545,
+      "eval_loss": 0.09665286540985107,
+      "eval_runtime": 19.2745,
+      "eval_samples_per_second": 3.113,
+      "eval_steps_per_second": 0.778,
+      "num_input_tokens_seen": 1497600,
+      "step": 150
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen": 998400,
+  "num_input_tokens_seen": 1497600,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -204,7 +293,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 65824787529728.0,
+  "total_flos": 98764767100928.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null