ben81828 committed (verified)
Commit 264d194 · 1 Parent(s): abecdf0

Training in progress, step 250, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:381d89dd7c1135d0334349b9f49a436d03516c244b2ccee7000cd0fdc6d88805
+oid sha256:ea0e59edded26f764540e30e37224e6309622e9f0d8713fac942b2956b39deb3
 size 29034840
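
Each pointer file in this commit follows the Git LFS pointer format shown above: a version line, an `oid sha256:<hash>` line, and a `size <bytes>` line. A minimal sketch, assuming the repository has been cloned with the large files materialized locally (the local path is an assumption), of checking a downloaded file against its pointer:

```python
import hashlib
from pathlib import Path

def verify_lfs_pointer(local_file: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a local file's size and sha256 digest with its LFS pointer fields."""
    path = Path(local_file)
    if path.stat().st_size != expected_size:
        return False
    digest = hashlib.sha256()
    with path.open("rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Values taken from the adapter_model.safetensors pointer in this commit;
# the relative path assumes the repo was cloned into the working directory.
ok = verify_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    "ea0e59edded26f764540e30e37224e6309622e9f0d8713fac942b2956b39deb3",
    29034840,
)
print("pointer matches" if ok else "pointer mismatch")
```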
last-checkpoint/global_step250/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8521d89f5d951cd266e5a0ad2c17de7b69aaa3e87e321317eea95aa55a91603
+size 43429616
last-checkpoint/global_step250/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:238b9e16d8923db2d6c50269fdc6c44d42412382c52e25374eec633be975b99d
+size 43429616
last-checkpoint/global_step250/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:13d3ffa108ad1c07e8075652d5b94d4eedcb26e11c0d50073c8aa775a0789168
+size 43429616
last-checkpoint/global_step250/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e9f9a85f8e966a5ca76f86099edc9d961561b0203f32181b5c873488556f321
+size 43429616
last-checkpoint/global_step250/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e80b196c408fdbb93be47104d2e2ebb39806a8e2f671eb97b21520c1760537b
+size 637299
last-checkpoint/global_step250/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50783d840386ee88fb64fd6d7bb5cc3c88c65818b6030a5cb92b901026aa8a7b
+size 637171
last-checkpoint/global_step250/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8a9975bb0ef92a03508b158d7fea0b886e853ff0adb21cca084bd76368bdf9a
+size 637171
last-checkpoint/global_step250/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:76693721fccb62522fa35aaa24f5d6f23796a4391cc1cbb825c3e42813ab3ab8
+size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step200
+global_step250
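
The `global_step250/*_optim_states.pt` and `*_model_states.pt` shards, together with the `latest` tag file updated above, make up a DeepSpeed ZeRO checkpoint sharded across 4 data-parallel ranks. A minimal sketch, assuming DeepSpeed is installed and `last-checkpoint/` has been downloaded as-is, of consolidating the shards into a single fp32 state dict with DeepSpeed's `zero_to_fp32` utility (the output filename is hypothetical):

```python
import torch
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# "last-checkpoint" is the directory containing the `latest` file;
# the checkpoint tag (global_step250) is read from `latest` automatically.
state_dict = get_fp32_state_dict_from_zero_checkpoint("last-checkpoint")

# Save one consolidated checkpoint instead of per-rank shards.
torch.save(state_dict, "consolidated_fp32_model.bin")
print(f"{len(state_dict)} tensors consolidated")
```

DeepSpeed also drops a standalone `zero_to_fp32.py` script into checkpoint directories it writes, which does the same conversion from the command line.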
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5c5e18f922d0af74d820247ae97bee506ab412554a58345ddf2558abc94ee3e3
+oid sha256:418a5f105ae834c3075024076916b2a9475918fe034c12d0dd5b6d91f1aba467
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2a2dcca6d9741f46592359768ea2212b9321da6408d1fd7d3a80b017bf37f434
+oid sha256:6e07ace389d24bc1307b74f42a1e7b8f0117b0db853e2df64ff3f15cb92916a2
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:69420ece2c255923c5cbb3c6c9c4a6b9cb38fb57e5d3033c8b7d436a1faf6f13
+oid sha256:da6a990f346d7014dffb28fa2bc7d3b890bd3c53712503fce3656da48d3d6e50
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:66f278b40a1e23b88a657c4e5d03afa8dbbbe14dfeb16f6b4beedaece6cdd0b9
+oid sha256:e95f356ca38179b05993f55daece0223e96fa10b9a1b9ea2102a739211333f63
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fdde69e82f45c9368fe627c2082d010eb77a8ceb0b2354f60caa089407a428a6
+oid sha256:4d7e2043e09ec2b328d02f0638a57759f62a1b72350c1a7738b78953d31c6142
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.09665286540985107,
-  "best_model_checkpoint": "saves/CADICA_qwenvl_direction_scale4/lora/sft/checkpoint-150",
-  "epoch": 0.10301313417460727,
+  "best_metric": 0.055875860154628754,
+  "best_model_checkpoint": "saves/CADICA_qwenvl_direction_scale4/lora/sft/checkpoint-250",
+  "epoch": 0.12876641771825909,
   "eval_steps": 50,
-  "global_step": 200,
+  "global_step": 250,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -363,11 +363,100 @@
       "eval_steps_per_second": 0.779,
       "num_input_tokens_seen": 1996800,
       "step": 200
+    },
+    {
+      "epoch": 0.10558846252897244,
+      "grad_norm": 1.0969393658164421,
+      "learning_rate": 9.997103130994296e-05,
+      "loss": 0.0539,
+      "num_input_tokens_seen": 2046720,
+      "step": 205
+    },
+    {
+      "epoch": 0.10816379088333762,
+      "grad_norm": 2.885869194934028,
+      "learning_rate": 9.996216446216267e-05,
+      "loss": 0.0654,
+      "num_input_tokens_seen": 2096640,
+      "step": 210
+    },
+    {
+      "epoch": 0.11073911923770281,
+      "grad_norm": 0.5225257245731217,
+      "learning_rate": 9.995211600182397e-05,
+      "loss": 0.0316,
+      "num_input_tokens_seen": 2146560,
+      "step": 215
+    },
+    {
+      "epoch": 0.11331444759206799,
+      "grad_norm": 2.1553510734212797,
+      "learning_rate": 9.994088616657444e-05,
+      "loss": 0.1169,
+      "num_input_tokens_seen": 2196480,
+      "step": 220
+    },
+    {
+      "epoch": 0.11588977594643317,
+      "grad_norm": 1.1133884703723633,
+      "learning_rate": 9.992847522200133e-05,
+      "loss": 0.0382,
+      "num_input_tokens_seen": 2246400,
+      "step": 225
+    },
+    {
+      "epoch": 0.11846510430079835,
+      "grad_norm": 0.8875243341616034,
+      "learning_rate": 9.99148834616253e-05,
+      "loss": 0.0406,
+      "num_input_tokens_seen": 2296320,
+      "step": 230
+    },
+    {
+      "epoch": 0.12104043265516354,
+      "grad_norm": 1.81283533812695,
+      "learning_rate": 9.990011120689351e-05,
+      "loss": 0.0182,
+      "num_input_tokens_seen": 2346240,
+      "step": 235
+    },
+    {
+      "epoch": 0.12361576100952872,
+      "grad_norm": 3.873083258671571,
+      "learning_rate": 9.988415880717194e-05,
+      "loss": 0.0881,
+      "num_input_tokens_seen": 2396160,
+      "step": 240
+    },
+    {
+      "epoch": 0.1261910893638939,
+      "grad_norm": 3.427761103620865,
+      "learning_rate": 9.986702663973722e-05,
+      "loss": 0.0565,
+      "num_input_tokens_seen": 2446080,
+      "step": 245
+    },
+    {
+      "epoch": 0.12876641771825909,
+      "grad_norm": 1.531943599765959,
+      "learning_rate": 9.98487151097676e-05,
+      "loss": 0.0805,
+      "num_input_tokens_seen": 2496000,
+      "step": 250
+    },
+    {
+      "epoch": 0.12876641771825909,
+      "eval_loss": 0.055875860154628754,
+      "eval_runtime": 19.5106,
+      "eval_samples_per_second": 3.075,
+      "eval_steps_per_second": 0.769,
+      "num_input_tokens_seen": 2496000,
+      "step": 250
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen": 1996800,
+  "num_input_tokens_seen": 2496000,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -382,7 +471,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 131704746672128.0,
+  "total_flos": 164644726243328.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null