Commit f502a61 (verified) · ben81828 committed · 1 parent: d40acc8

Training in progress, step 1400, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c5539aab9f718348f04ec41613d234e7bd892b381194398a0eadf2adcfde0ddb
+oid sha256:e0eaf3c3fbb7e51224bc7a40e841d98120e282fdcc24346a2251f02af1f31442
 size 29034840
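
Note: the entries in this commit are Git LFS pointer files, not the binary blobs themselves; each pointer records only the SHA-256 object id and byte size of the tracked file, which is why an updated adapter shows up as a changed `oid` with an unchanged `size`. As a minimal sketch (assuming the actual `.safetensors` blob has been pulled locally rather than just the pointer; the path and helper name below are illustrative), the pointer can be checked against the file with plain `hashlib`:

```python
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream the file in chunks and return its hex SHA-256 digest."""
    h = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected values copied from the pointer above.
expected_oid = "e0eaf3c3fbb7e51224bc7a40e841d98120e282fdcc24346a2251f02af1f31442"
expected_size = 29034840

f = Path("last-checkpoint/adapter_model.safetensors")
assert f.stat().st_size == expected_size, "size does not match the LFS pointer"
assert sha256_of(f) == expected_oid, "oid does not match the LFS pointer"
print("adapter_model.safetensors matches its LFS pointer")
```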
last-checkpoint/global_step1400/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2e342082d4542a64d191aa926861afcf05b77eaceea1f30ac3fc9550d2dd105
+size 43429616
last-checkpoint/global_step1400/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:502251206a24a0a024f81c30de85799cd1c6b914f9e685359411506498a96167
+size 43429616
last-checkpoint/global_step1400/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e212f331b2681aab216c5852744fb0215a5ced4ea0ede628b65d8da873c9ed62
+size 43429616
last-checkpoint/global_step1400/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:07b3b2f0c4e540817d85bff704f6250658a8e37e6d5762cb79853b83c1eb15c0
+size 43429616
last-checkpoint/global_step1400/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1906adf1b95058095138f9b57795e6e0826a899c9c7e5a525c8eae36ecaa15f4
+size 637299
last-checkpoint/global_step1400/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a0bb562939d112dd834c43097bc2d1ef141d083d45e4138023cb41209e8c874
+size 637171
last-checkpoint/global_step1400/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3eb2bc0e5fa21a633543ff99a5a9ea2924b55031a219c0558ebd60e5ccef7d0e
+size 637171
last-checkpoint/global_step1400/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:336e83dddf7ea2bb6e2ee90e33a484acff5b821f73e635c4aaaa44a24086519e
+size 637171
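
The files added under `global_step1400/` are the DeepSpeed ZeRO checkpoint shards: one bf16 optimizer-state file and one model-state file per data-parallel rank (ranks 0 through 3 here), with the `latest` marker updated below to point at this step. As a sketch of how such shards are typically merged back into a single state dict (assuming DeepSpeed is installed and its `zero_to_fp32` utilities match the version that wrote this checkpoint; the output filename is illustrative):

```python
import torch
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# Directory containing the `latest` file, which points to global_step1400.
checkpoint_dir = "last-checkpoint"

# Gather the per-rank ZeRO partitions into one fp32 state dict on CPU.
state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir)

# Save the consolidated weights for inspection or further conversion.
torch.save(state_dict, "consolidated_fp32_state_dict.pt")
print(f"consolidated {len(state_dict)} tensors")
```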
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step1350
+global_step1400
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9747fe881253e52a47314f48068ef9649032bec4cb284b1b4becbb8787f37faa
+oid sha256:fe66a68e61de2221b30fd9749bc68b45a1474bb2cc95901bca9557ac87909355
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3ac02a5554a5ef9e3473dcd2926626ae41f4777354859c7d2bf0a0c1188c0583
+oid sha256:0cd4f3162e46c3bb0f1fc4d3c52c7c33e60f56764458e0c8a73c3810b0a25f8c
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9405d230cc78dac3f3b2ab887674631c15f66fedab0042ab7bc1bd83b8575344
+oid sha256:185cc99aaa81b1b49b3ddc74aa6f97aa3036330983a7b69d52bd191057f9a5d5
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b15237547030ac62d49d70a5465b2e29515e6334f62416eb16c0c6d073f7c6bf
+oid sha256:0e37403c30cb4309e54e5defdde1906486716fc859274035d44aaac5d48a97ba
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5473741dbc1be4510ceec76e5e3cd10aedf1c7667da998c551f8978b6c6c33d3
+oid sha256:9eb4a31de57d35a9d36a648c17d1ff1a00be01f744b83cf70f63cae24d418555
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.2966395914554596,
   "best_model_checkpoint": "saves/CADICA_qwenvl_direction_then_DetectAndClassify_scale6/lora/sft/checkpoint-1200",
-  "epoch": 0.3988183161004431,
+  "epoch": 0.413589364844904,
   "eval_steps": 50,
-  "global_step": 1350,
+  "global_step": 1400,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2410,11 +2410,100 @@
       "eval_steps_per_second": 0.777,
       "num_input_tokens_seen": 14014736,
       "step": 1350
+    },
+    {
+      "epoch": 0.40029542097488924,
+      "grad_norm": 2.292705535408954,
+      "learning_rate": 9.396695542455704e-05,
+      "loss": 0.4115,
+      "num_input_tokens_seen": 14066880,
+      "step": 1355
+    },
+    {
+      "epoch": 0.4017725258493353,
+      "grad_norm": 10.68072230240614,
+      "learning_rate": 9.390866796969577e-05,
+      "loss": 0.365,
+      "num_input_tokens_seen": 14118320,
+      "step": 1360
+    },
+    {
+      "epoch": 0.4032496307237814,
+      "grad_norm": 28.45565288311722,
+      "learning_rate": 9.385011855616177e-05,
+      "loss": 0.3904,
+      "num_input_tokens_seen": 14169208,
+      "step": 1365
+    },
+    {
+      "epoch": 0.40472673559822747,
+      "grad_norm": 9.32794663574214,
+      "learning_rate": 9.379130753326021e-05,
+      "loss": 0.5425,
+      "num_input_tokens_seen": 14220632,
+      "step": 1370
+    },
+    {
+      "epoch": 0.40620384047267355,
+      "grad_norm": 4.737143544435888,
+      "learning_rate": 9.373223525185709e-05,
+      "loss": 0.3985,
+      "num_input_tokens_seen": 14272640,
+      "step": 1375
+    },
+    {
+      "epoch": 0.4076809453471196,
+      "grad_norm": 17.480173613134482,
+      "learning_rate": 9.367290206437702e-05,
+      "loss": 0.3528,
+      "num_input_tokens_seen": 14324960,
+      "step": 1380
+    },
+    {
+      "epoch": 0.4091580502215657,
+      "grad_norm": 4.40598964753602,
+      "learning_rate": 9.361330832480124e-05,
+      "loss": 0.3687,
+      "num_input_tokens_seen": 14376792,
+      "step": 1385
+    },
+    {
+      "epoch": 0.41063515509601184,
+      "grad_norm": 15.961709998187562,
+      "learning_rate": 9.355345438866538e-05,
+      "loss": 0.3552,
+      "num_input_tokens_seen": 14428192,
+      "step": 1390
+    },
+    {
+      "epoch": 0.4121122599704579,
+      "grad_norm": 4.033485652398453,
+      "learning_rate": 9.349334061305743e-05,
+      "loss": 0.3194,
+      "num_input_tokens_seen": 14480568,
+      "step": 1395
+    },
+    {
+      "epoch": 0.413589364844904,
+      "grad_norm": 9.187315388235644,
+      "learning_rate": 9.343296735661557e-05,
+      "loss": 0.388,
+      "num_input_tokens_seen": 14532288,
+      "step": 1400
+    },
+    {
+      "epoch": 0.413589364844904,
+      "eval_loss": 0.38656601309776306,
+      "eval_runtime": 19.1495,
+      "eval_samples_per_second": 3.133,
+      "eval_steps_per_second": 0.783,
+      "num_input_tokens_seen": 14532288,
+      "step": 1400
     }
   ],
   "logging_steps": 5,
   "max_steps": 6770,
-  "num_input_tokens_seen": 14014736,
+  "num_input_tokens_seen": 14532288,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -2429,7 +2518,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 924522107633664.0,
+  "total_flos": 958658687795200.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null