ben81828 committed · Commit 9936092 (verified) · 1 Parent(s): 21d6b42

Training in progress, step 1500, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e95045ab82949901554f82423b881753088f0bf319f5b2eb855236faa8d4d90c
+oid sha256:6289479e907da413d0601e67681f7846ac9804602b3a5c4cb53469f8420fd060
 size 29034840
last-checkpoint/global_step1500/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d32c3db9bd47d3fd301d19afb9e65be568f162a529e5b395d56b562bff90f3fa
+size 43429616
last-checkpoint/global_step1500/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:16e95b6c551561f8e28d2d5cae23096c9d3ed6b6aa830cd5d690389680532849
+size 43429616
last-checkpoint/global_step1500/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7ecafa7b19a9899877a2ee1cf62b924940aea99ddbe0a2e41c1d2da49d51d50
+size 43429616
last-checkpoint/global_step1500/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:999e1eaa71432819202d4d7c2c587bfe6e38a266275c2be56de7c4e6781a2677
+size 43429616
last-checkpoint/global_step1500/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e26a054806a08fcc4699bcbb4ba16ad8b8ce9bd251f3f4f2ddea66428a6514f3
+size 637299
last-checkpoint/global_step1500/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0e6e56cb084d84e264a8def4093445029bf6f220c440facaf6f823c4ff482e1
+size 637171
last-checkpoint/global_step1500/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd13d26848f42e1fe0b5c0bc25ada85bb5a6562b98a05e03d77b9a1bb5695c65
+size 637171
last-checkpoint/global_step1500/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5dca9d87505fd347b5f4b1f42a713081819b869c0819eaac51f4120208f87027
+size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step1450
+global_step1500
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dd4f3298d54e9509917373bcd359e11c92a8e0aa77b2cc0825602efd186ad77e
+oid sha256:dd3566049ad1f65d2f434d990deb65584d2b2dcb1aac8e89c68ea37dc533eab7
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5fdab02a791039ff46df83a272c972ab0f366fcea91338790dc016dbbbf6de80
+oid sha256:2f96a47dc4176412bc893ccb49c004c8fa1cc8c306d67689d87ed20944233c62
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e2808762a5f80587f05c1e2e8de26b5aef1010331cea366649319d593dbb0e66
+oid sha256:ebd9fa3db1079ccf750b71f4eeedbf1f04422fc748026a4b866afe133f9fbfd1
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c052112cbe3f325fd4543a02558091b80c9c8abcd8db8822be9a8306eab6f2b7
+oid sha256:383e9e252cc8292eef0120c964bdc8033972e800c085c97c42af97379e6b4b5c
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e9dc71bb3c767f5227b4aaf564240b2395077bbb61533f057d89b79d0ef0db35
+oid sha256:f31de0059cfffab00767f1a3eadee4b6350e0f39836feff5d5d9263e375ce9aa
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.2966395914554596,
   "best_model_checkpoint": "saves/CADICA_qwenvl_direction_then_DetectAndClassify_scale6/lora/sft/checkpoint-1200",
-  "epoch": 0.42836041358936483,
+  "epoch": 0.4431314623338257,
   "eval_steps": 50,
-  "global_step": 1450,
+  "global_step": 1500,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2588,11 +2588,100 @@
       "eval_steps_per_second": 0.775,
       "num_input_tokens_seen": 15050992,
       "step": 1450
+    },
+    {
+      "epoch": 0.4298375184638109,
+      "grad_norm": 5.756692171427544,
+      "learning_rate": 9.275183991651558e-05,
+      "loss": 0.3253,
+      "num_input_tokens_seen": 15103328,
+      "step": 1455
+    },
+    {
+      "epoch": 0.43131462333825704,
+      "grad_norm": 48.99329970671894,
+      "learning_rate": 9.268838142516943e-05,
+      "loss": 0.3999,
+      "num_input_tokens_seen": 15154640,
+      "step": 1460
+    },
+    {
+      "epoch": 0.4327917282127031,
+      "grad_norm": 4.8123322290098764,
+      "learning_rate": 9.262466825536782e-05,
+      "loss": 0.3529,
+      "num_input_tokens_seen": 15206264,
+      "step": 1465
+    },
+    {
+      "epoch": 0.4342688330871492,
+      "grad_norm": 8.626202826826654,
+      "learning_rate": 9.256070078722287e-05,
+      "loss": 0.3363,
+      "num_input_tokens_seen": 15258160,
+      "step": 1470
+    },
+    {
+      "epoch": 0.4357459379615953,
+      "grad_norm": 24.191365994287917,
+      "learning_rate": 9.249647940236385e-05,
+      "loss": 0.4133,
+      "num_input_tokens_seen": 15309224,
+      "step": 1475
+    },
+    {
+      "epoch": 0.43722304283604135,
+      "grad_norm": 15.447487032462668,
+      "learning_rate": 9.243200448393492e-05,
+      "loss": 0.3306,
+      "num_input_tokens_seen": 15361480,
+      "step": 1480
+    },
+    {
+      "epoch": 0.43870014771048743,
+      "grad_norm": 2.714376790067382,
+      "learning_rate": 9.236727641659277e-05,
+      "loss": 0.2993,
+      "num_input_tokens_seen": 15414680,
+      "step": 1485
+    },
+    {
+      "epoch": 0.4401772525849335,
+      "grad_norm": 11.85875780519428,
+      "learning_rate": 9.230229558650442e-05,
+      "loss": 0.3324,
+      "num_input_tokens_seen": 15466552,
+      "step": 1490
+    },
+    {
+      "epoch": 0.44165435745937964,
+      "grad_norm": 7.338287245679936,
+      "learning_rate": 9.223706238134485e-05,
+      "loss": 0.2615,
+      "num_input_tokens_seen": 15519472,
+      "step": 1495
+    },
+    {
+      "epoch": 0.4431314623338257,
+      "grad_norm": 11.54360838796349,
+      "learning_rate": 9.217157719029469e-05,
+      "loss": 0.2928,
+      "num_input_tokens_seen": 15572048,
+      "step": 1500
+    },
+    {
+      "epoch": 0.4431314623338257,
+      "eval_loss": 0.40494996309280396,
+      "eval_runtime": 19.2568,
+      "eval_samples_per_second": 3.116,
+      "eval_steps_per_second": 0.779,
+      "num_input_tokens_seen": 15572048,
+      "step": 1500
     }
   ],
   "logging_steps": 5,
   "max_steps": 6770,
-  "num_input_tokens_seen": 15050992,
+  "num_input_tokens_seen": 15572048,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -2607,7 +2696,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 992909032161280.0,
+  "total_flos": 1027290982514688.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null