ben81828 committed (verified)
Commit a8dfe74 · 1 parent: aa6f6c1

Training in progress, step 1450, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e0eaf3c3fbb7e51224bc7a40e841d98120e282fdcc24346a2251f02af1f31442
+ oid sha256:e95045ab82949901554f82423b881753088f0bf319f5b2eb855236faa8d4d90c
  size 29034840
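
Every file touched by this commit is stored through Git LFS, so the diff only shows pointer stubs: a spec version line, a `sha256` object id, and the payload size in bytes. A minimal sketch of how such a pointer could be checked against a locally downloaded blob (the paths are illustrative, not part of this repo):

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: str) -> dict:
    """Parse a Git LFS pointer stub (version / oid / size) into a dict."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def blob_matches_pointer(blob_path: str, pointer_path: str) -> bool:
    """Verify a downloaded blob against the pointer's size and sha256 oid."""
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    data = Path(blob_path).read_bytes()
    return (len(data) == expected_size
            and hashlib.sha256(data).hexdigest() == expected_oid)

# Hypothetical usage:
# blob_matches_pointer("adapter_model.safetensors", "adapter_model.safetensors.ptr")
```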
last-checkpoint/global_step1450/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0b6d16394cb0ba5b558cf6e2fa28f9090c2c37a5e47b16869ac384ca7a0cb554
+ size 43429616
last-checkpoint/global_step1450/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:84b1d50120186fc06fb3748f8c18bd830f7fe23ee79c1d05fa69249e588fce5d
+ size 43429616
last-checkpoint/global_step1450/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55b12c80c61daf38a8fc12d481807b29951388f8f922eaec0c0754f8d9abf84e
+ size 43429616
last-checkpoint/global_step1450/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ec23ce3cfa6169cdb35d9d96c38585307716513426999a46f3532bf3904cbc5
+ size 43429616
last-checkpoint/global_step1450/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07bcf228cd3445b5f81d82193f9981b94412753caac46837372bccbefabc8afb
+ size 637299
last-checkpoint/global_step1450/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:451a08a86c16b5c280887423b2e3f77cbc4ff52bdf0818c12b05618d2438332c
+ size 637171
last-checkpoint/global_step1450/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d37d114e8d4c71e3b602ca91992bcc10700074bf3f6d72c53989b941258aac05
+ size 637171
last-checkpoint/global_step1450/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:de0fa53cd0688eb5fe1f2eba519558bf9b4822cd883935db7d14ffea34356251
+ size 637171
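
The eight files added under `global_step1450/` are the DeepSpeed ZeRO shards for this step: one `bf16_*_optim_states.pt` (~43 MB) and one `zero_pp_rank_*_model_states.pt` (~0.6 MB) per data-parallel rank. As a rough consistency check, assuming Adam state (fp32 master weights plus two moment buffers) partitioned evenly across the 4 ranks, which the diff itself does not state:

```python
# Rough check that ~43 MB optimizer shards are plausible for this adapter.
# Assumption: ZeRO partitions fp32 master weights + exp_avg + exp_avg_sq
# (12 bytes per trainable parameter) across the data-parallel ranks.
adapter_bytes = 29_034_840        # bf16 LoRA adapter size from the diff above
n_params = adapter_bytes // 2     # ~14.5M trainable parameters at 2 bytes each
ranks = 4                         # one optimizer shard per rank in this commit
per_rank = n_params * 12 / ranks
print(f"expected ~{per_rank:,.0f} bytes/shard, observed 43,429,616")
```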
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step1400
+ global_step1450
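
`last-checkpoint/latest` is the tag file DeepSpeed uses to record which `global_step*` directory holds the newest engine state; this commit advances it from `global_step1400` to `global_step1450`. A small sketch, assuming the directory layout shown in this diff, of resolving that tag to the shard files before resuming:

```python
from pathlib import Path

def resolve_latest_shards(ckpt_dir: str) -> list[Path]:
    """Follow the 'latest' tag file to the current global_step directory
    and return its optimizer/model state shards."""
    root = Path(ckpt_dir)
    tag = (root / "latest").read_text().strip()   # e.g. "global_step1450"
    return sorted((root / tag).glob("*_states.pt"))

# Hypothetical usage:
# for shard in resolve_latest_shards("last-checkpoint"):
#     print(shard.name)
```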
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fe66a68e61de2221b30fd9749bc68b45a1474bb2cc95901bca9557ac87909355
+ oid sha256:dd4f3298d54e9509917373bcd359e11c92a8e0aa77b2cc0825602efd186ad77e
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0cd4f3162e46c3bb0f1fc4d3c52c7c33e60f56764458e0c8a73c3810b0a25f8c
+ oid sha256:5fdab02a791039ff46df83a272c972ab0f366fcea91338790dc016dbbbf6de80
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:185cc99aaa81b1b49b3ddc74aa6f97aa3036330983a7b69d52bd191057f9a5d5
+ oid sha256:e2808762a5f80587f05c1e2e8de26b5aef1010331cea366649319d593dbb0e66
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0e37403c30cb4309e54e5defdde1906486716fc859274035d44aaac5d48a97ba
+ oid sha256:c052112cbe3f325fd4543a02558091b80c9c8abcd8db8822be9a8306eab6f2b7
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9eb4a31de57d35a9d36a648c17d1ff1a00be01f744b83cf70f63cae24d418555
+ oid sha256:e9dc71bb3c767f5227b4aaf564240b2395077bbb61533f057d89b79d0ef0db35
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.2966395914554596,
  "best_model_checkpoint": "saves/CADICA_qwenvl_direction_then_DetectAndClassify_scale6/lora/sft/checkpoint-1200",
- "epoch": 0.413589364844904,
+ "epoch": 0.42836041358936483,
  "eval_steps": 50,
- "global_step": 1400,
+ "global_step": 1450,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -2499,11 +2499,100 @@
  "eval_steps_per_second": 0.783,
  "num_input_tokens_seen": 14532288,
  "step": 1400
+ },
+ {
+ "epoch": 0.4150664697193501,
+ "grad_norm": 2.8279779742550626,
+ "learning_rate": 9.337233497952604e-05,
+ "loss": 0.3751,
+ "num_input_tokens_seen": 14583680,
+ "step": 1405
+ },
+ {
+ "epoch": 0.41654357459379615,
+ "grad_norm": 12.899164703482258,
+ "learning_rate": 9.331144384352099e-05,
+ "loss": 0.3431,
+ "num_input_tokens_seen": 14635712,
+ "step": 1410
+ },
+ {
+ "epoch": 0.41802067946824223,
+ "grad_norm": 10.924332413291099,
+ "learning_rate": 9.325029431187635e-05,
+ "loss": 0.3786,
+ "num_input_tokens_seen": 14687048,
+ "step": 1415
+ },
+ {
+ "epoch": 0.4194977843426883,
+ "grad_norm": 13.950543503215677,
+ "learning_rate": 9.318888674940958e-05,
+ "loss": 0.3427,
+ "num_input_tokens_seen": 14739336,
+ "step": 1420
+ },
+ {
+ "epoch": 0.42097488921713444,
+ "grad_norm": 11.615845590184648,
+ "learning_rate": 9.31272215224776e-05,
+ "loss": 0.3307,
+ "num_input_tokens_seen": 14791656,
+ "step": 1425
+ },
+ {
+ "epoch": 0.4224519940915805,
+ "grad_norm": 1.4760161640643292,
+ "learning_rate": 9.306529899897451e-05,
+ "loss": 0.3509,
+ "num_input_tokens_seen": 14843288,
+ "step": 1430
+ },
+ {
+ "epoch": 0.4239290989660266,
+ "grad_norm": 5.925998864255826,
+ "learning_rate": 9.300311954832952e-05,
+ "loss": 0.4168,
+ "num_input_tokens_seen": 14895040,
+ "step": 1435
+ },
+ {
+ "epoch": 0.4254062038404727,
+ "grad_norm": 5.735184902025097,
+ "learning_rate": 9.294068354150455e-05,
+ "loss": 0.3203,
+ "num_input_tokens_seen": 14947448,
+ "step": 1440
+ },
+ {
+ "epoch": 0.42688330871491875,
+ "grad_norm": 19.73355339631789,
+ "learning_rate": 9.287799135099225e-05,
+ "loss": 0.3217,
+ "num_input_tokens_seen": 14999480,
+ "step": 1445
+ },
+ {
+ "epoch": 0.42836041358936483,
+ "grad_norm": 11.876408386949045,
+ "learning_rate": 9.281504335081354e-05,
+ "loss": 0.3131,
+ "num_input_tokens_seen": 15050992,
+ "step": 1450
+ },
+ {
+ "epoch": 0.42836041358936483,
+ "eval_loss": 0.4740215837955475,
+ "eval_runtime": 19.3584,
+ "eval_samples_per_second": 3.099,
+ "eval_steps_per_second": 0.775,
+ "num_input_tokens_seen": 15050992,
+ "step": 1450
  }
  ],
  "logging_steps": 5,
  "max_steps": 6770,
- "num_input_tokens_seen": 14532288,
+ "num_input_tokens_seen": 15050992,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -2518,7 +2607,7 @@
  "attributes": {}
  }
  },
- "total_flos": 958658687795200.0,
+ "total_flos": 992909032161280.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null