ben81828 committed
Commit 7c18492 · verified · 1 Parent(s): 0fa2aa6

Training in progress, step 900, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:271e272f3f98dafac54eabf65fe7c941a42e4c849ca7ed4c3839a08fa408499d
+oid sha256:b2ab1b54ce042c169c01101e1c005c36e39e3afd27d938891c20b92f985d53f5
 size 29034840
last-checkpoint/global_step900/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58ab9661acad608360251cd89464cc408f3343f6a7634f3c53194ae61b4ad6ca
+size 43429616
last-checkpoint/global_step900/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:991672111a64fc25c5a9806fb82a4dabace627f936699d415624fca96d8fed70
+size 43429616
last-checkpoint/global_step900/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50f24af34e2730da78f3ab121c6577278e01e97404fe514680d4605dbf3d1921
+size 43429616
last-checkpoint/global_step900/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:02ed897301f2520e1e17f77facfeb1296b22de983380d387aa2a0f24c54079af
+size 43429616
last-checkpoint/global_step900/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7162c4e09532f9490922bccb0dbbcfe539972e0f3d49af32428a76030c053274
+size 637299
last-checkpoint/global_step900/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a1fabfaa7ff216589f1f45e0de64de68782d934b86fc832996ada7c681c3bd9
+size 637171
last-checkpoint/global_step900/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c68864aa3523e8a7232f1228e6e0dba67ac184874fa8332362ced37b3cbe041a
+size 637171
last-checkpoint/global_step900/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:834a420bd57a2a0fe552734d61c0bcc178574ff6e356d461ed6815bc79695c74
+size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step850
+global_step900
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:36c9044354f826de248840acaaec171f816609c147a664089731a0570deef948
+oid sha256:b8f22ced19e790cc864cefe3b7c711d9ae631c44f95d42fb4829688cc3de0153
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:54f4c4013326db4e7267b656aaf72b86570f8aeee91ad39242a416cf8b963191
+oid sha256:3e0407513eba77d34cbf3adf0e59a58bd80716f4f00f414854253637e82be43d
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:43e910793831957d8685c316138c33eef8867edf60052477dc9ad6ec0c6da901
+oid sha256:6060636c023258ce9b965e244b8a58b4c99d5784dde4405b39737550ef50cd4f
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7cfd0d71ef5a6c58d9f1d46851f4b1e699ca8a50ab3223cfb39668895cffeef2
+oid sha256:c24ccdfdcde39cb2265c82c50c36ffdfcc670f757aba4bcf4bb0fdc6d1373c4c
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0fb2a3d4eafda026308c94c6ad3628bdceb63f20926d1a2696b97a46e83370c4
+oid sha256:7328934baedc90cac30c0635b4180443bb56c44db13a6f9445a42e3e72e30189
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.4665524661540985,
   "best_model_checkpoint": "saves/CADICA_qwenvl_direction_then_DetectAndClassify_scale6/lora/sft/checkpoint-750",
-  "epoch": 0.2511078286558346,
+  "epoch": 0.2658788774002954,
   "eval_steps": 50,
-  "global_step": 850,
+  "global_step": 900,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1520,11 +1520,100 @@
       "eval_steps_per_second": 0.785,
       "num_input_tokens_seen": 8822832,
       "step": 850
+    },
+    {
+      "epoch": 0.25258493353028066,
+      "grad_norm": 8.111385407981246,
+      "learning_rate": 9.841991170128374e-05,
+      "loss": 0.4636,
+      "num_input_tokens_seen": 8875608,
+      "step": 855
+    },
+    {
+      "epoch": 0.25406203840472674,
+      "grad_norm": 4.789169716538139,
+      "learning_rate": 9.838930774181285e-05,
+      "loss": 0.4322,
+      "num_input_tokens_seen": 8927600,
+      "step": 860
+    },
+    {
+      "epoch": 0.2555391432791728,
+      "grad_norm": 3.6029916519925167,
+      "learning_rate": 9.835841509221725e-05,
+      "loss": 0.4302,
+      "num_input_tokens_seen": 8980224,
+      "step": 865
+    },
+    {
+      "epoch": 0.2570162481536189,
+      "grad_norm": 17.138905616592684,
+      "learning_rate": 9.83272339368022e-05,
+      "loss": 0.5231,
+      "num_input_tokens_seen": 9032112,
+      "step": 870
+    },
+    {
+      "epoch": 0.258493353028065,
+      "grad_norm": 6.810210745159563,
+      "learning_rate": 9.829576446159416e-05,
+      "loss": 0.4414,
+      "num_input_tokens_seen": 9084480,
+      "step": 875
+    },
+    {
+      "epoch": 0.25997045790251105,
+      "grad_norm": 6.785950897404188,
+      "learning_rate": 9.826400685433968e-05,
+      "loss": 0.4469,
+      "num_input_tokens_seen": 9136816,
+      "step": 880
+    },
+    {
+      "epoch": 0.2614475627769572,
+      "grad_norm": 14.335926789263953,
+      "learning_rate": 9.823196130450434e-05,
+      "loss": 0.3859,
+      "num_input_tokens_seen": 9189808,
+      "step": 885
+    },
+    {
+      "epoch": 0.26292466765140327,
+      "grad_norm": 24.791700587075013,
+      "learning_rate": 9.819962800327156e-05,
+      "loss": 0.4794,
+      "num_input_tokens_seen": 9241712,
+      "step": 890
+    },
+    {
+      "epoch": 0.26440177252584934,
+      "grad_norm": 16.38282434047279,
+      "learning_rate": 9.81670071435415e-05,
+      "loss": 0.4476,
+      "num_input_tokens_seen": 9293328,
+      "step": 895
+    },
+    {
+      "epoch": 0.2658788774002954,
+      "grad_norm": 3.8069696135300846,
+      "learning_rate": 9.813409891992988e-05,
+      "loss": 0.4852,
+      "num_input_tokens_seen": 9345160,
+      "step": 900
+    },
+    {
+      "epoch": 0.2658788774002954,
+      "eval_loss": 0.5605542063713074,
+      "eval_runtime": 19.0274,
+      "eval_samples_per_second": 3.153,
+      "eval_steps_per_second": 0.788,
+      "num_input_tokens_seen": 9345160,
+      "step": 900
     }
   ],
   "logging_steps": 5,
   "max_steps": 6770,
-  "num_input_tokens_seen": 8822832,
+  "num_input_tokens_seen": 9345160,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -1539,7 +1628,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 582086830587904.0,
+  "total_flos": 616551927513088.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null