ben81828 committed
Commit 3bde8c0 · verified · 1 Parent(s): 4bfb543

Training in progress, step 900, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c194e4011ebc3d53bf21924236aa69f02cd3e886bdcc096049ceabfa8e037964
+ oid sha256:40f0e52182147fdabf2ddf1a520d0249faf099e8bb9941553d5a51b579af5e1c
  size 29034840
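
The three-line blocks in this diff are Git LFS pointer files (version / oid / size), not the binary weights themselves; only the sha256 oid changes when the adapter is overwritten. As a minimal sketch of that format, the helper below parses a pointer file into its fields. The path and helper name are illustrative, and it assumes the file has not yet been smudged into the real binary by `git lfs pull`.

```python
# Minimal sketch: parse a Git LFS pointer file of the form shown above
# (version / oid sha256:<hex> / size <bytes>). Path and helper name are illustrative.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

info = parse_lfs_pointer("last-checkpoint/adapter_model.safetensors")
print(info["oid"], info["size"])  # e.g. sha256:40f0e5... 29034840
```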
last-checkpoint/global_step900/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c9631c106cb8b26664f394a3d23049a6f4c0d0bc6a6a8495950b281ae140f7e
+ size 43429616
last-checkpoint/global_step900/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe823b5b1c88136ad94b5c3a9d0afea951c64b43a6156d5e8670e2bbf78a2f7f
+ size 43429616
last-checkpoint/global_step900/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a89a70cce1ec47138d3922ec3f729aca85f6c3337c0181dbfe59769ff36db918
+ size 43429616
last-checkpoint/global_step900/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4fd1efff697a8e5667983434a9fc17a802976818d4ec8406c787241c33647977
+ size 43429616
last-checkpoint/global_step900/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7162c4e09532f9490922bccb0dbbcfe539972e0f3d49af32428a76030c053274
+ size 637299
last-checkpoint/global_step900/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a1fabfaa7ff216589f1f45e0de64de68782d934b86fc832996ada7c681c3bd9
+ size 637171
last-checkpoint/global_step900/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c68864aa3523e8a7232f1228e6e0dba67ac184874fa8332362ced37b3cbe041a
+ size 637171
last-checkpoint/global_step900/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:834a420bd57a2a0fe552734d61c0bcc178574ff6e356d461ed6815bc79695c74
+ size 637171
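
The global_step900 directory added above holds the DeepSpeed ZeRO shards for this step: one bf16_zero_pp_rank_*_optim_states.pt and one zero_pp_rank_*_model_states.pt per data-parallel rank (four ranks here). If a single consolidated state dict is needed rather than per-rank shards, DeepSpeed ships a conversion utility for this layout; the sketch below is an assumption-laden example (it requires DeepSpeed to be installed, and applicability depends on the ZeRO configuration actually used for this run).

```python
# Minimal sketch, assuming DeepSpeed is installed and the layout matches this commit:
# last-checkpoint/latest plus last-checkpoint/global_step900/ holding per-rank ZeRO shards.
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# Merges the per-rank zero_pp_rank_*_mp_rank_00 shards into one fp32 state dict
# keyed by parameter name (here, the trainable LoRA parameters).
state_dict = get_fp32_state_dict_from_zero_checkpoint("last-checkpoint", tag="global_step900")
print(f"{len(state_dict)} consolidated tensors")
```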
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step850
+ global_step900
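
last-checkpoint/latest is the tag file DeepSpeed reads to locate the newest step directory; this commit bumps it from global_step850 to global_step900. A minimal, stdlib-only sketch of resolving it:

```python
# Minimal sketch: resolve the shard directory named by last-checkpoint/latest.
from pathlib import Path

ckpt_root = Path("last-checkpoint")
tag = (ckpt_root / "latest").read_text().strip()  # "global_step900" after this commit
step_dir = ckpt_root / tag                        # directory containing the ZeRO shards
print(step_dir)
```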
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:36c9044354f826de248840acaaec171f816609c147a664089731a0570deef948
+ oid sha256:b8f22ced19e790cc864cefe3b7c711d9ae631c44f95d42fb4829688cc3de0153
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:54f4c4013326db4e7267b656aaf72b86570f8aeee91ad39242a416cf8b963191
+ oid sha256:3e0407513eba77d34cbf3adf0e59a58bd80716f4f00f414854253637e82be43d
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:43e910793831957d8685c316138c33eef8867edf60052477dc9ad6ec0c6da901
+ oid sha256:6060636c023258ce9b965e244b8a58b4c99d5784dde4405b39737550ef50cd4f
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7cfd0d71ef5a6c58d9f1d46851f4b1e699ca8a50ab3223cfb39668895cffeef2
+ oid sha256:c24ccdfdcde39cb2265c82c50c36ffdfcc670f757aba4bcf4bb0fdc6d1373c4c
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3e24eaa8963cd872c048c2c655789d678b8c3fcd1c77ca0d663feee5857d2e34
+ oid sha256:feb4015894f59edc29c71bc4938b5d4ab98daad34a38a3d387b308b3b1d4b280
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 0.5219093561172485,
- "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_detect_scale4/lora/sft/checkpoint-800",
- "epoch": 0.21890291012104043,
+ "best_metric": 0.49604204297065735,
+ "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_detect_scale4/lora/sft/checkpoint-900",
+ "epoch": 0.23177955189286634,
  "eval_steps": 50,
- "global_step": 850,
+ "global_step": 900,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -1520,11 +1520,100 @@
  "eval_steps_per_second": 0.768,
  "num_input_tokens_seen": 8919608,
  "step": 850
+ },
+ {
+ "epoch": 0.22019057429822303,
+ "grad_norm": 4.339535962830116,
+ "learning_rate": 8.930720915538487e-05,
+ "loss": 0.5853,
+ "num_input_tokens_seen": 8971048,
+ "step": 855
+ },
+ {
+ "epoch": 0.22147823847540563,
+ "grad_norm": 6.118436891847819,
+ "learning_rate": 8.915646321965614e-05,
+ "loss": 0.5534,
+ "num_input_tokens_seen": 9022936,
+ "step": 860
+ },
+ {
+ "epoch": 0.2227659026525882,
+ "grad_norm": 3.3997835203618667,
+ "learning_rate": 8.900479122779712e-05,
+ "loss": 0.5623,
+ "num_input_tokens_seen": 9075336,
+ "step": 865
+ },
+ {
+ "epoch": 0.2240535668297708,
+ "grad_norm": 4.188326935911128,
+ "learning_rate": 8.885219676687277e-05,
+ "loss": 0.5561,
+ "num_input_tokens_seen": 9127688,
+ "step": 870
+ },
+ {
+ "epoch": 0.22534123100695339,
+ "grad_norm": 5.220175192497493,
+ "learning_rate": 8.869868344576459e-05,
+ "loss": 0.5449,
+ "num_input_tokens_seen": 9180624,
+ "step": 875
+ },
+ {
+ "epoch": 0.22662889518413598,
+ "grad_norm": 2.2022914161050577,
+ "learning_rate": 8.854425489508532e-05,
+ "loss": 0.5062,
+ "num_input_tokens_seen": 9233176,
+ "step": 880
+ },
+ {
+ "epoch": 0.22791655936131858,
+ "grad_norm": 4.62379059067999,
+ "learning_rate": 8.838891476709288e-05,
+ "loss": 0.5033,
+ "num_input_tokens_seen": 9286688,
+ "step": 885
+ },
+ {
+ "epoch": 0.22920422353850115,
+ "grad_norm": 3.639684630492015,
+ "learning_rate": 8.823266673560426e-05,
+ "loss": 0.4845,
+ "num_input_tokens_seen": 9339600,
+ "step": 890
+ },
+ {
+ "epoch": 0.23049188771568374,
+ "grad_norm": 4.131757647310936,
+ "learning_rate": 8.807551449590846e-05,
+ "loss": 0.5595,
+ "num_input_tokens_seen": 9391536,
+ "step": 895
+ },
+ {
+ "epoch": 0.23177955189286634,
+ "grad_norm": 4.771128685196347,
+ "learning_rate": 8.791746176467907e-05,
+ "loss": 0.5251,
+ "num_input_tokens_seen": 9443616,
+ "step": 900
+ },
+ {
+ "epoch": 0.23177955189286634,
+ "eval_loss": 0.49604204297065735,
+ "eval_runtime": 39.5289,
+ "eval_samples_per_second": 3.036,
+ "eval_steps_per_second": 0.759,
+ "num_input_tokens_seen": 9443616,
+ "step": 900
  }
  ],
  "logging_steps": 5,
  "max_steps": 3400,
- "num_input_tokens_seen": 8919608,
+ "num_input_tokens_seen": 9443616,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -1539,7 +1628,7 @@
  "attributes": {}
  }
  },
- "total_flos": 588414853316608.0,
+ "total_flos": 622973818765312.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null