ben81828 committed
Commit 8320abb · verified · 1 Parent(s): a69aba7

Training in progress, step 900, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bbb88cd2ad49a28dfd1cc411e4818df1e3111793a50f870549a2cf9e7bc00e62
+ oid sha256:bd3d3749c1bd6d89c5172acc406cbf998cb265de2f8a059883bd34374666967b
  size 18516456
last-checkpoint/global_step900/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:964dceb2d1c22487dfbebd0d53b2f63cedabf0e75e9fdd5e0cb695b97b0db0dc
+ size 27700976
last-checkpoint/global_step900/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e4d4887bea23f6c293fde5bd52a7c93321a682d20078bd1062171d640d30fc58
+ size 27700976
last-checkpoint/global_step900/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8685412a783a9bc817af6781ffae7ede79677eed452dc9dcec24a05927fc12bd
+ size 27700976
last-checkpoint/global_step900/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e123ddc7d82c0c4a1d3c8efe3135d938104bd399f98cf72628c9c03187f96d03
+ size 27700976
last-checkpoint/global_step900/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f6784c2006ba47df1330cad40d3f78cd8df0c9123cc6f8133a332365da69d320
+ size 411571
last-checkpoint/global_step900/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:593f93ba7969fb31276e8b0d92b9626d76b4d0011b4d4d9c5984437d53477ee1
+ size 411507
last-checkpoint/global_step900/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d1ad7550f305c82bd88f505357dac1dd21c2a343a42c6fd0af8b64e3340d393
+ size 411507
last-checkpoint/global_step900/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ae307050315e3d6f9b17d0fa78301afb1254ce58b4687170fb920516e319b71
+ size 411507
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step850
+ global_step900
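The change above moves DeepSpeed's "latest" tag from global_step850 to global_step900; that tag names the subdirectory holding the ZeRO optimizer/model state shards added in this commit. A minimal sketch for checking it locally, assuming the repo has been downloaded to ./last-checkpoint (the path is an assumption, not part of the commit):

from pathlib import Path

# Minimal sketch, path assumed: DeepSpeed's `latest` file records the current
# checkpoint tag; after this commit it should read "global_step900".
ckpt_dir = Path("last-checkpoint")
tag = (ckpt_dir / "latest").read_text().strip()
print(tag)
# List the optimizer/model state shards stored under that tag.
print(sorted(p.name for p in (ckpt_dir / tag).glob("*.pt")))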
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:36c9044354f826de248840acaaec171f816609c147a664089731a0570deef948
+ oid sha256:b8f22ced19e790cc864cefe3b7c711d9ae631c44f95d42fb4829688cc3de0153
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:54f4c4013326db4e7267b656aaf72b86570f8aeee91ad39242a416cf8b963191
+ oid sha256:3e0407513eba77d34cbf3adf0e59a58bd80716f4f00f414854253637e82be43d
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:43e910793831957d8685c316138c33eef8867edf60052477dc9ad6ec0c6da901
+ oid sha256:6060636c023258ce9b965e244b8a58b4c99d5784dde4405b39737550ef50cd4f
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7cfd0d71ef5a6c58d9f1d46851f4b1e699ca8a50ab3223cfb39668895cffeef2
+ oid sha256:c24ccdfdcde39cb2265c82c50c36ffdfcc670f757aba4bcf4bb0fdc6d1373c4c
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3e24eaa8963cd872c048c2c655789d678b8c3fcd1c77ca0d663feee5857d2e34
+ oid sha256:feb4015894f59edc29c71bc4938b5d4ab98daad34a38a3d387b308b3b1d4b280
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.8779178261756897,
  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4_frozenVision/lora/sft/checkpoint-850",
- "epoch": 0.43780582024208087,
+ "epoch": 0.4635591037857327,
  "eval_steps": 50,
- "global_step": 850,
+ "global_step": 900,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -1520,11 +1520,100 @@
  "eval_steps_per_second": 0.928,
  "num_input_tokens_seen": 9941896,
  "step": 850
+ },
+ {
+ "epoch": 0.44038114859644606,
+ "grad_norm": 0.8207188524660312,
+ "learning_rate": 8.930720915538487e-05,
+ "loss": 0.8516,
+ "num_input_tokens_seen": 10000336,
+ "step": 855
+ },
+ {
+ "epoch": 0.44295647695081125,
+ "grad_norm": 1.5881804699369033,
+ "learning_rate": 8.915646321965614e-05,
+ "loss": 0.9206,
+ "num_input_tokens_seen": 10058816,
+ "step": 860
+ },
+ {
+ "epoch": 0.4455318053051764,
+ "grad_norm": 0.3364043503653687,
+ "learning_rate": 8.900479122779712e-05,
+ "loss": 0.9028,
+ "num_input_tokens_seen": 10117320,
+ "step": 865
+ },
+ {
+ "epoch": 0.4481071336595416,
+ "grad_norm": 0.2888069815557639,
+ "learning_rate": 8.885219676687277e-05,
+ "loss": 0.8991,
+ "num_input_tokens_seen": 10175824,
+ "step": 870
+ },
+ {
+ "epoch": 0.45068246201390677,
+ "grad_norm": 0.26081919755231314,
+ "learning_rate": 8.869868344576459e-05,
+ "loss": 0.8934,
+ "num_input_tokens_seen": 10234288,
+ "step": 875
+ },
+ {
+ "epoch": 0.45325779036827196,
+ "grad_norm": 0.1672074260476841,
+ "learning_rate": 8.854425489508532e-05,
+ "loss": 0.8908,
+ "num_input_tokens_seen": 10292736,
+ "step": 880
+ },
+ {
+ "epoch": 0.45583311872263715,
+ "grad_norm": 0.3141498425127344,
+ "learning_rate": 8.838891476709288e-05,
+ "loss": 0.8988,
+ "num_input_tokens_seen": 10351224,
+ "step": 885
+ },
+ {
+ "epoch": 0.4584084470770023,
+ "grad_norm": 0.28442383194638554,
+ "learning_rate": 8.823266673560426e-05,
+ "loss": 0.8965,
+ "num_input_tokens_seen": 10409736,
+ "step": 890
+ },
+ {
+ "epoch": 0.4609837754313675,
+ "grad_norm": 0.24793143025843287,
+ "learning_rate": 8.807551449590846e-05,
+ "loss": 0.8989,
+ "num_input_tokens_seen": 10468240,
+ "step": 895
+ },
+ {
+ "epoch": 0.4635591037857327,
+ "grad_norm": 0.18173090045802157,
+ "learning_rate": 8.791746176467907e-05,
+ "loss": 0.8961,
+ "num_input_tokens_seen": 10526712,
+ "step": 900
+ },
+ {
+ "epoch": 0.4635591037857327,
+ "eval_loss": 0.891426146030426,
+ "eval_runtime": 16.0357,
+ "eval_samples_per_second": 3.742,
+ "eval_steps_per_second": 0.935,
+ "num_input_tokens_seen": 10526712,
+ "step": 900
  }
  ],
  "logging_steps": 5,
  "max_steps": 3400,
- "num_input_tokens_seen": 9941896,
+ "num_input_tokens_seen": 10526712,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -1539,7 +1628,7 @@
  "attributes": {}
  }
  },
- "total_flos": 558240938328064.0,
+ "total_flos": 591081256517632.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null