Leensman committed
Commit 1e4102a
1 Parent(s): be24bcb

Upload 7 files

Files changed (5):
  1. model.safetensors +1 -1
  2. optimizer.pt +1 -1
  3. rng_state.pth +2 -2
  4. scheduler.pt +1 -1
  5. trainer_state.json +424 -4
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8574cb86343640341bd8327435c088653b37c2fd94cd02b8a3e79f0d17fb41e8
+ oid sha256:bdec6f88038c81c955dda01b871d8d42eaadefd21b4f6f79e3758df6fd154fb6
  size 15624300
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:103e71fa9539fe954fb924fc8711bc3ac443474890b858138227d25189426079
+ oid sha256:97a166e4a10ada2cd3a3edd9954d00f2fd3d8748617dc2d55a97752e58da6434
  size 31315002
rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:66dffff880309c3ffd4353adfa35078070a3dd0eceaeed5843012743b07f5485
- size 14180
+ oid sha256:4297c6aa4fab2df159d13e5498d5bce0185f3ff8b450b5f350d59db6f13e2e25
+ size 14244
scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:07d269727d16e814cf8bc7a7a09468a8d9b1d31aa4b9c95bf8e4dbc1c031e0b2
+ oid sha256:9b35e8e40d820aef661ff46476809897e319a86923f89859ed00dadef19c7a95
  size 1000
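
The four binary files above are tracked with Git LFS, so the diffs touch only their pointer stubs: three text lines giving the spec version, the sha256 oid of the real blob, and its size in bytes. Re-uploading a file therefore shows up as an oid change (and, for rng_state.pth, a size change). A minimal Python sketch for reading a pointer back out; the path argument and example output are illustrative, not part of this repo:

from pathlib import Path

def parse_lfs_pointer(path: str) -> tuple[str, int]:
    # A Git LFS pointer is plain text: "version <url>", "oid sha256:<hex>", "size <bytes>".
    fields = dict(line.split(" ", 1) for line in Path(path).read_text().splitlines() if line)
    oid = fields["oid"].removeprefix("sha256:")  # hex digest of the actual blob
    return oid, int(fields["size"])

# e.g. parse_lfs_pointer("scheduler.pt") -> ("9b35e8e4...", 1000)
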
trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 0.9827606730601367,
- "best_model_checkpoint": "../results/SersegformerForSemanticSegmentation_decoder/checkpoint-21500",
- "epoch": 15.357142857142858,
+ "best_metric": 0.9828028941855711,
+ "best_model_checkpoint": "../results/SersegformerForSemanticSegmentation_decoder/checkpoint-27500",
+ "epoch": 19.642857142857142,
  "eval_steps": 500,
- "global_step": 21500,
+ "global_step": 27500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -1527,6 +1527,426 @@
  "epoch": 15.36,
  "eval_iou": 0.9827606730601367,
  "step": 21500
+ },
+ {
+ "epoch": 15.43,
+ "learning_rate": 3.0271851851851857e-05,
+ "loss": 0.0169,
+ "step": 21600
+ },
+ {
+ "epoch": 15.5,
+ "learning_rate": 3.0123950617283952e-05,
+ "loss": 0.0175,
+ "step": 21700
+ },
+ {
+ "epoch": 15.57,
+ "learning_rate": 2.9976049382716048e-05,
+ "loss": 0.0178,
+ "step": 21800
+ },
+ {
+ "epoch": 15.64,
+ "learning_rate": 2.982814814814815e-05,
+ "loss": 0.0184,
+ "step": 21900
+ },
+ {
+ "epoch": 15.71,
+ "learning_rate": 2.9680246913580243e-05,
+ "loss": 0.0167,
+ "step": 22000
+ },
+ {
+ "epoch": 15.71,
+ "eval_iou": 0.9823599522020302,
+ "step": 22000
+ },
+ {
+ "epoch": 15.79,
+ "learning_rate": 2.9532345679012346e-05,
+ "loss": 0.0171,
+ "step": 22100
+ },
+ {
+ "epoch": 15.86,
+ "learning_rate": 2.9384444444444445e-05,
+ "loss": 0.0171,
+ "step": 22200
+ },
+ {
+ "epoch": 15.93,
+ "learning_rate": 2.923654320987654e-05,
+ "loss": 0.0162,
+ "step": 22300
+ },
+ {
+ "epoch": 16.0,
+ "learning_rate": 2.908864197530864e-05,
+ "loss": 0.0172,
+ "step": 22400
+ },
+ {
+ "epoch": 16.07,
+ "learning_rate": 2.8940740740740743e-05,
+ "loss": 0.0168,
+ "step": 22500
+ },
+ {
+ "epoch": 16.07,
+ "eval_iou": 0.9826575839052013,
+ "step": 22500
+ },
+ {
+ "epoch": 16.14,
+ "learning_rate": 2.8792839506172835e-05,
+ "loss": 0.0183,
+ "step": 22600
+ },
+ {
+ "epoch": 16.21,
+ "learning_rate": 2.8644938271604934e-05,
+ "loss": 0.0174,
+ "step": 22700
+ },
+ {
+ "epoch": 16.29,
+ "learning_rate": 2.8497037037037037e-05,
+ "loss": 0.0166,
+ "step": 22800
+ },
+ {
+ "epoch": 16.36,
+ "learning_rate": 2.8349135802469136e-05,
+ "loss": 0.0166,
+ "step": 22900
+ },
+ {
+ "epoch": 16.43,
+ "learning_rate": 2.8201234567901232e-05,
+ "loss": 0.0184,
+ "step": 23000
+ },
+ {
+ "epoch": 16.43,
+ "eval_iou": 0.9823403113028583,
+ "step": 23000
+ },
+ {
+ "epoch": 16.5,
+ "learning_rate": 2.805333333333333e-05,
+ "loss": 0.0172,
+ "step": 23100
+ },
+ {
+ "epoch": 16.57,
+ "learning_rate": 2.7905432098765434e-05,
+ "loss": 0.018,
+ "step": 23200
+ },
+ {
+ "epoch": 16.64,
+ "learning_rate": 2.7757530864197526e-05,
+ "loss": 0.0177,
+ "step": 23300
+ },
+ {
+ "epoch": 16.71,
+ "learning_rate": 2.760962962962963e-05,
+ "loss": 0.0174,
+ "step": 23400
+ },
+ {
+ "epoch": 16.79,
+ "learning_rate": 2.7461728395061728e-05,
+ "loss": 0.0176,
+ "step": 23500
+ },
+ {
+ "epoch": 16.79,
+ "eval_iou": 0.982518440952488,
+ "step": 23500
+ },
+ {
+ "epoch": 16.86,
+ "learning_rate": 2.7313827160493824e-05,
+ "loss": 0.0173,
+ "step": 23600
+ },
+ {
+ "epoch": 16.93,
+ "learning_rate": 2.7165925925925923e-05,
+ "loss": 0.0173,
+ "step": 23700
+ },
+ {
+ "epoch": 17.0,
+ "learning_rate": 2.7018024691358026e-05,
+ "loss": 0.0173,
+ "step": 23800
+ },
+ {
+ "epoch": 17.07,
+ "learning_rate": 2.6870123456790125e-05,
+ "loss": 0.0163,
+ "step": 23900
+ },
+ {
+ "epoch": 17.14,
+ "learning_rate": 2.672222222222222e-05,
+ "loss": 0.0174,
+ "step": 24000
+ },
+ {
+ "epoch": 17.14,
+ "eval_iou": 0.9825776663481021,
+ "step": 24000
+ },
+ {
+ "epoch": 17.21,
+ "learning_rate": 2.657432098765432e-05,
+ "loss": 0.0167,
+ "step": 24100
+ },
+ {
+ "epoch": 17.29,
+ "learning_rate": 2.642641975308642e-05,
+ "loss": 0.0172,
+ "step": 24200
+ },
+ {
+ "epoch": 17.36,
+ "learning_rate": 2.6278518518518515e-05,
+ "loss": 0.0167,
+ "step": 24300
+ },
+ {
+ "epoch": 17.43,
+ "learning_rate": 2.6130617283950614e-05,
+ "loss": 0.0173,
+ "step": 24400
+ },
+ {
+ "epoch": 17.5,
+ "learning_rate": 2.5982716049382717e-05,
+ "loss": 0.0167,
+ "step": 24500
+ },
+ {
+ "epoch": 17.5,
+ "eval_iou": 0.9825901704676011,
+ "step": 24500
+ },
+ {
+ "epoch": 17.57,
+ "learning_rate": 2.583481481481481e-05,
+ "loss": 0.0169,
+ "step": 24600
+ },
+ {
+ "epoch": 17.64,
+ "learning_rate": 2.5686913580246912e-05,
+ "loss": 0.0168,
+ "step": 24700
+ },
+ {
+ "epoch": 17.71,
+ "learning_rate": 2.553901234567901e-05,
+ "loss": 0.0175,
+ "step": 24800
+ },
+ {
+ "epoch": 17.79,
+ "learning_rate": 2.5391111111111114e-05,
+ "loss": 0.0183,
+ "step": 24900
+ },
+ {
+ "epoch": 17.86,
+ "learning_rate": 2.5243209876543206e-05,
+ "loss": 0.0169,
+ "step": 25000
+ },
+ {
+ "epoch": 17.86,
+ "eval_iou": 0.9825106870894339,
+ "step": 25000
+ },
+ {
+ "epoch": 17.93,
+ "learning_rate": 2.509530864197531e-05,
+ "loss": 0.0172,
+ "step": 25100
+ },
+ {
+ "epoch": 18.0,
+ "learning_rate": 2.4947407407407408e-05,
+ "loss": 0.017,
+ "step": 25200
+ },
+ {
+ "epoch": 18.07,
+ "learning_rate": 2.4799506172839504e-05,
+ "loss": 0.0175,
+ "step": 25300
+ },
+ {
+ "epoch": 18.14,
+ "learning_rate": 2.4651604938271603e-05,
+ "loss": 0.0162,
+ "step": 25400
+ },
+ {
+ "epoch": 18.21,
+ "learning_rate": 2.4503703703703706e-05,
+ "loss": 0.0161,
+ "step": 25500
+ },
+ {
+ "epoch": 18.21,
+ "eval_iou": 0.9824175115893868,
+ "step": 25500
+ },
+ {
+ "epoch": 18.29,
+ "learning_rate": 2.4355802469135798e-05,
+ "loss": 0.0166,
+ "step": 25600
+ },
+ {
+ "epoch": 18.36,
+ "learning_rate": 2.42079012345679e-05,
+ "loss": 0.0171,
+ "step": 25700
+ },
+ {
+ "epoch": 18.43,
+ "learning_rate": 2.406e-05,
+ "loss": 0.0176,
+ "step": 25800
+ },
+ {
+ "epoch": 18.5,
+ "learning_rate": 2.3912098765432092e-05,
+ "loss": 0.0173,
+ "step": 25900
+ },
+ {
+ "epoch": 18.57,
+ "learning_rate": 2.3764197530864195e-05,
+ "loss": 0.0171,
+ "step": 26000
+ },
+ {
+ "epoch": 18.57,
+ "eval_iou": 0.9823989424051023,
+ "step": 26000
+ },
+ {
+ "epoch": 18.64,
+ "learning_rate": 2.3616296296296294e-05,
+ "loss": 0.0163,
+ "step": 26100
+ },
+ {
+ "epoch": 18.71,
+ "learning_rate": 2.3468395061728397e-05,
+ "loss": 0.0185,
+ "step": 26200
+ },
+ {
+ "epoch": 18.79,
+ "learning_rate": 2.332049382716049e-05,
+ "loss": 0.0164,
+ "step": 26300
+ },
+ {
+ "epoch": 18.86,
+ "learning_rate": 2.3172592592592592e-05,
+ "loss": 0.0169,
+ "step": 26400
+ },
+ {
+ "epoch": 18.93,
+ "learning_rate": 2.302469135802469e-05,
+ "loss": 0.0173,
+ "step": 26500
+ },
+ {
+ "epoch": 18.93,
+ "eval_iou": 0.9824401590169645,
+ "step": 26500
+ },
+ {
+ "epoch": 19.0,
+ "learning_rate": 2.2876790123456787e-05,
+ "loss": 0.0173,
+ "step": 26600
+ },
+ {
+ "epoch": 19.07,
+ "learning_rate": 2.2728888888888886e-05,
+ "loss": 0.017,
+ "step": 26700
+ },
+ {
+ "epoch": 19.14,
+ "learning_rate": 2.258098765432099e-05,
+ "loss": 0.0171,
+ "step": 26800
+ },
+ {
+ "epoch": 19.21,
+ "learning_rate": 2.243308641975308e-05,
+ "loss": 0.0169,
+ "step": 26900
+ },
+ {
+ "epoch": 19.29,
+ "learning_rate": 2.2285185185185184e-05,
+ "loss": 0.0169,
+ "step": 27000
+ },
+ {
+ "epoch": 19.29,
+ "eval_iou": 0.9826161189406526,
+ "step": 27000
+ },
+ {
+ "epoch": 19.36,
+ "learning_rate": 2.2137283950617283e-05,
+ "loss": 0.0176,
+ "step": 27100
+ },
+ {
+ "epoch": 19.43,
+ "learning_rate": 2.1989382716049386e-05,
+ "loss": 0.0163,
+ "step": 27200
+ },
+ {
+ "epoch": 19.5,
+ "learning_rate": 2.1841481481481478e-05,
+ "loss": 0.0168,
+ "step": 27300
+ },
+ {
+ "epoch": 19.57,
+ "learning_rate": 2.169358024691358e-05,
+ "loss": 0.0154,
+ "step": 27400
+ },
+ {
+ "epoch": 19.64,
+ "learning_rate": 2.154567901234568e-05,
+ "loss": 0.0167,
+ "step": 27500
+ },
+ {
+ "epoch": 19.64,
+ "eval_iou": 0.9828028941855711,
+ "step": 27500
  }
  ],
  "logging_steps": 100,