joelniklaus committed
Commit 3fde640
Parent: b10cefd

Training in progress, step 950000

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:345bb733106532a0f4478559f3788cc16626f2b03843c21bbf46700517f092e5
+ oid sha256:70e1cdb73c33912fdfbd44c7802198f3da92e5f2948f144c655672394f3783bb
 size 885325017
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:8c1921a1c1cbf8718fc703fed5923d8983ae5a3c94f3fb08cc5b295115ded811
+ oid sha256:ba23237eabaae268f92a0c5e73a78704a91d8fb58eebea9dce3e91dd8ad4e295
 size 442675755
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:78b6318c1c37a88a7a88d1bea333e6b55fd0f1d3338fd7f99e67179de2e57d78
+ oid sha256:94fe5ae4f0bd12aa4a80be8d9d76c7c97add23774d07fd0efb2832b983be474d
 size 13611
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:78b6318c1c37a88a7a88d1bea333e6b55fd0f1d3338fd7f99e67179de2e57d78
+ oid sha256:94fe5ae4f0bd12aa4a80be8d9d76c7c97add23774d07fd0efb2832b983be474d
 size 13611
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:78b6318c1c37a88a7a88d1bea333e6b55fd0f1d3338fd7f99e67179de2e57d78
+ oid sha256:94fe5ae4f0bd12aa4a80be8d9d76c7c97add23774d07fd0efb2832b983be474d
 size 13611
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:78b6318c1c37a88a7a88d1bea333e6b55fd0f1d3338fd7f99e67179de2e57d78
+ oid sha256:94fe5ae4f0bd12aa4a80be8d9d76c7c97add23774d07fd0efb2832b983be474d
 size 13611
last-checkpoint/rng_state_4.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:78b6318c1c37a88a7a88d1bea333e6b55fd0f1d3338fd7f99e67179de2e57d78
+ oid sha256:94fe5ae4f0bd12aa4a80be8d9d76c7c97add23774d07fd0efb2832b983be474d
 size 13611
last-checkpoint/rng_state_5.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:78b6318c1c37a88a7a88d1bea333e6b55fd0f1d3338fd7f99e67179de2e57d78
+ oid sha256:94fe5ae4f0bd12aa4a80be8d9d76c7c97add23774d07fd0efb2832b983be474d
 size 13611
last-checkpoint/rng_state_6.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:78b6318c1c37a88a7a88d1bea333e6b55fd0f1d3338fd7f99e67179de2e57d78
+ oid sha256:94fe5ae4f0bd12aa4a80be8d9d76c7c97add23774d07fd0efb2832b983be474d
 size 13611
last-checkpoint/rng_state_7.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:78b6318c1c37a88a7a88d1bea333e6b55fd0f1d3338fd7f99e67179de2e57d78
+ oid sha256:94fe5ae4f0bd12aa4a80be8d9d76c7c97add23774d07fd0efb2832b983be474d
 size 13611
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:98fbf159ce1bb90afdab5d6ac994b4ab633fc21d8eb6c04c41c7f3a26253e5b5
+ oid sha256:bc2c2c8416f63b11e9c82d6dac05baa6ad73177ac658621e099b23ff71f2f801
 size 623
last-checkpoint/trainer_state.json CHANGED
@@ -1,8 +1,8 @@
 {
 "best_metric": null,
 "best_model_checkpoint": null,
- "epoch": 6.095208,
- "global_step": 900000,
+ "epoch": 7.011076,
+ "global_step": 950000,
 "is_hyper_param_search": false,
 "is_local_process_zero": true,
 "is_world_process_zero": true,
@@ -5550,11 +5550,319 @@
 "eval_samples_per_second": 418.845,
 "eval_steps_per_second": 3.351,
 "step": 900000
+ },
+ {
+ "epoch": 6.1,
+ "learning_rate": 2.6557085182532582e-06,
+ "loss": 0.622,
+ "step": 901000
+ },
+ {
+ "epoch": 6.1,
+ "learning_rate": 2.602796871124663e-06,
+ "loss": 0.5507,
+ "step": 902000
+ },
+ {
+ "epoch": 6.1,
+ "learning_rate": 2.5504035522157854e-06,
+ "loss": 0.5153,
+ "step": 903000
+ },
+ {
+ "epoch": 6.1,
+ "learning_rate": 2.4985291344915674e-06,
+ "loss": 0.5228,
+ "step": 904000
+ },
+ {
+ "epoch": 6.1,
+ "learning_rate": 2.4471741852423237e-06,
+ "loss": 0.6137,
+ "step": 905000
+ },
+ {
+ "epoch": 6.1,
+ "learning_rate": 2.3963392660775575e-06,
+ "loss": 0.6166,
+ "step": 906000
+ },
+ {
+ "epoch": 6.1,
+ "learning_rate": 2.3460249329197824e-06,
+ "loss": 0.5621,
+ "step": 907000
+ },
+ {
+ "epoch": 6.1,
+ "learning_rate": 2.296231735998511e-06,
+ "loss": 0.5129,
+ "step": 908000
+ },
+ {
+ "epoch": 6.1,
+ "learning_rate": 2.2469602198441573e-06,
+ "loss": 0.4872,
+ "step": 909000
+ },
+ {
+ "epoch": 6.11,
+ "learning_rate": 2.1982109232821178e-06,
+ "loss": 0.5815,
+ "step": 910000
+ },
+ {
+ "epoch": 6.11,
+ "learning_rate": 2.149984379426906e-06,
+ "loss": 0.6018,
+ "step": 911000
+ },
+ {
+ "epoch": 6.11,
+ "learning_rate": 2.102281115676258e-06,
+ "loss": 0.618,
+ "step": 912000
+ },
+ {
+ "epoch": 6.11,
+ "learning_rate": 2.0551016537054493e-06,
+ "loss": 0.5653,
+ "step": 913000
+ },
+ {
+ "epoch": 6.11,
+ "learning_rate": 2.008446509461498e-06,
+ "loss": 0.5105,
+ "step": 914000
+ },
+ {
+ "epoch": 6.11,
+ "learning_rate": 1.962316193157593e-06,
+ "loss": 0.5194,
+ "step": 915000
+ },
+ {
+ "epoch": 6.11,
+ "learning_rate": 1.91671120926748e-06,
+ "loss": 0.6175,
+ "step": 916000
+ },
+ {
+ "epoch": 6.11,
+ "learning_rate": 1.8716320565199618e-06,
+ "loss": 0.6009,
+ "step": 917000
+ },
+ {
+ "epoch": 6.11,
+ "learning_rate": 1.8270792278934302e-06,
+ "loss": 0.5788,
+ "step": 918000
+ },
+ {
+ "epoch": 6.11,
+ "learning_rate": 1.7830532106104747e-06,
+ "loss": 0.51,
+ "step": 919000
+ },
+ {
+ "epoch": 6.12,
+ "learning_rate": 1.7395544861325718e-06,
+ "loss": 0.4913,
+ "step": 920000
+ },
+ {
+ "epoch": 6.12,
+ "learning_rate": 1.696583530154794e-06,
+ "loss": 0.572,
+ "step": 921000
+ },
+ {
+ "epoch": 6.12,
+ "learning_rate": 1.6541408126006463e-06,
+ "loss": 0.5939,
+ "step": 922000
+ },
+ {
+ "epoch": 6.12,
+ "learning_rate": 1.6122267976168781e-06,
+ "loss": 0.6208,
+ "step": 923000
+ },
+ {
+ "epoch": 6.12,
+ "learning_rate": 1.5708419435684462e-06,
+ "loss": 0.5745,
+ "step": 924000
+ },
+ {
+ "epoch": 6.12,
+ "learning_rate": 1.5299867030334814e-06,
+ "loss": 0.5067,
+ "step": 925000
+ },
+ {
+ "epoch": 6.12,
+ "learning_rate": 1.4896615227983468e-06,
+ "loss": 0.5172,
+ "step": 926000
+ },
+ {
+ "epoch": 6.12,
+ "learning_rate": 1.4498668438527597e-06,
+ "loss": 0.618,
+ "step": 927000
+ },
+ {
+ "epoch": 6.12,
+ "learning_rate": 1.4106031013849496e-06,
+ "loss": 0.5888,
+ "step": 928000
+ },
+ {
+ "epoch": 6.12,
+ "learning_rate": 1.3718707247769135e-06,
+ "loss": 0.5921,
+ "step": 929000
+ },
+ {
+ "epoch": 6.13,
+ "learning_rate": 1.333670137599713e-06,
+ "loss": 0.5074,
+ "step": 930000
+ },
+ {
+ "epoch": 6.13,
+ "learning_rate": 1.2960017576088446e-06,
+ "loss": 0.4926,
+ "step": 931000
+ },
+ {
+ "epoch": 6.13,
+ "learning_rate": 1.2588659967397e-06,
+ "loss": 0.5667,
+ "step": 932000
+ },
+ {
+ "epoch": 6.13,
+ "learning_rate": 1.222263261102985e-06,
+ "loss": 0.5864,
+ "step": 933000
+ },
+ {
+ "epoch": 6.13,
+ "learning_rate": 1.1861939509803687e-06,
+ "loss": 0.6213,
+ "step": 934000
+ },
+ {
+ "epoch": 6.13,
+ "learning_rate": 1.1506584608200367e-06,
+ "loss": 0.5844,
+ "step": 935000
+ },
+ {
+ "epoch": 6.13,
+ "learning_rate": 1.1156571792324211e-06,
+ "loss": 0.5034,
+ "step": 936000
+ },
+ {
+ "epoch": 6.13,
+ "learning_rate": 1.0811904889859336e-06,
+ "loss": 0.5158,
+ "step": 937000
+ },
+ {
+ "epoch": 6.13,
+ "learning_rate": 1.0472587670027678e-06,
+ "loss": 0.6165,
+ "step": 938000
+ },
+ {
+ "epoch": 7.0,
+ "learning_rate": 1.0138623843548078e-06,
+ "loss": 0.5796,
+ "step": 939000
+ },
+ {
+ "epoch": 7.0,
+ "learning_rate": 9.810017062595322e-07,
+ "loss": 0.5865,
+ "step": 940000
+ },
+ {
+ "epoch": 7.0,
+ "learning_rate": 9.486770920760668e-07,
+ "loss": 0.6477,
+ "step": 941000
+ },
+ {
+ "epoch": 7.0,
+ "learning_rate": 9.168888953011989e-07,
+ "loss": 0.5715,
+ "step": 942000
+ },
+ {
+ "epoch": 7.0,
+ "learning_rate": 8.856374635655695e-07,
+ "loss": 0.4792,
+ "step": 943000
+ },
+ {
+ "epoch": 7.01,
+ "learning_rate": 8.549231386298151e-07,
+ "loss": 0.496,
+ "step": 944000
+ },
+ {
+ "epoch": 7.01,
+ "learning_rate": 8.247462563808817e-07,
+ "loss": 0.5806,
+ "step": 945000
+ },
+ {
+ "epoch": 7.01,
+ "learning_rate": 7.951071468283167e-07,
+ "loss": 0.6195,
+ "step": 946000
+ },
+ {
+ "epoch": 7.01,
+ "learning_rate": 7.66006134100672e-07,
+ "loss": 0.6063,
+ "step": 947000
+ },
+ {
+ "epoch": 7.01,
+ "learning_rate": 7.374435364419674e-07,
+ "loss": 0.5259,
+ "step": 948000
+ },
+ {
+ "epoch": 7.01,
+ "learning_rate": 7.094196662081831e-07,
+ "loss": 0.4908,
+ "step": 949000
+ },
+ {
+ "epoch": 7.01,
+ "learning_rate": 6.819348298638839e-07,
+ "loss": 0.5899,
+ "step": 950000
+ },
+ {
+ "epoch": 7.01,
+ "eval_loss": 0.4320693910121918,
+ "eval_runtime": 10.8868,
+ "eval_samples_per_second": 459.272,
+ "eval_steps_per_second": 3.674,
+ "step": 950000
 }
 ],
 "max_steps": 1000000,
 "num_train_epochs": 9223372036854775807,
- "total_flos": 1.5160884013339509e+19,
+ "total_flos": 1.600315815498036e+19,
 "trial_name": null,
 "trial_params": null
 }
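
The entries appended above follow the Hugging Face Trainer state layout, with one record per logging step and the evaluation at step 950000 as the newest record. As a quick way to inspect them after downloading the checkpoint, here is a minimal sketch, assuming the standard Trainer schema with a "log_history" list and the last-checkpoint/ layout shown in this commit:

    import json

    # Minimal sketch: read the checkpoint's trainer state and print the newest
    # record, which for this commit should be the evaluation at step 950000.
    # Assumes the standard Trainer schema with a "log_history" list.
    with open("last-checkpoint/trainer_state.json") as f:
        state = json.load(f)

    print(state["global_step"], state["epoch"])  # expected: 950000 7.011076
    print(state["log_history"][-1])
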
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:8c1921a1c1cbf8718fc703fed5923d8983ae5a3c94f3fb08cc5b295115ded811
+ oid sha256:ba23237eabaae268f92a0c5e73a78704a91d8fb58eebea9dce3e91dd8ad4e295
 size 442675755
runs/Dec27_23-32-38_t1v-n-2d317d8b-w-0/events.out.tfevents.1672183981.t1v-n-2d317d8b-w-0.162730.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:416c48e28ac8dda4b04a334dfbc00e4af731cc753ac2fe39df5f5ca94fbc8a9d
- size 152696
+ oid sha256:3bd6bf79762908df4de9c152fef88e518523027acee852f6a469aa4f787d95f6
+ size 160972
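
Every binary file in this commit is stored as a Git LFS pointer, so each diff only swaps the oid sha256 and size fields. The following sketch checks a locally downloaded file against its pointer; the path and expected values are copied from the new last-checkpoint/pytorch_model.bin pointer above and would need adjusting for any other file:

    import hashlib
    import os

    # Sketch: verify a downloaded checkpoint file against the Git LFS pointer
    # committed above (oid = sha256 of the content, size in bytes).
    path = "last-checkpoint/pytorch_model.bin"
    expected_oid = "ba23237eabaae268f92a0c5e73a78704a91d8fb58eebea9dce3e91dd8ad4e295"
    expected_size = 442675755

    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)

    assert os.path.getsize(path) == expected_size, "size mismatch"
    assert digest.hexdigest() == expected_oid, "sha256 mismatch"
    print("local file matches the LFS pointer")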