joelniklaus committed
Commit a07fb1a
Parent: 0e6e586

Training in progress, step 850000

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0736aff937bc5cbe089a02ee96acafe29a3ce8b5ef41405f8f101efedc8d1867
+oid sha256:fd4783c79d837195c8c28a24f0933d1e492e2e7dcaa298df4fb471ffea73d2a9
 size 1475917081
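
The binaries in this commit are tracked with Git LFS, so each diff above and below only touches a three-line pointer file: a spec version, a sha256 oid, and a byte size. For optimizer.pt the oid changes while the size stays identical, i.e. the serialized tensors were updated in place. As a minimal, hypothetical sketch (not part of this repository; the paths in the usage comment are assumptions), a downloaded blob could be checked against such a pointer like this:

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: Path) -> dict:
    """Parse the three key/value lines of a Git LFS pointer file."""
    fields = {}
    for line in pointer_path.read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields  # e.g. {"version": "https://...", "oid": "sha256:...", "size": "1475917081"}

def verify(pointer_path: Path, blob_path: Path) -> bool:
    """Check that the downloaded blob matches the oid and size recorded in the pointer."""
    fields = parse_lfs_pointer(pointer_path)
    expected_sha = fields["oid"].split(":", 1)[1]
    expected_size = int(fields["size"])
    h = hashlib.sha256()
    with blob_path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == expected_sha and blob_path.stat().st_size == expected_size

# Hypothetical usage; actual paths depend on how the repo was cloned or downloaded.
# print(verify(Path("optimizer.pt.pointer"), Path("last-checkpoint/optimizer.pt")))
```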
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a83020031862aea2e988f120ee4fbfdb9e36868ffeeee41e4196243bad5ad70f
+oid sha256:5a530a13febf37ee76db07823fcaf2dde21e0920e1af9b68cf155f551129662b
 size 737971755
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ea5f4d07228ced52baeea808cd24096aeb8c1e411df6d964bc2778cd1f37bff3
+oid sha256:3c21fdf578f3ba8c419f0f122cdc5b04fd848666ce4f24ed13deaeeda0b66dba
 size 13611
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ea5f4d07228ced52baeea808cd24096aeb8c1e411df6d964bc2778cd1f37bff3
+oid sha256:3c21fdf578f3ba8c419f0f122cdc5b04fd848666ce4f24ed13deaeeda0b66dba
 size 13611
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ea5f4d07228ced52baeea808cd24096aeb8c1e411df6d964bc2778cd1f37bff3
+oid sha256:3c21fdf578f3ba8c419f0f122cdc5b04fd848666ce4f24ed13deaeeda0b66dba
 size 13611
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ea5f4d07228ced52baeea808cd24096aeb8c1e411df6d964bc2778cd1f37bff3
+oid sha256:3c21fdf578f3ba8c419f0f122cdc5b04fd848666ce4f24ed13deaeeda0b66dba
 size 13611
last-checkpoint/rng_state_4.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ea5f4d07228ced52baeea808cd24096aeb8c1e411df6d964bc2778cd1f37bff3
+oid sha256:3c21fdf578f3ba8c419f0f122cdc5b04fd848666ce4f24ed13deaeeda0b66dba
 size 13611
last-checkpoint/rng_state_5.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ea5f4d07228ced52baeea808cd24096aeb8c1e411df6d964bc2778cd1f37bff3
+oid sha256:3c21fdf578f3ba8c419f0f122cdc5b04fd848666ce4f24ed13deaeeda0b66dba
 size 13611
last-checkpoint/rng_state_6.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ea5f4d07228ced52baeea808cd24096aeb8c1e411df6d964bc2778cd1f37bff3
+oid sha256:3c21fdf578f3ba8c419f0f122cdc5b04fd848666ce4f24ed13deaeeda0b66dba
 size 13611
last-checkpoint/rng_state_7.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ea5f4d07228ced52baeea808cd24096aeb8c1e411df6d964bc2778cd1f37bff3
+oid sha256:3c21fdf578f3ba8c419f0f122cdc5b04fd848666ce4f24ed13deaeeda0b66dba
 size 13611
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:50e51b9224ded3ddffee57f26ec45414409de0232579ddafb7f3e083076fa4c5
+oid sha256:adedebe0cc7e07de957a9e2967d6e9c3934a9fdca3245f46a29d125e5e36192e
 size 623
last-checkpoint/trainer_state.json CHANGED
@@ -1,8 +1,8 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.8,
-  "global_step": 800000,
+  "epoch": 0.85,
+  "global_step": 850000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -4934,11 +4934,319 @@
       "eval_samples_per_second": 241.45,
       "eval_steps_per_second": 1.932,
       "step": 800000
+    },
+    {
+      "epoch": 0.8,
+      "learning_rate": 1.0441632244932237e-05,
+      "loss": 0.7146,
+      "step": 801000
+    },
+    {
+      "epoch": 0.8,
+      "learning_rate": 1.0340722563656107e-05,
+      "loss": 0.746,
+      "step": 802000
+    },
+    {
+      "epoch": 0.8,
+      "learning_rate": 1.0240246589884044e-05,
+      "loss": 0.7146,
+      "step": 803000
+    },
+    {
+      "epoch": 0.8,
+      "learning_rate": 1.0140205422405214e-05,
+      "loss": 0.7564,
+      "step": 804000
+    },
+    {
+      "epoch": 0.81,
+      "learning_rate": 1.0040600155253765e-05,
+      "loss": 0.761,
+      "step": 805000
+    },
+    {
+      "epoch": 0.81,
+      "learning_rate": 9.941431877696955e-06,
+      "loss": 0.7907,
+      "step": 806000
+    },
+    {
+      "epoch": 0.81,
+      "learning_rate": 9.842701674223187e-06,
+      "loss": 0.7909,
+      "step": 807000
+    },
+    {
+      "epoch": 0.81,
+      "learning_rate": 9.744410624530148e-06,
+      "loss": 0.7931,
+      "step": 808000
+    },
+    {
+      "epoch": 0.81,
+      "learning_rate": 9.646559803512994e-06,
+      "loss": 0.7779,
+      "step": 809000
+    },
+    {
+      "epoch": 0.81,
+      "learning_rate": 9.549150281252633e-06,
+      "loss": 0.7252,
+      "step": 810000
+    },
+    {
+      "epoch": 0.81,
+      "learning_rate": 9.452183123004e-06,
+      "loss": 0.7497,
+      "step": 811000
+    },
+    {
+      "epoch": 0.81,
+      "learning_rate": 9.355659389184396e-06,
+      "loss": 0.7757,
+      "step": 812000
+    },
+    {
+      "epoch": 0.81,
+      "learning_rate": 9.259580135361929e-06,
+      "loss": 0.726,
+      "step": 813000
+    },
+    {
+      "epoch": 0.81,
+      "learning_rate": 9.163946412243896e-06,
+      "loss": 0.7048,
+      "step": 814000
+    },
+    {
+      "epoch": 0.81,
+      "learning_rate": 9.068759265665384e-06,
+      "loss": 0.681,
+      "step": 815000
+    },
+    {
+      "epoch": 0.82,
+      "learning_rate": 8.974019736577777e-06,
+      "loss": 0.7358,
+      "step": 816000
+    },
+    {
+      "epoch": 0.82,
+      "learning_rate": 8.879728861037384e-06,
+      "loss": 0.7676,
+      "step": 817000
+    },
+    {
+      "epoch": 0.82,
+      "learning_rate": 8.785887670194138e-06,
+      "loss": 0.7497,
+      "step": 818000
+    },
+    {
+      "epoch": 0.82,
+      "learning_rate": 8.692497190280224e-06,
+      "loss": 0.7809,
+      "step": 819000
+    },
+    {
+      "epoch": 0.82,
+      "learning_rate": 8.599558442598998e-06,
+      "loss": 0.8028,
+      "step": 820000
+    },
+    {
+      "epoch": 0.82,
+      "learning_rate": 8.507072443513702e-06,
+      "loss": 0.8323,
+      "step": 821000
+    },
+    {
+      "epoch": 0.82,
+      "learning_rate": 8.415040204436426e-06,
+      "loss": 0.8181,
+      "step": 822000
+    },
+    {
+      "epoch": 0.82,
+      "learning_rate": 8.323462731816961e-06,
+      "loss": 0.8814,
+      "step": 823000
+    },
+    {
+      "epoch": 0.82,
+      "learning_rate": 8.232341027131885e-06,
+      "loss": 0.829,
+      "step": 824000
+    },
+    {
+      "epoch": 0.82,
+      "learning_rate": 8.141676086873572e-06,
+      "loss": 0.7633,
+      "step": 825000
+    },
+    {
+      "epoch": 0.83,
+      "learning_rate": 8.051468902539272e-06,
+      "loss": 0.7566,
+      "step": 826000
+    },
+    {
+      "epoch": 0.83,
+      "learning_rate": 7.96172046062032e-06,
+      "loss": 0.7469,
+      "step": 827000
+    },
+    {
+      "epoch": 0.83,
+      "learning_rate": 7.872431742591268e-06,
+      "loss": 0.6765,
+      "step": 828000
+    },
+    {
+      "epoch": 0.83,
+      "learning_rate": 7.783603724899257e-06,
+      "loss": 0.7272,
+      "step": 829000
+    },
+    {
+      "epoch": 0.83,
+      "learning_rate": 7.695237378953223e-06,
+      "loss": 0.7554,
+      "step": 830000
+    },
+    {
+      "epoch": 0.83,
+      "learning_rate": 7.607333671113409e-06,
+      "loss": 0.7978,
+      "step": 831000
+    },
+    {
+      "epoch": 0.83,
+      "learning_rate": 7.519893562680663e-06,
+      "loss": 0.8094,
+      "step": 832000
+    },
+    {
+      "epoch": 0.83,
+      "learning_rate": 7.432918009885997e-06,
+      "loss": 0.786,
+      "step": 833000
+    },
+    {
+      "epoch": 0.83,
+      "learning_rate": 7.3464079638801365e-06,
+      "loss": 0.8359,
+      "step": 834000
+    },
+    {
+      "epoch": 0.83,
+      "learning_rate": 7.260364370723044e-06,
+      "loss": 0.862,
+      "step": 835000
+    },
+    {
+      "epoch": 0.84,
+      "learning_rate": 7.174788171373731e-06,
+      "loss": 0.8762,
+      "step": 836000
+    },
+    {
+      "epoch": 0.84,
+      "learning_rate": 7.089680301679752e-06,
+      "loss": 0.8808,
+      "step": 837000
+    },
+    {
+      "epoch": 0.84,
+      "learning_rate": 7.005041692367154e-06,
+      "loss": 0.8604,
+      "step": 838000
+    },
+    {
+      "epoch": 0.84,
+      "learning_rate": 6.92087326903022e-06,
+      "loss": 0.7643,
+      "step": 839000
+    },
+    {
+      "epoch": 0.84,
+      "learning_rate": 6.837175952121306e-06,
+      "loss": 0.7278,
+      "step": 840000
+    },
+    {
+      "epoch": 0.84,
+      "learning_rate": 6.753950656940905e-06,
+      "loss": 0.7171,
+      "step": 841000
+    },
+    {
+      "epoch": 0.84,
+      "learning_rate": 6.671198293627479e-06,
+      "loss": 0.6912,
+      "step": 842000
+    },
+    {
+      "epoch": 0.84,
+      "learning_rate": 6.588919767147639e-06,
+      "loss": 0.5905,
+      "step": 843000
+    },
+    {
+      "epoch": 0.84,
+      "learning_rate": 6.5071159772861436e-06,
+      "loss": 0.632,
+      "step": 844000
+    },
+    {
+      "epoch": 0.84,
+      "learning_rate": 6.425787818636131e-06,
+      "loss": 0.7451,
+      "step": 845000
+    },
+    {
+      "epoch": 0.85,
+      "learning_rate": 6.344936180589351e-06,
+      "loss": 0.7537,
+      "step": 846000
+    },
+    {
+      "epoch": 0.85,
+      "learning_rate": 6.264561947326331e-06,
+      "loss": 0.66,
+      "step": 847000
+    },
+    {
+      "epoch": 0.85,
+      "learning_rate": 6.184665997806832e-06,
+      "loss": 0.7213,
+      "step": 848000
+    },
+    {
+      "epoch": 0.85,
+      "learning_rate": 6.1052492057601275e-06,
+      "loss": 0.7708,
+      "step": 849000
+    },
+    {
+      "epoch": 0.85,
+      "learning_rate": 6.026312439675552e-06,
+      "loss": 0.8174,
+      "step": 850000
+    },
+    {
+      "epoch": 0.85,
+      "eval_loss": 0.5826455354690552,
+      "eval_runtime": 13.0928,
+      "eval_samples_per_second": 381.888,
+      "eval_steps_per_second": 3.055,
+      "step": 850000
     }
   ],
   "max_steps": 1000000,
   "num_train_epochs": 9223372036854775807,
-  "total_flos": 1.34914183790592e+19,
+  "total_flos": 1.43346320277504e+19,
   "trial_name": null,
   "trial_params": null
 }
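
The block appended above is the Trainer's periodic log: one record with loss and learning_rate every 1,000 optimizer steps, plus an eval record at step 850000. As a small, hypothetical sketch (assuming the standard log_history layout that transformers.Trainer writes to trainer_state.json, and an assumed local path), the curves can be pulled out of the checkpoint like this:

```python
import json
from pathlib import Path

# Hypothetical path; adjust to wherever the checkpoint was downloaded.
state = json.loads(Path("last-checkpoint/trainer_state.json").read_text())

# Training records carry a "loss" key; evaluation records carry "eval_loss" instead.
train_log = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_log = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"progress: step {state['global_step']}/{state['max_steps']} (epoch {state['epoch']})")
print("last train losses:", train_log[-3:])
print("last eval losses:", eval_log[-3:])
```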
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a83020031862aea2e988f120ee4fbfdb9e36868ffeeee41e4196243bad5ad70f
+oid sha256:5a530a13febf37ee76db07823fcaf2dde21e0920e1af9b68cf155f551129662b
 size 737971755
runs/Feb09_18-20-41_t1v-n-0cfb531e-w-0/events.out.tfevents.1675967137.t1v-n-0cfb531e-w-0.3767571.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a89903c3ad93724a201222cc39ca9ad19e4291497e25d97bb164b6fca4688679
-size 12096
+oid sha256:bfabe543bd4f71179f1898ab0b35a69b1e7be998b3da381bc1404de8dd44afcf
+size 20372