joelniklaus committed
Commit e837c30
1 Parent(s): 66b7948

Training in progress, step 700000

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9b5ab1c91a07485f01e742aaac433d58859ea278355bd001736e2f610377a2eb
+ oid sha256:6c5185e46c207afc923ce317904af7390e66cd4c6189e5217b9f3a505ed2bcdb
  size 1475917081
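
Each checkpoint file in this commit is stored through Git LFS, so the diff only touches the pointer file: the sha256 oid changes while the size stays the same. As a minimal sketch (the helper name and local path are illustrative, not part of this repo), a downloaded blob could be checked against its pointer like this:

import hashlib

def verify_lfs_blob(blob_path, expected_oid, expected_size):
    # Hash the blob in 1 MiB chunks and compare against the pointer's oid and size.
    sha, size = hashlib.sha256(), 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
            size += len(chunk)
    return sha.hexdigest() == expected_oid and size == expected_size

# New optimizer.pt pointer from this commit:
# verify_lfs_blob("last-checkpoint/optimizer.pt",
#                 "6c5185e46c207afc923ce317904af7390e66cd4c6189e5217b9f3a505ed2bcdb",
#                 1475917081)
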
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:379d522dfd1a25b016cf3da204eca913e1ee15fc75bb4722b9721e68ccaf83d7
+ oid sha256:6991897e52bae2e9d6504fe8851b5a7986102aa1d7e94ca5557ddb32076d1524
  size 737971755
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bbe4c5e4eff62822ea7f8840047f5407ef4c8722d1c6e6187e545d5a4b227925
+ oid sha256:52eb069e358852a5899ef82beb7fdbc9917637cbe5700c2510438440dfc1d58e
  size 13611
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bbe4c5e4eff62822ea7f8840047f5407ef4c8722d1c6e6187e545d5a4b227925
+ oid sha256:52eb069e358852a5899ef82beb7fdbc9917637cbe5700c2510438440dfc1d58e
  size 13611
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bbe4c5e4eff62822ea7f8840047f5407ef4c8722d1c6e6187e545d5a4b227925
+ oid sha256:52eb069e358852a5899ef82beb7fdbc9917637cbe5700c2510438440dfc1d58e
  size 13611
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bbe4c5e4eff62822ea7f8840047f5407ef4c8722d1c6e6187e545d5a4b227925
+ oid sha256:52eb069e358852a5899ef82beb7fdbc9917637cbe5700c2510438440dfc1d58e
  size 13611
last-checkpoint/rng_state_4.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bbe4c5e4eff62822ea7f8840047f5407ef4c8722d1c6e6187e545d5a4b227925
+ oid sha256:52eb069e358852a5899ef82beb7fdbc9917637cbe5700c2510438440dfc1d58e
  size 13611
last-checkpoint/rng_state_5.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bbe4c5e4eff62822ea7f8840047f5407ef4c8722d1c6e6187e545d5a4b227925
+ oid sha256:52eb069e358852a5899ef82beb7fdbc9917637cbe5700c2510438440dfc1d58e
  size 13611
last-checkpoint/rng_state_6.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bbe4c5e4eff62822ea7f8840047f5407ef4c8722d1c6e6187e545d5a4b227925
+ oid sha256:52eb069e358852a5899ef82beb7fdbc9917637cbe5700c2510438440dfc1d58e
  size 13611
last-checkpoint/rng_state_7.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bbe4c5e4eff62822ea7f8840047f5407ef4c8722d1c6e6187e545d5a4b227925
+ oid sha256:52eb069e358852a5899ef82beb7fdbc9917637cbe5700c2510438440dfc1d58e
  size 13611
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9d7fa20411577666fac76fe76348b4f9231439cc2e524d6e3185910c258591e9
+ oid sha256:d2d6ab5c2c0046e2c66cf435ff00463ff3da7208ddb35b5b6f19c87d94f3623b
  size 623
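
Together, optimizer.pt, scheduler.pt and the eight per-process rng_state_*.pth files above are what make this checkpoint resumable mid-run. A hedged sketch, assuming the run uses a Hugging Face Transformers Trainer (which the trainer_state.json layout below suggests); the model, datasets and real TrainingArguments come from the original training script and are only placeholders here:

from transformers import Trainer, TrainingArguments

def resume_from_last_checkpoint(model, train_ds, eval_ds):
    # Placeholder arguments; the real run defined its own output dir, batch sizes, etc.
    args = TrainingArguments(output_dir="out", max_steps=1_000_000)
    trainer = Trainer(model=model, args=args,
                      train_dataset=train_ds, eval_dataset=eval_ds)
    # resume_from_checkpoint reloads optimizer.pt, scheduler.pt, trainer_state.json
    # and the per-process RNG states so training continues from step 700000.
    trainer.train(resume_from_checkpoint="last-checkpoint")
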
last-checkpoint/trainer_state.json CHANGED
@@ -1,8 +1,8 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.65,
- "global_step": 650000,
+ "epoch": 0.7,
+ "global_step": 700000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -4010,11 +4010,319 @@
  "eval_samples_per_second": 331.985,
  "eval_steps_per_second": 2.656,
  "step": 650000
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 2.976391850971065e-05,
+ "loss": 0.7774,
+ "step": 651000
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 2.9612829550614836e-05,
+ "loss": 0.7195,
+ "step": 652000
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 2.9461963542348737e-05,
+ "loss": 0.6914,
+ "step": 653000
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 2.931132213475884e-05,
+ "loss": 0.7277,
+ "step": 654000
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 2.916090697523549e-05,
+ "loss": 0.7585,
+ "step": 655000
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 2.9010719708694722e-05,
+ "loss": 0.7406,
+ "step": 656000
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 2.8860761977560436e-05,
+ "loss": 0.8094,
+ "step": 657000
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 2.8711035421746367e-05,
+ "loss": 0.7789,
+ "step": 658000
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 2.8561541678638142e-05,
+ "loss": 0.793,
+ "step": 659000
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 2.8412282383075363e-05,
+ "loss": 0.8005,
+ "step": 660000
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 2.8263259167333777e-05,
+ "loss": 0.7959,
+ "step": 661000
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 2.811447366110741e-05,
+ "loss": 0.7982,
+ "step": 662000
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 2.7965927491490705e-05,
+ "loss": 0.7859,
+ "step": 663000
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 2.7817622282960815e-05,
+ "loss": 0.7346,
+ "step": 664000
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 2.766955965735968e-05,
+ "loss": 0.7484,
+ "step": 665000
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 2.7521741233876496e-05,
+ "loss": 0.7365,
+ "step": 666000
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 2.7374168629029813e-05,
+ "loss": 0.7243,
+ "step": 667000
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 2.7226843456650037e-05,
+ "loss": 0.718,
+ "step": 668000
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 2.707976732786166e-05,
+ "loss": 0.7448,
+ "step": 669000
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 2.693294185106562e-05,
+ "loss": 0.8236,
+ "step": 670000
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 2.6786368631921836e-05,
+ "loss": 0.8554,
+ "step": 671000
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 2.6640049273331515e-05,
+ "loss": 0.8057,
+ "step": 672000
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 2.6493985375419778e-05,
+ "loss": 0.7948,
+ "step": 673000
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 2.6348178535517966e-05,
+ "loss": 0.8237,
+ "step": 674000
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 2.6202630348146324e-05,
+ "loss": 0.8353,
+ "step": 675000
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 2.6057342404996522e-05,
+ "loss": 0.8529,
+ "step": 676000
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 2.591231629491423e-05,
+ "loss": 0.8263,
+ "step": 677000
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 2.5767553603881767e-05,
+ "loss": 0.7322,
+ "step": 678000
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 2.562305591500069e-05,
+ "loss": 0.7321,
+ "step": 679000
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 2.547882480847461e-05,
+ "loss": 0.762,
+ "step": 680000
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 2.5334861861591753e-05,
+ "loss": 0.7644,
+ "step": 681000
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 2.5191168648707887e-05,
+ "loss": 0.7511,
+ "step": 682000
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 2.5047746741228978e-05,
+ "loss": 0.764,
+ "step": 683000
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 2.490459770759398e-05,
+ "loss": 0.8123,
+ "step": 684000
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 2.476172311325783e-05,
+ "loss": 0.8623,
+ "step": 685000
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 2.4619124520674146e-05,
+ "loss": 0.8868,
+ "step": 686000
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 2.447680348927837e-05,
+ "loss": 0.8712,
+ "step": 687000
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 2.433476157547044e-05,
+ "loss": 0.8855,
+ "step": 688000
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 2.419300033259798e-05,
+ "loss": 0.8317,
+ "step": 689000
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 2.405152131093926e-05,
+ "loss": 0.8379,
+ "step": 690000
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 2.3910326057686127e-05,
+ "loss": 0.835,
+ "step": 691000
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 2.3769416116927335e-05,
+ "loss": 0.786,
+ "step": 692000
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 2.362879302963135e-05,
+ "loss": 0.6715,
+ "step": 693000
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 2.3488458333629777e-05,
+ "loss": 0.7149,
+ "step": 694000
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 2.3348413563600325e-05,
+ "loss": 0.7062,
+ "step": 695000
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 2.3208660251050158e-05,
+ "loss": 0.7274,
+ "step": 696000
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 2.3069199924299174e-05,
+ "loss": 0.7218,
+ "step": 697000
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 2.29300341084631e-05,
+ "loss": 0.693,
+ "step": 698000
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 2.279116432543705e-05,
+ "loss": 0.7884,
+ "step": 699000
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 2.2652592093878666e-05,
+ "loss": 0.8613,
+ "step": 700000
+ },
+ {
+ "epoch": 0.7,
+ "eval_loss": 0.5963565707206726,
+ "eval_runtime": 18.6839,
+ "eval_samples_per_second": 267.609,
+ "eval_steps_per_second": 2.141,
+ "step": 700000
  }
  ],
  "max_steps": 1000000,
  "num_train_epochs": 9223372036854775807,
- "total_flos": 1.09617774329856e+19,
+ "total_flos": 1.18049910816768e+19,
  "trial_name": null,
  "trial_params": null
  }
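
The trainer_state.json diff above appends one log entry per 1,000 training steps (651,000 through 700,000) plus an evaluation record at step 700,000, and bumps epoch, global_step and total_flos accordingly. A small sketch of pulling those numbers back out of the checkpoint; the path is illustrative, and log_history is the standard field name in a Transformers trainer_state.json:

import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

# Separate training-loss and eval-loss entries by which key each log record carries.
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print("progress:", state["global_step"], "/", state["max_steps"])  # 700000 / 1000000
print("latest train loss:", train[-1])   # (700000, 0.8613) after this commit
print("latest eval loss:", evals[-1])    # (700000, 0.5963565707206726)
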
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:379d522dfd1a25b016cf3da204eca913e1ee15fc75bb4722b9721e68ccaf83d7
+ oid sha256:6991897e52bae2e9d6504fe8851b5a7986102aa1d7e94ca5557ddb32076d1524
  size 737971755
runs/Dec28_00-25-33_t1v-n-07cfb9e3-w-0/events.out.tfevents.1672187175.t1v-n-07cfb9e3-w-0.13817.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:00c57d810251fa8351bfab9069d19050415443ad66d34e882ba213f65b8c814c
- size 111312
+ oid sha256:004706d59d1c0a88edcbae30bec62b8a8d00ba856df0d75c5e619c53feed821d
+ size 119588
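
The updated events.out.tfevents file carries the same training curves in TensorBoard form; its size grows from 111312 to 119588 bytes as new scalar summaries are appended. A hedged sketch of reading it back, assuming the tensorboard package is installed; the path points at the run directory that contains the event file above:

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Dec28_00-25-33_t1v-n-07cfb9e3-w-0")
acc.Reload()                        # load the scalar summaries from the event file
tags = acc.Tags()["scalars"]        # available scalar tags, e.g. a train-loss series
for event in acc.Scalars(tags[0]):
    print(event.step, event.value)
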