Rakhman16 committed (verified)
Commit 19062b2 · 1 Parent(s): 24138cd

Training in progress, step 1500, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b4914014d0048ad419fae9de3c869e8a9a3d01941ac99d4fb0d4e64993586925
+oid sha256:5c18809a2a8524e90e4ddf568d1d17bb67d1937003a32e0da3eb60d17f7f9e89
 size 891558696
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a0d6710d4838767ab1eabb79915f8d054b5869aa459ff20b6ec3b53afb8931b9
+oid sha256:83ad02ce37e2b4dcfde5be370954593d1a517fdabb1afb6bf5cf71dbfbf597a9
 size 1783272762
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:686e4f08928bc0c19b99582b6c5fe9ffd12480ee988aecee97f880477c357d0e
+oid sha256:ee7003d2d3db8b5d062c3280168e1b356926dfcb2c85d0b9bea95ac9bb64d84f
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2b0190d5e62eedbb928211e3640963587aa6f015e5105ee6ffcc852c37876753
+oid sha256:f97d58893698126522dba56d68d161fa3998d8e44e4b677f658d8d5c7b316319
 size 1064
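
Note: each of the pointer files updated above records only a sha256 oid and a byte size; the actual checkpoint blobs live in LFS storage. A minimal sketch (assumptions: plain standard-library Python, and the full blob already downloaded locally) of how one could check a downloaded file against its pointer:

    import hashlib
    import os

    def verify_lfs_pointer(pointer_text: str, blob_path: str) -> bool:
        """Return True if blob_path matches the oid and size recorded in a Git LFS pointer."""
        # Pointer files are "key value" lines: version, oid sha256:<hex>, size <bytes>.
        fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
        expected_oid = fields["oid"].split(":", 1)[1]
        expected_size = int(fields["size"])

        sha = hashlib.sha256()
        with open(blob_path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
                sha.update(chunk)

        return sha.hexdigest() == expected_oid and os.path.getsize(blob_path) == expected_size
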
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.12485189735889435,
-  "best_model_checkpoint": "./fine-tuned/checkpoint-1000",
-  "epoch": 0.1756388864494599,
+  "best_metric": 0.11999432742595673,
+  "best_model_checkpoint": "./fine-tuned/checkpoint-1500",
+  "epoch": 0.26345832967418986,
   "eval_steps": 100,
-  "global_step": 1000,
+  "global_step": 1500,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -227,6 +227,116 @@
       "eval_samples_per_second": 25.513,
       "eval_steps_per_second": 3.192,
       "step": 1000
+    },
+    {
+      "epoch": 0.18442083077193291,
+      "grad_norm": 20060.55859375,
+      "learning_rate": 2.8616722290532233e-05,
+      "loss": 0.1303,
+      "step": 1050
+    },
+    {
+      "epoch": 0.1932027750944059,
+      "grad_norm": 10244.4052734375,
+      "learning_rate": 2.855085192341472e-05,
+      "loss": 0.1413,
+      "step": 1100
+    },
+    {
+      "epoch": 0.1932027750944059,
+      "eval_loss": 0.12359971553087234,
+      "eval_runtime": 175.122,
+      "eval_samples_per_second": 25.468,
+      "eval_steps_per_second": 3.186,
+      "step": 1100
+    },
+    {
+      "epoch": 0.2019847194168789,
+      "grad_norm": 36993.25,
+      "learning_rate": 2.848498155629721e-05,
+      "loss": 0.1275,
+      "step": 1150
+    },
+    {
+      "epoch": 0.2107666637393519,
+      "grad_norm": 11102.2646484375,
+      "learning_rate": 2.8419111189179697e-05,
+      "loss": 0.1377,
+      "step": 1200
+    },
+    {
+      "epoch": 0.2107666637393519,
+      "eval_loss": 0.12276890873908997,
+      "eval_runtime": 175.1309,
+      "eval_samples_per_second": 25.467,
+      "eval_steps_per_second": 3.186,
+      "step": 1200
+    },
+    {
+      "epoch": 0.21954860806182488,
+      "grad_norm": 10398.369140625,
+      "learning_rate": 2.835324082206218e-05,
+      "loss": 0.1356,
+      "step": 1250
+    },
+    {
+      "epoch": 0.2283305523842979,
+      "grad_norm": 14664.177734375,
+      "learning_rate": 2.828737045494467e-05,
+      "loss": 0.1309,
+      "step": 1300
+    },
+    {
+      "epoch": 0.2283305523842979,
+      "eval_loss": 0.1219501867890358,
+      "eval_runtime": 174.8703,
+      "eval_samples_per_second": 25.505,
+      "eval_steps_per_second": 3.191,
+      "step": 1300
+    },
+    {
+      "epoch": 0.23711249670677087,
+      "grad_norm": 9694.1875,
+      "learning_rate": 2.8221500087827156e-05,
+      "loss": 0.1271,
+      "step": 1350
+    },
+    {
+      "epoch": 0.24589444102924388,
+      "grad_norm": 17376.810546875,
+      "learning_rate": 2.8155629720709643e-05,
+      "loss": 0.1434,
+      "step": 1400
+    },
+    {
+      "epoch": 0.24589444102924388,
+      "eval_loss": 0.12065327912569046,
+      "eval_runtime": 174.9734,
+      "eval_samples_per_second": 25.49,
+      "eval_steps_per_second": 3.189,
+      "step": 1400
+    },
+    {
+      "epoch": 0.2546763853517169,
+      "grad_norm": 13443.2255859375,
+      "learning_rate": 2.808975935359213e-05,
+      "loss": 0.1383,
+      "step": 1450
+    },
+    {
+      "epoch": 0.26345832967418986,
+      "grad_norm": 10927.8994140625,
+      "learning_rate": 2.802388898647462e-05,
+      "loss": 0.125,
+      "step": 1500
+    },
+    {
+      "epoch": 0.26345832967418986,
+      "eval_loss": 0.11999432742595673,
+      "eval_runtime": 174.9084,
+      "eval_samples_per_second": 25.499,
+      "eval_steps_per_second": 3.19,
+      "step": 1500
     }
   ],
   "logging_steps": 50,
@@ -246,7 +356,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 4871663124480000.0,
+  "total_flos": 7307494686720000.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null