Commit 42dec4a · tmnam20 committed · 1 Parent(s): fbc35d4

Training in progress, step 200, checkpoint

checkpoint-200/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6bafe9677d96eb63bae43314ebe31a30361b1347d590fab78e657d5093cda7e5
+oid sha256:e261a8240f9ba007a28a67f46f5463f2d5850e2f5b3c08fc10ba3787cf10911e
 size 1474893317
checkpoint-200/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7b06790aa3e1d971e160c719ebdd5ee22a691c28aff1977c8a9e4b150aaeac7a
+oid sha256:e6baa0ed3c0013d58345b0e3c81e7ffae409f63038ae73b3a5056a229c83bff9
 size 737457141
checkpoint-200/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8f3284e82c30f90f720fbb01dfc9a4aa926d82378c51e59b01cb9aa4aa352e1b
+oid sha256:62ce52bbbc36bf5ba47e56ac99982edf5dc9b0952621007ccab5c2816e59d7f3
 size 14575
checkpoint-200/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f2099172cf643e0621a49c4fd812f032200315a0b3fd63d5198b626196954959
+oid sha256:738b3803967108b90cf940ac65ab8d2d54fdd8d26b5509e734c1ef7e40612c3c
 size 627
checkpoint-200/trainer_state.json CHANGED
@@ -1,6 +1,6 @@
 {
-  "best_metric": 0.5658872723579407,
-  "best_model_checkpoint": "/kaggle/output/checkpoint-200",
+  "best_metric": 0.5696164965629578,
+  "best_model_checkpoint": "/kaggle/output/checkpoint-100",
   "epoch": 0.4132231404958678,
   "eval_steps": 50,
   "global_step": 200,
@@ -10,52 +10,76 @@
   "log_history": [
     {
       "epoch": 0.0,
-      "learning_rate": 1.6666666666666667e-06,
-      "loss": 0.6979,
+      "learning_rate": 0.0,
+      "loss": 0.7246,
       "step": 1
     },
+    {
+      "epoch": 0.1,
+      "learning_rate": 7.333333333333332e-05,
+      "loss": 0.5982,
+      "step": 50
+    },
     {
       "epoch": 0.1,
       "eval_accuracy": 0.75,
-      "eval_loss": 0.6723740696907043,
-      "eval_runtime": 28.903,
-      "eval_samples_per_second": 40.965,
-      "eval_steps_per_second": 20.482,
+      "eval_loss": 0.6296697854995728,
+      "eval_runtime": 30.5612,
+      "eval_samples_per_second": 38.742,
+      "eval_steps_per_second": 19.371,
       "step": 50
     },
+    {
+      "epoch": 0.21,
+      "learning_rate": 0.00015,
+      "loss": 0.5505,
+      "step": 100
+    },
     {
       "epoch": 0.21,
       "eval_accuracy": 0.75,
-      "eval_loss": 0.6272028088569641,
-      "eval_runtime": 28.756,
-      "eval_samples_per_second": 41.174,
-      "eval_steps_per_second": 20.587,
+      "eval_loss": 0.5696164965629578,
+      "eval_runtime": 30.5816,
+      "eval_samples_per_second": 38.716,
+      "eval_steps_per_second": 19.358,
       "step": 100
     },
+    {
+      "epoch": 0.31,
+      "learning_rate": 0.0002333333333333333,
+      "loss": 0.5838,
+      "step": 150
+    },
     {
       "epoch": 0.31,
       "eval_accuracy": 0.75,
-      "eval_loss": 0.5726658701896667,
-      "eval_runtime": 28.7791,
-      "eval_samples_per_second": 41.141,
-      "eval_steps_per_second": 20.57,
+      "eval_loss": 0.562861442565918,
+      "eval_runtime": 31.0771,
+      "eval_samples_per_second": 38.099,
+      "eval_steps_per_second": 19.049,
       "step": 150
     },
+    {
+      "epoch": 0.41,
+      "learning_rate": 0.00025333333333333333,
+      "loss": 0.5925,
+      "step": 200
+    },
     {
       "epoch": 0.41,
       "eval_accuracy": 0.75,
-      "eval_loss": 0.5658872723579407,
-      "eval_runtime": 28.8005,
-      "eval_samples_per_second": 41.11,
-      "eval_steps_per_second": 20.555,
+      "eval_loss": 0.5931239724159241,
+      "eval_runtime": 30.6023,
+      "eval_samples_per_second": 38.69,
+      "eval_steps_per_second": 19.345,
       "step": 200
     }
   ],
-  "logging_steps": 1000,
+  "logging_steps": 50,
   "max_steps": 1000,
   "num_train_epochs": 3,
   "save_steps": 100,
-  "total_flos": 648800331133968.0,
+  "total_flos": 701423485924368.0,
   "trial_name": null,
   "trial_params": null
 }
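
The trainer_state.json changes above pin down the run's cadence: training loss now logged every 50 steps (logging_steps dropped from 1000 to 50), evaluation every 50 steps, a checkpoint every 100 steps, a 1000-step budget, and the best checkpoint tracked at checkpoint-100 via its eval_loss of 0.5696. As a rough illustration only, the sketch below reconstructs Hugging Face `TrainingArguments` consistent with those fields; the output directory, metric choice, and anything not visible in the diff are assumptions, not the run's actual configuration.

```python
# Rough reconstruction of the schedule visible in trainer_state.json.
# Only the commented fields come from the diff; everything else
# (output_dir, metric choice, strategies) is an assumption.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="/kaggle/output",        # prefix of "best_model_checkpoint"
    max_steps=1000,                     # "max_steps": 1000
    num_train_epochs=3,                 # "num_train_epochs": 3
    logging_steps=50,                   # "logging_steps": 50 (previously 1000)
    evaluation_strategy="steps",
    eval_steps=50,                      # "eval_steps": 50
    save_strategy="steps",
    save_steps=100,                     # "save_steps": 100
    load_best_model_at_end=True,        # consistent with "best_metric" tracking
    metric_for_best_model="eval_loss",  # assumed: best_metric equals eval_loss at step 100
    greater_is_better=False,
)
```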
checkpoint-200/training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:fbe5e88ca49040823bdb83dc035b8a4470c58ae105b0482f624b7d48f5e08cbe
3
  size 4027
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cd0372335b8d69875fb40a5865f6914bab9ff539640d01b439e8f707b1e5a43d
3
  size 4027
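
The binary entries in this commit (optimizer.pt, pytorch_model.bin, rng_state.pth, scheduler.pt, training_args.bin) are stored as Git LFS pointers: three-line stubs holding the spec version, the sha256 oid of the real blob, and its size in bytes, which is why only the hashes change here while the sizes stay fixed. Below is a minimal sketch of checking a downloaded blob against such a pointer, assuming plain Python with hashlib; the file paths are hypothetical placeholders.

```python
# Sketch: verify a downloaded blob against its Git LFS pointer, i.e. the
# three lines shown above (version / oid sha256:<hex> / size <bytes>).
# File paths below are hypothetical placeholders.
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: str) -> dict:
    """Split each 'key value' line of an LFS pointer file into a dict."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        if key:
            fields[key] = value
    return fields  # {"version": ..., "oid": "sha256:<hex>", "size": "<bytes>"}

def matches_pointer(blob_path: str, pointer_path: str) -> bool:
    """True if the blob's sha256 digest and byte size match the pointer."""
    fields = parse_lfs_pointer(pointer_path)
    digest = hashlib.sha256()
    size = 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
            size += len(chunk)
    return (
        digest.hexdigest() == fields["oid"].split(":", 1)[1]
        and size == int(fields["size"])
    )

# Example (hypothetical local paths):
# matches_pointer("checkpoint-200/optimizer.pt", "optimizer.pt.pointer")
```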