tmnam20 committed on
Commit 1dc79d7
1 Parent(s): d28a500

Training in progress, step 13000, checkpoint

checkpoint-13000/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5f8aafa3fa0febb58f8bce9472b590d5a948560d44429796fdc7e3f79caddd6e
+oid sha256:3892d6f84dc7f4c6de18f22d42ad86ca337084116aa952802e751f0d9f6c9652
 size 932401285
checkpoint-13000/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:32fbbdf6c911c988baa12fb16f24b26e48f1cccb0509e8c73cd3aef290e2b333
+oid sha256:6a20f207e133c00633f27194ef1fa06be2ba465ad279110d408ac1738cd0b5cf
 size 466205145
checkpoint-13000/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:614dfe13436bce455814af7d0a874ae23824c1b251842e72fc798c464515463f
+oid sha256:326b28681c4c95a80f1163556c7b33c6678b6b23895e86373566b2aabc38d1aa
 size 14575
checkpoint-13000/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:67659fff1994fdf63091691ca550255bba641aba7834eb1b3cc1254efe96c038
+oid sha256:646e4c5269f455b101609dc925665e30c48eb0987e037f379fd5e2fc7568541a
 size 627
checkpoint-13000/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
 {
-  "best_metric": 1.1056212186813354,
-  "best_model_checkpoint": "/kaggle/output/checkpoint-6000",
-  "epoch": 208.0,
+  "best_metric": 1.089038372039795,
+  "best_model_checkpoint": "/kaggle/output/checkpoint-12000",
+  "epoch": 0.5296610169491526,
   "eval_steps": 1000,
   "global_step": 13000,
   "is_hyper_param_search": false,
@@ -9,210 +9,210 @@
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.02,
+      "epoch": 0.0,
       "learning_rate": 2.7777777777777777e-11,
-      "loss": 1.5477,
+      "loss": 1.1383,
       "step": 1
     },
     {
-      "epoch": 16.0,
+      "epoch": 0.04,
       "learning_rate": 2.7750000000000004e-08,
-      "loss": 1.2028,
+      "loss": 1.1424,
       "step": 1000
     },
     {
-      "epoch": 16.0,
-      "eval_accuracy": 0.3387225548902196,
-      "eval_loss": 1.1078879833221436,
-      "eval_runtime": 54.1057,
-      "eval_samples_per_second": 92.596,
-      "eval_steps_per_second": 11.588,
+      "epoch": 0.04,
+      "eval_accuracy": 0.32375249500998005,
+      "eval_loss": 1.1077626943588257,
+      "eval_runtime": 54.8633,
+      "eval_samples_per_second": 91.318,
+      "eval_steps_per_second": 11.428,
       "step": 1000
     },
     {
-      "epoch": 32.0,
+      "epoch": 0.08,
       "learning_rate": 5.5527777777777784e-08,
-      "loss": 1.1152,
+      "loss": 1.1244,
       "step": 2000
     },
     {
-      "epoch": 32.0,
-      "eval_accuracy": 0.3349301397205589,
-      "eval_loss": 1.1080944538116455,
-      "eval_runtime": 54.2029,
-      "eval_samples_per_second": 92.43,
-      "eval_steps_per_second": 11.568,
+      "epoch": 0.08,
+      "eval_accuracy": 0.33652694610778444,
+      "eval_loss": 1.1080161333084106,
+      "eval_runtime": 54.7384,
+      "eval_samples_per_second": 91.526,
+      "eval_steps_per_second": 11.454,
       "step": 2000
     },
     {
-      "epoch": 48.0,
+      "epoch": 0.12,
       "learning_rate": 8.327777777777778e-08,
-      "loss": 1.1036,
+      "loss": 1.1228,
       "step": 3000
     },
     {
-      "epoch": 48.0,
-      "eval_accuracy": 0.3377245508982036,
-      "eval_loss": 1.1081210374832153,
-      "eval_runtime": 54.0467,
-      "eval_samples_per_second": 92.698,
-      "eval_steps_per_second": 11.601,
+      "epoch": 0.12,
+      "eval_accuracy": 0.34331337325349304,
+      "eval_loss": 1.1084064245224,
+      "eval_runtime": 54.7948,
+      "eval_samples_per_second": 91.432,
+      "eval_steps_per_second": 11.443,
       "step": 3000
     },
     {
-      "epoch": 64.0,
+      "epoch": 0.16,
       "learning_rate": 1.1105555555555557e-07,
-      "loss": 1.0895,
+      "loss": 1.1216,
       "step": 4000
     },
     {
-      "epoch": 64.0,
-      "eval_accuracy": 0.33912175648702597,
-      "eval_loss": 1.1072543859481812,
-      "eval_runtime": 54.1318,
-      "eval_samples_per_second": 92.552,
-      "eval_steps_per_second": 11.583,
+      "epoch": 0.16,
+      "eval_accuracy": 0.3385229540918164,
+      "eval_loss": 1.1014840602874756,
+      "eval_runtime": 54.8508,
+      "eval_samples_per_second": 91.339,
+      "eval_steps_per_second": 11.431,
       "step": 4000
     },
     {
-      "epoch": 80.0,
+      "epoch": 0.2,
       "learning_rate": 1.3880555555555558e-07,
-      "loss": 1.0748,
+      "loss": 1.1181,
       "step": 5000
     },
     {
-      "epoch": 80.0,
-      "eval_accuracy": 0.3435129740518962,
-      "eval_loss": 1.1071943044662476,
-      "eval_runtime": 54.4121,
-      "eval_samples_per_second": 92.075,
-      "eval_steps_per_second": 11.523,
+      "epoch": 0.2,
+      "eval_accuracy": 0.33073852295409184,
+      "eval_loss": 1.1008135080337524,
+      "eval_runtime": 54.8304,
+      "eval_samples_per_second": 91.373,
+      "eval_steps_per_second": 11.435,
       "step": 5000
     },
     {
-      "epoch": 96.0,
+      "epoch": 0.24,
       "learning_rate": 1.6658333333333335e-07,
-      "loss": 1.055,
+      "loss": 1.1132,
       "step": 6000
     },
     {
-      "epoch": 96.0,
-      "eval_accuracy": 0.3459081836327345,
-      "eval_loss": 1.1056212186813354,
-      "eval_runtime": 54.092,
-      "eval_samples_per_second": 92.62,
-      "eval_steps_per_second": 11.591,
+      "epoch": 0.24,
+      "eval_accuracy": 0.3520958083832335,
+      "eval_loss": 1.0993762016296387,
+      "eval_runtime": 54.8804,
+      "eval_samples_per_second": 91.289,
+      "eval_steps_per_second": 11.425,
       "step": 6000
     },
     {
-      "epoch": 112.0,
+      "epoch": 0.29,
       "learning_rate": 1.9433333333333334e-07,
-      "loss": 1.0352,
+      "loss": 1.1113,
       "step": 7000
     },
     {
-      "epoch": 112.0,
-      "eval_accuracy": 0.3469061876247505,
-      "eval_loss": 1.1121071577072144,
-      "eval_runtime": 53.9318,
-      "eval_samples_per_second": 92.895,
-      "eval_steps_per_second": 11.626,
+      "epoch": 0.29,
+      "eval_accuracy": 0.3530938123752495,
+      "eval_loss": 1.0965770483016968,
+      "eval_runtime": 54.8881,
+      "eval_samples_per_second": 91.277,
+      "eval_steps_per_second": 11.423,
       "step": 7000
     },
     {
-      "epoch": 128.0,
+      "epoch": 0.33,
       "learning_rate": 2.2211111111111114e-07,
-      "loss": 1.0085,
+      "loss": 1.1111,
       "step": 8000
     },
     {
-      "epoch": 128.0,
-      "eval_accuracy": 0.3534930139720559,
-      "eval_loss": 1.1201248168945312,
-      "eval_runtime": 54.0094,
-      "eval_samples_per_second": 92.762,
-      "eval_steps_per_second": 11.609,
+      "epoch": 0.33,
+      "eval_accuracy": 0.35708582834331337,
+      "eval_loss": 1.094658613204956,
+      "eval_runtime": 54.8233,
+      "eval_samples_per_second": 91.384,
+      "eval_steps_per_second": 11.437,
       "step": 8000
     },
     {
-      "epoch": 144.0,
+      "epoch": 0.37,
       "learning_rate": 2.4986111111111113e-07,
-      "loss": 0.9772,
+      "loss": 1.109,
       "step": 9000
     },
     {
-      "epoch": 144.0,
-      "eval_accuracy": 0.3568862275449102,
-      "eval_loss": 1.1434961557388306,
-      "eval_runtime": 54.0639,
-      "eval_samples_per_second": 92.668,
-      "eval_steps_per_second": 11.597,
+      "epoch": 0.37,
+      "eval_accuracy": 0.34191616766467064,
+      "eval_loss": 1.106990933418274,
+      "eval_runtime": 54.9095,
+      "eval_samples_per_second": 91.241,
+      "eval_steps_per_second": 11.419,
       "step": 9000
     },
     {
-      "epoch": 160.0,
-      "learning_rate": 2.7761111111111115e-07,
-      "loss": 0.9323,
+      "epoch": 0.41,
+      "learning_rate": 2.776388888888889e-07,
+      "loss": 1.1036,
       "step": 10000
     },
     {
-      "epoch": 160.0,
-      "eval_accuracy": 0.36087824351297404,
-      "eval_loss": 1.1742260456085205,
-      "eval_runtime": 54.7624,
-      "eval_samples_per_second": 91.486,
-      "eval_steps_per_second": 11.449,
+      "epoch": 0.41,
+      "eval_accuracy": 0.37584830339321357,
+      "eval_loss": 1.0930211544036865,
+      "eval_runtime": 54.9067,
+      "eval_samples_per_second": 91.246,
+      "eval_steps_per_second": 11.419,
       "step": 10000
     },
     {
-      "epoch": 176.0,
-      "learning_rate": 3.053888888888889e-07,
-      "loss": 0.8779,
+      "epoch": 0.45,
+      "learning_rate": 3.0541666666666667e-07,
+      "loss": 1.1045,
       "step": 11000
     },
     {
-      "epoch": 176.0,
-      "eval_accuracy": 0.3568862275449102,
-      "eval_loss": 1.2272026538848877,
-      "eval_runtime": 54.0934,
-      "eval_samples_per_second": 92.618,
-      "eval_steps_per_second": 11.591,
+      "epoch": 0.45,
+      "eval_accuracy": 0.3652694610778443,
+      "eval_loss": 1.092846393585205,
+      "eval_runtime": 54.8964,
+      "eval_samples_per_second": 91.263,
+      "eval_steps_per_second": 11.422,
       "step": 11000
     },
     {
-      "epoch": 192.0,
-      "learning_rate": 3.331388888888889e-07,
-      "loss": 0.8276,
+      "epoch": 0.49,
+      "learning_rate": 3.3319444444444444e-07,
+      "loss": 1.1024,
       "step": 12000
     },
     {
-      "epoch": 192.0,
-      "eval_accuracy": 0.3562874251497006,
-      "eval_loss": 1.3099753856658936,
-      "eval_runtime": 54.3996,
-      "eval_samples_per_second": 92.096,
-      "eval_steps_per_second": 11.526,
+      "epoch": 0.49,
+      "eval_accuracy": 0.39261477045908183,
+      "eval_loss": 1.089038372039795,
+      "eval_runtime": 54.9763,
+      "eval_samples_per_second": 91.13,
+      "eval_steps_per_second": 11.405,
       "step": 12000
     },
     {
-      "epoch": 208.0,
-      "learning_rate": 3.608888888888889e-07,
-      "loss": 0.7716,
+      "epoch": 0.53,
+      "learning_rate": 3.6094444444444446e-07,
+      "loss": 1.1007,
       "step": 13000
     },
     {
-      "epoch": 208.0,
-      "eval_accuracy": 0.3588822355289421,
-      "eval_loss": 1.4230743646621704,
-      "eval_runtime": 54.193,
-      "eval_samples_per_second": 92.447,
-      "eval_steps_per_second": 11.57,
+      "epoch": 0.53,
+      "eval_accuracy": 0.34311377245508984,
+      "eval_loss": 1.0933948755264282,
+      "eval_runtime": 54.9285,
+      "eval_samples_per_second": 91.209,
+      "eval_steps_per_second": 11.415,
       "step": 13000
     }
   ],
   "logging_steps": 1000,
   "max_steps": 10000000,
-  "num_train_epochs": 161291,
+  "num_train_epochs": 408,
   "save_steps": 1000,
   "total_flos": 2.7174616694784e+16,
   "trial_name": null,
checkpoint-13000/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6c09d25f832c6c38e287ec7a7fe1b191faf603815bb3b91e32f16768a2d7d3ff
+oid sha256:a9a52921b15d7eeb53b158a9c0f943d88288ccd9232004774ffdf3a600a048d5
 size 4091
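
Note: the trainer_state.json diffed above records the running best score and which checkpoint produced it ("best_metric", "best_model_checkpoint"), plus the full log_history of train/eval entries. As a minimal sketch (not part of this commit), one could inspect those fields locally with plain Python, assuming checkpoint-13000/ has been downloaded to the working directory:

    import json

    # Hypothetical local path; adjust to wherever the checkpoint folder lives.
    with open("checkpoint-13000/trainer_state.json") as f:
        state = json.load(f)

    # Fields visible in the diff above.
    print(state["global_step"])            # 13000
    print(state["best_metric"])            # 1.089038372039795
    print(state["best_model_checkpoint"])  # /kaggle/output/checkpoint-12000

    # Most recent evaluation entry in log_history.
    evals = [e for e in state["log_history"] if "eval_loss" in e]
    print(evals[-1]["step"], evals[-1]["eval_loss"])  # 13000 1.0933948755264282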