saroyehun committed
Commit e75de2f
1 Parent(s): 53f1181

Upload folder using huggingface_hub

adapter_config.json CHANGED
@@ -23,13 +23,13 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
  "v_proj",
- "o_proj",
- "down_proj",
  "k_proj",
- "up_proj",
- "gate_proj",
- "q_proj"
  ],
  "task_type": null,
  "use_dora": false,

  "rank_pattern": {},
  "revision": null,
  "target_modules": [
+ "gate_proj",
+ "q_proj",
  "v_proj",
  "k_proj",
+ "down_proj",
+ "o_proj",
+ "up_proj"
  ],
  "task_type": null,
  "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f12d3bb063fc19126b27a69ace5966a164e31befcec1b0912c36a4c3836efcbc
  size 167829552

  version https://git-lfs.github.com/spec/v1
+ oid sha256:6923740a2122c499a8b57fa0e72800dfb5ad897744ed4c72ce1b92e8d30f6395
  size 167829552
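
The adapter weights were replaced (new sha256, same 167,829,552-byte size). A minimal sketch of loading the updated adapter with peft; the base model name and adapter repo id below are placeholders, since neither appears in this commit.

# Sketch only: "base-model-id" and "user/adapter-repo" are placeholders,
# not taken from this commit.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("base-model-id")   # placeholder
model = PeftModel.from_pretrained(base, "user/adapter-repo")   # placeholder
model = model.merge_and_unload()  # optional: fold the LoRA deltas into the base weights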
trainer_state.json CHANGED
@@ -1,161 +1,371 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.06411078343377356,
  "eval_steps": 500,
- "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
  {
- "epoch": 0.003205539171688678,
- "grad_norm": 0.43528684973716736,
- "learning_rate": 2.9967944608283113e-05,
- "loss": 1.7809,
  "step": 50
  },
  {
- "epoch": 0.006411078343377356,
- "grad_norm": 0.6100763082504272,
- "learning_rate": 2.993588921656623e-05,
- "loss": 1.0224,
  "step": 100
  },
  {
- "epoch": 0.009616617515066034,
- "grad_norm": 0.12871447205543518,
- "learning_rate": 2.990383382484934e-05,
- "loss": 1.0167,
  "step": 150
  },
  {
- "epoch": 0.012822156686754712,
- "grad_norm": 0.08891259878873825,
- "learning_rate": 2.9871778433132457e-05,
- "loss": 0.9958,
  "step": 200
  },
  {
- "epoch": 0.01602769585844339,
- "grad_norm": 0.26497164368629456,
- "learning_rate": 2.9839723041415565e-05,
- "loss": 1.0257,
  "step": 250
  },
  {
- "epoch": 0.01923323503013207,
- "grad_norm": 0.10345622897148132,
- "learning_rate": 2.9807667649698678e-05,
- "loss": 1.0383,
  "step": 300
  },
  {
- "epoch": 0.022438774201820746,
- "grad_norm": 0.31675589084625244,
- "learning_rate": 2.9775612257981793e-05,
- "loss": 1.0503,
  "step": 350
  },
  {
- "epoch": 0.025644313373509423,
- "grad_norm": 0.13149690628051758,
- "learning_rate": 2.9743556866264905e-05,
- "loss": 1.0373,
  "step": 400
  },
  {
- "epoch": 0.0288498525451981,
- "grad_norm": 0.45333313941955566,
- "learning_rate": 2.971150147454802e-05,
- "loss": 0.9785,
  "step": 450
  },
  {
- "epoch": 0.03205539171688678,
- "grad_norm": 0.11852947622537613,
- "learning_rate": 2.9679446082831133e-05,
- "loss": 1.0165,
  "step": 500
  },
  {
- "epoch": 0.03526093088857546,
- "grad_norm": 0.13717707991600037,
- "learning_rate": 2.9647390691114246e-05,
- "loss": 0.9997,
  "step": 550
  },
  {
- "epoch": 0.03846647006026414,
- "grad_norm": 0.4000389277935028,
- "learning_rate": 2.961533529939736e-05,
- "loss": 1.012,
  "step": 600
  },
  {
- "epoch": 0.041672009231952814,
- "grad_norm": 0.08108354359865189,
- "learning_rate": 2.9583279907680473e-05,
- "loss": 1.0151,
  "step": 650
  },
  {
- "epoch": 0.04487754840364149,
- "grad_norm": 0.09668228775262833,
- "learning_rate": 2.955122451596359e-05,
- "loss": 0.9492,
  "step": 700
  },
  {
- "epoch": 0.04808308757533017,
- "grad_norm": 0.13635773956775665,
- "learning_rate": 2.9519169124246698e-05,
- "loss": 0.9951,
  "step": 750
  },
  {
- "epoch": 0.051288626747018846,
- "grad_norm": 0.1322096884250641,
- "learning_rate": 2.948711373252981e-05,
- "loss": 0.9404,
  "step": 800
  },
  {
- "epoch": 0.054494165918707524,
- "grad_norm": 0.30008432269096375,
- "learning_rate": 2.9455058340812926e-05,
- "loss": 0.9848,
  "step": 850
  },
  {
- "epoch": 0.0576997050903962,
- "grad_norm": 0.16301093995571136,
- "learning_rate": 2.9423002949096038e-05,
- "loss": 1.04,
  "step": 900
  },
  {
- "epoch": 0.060905244262084886,
- "grad_norm": 0.1954452395439148,
- "learning_rate": 2.939094755737915e-05,
- "loss": 0.992,
  "step": 950
  },
  {
- "epoch": 0.06411078343377356,
- "grad_norm": 0.34399452805519104,
- "learning_rate": 2.9358892165662266e-05,
- "loss": 0.967,
  "step": 1000
  }
  ],
  "logging_steps": 50,
- "max_steps": 46794,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
- "save_steps": 200,
  "total_flos": 0.0,
- "train_batch_size": 100,
  "trial_name": null,
  "trial_params": null
  }
 
  {
  "best_metric": null,
  "best_model_checkpoint": null,
+ "epoch": 0.0837464826477288,
  "eval_steps": 500,
+ "global_step": 2500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
  {
+ "epoch": 0.001674929652954576,
+ "grad_norm": 0.30847179889678955,
+ "learning_rate": 2.9983250703470456e-05,
+ "loss": 0.5044,
  "step": 50
  },
  {
+ "epoch": 0.003349859305909152,
+ "grad_norm": 0.04476890340447426,
+ "learning_rate": 2.9966501406940907e-05,
+ "loss": 0.0044,
  "step": 100
  },
  {
+ "epoch": 0.005024788958863728,
+ "grad_norm": 0.08374588936567307,
+ "learning_rate": 2.9949752110411362e-05,
+ "loss": 0.0026,
  "step": 150
  },
  {
+ "epoch": 0.006699718611818304,
+ "grad_norm": 0.0017046213615685701,
+ "learning_rate": 2.9933002813881818e-05,
+ "loss": 0.0023,
  "step": 200
  },
  {
+ "epoch": 0.008374648264772879,
+ "grad_norm": 0.019402090460062027,
+ "learning_rate": 2.9916253517352273e-05,
+ "loss": 0.0013,
  "step": 250
  },
  {
+ "epoch": 0.010049577917727455,
+ "grad_norm": 0.010655886493623257,
+ "learning_rate": 2.9899504220822728e-05,
+ "loss": 0.002,
  "step": 300
  },
  {
+ "epoch": 0.011724507570682031,
+ "grad_norm": 0.006348441354930401,
+ "learning_rate": 2.9882754924293183e-05,
+ "loss": 0.0017,
  "step": 350
  },
  {
+ "epoch": 0.013399437223636608,
+ "grad_norm": 0.06810770183801651,
+ "learning_rate": 2.9866005627763634e-05,
+ "loss": 0.0015,
  "step": 400
  },
  {
+ "epoch": 0.015074366876591184,
+ "grad_norm": 0.003874759189784527,
+ "learning_rate": 2.984925633123409e-05,
+ "loss": 0.0012,
  "step": 450
  },
  {
+ "epoch": 0.016749296529545758,
+ "grad_norm": 0.003985659219324589,
+ "learning_rate": 2.9832507034704544e-05,
+ "loss": 0.0012,
  "step": 500
  },
  {
+ "epoch": 0.018424226182500336,
+ "grad_norm": 0.01902610994875431,
+ "learning_rate": 2.9815757738174996e-05,
+ "loss": 0.0014,
  "step": 550
  },
  {
+ "epoch": 0.02009915583545491,
+ "grad_norm": 0.01615321636199951,
+ "learning_rate": 2.979900844164545e-05,
+ "loss": 0.0013,
  "step": 600
  },
  {
+ "epoch": 0.02177408548840949,
+ "grad_norm": 0.0055249775759875774,
+ "learning_rate": 2.9782259145115903e-05,
+ "loss": 0.0008,
  "step": 650
  },
  {
+ "epoch": 0.023449015141364063,
+ "grad_norm": 0.0019460869953036308,
+ "learning_rate": 2.976550984858636e-05,
+ "loss": 0.0011,
  "step": 700
  },
  {
+ "epoch": 0.025123944794318637,
+ "grad_norm": 0.012369350530207157,
+ "learning_rate": 2.9748760552056816e-05,
+ "loss": 0.0011,
  "step": 750
  },
  {
+ "epoch": 0.026798874447273215,
+ "grad_norm": 0.00836873333901167,
+ "learning_rate": 2.9732011255527268e-05,
+ "loss": 0.0013,
  "step": 800
  },
  {
+ "epoch": 0.02847380410022779,
+ "grad_norm": 0.0034731472842395306,
+ "learning_rate": 2.9715261958997723e-05,
+ "loss": 0.0016,
  "step": 850
  },
  {
+ "epoch": 0.030148733753182368,
+ "grad_norm": 0.015365710482001305,
+ "learning_rate": 2.9698512662468178e-05,
+ "loss": 0.0013,
  "step": 900
  },
  {
+ "epoch": 0.031823663406136946,
+ "grad_norm": 0.017056584358215332,
+ "learning_rate": 2.968176336593863e-05,
+ "loss": 0.0013,
  "step": 950
  },
  {
+ "epoch": 0.033498593059091517,
+ "grad_norm": 0.0038616659585386515,
+ "learning_rate": 2.9665014069409085e-05,
+ "loss": 0.0009,
  "step": 1000
+ },
+ {
+ "epoch": 0.035173522712046094,
+ "grad_norm": 0.0008767916006036103,
+ "learning_rate": 2.964826477287954e-05,
+ "loss": 0.0006,
+ "step": 1050
+ },
+ {
+ "epoch": 0.03684845236500067,
+ "grad_norm": 0.050677187740802765,
+ "learning_rate": 2.963151547634999e-05,
+ "loss": 0.0014,
+ "step": 1100
+ },
+ {
+ "epoch": 0.03852338201795524,
+ "grad_norm": 0.011451843194663525,
+ "learning_rate": 2.961476617982045e-05,
+ "loss": 0.0008,
+ "step": 1150
+ },
+ {
+ "epoch": 0.04019831167090982,
+ "grad_norm": 0.00335301854647696,
+ "learning_rate": 2.9598016883290905e-05,
+ "loss": 0.0013,
+ "step": 1200
+ },
+ {
+ "epoch": 0.0418732413238644,
+ "grad_norm": 0.01707889698445797,
+ "learning_rate": 2.9581267586761356e-05,
+ "loss": 0.0005,
+ "step": 1250
+ },
+ {
+ "epoch": 0.04354817097681898,
+ "grad_norm": 0.0004460318305063993,
+ "learning_rate": 2.956451829023181e-05,
+ "loss": 0.0011,
+ "step": 1300
+ },
+ {
+ "epoch": 0.04522310062977355,
+ "grad_norm": 0.00859643705189228,
+ "learning_rate": 2.9547768993702266e-05,
+ "loss": 0.0007,
+ "step": 1350
+ },
+ {
+ "epoch": 0.046898030282728126,
+ "grad_norm": 0.012995535507798195,
+ "learning_rate": 2.9531019697172718e-05,
+ "loss": 0.0009,
+ "step": 1400
+ },
+ {
+ "epoch": 0.048572959935682704,
+ "grad_norm": 0.004834771156311035,
+ "learning_rate": 2.9514270400643173e-05,
+ "loss": 0.0009,
+ "step": 1450
+ },
+ {
+ "epoch": 0.050247889588637275,
+ "grad_norm": 0.006121751386672258,
+ "learning_rate": 2.9497521104113628e-05,
+ "loss": 0.0009,
+ "step": 1500
+ },
+ {
+ "epoch": 0.05192281924159185,
+ "grad_norm": 0.004112472757697105,
+ "learning_rate": 2.9480771807584083e-05,
+ "loss": 0.0007,
+ "step": 1550
+ },
+ {
+ "epoch": 0.05359774889454643,
+ "grad_norm": 0.0025941322091966867,
+ "learning_rate": 2.9464022511054538e-05,
+ "loss": 0.0008,
+ "step": 1600
+ },
+ {
+ "epoch": 0.055272678547501,
+ "grad_norm": 0.0033354111947119236,
+ "learning_rate": 2.9447273214524993e-05,
+ "loss": 0.0014,
+ "step": 1650
+ },
+ {
+ "epoch": 0.05694760820045558,
+ "grad_norm": 0.0006163300131447613,
+ "learning_rate": 2.9430523917995445e-05,
+ "loss": 0.0013,
+ "step": 1700
+ },
+ {
+ "epoch": 0.05862253785341016,
+ "grad_norm": 0.03437214344739914,
+ "learning_rate": 2.94137746214659e-05,
+ "loss": 0.0011,
+ "step": 1750
+ },
+ {
+ "epoch": 0.060297467506364735,
+ "grad_norm": 0.004870133940130472,
+ "learning_rate": 2.939702532493635e-05,
+ "loss": 0.0011,
+ "step": 1800
+ },
+ {
+ "epoch": 0.061972397159319306,
+ "grad_norm": 0.011027672328054905,
+ "learning_rate": 2.9380276028406807e-05,
+ "loss": 0.0011,
+ "step": 1850
+ },
+ {
+ "epoch": 0.06364732681227389,
+ "grad_norm": 0.005804801359772682,
+ "learning_rate": 2.936352673187726e-05,
+ "loss": 0.0009,
+ "step": 1900
+ },
+ {
+ "epoch": 0.06532225646522846,
+ "grad_norm": 0.001421495107933879,
+ "learning_rate": 2.9346777435347713e-05,
+ "loss": 0.0007,
+ "step": 1950
+ },
+ {
+ "epoch": 0.06699718611818303,
+ "grad_norm": 0.00775284506380558,
+ "learning_rate": 2.9330028138818172e-05,
+ "loss": 0.0008,
+ "step": 2000
+ },
+ {
+ "epoch": 0.06867211577113762,
+ "grad_norm": 0.009989109821617603,
+ "learning_rate": 2.9313278842288627e-05,
+ "loss": 0.0013,
+ "step": 2050
+ },
+ {
+ "epoch": 0.07034704542409219,
+ "grad_norm": 0.004933220334351063,
+ "learning_rate": 2.929652954575908e-05,
+ "loss": 0.0008,
+ "step": 2100
+ },
+ {
+ "epoch": 0.07202197507704676,
+ "grad_norm": 0.0003998636966571212,
+ "learning_rate": 2.9279780249229533e-05,
+ "loss": 0.0005,
+ "step": 2150
+ },
+ {
+ "epoch": 0.07369690473000134,
+ "grad_norm": 0.00017748262325767428,
+ "learning_rate": 2.926303095269999e-05,
+ "loss": 0.0006,
+ "step": 2200
+ },
+ {
+ "epoch": 0.07537183438295592,
+ "grad_norm": 0.0009646079852245748,
+ "learning_rate": 2.924628165617044e-05,
+ "loss": 0.0007,
+ "step": 2250
+ },
+ {
+ "epoch": 0.07704676403591049,
+ "grad_norm": 0.004842822439968586,
+ "learning_rate": 2.9229532359640895e-05,
+ "loss": 0.0011,
+ "step": 2300
+ },
+ {
+ "epoch": 0.07872169368886507,
+ "grad_norm": 0.010380016639828682,
+ "learning_rate": 2.921278306311135e-05,
+ "loss": 0.001,
+ "step": 2350
+ },
+ {
+ "epoch": 0.08039662334181964,
+ "grad_norm": 0.0021645210217684507,
+ "learning_rate": 2.9196033766581802e-05,
+ "loss": 0.0009,
+ "step": 2400
+ },
+ {
+ "epoch": 0.08207155299477421,
+ "grad_norm": 0.0019599520601332188,
+ "learning_rate": 2.917928447005226e-05,
+ "loss": 0.0009,
+ "step": 2450
+ },
+ {
+ "epoch": 0.0837464826477288,
+ "grad_norm": 0.03942473977804184,
+ "learning_rate": 2.9162535173522715e-05,
+ "loss": 0.0011,
+ "step": 2500
  }
  ],
  "logging_steps": 50,
+ "max_steps": 89556,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
+ "save_steps": 500,
  "total_flos": 0.0,
+ "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
  }
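
The new trainer state is internally consistent: the reported epoch matches global_step / max_steps * num_train_epochs. A quick check with the values from the JSON above (no assumptions beyond those three fields):

# Consistency check using values from the new trainer_state.json above.
global_step = 2500
max_steps = 89556
num_train_epochs = 3

steps_per_epoch = max_steps // num_train_epochs   # 29852 optimizer steps per epoch
epoch = global_step / max_steps * num_train_epochs
print(epoch)  # ~0.0837464826477288, the "epoch" value reported above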
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5a3016ede18977651f64e35e7937a6eedad1efb5f8d5b0283902a432b80950ff
  size 5048

  version https://git-lfs.github.com/spec/v1
+ oid sha256:740c8229cdedb3d1d6b326158ea54f02a778419a7c50fd4a45de07c549a9e9fe
  size 5048
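
training_args.bin is the serialized TrainingArguments object that Trainer saves next to its checkpoints; its hash changed because the run was reconfigured (train_batch_size 100 -> 64, save_steps 200 -> 500, max_steps 46794 -> 89556). A sketch of arguments consistent with the new trainer_state.json above; the learning rate is inferred from the logged schedule (it starts just below 3e-05 and decays linearly), and output_dir plus any field not visible in the state file are assumptions.

# Sketch only: reconstructed from the new trainer_state.json; output_dir,
# the learning rate and anything not visible there are assumptions.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="outputs",            # assumed
    num_train_epochs=3,              # "num_train_epochs": 3
    per_device_train_batch_size=64,  # "train_batch_size": 64 (assumed per-device)
    logging_steps=50,                # "logging_steps": 50
    save_steps=500,                  # "save_steps": 500
    eval_steps=500,                  # "eval_steps": 500
    learning_rate=3e-5,              # inferred: schedule starts at ~3e-05
)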