nikhilbh committed
Commit aabc2d0
1 Parent(s): 6e22e9a

Uploaded after manual correction in README

Files changed (3)
  1. README.md +21 -4
  2. train_results.json +8 -0
  3. trainer_state.json +718 -0
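
To pin a local copy to exactly this commit, a minimal sketch with `huggingface_hub` (not part of the commit itself): the repo id is an assumption inferred from the model card author and name, and the revision is the short hash shown above; substitute the full commit SHA if the short form is not accepted.

```python
# Sketch: download the repository at this specific revision.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="nikhilbh/whisper-large-v2-custom-hi",  # hypothetical repo id, inferred from the card
    revision="aabc2d0",                             # commit shown on this page; full SHA also works
)
print(local_dir)
```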
README.md CHANGED
@@ -1,21 +1,38 @@
  ---
+ language:
+ - hi
  license: apache-2.0
  base_model: openai/whisper-large-v2
  tags:
  - generated_from_trainer
+ datasets:
+ - mozilla-foundation/common_voice_11_0
  metrics:
  - wer
  model-index:
- - name: whisper-large-v2-custom-hi
-   results: []
+ - name: Whisper Large v2 Custom Hi - Nikhil Bhargava
+   results:
+   - task:
+       name: Automatic Speech Recognition
+       type: automatic-speech-recognition
+     dataset:
+       name: mozilla-foundation/common_voice_11_0 hi
+       type: mozilla-foundation/common_voice_11_0
+       config: hi
+       split: test
+       args: 'config: hi, split: test'
+     metrics:
+     - name: Wer
+       type: wer
+       value: 0.21857275882502328
  ---

  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
  should probably proofread and complete it, then remove this comment. -->

- # whisper-large-v2-custom-hi
+ # Whisper Large v2 Custom Hi - Nikhil Bhargava

- This model is a fine-tuned version of [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) on the None dataset.
+ This model is a fine-tuned version of [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) on the mozilla-foundation/common_voice_11_0 hi dataset.
  It achieves the following results on the evaluation set:
  - Loss: 0.3389
  - Wer: 0.2186
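
For readers of the updated card, a minimal usage sketch (not part of the commit): it assumes the checkpoint is published under the hypothetical repo id `nikhilbh/whisper-large-v2-custom-hi` and uses the standard `transformers` speech-recognition pipeline.

```python
# Sketch: Hindi transcription with the fine-tuned Whisper checkpoint.
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="nikhilbh/whisper-large-v2-custom-hi",  # hypothetical repo id, inferred from the card
    chunk_length_s=30,                            # chunk long audio for Whisper's 30 s window
)

result = asr(
    "sample_hi.wav",  # any local audio file
    generate_kwargs={"language": "hindi", "task": "transcribe"},  # steer decoding to Hindi transcription
)
print(result["text"])
```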
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 24.45,
+ "train_loss": 0.016903733740281313,
+ "train_runtime": 68439.6404,
+ "train_samples": 6540,
+ "train_samples_per_second": 2.338,
+ "train_steps_per_second": 0.073
+ }
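
As a quick cross-check of the figures in train_results.json, `train_samples_per_second` should be roughly `train_samples * epoch / train_runtime`. A small sketch with the values copied from the file above:

```python
# Cross-check the reported training throughput.
epoch = 24.45                # epochs completed
train_samples = 6540         # training examples
train_runtime = 68439.6404   # seconds

samples_per_second = train_samples * epoch / train_runtime
print(round(samples_per_second, 3))  # ~2.336, close to the reported 2.338
```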
trainer_state.json ADDED
@@ -0,0 +1,718 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 24.449877750611247,
+ "eval_steps": 500,
+ "global_step": 5000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.24,
+ "learning_rate": 9.4e-06,
+ "loss": 0.3047,
+ "step": 50
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 9.905050505050506e-06,
+ "loss": 0.179,
+ "step": 100
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 9.804040404040405e-06,
+ "loss": 0.1628,
+ "step": 150
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 9.703030303030305e-06,
+ "loss": 0.1468,
+ "step": 200
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 9.602020202020203e-06,
+ "loss": 0.0966,
+ "step": 250
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 9.501010101010102e-06,
+ "loss": 0.0854,
+ "step": 300
+ },
+ {
+ "epoch": 1.71,
+ "learning_rate": 9.4e-06,
+ "loss": 0.0881,
+ "step": 350
+ },
+ {
+ "epoch": 1.96,
+ "learning_rate": 9.2989898989899e-06,
+ "loss": 0.0841,
+ "step": 400
+ },
+ {
+ "epoch": 2.2,
+ "learning_rate": 9.197979797979799e-06,
+ "loss": 0.0541,
+ "step": 450
+ },
+ {
+ "epoch": 2.44,
+ "learning_rate": 9.096969696969698e-06,
+ "loss": 0.0523,
+ "step": 500
+ },
+ {
+ "epoch": 2.44,
+ "eval_loss": 0.21230381727218628,
+ "eval_runtime": 1923.3381,
+ "eval_samples_per_second": 1.505,
+ "eval_steps_per_second": 0.188,
+ "eval_wer": 0.2664437484127656,
+ "step": 500
+ },
+ {
+ "epoch": 2.69,
+ "learning_rate": 8.995959595959598e-06,
+ "loss": 0.0502,
+ "step": 550
+ },
+ {
+ "epoch": 2.93,
+ "learning_rate": 8.896969696969697e-06,
+ "loss": 0.0468,
+ "step": 600
+ },
+ {
+ "epoch": 3.18,
+ "learning_rate": 8.795959595959596e-06,
+ "loss": 0.0328,
+ "step": 650
+ },
+ {
+ "epoch": 3.42,
+ "learning_rate": 8.694949494949496e-06,
+ "loss": 0.0273,
+ "step": 700
+ },
+ {
+ "epoch": 3.67,
+ "learning_rate": 8.593939393939395e-06,
+ "loss": 0.0283,
+ "step": 750
+ },
+ {
+ "epoch": 3.91,
+ "learning_rate": 8.492929292929295e-06,
+ "loss": 0.0316,
+ "step": 800
+ },
+ {
+ "epoch": 4.16,
+ "learning_rate": 8.391919191919192e-06,
+ "loss": 0.0194,
+ "step": 850
+ },
+ {
+ "epoch": 4.4,
+ "learning_rate": 8.290909090909092e-06,
+ "loss": 0.0151,
+ "step": 900
+ },
+ {
+ "epoch": 4.65,
+ "learning_rate": 8.18989898989899e-06,
+ "loss": 0.0164,
+ "step": 950
+ },
+ {
+ "epoch": 4.89,
+ "learning_rate": 8.08888888888889e-06,
+ "loss": 0.0187,
+ "step": 1000
+ },
+ {
+ "epoch": 4.89,
+ "eval_loss": 0.22370614111423492,
+ "eval_runtime": 1885.8595,
+ "eval_samples_per_second": 1.535,
+ "eval_steps_per_second": 0.192,
+ "eval_wer": 0.23702700414797256,
+ "step": 1000
+ },
+ {
+ "epoch": 5.13,
+ "learning_rate": 7.987878787878789e-06,
+ "loss": 0.0122,
+ "step": 1050
+ },
+ {
+ "epoch": 5.38,
+ "learning_rate": 7.886868686868686e-06,
+ "loss": 0.0104,
+ "step": 1100
+ },
+ {
+ "epoch": 5.62,
+ "learning_rate": 7.785858585858586e-06,
+ "loss": 0.0102,
+ "step": 1150
+ },
+ {
+ "epoch": 5.87,
+ "learning_rate": 7.684848484848485e-06,
+ "loss": 0.0124,
+ "step": 1200
+ },
+ {
+ "epoch": 6.11,
+ "learning_rate": 7.583838383838384e-06,
+ "loss": 0.0084,
+ "step": 1250
+ },
+ {
+ "epoch": 6.36,
+ "learning_rate": 7.4828282828282835e-06,
+ "loss": 0.0067,
+ "step": 1300
+ },
+ {
+ "epoch": 6.6,
+ "learning_rate": 7.381818181818182e-06,
+ "loss": 0.0067,
+ "step": 1350
+ },
+ {
+ "epoch": 6.85,
+ "learning_rate": 7.280808080808082e-06,
+ "loss": 0.0074,
+ "step": 1400
+ },
+ {
+ "epoch": 7.09,
+ "learning_rate": 7.17979797979798e-06,
+ "loss": 0.0059,
+ "step": 1450
+ },
+ {
+ "epoch": 7.33,
+ "learning_rate": 7.07878787878788e-06,
+ "loss": 0.0041,
+ "step": 1500
+ },
+ {
+ "epoch": 7.33,
+ "eval_loss": 0.26467418670654297,
+ "eval_runtime": 1880.1582,
+ "eval_samples_per_second": 1.539,
+ "eval_steps_per_second": 0.193,
+ "eval_wer": 0.23097435029205113,
+ "step": 1500
+ },
+ {
+ "epoch": 7.58,
+ "learning_rate": 6.977777777777779e-06,
+ "loss": 0.0038,
+ "step": 1550
+ },
+ {
+ "epoch": 7.82,
+ "learning_rate": 6.876767676767677e-06,
+ "loss": 0.005,
+ "step": 1600
+ },
+ {
+ "epoch": 8.07,
+ "learning_rate": 6.7757575757575765e-06,
+ "loss": 0.0035,
+ "step": 1650
+ },
+ {
+ "epoch": 8.31,
+ "learning_rate": 6.674747474747475e-06,
+ "loss": 0.0034,
+ "step": 1700
+ },
+ {
+ "epoch": 8.56,
+ "learning_rate": 6.5737373737373746e-06,
+ "loss": 0.0034,
+ "step": 1750
+ },
+ {
+ "epoch": 8.8,
+ "learning_rate": 6.472727272727272e-06,
+ "loss": 0.0045,
+ "step": 1800
+ },
+ {
+ "epoch": 9.05,
+ "learning_rate": 6.371717171717172e-06,
+ "loss": 0.0034,
+ "step": 1850
+ },
+ {
+ "epoch": 9.29,
+ "learning_rate": 6.270707070707071e-06,
+ "loss": 0.0025,
+ "step": 1900
+ },
+ {
+ "epoch": 9.54,
+ "learning_rate": 6.16969696969697e-06,
+ "loss": 0.0038,
+ "step": 1950
+ },
+ {
+ "epoch": 9.78,
+ "learning_rate": 6.068686868686869e-06,
+ "loss": 0.0028,
+ "step": 2000
+ },
+ {
+ "epoch": 9.78,
+ "eval_loss": 0.29039227962493896,
+ "eval_runtime": 1891.4656,
+ "eval_samples_per_second": 1.53,
+ "eval_steps_per_second": 0.191,
+ "eval_wer": 0.23436045035130787,
+ "step": 2000
+ },
+ {
+ "epoch": 10.02,
+ "learning_rate": 5.967676767676768e-06,
+ "loss": 0.0021,
+ "step": 2050
+ },
+ {
+ "epoch": 10.27,
+ "learning_rate": 5.8666666666666675e-06,
+ "loss": 0.0015,
+ "step": 2100
+ },
+ {
+ "epoch": 10.51,
+ "learning_rate": 5.765656565656567e-06,
+ "loss": 0.0018,
+ "step": 2150
+ },
+ {
+ "epoch": 10.76,
+ "learning_rate": 5.664646464646465e-06,
+ "loss": 0.0019,
+ "step": 2200
+ },
+ {
+ "epoch": 11.0,
+ "learning_rate": 5.563636363636364e-06,
+ "loss": 0.0023,
+ "step": 2250
+ },
+ {
+ "epoch": 11.25,
+ "learning_rate": 5.462626262626263e-06,
+ "loss": 0.0015,
+ "step": 2300
+ },
+ {
+ "epoch": 11.49,
+ "learning_rate": 5.361616161616162e-06,
+ "loss": 0.0018,
+ "step": 2350
+ },
+ {
+ "epoch": 11.74,
+ "learning_rate": 5.26060606060606e-06,
+ "loss": 0.002,
+ "step": 2400
+ },
+ {
+ "epoch": 11.98,
+ "learning_rate": 5.15959595959596e-06,
+ "loss": 0.0014,
+ "step": 2450
+ },
+ {
+ "epoch": 12.22,
+ "learning_rate": 5.058585858585859e-06,
+ "loss": 0.0015,
+ "step": 2500
+ },
+ {
+ "epoch": 12.22,
+ "eval_loss": 0.29083308577537537,
+ "eval_runtime": 1888.3025,
+ "eval_samples_per_second": 1.533,
+ "eval_steps_per_second": 0.192,
+ "eval_wer": 0.2268263777194616,
+ "step": 2500
+ },
+ {
+ "epoch": 12.47,
+ "learning_rate": 4.957575757575758e-06,
+ "loss": 0.0012,
+ "step": 2550
+ },
+ {
+ "epoch": 12.71,
+ "learning_rate": 4.856565656565657e-06,
+ "loss": 0.0012,
+ "step": 2600
+ },
+ {
+ "epoch": 12.96,
+ "learning_rate": 4.755555555555556e-06,
+ "loss": 0.001,
+ "step": 2650
+ },
+ {
+ "epoch": 13.2,
+ "learning_rate": 4.654545454545455e-06,
+ "loss": 0.0008,
+ "step": 2700
+ },
+ {
+ "epoch": 13.45,
+ "learning_rate": 4.553535353535354e-06,
+ "loss": 0.0007,
+ "step": 2750
+ },
+ {
+ "epoch": 13.69,
+ "learning_rate": 4.452525252525253e-06,
+ "loss": 0.0004,
+ "step": 2800
+ },
+ {
+ "epoch": 13.94,
+ "learning_rate": 4.351515151515152e-06,
+ "loss": 0.0005,
+ "step": 2850
+ },
+ {
+ "epoch": 14.18,
+ "learning_rate": 4.250505050505051e-06,
+ "loss": 0.0004,
+ "step": 2900
+ },
+ {
+ "epoch": 14.43,
+ "learning_rate": 4.14949494949495e-06,
+ "loss": 0.0004,
+ "step": 2950
+ },
+ {
+ "epoch": 14.67,
+ "learning_rate": 4.048484848484849e-06,
+ "loss": 0.0003,
+ "step": 3000
+ },
+ {
+ "epoch": 14.67,
+ "eval_loss": 0.3021999001502991,
+ "eval_runtime": 1889.0309,
+ "eval_samples_per_second": 1.532,
+ "eval_steps_per_second": 0.192,
+ "eval_wer": 0.21967324134428173,
+ "step": 3000
+ },
+ {
+ "epoch": 14.91,
+ "learning_rate": 3.9474747474747474e-06,
+ "loss": 0.0003,
+ "step": 3050
+ },
+ {
+ "epoch": 15.16,
+ "learning_rate": 3.846464646464647e-06,
+ "loss": 0.0002,
+ "step": 3100
+ },
+ {
+ "epoch": 15.4,
+ "learning_rate": 3.745454545454546e-06,
+ "loss": 0.0001,
+ "step": 3150
+ },
+ {
+ "epoch": 15.65,
+ "learning_rate": 3.644444444444445e-06,
+ "loss": 0.0004,
+ "step": 3200
+ },
+ {
+ "epoch": 15.89,
+ "learning_rate": 3.5434343434343437e-06,
+ "loss": 0.0001,
+ "step": 3250
+ },
+ {
+ "epoch": 16.14,
+ "learning_rate": 3.4424242424242427e-06,
+ "loss": 0.0001,
+ "step": 3300
+ },
+ {
+ "epoch": 16.38,
+ "learning_rate": 3.3414141414141413e-06,
+ "loss": 0.0001,
+ "step": 3350
+ },
+ {
+ "epoch": 16.63,
+ "learning_rate": 3.2404040404040404e-06,
+ "loss": 0.0001,
+ "step": 3400
+ },
+ {
+ "epoch": 16.87,
+ "learning_rate": 3.13939393939394e-06,
+ "loss": 0.0001,
+ "step": 3450
+ },
+ {
+ "epoch": 17.11,
+ "learning_rate": 3.038383838383839e-06,
+ "loss": 0.0003,
+ "step": 3500
+ },
+ {
+ "epoch": 17.11,
+ "eval_loss": 0.3248833417892456,
+ "eval_runtime": 1885.9913,
+ "eval_samples_per_second": 1.534,
+ "eval_steps_per_second": 0.192,
+ "eval_wer": 0.2195462625920596,
+ "step": 3500
+ },
+ {
+ "epoch": 17.36,
+ "learning_rate": 2.9373737373737376e-06,
+ "loss": 0.0001,
+ "step": 3550
+ },
+ {
+ "epoch": 17.6,
+ "learning_rate": 2.8363636363636366e-06,
+ "loss": 0.0004,
+ "step": 3600
+ },
+ {
+ "epoch": 17.85,
+ "learning_rate": 2.7353535353535353e-06,
+ "loss": 0.0001,
+ "step": 3650
+ },
+ {
+ "epoch": 18.09,
+ "learning_rate": 2.6343434343434343e-06,
+ "loss": 0.0001,
+ "step": 3700
+ },
+ {
+ "epoch": 18.34,
+ "learning_rate": 2.5333333333333338e-06,
+ "loss": 0.0002,
+ "step": 3750
+ },
+ {
+ "epoch": 18.58,
+ "learning_rate": 2.432323232323233e-06,
+ "loss": 0.0001,
+ "step": 3800
+ },
+ {
+ "epoch": 18.83,
+ "learning_rate": 2.3313131313131315e-06,
+ "loss": 0.0002,
+ "step": 3850
+ },
+ {
+ "epoch": 19.07,
+ "learning_rate": 2.2303030303030305e-06,
+ "loss": 0.0001,
+ "step": 3900
+ },
+ {
+ "epoch": 19.32,
+ "learning_rate": 2.1292929292929296e-06,
+ "loss": 0.0001,
+ "step": 3950
+ },
+ {
+ "epoch": 19.56,
+ "learning_rate": 2.0282828282828286e-06,
+ "loss": 0.0003,
+ "step": 4000
+ },
+ {
+ "epoch": 19.56,
+ "eval_loss": 0.3216637372970581,
+ "eval_runtime": 1889.5891,
+ "eval_samples_per_second": 1.532,
+ "eval_steps_per_second": 0.192,
+ "eval_wer": 0.21611783628206213,
+ "step": 4000
+ },
+ {
+ "epoch": 19.8,
+ "learning_rate": 1.9272727272727273e-06,
+ "loss": 0.0001,
+ "step": 4050
+ },
+ {
+ "epoch": 20.05,
+ "learning_rate": 1.8262626262626265e-06,
+ "loss": 0.0001,
+ "step": 4100
+ },
+ {
+ "epoch": 20.29,
+ "learning_rate": 1.7252525252525254e-06,
+ "loss": 0.0002,
+ "step": 4150
+ },
+ {
+ "epoch": 20.54,
+ "learning_rate": 1.6242424242424242e-06,
+ "loss": 0.0001,
+ "step": 4200
+ },
+ {
+ "epoch": 20.78,
+ "learning_rate": 1.5232323232323235e-06,
+ "loss": 0.0,
+ "step": 4250
+ },
+ {
+ "epoch": 21.03,
+ "learning_rate": 1.4222222222222223e-06,
+ "loss": 0.0,
+ "step": 4300
+ },
+ {
+ "epoch": 21.27,
+ "learning_rate": 1.3212121212121212e-06,
+ "loss": 0.0,
+ "step": 4350
+ },
+ {
+ "epoch": 21.52,
+ "learning_rate": 1.2202020202020202e-06,
+ "loss": 0.0,
+ "step": 4400
+ },
+ {
+ "epoch": 21.76,
+ "learning_rate": 1.1191919191919193e-06,
+ "loss": 0.0,
+ "step": 4450
+ },
+ {
+ "epoch": 22.0,
+ "learning_rate": 1.0181818181818183e-06,
+ "loss": 0.0,
+ "step": 4500
+ },
+ {
+ "epoch": 22.0,
+ "eval_loss": 0.3335433900356293,
+ "eval_runtime": 1886.0688,
+ "eval_samples_per_second": 1.534,
+ "eval_steps_per_second": 0.192,
+ "eval_wer": 0.21814949631761618,
+ "step": 4500
+ },
+ {
+ "epoch": 22.25,
+ "learning_rate": 9.171717171717172e-07,
+ "loss": 0.0,
+ "step": 4550
+ },
+ {
+ "epoch": 22.49,
+ "learning_rate": 8.161616161616162e-07,
+ "loss": 0.0,
+ "step": 4600
+ },
+ {
+ "epoch": 22.74,
+ "learning_rate": 7.151515151515153e-07,
+ "loss": 0.0,
+ "step": 4650
+ },
+ {
+ "epoch": 22.98,
+ "learning_rate": 6.141414141414142e-07,
+ "loss": 0.0,
+ "step": 4700
+ },
+ {
+ "epoch": 23.23,
+ "learning_rate": 5.131313131313132e-07,
+ "loss": 0.0,
+ "step": 4750
+ },
+ {
+ "epoch": 23.47,
+ "learning_rate": 4.121212121212122e-07,
+ "loss": 0.0,
+ "step": 4800
+ },
+ {
+ "epoch": 23.72,
+ "learning_rate": 3.111111111111111e-07,
+ "loss": 0.0,
+ "step": 4850
+ },
+ {
+ "epoch": 23.96,
+ "learning_rate": 2.1010101010101013e-07,
+ "loss": 0.0,
+ "step": 4900
+ },
+ {
+ "epoch": 24.21,
+ "learning_rate": 1.090909090909091e-07,
+ "loss": 0.0,
+ "step": 4950
+ },
+ {
+ "epoch": 24.45,
+ "learning_rate": 8.080808080808081e-09,
+ "loss": 0.0,
+ "step": 5000
+ },
+ {
+ "epoch": 24.45,
+ "eval_loss": 0.33888712525367737,
+ "eval_runtime": 1887.2294,
+ "eval_samples_per_second": 1.533,
+ "eval_steps_per_second": 0.192,
+ "eval_wer": 0.21857275882502328,
+ "step": 5000
+ },
+ {
+ "epoch": 24.45,
+ "step": 5000,
+ "total_flos": 3.395035389100032e+20,
+ "train_loss": 0.016903733740281313,
+ "train_runtime": 68439.6404,
+ "train_samples_per_second": 2.338,
+ "train_steps_per_second": 0.073
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 5000,
+ "num_train_epochs": 25,
+ "save_steps": 500,
+ "total_flos": 3.395035389100032e+20,
+ "trial_name": null,
+ "trial_params": null
+ }
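
The log_history above interleaves training-loss entries (every 50 steps) with evaluation entries (every 500 steps). A small sketch for pulling out the eval WER curve, assuming the file has been downloaded locally as trainer_state.json:

```python
# Extract the evaluation entries from trainer_state.json and print the WER curve.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Evaluation entries are the log records carrying an "eval_wer" key.
eval_points = [(e["step"], e["eval_wer"]) for e in state["log_history"] if "eval_wer" in e]

for step, wer in eval_points:
    print(f"step {step:5d}  WER {wer:.4f}")
# The final entry (step 5000, WER ~0.2186) matches the value reported in the README.
```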