csikasote committed
Commit d870465
1 parent: 067f840

End of training

README.md CHANGED
@@ -3,6 +3,9 @@ library_name: transformers
 license: cc-by-nc-4.0
 base_model: facebook/mms-1b-all
 tags:
+- automatic-speech-recognition
+- natbed
+- mms
 - generated_from_trainer
 metrics:
 - wer
@@ -16,10 +19,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # mms-1b-all-bem-natbed-nn-model
 
-This model is a fine-tuned version of [facebook/mms-1b-all](https://huggingface.co/facebook/mms-1b-all) on an unknown dataset.
+This model is a fine-tuned version of [facebook/mms-1b-all](https://huggingface.co/facebook/mms-1b-all) on the NATBED - BEM dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.6036
-- Wer: 0.5197
+- Loss: 0.5884
+- Wer: 0.5333
 
 ## Model description
 
adapter.bem.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea2306c85baea18513943b267ad30910539e3f2d2b52c1cfc5d36f927c2d9a74
+size 8819028
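
The adapter.bem.safetensors file added above holds the per-language adapter weights that the MMS architecture swaps in for Bemba, matching the fine-tuned model described in the README diff. Below is a minimal, hedged inference sketch for such a checkpoint using the standard transformers MMS API; the repo id and audio path are placeholders/assumptions, not taken from this commit, and depending on how the checkpoint was saved the explicit adapter calls may be unnecessary.

```python
# Hedged usage sketch for an MMS adapter checkpoint like this one.
import torch
import librosa
from transformers import AutoProcessor, Wav2Vec2ForCTC

repo_id = "csikasote/mms-1b-all-bem-natbed-nn-model"  # assumed repo id (placeholder)

processor = AutoProcessor.from_pretrained(repo_id)
model = Wav2Vec2ForCTC.from_pretrained(repo_id)

# MMS selects a language via per-language adapter weights; "bem" corresponds to
# the adapter.bem.safetensors file added in this commit. If the checkpoint was
# saved with the Bemba vocabulary/adapter already active, these calls may not be needed.
processor.tokenizer.set_target_lang("bem")
model.load_adapter("bem")

# Any 16 kHz mono clip will do; the path below is a placeholder.
speech, _ = librosa.load("audio_16khz.wav", sr=16_000, mono=True)
inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits
pred_ids = torch.argmax(logits, dim=-1)[0]
print(processor.decode(pred_ids))
```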
all_results.json ADDED
@@ -0,0 +1,15 @@
+{
+    "epoch": 6.320541760722348,
+    "eval_loss": 0.5884432792663574,
+    "eval_runtime": 45.0521,
+    "eval_samples": 708,
+    "eval_samples_per_second": 15.715,
+    "eval_steps_per_second": 1.975,
+    "eval_wer": 0.5332880434782609,
+    "total_flos": 1.3313268682658783e+19,
+    "train_loss": 0.972996187210083,
+    "train_runtime": 3933.908,
+    "train_samples": 3542,
+    "train_samples_per_second": 27.011,
+    "train_steps_per_second": 3.378
+}
eval_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 6.320541760722348,
+    "eval_loss": 0.5884432792663574,
+    "eval_runtime": 45.0521,
+    "eval_samples": 708,
+    "eval_samples_per_second": 15.715,
+    "eval_steps_per_second": 1.975,
+    "eval_wer": 0.5332880434782609
+}
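
The eval_wer value recorded above is a word error rate over the 708 evaluation samples: (substitutions + deletions + insertions) divided by the number of reference words, so 0.533 means roughly one word error for every two reference words. A minimal sketch of how such a value is typically computed with the `evaluate` library follows; the example sentences are illustrative placeholders, not NATBED transcripts.

```python
# Minimal WER sketch using the `evaluate` library.
import evaluate

wer_metric = evaluate.load("wer")

predictions = ["the model predicted this sentence"]  # illustrative hypothesis
references = ["the model produced this sentence"]    # illustrative reference

# One substitution over five reference words -> WER = 0.2
print(wer_metric.compute(predictions=predictions, references=references))
```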
train_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 6.320541760722348,
+    "total_flos": 1.3313268682658783e+19,
+    "train_loss": 0.972996187210083,
+    "train_runtime": 3933.908,
+    "train_samples": 3542,
+    "train_samples_per_second": 27.011,
+    "train_steps_per_second": 3.378
+}
trainer_state.json ADDED
@@ -0,0 +1,499 @@
+{
+  "best_metric": 0.5884432792663574,
+  "best_model_checkpoint": "/scratch/skscla001/results/mms-1b-all-bem-natbed-nn-model/checkpoint-2200",
+  "epoch": 6.320541760722348,
+  "eval_steps": 100,
+  "global_step": 2800,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.22573363431151242,
+      "grad_norm": 1.8729794025421143,
+      "learning_rate": 0.00028799999999999995,
+      "loss": 7.9244,
+      "step": 100
+    },
+    {
+      "epoch": 0.22573363431151242,
+      "eval_loss": 1.35137140750885,
+      "eval_runtime": 45.1782,
+      "eval_samples_per_second": 15.671,
+      "eval_steps_per_second": 1.97,
+      "eval_wer": 1.0237771739130435,
+      "step": 100
+    },
+    {
+      "epoch": 0.45146726862302483,
+      "grad_norm": 2.6380980014801025,
+      "learning_rate": 0.0002978165276724791,
+      "loss": 1.0236,
+      "step": 200
+    },
+    {
+      "epoch": 0.45146726862302483,
+      "eval_loss": 0.8354936838150024,
+      "eval_runtime": 44.7017,
+      "eval_samples_per_second": 15.838,
+      "eval_steps_per_second": 1.991,
+      "eval_wer": 0.6594769021739131,
+      "step": 200
+    },
+    {
+      "epoch": 0.6772009029345373,
+      "grad_norm": 1.9870244264602661,
+      "learning_rate": 0.0002955420773313116,
+      "loss": 0.8005,
+      "step": 300
+    },
+    {
+      "epoch": 0.6772009029345373,
+      "eval_loss": 0.7836518287658691,
+      "eval_runtime": 45.0039,
+      "eval_samples_per_second": 15.732,
+      "eval_steps_per_second": 1.978,
+      "eval_wer": 0.6141304347826086,
+      "step": 300
+    },
+    {
+      "epoch": 0.9029345372460497,
+      "grad_norm": 2.012474536895752,
+      "learning_rate": 0.00029326762699014404,
+      "loss": 0.8968,
+      "step": 400
+    },
+    {
+      "epoch": 0.9029345372460497,
+      "eval_loss": 0.7808529138565063,
+      "eval_runtime": 44.7704,
+      "eval_samples_per_second": 15.814,
+      "eval_steps_per_second": 1.988,
+      "eval_wer": 0.6042798913043478,
+      "step": 400
+    },
+    {
+      "epoch": 1.1286681715575622,
+      "grad_norm": 8.587899208068848,
+      "learning_rate": 0.00029099317664897645,
+      "loss": 0.8909,
+      "step": 500
+    },
+    {
+      "epoch": 1.1286681715575622,
+      "eval_loss": 0.7146816849708557,
+      "eval_runtime": 44.5851,
+      "eval_samples_per_second": 15.88,
+      "eval_steps_per_second": 1.996,
+      "eval_wer": 0.5952785326086957,
+      "step": 500
+    },
+    {
+      "epoch": 1.3544018058690745,
+      "grad_norm": 8.078141212463379,
+      "learning_rate": 0.0002887187263078089,
+      "loss": 0.7983,
+      "step": 600
+    },
+    {
+      "epoch": 1.3544018058690745,
+      "eval_loss": 0.6989510655403137,
+      "eval_runtime": 44.4701,
+      "eval_samples_per_second": 15.921,
+      "eval_steps_per_second": 2.001,
+      "eval_wer": 0.5930706521739131,
+      "step": 600
+    },
+    {
+      "epoch": 1.580135440180587,
+      "grad_norm": 2.5096583366394043,
+      "learning_rate": 0.00028644427596664137,
+      "loss": 0.8563,
+      "step": 700
+    },
+    {
+      "epoch": 1.580135440180587,
+      "eval_loss": 0.6804757118225098,
+      "eval_runtime": 44.8305,
+      "eval_samples_per_second": 15.793,
+      "eval_steps_per_second": 1.985,
+      "eval_wer": 0.5964673913043478,
+      "step": 700
+    },
+    {
+      "epoch": 1.8058690744920993,
+      "grad_norm": 2.3668673038482666,
+      "learning_rate": 0.00028416982562547383,
+      "loss": 0.7094,
+      "step": 800
+    },
+    {
+      "epoch": 1.8058690744920993,
+      "eval_loss": 0.6849333047866821,
+      "eval_runtime": 44.6427,
+      "eval_samples_per_second": 15.859,
+      "eval_steps_per_second": 1.994,
+      "eval_wer": 0.5808423913043478,
+      "step": 800
+    },
+    {
+      "epoch": 2.0316027088036117,
+      "grad_norm": 1.9050077199935913,
+      "learning_rate": 0.0002819181197877179,
+      "loss": 0.7499,
+      "step": 900
+    },
+    {
+      "epoch": 2.0316027088036117,
+      "eval_loss": 0.6456880569458008,
+      "eval_runtime": 44.6644,
+      "eval_samples_per_second": 15.852,
+      "eval_steps_per_second": 1.993,
+      "eval_wer": 0.5934103260869565,
+      "step": 900
+    },
+    {
+      "epoch": 2.2573363431151243,
+      "grad_norm": 2.829195737838745,
+      "learning_rate": 0.0002796436694465504,
+      "loss": 0.7722,
+      "step": 1000
+    },
+    {
+      "epoch": 2.2573363431151243,
+      "eval_loss": 0.6565266251564026,
+      "eval_runtime": 44.7397,
+      "eval_samples_per_second": 15.825,
+      "eval_steps_per_second": 1.989,
+      "eval_wer": 0.5874660326086957,
+      "step": 1000
+    },
+    {
+      "epoch": 2.4830699774266365,
+      "grad_norm": 0.8484971523284912,
+      "learning_rate": 0.00027736921910538284,
+      "loss": 0.7099,
+      "step": 1100
+    },
+    {
+      "epoch": 2.4830699774266365,
+      "eval_loss": 0.6419216394424438,
+      "eval_runtime": 44.7023,
+      "eval_samples_per_second": 15.838,
+      "eval_steps_per_second": 1.991,
+      "eval_wer": 0.5596127717391305,
+      "step": 1100
+    },
+    {
+      "epoch": 2.708803611738149,
+      "grad_norm": 2.4867944717407227,
+      "learning_rate": 0.0002750947687642153,
+      "loss": 0.7416,
+      "step": 1200
+    },
+    {
+      "epoch": 2.708803611738149,
+      "eval_loss": 0.6195096373558044,
+      "eval_runtime": 44.7966,
+      "eval_samples_per_second": 15.805,
+      "eval_steps_per_second": 1.987,
+      "eval_wer": 0.561141304347826,
+      "step": 1200
+    },
+    {
+      "epoch": 2.9345372460496613,
+      "grad_norm": 4.84053373336792,
+      "learning_rate": 0.00027282031842304776,
+      "loss": 0.6385,
+      "step": 1300
+    },
+    {
+      "epoch": 2.9345372460496613,
+      "eval_loss": 0.6227733492851257,
+      "eval_runtime": 44.9228,
+      "eval_samples_per_second": 15.76,
+      "eval_steps_per_second": 1.981,
+      "eval_wer": 0.5647078804347826,
+      "step": 1300
+    },
+    {
+      "epoch": 3.160270880361174,
+      "grad_norm": 0.9543440341949463,
+      "learning_rate": 0.0002705458680818802,
+      "loss": 0.6436,
+      "step": 1400
+    },
+    {
+      "epoch": 3.160270880361174,
+      "eval_loss": 0.6184154152870178,
+      "eval_runtime": 44.7251,
+      "eval_samples_per_second": 15.83,
+      "eval_steps_per_second": 1.99,
+      "eval_wer": 0.5509510869565217,
+      "step": 1400
+    },
+    {
+      "epoch": 3.386004514672686,
+      "grad_norm": 0.8133373856544495,
+      "learning_rate": 0.0002682714177407126,
+      "loss": 0.6795,
+      "step": 1500
+    },
+    {
+      "epoch": 3.386004514672686,
+      "eval_loss": 0.6156527996063232,
+      "eval_runtime": 45.3372,
+      "eval_samples_per_second": 15.616,
+      "eval_steps_per_second": 1.963,
+      "eval_wer": 0.553328804347826,
+      "step": 1500
+    },
+    {
+      "epoch": 3.6117381489841986,
+      "grad_norm": 25.58840560913086,
+      "learning_rate": 0.0002659969673995451,
+      "loss": 0.7027,
+      "step": 1600
+    },
+    {
+      "epoch": 3.6117381489841986,
+      "eval_loss": 0.6343082785606384,
+      "eval_runtime": 45.529,
+      "eval_samples_per_second": 15.551,
+      "eval_steps_per_second": 1.955,
+      "eval_wer": 0.5426290760869565,
+      "step": 1600
+    },
+    {
+      "epoch": 3.837471783295711,
+      "grad_norm": 0.6009318828582764,
+      "learning_rate": 0.00026372251705837754,
+      "loss": 0.6585,
+      "step": 1700
+    },
+    {
+      "epoch": 3.837471783295711,
+      "eval_loss": 0.6057115793228149,
+      "eval_runtime": 44.8336,
+      "eval_samples_per_second": 15.792,
+      "eval_steps_per_second": 1.985,
+      "eval_wer": 0.5427989130434783,
+      "step": 1700
+    },
+    {
+      "epoch": 4.063205417607223,
+      "grad_norm": 0.929165244102478,
+      "learning_rate": 0.00026144806671720994,
+      "loss": 0.6351,
+      "step": 1800
+    },
+    {
+      "epoch": 4.063205417607223,
+      "eval_loss": 0.6017059683799744,
+      "eval_runtime": 44.5067,
+      "eval_samples_per_second": 15.908,
+      "eval_steps_per_second": 2.0,
+      "eval_wer": 0.54296875,
+      "step": 1800
+    },
+    {
+      "epoch": 4.288939051918736,
+      "grad_norm": 1.4761062860488892,
+      "learning_rate": 0.00025917361637604246,
+      "loss": 0.6528,
+      "step": 1900
+    },
+    {
+      "epoch": 4.288939051918736,
+      "eval_loss": 0.6098975539207458,
+      "eval_runtime": 44.8754,
+      "eval_samples_per_second": 15.777,
+      "eval_steps_per_second": 1.983,
+      "eval_wer": 0.5339673913043478,
+      "step": 1900
+    },
+    {
+      "epoch": 4.514672686230249,
+      "grad_norm": 1.2957922220230103,
+      "learning_rate": 0.00025689916603487486,
+      "loss": 0.6603,
+      "step": 2000
+    },
+    {
+      "epoch": 4.514672686230249,
+      "eval_loss": 0.621790885925293,
+      "eval_runtime": 45.2703,
+      "eval_samples_per_second": 15.639,
+      "eval_steps_per_second": 1.966,
+      "eval_wer": 0.5334578804347826,
+      "step": 2000
+    },
+    {
+      "epoch": 4.74040632054176,
+      "grad_norm": 3.225343942642212,
+      "learning_rate": 0.0002546247156937073,
+      "loss": 0.6676,
+      "step": 2100
+    },
+    {
+      "epoch": 4.74040632054176,
+      "eval_loss": 0.5977216958999634,
+      "eval_runtime": 44.9573,
+      "eval_samples_per_second": 15.748,
+      "eval_steps_per_second": 1.98,
+      "eval_wer": 0.5322690217391305,
+      "step": 2100
+    },
+    {
+      "epoch": 4.966139954853273,
+      "grad_norm": 1.4750922918319702,
+      "learning_rate": 0.0002523502653525398,
+      "loss": 0.6304,
+      "step": 2200
+    },
+    {
+      "epoch": 4.966139954853273,
+      "eval_loss": 0.5884432792663574,
+      "eval_runtime": 45.456,
+      "eval_samples_per_second": 15.575,
+      "eval_steps_per_second": 1.958,
+      "eval_wer": 0.5332880434782609,
+      "step": 2200
+    },
+    {
+      "epoch": 5.191873589164786,
+      "grad_norm": 0.7652086615562439,
+      "learning_rate": 0.00025007581501137224,
+      "loss": 0.5976,
+      "step": 2300
+    },
+    {
+      "epoch": 5.191873589164786,
+      "eval_loss": 0.5955621600151062,
+      "eval_runtime": 45.3065,
+      "eval_samples_per_second": 15.627,
+      "eval_steps_per_second": 1.964,
+      "eval_wer": 0.5227581521739131,
+      "step": 2300
+    },
+    {
+      "epoch": 5.417607223476298,
+      "grad_norm": 3.0203540325164795,
+      "learning_rate": 0.00024780136467020465,
+      "loss": 0.6564,
+      "step": 2400
+    },
+    {
+      "epoch": 5.417607223476298,
+      "eval_loss": 0.5956509709358215,
+      "eval_runtime": 45.0964,
+      "eval_samples_per_second": 15.7,
+      "eval_steps_per_second": 1.974,
+      "eval_wer": 0.5302309782608695,
+      "step": 2400
+    },
+    {
+      "epoch": 5.643340857787811,
+      "grad_norm": 1.8780221939086914,
+      "learning_rate": 0.00024552691432903716,
+      "loss": 0.6717,
+      "step": 2500
+    },
+    {
+      "epoch": 5.643340857787811,
+      "eval_loss": 0.5767239332199097,
+      "eval_runtime": 44.9865,
+      "eval_samples_per_second": 15.738,
+      "eval_steps_per_second": 1.978,
+      "eval_wer": 0.5183423913043478,
+      "step": 2500
+    },
+    {
+      "epoch": 5.8690744920993225,
+      "grad_norm": 0.8542383909225464,
+      "learning_rate": 0.00024325246398786956,
+      "loss": 0.6091,
+      "step": 2600
+    },
+    {
+      "epoch": 5.8690744920993225,
+      "eval_loss": 0.592084527015686,
+      "eval_runtime": 44.6533,
+      "eval_samples_per_second": 15.855,
+      "eval_steps_per_second": 1.993,
+      "eval_wer": 0.52734375,
+      "step": 2600
+    },
+    {
+      "epoch": 6.094808126410835,
+      "grad_norm": 1.4033461809158325,
+      "learning_rate": 0.00024097801364670205,
+      "loss": 0.6168,
+      "step": 2700
+    },
+    {
+      "epoch": 6.094808126410835,
+      "eval_loss": 0.5894186496734619,
+      "eval_runtime": 45.0539,
+      "eval_samples_per_second": 15.715,
+      "eval_steps_per_second": 1.975,
+      "eval_wer": 0.5275135869565217,
+      "step": 2700
+    },
+    {
+      "epoch": 6.320541760722348,
+      "grad_norm": 10.08028507232666,
+      "learning_rate": 0.00023870356330553448,
+      "loss": 0.6495,
+      "step": 2800
+    },
+    {
+      "epoch": 6.320541760722348,
+      "eval_loss": 0.6036040782928467,
+      "eval_runtime": 45.0407,
+      "eval_samples_per_second": 15.719,
+      "eval_steps_per_second": 1.976,
+      "eval_wer": 0.5197010869565217,
+      "step": 2800
+    },
+    {
+      "epoch": 6.320541760722348,
+      "step": 2800,
+      "total_flos": 1.3313268682658783e+19,
+      "train_loss": 0.972996187210083,
+      "train_runtime": 3933.908,
+      "train_samples_per_second": 27.011,
+      "train_steps_per_second": 3.378
+    }
+  ],
+  "logging_steps": 100,
+  "max_steps": 13290,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 30,
+  "save_steps": 200,
+  "stateful_callbacks": {
+    "EarlyStoppingCallback": {
+      "args": {
+        "early_stopping_patience": 3,
+        "early_stopping_threshold": 0.0
+      },
+      "attributes": {
+        "early_stopping_patience_counter": 3
+      }
+    },
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 1.3313268682658783e+19,
+  "train_batch_size": 8,
+  "trial_name": null,
+  "trial_params": null
+}
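
trainer_state.json records that the run evaluated every 100 steps, saved every 200, and stopped at step 2800 of a possible 13290 (30 epochs) after the EarlyStoppingCallback's patience of 3 evaluations without improvement was exhausted; the best eval_loss (0.5884) was reached at checkpoint-2200. A hedged reconstruction of the corresponding Trainer configuration follows: only the values echoed in the comments come from trainer_state.json, everything else is an assumption for illustration.

```python
# Hedged sketch of the training setup implied by trainer_state.json.
from transformers import TrainingArguments, EarlyStoppingCallback

training_args = TrainingArguments(
    output_dir="mms-1b-all-bem-natbed-nn-model",  # placeholder output dir
    per_device_train_batch_size=8,      # "train_batch_size": 8
    num_train_epochs=30,                # "num_train_epochs": 30
    eval_strategy="steps",              # older transformers versions use evaluation_strategy
    eval_steps=100,                     # "eval_steps": 100
    save_steps=200,                     # "save_steps": 200
    logging_steps=100,                  # "logging_steps": 100
    load_best_model_at_end=True,        # needed so early stopping tracks the best checkpoint
    metric_for_best_model="eval_loss",  # best_metric equals eval_loss at checkpoint-2200
    greater_is_better=False,
)

# Passed as Trainer(..., callbacks=[early_stopping]); this is what halted the
# run at step 2800 once three consecutive evaluations failed to improve.
early_stopping = EarlyStoppingCallback(
    early_stopping_patience=3,          # "early_stopping_patience": 3
    early_stopping_threshold=0.0,       # "early_stopping_threshold": 0.0
)
```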