csikasote committed on
Commit c0f6b95 · verified · 1 Parent(s): d484c0c

End of training

README.md CHANGED
@@ -3,6 +3,9 @@ library_name: transformers
  license: cc-by-nc-4.0
  base_model: facebook/mms-1b-all
  tags:
+ - automatic-speech-recognition
+ - BembaSpeech
+ - mms
  - generated_from_trainer
  metrics:
  - wer
@@ -16,7 +19,7 @@ should probably proofread and complete it, then remove this comment. -->
 
  # mms-1b-bem-genbed-all
 
- This model is a fine-tuned version of [facebook/mms-1b-all](https://huggingface.co/facebook/mms-1b-all) on an unknown dataset.
+ This model is a fine-tuned version of [facebook/mms-1b-all](https://huggingface.co/facebook/mms-1b-all) on the BEMBASPEECH - BEM dataset.
  It achieves the following results on the evaluation set:
  - Loss: 0.2479
  - Wer: 0.4062
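
For orientation, a minimal inference sketch for the checkpoint this README describes. It is not part of the commit: the Hub repo id `csikasote/mms-1b-bem-genbed-all` is assumed from the committer name and the README heading, and the sketch presumes the repo ships the processor files saved during training.

```python
import torch
import librosa
from transformers import AutoProcessor, Wav2Vec2ForCTC

# Assumed repo id (committer + README heading); adjust to the actual Hub path.
model_id = "csikasote/mms-1b-bem-genbed-all"

processor = AutoProcessor.from_pretrained(model_id)
model = Wav2Vec2ForCTC.from_pretrained(model_id)

# MMS models expect 16 kHz mono audio; "bemba_sample.wav" is a placeholder file.
speech, _ = librosa.load("bemba_sample.wav", sr=16_000, mono=True)
inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

pred_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(pred_ids)[0])
```
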
adapter.default.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:85c80b5c01f498914dad744c4e7a9c4d7844c266d6f41539bf67bccc73d85641
+ size 8798532

all_results.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "epoch": 5.0,
+   "eval_loss": 0.2479187548160553,
+   "eval_runtime": 128.9642,
+   "eval_samples": 1939,
+   "eval_samples_per_second": 15.035,
+   "eval_steps_per_second": 1.884,
+   "eval_wer": 0.40616216216216217,
+   "total_flos": 1.737871213031041e+19,
+   "train_loss": 0.719452987922417,
+   "train_runtime": 6513.7768,
+   "train_samples": 5817,
+   "train_samples_per_second": 4.465,
+   "train_steps_per_second": 0.559
+ }
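
The aggregate figures above are internally consistent; a quick sanity check, assuming a single device and no gradient accumulation (neither is stated explicitly in the commit):

```python
import math

# Values copied from all_results.json / trainer_state.json.
train_samples = 5817
train_batch_size = 8     # "train_batch_size" in trainer_state.json
num_epochs = 5
eval_samples = 1939
eval_runtime = 128.9642  # seconds

# Optimizer steps per epoch and in total: 728 * 5 = 3640, matching global_step.
steps_per_epoch = math.ceil(train_samples / train_batch_size)
print(steps_per_epoch, steps_per_epoch * num_epochs)

# Evaluation throughput, matching eval_samples_per_second = 15.035.
print(round(eval_samples / eval_runtime, 3))
```
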
eval_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 5.0,
+   "eval_loss": 0.2479187548160553,
+   "eval_runtime": 128.9642,
+   "eval_samples": 1939,
+   "eval_samples_per_second": 15.035,
+   "eval_steps_per_second": 1.884,
+   "eval_wer": 0.40616216216216217
+ }
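
The `eval_wer` above is a corpus-level word error rate. A minimal sketch of how such a number is typically computed with the `evaluate` library; the strings below are placeholders, not items from the evaluation set:

```python
import evaluate

# Placeholder prediction/reference pairs; in practice these would be the
# 1939 decoded hypotheses and their reference transcripts.
predictions = ["a decoded hypothesis"]
references = ["a reference transcript"]

wer_metric = evaluate.load("wer")
print(wer_metric.compute(predictions=predictions, references=references))
```
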
runs/Sep06_22-35-02_957b6e261270/events.out.tfevents.1725669414.957b6e261270.11504.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2594dcedb84ff07ad49b92d8531123f8b351d5ea380d71c2dc1f34236b3b3992
+ size 406

train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 5.0,
+   "total_flos": 1.737871213031041e+19,
+   "train_loss": 0.719452987922417,
+   "train_runtime": 6513.7768,
+   "train_samples": 5817,
+   "train_samples_per_second": 4.465,
+   "train_steps_per_second": 0.559
+ }

trainer_state.json ADDED
@@ -0,0 +1,253 @@
+ {
+   "best_metric": 0.2479187548160553,
+   "best_model_checkpoint": "./mms-1b-bem-genbed-all/checkpoint-3600",
+   "epoch": 5.0,
+   "eval_steps": 200,
+   "global_step": 3640,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.27472527472527475,
+       "eval_loss": 0.6311588883399963,
+       "eval_runtime": 129.4588,
+       "eval_samples_per_second": 14.978,
+       "eval_steps_per_second": 1.877,
+       "eval_wer": 0.6736216216216216,
+       "step": 200
+     },
+     {
+       "epoch": 0.5494505494505495,
+       "eval_loss": 0.3198663890361786,
+       "eval_runtime": 127.9369,
+       "eval_samples_per_second": 15.156,
+       "eval_steps_per_second": 1.899,
+       "eval_wer": 0.49194594594594593,
+       "step": 400
+     },
+     {
+       "epoch": 0.6868131868131868,
+       "grad_norm": 1.675441861152649,
+       "learning_rate": 0.00029699999999999996,
+       "loss": 2.9901,
+       "step": 500
+     },
+     {
+       "epoch": 0.8241758241758241,
+       "eval_loss": 0.3013916313648224,
+       "eval_runtime": 128.0261,
+       "eval_samples_per_second": 15.145,
+       "eval_steps_per_second": 1.898,
+       "eval_wer": 0.4612972972972973,
+       "step": 600
+     },
+     {
+       "epoch": 1.098901098901099,
+       "eval_loss": 0.28253573179244995,
+       "eval_runtime": 127.3129,
+       "eval_samples_per_second": 15.23,
+       "eval_steps_per_second": 1.909,
+       "eval_wer": 0.4432972972972973,
+       "step": 800
+     },
+     {
+       "epoch": 1.3736263736263736,
+       "grad_norm": 0.6800592541694641,
+       "learning_rate": 0.00025280254777070065,
+       "loss": 0.3968,
+       "step": 1000
+     },
+     {
+       "epoch": 1.3736263736263736,
+       "eval_loss": 0.2782987058162689,
+       "eval_runtime": 127.2721,
+       "eval_samples_per_second": 15.235,
+       "eval_steps_per_second": 1.909,
+       "eval_wer": 0.4541081081081081,
+       "step": 1000
+     },
+     {
+       "epoch": 1.6483516483516483,
+       "eval_loss": 0.27315396070480347,
+       "eval_runtime": 128.0724,
+       "eval_samples_per_second": 15.14,
+       "eval_steps_per_second": 1.897,
+       "eval_wer": 0.4294054054054054,
+       "step": 1200
+     },
+     {
+       "epoch": 1.9230769230769231,
+       "eval_loss": 0.26492610573768616,
+       "eval_runtime": 127.9148,
+       "eval_samples_per_second": 15.159,
+       "eval_steps_per_second": 1.9,
+       "eval_wer": 0.4243783783783784,
+       "step": 1400
+     },
+     {
+       "epoch": 2.0604395604395602,
+       "grad_norm": 0.9411349296569824,
+       "learning_rate": 0.00020503184713375794,
+       "loss": 0.3766,
+       "step": 1500
+     },
+     {
+       "epoch": 2.197802197802198,
+       "eval_loss": 0.26205211877822876,
+       "eval_runtime": 128.7617,
+       "eval_samples_per_second": 15.059,
+       "eval_steps_per_second": 1.887,
+       "eval_wer": 0.42043243243243245,
+       "step": 1600
+     },
+     {
+       "epoch": 2.4725274725274726,
+       "eval_loss": 0.262787789106369,
+       "eval_runtime": 128.5209,
+       "eval_samples_per_second": 15.087,
+       "eval_steps_per_second": 1.891,
+       "eval_wer": 0.4170810810810811,
+       "step": 1800
+     },
+     {
+       "epoch": 2.7472527472527473,
+       "grad_norm": 1.4841364622116089,
+       "learning_rate": 0.00015726114649681526,
+       "loss": 0.3537,
+       "step": 2000
+     },
+     {
+       "epoch": 2.7472527472527473,
+       "eval_loss": 0.2579393982887268,
+       "eval_runtime": 128.5847,
+       "eval_samples_per_second": 15.08,
+       "eval_steps_per_second": 1.89,
+       "eval_wer": 0.4187027027027027,
+       "step": 2000
+     },
+     {
+       "epoch": 3.021978021978022,
+       "eval_loss": 0.25568732619285583,
+       "eval_runtime": 128.193,
+       "eval_samples_per_second": 15.126,
+       "eval_steps_per_second": 1.896,
+       "eval_wer": 0.40335135135135136,
+       "step": 2200
+     },
+     {
+       "epoch": 3.2967032967032965,
+       "eval_loss": 0.252371609210968,
+       "eval_runtime": 128.8222,
+       "eval_samples_per_second": 15.052,
+       "eval_steps_per_second": 1.886,
+       "eval_wer": 0.4090810810810811,
+       "step": 2400
+     },
+     {
+       "epoch": 3.4340659340659343,
+       "grad_norm": 2.9654958248138428,
+       "learning_rate": 0.00010949044585987259,
+       "loss": 0.3529,
+       "step": 2500
+     },
+     {
+       "epoch": 3.571428571428571,
+       "eval_loss": 0.25349992513656616,
+       "eval_runtime": 128.7325,
+       "eval_samples_per_second": 15.062,
+       "eval_steps_per_second": 1.888,
+       "eval_wer": 0.4061081081081081,
+       "step": 2600
+     },
+     {
+       "epoch": 3.8461538461538463,
+       "eval_loss": 0.24949324131011963,
+       "eval_runtime": 129.6607,
+       "eval_samples_per_second": 14.954,
+       "eval_steps_per_second": 1.874,
+       "eval_wer": 0.4034054054054054,
+       "step": 2800
+     },
+     {
+       "epoch": 4.1208791208791204,
+       "grad_norm": 0.9809963703155518,
+       "learning_rate": 6.171974522292994e-05,
+       "loss": 0.3393,
+       "step": 3000
+     },
+     {
+       "epoch": 4.1208791208791204,
+       "eval_loss": 0.24937787652015686,
+       "eval_runtime": 130.0963,
+       "eval_samples_per_second": 14.904,
+       "eval_steps_per_second": 1.868,
+       "eval_wer": 0.4064864864864865,
+       "step": 3000
+     },
+     {
+       "epoch": 4.395604395604396,
+       "eval_loss": 0.24878770112991333,
+       "eval_runtime": 129.7286,
+       "eval_samples_per_second": 14.947,
+       "eval_steps_per_second": 1.873,
+       "eval_wer": 0.40664864864864864,
+       "step": 3200
+     },
+     {
+       "epoch": 4.670329670329671,
+       "eval_loss": 0.2481740117073059,
+       "eval_runtime": 129.3443,
+       "eval_samples_per_second": 14.991,
+       "eval_steps_per_second": 1.879,
+       "eval_wer": 0.40275675675675676,
+       "step": 3400
+     },
+     {
+       "epoch": 4.8076923076923075,
+       "grad_norm": 0.8750921487808228,
+       "learning_rate": 1.394904458598726e-05,
+       "loss": 0.3332,
+       "step": 3500
+     },
+     {
+       "epoch": 4.945054945054945,
+       "eval_loss": 0.2479187548160553,
+       "eval_runtime": 129.4727,
+       "eval_samples_per_second": 14.976,
+       "eval_steps_per_second": 1.877,
+       "eval_wer": 0.40616216216216217,
+       "step": 3600
+     },
+     {
+       "epoch": 5.0,
+       "step": 3640,
+       "total_flos": 1.737871213031041e+19,
+       "train_loss": 0.719452987922417,
+       "train_runtime": 6513.7768,
+       "train_samples_per_second": 4.465,
+       "train_steps_per_second": 0.559
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 3640,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 5,
+   "save_steps": 200,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1.737871213031041e+19,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
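
trainer_state.json pins down most of the schedule: 5 epochs over 3640 steps, evaluation and checkpointing every 200 steps, logging every 500, per-device batch size 8, and a best checkpoint tracked at step 3600 (best_metric equals the eval loss there). A hedged reconstruction of the corresponding TrainingArguments follows; the peak learning rate and warmup are inferred from the logged schedule rather than stated in the commit, and the argument names assume a recent transformers release.

```python
from transformers import TrainingArguments

# Sketch only: values marked "assumed" are not recorded in this commit.
training_args = TrainingArguments(
    output_dir="./mms-1b-bem-genbed-all",   # matches best_model_checkpoint
    num_train_epochs=5,                      # "num_train_epochs": 5
    per_device_train_batch_size=8,           # "train_batch_size": 8
    eval_strategy="steps",                   # evals logged every 200 steps
    eval_steps=200,                          # "eval_steps": 200
    save_steps=200,                          # "save_steps": 200
    logging_steps=500,                       # "logging_steps": 500
    learning_rate=3e-4,                      # assumed from the logged LR decay
    warmup_steps=100,                        # assumed; not recoverable exactly
    load_best_model_at_end=True,             # a best checkpoint is tracked
)
```
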