hanasim committed on
Commit
90468b4
1 Parent(s): c507f76

End of training

README.md CHANGED
@@ -1,21 +1,24 @@
 ---
+language:
+- ta
 license: apache-2.0
 base_model: openai/whisper-base
 tags:
+- whisper-event
 - generated_from_trainer
 datasets:
-- common_voice_16_0
+- mozilla-foundation/common_voice_16_0
 metrics:
 - wer
 model-index:
-- name: breeze-listen-dsw-base-ta
+- name: Breeze DSW Tamil - base
   results:
   - task:
       name: Automatic Speech Recognition
       type: automatic-speech-recognition
     dataset:
-      name: common_voice_16_0
-      type: common_voice_16_0
+      name: mozilla-foundation/common_voice_16_0 ta
+      type: mozilla-foundation/common_voice_16_0
       config: ta
       split: test
       args: ta
@@ -28,9 +31,9 @@ model-index:
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 should probably proofread and complete it, then remove this comment. -->
 
-# breeze-listen-dsw-base-ta
+# Breeze DSW Tamil - base
 
-This model is a fine-tuned version of [openai/whisper-base](https://huggingface.co/openai/whisper-base) on the common_voice_16_0 dataset.
+This model is a fine-tuned version of [openai/whisper-base](https://huggingface.co/openai/whisper-base) on the mozilla-foundation/common_voice_16_0 ta dataset.
 It achieves the following results on the evaluation set:
 - Loss: 0.375
 - Wer: 21.4071
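The updated model card describes a Whisper-base checkpoint fine-tuned for Tamil speech recognition. As a minimal inference sketch that is not part of this commit: the model could be loaded with the `transformers` ASR pipeline, assuming it is published under the hypothetical repo id `hanasim/breeze-listen-dsw-base-ta` (inferred from the checkpoint directory name in trainer_state.json).

```python
# Minimal inference sketch (not part of this commit).
# The repo id below is an assumption, not confirmed by the diff.
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="hanasim/breeze-listen-dsw-base-ta",  # hypothetical repo id
)

# Whisper pipelines accept an audio file path or a raw waveform array;
# forcing language/task avoids auto language detection.
result = asr(
    "sample_ta.wav",
    generate_kwargs={"language": "ta", "task": "transcribe"},
)
print(result["text"])
```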
all_results.json ADDED
@@ -0,0 +1,12 @@
+{
+    "epoch": 1.0,
+    "eval_loss": 0.375,
+    "eval_runtime": 14742.073,
+    "eval_samples_per_second": 0.787,
+    "eval_steps_per_second": 0.049,
+    "eval_wer": 21.407068619939793,
+    "train_loss": 0.2634566650390625,
+    "train_runtime": 177344.9657,
+    "train_samples_per_second": 0.18,
+    "train_steps_per_second": 0.006
+}
eval_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 1.0,
+    "eval_loss": 0.375,
+    "eval_runtime": 14742.073,
+    "eval_samples_per_second": 0.787,
+    "eval_steps_per_second": 0.049,
+    "eval_wer": 21.407068619939793
+}
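The `eval_wer` value is a word error rate expressed as a percentage. As a rough sketch of how such a figure is typically computed (assuming the `evaluate` library, which is what Whisper fine-tuning scripts commonly use; the example strings below are illustrative, not from this run):

```python
# Sketch of reproducing a WER figure like the one in eval_results.json.
# evaluate's "wer" metric returns a fraction, so multiply by 100.
import evaluate

wer_metric = evaluate.load("wer")

predictions = ["transcribed hypothesis text"]  # model outputs (illustrative)
references = ["ground truth reference text"]   # dataset transcripts (illustrative)

wer = 100 * wer_metric.compute(predictions=predictions, references=references)
print(f"WER: {wer:.4f}")
```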
runs/Jan24_08-50-23_knight/events.out.tfevents.1706274745.knight.122137.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f318fc1af9f908999c7b2c7627d8f5028afe86bd5e90d2c5a45e542166aa6965
+size 406
train_results.json ADDED
@@ -0,0 +1,7 @@
+{
+    "epoch": 1.0,
+    "train_loss": 0.2634566650390625,
+    "train_runtime": 177344.9657,
+    "train_samples_per_second": 0.18,
+    "train_steps_per_second": 0.006
+}
trainer_state.json ADDED
@@ -0,0 +1,360 @@
+{
+  "best_metric": 21.407068619939793,
+  "best_model_checkpoint": "/cosmos/home/sp-operator/ai/training/models/huggingface/scripts/../breeze-listen-dsw-base-ta/checkpoint-1000",
+  "epoch": 1.0,
+  "eval_steps": 100,
+  "global_step": 1000,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.03,
+      "learning_rate": 4.973833272194737e-06,
+      "loss": 0.5836,
+      "step": 25
+    },
+    {
+      "epoch": 0.05,
+      "learning_rate": 6.195318418690893e-06,
+      "loss": 0.308,
+      "step": 50
+    },
+    {
+      "epoch": 0.07,
+      "learning_rate": 6.881634451095711e-06,
+      "loss": 0.2177,
+      "step": 75
+    },
+    {
+      "epoch": 0.1,
+      "learning_rate": 7.361221988663844e-06,
+      "loss": 0.1698,
+      "step": 100
+    },
+    {
+      "epoch": 0.1,
+      "eval_loss": 0.572265625,
+      "eval_runtime": 14672.9249,
+      "eval_samples_per_second": 0.79,
+      "eval_steps_per_second": 0.049,
+      "eval_wer": 30.440569885233597,
+      "step": 100
+    },
+    {
+      "epoch": 0.12,
+      "learning_rate": 7.730207550743121e-06,
+      "loss": 0.1375,
+      "step": 125
+    },
+    {
+      "epoch": 0.15,
+      "learning_rate": 8.03016458599496e-06,
+      "loss": 0.4086,
+      "step": 150
+    },
+    {
+      "epoch": 0.17,
+      "learning_rate": 8.282894746203441e-06,
+      "loss": 0.4454,
+      "step": 175
+    },
+    {
+      "epoch": 0.2,
+      "learning_rate": 8.501266121799902e-06,
+      "loss": 0.3578,
+      "step": 200
+    },
+    {
+      "epoch": 0.2,
+      "eval_loss": 0.43017578125,
+      "eval_runtime": 14830.0095,
+      "eval_samples_per_second": 0.782,
+      "eval_steps_per_second": 0.049,
+      "eval_wer": 25.686238091921464,
+      "step": 200
+    },
+    {
+      "epoch": 0.23,
+      "learning_rate": 8.693512601774437e-06,
+      "loss": 0.3099,
+      "step": 225
+    },
+    {
+      "epoch": 0.25,
+      "learning_rate": 8.865222471593567e-06,
+      "loss": 0.3064,
+      "step": 250
+    },
+    {
+      "epoch": 0.28,
+      "learning_rate": 9.020362953730323e-06,
+      "loss": 0.2555,
+      "step": 275
+    },
+    {
+      "epoch": 0.3,
+      "learning_rate": 9.161852281961698e-06,
+      "loss": 0.2832,
+      "step": 300
+    },
+    {
+      "epoch": 0.3,
+      "eval_loss": 0.396728515625,
+      "eval_runtime": 14915.7095,
+      "eval_samples_per_second": 0.778,
+      "eval_steps_per_second": 0.049,
+      "eval_wer": 23.204791864973416,
+      "step": 300
+    },
+    {
+      "epoch": 0.33,
+      "learning_rate": 9.29189975311636e-06,
+      "loss": 0.239,
+      "step": 325
+    },
+    {
+      "epoch": 0.35,
+      "learning_rate": 9.412218256259678e-06,
+      "loss": 0.2391,
+      "step": 350
+    },
+    {
+      "epoch": 0.38,
+      "learning_rate": 9.524162683365145e-06,
+      "loss": 0.3022,
+      "step": 375
+    },
+    {
+      "epoch": 0.4,
+      "learning_rate": 9.62882322733502e-06,
+      "loss": 0.2663,
+      "step": 400
+    },
+    {
+      "epoch": 0.4,
+      "eval_loss": 0.40380859375,
+      "eval_runtime": 14958.2575,
+      "eval_samples_per_second": 0.775,
+      "eval_steps_per_second": 0.048,
+      "eval_wer": 23.852529426787083,
+      "step": 400
+    },
+    {
+      "epoch": 0.42,
+      "learning_rate": 9.727090137141168e-06,
+      "loss": 0.2667,
+      "step": 425
+    },
+    {
+      "epoch": 0.45,
+      "learning_rate": 9.819699807237934e-06,
+      "loss": 0.2368,
+      "step": 450
+    },
+    {
+      "epoch": 0.47,
+      "learning_rate": 9.907268307310855e-06,
+      "loss": 0.4503,
+      "step": 475
+    },
+    {
+      "epoch": 0.5,
+      "learning_rate": 9.990316248055788e-06,
+      "loss": 0.5175,
+      "step": 500
+    },
+    {
+      "epoch": 0.5,
+      "eval_loss": 0.396240234375,
+      "eval_runtime": 14832.7471,
+      "eval_samples_per_second": 0.782,
+      "eval_steps_per_second": 0.049,
+      "eval_wer": 24.14660382761049,
+      "step": 500
+    },
+    {
+      "epoch": 0.53,
+      "learning_rate": 9.58e-06,
+      "loss": 0.4672,
+      "step": 525
+    },
+    {
+      "epoch": 0.55,
+      "learning_rate": 9.080000000000001e-06,
+      "loss": 0.4226,
+      "step": 550
+    },
+    {
+      "epoch": 0.57,
+      "learning_rate": 8.580000000000001e-06,
+      "loss": 0.3758,
+      "step": 575
+    },
+    {
+      "epoch": 0.6,
+      "learning_rate": 8.08e-06,
+      "loss": 0.2365,
+      "step": 600
+    },
+    {
+      "epoch": 0.6,
+      "eval_loss": 0.385009765625,
+      "eval_runtime": 14663.0923,
+      "eval_samples_per_second": 0.791,
+      "eval_steps_per_second": 0.049,
+      "eval_wer": 22.25949744232659,
+      "step": 600
+    },
+    {
+      "epoch": 0.62,
+      "learning_rate": 7.58e-06,
+      "loss": 0.205,
+      "step": 625
+    },
+    {
+      "epoch": 0.65,
+      "learning_rate": 7.08e-06,
+      "loss": 0.2127,
+      "step": 650
+    },
+    {
+      "epoch": 0.68,
+      "learning_rate": 6.5800000000000005e-06,
+      "loss": 0.1918,
+      "step": 675
+    },
+    {
+      "epoch": 0.7,
+      "learning_rate": 6.08e-06,
+      "loss": 0.1692,
+      "step": 700
+    },
+    {
+      "epoch": 0.7,
+      "eval_loss": 0.39599609375,
+      "eval_runtime": 14696.2041,
+      "eval_samples_per_second": 0.789,
+      "eval_steps_per_second": 0.049,
+      "eval_wer": 21.868688041232325,
+      "step": 700
+    },
+    {
+      "epoch": 0.72,
+      "learning_rate": 5.580000000000001e-06,
+      "loss": 0.155,
+      "step": 725
+    },
+    {
+      "epoch": 0.75,
+      "learning_rate": 5.0800000000000005e-06,
+      "loss": 0.1639,
+      "step": 750
+    },
+    {
+      "epoch": 0.78,
+      "learning_rate": 4.58e-06,
+      "loss": 0.1611,
+      "step": 775
+    },
+    {
+      "epoch": 0.8,
+      "learning_rate": 4.08e-06,
+      "loss": 0.1815,
+      "step": 800
+    },
+    {
+      "epoch": 0.8,
+      "eval_loss": 0.38232421875,
+      "eval_runtime": 14658.9058,
+      "eval_samples_per_second": 0.791,
+      "eval_steps_per_second": 0.049,
+      "eval_wer": 22.077248701816295,
+      "step": 800
+    },
+    {
+      "epoch": 0.82,
+      "learning_rate": 3.58e-06,
+      "loss": 0.1854,
+      "step": 825
+    },
+    {
+      "epoch": 0.85,
+      "learning_rate": 3.08e-06,
+      "loss": 0.175,
+      "step": 850
+    },
+    {
+      "epoch": 0.88,
+      "learning_rate": 2.5800000000000003e-06,
+      "loss": 0.1684,
+      "step": 875
+    },
+    {
+      "epoch": 0.9,
+      "learning_rate": 2.08e-06,
+      "loss": 0.1612,
+      "step": 900
+    },
+    {
+      "epoch": 0.9,
+      "eval_loss": 0.3701171875,
+      "eval_runtime": 14864.357,
+      "eval_samples_per_second": 0.78,
+      "eval_steps_per_second": 0.049,
+      "eval_wer": 21.805616821055725,
+      "step": 900
+    },
+    {
+      "epoch": 0.93,
+      "learning_rate": 1.5800000000000001e-06,
+      "loss": 0.1528,
+      "step": 925
+    },
+    {
+      "epoch": 0.95,
+      "learning_rate": 1.08e-06,
+      "loss": 0.1651,
+      "step": 950
+    },
+    {
+      "epoch": 0.97,
+      "learning_rate": 5.800000000000001e-07,
+      "loss": 0.1474,
+      "step": 975
+    },
+    {
+      "epoch": 1.0,
+      "learning_rate": 8e-08,
+      "loss": 0.1393,
+      "step": 1000
+    },
+    {
+      "epoch": 1.0,
+      "eval_loss": 0.375,
+      "eval_runtime": 14729.6157,
+      "eval_samples_per_second": 0.787,
+      "eval_steps_per_second": 0.049,
+      "eval_wer": 21.407068619939793,
+      "step": 1000
+    },
+    {
+      "epoch": 1.0,
+      "step": 1000,
+      "total_flos": 2.0755199247672934e+18,
+      "train_loss": 0.2634566650390625,
+      "train_runtime": 177344.9657,
+      "train_samples_per_second": 0.18,
+      "train_steps_per_second": 0.006
+    }
+  ],
+  "logging_steps": 25,
+  "max_steps": 1000,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 9223372036854775807,
+  "save_steps": 100,
+  "total_flos": 2.0755199247672934e+18,
+  "train_batch_size": 32,
+  "trial_name": null,
+  "trial_params": null
+}