emilios committed
Commit 1d9af43 · 1 Parent(s): a11b257

Training in progress, step 1000

config.json CHANGED
@@ -5,7 +5,7 @@
   "architectures": [
     "WhisperForConditionalGeneration"
   ],
-  "attention_dropout": 0.0,
+  "attention_dropout": 0.1,
   "begin_suppress_tokens": [
     220,
     50257
@@ -17,7 +17,7 @@
   "decoder_layerdrop": 0.0,
   "decoder_layers": 24,
   "decoder_start_token_id": 50258,
-  "dropout": 0.0,
+  "dropout": 0.1,
   "encoder_attention_heads": 16,
   "encoder_ffn_dim": 4096,
   "encoder_layerdrop": 0.0,
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:75f4a40185f3049845d498184bdc92fa2b508ff9778e24a740b97fd16d6778f7
+oid sha256:77372a6d52c655e4dece9a267b336001382a5ea2e303e1fc9b17fc92db04b3bb
 size 1527847357
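
Only the Git LFS pointer changes here: the new checkpoint keeps the same byte size but gets a new sha256 object id. The pointer's oid is simply the sha256 of the file contents, so a downloaded pytorch_model.bin can be checked against it with a short sketch like this (local path assumed):

    import hashlib

    def lfs_oid(path, chunk_size=1 << 20):
        """sha256 of the file contents, i.e. the oid a Git LFS pointer records."""
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                h.update(chunk)
        return h.hexdigest()

    assert lfs_oid("pytorch_model.bin") == "77372a6d52c655e4dece9a267b336001382a5ea2e303e1fc9b17fc92db04b3bb"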
run-ba.py ADDED
@@ -0,0 +1,709 @@
+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Fine-tuning the library models for sequence-to-sequence speech recognition
+with 🤗 Datasets' streaming mode.
+"""
+# You can also adapt this script for your own sequence-to-sequence speech
+# recognition task. Pointers for this are left as comments.
+
+import logging
+import os
+import sys
+import numpy as np
+from dataclasses import dataclass, field
+from typing import Any, Dict, List, Optional, Union
+
+import datasets
+import torch
+from datasets import DatasetDict, IterableDatasetDict, interleave_datasets, load_dataset
+from torch.utils.data import IterableDataset
+
+import evaluate
+import transformers
+from transformers import (
+    AutoConfig,
+    AutoFeatureExtractor,
+    AutoModelForSpeechSeq2Seq,
+    AutoProcessor,
+    AutoTokenizer,
+    HfArgumentParser,
+    Seq2SeqTrainer,
+    Seq2SeqTrainingArguments,
+    TrainerCallback,
+    set_seed,
+)
+from transformers.models.whisper.english_normalizer import BasicTextNormalizer
+from transformers.trainer_pt_utils import IterableDatasetShard
+from transformers.trainer_utils import get_last_checkpoint, is_main_process
+from transformers.utils import check_min_version, send_example_telemetry
+from transformers.utils.versions import require_version
+
+
+# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
+check_min_version("4.25.0.dev0")
+
+require_version("datasets>=1.18.2", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class ModelArguments:
+    """
+    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
+    """
+
+    model_name_or_path: str = field(
+        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
+    )
+    config_name: Optional[str] = field(
+        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
+    )
+    tokenizer_name: Optional[str] = field(
+        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
+    )
+    feature_extractor_name: Optional[str] = field(
+        default=None, metadata={"help": "Feature extractor name or path if not the same as model_name"}
+    )
+    cache_dir: Optional[str] = field(
+        default=None,
+        metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
+    )
+    use_fast_tokenizer: bool = field(
+        default=True,
+        metadata={"help": "Whether to use one of the fast tokenizers (backed by the tokenizers library) or not."},
+    )
+    model_revision: str = field(
+        default="main",
+        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
+    )
+    use_auth_token: bool = field(
+        default=False,
+        metadata={
+            "help": (
+                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
+                "with private models)."
+            )
+        },
+    )
+    freeze_feature_encoder: bool = field(
+        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
+    )
+    freeze_encoder: bool = field(
+        default=False, metadata={"help": "Whether to freeze the entire encoder of the seq2seq model."}
+    )
+    forced_decoder_ids: List[List[int]] = field(
+        default=None,
+        metadata={
+            "help": (
+                "A list of pairs of integers which indicates a mapping from generation indices to token indices "
+                "that will be forced before sampling. For example, [[0, 123]] means the first generated token "
+                "will always be a token of index 123."
+            )
+        },
+    )
+    suppress_tokens: List[int] = field(
+        default=None, metadata={"help": "A list of tokens that will be suppressed at generation."}
+    )
+    model_index_name: str = field(default=None, metadata={"help": "Pretty name for the model card."})
+
+
+@dataclass
+class DataTrainingArguments:
+    """
+    Arguments pertaining to what data we are going to input our model for training and eval.
+    """
+
+    dataset_name: str = field(
+        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
+    )
+    dataset_config_name: Optional[str] = field(
+        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
+    )
+    text_column: Optional[str] = field(
+        default=None,
+        metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."},
+    )
+    max_train_samples: Optional[int] = field(
+        default=None,
+        metadata={
+            "help": (
+                "For debugging purposes or quicker training, truncate the number of training examples to this "
+                "value if set."
+            )
+        },
+    )
+    max_eval_samples: Optional[int] = field(
+        default=None,
+        metadata={
+            "help": (
+                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
+                "value if set."
+            )
+        },
+    )
+    audio_column_name: str = field(
+        default="audio",
+        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
+    )
+    text_column_name: str = field(
+        default="text",
+        metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
+    )
+    max_duration_in_seconds: float = field(
+        default=20.0,
+        metadata={
+            "help": (
+                "Truncate audio files that are longer than `max_duration_in_seconds` seconds to "
+                "`max_duration_in_seconds`"
+            )
+        },
+    )
+    min_duration_in_seconds: float = field(
+        default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
+    )
+    train_split_name: str = field(
+        default="train",
+        metadata={
+            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
+        },
+    )
+    eval_split_name: str = field(
+        default="test",
+        metadata={
+            "help": "The name of the evaluation data set split to use (via the datasets library). Defaults to 'test'"
+        },
+    )
+    do_lower_case: bool = field(
+        default=False,
+        metadata={"help": "Whether the target text should be lower cased."},
+    )
+    do_remove_punctuation: bool = field(
+        default=False,
+        metadata={"help": "Whether the target text should be stripped of punctuation."},
+    )
+    do_normalize_eval: bool = field(
+        default=True,
+        metadata={"help": "Whether to normalise the references and predictions in the eval WER calculation."},
+    )
+    language: str = field(
+        default=None,
+        metadata={
+            "help": (
+                "Language for multilingual fine-tuning. This argument should be set for multilingual fine-tuning "
+                "only. For English speech recognition, it should be set to `None`."
+            )
+        },
+    )
+    task: str = field(
+        default="transcribe",
+        metadata={"help": "Task, either `transcribe` for speech recognition or `translate` for speech translation."},
+    )
+    shuffle_buffer_size: Optional[int] = field(
+        default=500,
+        metadata={
+            "help": (
+                "The number of streamed examples to download before shuffling them. The larger the buffer, "
+                "the closer it is to real offline shuffling."
+            )
+        },
+    )
+    streaming: bool = field(
+        default=True,
+        metadata={"help": "Whether to use streaming mode to load and pre-process the data."},
+    )
+
+
+@dataclass
+class DataCollatorSpeechSeq2SeqWithPadding:
+    """
+    Data collator that will dynamically pad the inputs received.
+    Args:
+        processor ([`WhisperProcessor`])
+            The processor used for processing the data.
+        decoder_start_token_id (`int`)
+            The start-of-sequence token id of the decoder.
+    """
+
+    processor: Any
+    decoder_start_token_id: int
+
+    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
+        # split inputs and labels since they have to be of different lengths and need
+        # different padding methods
+        model_input_name = self.processor.model_input_names[0]
+        input_features = [{model_input_name: feature[model_input_name]} for feature in features]
+        label_features = [{"input_ids": feature["labels"]} for feature in features]
+
+        batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt")
+
+        labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt")
+
+        # replace padding with -100 to ignore these positions in the loss
+        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
+
+        # if a bos token was appended in the previous tokenization step,
+        # cut it here since it is appended again during training anyway
+        if (labels[:, 0] == self.decoder_start_token_id).all().cpu().item():
+            labels = labels[:, 1:]
+
+        batch["labels"] = labels
+
+        return batch
+
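+# Usage sketch: the collator above is instantiated in section 10 below as
+#     DataCollatorSpeechSeq2SeqWithPadding(processor=processor,
+#                                          decoder_start_token_id=model.config.decoder_start_token_id)
+# and is called by the Trainer on a list of features to build one padded batch
+# with "input_features" and loss-masked "labels".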
+
+def load_maybe_streaming_dataset(dataset_name, dataset_config_name, split="train", streaming=True, **kwargs):
+    """
+    Utility function to load a dataset in streaming mode. For datasets with multiple splits,
+    each split is loaded individually and the splits are then combined by taking alternating
+    examples from each (interleaving).
+    """
+    if "+" in split:
+        # load multiple splits separated by the `+` symbol with streaming mode
+        dataset_splits = [
+            load_dataset(dataset_name, dataset_config_name, split=split_name, streaming=streaming, **kwargs)
+            for split_name in split.split("+")
+        ]
+        # interleave multiple splits to form one dataset
+        interleaved_dataset = interleave_datasets(dataset_splits)
+        return interleaved_dataset
+    else:
+        # load a single split *with* streaming mode
+        dataset = load_dataset(dataset_name, dataset_config_name, split=split, streaming=streaming, **kwargs)
+        return dataset
+
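+# For example, split="train+validation" loads both splits and interleaves them into a
+# single dataset; run.sh relies on this to train on train and validation together.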
+
+def main():
+    # 1. Parse input arguments
+    # See all possible arguments in src/transformers/training_args.py
+    # or by passing the --help flag to this script.
+    # We now keep distinct sets of args, for a cleaner separation of concerns.
+    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
+
+    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
+        # If we pass only one argument to the script and it's the path to a json file,
+        # let's parse it to get our arguments.
+        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
+    else:
+        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
+
+    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
+    # information sent is the one passed as arguments along with your Python/PyTorch versions.
+    send_example_telemetry("run_speech_recognition_seq2seq_streaming", model_args, data_args)
+
+    # 2. Setup logging
+    logging.basicConfig(
+        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+        datefmt="%m/%d/%Y %H:%M:%S",
+        handlers=[logging.StreamHandler(sys.stdout)],
+    )
+    log_level = training_args.get_process_log_level()
+    logger.setLevel(log_level)
+    datasets.utils.logging.set_verbosity(log_level)
+    transformers.utils.logging.set_verbosity(log_level)
+    transformers.utils.logging.enable_default_handler()
+    transformers.utils.logging.enable_explicit_format()
+
+    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
+
+    # Log on each process the small summary:
+    logger.warning(
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
+    )
+    logger.info(f"Training/evaluation parameters {training_args}")
+
+    # Set the verbosity to info of the Transformers logger (on main process only):
+    if is_main_process(training_args.local_rank):
+        transformers.utils.logging.set_verbosity_info()
+    logger.info("Training/evaluation parameters %s", training_args)
+
+    # 3. Detecting last checkpoint and eventually continue from last checkpoint
+    last_checkpoint = None
+    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
+        last_checkpoint = get_last_checkpoint(training_args.output_dir)
+        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
+            raise ValueError(
+                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
+                "Use --overwrite_output_dir to overcome."
+            )
+        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
+            logger.info(
+                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
+                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
+            )
+
+    # Set seed before initializing model.
+    set_seed(training_args.seed)
+
+    # 4. Load dataset
+    raw_datasets = IterableDatasetDict() if data_args.streaming else DatasetDict()
+
+    if training_args.do_train:
+        raw_datasets["train"] = load_maybe_streaming_dataset(
+            data_args.dataset_name,
+            data_args.dataset_config_name,
+            split=data_args.train_split_name,
+            use_auth_token=True if model_args.use_auth_token else None,
+            streaming=data_args.streaming,
+        )
+
+    if training_args.do_eval:
+        raw_datasets["eval"] = load_maybe_streaming_dataset(
+            data_args.dataset_name,
+            data_args.dataset_config_name,
+            split=data_args.eval_split_name,
+            use_auth_token=True if model_args.use_auth_token else None,
+            streaming=data_args.streaming,
+        )
+
+    raw_datasets_features = list(next(iter(raw_datasets.values())).features.keys())
+
+    if data_args.audio_column_name not in raw_datasets_features:
+        raise ValueError(
+            f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
+            "Make sure to set `--audio_column_name` to the correct audio column - one of "
+            f"{', '.join(raw_datasets_features)}."
+        )
+
+    if data_args.text_column_name not in raw_datasets_features:
+        raise ValueError(
+            f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
+            "Make sure to set `--text_column_name` to the correct text column - one of "
+            f"{', '.join(raw_datasets_features)}."
+        )
+
+    # 5. Load pretrained model, tokenizer, and feature extractor
+    #
+    # Distributed training:
+    # The .from_pretrained methods guarantee that only one local process can concurrently
+    # download model & vocab.
+    config = AutoConfig.from_pretrained(
+        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
+        cache_dir=model_args.cache_dir,
+        revision=model_args.model_revision,
+        use_auth_token=True if model_args.use_auth_token else None,
+    )
+
+    config.update({"forced_decoder_ids": model_args.forced_decoder_ids, "suppress_tokens": model_args.suppress_tokens})
+
+    if training_args.gradient_checkpointing:
+        config.update({"use_cache": False})
+
+    feature_extractor = AutoFeatureExtractor.from_pretrained(
+        model_args.feature_extractor_name if model_args.feature_extractor_name else model_args.model_name_or_path,
+        cache_dir=model_args.cache_dir,
+        revision=model_args.model_revision,
+        use_auth_token=True if model_args.use_auth_token else None,
+    )
+    tokenizer = AutoTokenizer.from_pretrained(
+        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
+        cache_dir=model_args.cache_dir,
+        use_fast=model_args.use_fast_tokenizer,
+        revision=model_args.model_revision,
+        use_auth_token=True if model_args.use_auth_token else None,
+    )
+    model = AutoModelForSpeechSeq2Seq.from_pretrained(
+        model_args.model_name_or_path,
+        config=config,
+        cache_dir=model_args.cache_dir,
+        revision=model_args.model_revision,
+        use_auth_token=True if model_args.use_auth_token else None,
+    )
+
+    if model.config.decoder_start_token_id is None:
+        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
+
+    if model_args.freeze_feature_encoder:
+        model.freeze_feature_encoder()
+
+    if model_args.freeze_encoder:
+        model.freeze_encoder()
+
+    if data_args.language is not None:
+        # We only need to set the task id when the language is specified (i.e. in a multilingual setting)
+        tokenizer.set_prefix_tokens(language=data_args.language, task=data_args.task)
+
+    # 6. Resample speech dataset if necessary
+    dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
+    if dataset_sampling_rate != feature_extractor.sampling_rate:
+        raw_datasets = raw_datasets.cast_column(
+            data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
+        )
+
+    # 7. Preprocessing the datasets.
+    # We need to read the audio files as arrays and tokenize the targets.
+    max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
+    min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
+    audio_column_name = data_args.audio_column_name
+    text_column_name = data_args.text_column_name
+    model_input_name = feature_extractor.model_input_names[0]
+    do_lower_case = data_args.do_lower_case
+    do_remove_punctuation = data_args.do_remove_punctuation
+    normalizer = BasicTextNormalizer()  # 'official' text normalizer from OpenAI
+
+    if data_args.max_train_samples is not None:
+        raw_datasets["train"] = (
+            raw_datasets["train"].take(data_args.max_train_samples)
+            if data_args.streaming
+            else raw_datasets["train"].select(range(data_args.max_train_samples))
+        )
+
+    if data_args.max_eval_samples is not None:
+        raw_datasets["eval"] = (
+            raw_datasets["eval"].take(data_args.max_eval_samples)
+            if data_args.streaming
+            else raw_datasets["eval"].select(range(data_args.max_eval_samples))
+        )
+
+    def prepare_dataset(batch):
+        # process audio
+        sample = batch[audio_column_name]
+        inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
+        # process audio length
+        batch[model_input_name] = inputs.get(model_input_name)[0]
+        batch["input_length"] = len(sample["array"])
+
+        # process targets
+        input_str = batch[text_column_name].lower() if do_lower_case else batch[text_column_name]
+        if do_remove_punctuation:
+            input_str = normalizer(input_str).strip()
+        batch["labels"] = tokenizer(input_str).input_ids
+        return batch
+
+    def prepare_dataset_bayar(batch):
+        # Pack consecutive short clips into chunks of at most MAX_AUDIO_DURATION seconds:
+        # the audio arrays are concatenated and the transcripts joined with spaces, so the
+        # model trains on dense, Whisper-sized 30 s windows instead of isolated short clips.
+        MAX_LENGTH = 225
+        MAX_AUDIO_DURATION = 30
+        sample = batch[audio_column_name][0]
+        DEFAULT_SAMPLING_RATE = sample["sampling_rate"]
+
+        bs = len(batch[text_column_name])
+        result = {"input_features": [], "labels": []}
+        list_arr, list_text, total = [], [], 0
+        for i in range(bs + 1):  # one extra iteration to flush the final buffer
+            if i == bs or total + batch[audio_column_name][i]["array"].shape[0] / DEFAULT_SAMPLING_RATE > MAX_AUDIO_DURATION:
+                if total == 0:
+                    continue  # nothing buffered (can happen when i == bs)
+                tokens = tokenizer(" ".join(list_text)).input_ids
+                if len(tokens) > MAX_LENGTH:
+                    # too long -> might mislead to a non-aligning problem; drop the chunk,
+                    # and reset the buffer so the remaining samples are not skipped as well
+                    list_arr, list_text, total = [], [], 0
+                    continue
+
+                result["input_features"].append(
+                    feature_extractor(np.concatenate(list_arr), sampling_rate=DEFAULT_SAMPLING_RATE).input_features[0]
+                )
+                result["labels"].append(tokens)
+                list_arr, list_text, total = [], [], 0
+            if i < bs:
+                duration = batch[audio_column_name][i]["array"].shape[0] / DEFAULT_SAMPLING_RATE
+                if duration > MAX_AUDIO_DURATION:
+                    continue  # a single clip longer than one window is skipped entirely
+                total += duration
+                list_arr.append(batch[audio_column_name][i]["array"])
+                list_text.append(batch[text_column_name][i])
+        return result
+
+    def prepare_dataset_concatenated(batch):
+        """
+        Alternative batched packer (ignores do_lower_case and do_remove_punctuation):
+        greedily fills a buffer with audio arrays and texts up to MAX_AUDIO_DURATION
+        seconds, then emits one concatenated example per full buffer.
+        Returns a dict with 'input_features' and 'labels' lists.
+        """
+        MAX_AUDIO_DURATION = 30
+        result = {
+            "input_features": [],
+            "labels": [],
+        }
+
+        buffer = {
+            "duration": 0,
+            "array": [],
+            "text": [],
+        }
+        # assume every clip in the batch shares the sampling rate of the first one
+        sampling_rate = batch[audio_column_name][0]["sampling_rate"]
+        batch_size = len(batch[text_column_name])
+        for i in range(batch_size):
+            audio_array = batch[audio_column_name][i]["array"]
+            text = batch[text_column_name][i]
+            audio_duration = audio_array.shape[0] / sampling_rate
+            if buffer["duration"] + audio_duration < MAX_AUDIO_DURATION:
+                buffer["array"].append(audio_array)
+                buffer["text"].append(text)
+                buffer["duration"] += audio_duration
+            else:
+                tokens = tokenizer(" ".join(buffer["text"])).input_ids
+                result["labels"].append(tokens)
+                feature = feature_extractor(np.concatenate(buffer["array"]), sampling_rate=sampling_rate).input_features[0]
+                result["input_features"].append(feature)
+                # start the next buffer with the clip that did not fit, so it is not dropped
+                buffer = {
+                    "duration": audio_duration,
+                    "array": [audio_array],
+                    "text": [text],
+                }
+
+        # flush buffer
+        if len(buffer["array"]):
+            tokens = tokenizer(" ".join(buffer["text"])).input_ids
+            result["labels"].append(tokens)
+            feature = feature_extractor(np.concatenate(buffer["array"]), sampling_rate=sampling_rate).input_features[0]
+            result["input_features"].append(feature)
+
+        return result
+
+
+    with training_args.main_process_first(desc="dataset map pre-processing"):
+        vectorized_datasets = raw_datasets.map(
+            prepare_dataset_bayar,
+            batched=True,
+            batch_size=64,
+            remove_columns=raw_datasets_features,
+        ).with_format("torch")
+
+    if training_args.do_train and data_args.streaming:
+        # manually shuffle if streaming (done by the trainer for non-streaming)
+        vectorized_datasets["train"] = vectorized_datasets["train"].shuffle(
+            buffer_size=data_args.shuffle_buffer_size,
+            seed=training_args.seed,
+        )
+
+    # filter training data that is shorter than min_input_length or longer than
+    # max_input_length (note: not applied in this variant, since prepare_dataset_bayar
+    # does not emit an `input_length` column)
+    def is_audio_in_length_range(length):
+        return min_input_length < length < max_input_length
+
+
+    # 8. Load Metric
+    metric = evaluate.load("wer")
+    do_normalize_eval = data_args.do_normalize_eval
+
+    def compute_metrics(pred):
+        pred_ids = pred.predictions
+
+        pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id
+
+        pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
+        # we do not want to group tokens when computing the metrics
+        label_str = tokenizer.batch_decode(pred.label_ids, skip_special_tokens=True)
+
+        if do_normalize_eval:
+            pred_str = [normalizer(pred) for pred in pred_str]
+            label_str = [normalizer(label) for label in label_str]
+            # filtering step to only evaluate the samples that correspond to non-empty references:
+            pred_str = [pred_str[i] for i in range(len(pred_str)) if len(label_str[i]) > 0]
+            label_str = [label_str[i] for i in range(len(label_str)) if len(label_str[i]) > 0]
+
+        wer = 100 * metric.compute(predictions=pred_str, references=label_str)
+
+        return {"wer": wer}
+
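+    # For instance, metric.compute(predictions=["γεια σου"], references=["γεια σας"])
+    # yields 0.5 (one substituted word out of two reference words), reported as wer = 50.0.
+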
+    # 9. Create a single speech processor
+    if is_main_process(training_args.local_rank):
+        # save feature extractor, tokenizer and config
+        feature_extractor.save_pretrained(training_args.output_dir)
+        tokenizer.save_pretrained(training_args.output_dir)
+        config.save_pretrained(training_args.output_dir)
+
+    processor = AutoProcessor.from_pretrained(training_args.output_dir)
+
+    # 10. Define data collator
+    data_collator = DataCollatorSpeechSeq2SeqWithPadding(
+        processor=processor,
+        decoder_start_token_id=model.config.decoder_start_token_id,
+    )
+
+    # 11. Configure Trainer
+    # Trainer callback to reinitialise and reshuffle the streamable datasets at the beginning of each epoch
+    # Only required for streaming: Trainer automatically shuffles non-streaming datasets
+    class ShuffleCallback(TrainerCallback):
+        def on_epoch_begin(self, args, state, control, train_dataloader, **kwargs):
+            if isinstance(train_dataloader.dataset, IterableDatasetShard):
+                pass  # set_epoch() is handled by the Trainer
+            elif isinstance(train_dataloader.dataset, IterableDataset):
+                train_dataloader.dataset.set_epoch(train_dataloader.dataset._epoch + 1)
+
+    # Initialize Trainer
+    trainer = Seq2SeqTrainer(
+        model=model,
+        args=training_args,
+        train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
+        eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
+        tokenizer=feature_extractor,
+        data_collator=data_collator,
+        compute_metrics=compute_metrics if training_args.predict_with_generate else None,
+        callbacks=[ShuffleCallback()] if data_args.streaming else None,
+    )
+
+    # 12. Training
+    if training_args.do_train:
+        checkpoint = None
+        if training_args.resume_from_checkpoint is not None:
+            checkpoint = training_args.resume_from_checkpoint
+        elif last_checkpoint is not None:
+            checkpoint = last_checkpoint
+        train_result = trainer.train(resume_from_checkpoint=checkpoint)
+        trainer.save_model()  # Saves the feature extractor too for easy upload
+
+        metrics = train_result.metrics
+        if data_args.max_train_samples:
+            metrics["train_samples"] = data_args.max_train_samples
+        trainer.log_metrics("train", metrics)
+        trainer.save_metrics("train", metrics)
+        trainer.save_state()
+
+    # 13. Evaluation
+    results = {}
+    if training_args.do_eval:
+        logger.info("*** Evaluate ***")
+        metrics = trainer.evaluate(
+            metric_key_prefix="eval",
+            max_length=training_args.generation_max_length,
+            num_beams=training_args.generation_num_beams,
+        )
+        if data_args.max_eval_samples:
+            metrics["eval_samples"] = data_args.max_eval_samples
+
+        trainer.log_metrics("eval", metrics)
+        trainer.save_metrics("eval", metrics)
+
+    # 14. Write Training Stats
+    kwargs = {
+        "finetuned_from": model_args.model_name_or_path,
+        "tasks": "automatic-speech-recognition",
+        "tags": "whisper-event",
+    }
+    if data_args.dataset_name is not None:
+        kwargs["dataset_tags"] = data_args.dataset_name
+        if data_args.dataset_config_name is not None:
+            kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
+        else:
+            kwargs["dataset"] = data_args.dataset_name
+        if "common_voice" in data_args.dataset_name:
+            kwargs["language"] = data_args.dataset_config_name.split("-")[0]
+        if model_args.model_index_name is not None:
+            kwargs["model_name"] = model_args.model_index_name
+
+    if training_args.push_to_hub:
+        trainer.push_to_hub(**kwargs)
+    else:
+        trainer.create_model_card(**kwargs)
+
+    return results
+
+
+if __name__ == "__main__":
+    main()
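
The heart of the new script is prepare_dataset_bayar, which packs runs of short clips into dense, Whisper-sized 30-second training windows. A self-contained sketch of the same packing idea on dummy data (function name and durations are illustrative, not from the commit):

    import numpy as np

    SAMPLING_RATE = 16_000      # Whisper's expected sampling rate
    MAX_AUDIO_DURATION = 30     # seconds per packed window

    def pack_clips(arrays, texts):
        """Greedily concatenate clips into <= 30 s windows, joining their transcripts."""
        windows, buf_arr, buf_txt, total = [], [], [], 0.0
        for arr, txt in zip(arrays, texts):
            duration = arr.shape[0] / SAMPLING_RATE
            if total + duration > MAX_AUDIO_DURATION and buf_arr:
                windows.append((np.concatenate(buf_arr), " ".join(buf_txt)))
                buf_arr, buf_txt, total = [], [], 0.0
            buf_arr.append(arr)
            buf_txt.append(txt)
            total += duration
        if buf_arr:  # flush the final partial window
            windows.append((np.concatenate(buf_arr), " ".join(buf_txt)))
        return windows

    # Ten 4-second dummy clips pack into two windows: 28 s (7 clips) and 12 s (3 clips).
    clips = [np.zeros(4 * SAMPLING_RATE) for _ in range(10)]
    words = [f"word{i}" for i in range(10)]
    for audio, text in pack_clips(clips, words):
        print(audio.shape[0] / SAMPLING_RATE, text)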
run.sh CHANGED
@@ -1,13 +1,13 @@
-deepspeed run_speech_recognition_seq2seq_streaming-farsipal.py \
+deepspeed run-ba.py \
   --deepspeed="ds_config.json" \
   --model_name_or_path="emilios/whisper-medium-el" \
-  --dataset_name="mozilla-foundation/common_voice_11_0,google/fleurs,MLCommons/ml_spoken_words" \
-  --dataset_config_name="el,el_gr,el_wav" \
+  --dataset_name="MLCommons/ml_spoken_words" \
+  --dataset_config_name="el_wav" \
   --language="greek" \
-  --train_split_name="train+validation,train+validation,train+validation" \
-  --eval_split_name="test,-,-" \
+  --train_split_name="train+validation" \
+  --eval_split_name="test" \
   --model_index_name="Whisper medium Greek El Greco" \
-  --text_column_name="sentence,transcription,keyword" \
+  --text_column_name="keyword" \
   --torch_compile="True" \
   --torch_compile_mode="reduce-overhead" \
   --torch_compile_mode="max-autotune" \
@@ -39,7 +39,7 @@ deepspeed run_speech_recognition_seq2seq_streaming-farsipal.py \
   --do_eval \
   --predict_with_generate \
   --do_normalize_eval \
-  --streaming="True" \
+  --streaming="false" \
   --use_auth_token \
   --push_to_hub

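In effect, the new flags drop Common Voice and FLEURS and train non-streaming on ml_spoken_words alone. Roughly, the data the script now loads corresponds to the sketch below (the script itself goes through load_maybe_streaming_dataset, which splits on "+" and interleaves rather than concatenates):

    from datasets import load_dataset

    train = load_dataset("MLCommons/ml_spoken_words", "el_wav", split="train+validation")
    test = load_dataset("MLCommons/ml_spoken_words", "el_wav", split="test")
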
run.sh-ml ADDED
@@ -0,0 +1,45 @@
+deepspeed run_speech_recognition_seq2seq_streaming-farsipal.py \
+  --deepspeed="ds_config.json" \
+  --model_name_or_path="emilios/whisper-medium-el" \
+  --dataset_name="MLCommons/ml_spoken_words" \
+  --dataset_config_name="el_wav" \
+  --language="greek" \
+  --train_split_name="train+validation" \
+  --eval_split_name="test" \
+  --model_index_name="Whisper medium Greek El Greco" \
+  --text_column_name="keyword" \
+  --torch_compile="True" \
+  --torch_compile_mode="reduce-overhead" \
+  --torch_compile_mode="max-autotune" \
+  --logging_steps="25" \
+  --learning_rate="1e-5" \
+  --max_steps="5000" \
+  --output_dir="./" \
+  --per_device_train_batch_size="32" \
+  --gradient_accumulation_steps="1" \
+  --per_device_eval_batch_size="16" \
+  --seed="42" \
+  --warmup_steps="500" \
+  --evaluation_strategy="steps" \
+  --eval_steps="1000" \
+  --save_strategy="steps" \
+  --save_steps="1000" \
+  --generation_max_length="225" \
+  --length_column_name="input_length" \
+  --max_duration_in_seconds="30" \
+  --freeze_feature_encoder="False" \
+  --report_to="tensorboard" \
+  --metric_for_best_model="wer" \
+  --greater_is_better="False" \
+  --load_best_model_at_end \
+  --gradient_checkpointing \
+  --fp16 \
+  --overwrite_output_dir \
+  --do_train \
+  --do_eval \
+  --predict_with_generate \
+  --do_normalize_eval \
+  --streaming="false" \
+  --use_auth_token \
+  --push_to_hub
+
run_speech_recognition_seq2seq_streaming-farsipal.py CHANGED
@@ -626,7 +626,8 @@ def main():
     vectorized_datasets = raw_datasets.map(
         prepare_dataset,
         remove_columns=raw_datasets_features,
-        num_proc=training_args.dataloader_num_workers if training_args.dataloader_num_workers else 1
+        num_proc=1
+        # training_args.dataloader_num_workers if training_args.dataloader_num_workers else 1
     ).with_format("torch")
 
     if training_args.do_train and data_args.streaming:
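
A note on this change: with --streaming enabled, raw_datasets is an IterableDatasetDict, and (in datasets 2.x) IterableDataset.map() accepts no num_proc argument at all, so even the hard-coded num_proc=1 only runs together with --streaming="false", which run.sh above now sets.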
runs/Dec21_15-10-30_129-146-176-120/1671635623.0396266/events.out.tfevents.1671635623.129-146-176-120.336202.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e637a709c0a92f986e621b2cf0a18316cfd6c4ad09a5058e2b055cf33f9a33a
+size 5893
runs/Dec21_15-10-30_129-146-176-120/events.out.tfevents.1671635623.129-146-176-120.336202.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e051d9ddb006f0759a1395e84814c14869d7e431a8f70be36267b6e66fa2f55
+size 4454
runs/Dec21_15-23-45_129-146-176-120/1671638237.3718066/events.out.tfevents.1671638237.129-146-176-120.404327.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e6269df941ed5de5bc43fa52f035a4028a154e7ad3f3d722aaf526fed2715cdc
+size 5893
runs/Dec21_15-23-45_129-146-176-120/events.out.tfevents.1671638237.129-146-176-120.404327.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:061ab622ea6ceb87b32eeb4da76a26a6869aa0fc859857bbfc23f1a211bd0a53
+size 6483
runs/Dec21_16-27-53_129-146-176-120/1671640138.7645435/events.out.tfevents.1671640138.129-146-176-120.405220.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:59a79bd71c0e7d62b4a046c0a400971f2ac8593db92f494759a7ca6f07a6f2db
+size 5893
runs/Dec21_16-27-53_129-146-176-120/events.out.tfevents.1671640138.129-146-176-120.405220.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:797ce9ce2e24095627d5c6440c64d62b8158d481fae06ae23c654ded7e67f093
+size 10883
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6da1c9c120402e04d595fed3d17b52b3cda01b8dca371d611d7c3182cb9aa3ce
+oid sha256:09c68b7169b60f8c66aaf3de7076f81ce4ddd82e92e374499f1d9e4886a849c1
 size 4731