Ranjit committed on
Commit 62b8e70
1 Parent(s): 165f6de

Delete run_speech_recognition_seq2seq_streaming.py

run_speech_recognition_seq2seq_streaming.py DELETED
@@ -1,630 +0,0 @@
- #!/usr/bin/env python
- # coding=utf-8
- # Copyright 2022 The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """
- Fine-tuning the library models for sequence to sequence speech recognition
- with 🤗 Datasets' streaming mode.
- """
- # You can also adapt this script for your own sequence to sequence speech
- # recognition task. Pointers for this are left as comments.
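
For context, the deleted script was launched from the command line. A sketch of one plausible invocation (model, dataset, and hyperparameter choices here are illustrative, not taken from this repo):

    python run_speech_recognition_seq2seq_streaming.py \
        --model_name_or_path="openai/whisper-small" \
        --dataset_name="mozilla-foundation/common_voice_11_0" \
        --dataset_config_name="hi" \
        --language="hindi" \
        --train_split_name="train+validation" \
        --eval_split_name="test" \
        --output_dir="./whisper-small-hi" \
        --do_train --do_eval \
        --predict_with_generate \
        --streaming

Every flag above corresponds either to a dataclass field defined in the script or to a standard Seq2SeqTrainingArguments option.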
-
- import logging
- import os
- import sys
- from dataclasses import dataclass, field
- from typing import Any, Dict, List, Optional, Union
-
- import datasets
- import torch
- from datasets import DatasetDict, IterableDatasetDict, interleave_datasets, load_dataset
- from torch.utils.data import IterableDataset
-
- import evaluate
- import transformers
- from transformers import (
-     AutoConfig,
-     AutoFeatureExtractor,
-     AutoModelForSpeechSeq2Seq,
-     AutoProcessor,
-     AutoTokenizer,
-     HfArgumentParser,
-     Seq2SeqTrainer,
-     Seq2SeqTrainingArguments,
-     TrainerCallback,
-     set_seed,
- )
- from transformers.models.whisper.english_normalizer import BasicTextNormalizer
- from transformers.trainer_pt_utils import IterableDatasetShard
- from transformers.trainer_utils import get_last_checkpoint, is_main_process
- from transformers.utils import check_min_version, send_example_telemetry
- from transformers.utils.versions import require_version
-
-
- # Will error if the minimal version of Transformers is not installed. Remove at your own risk.
- check_min_version("4.25.0.dev0")
-
- require_version("datasets>=1.18.2", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")
-
- logger = logging.getLogger(__name__)
-
-
- @dataclass
- class ModelArguments:
-     """
-     Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
-     """
-
-     model_name_or_path: str = field(
-         metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
-     )
-     config_name: Optional[str] = field(
-         default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
-     )
-     tokenizer_name: Optional[str] = field(
-         default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
-     )
-     feature_extractor_name: Optional[str] = field(
-         default=None, metadata={"help": "Feature extractor name or path if not the same as model_name"}
-     )
-     cache_dir: Optional[str] = field(
-         default=None,
-         metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
-     )
-     use_fast_tokenizer: bool = field(
-         default=True,
-         metadata={"help": "Whether to use a fast tokenizer (backed by the tokenizers library) or not."},
-     )
-     model_revision: str = field(
-         default="main",
-         metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
-     )
-     use_auth_token: bool = field(
-         default=False,
-         metadata={
-             "help": (
-                 "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
-                 "with private models)."
-             )
-         },
-     )
-     freeze_feature_encoder: bool = field(
-         default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
-     )
-     freeze_encoder: bool = field(
-         default=False, metadata={"help": "Whether to freeze the entire encoder of the seq2seq model."}
-     )
-     forced_decoder_ids: List[List[int]] = field(
-         default=None,
-         metadata={
-             "help": (
-                 "A list of pairs of integers which indicates a mapping from generation indices to token indices "
-                 "that will be forced before sampling. For example, [[0, 123]] means the first generated token "
-                 "will always be a token of index 123."
-             )
-         },
-     )
-     suppress_tokens: List[int] = field(
-         default=None, metadata={"help": "A list of tokens that will be suppressed at generation."}
-     )
-     model_index_name: str = field(default=None, metadata={"help": "Pretty name for the model card."})
-
-
- @dataclass
- class DataTrainingArguments:
-     """
-     Arguments pertaining to what data we are going to input our model for training and eval.
-     """
-
-     dataset_name: str = field(
-         default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
-     )
-     dataset_config_name: Optional[str] = field(
-         default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
-     )
-     text_column: Optional[str] = field(
-         default=None,
-         metadata={"help": "The name of the column in the datasets containing the full texts (not used by this script)."},
-     )
-     max_train_samples: Optional[int] = field(
-         default=None,
-         metadata={
-             "help": (
-                 "For debugging purposes or quicker training, truncate the number of training examples to this "
-                 "value if set."
-             )
-         },
-     )
-     max_eval_samples: Optional[int] = field(
-         default=None,
-         metadata={
-             "help": (
-                 "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
-                 "value if set."
-             )
-         },
-     )
-     audio_column_name: str = field(
-         default="audio",
-         metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
-     )
-     text_column_name: str = field(
-         default="text",
-         metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
-     )
-     max_duration_in_seconds: float = field(
-         default=20.0,
-         metadata={
-             "help": (
-                 "Truncate audio files that are longer than `max_duration_in_seconds` seconds to"
-                 " `max_duration_in_seconds`"
-             )
-         },
-     )
-     min_duration_in_seconds: float = field(
-         default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
-     )
-     train_split_name: str = field(
-         default="train",
-         metadata={
-             "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
-         },
-     )
-     eval_split_name: str = field(
-         default="test",
-         metadata={
-             "help": "The name of the evaluation data set split to use (via the datasets library). Defaults to 'test'"
-         },
-     )
-     do_lower_case: bool = field(
-         default=False,
-         metadata={"help": "Whether the target text should be lower cased."},
-     )
-     do_remove_punctuation: bool = field(
-         default=False,
-         metadata={"help": "Whether the target text should be stripped of punctuation."},
-     )
-     do_normalize_eval: bool = field(
-         default=True,
-         metadata={"help": "Whether to normalise the references and predictions in the eval WER calculation."},
-     )
-     language: str = field(
-         default=None,
-         metadata={
-             "help": (
-                 "Language for multilingual fine-tuning. This argument should be set for multilingual fine-tuning "
-                 "only. For English speech recognition, it should be set to `None`."
-             )
-         },
-     )
-     task: str = field(
-         default="transcribe",
-         metadata={"help": "Task, either `transcribe` for speech recognition or `translate` for speech translation."},
-     )
-     shuffle_buffer_size: Optional[int] = field(
-         default=500,
-         metadata={
-             "help": (
-                 "The number of streamed examples to download before shuffling them. The larger the buffer, "
-                 "the closer it is to true offline shuffling."
-             )
-         },
-     )
-     streaming: bool = field(
-         default=True,
-         metadata={"help": "Whether to use streaming mode to load and pre-process the data."},
-     )
-
-
- @dataclass
- class DataCollatorSpeechSeq2SeqWithPadding:
-     """
-     Data collator that will dynamically pad the inputs received.
-     Args:
-         processor ([`WhisperProcessor`])
-             The processor used for processing the data.
-         decoder_start_token_id (`int`)
-             The begin-of-sentence token id of the decoder.
-     """
-
-     processor: Any
-     decoder_start_token_id: int
-
-     def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
-         # split inputs and labels since they have to be of different lengths and need
-         # different padding methods
-         model_input_name = self.processor.model_input_names[0]
-         input_features = [{model_input_name: feature[model_input_name]} for feature in features]
-         label_features = [{"input_ids": feature["labels"]} for feature in features]
-
-         batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt")
-
-         labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt")
-
-         # replace padding with -100 to ignore these positions in the loss
-         labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
-
-         # if a bos token was appended in the previous tokenization step,
-         # cut it here since it is appended again during generation anyway
-         if (labels[:, 0] == self.decoder_start_token_id).all().cpu().item():
-             labels = labels[:, 1:]
-
-         batch["labels"] = labels
-
-         return batch
-
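
A minimal sketch of how this collator could be exercised in isolation. The checkpoint, token id, and toy features below are illustrative assumptions, not part of the deleted script:

    import torch
    from transformers import WhisperProcessor

    processor = WhisperProcessor.from_pretrained("openai/whisper-small")
    collator = DataCollatorSpeechSeq2SeqWithPadding(
        processor=processor,
        decoder_start_token_id=processor.tokenizer.convert_tokens_to_ids("<|startoftranscript|>"),
    )
    # toy features shaped like the output of prepare_dataset below
    features = [
        {"input_features": torch.zeros(80, 3000), "labels": [50258, 440, 50257]},
        {"input_features": torch.zeros(80, 3000), "labels": [50258, 3000, 120, 50257]},
    ]
    batch = collator(features)
    # batch["input_features"]: (2, 80, 3000); batch["labels"]: padded to the longest
    # label sequence, with pad positions masked to -100 and the leading bos column cut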
-
- def load_maybe_streaming_dataset(dataset_name, dataset_config_name, split="train", streaming=True, **kwargs):
-     """
-     Utility function to load a dataset in streaming mode. For datasets with multiple splits,
-     each split is loaded individually and the splits are then combined by taking alternating
-     examples from each (interleaving).
-     """
-     if "+" in split:
-         # load multiple splits separated by the `+` symbol with streaming mode
-         dataset_splits = [
-             load_dataset(dataset_name, dataset_config_name, split=split_name, streaming=streaming, **kwargs)
-             for split_name in split.split("+")
-         ]
-         # interleave multiple splits to form one dataset
-         interleaved_dataset = interleave_datasets(dataset_splits)
-         return interleaved_dataset
-     else:
-         # load a single split *with* streaming mode
-         dataset = load_dataset(dataset_name, dataset_config_name, split=split, streaming=streaming, **kwargs)
-         return dataset
-
-
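For instance, a split of the form "train+validation" streams both splits and interleaves them example by example; the dataset name below is illustrative:

    ds = load_maybe_streaming_dataset(
        "mozilla-foundation/common_voice_11_0",
        "hi",
        split="train+validation",
        streaming=True,
    )
    print(next(iter(ds)))  # one streamed example; nothing else is downloaded up front
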
- def main():
-     # 1. Parse input arguments
-     # See all possible arguments in src/transformers/training_args.py
-     # or by passing the --help flag to this script.
-     # We now keep distinct sets of args, for a cleaner separation of concerns.
-     parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
-
-     if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
-         # If we pass only one argument to the script and it's the path to a json file,
-         # let's parse it to get our arguments.
-         model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
-     else:
-         model_args, data_args, training_args = parser.parse_args_into_dataclasses()
-
-     # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
-     # information sent is the one passed as arguments along with your Python/PyTorch versions.
-     send_example_telemetry("run_speech_recognition_seq2seq_streaming", model_args, data_args)
-
-     # 2. Setup logging
-     logging.basicConfig(
-         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
-         datefmt="%m/%d/%Y %H:%M:%S",
-         handlers=[logging.StreamHandler(sys.stdout)],
-     )
-     log_level = training_args.get_process_log_level()
-     logger.setLevel(log_level)
-     datasets.utils.logging.set_verbosity(log_level)
-     transformers.utils.logging.set_verbosity(log_level)
-     transformers.utils.logging.enable_default_handler()
-     transformers.utils.logging.enable_explicit_format()
-
-     logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
-
-     # Log a short summary on each process:
-     logger.warning(
-         f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
-         f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
-     )
-     logger.info(f"Training/evaluation parameters {training_args}")
-
-     # Set the verbosity to info of the Transformers logger (on main process only):
-     if is_main_process(training_args.local_rank):
-         transformers.utils.logging.set_verbosity_info()
-
-     # 3. Detect the last checkpoint and, if one exists, resume training from it
-     last_checkpoint = None
-     if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
-         last_checkpoint = get_last_checkpoint(training_args.output_dir)
-         if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
-             raise ValueError(
-                 f"Output directory ({training_args.output_dir}) already exists and is not empty. "
-                 "Use `--overwrite_output_dir` to overwrite it."
-             )
-         elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
-             logger.info(
-                 f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
-                 "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
-             )
-
-     # Set seed before initializing model.
-     set_seed(training_args.seed)
-
-     # 4. Load dataset
-     raw_datasets = IterableDatasetDict() if data_args.streaming else DatasetDict()
-
-     if training_args.do_train:
-         raw_datasets["train"] = load_maybe_streaming_dataset(
-             data_args.dataset_name,
-             data_args.dataset_config_name,
-             split=data_args.train_split_name,
-             use_auth_token=True if model_args.use_auth_token else None,
-             streaming=data_args.streaming,
-         )
-
-     if training_args.do_eval:
-         raw_datasets["eval"] = load_maybe_streaming_dataset(
-             data_args.dataset_name,
-             data_args.dataset_config_name,
-             split=data_args.eval_split_name,
-             use_auth_token=True if model_args.use_auth_token else None,
-             streaming=data_args.streaming,
-         )
-
-     raw_datasets_features = list(next(iter(raw_datasets.values())).features.keys())
-
-     if data_args.audio_column_name not in raw_datasets_features:
-         raise ValueError(
-             f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
-             "Make sure to set `--audio_column_name` to the correct audio column - one of "
-             f"{', '.join(raw_datasets_features)}."
-         )
-
-     if data_args.text_column_name not in raw_datasets_features:
-         raise ValueError(
-             f"--text_column_name '{data_args.text_column_name}' not found in dataset '{data_args.dataset_name}'. "
-             "Make sure to set `--text_column_name` to the correct text column - one of "
-             f"{', '.join(raw_datasets_features)}."
-         )
-
-     # 5. Load pretrained model, tokenizer, and feature extractor
-     #
-     # Distributed training:
-     # The .from_pretrained methods guarantee that only one local process can concurrently download model & vocab.
-     config = AutoConfig.from_pretrained(
-         model_args.config_name if model_args.config_name else model_args.model_name_or_path,
-         cache_dir=model_args.cache_dir,
-         revision=model_args.model_revision,
-         use_auth_token=True if model_args.use_auth_token else None,
-     )
-
-     config.update({"forced_decoder_ids": model_args.forced_decoder_ids, "suppress_tokens": model_args.suppress_tokens})
-
-     if training_args.gradient_checkpointing:
-         config.update({"use_cache": False})
-
-     feature_extractor = AutoFeatureExtractor.from_pretrained(
-         model_args.feature_extractor_name if model_args.feature_extractor_name else model_args.model_name_or_path,
-         cache_dir=model_args.cache_dir,
-         revision=model_args.model_revision,
-         use_auth_token=True if model_args.use_auth_token else None,
-     )
-     tokenizer = AutoTokenizer.from_pretrained(
-         model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
-         cache_dir=model_args.cache_dir,
-         use_fast=model_args.use_fast_tokenizer,
-         revision=model_args.model_revision,
-         use_auth_token=True if model_args.use_auth_token else None,
-     )
-     model = AutoModelForSpeechSeq2Seq.from_pretrained(
-         model_args.model_name_or_path,
-         config=config,
-         cache_dir=model_args.cache_dir,
-         revision=model_args.model_revision,
-         use_auth_token=True if model_args.use_auth_token else None,
-     )
-
-     if model.config.decoder_start_token_id is None:
-         raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
-
-     if model_args.freeze_feature_encoder:
-         model.freeze_feature_encoder()
-
-     if model_args.freeze_encoder:
-         model.freeze_encoder()
-
-     if data_args.language is not None:
-         # We only need to set the task id when the language is specified (i.e. in a multilingual setting)
-         tokenizer.set_prefix_tokens(language=data_args.language, task=data_args.task)
-
-     # 6. Resample speech dataset if necessary
-     dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
-     if dataset_sampling_rate != feature_extractor.sampling_rate:
-         raw_datasets = raw_datasets.cast_column(
-             data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
-         )
-
-     # 7. Preprocessing the datasets.
-     # We need to read the audio files as arrays and tokenize the targets.
-     max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
-     min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
-     audio_column_name = data_args.audio_column_name
-     text_column_name = data_args.text_column_name
-     model_input_name = feature_extractor.model_input_names[0]
-     do_lower_case = data_args.do_lower_case
-     do_remove_punctuation = data_args.do_remove_punctuation
-     normalizer = BasicTextNormalizer()  # 'official' text normalizer from OpenAI
-
-     if data_args.max_train_samples is not None:
-         raw_datasets["train"] = (
-             raw_datasets["train"].take(data_args.max_train_samples)
-             if data_args.streaming
-             else raw_datasets["train"].select(range(data_args.max_train_samples))
-         )
-
-     if data_args.max_eval_samples is not None:
-         raw_datasets["eval"] = (
-             raw_datasets["eval"].take(data_args.max_eval_samples)
-             if data_args.streaming
-             else raw_datasets["eval"].select(range(data_args.max_eval_samples))
-         )
-
-     def prepare_dataset(batch):
-         # process audio
-         sample = batch[audio_column_name]
-         inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
-         # process audio length
-         batch[model_input_name] = inputs.get(model_input_name)[0]
-         batch["input_length"] = len(sample["array"])
-
-         # process targets
-         input_str = batch[text_column_name].lower() if do_lower_case else batch[text_column_name]
-         if do_remove_punctuation:
-             input_str = normalizer(input_str).strip()
-         # truncate labels to the model's maximum target length (448 for Whisper)
-         batch["labels"] = tokenizer(input_str, max_length=448, truncation=True).input_ids
-         return batch
-
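
As a rough sanity check of what prepare_dataset produces, one could push a synthetic example through it. This is a sketch only: prepare_dataset is a closure inside main(), so the feature extractor, tokenizer, and column-name variables above must be in scope, and the fake clip is an illustration:

    import numpy as np

    example = {
        "audio": {"array": np.zeros(32000, dtype=np.float32), "sampling_rate": 16000},  # fake 2 s clip
        "text": "hello world",
    }
    processed = prepare_dataset(example)
    print(processed["input_features"].shape)  # (80, 3000): Whisper pads/truncates to 30 s of log-Mel frames
    print(processed["input_length"])          # 32000 raw samples, used by the length filter below
    print(processed["labels"][:5])            # token ids of the transcript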
-     with training_args.main_process_first(desc="dataset map pre-processing"):
-         vectorized_datasets = raw_datasets.map(
-             prepare_dataset,
-             remove_columns=raw_datasets_features,
-         ).with_format("torch")
-
-     if training_args.do_train and data_args.streaming:
-         # manually shuffle if streaming (done by the trainer for non-streaming)
-         vectorized_datasets["train"] = vectorized_datasets["train"].shuffle(
-             buffer_size=data_args.shuffle_buffer_size,
-             seed=training_args.seed,
-         )
-
-     # filter training data that is shorter than min_input_length or longer than
-     # max_input_length
-     def is_audio_in_length_range(length):
-         return min_input_length < length < max_input_length
-
-     if training_args.do_train:
-         vectorized_datasets["train"] = vectorized_datasets["train"].filter(
-             is_audio_in_length_range,
-             input_columns=["input_length"],
-         )
-
-     # 8. Load Metric
-     metric = evaluate.load("wer")
-     do_normalize_eval = data_args.do_normalize_eval
-
-     def compute_metrics(pred):
-         pred_ids = pred.predictions
-
-         pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id
-
-         pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
-         # we do not want to group tokens when computing the metrics
-         label_str = tokenizer.batch_decode(pred.label_ids, skip_special_tokens=True)
-
-         if do_normalize_eval:
-             pred_str = [normalizer(pred) for pred in pred_str]
-             label_str = [normalizer(label) for label in label_str]
-             # filtering step to only evaluate the samples that correspond to non-empty references:
-             pred_str = [pred_str[i] for i in range(len(pred_str)) if len(label_str[i]) > 0]
-             label_str = [label_str[i] for i in range(len(label_str)) if len(label_str[i]) > 0]
-
-         wer = 100 * metric.compute(predictions=pred_str, references=label_str)
-
-         return {"wer": wer}
-
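
The normalisation step matters because it removes casing and punctuation mismatches before scoring. A standalone, hand-checkable illustration using the same metric and normalizer as above:

    import evaluate
    from transformers.models.whisper.english_normalizer import BasicTextNormalizer

    wer_metric = evaluate.load("wer")
    normalizer = BasicTextNormalizer()

    pred, ref = "Hello, world!", "hello world"
    # raw: both words differ by case/punctuation -> 2 substitutions over 2 reference words
    print(100 * wer_metric.compute(predictions=[pred], references=[ref]))  # 100.0
    # normalised: both sides become "hello world"
    print(100 * wer_metric.compute(predictions=[normalizer(pred)], references=[normalizer(ref)]))  # 0.0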
-     # 9. Create a single speech processor
-     if is_main_process(training_args.local_rank):
-         # save feature extractor, tokenizer and config
-         feature_extractor.save_pretrained(training_args.output_dir)
-         tokenizer.save_pretrained(training_args.output_dir)
-         config.save_pretrained(training_args.output_dir)
-
-     processor = AutoProcessor.from_pretrained(training_args.output_dir)
-
-     # 10. Define data collator
-     data_collator = DataCollatorSpeechSeq2SeqWithPadding(
-         processor=processor,
-         decoder_start_token_id=model.config.decoder_start_token_id,
-     )
-
-     # 11. Configure Trainer
-     # Trainer callback to reinitialise and reshuffle the streamable datasets at the beginning of each epoch
-     # Only required for streaming: Trainer automatically shuffles non-streaming datasets
-     class ShuffleCallback(TrainerCallback):
-         def on_epoch_begin(self, args, state, control, train_dataloader, **kwargs):
-             if isinstance(train_dataloader.dataset, IterableDatasetShard):
-                 pass  # set_epoch() is handled by the Trainer
-             elif isinstance(train_dataloader.dataset, IterableDataset):
-                 train_dataloader.dataset.set_epoch(train_dataloader.dataset._epoch + 1)
-
-     # Initialize Trainer
-     trainer = Seq2SeqTrainer(
-         model=model,
-         args=training_args,
-         train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
-         eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
-         tokenizer=feature_extractor,
-         data_collator=data_collator,
-         compute_metrics=compute_metrics if training_args.predict_with_generate else None,
-         callbacks=[ShuffleCallback()] if data_args.streaming else None,
-     )
-
-     # 12. Training
-     if training_args.do_train:
-         checkpoint = None
-         if training_args.resume_from_checkpoint is not None:
-             checkpoint = training_args.resume_from_checkpoint
-         elif last_checkpoint is not None:
-             checkpoint = last_checkpoint
-         train_result = trainer.train(resume_from_checkpoint=checkpoint)
-         trainer.save_model()  # saves the feature extractor too, for easy upload
-
-         metrics = train_result.metrics
-         if data_args.max_train_samples:
-             metrics["train_samples"] = data_args.max_train_samples
-         trainer.log_metrics("train", metrics)
-         trainer.save_metrics("train", metrics)
-         trainer.save_state()
-
-     # 13. Evaluation
-     results = {}
-     if training_args.do_eval:
-         logger.info("*** Evaluate ***")
-         metrics = trainer.evaluate(
-             metric_key_prefix="eval",
-             max_length=training_args.generation_max_length,
-             num_beams=training_args.generation_num_beams,
-         )
-         if data_args.max_eval_samples:
-             metrics["eval_samples"] = data_args.max_eval_samples
-
-         trainer.log_metrics("eval", metrics)
-         trainer.save_metrics("eval", metrics)
-
-     # 14. Write Training Stats
-     kwargs = {
-         "finetuned_from": model_args.model_name_or_path,
-         "tasks": "automatic-speech-recognition",
-         "tags": "whisper-event",
-     }
-     if data_args.dataset_name is not None:
-         kwargs["dataset_tags"] = data_args.dataset_name
-         if data_args.dataset_config_name is not None:
-             kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
-         else:
-             kwargs["dataset"] = data_args.dataset_name
-         if "common_voice" in data_args.dataset_name:
-             kwargs["language"] = data_args.dataset_config_name.split('-')[0]
-     if model_args.model_index_name is not None:
-         kwargs["model_name"] = model_args.model_index_name
-
-     if training_args.push_to_hub:
-         trainer.push_to_hub(**kwargs)
-     else:
-         trainer.create_model_card(**kwargs)
-
-     return results
-
-
- if __name__ == "__main__":
-     main()