Porjaz committed
Commit cc94482
1 Parent(s): a62fdca

Create train.py

Files changed (1)
  1. train.py +373 -0
train.py ADDED
@@ -0,0 +1,373 @@
+ #!/usr/bin/env python3
+
+ import sys
+ import os
+
+ import torch
+ from torch.utils.data import DataLoader
+ import torchaudio
+ from hyperpyyaml import load_hyperpyyaml
+
+ import speechbrain as sb
+ from speechbrain.utils.data_utils import undo_padding
+ from speechbrain.utils.distributed import if_main_process, run_on_main
+ import logging
+ from transformers import AutoTokenizer
+
+ from jiwer import wer, cer
+
+ logger = logging.getLogger(__name__)
+
+
+ # Define training procedure
+ class ASR(sb.Brain):
+     def compute_forward(self, batch, stage):
+         """Forward computations from the waveform batches to the output probabilities."""
+         batch = batch.to(self.device)
+         wavs, wav_lens = batch.sig
+         bos_tokens, bos_tokens_lens = batch.tokens_bos
+
+         if stage == sb.Stage.TRAIN:
+             wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens)
+
+         # We compute the padding mask and replace the padded values with the pad_token_id
+         # that the Whisper decoder expects to see.
+         abs_tokens_lens = (bos_tokens_lens * bos_tokens.shape[1]).long()
+         pad_mask = (torch.arange(abs_tokens_lens.max(), device=self.device)[None, :] < abs_tokens_lens[:, None])
+         bos_tokens[~pad_mask] = self.tokenizer.pad_token_id
+
+         # Forward encoder + decoder
+         enc_out, logits, _ = self.modules.whisper(wavs, bos_tokens)
+         log_probs = self.hparams.log_softmax(logits)
+
+         hyps = None
+         if stage == sb.Stage.VALID:
+             hyps, _, _, _ = self.hparams.valid_search(enc_out.detach(), wav_lens)
+         elif stage == sb.Stage.TEST:
+             hyps, _, _, _ = self.hparams.test_search(enc_out.detach(), wav_lens)
+
+         return log_probs, hyps, wav_lens
+
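+     # The objective below is NLL between the decoder log-probabilities (teacher forcing on
+     # the BOS-shifted tokens computed above) and the EOS-shifted targets; on VALID/TEST the
+     # decoded search hypotheses are additionally scored with WER/CER.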
+     def compute_objectives(self, predictions, batch, stage):
+         """Computes the NLL loss given predictions and targets."""
+
+         (log_probs, hyps, wav_lens) = predictions
+         batch = batch.to(self.device)
+         ids = batch.id
+         tokens_eos, tokens_eos_lens = batch.tokens_eos
+
+         # Augment Labels
+         # if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"):
+         #     tokens_eos = self.hparams.wav_augment.replicate_labels(tokens_eos)
+         #     tokens_eos_lens = self.hparams.wav_augment.replicate_labels(
+         #         tokens_eos_lens
+         #     )
+
+         loss = self.hparams.nll_loss(log_probs, tokens_eos, length=tokens_eos_lens)
+
+         if stage != sb.Stage.TRAIN:
+             tokens, tokens_lens = batch.tokens
+
+             # Decode token terms to words
+             predicted_words = [self.tokenizer.decode(t, skip_special_tokens=True).strip() for t in hyps]
+
+             # Convert indices to words
+             target_words = undo_padding(tokens, tokens_lens)
+             target_words = self.tokenizer.batch_decode(target_words, skip_special_tokens=True)
+
+             if hasattr(self.hparams, "normalized_transcripts"):
+                 predicted_words = [self.tokenizer.normalize(text).split(" ") for text in predicted_words]
+                 target_words = [self.tokenizer.normalize(text).split(" ") for text in target_words]
+             else:
+                 predicted_words = [text.split(" ") for text in predicted_words]
+                 target_words = [text.split(" ") for text in target_words]
+
+             self.wer_metric.append(ids, predicted_words, target_words)
+             self.cer_metric.append(ids, predicted_words, target_words)
+
+         return loss
+
+     def on_stage_start(self, stage, epoch):
+         """Gets called at the beginning of each epoch."""
+         if stage != sb.Stage.TRAIN:
+             self.cer_metric = self.hparams.cer_computer()
+             self.wer_metric = self.hparams.error_rate_computer()
+
+     def on_stage_end(self, stage, stage_loss, epoch):
+         """Gets called at the end of an epoch."""
+         # Compute/store important stats
+         stage_stats = {"loss": stage_loss}
+         if stage == sb.Stage.TRAIN:
+             self.train_stats = stage_stats
+         else:
+             stage_stats["CER"] = self.cer_metric.summarize("error_rate")
+             stage_stats["WER"] = self.wer_metric.summarize("error_rate")
+
+         # Perform end-of-iteration things, like annealing, logging, etc.
+         if stage == sb.Stage.VALID:
+             lr = self.hparams.lr_annealing_whisper.current_lr
+             self.hparams.train_logger.log_stats(
+                 stats_meta={"epoch": epoch, "lr": lr},
+                 train_stats=self.train_stats,
+                 valid_stats=stage_stats,
+             )
+             self.checkpointer.save_and_keep_only(
+                 meta={"WER": stage_stats["WER"]},
+                 min_keys=["WER"],
+             )
+         elif stage == sb.Stage.TEST:
+             self.hparams.train_logger.log_stats(
+                 stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
+                 test_stats=stage_stats,
+             )
+             if if_main_process():
+                 with open(self.hparams.test_wer_file, "w") as w:
+                     self.wer_metric.write_stats(w)
+
+     def run_inference(
+         self,
+         dataset,  # Must be obtained from the dataio_function
+         min_key,  # We load the model with the lowest error rate
+         loader_kwargs,  # opts for the dataloading
+     ):
+         """Runs decoding on the given dataset and prints corpus-level WER/CER."""
+
+         # If dataset isn't a DataLoader, we create it.
+         if not isinstance(dataset, DataLoader):
+             loader_kwargs["ckpt_prefix"] = None
+             dataset = self.make_dataloader(
+                 dataset, sb.Stage.TEST, **loader_kwargs
+             )
+
+         self.checkpointer.recover_if_possible(min_key=min_key)
+         self.modules.eval()  # We set the model to eval mode (remove dropout etc.)
+
+         with torch.no_grad():
+             true_labels = []
+             pred_labels = []
+             # for batch in tqdm(dataset, dynamic_ncols=True):
+
+             for batch in dataset:
+                 # Make sure that your compute_forward returns the predictions !!!
+                 # In the case of the template, when stage = TEST, a beam search is applied
+                 # in compute_forward().
+
+                 tokens, tokens_lens = batch.tokens
+                 log_probs, predictions, wav_lens = self.compute_forward(batch, stage=sb.Stage.TEST)
+                 pred_batch = []
+                 predicted_words = []
+
+                 # Decode token terms to words
+                 predicted_words = [self.tokenizer.decode(token, skip_special_tokens=True).strip() for token in predictions]
+                 # predicted_words = [tokenizer.decode(pred) for pred in predictions]
+                 # labels = [tokenizer.decode(trn) for trn in batch.tokens_list]
+
+                 # Convert indices to words
+                 target_words = undo_padding(tokens, tokens_lens)
+                 target_words = self.tokenizer.batch_decode(target_words, skip_special_tokens=True)
+
+                 # if hasattr(self.hparams, "normalized_transcripts"):
+                 #     predicted_words = [tokenizer.normalize(text) for text in predicted_words]
+                 #     target_words = [tokenizer.normalize(text) for text in target_words]
+
+                 for sent in predicted_words:
+                     # Filter word-level repetitions in each hypothesis (see filter_repetitions below)
+                     sent = filter_repetitions(sent.split(" "), 3)
+                     sent = " ".join(sent)
+                     pred_batch.append(sent)
+
+                 # if len(pred_batch[0].split()) > 50:
+                 #     continue
+                 # NOTE: only the first utterance of each batch is scored, so the test
+                 # dataloader is assumed to use a batch size of 1.
+                 pred_labels.append(pred_batch[0])
+                 true_labels.append(target_words[0])
+
+                 # print("True: ", batch.transcript[0])
+                 # print("Pred: ", pred_batch[0])
+                 # with open("predictions/predictions_arhiv.txt", "a") as f:
+                 #     f.write("True: " + batch.transcript[0] + "\n")
+                 #     f.write("Pred: " + pred_batch[0] + "\n\n")
+
+                 # NOTE: recap_tokenizer and recap_model must be defined (e.g. loaded in
+                 # __main__) before restore_capitalization can be enabled.
+                 if self.hparams.restore_capitalization:
+                     inputs = recap_tokenizer(["restore capitalization and punctuation: " + pred_batch[0]], return_tensors="pt", padding=True).to(self.device)
+                     outputs = recap_model.generate(**inputs, max_length=1024, num_beams=5, early_stopping=True).squeeze(0)
+                     pred_batch[0] = recap_tokenizer.decode(outputs, skip_special_tokens=True)
+
+                 # print("True: ", target_words[0])
+                 # print("Pred: ", pred_batch[0])
+                 # print('WER: ', wer(target_words, pred_batch[0]) * 100)
+                 # print("\n")
+
+                 # with open("predictions/predictions_eaz.txt", "a") as f:
+                 #     f.write(str(batch.id[0]) + "\t" + pred_batch[0] + "\n")
+
+         print('WER: ', wer(true_labels, pred_labels) * 100)
+         print('CER: ', cer(true_labels, pred_labels) * 100)
+
+
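+ # filter_repetitions drops repeated n-grams from a decoded word sequence, a common
+ # failure mode of Whisper-style decoding on long or noisy segments. For every n-gram
+ # length n it keeps at most max(max_repetition_length // n, 1) consecutive repeats and
+ # removes the rest, e.g. filter_repetitions(["a", "a", "a", "a", "a"], 2) -> ["a", "a"].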
+ def filter_repetitions(seq, max_repetition_length):
+     seq = list(seq)
+     output = []
+     max_n = len(seq) // 2
+     for n in range(max_n, 0, -1):
+         max_repetitions = max(max_repetition_length // n, 1)
+         # Don't need to iterate over impossible n values:
+         # len(seq) can change a lot during iteration
+         if (len(seq) <= n*2) or (len(seq) <= max_repetition_length):
+             continue
+         iterator = enumerate(seq)
+         # Fill first buffers:
+         buffers = [[next(iterator)[1]] for _ in range(n)]
+         for seq_index, token in iterator:
+             current_buffer = seq_index % n
+             if token != buffers[current_buffer][-1]:
+                 # No repeat, we can flush some tokens
+                 buf_len = sum(map(len, buffers))
+                 flush_start = (current_buffer - buf_len) % n
+                 # Keep n-1 tokens, but possibly mark some for removal
+                 for flush_index in range(buf_len - buf_len % n):
+                     if (buf_len - flush_index) > n - 1:
+                         to_flush = buffers[(flush_index + flush_start) % n].pop(0)
+                     else:
+                         to_flush = None
+                     # Here, repetitions get removed:
+                     if (flush_index // n < max_repetitions) and to_flush is not None:
+                         output.append(to_flush)
+                     elif (flush_index // n >= max_repetitions) and to_flush is None:
+                         output.append(to_flush)
+             buffers[current_buffer].append(token)
+         # At the end, final flush
+         current_buffer += 1
+         buf_len = sum(map(len, buffers))
+         flush_start = (current_buffer - buf_len) % n
+         for flush_index in range(buf_len):
+             to_flush = buffers[(flush_index + flush_start) % n].pop(0)
+             # Here, repetitions just get removed:
+             if flush_index // n < max_repetitions:
+                 output.append(to_flush)
+         seq = []
+         to_delete = 0
+         for token in output:
+             if token is None:
+                 to_delete += 1
+             elif to_delete > 0:
+                 to_delete -= 1
+             else:
+                 seq.append(token)
+         output = []
+     return seq
+
+
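+ # The JSON manifests loaded below (train_dev.json, test_all.json, test_eaz.json) are
+ # assumed to follow the usual SpeechBrain layout, mapping each utterance id to the
+ # fields used by the pipelines, roughly:
+ #   {"utt_0001": {"data_path": "{data_root}/clips/utt_0001.wav",
+ #                 "duration": 3.42,
+ #                 "transcript": "..."}}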
+ def dataio_prepare(hparams, tokenizer):
+     """This function prepares the datasets to be used in the brain class.
+     It also defines the data processing pipeline through user-defined functions.
+     """
+     data_folder = hparams["data_folder"]
+
+     train_data = sb.dataio.dataset.DynamicItemDataset.from_json(
+         json_path=os.path.join(hparams["data_folder"], "train_dev.json"),
+         replacements={"data_root": data_folder},
+     )
+     train_data = train_data.filtered_sorted(sort_key="duration")
+     hparams["train_dataloader_opts"]["shuffle"] = False
+
+     valid_data = sb.dataio.dataset.DynamicItemDataset.from_json(
+         json_path=os.path.join(hparams["data_folder"], "test_all.json"),
+         replacements={"data_root": data_folder},
+     )
+     valid_data = valid_data.filtered_sorted(sort_key="duration")
+
+     test_data = sb.dataio.dataset.DynamicItemDataset.from_json(
+         json_path=os.path.join(hparams["data_folder"], "test_eaz.json"),
+         replacements={"data_root": data_folder},
+     )
+
+     datasets = [train_data, valid_data, test_data]
+
+     # 2. Define audio pipeline:
+     @sb.utils.data_pipeline.takes("data_path")
+     @sb.utils.data_pipeline.provides("sig")
+     def audio_pipeline(data_path):
+         info = torchaudio.info(data_path)
+         sig = sb.dataio.dataio.read_audio(data_path)
+         if info.sample_rate != hparams["sample_rate"]:
+             sig = torchaudio.transforms.Resample(info.sample_rate, hparams["sample_rate"])(sig)
+         return sig
+
+     sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
+
+     # 3. Define text pipeline:
+     @sb.utils.data_pipeline.takes("transcript")
+     @sb.utils.data_pipeline.provides("transcript", "tokens_list", "tokens_bos", "tokens_eos", "tokens")
+     def text_pipeline(transcript):
+         # if hasattr(hparams, "normalized_transcripts"):
+         #     transcript = tokenizer.normalize(transcript)
+         yield transcript
+         tokens_list = tokenizer.encode(transcript, add_special_tokens=False)
+         yield tokens_list
+         tokens_list = tokenizer.build_inputs_with_special_tokens(tokens_list)
+         tokens_bos = torch.LongTensor(tokens_list[:-1])
+         yield tokens_bos
+         tokens_eos = torch.LongTensor(tokens_list[1:])
+         yield tokens_eos
+         tokens = torch.LongTensor(tokens_list)
+         yield tokens
+
+     sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
+
+     # 4. Set output:
+     sb.dataio.dataset.set_output_keys(
+         datasets,
+         ["id", "sig", "tokens_list", "tokens_bos", "tokens_eos", "tokens"],
+     )
+
+     return train_data, valid_data, test_data
+
+
+ if __name__ == "__main__":
+     # CLI:
+     hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
+
+     # Create ddp_group with the right communication protocol
+     sb.utils.distributed.ddp_init_group(run_opts)
+
+     with open(hparams_file) as fin:
+         hparams = load_hyperpyyaml(fin, overrides)
+
+     # Create experiment directory
+     sb.create_experiment_directory(
+         experiment_directory=hparams["output_folder"],
+         hyperparams_to_save=hparams_file,
+         overrides=overrides,
+     )
+
+     # Defining tokenizer and loading it
+     tokenizer = hparams["whisper"].tokenizer
+
+     # Here we create the dataset objects as well as tokenization and encoding
+     train_data, valid_data, test_data = dataio_prepare(hparams, tokenizer)
+
+     # Trainer initialization
+     asr_brain = ASR(
+         modules=hparams["modules"],
+         hparams=hparams,
+         run_opts=run_opts,
+         checkpointer=hparams["checkpointer"],
+         opt_class=hparams["whisper_opt_class"],
+     )
+
+     # We load the pretrained Whisper model
+     if "pretrainer" in hparams.keys():
+         hparams["pretrainer"].collect_files()
+         hparams["pretrainer"].load_collected(asr_brain.device)
+
+     # We dynamically add the tokenizer to our brain class.
+     # NB: This tokenizer corresponds to the one used for Whisper.
+     asr_brain.tokenizer = tokenizer
+
+
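+     # run_inference references recap_tokenizer and recap_model, which this script never
+     # defines. A minimal loading sketch is given below, assuming a seq2seq (T5-style)
+     # model that restores capitalization and punctuation; the checkpoint name is a
+     # placeholder, not necessarily the one used for this repo.
+     if hparams.get("restore_capitalization", False):
+         from transformers import AutoModelForSeq2SeqLM
+
+         recap_checkpoint = "path/or/name-of-recapitalization-model"  # placeholder
+         recap_tokenizer = AutoTokenizer.from_pretrained(recap_checkpoint)
+         recap_model = AutoModelForSeq2SeqLM.from_pretrained(recap_checkpoint).to(asr_brain.device)
+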
+     # Training/validation loop
+     if not hparams["skip_training"]:
+         print("Training...")
+         # Training
+         asr_brain.fit(
+             asr_brain.hparams.epoch_counter,
+             train_data,
+             valid_data,
+             train_loader_kwargs=hparams["train_dataloader_opts"],
+             valid_loader_kwargs=hparams["valid_dataloader_opts"],
+         )
+
+     else:
+         # Evaluate on the test set with the best (lowest-WER) checkpoint
+         print("Evaluating")
+         asr_brain.run_inference(test_data, "WER", hparams["test_dataloader_opts"])
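The script is a standard SpeechBrain entry point, so it would typically be launched with a hyperparameter YAML plus optional overrides; the YAML name below is only an assumed example:

python train.py hparams/train_whisper.yaml --data_folder=/path/to/data --skip_training=False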