Sin2pi committed
Commit a222a2c
1 Parent(s): f00a982

Upload asr_collator_hf.py

Files changed (1)
  1. asr_collator_hf.py +98 -0
asr_collator_hf.py ADDED
@@ -0,0 +1,98 @@
from dataclasses import dataclass, field
from typing import Any, Dict, List, Union
import random

import torch
import torchaudio


@dataclass
class DataCollatorSpeechSeq2SeqWithPadding:
    processor: Any
    decoder_start_token_id: int
    apply_augmentation: bool = False
    n_fft_choices: List[int] = field(default_factory=lambda: [400, 800, 1024])
    hop_length_choices: List[int] = field(default_factory=lambda: [160, 320, 512])
    apply_noise_injection: bool = False  # Toggle for adaptive noise injection
    noise_profiles: List[str] = field(default_factory=lambda: ['white', 'pink', 'environmental'])  # Example noise profiles

    def add_adaptive_noise(self, audio, noise_type='white', base_intensity=0.005):
        # Scale the noise intensity with the mean amplitude of the clip
        amplitude = audio.abs().mean()
        noise_intensity = base_intensity * amplitude

        noise = torch.randn_like(audio) * noise_intensity
        if noise_type == 'pink':
            # Approximate a colored-noise profile by filtering the white noise
            noise = torchaudio.functional.highpass_biquad(noise, sample_rate=16000, cutoff_freq=200)
        elif noise_type == 'environmental':
            # Load an example environmental noise file, mix it down to mono,
            # and stretch it to the length of the input clip
            noise, _ = torchaudio.load('environmental_noise.wav')
            noise = noise.mean(dim=0)
            noise = torch.nn.functional.interpolate(
                noise.view(1, 1, -1), size=audio.shape[-1]
            ).squeeze() * noise_intensity
        return audio + noise

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        input_features = []
        labels_list = []
        dec_input_features = []

        for feature in features:
            audio = torch.as_tensor(feature["input_features"], dtype=torch.float32)
            if self.apply_augmentation:
                # Randomly select n_fft and hop_length for augmentation
                n_fft = random.choice(self.n_fft_choices)
                hop_length = random.choice(self.hop_length_choices)
                if self.apply_noise_injection:
                    noise_type = random.choice(self.noise_profiles)
                    audio = self.add_adaptive_noise(audio, noise_type=noise_type)
            else:
                # Use default values if augmentation is not applied
                n_fft = 1024
                hop_length = 512

            # Apply a MelSpectrogram transformation with the selected parameters
            mel_spectrogram = torchaudio.transforms.MelSpectrogram(
                sample_rate=16000,  # Sample rate is assumed; update if necessary
                n_fft=n_fft,
                hop_length=hop_length,
                n_mels=80
            )(audio)

            log_mel_spectrogram = torch.log(mel_spectrogram + 1e-9)
            input_features.append({"input_features": log_mel_spectrogram})

            # Tokenize the transcript and build the shifted decoder inputs/targets
            label = feature["labels"]
            label_tokens = (
                [self.processor.tokenizer.bos_token_id]
                + self.processor.tokenizer.encode(label)
                + [self.processor.tokenizer.eos_token_id]
            )
            dec_input_feature = label_tokens[:-1]
            label = label_tokens[1:]

            labels_list.append({"input_ids": label})
            dec_input_features.append({"input_ids": dec_input_feature})

        batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt")
        labels_batch = self.processor.tokenizer.pad(labels_list, return_tensors="pt")
        dec_input_batch = self.processor.tokenizer.pad(dec_input_features, return_tensors="pt")

        # Replace padding in the targets with -100 so it is ignored by the loss
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        if (labels[:, 0] == self.decoder_start_token_id).all().cpu().item():
            labels = labels[:, 1:]
        batch["labels"] = labels

        dec_input_features = dec_input_batch["input_ids"]
        if (dec_input_features[:, 0] == self.decoder_start_token_id).all().cpu().item():
            dec_input_features = dec_input_features[:, 1:]
        batch["dec_input_features"] = dec_input_features

        return batch

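# A minimal sketch of the input this collator appears to expect, judging from how
# __call__ reads each feature: a raw waveform under "input_features" and a transcript
# string under "labels". The values below are illustrative placeholders only.
dataset = [
    {"input_features": torch.randn(16000), "labels": "hello world"},      # ~1 s of fake audio at 16 kHz
    {"input_features": torch.randn(24000), "labels": "testing one two"},  # ~1.5 s of fake audio
]
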
# example usage

data_collator = DataCollatorSpeechSeq2SeqWithPadding(
    processor=processor,
    decoder_start_token_id=model.config.decoder_start_token_id,
    apply_augmentation=True,    # Enable augmentation
    apply_noise_injection=True  # Enable adaptive noise injection
)

dataloader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=True, collate_fn=data_collator)

for batch in dataloader:
    # Pass the batch to your model
    outputs = model(batch)
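
One way to sanity-check the collator on its own (assuming `processor`, `model`, and `dataset` are defined as above) is to pull a single batch and inspect the tensors it produces; the exact feature shape depends on the processor's feature extractor:

batch = next(iter(dataloader))
print(batch["input_features"].shape)      # padded log-mel features
print(batch["labels"].shape)              # target ids, padding replaced with -100
print(batch["dec_input_features"].shape)  # shifted decoder input ids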