yangwang825 committed
Commit
47c2e51
1 Parent(s): 9debce6

Create feature_extraction_xvector.py

Files changed (1)
  1. feature_extraction_xvector.py +129 -0
feature_extraction_xvector.py ADDED
@@ -0,0 +1,129 @@
import numpy as np
from typing import List, Optional, Union
from transformers.feature_extraction_sequence_utils import SequenceFeatureExtractor
from transformers.feature_extraction_utils import BatchFeature
from transformers.utils import PaddingStrategy, TensorType, logging

logger = logging.get_logger(__name__)


class XvectorFeatureExtractor(SequenceFeatureExtractor):
    """
    Feature extractor for x-vector speaker embedding models. It batches, pads/truncates raw mono
    speech waveforms and optionally applies zero-mean, unit-variance normalization.
    """

    model_input_names = ["input_values", "attention_mask"]

    def __init__(
        self,
        feature_size=1,
        sampling_rate=16000,
        padding_value=0.0,
        return_attention_mask=False,
        do_normalize=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    @staticmethod
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: Optional[List[np.ndarray]], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """
        Normalize every array in the list to zero mean and unit variance. If an attention mask is
        given, statistics are computed over the unpadded samples only and padded positions are
        reset to `padding_value`.
        """
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        # always return a batch
        if not is_batched:
            raw_speech = [raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_values": raw_speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # convert input values to float32
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to int32
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
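
Usage note: the class follows the standard Hugging Face `SequenceFeatureExtractor` calling convention. The sketch below is a hypothetical example, not part of this commit; it assumes the file is importable as `feature_extraction_xvector` and that 16 kHz mono waveforms are passed in, and it shows batching, padding to the longest example, and the returned attention mask.

# Hypothetical usage sketch (assumption: this module is importable as `feature_extraction_xvector`).
import numpy as np
from feature_extraction_xvector import XvectorFeatureExtractor

feature_extractor = XvectorFeatureExtractor(
    feature_size=1,
    sampling_rate=16000,
    padding_value=0.0,
    do_normalize=True,
    return_attention_mask=True,
)

# Two mono waveforms of different lengths (1.0 s and 0.5 s at 16 kHz).
waveforms = [
    np.random.randn(16000).astype(np.float32),
    np.random.randn(8000).astype(np.float32),
]

inputs = feature_extractor(
    waveforms,
    sampling_rate=16000,          # must match the extractor's sampling_rate
    padding=True,                 # pad to the longest waveform in the batch
    return_attention_mask=True,   # needed for mask-aware normalization of padded inputs
    return_tensors="np",
)

print(inputs["input_values"].shape)    # (2, 16000)
print(inputs["attention_mask"].shape)  # (2, 16000)

Requesting the attention mask matters when padding is enabled: `zero_mean_unit_var_norm` then computes the mean and variance over the real samples only and resets the padded tail to `padding_value`.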