Update README.md
README.md CHANGED
@@ -23,7 +23,7 @@ model-index:
     metrics:
     - name: Test WER
      type: wer
-      value:
+      value: 22.82
 ---
 
 # Wav2Vec2-Large-XLSR-53-lg
@@ -79,6 +79,7 @@ import torchaudio
 from datasets import load_dataset, load_metric
 from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
 import re
+import unidecode
 
 test_dataset = load_dataset("common_voice", "lg", split="test")
 wer = load_metric("wer")
@@ -87,21 +88,29 @@ processor = Wav2Vec2Processor.from_pretrained("lucio/wav2vec2-large-xlsr-luganda
 model = Wav2Vec2ForCTC.from_pretrained("lucio/wav2vec2-large-xlsr-luganda")
 model.to("cuda")
 
-chars_to_ignore_regex = '[
+chars_to_ignore_regex = '[\[\],?.!;:%"“”(){}‟ˮʺ″«»/…‽�–]'
 resampler = torchaudio.transforms.Resample(48_000, 16_000)
 
 # Preprocessing the datasets.
 # We need to read the audio files as arrays
 def speech_file_to_array_fn(batch):
-    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
     speech_array, sampling_rate = torchaudio.load(batch["path"])
     batch["speech"] = resampler(speech_array).squeeze().numpy()
     return batch
 
+def remove_special_characters(batch):
+    # word-internal apostrophes are marking contractions
+    batch["norm_text"] = re.sub(r'[‘’´`]', r"'", batch["sentence"])
+    # most other punctuation is ignored
+    batch["norm_text"] = re.sub(chars_to_ignore_regex, "", batch["norm_text"]).lower().strip()
+    batch["norm_text"] = re.sub(r"(-|' | '| +)", " ", batch["norm_text"])
+    # remove accents from a few characters (from loanwords, not tones)
+    batch["norm_text"] = unidecode.unidecode(batch["norm_text"])
+    return batch
+
 test_dataset = test_dataset.map(speech_file_to_array_fn)
+test_dataset = test_dataset.map(remove_special_characters)
 
-# Preprocessing the datasets.
-# We need to read the audio files as arrays
 def evaluate(batch):
     inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
 
@@ -114,13 +123,13 @@ def evaluate(batch):
 
 result = test_dataset.map(evaluate, batched=True, batch_size=8)
 
-print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
+print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["norm_text"])))
 ```
 
-**Test Result**:
+**Test Result**: 22.82 %
 
 ## Training
 
-The Common Voice `train`, `validation` and `other` datasets were used for training,
+The Common Voice `train`, `validation` and `other` datasets were used for training, augmented to twice the original size with added noise and manipulated pitch, phase and intensity.
 
 The script used for training was just the `run_finetuning.py` script provided in OVHcloud's databuzzword/hf-wav2vec image.