---
language: zh
datasets:
- common_voice
tags:
- audio
- automatic-speech-recognition
- speech
- xlsr-fine-tuning-week
license: apache-2.0
model-index:
- name: XLSR Wav2Vec2 Chinese (Taiwan) by Voidful
  results:
  - task:
      name: Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: Common Voice zh-TW
      type: common_voice
      args: zh-TW
    metrics:
    - name: Test CER
      type: cer
      value: 16.41
---

# Wav2Vec2-Large-XLSR-53-tw-gpt

Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on zh-TW using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset.

When using this model, make sure that your speech input is sampled at 16 kHz.

## Usage

[Colab trial](https://colab.research.google.com/drive/1e_z5jQHYbO2YKEaUgzb1ww1WwiAyydAj?usp=sharing)

```python
import torchaudio
from datasets import load_dataset, load_metric
from transformers import (
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
    AutoTokenizer,
    AutoModelWithLMHead,
)
import torch
import re
import sys

model_name = "voidful/wav2vec2-large-xlsr-53-tw-gpt"
device = "cuda"
processor_name = "voidful/wav2vec2-large-xlsr-53-tw-gpt"

chars_to_ignore_regex = r"[¥•"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、 、〃〈〉《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏﹑﹔·'℃°•·.﹑︰〈〉─《﹖﹣﹂﹁﹔!?。。"#$%&'()*+,﹐-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏..!\\"#$%&()*+,\\-.\\:;<=>?@\\[\\]\\\\\\/^_`{|}~]"

model = Wav2Vec2ForCTC.from_pretrained(model_name).to(device)
processor = Wav2Vec2Processor.from_pretrained(processor_name)

tokenizer = AutoTokenizer.from_pretrained("ckiplab/gpt2-base-chinese")
gpt_model = AutoModelWithLMHead.from_pretrained("ckiplab/gpt2-base-chinese").to(device)

resampler = torchaudio.transforms.Resample(orig_freq=48_000, new_freq=16_000)

def load_file_to_data(file):
    batch = {}
    speech, _ = torchaudio.load(file)
    batch["speech"] = resampler.forward(speech.squeeze(0)).numpy()
    batch["sampling_rate"] = resampler.new_freq
    return batch

def predict(data):
    features = processor(data["speech"], sampling_rate=data["sampling_rate"], padding=True, return_tensors="pt")
    input_values = features.input_values.to(device)
    attention_mask = features.attention_mask.to(device)
    with torch.no_grad():
        logits = model(input_values, attention_mask=attention_mask).logits
    decoded_results = []
    for logit in logits:
        pred_ids = torch.argmax(logit, dim=-1)
        # keep only the non-blank frames and renormalize the acoustic probabilities
        mask = pred_ids.ge(1).unsqueeze(-1).expand(logit.size())
        vocab_size = logit.size()[-1]
        voice_prob = torch.nn.functional.softmax(torch.masked_select(logit, mask).view(-1, vocab_size), dim=-1)
        # score the same positions with the Chinese GPT-2 language model
        gpt_input = torch.cat((torch.tensor([tokenizer.cls_token_id]).to(device), pred_ids[pred_ids > 0]), 0)
        gpt_prob = torch.nn.functional.softmax(gpt_model(gpt_input).logits, dim=-1)[:voice_prob.size()[0], :]
        # final prediction: argmax of the element-wise product of the two distributions
        comb_pred_ids = torch.argmax(gpt_prob * voice_prob, dim=-1)
        decoded_results.append(processor.decode(comb_pred_ids))
    return decoded_results
```

Predict:

```python
predict(load_file_to_data('voice file path'))
```
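The `predict` function above is a simple shallow-fusion decoder: the non-blank CTC frames give an acoustic distribution over the vocabulary, the CTC argmax tokens are re-scored with `ckiplab/gpt2-base-chinese`, and the final token at each position is the argmax of the element-wise product of the two distributions. The snippet below is only a toy sketch of that combination step with random stand-in tensors (the shapes and the 21128 vocabulary size are assumptions for illustration, not read from the model):

```python
import torch

# Toy stand-ins for the real tensors inside predict(); values are random.
seq_len, vocab_size = 5, 21128  # assumed vocabulary size of the Chinese GPT-2 tokenizer
voice_prob = torch.softmax(torch.randn(seq_len, vocab_size), dim=-1)  # acoustic probabilities per kept frame
gpt_prob = torch.softmax(torch.randn(seq_len, vocab_size), dim=-1)    # language-model probabilities for the same positions

# A token is chosen only if it is plausible under both the acoustic model and the LM.
comb_pred_ids = torch.argmax(gpt_prob * voice_prob, dim=-1)
print(comb_pred_ids.shape)  # torch.Size([5])
```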
## Evaluation

The model can be evaluated as follows on the zh-TW test data of Common Voice.
The CER calculation follows [ctl/wav2vec2-large-xlsr-cantonese](https://huggingface.co/ctl/wav2vec2-large-xlsr-cantonese).

```python
!mkdir cer
!wget -O cer/cer.py https://huggingface.co/ctl/wav2vec2-large-xlsr-cantonese/raw/main/cer.py
!pip install jiwer

import torchaudio
from datasets import load_dataset, load_metric
from transformers import (
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
)
import torch
import re
import sys

model_name = "voidful/wav2vec2-large-xlsr-53-tw-gpt"
device = "cuda"
processor_name = "voidful/wav2vec2-large-xlsr-53-tw-gpt"

chars_to_ignore_regex = r"[¥•"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、 、〃〈〉《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏﹑﹔·'℃°•·.﹑︰〈〉─《﹖﹣﹂﹁﹔!?。。"#$%&'()*+,﹐-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏..!\\"#$%&()*+,\\-.\\:;<=>?@\\[\\]\\\\\\/^_`{|}~]"

model = Wav2Vec2ForCTC.from_pretrained(model_name).to(device)
processor = Wav2Vec2Processor.from_pretrained(processor_name)

ds = load_dataset("common_voice", 'zh-TW', split="test")

resampler = torchaudio.transforms.Resample(orig_freq=48_000, new_freq=16_000)

def map_to_array(batch):
    speech, _ = torchaudio.load(batch["path"])
    batch["speech"] = resampler.forward(speech.squeeze(0)).numpy()
    batch["sampling_rate"] = resampler.new_freq
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower().replace("’", "'")
    return batch

ds = ds.map(map_to_array)

def map_to_pred(batch):
    features = processor(batch["speech"], sampling_rate=batch["sampling_rate"][0], padding=True, return_tensors="pt")
    input_values = features.input_values.to(device)
    attention_mask = features.attention_mask.to(device)
    with torch.no_grad():
        logits = model(input_values, attention_mask=attention_mask).logits
    # plain greedy CTC decoding, no language model
    pred_ids = torch.argmax(logits, dim=-1)
    batch["predicted"] = processor.batch_decode(pred_ids)
    batch["target"] = batch["sentence"]
    return batch

result = ds.map(map_to_pred, batched=True, batch_size=16, remove_columns=list(ds.features.keys()))

cer = load_metric("./cer")
print("CER: {:2f}".format(100 * cer.compute(predictions=result["predicted"], references=result["target"])))
```

`CER: 28.734822`
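CER (character error rate) is the character-level edit distance between a prediction and its reference, divided by the reference length; the downloaded `cer.py` script computes it for the evaluation above. Purely as an illustration of the metric, here is a minimal pure-Python sketch (the example strings are made up):

```python
def char_error_rate(prediction: str, reference: str) -> float:
    """Levenshtein distance between the two strings, divided by the reference length."""
    prev = list(range(len(reference) + 1))
    for i, p in enumerate(prediction, start=1):
        cur = [i]
        for j, r in enumerate(reference, start=1):
            cur.append(min(prev[j] + 1,               # deletion
                           cur[j - 1] + 1,            # insertion
                           prev[j - 1] + (p != r)))   # substitution (free if characters match)
        prev = cur
    return prev[-1] / len(reference)

print(char_error_rate("今天天氣很好", "今天天氣真好"))  # 1 substitution / 6 characters ≈ 0.167
```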
## Evaluation with GPT

```python
!mkdir cer
!wget -O cer/cer.py https://huggingface.co/ctl/wav2vec2-large-xlsr-cantonese/raw/main/cer.py
!pip install jiwer

import torchaudio
from datasets import load_dataset, load_metric
from transformers import (
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
    AutoTokenizer,
    AutoModelWithLMHead,
)
import torch
import re
import sys

model_name = "voidful/wav2vec2-large-xlsr-53-tw-gpt"
device = "cuda"
processor_name = "voidful/wav2vec2-large-xlsr-53-tw-gpt"

chars_to_ignore_regex = r"[¥•"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、 、〃〈〉《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏﹑﹔·'℃°•·.﹑︰〈〉─《﹖﹣﹂﹁﹔!?。。"#$%&'()*+,﹐-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏..!\\"#$%&()*+,\\-.\\:;<=>?@\\[\\]\\\\\\/^_`{|}~]"

tokenizer = AutoTokenizer.from_pretrained("ckiplab/gpt2-base-chinese")
gpt_model = AutoModelWithLMHead.from_pretrained("ckiplab/gpt2-base-chinese").to(device)
model = Wav2Vec2ForCTC.from_pretrained(model_name).to(device)
processor = Wav2Vec2Processor.from_pretrained(processor_name)

ds = load_dataset("common_voice", 'zh-TW', data_dir="./cv-corpus-6.1-2020-12-11", split="test")

resampler = torchaudio.transforms.Resample(orig_freq=48_000, new_freq=16_000)

def map_to_array(batch):
    speech, _ = torchaudio.load(batch["path"])
    batch["speech"] = resampler.forward(speech.squeeze(0)).numpy()
    batch["sampling_rate"] = resampler.new_freq
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower().replace("’", "'")
    return batch

ds = ds.map(map_to_array)

def map_to_pred(batch):
    features = processor(batch["speech"], sampling_rate=batch["sampling_rate"][0], padding=True, return_tensors="pt")
    input_values = features.input_values.to(device)
    attention_mask = features.attention_mask.to(device)
    with torch.no_grad():
        logits = model(input_values, attention_mask=attention_mask).logits
    decoded_results = []
    for logit in logits:
        pred_ids = torch.argmax(logit, dim=-1)
        # keep only the non-blank frames and renormalize the acoustic probabilities
        mask = pred_ids.ge(1).unsqueeze(-1).expand(logit.size())
        vocab_size = logit.size()[-1]
        voice_prob = torch.nn.functional.softmax(torch.masked_select(logit, mask).view(-1, vocab_size), dim=-1)
        # score the same positions with the Chinese GPT-2 language model
        gpt_input = torch.cat((torch.tensor([tokenizer.cls_token_id]).to(device), pred_ids[pred_ids > 0]), 0)
        gpt_prob = torch.nn.functional.softmax(gpt_model(gpt_input).logits, dim=-1)[:voice_prob.size()[0], :]
        # final prediction: argmax of the element-wise product of the two distributions
        comb_pred_ids = torch.argmax(gpt_prob * voice_prob, dim=-1)
        decoded_results.append(processor.decode(comb_pred_ids))
    batch["predicted"] = decoded_results
    batch["target"] = batch["sentence"]
    return batch

result = ds.map(map_to_pred, batched=True, batch_size=16, remove_columns=list(ds.features.keys()))

cer = load_metric("./cer")
print("CER: {:2f}".format(100 * cer.compute(predictions=result["predicted"], references=result["target"])))
```

`CER: 25.69`
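Beyond the aggregate CER, it can be helpful to look at individual mistakes. A small follow-up cell such as the one below (assuming the `result` dataset from the evaluation block above is still in memory) prints the first few mismatched prediction/reference pairs:

```python
# Show the first few examples where the prediction differs from the reference.
shown = 0
for pred, target in zip(result["predicted"], result["target"]):
    if pred != target:
        print("pred:  ", pred)
        print("target:", target)
        print("---")
        shown += 1
        if shown >= 5:
            break
```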