---
license: cc-by-4.0
language:
- qu
metrics:
- cer
- wer
pipeline_tag: automatic-speech-recognition
---
## Usage
The model can be used directly (without a language model) as follows:
```python
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
import torch
import torchaudio

# load model and processor
processor = Wav2Vec2Processor.from_pretrained("ivangtorre/wav2vec2-xlsr-300m-quechua")
model = Wav2Vec2ForCTC.from_pretrained("ivangtorre/wav2vec2-xlsr-300m-quechua")

# load an audio file (mono, 16 kHz)
file = torchaudio.load("quechua000573.wav")

# retrieve logits
logits = model(file[0]).logits

# take argmax and decode
predicted_ids = torch.argmax(logits, dim=-1)
transcription = processor.batch_decode(predicted_ids)
print("HF prediction: ", transcription)
```
The following snippet shows how to evaluate wav2vec2-xlsr-300m-quechua on the Second Americas NLP 2022 Quechua dev set:
```python
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import torch
from jiwer import cer
import torch.nn.functional as F

quechua_eval = load_dataset("ivangtorre/second_americas_nlp_2022", split="validation")

model = Wav2Vec2ForCTC.from_pretrained("ivangtorre/wav2vec2-xlsr-300m-quechua")
processor = Wav2Vec2Processor.from_pretrained("ivangtorre/wav2vec2-xlsr-300m-quechua")

def map_to_pred(batch):
    wav = batch["audio"][0]["array"]
    feats = torch.from_numpy(wav).float()
    feats = F.layer_norm(feats, feats.shape)  # normalization performed during fine-tuning
    feats = torch.unsqueeze(feats, 0)
    logits = model(feats).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    batch["transcription"] = processor.batch_decode(predicted_ids)
    return batch

result = quechua_eval.map(map_to_pred, batched=True, batch_size=1)

print("CER:", cer(result["source_processed"], result["transcription"]))
```
## Citation
```bibtex
@misc{grosman2021xlsr-1b-russian,
  title={Fine-tuned {XLS-R} 1{B} model for speech recognition in {R}ussian},
  author={Grosman, Jonatas},
  howpublished={\url{https://huggingface.co/jonatasgrosman/wav2vec2-xls-r-1b-russian}},
  year={2022}
}
```