|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Leading and Trailing Silences Removed Large Nepali ASR Dataset""" |
|
|
|
import os |
|
import csv |
|
|
|
import datasets |
|
|
|
|
|
# BibTeX citation for the original OpenSLR 54 corpus (SLTU 2018 paper).
_CITATION = """\

@inproceedings{kjartansson-etal-sltu2018,

title = {{Crowd-Sourced Speech Corpora for Javanese, Sundanese, Sinhala, Nepali, and Bangladeshi Bengali}},

author = {Oddur Kjartansson and Supheakmungkol Sarin and Knot Pipatsrisawat and Martin Jansche and Linne Ha},

booktitle = {Proc. The 6th Intl. Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU)},

year = {2018},

address = {Gurugram, India},

month = aug,

pages = {52--55},

URL = {http://dx.doi.org/10.21437/SLTU.2018-11}

}

"""


# Human-readable dataset card description shown on the Hub.
_DESCRIPTION = """\

This data set contains transcribed audio data for Nepali. The data set consists of flac files, and a TSV file. The file utt_spk_text.tsv contains a FileID, anonymized UserID and the transcription of audio in the file.

The data set has been manually quality checked, but there might still be errors.

The audio files are sampled at rate of 16KHz, and leading and trailing silences are trimmed using torchaudio's voice activity detection.

"""


# Original corpus homepage (OpenSLR resource 54).
_HOMEPAGE = "https://www.openslr.org/54/"


# License identifier string; the "license:" prefix matches Hub tag syntax.
_LICENSE = "license:cc-by-sa-4.0"


# Base URL of the Hub repository that hosts the cleaned data files.
_URL = "https://huggingface.co/datasets/SumitMdhr/SANT-ASR/resolve/main/"

# Remote files: the audio archive and the transcription index.
# NOTE(review): "metedata1.csv" is the actual remote filename (typo included);
# do not "correct" it or the download will 404.
_URLS = {

"zipfile": _URL + "CLEAN_DATA.zip",

"index_file": _URL + "metedata1.csv",

}
|
|
|
|
|
|
|
class OpenslrNepaliAsrCleaned(datasets.GeneratorBasedBuilder):
    """End Silences Removed Large Nepali ASR Dataset.

    Loading script that downloads a zipped archive of trimmed audio files plus
    a tab-separated index file, and yields one example per index row.
    """

    VERSION = datasets.Version("1.0.0")

    DEFAULT_CONFIG_NAME = "original"

    def _info(self):
        """Return dataset metadata: features, citation, license, and ASR task template."""
        features = datasets.Features(
            {
                "utterance_id": datasets.Value("string"),
                "speaker_id": datasets.Value("string"),
                "utterance": datasets.Audio(),
                "transcription": datasets.Value("string"),
                "num_frames": datasets.Value("int32"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[
                datasets.tasks.AutomaticSpeechRecognition(
                    audio_column="utterance", transcription_column="transcription"
                )
            ],
        )

    def _split_generators(self, dl_manager):
        """Download and extract the data, then define the single TRAIN split.

        Bug fixes vs. the original:
        - ``_URLS["zipfiles"]`` raised ``KeyError`` — the key defined in
          ``_URLS`` is ``"zipfile"``.
        - ``dl_manager.download`` of a single URL returns a single path string,
          so iterating it looped over individual characters; the cleanup now
          handles both a plain string and a list of paths.
        """
        index_file = dl_manager.download(_URLS["index_file"])
        zip_path = dl_manager.download(_URLS["zipfile"])
        audio_paths = dl_manager.extract(zip_path)
        # Delete the downloaded archive(s) to free disk space once extracted.
        paths_to_remove = [zip_path] if isinstance(zip_path, str) else list(zip_path)
        for path in paths_to_remove:
            if os.path.exists(path):
                os.remove(path)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "index_file": index_file,
                    "audio_paths": audio_paths,
                },
            ),
        ]

    def _generate_examples(self, index_file, audio_paths):
        """Yield ``(key, example)`` pairs from the index file.

        Args:
            index_file: local path to the downloaded index; despite its ``.csv``
                name it is read as tab-delimited, matching the original script.
            audio_paths: directory the archive was extracted into; audio files
                live under its ``CLEAN_DATA`` subdirectory, named by utterance id.
        """
        with open(index_file, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter="\t")
            for key, row in enumerate(reader):
                # Audio column receives a file path; datasets.Audio decodes it lazily.
                path = os.path.join(audio_paths, "CLEAN_DATA", row["utterance_id"])
                yield key, {
                    "utterance_id": row["utterance_id"],
                    "speaker_id": row["speaker_id"],
                    "utterance": path,
                    "transcription": row["transcription"],
                    "num_frames": int(row["num_frames"]),
                }
|
|