# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Leading and Trailing Silences Removed Large Nepali ASR Dataset"""

import csv
import os

import datasets
from datasets.tasks import AutomaticSpeechRecognition


_DESCRIPTION = """\
This data set contains transcribed audio data for Nepali. The data set consists of flac files, and a TSV file.
The file utt_spk_text.tsv contains a FileID, anonymized UserID and the transcription of audio in the file.
The data set has been manually quality checked, but there might still be errors.
The audio files are sampled at rate of 16KHz, and leading and trailing silences are trimmed using torchaudio's
voice activity detection.
"""

# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
_URL = "https://huggingface.co/datasets/SumitMdhr/ASR/resolve/main/"
_URLS = {
    "zipfile": _URL + "CLEAN_DATA.zip",
    "index_file": _URL + "metadata1.tsv",
}


class ASR_NEPALI(datasets.GeneratorBasedBuilder):
    """End Silences Removed Large Nepali ASR Dataset"""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return dataset metadata: features (utterance id, transcription, audio) and task template."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "utterance_id": datasets.Value("string"),
                    "transcription": datasets.Value("string"),
                    "utterance": datasets.Audio(),
                }
            ),
            homepage="https://www.openslr.org/54/",
            # BUGFIX: the original declared a QuestionAnsweringExtractive template
            # with question/context/answers columns that do not exist in `features`.
            # This is a speech dataset, so declare the matching ASR task template
            # bound to the columns that are actually defined above.
            task_templates=[
                AutomaticSpeechRecognition(
                    audio_column="utterance",
                    transcription_column="transcription",
                )
            ],
        )

    def _split_generators(self, dl_manager):
        """Download the TSV index and the audio archive; expose a single TRAIN split.

        `index_file` is the path to the downloaded metadata TSV; `audio_paths` is
        the root directory of the extracted audio archive.
        """
        index_file = dl_manager.download(_URLS["index_file"])
        audio_paths = dl_manager.download_and_extract(_URLS["zipfile"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "index_file": index_file,
                    "audio_paths": audio_paths,
                },
            ),
        ]

    def _generate_examples(self, index_file, audio_paths):
        """Yield (key, example) pairs by joining each TSV row with its audio file path."""
        with open(index_file, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter="\t")
            for key, row in enumerate(reader):
                # Audio files live under CLEAN_DATA/ inside the extracted archive
                # and are named by their utterance_id (per the TSV index).
                path = os.path.join(audio_paths, "CLEAN_DATA", row["utterance_id"])
                yield key, {
                    "utterance_id": row["utterance_id"],
                    "utterance": path,
                    "transcription": row["transcription"],
                }