# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os

import datasets
from datasets.tasks import AutomaticSpeechRecognition
from tqdm.auto import tqdm

# Citation for this dataset.
_CITATION = """\
@InProceedings{quran:dataset,
    title  = {Quran data},
    author = {Tarteel.io},
    year   = {2022}
}
"""

# Description of the dataset.
_DESCRIPTION = """\
Quran recitation dataset from various Qaris and from Tarteel users.
"""
_HOMEPAGE = "https://huggingface.co/datasets/ashraf-ali/quran-data"

_LICENSE = ["cc-by-sa-4.0"]

# Use "resolve" URLs (raw files) rather than "blob" URLs (HTML pages) so that
# the download manager fetches the actual files.
_BASE_URL = "https://huggingface.co/datasets/ashraf-ali/quran-data/resolve/main/"
# URL template for the audio archives inside the dataset repo
_DATA_URL = _BASE_URL + "{split}/{config}/{config}_{archive_id:06d}.tar"
# URL of the file listing the number of audio archives per split and config
_N_SHARDS_URL = _BASE_URL + "n_shards.json"
# URL template for the metadata manifests inside the dataset repo
_MANIFEST_URL = _BASE_URL + "{split}/{config}.json"
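# For example (values here are illustrative):
#   _DATA_URL.format(split="train", config="qari", archive_id=0)
#   -> ".../train/qari/qari_000000.tar"
# and n_shards.json maps split -> config -> number of archives, e.g.:
#   {"train": {"qari": 2, "user": 2}, "validation": {...}, "test": {...}}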


class QuranDataConfig(datasets.BuilderConfig):
    """BuilderConfig for QuranData."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)


class QuranData(datasets.GeneratorBasedBuilder):
    """Quran recitation dataset."""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        QuranDataConfig(name="qari", version=VERSION, description="Qari quran recitation"),
        QuranDataConfig(name="user", version=VERSION, description="Quran recitation from various users"),
    ]
    DEFAULT_CONFIG_NAME = "qari"
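    # Audio examples are large, so write smaller batches to keep memory usage bounded.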
    DEFAULT_WRITER_BATCH_SIZE = 512

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "quran_id": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "reciter": datasets.Value("string"),
                    "duration_in_seconds": datasets.Value("float32"),
                    "text": datasets.Value("string"),
                }
            ),
            task_templates=[AutomaticSpeechRecognition()],
            homepage=_HOMEPAGE,
            license="/".join(_LICENSE),  # license must be a string
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        if self.config.name == "microset":
            # Demo branch (not reachable with the configs defined above):
            # take only the first data archive.
            url = [_DATA_URL.format(split="train", config="clean", archive_id=0)]
            archive_path = dl_manager.download(url)
            local_extracted_archive_path = (
                dl_manager.extract(archive_path) if not dl_manager.is_streaming else [None]
            )
            manifest_url = _MANIFEST_URL.format(split="train", config="clean_000000")  # train/clean_000000.json
            manifest_path = dl_manager.download_and_extract(manifest_url)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "local_extracted_archive_paths": local_extracted_archive_path,
                        # use iter_archive here to access the files in the TAR archives:
                        "archives": [dl_manager.iter_archive(path) for path in archive_path],
                        "manifest_path": manifest_path,
                    },
                ),
            ]

        n_shards_path = dl_manager.download_and_extract(_N_SHARDS_URL)
        with open(n_shards_path, encoding="utf-8") as f:
            n_shards = json.load(f)

        if self.config.name in ["validation", "test"]:
            # a config named after a split loads only that split
            splits_to_configs = {self.config.name: self.config.name}
        else:
            splits_to_configs = {
                "train": self.config.name,
                "validation": "validation",
                "test": "test",
            }

        audio_urls = {
            split: [
                _DATA_URL.format(split=split, config=config, archive_id=i)
                for i in range(n_shards[split][config])
            ]
            for split, config in splits_to_configs.items()
        }
        audio_archive_paths = dl_manager.download(audio_urls)

        # In non-streaming mode, we extract the archives to have the data locally:
        local_extracted_archive_paths = (
            dl_manager.extract(audio_archive_paths)
            if not dl_manager.is_streaming
            # one placeholder per archive of each split, not one per split
            else {split: [None] * len(audio_archive_paths[split]) for split in splits_to_configs}
        )

        manifest_urls = {
            split: _MANIFEST_URL.format(split=split, config=config)
            for split, config in splits_to_configs.items()
        }
        manifest_paths = dl_manager.download_and_extract(manifest_urls)

        # To access the audio data from the TAR archives using the download manager,
        # we have to use the dl_manager.iter_archive method.
        #
        # This is because dl_manager.download_and_extract cannot stream TAR archives
        # in streaming mode (the files of a TAR archive have to be streamed one by one).
        #
        # The iter_archive method returns an iterable of (path_within_archive, file_obj)
        # for every file in a TAR archive.
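        # For example (the file name here is illustrative):
        #   ("001001.wav", <binary file object>) for each member of the archive.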
        splits_to_names = {
            "train": datasets.Split.TRAIN,
            "validation": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }
        split_generators = []
        for split in splits_to_configs:
            split_generators.append(
                datasets.SplitGenerator(
                    name=splits_to_names[split],
                    gen_kwargs={
                        "local_extracted_archive_paths": local_extracted_archive_paths[split],
                        # use iter_archive here to access the files in the TAR archives:
                        "archives": [dl_manager.iter_archive(path) for path in audio_archive_paths[split]],
                        "manifest_path": manifest_paths[split],
                    },
                )
            )
        return split_generators

    def _generate_examples(self, local_extracted_archive_paths, archives, manifest_path):
        meta = {}
        with open(manifest_path, "r", encoding="utf-8") as f:
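            # Each manifest line is a JSON object holding an audio document id and
            # parallel lists of transcriptions, file names, and durations:
            #   {"audio_document_id": ..., "training_data": {"label": [...], "name": [...], "duration_ms": [...]}}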
            for line in tqdm(f, desc="reading metadata file"):
                sample_meta = json.loads(line)
                _id = sample_meta["audio_document_id"]
                texts = sample_meta["training_data"]["label"]
                audio_filenames = sample_meta["training_data"]["name"]
                durations = sample_meta["training_data"]["duration_ms"]
                for audio_filename, text, duration in zip(audio_filenames, texts, durations):
                    # strip a leading "./" prefix; str.lstrip("./") would also strip
                    # legitimate leading dots and slashes
                    if audio_filename.startswith("./"):
                        audio_filename = audio_filename[2:]
                    meta[audio_filename] = {
                        "audio_document_id": _id,
                        "text": text,
                        "duration_ms": duration,
                    }

        for local_extracted_archive_path, archive in zip(local_extracted_archive_paths, archives):
            # Here we iterate over all the files within the TAR archive:
            for audio_filename, audio_file in archive:
                if audio_filename.startswith("./"):
                    audio_filename = audio_filename[2:]
                # If an audio file exists locally (i.e. in default, non-streaming mode), set the full
                # path to it by joining the directory the archive was extracted to and the audio filename.
                path = (
                    os.path.join(local_extracted_archive_path, audio_filename)
                    if local_extracted_archive_path
                    else audio_filename
                )
                # The yielded keys must match the features declared in _info: the manifest's
                # audio_document_id is assumed to correspond to quran_id, the reciter is not
                # recorded in the manifest, and durations are converted from ms to seconds.
                yield audio_filename, {
                    "id": audio_filename,
                    "quran_id": meta[audio_filename]["audio_document_id"],
                    "audio": {"path": path, "bytes": audio_file.read()},
                    "reciter": None,
                    "duration_in_seconds": meta[audio_filename]["duration_ms"] / 1000.0,
                    "text": meta[audio_filename]["text"],
                }
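

if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the loading script's API):
    # stream a few examples of the default "qari" config directly from this script.
    ds = datasets.load_dataset(__file__, "qari", split="train", streaming=True)
    for example in ds.take(3):
        print(example["id"], example["duration_in_seconds"], example["text"])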