import json
import os

import datasets
from datasets.tasks import AutomaticSpeechRecognition
from tqdm.auto import tqdm

_CITATION = """\
@InProceedings{quran:dataset,
  title  = {Quran data},
  author = {Tarteel.io},
  year   = {2022}
}
"""

_DESCRIPTION = """\
Quran recitation dataset: recitations by various Qaris and by Tarteel users.
"""

_HOMEPAGE = "https://huggingface.co/datasets/ashraf-ali/quran-data"

_LICENSE = ["cc-by-sa-4.0"]

# Files must be fetched through the `resolve` endpoint; `blob` URLs return the
# Hub's HTML page rather than the raw file.
_BASE_URL = "https://huggingface.co/datasets/ashraf-ali/quran-data/resolve/main/"

_DATA_URL = _BASE_URL + "{split}/{config}/{config}_{archive_id:06d}.tar"

_N_SHARDS_URL = _BASE_URL + "n_shards.json"

_MANIFEST_URL = _BASE_URL + "{split}/{config}.json"
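
# A sketch of the metadata layout this script assumes, inferred from the URL
# templates above and the parsing code in `_generate_examples`; the actual
# files on the Hub are the source of truth:
#
#   n_shards.json            {"train": {"qari": <n>, "user": <n>}, ...}
#   <split>/<config>.json    JSON lines, one source recording per line:
#       {"audio_document_id": "...",
#        "training_data": {"name": ["./<file>.wav", ...],
#                          "label": ["<transcript>", ...],
#                          "duration_ms": [<int>, ...]}}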


class QuranDataConfig(datasets.BuilderConfig):
    """BuilderConfig for QuranData."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)


class QuranData(datasets.GeneratorBasedBuilder):
    """Quran recitation dataset."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        QuranDataConfig(name="qari", version=VERSION,
                        description="Quran recitations by various Qaris"),
        QuranDataConfig(name="user", version=VERSION,
                        description="Quran recitations by Tarteel users"),
    ]

    DEFAULT_CONFIG_NAME = "qari"
    # Audio examples are large; a smaller writer batch keeps memory use in check.
    DEFAULT_WRITER_BATCH_SIZE = 512

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "quran_id": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "reciter": datasets.Value("string"),
                    "duration_in_seconds": datasets.Value("float32"),
                    "text": datasets.Value("string"),
                }
            ),
            task_templates=[AutomaticSpeechRecognition()],
            homepage=_HOMEPAGE,
            license="/".join(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        n_shards_path = dl_manager.download_and_extract(_N_SHARDS_URL)
        with open(n_shards_path, encoding="utf-8") as f:
            n_shards = json.load(f)

        # The train split comes from the selected config ("qari" or "user");
        # the validation and test splits are shared across configs.
        splits_to_configs = {
            "train": self.config.name,
            "validation": "validation",
            "test": "test",
        }

        audio_urls = {
            split: [
                _DATA_URL.format(split=split, config=config, archive_id=i)
                for i in range(n_shards[split][config])
            ]
            for split, config in splits_to_configs.items()
        }
        audio_archive_paths = dl_manager.download(audio_urls)

        # In streaming mode the tar archives are iterated directly, so there
        # are no locally extracted directories to join paths against.
        local_extracted_archive_paths = (
            dl_manager.extract(audio_archive_paths)
            if not dl_manager.is_streaming
            else {split: [None] * len(audio_archive_paths[split]) for split in splits_to_configs}
        )

        manifest_urls = {
            split: _MANIFEST_URL.format(split=split, config=config)
            for split, config in splits_to_configs.items()
        }
        manifest_paths = dl_manager.download_and_extract(manifest_urls)

        splits_to_names = {
            "train": datasets.Split.TRAIN,
            "validation": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }

        split_generators = []
        for split in splits_to_configs:
            split_generators.append(
                datasets.SplitGenerator(
                    name=splits_to_names[split],
                    gen_kwargs={
                        "local_extracted_archive_paths": local_extracted_archive_paths[split],
                        "archives": [dl_manager.iter_archive(path) for path in audio_archive_paths[split]],
                        "manifest_path": manifest_paths[split],
                    },
                )
            )

        return split_generators

    def _generate_examples(self, local_extracted_archive_paths, archives, manifest_path):
        # Build a lookup from audio filename to its transcript and duration.
        meta = {}
        with open(manifest_path, "r", encoding="utf-8") as f:
            for line in tqdm(f, desc="reading metadata file"):
                sample_meta = json.loads(line)
                _id = sample_meta["audio_document_id"]
                texts = sample_meta["training_data"]["label"]
                audio_filenames = sample_meta["training_data"]["name"]
                durations = sample_meta["training_data"]["duration_ms"]
                # The manifest may carry a reciter field; fall back to None if
                # it is absent (assumption, not verified against the data).
                reciter = sample_meta.get("reciter")
                for audio_filename, text, duration in zip(audio_filenames, texts, durations):
                    # Normalize the leading "./" so manifest keys match the
                    # member names yielded by iter_archive.
                    audio_filename = audio_filename.lstrip("./")
                    meta[audio_filename] = {
                        "audio_document_id": _id,
                        "text": text,
                        "reciter": reciter,
                        "duration_ms": duration,
                    }

        for local_extracted_archive_path, archive in zip(local_extracted_archive_paths, archives):
            for audio_filename, audio_file in archive:
                audio_filename = audio_filename.lstrip("./")
                path = (
                    os.path.join(local_extracted_archive_path, audio_filename)
                    if local_extracted_archive_path
                    else audio_filename
                )
                # Keys must match the features declared in `_info`; durations
                # arrive in milliseconds and are converted to seconds here.
                yield audio_filename, {
                    "id": audio_filename,
                    "quran_id": meta[audio_filename]["audio_document_id"],
                    "audio": {"path": path, "bytes": audio_file.read()},
                    "reciter": meta[audio_filename]["reciter"],
                    "duration_in_seconds": meta[audio_filename]["duration_ms"] / 1000.0,
                    "text": meta[audio_filename]["text"],
                }
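

# A minimal smoke test, assuming the shards and manifests are laid out on the
# Hub as the URL templates above expect; streaming avoids downloading every
# shard up front.
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, "qari", split="train", streaming=True)
    example = next(iter(ds))
    print(example["id"], example["duration_in_seconds"], example["text"])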