import os

import pandas as pd

from datasets import GeneratorBasedBuilder, DatasetInfo, SplitGenerator, Split, Features, Value, Audio, Version

_URLS = {"train": "https://huggingface.co/datasets/aburnazy/hy_asr_grqaser/resolve/main/data/saquln_uxt_gnac.tar.gz"}


class HyAsrGrqaser(GeneratorBasedBuilder):
    """Armenian Audio-Transcription Dataset"""

    VERSION = Version("1.0.0")

    def _info(self):
        return DatasetInfo(
            description="This dataset contains Armenian speech and transcriptions.",
            features=Features({
                # Audio is decoded on access and resampled to 16 kHz if needed.
                "audio": Audio(sampling_rate=16_000),
                "sentence": Value("string"),
            }),
            supervised_keys=("audio", "sentence"),
        )

    def _split_generators(self, dl_manager):
        """Downloads the metadata CSV and the audio archive, then defines the train split."""
        metadata_path = dl_manager.download_and_extract(
            "https://huggingface.co/datasets/aburnazy/hy_asr_grqaser/resolve/main/metadata.csv"
        )
        # For a dict of URLs, `download_and_extract` returns a dict mapping each key
        # to the path of its extracted directory.
        data_dir = dl_manager.download_and_extract(_URLS)
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={"data_dir": data_dir["train"], "metadata_path": metadata_path},
            ),
        ]

    def _generate_examples(self, data_dir, metadata_path):
        """Yields (key, example) pairs by joining metadata rows with the extracted audio files."""
        metadata = pd.read_csv(metadata_path)
        for idx, row in metadata.iterrows():
            # metadata.csv is expected to provide a `file_name` column (path relative
            # to the extracted archive) and a `transcription` column.
            file_path = os.path.join(data_dir, row["file_name"])
            yield idx, {
                # Passing the local file path lets the Audio feature decode it lazily.
                "audio": file_path,
                "sentence": row["transcription"],
            }
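

# Note on streaming: the builder above extracts the whole archive, which does not work
# with `load_dataset(..., streaming=True)`. A minimal streaming-friendly sketch (an
# assumption, not part of the original script) would iterate over the tar archive with
# `dl_manager.iter_archive` and yield the raw audio bytes, assuming the `file_name`
# values in metadata.csv match the member paths inside the archive:
#
#     def _split_generators(self, dl_manager):
#         archive = dl_manager.download(_URLS["train"])
#         metadata_path = dl_manager.download(
#             "https://huggingface.co/datasets/aburnazy/hy_asr_grqaser/resolve/main/metadata.csv"
#         )
#         return [
#             SplitGenerator(
#                 name=Split.TRAIN,
#                 gen_kwargs={
#                     "audio_files": dl_manager.iter_archive(archive),
#                     "metadata_path": metadata_path,
#                 },
#             ),
#         ]
#
#     def _generate_examples(self, audio_files, metadata_path):
#         metadata = pd.read_csv(metadata_path)
#         transcriptions = dict(zip(metadata["file_name"], metadata["transcription"]))
#         for key, (path, f) in enumerate(audio_files):
#             if path in transcriptions:
#                 yield key, {
#                     "audio": {"path": path, "bytes": f.read()},
#                     "sentence": transcriptions[path],
#                 }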


if __name__ == "__main__":
    # Quick manual check: load the dataset from the Hub and print the first example.
    # Guarded so that importing this script (as `load_dataset` does) does not trigger
    # a recursive download.
    from datasets import load_dataset

    dataset = load_dataset("aburnazy/hy_asr_grqaser")
    print(dataset["train"][0])
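
    # A minimal sketch of inspecting a decoded example (assumes the audio decoding
    # extras are installed, e.g. `pip install datasets[audio]`): each "audio" value
    # is a dict with "path", "array" (a NumPy float array), and "sampling_rate"
    # (16000, per the Audio feature above).
    sample = dataset["train"][0]
    print(sample["sentence"])
    print(sample["audio"]["sampling_rate"], sample["audio"]["array"].shape)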
|
|