"""TODO: Add a description here.""" |
|
|
|
import os |
|
import csv |
|
import json |
|
import pandas as pd |
|
|
|
import datasets |
|
|
|
|
|
_CITATION = """\
@inproceedings{Raju2022SnowMD,
    title={Snow Mountain: Dataset of Audio Recordings of The Bible in Low Resource Languages},
    author={Kavitha Raju and V. Anjaly and R. Allen Lish and Joel Mathew},
    year={2022}
}
"""
|
|
_DESCRIPTION = """\
The Snow Mountain dataset contains the audio recordings (in .mp3 format) and the corresponding text of The Bible
in 11 Indian languages. The recordings were done in a studio setting by native speakers. Each language has a single
speaker in the dataset. Most of these languages are geographically concentrated in the northern part of India,
around the state of Himachal Pradesh. Being related to Hindi, they all use the Devanagari script for transcription.
"""
|
|
_HOMEPAGE = "https://gitlabdev.bridgeconn.com/software/research/datasets/snow-mountain"

_LICENSE = ""

_URL = "https://gitlabdev.bridgeconn.com/software/research/datasets/snow-mountain/"
|
|
_FILES = {
    "hindi": {
        "train_500": "data/experiments/hindi/train_500.csv",
        "val_500": "data/experiments/hindi/val_500.csv",
        "train_1000": "data/experiments/hindi/train_1000.csv",
        "val_1000": "data/experiments/hindi/val_1000.csv",
        "test_common": "data/experiments/hindi/test_common.csv",
    },
    "haryanvi": {
        "train_500": "data/experiments/haryanvi/train_500.csv",
        "val_500": "data/experiments/haryanvi/val_500.csv",
        "train_1000": "data/experiments/haryanvi/train_1000.csv",
        "val_1000": "data/experiments/haryanvi/val_1000.csv",
        "test_common": "data/experiments/haryanvi/test_common.csv",
    },
}
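
# Each manifest above is a tab-separated CSV whose rows carry a `sentence`
# transcription and the `path` to the corresponding .mp3 file; see
# _generate_examples below for how the columns are consumed.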
|
|
class SnowMountain(datasets.GeneratorBasedBuilder):
    """Builder for the Snow Mountain speech corpus, with one config per language."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="hindi", version=VERSION, description="Hindi data"),
        datasets.BuilderConfig(name="haryanvi", version=VERSION, description="Haryanvi data"),
    ]

    DEFAULT_CONFIG_NAME = "hindi"
|
    def _info(self):
        # Declare only the fields that _generate_examples actually yields;
        # the unnamed index column in the CSVs is dropped.
        features = datasets.Features(
            {
                "sentence": datasets.Value("string"),
                "path": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("sentence", "path"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
|
    def _split_generators(self, dl_manager):
        # Download the manifest CSVs for the selected language config.
        downloaded_files = dl_manager.download(_FILES[self.config.name])

        train_splits = [
            datasets.SplitGenerator(
                name="train_500",
                gen_kwargs={"filepath": downloaded_files["train_500"]},
            ),
            datasets.SplitGenerator(
                name="train_1000",
                gen_kwargs={"filepath": downloaded_files["train_1000"]},
            ),
        ]

        dev_splits = [
            datasets.SplitGenerator(
                name="val_500",
                gen_kwargs={"filepath": downloaded_files["val_500"]},
            ),
            datasets.SplitGenerator(
                name="val_1000",
                gen_kwargs={"filepath": downloaded_files["val_1000"]},
            ),
        ]

        test_splits = [
            datasets.SplitGenerator(
                name="test_common",
                gen_kwargs={"filepath": downloaded_files["test_common"]},
            ),
        ]

        return train_splits + dev_splits + test_splits
|
    def _generate_examples(self, filepath):
        # Each manifest row pairs a transcription with the path to its audio file.
        data_df = pd.read_csv(filepath, sep="\t")
        for index, row in data_df.iterrows():
            # Key each example by its row index so keys stay unique, and yield
            # per-row values rather than whole columns.
            yield index, {
                "sentence": row["sentence"],
                "path": row["path"],
            }
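

# A minimal usage sketch, assuming this script is saved locally as
# snow_mountain.py (the local path below is an assumption, not a published
# Hub identifier; config names mirror BUILDER_CONFIGS):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("./snow_mountain.py", "hindi")
#     print(ds["train_500"][0])  # {"sentence": ..., "path": ...}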