# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Snow Mountain: audio recordings of the Bible in low-resource Indian languages."""
import os

import datasets
import pandas as pd

_CITATION = """\
@inproceedings{Raju2022SnowMD,
  title={Snow Mountain: Dataset of Audio Recordings of The Bible in Low Resource Languages},
  author={Kavitha Raju and V. Anjaly and R. Allen Lish and Joel Mathew},
  year={2022}
}
"""

_DESCRIPTION = """\
The Snow Mountain dataset contains the audio recordings (in .mp3 format) and the corresponding text of The Bible
in 11 Indian languages. The recordings were done in a studio setting by native speakers. Each language has a single
speaker in the dataset. Most of these languages are geographically concentrated in the northern part of India, around
the state of Himachal Pradesh. Being related to Hindi, they all use the Devanagari script for transcription.
"""

_HOMEPAGE = "https://gitlabdev.bridgeconn.com/software/research/datasets/snow-mountain"

_LICENSE = ""

_URL = "https://gitlabdev.bridgeconn.com/software/research/datasets/snow-mountain/"

_FILES = {
    "hindi": {
        "train_500": "data/experiments/hindi/train_500.csv",
        # "val_500": "data/experiments/hindi/val_500.csv",
        # "train_1000": "data/experiments/hindi/train_1000.csv",
        # "val_1000": "data/experiments/hindi/val_1000.csv",
        # "test_common": "data/experiments/hindi/test_common.csv",
    },
    # "haryanvi": {
    #     "train_500": "data/experiments/haryanvi/train_500.csv",
    #     "val_500": "data/experiments/haryanvi/val_500.csv",
    #     "train_1000": "data/experiments/haryanvi/train_1000.csv",
    #     "val_1000": "data/experiments/haryanvi/val_1000.csv",
    #     "test_common": "data/experiments/haryanvi/test_common.csv",
    # },
}


class Test(datasets.GeneratorBasedBuilder):
    """Builder for the Snow Mountain speech dataset (currently limited to the Hindi configuration)."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="hindi", version=VERSION, description="Hindi data"),
        # datasets.BuilderConfig(name="haryanvi", version=VERSION, description="Haryanvi data"),
    ]

    DEFAULT_CONFIG_NAME = "hindi"

    def _info(self):
        features = datasets.Features(
            {
                # "unnamed": datasets.Value("int64"),
                "sentence": datasets.Value("string"),
                "path": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=16_000),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("sentence", "path"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # urls_to_download = {
        #     "train_500": os.path.join(_URL, _FILES[self.config.name]["train_500"]),
        #     "val_500": os.path.join(_URL, _FILES[self.config.name]["val_500"]),
        #     "train_1000": os.path.join(_URL, _FILES[self.config.name]["train_1000"]),
        #     "val_1000": os.path.join(_URL, _FILES[self.config.name]["val_1000"]),
        #     "test_common": os.path.join(_URL, _FILES[self.config.name]["test_common"]),
        # }
        downloaded_files = dl_manager.download(_FILES[self.config.name])

        train_splits = [
            datasets.SplitGenerator(
                name="train_500",
                gen_kwargs={
                    "filepath": downloaded_files["train_500"],
                },
            ),
            # datasets.SplitGenerator(
            #     name="train_1000",
            #     gen_kwargs={
            #         "filepath": downloaded_files["train_1000"],
            #     },
            # ),
        ]
        # dev_splits = [
        #     datasets.SplitGenerator(
        #         name="val_500",
        #         gen_kwargs={
        #             "filepath": downloaded_files["val_500"],
        #         },
        #     ),
        #     datasets.SplitGenerator(
        #         name="val_1000",
        #         gen_kwargs={
        #             "filepath": downloaded_files["val_1000"],
        #         },
        #     ),
        # ]
        # test_splits = [
        #     datasets.SplitGenerator(
        #         name="test_common",
        #         gen_kwargs={
        #             "filepath": downloaded_files["test_common"],
        #         },
        #     ),
        # ]
        dev_splits = []
        test_splits = []
        return train_splits + dev_splits + test_splits

    def _generate_examples(self, filepath):
        key = 0
        with open(filepath) as f:
            data_df = pd.read_csv(f, sep=",")
            for _, row in data_df.iterrows():
                # Read the raw audio bytes and let the Audio feature decode them;
                # scipy's wavfile.read() returned a NumPy array, which is not a
                # valid value for the "bytes" field (and cannot read .mp3 files).
                with open(row["path"], "rb") as audio_file:
                    audio_bytes = audio_file.read()
                yield key, {
                    "sentence": row["sentence"],
                    "path": row["path"],
                    "audio": {"path": row["path"], "bytes": audio_bytes},
                }
                key += 1
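
# A minimal usage sketch, not part of the loading script itself. It assumes this
# file is saved locally as "snow_mountain.py" (hypothetical filename) and that the
# CSV and audio paths it references are reachable from the working directory:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("snow_mountain.py", "hindi", split="train_500")
#     print(ds[0]["sentence"])
#     print(ds[0]["audio"]["sampling_rate"], len(ds[0]["audio"]["array"]))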