|
import os
import glob

import datasets
import pandas as pd
from sklearn.model_selection import train_test_split
|
_DESCRIPTION = """\
Loading script for Part 1 of the IMDA National Speech Corpus, a Singapore English
speech dataset with recordings from several microphone channels and speaker
metadata (gender and race).
"""

_CITATION = """\
"""
|
_CHANNEL_CONFIGS = sorted([
    "CHANNEL0", "CHANNEL1", "CHANNEL2"
])

_GENDER_CONFIGS = sorted(["F", "M"])

_RACE_CONFIGS = sorted(["CHINESE", "MALAY", "INDIAN", "OTHERS"])

_HOMEPAGE = "https://huggingface.co/indonesian-nlp/librivox-indonesia"

_LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"

# Local path to Part 1 of the IMDA National Speech Corpus; adjust to your own setup.
_PATH_TO_DATA = r'C:\Users\calic\Downloads\huggingface-dataset\imda-dataset\IMDA - National Speech Corpus\PART1'
|
|
class Minds14Config(datasets.BuilderConfig):
    """BuilderConfig for a (channel, gender, race) subset of the corpus."""

    def __init__(
        self, channel, gender, race, description, homepage, path_to_data
    ):
        super(Minds14Config, self).__init__(
            name=channel + gender + race,
            version=datasets.Version("1.0.0", ""),
            description=description,
        )
        self.channel = channel
        self.gender = gender
        self.race = race
        self.description = description
        self.homepage = homepage
        self.path_to_data = path_to_data
|
|
def _build_config(channel, gender, race):
    return Minds14Config(
        channel=channel,
        gender=gender,
        race=race,
        description=_DESCRIPTION,
        homepage=_HOMEPAGE,
        path_to_data=_PATH_TO_DATA,
    )
|
|
class NewDataset(datasets.GeneratorBasedBuilder):
    """Speech dataset builder for Part 1 of the IMDA National Speech Corpus."""

    VERSION = datasets.Version("1.1.0")

    # One config per (channel, gender, race) combination, plus "all" for each
    # dimension; config names are the concatenation, e.g. "CHANNEL0FCHINESE".
    BUILDER_CONFIGS = []
    for channel in _CHANNEL_CONFIGS + ["all"]:
        for gender in _GENDER_CONFIGS + ["all"]:
            for race in _RACE_CONFIGS + ["all"]:
                BUILDER_CONFIGS.append(_build_config(channel, gender, race))

    DEFAULT_CONFIG_NAME = "allallall"
|
    def _info(self):
        task_templates = None

        features = datasets.Features(
            {
                "audio": datasets.features.Audio(sampling_rate=16000),
                "transcript": datasets.Value("string"),
                "mic": datasets.Value("string"),
                "audio_name": datasets.Value("string"),
                "gender": datasets.Value("string"),
                "race": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("audio", "transcript"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=task_templates,
        )
|
    def _split_generators(self, dl_manager):
        mics = (
            _CHANNEL_CONFIGS
            if self.config.channel == "all"
            else [self.config.channel]
        )
        gender = (
            _GENDER_CONFIGS
            if self.config.gender == "all"
            else [self.config.gender]
        )
        race = (
            _RACE_CONFIGS
            if self.config.race == "all"
            else [self.config.race]
        )

        # Split speakers (not utterances) into train/test so that no speaker
        # appears in both splits.
        train_speaker_ids = []
        test_speaker_ids = []
        path_to_speaker = os.path.join(self.config.path_to_data, "DOC", "Speaker Information (Part 1).XLSX")
        speaker_df = pd.read_excel(path_to_speaker, dtype={'SCD/PART1': object})
        for g in gender:
            for r in race:
                X = speaker_df[(speaker_df["ACC"] == r) & (speaker_df["SEX"] == g)]
                X_train, X_test = train_test_split(X, test_size=0.3, random_state=42, shuffle=True)
                train_speaker_ids.extend(X_train["SCD/PART1"])
                test_speaker_ids.extend(X_test["SCD/PART1"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "path_to_data": self.config.path_to_data,
                    "speaker_metadata": speaker_df,
                    "speaker_ids": train_speaker_ids,
                    "mics": mics,
                    "dl_manager": dl_manager,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "path_to_data": self.config.path_to_data,
                    "speaker_metadata": speaker_df,
                    "speaker_ids": test_speaker_ids,
                    "mics": mics,
                    "dl_manager": dl_manager,
                },
            ),
        ]
|
    def _generate_examples(
        self,
        path_to_data,
        speaker_metadata,
        speaker_ids,
        mics,
        dl_manager
    ):
        id_ = 0
        for mic in mics:
            for speaker in speaker_ids:
                # Each script file alternates between an utterance-ID line and the
                # corresponding transcript line; build an ID -> transcript map.
                metadata_path = os.path.join(path_to_data, "DATA", mic, "SCRIPT", mic[-1] + speaker + '*.TXT')
                script_list = glob.glob(metadata_path)
                d = {}
                for script in script_list:
                    line_num = 0
                    with open(script, encoding='utf-8-sig') as f:
                        for line in f:
                            if line_num == 0:
                                key = line.split("\t")[0]
                                line_num += 1
                            elif line_num == 1:
                                d[key] = line.strip()
                                line_num -= 1

                # Audio for each speaker is stored in a per-speaker zip archive.
                archive_path = os.path.join(path_to_data, "DATA", mic, "WAVE", "SPEAKER" + speaker + '.zip')
                if os.path.exists(archive_path):
                    audio_files = dl_manager.iter_archive(archive_path)
                    for path, f in audio_files:
                        result = {}
                        full_path = os.path.join(archive_path, path) if archive_path else path
                        result["audio"] = {"path": full_path, "bytes": f.read()}
                        # The utterance ID is the 9 characters of the file name that
                        # precede the ".WAV" extension.
                        result["transcript"] = d[f.name[-13:-4]]
                        result["audio_name"] = path
                        result["mic"] = mic
                        metadata_row = speaker_metadata.loc[speaker_metadata["SCD/PART1"] == speaker].iloc[0]
                        result["gender"] = metadata_row["SEX"]
                        result["race"] = metadata_row["ACC"]
                        yield id_, result
                        id_ += 1
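

# A minimal usage sketch, assuming this file is saved locally as "nsc_part1.py"
# (a hypothetical name) and _PATH_TO_DATA points at a local copy of the corpus.
# Config names concatenate channel, gender, and race, e.g. "CHANNEL0FCHINESE";
# the default config "allallall" covers every combination. Recent versions of
# the `datasets` library may also require trust_remote_code=True for script-based
# datasets.
if __name__ == "__main__":
    from datasets import load_dataset

    # Load the train split for channel 0, female, Chinese speakers.
    ds = load_dataset("nsc_part1.py", name="CHANNEL0FCHINESE", split="train")
    print(ds[0]["transcript"])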