import csv
import os

import datasets
import pandas as pd

# Metadata
_DESCRIPTION = """\
Mumospee is a continuously growing, comprehensive, multilingual dataset across different modalities.
This is the small version, which includes no more than 1000 rows.
"""

_LICENSE = "Creative Commons Attribution 4.0 International"

_LANGUAGES = ["en", "bg", "de", "ar"]
_TAGS = ["CoVoST", "GigaSpeech", "peoples_speech", "Librispeech", "LibriTTS", "Emilia", "MOSEL"]
_SPLITS = ["train", "validation", "test"]
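
# Assumption based on the features and row accesses below: the accompanying
# dataset.csv sits next to this script and provides one row per utterance with
# at least the columns path, url, type, duration, language, transcript, tag,
# split, and license.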
# BuilderConfig class for the dataset
class MumospeeDatasetConfig(datasets.BuilderConfig):
    def __init__(self, split, language=None, tag=None, **kwargs):
        super().__init__(**kwargs)
        self.split = split
        self.language = language
        self.tag = tag


class MumospeeDataset(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    # Define the available configurations (could be subsets like split or language)
    BUILDER_CONFIGS = [
        MumospeeDatasetConfig(
            name="default",
            version=datasets.Version("1.0.0"),
            description=_DESCRIPTION,
            split="train",
            language=None,
            tag=None,
        )
    ]

    DEFAULT_CONFIG_NAME = "default"
    def _info(self):
        # Define the features of the dataset
        features = datasets.Features({
            "path": datasets.Value("string"),
            "url": datasets.Value("string"),
            "type": datasets.Value("string"),
            # Stored as a float (seconds) to match the cast in _generate_examples.
            "duration": datasets.Value("float32"),
            "language": datasets.Value("string"),
            "transcript": datasets.Value("string"),
            "tag": datasets.Value("string"),
            "split": datasets.Value("string"),
            "license": datasets.Value("string"),
        })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            license=_LICENSE,
        )
    def _adapt_args(self, arg, accepted_arg):
        """
        Adapt the input so that it is returned as a list and verify
        that every element of the list is an accepted value.
        """
        if arg:
            if isinstance(arg, str):
                adapted_arg = [arg]
            else:
                adapted_arg = arg
            for aa in adapted_arg:
                if aa not in accepted_arg:
                    raise ValueError(f"Invalid input: '{aa}'. Accepted values are: {', '.join(accepted_arg)}.")
        else:
            adapted_arg = accepted_arg

        return adapted_arg
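
    # Illustration of the helper's behavior (derived from the code above):
    #   self._adapt_args("en", _LANGUAGES)          -> ["en"]
    #   self._adapt_args(["en", "de"], _LANGUAGES)  -> ["en", "de"]
    #   self._adapt_args(None, _LANGUAGES)          -> _LANGUAGES (no filtering)
    #   self._adapt_args("xx", _LANGUAGES)          -> raises ValueError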
    def _split_generators(self, dl_manager):
        """Split the dataset into train, validation, and test."""
        csv_path = dl_manager.download_and_extract("dataset.csv")

        # === To download the audio files themselves ===
        # Load the CSV to retrieve the URLs of the audio files:
        # data = pd.read_csv(csv_path)
        # url_list = data["url"].tolist()  # List of all URLs in the CSV file
        # url_list = list(set(url_list))
        # # Download all files listed in the 'url' column and store the local paths.
        # downloaded_files = dl_manager.download(url_list)
        # # Add the downloaded file paths to the DataFrame so they are accessible in `_generate_examples`.
        # data["local_path"] = downloaded_files
        # ===

        # Define the splits; the split name is passed to _generate_examples so that
        # each generator only yields rows belonging to its own split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": csv_path, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": csv_path, "split": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": csv_path, "split": "test"},
            ),
        ]
    def _generate_examples(self, filepath, split):
        data = pd.read_csv(filepath)

        # `split` comes from the SplitGenerator; the language and tag filters come from the config.
        language = self.config.language
        tag = self.config.tag

        print(f"Returning the {split} split in language(s) {language}, originally from {tag}.")

        data_split = data[data["split"] == split]

        language_list = self._adapt_args(language, _LANGUAGES)
        tag_list = self._adapt_args(tag, _TAGS)
        print(f"The following languages will be loaded: {language_list}")
        print(f"The following source datasets will be loaded: {tag_list}")

        data_split = data_split[data_split["language"].isin(language_list)]
        data_split = data_split[data_split["tag"].isin(tag_list)]

        if data_split.empty:
            print(f"No data found for split='{split}', language='{language}', tag='{tag}'. Yielding no examples.")
            return  # Exit the generator without yielding any examples.

        for i, row in data_split.iterrows():
            yield i, {
                "path": row["path"],
                # "local_path": row["local_path"],
                "url": row["url"],
                "type": row["type"],
                "duration": float(row["duration"]),
                "language": row["language"],
                "transcript": row["transcript"],
                "tag": row["tag"],
                "split": row["split"],
                "license": row["license"],
            }
    def _download_audio(self, audio_url):
        """Download audio from a URL if needed (you could also implement streaming)."""
        # Example placeholder for downloading audio; integrate it into the data
        # processing pipeline if required.
        pass
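

# A minimal usage sketch, not part of the loading script itself. It assumes this
# script is hosted together with dataset.csv in a Hugging Face dataset repository;
# the repository id "your-username/mumospee_small" is a placeholder.
if __name__ == "__main__":
    from datasets import load_dataset

    # trust_remote_code is needed because the dataset is built by this custom script.
    dataset = load_dataset("your-username/mumospee_small", split="train", trust_remote_code=True)
    print(dataset)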