# mumospee_small.py
import csv
import datasets
# Metadata
_DESCRIPTION = """\
Mumospee is a continuously growing, comprehensive, multilingual dataset across different modalities.
This is the small version, containing no more than 1,000 rows.
"""
_LICENSE = "Creative Commons Attribution 4.0 International"
# Defining categories or tags for filtering
_LANGUAGES = ["en", "bg", "de", "es", "it", "ar"] # Example languages
_TAGS = ["CoVoST", "GigaSpeech", "peoples_speech", "Librispeech", "LibriTTS", "Emilia", "MOSEL"]
_SPLITS = ["train", "validation", "test"]

# BuilderConfig for the dataset, carrying optional language and tag filters.
class MumospeeDatasetConfig(datasets.BuilderConfig):
    def __init__(self, name, language=None, tag=None, **kwargs):
        # Keep language/tag out of **kwargs so they are not forwarded to BuilderConfig.
        super().__init__(name=name, **kwargs)
        self.language = language  # Optional language filter for this config
        self.tag = tag            # Optional tag (source corpus) filter for this config

class MumospeeDataset(datasets.GeneratorBasedBuilder):
    """Mumospee dataset builder, driven by the CSV metadata and audio URLs."""

    VERSION = datasets.Version("1.0.0")

    # Define the available configurations (subsets per split, language, or tag could be added here).
    BUILDER_CONFIGS = [MumospeeDatasetConfig(name="default")]

    def _info(self):
        # Define the features of the dataset.
        features = datasets.Features({
            "path": datasets.Value("string"),
            "url": datasets.Value("string"),
            "type": datasets.Value("string"),
            "duration": datasets.Value("float32"),
            "language": datasets.Value("string"),
            "transcript": datasets.Value("string"),
            "tag": datasets.Value("string"),
            "split": datasets.Value("string"),
            "license": datasets.Value("string"),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            license=_LICENSE,
        )
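
    # For reference, metadata.csv is expected to provide one row per clip with the
    # columns listed in the features above. The example row below is purely
    # illustrative (hypothetical values), not taken from the actual dataset:
    #
    #   path,url,type,duration,language,transcript,tag,split,license
    #   clips/0001.wav,https://example.com/0001.wav,audio,3.2,en,"hello world",CoVoST,train,CC-BY-4.0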

    def _split_generators(self, dl_manager):
        """Download the metadata CSV and define the train, validation, and test splits."""
        # The metadata CSV lives alongside this script in the dataset repository.
        csv_path = dl_manager.download_and_extract("metadata.csv")
        # Define the splits and pass the language and tag filters to _generate_examples.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": csv_path, "split": "train", "languages": _LANGUAGES, "tags": _TAGS},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": csv_path, "split": "validation", "languages": _LANGUAGES, "tags": _TAGS},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": csv_path, "split": "test", "languages": _LANGUAGES, "tags": _TAGS},
            ),
        ]

    def _generate_examples(self, filepath, split, languages, tags):
        """Generate examples from the CSV metadata and audio URLs."""
        # Read the CSV metadata downloaded in _split_generators.
        with open(filepath, mode="r", encoding="utf-8", newline="") as f:
            reader = csv.DictReader(f)
            for row in reader:
                # Keep only the rows belonging to the requested split.
                if row["split"] != split:
                    continue
                # Filter by language and tag.
                if row["language"] not in languages:
                    continue  # Skip entries with unsupported languages
                if row["tag"] not in tags:
                    continue  # Skip entries with unsupported tags
                # Yield an example whose keys match the features declared in _info.
                yield row["path"], {
                    "path": row["path"],
                    "url": row["url"],
                    "type": row["type"],
                    "duration": float(row["duration"]),
                    "language": row["language"],
                    "transcript": row["transcript"],
                    "tag": row["tag"],
                    "split": row["split"],
                    "license": row["license"],
                }

    def _download_audio(self, audio_url):
        """Download audio from a URL if needed (streaming could also be implemented)."""
        # Placeholder: integrate this into the processing pipeline if local audio copies are required.
        pass
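

# A minimal usage sketch, assuming this script sits next to metadata.csv in a local
# folder or Hugging Face dataset repo; the path below is a placeholder, and recent
# `datasets` releases may additionally require trust_remote_code=True for
# script-based datasets.
if __name__ == "__main__":
    ds = datasets.load_dataset("path/to/mumospee_small.py", split="train", trust_remote_code=True)
    print(ds)
    print(ds[0])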