mumospee_small / mumospee_small.py
jayliqinzhang's picture
Upload mumospee_small.py with huggingface_hub
02192be verified
raw
history blame
5.63 kB
import csv
import os
import datasets
import pandas as pd
# Metadata
# NOTE(review): the description string is user-facing text shipped to the Hub;
# left byte-identical (it reads "include no more 1000 rows" — likely intended
# "includes no more than 1000 rows"; confirm before editing published metadata).
_DESCRIPTION = """\
Mumospee is a continuously growing, comprehensive, multilingual dataset across different modalities.
This is the small version include no more 1000 rows.
"""
_LICENSE = "Creative Commons Attribution 4.0 International"
# Defining categories or tags for filtering
# Accepted language codes (ISO 639-1) used to filter rows of the metadata CSV.
_LANGUAGES = ["en", "bg", "de", "es", "it", "ar"] # Example languages
# Source-corpus tags; each CSV row carries one of these in its "tag" column.
_TAGS = ["CoVoST", "GigaSpeech", "peoples_speech", "Librispeech", "LibriTTS", "Emilia", "MOSEL"]
# Split names expected in the CSV's "split" column.
_SPLITS = ["train", "validation", "test"]
# BuilderConfig class for your dataset
class MumospeeDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for Mumospee with optional ``language``/``tag`` filters.

    Bug fix: previously ``language`` and ``tag`` were read via ``kwargs.get``
    but left inside ``kwargs`` and forwarded to ``BuilderConfig.__init__``,
    which raises ``TypeError`` on unknown keyword arguments. They are now
    explicit keyword parameters, which is backward-compatible for callers
    that passed them as keywords.
    """

    def __init__(self, name, *args, language=None, tag=None, **kwargs):
        """Create the config.

        Args:
            name: Config name (forwarded to ``BuilderConfig``).
            language: Optional language code (or list of codes) to filter on.
            tag: Optional source-corpus tag (or list of tags) to filter on.
            *args, **kwargs: Forwarded unchanged to ``BuilderConfig``.
        """
        super().__init__(name=name, *args, **kwargs)
        self.language = language  # None means "no language filter"
        self.tag = tag            # None means "no tag filter"
class MumospeeDataset(datasets.GeneratorBasedBuilder):
    """Dataset builder for Mumospee (small).

    Reads per-example metadata from ``dataset.csv`` in the repo and yields one
    example per row, filtered by split and (optionally) language and tag.
    """

    VERSION = datasets.Version("1.0.0")

    # Single default configuration; language/tag filtering can be supplied
    # through MumospeeDatasetConfig.
    BUILDER_CONFIGS = [
        MumospeeDatasetConfig(
            name="default",
            version=datasets.Version("1.0.0"),
            description=_DESCRIPTION,
        )
    ]
    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        """Return dataset metadata; the feature keys here must exactly match
        the dict keys yielded by ``_generate_examples``."""
        features = datasets.Features({
            "path": datasets.Value("string"),
            "url": datasets.Value("string"),
            "type": datasets.Value("string"),
            "duration": datasets.Value("float32"),
            "language": datasets.Value("string"),
            "transcript": datasets.Value("string"),
            "tag": datasets.Value("string"),
            "split": datasets.Value("string"),
            "license": datasets.Value("string"),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the metadata CSV and declare train/validation/test splits."""
        csv_path = dl_manager.download_and_extract("dataset.csv")
        # Prefer filters set on the config; fall back to accepting every
        # known language/tag (i.e. effectively no filtering).
        language = getattr(self.config, "language", None) or _LANGUAGES
        tag = getattr(self.config, "tag", None) or _TAGS
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "csv_path": csv_path,
                    "split": split,
                    "language": language,
                    "tag": tag,
                },
            )
            for split_name, split in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "validation"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, csv_path, split, language, tag):
        """Yield ``(key, example)`` pairs from the metadata CSV.

        Bug fixes relative to the original:
        - The parameter was named ``csv_file`` while ``_split_generators``
          passed ``gen_kwargs={"csv_path": ...}``, so generation raised a
          ``TypeError``; the name now matches.
        - ``language``/``tag`` arrive as lists, so filtering uses
          ``Series.isin`` (equality against a list selected nothing).
        - The yielded keys now match the ``Features`` declared in ``_info``
          (the original yielded an undeclared ``"audio"`` key and omitted
          ``"url"``/``"type"``).

        Args:
            csv_path: Local path of the downloaded metadata CSV.
            split: One of ``"train"``/``"validation"``/``"test"``.
            language: Language code or list of codes to keep.
            tag: Corpus tag or list of tags to keep.
        """
        data = pd.read_csv(csv_path)

        # Primary filter: the requested split.
        data_split = data[data["split"] == split]

        # Secondary filters: accept either a single value or a list of values.
        if language:
            languages = [language] if isinstance(language, str) else list(language)
            data_split = data_split[data_split["language"].isin(languages)]
        if tag:
            tags = [tag] if isinstance(tag, str) else list(tag)
            data_split = data_split[data_split["tag"].isin(tags)]

        for i, row in data_split.iterrows():
            yield i, {
                "path": row["path"],
                "url": row["url"],
                "type": row["type"],
                "duration": float(row["duration"]),
                "language": row["language"],
                "transcript": row["transcript"],
                "tag": row["tag"],
                "split": row["split"],
                "license": row["license"],
            }

    def _download_audio(self, audio_url):
        """Placeholder for explicit audio downloading/streaming (not implemented)."""
        pass