# Source page metadata (scrape residue, kept as a comment):
#   License: see _LICENSE below
#   File size: 5,634 Bytes
import csv
import os
import datasets
import pandas as pd
# Metadata
_DESCRIPTION = """\
Mumospee is a continuously growing, comprehensive, multilingual dataset across different modalities.
This is the small version include no more 1000 rows.
"""
_LICENSE = "Creative Commons Attribution 4.0 International"
# Defining categories or tags for filtering
_LANGUAGES = ["en", "bg", "de", "es", "it", "ar"] # Example languages
_TAGS = ["CoVoST", "GigaSpeech", "peoples_speech", "Librispeech", "LibriTTS", "Emilia", "MOSEL"]
_SPLITS = ["train", "validation", "test"]
# BuilderConfig class for your dataset
class MumospeeDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for Mumospee.

    Extends ``datasets.BuilderConfig`` with two optional filters:

    Args:
        name: Configuration name (forwarded to the base class).
        language: Optional language filter (e.g. ``"en"``), or ``None`` for all.
        tag: Optional source-corpus tag filter (e.g. ``"CoVoST"``), or ``None`` for all.
    """

    def __init__(self, name, *args, language=None, tag=None, **kwargs):
        # ``language`` and ``tag`` are declared as explicit keyword-only
        # parameters so they are NOT forwarded to BuilderConfig.__init__,
        # which does not accept them (the original left them inside **kwargs,
        # raising a TypeError whenever either filter was actually passed).
        super().__init__(name=name, *args, **kwargs)
        self.language = language
        self.tag = tag
class MumospeeDataset(datasets.GeneratorBasedBuilder):
    """Mumospee dataset builder.

    Reads a single ``dataset.csv`` metadata file (one row per utterance,
    with an audio URL per row) and yields examples filtered by split and,
    optionally, by language and/or source-corpus tag.
    """

    VERSION = datasets.Version("1.0.0")

    # A single "default" configuration; language/tag filtering is driven by
    # the kwargs passed to _generate_examples below.
    BUILDER_CONFIGS = [
        MumospeeDatasetConfig(
            name="default",
            version=datasets.Version("1.0.0"),
            description=_DESCRIPTION,
        )
    ]
    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        """Declare the feature schema; every yielded example must match it."""
        features = datasets.Features({
            "path": datasets.Value("string"),
            "url": datasets.Value("string"),
            "type": datasets.Value("string"),
            "duration": datasets.Value("float32"),
            "language": datasets.Value("string"),
            "transcript": datasets.Value("string"),
            "tag": datasets.Value("string"),
            "split": datasets.Value("string"),
            "license": datasets.Value("string"),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the CSV metadata and define train/validation/test splits."""
        csv_path = dl_manager.download_and_extract("dataset.csv")
        # NOTE: gen_kwargs keys must match the parameter names of
        # _generate_examples. The original used "filepath" for a parameter
        # named "csv_file", which raised a TypeError at generation time.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"csv_file": csv_path, "split": "train", "language": _LANGUAGES, "tag": _TAGS},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"csv_file": csv_path, "split": "validation", "language": _LANGUAGES, "tag": _TAGS},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"csv_file": csv_path, "split": "test", "language": _LANGUAGES, "tag": _TAGS},
            ),
        ]

    @staticmethod
    def _filter_column(frame, column, allowed):
        """Keep rows whose ``column`` matches ``allowed``.

        ``allowed`` may be a single value or a list of values; falsy values
        (None, empty list) disable the filter. The original compared a pandas
        column to a whole list with ``==``, which does not express membership.
        """
        if not allowed:
            return frame
        if isinstance(allowed, str):
            return frame[frame[column] == allowed]
        return frame[frame[column].isin(allowed)]

    def _generate_examples(self, csv_file, split, language, tag):
        """Yield ``(key, example)`` pairs from the CSV metadata.

        Args:
            csv_file: Local path to the downloaded ``dataset.csv``.
            split: One of ``"train"``, ``"validation"``, ``"test"``.
            language: Single language code or list of codes to keep.
            tag: Single corpus tag or list of tags to keep.
        """
        data = pd.read_csv(csv_file)
        data_split = data[data["split"] == split]
        data_split = self._filter_column(data_split, "language", language)
        data_split = self._filter_column(data_split, "tag", tag)
        for i, row in data_split.iterrows():
            # Keys here must match the features declared in _info(); the
            # original yielded an undeclared "audio" key and omitted the
            # declared "url"/"type" keys, which fails schema validation.
            yield i, {
                "path": row["path"],
                "url": row["url"],
                # assumes the CSV carries a "type" column matching the
                # declared schema — TODO confirm against dataset.csv
                "type": row["type"],
                "duration": float(row["duration"]),
                "language": row["language"],
                "transcript": row["transcript"],
                "tag": row["tag"],
                "split": row["split"],
                "license": row["license"],
            }

    def _download_audio(self, audio_url):
        """Placeholder for downloading/streaming audio from ``audio_url``.

        Not used by the current pipeline; kept as an extension point.
        """
        pass