mnazari committed on
Commit
8434d7c
1 Parent(s): 3428b9a

Initial commit with my dataset

.gitattributes ADDED
@@ -0,0 +1,55 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ # Audio files - uncompressed
+ *.pcm filter=lfs diff=lfs merge=lfs -text
+ *.sam filter=lfs diff=lfs merge=lfs -text
+ *.raw filter=lfs diff=lfs merge=lfs -text
+ # Audio files - compressed
+ *.aac filter=lfs diff=lfs merge=lfs -text
+ *.flac filter=lfs diff=lfs merge=lfs -text
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
+ *.ogg filter=lfs diff=lfs merge=lfs -text
+ *.wav filter=lfs diff=lfs merge=lfs -text
+ # Image files - uncompressed
+ *.bmp filter=lfs diff=lfs merge=lfs -text
+ *.gif filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.tiff filter=lfs diff=lfs merge=lfs -text
+ # Image files - compressed
+ *.jpg filter=lfs diff=lfs merge=lfs -text
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
+ *.webp filter=lfs diff=lfs merge=lfs -text
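
Every pattern above routes matching files through Git LFS. A minimal sketch (not part of the commit) of how to list which patterns this .gitattributes sends through LFS, assuming it is read from the repo root:

from pathlib import Path

def lfs_patterns(path=".gitattributes"):
    """Return the path patterns whose attributes include filter=lfs."""
    patterns = []
    for line in Path(path).read_text(encoding="utf-8").splitlines():
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        pattern, *attrs = line.split()
        if "filter=lfs" in attrs:
            patterns.append(pattern)
    return patterns

print(lfs_patterns())  # ['*.7z', '*.arrow', ..., '*.mp3', ..., '*.webp']
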
_common_voice.py ADDED
@@ -0,0 +1,198 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ Common Voice Dataset"""
+
+
+ import csv
+ import os
+ import json
+
+ import datasets
+ from datasets.utils.py_utils import size_str
+ from tqdm import tqdm
+ # NOTE: missing from this commit; the upstream common_voice_13_0 script ships
+ # languages.py and release_stats.py and imports them like this.
+ from .languages import LANGUAGES
+ from .release_stats import STATS
+
+
+ _CITATION = """\
+ @inproceedings{commonvoice:2020,
+ author = {Ardila, R. and Branson, M. and Davis, K. and Henretty, M. and Kohler, M. and Meyer, J. and Morais, R. and Saunders, L. and Tyers, F. M. and Weber, G.},
+ title = {Common Voice: A Massively-Multilingual Speech Corpus},
+ booktitle = {Proceedings of the 12th Conference on Language Resources and Evaluation (LREC 2020)},
+ pages = {4211--4215},
+ year = 2020
+ }
+ """
+
+ _HOMEPAGE = "https://commonvoice.mozilla.org/en/datasets"
+
+ _LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"
+
+ # TODO: change "streaming" to "main" after merge!
+ _BASE_URL = "https://huggingface.co/datasets/mozilla-foundation/common_voice_13_0/resolve/main/"
+
+ _AUDIO_URL = _BASE_URL + "audio/{lang}/{split}.tar"
+
+ _TRANSCRIPT_URL = _BASE_URL + "transcript/{lang}/{split}.tsv"
+
+ # NOTE: also missing from this commit; _split_generators reads it, and the
+ # upstream script defines it this way.
+ _N_SHARDS_URL = _BASE_URL + "n_shards.json"
+
+ class CommonVoiceConfig(datasets.BuilderConfig):
+ """BuilderConfig for CommonVoice."""
+
+ def __init__(self, name, version, **kwargs):
+ self.language = kwargs.pop("language", None)
+ # NOTE: these pops mirror the upstream script; without them the extra
+ # keyword arguments passed by BUILDER_CONFIGS crash BuilderConfig.__init__.
+ self.release_date = kwargs.pop("release_date", None)
+ self.num_clips = kwargs.pop("num_clips", None)
+ self.num_speakers = kwargs.pop("num_speakers", None)
+ self.validated_hr = kwargs.pop("validated_hr", None)
+ self.total_hr = kwargs.pop("total_hr", None)
+ self.size_bytes = kwargs.pop("size_bytes", None)
+ description = (
+ f"This is a test. "
+ )
+ super(CommonVoiceConfig, self).__init__(
+ name=name,
+ version=datasets.Version(version),
+ description=description,
+ **kwargs,
+ )
+
+
+ class CommonVoice(datasets.GeneratorBasedBuilder):
+ DEFAULT_WRITER_BATCH_SIZE = 1000
+
+ BUILDER_CONFIGS = [
+ CommonVoiceConfig(
+ name=lang,
+ version=STATS["version"],
+ language=LANGUAGES[lang],
+ release_date=STATS["date"],
+ num_clips=lang_stats["clips"],
+ num_speakers=lang_stats["users"],
+ validated_hr=float(lang_stats["validHrs"]) if lang_stats["validHrs"] else None,
+ total_hr=float(lang_stats["totalHrs"]) if lang_stats["totalHrs"] else None,
+ size_bytes=int(lang_stats["size"]) if lang_stats["size"] else None,
+ )
+ for lang, lang_stats in STATS["locales"].items()
+ ]
+
+ def _info(self):
+ total_languages = len(STATS["locales"])
+ total_valid_hours = STATS["totalValidHrs"]
+ description = (
+ "Common Voice is Mozilla's initiative to help teach machines how real people speak. "
+ f"The dataset currently consists of {total_valid_hours} validated hours of speech "
+ f" in {total_languages} languages, but more voices and languages are always added."
+ )
+ features = datasets.Features(
+ {
+ "client_id": datasets.Value("string"),
+ "path": datasets.Value("string"),
+ "audio": datasets.features.Audio(sampling_rate=48_000),
+ "sentence": datasets.Value("string"),
+ "up_votes": datasets.Value("int64"),
+ "down_votes": datasets.Value("int64"),
+ "age": datasets.Value("string"),
+ "gender": datasets.Value("string"),
+ "accent": datasets.Value("string"),
+ "locale": datasets.Value("string"),
+ "segment": datasets.Value("string"),
+ "variant": datasets.Value("string"),
+ }
+ )
+
+ return datasets.DatasetInfo(
+ description=description,
+ features=features,
+ supervised_keys=None,
+ homepage=_HOMEPAGE,
+ license=_LICENSE,
+ citation=_CITATION,
+ version=self.config.version,
+ )
+
+ def _split_generators(self, dl_manager):
+ lang = self.config.name
+ n_shards_path = dl_manager.download_and_extract(_N_SHARDS_URL)
+ with open(n_shards_path, encoding="utf-8") as f:
+ n_shards = json.load(f)
+
+ audio_urls = {}
+ splits = ("train", "dev", "test", "other", "invalidated")
+ for split in splits:
+ audio_urls[split] = [
+ _AUDIO_URL.format(lang=lang, split=split, shard_idx=i) for i in range(n_shards[lang][split])
+ ]
+ archive_paths = dl_manager.download(audio_urls)
+ local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
+
+ meta_urls = {split: _TRANSCRIPT_URL.format(lang=lang, split=split) for split in splits}
+ meta_paths = dl_manager.download_and_extract(meta_urls)
+
+ split_generators = []
+ split_names = {
+ "train": datasets.Split.TRAIN,
+ "dev": datasets.Split.VALIDATION,
+ "test": datasets.Split.TEST,
+ }
+ for split in splits:
+ split_generators.append(
+ datasets.SplitGenerator(
+ name=split_names.get(split, split),
+ gen_kwargs={
+ "local_extracted_archive_paths": local_extracted_archive_paths.get(split),
+ "archives": [dl_manager.iter_archive(path) for path in archive_paths.get(split)],
+ "meta_path": meta_paths[split],
+ },
+ ),
+ )
+
+ return split_generators
+
+ def _generate_examples(self, local_extracted_archive_paths, archives, meta_path):
+ data_fields = list(self._info().features.keys())
+ metadata = {}
+ with open(meta_path, encoding="utf-8") as f:
+ reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+ for row in tqdm(reader, desc="Reading metadata..."):
+ if not row["path"].endswith(".mp3"):
+ row["path"] += ".mp3"
+ # accent -> accents in CV 8.0
+ if "accents" in row:
+ row["accent"] = row["accents"]
+ del row["accents"]
+ # if data is incomplete, fill with empty values
+ for field in data_fields:
+ if field not in row:
+ row[field] = ""
+ metadata[row["path"]] = row
+
+ for i, audio_archive in enumerate(archives):
+ for path, file in audio_archive:
+ _, filename = os.path.split(path)
+ if filename in metadata:
+ result = dict(metadata[filename])
+ # set the audio feature and the path to the extracted file
+ path = os.path.join(local_extracted_archive_paths[i], path) if local_extracted_archive_paths else path
+ result["audio"] = {"path": path, "bytes": file.read()}
+ result["path"] = path
+ yield path, result
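
As a usage sketch for the upstream script this file mirrors (illustrative, not part of the commit: the "en" config, streaming mode, and the gated-access token are assumptions about mozilla-foundation/common_voice_13_0 on the Hub):

from datasets import load_dataset

cv = load_dataset(
    "mozilla-foundation/common_voice_13_0",
    "en",                 # language config, one per STATS["locales"] key
    split="test",
    streaming=True,       # iterate shards without materializing the split
    use_auth_token=True,  # assumption: the upstream dataset is gated
)
sample = next(iter(cv))
print(sample["sentence"], sample["audio"]["sampling_rate"])
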
_giga_speech.py ADDED
@@ -0,0 +1,329 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """
+ GigaSpeech is an evolving, multi-domain English speech recognition corpus with 10,000 hours of high quality
+ labeled audio suitable for supervised training, and 40,000 hours of total audio suitable for semi-supervised
+ and unsupervised training. Around 40,000 hours of transcribed audio is first collected from audiobooks, podcasts
+ and YouTube, covering both read and spontaneous speaking styles, and a variety of topics, such as arts, science,
+ sports, etc. A new forced alignment and segmentation pipeline is proposed to create sentence segments suitable
+ for speech recognition training, and to filter out segments with low-quality transcription. For system training,
+ GigaSpeech provides five subsets of different sizes, 10h, 250h, 1000h, 2500h, and 10000h.
+ For our 10,000-hour XL training subset, we cap the word error rate at 4% during the filtering/validation stage,
+ and for all our other smaller training subsets, we cap it at 0%. The DEV and TEST evaluation sets, on the other hand,
+ are re-processed by professional human transcribers to ensure high transcription quality.
+ """
+
+ import csv
+ import os
+
+ import datasets
+ # NOTE: the constants below were commented out in this commit; they are restored here because the URL templates and _info() need them at import/build time.
+ _CITATION = """\
+ """
+
+ _DESCRIPTION = """\
+ """
+
+ _HOMEPAGE = "https://github.com/SpeechColab/GigaSpeech"
+
+ _LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"
+
+ _CATEGORIES = (
+ "People and Blogs",
+ "Business",
+ "Nonprofits and Activism",
+ "Crime",
+ "History",
+ "Pets and Animals",
+ "News and Politics",
+ "Travel and Events",
+ "Kids and Family",
+ "Leisure",
+ "N/A",
+ "Comedy",
+ # "News and Politics",  # duplicate in the commit; dropped so ClassLabel names stay unique
+ "Sports",
+ "Arts",
+ "Science and Technology",
+ "Autos and Vehicles",
+ # "Science and Technology",  # duplicate in the commit; dropped
+ # "People and Blogs",  # duplicate in the commit; dropped
+ "Music",
+ "Society and Culture",
+ "Education",
+ "Howto and Style",
+ "Film and Animation",
+ "Gaming",
+ "Entertainment",
+ # "Travel and Events",  # duplicate in the commit; dropped
+ "Health and Fitness",
+ "audiobook",
+ )
+
+ _SOURCES = ("audiobook", "podcast", "youtube")
+
+ _SUBSETS = ("xs", "s", "m", "l", "xl")
+
+ _BASE_DATA_URL = "https://huggingface.co/datasets/speechcolab/gigaspeech/resolve/main/data/"
+
+ _AUDIO_ARCHIVE_URL = _BASE_DATA_URL + "audio/{subset}_files{is_additional}/{subset}_chunks_{archive_id:04}.tar.gz"
+
+ _META_URL = _BASE_DATA_URL + "metadata/{subset}_metadata{is_additional}/{subset}_chunks_{archive_id:04}_metadata.csv"
+
+ _N_ARCHIVES_URL = _BASE_DATA_URL + "{subset}_n_archives{is_additional}.txt"
+
+
+ class GigaspeechConfig(datasets.BuilderConfig):
+ """BuilderConfig for Gigaspeech."""
+
+ def __init__(self, name, *args, **kwargs):
+ """BuilderConfig for Gigaspeech
+ """
+ super().__init__(name=name, *args, **kwargs)
+ # larger subsets are supersets of smaller subsets,
+ # if we want to download "m", we need to download "xs" and "s" data too.
+ # so if name == "m", self.subsets_to_download will be ("xs", "s", "m")
+ if name not in {"dev", "test"}:
+ self.subsets_to_download = _SUBSETS[:_SUBSETS.index(name) + 1]
+ else:
+ self.subsets_to_download = (name,)
+
+
+ class Gigaspeech(datasets.GeneratorBasedBuilder):
+ """
+ GigaSpeech is an evolving, multi-domain English speech recognition corpus with 10,000 hours of high quality
+ labeled audio suitable for supervised training, and 40,000 hours of total audio suitable for semi-supervised
+ and unsupervised training (this implementation contains only labelled data for now).
+ Around 40,000 hours of transcribed audio is first collected from audiobooks, podcasts
+ and YouTube, covering both read and spontaneous speaking styles, and a variety of topics, such as arts, science,
+ sports, etc. A new forced alignment and segmentation pipeline is proposed to create sentence segments suitable
+ for speech recognition training, and to filter out segments with low-quality transcription. For system training,
+ GigaSpeech provides five subsets of different sizes, 10h, 250h, 1000h, 2500h, and 10000h.
+ For our 10,000-hour XL training subset, we cap the word error rate at 4% during the filtering/validation stage,
+ and for all our other smaller training subsets, we cap it at 0%. The DEV and TEST evaluation sets, on the other hand,
+ are re-processed by professional human transcribers to ensure high transcription quality.
+ """
+
+ VERSION = datasets.Version("1.0.0")
+
+ BUILDER_CONFIGS = [GigaspeechConfig(name=subset) for subset in _SUBSETS + ("dev", "test")]
+
+ DEFAULT_WRITER_BATCH_SIZE = 128
+
+ def _info(self):
+ features = datasets.Features(
+ {
+ "segment_id": datasets.Value("string"),
+ "speaker": datasets.Value("string"),
+ "text": datasets.Value("string"),
+ "audio": datasets.Audio(sampling_rate=16_000),
+ "begin_time": datasets.Value("float32"),
+ "end_time": datasets.Value("float32"),
+ "audio_id": datasets.Value("string"),
+ "title": datasets.Value("string"),
+ "url": datasets.Value("string"),
+ "source": datasets.ClassLabel(names=_SOURCES),
+ "category": datasets.ClassLabel(names=_CATEGORIES),
+ "original_full_path": datasets.Value("string"), # relative path to full audio in original data dirs
+ }
+ )
+ return datasets.DatasetInfo(
+ description=_DESCRIPTION,
+ features=features,
+ homepage=_HOMEPAGE,
+ license=_LICENSE,
+ citation=_CITATION,
+ )
+
+ def _is_additional_data(self, name):
+ if name in {"s", "m", "l", "xl"}:
+ return "_additional"
+ return ""
+
+ @property
+ def _splits_to_subsets(self):
+ return {
+ "train": self.config.subsets_to_download,
+ "dev": ["dev"],
+ "test": ["test"]
+ }
+
+ def _read_n_archives(self, n_archives_path):
+ with open(n_archives_path, encoding="utf-8") as f:
+ return int(f.read().strip())
+
+ def _split_generators(self, dl_manager):
+ splits_to_subsets = self._splits_to_subsets
+ if self.config.name in {"dev", "test"}:
+ splits = (self.config.name,)
+ else:
+ splits = ("train", "dev", "test")
+
+ # 1. get number of archives (shards) in each subset
+ n_archives_links = {
+ split: {
+ subset: _N_ARCHIVES_URL.format(subset=subset, is_additional=self._is_additional_data(subset))
+ for subset in splits_to_subsets[split]
+ }
+ for split in splits
+ }
+ n_archives_paths = dl_manager.download_and_extract(n_archives_links)
+ n_archives = {
+ # mapping from a subset to a single number - number of audio archives (shards) in a subset
+ split: {
+ subset: self._read_n_archives(n_archives_paths[split][subset])
+ for subset in splits_to_subsets[split]
+ }
+ for split in splits
+ }
+
+ # 2. prepare sharded archives with audio files
+ audio_archives_urls = {
+ split: {
+ subset: [
+ _AUDIO_ARCHIVE_URL.format(subset=subset, is_additional=self._is_additional_data(subset),
+ archive_id=i)
+ for i in range(n_archives[split][subset])
+ ]
+ for subset in splits_to_subsets[split]
+ }
+ for split in splits
+ }
+ audio_archives_paths = dl_manager.download(audio_archives_urls)
+ # flatten archives paths from
+ # {"train": {"xs": [path1, path2,], "s": [path3], "m": [path5, path5]}, "dev": {"dev": [path6,...]}, "test": {"test": [...]}}
+ # to {"train": [path1, path2, path3, path4, path5], "dev": [path6, ...], "test": [...]}
+ audio_archives_paths = _flatten_nested_dict(audio_archives_paths)
+ local_audio_archives_paths = dl_manager.extract(audio_archives_paths) if not dl_manager.is_streaming \
+ else None
+
+ # 3. prepare sharded metadata csv files
+ meta_urls = {
+ split: {
+ subset: [
+ _META_URL.format(subset=subset, is_additional=self._is_additional_data(subset), archive_id=i)
+ for i in range(n_archives[split][subset])
+ ]
+ for subset in splits_to_subsets[split]
+ }
+ for split in splits
+ }
+ meta_paths = dl_manager.download_and_extract(meta_urls)
+ meta_paths = _flatten_nested_dict(meta_paths)
+
+ if self.config.name not in {"dev", "test"}:
+ return [
+ datasets.SplitGenerator(
+ name=datasets.Split.TRAIN,
+ gen_kwargs={
+ "audio_archives_iterators": [
+ dl_manager.iter_archive(archive_path) for archive_path in audio_archives_paths["train"]
+ ],
+ "local_audio_archives_paths": local_audio_archives_paths[
+ "train"] if local_audio_archives_paths else None,
+ "meta_paths": meta_paths["train"]
+ },
+ ),
+ datasets.SplitGenerator(
+ name=datasets.Split.VALIDATION,
+ gen_kwargs={
+ "audio_archives_iterators": [
+ dl_manager.iter_archive(archive_path) for archive_path in audio_archives_paths["dev"]
+ ],
+ "local_audio_archives_paths": local_audio_archives_paths[
+ "dev"] if local_audio_archives_paths else None,
+ "meta_paths": meta_paths["dev"]
+ },
+ ),
+ datasets.SplitGenerator(
+ name=datasets.Split.TEST,
+ gen_kwargs={
+ "audio_archives_iterators": [
+ dl_manager.iter_archive(archive_path) for archive_path in audio_archives_paths["test"]
+ ],
+ "local_audio_archives_paths": local_audio_archives_paths[
+ "test"] if local_audio_archives_paths else None,
+ "meta_paths": meta_paths["test"]
+ },
+ ),
+ ]
+
+ if self.config.name == "dev":
+ return [
+ datasets.SplitGenerator(
+ name=datasets.Split.VALIDATION,
+ gen_kwargs={
+ "audio_archives_iterators": [
+ dl_manager.iter_archive(archive_path) for archive_path in audio_archives_paths["dev"]
+ ],
+ "local_audio_archives_paths": local_audio_archives_paths[
+ "dev"] if local_audio_archives_paths else None,
+ "meta_paths": meta_paths["dev"]
+ },
+ ),
+ ]
+
+ if self.config.name == "test":
+ return [
+ datasets.SplitGenerator(
+ name=datasets.Split.TEST,
+ gen_kwargs={
+ "audio_archives_iterators": [
+ dl_manager.iter_archive(archive_path) for archive_path in audio_archives_paths["test"]
+ ],
+ "local_audio_archives_paths": local_audio_archives_paths[
+ "test"] if local_audio_archives_paths else None,
+ "meta_paths": meta_paths["test"]
+ },
+ ),
+ ]
+
+ def _generate_examples(self, audio_archives_iterators, local_audio_archives_paths, meta_paths):
+ assert len(audio_archives_iterators) == len(meta_paths)
+ if local_audio_archives_paths:
+ assert len(audio_archives_iterators) == len(local_audio_archives_paths)
+
+ for i, (meta_path, audio_archive_iterator) in enumerate(zip(meta_paths, audio_archives_iterators)):
+ meta_dict = dict()
+ with open(meta_path) as csvfile:
+ meta_csv = csv.DictReader(csvfile)
+ for line in meta_csv:
+ meta_dict[line["sid"]] = line
+
+ for audio_path_in_archive, audio_file in audio_archive_iterator:
+ # `audio_path_in_archive` is like "dev_chunks_0000/YOU1000000029_S0000095.wav"
+ audio_filename = os.path.split(audio_path_in_archive)[1]
+ audio_id = audio_filename.split(".wav")[0]
+ audio_meta = meta_dict[audio_id]
+ audio_meta["segment_id"] = audio_meta.pop("sid")
+ audio_meta["original_full_path"] = audio_meta.pop("path")
+ audio_meta["text"] = audio_meta.pop("text_tn")
+ audio_meta["audio_id"] = audio_meta.pop("aid")
+ if not audio_meta["category"]:
+ audio_meta["category"] = "N/A"
+
+ path = os.path.join(local_audio_archives_paths[i], audio_path_in_archive) if local_audio_archives_paths \
+ else audio_path_in_archive
+
+ yield audio_id, {
+ "audio": {"path": path, "bytes": audio_file.read()},
+ **{feature: value for feature, value in audio_meta.items() if feature in self.info.features}
+ }
+
+
+ def _flatten_nested_dict(nested_dict):
+ return {
+ key: [inner_list_element for inner_list in value_to_lists.values() for inner_list_element in inner_list]
+ for key, value_to_lists in nested_dict.items()
+ }
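
Two pieces of the script above are worth a sanity check: the subset configs are cumulative ("m" pulls "xs" and "s" too), and _flatten_nested_dict collapses the per-subset path lists into one list per split. A standalone sketch of both behaviors, with the _SUBSETS constant copied from the script and toy paths standing in for downloads:

_SUBSETS = ("xs", "s", "m", "l", "xl")

name = "m"
subsets_to_download = _SUBSETS[: _SUBSETS.index(name) + 1]
print(subsets_to_download)  # ('xs', 's', 'm')

def _flatten_nested_dict(nested_dict):
    # {"train": {"xs": [...], "s": [...]}} -> {"train": [...all paths...]}
    return {
        key: [elem for inner in value_to_lists.values() for elem in inner]
        for key, value_to_lists in nested_dict.items()
    }

nested = {"train": {"xs": ["p1"], "s": ["p2", "p3"], "m": ["p4"]}, "dev": {"dev": ["p5"]}}
print(_flatten_nested_dict(nested))
# {'train': ['p1', 'p2', 'p3', 'p4'], 'dev': ['p5']}
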
audio/urmi (christian)/dev.tar ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fbf86dafd01656196af2e98c8fc9638ca96fa0cb1ddf6e8a4bc0b140649da30b
+ size 30720
audio/urmi (christian)/test.tar ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2ecd175686cde03f3be0491431a1bdea6cf6b44ee32b1c52a672b4087cd81ab
+ size 30720
audio/urmi (christian)/train.tar ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:32185858f3d78d8c37f041d54ea1e0ea67740f14b99af8900173a2a56ac34a03
+ size 40960
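
The three .tar entries above are stored as Git LFS pointer files: three lines giving the spec version, the SHA-256 of the real payload, and its size in bytes. A small sketch (not from the repo) that parses one such pointer:

def parse_lfs_pointer(text):
    """Split an LFS pointer into its version/oid/size fields."""
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, digest = fields["oid"].split(":", 1)
    return {"version": fields["version"], "algo": algo, "oid": digest, "size": int(fields["size"])}

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:32185858f3d78d8c37f041d54ea1e0ea67740f14b99af8900173a2a56ac34a03\n"
    "size 40960"
)
print(parse_lfs_pointer(pointer)["size"])  # 40960
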
build.py CHANGED
@@ -87,6 +87,8 @@ def save_data(subsets):
  'path': audio_file_name,
  })

+ break
+
  pbar.set_description(f"Saving audios ({dialect}/{split})")
  audio_tar_path = f"{audio_dir_path}.tar"
  with tarfile.open(audio_tar_path, 'w') as tar:
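
The added break exits the per-example loop early, which is consistent with the tiny shard sizes committed above (an inference; the commit does not say so). For reference, the tar-packing pattern the context lines point at, with an illustrative path:

import tarfile

audio_dir_path = "audio/urmi (christian)/train"  # illustrative path
audio_tar_path = f"{audio_dir_path}.tar"
with tarfile.open(audio_tar_path, "w") as tar:
    # Pack the saved clips; arcname="." keeps members under short relative
    # names, so os.path.split(...)[1] in the loader recovers the mp3 name.
    tar.add(audio_dir_path, arcname=".")
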
main.ipynb CHANGED
@@ -7,6 +7,33 @@
  "# Creating the NENA Speech Dataset"
  ]
  },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Repo card metadata block was not found. Setting CardData to empty.\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "<nena_speech_1_0.NENASpeech at 0x15ae1c0d0>"
+ ]
+ },
+ "execution_count": 2,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from nena_speech_1_0 import NENASpeech\n"
+ ]
+ },
  {
  "cell_type": "markdown",
  "metadata": {},
@@ -16,7 +43,7 @@
  },
  {
  "cell_type": "code",
- "execution_count": 8,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -35,7 +62,7 @@
  },
  {
  "cell_type": "code",
- "execution_count": 9,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -51,7 +78,7 @@
  },
  {
  "cell_type": "code",
- "execution_count": 10,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -86,7 +113,7 @@
  },
  {
  "cell_type": "code",
- "execution_count": 11,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -102,7 +129,7 @@
  },
  {
  "cell_type": "code",
- "execution_count": 24,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -158,27 +185,9 @@
  },
  {
  "cell_type": "code",
- "execution_count": 25,
+ "execution_count": null,
  "metadata": {},
- "outputs": [
- {
- "ename": "KeyboardInterrupt",
- "evalue": "",
- "output_type": "error",
- "traceback": [
- "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
- "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
- "\u001b[1;32m/Users/matthew/Documents/nenadb/dataloader/main.ipynb Cell 10\u001b[0m line \u001b[0;36m1\n\u001b[0;32m----> <a href='vscode-notebook-cell:/Users/matthew/Documents/nenadb/dataloader/main.ipynb#X10sZmlsZQ%3D%3D?line=0'>1</a>\u001b[0m save_data(subsets)\n",
- "\u001b[1;32m/Users/matthew/Documents/nenadb/dataloader/main.ipynb Cell 10\u001b[0m line \u001b[0;36m2\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/matthew/Documents/nenadb/dataloader/main.ipynb#X10sZmlsZQ%3D%3D?line=23'>24</a>\u001b[0m f\u001b[39m.\u001b[39mwrite(response\u001b[39m.\u001b[39mcontent)\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/matthew/Documents/nenadb/dataloader/main.ipynb#X10sZmlsZQ%3D%3D?line=24'>25</a>\u001b[0m f\u001b[39m.\u001b[39mflush()\n\u001b[0;32m---> <a href='vscode-notebook-cell:/Users/matthew/Documents/nenadb/dataloader/main.ipynb#X10sZmlsZQ%3D%3D?line=25'>26</a>\u001b[0m audio \u001b[39m=\u001b[39m AudioSegment\u001b[39m.\u001b[39;49mfrom_file(f\u001b[39m.\u001b[39;49mname)\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/matthew/Documents/nenadb/dataloader/main.ipynb#X10sZmlsZQ%3D%3D?line=26'>27</a>\u001b[0m audio \u001b[39m=\u001b[39m audio\u001b[39m.\u001b[39mset_frame_rate(\u001b[39m48000\u001b[39m)\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/matthew/Documents/nenadb/dataloader/main.ipynb#X10sZmlsZQ%3D%3D?line=27'>28</a>\u001b[0m audio_file_name \u001b[39m=\u001b[39m \u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mnena_speech_\u001b[39m\u001b[39m{\u001b[39;00mexample\u001b[39m.\u001b[39mid\u001b[39m}\u001b[39;00m\u001b[39m.mp3\u001b[39m\u001b[39m\"\u001b[39m\n",
- "File \u001b[0;32m~/Documents/nenadb/dataloader/venv/lib/python3.11/site-packages/pydub/audio_segment.py:728\u001b[0m, in \u001b[0;36mAudioSegment.from_file\u001b[0;34m(cls, file, format, codec, parameters, start_second, duration, **kwargs)\u001b[0m\n\u001b[1;32m 726\u001b[0m info \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n\u001b[1;32m 727\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m--> 728\u001b[0m info \u001b[39m=\u001b[39m mediainfo_json(orig_file, read_ahead_limit\u001b[39m=\u001b[39;49mread_ahead_limit)\n\u001b[1;32m 729\u001b[0m \u001b[39mif\u001b[39;00m info:\n\u001b[1;32m 730\u001b[0m audio_streams \u001b[39m=\u001b[39m [x \u001b[39mfor\u001b[39;00m x \u001b[39min\u001b[39;00m info[\u001b[39m'\u001b[39m\u001b[39mstreams\u001b[39m\u001b[39m'\u001b[39m]\n\u001b[1;32m 731\u001b[0m \u001b[39mif\u001b[39;00m x[\u001b[39m'\u001b[39m\u001b[39mcodec_type\u001b[39m\u001b[39m'\u001b[39m] \u001b[39m==\u001b[39m \u001b[39m'\u001b[39m\u001b[39maudio\u001b[39m\u001b[39m'\u001b[39m]\n",
- "File \u001b[0;32m~/Documents/nenadb/dataloader/venv/lib/python3.11/site-packages/pydub/utils.py:275\u001b[0m, in \u001b[0;36mmediainfo_json\u001b[0;34m(filepath, read_ahead_limit)\u001b[0m\n\u001b[1;32m 273\u001b[0m command \u001b[39m=\u001b[39m [prober, \u001b[39m'\u001b[39m\u001b[39m-of\u001b[39m\u001b[39m'\u001b[39m, \u001b[39m'\u001b[39m\u001b[39mjson\u001b[39m\u001b[39m'\u001b[39m] \u001b[39m+\u001b[39m command_args\n\u001b[1;32m 274\u001b[0m res \u001b[39m=\u001b[39m Popen(command, stdin\u001b[39m=\u001b[39mstdin_parameter, stdout\u001b[39m=\u001b[39mPIPE, stderr\u001b[39m=\u001b[39mPIPE)\n\u001b[0;32m--> 275\u001b[0m output, stderr \u001b[39m=\u001b[39m res\u001b[39m.\u001b[39;49mcommunicate(\u001b[39minput\u001b[39;49m\u001b[39m=\u001b[39;49mstdin_data)\n\u001b[1;32m 276\u001b[0m output \u001b[39m=\u001b[39m output\u001b[39m.\u001b[39mdecode(\u001b[39m\"\u001b[39m\u001b[39mutf-8\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39m'\u001b[39m\u001b[39mignore\u001b[39m\u001b[39m'\u001b[39m)\n\u001b[1;32m 277\u001b[0m stderr \u001b[39m=\u001b[39m stderr\u001b[39m.\u001b[39mdecode(\u001b[39m\"\u001b[39m\u001b[39mutf-8\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39m'\u001b[39m\u001b[39mignore\u001b[39m\u001b[39m'\u001b[39m)\n",
- "File \u001b[0;32m/opt/homebrew/Cellar/python@3.11/3.11.5/Frameworks/Python.framework/Versions/3.11/lib/python3.11/subprocess.py:1209\u001b[0m, in \u001b[0;36mPopen.communicate\u001b[0;34m(self, input, timeout)\u001b[0m\n\u001b[1;32m 1206\u001b[0m endtime \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n\u001b[1;32m 1208\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[0;32m-> 1209\u001b[0m stdout, stderr \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_communicate(\u001b[39minput\u001b[39;49m, endtime, timeout)\n\u001b[1;32m 1210\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mKeyboardInterrupt\u001b[39;00m:\n\u001b[1;32m 1211\u001b[0m \u001b[39m# https://bugs.python.org/issue25942\u001b[39;00m\n\u001b[1;32m 1212\u001b[0m \u001b[39m# See the detailed comment in .wait().\u001b[39;00m\n\u001b[1;32m 1213\u001b[0m \u001b[39mif\u001b[39;00m timeout \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n",
- "File \u001b[0;32m/opt/homebrew/Cellar/python@3.11/3.11.5/Frameworks/Python.framework/Versions/3.11/lib/python3.11/subprocess.py:2108\u001b[0m, in \u001b[0;36mPopen._communicate\u001b[0;34m(self, input, endtime, orig_timeout)\u001b[0m\n\u001b[1;32m 2101\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_check_timeout(endtime, orig_timeout,\n\u001b[1;32m 2102\u001b[0m stdout, stderr,\n\u001b[1;32m 2103\u001b[0m skip_check_and_raise\u001b[39m=\u001b[39m\u001b[39mTrue\u001b[39;00m)\n\u001b[1;32m 2104\u001b[0m \u001b[39mraise\u001b[39;00m \u001b[39mRuntimeError\u001b[39;00m( \u001b[39m# Impossible :)\u001b[39;00m\n\u001b[1;32m 2105\u001b[0m \u001b[39m'\u001b[39m\u001b[39m_check_timeout(..., skip_check_and_raise=True) \u001b[39m\u001b[39m'\u001b[39m\n\u001b[1;32m 2106\u001b[0m \u001b[39m'\u001b[39m\u001b[39mfailed to raise TimeoutExpired.\u001b[39m\u001b[39m'\u001b[39m)\n\u001b[0;32m-> 2108\u001b[0m ready \u001b[39m=\u001b[39m selector\u001b[39m.\u001b[39;49mselect(timeout)\n\u001b[1;32m 2109\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_check_timeout(endtime, orig_timeout, stdout, stderr)\n\u001b[1;32m 2111\u001b[0m \u001b[39m# XXX Rewrite these to use non-blocking I/O on the file\u001b[39;00m\n\u001b[1;32m 2112\u001b[0m \u001b[39m# objects; they are no longer using C stdio!\u001b[39;00m\n",
- "File \u001b[0;32m/opt/homebrew/Cellar/python@3.11/3.11.5/Frameworks/Python.framework/Versions/3.11/lib/python3.11/selectors.py:415\u001b[0m, in \u001b[0;36m_PollLikeSelector.select\u001b[0;34m(self, timeout)\u001b[0m\n\u001b[1;32m 413\u001b[0m ready \u001b[39m=\u001b[39m []\n\u001b[1;32m 414\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[0;32m--> 415\u001b[0m fd_event_list \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_selector\u001b[39m.\u001b[39mpoll(timeout)\n\u001b[1;32m 416\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mInterruptedError\u001b[39;00m:\n\u001b[1;32m 417\u001b[0m \u001b[39mreturn\u001b[39;00m ready\n",
- "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
- ]
- }
- ],
+ "outputs": [],
  "source": [
  "save_data(subsets)"
  ]
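
The new leading cell imports the builder and, judging by its execute_result, ends by evaluating a NENASpeech instance (the displayed source shows only the import). A rough script equivalent; the config_name keyword is an assumption about the datasets DatasetBuilder API, not something this commit exercises:

from nena_speech_1_0 import NENASpeech

# Assumption: the builder accepts config_name to select the
# 'urmi (christian)' config defined in BUILDER_CONFIGS.
builder = NENASpeech(config_name="urmi (christian)")
print(builder)  # <nena_speech_1_0.NENASpeech object at 0x...>
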
nena_speech_1_0.py CHANGED
@@ -1,11 +1,143 @@
  """ NENA Speech Dataset"""


+ import csv
+ import os
+ import json
+
+ import datasets
+ from datasets.utils.py_utils import size_str
+ from tqdm import tqdm
+
+
+ # _CITATION = """\
+ # """
+
+ # _HOMEPAGE = "https://commonvoice.mozilla.org/en/datasets"
+
+ # _LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"
+
+ # TODO: change this
+ _BASE_URL = "./"
+
+ _AUDIO_URL = _BASE_URL + "audio/{dialect}/{split}.tar"
+
+ _TRANSCRIPT_URL = _BASE_URL + "transcript/{dialect}/{split}.tsv"
+
  import datasets

  class NENASpeechConfig(datasets.BuilderConfig):
  """BuilderConfig for NENASpeech."""
- pass
+ def __init__(self, name, version, **kwargs):
+ self.language = kwargs.pop("language", None)
+ description = (
+ f"This is a test. "
+ )
+ super(NENASpeechConfig, self).__init__(
+ name=name,
+ version=datasets.Version(version),
+ description=description,
+ **kwargs,
+ )

  class NENASpeech(datasets.GeneratorBasedBuilder):
- pass
+ DEFAULT_WRITER_BATCH_SIZE = 1000
+
+ BUILDER_CONFIGS = [
+ NENASpeechConfig(
+ name='urmi (christian)',
+ version='1.0.0',
+ language='assyrian',
+ )
+ # for lang, lang_stats in STATS["locales"].items()
+ ]
+
+ def _info(self):
+ # total_languages = len(STATS["locales"])
+ # total_valid_hours = STATS["totalValidHrs"]
+ description = (
+ "description from _info"
+ # "Common Voice is Mozilla's initiative to help teach machines how real people speak. "
+ # f"The dataset currently consists of {total_valid_hours} validated hours of speech "
+ # f" in {total_languages} languages, but more voices and languages are always added."
+ )
+ features = datasets.Features(
+ {
+ "age": datasets.Value("string"),
+ "transcription": datasets.Value("string"),
+ "translation": datasets.Value("string"),
+ "path": datasets.Value("string"),
+ }
+ )
+
+ return datasets.DatasetInfo(
+ description=description,
+ # citation=_CITATION,
+ # homepage=_HOMEPAGE,
+ # license=_LICENSE,
+ features=features,
+ supervised_keys=None,
+ version=self.config.version,
+ )
+
+ def _split_generators(self, dl_manager):
+ dialect = self.config.name
+
+ audio_urls = {}
+ splits = ("train", "dev", "test", "other", "invalidated")
+ for split in splits:
+ audio_urls[split] = [_AUDIO_URL.format(dialect=dialect, split=split)]  # NOTE: list-wrapped; the commit stored a bare string, which the archives comprehension below would iterate character by character
+ archive_paths = dl_manager.download(audio_urls)
+ local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
+
+ meta_urls = {split: _TRANSCRIPT_URL.format(dialect=dialect, split=split) for split in splits}
+ meta_paths = dl_manager.download_and_extract(meta_urls)
+
+ split_generators = []
+ split_names = {
+ "train": datasets.Split.TRAIN,
+ "dev": datasets.Split.VALIDATION,
+ "test": datasets.Split.TEST,
+ }
+ for split in splits:
+ split_generators.append(
+ datasets.SplitGenerator(
+ name=split_names.get(split, split),
+ gen_kwargs={
+ "local_extracted_archive_paths": local_extracted_archive_paths.get(split),
+ "archives": [dl_manager.iter_archive(path) for path in archive_paths.get(split)],
+ "meta_path": meta_paths[split],
+ },
+ ),
+ )
+
+ return split_generators
+
+ def _generate_examples(self, local_extracted_archive_paths, archives, meta_path):
+ data_fields = list(self._info().features.keys())
+ metadata = {}
+ with open(meta_path, encoding="utf-8") as f:
+ reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+ for row in tqdm(reader, desc="Reading metadata..."):
+ if not row["path"].endswith(".mp3"):
+ row["path"] += ".mp3"
+ # accent -> accents in CV 8.0
+ if "accents" in row:
+ row["accent"] = row["accents"]
+ del row["accents"]
+ # if data is incomplete, fill with empty values
+ for field in data_fields:
+ if field not in row:
+ row[field] = ""
+ metadata[row["path"]] = row
+
+ for i, audio_archive in enumerate(archives):
+ for path, file in audio_archive:
+ _, filename = os.path.split(path)
+ if filename in metadata:
+ result = dict(metadata[filename])
+ # set the audio feature and the path to the extracted file
+ path = os.path.join(local_extracted_archive_paths[i], path) if local_extracted_archive_paths else path
+ result["audio"] = {"path": path, "bytes": file.read()}
+ result["path"] = path
+ yield path, result
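
With the script, the audio tars, and the transcript TSVs in place, the dataset should be loadable straight from the repo. A usage sketch (hedged: load_dataset with a local script path is the classic datasets pattern; newer releases may additionally require trust_remote_code=True):

from datasets import load_dataset

ds = load_dataset("./nena_speech_1_0.py", "urmi (christian)")
print(ds["train"][0]["transcription"])
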
release_stats.py ADDED
@@ -0,0 +1,50 @@
+ STATS = {
+ "bundleURLTemplate": "https://voice-prod-bundler-ee1969a6ce8178826482b88e843c335139bd3fb4.s3.amazonaws.com/cv-corpus-13.0-2023-03-09/{locale}.tar.gz",
+ "locales": {
+ "de": {
+ "duration": 4821107393,
+ "buckets": {
+ "dev": 16143,
+ "invalidated": 50705,
+ "other": 6381,
+ "reported": 9131,
+ "test": 16143,
+ "train": 540437,
+ "validated": 868264,
+ },
+ "reportedSentences": 9100,
+ "clips": 925350,
+ "splits": {
+ "accent": {"": 1},
+ "age": {
+ "twenties": 0.18,
+ "fourties": 0.17,
+ "": 0.32,
+ "thirties": 0.16,
+ "teens": 0.03,
+ "sixties": 0.02,
+ "fifties": 0.11,
+ "seventies": 0,
+ "eighties": 0,
+ "nineties": 0,
+ },
+ "gender": {"male": 0.59, "": 0.32, "female": 0.08, "other": 0.01},
+ },
+ "users": 17867,
+ "size": 33828262029,
+ "checksum": "71664fadd4189922f3c814889f640111e925fb511b290242e10e7a768bd7b1bb",
+ "avgDurationSecs": 5.21,
+ "validDurationSecs": 4523687.242,
+ "totalHrs": 1339.19,
+ "validHrs": 1256.57,
+ },
+ },
+ "totalDuration": 97709611853,
+ "totalValidDurationSecs": 63681475,
+ "totalHrs": 27141,
+ "totalValidHrs": 17689,
+ "version": "13.0.0",
+ "date": "2022-03-15",
+ "name": "Common Voice Corpus 13",
+ "multilingual": True,
+ }
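
STATS carries the per-locale bookkeeping that drives BUILDER_CONFIGS in the Common Voice script above. A small sketch that pulls out the same numbers the configs use:

from release_stats import STATS

for lang, s in STATS["locales"].items():
    valid_hr = float(s["validHrs"]) if s["validHrs"] else None
    total_hr = float(s["totalHrs"]) if s["totalHrs"] else None
    print(f"{lang}: {s['clips']} clips, {s['users']} speakers, "
          f"{valid_hr}/{total_hr} validated/total hours")
# de: 925350 clips, 17867 speakers, 1256.57/1339.19 validated/total hours
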
transcript/urmi (christian)/dev.tsv ADDED
@@ -0,0 +1,2 @@
+ age transcription translation path
+ 70's ʾa-mùt ⁺xábrələ?ˈ What is this all about?’ nena_speech_is5yh1hcxg6p3gd.mp3
transcript/urmi (christian)/test.tsv ADDED
@@ -0,0 +1,2 @@
+ age transcription translation path
+ 70's hì,ˈ ʾàyya꞊da ʾátxa. Yes, that too is such. nena_speech_c12wj7acuhfzube.mp3
transcript/urmi (christian)/train.tsv ADDED
@@ -0,0 +1,2 @@
+ age transcription translation path
+ 70's ʾət-k̭àšə,ˈ of priests. nena_speech_6rcr536rfodtmog.mp3
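
Each transcript TSV has one header row (age, transcription, translation, path) and one clip per row, keyed to the mp3 name inside the matching tar. Reading it the same way _generate_examples does:

import csv

with open("transcript/urmi (christian)/train.tsv", encoding="utf-8") as f:
    for row in csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE):
        print(row["path"], "->", row["transcription"], "/", row["translation"])
# nena_speech_6rcr536rfodtmog.mp3 -> ʾət-k̭àšə,ˈ / of priests.
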