carlosdanielhernandezmena committed on
Commit 2d47833 • 1 Parent(s): 444e7e4

Creating the repo structure along with the dataloader

commonvoice_benchmark_catalan_accents.py ADDED
from collections import defaultdict
import os
import json
import csv

import datasets

_NAME = "commonvoice_benchmark_catalan_accents"
_VERSION = "1.0.0"
_AUDIO_EXTENSIONS = ".mp3"

_DESCRIPTION = """
A new presentation of the corpus Catalan Common Voice v16.1 (metadata-annotated version), with the splits redefined to benchmark ASR models on various Catalan accents.
"""

_CITATION = """
@misc{armentanoaccents2024,
      title={Common Voice Benchmark Catalan Accents},
      author={Armentano, Carme},
      publisher={Barcelona Supercomputing Center},
      year={2024},
      url={https://huggingface.co/datasets/projecte-aina/commonvoice_benchmark_catalan_accents},
}
"""

_HOMEPAGE = "https://huggingface.co/datasets/projecte-aina/commonvoice_benchmark_catalan_accents"

_LICENSE = "CC-BY-4.0, See https://creativecommons.org/licenses/by/4.0/"

_BASE_DATA_DIR = "corpus/"

_METADATA_BALEARIC_FEM = os.path.join(_BASE_DATA_DIR, "files", "balearic_female.tsv")
_METADATA_BALEARIC_MALE = os.path.join(_BASE_DATA_DIR, "files", "balearic_male.tsv")

_METADATA_CENTRAL_FEMALE = os.path.join(_BASE_DATA_DIR, "files", "central_female.tsv")
_METADATA_CENTRAL_MALE = os.path.join(_BASE_DATA_DIR, "files", "central_male.tsv")

_METADATA_NORTHERN_FEMALE = os.path.join(_BASE_DATA_DIR, "files", "northern_female.tsv")
_METADATA_NORTHERN_MALE = os.path.join(_BASE_DATA_DIR, "files", "northern_male.tsv")

_METADATA_NORTHWESTERN_FEMALE = os.path.join(_BASE_DATA_DIR, "files", "northwestern_female.tsv")
_METADATA_NORTHWESTERN_MALE = os.path.join(_BASE_DATA_DIR, "files", "northwestern_male.tsv")

_METADATA_VALENCIAN_FEMALE = os.path.join(_BASE_DATA_DIR, "files", "valencian_female.tsv")
_METADATA_VALENCIAN_MALE = os.path.join(_BASE_DATA_DIR, "files", "valencian_male.tsv")

_TARS_REPO = os.path.join(_BASE_DATA_DIR, "files", "tars_repo.paths")


class CommonVoiceBenchmarkCatalanAccentsConfig(datasets.BuilderConfig):
    """BuilderConfig for the Common Voice Benchmark Catalan Accents."""

    def __init__(self, name, **kwargs):
        name = _NAME
        super().__init__(name=name, **kwargs)


class CommonVoiceBenchmarkCatalanAccents(datasets.GeneratorBasedBuilder):
    """Common Voice Benchmark Catalan Accents."""

    VERSION = datasets.Version(_VERSION)
    BUILDER_CONFIGS = [
        CommonVoiceBenchmarkCatalanAccentsConfig(
            name=_NAME,
            version=datasets.Version(_VERSION),
        )
    ]

    def _info(self):
        features = datasets.Features(
            {
                "audio": datasets.Audio(sampling_rate=16000),
                "client_id": datasets.Value("string"),
                "path": datasets.Value("string"),
                "sentence": datasets.Value("string"),
                "up_votes": datasets.Value("int32"),
                "down_votes": datasets.Value("int32"),
                "age": datasets.Value("string"),
                "gender": datasets.Value("string"),
                "accents": datasets.Value("string"),
                "variant": datasets.Value("string"),
                "locale": datasets.Value("string"),
                "segment": datasets.Value("string"),
                "mean quality": datasets.Value("string"),
                "stdev quality": datasets.Value("string"),
                "annotated_accent": datasets.Value("string"),
                "annotated_accent_agreement": datasets.Value("string"),
                "annotated_gender": datasets.Value("string"),
                "annotated_gender_agreement": datasets.Value("string"),
                "propagated_gender": datasets.Value("string"),
                "propagated_accents": datasets.Value("string"),
                "propagated_accents_norm": datasets.Value("string"),
                "variant_norm": datasets.Value("string"),
                "assigned_accent": datasets.Value("string"),
                "assigned_gender": datasets.Value("string"),
                "duration": datasets.Value("float32"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):

        metadata_balearic_fem = dl_manager.download_and_extract(_METADATA_BALEARIC_FEM)
        metadata_balearic_male = dl_manager.download_and_extract(_METADATA_BALEARIC_MALE)

        metadata_central_female = dl_manager.download_and_extract(_METADATA_CENTRAL_FEMALE)
        metadata_central_male = dl_manager.download_and_extract(_METADATA_CENTRAL_MALE)

        metadata_northern_female = dl_manager.download_and_extract(_METADATA_NORTHERN_FEMALE)
        metadata_northern_male = dl_manager.download_and_extract(_METADATA_NORTHERN_MALE)

        metadata_northwestern_female = dl_manager.download_and_extract(_METADATA_NORTHWESTERN_FEMALE)
        metadata_northwestern_male = dl_manager.download_and_extract(_METADATA_NORTHWESTERN_MALE)

        metadata_valencian_female = dl_manager.download_and_extract(_METADATA_VALENCIAN_FEMALE)
        metadata_valencian_male = dl_manager.download_and_extract(_METADATA_VALENCIAN_MALE)

        tars_repo = dl_manager.download_and_extract(_TARS_REPO)

        hash_tar_files = defaultdict(dict)

        # Every split draws from the same list of tar archives in tars_repo.paths.
        with open(tars_repo, 'r') as f:
            hash_tar_files['balearic_fem'] = [path.replace('\n', '') for path in f]
        with open(tars_repo, 'r') as f:
            hash_tar_files['balearic_male'] = [path.replace('\n', '') for path in f]

        with open(tars_repo, 'r') as f:
            hash_tar_files['central_female'] = [path.replace('\n', '') for path in f]
        with open(tars_repo, 'r') as f:
            hash_tar_files['central_male'] = [path.replace('\n', '') for path in f]

        with open(tars_repo, 'r') as f:
            hash_tar_files['northern_female'] = [path.replace('\n', '') for path in f]
        with open(tars_repo, 'r') as f:
            hash_tar_files['northern_male'] = [path.replace('\n', '') for path in f]

        with open(tars_repo, 'r') as f:
            hash_tar_files['northwestern_female'] = [path.replace('\n', '') for path in f]
        with open(tars_repo, 'r') as f:
            hash_tar_files['northwestern_male'] = [path.replace('\n', '') for path in f]

        with open(tars_repo, 'r') as f:
            hash_tar_files['valencian_female'] = [path.replace('\n', '') for path in f]
        with open(tars_repo, 'r') as f:
            hash_tar_files['valencian_male'] = [path.replace('\n', '') for path in f]

        hash_meta_paths = {
            "balearic_fem": metadata_balearic_fem,
            "balearic_male": metadata_balearic_male,
            "central_female": metadata_central_female,
            "central_male": metadata_central_male,
            "northern_female": metadata_northern_female,
            "northern_male": metadata_northern_male,
            "northwestern_female": metadata_northwestern_female,
            "northwestern_male": metadata_northwestern_male,
            "valencian_female": metadata_valencian_female,
            "valencian_male": metadata_valencian_male,
        }

        audio_paths = dl_manager.download(hash_tar_files)

        splits = ["balearic_fem", "balearic_male", "central_female", "central_male", "northern_female",
                  "northern_male", "northwestern_female", "northwestern_male", "valencian_female", "valencian_male"]
        local_extracted_audio_paths = (
            dl_manager.extract(audio_paths) if not dl_manager.is_streaming else
            {
                split: [None] * len(audio_paths[split]) for split in splits
            }
        )

        return [
            datasets.SplitGenerator(
                name="balearic_fem",
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["balearic_fem"]],
                    "local_extracted_archives_paths": local_extracted_audio_paths["balearic_fem"],
                    "metadata_paths": hash_meta_paths["balearic_fem"],
                }
            ),
            datasets.SplitGenerator(
                name="balearic_male",
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["balearic_male"]],
                    "local_extracted_archives_paths": local_extracted_audio_paths["balearic_male"],
                    "metadata_paths": hash_meta_paths["balearic_male"],
                }
            ),
            datasets.SplitGenerator(
                name="central_female",
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["central_female"]],
                    "local_extracted_archives_paths": local_extracted_audio_paths["central_female"],
                    "metadata_paths": hash_meta_paths["central_female"],
                }
            ),
            datasets.SplitGenerator(
                name="central_male",
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["central_male"]],
                    "local_extracted_archives_paths": local_extracted_audio_paths["central_male"],
                    "metadata_paths": hash_meta_paths["central_male"],
                }
            ),
            datasets.SplitGenerator(
                name="northern_female",
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["northern_female"]],
                    "local_extracted_archives_paths": local_extracted_audio_paths["northern_female"],
                    "metadata_paths": hash_meta_paths["northern_female"],
                }
            ),
            datasets.SplitGenerator(
                name="northern_male",
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["northern_male"]],
                    "local_extracted_archives_paths": local_extracted_audio_paths["northern_male"],
                    "metadata_paths": hash_meta_paths["northern_male"],
                }
            ),
            datasets.SplitGenerator(
                name="northwestern_female",
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["northwestern_female"]],
                    "local_extracted_archives_paths": local_extracted_audio_paths["northwestern_female"],
                    "metadata_paths": hash_meta_paths["northwestern_female"],
                }
            ),
            datasets.SplitGenerator(
                name="northwestern_male",
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["northwestern_male"]],
                    "local_extracted_archives_paths": local_extracted_audio_paths["northwestern_male"],
                    "metadata_paths": hash_meta_paths["northwestern_male"],
                }
            ),
            datasets.SplitGenerator(
                name="valencian_female",
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["valencian_female"]],
                    "local_extracted_archives_paths": local_extracted_audio_paths["valencian_female"],
                    "metadata_paths": hash_meta_paths["valencian_female"],
                }
            ),
            datasets.SplitGenerator(
                name="valencian_male",
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["valencian_male"]],
                    "local_extracted_archives_paths": local_extracted_audio_paths["valencian_male"],
                    "metadata_paths": hash_meta_paths["valencian_male"],
                }
            ),
        ]

    def _generate_examples(self, audio_archives, local_extracted_archives_paths, metadata_paths):

        features = ["client_id", "sentence", "up_votes", "down_votes", "age", "gender",
                    "accents", "variant", "locale", "segment", "mean quality", "stdev quality",
                    "annotated_accent", "annotated_accent_agreement", "annotated_gender",
                    "annotated_gender_agreement", "propagated_gender", "propagated_accents",
                    "propagated_accents_norm", "variant_norm", "assigned_accent", "assigned_gender",
                    "duration", "path"]

        with open(metadata_paths) as f:
            metadata = {x["path"]: x for x in csv.DictReader(f, delimiter="\t")}

        for audio_archive, local_extracted_archive_path in zip(audio_archives, local_extracted_archives_paths):
            for audio_filename, audio_file in audio_archive:
                # The TSVs key each row by file name, so rebuild "<basename>.mp3" from the tar member name.
                audio_id = os.path.splitext(os.path.basename(audio_filename))[0]
                audio_id = audio_id + _AUDIO_EXTENSIONS
                path = os.path.join(local_extracted_archive_path, audio_filename) if local_extracted_archive_path else audio_filename

                try:
                    yield audio_id, {
                        "path": audio_id,
                        **{feature: metadata[audio_id][feature] for feature in features},
                        "audio": {"path": path, "bytes": audio_file.read()},
                    }
                except KeyError:
                    # Skip clips that are in the archives but not in this split's metadata TSV.
                    continue
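For reference, a minimal usage sketch (not part of the committed file): the loader above exposes ten accent/gender splits, so a single split such as "balearic_fem" can be streamed directly, which avoids downloading and extracting every Common Voice tar archive up front. Depending on the installed version of the datasets library, script-based datasets like this one may also require passing trust_remote_code=True.

# Minimal usage sketch; assumes the dataset is published on the Hub under
# projecte-aina/commonvoice_benchmark_catalan_accents (the repo this loader targets).
from datasets import load_dataset

# Stream one of the ten splits defined in _split_generators(),
# e.g. "balearic_fem", "central_female", or "valencian_male".
ds = load_dataset(
    "projecte-aina/commonvoice_benchmark_catalan_accents",
    split="balearic_fem",
    streaming=True,
)

# Each example carries the audio bytes plus the metadata fields declared in _info().
sample = next(iter(ds))
print(sample["sentence"], sample["assigned_accent"], sample["assigned_gender"])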
balearic_female.tsv → corpus/files/balearic_female.tsv RENAMED
File without changes
balearic_male.tsv → corpus/files/balearic_male.tsv RENAMED
File without changes
central_female.tsv → corpus/files/central_female.tsv RENAMED
File without changes
central_male.tsv → corpus/files/central_male.tsv RENAMED
File without changes
northern_female.tsv → corpus/files/northern_female.tsv RENAMED
File without changes
northern_male.tsv → corpus/files/northern_male.tsv RENAMED
File without changes
northwestern_female.tsv → corpus/files/northwestern_female.tsv RENAMED
File without changes
northwestern_male.tsv → corpus/files/northwestern_male.tsv RENAMED
File without changes
corpus/files/tars_repo.paths ADDED
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_0.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_1.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_2.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_3.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_4.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_5.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_6.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_7.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_8.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_9.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_10.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_11.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_12.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_13.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_14.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_15.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_16.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_17.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_18.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_19.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_20.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_21.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_22.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_23.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_24.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_25.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_26.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_27.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_28.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_29.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_30.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_31.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_32.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_33.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_34.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_35.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_36.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_37.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_38.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_39.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_40.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_41.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_42.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_43.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_44.tar
https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0/resolve/main/audio/ca/validated/ca_validated_45.tar
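Every split defined by the loader iterates this same list of 46 validated Common Voice 17.0 Catalan archives; which clips end up in a given split is decided solely by the `path` column of that split's TSV, which `_generate_examples` uses as its lookup key. A minimal sketch of that lookup, assuming a local clone of this repository (the TSV below is just one of the files renamed in this commit):

# Sketch of the per-split metadata lookup performed by _generate_examples,
# assuming the repository has been cloned locally.
import csv
import os

with open(os.path.join("corpus", "files", "balearic_female.tsv")) as f:
    metadata = {row["path"]: row for row in csv.DictReader(f, delimiter="\t")}

# Tar members are matched by basename: the loader strips the member's directory
# and extension, re-appends ".mp3", and looks the result up in this dict.
print(len(metadata), "clips listed for the balearic_fem split")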
train.tsv → corpus/files/train.tsv RENAMED
File without changes
valencian_female.tsv → corpus/files/valencian_female.tsv RENAMED
File without changes
valencian_male.tsv → corpus/files/valencian_male.tsv RENAMED
File without changes