ProgramComputer committed
Commit 9654d7a
1 Parent(s): 1171a05

Create test.py

Files changed (1)
  1. test.py +343 -0
test.py ADDED
@@ -0,0 +1,343 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Datasets Authors and Arjun Barrett.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""VoxCeleb audio-visual human speech dataset."""
+
+from hashlib import sha256
+from itertools import repeat
+from multiprocessing import Manager, Pool, Process
+from pathlib import Path
+from shutil import copyfileobj
+
+import pandas as pd
+import requests
+import urllib3
+
+import datasets
+
+# The HEAD requests below are made with verify=False, so silence the
+# resulting InsecureRequestWarning from urllib3.
+urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+
+_CITATION = """\
+@Article{Nagrani19,
+  author = "Arsha Nagrani and Joon~Son Chung and Weidi Xie and Andrew Zisserman",
+  title = "Voxceleb: Large-scale speaker verification in the wild",
+  journal = "Computer Speech and Language",
+  year = "2019",
+  publisher = "Elsevier",
+}
+
+@InProceedings{Chung18b,
+  author = "Chung, J.~S. and Nagrani, A. and Zisserman, A.",
+  title = "VoxCeleb2: Deep Speaker Recognition",
+  booktitle = "INTERSPEECH",
+  year = "2018",
+}
+
+@InProceedings{Nagrani17,
+  author = "Nagrani, A. and Chung, J.~S. and Zisserman, A.",
+  title = "VoxCeleb: a large-scale speaker identification dataset",
+  booktitle = "INTERSPEECH",
+  year = "2017",
+}
+"""
+
+_DESCRIPTION = """\
+VoxCeleb is an audio-visual dataset consisting of short clips of human speech, extracted from interview videos uploaded to YouTube.
+"""
+
+_URL = "https://mm.kaist.ac.kr/datasets/voxceleb"
+
+_URLS = {
+    "video": {
+        "placeholder": "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_mp4_parta",
+        "dev": (
+            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_mp4_partaa",
+            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_mp4_partab",
+            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_mp4_partac",
+            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_mp4_partad",
+            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_mp4_partae",
+            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_mp4_partaf",
+            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_mp4_partag",
+            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_mp4_partah",
+            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_mp4_partai",
+        ),
+        "test": "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_test_mp4.zip",
+    },
+    "audio1": {
+        "placeholder": "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox1/vox1_dev_wav_parta",
+        "dev": (
+            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox1/vox1_dev_wav_partaa",
+            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox1/vox1_dev_wav_partab",
+            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox1/vox1_dev_wav_partac",
+            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox1/vox1_dev_wav_partad",
+        ),
+        "test": "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox1/vox1_test_wav.zip",
+    },
+    "audio2": {
+        "placeholder": "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_aac_parta",
+        "dev": (
+            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_aac_partaa",
+            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_aac_partab",
+            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_aac_partac",
+            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_aac_partad",
+            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_aac_partae",
+            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_aac_partaf",
+            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_aac_partag",
+            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_aac_partah",
+        ),
+        "test": "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_test_aac.zip",
+    },
+}
+
+_DATASET_IDS = {"video": "vox2", "audio1": "vox1", "audio2": "vox2"}
+
+_PLACEHOLDER_MAPS = dict(
+    value
+    for urls in _URLS.values()
+    for value in ((urls["placeholder"], urls["dev"]), (urls["test"], (urls["test"],)))
+)
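+# For illustration, the mapping pairs each placeholder (or test) URL with the
+# tuple of real URLs whose parts are fetched and concatenated, e.g. (abbreviated):
+#   ".../vox1_dev_wav_parta" -> (".../vox1_dev_wav_partaa", ..., ".../vox1_dev_wav_partad")
+#   ".../vox1_test_wav.zip"  -> (".../vox1_test_wav.zip",)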
+
+
+def _mp_download(
+    url,
+    tmp_path,
+    resume_pos,
+    length,
+    queue,
+):
+    if length == resume_pos:
+        return
+    with open(tmp_path, "ab" if resume_pos else "wb") as tmp:
+        headers = {}
+        if resume_pos != 0:
+            # Resume an interrupted download by requesting only the remaining bytes.
+            headers["Range"] = f"bytes={resume_pos}-"
+        response = requests.get(url, headers=headers, stream=True)
+        if 200 <= response.status_code < 300:
+            for chunk in response.iter_content(chunk_size=65536):
+                queue.put(len(chunk))
+                tmp.write(chunk)
+        else:
+            raise ConnectionError("failed to fetch dataset")
+
+
+class VoxCeleb(datasets.GeneratorBasedBuilder):
+    """VoxCeleb is an unlabeled dataset consisting of short clips of human speech from interviews on YouTube."""
+
+    VERSION = datasets.Version("1.0.0")
+
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(
+            name="video", version=VERSION, description="Video clips of human speech"
+        ),
+        datasets.BuilderConfig(
+            name="audio", version=VERSION, description="Audio clips of human speech"
+        ),
+        datasets.BuilderConfig(
+            name="audio1",
+            version=datasets.Version("1.0.0"),
+            description="Audio clips of human speech from VoxCeleb1",
+        ),
+        datasets.BuilderConfig(
+            name="audio2",
+            version=datasets.Version("2.0.0"),
+            description="Audio clips of human speech from VoxCeleb2",
+        ),
+    ]
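+    # The "audio" config covers both corpora: _split_generators expands it to
+    # ["audio1", "audio2"], while "audio1"/"audio2" select a single corpus.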
+
+    def _info(self):
+        features = {
+            "file": datasets.Value("string"),
+            "file_format": datasets.Value("string"),
+            "dataset_id": datasets.Value("string"),
+            "speaker_id": datasets.Value("string"),
+            "speaker_gender": datasets.Value("string"),
+            "video_id": datasets.Value("string"),
+            "clip_index": datasets.Value("int32"),
+        }
+        if self.config.name == "audio1":
+            features["speaker_name"] = datasets.Value("string")
+            features["speaker_nationality"] = datasets.Value("string")
+        if self.config.name.startswith("audio"):
+            features["audio"] = datasets.Audio(sampling_rate=16000)
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            homepage=_URL,
+            supervised_keys=datasets.info.SupervisedKeysData("file", "speaker_id"),
+            features=datasets.Features(features),
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        if dl_manager.is_streaming:
+            raise TypeError("Streaming is not supported for VoxCeleb")
+        targets = (
+            ["audio1", "audio2"] if self.config.name == "audio" else [self.config.name]
+        )
+
+        def download_custom(placeholder_url, path):
+            nonlocal dl_manager
+            sources = _PLACEHOLDER_MAPS[placeholder_url]
+            tmp_paths = []
+            lengths = []
+            start_positions = []
+            for url in sources:
+                head = requests.head(
+                    url, timeout=5, stream=True, allow_redirects=True, verify=False
+                )
+                if head.status_code == 401:
+                    raise ValueError("failed to authenticate with VoxCeleb host")
+                if head.status_code < 200 or head.status_code >= 300:
+                    raise ValueError("failed to fetch dataset")
+                content_length = head.headers.get("Content-Length")
+                if content_length is None:
+                    raise ValueError("expected non-empty Content-Length")
+                content_length = int(content_length)
+                tmp_path = Path(path + "." + sha256(url.encode("utf-8")).hexdigest())
+                tmp_paths.append(tmp_path)
+                lengths.append(content_length)
+                start_positions.append(
+                    tmp_path.stat().st_size
+                    if tmp_path.exists() and dl_manager.download_config.resume_download
+                    else 0
+                )
+
+            def progress(q, cur, total):
+                with datasets.utils.logging.tqdm(
+                    unit="B",
+                    unit_scale=True,
+                    total=total,
+                    initial=cur,
+                    desc="Downloading",
+                    disable=not datasets.utils.logging.is_progress_bar_enabled(),
+                ) as bar:
+                    while cur < total:
+                        try:
+                            added = q.get(timeout=1)
+                            bar.update(added)
+                            cur += added
+                        except Exception:
+                            continue
+
+            manager = Manager()
+            q = manager.Queue()
+            with Pool(len(sources)) as pool:
+                proc = Process(
+                    target=progress,
+                    args=(q, sum(start_positions), sum(lengths)),
+                    daemon=True,
+                )
+                proc.start()
+                pool.starmap(
+                    _mp_download,
+                    zip(
+                        sources,
+                        tmp_paths,
+                        start_positions,
+                        lengths,
+                        repeat(q),
+                    ),
+                )
+                pool.close()
+                proc.join()
+            # Stitch the downloaded parts together into the final archive,
+            # removing each temporary part as it is consumed.
+            with open(path, "wb") as out:
+                for tmp_path in tmp_paths:
+                    with open(tmp_path, "rb") as tmp:
+                        copyfileobj(tmp, out)
+                    tmp_path.unlink()
+
+        metadata = dl_manager.download(
+            dict(
+                (
+                    target,
+                    f"https://mm.kaist.ac.kr/datasets/voxceleb/meta/{_DATASET_IDS[target]}_meta.csv",
+                )
+                for target in targets
+            )
+        )
+
+        # The placeholder URLs act as stable cache keys; download_custom resolves
+        # each one (via _PLACEHOLDER_MAPS) to the real multi-part archives.
+        mapped_paths = dl_manager.extract(
+            dl_manager.download_custom(
+                dict(
+                    (
+                        placeholder_key,
+                        dict(
+                            (target, _URLS[target][placeholder_key])
+                            for target in targets
+                        ),
+                    )
+                    for placeholder_key in ("placeholder", "test")
+                ),
+                download_custom,
+            )
+        )
+
+        return [
+            datasets.SplitGenerator(
+                name="train",
+                gen_kwargs={
+                    "paths": mapped_paths["placeholder"],
+                    "meta_paths": metadata,
+                },
+            ),
+            datasets.SplitGenerator(
+                name="test",
+                gen_kwargs={
+                    "paths": mapped_paths["test"],
+                    "meta_paths": metadata,
+                },
+            ),
+        ]
+
+    def _generate_examples(self, paths, meta_paths):
+        key = 0
+        for conf in paths:
+            dataset_id = "vox1" if conf == "audio1" else "vox2"
+            meta = pd.read_csv(
+                meta_paths[conf],
+                # The vox1 metadata is tab-separated; vox2 uses " ," as its delimiter.
+                sep="\t" if conf == "audio1" else " ,",
+                index_col=0,
+                engine="python",
+            )
+            dataset_path = next(Path(paths[conf]).iterdir())
+            dataset_format = dataset_path.name
+            for speaker_path in dataset_path.iterdir():
+                speaker = speaker_path.name
+                speaker_info = meta.loc[speaker]
+                for video in speaker_path.iterdir():
+                    video_id = video.name
+                    for clip in video.iterdir():
+                        clip_index = int(clip.stem)
+                        info = {
+                            "file": str(clip),
+                            "file_format": dataset_format,
+                            "dataset_id": dataset_id,
+                            "speaker_id": speaker,
+                            "speaker_gender": speaker_info["Gender"],
+                            "video_id": video_id,
+                            "clip_index": clip_index,
+                        }
+                        if dataset_id == "vox1":
+                            info["speaker_name"] = speaker_info["VGGFace1 ID"]
+                            info["speaker_nationality"] = speaker_info["Nationality"]
+                        if conf.startswith("audio"):
+                            info["audio"] = info["file"]
+                        yield key, info
+                        key += 1
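
A minimal usage sketch (assuming this script is hosted as the ProgramComputer/voxceleb dataset repository; note that _split_generators rejects streaming, and the full download is very large):

    import datasets

    # Build the VoxCeleb1 audio config and read its held-out "test" split.
    ds = datasets.load_dataset("ProgramComputer/voxceleb", "audio1", split="test")
    print(ds[0]["speaker_id"], ds[0]["file_format"], ds[0]["clip_index"])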