"""VoxCeleb audio-visual human speech dataset."""

from hashlib import sha256
from itertools import repeat
from multiprocessing import Manager, Pool, Process
from pathlib import Path
from queue import Empty
from shutil import copyfileobj

import pandas as pd
import requests

import datasets
import urllib3


urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

_CITATION = """\
@Article{Nagrani19,
author = "Arsha Nagrani and Joon~Son Chung and Weidi Xie and Andrew Zisserman",
title = "Voxceleb: Large-scale speaker verification in the wild",
journal = "Computer Speech and Language",
year = "2019",
publisher = "Elsevier",
}

@InProceedings{Chung18b,
author = "Chung, J.~S. and Nagrani, A. and Zisserman, A.",
title = "VoxCeleb2: Deep Speaker Recognition",
booktitle = "INTERSPEECH",
year = "2018",
}

@InProceedings{Nagrani17,
author = "Nagrani, A. and Chung, J.~S. and Zisserman, A.",
title = "VoxCeleb: a large-scale speaker identification dataset",
booktitle = "INTERSPEECH",
year = "2017",
}
"""

_DESCRIPTION = """\
VoxCeleb is an audio-visual dataset consisting of short clips of human speech, extracted from interview videos uploaded to YouTube.
"""

_URL = "https://mm.kaist.ac.kr/datasets/voxceleb"

_URLS = {
    "video": {
        "dev": (
            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_mp4_1.zip",
            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_mp4_2.zip",
            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_mp4_3.zip",
            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_mp4_4.zip",
            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_mp4_5.zip",
            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_mp4_6.zip",
        ),
        "test": (
            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_test_mp4.zip",
        ),
    },
    "audio1": {
        "dev": (
            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox1/vox1_dev_wav.zip",
        ),
        "test": (
            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox1/vox1_test_wav.zip",
        ),
    },
    "audio2": {
        "dev": (
            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_aac_1.zip",
            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_aac_2.zip",
        ),
        "test": (
            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_test_aac.zip",
        ),
    },
}

_DATASET_IDS = {"video": "vox2", "audio1": "vox1", "audio2": "vox2"}

# Map the first URL of each multi-part archive to the full, ordered tuple of
# its part URLs; `download_custom` below receives that first URL as a
# placeholder and resolves it to every part through this table.
_PLACEHOLDER_MAPS = dict(
    (urls[split][0], urls[split])
    for urls in _URLS.values()
    for split in ("dev", "test")
)
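
# For illustration, the "audio2" dev entry maps its first part to both parts
# (URLs abbreviated here for readability):
#
#   _PLACEHOLDER_MAPS[".../vox2/vox2_aac_1.zip"]
#   # -> (".../vox2/vox2_aac_1.zip", ".../vox2/vox2_aac_2.zip")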


def _mp_download(
    url,
    tmp_path,
    resume_pos,
    length,
    queue,
):
    # Skip parts that a previous (interrupted) run already fetched completely.
    if length == resume_pos:
        return
    with open(tmp_path, "ab" if resume_pos else "wb") as tmp:
        headers = {}
        if resume_pos != 0:
            # Resume mid-file: ask the server for the remaining byte range only.
            headers["Range"] = f"bytes={resume_pos}-"
        response = requests.get(url, headers=headers, stream=True)
        if 200 <= response.status_code < 300:
            for chunk in response.iter_content(chunk_size=65536):
                # Report progress to the shared queue, then persist the chunk.
                queue.put(len(chunk))
                tmp.write(chunk)
        else:
            raise ConnectionError("failed to fetch dataset")
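
# Note on the status check above: a server that honors the Range header
# replies "206 Partial Content" rather than "200 OK", so the 2xx test accepts
# both full and resumed transfers. A minimal sketch of the same idea in
# isolation (the URL and byte offset are placeholders):
#
#   resp = requests.get("https://example.com/big.zip",
#                       headers={"Range": "bytes=1048576-"}, stream=True)
#   assert resp.status_code in (200, 206)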


class VoxCeleb(datasets.GeneratorBasedBuilder):
    """VoxCeleb is an unlabeled dataset consisting of short clips of human speech from interviews on YouTube."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="video", version=VERSION, description="Video clips of human speech"
        ),
        datasets.BuilderConfig(
            name="audio", version=VERSION, description="Audio clips of human speech"
        ),
        datasets.BuilderConfig(
            name="audio1",
            version=datasets.Version("1.0.0"),
            description="Audio clips of human speech from VoxCeleb1",
        ),
        datasets.BuilderConfig(
            name="audio2",
            version=datasets.Version("2.0.0"),
            description="Audio clips of human speech from VoxCeleb2",
        ),
    ]

    def _info(self):
        features = {
            "file": datasets.Value("string"),
            "file_format": datasets.Value("string"),
            "dataset_id": datasets.Value("string"),
            "speaker_id": datasets.Value("string"),
            "speaker_gender": datasets.Value("string"),
            "video_id": datasets.Value("string"),
            "clip_index": datasets.Value("int32"),
        }
        if self.config.name == "audio1":
            # Extra speaker metadata is only available for VoxCeleb1.
            features["speaker_name"] = datasets.Value("string")
            features["speaker_nationality"] = datasets.Value("string")
        if self.config.name.startswith("audio"):
            features["audio"] = datasets.Audio(sampling_rate=16000)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            homepage=_URL,
            supervised_keys=datasets.info.SupervisedKeysData("file", "speaker_id"),
            features=datasets.Features(features),
            citation=_CITATION,
        )
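
    # A yielded example matching the schema above looks roughly like this
    # (illustrative values, not taken from the real data):
    #
    #   {"file": ".../wav/id10001/1zcIwhmdeo4/00001.wav",
    #    "file_format": "wav", "dataset_id": "vox1",
    #    "speaker_id": "id10001", "speaker_gender": "m",
    #    "video_id": "1zcIwhmdeo4", "clip_index": 1}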

    def _split_generators(self, dl_manager):
        targets = (
            ["audio1", "audio2"] if self.config.name == "audio" else [self.config.name]
        )

        def download_custom(placeholder_url, path):
            nonlocal dl_manager
            sources = _PLACEHOLDER_MAPS[placeholder_url]
            tmp_paths = []
            lengths = []
            start_positions = []
            for url in sources:
                head = requests.head(
                    url, timeout=5, stream=True, allow_redirects=True, verify=False
                )
                if head.status_code == 401:
                    raise ValueError("failed to authenticate with VoxCeleb host")
                if not 200 <= head.status_code < 300:
                    raise ValueError("failed to fetch dataset")
                content_length = head.headers.get("Content-Length")
                if content_length is None:
                    raise ValueError("expected non-empty Content-Length")
                content_length = int(content_length)
                # One temporary file per part, named after the URL's hash so an
                # interrupted download can be identified and resumed later.
                tmp_path = Path(path + "." + sha256(url.encode("utf-8")).hexdigest())
                tmp_paths.append(tmp_path)
                lengths.append(content_length)
                start_positions.append(
                    tmp_path.stat().st_size
                    if tmp_path.exists() and dl_manager.download_config.resume_download
                    else 0
                )

            def progress(q, cur, total):
                # Aggregate byte counts from all worker processes into one bar.
                with datasets.utils.logging.tqdm(
                    unit="B",
                    unit_scale=True,
                    total=total,
                    initial=cur,
                    desc="Downloading",
                    disable=not datasets.utils.logging.is_progress_bar_enabled(),
                ) as bar:
                    while cur < total:
                        try:
                            added = q.get(timeout=1)
                            bar.update(added)
                            cur += added
                        except Empty:
                            continue

            manager = Manager()
            q = manager.Queue()
            with Pool(len(sources)) as pool:
                proc = Process(
                    target=progress,
                    args=(q, sum(start_positions), sum(lengths)),
                    daemon=True,
                )
                proc.start()
                # Fetch all parts in parallel, one worker per part.
                pool.starmap(
                    _mp_download,
                    zip(
                        sources,
                        tmp_paths,
                        start_positions,
                        lengths,
                        repeat(q),
                    ),
                )
                pool.close()
                proc.join()
            # The parts are consecutive byte ranges of a single archive:
            # concatenate them in order into the destination file, then drop
            # the temporaries.
            with open(path, "wb") as out:
                for tmp_path in tmp_paths:
                    with open(tmp_path, "rb") as tmp:
                        copyfileobj(tmp, out)
                    tmp_path.unlink()
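
        # This mirrors how VoxCeleb is distributed upstream: the large archives
        # are split into parts that the official instructions join with
        # something like `cat vox2_mp4_* > vox2_mp4.zip` before unzipping.
        # Concatenating here lets dl_manager.extract() treat the result as one
        # ordinary zip file.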

        metadata = dl_manager.download(
            dict(
                (
                    target,
                    f"https://mm.kaist.ac.kr/datasets/voxceleb/meta/{_DATASET_IDS[target]}_meta.csv",
                )
                for target in targets
            )
        )

        # Only the first part URL of each archive is passed on; download_custom
        # expands it to every part via _PLACEHOLDER_MAPS and writes a single
        # concatenated archive, which is then extracted.
        mapped_paths = dl_manager.extract(
            dl_manager.download_custom(
                dict(
                    (
                        placeholder_key,
                        dict(
                            (target, _URLS[target][placeholder_key][0])
                            for target in targets
                        ),
                    )
                    for placeholder_key in ("dev", "test")
                ),
                download_custom,
            )
        )

        # The upstream "dev" portion is exposed as the train split.
        return [
            datasets.SplitGenerator(
                name="train",
                gen_kwargs={
                    "paths": mapped_paths["dev"],
                    "meta_paths": metadata,
                },
            ),
            datasets.SplitGenerator(
                name="test",
                gen_kwargs={
                    "paths": mapped_paths["test"],
                    "meta_paths": metadata,
                },
            ),
        ]

    def _generate_examples(self, paths, meta_paths):
        key = 0
        for conf in paths:
            dataset_id = "vox1" if conf == "audio1" else "vox2"
            # VoxCeleb1 metadata is tab-separated; VoxCeleb2 uses " ," as its
            # delimiter, which requires the slower python engine.
            meta = pd.read_csv(
                meta_paths[conf],
                sep="\t" if conf == "audio1" else " ,",
                index_col=0,
                engine="python",
            )

            # Each extracted archive holds a single top-level directory named
            # after the media format (e.g. "wav", "aac" or "mp4"), then one
            # directory per speaker, per source video, and per clip.
            dataset_path = next(Path(paths[conf]).iterdir())
            dataset_format = dataset_path.name
            for speaker_path in dataset_path.iterdir():
                speaker = speaker_path.name
                speaker_info = meta.loc[speaker]
                for video in speaker_path.iterdir():
                    video_id = video.name
                    for clip in video.iterdir():
                        clip_index = int(clip.stem)
                        info = {
                            "file": str(clip),
                            "file_format": dataset_format,
                            "dataset_id": dataset_id,
                            "speaker_id": speaker,
                            "speaker_gender": speaker_info["Gender"],
                            "video_id": video_id,
                            "clip_index": clip_index,
                        }
                        if dataset_id == "vox1":
                            info["speaker_name"] = speaker_info["VGGFace1 ID"]
                            info["speaker_nationality"] = speaker_info["Nationality"]
                        if conf.startswith("audio"):
                            # Point the Audio feature at the same file path so
                            # decoding happens lazily on access.
                            info["audio"] = info["file"]
                        yield key, info
                        key += 1
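

# Usage sketch (hypothetical local path; the exact arguments depend on where
# this script is saved and on your `datasets` version):
#
#   import datasets
#
#   ds = datasets.load_dataset("path/to/voxceleb.py", "audio1")
#   print(ds["train"][0]["speaker_id"], ds["train"][0]["clip_index"])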