# coding=utf-8
"""AudioSet sound event classification dataset."""
import os
import json
import textwrap
import datasets
import itertools
import typing as tp
import pandas as pd
from pathlib import Path
from huggingface_hub import hf_hub_download
SAMPLE_RATE = 32_000
_HOMEPAGE = "https://huggingface.co/datasets/confit/audioset"
_BALANCED_TRAIN_FILENAME = 'balanced_train_segments.zip'
# Assumed archive name for the unbalanced split (mirrors the balanced naming);
# _split_generators refers to it even though only the 'balanced' config is
# registered below.
_UNBALANCED_TRAIN_FILENAME = 'unbalanced_train_segments.zip'
_EVAL_FILENAME = 'eval_segments.zip'
with open(hf_hub_download("huggingface/label-files", "audioset-id2label.json", repo_type="dataset"), "r") as f:
    ID2LABEL = json.load(f)
LABEL2ID = {v: k for k, v in ID2LABEL.items()}
# Preserve the JSON ordering so the ClassLabel name->index mapping is
# deterministic across runs (a set would depend on string hashing).
CLASSES = list(LABEL2ID.keys())
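# Illustrative shapes only (the actual entries come from audioset-id2label.json):
#   ID2LABEL -> {"0": "Speech", "1": "Male speech, man speaking", ...}
#   LABEL2ID -> {"Speech": "0", "Male speech, man speaking": "1", ...}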
class AudioSetConfig(datasets.BuilderConfig):
"""BuilderConfig for AudioSet."""
def __init__(self, features, **kwargs):
        super().__init__(version=datasets.Version("0.0.1", ""), **kwargs)
self.features = features
class AudioSet(datasets.GeneratorBasedBuilder):
BUILDER_CONFIGS = [
AudioSetConfig(
features=datasets.Features(
{
"file": datasets.Value("string"),
"audio": datasets.Audio(sampling_rate=SAMPLE_RATE),
"sound": datasets.Sequence(datasets.Value("string")),
"label": datasets.Sequence(datasets.features.ClassLabel(names=CLASSES)),
}
),
name="balanced",
description="",
),
]
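    # NOTE: _split_generators and _generate_examples also understand an
    # 'unbalanced' config name; only the 'balanced' config is registered here.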
DEFAULT_CONFIG_NAME = "balanced"
def _info(self):
        return datasets.DatasetInfo(
            description="AudioSet sound event classification dataset.",
            features=self.config.features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation="",
            task_templates=None,
        )
def _preprocess_metadata_csv(self, csv_file):
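        # The AudioSet segment CSVs begin with two comment lines followed by a
        # header line "# YTID, start_seconds, end_seconds, positive_labels":
        # skiprows=2 keeps that header row, and sep=', ' (with the python
        # engine) handles the comma-plus-space delimiter.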
df = pd.read_csv(csv_file, skiprows=2, sep=', ', engine='python')
df.rename(columns={'positive_labels': 'ids'}, inplace=True)
df['ids'] = [label.strip('\"').split(',') for label in df['ids']]
df['filename'] = (
'Y' + df['# YTID'] + '.wav'
)
return df[['filename', 'ids']]
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
if self.config.name == 'balanced':
archive_path = dl_manager.extract(_BALANCED_TRAIN_FILENAME)
elif self.config.name == 'unbalanced':
archive_path = dl_manager.extract(_UNBALANCED_TRAIN_FILENAME)
test_archive_path = dl_manager.extract(_EVAL_FILENAME)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN, gen_kwargs={"archive_path": archive_path, "split": "train"}
),
datasets.SplitGenerator(
name=datasets.Split.TEST, gen_kwargs={"archive_path": test_archive_path, "split": "test"}
),
]
def _generate_examples(self, archive_path, split=None):
extensions = ['.wav']
if split == 'train':
if self.config.name == 'balanced':
train_metadata_csv = f"{_HOMEPAGE}/resolve/main/metadata/balanced_train_segments.csv"
elif self.config.name == 'unbalanced':
train_metadata_csv = f"{_HOMEPAGE}/resolve/main/metadata/unbalanced_train_segments.csv"
metadata_df = self._preprocess_metadata_csv(train_metadata_csv) # ['filename', 'ids']
elif split == 'test':
test_metadata_csv = f"{_HOMEPAGE}/resolve/main/metadata/eval_segments.csv"
metadata_df = self._preprocess_metadata_csv(test_metadata_csv) # ['filename', 'ids']
class_labels_indices_df = pd.read_csv(
f"{_HOMEPAGE}/resolve/main/metadata/class_labels_indices.csv"
) # ['index', 'mid', 'display_name']
mid2label = {
row['mid']:row['display_name'] for idx, row in class_labels_indices_df.iterrows()
}
        def default_find_classes(audio_path):
            fileid = Path(audio_path).name
            ids = metadata_df.query(f'filename=="{fileid}"')['ids'].values.tolist()
            # `ids` is a list of mid lists; map each mid to its display name and
            # drop any mid missing from class_labels_indices.csv so the
            # ClassLabel feature never receives a None entry.
            return [mid2label[mid] for mid in flatten(ids) if mid in mid2label]
_, _walker = fast_scandir(archive_path, extensions, recursive=True)
        for guid, audio_path in enumerate(_walker):
            # Resolve the class names once and reuse them for both the
            # human-readable `sound` field and the ClassLabel-encoded `label`.
            sounds = default_find_classes(audio_path)
            yield guid, {
                "file": audio_path,
                "audio": audio_path,
                "sound": sounds,
                "label": sounds,
            }
def flatten(list2d):
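    # e.g. flatten([["/m/09x0r"], ["/m/04rlf", "/m/068hy"]]) -> ["/m/09x0r", "/m/04rlf", "/m/068hy"]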
return list(itertools.chain.from_iterable(list2d))
def fast_scandir(path: str, exts: tp.List[str], recursive: bool = False):
    """Scan `path` for files whose extension is in `exts`; faster than glob.

    Adapted from github.com/drscotthawley/aeiou/blob/main/aeiou/core.py
    """
    subfolders, files = [], []
    try:  # skip directories we are not allowed to read (permission denied)
        for f in os.scandir(path):
            try:  # skip broken entries (e.g. too many levels of symbolic links)
                if f.is_dir():
                    subfolders.append(f.path)
                elif f.is_file():
                    if os.path.splitext(f.name)[1].lower() in exts:
                        files.append(f.path)
            except Exception:
                pass
    except Exception:
        pass
    if recursive:
        for subfolder in list(subfolders):
            sf, f = fast_scandir(subfolder, exts, recursive=recursive)
            subfolders.extend(sf)
            files.extend(f)  # type: ignore
    return subfolders, files
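# ---------------------------------------------------------------------------
# Minimal usage sketch (an assumption, not part of the loading script proper):
# it presumes the archives and metadata referenced above are available in the
# confit/audioset repo pointed to by _HOMEPAGE. Adjust the repo id or config
# name to your own setup before running.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("confit/audioset", name="balanced", split="train", trust_remote_code=True)
    example = ds[0]
    print(example["file"])   # path to the extracted .wav clip
    print(example["sound"])  # human-readable class names, e.g. ["Speech"]
    print(example["label"])  # the same classes as ClassLabel integer ids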