# coding=utf-8

"""AudioSet sound event classification dataset."""


import itertools
import json
import os
import typing as tp
from pathlib import Path

import datasets
import pandas as pd
from huggingface_hub import hf_hub_download


SAMPLE_RATE = 32_000

_HOMEPAGE = "https://huggingface.co/datasets/confit/audioset"

_BALANCED_TRAIN_FILENAME = 'balanced_train_segments.zip'
_UNBALANCED_TRAIN_FILENAME = 'unbalanced_train_segments.zip'  # referenced below; name assumed to mirror the balanced archive
_EVAL_FILENAME = 'eval_segments.zip'

with open(hf_hub_download("huggingface/label-files", "audioset-id2label.json", repo_type="dataset"), "r") as f:
    ID2LABEL = json.load(f)
LABEL2ID = {v: k for k, v in ID2LABEL.items()}
# Sort so that the ClassLabel feature gets a deterministic ordering across runs.
CLASSES = sorted(LABEL2ID.keys())
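
# Illustrative mapping (assuming the standard AudioSet ontology ordering,
# where index 0 is 'Speech'):
#   ID2LABEL['0'] == 'Speech'   and   LABEL2ID['Speech'] == '0'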


class AudioSetConfig(datasets.BuilderConfig):
    """BuilderConfig for AudioSet."""

    def __init__(self, features, **kwargs):
        super().__init__(version=datasets.Version("0.0.1", ""), **kwargs)
        self.features = features


class AudioSet(datasets.GeneratorBasedBuilder):
    """AudioSet sound event classification dataset builder."""

    _FEATURES = datasets.Features(
        {
            "file": datasets.Value("string"),
            "audio": datasets.Audio(sampling_rate=SAMPLE_RATE),
            "sound": datasets.Sequence(datasets.Value("string")),
            "label": datasets.Sequence(datasets.features.ClassLabel(names=CLASSES)),
        }
    )

    BUILDER_CONFIGS = [
        AudioSetConfig(
            features=_FEATURES,
            name="balanced",
            description="Balanced training segments plus the evaluation segments.",
        ),
        # The 'unbalanced' branches below assume this config exists.
        AudioSetConfig(
            features=_FEATURES,
            name="unbalanced",
            description="Unbalanced training segments plus the evaluation segments.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "balanced"

    def _info(self):
        return datasets.DatasetInfo(
            description="AudioSet sound event classification dataset.",
            features=self.config.features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
        )

    def _preprocess_metadata_csv(self, csv_file):
        # AudioSet metadata CSVs carry two comment lines before the header
        # and separate fields with ', '.
        df = pd.read_csv(csv_file, skiprows=2, sep=', ', engine='python')
        df.rename(columns={'positive_labels': 'ids'}, inplace=True)
        # positive_labels is a quoted, comma-separated list of MIDs.
        df['ids'] = [label.strip('"').split(',') for label in df['ids']]
        # Audio files are named after the YouTube ID with a leading 'Y'.
        df['filename'] = 'Y' + df['# YTID'] + '.wav'
        return df[['filename', 'ids']]
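
    # The raw metadata CSVs look roughly like this (illustrative excerpt;
    # the first two lines are the comments skipped above):
    #
    #   # Segments csv created ...
    #   # num_ytids=..., num_segs=..., num_unique_labels=...
    #   # YTID, start_seconds, end_seconds, positive_labels
    #   --PJHxphWEs, 30.000, 40.000, "/m/09x0r,/t/dd00088"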

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # The zip archives live in this dataset repo; download (if needed)
        # and extract them through the download manager.
        if self.config.name == 'balanced':
            archive_path = dl_manager.download_and_extract(_BALANCED_TRAIN_FILENAME)
        elif self.config.name == 'unbalanced':
            archive_path = dl_manager.download_and_extract(_UNBALANCED_TRAIN_FILENAME)
        test_archive_path = dl_manager.download_and_extract(_EVAL_FILENAME)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"archive_path": archive_path, "split": "train"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"archive_path": test_archive_path, "split": "test"}
            ),
        ]

    def _generate_examples(self, archive_path, split=None):
        extensions = ['.wav']

        if split == 'train':
            if self.config.name == 'balanced':
                train_metadata_csv = f"{_HOMEPAGE}/resolve/main/metadata/balanced_train_segments.csv"
            elif self.config.name == 'unbalanced':
                train_metadata_csv = f"{_HOMEPAGE}/resolve/main/metadata/unbalanced_train_segments.csv"
            metadata_df = self._preprocess_metadata_csv(train_metadata_csv)  # ['filename', 'ids']
        elif split == 'test':
            test_metadata_csv = f"{_HOMEPAGE}/resolve/main/metadata/eval_segments.csv"
            metadata_df = self._preprocess_metadata_csv(test_metadata_csv)  # ['filename', 'ids']

        # Map machine IDs (MIDs, e.g. '/m/09x0r') to display names (e.g. 'Speech').
        class_labels_indices_df = pd.read_csv(
            f"{_HOMEPAGE}/resolve/main/metadata/class_labels_indices.csv"
        )  # columns: ['index', 'mid', 'display_name']
        mid2label = {
            row['mid']: row['display_name'] for _, row in class_labels_indices_df.iterrows()
        }

        def default_find_classes(audio_path):
            """Return the display names of the labels attached to one clip."""
            fileid = Path(audio_path).name
            ids = metadata_df.query(f'filename=="{fileid}"')['ids'].values.tolist()
            return [mid2label.get(mid) for mid in flatten(ids)]

        _, _walker = fast_scandir(archive_path, extensions, recursive=True)

        for guid, audio_path in enumerate(_walker):
            # 'sound' and 'label' carry the same class names; 'label' is cast
            # to ClassLabel indices by the features declared above.
            sounds = default_find_classes(audio_path)
            yield guid, {
                "file": audio_path,
                "audio": audio_path,
                "sound": sounds,
                "label": sounds,
            }


def flatten(list2d):
    """Flatten one level of nesting: [['a'], ['b', 'c']] -> ['a', 'b', 'c']."""
    return list(itertools.chain.from_iterable(list2d))


def fast_scandir(path: str, exts: tp.List[str], recursive: bool = False):
    """Scan a directory tree for files with the given extensions.

    Faster than glob; adapted from
    github.com/drscotthawley/aeiou/blob/main/aeiou/core.py
    """
    subfolders, files = [], []

    try:  # guard against 'permission denied' errors
        for f in os.scandir(path):
            try:  # guard against 'too many levels of symbolic links' errors
                if f.is_dir():
                    subfolders.append(f.path)
                elif f.is_file():
                    if os.path.splitext(f.name)[1].lower() in exts:
                        files.append(f.path)
            except Exception:
                pass
    except Exception:
        pass

    if recursive:
        for subfolder in list(subfolders):
            sf, f = fast_scandir(subfolder, exts, recursive=recursive)
            subfolders.extend(sf)
            files.extend(f)

    return subfolders, files
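

# Usage sketch (assumes this script is hosted in the confit/audioset dataset
# repo and the zip archives referenced above exist there):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("confit/audioset", "balanced", trust_remote_code=True)
#     print(ds["train"][0]["sound"])  # e.g. a list of display names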