yangwang825 committed
Commit b975e7e
Parent(s): 01fdfdd

Create magnatagatune.py

magnatagatune.py: ADDED (+322 lines)
# coding=utf-8

"""MagnaTagATune dataset."""

import os
import gzip
import shutil
import pathlib
import logging
import datasets
import typing as tp
import pandas as pd
import urllib.request
from pathlib import Path
from tqdm.auto import tqdm
from rich.logging import RichHandler

logger = logging.getLogger(__name__)
logger.addHandler(RichHandler())
logger.setLevel(logging.INFO)

SAMPLE_RATE = 32_000

# Cache location
VERSION = "0.0.1"
DEFAULT_XDG_CACHE_HOME = "~/.cache"
XDG_CACHE_HOME = os.getenv("XDG_CACHE_HOME", DEFAULT_XDG_CACHE_HOME)
DEFAULT_HF_CACHE_HOME = os.path.join(XDG_CACHE_HOME, "huggingface")
HF_CACHE_HOME = os.path.expanduser(os.getenv("HF_HOME", DEFAULT_HF_CACHE_HOME))
DEFAULT_HF_DATASETS_CACHE = os.path.join(HF_CACHE_HOME, "datasets")
HF_DATASETS_CACHE = Path(os.getenv("HF_DATASETS_CACHE", DEFAULT_HF_DATASETS_CACHE))

# The 50 most frequent MagnaTagATune tags, in alphabetical order.
TOP_50_CLASSES = [
    'ambient', 'beat', 'beats', 'cello', 'choir', 'choral', 'classic', 'classical', 'country', 'dance',
    'drums', 'electronic', 'fast', 'female', 'female vocal', 'female voice', 'flute', 'guitar', 'harp', 'harpsichord',
    'indian', 'loud', 'male', 'male vocal', 'male voice', 'man', 'metal', 'new age', 'no vocal', 'no vocals',
    'no voice', 'opera', 'piano', 'pop', 'quiet', 'rock', 'singing', 'sitar', 'slow', 'soft',
    'solo', 'strings', 'synth', 'techno', 'violin', 'vocal', 'vocals', 'voice', 'weird', 'woman'
]
CLASS2INDEX = {cls: idx for idx, cls in enumerate(TOP_50_CLASSES)}
INDEX2CLASS = {idx: cls for idx, cls in enumerate(TOP_50_CLASSES)}

class MagnaTagATuneConfig(datasets.BuilderConfig):
    """BuilderConfig for MagnaTagATune."""

    def __init__(self, features, **kwargs):
        super(MagnaTagATuneConfig, self).__init__(version=datasets.Version(VERSION, ""), **kwargs)
        self.features = features

class MagnaTagATune(datasets.GeneratorBasedBuilder):

    BUILDER_CONFIGS = [
        MagnaTagATuneConfig(
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=SAMPLE_RATE),
                    "tags": datasets.Sequence(datasets.Value("string")),
                    "label": datasets.Sequence(datasets.features.ClassLabel(names=TOP_50_CLASSES)),
                }
            ),
            name="top50",
            description="",
        ),
    ]

    DEFAULT_CONFIG_NAME = "top50"

    def _info(self):
        return datasets.DatasetInfo(
            description="",
            features=self.config.features,
            supervised_keys=None,
            homepage="",
            citation="",
            task_templates=None,
        )
    def _load_metadata(self):
        # Read the full annotation table and keep only clips that carry
        # at least one of the top-50 tags.
        df = pd.read_csv("https://mirg.city.ac.uk/datasets/magnatagatune/annotations_final.csv", sep="\t")
        df = df[df[TOP_50_CLASSES].sum(axis=1) > 0]
        df = df[TOP_50_CLASSES + ["mp3_path", "clip_id"]]

        # Split assignments follow the musicnn-training index files.
        train_ids_df = pd.read_csv(
            "https://raw.githubusercontent.com/jordipons/musicnn-training/master/data/index/mtt/train_gt_mtt.tsv",
            sep="\t", header=None
        )
        train_ids = train_ids_df[0].tolist()
        train_df = df[df["clip_id"].isin(train_ids)]

        validation_ids_df = pd.read_csv(
            "https://raw.githubusercontent.com/jordipons/musicnn-training/master/data/index/mtt/val_gt_mtt.tsv",
            sep="\t", header=None
        )
        validation_ids = validation_ids_df[0].tolist()
        validation_df = df[df["clip_id"].isin(validation_ids)]

        test_ids_df = pd.read_csv(
            "https://raw.githubusercontent.com/jordipons/musicnn-training/master/data/index/mtt/test_gt_mtt.tsv",
            sep="\t", header=None
        )
        test_ids = test_ids_df[0].tolist()
        test_df = df[df["clip_id"].isin(test_ids)]

        label_names = df.columns.drop(["mp3_path", "clip_id"])

        return train_df, validation_df, test_df, label_names
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        if self.config.name == "top50":
            # The audio archive is distributed as three split zip parts that
            # must be concatenated before extraction.
            mp3_zip_files = [
                'https://mirg.city.ac.uk/datasets/magnatagatune/mp3.zip.001',
                'https://mirg.city.ac.uk/datasets/magnatagatune/mp3.zip.002',
                'https://mirg.city.ac.uk/datasets/magnatagatune/mp3.zip.003',
            ]
            for zip_file_url in mp3_zip_files:
                _filename = zip_file_url.split('/')[-1]
                _save_path = os.path.join(
                    HF_DATASETS_CACHE, 'confit___magnatagatune/top50', VERSION, _filename
                )
                download_file(zip_file_url, _save_path)
                logger.info(f"`{_filename}` downloaded to {_save_path}")

            main_zip_filename = 'mp3.zip'
            _save_dir = os.path.join(HF_DATASETS_CACHE, 'confit___magnatagatune/top50', VERSION)
            _output_file = os.path.join(_save_dir, main_zip_filename)

            if not os.path.exists(_output_file):
                logger.info(f"Concatenating zip parts into {main_zip_filename}")
                os.system(f"cat {os.path.join(_save_dir, 'mp3.zip.*')} > {_output_file}")

            archive_path = dl_manager.extract(_output_file)
            logger.info(f"`{main_zip_filename}` extracted to {archive_path}")

            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN, gen_kwargs={"archive_path": archive_path, "split": "train"}
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION, gen_kwargs={"archive_path": archive_path, "split": "validation"}
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path, "split": "test"}
                ),
            ]
    def _generate_examples(self, archive_path, split=None, metadata_df=None):
        train_df, validation_df, test_df, label_names = self._load_metadata()
        extensions = ['.mp3']
        _, _walker = fast_scandir(archive_path, extensions, recursive=True)

        index2class = {idx: cls for idx, cls in enumerate(label_names)}
        split2df = {"train": train_df, "validation": validation_df, "test": test_df}
        split_df = split2df[split]

        # Map each clip, keyed by its file stem so lookups from scanned paths
        # match the `mp3_path` entries, to its list of active tag names.
        fileid2class = {}
        for _, row in split_df.iterrows():
            fileid = row['mp3_path']
            # `row[label_names]` is a binary indicator vector; keep the names
            # of the tags that are set.
            class_ = [index2class[i] for i, v in enumerate(row[label_names].tolist()) if v == 1]
            if len(class_) == 0:
                continue
            fileid2class[Path(fileid).stem] = class_

        for guid, audio_path in enumerate(_walker):
            tags = fileid2class.get(Path(audio_path).stem)
            if tags is None:
                # The clip is not part of this split or carries no top-50 tag.
                continue
            yield guid, {
                "file": audio_path,
                "audio": audio_path,
                "tags": tags,
                "label": tags,
            }

def fast_scandir(path: str, exts: tp.List[str], recursive: bool = False):
    # Scan files recursively, faster than glob
    # From github.com/drscotthawley/aeiou/blob/main/aeiou/core.py
    subfolders, files = [], []

    try:  # hope to avoid 'permission denied' by this try
        for f in os.scandir(path):
            try:  # hope to avoid 'too many levels of symbolic links' error
                if f.is_dir():
                    subfolders.append(f.path)
                elif f.is_file():
                    if os.path.splitext(f.name)[1].lower() in exts:
                        files.append(f.path)
            except Exception:
                pass
    except Exception:
        pass

    if recursive:
        for subfolder in list(subfolders):
            sf, f = fast_scandir(subfolder, exts, recursive=recursive)
            subfolders.extend(sf)
            files.extend(f)  # type: ignore

    return subfolders, files
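
# Illustrative usage of fast_scandir (path is assumed, not from the script):
#   _, mp3_files = fast_scandir("/data/magnatagatune", [".mp3"], recursive=True)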

def download_file(
    source,
    dest,
    unpack=False,
    dest_unpack=None,
    replace_existing=False,
    write_permissions=False,
):
    """Downloads the file from the given source and saves it in the given
    destination path.

    Arguments
    ---------
    source : path or url
        Path of the source file. If the source is a URL, it downloads it from
        the web.
    dest : path
        Destination path.
    unpack : bool
        If True, it unpacks the data in the dest folder.
    dest_unpack : path
        Path where to store the unpacked dataset.
    replace_existing : bool
        If True, replaces the existing files.
    write_permissions : bool
        When set to True, all the files in the dest_unpack directory will be
        granted write permissions. This option is active only when unpack=True.
    """
    class DownloadProgressBar(tqdm):
        """DownloadProgressBar class."""

        def update_to(self, b=1, bsize=1, tsize=None):
            """Report hook for urllib.request.urlretrieve."""
            if tsize is not None:
                self.total = tsize
            self.update(b * bsize - self.n)

    # Create the destination directory if it doesn't exist
    dest_dir = pathlib.Path(dest).resolve().parent
    dest_dir.mkdir(parents=True, exist_ok=True)

    if "http" not in source:
        # Local source: just copy the file.
        shutil.copyfile(source, dest)

    elif not os.path.isfile(dest) or (
        os.path.isfile(dest) and replace_existing
    ):
        print(f"Downloading {source} to {dest}")
        with DownloadProgressBar(
            unit="B",
            unit_scale=True,
            miniters=1,
            desc=source.split("/")[-1],
        ) as t:
            urllib.request.urlretrieve(
                source, filename=dest, reporthook=t.update_to
            )
    else:
        print(f"{dest} exists. Skipping download")

    # Unpack if necessary
    if unpack:
        if dest_unpack is None:
            dest_unpack = os.path.dirname(dest)
        print(f"Extracting {dest} to {dest_unpack}")
        # shutil.unpack_archive handles zip and tar archives (including
        # .tar.gz/.tgz), but not bare .gz files, which are decompressed here.
        if source.endswith(".gz") and not source.endswith(".tar.gz"):
            out = dest.replace(".gz", "")
            with gzip.open(dest, "rb") as f_in:
                with open(out, "wb") as f_out:
                    shutil.copyfileobj(f_in, f_out)
        else:
            shutil.unpack_archive(dest, dest_unpack)
        if write_permissions:
            set_writing_permissions(dest_unpack)
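
# Illustrative usage of download_file (URL and paths are assumed examples):
#   download_file("https://example.org/archive.zip", "/tmp/archive.zip",
#                 unpack=True, dest_unpack="/tmp/archive")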

def set_writing_permissions(folder_path):
    """
    Sets user write permissions on all the files in the given folder.

    Arguments
    ---------
    folder_path : folder
        Folder whose files will be granted write permissions.
    """
    for root, dirs, files in os.walk(folder_path):
        for file_name in files:
            file_path = os.path.join(root, file_name)
            # Set write permissions (mode 0o666) on the file
            os.chmod(file_path, 0o666)
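
For reference, a minimal usage sketch of the finished builder. The repo id confit/magnatagatune is an assumption inferred from the cache path `confit___magnatagatune` in _split_generators, and trust_remote_code=True is only needed on datasets versions that gate script-based builders.

# Minimal usage sketch. The repo id below is an assumption inferred from the
# cache path "confit___magnatagatune" used in _split_generators above.
from datasets import load_dataset

ds = load_dataset("confit/magnatagatune", "top50", trust_remote_code=True)

sample = ds["train"][0]
print(sample["file"])  # path to the decoded mp3 clip
print(sample["tags"])  # active tag names, e.g. ['classical', 'violin']

# "label" carries the same tags encoded as ClassLabel indices; decode them:
label_feature = ds["train"].features["label"].feature
print([label_feature.int2str(i) for i in sample["label"]])

Since tags and label carry the same information, a training pipeline would typically multi-hot encode the label column for the standard 50-tag auto-tagging benchmark.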