yangwang825 committed
Commit aefbc02
Parent: 6941c43

Update magnatagatune.py

Files changed (1)
  1. magnatagatune.py +45 -107
magnatagatune.py CHANGED
@@ -1,6 +1,6 @@
 # coding=utf-8
 
-"""MagnaTagATune dataset."""
+"""Medley-Solos-DB dataset."""
 
 
 import os
@@ -24,7 +24,7 @@ logger = logging.getLogger(__name__)
 logger.addHandler(RichHandler())
 logger.setLevel(logging.INFO)
 
-SAMPLE_RATE = 16_000
+SAMPLE_RATE = 44_100
 
 # Cache location
 VERSION = "0.0.1"
@@ -35,43 +35,39 @@ HF_CACHE_HOME = os.path.expanduser(os.getenv("HF_HOME", DEFAULT_HF_CACHE_HOME))
 DEFAULT_HF_DATASETS_CACHE = os.path.join(HF_CACHE_HOME, "datasets")
 HF_DATASETS_CACHE = Path(os.getenv("HF_DATASETS_CACHE", DEFAULT_HF_DATASETS_CACHE))
 
-TOP_50_CLASSES = [
-    'ambient', 'beat', 'beats', 'cello', 'choir', 'choral', 'classic', 'classical', 'country', 'dance',
-    'drums', 'electronic', 'fast', 'female', 'female vocal', 'female voice', 'flute', 'guitar', 'harp', 'harpsichord',
-    'indian', 'loud', 'male', 'male vocal', 'male voice', 'man', 'metal', 'new age', 'no vocal', 'no vocals',
-    'no voice', 'opera', 'piano', 'pop', 'quiet', 'rock', 'singing', 'sitar', 'slow', 'soft',
-    'solo', 'strings', 'synth', 'techno', 'violin', 'vocal', 'vocals', 'voice', 'weird', 'woman'
+CLASSES = [
+    'clarinet', 'distorted electric guitar', 'female singer', 'flute', 'piano', 'tenor saxophone', 'trumpet', 'violin'
 ]
-CLASS2INDEX = {cls:idx for idx, cls in enumerate(TOP_50_CLASSES)}
-INDEX2CLASS = {idx:cls for idx, cls in enumerate(TOP_50_CLASSES)}
+CLASS2INDEX = {cls:idx for idx, cls in enumerate(CLASSES)}
+INDEX2CLASS = {idx:cls for idx, cls in enumerate(CLASSES)}
 
 
-class MagnaTagATuneConfig(datasets.BuilderConfig):
-    """BuilderConfig for MagnaTagATune."""
+class MedleySolosDBConfig(datasets.BuilderConfig):
+    """BuilderConfig for Medley-Solos-DB."""
 
     def __init__(self, features, **kwargs):
-        super(MagnaTagATuneConfig, self).__init__(version=datasets.Version(VERSION, ""), **kwargs)
+        super(MedleySolosDBConfig, self).__init__(version=datasets.Version(VERSION, ""), **kwargs)
         self.features = features
 
 
-class MagnaTagATune(datasets.GeneratorBasedBuilder):
+class MedleySolosDB(datasets.GeneratorBasedBuilder):
 
     BUILDER_CONFIGS = [
-        MagnaTagATuneConfig(
+        MedleySolosDBConfig(
             features=datasets.Features(
                 {
                     "file": datasets.Value("string"),
                     "audio": datasets.Audio(sampling_rate=SAMPLE_RATE),
-                    "tags": datasets.Sequence(datasets.Value("string")),
-                    "label": datasets.Sequence(datasets.features.ClassLabel(names=TOP_50_CLASSES)),
+                    "instrument": datasets.Value("string"),
+                    "label": datasets.features.ClassLabel(names=CLASSES),
                 }
             ),
-            name="top50",
+            name="v1.2",
             description="",
         ),
     ]
 
-    DEFAULT_CONFIG_NAME = "top50"
+    DEFAULT_CONFIG_NAME = "v1.2"
 
     def _info(self):
         return datasets.DatasetInfo(
@@ -83,65 +79,18 @@ class MagnaTagATune(datasets.GeneratorBasedBuilder):
             task_templates=None,
         )
 
-    def _load_metadata(self):
-        # Read metadata
-        df = pd.read_csv("https://mirg.city.ac.uk/datasets/magnatagatune/annotations_final.csv", sep="\t")
-        df = df[df[TOP_50_CLASSES].sum(axis=1) > 0]
-        df = df[TOP_50_CLASSES + ["mp3_path", "clip_id"]]
-
-        train_ids_df = pd.read_csv(
-            'https://raw.githubusercontent.com/jordipons/musicnn-training/master/data/index/mtt/train_gt_mtt.tsv',
-            sep='\t', header=None
-        )
-        train_ids = train_ids_df[0].tolist()
-        train_df = df[df["clip_id"].isin(train_ids)]
-
-        validation_ids_df = pd.read_csv(
-            "https://raw.githubusercontent.com/jordipons/musicnn-training/master/data/index/mtt/val_gt_mtt.tsv",
-            sep="\t", header=None
-        )
-        validation_ids = validation_ids_df[0].tolist()
-        validation_df = df[df["clip_id"].isin(validation_ids)]
-
-        test_ids_df = pd.read_csv(
-            "https://raw.githubusercontent.com/jordipons/musicnn-training/master/data/index/mtt/test_gt_mtt.tsv",
-            sep="\t", header=None
-        )
-        test_ids = test_ids_df[0].tolist()
-        test_df = df[df["clip_id"].isin(test_ids)]
-
-        label_names = df.columns
-        label_names = label_names.drop(["mp3_path", "clip_id"])
-
-        return train_df, validation_df, test_df, label_names
-
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-        if self.config.name == 'top50':
-            mp3_zip_files = [
-                'https://mirg.city.ac.uk/datasets/magnatagatune/mp3.zip.001',
-                'https://mirg.city.ac.uk/datasets/magnatagatune/mp3.zip.002',
-                'https://mirg.city.ac.uk/datasets/magnatagatune/mp3.zip.003',
-            ]
-            for zip_file_url in mp3_zip_files:
-                _filename = zip_file_url.split('/')[-1]
-                _save_path = os.path.join(
-                    HF_DATASETS_CACHE, 'confit___magnatagatune/top50', VERSION, _filename
-                )
-                download_file(zip_file_url, _save_path)
-                logger.info(f"`{_filename}` is downloaded to {_save_path}")
-
-            main_zip_filename = 'mp3.zip'
-            _save_dir = os.path.join(HF_DATASETS_CACHE, 'confit___magnatagatune/top50', VERSION)
-            _output_file = os.path.join(_save_dir, main_zip_filename)
-
-            if not os.path.exists(_output_file):
-                logger.info(f"Concatenate zip files to {main_zip_filename}")
-                os.system(f"cat {os.path.join(_save_dir, 'mp3.zip.*')} > {_output_file}")
-
-            archive_path = dl_manager.extract(_output_file)
-            logger.info(f"`{main_zip_filename}` is now extracted to {archive_path}")
-
+        zip_file_url = "https://zenodo.org/records/3464194/files/Medley-solos-DB.tar.gz"
+        _filename = zip_file_url.split('/')[-1]
+        _save_path = os.path.join(
+            HF_DATASETS_CACHE, 'confit___medley-solos-db/v1.2', VERSION, _filename
+        )
+        download_file(zip_file_url, _save_path)
+        logger.info(f"`{_filename}` is downloaded to {_save_path}")
+        archive_path = dl_manager.extract(_save_path)
+        logger.info(f"`{_filename}` is now extracted to {archive_path}")
+
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN, gen_kwargs={"archive_path": archive_path, "split": "train"}
@@ -154,58 +103,47 @@ class MagnaTagATune(datasets.GeneratorBasedBuilder):
             ),
         ]
 
-    def _generate_examples(self, archive_path, split=None, metadata_df=None):
-        train_df, validation_df, test_df, label_names = self._load_metadata()
-        extensions = ['.mp3']
-        _, _walker = fast_scandir(archive_path, extensions, recursive=True)
+    def _generate_examples(self, archive_path, split=None):
+        metadata_df = pd.read_csv("https://zenodo.org/records/3464194/files/Medley-solos-DB_metadata.csv")
+        train_df = metadata_df[metadata_df["subset"] == "training"].reset_index(drop=True)
+        validation_df = metadata_df[metadata_df["subset"] == "validation"].reset_index(drop=True)
+        test_df = metadata_df[metadata_df["subset"] == "test"].reset_index(drop=True)
 
-        class2index = {cls:idx for idx, cls in enumerate(label_names)}
-        index2class = {idx:cls for idx, cls in enumerate(label_names)}
+        extensions = ['.wav']
+        _, _walker = fast_scandir(archive_path, extensions, recursive=True)
 
         if split == 'train':
             fileid2class = {}
             for idx, row in train_df.iterrows():
-                fileid = row['mp3_path']
-                class_ = row[label_names].tolist()
-                if sum(class_) == 0:
-                    continue
-                class_ = [idx for idx, val in enumerate(class_) if val != 0]
-                class_ = [index2class.get(idx) for idx in class_]
+                fileid = row['uuid4']
+                class_ = row['instrument']
                 fileid2class[fileid] = class_
         elif split == 'validation':
             fileid2class = {}
             for idx, row in validation_df.iterrows():
-                fileid = row['mp3_path']
-                class_ = row[label_names].tolist()
-                if sum(class_) == 0:
-                    continue
-                class_ = [idx for idx, val in enumerate(class_) if val != 0]
-                class_ = [index2class.get(idx) for idx in class_]
+                fileid = row['uuid4']
+                class_ = row['instrument']
                 fileid2class[fileid] = class_
        elif split == 'test':
             fileid2class = {}
             for idx, row in test_df.iterrows():
-                fileid = row['mp3_path']
-                class_ = row[label_names].tolist()
-                if sum(class_) == 0:
-                    continue
-                class_ = [idx for idx, val in enumerate(class_) if val != 0]
-                class_ = [index2class.get(idx) for idx in class_]
+                fileid = row['uuid4']
+                class_ = row['instrument']
                 fileid2class[fileid] = class_
-
+
+        _walker = [fileid for fileid in _walker if not Path(fileid).name.startswith('._Medley')]
         for guid, audio_path in enumerate(_walker):
-            parent = Path(audio_path).parent.stem
-            filename = Path(audio_path).name
-            fileid = f"{parent}/{filename}"
+            fileid = Path(audio_path).stem
+            fileid = fileid.split('_')[-1]
             if fileid not in fileid2class:
                 continue
-            tags = fileid2class.get(fileid)
+            instrument = fileid2class.get(fileid)
            yield guid, {
                 "id": str(guid),
                 "file": audio_path,
                 "audio": audio_path,
-                "tags": tags,
-                "label": tags,
+                "instrument": instrument,
+                "label": instrument,
             }
 

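For reference, a minimal usage sketch (not part of the commit). It assumes the updated script is saved locally under the filename changed here, magnatagatune.py, that a recent `datasets` release supporting `trust_remote_code` is installed, and that the helper utilities it imports (`download_file`, `fast_scandir`) resolve at run time; despite the filename, the builder now produces the Medley-Solos-DB "v1.2" configuration.

# Hypothetical usage sketch for the loading script changed in this commit.
from datasets import load_dataset

ds = load_dataset("magnatagatune.py", name="v1.2", trust_remote_code=True)

print(ds)                      # DatasetDict with train / validation / test splits
sample = ds["train"][0]
print(sample["file"])          # path to the extracted .wav clip
print(sample["instrument"])    # instrument name as a plain string
print(sample["label"])         # the same class encoded as a ClassLabel integer
print(ds["train"].features["label"].int2str(sample["label"]))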
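A small sketch of how the new `label` feature encodes the instrument strings: `datasets.ClassLabel` maps each name in CLASSES to its position in the list, which is what the generator's `"label": instrument` yield relies on. The list below is copied from the diff; the printed indices follow from its ordering.

from datasets import ClassLabel

CLASSES = [
    'clarinet', 'distorted electric guitar', 'female singer', 'flute',
    'piano', 'tenor saxophone', 'trumpet', 'violin'
]
label_feature = ClassLabel(names=CLASSES)

print(label_feature.str2int('flute'))   # 3
print(label_feature.int2str(6))         # 'trumpet'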