import os
import random
import hashlib

import datasets

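# Class label vocabularies for each configuration: a coarse 4-class set,
# a 7-class set, and the full 11-class erhu playing-technique taxonomy.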
_NAMES = {
    "4_classes": [
        "trill",
        "staccato",
        "slide",
        "others",
    ],
    "7_classes": [
        "trill_short_up",
        "trill_long",
        "staccato",
        "slide_up",
        "slide_legato",
        "slide_down",
        "others",
    ],
    "11_classes": [
        "vibrato",
        "trill",
        "tremolo",
        "staccato",
        "ricochet",
        "pizzicato",
        "percussive",
        "legato_slide_glissando",
        "harmonic",
        "diangong",
        "detache",
    ],
}

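# The dataset name is derived from this script's filename; the data archives
# are hosted in the ccmusic-database repository on ModelScope.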
_DBNAME = os.path.basename(__file__).split(".")[0]

_DOMAIN = f"https://www.modelscope.cn/api/v1/datasets/ccmusic-database/{_DBNAME}/repo?Revision=master&FilePath=data"

_HOMEPAGE = f"https://www.modelscope.cn/datasets/ccmusic-database/{_DBNAME}"

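# Archive URLs: raw audio recordings, pre-rendered mel-spectrogram images,
# and the pre-rendered evaluation image set.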
_URLS = {
    "audio": f"{_DOMAIN}/audio.zip",
    "mel": f"{_DOMAIN}/mel.zip",
    "eval": f"{_DOMAIN}/eval.zip",
}


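# Dataset builder for the erhu playing-technique dataset, with "4_classes",
# "7_classes", "11_classes" and "eval" configurations; the "default" config
# is remapped to "11_classes".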
class erhu_playing_tech(datasets.GeneratorBasedBuilder):
    def _info(self):
        if self.config.name == "default":
            self.config.name = "11_classes"

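        # Audio configs expose waveform + mel spectrogram + class label; the
        # "eval" config instead exposes mel/CQT/chroma images with 11-class labels.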
        return datasets.DatasetInfo(
            features=(
                datasets.Features(
                    {
                        "audio": datasets.Audio(sampling_rate=44100),
                        "mel": datasets.Image(),
                        "label": datasets.features.ClassLabel(
                            names=_NAMES[self.config.name]
                        ),
                    }
                )
                if self.config.name != "eval"
                else datasets.Features(
                    {
                        "mel": datasets.Image(),
                        "cqt": datasets.Image(),
                        "chroma": datasets.Image(),
                        "label": datasets.features.ClassLabel(
                            names=_NAMES["11_classes"]
                        ),
                    }
                )
            ),
            homepage=_HOMEPAGE,
            license="CC-BY-NC-ND",
            version="1.2.0",
        )

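    # MD5 of "<config>/<class>/<file stem>" is used as a key to pair each
    # .wav recording with its corresponding mel-spectrogram .jpg.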
    def _str2md5(self, original_string: str):
        md5_obj = hashlib.md5()
        md5_obj.update(original_string.encode("utf-8"))
        return md5_obj.hexdigest()

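    # Download the archives, pair audio files with their mel images (or collect
    # the pre-rendered eval images), then build a stratified 6:2:2
    # train/validation/test split per class.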
    def _split_generators(self, dl_manager):
        if self.config.name != "eval":
            audio_files = dl_manager.download_and_extract(_URLS["audio"])
            mel_files = dl_manager.download_and_extract(_URLS["mel"])
            files = {}
            for fpath in dl_manager.iter_files([audio_files]):
                fname = os.path.basename(fpath)
                dirname = os.path.dirname(fpath)
                subset = os.path.basename(os.path.dirname(dirname))
                if self.config.name == subset and fname.endswith(".wav"):
                    cls = f"{subset}/{os.path.basename(dirname)}/"
                    item_id = self._str2md5(cls + fname.split(".wa")[0])
                    files[item_id] = {"audio": fpath}

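            # Attach each mel-spectrogram image to the audio item that shares
            # the same hashed "<config>/<class>/<stem>" key.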
            for fpath in dl_manager.iter_files([mel_files]):
                fname = os.path.basename(fpath)
                dirname = os.path.dirname(fpath)
                subset = os.path.basename(os.path.dirname(dirname))
                if self.config.name == subset and fname.endswith(".jpg"):
                    cls = f"{subset}/{os.path.basename(dirname)}/"
                    item_id = self._str2md5(cls + fname.split(".jp")[0])
                    files[item_id]["mel"] = fpath

            dataset = list(files.values())

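        # For the "eval" config the images are pre-rendered; the class label is
        # encoded in the filename before the double underscore.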
        else:
            eval_files = dl_manager.download_and_extract(_URLS["eval"])
            dataset = []
            for fpath in dl_manager.iter_files([eval_files]):
                fname: str = os.path.basename(fpath)
                if "_mel" in fname and fname.endswith(".jpg"):
                    dataset.append({"mel": fpath, "label": fname.split("__")[0]})

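        # Group items by class label so the split can be stratified per class;
        # for audio configs the label is the name of the containing class directory.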
        categories = {}
        names = _NAMES[
            "11_classes" if "eval" in self.config.name else self.config.name
        ]
        for name in names:
            categories[name] = []

        for data in dataset:
            if self.config.name != "eval":
                data["label"] = os.path.basename(os.path.dirname(data["audio"]))

            categories[data["label"]].append(data)

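        # Stratified 60/20/20 split of each class into train/validation/test,
        # followed by a shuffle of each split.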
        testset, validset, trainset = [], [], []
        for cls in categories:
            random.shuffle(categories[cls])
            count = len(categories[cls])
            p60 = int(count * 0.6)
            p80 = int(count * 0.8)
            trainset += categories[cls][:p60]
            validset += categories[cls][p60:p80]
            testset += categories[cls][p80:]

        random.shuffle(trainset)
        random.shuffle(validset)
        random.shuffle(testset)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"files": trainset}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"files": validset}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"files": testset}
            ),
        ]

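    # Audio configs yield items as-is; the "eval" config derives the CQT and
    # chroma image paths from the mel path by filename substitution.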
    def _generate_examples(self, files):
        if self.config.name != "eval":
            for i, item in enumerate(files):
                yield i, item
        else:
            for i, item in enumerate(files):
                yield i, {
                    "mel": item["mel"],
                    "cqt": item["mel"].replace("_mel", "_cqt"),
                    "chroma": item["mel"].replace("_mel", "_chroma"),
                    "label": item["label"],
                }
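

# A minimal usage sketch (an assumption for illustration, not part of the original
# script): when this loading script is consumed through the Hugging Face `datasets`
# API, each configuration ("4_classes", "7_classes", "11_classes", "eval") can be
# requested by name. Recent `datasets` releases may additionally require
# trust_remote_code=True for script-based datasets.
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, name="11_classes")
    print(ds)  # DatasetDict with train / validation / test splits
    print(ds["train"].features["label"].names)  # the 11 technique labels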
|