"""
Created on Tue Apr 18 16:14:58 2023

@author: lin.kinwahedward

Loading script for the Audio, Speech, and Vision Processing Lab - Emotional Sound Database (ASVP-ESD).
"""

import os

import datasets

_CITATION = """\
@article{gsj2020asvpesd,
    title={ASVP-ESD: A dataset and its benchmark for emotion recognition using both speech and non-speech utterances},
    author={Dejoli Tientcheu Touko Landry and Qianhua He and Haikang Yan and Yanxiong Li},
    journal={Global Scientific Journals},
    volume={8},
    issue={6},
    pages={1793--1798},
    year={2020}
}
"""

_DESCRIPTION = """\
ASVP-ESD: the Audio, Speech, and Vision Processing Lab - Emotional Sound Database,
a collection of speech and non-speech emotional utterances for emotion recognition.
"""

_HOMEPAGE = "https://www.kaggle.com/datasets/dejolilandry/asvpesdspeech-nonspeech-emotional-utterances?resource=download-directory"

_LICENSE = "CC BY 4.0"

_DATA_URL = "https://drive.google.com/uc?export=download&id=1aKnr5kXgUjMB5MAhUTZmm3b8gjP8qA3O"

# Maps the numeric emotion code (the leading underscore-separated field of each
# filename) to its emotion label.
id2labels = {
    1: "boredom,sigh",
    2: "neutral,calm",
    3: "happy,laugh,gaggle",
    4: "sad,cry",
    5: "angry,grunt,frustration",
    6: "fearful,scream,panic",
    7: "disgust,dislike,contempt",
    8: "surprised,gasp,amazed",
    9: "excited",
    10: "pleasure",
    11: "pain,groan",
    12: "disappointment,disapproval",
    13: "breath",
}


class ASVP_ESD_Config(datasets.BuilderConfig):
    """BuilderConfig for the ASVP-ESD dataset."""

    def __init__(self, name, description, homepage, data_url):
        # Pass the constructor arguments (not the yet-unset attributes) to the parent class.
        super(ASVP_ESD_Config, self).__init__(
            name=name,
            version=datasets.Version("1.0.0"),
            description=description,
        )
        self.homepage = homepage
        self.data_url = data_url


class ASVP_ESD(datasets.GeneratorBasedBuilder):
    """Dataset builder for the ASVP-ESD speech and non-speech emotional utterances."""

    BUILDER_CONFIGS = [
        ASVP_ESD_Config(
            name="ASVP_ESD",
            description=_DESCRIPTION,
            homepage=_HOMEPAGE,
            data_url=_DATA_URL,
        )
    ]

    def _info(self):
        """Define the features of each example.

        Three features:
            1) path:  path to the audio file
            2) audio: the audio samples (decoded at 16 kHz)
            3) label: the emotion class
        """
        features = datasets.Features(
            {
                "path": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=16000),
                "label": datasets.ClassLabel(names=list(id2labels.values())),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download and extract the archive; all examples go into a single TRAIN split.
        dataset_path = dl_manager.download_and_extract(self.config.data_url)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"dataset_path": dataset_path},
            )
        ]

    def _generate_examples(self, dataset_path):
        """Walk the Speech and NonSpeech actor directories and yield one example per .wav file."""
        key = 0
        for actor in range(129):
            for subset in ("Speech", "NonSpeech"):
                dir_path = os.path.join(dataset_path, "ASVP_ESD", subset, f"actor_{actor}")
                # Some actors may not have both subsets; skip missing directories.
                if not os.path.isdir(dir_path):
                    continue
                for filename in os.listdir(dir_path):
                    if not filename.endswith(".wav"):
                        continue
                    # The leading underscore-separated field of the filename encodes the emotion.
                    fields = filename[:-4].split("_")
                    file_path = os.path.join(dir_path, filename)
                    yield key, {
                        "path": file_path,
                        "audio": file_path,
                        "label": id2labels[int(fields[0])],
                    }
                    key += 1
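

# --- Usage sketch (illustrative only, not part of the builder) ---
# A minimal example of how this loading script could be exercised locally.
# It assumes this file is saved as "asvp_esd.py" (hypothetical filename) and that
# the archive behind _DATA_URL is downloadable in your environment.
if __name__ == "__main__":
    from datasets import load_dataset

    # Loading a local dataset script returns the single TRAIN split defined above.
    dataset = load_dataset("asvp_esd.py", split="train")
    print(dataset)
    example = dataset[0]
    # ClassLabel stores labels as integers; int2str recovers the label string.
    print(example["path"], dataset.features["label"].int2str(example["label"]))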