holylovenia
committed on
Commit 214cdcd
1 Parent(s): 1d61a43
Upload audio_keyword_spotting.py with huggingface_hub
audio_keyword_spotting.py +198 -0
audio_keyword_spotting.py
ADDED
@@ -0,0 +1,198 @@
"""
SEA Crowd Data Loader for Audio Keyword Spotting.
"""
from typing import Dict, List, Tuple

import datasets
from datasets.download.download_manager import DownloadManager

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import TASK_TO_SCHEMA, Licenses, Tasks

# since the dataset has no citation of its own and was derived from others' work, this variable cites the three source works instead (one for ML Spoken Words and two for Trabina)
_CITATION = r"""
@inproceedings{mazumder2021multilingual,
    title={Multilingual Spoken Words Corpus},
    author={Mazumder, Mark and Chitlangia, Sharad and Banbury, Colby and Kang, Yiping and Ciro, Juan Manuel and Achorn, Keith and Galvez, Daniel and Sabini, Mark and Mattson, Peter and Kanter, David and others},
    booktitle={Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 2)},
    year={2021}
}
@inproceedings{wu-etal-2018-creating,
    title = "Creating a Translation Matrix of the {B}ible{'}s Names Across 591 Languages",
    author = "Wu, Winston and
      Vyas, Nidhi and
      Yarowsky, David",
    editor = "Calzolari, Nicoletta and
      Choukri, Khalid and
      Cieri, Christopher and
      Declerck, Thierry and
      Goggi, Sara and
      Hasida, Koiti and
      Isahara, Hitoshi and
      Maegaard, Bente and
      Mariani, Joseph and
      Mazo, H{\'e}l{\`e}ne and
      Moreno, Asuncion and
      Odijk, Jan and
      Piperidis, Stelios and
      Tokunaga, Takenobu",
    booktitle = "Proceedings of the Eleventh International Conference on Language Resources and Evaluation ({LREC} 2018)",
    month = may,
    year = "2018",
    address = "Miyazaki, Japan",
    publisher = "European Language Resources Association (ELRA)",
    url = "https://aclanthology.org/L18-1263",
}
@inproceedings{wu-yarowsky-2018-comparative,
    title = "A Comparative Study of Extremely Low-Resource Transliteration of the World{'}s Languages",
    author = "Wu, Winston and
      Yarowsky, David",
    editor = "Calzolari, Nicoletta and
      Choukri, Khalid and
      Cieri, Christopher and
      Declerck, Thierry and
      Goggi, Sara and
      Hasida, Koiti and
      Isahara, Hitoshi and
      Maegaard, Bente and
      Mariani, Joseph and
      Mazo, H{\'e}l{\`e}ne and
      Moreno, Asuncion and
      Odijk, Jan and
      Piperidis, Stelios and
      Tokunaga, Takenobu",
    booktitle = "Proceedings of the Eleventh International Conference on Language Resources and Evaluation ({LREC} 2018)",
    month = may,
    year = "2018",
    address = "Miyazaki, Japan",
    publisher = "European Language Resources Association (ELRA)",
    url = "https://aclanthology.org/L18-1150",
}
"""

logger = datasets.logging.get_logger(__name__)

_LOCAL = False
_LANGUAGES = ["ind"]

_DATASETNAME = "audio_keyword_spotting"
_DESCRIPTION = r"This dataset is an ASR dataset for short texts & voices, focusing on identifying common words (or keywords) with entities of Person Name and Place Name found in the Bible, as found in trabina (https://github.com/wswu/trabina)."

_HOMEPAGE = "https://huggingface.co/datasets/sil-ai/audio-keyword-spotting"
_LICENSE = Licenses.CC_BY_4_0.value

_URL = "https://huggingface.co/datasets/sil-ai/audio-keyword-spotting"
_HF_REMOTE_REF = "/".join(_URL.split("/")[-2:])

_SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]
_SOURCE_VERSION = "0.0.1"
_SEACROWD_VERSION = "2024.06.20"

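# for Tasks.SPEECH_RECOGNITION, TASK_TO_SCHEMA yields "SPTEXT", so this resolves to the "sptext" suffix used by the seacrowd config below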
CONFIG_SUFFIXES_FOR_TASK = [TASK_TO_SCHEMA.get(task).lower() for task in _SUPPORTED_TASKS]


def construct_configs() -> List[SEACrowdConfig]:
    """
    The function `construct_configs` constructs a list of SEACrowdConfig objects and returns the config list.

    input:
        None
    output:
        a list of `SEACrowdConfig` objects based on instantiated init variables
    """

    # set output var
    config_list = []

    # construct zipped arg for config instantiation
    TASKS_AND_CONFIG_SUFFIX_PAIRS = list(zip(_SUPPORTED_TASKS, CONFIG_SUFFIXES_FOR_TASK))

    # implement source schema
    version, config_name_prefix = _SOURCE_VERSION, "source"
    config_list += [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{config_name_prefix}",
            version=datasets.Version(version),
            description=f"{_DATASETNAME} {config_name_prefix} schema",
            schema=f"{config_name_prefix}",
            subset_id=config_name_prefix,
        )
    ]

    # implement SEACrowd schema
    version, config_name_prefix = _SEACROWD_VERSION, "seacrowd"
    for task_obj, config_name_suffix in TASKS_AND_CONFIG_SUFFIX_PAIRS:
        config_list += [
            SEACrowdConfig(
                name=f"{_DATASETNAME}_{config_name_prefix}_{config_name_suffix}",
                version=datasets.Version(version),
                description=f"{_DATASETNAME} {config_name_prefix} schema for {task_obj.name}",
                schema=f"{config_name_prefix}_{config_name_suffix}",
                subset_id=config_name_prefix,
            )
        ]
    return config_list


class AudioKeywordSpottingDataset(datasets.GeneratorBasedBuilder):
    """AudioKeywordSpotting dataset, subsetted from https://huggingface.co/datasets/sil-ai/audio-keyword-spotting"""

    # build configs for the source schema and each supported seacrowd task schema
    BUILDER_CONFIGS = construct_configs()
    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        _config_schema_name = self.config.schema
        logger.info(f"Received schema name: {self.config.schema}")
        # source schema
        if _config_schema_name == "source":
            _GENDERS = ["MALE", "FEMALE", "OTHER", "NAN"]
            features = datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "is_valid": datasets.Value("bool"),
                    "language": datasets.Value("string"),
                    "speaker_id": datasets.Value("string"),
                    "gender": datasets.ClassLabel(names=_GENDERS),
                    "keyword": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                }
            )

        # speech-text schema
        elif _config_schema_name == "seacrowd_sptext":
            features = schemas.speech_text_features

        else:
            raise ValueError(f"Received unexpected config schema of {_config_schema_name}!")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: DownloadManager) -> List[datasets.SplitGenerator]:
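        # the loader defers the actual download to `datasets.load_dataset` on the upstream repo ("sil-ai/audio-keyword-spotting", config "ind"), then exposes only the splits that contain rows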
        hf_dset_dict = datasets.load_dataset(_HF_REMOTE_REF, "ind")

        return [datasets.SplitGenerator(name=datasets.Split(dset_key), gen_kwargs={"hf_dset": dset}) for dset_key, dset in hf_dset_dict.items() if dset.num_rows > 0]

    def _generate_examples(self, hf_dset) -> Tuple[int, Dict]:
        _config_schema_name = self.config.schema

        _idx = 0
        for datapoints in hf_dset:
            # since no _idx is available to be used, we create it manually for both schemas
            if _config_schema_name == "source":
                yield _idx, {colname: datapoints[colname] for colname in self.info.features}

            elif _config_schema_name == "seacrowd_sptext":
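                # map the upstream columns onto the seacrowd speech-text schema; the source data carries no speaker-age field, hence None in metadata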
                yield _idx, {"id": _idx, "path": datapoints["file"], "audio": datapoints["audio"], "text": datapoints["keyword"], "speaker_id": datapoints["speaker_id"], "metadata": {"speaker_age": None, "speaker_gender": datapoints["gender"]}}

            else:
                raise ValueError(f"Received unexpected config schema of {_config_schema_name}!")

            _idx += 1
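
For reference, a minimal usage sketch (assumptions: the seacrowd package is installed and this script is saved locally as audio_keyword_spotting.py; the config names are the ones built by construct_configs above):

import datasets

# default config: the source schema with the upstream columns (file, is_valid, language, speaker_id, gender, keyword, audio)
source_dset = datasets.load_dataset("audio_keyword_spotting.py", name="audio_keyword_spotting_source")

# seacrowd speech-text schema (id, path, audio, text, speaker_id, metadata)
sptext_dset = datasets.load_dataset("audio_keyword_spotting.py", name="audio_keyword_spotting_seacrowd_sptext")

Recent versions of the datasets library may additionally require trust_remote_code=True when loading from a script.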