Lawrence Adu-Gyamfi committed on
Commit
b0b69c2
1 Parent(s): 8df7da3

First version of the akan_audio dataset

Files changed (1)
  1. asr_nlpghana.py +162 -0
asr_nlpghana.py ADDED
@@ -0,0 +1,162 @@
+ # coding=utf-8
+ # Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """NLPGhana Voice Dataset."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import os
+
+ import datasets
+
+
+ # _DATA_URL = "https://zenodo.org/record/4641533/files/ak.tar.gz?download=1"
+ # _DATA_URL = 'https://www.dropbox.com/s/o6k13voiy8kdhhk/ak.tar.gz?dl=1'
+ _DATA_URL = "ak.tar.gz"
+
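+ # Expected layout of the extracted archive (a sketch inferred from
+ # _split_generators and _generate_examples below; not verified against the
+ # actual ak.tar.gz):
+ #
+ #   ak/
+ #       train.tsv, test.tsv, validation.tsv  (tab-separated; header row must be
+ #           exactly: user_id, path, text, durationMsec, sampleRate,
+ #           speaker_gender, mother_tongue, date)
+ #       clips/  (the mp3 files referenced by the "path" column)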
+ _CITATION = """\
+ """
+
+ _DESCRIPTION = """\
+ This dataset comprises audio data in Twi, a low-resource language spoken by the Akan people of Ghana.
+ It has been adapted by NLPGhana.
+ """
+
+ _HOMEPAGE = "https://ghananlp.org/"
+
+ _LICENSE = ""
+
+ _LANGUAGES = {
+     "ak": {
+         "Language": "Twi",
+         "Date": "2023-07-08",
+         "Size": "753 MB",
+         "Version": "tw_05_2023-07-08",
+     },
+ }
+
+
+ class NLPGhanaVoiceConfig(datasets.BuilderConfig):
+     """BuilderConfig for NLPGhana."""
+
+     def __init__(self, name, sub_version, **kwargs):
+         """
+         Args:
+             name: `string`, name of the config (here the language id, e.g. "ak").
+             sub_version: `string`, version identifier of the data snapshot.
+             **kwargs: keyword arguments forwarded to super; `language`, `date`,
+                 and `size` are popped off and stored on the config.
+         """
+         self.sub_version = sub_version
+         self.language = kwargs.pop("language", None)
+         self.date_of_snapshot = kwargs.pop("date", None)
+         self.size = kwargs.pop("size", None)
+         description = (
+             f"NLPGhana speech-to-text dataset in {self.language}, "
+             f"version {self.sub_version} of {self.date_of_snapshot}. "
+             f"The dataset has a size of {self.size}."
+         )
+         super(NLPGhanaVoiceConfig, self).__init__(
+             name=name, version=datasets.Version("1.0.5", ""), description=description, **kwargs
+         )
+
+
+ class NLPGhanaVoice(datasets.GeneratorBasedBuilder):
+
+     BUILDER_CONFIGS = [
+         NLPGhanaVoiceConfig(
+             name=lang_id,
+             language=_LANGUAGES[lang_id]["Language"],
+             sub_version=_LANGUAGES[lang_id]["Version"],
+             date=_LANGUAGES[lang_id]["Date"],
+             size=_LANGUAGES[lang_id]["Size"],
+         )
+         for lang_id in _LANGUAGES
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "user_id": datasets.Value("string"),
+                 "path": datasets.Value("string"),
+                 "text": datasets.Value("string"),
+                 "durationMsec": datasets.Value("int64"),
+                 "sampleRate": datasets.Value("int64"),
+                 "speaker_gender": datasets.Value("string"),
+                 "mother_tongue": datasets.Value("string"),
+                 "date": datasets.Value("string"),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         dl_path = dl_manager.download_and_extract(_DATA_URL)
+         abs_path_to_data = os.path.join(dl_path, self.config.name)
+         abs_path_to_clips = os.path.join(abs_path_to_data, "clips")
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": os.path.join(abs_path_to_data, "train.tsv"),
+                     "path_to_clips": abs_path_to_clips,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": os.path.join(abs_path_to_data, "test.tsv"),
+                     "path_to_clips": abs_path_to_clips,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": os.path.join(abs_path_to_data, "validation.tsv"),
+                     "path_to_clips": abs_path_to_clips,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath, path_to_clips):
+         """Yields examples."""
+         data_fields = list(self._info().features.keys())
+         path_idx = data_fields.index("path")
+
+         with open(filepath, encoding="utf-8") as f:
+             lines = f.readlines()
+             headline = lines[0]
+
+             column_names = headline.strip().split("\t")
+             assert (
+                 column_names == data_fields
+             ), f"The file should have {data_fields} as column names, but has {column_names}"
+
+             for id_, line in enumerate(lines[1:]):
+                 field_values = line.strip().split("\t")
+
+                 # set the absolute path for the mp3 audio file
+                 field_values[path_idx] = os.path.join(path_to_clips, field_values[path_idx])
+
+                 # if a row is incomplete, pad the missing fields with empty strings
+                 if len(field_values) < len(data_fields):
+                     field_values += (len(data_fields) - len(field_values)) * [""]
+
+                 yield id_, dict(zip(data_fields, field_values))
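
A minimal usage sketch, assuming this script and ak.tar.gz sit together in the working directory (this uses the script-loading form of datasets.load_dataset; "ak" is the only config defined in _LANGUAGES above):

    from datasets import load_dataset

    # Load the Akan/Twi config; _DATA_URL points at the local ak.tar.gz archive.
    ds = load_dataset("asr_nlpghana.py", "ak")

    sample = ds["train"][0]
    print(sample["text"])   # transcription
    print(sample["path"])   # absolute path to the mp3 clip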