rename file

Files changed:
- imda-dataset_temp.py → imda-dataset-p1.py (+112 -183)
- imda-dataset.py (+183 -112)
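The commit swaps the contents of the two scripts: imda-dataset-p1.py (renamed from imda-dataset_temp.py) now holds the PART1 loader, which builds configurations from channel, gender and race and reads speaker metadata from a spreadsheet, while imda-dataset.py takes over the PART3 loader, which cuts long recordings into intervals using TextGrid transcripts. A hypothetical usage sketch for the renamed script, assuming it is invoked as a local datasets loading script with the './IMDA - National Speech Corpus/PART1' tree in place; the config name is the concatenation channel + gender + race, and the committed default is "allallall":

```python
import datasets

# Hypothetical local invocation of the renamed PART1 loading script.
# Config names concatenate channel + gender + race (e.g. "CHANNEL0FCHINESE");
# DEFAULT_CONFIG_NAME in the diff below is "allallall".
ds = datasets.load_dataset("./imda-dataset-p1.py", "allallall")
print(ds["train"][0]["audio_name"], ds["train"][0]["gender"])
```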
imda-dataset_temp.py → imda-dataset-p1.py
RENAMED
@@ -1,97 +1,54 @@
 import os
+import glob
 import datasets
-# import pandas as pd
+import pandas as pd
 from sklearn.model_selection import train_test_split
-from textgrid import textgrid
-import soundfile as sf
-import re
-import json
-
-def cleanup_string(line):
-
-    words_to_remove = ['(ppo)','(ppc)', '(ppb)', '(ppl)', '<s/>','<c/>','<q/>', '<fil/>', '<sta/>', '<nps/>', '<spk/>', '<non/>', '<unk>', '<s>', '<z>', '<nen>']
-
-    formatted_line = re.sub(r'\s+', ' ', line).strip().lower()
-
-    #detect all word that matches words in the words_to_remove list
-    for word in words_to_remove:
-        if re.search(word,formatted_line):
-            # formatted_line = re.sub(word,'', formatted_line)
-            formatted_line = formatted_line.replace(word,'')
-            formatted_line = re.sub(r'\s+', ' ', formatted_line).strip().lower()
-            # print("*** removed words: " + formatted_line)
-
-    #detect '\[(.*?)\].' e.g. 'Okay [ah], why did I gamble?'
-    #remove [ ] and keep text within
-    if re.search('\[(.*?)\]', formatted_line):
-        formatted_line = re.sub('\[(.*?)\]', r'\1', formatted_line).strip()
-        #print("***: " + formatted_line)
-
-    #detect '\((.*?)\).' e.g. 'Okay (um), why did I gamble?'
-    #remove ( ) and keep text within
-    if re.search('\((.*?)\)', formatted_line):
-        formatted_line = re.sub('\((.*?)\)', r'\1', formatted_line).strip()
-        # print("***: " + formatted_line)
-
-    #detect '\'(.*?)\'' e.g. 'not 'hot' per se'
-    #remove ' ' and keep text within
-    if re.search('\'(.*?)\'', formatted_line):
-        formatted_line = re.sub('\'(.*?)\'', r'\1', formatted_line).strip()
-        #print("***: " + formatted_line)
-
-    #remove punctation '''!()-[]{};:'"\, <>./?@#$%^&*_~'''
-    punctuation = '''!–;"\,./?@#$%^&*~'''
-    punctuation_list = str.maketrans("","",punctuation)
-    formatted_line = re.sub(r'-', ' ', formatted_line)
-    formatted_line = re.sub(r'_', ' ', formatted_line)
-    formatted_line = formatted_line.translate(punctuation_list)
-    formatted_line = re.sub(r'\s+', ' ', formatted_line).strip().lower()
-    #print("***: " + formatted_line)
-
-    return formatted_line
-
-
 
 _DESCRIPTION = """\
-The National Speech Corpus (NSC) is the first large-scale Singapore English corpus
-spearheaded by the Info-communications and Media Development Authority (IMDA) of Singapore.
+This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
 """
 
 _CITATION = """\
 """
 _CHANNEL_CONFIGS = sorted([
-    "Audio Same CloseMic", "Audio Separate IVR", "Audio Separate StandingMic"
+    "CHANNEL0", "CHANNEL1", "CHANNEL2"
 ])
 
-_HOMEPAGE = "https://www.imda.gov.sg/how-we-can-help/national-speech-corpus"
+_GENDER_CONFIGS = sorted(["F", "M"])
 
-_LICENSE = ""
+_RACE_CONFIGS = sorted(["CHINESE", "MALAY", "INDIAN", "OTHERS"])
 
-_PATH_TO_DATA = './IMDA - National Speech Corpus/PART3'
-# _PATH_TO_DATA = './PART1/DATA'
+_HOMEPAGE = "https://huggingface.co/indonesian-nlp/librivox-indonesia"
 
-INTERVAL_MAX_LENGTH = 25
+_LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"
+
+_PATH_TO_DATA = './IMDA - National Speech Corpus/PART1'
+# _PATH_TO_DATA = './PART1/DATA'
 
 class Minds14Config(datasets.BuilderConfig):
     """BuilderConfig for xtreme-s"""
 
     def __init__(
-        self, channel, description, homepage, path_to_data
+        self, channel, gender, race, description, homepage, path_to_data
     ):
         super(Minds14Config, self).__init__(
-            name=channel,
+            name=channel+gender+race,
            version=datasets.Version("1.0.0", ""),
            description=self.description,
        )
        self.channel = channel
+        self.gender = gender
+        self.race = race
        self.description = description
        self.homepage = homepage
        self.path_to_data = path_to_data
 
 
-def _build_config(channel):
+def _build_config(channel, gender, race):
     return Minds14Config(
         channel=channel,
+        gender=gender,
+        race=race,
         description=_DESCRIPTION,
         homepage=_HOMEPAGE,
         path_to_data=_PATH_TO_DATA,
@@ -116,21 +73,25 @@ class NewDataset(datasets.GeneratorBasedBuilder):
     # data = datasets.load_dataset('my_dataset', 'second_domain')
     BUILDER_CONFIGS = []
     for channel in _CHANNEL_CONFIGS + ["all"]:
-        BUILDER_CONFIGS.append(_build_config(channel))
+        for gender in _GENDER_CONFIGS + ["all"]:
+            for race in _RACE_CONFIGS + ["all"]:
+                BUILDER_CONFIGS.append(_build_config(channel, gender, race))
     # BUILDER_CONFIGS = [_build_config(name) for name in _CHANNEL_CONFIGS + ["all"]]
 
-    DEFAULT_CONFIG_NAME = "all" # It's not mandatory to have a default configuration. Just use one if it make sense.
+    DEFAULT_CONFIG_NAME = "allallall" # It's not mandatory to have a default configuration. Just use one if it make sense.
 
     def _info(self):
         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
         task_templates = None
+        # mics = _CHANNEL_CONFIGS
         features = datasets.Features(
             {
                 "audio": datasets.features.Audio(sampling_rate=16000),
                 "transcript": datasets.Value("string"),
                 "mic": datasets.Value("string"),
                 "audio_name": datasets.Value("string"),
-                "interval": datasets.Value("string")
+                "gender": datasets.Value("string"),
+                "race": datasets.Value("string"),
             }
         )
 
@@ -160,48 +121,31 @@ class NewDataset(datasets.GeneratorBasedBuilder):
             else [self.config.channel]
         )
 
-        with (os.path.join(self.config.path_to_data, "directory_list.json"), "r") as f:
-            directory_dict = json.load(f)
-
-        train_audio_list = []
-        test_audio_list = []
-        for mic in mics:
-            audio_list = []
-            if mic == "Audio Same CloseMic":
-                audio_list = [x for x in directory_dict[mic] if (x[-5] == 1) ]
-                train, test = train_test_split(audio_list, test_size=0.3, random_state=42, shuffle=True)
-                for path in train:
-                    train_audio_list.append(os.path.join(self.config.path_to_data, mic, path))
-                    s = list(path)
-                    s[-5] = "2"
-                    train_audio_list.append(os.path.join(self.config.path_to_data, mic, "".join(s)))
-                for path in test:
-                    test_audio_list.append(os.path.join(self.config.path_to_data, mic, path))
-                    s = list(path)
-                    s[-5] = "2"
-                    test_audio_list.append(os.path.join(self.config.path_to_data, mic, "".join(s)))
-            elif mic == "Audio Separate IVR":
-                audio_list = [x.split("\\")[0] for x in directory_dict[mic]]
-                train, test = train_test_split(audio_list, test_size=0.3, random_state=42, shuffle=True)
-                for folder in train:
-                    audios = [os.path.join(self.config.path_to_data, mic, x) for x in directory_dict[mic] if (x.split("\\")[0]==folder)]
-                    train_audio_list.extend(audios)
-                for folder in test:
-                    audios = [os.path.join(self.config.path_to_data, mic, x) for x in directory_dict[mic] if (x.split("\\")[0]==folder)]
-                    test_audio_list.extend(audios)
-            elif mic == "Audio Separate StandingMic":
-                audio_list = [x[:14] for x in directory_dict[mic]]
-                audio_list = list(set(audio_list))
-                train, test = train_test_split(audio_list, test_size=0.3, random_state=42, shuffle=True)
-                for folder in train:
-                    audios = [os.path.join(self.config.path_to_data, mic, x) for x in directory_dict[mic] if (x[:14]==folder)]
-                    train_audio_list.extend(audios)
-                for folder in test:
-                    audios = [os.path.join(self.config.path_to_data, mic, x) for x in directory_dict[mic] if (x[:14]==folder)]
-                    test_audio_list.extend(audios)
-
-        print(f"train_audio_list: { train_audio_list}")
-        print(f"test_audio_list: { test_audio_list}")
+        gender = (
+            _GENDER_CONFIGS
+            if self.config.gender == "all"
+            else [self.config.gender]
+        )
+
+        race = (
+            _RACE_CONFIGS
+            if self.config.race == "all"
+            else [self.config.race]
+        )
+
+        # augment speaker ids directly here
+        # read the speaker information
+        train_speaker_ids = []
+        test_speaker_ids = []
+        # path_to_speaker = os.path.join(self.config.path_to_data, "DOC", "Speaker Information (Part 1).XLSX")
+        path_to_speaker = dl_manager.download(os.path.join(self.config.path_to_data, "DOC", "Speaker Information (Part 1).XLSX"))
+        speaker_df = pd.read_excel(path_to_speaker, dtype={'SCD/PART1': object})
+        for g in gender:
+            for r in race:
+                X = speaker_df[(speaker_df["ACC"]==r) & (speaker_df["SEX"]==g)]
+                X_train, X_test = train_test_split(X, test_size=0.3, random_state=42, shuffle=True)
+                train_speaker_ids.extend(X_train["SCD/PART1"])
+                test_speaker_ids.extend(X_test["SCD/PART1"])
 
         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
@@ -210,15 +154,23 @@ class NewDataset(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    # "path_to_data": os.path.join(self.config.path_to_data, "Audio Same CloseMic"),
-                    "audio_list": train_audio_list,
+                    "path_to_data": self.config.path_to_data,
+                    "speaker_metadata":speaker_df,
+                    # "speaker_ids": train_speaker_ids,
+                    "speaker_ids":["0001"],
+                    "mics": mics,
+                    "dl_manager": dl_manager
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    # "path_to_data": os.path.join(self.config.path_to_data, "Audio Same CloseMic"),
-                    "audio_list": test_audio_list,
+                    "path_to_data": self.config.path_to_data,
+                    "speaker_metadata":speaker_df,
+                    # "speaker_ids": test_speaker_ids,
+                    "speaker_ids": ["0003"],
+                    "mics": mics,
+                    "dl_manager": dl_manager
                 },
             ),
         ]
@@ -226,78 +178,55 @@ class NewDataset(datasets.GeneratorBasedBuilder):
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     def _generate_examples(
         self,
-        audio_list,
+        path_to_data,
+        speaker_metadata,
+        speaker_ids,
+        mics,
+        dl_manager
     ):
         id_ = 0
-        for audio_path in audio_list:
-            file = os.path.split(audio_path)[-1]
-            folder = os.path.split(os.path.split(audio_path)[0])[-1]
-
-            # get script_path
-            if folder.split("_")[0] == "conf":
-                # mic == "Audio Separate IVR"
-                script_path = os.path.join(self.config.path_to_data, "Scripts Separate", folder+"_"+file[:-4]+".TextGrid")
-            elif folder.split()[1] == "Same":
-                # mic == "Audio Same CloseMic IVR"
-                script_path = os.path.join(self.config.path_to_data, "Scripts Same", file[:-4]+".TextGrid")
-            elif folder.split()[1] == "Separate":
-                # mic == "Audio Separate StandingMic":
-                script_path = os.path.join(self.config.path_to_data, "Scripts Separate", file[:-4]+".TextGrid")
-
-
-            # LOAD TRANSCRIPT
-            # script_path = os.path.join(self.config.path_to_data, 'Scripts Same', '3000-1.TextGrid')
-            # check that the textgrid file can be read
-            try:
-                tg = textgrid.TextGrid.fromFile(script_path)
-            except:
-                print(f"error reading textgrid file")
-                continue
-            # LOAD AUDIO
-            # archive_path = os.path.join(path_to_data, '3000-1.wav')
-            # check that archive path exists, else will not open the archive
-            if os.path.exists(audio_path):
-                # read into a numpy array using soundfile
-                data, sr = sf.read(audio_path)
-                result = {}
-                i = 0
-                intervalLength = 0
-                intervalStart = 0
-                transcript_list = []
-                filepath = os.path.join(self.config.path_to_data, 'tmp_clip.wav')
-                while i < (len(tg[0])-1):
-                    transcript = cleanup_string(tg[0][i].mark)
-                    if intervalLength == 0 and len(transcript) == 0:
-                        intervalStart = tg[0][i].maxTime
-                        i+=1
-                        continue
-                    intervalLength += tg[0][i].maxTime-tg[0][i].minTime
-                    if intervalLength > INTERVAL_MAX_LENGTH:
-                        print(f"INTERVAL LONGER THAN {intervalLength}")
-                        result["transcript"] = transcript
-                        result["interval"] = "start:"+str(tg[0][i].minTime)+", end:"+str(tg[0][i].maxTime)
-                        result["audio"] = {"path": audio_path, "bytes": data[int(tg[0][i].minTime*sr):int(tg[0][i].maxTime*sr)], "sampling_rate":sr}
-                        yield id_, result
-                        id_+= 1
-                        intervalLength = 0
-                    else:
-                        if (intervalLength + tg[0][i+1].maxTime-tg[0][i+1].minTime) < INTERVAL_MAX_LENGTH:
-                            if len(transcript) != 0:
-                                transcript_list.append(transcript)
-                            i+=1
-                            continue
-                        if len(transcript) == 0:
-                            spliced_audio = data[int(intervalStart*sr):int(tg[0][i].minTime*sr)]
-                        else:
-                            transcript_list.append(transcript)
-                            spliced_audio = data[int(intervalStart*sr):int(tg[0][i].maxTime*sr)]
-                        sf.write(filepath, spliced_audio, sr)
-                        result["interval"] = "start:"+str(intervalStart)+", end:"+str(tg[0][i].maxTime)
-                        result["audio"] = {"path": filepath, "bytes": spliced_audio, "sampling_rate":sr}
-                        result["transcript"] = ' '.join(transcript_list)
-                        yield id_, result
-                        id_+= 1
-                        intervalLength=0
-                        intervalStart=tg[0][i].maxTime
-                        transcript_list = []
-                        i+=1
+        for mic in mics:
+            for speaker in speaker_ids:
+                # TRANSCRIPT: in the case of error, if no file found then dictionary will b empty
+                d = {}
+                counter = 0
+                while counter < 10:
+                    data = dl_manager.download(os.path.join(path_to_data, "DATA", mic, "SCRIPT", mic[-1]+speaker+str(counter)+'.TXT'))
+                    try:
+                        line_num = 0
+                        with open(data, encoding='utf-8-sig') as f:
+                            for line in f:
+                                if line_num == 0:
+                                    key = line.split("\t")[0]
+                                    line_num += 1
+                                elif line_num == 1:
+                                    d[key] = line.strip()
+                                    line_num -= 1
+                    except:
+                        print(f"{counter}")
+                        break
+                    counter+=1
+                # AUDIO: in the case of error it will skip the speaker
+                # archive_path = os.path.join(path_to_data, "DATA", mic, "WAVE", "SPEAKER"+speaker+'.zip')
+                archive_path = dl_manager.download(os.path.join(path_to_data, "DATA", mic, "WAVE", "SPEAKER"+speaker+'.zip'))
+                # check that archive path exists, else will not open the archive
+                if os.path.exists(archive_path):
+                    audio_files = dl_manager.iter_archive(archive_path)
+                    for path, f in audio_files:
+                        # bug catching if any error?
+                        result = {}
+                        full_path = os.path.join(archive_path, path) if archive_path else path # bug catching here
+                        result["audio"] = {"path": full_path, "bytes": f.read()}
+                        result["audio_name"] = path
+                        result["mic"] = mic
+                        metadata_row = speaker_metadata.loc[speaker_metadata["SCD/PART1"]==speaker].iloc[0]
+                        result["gender"]=metadata_row["SEX"]
+                        result["race"]=metadata_row["ACC"]
+                        try:
+                            result["transcript"] = d[f.name[-13:-4]]
+                            yield id_, result
+                            id_ += 1
+                        except:
+                            print(f"unable to find transcript")
+
+
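Worth noting in the new PART1 loader above: train/test membership is decided at the speaker level, by splitting the "Speaker Information (Part 1).XLSX" sheet per gender/race stratum, so every utterance of a given speaker lands on one side of the split (the hard-coded ["0001"] and ["0003"] lists passed to gen_kwargs look like temporary debugging overrides of train_speaker_ids and test_speaker_ids). A minimal self-contained sketch of that splitting strategy, with an assumed stand-in DataFrame in place of the real spreadsheet:

```python
import pandas as pd
from sklearn.model_selection import train_test_split

# Assumed stand-in for "Speaker Information (Part 1).XLSX": speaker IDs
# (SCD/PART1) with sex and accent columns, as used in the diff above.
speaker_df = pd.DataFrame({
    "SCD/PART1": ["0001", "0002", "0003", "0004", "0005", "0006"],
    "SEX": ["F", "F", "M", "M", "F", "M"],
    "ACC": ["CHINESE", "MALAY", "CHINESE", "INDIAN", "CHINESE", "MALAY"],
})

train_speaker_ids, test_speaker_ids = [], []
for g in ["F", "M"]:
    for r in ["CHINESE", "MALAY", "INDIAN", "OTHERS"]:
        stratum = speaker_df[(speaker_df["ACC"] == r) & (speaker_df["SEX"] == g)]
        if len(stratum) < 2:
            continue  # train_test_split needs at least one row on each side
        X_train, X_test = train_test_split(
            stratum, test_size=0.3, random_state=42, shuffle=True
        )
        train_speaker_ids.extend(X_train["SCD/PART1"])
        test_speaker_ids.extend(X_test["SCD/PART1"])

print(train_speaker_ids, test_speaker_ids)
```

Splitting on speaker IDs rather than on individual recordings keeps each voice entirely inside one split, which avoids speaker leakage between train and test when the data is used for ASR.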
imda-dataset.py
CHANGED
@@ -1,54 +1,97 @@
 import os
-import glob
 import datasets
-import pandas as pd
+# import pandas as pd
 from sklearn.model_selection import train_test_split
+from textgrid import textgrid
+import soundfile as sf
+import re
+import json
+
+def cleanup_string(line):
+
+    words_to_remove = ['(ppo)','(ppc)', '(ppb)', '(ppl)', '<s/>','<c/>','<q/>', '<fil/>', '<sta/>', '<nps/>', '<spk/>', '<non/>', '<unk>', '<s>', '<z>', '<nen>']
+
+    formatted_line = re.sub(r'\s+', ' ', line).strip().lower()
+
+    #detect all word that matches words in the words_to_remove list
+    for word in words_to_remove:
+        if re.search(word,formatted_line):
+            # formatted_line = re.sub(word,'', formatted_line)
+            formatted_line = formatted_line.replace(word,'')
+            formatted_line = re.sub(r'\s+', ' ', formatted_line).strip().lower()
+            # print("*** removed words: " + formatted_line)
+
+    #detect '\[(.*?)\].' e.g. 'Okay [ah], why did I gamble?'
+    #remove [ ] and keep text within
+    if re.search('\[(.*?)\]', formatted_line):
+        formatted_line = re.sub('\[(.*?)\]', r'\1', formatted_line).strip()
+        #print("***: " + formatted_line)
+
+    #detect '\((.*?)\).' e.g. 'Okay (um), why did I gamble?'
+    #remove ( ) and keep text within
+    if re.search('\((.*?)\)', formatted_line):
+        formatted_line = re.sub('\((.*?)\)', r'\1', formatted_line).strip()
+        # print("***: " + formatted_line)
+
+    #detect '\'(.*?)\'' e.g. 'not 'hot' per se'
+    #remove ' ' and keep text within
+    if re.search('\'(.*?)\'', formatted_line):
+        formatted_line = re.sub('\'(.*?)\'', r'\1', formatted_line).strip()
+        #print("***: " + formatted_line)
+
+    #remove punctation '''!()-[]{};:'"\, <>./?@#$%^&*_~'''
+    punctuation = '''!–;"\,./?@#$%^&*~'''
+    punctuation_list = str.maketrans("","",punctuation)
+    formatted_line = re.sub(r'-', ' ', formatted_line)
+    formatted_line = re.sub(r'_', ' ', formatted_line)
+    formatted_line = formatted_line.translate(punctuation_list)
+    formatted_line = re.sub(r'\s+', ' ', formatted_line).strip().lower()
+    #print("***: " + formatted_line)
+
+    return formatted_line
+
+
 
 _DESCRIPTION = """\
-This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
+The National Speech Corpus (NSC) is the first large-scale Singapore English corpus
+spearheaded by the Info-communications and Media Development Authority (IMDA) of Singapore.
 """
 
 _CITATION = """\
 """
 _CHANNEL_CONFIGS = sorted([
-    "CHANNEL0", "CHANNEL1", "CHANNEL2"
+    "Audio Same CloseMic", "Audio Separate IVR", "Audio Separate StandingMic"
 ])
 
-_GENDER_CONFIGS = sorted(["F", "M"])
-
-_RACE_CONFIGS = sorted(["CHINESE", "MALAY", "INDIAN", "OTHERS"])
+_HOMEPAGE = "https://www.imda.gov.sg/how-we-can-help/national-speech-corpus"
 
-_HOMEPAGE = "https://huggingface.co/indonesian-nlp/librivox-indonesia"
+_LICENSE = ""
 
-_LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"
-
-_PATH_TO_DATA = './IMDA - National Speech Corpus/PART1'
+_PATH_TO_DATA = './IMDA - National Speech Corpus/PART3'
 # _PATH_TO_DATA = './PART1/DATA'
 
+INTERVAL_MAX_LENGTH = 25
+
 class Minds14Config(datasets.BuilderConfig):
     """BuilderConfig for xtreme-s"""
 
     def __init__(
-        self, channel, gender, race, description, homepage, path_to_data
+        self, channel, description, homepage, path_to_data
     ):
         super(Minds14Config, self).__init__(
-            name=channel+gender+race,
+            name=channel,
            version=datasets.Version("1.0.0", ""),
            description=self.description,
        )
        self.channel = channel
-        self.gender = gender
-        self.race = race
        self.description = description
        self.homepage = homepage
        self.path_to_data = path_to_data
 
 
-def _build_config(channel, gender, race):
+def _build_config(channel):
     return Minds14Config(
         channel=channel,
-        gender=gender,
-        race=race,
         description=_DESCRIPTION,
         homepage=_HOMEPAGE,
         path_to_data=_PATH_TO_DATA,
@@ -73,25 +116,21 @@ class NewDataset(datasets.GeneratorBasedBuilder):
     # data = datasets.load_dataset('my_dataset', 'second_domain')
     BUILDER_CONFIGS = []
     for channel in _CHANNEL_CONFIGS + ["all"]:
-        for gender in _GENDER_CONFIGS + ["all"]:
-            for race in _RACE_CONFIGS + ["all"]:
-                BUILDER_CONFIGS.append(_build_config(channel, gender, race))
+        BUILDER_CONFIGS.append(_build_config(channel))
     # BUILDER_CONFIGS = [_build_config(name) for name in _CHANNEL_CONFIGS + ["all"]]
 
-    DEFAULT_CONFIG_NAME = "allallall" # It's not mandatory to have a default configuration. Just use one if it make sense.
+    DEFAULT_CONFIG_NAME = "all" # It's not mandatory to have a default configuration. Just use one if it make sense.
 
     def _info(self):
         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
         task_templates = None
-        # mics = _CHANNEL_CONFIGS
         features = datasets.Features(
             {
                 "audio": datasets.features.Audio(sampling_rate=16000),
                 "transcript": datasets.Value("string"),
                 "mic": datasets.Value("string"),
                 "audio_name": datasets.Value("string"),
-                "gender": datasets.Value("string"),
-                "race": datasets.Value("string"),
+                "interval": datasets.Value("string")
             }
         )
 
@@ -121,31 +160,48 @@ class NewDataset(datasets.GeneratorBasedBuilder):
             else [self.config.channel]
         )
 
-        gender = (
-            _GENDER_CONFIGS
-            if self.config.gender == "all"
-            else [self.config.gender]
-        )
-
-        race = (
-            _RACE_CONFIGS
-            if self.config.race == "all"
-            else [self.config.race]
-        )
-
-        # augment speaker ids directly here
-        # read the speaker information
-        train_speaker_ids = []
-        test_speaker_ids = []
-        # path_to_speaker = os.path.join(self.config.path_to_data, "DOC", "Speaker Information (Part 1).XLSX")
-        path_to_speaker = dl_manager.download(os.path.join(self.config.path_to_data, "DOC", "Speaker Information (Part 1).XLSX"))
-        speaker_df = pd.read_excel(path_to_speaker, dtype={'SCD/PART1': object})
-        for g in gender:
-            for r in race:
-                X = speaker_df[(speaker_df["ACC"]==r) & (speaker_df["SEX"]==g)]
-                X_train, X_test = train_test_split(X, test_size=0.3, random_state=42, shuffle=True)
-                train_speaker_ids.extend(X_train["SCD/PART1"])
-                test_speaker_ids.extend(X_test["SCD/PART1"])
+        with (os.path.join(self.config.path_to_data, "directory_list.json"), "r") as f:
+            directory_dict = json.load(f)
+
+        train_audio_list = []
+        test_audio_list = []
+        for mic in mics:
+            audio_list = []
+            if mic == "Audio Same CloseMic":
+                audio_list = [x for x in directory_dict[mic] if (x[-5] == 1) ]
+                train, test = train_test_split(audio_list, test_size=0.3, random_state=42, shuffle=True)
+                for path in train:
+                    train_audio_list.append(os.path.join(self.config.path_to_data, mic, path))
+                    s = list(path)
+                    s[-5] = "2"
+                    train_audio_list.append(os.path.join(self.config.path_to_data, mic, "".join(s)))
+                for path in test:
+                    test_audio_list.append(os.path.join(self.config.path_to_data, mic, path))
+                    s = list(path)
+                    s[-5] = "2"
+                    test_audio_list.append(os.path.join(self.config.path_to_data, mic, "".join(s)))
+            elif mic == "Audio Separate IVR":
+                audio_list = [x.split("\\")[0] for x in directory_dict[mic]]
+                train, test = train_test_split(audio_list, test_size=0.3, random_state=42, shuffle=True)
+                for folder in train:
+                    audios = [os.path.join(self.config.path_to_data, mic, x) for x in directory_dict[mic] if (x.split("\\")[0]==folder)]
+                    train_audio_list.extend(audios)
+                for folder in test:
+                    audios = [os.path.join(self.config.path_to_data, mic, x) for x in directory_dict[mic] if (x.split("\\")[0]==folder)]
+                    test_audio_list.extend(audios)
+            elif mic == "Audio Separate StandingMic":
+                audio_list = [x[:14] for x in directory_dict[mic]]
+                audio_list = list(set(audio_list))
+                train, test = train_test_split(audio_list, test_size=0.3, random_state=42, shuffle=True)
+                for folder in train:
+                    audios = [os.path.join(self.config.path_to_data, mic, x) for x in directory_dict[mic] if (x[:14]==folder)]
+                    train_audio_list.extend(audios)
+                for folder in test:
+                    audios = [os.path.join(self.config.path_to_data, mic, x) for x in directory_dict[mic] if (x[:14]==folder)]
+                    test_audio_list.extend(audios)
+
+        print(f"train_audio_list: { train_audio_list}")
+        print(f"test_audio_list: { test_audio_list}")
 
         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
@@ -154,23 +210,15 @@ class NewDataset(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "path_to_data": self.config.path_to_data,
-                    "speaker_metadata":speaker_df,
-                    # "speaker_ids": train_speaker_ids,
-                    "speaker_ids":["0001"],
-                    "mics": mics,
-                    "dl_manager": dl_manager
+                    # "path_to_data": os.path.join(self.config.path_to_data, "Audio Same CloseMic"),
+                    "audio_list": train_audio_list,
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "path_to_data": self.config.path_to_data,
-                    "speaker_metadata":speaker_df,
-                    # "speaker_ids": test_speaker_ids,
-                    "speaker_ids": ["0003"],
-                    "mics": mics,
-                    "dl_manager": dl_manager
+                    # "path_to_data": os.path.join(self.config.path_to_data, "Audio Same CloseMic"),
+                    "audio_list": test_audio_list,
                 },
             ),
         ]
@@ -178,55 +226,78 @@ class NewDataset(datasets.GeneratorBasedBuilder):
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     def _generate_examples(
         self,
-        path_to_data,
-        speaker_metadata,
-        speaker_ids,
-        mics,
-        dl_manager
+        audio_list,
     ):
         id_ = 0
-        for mic in mics:
-            for speaker in speaker_ids:
-                # TRANSCRIPT: in the case of error, if no file found then dictionary will b empty
-                d = {}
-                counter = 0
-                while counter < 10:
-                    data = dl_manager.download(os.path.join(path_to_data, "DATA", mic, "SCRIPT", mic[-1]+speaker+str(counter)+'.TXT'))
-                    try:
-                        line_num = 0
-                        with open(data, encoding='utf-8-sig') as f:
-                            for line in f:
-                                if line_num == 0:
-                                    key = line.split("\t")[0]
-                                    line_num += 1
-                                elif line_num == 1:
-                                    d[key] = line.strip()
-                                    line_num -= 1
-                    except:
-                        print(f"{counter}")
-                        break
-                    counter+=1
-                # AUDIO: in the case of error it will skip the speaker
-                # archive_path = os.path.join(path_to_data, "DATA", mic, "WAVE", "SPEAKER"+speaker+'.zip')
-                archive_path = dl_manager.download(os.path.join(path_to_data, "DATA", mic, "WAVE", "SPEAKER"+speaker+'.zip'))
-                # check that archive path exists, else will not open the archive
-                if os.path.exists(archive_path):
-                    audio_files = dl_manager.iter_archive(archive_path)
-                    for path, f in audio_files:
-                        # bug catching if any error?
-                        result = {}
-                        full_path = os.path.join(archive_path, path) if archive_path else path # bug catching here
-                        result["audio"] = {"path": full_path, "bytes": f.read()}
-                        result["audio_name"] = path
-                        result["mic"] = mic
-                        metadata_row = speaker_metadata.loc[speaker_metadata["SCD/PART1"]==speaker].iloc[0]
-                        result["gender"]=metadata_row["SEX"]
-                        result["race"]=metadata_row["ACC"]
-                        try:
-                            result["transcript"] = d[f.name[-13:-4]]
-                            yield id_, result
-                            id_ += 1
-                        except:
-                            print(f"unable to find transcript")
-
-
+        for audio_path in audio_list:
+            file = os.path.split(audio_path)[-1]
+            folder = os.path.split(os.path.split(audio_path)[0])[-1]
+
+            # get script_path
+            if folder.split("_")[0] == "conf":
+                # mic == "Audio Separate IVR"
+                script_path = os.path.join(self.config.path_to_data, "Scripts Separate", folder+"_"+file[:-4]+".TextGrid")
+            elif folder.split()[1] == "Same":
+                # mic == "Audio Same CloseMic IVR"
+                script_path = os.path.join(self.config.path_to_data, "Scripts Same", file[:-4]+".TextGrid")
+            elif folder.split()[1] == "Separate":
+                # mic == "Audio Separate StandingMic":
+                script_path = os.path.join(self.config.path_to_data, "Scripts Separate", file[:-4]+".TextGrid")
+
+
+            # LOAD TRANSCRIPT
+            # script_path = os.path.join(self.config.path_to_data, 'Scripts Same', '3000-1.TextGrid')
+            # check that the textgrid file can be read
+            try:
+                tg = textgrid.TextGrid.fromFile(script_path)
+            except:
+                print(f"error reading textgrid file")
+                continue
+            # LOAD AUDIO
+            # archive_path = os.path.join(path_to_data, '3000-1.wav')
+            # check that archive path exists, else will not open the archive
+            if os.path.exists(audio_path):
+                # read into a numpy array using soundfile
+                data, sr = sf.read(audio_path)
+                result = {}
+                i = 0
+                intervalLength = 0
+                intervalStart = 0
+                transcript_list = []
+                filepath = os.path.join(self.config.path_to_data, 'tmp_clip.wav')
+                while i < (len(tg[0])-1):
+                    transcript = cleanup_string(tg[0][i].mark)
+                    if intervalLength == 0 and len(transcript) == 0:
+                        intervalStart = tg[0][i].maxTime
+                        i+=1
+                        continue
+                    intervalLength += tg[0][i].maxTime-tg[0][i].minTime
+                    if intervalLength > INTERVAL_MAX_LENGTH:
+                        print(f"INTERVAL LONGER THAN {intervalLength}")
+                        result["transcript"] = transcript
+                        result["interval"] = "start:"+str(tg[0][i].minTime)+", end:"+str(tg[0][i].maxTime)
+                        result["audio"] = {"path": audio_path, "bytes": data[int(tg[0][i].minTime*sr):int(tg[0][i].maxTime*sr)], "sampling_rate":sr}
+                        yield id_, result
+                        id_+= 1
+                        intervalLength = 0
+                    else:
+                        if (intervalLength + tg[0][i+1].maxTime-tg[0][i+1].minTime) < INTERVAL_MAX_LENGTH:
+                            if len(transcript) != 0:
+                                transcript_list.append(transcript)
+                            i+=1
+                            continue
+                        if len(transcript) == 0:
+                            spliced_audio = data[int(intervalStart*sr):int(tg[0][i].minTime*sr)]
+                        else:
+                            transcript_list.append(transcript)
+                            spliced_audio = data[int(intervalStart*sr):int(tg[0][i].maxTime*sr)]
+                        sf.write(filepath, spliced_audio, sr)
+                        result["interval"] = "start:"+str(intervalStart)+", end:"+str(tg[0][i].maxTime)
+                        result["audio"] = {"path": filepath, "bytes": spliced_audio, "sampling_rate":sr}
+                        result["transcript"] = ' '.join(transcript_list)
+                        yield id_, result
+                        id_+= 1
+                        intervalLength=0
+                        intervalStart=tg[0][i].maxTime
+                        transcript_list = []
+                        i+=1
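The PART3 generator that now lives in imda-dataset.py walks each recording's TextGrid tier and greedily accumulates consecutive intervals until the next one would push the segment past INTERVAL_MAX_LENGTH (25 s), then yields the spliced audio together with the joined, cleanup_string-normalised transcripts. (Note that the committed directory_list.json read, `with (os.path.join(...), "r") as f:`, is missing its open() call and would fail at runtime, since a tuple is not a context manager.) A simplified sketch of the same greedy grouping idea, using plain (start, end, text) tuples in place of a real TextGrid tier; only the 25-second cap is taken from the diff, everything else is illustrative:

```python
INTERVAL_MAX_LENGTH = 25  # seconds, the same cap used in the diff above

def merge_intervals(tier):
    """Greedily merge consecutive (start, end, text) intervals into
    segments whose total span stays within INTERVAL_MAX_LENGTH."""
    segments = []
    seg_start = seg_end = None
    texts = []
    for start, end, text in tier:
        if seg_start is None:
            if not text:
                continue  # skip leading silence, as the loader does
            seg_start, seg_end, texts = start, end, [text]
        elif end - seg_start <= INTERVAL_MAX_LENGTH:
            seg_end = end  # next interval still fits: extend the segment
            if text:
                texts.append(text)
        else:
            segments.append((seg_start, seg_end, " ".join(texts)))
            seg_start, seg_end, texts = start, end, [text] if text else []
    if seg_start is not None:
        segments.append((seg_start, seg_end, " ".join(texts)))
    return segments

# Toy tier of 10 s intervals: with a 25 s cap the first two merge,
# the third starts a new segment.
tier = [(0.0, 10.0, "hello"), (10.0, 20.0, "world"), (20.0, 30.0, "again")]
print(merge_intervals(tier))
# [(0.0, 20.0, 'hello world'), (20.0, 30.0, 'again')]
```

The committed version differs in detail: it sums interval durations rather than capping the span, writes each spliced clip to a temporary tmp_clip.wav via soundfile, and yields a result dict per segment. But the grouping logic is the same greedy accumulate-and-flush pattern.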