yoom618 committed
Commit fc2554c
1 Parent(s): 3245442

Create librispeech_pc.py

Files changed (1)
  1. librispeech_pc.py  +349 -0

librispeech_pc.py  ADDED
@@ -0,0 +1,349 @@
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""LibriSpeech-PC dataset module, adapted from the LibriSpeech dataset module."""


import json
import os

import datasets


_CITATION = {
    "librispeech": """\
@inproceedings{panayotov2015librispeech,
  title={Librispeech: an ASR corpus based on public domain audio books},
  author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},
  booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on},
  pages={5206--5210},
  year={2015},
  organization={IEEE}
}""",
    "librispeech_pc": """\
@article{meister2023librispeechpc,
  title={LibriSpeech-PC: Benchmark for Evaluation of Punctuation and Capitalization Capabilities of end-to-end ASR Models},
  author={A. Meister and M. Novikov and N. Karpov and E. Bakhturina and V. Lavrukhin and B. Ginsburg},
  journal={arXiv preprint arXiv:2310.02943},
  year={2023},
}""",
}

_DESCRIPTION = """\
Merges LibriSpeech audio files with the punctuation- and capitalization-restored transcripts from LibriSpeech-PC.
This script is adapted from the original LibriSpeech dataset module on HuggingFace Datasets (https://huggingface.co/datasets/openslr/librispeech_asr).
If you have already downloaded LibriSpeech via `load_dataset('openslr/librispeech_asr')`, the script reuses the extracted audio files from the local cache instead of downloading them twice (only tested in my local environment, though).
"""

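# Example usage (a sketch, not part of the original commit): assuming this script is saved
# locally as `librispeech_pc.py` and the `datasets` audio dependencies are installed,
# loading the "clean" config should expose both transcript variants per utterance:
#
#     from datasets import load_dataset
#     ds = load_dataset("librispeech_pc.py", "clean", split="train.100")
#     print(ds[0]["text"])             # punctuated & capitalized (LibriSpeech-PC)
#     print(ds[0]["text_normalized"])  # original normalized LibriSpeech transcript
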
_URL = "http://www.openslr.org/12"
_DL_URL = "http://www.openslr.org/resources/12/"

_URL_PC = "https://www.openslr.org/145"
_DL_URL_PC = "https://www.openslr.org/resources/145/"


_DL_URLS = {
    "clean": {
        "dev": _DL_URL + "dev-clean.tar.gz",
        "test": _DL_URL + "test-clean.tar.gz",
        "train.100": _DL_URL + "train-clean-100.tar.gz",
        "train.360": _DL_URL + "train-clean-360.tar.gz",
        "transcript_pc": _DL_URL_PC + "manifests.tar.gz",
    },
    "other": {
        "test": _DL_URL + "test-other.tar.gz",
        "dev": _DL_URL + "dev-other.tar.gz",
        "train.500": _DL_URL + "train-other-500.tar.gz",
        "transcript_pc": _DL_URL_PC + "manifests.tar.gz",
    },
    "all": {
        "dev.clean": _DL_URL + "dev-clean.tar.gz",
        "dev.other": _DL_URL + "dev-other.tar.gz",
        "test.clean": _DL_URL + "test-clean.tar.gz",
        "test.other": _DL_URL + "test-other.tar.gz",
        "train.clean.100": _DL_URL + "train-clean-100.tar.gz",
        "train.clean.360": _DL_URL + "train-clean-360.tar.gz",
        "train.other.500": _DL_URL + "train-other-500.tar.gz",
        "transcript_pc": _DL_URL_PC + "manifests.tar.gz",
    },
}


class LibrispeechASRConfig(datasets.BuilderConfig):
    """BuilderConfig for LibriSpeechASR."""

    def __init__(self, **kwargs):
        """
        Args:
          data_dir: `string`, the path to the folder containing the files in the
            downloaded .tar
          citation: `string`, citation for the data set
          url: `string`, url for information about the data set
          **kwargs: keyword arguments forwarded to super.
        """
        super(LibrispeechASRConfig, self).__init__(version=datasets.Version("2.1.0", ""), **kwargs)


class LibrispeechASR(datasets.GeneratorBasedBuilder):
    """LibriSpeech-PC dataset."""

    DEFAULT_WRITER_BATCH_SIZE = 256
    DEFAULT_CONFIG_NAME = "all"
    BUILDER_CONFIGS = [
        LibrispeechASRConfig(name="clean", description="'Clean' speech."),
        LibrispeechASRConfig(name="other", description="'Other', more challenging, speech."),
        LibrispeechASRConfig(name="all", description="Combined clean and other dataset."),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "text": datasets.Value("string"),
                    "text_raw": datasets.Value("string"),
                    "text_normalized": datasets.Value("string"),
                    "speaker_id": datasets.Value("int64"),
                    "chapter_id": datasets.Value("int64"),
                    "id": datasets.Value("string"),
                    "duration": datasets.Value("float"),
                }
            ),
            supervised_keys=("file", "text"),
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        archive_path = dl_manager.download(_DL_URLS[self.config.name])
        # (Optional) In non-streaming mode, we can extract the archive locally to have actual local audio files:
        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}

        # The LibriSpeech-PC manifests are read from the extracted archive, so this script
        # assumes non-streaming mode (`transcript_pc_dir` is None when streaming).
        transcript_pc_dir = local_extracted_archive.get("transcript_pc")

        if self.config.name == "clean":
            train_splits = [
                datasets.SplitGenerator(
                    name="train.100",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("train.100"),
                        "files": dl_manager.iter_archive(archive_path["train.100"]),
                        "transcript_pc_fname": os.path.join(transcript_pc_dir, "train-clean-100.json"),
                    },
                ),
                datasets.SplitGenerator(
                    name="train.360",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("train.360"),
                        "files": dl_manager.iter_archive(archive_path["train.360"]),
                        "transcript_pc_fname": os.path.join(transcript_pc_dir, "train-clean-360.json"),
                    },
                ),
            ]
            dev_splits = [
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("dev"),
                        "files": dl_manager.iter_archive(archive_path["dev"]),
                        "transcript_pc_fname": os.path.join(transcript_pc_dir, "dev-clean.json"),
                    },
                )
            ]
            test_splits = [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("test"),
                        "files": dl_manager.iter_archive(archive_path["test"]),
                        "transcript_pc_fname": os.path.join(transcript_pc_dir, "test-clean.json"),
                    },
                )
            ]
        elif self.config.name == "other":
            train_splits = [
                datasets.SplitGenerator(
                    name="train.500",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("train.500"),
                        "files": dl_manager.iter_archive(archive_path["train.500"]),
                        "transcript_pc_fname": os.path.join(transcript_pc_dir, "train-other-500.json"),
                    },
                )
            ]
            dev_splits = [
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("dev"),
                        "files": dl_manager.iter_archive(archive_path["dev"]),
                        "transcript_pc_fname": os.path.join(transcript_pc_dir, "dev-other.json"),
                    },
                )
            ]
            test_splits = [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("test"),
                        "files": dl_manager.iter_archive(archive_path["test"]),
                        "transcript_pc_fname": os.path.join(transcript_pc_dir, "test-other.json"),
                    },
                )
            ]
        elif self.config.name == "all":
            train_splits = [
                datasets.SplitGenerator(
                    name="train.clean.100",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("train.clean.100"),
                        "files": dl_manager.iter_archive(archive_path["train.clean.100"]),
                        "transcript_pc_fname": os.path.join(transcript_pc_dir, "train-clean-100.json"),
                    },
                ),
                datasets.SplitGenerator(
                    name="train.clean.360",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("train.clean.360"),
                        "files": dl_manager.iter_archive(archive_path["train.clean.360"]),
                        "transcript_pc_fname": os.path.join(transcript_pc_dir, "train-clean-360.json"),
                    },
                ),
                datasets.SplitGenerator(
                    name="train.other.500",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("train.other.500"),
                        "files": dl_manager.iter_archive(archive_path["train.other.500"]),
                        "transcript_pc_fname": os.path.join(transcript_pc_dir, "train-other-500.json"),
                    },
                ),
            ]
            dev_splits = [
                datasets.SplitGenerator(
                    name="validation.clean",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("dev.clean"),
                        "files": dl_manager.iter_archive(archive_path["dev.clean"]),
                        "transcript_pc_fname": os.path.join(transcript_pc_dir, "dev-clean.json"),
                    },
                ),
                datasets.SplitGenerator(
                    name="validation.other",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("dev.other"),
                        "files": dl_manager.iter_archive(archive_path["dev.other"]),
                        "transcript_pc_fname": os.path.join(transcript_pc_dir, "dev-other.json"),
                    },
                ),
            ]
            test_splits = [
                datasets.SplitGenerator(
                    name="test.clean",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("test.clean"),
                        "files": dl_manager.iter_archive(archive_path["test.clean"]),
                        "transcript_pc_fname": os.path.join(transcript_pc_dir, "test-clean.json"),
                    },
                ),
                datasets.SplitGenerator(
                    name="test.other",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("test.other"),
                        "files": dl_manager.iter_archive(archive_path["test.other"]),
                        "transcript_pc_fname": os.path.join(transcript_pc_dir, "test-other.json"),
                    },
                ),
            ]

        return train_splits + dev_splits + test_splits

    def _generate_examples(self, files, local_extracted_archive, transcript_pc_fname):
        """Generate examples from a LibriSpeech archive_path, merged with LibriSpeech-PC transcripts."""
        key, unseen = 0, 0
        audio_data = {}
        transcripts = []

        # Load the LibriSpeech-PC manifest (JSON Lines) and index it by utterance id.
        transcripts_pc = dict()
        with open(transcript_pc_fname, mode="r") as f:
            data = f.read().splitlines()
            data = [json.loads(d) for d in data]
            for d in data:
                _id = d["audio_filepath"].split("/")[-1][: -len(".flac")]
                del d["audio_filepath"]
                transcripts_pc.update(
                    {_id: d}  # keys in d: duration, text, text_raw
                )

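        # For illustration only (not part of the original commit, values are made up):
        # each manifest entry is expected to look roughly like
        #   {"audio_filepath": ".../1234-5678-0000.flac", "duration": 4.2,
        #    "text": "Hello, world.", "text_raw": "Hello world"}
        # so after the loop `transcripts_pc` maps "1234-5678-0000" -> {duration, text, text_raw}.
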
        # Utterance ids present in LibriSpeech but missing from LibriSpeech-PC are logged here.
        os.makedirs("./unexisting_transcripts_id", exist_ok=True)
        try:
            os.remove(f"./unexisting_transcripts_id/{os.path.basename(transcript_pc_fname)[:-5]}.txt")
        except FileNotFoundError:
            pass

        for path, f in files:
            if path.endswith(".flac"):
                id_ = path.split("/")[-1][: -len(".flac")]
                audio_data[id_] = f.read()
            elif path.endswith(".trans.txt"):
                for line in f:
                    if line:
                        line = line.decode("utf-8").strip()
                        id_, transcript = line.split(" ", 1)
                        audio_file = f"{id_}.flac"
                        speaker_id, chapter_id = [int(el) for el in id_.split("-")[:2]]
                        audio_file = (
                            os.path.join(local_extracted_archive, audio_file)
                            if local_extracted_archive
                            else audio_file
                        )
                        transcripts.append(
                            {
                                "id": id_,
                                "speaker_id": speaker_id,
                                "chapter_id": chapter_id,
                                "file": audio_file,
                                "text_normalized": transcript,
                            }
                        )

            # Flush once the collected audio files and transcripts line up
            # (i.e., a whole chapter directory has been read).
            if audio_data and len(audio_data) == len(transcripts):
                for transcript in transcripts:
                    audio = {"path": transcript["file"], "bytes": audio_data[transcript["id"]]}
                    transcript_pc = transcripts_pc.pop(transcript["id"], {})
                    if transcript_pc:
                        yield key, {"audio": audio, **transcript, **transcript_pc}
                        key += 1
                    else:
                        # No LibriSpeech-PC transcript for this utterance: drop it and log its id.
                        with open(f"./unexisting_transcripts_id/{os.path.basename(transcript_pc_fname)[:-5]}.txt", mode="a") as log:
                            log.write(f"{transcript['id']}\n")
                        unseen += 1
                audio_data = {}
                transcripts = []

        print(f"{unseen} LibriSpeech utterances in {os.path.basename(transcript_pc_fname)[:-5]} have no LibriSpeech-PC transcript and were dropped.")