andreagasparini committed on
Commit
cf6d53c
1 Parent(s): ba97954

Create new file

Files changed (1)
  1. librispeech_test_only.py +288 -0
librispeech_test_only.py ADDED
@@ -0,0 +1,288 @@
+# coding=utf-8
+# Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Librispeech automatic speech recognition dataset."""
+
+
+import os
+
+import datasets
+from datasets.tasks import AutomaticSpeechRecognition
+
+
+_CITATION = """\
+@inproceedings{panayotov2015librispeech,
+  title={Librispeech: an ASR corpus based on public domain audio books},
+  author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},
+  booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on},
+  pages={5206--5210},
+  year={2015},
+  organization={IEEE}
+}
+"""
+
+_DESCRIPTION = """\
+LibriSpeech is a corpus of approximately 1000 hours of read English speech with a sampling rate of 16 kHz,
+prepared by Vassil Panayotov with the assistance of Daniel Povey. The data is derived from read
+audiobooks from the LibriVox project, and has been carefully segmented and aligned.
+"""
+
+_URL = "http://www.openslr.org/12"
+_DL_URL = "http://www.openslr.org/resources/12/"
+
+
+_DL_URLS = {
+    "clean": {
+        "dev": _DL_URL + "dev-clean.tar.gz",
+        "test": _DL_URL + "test-clean.tar.gz",
+        "train.100": _DL_URL + "train-clean-100.tar.gz",
+        "train.360": _DL_URL + "train-clean-360.tar.gz",
+    },
+    "other": {
+        "test": _DL_URL + "test-other.tar.gz",
+        "dev": _DL_URL + "dev-other.tar.gz",
+        "train.500": _DL_URL + "train-other-500.tar.gz",
+    },
+    "all": {
+        "dev.clean": _DL_URL + "dev-clean.tar.gz",
+        "dev.other": _DL_URL + "dev-other.tar.gz",
+        "test.clean": _DL_URL + "test-clean.tar.gz",
+        "test.other": _DL_URL + "test-other.tar.gz",
+        "train.clean.100": _DL_URL + "train-clean-100.tar.gz",
+        "train.clean.360": _DL_URL + "train-clean-360.tar.gz",
+        "train.other.500": _DL_URL + "train-other-500.tar.gz",
+    },
+}
+
+
+class LibrispeechASRConfig(datasets.BuilderConfig):
+    """BuilderConfig for LibriSpeechASR."""
+
+    def __init__(self, **kwargs):
+        """
+        Args:
+          data_dir: `string`, the path to the folder containing the files in the
+            downloaded .tar
+          citation: `string`, citation for the data set
+          url: `string`, url for information about the data set
+          **kwargs: keyword arguments forwarded to super.
+        """
+        super(LibrispeechASRConfig, self).__init__(version=datasets.Version("2.1.0", ""), **kwargs)
+
+
+class LibrispeechASR(datasets.GeneratorBasedBuilder):
+    """Librispeech dataset."""
+
+    DEFAULT_WRITER_BATCH_SIZE = 256
+    DEFAULT_CONFIG_NAME = "all"
+    BUILDER_CONFIGS = [
+        LibrispeechASRConfig(name="clean", description="'Clean' speech."),
+        LibrispeechASRConfig(name="other", description="'Other', more challenging, speech."),
+        LibrispeechASRConfig(name="all", description="Combined clean and other dataset."),
+    ]
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "file": datasets.Value("string"),
+                    "audio": datasets.Audio(sampling_rate=16_000),
+                    "text": datasets.Value("string"),
+                    "speaker_id": datasets.Value("int64"),
+                    "chapter_id": datasets.Value("int64"),
+                    "id": datasets.Value("string"),
+                }
+            ),
+            supervised_keys=("file", "text"),
+            homepage=_URL,
+            citation=_CITATION,
+            task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
+        )
+
+    def _split_generators(self, dl_manager):
+        archive_path = dl_manager.download(_DL_URLS[self.config.name])
+        # (Optional) In non-streaming mode, we can extract the archive locally to have actual local audio files:
+        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}
+
+        train_splits = list()
+        dev_splits = list()
+
+        if self.config.name == "clean":
+            """
+            train_splits = [
+                datasets.SplitGenerator(
+                    name="train.100",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("train.100"),
+                        "files": dl_manager.iter_archive(archive_path["train.100"]),
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name="train.360",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("train.360"),
+                        "files": dl_manager.iter_archive(archive_path["train.360"]),
+                    },
+                ),
+            ]
+            dev_splits = [
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("dev"),
+                        "files": dl_manager.iter_archive(archive_path["dev"]),
+                    },
+                )
+            ]
+            """
+            test_splits = [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("test"),
+                        "files": dl_manager.iter_archive(archive_path["test"]),
+                    },
+                )
+            ]
+        elif self.config.name == "other":
+            """
+            train_splits = [
+                datasets.SplitGenerator(
+                    name="train.500",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("train.500"),
+                        "files": dl_manager.iter_archive(archive_path["train.500"]),
+                    },
+                )
+            ]
+            dev_splits = [
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("dev"),
+                        "files": dl_manager.iter_archive(archive_path["dev"]),
+                    },
+                )
+            ]
+            """
+            test_splits = [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("test"),
+                        "files": dl_manager.iter_archive(archive_path["test"]),
+                    },
+                )
+            ]
+        elif self.config.name == "all":
+            """
+            train_splits = [
+                datasets.SplitGenerator(
+                    name="train.clean.100",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("train.clean.100"),
+                        "files": dl_manager.iter_archive(archive_path["train.clean.100"]),
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name="train.clean.360",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("train.clean.360"),
+                        "files": dl_manager.iter_archive(archive_path["train.clean.360"]),
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name="train.other.500",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("train.other.500"),
+                        "files": dl_manager.iter_archive(archive_path["train.other.500"]),
+                    },
+                ),
+            ]
+            dev_splits = [
+                datasets.SplitGenerator(
+                    name="validation.clean",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("validation.clean"),
+                        "files": dl_manager.iter_archive(archive_path["dev.clean"]),
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name="validation.other",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("validation.other"),
+                        "files": dl_manager.iter_archive(archive_path["dev.other"]),
+                    },
+                ),
+            ]
+            """
+            test_splits = [
+                datasets.SplitGenerator(
+                    name="test.clean",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("test.clean"),
+                        "files": dl_manager.iter_archive(archive_path["test.clean"]),
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name="test.other",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("test.other"),
+                        "files": dl_manager.iter_archive(archive_path["test.other"]),
+                    },
+                ),
+            ]
+
+        return train_splits + dev_splits + test_splits
+
+    def _generate_examples(self, files, local_extracted_archive):
+        """Generate examples from a LibriSpeech archive_path."""
+        key = 0
+        audio_data = {}
+        transcripts = []
+        for path, f in files:
+            if path.endswith(".flac"):
+                id_ = path.split("/")[-1][: -len(".flac")]
+                audio_data[id_] = f.read()
+            elif path.endswith(".trans.txt"):
+                for line in f:
+                    if line:
+                        line = line.decode("utf-8").strip()
+                        id_, transcript = line.split(" ", 1)
+                        audio_file = f"{id_}.flac"
+                        speaker_id, chapter_id = [int(el) for el in id_.split("-")[:2]]
+                        audio_file = (
+                            os.path.join(local_extracted_archive, audio_file)
+                            if local_extracted_archive
+                            else audio_file
+                        )
+                        transcripts.append(
+                            {
+                                "id": id_,
+                                "speaker_id": speaker_id,
+                                "chapter_id": chapter_id,
+                                "file": audio_file,
+                                "text": transcript,
+                            }
+                        )
+            if audio_data and len(audio_data) == len(transcripts):
+                for transcript in transcripts:
+                    audio = {"path": transcript["file"], "bytes": audio_data[transcript["id"]]}
+                    yield key, {"audio": audio, **transcript}
+                    key += 1
+                audio_data = {}
+                transcripts = []
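
Usage note (not part of the commit): a minimal sketch of how the test-only splits produced by this script could be loaded. It assumes the file is saved locally as librispeech_test_only.py and that the installed datasets version still supports dataset loading scripts; the split names follow directly from the _split_generators above.

from datasets import load_dataset

# "clean" and "other" configs each expose a single "test" split,
# since the train/dev split generators above are commented out.
test_clean = load_dataset("librispeech_test_only.py", "clean", split="test")
print(test_clean[0]["id"], test_clean[0]["text"])

# The default "all" config yields "test.clean" and "test.other" splits.
test_all = load_dataset("librispeech_test_only.py", "all")
print(test_all)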