sanchit-gandhi committed on
Commit
c6eaa32
·
1 Parent(s): 447e7ad

Create new file

Browse files
Files changed (1) hide show
  1. librispeech_asr_clean.py +161 -0
librispeech_asr_clean.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """Librispeech automatic speech recognition dataset."""
18
+
19
+ import os
20
+
21
+ import datasets
22
+ from datasets.tasks import AutomaticSpeechRecognition
23
+
24
_CITATION = """\
@inproceedings{panayotov2015librispeech,
  title={Librispeech: an ASR corpus based on public domain audio books},
  author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},
  booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on},
  pages={5206--5210},
  year={2015},
  organization={IEEE}
}
"""

# FIX: removed the stray "87" (a footnote marker fused into the copied text)
# that trailed "aligned" in the original description.
_DESCRIPTION = """\
LibriSpeech is a corpus of approximately 1000 hours of read English speech with sampling rate of 16 kHz,
prepared by Vassil Panayotov with the assistance of Daniel Povey. The data is derived from read
audiobooks from the LibriVox project, and has been carefully segmented and aligned.
"""

# Homepage and download root of the dataset on OpenSLR (resource SLR12).
_URL = "http://www.openslr.org/12"
_DL_URL = "http://www.openslr.org/resources/12/"

# Only the "clean" archives are used by this script: dev, test and the
# 100-hour training subset.
_DL_URLS = {
    "dev": _DL_URL + "dev-clean.tar.gz",
    "test": _DL_URL + "test-clean.tar.gz",
    "train.100": _DL_URL + "train-clean-100.tar.gz",
}
48
+
49
+
50
class LibrispeechASRConfig(datasets.BuilderConfig):
    """BuilderConfig for LibriSpeechASR."""

    def __init__(self, **kwargs):
        """
        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``
                (e.g. ``name``, ``description``). The dataset version is
                pinned to 2.1.0 here and cannot be overridden by callers.
        """
        super().__init__(version=datasets.Version("2.1.0", ""), **kwargs)
63
+
64
+
65
class LibrispeechASR(datasets.GeneratorBasedBuilder):
    """Builder for the LibriSpeech "clean" automatic speech recognition dataset.

    Downloads (or streams) the dev-clean / test-clean / train-clean-100 tar
    archives from OpenSLR and yields one example per utterance, pairing each
    FLAC file with its transcript line.
    """

    # Audio rows are large; keep writer batches small to bound memory usage.
    DEFAULT_WRITER_BATCH_SIZE = 256
    # FIX: the default config name must match a declared config. Only "clean"
    # is defined in this script; the original "all" does not exist and would
    # make `load_dataset` without an explicit config name fail.
    DEFAULT_CONFIG_NAME = "clean"
    # FIX: the `datasets` builder machinery discovers configs through the
    # BUILDER_CONFIGS list; the singular `BUILDER_CONFIG` attribute used in
    # the original is ignored, so the "clean" config was never registered.
    BUILDER_CONFIGS = [LibrispeechASRConfig(name="clean", description="'Clean' speech.")]

    def _info(self):
        """Return dataset metadata: features, supervised keys, citation and
        the automatic-speech-recognition task template."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "text": datasets.Value("string"),
                    "speaker_id": datasets.Value("int64"),
                    "chapter_id": datasets.Value("int64"),
                    "id": datasets.Value("string"),
                }
            ),
            supervised_keys=("file", "text"),
            homepage=_URL,
            citation=_CITATION,
            task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
        )

    def _split_generators(self, dl_manager):
        """Download the archives and define the train/validation/test splits."""
        archive_path = dl_manager.download(_DL_URLS)
        # In non-streaming mode we also extract the archives locally so that
        # examples can point at real audio files on disk; in streaming mode
        # the audio bytes are read straight out of the tar members instead.
        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}

        train_splits = [
            datasets.SplitGenerator(
                name="train.100",
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive.get("train.100"),
                    "files": dl_manager.iter_archive(archive_path["train.100"]),
                },
            ),
        ]
        dev_splits = [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive.get("dev"),
                    "files": dl_manager.iter_archive(archive_path["dev"]),
                },
            )
        ]
        test_splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive.get("test"),
                    "files": dl_manager.iter_archive(archive_path["test"]),
                },
            )
        ]
        return train_splits + dev_splits + test_splits

    def _generate_examples(self, files, local_extracted_archive):
        """Generate (key, example) pairs from a LibriSpeech archive.

        Args:
            files: iterator of ``(path, file-object)`` pairs over the tar
                members, as produced by ``dl_manager.iter_archive``.
            local_extracted_archive: root of the locally extracted archive in
                non-streaming mode, or ``None``/``{}``-derived value when
                streaming (then only in-archive paths are recorded).
        """
        key = 0
        audio_data = {}
        transcripts = []
        for path, f in files:
            if path.endswith(".flac"):
                # Utterance id is the FLAC file name without its extension.
                id_ = path.split("/")[-1][: -len(".flac")]
                audio_data[id_] = f.read()
            elif path.endswith(".trans.txt"):
                for line in f:
                    if line:
                        line = line.decode("utf-8").strip()
                        # Each transcript line is "<utterance-id> <text>".
                        id_, transcript = line.split(" ", 1)
                        audio_file = f"{id_}.flac"
                        # Utterance ids look like "<speaker>-<chapter>-<utt>".
                        speaker_id, chapter_id = [int(el) for el in id_.split("-")[:2]]
                        audio_file = (
                            os.path.join(local_extracted_archive, audio_file)
                            if local_extracted_archive
                            else audio_file
                        )
                        transcripts.append(
                            {
                                "id": id_,
                                "speaker_id": speaker_id,
                                "chapter_id": chapter_id,
                                "file": audio_file,
                                "text": transcript,
                            }
                        )
            # Each chapter directory holds its FLAC files plus one transcript
            # file; once the buffered counts match, the chapter is complete
            # and its examples can be flushed.
            if audio_data and len(audio_data) == len(transcripts):
                for transcript in transcripts:
                    audio = {"path": transcript["file"], "bytes": audio_data[transcript["id"]]}
                    yield key, {"audio": audio, **transcript}
                    key += 1
                audio_data = {}
                transcripts = []