andybi7676 committed
Commit e7dd21d · 1 Parent(s): 545d0de

add dev-clean and generate script
data/dev-clean.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8b068dd92aaceeec4483e972e485d7577ea4d2b993601073a83869ccea88918
+ size 304721145
reborn_uasr-librispeech_no_silence_100h.py CHANGED
@@ -0,0 +1,288 @@
+ # coding=utf-8
+ # Copyright 2022 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """
+ LibriSpeech automatic speech recognition dataset for reproducing Reborn UASR results.
+ Note that the silence in each audio file has been removed with unsupervised VAD (https://github.com/zhenghuatan/rVADfast).
+ We only process the 100-hour 'train-clean-100' split of LibriSpeech as the training split.
+ """
+
+ import os
+
+ import datasets
+
+
+ _CITATION = """\
+ @inproceedings{panayotov2015librispeech,
+     title={Librispeech: an ASR corpus based on public domain audio books},
+     author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},
+     booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on},
+     pages={5206--5210},
+     year={2015},
+     organization={IEEE}
+ }
+ @article{tan2020rvad,
+     title={rVAD: An unsupervised segment-based robust voice activity detection method},
+     author={Tan, Zheng-Hua and Dehak, Najim and others},
+     journal={Computer speech \& language},
+     volume={59},
+     pages={1--21},
+     year={2020},
+     publisher={Elsevier}
+ }
+ @article{tseng2024reborn,
+     title={REBORN: Reinforcement-Learned Boundary Segmentation with Iterative Training for Unsupervised ASR},
+     author={Tseng, Liang-Hsuan and Hu, En-Pei and Chiang, Cheng-Han and Tseng, Yuan and Lee, Hung-yi and Lee, Lin-shan and Sun, Shao-Hua},
+     journal={arXiv preprint arXiv:2402.03988},
+     year={2024}
+ }
+ """
+
+ _DESCRIPTION = """\
+ LibriSpeech is a corpus of approximately 1000 hours of read English speech with a sampling rate of 16 kHz,
+ prepared by Vassil Panayotov with the assistance of Daniel Povey. The data is derived from read
+ audiobooks from the LibriVox project, and has been carefully segmented and aligned.
+
+ This dataset is the 100-hour 'train-clean-100' subset of LibriSpeech, with silence removed.
+ Additionally, all the dev and test sets are included for fair comparison and evaluation if needed.
+ The dataset is prepared by the Reborn UASR team.
+ Arxiv paper link: https://arxiv.org/abs/2402.03988
+ """
+
+ _URL = "http://www.openslr.org/12"
+
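+ # relative path (inside this repo) of the directory holding the metadata/ TSVs and the per-split .tar.gz archives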
+ _DL_URL_FORMAT = "data"
+
+
+ class RebornLibrispeechConfig(datasets.BuilderConfig):
+     """BuilderConfig for Reborn-Librispeech."""
+
+     def __init__(self, name, **kwargs):
+         """
+         Args:
+             name: `string`, name of the dataset config
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(RebornLibrispeechConfig, self).__init__(
+             version=datasets.Version("2.12.0", ""), name=name, **kwargs
+         )
+         # relative path to the full data inside the repo (for example `data/train-clean-100`)
+         self.data_root_url = _DL_URL_FORMAT
+
+
+ class RebornLibrispeech(datasets.GeneratorBasedBuilder):
+     """Reborn UASR LibriSpeech dataset (silence removed)."""
+
+     BUILDER_CONFIGS = [
+         RebornLibrispeechConfig(name="reborn_ls100hr", description="train-clean-100 LibriSpeech dataset without silence"),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "file": datasets.Value("string"),
+                     "audio": datasets.features.Audio(sampling_rate=16_000),
+                     "word": datasets.Value("string"),
+                     "phoneme": datasets.Value("string"),
+                     "speaker_id": datasets.Value("int64"),
+                     "chapter_id": datasets.Value("int64"),
+                     "id": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=("file", "phoneme"),
+             homepage=_URL,
+             citation=_CITATION,
+             task_templates=None,
+         )
+
+     def _split_generators(self, dl_manager):
+
+         metadata = dl_manager.download({
+             "train-clean-100": self.config.data_root_url + "/metadata/train-clean-100.tsv",
+             "dev-clean": self.config.data_root_url + "/metadata/dev-clean.tsv",
+             "dev-clean-small": self.config.data_root_url + "/metadata/dev-clean-small.tsv",
+             "dev-other": self.config.data_root_url + "/metadata/dev-other.tsv",
+             "test-clean": self.config.data_root_url + "/metadata/test-clean.tsv",
+             "test-other": self.config.data_root_url + "/metadata/test-other.tsv",
+         })
+
+         all_splits = [
+             "train-clean-100",
+             "dev-clean",
+             "dev-other",
+             "test-clean",
+             "test-other",
+         ]
+
+         # each split's audio files are packed into a single "{split}.tar.gz" archive
+         audio_archives = {}
+         for split in all_splits:
+             audio_archives[split] = dl_manager.download(
+                 os.path.join(self.config.data_root_url, f"{split}.tar.gz")
+             )
+
+         # (Optional) In non-streaming mode, we can extract the archives locally to have actual local audio files:
+         local_extracted_archives = dl_manager.extract(audio_archives) if not dl_manager.is_streaming else {}
+
+         train_splits = [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "metadata_fpaths": [metadata["train-clean-100"]],
+                     "audio_archives": [dl_manager.iter_archive(audio_archives["train-clean-100"])],
+                     "local_extracted_archives": [local_extracted_archives.get("train-clean-100")],
+                 }
+             ),
+             datasets.SplitGenerator(
+                 name="train-clean-100",
+                 gen_kwargs={
+                     "metadata_fpaths": [metadata["train-clean-100"]],
+                     "audio_archives": [dl_manager.iter_archive(audio_archives["train-clean-100"])],
+                     "local_extracted_archives": [local_extracted_archives.get("train-clean-100")],
+                 }
+             ),
+         ]
+
+         dev_splits = [
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "metadata_fpaths": [metadata["dev-clean"], metadata["dev-other"]],
+                     "audio_archives": [dl_manager.iter_archive(audio_archives["dev-clean"]), dl_manager.iter_archive(audio_archives["dev-other"])],
+                     "local_extracted_archives": [local_extracted_archives.get("dev-clean"), local_extracted_archives.get("dev-other")],
+                 }
+             ),
+             datasets.SplitGenerator(
+                 name="dev-clean",
+                 gen_kwargs={
+                     "metadata_fpaths": [metadata["dev-clean"]],
+                     "audio_archives": [dl_manager.iter_archive(audio_archives["dev-clean"])],
+                     "local_extracted_archives": [local_extracted_archives.get("dev-clean")],
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name="dev-other",
+                 gen_kwargs={
+                     "metadata_fpaths": [metadata["dev-other"]],
+                     "audio_archives": [dl_manager.iter_archive(audio_archives["dev-other"])],
+                     "local_extracted_archives": [local_extracted_archives.get("dev-other")],
+                 },
+             ),
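+             # "dev-clean-small" reuses the dev-clean audio archive; files absent from its
+             # metadata are simply skipped inside _generate_examples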
+             datasets.SplitGenerator(
+                 name="dev-clean-small",
+                 gen_kwargs={
+                     "metadata_fpaths": [metadata["dev-clean-small"]],
+                     "audio_archives": [dl_manager.iter_archive(audio_archives["dev-clean"])],
+                     "local_extracted_archives": [local_extracted_archives.get("dev-clean")],
+                 },
+             ),
+         ]
+
+         test_splits = [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "metadata_fpaths": [metadata["test-clean"], metadata["test-other"]],
+                     "audio_archives": [dl_manager.iter_archive(audio_archives["test-clean"]), dl_manager.iter_archive(audio_archives["test-other"])],
+                     "local_extracted_archives": [local_extracted_archives.get("test-clean"), local_extracted_archives.get("test-other")],
+                 }
+             ),
+             datasets.SplitGenerator(
+                 name="test-clean",
+                 gen_kwargs={
+                     "metadata_fpaths": [metadata["test-clean"]],
+                     "audio_archives": [dl_manager.iter_archive(audio_archives["test-clean"])],
+                     "local_extracted_archives": [local_extracted_archives.get("test-clean")],
+                 }
+             ),
+             datasets.SplitGenerator(
+                 name="test-other",
+                 gen_kwargs={
+                     "metadata_fpaths": [metadata["test-other"]],
+                     "audio_archives": [dl_manager.iter_archive(audio_archives["test-other"])],
+                     "local_extracted_archives": [local_extracted_archives.get("test-other")],
+                 }
+             ),
+         ]
+
+         return train_splits + dev_splits + test_splits
+
+     def _generate_examples(self, metadata_fpaths, audio_archives, local_extracted_archives):
+         """Generate examples from a Reborn LibriSpeech data dir."""
+         words, phones = dict(), dict()
+         for metadata_fpath in metadata_fpaths:
+             with open(metadata_fpath, "r", encoding="utf-8") as file:
+                 for line in file:
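+                     # each metadata row: <audio file path>\t<word transcript>\t<phoneme transcript>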
+                     audio_fpath, word, phone = line.strip().split("\t")
+                     audio_id = audio_fpath.split('/')[-1].split(".flac")[0]
+                     words[audio_id] = word
+                     phones[audio_id] = phone
+
+         for archive_idx, audio_archive in enumerate(audio_archives):
+             for audio_filename, file in audio_archive:
+                 audio_id = audio_filename.split('/')[-1].split(".flac")[0]
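+                 # LibriSpeech ids follow "<speaker_id>-<chapter_id>-<utterance_id>", e.g. "1272-128104-0000"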
+                 speaker_id, chapter_id = (int(item) for item in audio_id.split("-")[:2])
+                 word = words.get(audio_id, None)
+                 if word is None:
+                     continue
+
+                 local_audio_file_path = os.path.join(
+                     local_extracted_archives[archive_idx], audio_filename
+                 ) if local_extracted_archives[archive_idx] else None
+
+                 yield audio_filename, {
+                     "file": local_audio_file_path,
+                     "audio": {
+                         "path": local_audio_file_path if local_audio_file_path else audio_filename,
+                         "bytes": file.read()
+                     },
+                     "word": word,
+                     "phoneme": phones.get(audio_id, None),
+                     "speaker_id": speaker_id,
+                     "chapter_id": chapter_id,
+                     "id": audio_id
+                 }
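
For quick verification, a minimal usage sketch. The repo id below is assumed from the committer name and the script filename (adjust to the actual repo path), and depending on your `datasets` version you may also need to pass trust_remote_code=True:

from datasets import load_dataset

# Stream the silence-removed dev-clean split without downloading the full archive.
dev_clean = load_dataset(
    "andybi7676/reborn_uasr-librispeech_no_silence_100h",  # assumed repo id
    "reborn_ls100hr",
    split="dev-clean",
    streaming=True,
)

sample = next(iter(dev_clean))
print(sample["id"], sample["word"], sample["phoneme"])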