# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Multilingual Librispeech automatic speech recognition dataset."""

import os

import datasets


_CITATION = """\
@article{Pratap2020MLSAL,
  title={MLS: A Large-Scale Multilingual Dataset for Speech Research},
  author={Vineel Pratap and Qiantong Xu and Anuroop Sriram and Gabriel Synnaeve and Ronan Collobert},
  journal={ArXiv},
  year={2020},
  volume={abs/2012.03411}
}
"""

_DESCRIPTION = """\
This is a streamable version of the Multilingual LibriSpeech (MLS) dataset.
The data archives were restructured from the original ones from [OpenSLR](http://www.openslr.org/94)
to make them easier to stream.

The MLS dataset is a large multilingual corpus suitable for speech research.
It is derived from read audiobooks from LibriVox and consists of 8 languages:
English, German, Dutch, Spanish, French, Italian, Portuguese, Polish.
"""

_URL = "http://www.openslr.org/94"

_DL_URL_FORMAT = "data/mls_{name}"


class MultilingualLibrispeechConfig(datasets.BuilderConfig):
    """BuilderConfig for MultilingualLibrispeech."""

    def __init__(self, name, **kwargs):
        """
        Args:
            name: `string`, name of dataset config (=language)
            **kwargs: keyword arguments forwarded to super.
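
        Example (illustrative; mirrors the configs registered in `BUILDER_CONFIGS` below):
            `MultilingualLibrispeechConfig(name="german", description="German LibriSpeech dataset")`
            expects its data under the relative path `data/mls_german`.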
""" super(MultilingualLibrispeechConfig, self).__init__( version=datasets.Version("2.1.0", ""), name=name, **kwargs ) # relative path to full data inside a repo (for example `data/mls_german`) self.data_root_url = _DL_URL_FORMAT.format(name=name) class MultilingualLibrispeech(datasets.GeneratorBasedBuilder): """Multilingual Librispeech dataset.""" BUILDER_CONFIGS = [ MultilingualLibrispeechConfig(name="german", description="German LibriSpeech dataset"), MultilingualLibrispeechConfig(name="dutch", description="Dutch LibriSpeech dataset"), MultilingualLibrispeechConfig(name="french", description="French LibriSpeech dataset"), MultilingualLibrispeechConfig(name="spanish", description="Spanish LibriSpeech dataset"), MultilingualLibrispeechConfig(name="italian", description="Italian LibriSpeech dataset"), MultilingualLibrispeechConfig(name="portuguese", description="Portuguese LibriSpeech dataset"), MultilingualLibrispeechConfig(name="polish", description="Polish LibriSpeech dataset"), ] def _info(self): return datasets.DatasetInfo( description=_DESCRIPTION, features=datasets.Features( { "file": datasets.Value("string"), "audio": datasets.features.Audio(sampling_rate=16_000), "text": datasets.Value("string"), "speaker_id": datasets.Value("int64"), "chapter_id": datasets.Value("int64"), "id": datasets.Value("string"), } ), supervised_keys=("file", "text"), homepage=_URL, citation=_CITATION, task_templates=None, ) def _split_generators(self, dl_manager): transcripts = dl_manager.download({ "train": self.config.data_root_url + "/train/transcripts.txt", "dev": self.config.data_root_url + "/dev/transcripts.txt", "test": self.config.data_root_url + "/test/transcripts.txt", }) # Download handles.txt files containing ids for limited supervision train sets limited_supervision_9h = dl_manager.download( [self.config.data_root_url + "/train/limited_supervision/9hr/handles.txt"], ) # in our case of 1 hour limited supervision ("train.1h") there are always 6 subfolders like: # "limited_supervision/1h/0/handles.txt", "limited_supervision/1h/1/handles.txt", ... 
        limited_supervision_1h = dl_manager.download([
            self.config.data_root_url + f"/train/limited_supervision/1hr/{i}/handles.txt" for i in range(6)
        ])

        # each split contains many .tar.gz archives with its audio files
        # audio_filenames.txt contains the names of these archives
        audio_filenames_paths = dl_manager.download({
            "train": self.config.data_root_url + "/train/audio_filenames.txt",
            "dev": self.config.data_root_url + "/dev/audio_filenames.txt",
            "test": self.config.data_root_url + "/test/audio_filenames.txt",
        })

        audio_archives = {}
        for split in audio_filenames_paths:
            with open(audio_filenames_paths[split], encoding="utf-8") as f:
                audio_filenames = [line.strip() for line in f.readlines()]
                audio_archives[split] = dl_manager.download([
                    self.config.data_root_url + "/" + split + "/audio/" + filename
                    for filename in audio_filenames
                ])

        # (Optional) In non-streaming mode, we can extract the archives locally to have actual local audio files:
        local_extracted_archives = dl_manager.extract(audio_archives) if not dl_manager.is_streaming else {}

        train_splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "transcript_path": transcripts["train"],
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_archives["train"]],
                    "local_extracted_archive": local_extracted_archives.get("train"),
                },
            ),
            datasets.SplitGenerator(
                name="train.9h",
                gen_kwargs={
                    "transcript_path": transcripts["train"],
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_archives["train"]],
                    "local_extracted_archive": local_extracted_archives.get("train"),
                    "limited_ids_paths": limited_supervision_9h,
                },
            ),
            datasets.SplitGenerator(
                name="train.1h",
                gen_kwargs={
                    "transcript_path": transcripts["train"],
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_archives["train"]],
                    "local_extracted_archive": local_extracted_archives.get("train"),
                    "limited_ids_paths": limited_supervision_1h,
                },
            ),
        ]

        return train_splits + [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "transcript_path": transcripts["dev"],
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_archives["dev"]],
                    "local_extracted_archive": local_extracted_archives.get("dev"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "transcript_path": transcripts["test"],
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_archives["test"]],
                    "local_extracted_archive": local_extracted_archives.get("test"),
                },
            ),
        ]

    def _generate_examples(self, transcript_path, audio_archives, local_extracted_archive, limited_ids_paths=None):
        """Generate examples from a Multilingual LibriSpeech data dir."""
        transcripts = dict()
        with open(transcript_path, "r", encoding="utf-8") as file:
            for line in file:
                audio_id, transcript = line.strip().split("\t")
                transcripts[audio_id] = transcript

        limited_ids, limited_ids_archives_names = [], []
        if limited_ids_paths:
            for path in limited_ids_paths:
                with open(path, "r", encoding="utf-8") as file:
                    limited_ids.extend([line.strip() for line in file.readlines()])
            limited_ids = set(limited_ids)

        for archive_idx, audio_archive in enumerate(audio_archives):
            # TODO: check that archive doesn't contain needed ids
            # if limited_ids and audio_archive not in limited_ids_archives_names:
            #     continue

            for audio_filename, file in audio_archive:
                speaker_id, chapter_id = audio_filename.split("_")[:2]
                speaker_id, chapter_id = int(speaker_id), int(chapter_id)
                audio_id = audio_filename.split(".flac")[0]
                audio_transcript = transcripts[audio_id]
                if limited_ids and audio_id not in limited_ids:
                    # this can only be true in limited supervision sets ("train.9h" and "train.1h")
                    continue

                local_audio_file_path = os.path.join(
                    local_extracted_archive[archive_idx], audio_filename
                ) if local_extracted_archive else None

                yield audio_filename, {
                    "file": local_audio_file_path,
                    "audio": {
                        "path": local_audio_file_path if local_audio_file_path else audio_filename,
                        "bytes": file.read(),
                    },
                    "text": audio_transcript,
                    "speaker_id": speaker_id,
                    "chapter_id": chapter_id,
                    "id": audio_id,
                }
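

# A minimal, illustrative usage sketch; it is not executed when the `datasets` library
# imports this script. It assumes the file is saved locally as `multilingual_librispeech.py`
# with the `data/mls_{language}` archives referenced above reachable next to it; the script
# path, config name, and split below are placeholders. Newer versions of `datasets` may also
# require passing `trust_remote_code=True` when loading a script-based dataset.
if __name__ == "__main__":
    # stream the German 9-hour limited supervision split without downloading all archives
    mls = datasets.load_dataset(
        "multilingual_librispeech.py",  # hypothetical local path to this loading script
        "german",                       # any config name from BUILDER_CONFIGS
        split="train.9h",
        streaming=True,
    )
    first_example = next(iter(mls))
    print(first_example["id"], first_example["speaker_id"], first_example["text"][:80])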