mmcrsc / mmcrsc.py
Jaegeon's picture
Initial commit
3d3615f
raw
history blame
5.07 kB
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""MAGICDATA Mandarin Chinese Read Speech Corpus."""
import os
import datasets
from datasets.tasks import AutomaticSpeechRecognition
# BibTeX citation for the corpus (OpenSLR resource 68).
_CITATION = """\
@misc{magicdata_2019,
title={MAGICDATA Mandarin Chinese Read Speech Corpus},
url={https://openslr.org/68/},
publisher={Magic Data Technology Co., Ltd.},
year={2019},
month={May}}
"""
# Human-readable summary surfaced through DatasetInfo / the dataset card.
_DESCRIPTION = """\
The corpus by Magic Data Technology Co., Ltd. , containing 755 hours of scripted read speech data
from 1080 native speakers of the Mandarin Chinese spoken in mainland China.
The sentence transcription accuracy is higher than 98%.
"""
# Dataset homepage.
_URL = "https://openslr.org/68/"
# Base URL hosting the per-split archives.
_DL_URL = "http://www.openslr.org/resources/68/"
# Download URL for each split's .tar.gz archive; keys match the split names
# used in _split_generators.
_DL_URLS = {
    "train": _DL_URL + "train_set.tar.gz",
    "dev": _DL_URL + "dev_set.tar.gz",
    "test": _DL_URL + "test_set.tar.gz",
}
class MMCRSCConfig(datasets.BuilderConfig):
    """BuilderConfig for the MAGICDATA Mandarin Chinese Read Speech Corpus."""

    def __init__(self, **kwargs):
        """Create a MMCRSC builder configuration.

        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``
                (e.g. ``data_dir`` — path to the folder with the downloaded
                .tar contents, ``citation``, ``url``).
        """
        # Version history:
        #   0.1.0 — first release on the Hugging Face Hub.
        super().__init__(version=datasets.Version("0.1.0", ""), **kwargs)
class MMCRSC(datasets.GeneratorBasedBuilder):
    """Builder for the MAGICDATA Mandarin Chinese Read Speech Corpus (MMCRSC)."""

    DEFAULT_WRITER_BATCH_SIZE = 256
    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        """Return the DatasetInfo: features, supervised keys, homepage, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "text": datasets.Value("string"),
                    "speaker_id": datasets.Value("int64"),
                    "id": datasets.Value("string"),
                }
            ),
            supervised_keys=("file", "text"),
            homepage=_URL,
            citation=_CITATION,
            task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
        )

    def _split_generators(self, dl_manager):
        """Download the three split archives and build their SplitGenerators."""
        archive_path = dl_manager.download(_DL_URLS)
        # (Optional) In non-streaming mode, we can extract the archive locally to have actual local audio files:
        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive.get("train"),
                    "files": dl_manager.iter_archive(archive_path["train"]),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive.get("dev"),
                    "files": dl_manager.iter_archive(archive_path["dev"]),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive.get("test"),
                    "files": dl_manager.iter_archive(archive_path["test"]),
                },
            ),
        ]

    def _generate_examples(self, files, local_extracted_archive):
        """Generate examples from a MAGICDATA split archive.

        Args:
            files: iterator of ``(path, file_object)`` pairs from
                ``dl_manager.iter_archive``.
            local_extracted_archive: root of the locally extracted archive in
                non-streaming mode, or ``None`` when streaming (then "file"
                carries only the in-archive wav name).

        Yields:
            ``(key, example)`` pairs with the features declared in ``_info``.
        """
        # Running example index. A plain enumerate() over each transcript batch
        # would restart at 0 on every flush and produce duplicate keys, which
        # GeneratorBasedBuilder rejects; a persistent counter stays unique.
        key = 0
        audio_data = {}
        transcripts = []
        for path, f in files:
            if path.endswith(".wav"):
                # Audio payloads are keyed by bare filename; TRANS.txt rows
                # reference the same name in their first column.
                id_ = path.split("/")[-1]
                audio_data[id_] = f.read()
            elif path.endswith("TRANS.txt"):
                for line in f:
                    # Skip the header / blank lines: data rows mention a .wav name.
                    if line and (b'.wav' in line):
                        line = line.decode("utf-8").strip()
                        id_, speaker_id, transcript = line.split("\t")
                        audio_file = id_
                        audio_file = (
                            os.path.join(local_extracted_archive, audio_file)
                            if local_extracted_archive
                            else audio_file
                        )
                        transcripts.append(
                            {
                                "id": id_,
                                "speaker_id": speaker_id,
                                "file": audio_file,
                                "text": transcript,
                            }
                        )
        # Pair each transcript row with its audio bytes once the archive has
        # been fully consumed; rows without a matching wav are dropped.
        if audio_data:
            for transcript in transcripts:
                if transcript["id"] in audio_data:
                    audio = {"path": transcript["file"], "bytes": audio_data[transcript["id"]]}
                    yield key, {"audio": audio, **transcript}
                    key += 1
            audio_data = {}
            transcripts = []