""" Common Voice Dataset""" |
|
|
|
import json |
|
import os |
|
from copy import deepcopy |
|
import re |
|
import unicodedata |
|
from more_itertools import windowed |
|
import datasets |
|
|
|
_CITATION = """\
"""

_DESCRIPTION = """\
An error-correction dataset built by running speech recognition (ASR) on CORAAL.
"""

_HOMEPAGE = ""

_LICENSE = ""

URLS = {
    "v1": {
        "text": "https://huggingface.co/datasets/Padomin/coraal-asr/resolve/main/coraal-asr.tar.gz",
    }
}
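
# Each *.jsonl file is expected to contain one JSON document per line with the
# fields consumed by `_generate_examples` below (schema inferred from the code):
#   {"id": "<document id>",
#    "utterances": [{"text": "<gold transcript>", "asr": "<ASR hypothesis>"}, ...]}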
|
|
class coraal_asr_config(datasets.BuilderConfig):
    def __init__(self, n_fronts=0, n_bodies=1, n_rears=0, front_prefix='front:\n', body_prefix='body:\n', rear_prefix='rear:\n', **kwargs):
        super(coraal_asr_config, self).__init__(**kwargs)
        # Number of ASR utterances of preceding (front) and following (rear)
        # context around each window of n_bodies utterances, plus the prefix
        # strings that mark each section in the source text.
        self.n_fronts = n_fronts
        self.n_bodies = n_bodies
        self.n_rears = n_rears
        self.front_prefix = front_prefix
        self.body_prefix = body_prefix
        self.rear_prefix = rear_prefix
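

# Illustration (not executed; values are hypothetical): with n_fronts=1,
# n_bodies=1, n_rears=1 and the default prefixes, each `src` string built in
# `_generate_examples` below has the shape
#
#   front:
#   <previous ASR utterance>
#   body:
#   <current ASR utterance>
#   rear:
#   <next ASR utterance>
#
# while `tgt` is the gold transcript of the body utterance(s).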
|
|
class coraal_asr(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("0.2.0")
    BUILDER_CONFIGS = [
        coraal_asr_config(name="v1", version=VERSION),
    ]
    DEFAULT_CONFIG_NAME = "v1"
    BUILDER_CONFIG_CLASS = coraal_asr_config
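
    # Usage sketch (an assumption about the caller, not part of the loader):
    # `datasets.load_dataset` forwards extra keyword arguments to the
    # BuilderConfig, so the context window can be chosen at load time, e.g.
    #   datasets.load_dataset("Padomin/coraal-asr", "v1", n_fronts=1, n_bodies=2, n_rears=1)
    # The parameter values here are illustrative.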
|
    def _info(self):
        feature_dict = {
            "text": datasets.Value("string"),
            "text_asr": datasets.Value("string"),
            "src": datasets.Value("string"),
            "tgt": datasets.Value("string"),
            "id": datasets.Value("string"),
        }
        features = datasets.Features(feature_dict)
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
|
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # "v1" is currently the only config, so `urls` is always bound here.
        if "v1" in self.config.name:
            urls = deepcopy(URLS["v1"])

        dl_path = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(dl_path["text"], "train.jsonl"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(dl_path["text"], "test.jsonl"),
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(dl_path["text"], "validation.jsonl"),
                    "split": "validation",
                },
            ),
        ]
|
    def _generate_examples(self, filepath, split):
        """Yields examples."""
        id_ = 0
        with open(filepath, encoding="utf-8") as f:
            for line in f:
                doc = json.loads(line)
                utterances = doc['utterances']

                texts_asr = [utt['asr'] for utt in utterances]
                texts = [utt['text'] for utt in utterances]

                # Pad the ASR side with empty strings so that every body window
                # has n_fronts utterances of left context and n_rears of right
                # context; the gold side is windowed over the body only.
                windowed_texts_asr = windowed(
                    [''] * self.config.n_fronts + texts_asr + [''] * self.config.n_rears,
                    self.config.n_bodies + self.config.n_fronts + self.config.n_rears,
                )
                windowed_texts = windowed(texts, self.config.n_bodies)
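
                # Illustration (hypothetical values): for ASR utterances
                # [a1, a2, a3] with n_fronts=1, n_bodies=1, n_rears=1, the
                # padded sequence ['', a1, a2, a3, ''] yields the windows
                # ('', a1, a2), (a1, a2, a3), (a2, a3, ''), pairing each body
                # utterance with one utterance of context on each side.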
|
                for text_asr, text, utt in zip(windowed_texts_asr, windowed_texts, utterances):
                    # Assemble the source: optional front context, the body,
                    # and optional rear context, each introduced by its prefix.
                    src = ''
                    if self.config.n_fronts > 0:
                        src += self.config.front_prefix
                        src += '\n'.join(text_asr[:self.config.n_fronts])
                        src += '\n'
                    src += self.config.body_prefix
                    src += '\n'.join(text_asr[self.config.n_fronts:self.config.n_fronts + self.config.n_bodies])
                    if self.config.n_rears > 0:
                        src += '\n' + self.config.rear_prefix
                        src += '\n'.join(text_asr[self.config.n_fronts + self.config.n_bodies:])
                    # The target is the gold transcript of the body window.
                    tgt = '\n'.join(text)

                    data = {
                        "text": utt["text"],
                        "text_asr": utt["asr"],
                        "src": src,
                        "tgt": tgt,
                        "id": doc["id"],
                    }

                    yield id_, data
                    id_ += 1
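

# Minimal smoke test, a sketch only: `datasets` imports loading scripts, so
# this block runs only when the file is executed directly. It assumes that
# `load_dataset` accepts a local script path and forwards extra keyword
# arguments to the BuilderConfig.
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, "v1", n_fronts=1, n_bodies=1, n_rears=1)
    print(ds["train"][0]["src"])
    print(ds["train"][0]["tgt"])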