# coding=utf-8
# Copyright 2021 Artem Ploujnikov
# Lint as: python3
import json
import datasets
_DESCRIPTION = """\
Grapheme-to-Phoneme training, validation and test sets
"""
_BASE_URL = "https://media.githubusercontent.com/flexthink/librig2p-nostress-space/develop/dataset"
_HOMEPAGE_URL = "https://github.com/flexthink/librig2p-nostress-space/tree/develop"
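# Phoneme inventory: ARPAbet symbols without stress markers, plus a space
# token used as the word separator (hence "nostress-space" in the name).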
_PHONEMES = [
    "AA",
    "AE",
    "AH",
    "AO",
    "AW",
    "AY",
    "B",
    "CH",
    "D",
    "DH",
    "EH",
    "ER",
    "EY",
    "F",
    "G",
    "HH",
    "IH",
    "IY",
    "JH",
    "K",
    "L",
    "M",
    "N",
    "NG",
    "OW",
    "OY",
    "P",
    "R",
    "S",
    "SH",
    "T",
    "TH",
    "UH",
    "UW",
    "V",
    "W",
    "Y",
    "Z",
    "ZH",
    " ",
]
_ORIGINS = ["librispeech", "librispeech-lex", "wikipedia-homograph"]
_NA = "N/A"
_SPLIT_TYPES = ["train", "valid", "test"]
_DATA_TYPES = ["lexicon", "sentence", "homograph"]
_SPLITS = [
    f"{data_type}_{split_type}"
    for data_type in _DATA_TYPES
    for split_type in _SPLIT_TYPES
]
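# Produces one split per (data type, split type) pair, e.g. "lexicon_train",
# "sentence_valid", "homograph_test".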


class GraphemeToPhoneme(datasets.GeneratorBasedBuilder):
    def __init__(self, base_url=None, splits=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.base_url = base_url or _BASE_URL
        self.splits = splits or _SPLITS
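
    # The feature schema mirrors the JSON records: "origin" is a ClassLabel
    # over the known data sources, "phn" is a sequence of phoneme labels,
    # and the homograph_* fields delimit the homograph span in characters
    # and phonemes (meaningful only for homograph records).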
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "speaker_id": datasets.Value("string"),
                    "origin": datasets.ClassLabel(names=_ORIGINS),
                    "char": datasets.Value("string"),
                    "phn": datasets.Sequence(datasets.ClassLabel(names=_PHONEMES)),
                    "homograph": datasets.Value("string"),
                    "homograph_wordid": datasets.Value("string"),
                    "homograph_char_start": datasets.Value("int32"),
                    "homograph_char_end": datasets.Value("int32"),
                    "homograph_phn_start": datasets.Value("int32"),
                    "homograph_phn_end": datasets.Value("int32"),
                },
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE_URL,
        )
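
    # Split names map directly onto file names: "<base_url>/<split>.json".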
    def _get_url(self, split):
        return f"{self.base_url}/{split}.json"

    def _split_generator(self, dl_manager, split):
        url = self._get_url(split)
        path = dl_manager.download_and_extract(url)
        return datasets.SplitGenerator(
            name=split,
            gen_kwargs={"datapath": path, "datatype": split},
        )
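
    # One SplitGenerator per requested split. The extract step of
    # download_and_extract is a no-op for a plain JSON file, but using it
    # keeps the download manager's caching behaviour.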
    def _split_generators(self, dl_manager):
        return [
            self._split_generator(dl_manager, split)
            for split in self.splits
        ]
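
    # Each JSON file is a dictionary keyed by example id. The homograph
    # fields are optional in the source data, so they default to "N/A"
    # (strings) or 0 (offsets) for non-homograph records.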
    def _generate_examples(self, datapath, datatype):
        with open(datapath, encoding="utf-8") as f:
            data = json.load(f)
        for sentence_counter, (item_id, item) in enumerate(data.items()):
            resp = {
                "id": item_id,
                "speaker_id": str(item.get("speaker_id") or _NA),
                "origin": item["origin"],
                "char": item["char"],
                "phn": item["phn"],
                "homograph": item.get("homograph", _NA),
                "homograph_wordid": item.get("homograph_wordid", _NA),
                "homograph_char_start": item.get("homograph_char_start", 0),
                "homograph_char_end": item.get("homograph_char_end", 0),
                "homograph_phn_start": item.get("homograph_phn_start", 0),
                "homograph_phn_end": item.get("homograph_phn_end", 0),
            }
            yield sentence_counter, resp
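
# A minimal usage sketch, assuming this file is used as a local `datasets`
# loading script (the path below is hypothetical, not part of this repo):
#
#   from datasets import load_dataset
#   dataset = load_dataset("path/to/librig2p.py")
#   print(dataset["lexicon_train"][0])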