# Modern haiku dataset loading script.
# Data file size: 3,576 bytes; source revision: e80201f.
import datasets as ds
import json
# Builder configuration names: one per season, plus the full set, the
# season-less rows, and the kigo (season word) vocabulary.
_SUBSET_NAMES = ["all", "spring", "summer", "autumn", "winter", "none", "kigo"]

# Fields shared by every haiku record.
_COMMON_FEATURES = {
    "id": ds.Value("int64"),
    "haiku": ds.Value("string"),
    "author": ds.Value("string"),
    "foreword": ds.Value("string"),
    "source": ds.Value("string"),
    "comment": ds.Value("string"),
    "reviewer": ds.Value("string"),
    "note": ds.Value("string"),
}

# Fields of a kigo (seasonal word) record.
_KIGO_FEATURES = {
    "id": ds.Value("int64"),
    "word": ds.Value("string"),
    "kana": ds.Value("string"),
    "old_kana": ds.Value("string"),
    "season": ds.Value("string"),
    "subtitle": ds.Sequence(ds.Value("string")),
}

# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

# Schema per subset. The four named seasons share an identical schema
# (common fields plus the attached kigo); "all" additionally carries the
# season label, "none" has only the common fields, and "kigo" exposes the
# kigo records themselves.
_FEATURES = {
    "all": ds.Features(
        {**_COMMON_FEATURES, "season": ds.Value("string"), "kigo": _KIGO_FEATURES}
    ),
    **{
        season: ds.Features({**_COMMON_FEATURES, "kigo": _KIGO_FEATURES})
        for season in ("spring", "summer", "autumn", "winter")
    },
    "none": ds.Features(_COMMON_FEATURES),
    "kigo": ds.Features(_KIGO_FEATURES),
}

_DATA_URL = "https://pub-6dee886ee0a5425c8fb25fe18f3acc73.r2.dev/public/datasets/modern_haiku/data.json"
class ModernHaikuDataset(ds.GeneratorBasedBuilder):
    """Dataset builder exposing modern haiku in several subset views.

    Subsets (builder config names): "all" (every row, unchanged), one per
    season ("spring"/"summer"/"autumn"/"winter": rows of that season with
    the redundant season field dropped), "none" (season-less rows with the
    season and kigo fields dropped), and "kigo" (deduplicated kigo records).
    """

    VERSION = ds.Version("0.0.1")
    BUILDER_CONFIGS = [ds.BuilderConfig(name=subset) for subset in _SUBSET_NAMES]
    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        """Return dataset metadata with the feature schema for the active subset."""
        return ds.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES.get(self.config.name),
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the single JSON data file and expose it as the train split."""
        data_dir = dl_manager.download(_DATA_URL)
        return [
            ds.SplitGenerator(
                name=ds.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir,
                },
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs for the active subset.

        Keys are sequential integers starting at 0 within the subset.
        """
        with open(filepath, "r", encoding="utf-8") as f:
            data: list[dict] = json.load(f)

        subset = self.config.name
        if subset == "all":
            # Full view: rows pass through untouched.
            yield from enumerate(data)
        elif subset == "kigo":
            # Collect unique kigo records, keeping the first occurrence of
            # each kigo id. Rows without an attached kigo are skipped
            # (row.get tolerates a missing key as well as an explicit null).
            unique_kigo: dict = {}
            for row in data:
                kigo = row.get("kigo")
                if kigo is not None:
                    unique_kigo.setdefault(kigo["id"], kigo)
            yield from enumerate(unique_kigo.values())
        else:
            # Season-filtered views ("none" and the four named seasons):
            # keep only matching rows and drop the fields the subset's
            # schema does not declare.
            key = 0
            for row in data:
                if row["season"] != subset:
                    continue
                row.pop("season")
                if subset == "none":
                    # The "none" schema excludes the kigo field too.
                    row.pop("kigo")
                yield key, row
                key += 1