File size: 3,858 Bytes
4c1597e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 |
import csv
import json
import os
from typing import Any
import datasets
from datasets.utils import logging
# Human-readable summary surfaced through `datasets.DatasetInfo`.
_DESCRIPTION = """\
HuggingFace wrapper for https://github.com/askplatypus/wikidata-simplequestions dataset
Simplequestions dataset based on Wikidata.
"""
# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
# Languages with annotated data files upstream; one builder config per
# language is generated in WikidataSimpleQuestions.BUILDER_CONFIGS.
_LANGS = [
"ru",
"en",
]
# Raw-file base URL of the upstream GitHub repository.
# NOTE(review): _URL is never referenced in this file — splits are read from
# _DATA_DIRECTORY instead. Confirm whether downloading was intended.
_URL = "https://raw.githubusercontent.com/askplatypus/wikidata-simplequestions/master/"
# Local directory (relative to the builder's base_path) holding the .txt files.
_DATA_DIRECTORY = "./simplequestion"
# Single version shared by all builder configs.
VERSION = datasets.Version("0.0.1")
class WikidataSimpleQuestionsConfig(datasets.BuilderConfig):
    """BuilderConfig for WikidataSimpleQuestions."""

    def __init__(self, **kwargs):
        """BuilderConfig for WikidataSimpleQuestions.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        # Python 3 zero-argument super() replaces the legacy
        # super(WikidataSimpleQuestionsConfig, self) spelling.
        super().__init__(**kwargs)
class WikidataSimpleQuestions(datasets.GeneratorBasedBuilder):
    """HuggingFace wrapper for https://github.com/askplatypus/wikidata-simplequestions dataset"""

    BUILDER_CONFIG_CLASS = WikidataSimpleQuestionsConfig
    # One config per (variant, language): "main_*" for the full data,
    # "answerable_*" for the answerable subset shipped upstream.
    BUILDER_CONFIGS = [
        WikidataSimpleQuestionsConfig(
            name=f"main_{lang}",
            version=VERSION,
            description="main version of wikidata simplequestions",
        )
        for lang in _LANGS
    ] + [
        WikidataSimpleQuestionsConfig(
            name=f"answerable_{lang}",
            version=VERSION,
            description="answerable version of wikidata simplequestions",
        )
        for lang in _LANGS
    ]
    DEFAULT_CONFIG_NAME = "answerable_en"

    def _info(self):
        """Return dataset metadata: each example is a (subject, property,
        object, question) record of plain strings."""
        features = datasets.Features(
            {
                "subject": datasets.Value("string"),
                "property": datasets.Value("string"),
                "object": datasets.Value("string"),
                "question": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Map the selected config name onto the three annotated split files.

        Config names are "<variant>_<lang>" (e.g. "answerable_en"); the
        special name "default" is treated as "main_en".
        """
        if self.config.name == "default":
            version, lang = "main", "en"
        else:
            version, lang = self.config.name.split("_")
        # Upstream naming: the "main" variant has no infix in the filename,
        # every other variant appears as "_<variant>".
        version = "" if version == "main" else "_" + version
        data_dir = os.path.join(self.base_path, _DATA_DIRECTORY)
        # The three splits differ only in the "train"/"valid"/"test" token,
        # so build them from one template instead of three copies.
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "filepath": os.path.join(
                        data_dir, f"annotated_wd_data_{part}{version}_{lang}.txt"
                    ),
                },
            )
            for split, part in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "valid"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, filepath, vocab_path=None):
        """Yield (key, example) pairs from one tab-separated split file.

        Args:
            filepath: path to an ``annotated_wd_data_*.txt`` file with four
                tab-separated columns: subject, property, object, question.
            vocab_path: unused. BUG FIX: it previously had no default, yet
                ``gen_kwargs`` in ``_split_generators`` supplies only
                ``filepath``, so every generation call raised ``TypeError``.
                A ``None`` default keeps the signature backward compatible.
        """
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                # BUG FIX: strip the trailing newline before splitting so the
                # last column ("question") does not carry a "\n" suffix.
                data = row.rstrip("\n").split("\t")
                yield (
                    key,
                    {
                        "subject": data[0],
                        "property": data[1],
                        "object": data[2],
                        "question": data[3],
                    },
                )
|