rvashurin committed on
Commit
4c1597e
1 Parent(s): 8b1c3aa

Add dataset

Browse files
README.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Wikidata SimpleQuestions
2
+
3
+ Hugging Face dataset wrapper for the Wikidata-SimpleQuestions dataset
4
+
5
+ ### Usage
6
+
7
+ ```bash
8
+ git clone git@github.com:skoltech-nlp/wikidata-simplequestions-hf.git wikidata_simplequestions
9
+ ```
10
+
11
+ ```python
12
+ from datasets import load_dataset
13
+ load_dataset('../wikidata_simplequestions', 'answerable_en', cache_dir='/YOUR_PATH_TO_CACHE/', ignore_verifications=True)
14
+ ```
simplequestion/annotated_wd_data_test.txt ADDED
The diff for this file is too large to render. See raw diff
 
simplequestion/annotated_wd_data_test_answerable.txt ADDED
The diff for this file is too large to render. See raw diff
 
simplequestion/annotated_wd_data_test_answerable_ru.txt ADDED
The diff for this file is too large to render. See raw diff
 
simplequestion/annotated_wd_data_train.txt ADDED
The diff for this file is too large to render. See raw diff
 
simplequestion/annotated_wd_data_train_answerable.txt ADDED
The diff for this file is too large to render. See raw diff
 
simplequestion/annotated_wd_data_train_answerable_ru.txt ADDED
The diff for this file is too large to render. See raw diff
 
simplequestion/annotated_wd_data_valid.txt ADDED
The diff for this file is too large to render. See raw diff
 
simplequestion/annotated_wd_data_valid_answerable.txt ADDED
The diff for this file is too large to render. See raw diff
 
simplequestion/annotated_wd_data_valid_answerable_ru.txt ADDED
The diff for this file is too large to render. See raw diff
 
wikidata_simplequestions.py ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import json
3
+ import os
4
+ from typing import Any
5
+
6
+ import datasets
7
+ from datasets.utils import logging
8
+
9
+
10
+ _DESCRIPTION = """\
11
+ HuggingFace wrapper for https://github.com/askplatypus/wikidata-simplequestions dataset
12
+ Simplequestions dataset based on Wikidata.
13
+ """
14
+
15
+ # TODO: Add a link to an official homepage for the dataset here
16
+ _HOMEPAGE = ""
17
+
18
+ # TODO: Add the licence for the dataset here if you can find it
19
+ _LICENSE = ""
20
+
21
+ _LANGS = [
22
+ "ru",
23
+ "en",
24
+ ]
25
+
26
+ _URL = "https://raw.githubusercontent.com/askplatypus/wikidata-simplequestions/master/"
27
+ _DATA_DIRECTORY = "./simplequestion"
28
+ VERSION = datasets.Version("0.0.1")
29
+
30
+
31
+ class WikidataSimpleQuestionsConfig(datasets.BuilderConfig):
32
+ """BuilderConfig for WikidataSimpleQuestions."""
33
+
34
+ def __init__(self, **kwargs):
35
+ """BuilderConfig for WikidataSimpleQuestions.
36
+ Args:
37
+ **kwargs: keyword arguments forwarded to super.
38
+ """
39
+ super(WikidataSimpleQuestionsConfig, self).__init__(**kwargs)
40
+
41
+
42
+ class WikidataSimpleQuestions(datasets.GeneratorBasedBuilder):
43
+ """HuggingFace wrapper for https://github.com/askplatypus/wikidata-simplequestions dataset"""
44
+
45
+ BUILDER_CONFIG_CLASS = WikidataSimpleQuestionsConfig
46
+ BUILDER_CONFIGS = []
47
+ BUILDER_CONFIGS += [
48
+ WikidataSimpleQuestionsConfig(
49
+ name=f"main_{ln}",
50
+ version=VERSION,
51
+ description="main version of wikidata simplequestions",
52
+ )
53
+ for ln in _LANGS
54
+ ]
55
+ BUILDER_CONFIGS += [
56
+ WikidataSimpleQuestionsConfig(
57
+ name=f"answerable_{ln}",
58
+ version=VERSION,
59
+ description="answerable version of wikidata simplequestions",
60
+ )
61
+ for ln in _LANGS
62
+ ]
63
+
64
+ DEFAULT_CONFIG_NAME = "answerable_en"
65
+
66
+ def _info(self):
67
+ features = datasets.Features(
68
+ {
69
+ "subject": datasets.Value("string"),
70
+ "property": datasets.Value("string"),
71
+ "object": datasets.Value("string"),
72
+ "question": datasets.Value("string"),
73
+ }
74
+ )
75
+
76
+ return datasets.DatasetInfo(
77
+ description=_DESCRIPTION,
78
+ features=features,
79
+ homepage=_HOMEPAGE,
80
+ license=_LICENSE,
81
+ )
82
+
83
+ def _split_generators(self, dl_manager):
84
+ if self.config.name == "default":
85
+ version, lang = "main", "en"
86
+ else:
87
+ version, lang = self.config.name.split("_")
88
+
89
+ if version == "main":
90
+ version = ""
91
+ else:
92
+ version = "_" + version
93
+
94
+ data_dir = os.path.join(self.base_path, _DATA_DIRECTORY)
95
+
96
+ return [
97
+ datasets.SplitGenerator(
98
+ name=datasets.Split.TRAIN,
99
+ gen_kwargs={
100
+ "filepath": os.path.join(data_dir, f"annotated_wd_data_train{version}_{lang}.txt"),
101
+ },
102
+ ),
103
+ datasets.SplitGenerator(
104
+ name=datasets.Split.VALIDATION,
105
+ gen_kwargs={
106
+ "filepath": os.path.join(data_dir, f"annotated_wd_data_valid{version}_{lang}.txt"),
107
+ },
108
+ ),
109
+ datasets.SplitGenerator(
110
+ name=datasets.Split.TEST,
111
+ gen_kwargs={
112
+ "filepath": os.path.join(data_dir, f"annotated_wd_data_test{version}_{lang}.txt"),
113
+ },
114
+ ),
115
+ ]
116
+
117
+ def _generate_examples(self, filepath, vocab_path):
118
+ with open(filepath, encoding="utf-8") as f:
119
+ for key, row in enumerate(f):
120
+ data = row.split("\t")
121
+ yield (
122
+ key,
123
+ {
124
+ "subject": data[0],
125
+ "property": data[1],
126
+ "object": data[2],
127
+ "question": data[3],
128
+ },
129
+ )