# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""HotpotQA: A Dataset for Diverse, Explainable Multi-hop Question Answering."""


import json

import datasets


_CITATION = """
@inproceedings{xanh2020_2wikimultihop,
    title = "Constructing A Multi-hop {QA} Dataset for Comprehensive Evaluation of Reasoning Steps",
    author = "Ho, Xanh  and
      Duong Nguyen, Anh-Khoa  and
      Sugawara, Saku  and
      Aizawa, Akiko",
    booktitle = "Proceedings of the 28th International Conference on Computational Linguistics",
    month = dec,
    year = "2020",
    address = "Barcelona, Spain (Online)",
    publisher = "International Committee on Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.coling-main.580",
    pages = "6609--6625",
}
"""

_DESCRIPTION = """\
"""

_URL_BASE = "data"


class TwowikimultihopQA(datasets.GeneratorBasedBuilder):
    """2wikimultihopQA is a Dataset for Diverse, Explainable Multi-hop Question Answering."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answer": datasets.Value("string"),
                    "type": datasets.Value("string"),
                    "supporting_facts": datasets.features.Sequence(
                        {
                            "title": datasets.Value("string"),
                            "sent_id": datasets.Value("int32"),
                        }
                    ),
                    "context": datasets.features.Sequence(
                        {
                            "title": datasets.Value("string"),
                            "sentences": datasets.features.Sequence(datasets.Value("string")),
                        }
                    ),
                    "evidences": datasets.features.Sequence(
                        datasets.features.Sequence(
                            datasets.Value("string")
                        )
                    ),
                    "entity_ids": datasets.Value("string")
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/Alab-NII/2wikimultihop",
            citation=_CITATION,
        )
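
    # For reference, a raw record in the source JSON files is expected to look
    # like the sketch below (values elided; this shape is inferred from
    # _generate_examples rather than taken from official documentation):
    #
    #   {
    #       "_id": "...",
    #       "question": "...",
    #       "answer": "...",
    #       "type": "...",
    #       "supporting_facts": [["<page title>", <sentence index>], ...],
    #       "context": [["<page title>", ["<sentence>", ...]], ...],
    #       "evidences": [["<subject>", "<relation>", "<object>"], ...],
    #       "entity_ids": "..."
    #   }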

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Returns SplitGenerators."""
        paths = {
            datasets.Split.TRAIN: f"{_URL_BASE}/train.json",
            datasets.Split.VALIDATION: f"{_URL_BASE}/dev.json",
            datasets.Split.TEST: f"{_URL_BASE}/test.json",
        }

        files = dl_manager.download(paths)

        return [
            datasets.SplitGenerator(name=split, gen_kwargs={"data_file": data_file})
            for split, data_file in files.items()
        ]

    def _generate_examples(self, data_file):
        """Yields (key, example) pairs from one JSON split file."""
        with open(data_file, encoding="utf-8") as f:
            data = json.load(f)
        for idx, example in enumerate(data):

            # The test split ships without gold annotations, so default the
            # missing keys to keep the schema identical across splits.
            for k in ["answer", "type", "level"]:
                if k not in example:
                    example[k] = None

            if "supporting_facts" not in example:
                example["supporting_facts"] = []

            yield idx, {
                "id": example["_id"],
                "question": example["question"],
                "answer": example["answer"],
                "type": example["type"],
                "supporting_facts": [{"title": f[0], "sent_id": f[1]} for f in example["supporting_facts"]],
                "context": [{"title": f[0], "sentences": f[1]} for f in example["context"]],
                "evidences": example["evidences"],
                "entity_ids": example["entity_ids"],
            }