Datasets:

Languages:
English
License:
File size: 11,303 Bytes
7d57d43
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
import json
import logging
import os
from collections import defaultdict
from copy import copy
from typing import Any, Dict, Iterable, List

import datasets
from datasets import GeneratorBasedBuilder

logger = logging.getLogger(__name__)

_DESCRIPTION = """\
SciFact, a dataset of 1.4K expert-written scientific claims paired with evidence-containing abstracts, and annotated \\
with labels and rationales. This version differs from `allenai/scifact` on HF because we do not have separate splits \\
for claims and a corpus, instead we combine documents with claims that it supports or refutes, note that there are \\
also some documents that do not have any claims associated with them as well as there are some claims that do not \\
have any evidence. In the latter case we assign all such claims to the DUMMY document with ID -1 and without any text \\
(i.e. abstract sentences).
"""

DATA_URL = "https://scifact.s3-us-west-2.amazonaws.com/release/latest/data.tar.gz"
SUBDIR = "data"

VARIANT_DOCUMENTS = "as_documents"
VARIANT_CLAIMS = "as_claims"


class ScifactConfig(datasets.BuilderConfig):
    """BuilderConfig for SciFact.

    All keyword arguments (``name``, ``description``, ...) are forwarded
    unchanged to ``datasets.BuilderConfig``; no extra options are defined,
    so no ``__init__`` override is needed.
    """


class SciFact(GeneratorBasedBuilder):
    """Builder for the SciFact claim-verification dataset.

    Two configurations are provided:

    * ``as_documents``: one example per corpus document, carrying the claims
      that cite it. Claims without any cited document are attached to a dummy
      document with ``doc_id == -1``.
    * ``as_claims``: one example per claim, carrying the cited documents
      together with the per-document evidence.
    """

    BUILDER_CONFIGS = [
        ScifactConfig(
            name=VARIANT_DOCUMENTS,
            description="Documents that serve as evidence for some claims that are split into train, test, dev",
        ),
        ScifactConfig(
            name=VARIANT_CLAIMS,
            # Fixed: the previous description was a copy-paste of the
            # documents variant and did not describe this configuration.
            description="Claims together with the cited documents that serve as their evidence, split into train, test, dev",
        ),
    ]

    def _info(self) -> datasets.DatasetInfo:
        """Return the ``datasets.DatasetInfo`` for the selected variant.

        Raises:
            ValueError: if ``self.config.name`` is not a known variant.
        """
        # Evidence attached to a (claim, document) pair: a label plus the
        # indices of the rationale sentences inside the abstract.
        evidence_feature = datasets.features.Sequence(
            feature={
                "label": datasets.Value(dtype="string", id=None),
                "sentences": datasets.features.Sequence(
                    datasets.Value(dtype="int32", id=None)
                ),
            }
        )
        if self.config.name == VARIANT_DOCUMENTS:
            features = {
                "doc_id": datasets.Value("int32"),  # document ID
                "title": datasets.Value("string"),  # document title
                "abstract": datasets.features.Sequence(
                    datasets.Value("string")
                ),  # document sentences
                # Whether the abstract is structured, i.e. has OBJECTIVE,
                # CONCLUSION, METHODS marked in the text.
                "structured": datasets.Value("bool"),
                # Claims associated with the document.
                "claims": datasets.features.Sequence(
                    feature={
                        "id": datasets.Value(dtype="int32", id=None),
                        "claim": datasets.Value(dtype="string", id=None),
                        "evidence": evidence_feature,
                    }
                ),
            }
        elif self.config.name == VARIANT_CLAIMS:
            features = {
                "id": datasets.Value("int32"),  # claim ID
                "claim": datasets.Value(dtype="string", id=None),  # claim text
                # Documents cited by the claim, each with its evidence.
                "cited_docs": datasets.features.Sequence(
                    feature={
                        "doc_id": datasets.Value(dtype="int32", id=None),
                        "title": datasets.Value("string"),  # document title
                        "abstract": datasets.features.Sequence(
                            datasets.Value("string")
                        ),  # document sentences
                        # Whether the abstract is structured (see above).
                        "structured": datasets.Value("bool"),
                        "evidence": evidence_feature,
                    }
                ),
            }
        else:
            raise ValueError(f"unknown dataset variant: {self.config.name}")

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            features=datasets.Features(features),
            supervised_keys=None,
            # Homepage of the dataset for documentation.
            homepage="https://scifact.apps.allenai.org/",
        )

    def _generate_examples(self, claims_filepath: str, corpus_filepath: str):
        """Yield ``(key, example)`` pairs for the configured variant.

        Args:
            claims_filepath: Path to a ``claims_*.jsonl`` file (one claim per line).
            corpus_filepath: Path to ``corpus.jsonl`` (one document per line).

        Raises:
            ValueError: if ``self.config.name`` is not a known variant.
        """
        # Explicit encoding so parsing does not depend on the locale.
        with open(claims_filepath, encoding="utf-8") as f:
            claim_data = [json.loads(line) for line in f]

        with open(corpus_filepath, encoding="utf-8") as f:
            corpus_docs = [json.loads(line) for line in f]

        if self.config.name == VARIANT_DOCUMENTS:
            # Invert the claim -> cited-documents mapping: for every document,
            # collect the claims that cite it. Claims without cited documents
            # fall back to the dummy document with ID -1.
            doc_id2claims = defaultdict(list)
            for claim in claim_data:
                cited_doc_ids = claim.pop("cited_doc_ids", [-1])
                evidence = claim.pop("evidence", dict())
                for cited_doc_id in cited_doc_ids:
                    current_claim = claim.copy()
                    # Evidence in the raw data is keyed by doc id as a string.
                    current_claim["evidence"] = evidence.get(str(cited_doc_id), [])
                    doc_id2claims[cited_doc_id].append(current_claim)

            # Dummy document that carries the claims without any evidence.
            dummy_doc = {"doc_id": -1, "title": "", "abstract": [], "structured": False}
            corpus_docs = [dummy_doc] + corpus_docs

            for id_, doc in enumerate(corpus_docs):
                doc = doc.copy()
                doc["claims"] = doc_id2claims.get(doc["doc_id"], [])
                yield id_, doc
        elif self.config.name == VARIANT_CLAIMS:
            doc_id2doc = {doc["doc_id"]: doc for doc in corpus_docs}
            for _id, claim in enumerate(claim_data):
                evidence = claim.pop("evidence", {})
                cited_doc_ids = claim.pop("cited_doc_ids", [])
                claim["cited_docs"] = []
                for cited_doc_id in cited_doc_ids:
                    # Shallow copy so the shared corpus entry is not mutated.
                    doc = doc_id2doc[cited_doc_id].copy()
                    doc["evidence"] = evidence.get(str(cited_doc_id), [])
                    claim["cited_docs"].append(doc)
                yield _id, claim
        else:
            raise ValueError(f"unknown dataset variant: {self.config.name}")

    def _split_generators(self, dl_manager):
        """Define the train/validation/test splits.

        Downloads and extracts the official archive unless a manual data
        directory was supplied via ``dl_manager.manual_dir``.
        """
        if dl_manager.manual_dir is None:
            data_dir = os.path.join(dl_manager.download_and_extract(DATA_URL), SUBDIR)
        else:
            # Absolute path of the manual_dir.
            data_dir = os.path.abspath(dl_manager.manual_dir)

        # All splits share the same corpus; only the claims file differs.
        corpus_filepath = os.path.join(data_dir, "corpus.jsonl")
        split_to_claims_file = [
            (datasets.Split.TRAIN, "claims_train.jsonl"),
            (datasets.Split.VALIDATION, "claims_dev.jsonl"),
            (datasets.Split.TEST, "claims_test.jsonl"),
        ]
        return [
            datasets.SplitGenerator(
                name=split,
                # These kwargs will be passed to _generate_examples.
                gen_kwargs={
                    "claims_filepath": os.path.join(data_dir, claims_file),
                    "corpus_filepath": corpus_filepath,
                },
            )
            for split, claims_file in split_to_claims_file
        ]

    def _convert_to_output_eval_format(
        self, data: Iterable[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """Convert ``as_documents`` examples to the official SciFact
        evaluation submission format.

        https://github.com/allenai/scifact/blob/68b98a56d93e0f9da0d2aab4e6c3294699a0f72e/doc/evaluation.md#submission-format
        Each claim is represented as a dict with:
            "id": int An integer claim ID.
            "evidence": Dict[doc_id, Dict] The evidence for the claim, where
                each per-document entry has:
                    "sentences": List[int] rationale sentence indices.
                    "label": str the evidence label.

        Returns:
            Claims sorted by their integer ID.

        Raises:
            NotImplementedError: for the ``as_claims`` variant.
            ValueError: for an unknown variant.
        """
        if self.config.name == VARIANT_DOCUMENTS:
            # Collect all claim-level annotations from all documents. NOTE:
            # document["claims"] is columnar here (a dict of equal-length
            # lists), as produced by loading the dataset with HF datasets.
            claim2doc2sent_with_label = dict()
            for document in data:
                doc_id = document["doc_id"]
                # Skip if the document does not have any related claims.
                if len(document["claims"]["claim"]) == 0:
                    continue
                for idx in range(len(document["claims"]["claim"])):
                    claim_id = document["claims"]["id"][idx]
                    claim_evidence = document["claims"]["evidence"][idx]
                    if claim_id not in claim2doc2sent_with_label:
                        claim2doc2sent_with_label[claim_id] = dict()
                    if doc_id not in claim2doc2sent_with_label[claim_id]:
                        # Only the first evidence label is kept: the submission
                        # format allows a single label per (claim, document).
                        if len(claim_evidence["label"]) > 0:
                            claim2doc2sent_with_label[claim_id][doc_id] = {
                                "label": claim_evidence["label"][0],
                                "sentences": [],
                            }
                            for ev_sentences in claim_evidence["sentences"]:
                                claim2doc2sent_with_label[claim_id][doc_id][
                                    "sentences"
                                ].extend(ev_sentences)

            outputs = []
            for claim_id, doc2evidence in claim2doc2sent_with_label.items():
                claim_dict = {
                    "id": claim_id,
                    "evidence": {
                        doc_id: {
                            "sentences": evidence["sentences"],
                            "label": evidence["label"],
                        }
                        for doc_id, evidence in doc2evidence.items()
                    },
                }
                outputs.append((int(claim_id), claim_dict))

            # The official evaluation expects claims ordered by claim ID.
            return [claim for claim_id, claim in sorted(outputs, key=lambda x: x[0])]

        elif self.config.name == VARIANT_CLAIMS:
            raise NotImplementedError(
                f"_convert_to_output_eval_format is not yet implemented for dataset variant {self.config.name}"
            )
        else:
            raise ValueError(f"unknown dataset variant: {self.config.name}")