David Wadden committed
Commit 9c66dd8 · 1 Parent(s): cd4604b

Making progress.

Files changed (1)
  1. scifact_entailment.py +146 -0
scifact_entailment.py ADDED
@@ -0,0 +1,146 @@
+ """Scientific fact-checking dataset. Verifies claims based on citation sentences
+ using evidence from the cited abstracts. Formatted as a paragraph-level entailment task."""
+
+
+ import datasets
+
+
+ _CITATION = """\
+ @inproceedings{Wadden2020FactOF,
+     title={Fact or Fiction: Verifying Scientific Claims},
+     author={David Wadden and Shanchuan Lin and Kyle Lo and Lucy Lu Wang and Madeleine van Zuylen and Arman Cohan and Hannaneh Hajishirzi},
+     booktitle={EMNLP},
+     year={2020},
+ }
+ """
+
+ _DESCRIPTION = """\
+ SciFact, a dataset of 1.4K expert-written scientific claims paired with evidence-containing abstracts, and annotated with labels and rationales.
+ """
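+
+ # Each generated example pairs one claim with one cited abstract. A purely
+ # illustrative (hypothetical) record, not taken from the dataset:
+ #
+ #     {
+ #         "id": 13,
+ #         "claim": "...",
+ #         "abstract_id": 4983,
+ #         "title": "...",
+ #         "abstract": "[0] First sentence. [1] Second sentence.",
+ #         "verdict": "SUPPORT",  # or "CONTRADICT" / "NEI"
+ #         "evidence": [1],
+ #     }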
+
+
+ class ScifactEntailmentConfig(datasets.BuilderConfig):
+     """BuilderConfig for ScifactEntailment."""
+
+     def __init__(self, **kwargs):
+         """
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(ScifactEntailmentConfig, self).__init__(
+             version=datasets.Version("1.0.0", ""), **kwargs
+         )
+
+
+ class ScifactEntailment(datasets.GeneratorBasedBuilder):
+     """SciFact claims paired with the abstracts they cite, formatted as a
+     paragraph-level entailment task."""
+
+     VERSION = datasets.Version("0.1.0")
+
+     def _info(self):
+         # The feature schema matches the instances yielded by `_generate_examples`.
+         features = {
+             "id": datasets.Value("int32"),  # An integer claim ID.
+             "claim": datasets.Value("string"),  # The text of the claim.
+             "abstract_id": datasets.Value("int32"),  # ID of the cited abstract.
+             "title": datasets.Value("string"),  # Title of the cited abstract.
+             "abstract": datasets.Value("string"),  # Abstract text, with a "[i]" marker per sentence.
+             "verdict": datasets.Value("string"),  # Entailment label for the (claim, abstract) pair.
+             "evidence": datasets.features.Sequence(
+                 datasets.Value("int32")
+             ),  # Indices of the rationale sentences.
+         }
+
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             features=datasets.Features(features),
+             # There is no canonical (input, target) pair for this task, so
+             # `as_supervised` is not supported.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation.
+             homepage="https://scifact.apps.allenai.org/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # The underlying data are pulled from the Hub inside
+         # `_generate_examples`, so `dl_manager` is not used here.
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to `_generate_examples`.
+                 gen_kwargs={"split": "train"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"split": "test"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # The source dataset calls this split "dev".
+                 gen_kwargs={"split": "dev"},
+             ),
+         ]
+
+     def _generate_examples(self, split):
+         """Yields (key, example) tuples, one per (claim, cited abstract) pair."""
+
+         # Load corpus and convert to dict.
+         corpus = datasets.load_dataset(
+             "bigbio/scifact", "scifact_corpus_source", split="train"
+         )
+         corpus = {x["doc_id"]: x for x in corpus}
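+         # Each corpus record is assumed to provide "doc_id", "title", and
+         # "abstract" (a list of sentences), matching the accesses below.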
+
+         # Load claims.
+         claims = datasets.load_dataset(
+             "bigbio/scifact", "scifact_claims_source", split=split
+         )
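+         # Each claim record is assumed to provide "id", "claim", "evidences"
+         # (entries with "doc_id", "label", and "sentence_ids"), and
+         # "cited_doc_ids", matching the accesses below.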
+
+         id_ = -1  # Running example key; one claim can yield several instances,
+         # so enumerating the claims would produce duplicate keys.
+         for claim in claims:
+             evidence = {x["doc_id"]: x for x in claim["evidences"]}
+             for cited_doc_id in claim["cited_doc_ids"]:
+                 id_ += 1
+                 cited_doc = corpus[cited_doc_id]
+                 # Format the abstract: prefix each sentence with its index.
+                 sent_ids = [f"[{i}]" for i in range(len(cited_doc["abstract"]))]
+                 # Get rid of newlines.
+                 sents = [sent.strip() for sent in cited_doc["abstract"]]
+                 zipped = zip(sent_ids, sents)
+                 cited_abstract = " ".join(
+                     [f"{entry[0]} {entry[1]}" for entry in zipped]
+                 )
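+                 # `cited_abstract` now reads like, e.g. (illustrative):
+                 # "[0] First sentence. [1] Second sentence."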
+
+                 if cited_doc_id in evidence:
+                     verdict = evidence[cited_doc_id]["label"]
+                     evidence_sents = evidence[cited_doc_id]["sentence_ids"]
+                 else:
+                     # Cited documents without annotated evidence are labeled
+                     # "NEI" (not enough information).
+                     verdict = "NEI"
+                     evidence_sents = []
+
+                 instance = {
+                     "id": claim["id"],
+                     "claim": claim["claim"],
+                     "abstract_id": cited_doc_id,
+                     "title": cited_doc["title"],
+                     "abstract": cited_abstract,
+                     "verdict": verdict,
+                     "evidence": evidence_sents,
+                 }
+
+                 yield id_, instance
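+
+
+ # Minimal smoke test, a sketch rather than part of the loader: it assumes this
+ # file is saved locally as "scifact_entailment.py" and that the installed
+ # `datasets` version still supports loading from a local script path.
+ if __name__ == "__main__":
+     demo = datasets.load_dataset("scifact_entailment.py", split="validation")
+     print(demo[0]["claim"], demo[0]["verdict"])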