Datasets: Add RE configuration to science_ie.py

science_ie.py  CHANGED  (+123 -41)
@@ -16,6 +16,8 @@
 
 import glob
 import datasets
+
+from itertools import permutations
 from spacy.lang.en import English
 
 # Find for instance the citation on arxiv or on the dataset repo/website
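Note: the added itertools.permutations import feeds the candidate-pair generation in the RE branch further down. It yields ordered pairs rather than combinations, so both directions of every entity pair become relation candidates. A quick illustration:

from itertools import permutations

# Ordered pairs of entity ids: (T1, T2) and (T2, T1) are distinct candidates.
print(list(permutations(["T1", "T2", "T3"], 2)))
# [('T1', 'T2'), ('T1', 'T3'), ('T2', 'T1'), ('T2', 'T3'), ('T3', 'T1'), ('T3', 'T2')]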
@@ -70,31 +72,53 @@ class ScienceIE(datasets.GeneratorBasedBuilder):
     VERSION = datasets.Version("1.0.0")
 
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="ner", version=VERSION, description="NER part of ScienceIE")
+        datasets.BuilderConfig(name="ner", version=VERSION, description="NER part of ScienceIE"),
+        datasets.BuilderConfig(name="re", version=VERSION, description="Relation extraction part of ScienceIE"),
     ]
 
     DEFAULT_CONFIG_NAME = "ner"
 
     def _info(self):
-        features = datasets.Features(
-            {
-                "id": datasets.Value("string"),
-                "tokens": datasets.Sequence(datasets.Value("string")),
-                "ner_tags": datasets.Sequence(
-                    datasets.features.ClassLabel(
+        if self.config.name == "re":
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "tokens": datasets.Sequence(datasets.Value("string")),
+                    "arg1_start": datasets.Value("int32"),
+                    "arg1_end": datasets.Value("int32"),
+                    "arg1_type": datasets.Value("string"),
+                    "arg2_start": datasets.Value("int32"),
+                    "arg2_end": datasets.Value("int32"),
+                    "arg2_type": datasets.Value("string"),
+                    "relation": datasets.features.ClassLabel(
                         names=[
                             "O",
-                            "B-Process",
-                            "I-Process",
-                            "B-Task",
-                            "I-Task",
-                            "B-Material",
-                            "I-Material"
+                            "Synonym-of",
+                            "Hyponym-of"
                         ]
                     )
-                )
-            }
-        )
+                }
+            )
+        else:
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "tokens": datasets.Sequence(datasets.Value("string")),
+                    "ner_tags": datasets.Sequence(
+                        datasets.features.ClassLabel(
+                            names=[
+                                "O",
+                                "B-Process",
+                                "I-Process",
+                                "B-Task",
+                                "I-Task",
+                                "B-Material",
+                                "I-Material"
+                            ]
+                        )
+                    )
+                }
+            )
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
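Note: under the new "re" config, each generated example is one candidate argument pair over the shared token list, with end-exclusive token-level spans and the relation encoded through the ClassLabel above. A minimal sketch of what such an example could look like (the sentence, spans, and id are fabricated for illustration, not taken from the corpus):

import datasets

relation = datasets.features.ClassLabel(names=["O", "Synonym-of", "Hyponym-of"])

# Hypothetical "re" example: argument spans are token indices into `tokens`.
example = {
    "id": "7",
    "tokens": ["the", "oxygen", "reduction", "reaction", "(", "ORR", ")"],
    "arg1_start": 1, "arg1_end": 4, "arg1_type": "Process",  # "oxygen reduction reaction"
    "arg2_start": 5, "arg2_end": 6, "arg2_type": "Process",  # "ORR"
    "relation": relation.str2int("Synonym-of"),
}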
@@ -134,41 +158,99 @@ class ScienceIE(datasets.GeneratorBasedBuilder):
                 open(f_text_path, mode="r", encoding="utf8") as f_text:
             text = f_text.read()
             entities = []
+            synonym_groups = []
+            hyponyms = []
             for line in f_anno:
-                anno_inst = line.strip("\n").split("\t")
-                if len(anno_inst) == 3:
-                    anno_inst1 = anno_inst[1].split(" ")
-                    if len(anno_inst1) == 3:
-                        keytype, start, end = anno_inst1
+                split_line = line.strip("\n").split("\t")
+                identifier = split_line[0]
+                annotation = split_line[1].split(" ")
+                key_type = annotation[0]
+                if key_type == "Synonym-of":
+                    synonym_ids = annotation[1:]
+                    synonym_groups.append(synonym_ids)
+                else:
+                    if len(annotation) == 3:
+                        _, start, end = annotation
                     else:
-                        keytype, start, _, end = anno_inst1
-                    if not keytype.endswith("-of"):  # NER annotation, relation annotation contains "-of"
+                        _, start, _, end = annotation
+                    if key_type == "Hyponym-of":
+                        assert start.startswith("Arg1:") and end.startswith("Arg2:")
+                        hyponyms.append({
+                            "id": identifier,
+                            "arg1_id": start[5:],
+                            "arg2_id": end[5:]
+                        })
+                    else:
+                        # NER annotation
                         # look up span in text and print error message if it doesn't match the .ann span text
                         keyphr_text_lookup = text[int(start):int(end)]
-                        keyphr_ann = anno_inst[2]
+                        keyphr_ann = split_line[2]
                         if keyphr_text_lookup != keyphr_ann:
                             print("Spans don't match for anno " + line.strip() + " in file " + f_anno_path)
-
                         entities.append({
-                            "id": anno_inst[0],
-                            "start": int(start),
-                            "end": int(end),
-                            "type": keytype
+                            "id": identifier,
+                            "char_start": int(start),
+                            "char_end": int(end),
+                            "type": key_type
                         })
             doc = word_splitter(text)
             tokens = [token.text for token in doc]
             ner_tags = ["O" for _ in tokens]
             for entity in entities:
-                entity_span = doc.char_span(entity["start"], entity["end"], alignment_mode="expand")
-                start = entity_span.start
-                end = entity_span.end
-                ner_tags[start] = "B-" + entity["type"]
-                for i in range(start + 1, end):
+                entity_span = doc.char_span(entity["char_start"], entity["char_end"], alignment_mode="expand")
+                entity["start"] = entity_span.start
+                entity["end"] = entity_span.end
+                ner_tags[entity["start"]] = "B-" + entity["type"]
+                for i in range(entity["start"] + 1, entity["end"]):
                     ner_tags[i] = "I-" + entity["type"]
-            key += 1
-            # Yields examples as (key, example) tuples
-            yield key, {
-                "id": str(key),
-                "tokens": tokens,
-                "ner_tags": ner_tags
-            }
+
+            if self.config.name == "re":
+                entity_pairs = list(permutations([entity["id"] for entity in entities], 2))
+                relations = []
+
+                def add_relation(_arg1_id, _arg2_id, _relation):
+                    arg1 = None
+                    arg2 = None
+                    for e in entities:
+                        if e["id"] == _arg1_id:
+                            arg1 = e
+                        elif e["id"] == _arg2_id:
+                            arg2 = e
+                    assert arg1 is not None and arg2 is not None
+                    relations.append({
+                        "arg1_start": arg1["start"],
+                        "arg1_end": arg1["end"],
+                        "arg1_type": arg1["type"],
+                        "arg2_start": arg2["start"],
+                        "arg2_end": arg2["end"],
+                        "arg2_type": arg2["type"],
+                        "relation": _relation
+                    })
+                    # noinspection PyTypeChecker
+                    entity_pairs.remove((_arg1_id, _arg2_id))
+
+                for synonym_group in synonym_groups:
+                    for arg1_id, arg2_id in permutations(synonym_group, 2):
+                        add_relation(arg1_id, arg2_id, _relation="Synonym-of")
+                for hyponym in hyponyms:
+                    add_relation(hyponym["arg1_id"], hyponym["arg2_id"], _relation="Hyponym-of")
+                for arg1_id, arg2_id in entity_pairs:
+                    add_relation(arg1_id, arg2_id, _relation="O")
+                for relation in relations:
+                    key += 1
+                    # Yields examples as (key, example) tuples
+                    example = {
+                        "id": str(key),
+                        "tokens": tokens
+                    }
+                    for k, v in relation.items():
+                        example[k] = v
+                    yield key, example
+            else:
+                key += 1
+                # Yields examples as (key, example) tuples
+                yield key, {
+                    "id": str(key),
+                    "tokens": tokens,
+                    "ner_tags": ner_tags
+                }
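Note: token-level spans come out of spaCy's Doc.char_span with alignment_mode="expand", which snaps character offsets that fall inside a token outward to whole-token boundaries, so every .ann character span maps to at least one token. A small sketch (fabricated sentence; uses only the blank English pipeline the script already imports):

from spacy.lang.en import English

word_splitter = English()  # tokenizer-only pipeline, as in the script
doc = word_splitter("Deep parsing improves relation extraction.")

# Exact token boundaries: characters 5-12 cover the token "parsing".
span = doc.char_span(5, 12, alignment_mode="expand")
print(span.start, span.end, span.text)  # 1 2 parsing

# Offsets cutting into tokens expand to the enclosing tokens.
span = doc.char_span(6, 20, alignment_mode="expand")
print([t.text for t in span])  # ['parsing', 'improves']

With the commit applied, the new configuration would presumably be loaded as datasets.load_dataset("science_ie.py", name="re"), next to the existing "ner" default.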