Modalities: Text
Languages: English
Libraries: Datasets
gabrielaltay committed
Commit 81bbf87
1 Parent(s): 2226765

upload hubscripts/bionlp_st_2011_rel_hub.py to hub from bigbio repo

Files changed (1)
  1. bionlp_st_2011_rel.py  +250 -0
bionlp_st_2011_rel.py ADDED
@@ -0,0 +1,250 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path
from typing import List

import datasets

from .bigbiohub import kb_features
from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks

# brat parsing helpers used in _generate_examples below; assumed to be provided
# by the accompanying bigbiohub module (the original script referenced parsing.*
# without importing it)
from .bigbiohub import parse_brat_file
from .bigbiohub import brat_parse_to_bigbio_kb

_DATASETNAME = "bionlp_st_2011_rel"
_DISPLAYNAME = "BioNLP 2011 REL"

_SOURCE_VIEW_NAME = "source"
_UNIFIED_VIEW_NAME = "bigbio"

_LANGUAGES = ['English']
_PUBMED = True
_LOCAL = False
_CITATION = """\
@inproceedings{10.5555/2107691.2107703,
author = {Pyysalo, Sampo and Ohta, Tomoko and Tsujii, Jun'ichi},
title = {Overview of the Entity Relations (REL) Supporting Task of BioNLP Shared Task 2011},
year = {2011},
isbn = {9781937284091},
publisher = {Association for Computational Linguistics},
address = {USA},
abstract = {This paper presents the Entity Relations (REL) task,
a supporting task of the BioNLP Shared Task 2011. The task concerns
the extraction of two types of part-of relations between a gene/protein
and an associated entity. Four teams submitted final results for
the REL task, with the highest-performing system achieving 57.7%
F-score. While experiments suggest use of the data can help improve
event extraction performance, the task data has so far received only
limited use in support of event extraction. The REL task continues
as an open challenge, with all resources available from the shared
task website.},
booktitle = {Proceedings of the BioNLP Shared Task 2011 Workshop},
pages = {83–88},
numpages = {6},
location = {Portland, Oregon},
series = {BioNLP Shared Task '11}
}
"""

_DESCRIPTION = """\
The Entity Relations (REL) task is a supporting task of the BioNLP Shared Task 2011.
The task concerns the extraction of two types of part-of relations between a
gene/protein and an associated entity.
"""

_HOMEPAGE = "https://github.com/openbiocorpora/bionlp-st-2011-rel"

_LICENSE = 'GENIA Project License for Annotated Corpora'

_URLs = {
    "source": "https://github.com/openbiocorpora/bionlp-st-2011-rel/archive/refs/heads/master.zip",
    "bigbio_kb": "https://github.com/openbiocorpora/bionlp-st-2011-rel/archive/refs/heads/master.zip",
}

_SUPPORTED_TASKS = [
    Tasks.NAMED_ENTITY_RECOGNITION,
    Tasks.RELATION_EXTRACTION,
    Tasks.COREFERENCE_RESOLUTION,
]
_SOURCE_VERSION = "1.0.0"
_BIGBIO_VERSION = "1.0.0"


class bionlp_st_2011_rel(datasets.GeneratorBasedBuilder):
    """The Entity Relations (REL) task is a supporting task of the BioNLP Shared Task 2011."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = [
        BigBioConfig(
            name="bionlp_st_2011_rel_source",
            version=SOURCE_VERSION,
            description="bionlp_st_2011_rel source schema",
            schema="source",
            subset_id="bionlp_st_2011_rel",
        ),
        BigBioConfig(
            name="bionlp_st_2011_rel_bigbio_kb",
            version=BIGBIO_VERSION,
            description="bionlp_st_2011_rel BigBio schema",
            schema="bigbio_kb",
            subset_id="bionlp_st_2011_rel",
        ),
    ]

    DEFAULT_CONFIG_NAME = "bionlp_st_2011_rel_source"

    _FILE_SUFFIX = [".a1", ".rel", ".ann"]

    def _info(self):
        """
        - `features` defines the schema of the parsed data set. The schema depends on the
          chosen `config`: If it is `_SOURCE_VIEW_NAME` the schema is the schema of the
          original data. If `config` is `_UNIFIED_VIEW_NAME`, then the schema is the
          canonical KB-task schema defined in `biomedical/schemas/kb.py`.
        """
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document_id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "text_bound_annotations": [  # T line in brat, e.g. type or event trigger
                        {
                            "offsets": datasets.Sequence([datasets.Value("int32")]),
                            "text": datasets.Sequence(datasets.Value("string")),
                            "type": datasets.Value("string"),
                            "id": datasets.Value("string"),
                        }
                    ],
                    "events": [  # E line in brat
                        {
                            "trigger": datasets.Value(
                                "string"
                            ),  # refers to the text_bound_annotation of the trigger
                            "id": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "arguments": datasets.Sequence(
                                {
                                    "role": datasets.Value("string"),
                                    "ref_id": datasets.Value("string"),
                                }
                            ),
                        }
                    ],
                    "relations": [  # R line in brat
                        {
                            "id": datasets.Value("string"),
                            "head": {
                                "ref_id": datasets.Value("string"),
                                "role": datasets.Value("string"),
                            },
                            "tail": {
                                "ref_id": datasets.Value("string"),
                                "role": datasets.Value("string"),
                            },
                            "type": datasets.Value("string"),
                        }
                    ],
                    "equivalences": [  # Equiv line in brat
                        {
                            "id": datasets.Value("string"),
                            "ref_ids": datasets.Sequence(datasets.Value("string")),
                        }
                    ],
                    "attributes": [  # M or A lines in brat
                        {
                            "id": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "ref_id": datasets.Value("string"),
                            "value": datasets.Value("string"),
                        }
                    ],
                    "normalizations": [  # N lines in brat
                        {
                            "id": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "ref_id": datasets.Value("string"),
                            "resource_name": datasets.Value(
                                "string"
                            ),  # Name of the resource, e.g. "Wikipedia"
                            "cuid": datasets.Value(
                                "string"
                            ),  # ID in the resource, e.g. 534366
                            "text": datasets.Value(
                                "string"
                            ),  # Human readable description/name of the entity, e.g. "Barack Obama"
                        }
                    ],
                },
            )
        elif self.config.schema == "bigbio_kb":
            features = kb_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:

        my_urls = _URLs[self.config.schema]
        data_dir = Path(dl_manager.download_and_extract(my_urls))
        data_files = {
            "train": data_dir
            / "bionlp-st-2011-rel-master"
            / "original-data"
            / "train",
            "dev": data_dir / "bionlp-st-2011-rel-master" / "original-data" / "devel",
            "test": data_dir / "bionlp-st-2011-rel-master" / "original-data" / "test",
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_files": data_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data_files": data_files["dev"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_files": data_files["test"]},
            ),
        ]

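    # Illustrative sketch of the brat standoff input parsed below (the IDs,
    # offsets, relation type and surface strings here are made up for
    # illustration, not taken from the corpus files): each <doc>.txt is paired
    # with annotation files such as
    #   <doc>.a1   T1<TAB>Protein 0 4<TAB>IL-2
    #   <doc>.rel  R1<TAB>Protein-Component Arg1:T2 Arg2:T1
    # which end up in the "text_bound_annotations" and "relations" features of
    # the source schema.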
    def _generate_examples(self, data_files: Path):
        """Yield (key, example) pairs parsed from the brat files under `data_files`."""
        if self.config.schema == "source":
            txt_files = list(data_files.glob("*txt"))
            for guid, txt_file in enumerate(txt_files):
                example = parse_brat_file(txt_file, self._FILE_SUFFIX)
                example["id"] = str(guid)
                yield guid, example
        elif self.config.schema == "bigbio_kb":
            txt_files = list(data_files.glob("*txt"))
            for guid, txt_file in enumerate(txt_files):
                example = brat_parse_to_bigbio_kb(
                    parse_brat_file(txt_file, self._FILE_SUFFIX)
                )
                example["id"] = str(guid)
                yield guid, example
        else:
            raise ValueError(f"Invalid config: {self.config.name}")
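
A minimal usage sketch for the uploaded script (assuming it is published as bigbio/bionlp_st_2011_rel on the Hugging Face Hub with bigbiohub.py alongside it; recent versions of the datasets library also require trust_remote_code=True for script-based datasets like this one):

    from datasets import load_dataset

    # original brat-derived fields
    source = load_dataset(
        "bigbio/bionlp_st_2011_rel",
        name="bionlp_st_2011_rel_source",
        trust_remote_code=True,
    )

    # harmonized BigBio KB schema
    kb = load_dataset(
        "bigbio/bionlp_st_2011_rel",
        name="bionlp_st_2011_rel_bigbio_kb",
        trust_remote_code=True,
    )

    print(source["train"][0]["document_id"])
    print(kb["train"][0]["entities"][:2])

The two config names correspond to the BUILDER_CONFIGS defined in the script; the source config exposes the brat annotations as-is, while the bigbio_kb config maps them into the shared KB schema used across BigBio datasets.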