Datasets:

Modalities: Text
Languages: English
Size: < 1K
Libraries: Datasets
License:
gabrielaltay committed on
Commit ac9365b
1 Parent(s): 1f639a9

upload hubscripts/scai_disease_hub.py to hub from bigbio repo

Files changed (1)
  1. scai_disease.py +261 -0
scai_disease.py ADDED
@@ -0,0 +1,261 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ A dataset loader for the SCAI Disease dataset.
+
+ SCAI Disease is a dataset annotated in 2010 with mentions of diseases and
+ adverse effects. It is a corpus containing 400 randomly selected MEDLINE
+ abstracts generated using ‘Disease OR Adverse effect’ as a PubMed query. This
+ evaluation corpus was annotated by two individuals who hold a Master’s degree
+ in life sciences.
+ """
+
+ import os
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from .bigbiohub import kb_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+
+ _LANGUAGES = ['English']
+ _PUBMED = True
+ _LOCAL = False
+ _CITATION = """\
+ @inproceedings{gurulingappa:lrec-ws10,
+   author = {Harsha Gurulingappa and Roman Klinger and Martin Hofmann-Apitius and Juliane Fluck},
+   title = {An Empirical Evaluation of Resources for the Identification of Diseases and Adverse Effects in Biomedical Literature},
+   booktitle = {LREC Workshop on Building and Evaluating Resources for Biomedical Text Mining},
+   year = {2010},
+ }
+ """
+
+ _DATASETNAME = "scai_disease"
+ _DISPLAYNAME = "SCAI Disease"
+
+ _DESCRIPTION = """\
+ SCAI Disease is a dataset annotated in 2010 with mentions of diseases and
+ adverse effects. It is a corpus containing 400 randomly selected MEDLINE
+ abstracts generated using ‘Disease OR Adverse effect’ as a PubMed query. This
+ evaluation corpus was annotated by two individuals who hold a Master’s degree
+ in life sciences.
+ """
+
+ _HOMEPAGE = "https://www.scai.fraunhofer.de/en/business-research-areas/bioinformatics/downloads/corpus-for-disease-names-and-adverse-effects.html"
+
+ _LICENSE = 'License information unavailable'
+
+ _URLS = {
+     _DATASETNAME: "https://www.scai.fraunhofer.de/content/dam/scai/de/downloads/bioinformatik/Disease-ae-corpus.iob",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _BIGBIO_VERSION = "1.0.0"
+
+
+ class ScaiDiseaseDataset(datasets.GeneratorBasedBuilder):
+     """SCAI Disease is a dataset annotated in 2010 with mentions of diseases and
+     adverse effects."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name="scai_disease_source",
+             version=SOURCE_VERSION,
+             description="SCAI Disease source schema",
+             schema="source",
+             subset_id="scai_disease",
+         ),
+         BigBioConfig(
+             name="scai_disease_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description="SCAI Disease BigBio schema",
+             schema="bigbio_kb",
+             subset_id="scai_disease",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "scai_disease_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "document_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "tokens": [
+                         {
+                             "offsets": [datasets.Value("int64")],
+                             "text": datasets.Value("string"),
+                             "tag": datasets.Value("string"),
+                         }
+                     ],
+                     "entities": [
+                         {
+                             "offsets": [datasets.Value("int64")],
+                             "text": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                         }
+                     ],
+                 }
+             )
+
+         elif self.config.schema == "bigbio_kb":
+             features = kb_features
+         else:
+             raise ValueError("Unrecognized schema: %s" % self.config.schema)
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         url = _URLS[_DATASETNAME]
+         filepath = dl_manager.download(url)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": filepath,
+                     "split": "train",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         # Iterates through lines in file, collecting all lines belonging
+         # to an example and converting into a single dict
+         examples = []
+         tokens = None
+         with open(filepath, "r") as data_file:
+             for line in data_file:
+                 line = line.strip()
+                 if line.startswith("###"):
+                     tokens = [line]
+                 elif line == "":
+                     examples.append(self._make_example(tokens))
+                 else:
+                     tokens.append(line)
+
+         # Returns the examples using the desired schema
+         if self.config.schema == "source":
+             for i, example in enumerate(examples):
+                 yield i, example
+
+         elif self.config.schema == "bigbio_kb":
+             for i, example in enumerate(examples):
+                 bigbio_example = {
+                     "id": "example-" + str(i),
+                     "document_id": example["document_id"],
+                     "passages": [
+                         {
+                             "id": "passage-" + str(i),
+                             "type": "abstract",
+                             "text": [example["text"]],
+                             "offsets": [[0, len(example["text"])]],
+                         }
+                     ],
+                     "entities": [],
+                     "events": [],
+                     "coreferences": [],
+                     "relations": [],
+                 }
+
+                 # Converts entities to BigBio format
+                 for j, entity in enumerate(example["entities"]):
+                     bigbio_example["entities"].append(
+                         {
+                             "id": "entity-" + str(i) + "-" + str(j),
+                             "offsets": [entity["offsets"]],
+                             "text": [entity["text"]],
+                             "type": entity["type"],
+                             "normalized": [],
+                         }
+                     )
+
+                 yield i, bigbio_example
+
+     @staticmethod
+     def _make_example(tokens):
+         """
+         Converts a list of lines representing tokens into an example dictionary
+         formatted according to the source schema
+
+         :param tokens: list of strings
+         :return: dictionary in the source schema
+         """
+         document_id = tokens[0][4:]
+
+         text = ""
+         processed_tokens = []
+         entities = []
+         last_offset = 0
+
+         for token in tokens[1:]:
+             token_pieces = token.split("\t")
+             if len(token_pieces) != 5:
+                 raise ValueError("Failed to parse line: %s" % token)
+
+             token_text = str(token_pieces[0])
+             token_start = int(token_pieces[1])
+             token_end = int(token_pieces[2])
+             entity_text = str(token_pieces[3])
+             token_tag = str(token_pieces[4])[1:]
+
+             if token_start > last_offset:
+                 for _ in range(token_start - last_offset):
+                     text += " "
+             elif token_start < last_offset:
+                 raise ValueError("Invalid start index: %s" % token)
+             last_offset = token_end
+
+             text += token_text
+             processed_tokens.append(
+                 {
+                     "offsets": [token_start, token_end],
+                     "text": token_text,
+                     "tag": token_tag,
+                 }
+             )
+             if entity_text != "":
+                 entities.append(
+                     {
+                         "offsets": [token_start, token_start + len(entity_text)],
+                         "text": entity_text,
+                         "type": token_tag[2:],
+                     }
+                 )
+
+         return {
+             "document_id": document_id,
+             "text": text,
+             "entities": entities,
+             "tokens": processed_tokens,
+         }
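
A minimal usage sketch for loading the corpus through this script. The repo id "bigbio/scai_disease" is an assumption based on the dataset name; it is not stated in this commit, so adjust it to the actual Hub repository path.

    import datasets

    # Source schema: per-abstract tokens (offsets and IOB-style tags) plus entity spans.
    source = datasets.load_dataset("bigbio/scai_disease", name="scai_disease_source", split="train")

    # Harmonized BigBio kb schema: passages and entities in the shared BigBio format.
    kb = datasets.load_dataset("bigbio/scai_disease", name="scai_disease_bigbio_kb", split="train")

    print(source[0]["document_id"])
    print(kb[0]["entities"][:3])

    # Note: recent versions of the datasets library may additionally require
    # trust_remote_code=True when loading script-based datasets such as this one.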