gabrielaltay committed
Commit 0be64ef · Parent(s): c6f1773

upload hub_repos/gnormplus/gnormplus.py to hub from bigbio repo

Files changed (1): gnormplus.py (+90 −69)
gnormplus.py CHANGED
@@ -27,7 +27,7 @@ from .bigbiohub import Tasks
 from .bigbiohub import get_texts_and_offsets_from_bioc_ann
 
 
-_LANGUAGES = ['English']
+_LANGUAGES = ["English"]
 _PUBMED = True
 _LOCAL = False
 _CITATION = """\
@@ -62,7 +62,7 @@ PubTator was used as our annotation tool along with BioC formats.
 
 _HOMEPAGE = "https://www.ncbi.nlm.nih.gov/research/bionlp/Tools/gnormplus/"
 
-_LICENSE = 'License information unavailable'
+_LICENSE = "UNKNOWN"
 
 _URLS = {
     _DATASETNAME: "https://www.ncbi.nlm.nih.gov/CBBresearch/Lu/Demo/tmTools/download/GNormPlus/GNormPlusCorpus.zip"
@@ -157,17 +157,22 @@ class GnormplusDataset(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TRAIN,
                 # Whatever you put in gen_kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir, "GNormPlusCorpus/BC2GNtrain.BioC.xml"
-                    ),
+                    "filepaths": [
+                        os.path.join(data_dir, "GNormPlusCorpus/BC2GNtrain.BioC.xml"),
+
+                        # This sub-part of the corpus belongs to the GIA Test Collection;
+                        # however, the paper uses it only for training the models, so we
+                        # also add it to the training split.
+                        os.path.join(data_dir, "GNormPlusCorpus/NLMIAT.BioC.xml"),
+                    ],
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir, "GNormPlusCorpus/BC2GNtest.BioC.xml"
-                    ),
+                    "filepaths": [
+                        os.path.join(data_dir, "GNormPlusCorpus/BC2GNtest.BioC.xml"),
+                    ],
                 },
             ),
         ]
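As the comment in this hunk notes, whatever goes into gen_kwargs is forwarded to _generate_examples: datasets effectively invokes self._generate_examples(**split_generator.gen_kwargs) for each split. A toy sketch of that contract (file names invented, not part of the commit):

import datasets

# datasets calls the generator roughly as:
#     self._generate_examples(**split_generator.gen_kwargs)
# so renaming the key from "filepath" to "filepaths" here requires the
# matching signature change to _generate_examples in the next hunk.
split = datasets.SplitGenerator(
    name=datasets.Split.TRAIN,
    gen_kwargs={"filepaths": ["train_a.xml", "train_b.xml"]},  # toy paths
)
print(split.gen_kwargs)  # {'filepaths': ['train_a.xml', 'train_b.xml']}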
@@ -197,66 +202,82 @@ class GnormplusDataset(datasets.GeneratorBasedBuilder):
             "normalized": normalized,
         }
 
-    def _generate_examples(self, filepath) -> Tuple[int, Dict]:
+    def _generate_examples(self, filepaths) -> Tuple[int, Dict]:
         uid = map(str, itertools.count(start=0, step=1))
 
-        with open(filepath, "r") as fp:
-            collection = biocxml.load(fp)
-
-        for idx, document in enumerate(collection.documents):
-            if self.config.schema == "source":
-                features = {
-                    "doc_id": document.id,
-                    "passages": [
-                        {
-                            "text": passage.text,
-                            "type": passage.infons["type"],
-                            "location": {
-                                "offset": passage.offset,
-                                "length": passage.total_span.length,
-                            },
-                        }
-                        for passage in document.passages
-                    ],
-                    "entities": [
-                        self._parse_bioc_entity(
-                            next(uid), entity, insert_tax_id=True
-                        )
-                        for passage in document.passages
-                        for entity in passage.annotations
-                    ],
-                }
-                yield idx, features
-            elif self.config.schema == "bigbio_kb":
-                # passage offsets/lengths do not connect, recalculate them for this schema.
-                passage_spans = []
-                start = 0
-                for passage in document.passages:
-                    end = start + len(passage.text)
-                    passage_spans.append((start, end))
-                    start = end + 1
-
-                features = {
-                    "id": next(uid),
-                    "document_id": document.id,
-                    "passages": [
-                        {
-                            "id": next(uid),
-                            "type": passage.infons["type"],
-                            "text": [passage.text],
-                            "offsets": [span],
-                        }
-                        for passage, span in zip(document.passages, passage_spans)
-                    ],
-                    "entities": [
-                        self._parse_bioc_entity(next(uid), entity)
-                        for passage in document.passages
-                        for entity in passage.annotations
-                    ],
-                    "events": [],
-                    "coreferences": [],
-                    "relations": [],
-                }
-                yield idx, features
-            else:
-                raise NotImplementedError(self.config.schema)
+        for filepath in filepaths:
+            with open(filepath, "r") as fp:
+                collection = biocxml.load(fp)
+
+            for _, document in enumerate(collection.documents):
+                idx = next(uid)
+                text = " ".join([passage.text for passage in document.passages])
+
+                insert_tax = self.config.schema == "source"
+                entities = [
+                    self._parse_bioc_entity(next(uid), entity, insert_tax_id=insert_tax)
+                    for passage in document.passages
+                    for entity in passage.annotations
+                ]
+
+                # Some of the entities have an off-by-one error. Correct these annotations!
+                self.adjust_entity_offsets(text, entities)
+
+                if self.config.schema == "source":
+                    features = {
+                        "doc_id": document.id,
+                        "passages": [
+                            {
+                                "text": passage.text,
+                                "type": passage.infons["type"],
+                                "location": {
+                                    "offset": passage.offset,
+                                    "length": passage.total_span.length,
+                                },
+                            }
+                            for passage in document.passages
+                        ],
+                        "entities": entities,
+                    }
+
+                    yield idx, features
+                elif self.config.schema == "bigbio_kb":
+                    # passage offsets/lengths do not connect, recalculate them for this schema.
+                    passage_spans = []
+                    start = 0
+                    for passage in document.passages:
+                        end = start + len(passage.text)
+                        passage_spans.append((start, end))
+                        start = end + 1
+
+                    features = {
+                        "id": next(uid),
+                        "document_id": document.id,
+                        "passages": [
+                            {
+                                "id": next(uid),
+                                "type": passage.infons["type"],
+                                "text": [passage.text],
+                                "offsets": [span],
+                            }
+                            for passage, span in zip(document.passages, passage_spans)
+                        ],
+                        "entities": entities,
+                        "events": [],
+                        "coreferences": [],
+                        "relations": [],
+                    }
+
+                    yield idx, features
+                else:
+                    raise NotImplementedError(self.config.schema)
+
+    def adjust_entity_offsets(self, text: str, entities: List[Dict]):
+        for entity in entities:
+            start, end = entity["offsets"][0]
+            entity_mention = entity["text"][0]
+            if not text[start:end] == entity_mention:
+                if text[start - 1 : end - 1] == entity_mention:
+                    entity["offsets"] = [(start - 1, end - 1)]
+                elif text[start : end - 1] == entity_mention:
+                    entity["offsets"] = [(start, end - 1)]
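The new adjust_entity_offsets helper repairs annotations whose offsets are shifted by one character relative to the passage texts joined with single spaces. A standalone sketch of the same repair logic on an invented document (the text and entity below are made up for illustration, not corpus data):

text = "BRCA1 mutations are linked to breast cancer."
entity = {"text": ["BRCA1"], "offsets": [(1, 6)]}  # shifted: text[1:6] == "RCA1 "

start, end = entity["offsets"][0]
mention = entity["text"][0]
if text[start:end] != mention:
    if text[start - 1 : end - 1] == mention:    # whole span shifted right by one
        entity["offsets"] = [(start - 1, end - 1)]
    elif text[start : end - 1] == mention:      # end offset overshoots by one
        entity["offsets"] = [(start, end - 1)]

print(entity["offsets"])  # [(0, 5)] -> text[0:5] == "BRCA1"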
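With the script on the Hub, either schema can be loaded the usual way. A sketch, assuming the repo id bigbio/gnormplus and the standard bigbio config names (both are assumptions; check the actual hub repo):

from datasets import load_dataset

# Repo id and config names assumed to follow bigbio conventions
# (<name>_source / <name>_bigbio_kb); on newer datasets versions a
# script-based dataset may also need trust_remote_code=True.
source = load_dataset("bigbio/gnormplus", name="gnormplus_source", split="train")
kb = load_dataset("bigbio/gnormplus", name="gnormplus_bigbio_kb", split="train")

doc = kb[0]
print(doc["document_id"])
print(doc["entities"][0]["offsets"])  # offsets already corrected by adjust_entity_offsets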