leonweber committed · Commit babc253 · verified · 1 parent: c250567

Update mantra_gsc based on git version adec7d7

Files changed (3):
  1. README.md +71 -0
  2. bigbiohub.py +590 -0
  3. mantra_gsc.py +298 -0
README.md ADDED
@@ -0,0 +1,71 @@
+ ---
+ language:
+ - en
+ - fr
+ - de
+ - nl
+ - es
+ bigbio_language:
+ - English
+ - French
+ - German
+ - Dutch
+ - Spanish
+ license: gpl-3.0
+ bigbio_license_shortname: GPL_3p0_ONLY
+ multilinguality: multilingual
+ pretty_name: MantraGSC
+ homepage: https://github.com/mi-erasmusmc/Mantra-Gold-Standard-Corpus
+ bigbio_pubmed: true
+ bigbio_public: true
+ bigbio_tasks:
+ - NAMED_ENTITY_RECOGNITION
+ - NAMED_ENTITY_DISAMBIGUATION
+ ---
+
+
+ # Dataset Card for Mantra GSC
+
+ ## Dataset Description
+
+ - **Homepage:** https://github.com/mi-erasmusmc/Mantra-Gold-Standard-Corpus
+ - **Pubmed:** True
+ - **Public:** True
+ - **Tasks:** NER, NED
+
+ We selected text units from different parallel corpora (Medline abstract titles, drug labels, biomedical patent claims) in English, French, German, Spanish, and Dutch. Three annotators per language independently annotated the biomedical concepts, based on a subset of the Unified Medical Language System and covering a wide range of semantic groups.
+
+ ## Citation Information
+
+ ```
+ @article{10.1093/jamia/ocv037,
+     author = {Kors, Jan A and Clematide, Simon and Akhondi,
+ Saber A and van Mulligen, Erik M and Rebholz-Schuhmann, Dietrich},
+     title = "{A multilingual gold-standard corpus for biomedical concept recognition: the Mantra GSC}",
+     journal = {Journal of the American Medical Informatics Association},
+     volume = {22},
+     number = {5},
+     pages = {948-956},
+     year = {2015},
+     month = {05},
+     abstract = "{Objective To create a multilingual gold-standard corpus for biomedical concept recognition.Materials
+ and methods We selected text units from different parallel corpora (Medline abstract titles, drug labels,
+ biomedical patent claims) in English, French, German, Spanish, and Dutch. Three annotators per language
+ independently annotated the biomedical concepts, based on a subset of the Unified Medical Language System and
+ covering a wide range of semantic groups. To reduce the annotation workload, automatically generated
+ preannotations were provided. Individual annotations were automatically harmonized and then adjudicated, and
+ cross-language consistency checks were carried out to arrive at the final annotations.Results The number of final
+ annotations was 5530. Inter-annotator agreement scores indicate good agreement (median F-score 0.79), and are
+ similar to those between individual annotators and the gold standard. The automatically generated harmonized
+ annotation set for each language performed equally well as the best annotator for that language.Discussion The use
+ of automatic preannotations, harmonized annotations, and parallel corpora helped to keep the manual annotation
+ efforts manageable. The inter-annotator agreement scores provide a reference standard for gauging the performance
+ of automatic annotation techniques.Conclusion To our knowledge, this is the first gold-standard corpus for
+ biomedical concept recognition in languages other than English. Other distinguishing features are the wide variety
+ of semantic groups that are being covered, and the diversity of text genres that were annotated.}",
+     issn = {1067-5027},
+     doi = {10.1093/jamia/ocv037},
+     url = {https://doi.org/10.1093/jamia/ocv037},
+     eprint = {https://academic.oup.com/jamia/article-pdf/22/5/948/34146393/ocv037.pdf},
+ }
+ ```
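
A minimal usage sketch for the card above — assuming the `datasets` library is installed and that this dataset is published as `bigbio/mantra_gsc` on the Hub; the config name follows the `mantra_gsc_{language}_{corpus}_{schema}` pattern built in `mantra_gsc.py` below:

```python
from datasets import load_dataset

# English Medline subset in the harmonized BigBio KB schema; the config
# name comes from the BUILDER_CONFIGS defined in mantra_gsc.py.
ds = load_dataset(
    "bigbio/mantra_gsc",  # assumed Hub repository id
    name="mantra_gsc_en_medline_bigbio_kb",
    trust_remote_code=True,  # the loader executes mantra_gsc.py
)

example = ds["train"][0]
for entity in example["entities"]:
    # Each entity carries its UMLS grounding in `normalized`.
    print(entity["text"], entity["type"], entity["normalized"])
```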
bigbiohub.py ADDED
@@ -0,0 +1,590 @@
+ from collections import defaultdict
+ from dataclasses import dataclass
+ from enum import Enum
+ import logging
+ from pathlib import Path
+ from types import SimpleNamespace
+ from typing import TYPE_CHECKING, Dict, Iterable, List, Tuple
+
+ import datasets
+
+ if TYPE_CHECKING:
+     import bioc
+
+ logger = logging.getLogger(__name__)
+
+
+ BigBioValues = SimpleNamespace(NULL="<BB_NULL_STR>")
+
+
+ @dataclass
+ class BigBioConfig(datasets.BuilderConfig):
+     """BuilderConfig for BigBio."""
+
+     name: str = None
+     version: datasets.Version = None
+     description: str = None
+     schema: str = None
+     subset_id: str = None
+
+
+ class Tasks(Enum):
+     NAMED_ENTITY_RECOGNITION = "NER"
+     NAMED_ENTITY_DISAMBIGUATION = "NED"
+     EVENT_EXTRACTION = "EE"
+     RELATION_EXTRACTION = "RE"
+     COREFERENCE_RESOLUTION = "COREF"
+     QUESTION_ANSWERING = "QA"
+     TEXTUAL_ENTAILMENT = "TE"
+     SEMANTIC_SIMILARITY = "STS"
+     TEXT_PAIRS_CLASSIFICATION = "TXT2CLASS"
+     PARAPHRASING = "PARA"
+     TRANSLATION = "TRANSL"
+     SUMMARIZATION = "SUM"
+     TEXT_CLASSIFICATION = "TXTCLASS"
+
+
+ entailment_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "premise": datasets.Value("string"),
+         "hypothesis": datasets.Value("string"),
+         "label": datasets.Value("string"),
+     }
+ )
+
+ pairs_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "document_id": datasets.Value("string"),
+         "text_1": datasets.Value("string"),
+         "text_2": datasets.Value("string"),
+         "label": datasets.Value("string"),
+     }
+ )
+
+ qa_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "question_id": datasets.Value("string"),
+         "document_id": datasets.Value("string"),
+         "question": datasets.Value("string"),
+         "type": datasets.Value("string"),
+         "choices": [datasets.Value("string")],
+         "context": datasets.Value("string"),
+         "answer": datasets.Sequence(datasets.Value("string")),
+     }
+ )
+
+ text_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "document_id": datasets.Value("string"),
+         "text": datasets.Value("string"),
+         "labels": [datasets.Value("string")],
+     }
+ )
+
+ text2text_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "document_id": datasets.Value("string"),
+         "text_1": datasets.Value("string"),
+         "text_2": datasets.Value("string"),
+         "text_1_name": datasets.Value("string"),
+         "text_2_name": datasets.Value("string"),
+     }
+ )
+
+ kb_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "document_id": datasets.Value("string"),
+         "passages": [
+             {
+                 "id": datasets.Value("string"),
+                 "type": datasets.Value("string"),
+                 "text": datasets.Sequence(datasets.Value("string")),
+                 "offsets": datasets.Sequence([datasets.Value("int32")]),
+             }
+         ],
+         "entities": [
+             {
+                 "id": datasets.Value("string"),
+                 "type": datasets.Value("string"),
+                 "text": datasets.Sequence(datasets.Value("string")),
+                 "offsets": datasets.Sequence([datasets.Value("int32")]),
+                 "normalized": [
+                     {
+                         "db_name": datasets.Value("string"),
+                         "db_id": datasets.Value("string"),
+                     }
+                 ],
+             }
+         ],
+         "events": [
+             {
+                 "id": datasets.Value("string"),
+                 "type": datasets.Value("string"),
+                 # refers to the text_bound_annotation of the trigger
+                 "trigger": {
+                     "text": datasets.Sequence(datasets.Value("string")),
+                     "offsets": datasets.Sequence([datasets.Value("int32")]),
+                 },
+                 "arguments": [
+                     {
+                         "role": datasets.Value("string"),
+                         "ref_id": datasets.Value("string"),
+                     }
+                 ],
+             }
+         ],
+         "coreferences": [
+             {
+                 "id": datasets.Value("string"),
+                 "entity_ids": datasets.Sequence(datasets.Value("string")),
+             }
+         ],
+         "relations": [
+             {
+                 "id": datasets.Value("string"),
+                 "type": datasets.Value("string"),
+                 "arg1_id": datasets.Value("string"),
+                 "arg2_id": datasets.Value("string"),
+                 "normalized": [
+                     {
+                         "db_name": datasets.Value("string"),
+                         "db_id": datasets.Value("string"),
+                     }
+                 ],
+             }
+         ],
+     }
+ )
+
+
+ TASK_TO_SCHEMA = {
+     Tasks.NAMED_ENTITY_RECOGNITION.name: "KB",
+     Tasks.NAMED_ENTITY_DISAMBIGUATION.name: "KB",
+     Tasks.EVENT_EXTRACTION.name: "KB",
+     Tasks.RELATION_EXTRACTION.name: "KB",
+     Tasks.COREFERENCE_RESOLUTION.name: "KB",
+     Tasks.QUESTION_ANSWERING.name: "QA",
+     Tasks.TEXTUAL_ENTAILMENT.name: "TE",
+     Tasks.SEMANTIC_SIMILARITY.name: "PAIRS",
+     Tasks.TEXT_PAIRS_CLASSIFICATION.name: "PAIRS",
+     Tasks.PARAPHRASING.name: "T2T",
+     Tasks.TRANSLATION.name: "T2T",
+     Tasks.SUMMARIZATION.name: "T2T",
+     Tasks.TEXT_CLASSIFICATION.name: "TEXT",
+ }
+
+ SCHEMA_TO_TASKS = defaultdict(set)
+ for task, schema in TASK_TO_SCHEMA.items():
+     SCHEMA_TO_TASKS[schema].add(task)
+ SCHEMA_TO_TASKS = dict(SCHEMA_TO_TASKS)
+
+ VALID_TASKS = set(TASK_TO_SCHEMA.keys())
+ VALID_SCHEMAS = set(TASK_TO_SCHEMA.values())
+
+ SCHEMA_TO_FEATURES = {
+     "KB": kb_features,
+     "QA": qa_features,
+     "TE": entailment_features,
+     "T2T": text2text_features,
+     "TEXT": text_features,
+     "PAIRS": pairs_features,
+ }
+
+
+ def get_texts_and_offsets_from_bioc_ann(ann: "bioc.BioCAnnotation") -> Tuple:
+
+     offsets = [(loc.offset, loc.offset + loc.length) for loc in ann.locations]
+
+     text = ann.text
+
+     if len(offsets) > 1:
+         i = 0
+         texts = []
+         for start, end in offsets:
+             chunk_len = end - start
+             texts.append(text[i : chunk_len + i])
+             i += chunk_len
+             while i < len(text) and text[i] == " ":
+                 i += 1
+     else:
+         texts = [text]
+
+     return offsets, texts
+
+
+ def remove_prefix(a: str, prefix: str) -> str:
+     if a.startswith(prefix):
+         a = a[len(prefix) :]
+     return a
+
+
+ def parse_brat_file(
+     txt_file: Path,
+     annotation_file_suffixes: List[str] = None,
+     parse_notes: bool = False,
+ ) -> Dict:
+     """
+     Parse a brat file into the schema defined below.
+     `txt_file` should be the path to the brat '.txt' file you want to parse, e.g. 'data/1234.txt'
+     Assumes that the annotations are contained in one or more of the corresponding '.a1', '.a2' or '.ann' files,
+     e.g. 'data/1234.ann' or 'data/1234.a1' and 'data/1234.a2'.
+     Will include annotator notes, when `parse_notes == True`.
+     brat_features = datasets.Features(
+         {
+             "id": datasets.Value("string"),
+             "document_id": datasets.Value("string"),
+             "text": datasets.Value("string"),
+             "text_bound_annotations": [  # T line in brat, e.g. type or event trigger
+                 {
+                     "offsets": datasets.Sequence([datasets.Value("int32")]),
+                     "text": datasets.Sequence(datasets.Value("string")),
+                     "type": datasets.Value("string"),
+                     "id": datasets.Value("string"),
+                 }
+             ],
+             "events": [  # E line in brat
+                 {
+                     "trigger": datasets.Value(
+                         "string"
+                     ),  # refers to the text_bound_annotation of the trigger,
+                     "id": datasets.Value("string"),
+                     "type": datasets.Value("string"),
+                     "arguments": datasets.Sequence(
+                         {
+                             "role": datasets.Value("string"),
+                             "ref_id": datasets.Value("string"),
+                         }
+                     ),
+                 }
+             ],
+             "relations": [  # R line in brat
+                 {
+                     "id": datasets.Value("string"),
+                     "head": {
+                         "ref_id": datasets.Value("string"),
+                         "role": datasets.Value("string"),
+                     },
+                     "tail": {
+                         "ref_id": datasets.Value("string"),
+                         "role": datasets.Value("string"),
+                     },
+                     "type": datasets.Value("string"),
+                 }
+             ],
+             "equivalences": [  # Equiv line in brat
+                 {
+                     "id": datasets.Value("string"),
+                     "ref_ids": datasets.Sequence(datasets.Value("string")),
+                 }
+             ],
+             "attributes": [  # M or A lines in brat
+                 {
+                     "id": datasets.Value("string"),
+                     "type": datasets.Value("string"),
+                     "ref_id": datasets.Value("string"),
+                     "value": datasets.Value("string"),
+                 }
+             ],
+             "normalizations": [  # N lines in brat
+                 {
+                     "id": datasets.Value("string"),
+                     "type": datasets.Value("string"),
+                     "ref_id": datasets.Value("string"),
+                     "resource_name": datasets.Value(
+                         "string"
+                     ),  # Name of the resource, e.g. "Wikipedia"
+                     "cuid": datasets.Value(
+                         "string"
+                     ),  # ID in the resource, e.g. 534366
+                     "text": datasets.Value(
+                         "string"
+                     ),  # Human readable description/name of the entity, e.g. "Barack Obama"
+                 }
+             ],
+             ### OPTIONAL: Only included when `parse_notes == True`
+             "notes": [  # # lines in brat
+                 {
+                     "id": datasets.Value("string"),
+                     "type": datasets.Value("string"),
+                     "ref_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                 }
+             ],
+         },
+     )
+     """
+
+     example = {}
+     example["document_id"] = txt_file.with_suffix("").name
+     with txt_file.open() as f:
+         example["text"] = f.read()
+
+     # If no specific suffixes of the to-be-read annotation files are given - take standard suffixes
+     # for event extraction
+     if annotation_file_suffixes is None:
+         annotation_file_suffixes = [".a1", ".a2", ".ann"]
+
+     if len(annotation_file_suffixes) == 0:
+         raise AssertionError(
+             "At least one suffix for the to-be-read annotation files should be given!"
+         )
+
+     ann_lines = []
+     for suffix in annotation_file_suffixes:
+         annotation_file = txt_file.with_suffix(suffix)
+         if annotation_file.exists():
+             with annotation_file.open() as f:
+                 ann_lines.extend(f.readlines())
+
+     example["text_bound_annotations"] = []
+     example["events"] = []
+     example["relations"] = []
+     example["equivalences"] = []
+     example["attributes"] = []
+     example["normalizations"] = []
+
+     if parse_notes:
+         example["notes"] = []
+
+     for line in ann_lines:
+         line = line.strip()
+         if not line:
+             continue
+
+         if line.startswith("T"):  # Text bound
+             ann = {}
+             fields = line.split("\t")
+
+             ann["id"] = fields[0]
+             ann["type"] = fields[1].split()[0]
+             ann["offsets"] = []
+             span_str = remove_prefix(fields[1], (ann["type"] + " "))
+             text = fields[2]
+             for span in span_str.split(";"):
+                 start, end = span.split()
+                 ann["offsets"].append([int(start), int(end)])
+
+             # Heuristically split text of discontiguous entities into chunks
+             ann["text"] = []
+             if len(ann["offsets"]) > 1:
+                 i = 0
+                 for start, end in ann["offsets"]:
+                     chunk_len = end - start
+                     ann["text"].append(text[i : chunk_len + i])
+                     i += chunk_len
+                     while i < len(text) and text[i] == " ":
+                         i += 1
+             else:
+                 ann["text"] = [text]
+
+             example["text_bound_annotations"].append(ann)
+
+         elif line.startswith("E"):
+             ann = {}
+             fields = line.split("\t")
+
+             ann["id"] = fields[0]
+
+             ann["type"], ann["trigger"] = fields[1].split()[0].split(":")
+
+             ann["arguments"] = []
+             for role_ref_id in fields[1].split()[1:]:
+                 argument = {
+                     "role": (role_ref_id.split(":"))[0],
+                     "ref_id": (role_ref_id.split(":"))[1],
+                 }
+                 ann["arguments"].append(argument)
+
+             example["events"].append(ann)
+
+         elif line.startswith("R"):
+             ann = {}
+             fields = line.split("\t")
+
+             ann["id"] = fields[0]
+             ann["type"] = fields[1].split()[0]
+
+             ann["head"] = {
+                 "role": fields[1].split()[1].split(":")[0],
+                 "ref_id": fields[1].split()[1].split(":")[1],
+             }
+             ann["tail"] = {
+                 "role": fields[1].split()[2].split(":")[0],
+                 "ref_id": fields[1].split()[2].split(":")[1],
+             }
+
+             example["relations"].append(ann)
+
+         # '*' seems to be the legacy way to mark equivalences,
+         # but I couldn't find any info on the current way
+         # this might have to be adapted dependent on the brat version
+         # of the annotation
+         elif line.startswith("*"):
+             ann = {}
+             fields = line.split("\t")
+
+             ann["id"] = fields[0]
+             ann["ref_ids"] = fields[1].split()[1:]
+
+             example["equivalences"].append(ann)
+
+         elif line.startswith("A") or line.startswith("M"):
+             ann = {}
+             fields = line.split("\t")
+
+             ann["id"] = fields[0]
+
+             info = fields[1].split()
+             ann["type"] = info[0]
+             ann["ref_id"] = info[1]
+
+             if len(info) > 2:
+                 ann["value"] = info[2]
+             else:
+                 ann["value"] = ""
+
+             example["attributes"].append(ann)
+
+         elif line.startswith("N"):
+             ann = {}
+             fields = line.split("\t")
+
+             ann["id"] = fields[0]
+             ann["text"] = fields[2]
+
+             info = fields[1].split()
+
+             ann["type"] = info[0]
+             ann["ref_id"] = info[1]
+             ann["resource_name"] = info[2].split(":")[0]
+             ann["cuid"] = info[2].split(":")[1]
+             example["normalizations"].append(ann)
+
+         elif parse_notes and line.startswith("#"):
+             ann = {}
+             fields = line.split("\t")
+
+             ann["id"] = fields[0]
+             ann["text"] = fields[2] if len(fields) == 3 else BigBioValues.NULL
+
+             info = fields[1].split()
+
+             ann["type"] = info[0]
+             ann["ref_id"] = info[1]
+             example["notes"].append(ann)
+
+     return example
+
+
+ def brat_parse_to_bigbio_kb(brat_parse: Dict) -> Dict:
+     """
+     Transform a brat parse (conforming to the standard brat schema) obtained with
+     `parse_brat_file` into a dictionary conforming to the `bigbio-kb` schema (as defined in ../schemas/kb.py)
+     :param brat_parse:
+     """
+
+     unified_example = {}
+
+     # Prefix all ids with document id to ensure global uniqueness,
+     # because brat ids are only unique within their document
+     id_prefix = brat_parse["document_id"] + "_"
+
+     # identical
+     unified_example["document_id"] = brat_parse["document_id"]
+     unified_example["passages"] = [
+         {
+             "id": id_prefix + "_text",
+             "type": "abstract",
+             "text": [brat_parse["text"]],
+             "offsets": [[0, len(brat_parse["text"])]],
+         }
+     ]
+
+     # get normalizations
+     ref_id_to_normalizations = defaultdict(list)
+     for normalization in brat_parse["normalizations"]:
+         ref_id_to_normalizations[normalization["ref_id"]].append(
+             {
+                 "db_name": normalization["resource_name"],
+                 "db_id": normalization["cuid"],
+             }
+         )
+
+     # separate entities and event triggers
+     unified_example["events"] = []
+     non_event_ann = brat_parse["text_bound_annotations"].copy()
+     for event in brat_parse["events"]:
+         event = event.copy()
+         event["id"] = id_prefix + event["id"]
+         trigger = next(
+             tr
+             for tr in brat_parse["text_bound_annotations"]
+             if tr["id"] == event["trigger"]
+         )
+         if trigger in non_event_ann:
+             non_event_ann.remove(trigger)
+         event["trigger"] = {
+             "text": trigger["text"].copy(),
+             "offsets": trigger["offsets"].copy(),
+         }
+         for argument in event["arguments"]:
+             argument["ref_id"] = id_prefix + argument["ref_id"]
+
+         unified_example["events"].append(event)
+
+     unified_example["entities"] = []
+     anno_ids = [ref_id["id"] for ref_id in non_event_ann]
+     for ann in non_event_ann:
+         entity_ann = ann.copy()
+         entity_ann["id"] = id_prefix + entity_ann["id"]
+         entity_ann["normalized"] = ref_id_to_normalizations[ann["id"]]
+         unified_example["entities"].append(entity_ann)
+
+     # massage relations
+     unified_example["relations"] = []
+     skipped_relations = set()
+     for ann in brat_parse["relations"]:
+         if (
+             ann["head"]["ref_id"] not in anno_ids
+             or ann["tail"]["ref_id"] not in anno_ids
+         ):
+             skipped_relations.add(ann["id"])
+             continue
+         unified_example["relations"].append(
+             {
+                 "arg1_id": id_prefix + ann["head"]["ref_id"],
+                 "arg2_id": id_prefix + ann["tail"]["ref_id"],
+                 "id": id_prefix + ann["id"],
+                 "type": ann["type"],
+                 "normalized": [],
+             }
+         )
+     if len(skipped_relations) > 0:
+         example_id = brat_parse["document_id"]
+         logger.info(
+             f"Example:{example_id}: The `bigbio_kb` schema allows `relations` only between entities."
+             f" Skip (for now): "
+             f"{list(skipped_relations)}"
+         )
+
+     # get coreferences
+     unified_example["coreferences"] = []
+     for i, ann in enumerate(brat_parse["equivalences"], start=1):
+         is_entity_cluster = True
+         for ref_id in ann["ref_ids"]:
+             if not ref_id.startswith("T"):  # not textbound -> no entity
+                 is_entity_cluster = False
+             elif ref_id not in anno_ids:  # event trigger -> no entity
+                 is_entity_cluster = False
+         if is_entity_cluster:
+             entity_ids = [id_prefix + i for i in ann["ref_ids"]]
+             unified_example["coreferences"].append(
+                 {"id": id_prefix + str(i), "entity_ids": entity_ids}
+             )
+     return unified_example
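
For orientation, the task/schema tables above compose as follows: both Mantra GSC tasks resolve to the KB schema, whose features the loader exposes. A small sketch, assuming `bigbiohub.py` is importable from the working directory:

```python
from bigbiohub import SCHEMA_TO_FEATURES, TASK_TO_SCHEMA, Tasks

# Both Mantra GSC tasks (NER, NED) map to the "KB" schema...
ner_schema = TASK_TO_SCHEMA[Tasks.NAMED_ENTITY_RECOGNITION.name]
ned_schema = TASK_TO_SCHEMA[Tasks.NAMED_ENTITY_DISAMBIGUATION.name]
assert ner_schema == ned_schema == "KB"

# ...so bigbio_kb configs expose kb_features.
features = SCHEMA_TO_FEATURES[ner_schema]
print(sorted(features))
# ['coreferences', 'document_id', 'entities', 'events', 'id', 'passages', 'relations']
```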
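
And an end-to-end sketch of `parse_brat_file` feeding `brat_parse_to_bigbio_kb`, using an invented one-sentence brat document (the file name, entity, and CUI are illustrative only):

```python
from pathlib import Path
from tempfile import TemporaryDirectory

from bigbiohub import brat_parse_to_bigbio_kb, parse_brat_file

with TemporaryDirectory() as tmp:
    txt = Path(tmp) / "doc1.txt"
    txt.write_text("Aspirin prevents headaches.")
    # One text-bound annotation (T line) and one normalization (N line),
    # tab-separated as in the brat standoff format.
    (Path(tmp) / "doc1.ann").write_text(
        "T1\tCHEM 0 7\tAspirin\n"
        "N1\tReference T1 UMLS:C0004057\tacetylsalicylic acid\n"
    )

    brat = parse_brat_file(txt)
    kb = brat_parse_to_bigbio_kb(brat)

entity = kb["entities"][0]
print(entity["id"])          # "doc1_T1" -- ids get the document-id prefix
print(entity["offsets"])     # [[0, 7]]
print(entity["normalized"])  # [{'db_name': 'UMLS', 'db_id': 'C0004057'}]
```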
mantra_gsc.py ADDED
@@ -0,0 +1,298 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import ast
+ from itertools import product
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from .bigbiohub import (BigBioConfig, Tasks, brat_parse_to_bigbio_kb,
+                         kb_features, parse_brat_file)
+
+ _LANGUAGES = ["English", "French", "German", "Dutch", "Spanish"]
+
+ _LOCAL = False
+ _PUBMED = True
+
+ _CITATION = """\
+ @article{10.1093/jamia/ocv037,
+     author = {Kors, Jan A and Clematide, Simon and Akhondi,
+ Saber A and van Mulligen, Erik M and Rebholz-Schuhmann, Dietrich},
+     title = "{A multilingual gold-standard corpus for biomedical concept recognition: the Mantra GSC}",
+     journal = {Journal of the American Medical Informatics Association},
+     volume = {22},
+     number = {5},
+     pages = {948-956},
+     year = {2015},
+     month = {05},
+     abstract = "{Objective To create a multilingual gold-standard corpus for biomedical concept recognition.Materials
+ and methods We selected text units from different parallel corpora (Medline abstract titles, drug labels,
+ biomedical patent claims) in English, French, German, Spanish, and Dutch. Three annotators per language
+ independently annotated the biomedical concepts, based on a subset of the Unified Medical Language System and
+ covering a wide range of semantic groups. To reduce the annotation workload, automatically generated
+ preannotations were provided. Individual annotations were automatically harmonized and then adjudicated, and
+ cross-language consistency checks were carried out to arrive at the final annotations.Results The number of final
+ annotations was 5530. Inter-annotator agreement scores indicate good agreement (median F-score 0.79), and are
+ similar to those between individual annotators and the gold standard. The automatically generated harmonized
+ annotation set for each language performed equally well as the best annotator for that language.Discussion The use
+ of automatic preannotations, harmonized annotations, and parallel corpora helped to keep the manual annotation
+ efforts manageable. The inter-annotator agreement scores provide a reference standard for gauging the performance
+ of automatic annotation techniques.Conclusion To our knowledge, this is the first gold-standard corpus for
+ biomedical concept recognition in languages other than English. Other distinguishing features are the wide variety
+ of semantic groups that are being covered, and the diversity of text genres that were annotated.}",
+     issn = {1067-5027},
+     doi = {10.1093/jamia/ocv037},
+     url = {https://doi.org/10.1093/jamia/ocv037},
+     eprint = {https://academic.oup.com/jamia/article-pdf/22/5/948/34146393/ocv037.pdf},
+ }
+ """
+
+ _DATASETNAME = "mantra_gsc"
+ _DISPLAYNAME = "Mantra GSC"
+
+ _DESCRIPTION = """\
+ We selected text units from different parallel corpora (Medline abstract titles, drug labels, biomedical patent claims)
+ in English, French, German, Spanish, and Dutch. Three annotators per language independently annotated the biomedical
+ concepts, based on a subset of the Unified Medical Language System and covering a wide range of semantic groups.
+ """
+
+ _HOMEPAGE = "https://github.com/mi-erasmusmc/Mantra-Gold-Standard-Corpus"
+
+ _LICENSE = "GPL_3p0_ONLY"
+
+ _URLS = {
+     _DATASETNAME: "https://github.com/mi-erasmusmc/Mantra-Gold-Standard-Corpus/raw/main/Mantra-GSC-brat.zip",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.NAMED_ENTITY_DISAMBIGUATION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _BIGBIO_VERSION = "1.0.0"
+
+ _LANGUAGES_2 = {
+     "es": "Spanish",
+     "fr": "French",
+     "de": "German",
+     "nl": "Dutch",
+     "en": "English",
+ }
+
+ _DATASET_TYPES = {
+     "emea": "EMEA",
+     "medline": "Medline",
+     "patents": "Patents",
+ }
+
+
+ class MantraGSCDataset(datasets.GeneratorBasedBuilder):
+     """Mantra Gold Standard Corpus (GSC) dataset."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = []
+
+     for language, dataset_type in product(_LANGUAGES_2, _DATASET_TYPES):
+         if dataset_type == "patents" and language in ["nl", "es"]:
+             continue
+
+         BUILDER_CONFIGS.append(
+             BigBioConfig(
+                 name=f"mantra_gsc_{language}_{dataset_type}_source",
+                 version=SOURCE_VERSION,
+                 description=f"Mantra GSC {_LANGUAGES_2[language]} {_DATASET_TYPES[dataset_type]} source schema",
+                 schema="source",
+                 subset_id=f"mantra_gsc_{language}_{_DATASET_TYPES[dataset_type]}",
+             )
+         )
+         BUILDER_CONFIGS.append(
+             BigBioConfig(
+                 name=f"mantra_gsc_{language}_{dataset_type}_bigbio_kb",
+                 version=SOURCE_VERSION,
+                 description=f"Mantra GSC {_LANGUAGES_2[language]} {_DATASET_TYPES[dataset_type]} BigBio schema",
+                 schema="bigbio_kb",
+                 subset_id=f"mantra_gsc_{language}_{_DATASET_TYPES[dataset_type]}",
+             )
+         )
+
+     DEFAULT_CONFIG_NAME = "mantra_gsc_en_medline_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "document_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "entities": [
+                         {
+                             "entity_id": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "offsets": datasets.Sequence([datasets.Value("int32")]),
+                             "text": datasets.Sequence(datasets.Value("string")),
+                             "cui": datasets.Value("string"),
+                             "preferred_term": datasets.Value("string"),
+                             "semantic_type": datasets.Value("string"),
+                             "normalized": [
+                                 {
+                                     "db_name": datasets.Value("string"),
+                                     "db_id": datasets.Value("string"),
+                                 }
+                             ],
+                         }
+                     ],
+                 }
+             )
+
+         elif self.config.schema == "bigbio_kb":
+             features = kb_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         urls = _URLS[_DATASETNAME]
+         data_dir = dl_manager.download_and_extract(urls)
+         data_dir = Path(data_dir) / "Mantra-GSC"
+
+         language, dataset_type = self.config.name.split("_")[2:4]
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "data_dir": data_dir,
+                     "language": language,
+                     "dataset_type": dataset_type,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, data_dir: Path, language: str, dataset_type: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         data_dir = data_dir / f"{_LANGUAGES_2[language]}"
+
+         if dataset_type in ["patents", "emea"]:
+             data_dir = data_dir / f"{_DATASET_TYPES[dataset_type]}_ec22-cui-best_man"
+         else:
+             # It is Medline now
+             if language != "en":
+                 data_dir = data_dir / f"{_DATASET_TYPES[dataset_type]}_EN_{language.upper()}_ec22-cui-best_man"
+             else:
+                 data_dir = [
+                     data_dir / f"{_DATASET_TYPES[dataset_type]}_EN_{_lang.upper()}_ec22-cui-best_man"
+                     for _lang in _LANGUAGES_2
+                     if _lang != "en"
+                 ]
+
+         if not isinstance(data_dir, list):
+             data_dir: List[Path] = [data_dir]
+
+         raw_files = [raw_file for _dir in data_dir for raw_file in _dir.glob("*.txt")]
+
+         if self.config.schema == "source":
+             for i, raw_file in enumerate(raw_files):
+                 brat_example = parse_brat_file(raw_file, parse_notes=True)
+                 source_example = self._to_source_example(brat_example)
+                 yield i, source_example
+
+         elif self.config.schema == "bigbio_kb":
+             for i, raw_file in enumerate(raw_files):
+                 brat_example = parse_brat_file(raw_file, parse_notes=True)
+                 brat_to_bigbio_example = self._brat_to_bigbio_example(brat_example)
+                 kb_example = brat_parse_to_bigbio_kb(brat_to_bigbio_example)
+                 kb_example["id"] = i
+                 yield i, kb_example
+
+     def _to_source_example(self, brat_example: Dict) -> Dict:
+         source_example = {
+             "document_id": brat_example["document_id"],
+             "text": brat_example["text"],
+         }
+
+         source_example["entities"] = []
+         for entity_annotation, ann_notes in zip(brat_example["text_bound_annotations"], brat_example["notes"]):
+             entity_ann = entity_annotation.copy()
+
+             # Change id property name
+             entity_ann["entity_id"] = entity_ann["id"]
+             entity_ann.pop("id")
+
+             # Get values from annotator notes
+             assert entity_ann["entity_id"] == ann_notes["ref_id"]
+             notes_values = ast.literal_eval(ann_notes["text"])
+             if len(notes_values) == 4:
+                 cui, preferred_term, semantic_type, semantic_group = notes_values
+             else:
+                 preferred_term, semantic_type, semantic_group = notes_values
+                 cui = entity_ann["type"]
+             entity_ann["cui"] = cui
+             entity_ann["preferred_term"] = preferred_term
+             entity_ann["semantic_type"] = semantic_type
+             entity_ann["type"] = semantic_group
+             entity_ann["normalized"] = [{"db_name": "UMLS", "db_id": cui}]
+
+             # Add entity annotation to sample
+             source_example["entities"].append(entity_ann)
+
+         return source_example
+
+     def _brat_to_bigbio_example(self, brat_example: Dict) -> Dict:
+         kb_example = {
+             "document_id": brat_example["document_id"],
+             # "unit_id": unit_id,
+             "text": brat_example["text"],
+         }
+         kb_example["text_bound_annotations"] = []
+         kb_example["normalizations"] = []
+         for entity_annotation, ann_notes in zip(brat_example["text_bound_annotations"], brat_example["notes"]):
+             entity_ann = entity_annotation.copy()
+             # Get values from annotator notes
+             assert entity_ann["id"] == ann_notes["ref_id"]
+             notes_values = ast.literal_eval(ann_notes["text"])
+             if len(notes_values) == 4:
+                 cui, _, _, semantic_group = notes_values
+             else:
+                 _, _, semantic_group = notes_values
+                 cui = entity_ann["type"]
+             entity_ann["type"] = semantic_group
+             kb_example["text_bound_annotations"].append(entity_ann)
+             kb_example["normalizations"].append(
+                 {
+                     "type": semantic_group,
+                     "ref_id": entity_ann["id"],
+                     "resource_name": "UMLS",
+                     "cuid": cui,
+                     "text": "",
+                 }
+             )
+
+         kb_example["events"] = brat_example["events"]
+         kb_example["relations"] = brat_example["relations"]
+         kb_example["equivalences"] = brat_example["equivalences"]
+         kb_example["attributes"] = brat_example["attributes"]
+         kb_example["notes"] = brat_example["notes"]
+
+         return kb_example
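
Both `_to_source_example` and `_brat_to_bigbio_example` recover the UMLS grounding by `ast.literal_eval`-ing the brat annotator note attached to each entity. A standalone sketch of that step; the note string here is an invented example of the 4-element layout the code unpacks (CUI, preferred term, semantic type, semantic group):

```python
import ast

# Invented annotator-note payload in the 4-tuple layout the loader unpacks.
note_text = "('C0004057', 'acetylsalicylic acid', 'T109', 'CHEM')"

notes_values = ast.literal_eval(note_text)
if len(notes_values) == 4:
    cui, preferred_term, semantic_type, semantic_group = notes_values
else:
    # 3-element notes carry no CUI; the loader then falls back to the
    # entity's brat type field (cui = entity_ann["type"]).
    preferred_term, semantic_type, semantic_group = notes_values
    cui = None  # placeholder for this standalone sketch

print(semantic_group, {"db_name": "UMLS", "db_id": cui})
```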