Datasets:
Languages: Romanian
License: cc-by-sa-4.0
phlobo committed · commit e3789d2 · verified · 1 parent: ea4e0d3

Update monero based on git version 974c531

Files changed (3):
  1. README.md +48 -0
  2. bigbiohub.py +592 -0
  3. monero.py +291 -0
README.md ADDED
@@ -0,0 +1,48 @@
---
language:
- ro
bigbio_language:
- Romanian
license: cc-by-sa-4.0
multilinguality: monolingual
bigbio_license_shortname: CC_BY_SA_4p0
pretty_name: MoNERo
homepage: https://www.racai.ro/en/tools/text/
bigbio_pubmed: False
bigbio_public: True
bigbio_tasks:
- NAMED_ENTITY_RECOGNITION
---


# Dataset Card for MoNERo

## Dataset Description

- **Homepage:** https://www.racai.ro/en/tools/text/
- **Pubmed:** False
- **Public:** True
- **Tasks:** NER

MoNERo: a biomedical gold standard corpus for the Romanian language, annotated for part-of-speech tagging and named entity recognition.


## Citation Information

```
@inproceedings{mitrofan-etal-2019-monero,
    title = {{M}o{NER}o: a Biomedical Gold Standard Corpus for the {R}omanian Language},
    author = {Mitrofan, Maria and Barbu Mititelu, Verginica and Mitrofan, Grigorina},
    booktitle = "Proceedings of the 18th BioNLP Workshop and Shared Task",
    month = aug,
    year = "2019",
    address = "Florence, Italy",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W19-5008",
    doi = "10.18653/v1/W19-5008",
    pages = "71--79",
    biburl = {https://aclanthology.org/W19-5008.bib},
    bibsource = {https://aclanthology.org/W19-5008/}
}
```
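
As a quick orientation for readers of this commit, the snippet below sketches the intended way to load both configs once the script is live. The repo id `bigbio/monero` and the `trust_remote_code` flag are assumptions (the flag is only required on newer `datasets` releases); treat this as a sketch, not part of the committed files.

```python
from datasets import load_dataset

# Source schema: per-sentence token / lemma / NER-tag / POS-tag columns.
monero_source = load_dataset(
    "bigbio/monero", name="monero_source", trust_remote_code=True
)

# Harmonized BigBio KB schema: passages plus character-offset entities.
monero_kb = load_dataset(
    "bigbio/monero", name="monero_bigbio_kb", trust_remote_code=True
)

print(monero_source["train"][0]["tokens"][:5])
```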
bigbiohub.py ADDED
@@ -0,0 +1,592 @@
from collections import defaultdict
from dataclasses import dataclass
from enum import Enum
import logging
from pathlib import Path
from types import SimpleNamespace
from typing import TYPE_CHECKING, Dict, Iterable, List, Tuple

import datasets

if TYPE_CHECKING:
    import bioc

logger = logging.getLogger(__name__)


BigBioValues = SimpleNamespace(NULL="<BB_NULL_STR>")


@dataclass
class BigBioConfig(datasets.BuilderConfig):
    """BuilderConfig for BigBio."""

    name: str = None
    version: datasets.Version = None
    description: str = None
    schema: str = None
    subset_id: str = None


class Tasks(Enum):
    NAMED_ENTITY_RECOGNITION = "NER"
    NAMED_ENTITY_DISAMBIGUATION = "NED"
    EVENT_EXTRACTION = "EE"
    RELATION_EXTRACTION = "RE"
    COREFERENCE_RESOLUTION = "COREF"
    QUESTION_ANSWERING = "QA"
    TEXTUAL_ENTAILMENT = "TE"
    SEMANTIC_SIMILARITY = "STS"
    TEXT_PAIRS_CLASSIFICATION = "TXT2CLASS"
    PARAPHRASING = "PARA"
    TRANSLATION = "TRANSL"
    SUMMARIZATION = "SUM"
    TEXT_CLASSIFICATION = "TXTCLASS"


entailment_features = datasets.Features(
    {
        "id": datasets.Value("string"),
        "premise": datasets.Value("string"),
        "hypothesis": datasets.Value("string"),
        "label": datasets.Value("string"),
    }
)

pairs_features = datasets.Features(
    {
        "id": datasets.Value("string"),
        "document_id": datasets.Value("string"),
        "text_1": datasets.Value("string"),
        "text_2": datasets.Value("string"),
        "label": datasets.Value("string"),
    }
)

qa_features = datasets.Features(
    {
        "id": datasets.Value("string"),
        "question_id": datasets.Value("string"),
        "document_id": datasets.Value("string"),
        "question": datasets.Value("string"),
        "type": datasets.Value("string"),
        "choices": [datasets.Value("string")],
        "context": datasets.Value("string"),
        "answer": datasets.Sequence(datasets.Value("string")),
    }
)

text_features = datasets.Features(
    {
        "id": datasets.Value("string"),
        "document_id": datasets.Value("string"),
        "text": datasets.Value("string"),
        "labels": [datasets.Value("string")],
    }
)

text2text_features = datasets.Features(
    {
        "id": datasets.Value("string"),
        "document_id": datasets.Value("string"),
        "text_1": datasets.Value("string"),
        "text_2": datasets.Value("string"),
        "text_1_name": datasets.Value("string"),
        "text_2_name": datasets.Value("string"),
    }
)

kb_features = datasets.Features(
    {
        "id": datasets.Value("string"),
        "document_id": datasets.Value("string"),
        "passages": [
            {
                "id": datasets.Value("string"),
                "type": datasets.Value("string"),
                "text": datasets.Sequence(datasets.Value("string")),
                "offsets": datasets.Sequence([datasets.Value("int32")]),
            }
        ],
        "entities": [
            {
                "id": datasets.Value("string"),
                "type": datasets.Value("string"),
                "text": datasets.Sequence(datasets.Value("string")),
                "offsets": datasets.Sequence([datasets.Value("int32")]),
                "normalized": [
                    {
                        "db_name": datasets.Value("string"),
                        "db_id": datasets.Value("string"),
                    }
                ],
            }
        ],
        "events": [
            {
                "id": datasets.Value("string"),
                "type": datasets.Value("string"),
                # refers to the text_bound_annotation of the trigger
                "trigger": {
                    "text": datasets.Sequence(datasets.Value("string")),
                    "offsets": datasets.Sequence([datasets.Value("int32")]),
                },
                "arguments": [
                    {
                        "role": datasets.Value("string"),
                        "ref_id": datasets.Value("string"),
                    }
                ],
            }
        ],
        "coreferences": [
            {
                "id": datasets.Value("string"),
                "entity_ids": datasets.Sequence(datasets.Value("string")),
            }
        ],
        "relations": [
            {
                "id": datasets.Value("string"),
                "type": datasets.Value("string"),
                "arg1_id": datasets.Value("string"),
                "arg2_id": datasets.Value("string"),
                "normalized": [
                    {
                        "db_name": datasets.Value("string"),
                        "db_id": datasets.Value("string"),
                    }
                ],
            }
        ],
    }
)


TASK_TO_SCHEMA = {
    Tasks.NAMED_ENTITY_RECOGNITION.name: "KB",
    Tasks.NAMED_ENTITY_DISAMBIGUATION.name: "KB",
    Tasks.EVENT_EXTRACTION.name: "KB",
    Tasks.RELATION_EXTRACTION.name: "KB",
    Tasks.COREFERENCE_RESOLUTION.name: "KB",
    Tasks.QUESTION_ANSWERING.name: "QA",
    Tasks.TEXTUAL_ENTAILMENT.name: "TE",
    Tasks.SEMANTIC_SIMILARITY.name: "PAIRS",
    Tasks.TEXT_PAIRS_CLASSIFICATION.name: "PAIRS",
    Tasks.PARAPHRASING.name: "T2T",
    Tasks.TRANSLATION.name: "T2T",
    Tasks.SUMMARIZATION.name: "T2T",
    Tasks.TEXT_CLASSIFICATION.name: "TEXT",
}

SCHEMA_TO_TASKS = defaultdict(set)
for task, schema in TASK_TO_SCHEMA.items():
    SCHEMA_TO_TASKS[schema].add(task)
SCHEMA_TO_TASKS = dict(SCHEMA_TO_TASKS)

VALID_TASKS = set(TASK_TO_SCHEMA.keys())
VALID_SCHEMAS = set(TASK_TO_SCHEMA.values())

SCHEMA_TO_FEATURES = {
    "KB": kb_features,
    "QA": qa_features,
    "TE": entailment_features,
    "T2T": text2text_features,
    "TEXT": text_features,
    "PAIRS": pairs_features,
}


def get_texts_and_offsets_from_bioc_ann(ann: "bioc.BioCAnnotation") -> Tuple:

    offsets = [(loc.offset, loc.offset + loc.length) for loc in ann.locations]

    text = ann.text

    if len(offsets) > 1:
        i = 0
        texts = []
        for start, end in offsets:
            chunk_len = end - start
            texts.append(text[i : chunk_len + i])
            i += chunk_len
            while i < len(text) and text[i] == " ":
                i += 1
    else:
        texts = [text]

    return offsets, texts


def remove_prefix(a: str, prefix: str) -> str:
    if a.startswith(prefix):
        a = a[len(prefix) :]
    return a


def parse_brat_file(
    txt_file: Path,
    annotation_file_suffixes: List[str] = None,
    parse_notes: bool = False,
) -> Dict:
    """
    Parse a brat file into the schema defined below.
    `txt_file` should be the path to the brat '.txt' file you want to parse, e.g. 'data/1234.txt'
    Assumes that the annotations are contained in one or more of the corresponding '.a1', '.a2' or '.ann' files,
    e.g. 'data/1234.ann' or 'data/1234.a1' and 'data/1234.a2'.
    Will include annotator notes, when `parse_notes == True`.
    brat_features = datasets.Features(
        {
            "id": datasets.Value("string"),
            "document_id": datasets.Value("string"),
            "text": datasets.Value("string"),
            "text_bound_annotations": [  # T line in brat, e.g. type or event trigger
                {
                    "offsets": datasets.Sequence([datasets.Value("int32")]),
                    "text": datasets.Sequence(datasets.Value("string")),
                    "type": datasets.Value("string"),
                    "id": datasets.Value("string"),
                }
            ],
            "events": [  # E line in brat
                {
                    "trigger": datasets.Value(
                        "string"
                    ),  # refers to the text_bound_annotation of the trigger,
                    "id": datasets.Value("string"),
                    "type": datasets.Value("string"),
                    "arguments": datasets.Sequence(
                        {
                            "role": datasets.Value("string"),
                            "ref_id": datasets.Value("string"),
                        }
                    ),
                }
            ],
            "relations": [  # R line in brat
                {
                    "id": datasets.Value("string"),
                    "head": {
                        "ref_id": datasets.Value("string"),
                        "role": datasets.Value("string"),
                    },
                    "tail": {
                        "ref_id": datasets.Value("string"),
                        "role": datasets.Value("string"),
                    },
                    "type": datasets.Value("string"),
                }
            ],
            "equivalences": [  # Equiv line in brat
                {
                    "id": datasets.Value("string"),
                    "ref_ids": datasets.Sequence(datasets.Value("string")),
                }
            ],
            "attributes": [  # M or A lines in brat
                {
                    "id": datasets.Value("string"),
                    "type": datasets.Value("string"),
                    "ref_id": datasets.Value("string"),
                    "value": datasets.Value("string"),
                }
            ],
            "normalizations": [  # N lines in brat
                {
                    "id": datasets.Value("string"),
                    "type": datasets.Value("string"),
                    "ref_id": datasets.Value("string"),
                    "resource_name": datasets.Value(
                        "string"
                    ),  # Name of the resource, e.g. "Wikipedia"
                    "cuid": datasets.Value(
                        "string"
                    ),  # ID in the resource, e.g. 534366
                    "text": datasets.Value(
                        "string"
                    ),  # Human readable description/name of the entity, e.g. "Barack Obama"
                }
            ],
            ### OPTIONAL: Only included when `parse_notes == True`
            "notes": [  # # lines in brat
                {
                    "id": datasets.Value("string"),
                    "type": datasets.Value("string"),
                    "ref_id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            ],
        },
    )
    """

    example = {}
    example["document_id"] = txt_file.with_suffix("").name
    with txt_file.open() as f:
        example["text"] = f.read()

    # If no specific suffixes of the to-be-read annotation files are given - take standard suffixes
    # for event extraction
    if annotation_file_suffixes is None:
        annotation_file_suffixes = [".a1", ".a2", ".ann"]

    if len(annotation_file_suffixes) == 0:
        raise AssertionError(
            "At least one suffix for the to-be-read annotation files should be given!"
        )

    ann_lines = []
    for suffix in annotation_file_suffixes:
        annotation_file = txt_file.with_suffix(suffix)
        try:
            with annotation_file.open() as f:
                ann_lines.extend(f.readlines())
        except Exception:
            continue

    example["text_bound_annotations"] = []
    example["events"] = []
    example["relations"] = []
    example["equivalences"] = []
    example["attributes"] = []
    example["normalizations"] = []

    if parse_notes:
        example["notes"] = []

    for line in ann_lines:
        line = line.strip()
        if not line:
            continue

        if line.startswith("T"):  # Text bound
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]
            ann["type"] = fields[1].split()[0]
            ann["offsets"] = []
            span_str = remove_prefix(fields[1], (ann["type"] + " "))
            text = fields[2]
            for span in span_str.split(";"):
                start, end = span.split()
                ann["offsets"].append([int(start), int(end)])

            # Heuristically split text of discontiguous entities into chunks
            ann["text"] = []
            if len(ann["offsets"]) > 1:
                i = 0
                for start, end in ann["offsets"]:
                    chunk_len = end - start
                    ann["text"].append(text[i : chunk_len + i])
                    i += chunk_len
                    while i < len(text) and text[i] == " ":
                        i += 1
            else:
                ann["text"] = [text]

            example["text_bound_annotations"].append(ann)

        elif line.startswith("E"):
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]

            ann["type"], ann["trigger"] = fields[1].split()[0].split(":")

            ann["arguments"] = []
            for role_ref_id in fields[1].split()[1:]:
                argument = {
                    "role": (role_ref_id.split(":"))[0],
                    "ref_id": (role_ref_id.split(":"))[1],
                }
                ann["arguments"].append(argument)

            example["events"].append(ann)

        elif line.startswith("R"):
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]
            ann["type"] = fields[1].split()[0]

            ann["head"] = {
                "role": fields[1].split()[1].split(":")[0],
                "ref_id": fields[1].split()[1].split(":")[1],
            }
            ann["tail"] = {
                "role": fields[1].split()[2].split(":")[0],
                "ref_id": fields[1].split()[2].split(":")[1],
            }

            example["relations"].append(ann)

        # '*' seems to be the legacy way to mark equivalences,
        # but I couldn't find any info on the current way
        # this might have to be adapted dependent on the brat version
        # of the annotation
        elif line.startswith("*"):
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]
            ann["ref_ids"] = fields[1].split()[1:]

            example["equivalences"].append(ann)

        elif line.startswith("A") or line.startswith("M"):
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]

            info = fields[1].split()
            ann["type"] = info[0]
            ann["ref_id"] = info[1]

            if len(info) > 2:
                ann["value"] = info[2]
            else:
                ann["value"] = ""

            example["attributes"].append(ann)

        elif line.startswith("N"):
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]
            ann["text"] = fields[2]

            info = fields[1].split()

            ann["type"] = info[0]
            ann["ref_id"] = info[1]
            ann["resource_name"] = info[2].split(":")[0]
            ann["cuid"] = info[2].split(":")[1]
            example["normalizations"].append(ann)

        elif parse_notes and line.startswith("#"):
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]
            ann["text"] = fields[2] if len(fields) == 3 else BigBioValues.NULL

            info = fields[1].split()

            ann["type"] = info[0]
            ann["ref_id"] = info[1]
            example["notes"].append(ann)

    return example


def brat_parse_to_bigbio_kb(brat_parse: Dict) -> Dict:
    """
    Transform a brat parse (conforming to the standard brat schema) obtained with
    `parse_brat_file` into a dictionary conforming to the `bigbio-kb` schema (as defined in ../schemas/kb.py)
    :param brat_parse:
    """

    unified_example = {}

    # Prefix all ids with document id to ensure global uniqueness,
    # because brat ids are only unique within their document
    id_prefix = brat_parse["document_id"] + "_"

    # identical
    unified_example["document_id"] = brat_parse["document_id"]
    unified_example["passages"] = [
        {
            "id": id_prefix + "_text",
            "type": "abstract",
            "text": [brat_parse["text"]],
            "offsets": [[0, len(brat_parse["text"])]],
        }
    ]

    # get normalizations
    ref_id_to_normalizations = defaultdict(list)
    for normalization in brat_parse["normalizations"]:
        ref_id_to_normalizations[normalization["ref_id"]].append(
            {
                "db_name": normalization["resource_name"],
                "db_id": normalization["cuid"],
            }
        )

    # separate entities and event triggers
    unified_example["events"] = []
    non_event_ann = brat_parse["text_bound_annotations"].copy()
    for event in brat_parse["events"]:
        event = event.copy()
        event["id"] = id_prefix + event["id"]
        trigger = next(
            tr
            for tr in brat_parse["text_bound_annotations"]
            if tr["id"] == event["trigger"]
        )
        if trigger in non_event_ann:
            non_event_ann.remove(trigger)
        event["trigger"] = {
            "text": trigger["text"].copy(),
            "offsets": trigger["offsets"].copy(),
        }
        for argument in event["arguments"]:
            argument["ref_id"] = id_prefix + argument["ref_id"]

        unified_example["events"].append(event)

    unified_example["entities"] = []
    anno_ids = [ref_id["id"] for ref_id in non_event_ann]
    for ann in non_event_ann:
        entity_ann = ann.copy()
        entity_ann["id"] = id_prefix + entity_ann["id"]
        entity_ann["normalized"] = ref_id_to_normalizations[ann["id"]]
        unified_example["entities"].append(entity_ann)

    # massage relations
    unified_example["relations"] = []
    skipped_relations = set()
    for ann in brat_parse["relations"]:
        if (
            ann["head"]["ref_id"] not in anno_ids
            or ann["tail"]["ref_id"] not in anno_ids
        ):
            skipped_relations.add(ann["id"])
            continue
        unified_example["relations"].append(
            {
                "arg1_id": id_prefix + ann["head"]["ref_id"],
                "arg2_id": id_prefix + ann["tail"]["ref_id"],
                "id": id_prefix + ann["id"],
                "type": ann["type"],
                "normalized": [],
            }
        )
    if len(skipped_relations) > 0:
        example_id = brat_parse["document_id"]
        logger.info(
            f"Example:{example_id}: The `bigbio_kb` schema allows `relations` only between entities."
            f" Skip (for now): "
            f"{list(skipped_relations)}"
        )

    # get coreferences
    unified_example["coreferences"] = []
    for i, ann in enumerate(brat_parse["equivalences"], start=1):
        is_entity_cluster = True
        for ref_id in ann["ref_ids"]:
            if not ref_id.startswith("T"):  # not textbound -> no entity
                is_entity_cluster = False
            elif ref_id not in anno_ids:  # event trigger -> no entity
                is_entity_cluster = False
        if is_entity_cluster:
            entity_ids = [id_prefix + i for i in ann["ref_ids"]]
            unified_example["coreferences"].append(
                {"id": id_prefix + str(i), "entity_ids": entity_ids}
            )
    return unified_example
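
For context, a minimal sketch of how the two brat helpers above are meant to be chained. The paths are the hypothetical ones from the `parse_brat_file` docstring; nothing here is part of the committed module.

```python
from pathlib import Path

from bigbiohub import brat_parse_to_bigbio_kb, parse_brat_file

# Parse data/1234.txt together with any sibling .a1/.a2/.ann annotation files.
brat_example = parse_brat_file(Path("data/1234.txt"), parse_notes=False)

# Lift the brat parse into the unified bigbio_kb schema: ids get prefixed with
# the document id, and relations that do not link two entities are skipped.
kb_example = brat_parse_to_bigbio_kb(brat_example)
print(kb_example["document_id"], len(kb_example["entities"]))
```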
monero.py ADDED
@@ -0,0 +1,291 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from pathlib import Path
from typing import Any, Dict, List, Tuple

import datasets

from .bigbiohub import BigBioConfig, Tasks, kb_features

_LANGUAGES = ["Romanian"]
_PUBMED = False
_LOCAL = False

_CITATION = """\
@inproceedings{mitrofan-etal-2019-monero,
    title = {{M}o{NER}o: a Biomedical Gold Standard Corpus for the {R}omanian Language},
    author = {Mitrofan, Maria and Barbu Mititelu, Verginica and Mitrofan, Grigorina},
    booktitle = "Proceedings of the 18th BioNLP Workshop and Shared Task",
    month = aug,
    year = "2019",
    address = "Florence, Italy",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W19-5008",
    doi = "10.18653/v1/W19-5008",
    pages = "71--79",
    biburl = {https://aclanthology.org/W19-5008.bib},
    bibsource = {https://aclanthology.org/W19-5008/}
}
"""

_DATASETNAME = "monero"
_DISPLAYNAME = "MoNERo"

_DESCRIPTION = """\
MoNERo: a biomedical gold standard corpus for the Romanian language, annotated for part-of-speech \
tagging and named entity recognition.
"""

_HOMEPAGE = "https://www.racai.ro/en/tools/text/"
_LICENSE = "CC_BY_SA_4p0"

_URLS = {
    # The original dataset is distributed in 7z format, hence it was downloaded and re-uploaded
    # in tar.gz format. Converted via the following commands:
    #   curl -JLO https://www.racai.ro/media/MoNERo_2019.7z
    #   mkdir -p ./MoNERo
    #   pushd ./MoNERo && 7z x ../MoNERo_2019.7z && popd
    #   tar -czf MoNERo.tar.gz ./MoNERo
    _DATASETNAME: "https://github.com/bigscience-workshop/biomedical/files/8550757/MoNERo.tar.gz",
}

_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]

_SOURCE_VERSION = "1.0.0"
_BIGBIO_VERSION = "1.0.0"


class MoneroDataset(datasets.GeneratorBasedBuilder):
    """MoNERo: a biomedical gold standard corpus for the Romanian language for part-of-speech tagging
    and named entity recognition."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = [
        BigBioConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        BigBioConfig(
            name=f"{_DATASETNAME}_bigbio_kb",
            version=BIGBIO_VERSION,
            description=f"{_DATASETNAME} BigBio schema",
            schema="bigbio_kb",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "doc_id": datasets.Value("string"),
                    "tokens": [datasets.Value("string")],
                    "lemmas": [datasets.Value("string")],
                    "ner_tags": [datasets.Value("string")],
                    "pos_tags": [datasets.Value("string")],
                }
            )

        elif self.config.schema == "bigbio_kb":
            features = kb_features
        else:
            raise NotImplementedError(f"Schema {self.config.schema} not supported")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        urls = _URLS[_DATASETNAME]
        data_dir = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": Path(os.path.join(data_dir, "MoNERo", "MoNERo.txt")),
                    "split": "train",
                },
            ),
        ] + [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "filepath": Path(os.path.join(data_dir, "MoNERo", f"MoNERo_{split}.txt")),
                    "split": split,
                },
            )
            for split in ["cardiology", "endocrinology", "diabetes"]
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        if self.config.schema == "source":
            for key, example in self._read_example_from_file(filepath):
                yield key, example

        elif self.config.schema == "bigbio_kb":
            for key, example in self._read_example_from_file_in_kb_schema(filepath):
                yield key, example

    def _read_example_from_file(self, filepath: Path) -> Tuple[str, Dict]:
        """Read examples from the given file in source schema"""
        with filepath.open("r", encoding="utf8") as fp:
            sequences = fp.read().split("\n\n")

        for i, seq in enumerate(sequences):
            key = f"docid-{i}"
            seq = [line.rstrip().split("\t") for line in seq.rstrip().splitlines()]

            # There are a few lines which only have two columns. Skip those.
            seq = [line for line in seq if len(line) == 4]
            tokens, lemmas, ner_tags, pos_tags = zip(*seq)
            example = {
                "doc_id": key,
                "tokens": tokens,
                "lemmas": lemmas,
                "ner_tags": ner_tags,
                "pos_tags": pos_tags,
            }
            yield key, example

    @staticmethod
    def _assign_offsets(tokens: List[str]) -> List[Tuple[int, int]]:
        """Compute token offsets from list of tokens"""

        offsets = []
        start = 0
        for t in tokens:
            s = start
            e = s + len(t)
            offsets.append((s, e))
            start = e + 1  # Add one to include space.

        return offsets

    @staticmethod
    def _extract_entities(ner_tags: List[str]) -> List[Dict]:
        """Extract the entity token offsets / indices given the NER tags.

        Note: The dataset contains discontinuous entities. Unfortunately, in some cases it is not
        transparent to which entity a part (i.e., an I-tag without a preceding B-tag) should be linked.
        In this implementation we append the entity part to the previous entity of that type. If there
        is no previous entity, we construct a new entity from the part.
        """
        ner_tags = tuple(ner_tags) + ("O",)
        entities = []
        stack = []
        is_discontinuation = False

        for index, ner_tag in enumerate(ner_tags):
            if stack and (ner_tag == "O" or ner_tag.startswith("B-")):
                entity_type, start_index = stack[0]
                entity_type, end_index = stack[-1]

                if not is_discontinuation:
                    # Standard case - create a new entity
                    entities.append({"type": entity_type, "offsets": [(start_index, end_index)]})
                else:
                    # Try to append the offsets to the previous entity of the same type
                    prev_entity = None
                    for i in range(len(entities) - 1, -1, -1):  # include the first entity in the search
                        if entities[i]["type"] == entity_type:
                            prev_entity = entities[i]
                            break

                    if prev_entity:
                        prev_entity["offsets"].append((start_index, end_index))
                    else:
                        # If we can't find a previous entity - create a new one
                        entities.append({"type": entity_type, "offsets": [(start_index, end_index)]})

                stack = []
                is_discontinuation = False

            if ner_tag.startswith("I-") and len(stack) == 0 and len(entities) > 0:
                # The corpus contains some discontinuous entities
                is_discontinuation = True

            if ner_tag.startswith(("B-", "I-")):
                _, entity_type = ner_tag.split("-", 1)
                stack.append((entity_type, index))

        return entities

    def _parse_example_to_kb_schema(self, example: Dict) -> Dict[str, Any]:
        """Maps a source example to BigBio kb schema"""

        text = " ".join(example["tokens"])
        doc_id = example["doc_id"]
        passages = [
            {
                "id": f"{doc_id}-P0",
                "type": "abstract",
                "text": [text],
                "offsets": [[0, len(text)]],
            }
        ]

        offsets = self._assign_offsets(example["tokens"])
        entities_with_token_indices = self._extract_entities(example["ner_tags"])

        entities = []
        for i, entity_type_and_token_indices in enumerate(entities_with_token_indices):
            entity_texts = []
            entity_offsets = []

            for start_token, end_token in entity_type_and_token_indices["offsets"]:
                start_offset, end_offset = offsets[start_token][0], offsets[end_token][1]
                entity_offsets.append((start_offset, end_offset))
                entity_texts.append(text[start_offset:end_offset])

            entity = {
                "id": f"{doc_id}-E{i}",
                "text": entity_texts,
                "offsets": entity_offsets,
                "type": entity_type_and_token_indices["type"],
                "normalized": [],
            }
            entities.append(entity)

        data = {
            "id": doc_id,
            "document_id": doc_id,
            "passages": passages,
            "entities": entities,
            "relations": [],
            "events": [],
            "coreferences": [],
        }
        return data

    def _read_example_from_file_in_kb_schema(self, filepath: Path) -> Tuple[str, Dict]:
        for key, example in self._read_example_from_file(filepath):
            example = self._parse_example_to_kb_schema(example)
            yield key, example
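
To make the discontinuous-entity handling described in the `_extract_entities` docstring concrete, here is a hand-traced example. The tag names are illustrative rather than the corpus's actual label set, and because `monero.py` uses a relative import it only imports cleanly when loaded as part of a package (e.g. by the `datasets` script loader), so read this as a sketch of the call pattern.

```python
from monero import MoneroDataset  # assumes the script is importable as a module

tokens = ["nivelul", "de", "glucoza", "din", "sange"]
ner_tags = ["B-ANAT", "O", "B-CHEM", "O", "I-ANAT"]

# Character offsets assume single-space joining, as in _parse_example_to_kb_schema.
print(MoneroDataset._assign_offsets(tokens))
# [(0, 7), (8, 10), (11, 18), (19, 22), (23, 28)]

# The dangling I-ANAT at token index 4 has no preceding B-tag, so it is
# appended to the earlier ANAT entity as a discontinuous span.
print(MoneroDataset._extract_entities(ner_tags))
# [{'type': 'ANAT', 'offsets': [(0, 0), (4, 4)]}, {'type': 'CHEM', 'offsets': [(2, 2)]}]
```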