gabrielaltay committed
Commit
2e0672f
1 Parent(s): c5692b6

upload hubscripts/n2c2_2010_hub.py to hub from bigbio repo

Files changed (1)
  1. n2c2_2010.py +609 -0
n2c2_2010.py ADDED
@@ -0,0 +1,609 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and
+ #
+ # * Ayush Singh (singhay)
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ A dataset loader for the n2c2 2010 relations dataset.
+
+ The dataset consists of three archive files,
+ ├── concept_assertion_relation_training_data.tar.gz
+ ├── reference_standard_for_test_data.tar.gz
+ └── test_data.tar.gz
+
+ The individual data files (inside the zip and tar archives) come in 4 types,
+
+ * docs (*.txt files): text of a patient record
+ * concepts (*.con files): entities along with offsets used as input to a named entity recognition model
+ * assertions (*.ast files): entities, offsets and their assertion used as input to a named entity recognition model
+ * relations (*.rel files): pairs of entities related by relation type used as input to a relation extraction model
+
+
+ The files comprising this dataset must be on the user's local machine
+ in a single directory that is passed to `datasets.load_dataset` via
+ the `data_dir` kwarg. This loader script will read the archive files
+ directly (i.e. the user should not uncompress, untar or unzip any of
+ the files).
+
+ Data Access from https://portal.dbmi.hms.harvard.edu/projects/n2c2-nlp/
+ """
+
+ import os
+ import re
+ import tarfile
+ from collections import defaultdict
+ from dataclasses import dataclass
+ from typing import List, Tuple
+
+ import datasets
+ from datasets import Version
+
+ from .bigbiohub import kb_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+
+ _LANGUAGES = ['English']
+ _PUBMED = False
+ _LOCAL = True
+ _CITATION = """\
+ @article{DBLP:journals/jamia/UzunerSSD11,
+   author    = {
+                Ozlem Uzuner and
+                Brett R. South and
+                Shuying Shen and
+                Scott L. DuVall
+               },
+   title     = {2010 i2b2/VA challenge on concepts, assertions, and relations in clinical
+                text},
+   journal   = {J. Am. Medical Informatics Assoc.},
+   volume    = {18},
+   number    = {5},
+   pages     = {552--556},
+   year      = {2011},
+   url       = {https://doi.org/10.1136/amiajnl-2011-000203},
+   doi       = {10.1136/amiajnl-2011-000203},
+   timestamp = {Mon, 11 May 2020 23:00:20 +0200},
+   biburl    = {https://dblp.org/rec/journals/jamia/UzunerSSD11.bib},
+   bibsource = {dblp computer science bibliography, https://dblp.org}
+ }
+ """
+
+ _DATASETNAME = "n2c2_2010"
+ _DISPLAYNAME = "n2c2 2010 Concepts, Assertions, and Relations"
+
+ _DESCRIPTION = """\
+ The i2b2/VA corpus contained de-identified discharge summaries from Beth Israel
+ Deaconess Medical Center, Partners Healthcare, and University of Pittsburgh Medical
+ Center (UPMC). In addition, UPMC contributed de-identified progress notes to the
+ i2b2/VA corpus. This dataset contains the records from Beth Israel and Partners.
+
+ The 2010 i2b2/VA Workshop on Natural Language Processing Challenges for Clinical Records comprises three tasks:
+ 1) a concept extraction task focused on the extraction of medical concepts from patient reports;
+ 2) an assertion classification task focused on assigning assertion types for medical problem concepts;
+ 3) a relation classification task focused on assigning relation types that hold between medical problems,
+ tests, and treatments.
+
+ i2b2 and the VA provided an annotated reference standard corpus for the three tasks.
+ Using this reference standard, 22 systems were developed for concept extraction,
+ 21 for assertion classification, and 16 for relation classification.
+ """
+
+ _HOMEPAGE = "https://portal.dbmi.hms.harvard.edu/projects/n2c2-nlp/"
+
+ _LICENSE = 'Data User Agreement'
+
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.RELATION_EXTRACTION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _BIGBIO_VERSION = "1.0.0"
+
+
+ def _read_tar_gz(file_path: str, samples=None):
+     if samples is None:
+         samples = defaultdict(dict)
+     with tarfile.open(file_path, "r:gz") as tf:
+
+         for member in tf.getmembers():
+             base, filename = os.path.split(member.name)
+             _, ext = os.path.splitext(filename)
+             ext = ext[1:]  # get rid of dot
+             sample_id = filename.split(".")[0]
+
+             if ext in ["txt", "ast", "con", "rel"]:
+                 samples[sample_id][f"{ext}_source"] = (
+                     os.path.basename(file_path) + "|" + member.name
+                 )
+
+                 with tf.extractfile(member) as fp:
+                     content_bytes = fp.read()
+
+                 content = content_bytes.decode("utf-8")
+                 samples[sample_id][ext] = content
+
+     return samples
+
+
+ C_PATTERN = r"c=\"(.+?)\" (\d+):(\d+) (\d+):(\d+)"
+ T_PATTERN = r"t=\"(.+?)\""
+ A_PATTERN = r"a=\"(.+?)\""
+ R_PATTERN = r"r=\"(.+?)\""
+
+ # Constants
+ DELIMITER = "||"
+ SOURCE = "source"
+ BIGBIO_KB = "bigbio_kb"
+
+
+ def _parse_con_line(line: str) -> dict:
+     """Parse one line from a *.con file.
+
+     A typical line has the form,
+       'c="angie cm johnson , m.d." 13:2 13:6||t="person"'
+
+     This represents one concept to be placed into a coreference group.
+     It can be interpreted as follows,
+       'c="<string>" <start_line>:<start_token> <end_line>:<end_token>||t="<concept type>"'
+
+     """
+     c_part, t_part = line.split(DELIMITER)
+     c_match, t_match = re.match(C_PATTERN, c_part), re.match(T_PATTERN, t_part)
+     return {
+         "text": c_match.group(1),
+         "start_line": int(c_match.group(2)),
+         "start_token": int(c_match.group(3)),
+         "end_line": int(c_match.group(4)),
+         "end_token": int(c_match.group(5)),
+         "concept": t_match.group(1),
+     }
+
+
+ def _parse_rel_line(line: str) -> dict:
+     """Parse one line from a *.rel file.
+
+     A typical line has the form,
+       'c="coronary artery bypass graft" 115:4 115:7||r="TrAP"||c="coronary artery disease" 115:0 115:2'
+
+     This represents two concepts related to one another.
+     It can be interpreted as follows,
+       'c="<string>" <start_line>:<start_token> <end_line>:<end_token>||r="<type>"||c="<string>"
+       <start_line>:<start_token> <end_line>:<end_token>'
+
+     """
+     c1_part, r_part, c2_part = line.split(DELIMITER)
+     c1_match, r_match, c2_match = (
+         re.match(C_PATTERN, c1_part),
+         re.match(R_PATTERN, r_part),
+         re.match(C_PATTERN, c2_part),
+     )
+     return {
+         "concept_1": {
+             "text": c1_match.group(1),
+             "start_line": int(c1_match.group(2)),
+             "start_token": int(c1_match.group(3)),
+             "end_line": int(c1_match.group(4)),
+             "end_token": int(c1_match.group(5)),
+         },
+         "concept_2": {
+             "text": c2_match.group(1),
+             "start_line": int(c2_match.group(2)),
+             "start_token": int(c2_match.group(3)),
+             "end_line": int(c2_match.group(4)),
+             "end_token": int(c2_match.group(5)),
+         },
+         "relation": r_match.group(1),
+     }
+
+
+ def _parse_ast_line(line: str) -> dict:
+     """Parse one line from a *.ast file.
+
+     A typical line has the form,
+       'c="mild inferior wall hypokinesis" 42:2 42:5||t="problem"||a="present"'
+
+     This represents one concept along with its assertion.
+     It can be interpreted as follows,
+       'c="<string>" <start_line>:<start_token> <end_line>:<end_token>||t="<concept type>"||a="<assertion type>"'
+
+     """
+     c_part, t_part, a_part = line.split(DELIMITER)
+     c_match, t_match, a_match = (
+         re.match(C_PATTERN, c_part),
+         re.match(T_PATTERN, t_part),
+         re.match(A_PATTERN, a_part),
+     )
+     return {
+         "text": c_match.group(1),
+         "start_line": int(c_match.group(2)),
+         "start_token": int(c_match.group(3)),
+         "end_line": int(c_match.group(4)),
+         "end_token": int(c_match.group(5)),
+         "concept": t_match.group(1),
+         "assertion": a_match.group(1),
+     }
+
+
+ def _tokoff_from_line(text: str) -> List[Tuple[int, int]]:
+     """Produce character offsets for each token (whitespace split)
+
+     For example,
+       text = " one  two three ."
+       tokoff = [(1,4), (6,9), (10,15), (16,17)]
+     """
+     tokoff = []
+     start = None
+     end = None
+     for ii, char in enumerate(text):
+         if char != " " and start is None:
+             start = ii
+         if char == " " and start is not None:
+             end = ii
+             tokoff.append((start, end))
+             start = None
+     if start is not None:
+         end = ii + 1
+         tokoff.append((start, end))
+     return tokoff
+
+
+ def _form_entity_id(sample_id, split, start_line, start_token, end_line, end_token):
+     return "{}-entity-{}-{}-{}-{}-{}".format(
+         sample_id,
+         split,
+         start_line,
+         start_token,
+         end_line,
+         end_token,
+     )
+
+
+ def _get_relations_from_sample(sample_id, sample, split):
+     rel_lines = sample["rel"].splitlines()
+
+     relations = []
+     for i, rel_line in enumerate(rel_lines):
+         a = {}
+         rel = _parse_rel_line(rel_line)
+         a["arg1_id"] = _form_entity_id(
+             sample_id,
+             split,
+             rel["concept_1"]["start_line"],
+             rel["concept_1"]["start_token"],
+             rel["concept_1"]["end_line"],
+             rel["concept_1"]["end_token"],
+         )
+         a["arg2_id"] = _form_entity_id(
+             sample_id,
+             split,
+             rel["concept_2"]["start_line"],
+             rel["concept_2"]["start_token"],
+             rel["concept_2"]["end_line"],
+             rel["concept_2"]["end_token"],
+         )
+         a["id"] = (
+             sample_id + "_" + a["arg1_id"] + "_" + rel["relation"] + "_" + a["arg2_id"]
+         )
+         a["normalized"] = []
+         a["type"] = rel["relation"]
+         relations.append(a)
+
+     return relations
+
+
+ def _get_entities_from_sample(sample_id, sample, split):
+     """Parse the lines of a *.con concept file into entity objects"""
+     con_lines = sample["con"].splitlines()
+
+     text = sample["txt"]
+     text_lines = text.splitlines()
+     text_line_lengths = [len(el) for el in text_lines]
+
+     # parsed concepts (sort is just a convenience)
+     con_parsed = sorted(
+         [_parse_con_line(line) for line in con_lines],
+         key=lambda x: (x["start_line"], x["start_token"]),
+     )
+
+     entities = []
+     for ii_cp, cp in enumerate(con_parsed):
+
+         # annotations can span multiple lines
+         # we loop over all lines and build up the character offsets
+         for ii_line in range(cp["start_line"], cp["end_line"] + 1):
+
+             # character offset to the beginning of the line
+             # line length of each line + 1 new line character for each line
+             start_line_off = sum(text_line_lengths[: ii_line - 1]) + (ii_line - 1)
+
+             # offsets for each token relative to the beginning of the line
+             # "one two" -> [(0,3), (4,7)]
+             tokoff = _tokoff_from_line(text_lines[ii_line - 1])
+
+             # if this is a single line annotation
+             if ii_line == cp["start_line"] == cp["end_line"]:
+                 start_off = start_line_off + tokoff[cp["start_token"]][0]
+                 end_off = start_line_off + tokoff[cp["end_token"]][1]
+
+             # if multi-line and on first line
+             # end_off gets a +1 for new line character
+             elif (ii_line == cp["start_line"]) and (ii_line != cp["end_line"]):
+                 start_off = start_line_off + tokoff[cp["start_token"]][0]
+                 end_off = start_line_off + text_line_lengths[ii_line - 1] + 1
+
+             # if multi-line and on last line
+             elif (ii_line != cp["start_line"]) and (ii_line == cp["end_line"]):
+                 end_off = end_off + tokoff[cp["end_token"]][1]
+
+             # if multi-line and not on first or last line
+             # (this does not seem to occur in this corpus)
+             else:
+                 end_off += text_line_lengths[ii_line - 1] + 1
+
+         text_slice = text[start_off:end_off]
+         text_slice_norm_1 = text_slice.replace("\n", "").lower()
+         text_slice_norm_2 = text_slice.replace("\n", " ").lower()
+         match = text_slice_norm_1 == cp["text"] or text_slice_norm_2 == cp["text"]
+         if not match:
+             continue
+
+         entity_id = _form_entity_id(
+             sample_id,
+             split,
+             cp["start_line"],
+             cp["start_token"],
+             cp["end_line"],
+             cp["end_token"],
+         )
+         entity = {
+             "id": entity_id,
+             "offsets": [(start_off, end_off)],
+             # this is the difference between taking the text from the concept
+             # annotation or taking the text from the offsets. the differences
+             # are almost all casing, with a small number of newline characters
+             # making up the rest
+             # "text": [cp["text"]],
+             "text": [text_slice],
+             "type": cp["concept"],
+             "normalized": [],
+         }
+         entities.append(entity)
+
+     # IDs are constructed such that duplicate IDs indicate duplicate (i.e. redundant) entities
+     # In practice this removes one duplicate sample from the test set
+     # {
+     #     'id': 'clinical-627-entity-test-122-9-122-9',
+     #     'offsets': [(5600, 5603)],
+     #     'text': ['her'],
+     #     'type': 'person'
+     # }
+     dedupe_entities = []
+     dedupe_entity_ids = set()
+     for entity in entities:
+         if entity["id"] in dedupe_entity_ids:
+             continue
+         else:
+             dedupe_entity_ids.add(entity["id"])
+             dedupe_entities.append(entity)
+
+     return dedupe_entities
+
+
+ class N2C22010RelationsDataset(datasets.GeneratorBasedBuilder):
+     """i2b2 2010 task comprising concept, assertion and relation extraction"""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     # You will be able to load the "source" or "bigbio" configurations with
+     # ds_source = datasets.load_dataset('my_dataset', name='source')
+     # ds_bigbio = datasets.load_dataset('my_dataset', name='bigbio')
+
+     # For local datasets you can make use of the `data_dir` and `data_files` kwargs
+     # https://huggingface.co/docs/datasets/add_dataset.html#downloading-data-files-and-organizing-splits
+     # ds_source = datasets.load_dataset('my_dataset', name='source', data_dir="/path/to/data/files")
+     # ds_bigbio = datasets.load_dataset('my_dataset', name='bigbio', data_dir="/path/to/data/files")
+
+     _SOURCE_CONFIG_NAME = _DATASETNAME + "_" + SOURCE
+     _BIGBIO_CONFIG_NAME = _DATASETNAME + "_" + BIGBIO_KB
+
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name=_SOURCE_CONFIG_NAME,
+             version=SOURCE_VERSION,
+             description=_DATASETNAME + " source schema",
+             schema=SOURCE,
+             subset_id=_DATASETNAME,
+         ),
+         BigBioConfig(
+             name=_BIGBIO_CONFIG_NAME,
+             version=BIGBIO_VERSION,
+             description=_DATASETNAME + " BigBio schema",
+             schema=BIGBIO_KB,
+             subset_id=_DATASETNAME,
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = _SOURCE_CONFIG_NAME
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == SOURCE:
+             features = datasets.Features(
+                 {
+                     "doc_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "concepts": [
+                         {
+                             "start_line": datasets.Value("int64"),
+                             "start_token": datasets.Value("int64"),
+                             "end_line": datasets.Value("int64"),
+                             "end_token": datasets.Value("int64"),
+                             "text": datasets.Value("string"),
+                             "concept": datasets.Value("string"),
+                         }
+                     ],
+                     "assertions": [
+                         {
+                             "start_line": datasets.Value("int64"),
+                             "start_token": datasets.Value("int64"),
+                             "end_line": datasets.Value("int64"),
+                             "end_token": datasets.Value("int64"),
+                             "text": datasets.Value("string"),
+                             "concept": datasets.Value("string"),
+                             "assertion": datasets.Value("string"),
+                         }
+                     ],
+                     "relations": [
+                         {
+                             "concept_1": {
+                                 "text": datasets.Value("string"),
+                                 "start_line": datasets.Value("int64"),
+                                 "start_token": datasets.Value("int64"),
+                                 "end_line": datasets.Value("int64"),
+                                 "end_token": datasets.Value("int64"),
+                             },
+                             "concept_2": {
+                                 "text": datasets.Value("string"),
+                                 "start_line": datasets.Value("int64"),
+                                 "start_token": datasets.Value("int64"),
+                                 "end_line": datasets.Value("int64"),
+                                 "end_token": datasets.Value("int64"),
+                             },
+                             "relation": datasets.Value("string"),
+                         }
+                     ],
+                     "unannotated": [
+                         {
+                             "text": datasets.Value("string"),
+                         }
+                     ],
+                     "metadata": {
+                         "txt_source": datasets.Value("string"),
+                         "con_source": datasets.Value("string"),
+                         "ast_source": datasets.Value("string"),
+                         "rel_source": datasets.Value("string"),
+                         "unannotated_source": datasets.Value("string"),
+                     },
+                 }
+             )
+
+         elif self.config.schema == BIGBIO_KB:
+             features = kb_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+
+         if self.config.data_dir is None or self.config.name is None:
+             raise ValueError(
+                 "This is a local dataset. Please pass the data_dir and name kwarg to load_dataset."
+             )
+         else:
+             data_dir = self.config.data_dir
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # Whatever you put in gen_kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "data_dir": data_dir,
+                     "split": str(datasets.Split.TRAIN),
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "data_dir": data_dir,
+                     "split": str(datasets.Split.TEST),
+                 },
+             ),
+         ]
+
+     @staticmethod
+     def _get_source_sample(sample_id, sample):
+         return {
+             "doc_id": sample_id,
+             "text": sample.get("txt", ""),
+             "concepts": list(map(_parse_con_line, sample.get("con", "").splitlines())),
+             "assertions": list(
+                 map(_parse_ast_line, sample.get("ast", "").splitlines())
+             ),
+             "relations": list(map(_parse_rel_line, sample.get("rel", "").splitlines())),
+             "unannotated": sample.get("unannotated", ""),
+             "metadata": {
+                 "txt_source": sample.get("txt_source", ""),
+                 "con_source": sample.get("con_source", ""),
+                 "ast_source": sample.get("ast_source", ""),
+                 "rel_source": sample.get("rel_source", ""),
+                 "unannotated_source": sample.get("unannotated_source", ""),
+             },
+         }
+
+     @staticmethod
+     def _get_bigbio_sample(sample_id, sample, split) -> dict:
+
+         passage_text = sample.get("txt", "")
+         entities = _get_entities_from_sample(sample_id, sample, split)
+         relations = _get_relations_from_sample(sample_id, sample, split)
+         return {
+             "id": sample_id,
+             "document_id": sample_id,
+             "passages": [
+                 {
+                     "id": f"{sample_id}-passage-0",
+                     "type": "discharge summary",
+                     "text": [passage_text],
+                     "offsets": [(0, len(passage_text))],
+                 }
+             ],
+             "entities": entities,
+             "relations": relations,
+             "events": [],
+             "coreferences": [],
+         }
+
+     def _generate_examples(self, data_dir, split):
+         if split == "train":
+             samples = _read_tar_gz(
+                 os.path.join(
+                     data_dir, "concept_assertion_relation_training_data.tar.gz"
+                 )
+             )
+         elif split == "test":
+             # This file adds con, ast and rel
+             samples = _read_tar_gz(
+                 os.path.join(data_dir, "reference_standard_for_test_data.tar.gz")
+             )
+             # This file adds txt to already existing samples
+             samples = _read_tar_gz(os.path.join(data_dir, "test_data.tar.gz"), samples)
+
+         _id = 0
+
+         for sample_id, sample in samples.items():
+
+             if self.config.name == N2C22010RelationsDataset._SOURCE_CONFIG_NAME:
+                 yield _id, self._get_source_sample(sample_id, sample)
+             elif self.config.name == N2C22010RelationsDataset._BIGBIO_CONFIG_NAME:
+                 # This is to make sure unannotated data does not end up in bigbio
+                 if "unannotated" not in sample["txt_source"]:
+                     yield _id, self._get_bigbio_sample(sample_id, sample, split)
+
+             _id += 1
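
The loader above is local-only: as the module docstring notes, the three .tar.gz archives must be requested from the DBMI portal and placed together in one directory passed via `data_dir`. Below is a minimal usage sketch, not part of the commit: the config names (`n2c2_2010_source`, `n2c2_2010_bigbio_kb`) follow from `_DATASETNAME` plus the schema suffix, the directory path is a placeholder, and it assumes the script is loaded with `bigbiohub.py` available next to it (recent `datasets` versions may also require `trust_remote_code=True`).

import datasets

# Placeholder directory holding the three archives downloaded from
# https://portal.dbmi.hms.harvard.edu/projects/n2c2-nlp/
DATA_DIR = "/path/to/n2c2_2010"

# Source schema: documents plus raw concept/assertion/relation annotations.
ds_source = datasets.load_dataset(
    "n2c2_2010.py", name="n2c2_2010_source", data_dir=DATA_DIR
)

# BigBio KB schema: passages, entities and relations with character offsets.
ds_bigbio = datasets.load_dataset(
    "n2c2_2010.py", name="n2c2_2010_bigbio_kb", data_dir=DATA_DIR
)

print(ds_source["train"][0]["doc_id"])
print(len(ds_bigbio["test"][0]["entities"]))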
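
For a concrete sense of the annotation format the regexes target, the self-contained sketch below applies `C_PATTERN` and `T_PATTERN` (copied from the loader) to the example .con line from the `_parse_con_line` docstring; expected values are in comments. Note the offsets are line:token positions, not character offsets; the conversion to character offsets happens in `_get_entities_from_sample`.

import re

# Regexes copied from the loader above.
C_PATTERN = r"c=\"(.+?)\" (\d+):(\d+) (\d+):(\d+)"
T_PATTERN = r"t=\"(.+?)\""

line = 'c="angie cm johnson , m.d." 13:2 13:6||t="person"'
c_part, t_part = line.split("||")

c_match = re.match(C_PATTERN, c_part)
t_match = re.match(T_PATTERN, t_part)

print(c_match.group(1))  # angie cm johnson , m.d.
print(int(c_match.group(2)), int(c_match.group(3)))  # 13 2  (start_line, start_token)
print(int(c_match.group(4)), int(c_match.group(5)))  # 13 6  (end_line, end_token)
print(t_match.group(1))  # person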