gabrielaltay committed
Commit e1512bf
1 Parent(s): 75e2e38

upload hubscripts/n2c2_2018_track1_hub.py to hub from bigbio repo

Files changed (1)
  1. n2c2_2018_track1.py +298 -0
n2c2_2018_track1.py ADDED
@@ -0,0 +1,298 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ 
+ """
+ A dataset loader for the n2c2 2018 cohort selection dataset.
+ 
+ The dataset consists of two archive files,
+ ├── train.zip - 202 records
+ └── n2c2-t1_gold_standard_test_data.zip - 86 records
+ 
+ The individual data files (inside the zip archives) come as XML files
+ that contain text as well as labels.
+ 
+ The files comprising this dataset must be on the user's local machine
+ in a single directory that is passed to `datasets.load_dataset` via
+ the `data_dir` kwarg. This loader script will read the archive files
+ directly (i.e. the user should not uncompress or unzip any of
+ the files).
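+ 
+ For example (the directory below is a placeholder for wherever the two
+ zip archives live on your machine):
+ 
+     from datasets import load_dataset
+ 
+     dataset = load_dataset(
+         "n2c2_2018_track1.py",
+         name="n2c2_2018_track1_source",
+         data_dir="/path/to/n2c2_2018_track1",
+     )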
+ 
+ Data Access from https://portal.dbmi.hms.harvard.edu/projects/n2c2-nlp/
+ """
+ 
+ import os
+ import zipfile
+ from collections import defaultdict
+ from typing import List
+ 
+ import datasets
+ from lxml import etree
+ 
+ from .bigbiohub import text_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+ 
+ _LANGUAGES = ['English']
+ _PUBMED = False
+ _LOCAL = True
+ _CITATION = """\
+ @article{DBLP:journals/jamia/StubbsFSHU19,
+   author = {
+     Amber Stubbs and
+     Michele Filannino and
+     Ergin Soysal and
+     Samuel Henry and
+     Ozlem Uzuner
+   },
+   title = {Cohort selection for clinical trials: n2c2 2018 shared task track 1},
+   journal = {J. Am. Medical Informatics Assoc.},
+   volume = {26},
+   number = {11},
+   pages = {1163--1171},
+   year = {2019},
+   url = {https://doi.org/10.1093/jamia/ocz163},
+   doi = {10.1093/jamia/ocz163},
+   timestamp = {Mon, 15 Jun 2020 16:56:11 +0200},
+   biburl = {https://dblp.org/rec/journals/jamia/StubbsFSHU19.bib},
+   bibsource = {dblp computer science bibliography, https://dblp.org}
+ }
+ """
+ 
+ _DATASETNAME = "n2c2_2018_track1"
+ _DISPLAYNAME = "n2c2 2018 Selection Criteria"
+ 
+ _DESCRIPTION = """\
+ Track 1 of the 2018 National NLP Clinical Challenges shared tasks focused
+ on identifying which patients in a corpus of longitudinal medical records
+ meet and do not meet identified selection criteria.
+ 
+ This shared task aimed to determine whether NLP systems could be trained to
+ identify if patients met or did not meet a set of selection criteria taken
+ from real clinical trials. The selected criteria required measurement
+ detection (“Any HbA1c value between 6.5 and 9.5%”), inference (“Use of
+ aspirin to prevent myocardial infarction”), temporal reasoning (“Diagnosis
+ of ketoacidosis in the past year”), and expert judgment to assess (“Major
+ diabetes-related complication”). For the corpus, we used the dataset of
+ American English, longitudinal clinical narratives from the 2014
+ i2b2/UTHealth shared task 4.
+ 
+ The final set of 13 selection criteria is as follows:
+ 1. DRUG-ABUSE: Drug abuse, current or past
+ 2. ALCOHOL-ABUSE: Current alcohol use over weekly recommended limits
+ 3. ENGLISH: Patient must speak English
+ 4. MAKES-DECISIONS: Patient must make their own medical decisions
+ 5. ABDOMINAL: History of intra-abdominal surgery, small or large intestine
+    resection, or small bowel obstruction
+ 6. MAJOR-DIABETES: Major diabetes-related complication. For the purposes of
+    this annotation, we define “major complication” (as opposed to “minor
+    complication”) as any of the following that are a result of (or strongly
+    correlated with) uncontrolled diabetes:
+    a. Amputation
+    b. Kidney damage
+    c. Skin conditions
+    d. Retinopathy
+    e. Nephropathy
+    f. Neuropathy
+ 7. ADVANCED-CAD: Advanced cardiovascular disease (CAD). For the purposes of
+    this annotation, we define “advanced” as having 2 or more of the following:
+    a. Taking 2 or more medications to treat CAD
+    b. History of myocardial infarction (MI)
+    c. Currently experiencing angina
+    d. Ischemia, past or present
+ 8. MI-6MOS: MI in the past 6 months
+ 9. KETO-1YR: Diagnosis of ketoacidosis in the past year
+ 10. DIETSUPP-2MOS: Taken a dietary supplement (excluding vitamin D) in the
+     past 2 months
+ 11. ASP-FOR-MI: Use of aspirin to prevent MI
+ 12. HBA1C: Any hemoglobin A1c (HbA1c) value between 6.5% and 9.5%
+ 13. CREATININE: Serum creatinine > upper limit of normal
+ 
+ The training set consists of 202 patient records with document-level
+ annotations (10 further records carry textual spans marking the annotators’
+ evidence), while the test set contains 86 records.
+ 
+ Note:
+ * The average inter-annotator agreement is 84.9%.
+ * The whereabouts of the 10 records with evidence spans are unknown.
+   However, a simple script-based check by the author found no text inside
+   any tags in the released data, confirming that at least the train and
+   test sets do not have evidence spans tagged alongside the labels.
+ """
+ 
+ _HOMEPAGE = "https://portal.dbmi.hms.harvard.edu/projects/n2c2-nlp/"
+ 
+ _LICENSE = 'Data User Agreement'
+ 
+ _SUPPORTED_TASKS = [Tasks.TEXT_CLASSIFICATION]
+ 
+ _SOURCE_VERSION = "1.0.0"
+ 
+ _BIGBIO_VERSION = "1.0.0"
+ 
+ # Constants
+ SOURCE = "source"
+ BIGBIO_TEXT = "bigbio_text"
+ 
+ 
+ def _read_zip(file_path):
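+     # Each record in the archive is an XML file with two children under
+     # the root element: a TEXT element holding the clinical note and a
+     # TAGS element whose children are the 13 criteria, each carrying a
+     # met="met" / met="not met" attribute. (The root tag name itself is
+     # not relied upon here.)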
+     samples = defaultdict(dict)
+     with zipfile.ZipFile(file_path) as zf:
+         for info in zf.infolist():
+ 
+             base, filename = os.path.split(info.filename)
+             _, ext = os.path.splitext(filename)
+             ext = ext[1:]  # get rid of dot
+             sample_id = filename.split(".")[0]
+ 
+             if ext == "xml" and not filename.startswith("."):
+                 content = zf.read(info).decode("utf-8").encode()
+                 root = etree.XML(content)
+                 text, tags = root.getchildren()
+                 samples[sample_id]["txt"] = text.text
+                 samples[sample_id]["tags"] = {}
+                 for child in tags:
+                     samples[sample_id]["tags"][child.tag] = child.get("met")
+ 
+     return samples
+ 
+ 
+ class N2C22018CohortSelectionDataset(datasets.GeneratorBasedBuilder):
+     """n2c2 2018 track 1 cohort selection task"""
+ 
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+ 
+     _SOURCE_CONFIG_NAME = _DATASETNAME + "_" + SOURCE
+     _BIGBIO_CONFIG_NAME = _DATASETNAME + "_" + BIGBIO_TEXT
+ 
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name=_SOURCE_CONFIG_NAME,
+             version=SOURCE_VERSION,
+             description=_DATASETNAME + " source schema",
+             schema=SOURCE,
+             subset_id=_DATASETNAME,
+         ),
+         BigBioConfig(
+             name=_BIGBIO_CONFIG_NAME,
+             version=BIGBIO_VERSION,
+             description=_DATASETNAME + " BigBio schema",
+             schema=BIGBIO_TEXT,
+             subset_id=_DATASETNAME,
+         ),
+     ]
+ 
+     DEFAULT_CONFIG_NAME = _SOURCE_CONFIG_NAME
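+     # The 13 selection criteria from the task description, in
+     # alphabetical order.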
+     LABEL_CLASS_NAMES = [
+         "ABDOMINAL",
+         "ADVANCED-CAD",
+         "ALCOHOL-ABUSE",
+         "ASP-FOR-MI",
+         "CREATININE",
+         "DIETSUPP-2MOS",
+         "DRUG-ABUSE",
+         "ENGLISH",
+         "HBA1C",
+         "KETO-1YR",
+         "MAJOR-DIABETES",
+         "MAKES-DECISIONS",
+         "MI-6MOS",
+     ]
+ 
+     def _info(self) -> datasets.DatasetInfo:
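+         # The source schema keeps one met/not-met ClassLabel per criterion,
+         # while the bigbio_text schema (imported from bigbiohub) stores a
+         # flat list of string labels per document.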
+ 
+         if self.config.schema == SOURCE:
+             labels = {
+                 key: datasets.ClassLabel(names=["met", "not met"])
+                 for key in self.LABEL_CLASS_NAMES
+             }
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "document_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "tags": labels,
+                 }
+             )
+ 
+         elif self.config.schema == BIGBIO_TEXT:
+             features = text_features
+ 
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+ 
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+ 
+         if self.config.data_dir is None or self.config.name is None:
+             raise ValueError(
+                 "This is a local dataset. Please pass the data_dir and name kwargs to load_dataset."
+             )
+         else:
+             data_dir = self.config.data_dir
+ 
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "file_path": os.path.join(data_dir, "train.zip"),
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "file_path": os.path.join(
+                         data_dir, "n2c2-t1_gold_standard_test_data.zip"
+                     ),
+                 },
+             ),
+         ]
+ 
+     @staticmethod
+     def _get_source_sample(sample_id, sample):
+         return {
+             "id": sample_id,
+             "document_id": sample_id,
+             "text": sample.get("txt", ""),
+             "tags": sample.get("tags", {}),
+         }
+ 
+     @staticmethod
+     def _get_bigbio_sample(sample_id, sample) -> dict:
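+         # The bigbio_text schema is multi-label: rather than one
+         # met/not-met tag per criterion, `labels` simply lists every
+         # criterion annotated as "met" for this record.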
+ 
+         tags = sample.get("tags", None)
+         if tags:
+             labels = [name for name, met_status in tags.items() if met_status == "met"]
+         else:
+             labels = []
+ 
+         return {
+             "id": sample_id,
+             "document_id": sample_id,
+             "text": sample.get("txt", ""),
+             "labels": labels,
+         }
+ 
+     def _generate_examples(self, file_path):
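+         # Keys yielded to `datasets` are running integers; the original
+         # record id is preserved in the example's `id`/`document_id` fields.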
+         samples = _read_zip(file_path)
+ 
+         _id = 0
+         for sample_id, sample in samples.items():
+ 
+             if self.config.name == self._SOURCE_CONFIG_NAME:
+                 yield _id, self._get_source_sample(sample_id, sample)
+             elif self.config.name == self._BIGBIO_CONFIG_NAME:
+                 yield _id, self._get_bigbio_sample(sample_id, sample)
+ 
+             _id += 1