thbndi commited on
Commit
e85f125
1 Parent(s): d17168a

Upload day_intervals_cohort_v22.py

Browse files
Files changed (1) hide show
  1. day_intervals_cohort_v22.py +392 -0
day_intervals_cohort_v22.py ADDED
@@ -0,0 +1,392 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import datetime
2
+ import os
3
+ import sys
4
+ import numpy as np
5
+ import pandas as pd
6
+ from pathlib import Path
7
+ from tqdm import tqdm
8
+ import importlib
9
+ import disease_cohort
10
+ importlib.reload(disease_cohort)
11
+ import disease_cohort
12
# Make the repository root importable when this file is run as a script.
# NOTE(review): string concatenation ('dir' + './../..') yields an odd but
# tolerated sys.path entry; os.path.join would be cleaner — confirm before changing.
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + './../..')
# exist_ok avoids the check-then-create race of the previous exists()/makedirs pair.
os.makedirs("./data/cohort", exist_ok=True)
15
+
16
def get_visit_pts(mimic4_path:str, group_col:str, visit_col:str, admit_col:str, disch_col:str, adm_visit_col:str, use_mort:bool, use_los:bool, los:int, use_admn:bool, disease_label:str, use_ICU:bool):
    """Combine the MIMIC-IV hosp/patients table with either icu/icustays or hosp/admissions.

    Parameters:
        mimic4_path: path to the folder containing the MIMIC-IV .csv.gz tables
        group_col: patient identifier used to group visits (normally subject_id)
        visit_col: visit identifier (stay_id for ICU, hadm_id otherwise)
        admit_col: visit start timestamp column (intime or admittime)
        disch_col: visit end timestamp column (outtime or dischtime)
        adm_visit_col: hadm_id column carried along for ICU stays
        use_mort, use_los, los, use_admn: flags describing the prediction task
        disease_label: optional ICD cohort; readmission visits are filtered to it
        use_ICU: True -> use icu/icustays visits, False -> use hosp/admissions

    Returns one row per adult visit with demographics (Age, gender, race,
    insurance), a 'los' column and the patient's min_valid_year / dod.
    """
    if use_ICU:
        visit = pd.read_csv(mimic4_path + "icu/icustays.csv.gz", compression='gzip', header=0, index_col=None, parse_dates=[admit_col, disch_col])
        if use_admn:
            # icustays has no in-visit death flag; join patients.dod and drop
            # stays where the patient died before discharge, since such stays
            # cannot be followed by a readmission.
            pts = pd.read_csv(mimic4_path + "hosp/patients.csv.gz", compression='gzip', header=0, index_col=None, usecols=['subject_id', 'dod'], parse_dates=['dod'])
            visit = visit.merge(pts, how='inner', left_on='subject_id', right_on='subject_id')
            visit = visit.loc[(visit.dod.isna()) | (visit.dod >= visit[disch_col])]
            if len(disease_label):
                hids = disease_cohort.extract_diag_cohort(visit['hadm_id'], disease_label, mimic4_path)
                visit = visit[visit['hadm_id'].isin(hids['hadm_id'])]
                print("[ READMISSION DUE TO " + disease_label + " ]")
    else:
        visit = pd.read_csv(mimic4_path + "hosp/admissions.csv.gz", compression='gzip', header=0, index_col=None, parse_dates=[admit_col, disch_col])
        visit[admit_col] = pd.to_datetime(visit[admit_col])
        visit[disch_col] = pd.to_datetime(visit[disch_col])
        # Length of stay in whole days. The previous implementation round-tripped
        # the timedelta through str() and Series.str.split(' ', -1, expand=True);
        # positional `n` is rejected by pandas >= 2.0 and the string parsing is
        # fragile. Timedelta .dt.days yields the same whole-day count directly.
        visit['los'] = (visit[disch_col] - visit[admit_col]).dt.days

        if use_admn:
            # A hospitalization ending in death can have no readmission.
            visit = visit.loc[visit.hospital_expire_flag == 0]
            if len(disease_label):
                hids = disease_cohort.extract_diag_cohort(visit['hadm_id'], disease_label, mimic4_path)
                visit = visit[visit['hadm_id'].isin(hids['hadm_id'])]
                print("[ READMISSION DUE TO " + disease_label + " ]")

    pts = pd.read_csv(
        mimic4_path + "hosp/patients.csv.gz", compression='gzip', header=0, index_col=None, usecols=[group_col, 'anchor_year', 'anchor_age', 'anchor_year_group', 'dod', 'gender']
    )
    pts['yob'] = pts['anchor_year'] - pts['anchor_age']  # year of birth, kept for adult filtering
    # min_valid_year: the year corresponding to the patient's 2017-2019 anchor
    # group; used downstream to exclude prediction windows that could fall
    # outside the dataset's 2008-2019 range.
    pts['min_valid_year'] = pts['anchor_year'] + (2019 - pts['anchor_year_group'].str.slice(start=-4).astype(int))

    if use_ICU:
        visit_pts = visit[[group_col, visit_col, adm_visit_col, admit_col, disch_col, 'los']].merge(
            pts[[group_col, 'anchor_year', 'anchor_age', 'yob', 'min_valid_year', 'dod', 'gender']], how='inner', left_on=group_col, right_on=group_col
        )
    else:
        visit_pts = visit[[group_col, visit_col, admit_col, disch_col, 'los']].merge(
            pts[[group_col, 'anchor_year', 'anchor_age', 'yob', 'min_valid_year', 'dod', 'gender']], how='inner', left_on=group_col, right_on=group_col
        )

    # MIMIC-IV anchor_age is the age at anchor_year; only adults are kept.
    visit_pts['Age'] = visit_pts['anchor_age']
    visit_pts = visit_pts.loc[visit_pts['Age'] >= 18]

    # Demographics (insurance, race) only exist on the admissions table.
    eth = pd.read_csv(mimic4_path + "hosp/admissions.csv.gz", compression='gzip', header=0, usecols=['hadm_id', 'insurance', 'race'], index_col=None)
    visit_pts = visit_pts.merge(eth, how='inner', left_on='hadm_id', right_on='hadm_id')

    if use_ICU:
        return visit_pts[[group_col, visit_col, adm_visit_col, admit_col, disch_col, 'los', 'min_valid_year', 'dod', 'Age', 'gender', 'race', 'insurance']]
    else:
        return visit_pts.dropna(subset=['min_valid_year'])[[group_col, visit_col, admit_col, disch_col, 'los', 'min_valid_year', 'dod', 'Age', 'gender', 'race', 'insurance']]
95
+
96
+
97
def validate_row(row, ctrl, invalid, max_year, disch_col, valid_col, gap):
    """Route one visit row to ctrl or invalid based on its prediction window.

    A row is 'invalid' when the end of its prediction window (discharge + gap)
    is potentially outside the dataset's 2008-2019 range: its year must exceed
    both the patient's maximum observed year AND the year corresponding to the
    patient's 2017-2019 anchor group. Invalid rows are only *potentially* out
    of range because MIMIC-IV de-identifies dates into 3-year ranges.

    Returns the updated (ctrl, invalid) dataframes.
    """
    pred_year = (row[disch_col] + gap).year
    # DataFrame.append was removed in pandas 2.0; concatenate the row
    # (as a one-row frame) instead.
    if max_year < pred_year and pred_year > row[valid_col]:
        invalid = pd.concat([invalid, row.to_frame().T])
    else:
        ctrl = pd.concat([ctrl, row.to_frame().T])
    return ctrl, invalid
112
+
113
+
114
def partition_by_los(df:pd.DataFrame, los:int, group_col:str, visit_col:str, admit_col:str, disch_col:str, valid_col:str):
    """Label each visit 1 if its length of stay exceeds `los` days, else 0.

    Visits missing an admit/discharge timestamp or a los value are returned
    separately as invalid. `valid_col` is accepted for signature symmetry with
    the other partition_* helpers but is not used here.

    Returns (cohort, invalid).
    """
    invalid = df.loc[(df[admit_col].isna()) | (df[disch_col].isna()) | (df['los'].isna())]
    cohort = df.loc[(~df[admit_col].isna()) & (~df[disch_col].isna()) & (~df['los'].isna())]

    # .copy() so the label assignments below act on independent frames instead
    # of raising SettingWithCopyWarning on slices of `cohort`.
    pos_cohort = cohort[cohort['los'] > los].copy()
    neg_cohort = cohort[cohort['los'] <= los].copy()
    neg_cohort = neg_cohort.fillna(0)
    pos_cohort = pos_cohort.fillna(0)

    pos_cohort['label'] = 1
    neg_cohort['label'] = 0

    cohort = pd.concat([pos_cohort, neg_cohort], axis=0)
    cohort = cohort.sort_values(by=[group_col, admit_col])
    print("[ LOS LABELS FINISHED ]")
    return cohort, invalid
134
+
135
+
136
def partition_by_readmit(df:pd.DataFrame, gap:datetime.timedelta, group_col:str, visit_col:str, admit_col:str, disch_col:str, valid_col:str):
    """Label each visit by whether a readmission occurs within `gap` of discharge.

    A visit is a positive case when the same patient has another visit whose
    admit time falls after this visit's discharge time and within `gap` of it.

    Returns (case, ctrl, invalid): readmitted visits, non-readmitted visits,
    and an (always empty) frame kept for interface compatibility with the
    other partition_* helpers.
    """
    case_rows = []            # visits followed by a readmission within the gap
    ctrl_rows = []            # visits with no readmission within the gap
    invalid = pd.DataFrame()  # nothing is excluded by this labelling pass

    # Sort by subject and admit time so each patient's chronologically last
    # visit is the final row of its group.
    grouped = df.sort_values(by=[group_col, admit_col]).groupby(group_col)
    for subject, group in tqdm(grouped):
        if group.shape[0] <= 1:
            # A patient with a single visit can have no readmission.
            ctrl_rows.append(group.iloc[0])
        else:
            for idx in range(group.shape[0] - 1):
                visit_time = group.iloc[idx][disch_col]
                # Any later visit starting within `gap` of this discharge
                # counts as a readmission for this visit.
                readmitted = group.loc[
                    (group[admit_col] > visit_time) &
                    (group[admit_col] - visit_time <= gap)
                ].shape[0] >= 1
                (case_rows if readmitted else ctrl_rows).append(group.iloc[idx])
            # The chronologically last visit has no subsequent visit, so it
            # cannot be a readmission case.
            ctrl_rows.append(group.iloc[-1])

    # DataFrame.append was removed in pandas 2.0; build the result frames from
    # the accumulated rows in one shot (also avoids quadratic re-copying).
    case = pd.DataFrame(case_rows)
    ctrl = pd.DataFrame(ctrl_rows)
    print("[ READMISSION LABELS FINISHED ]")
    return case, ctrl, invalid
178
+
179
+
180
def partition_by_mort(df:pd.DataFrame, group_col:str, visit_col:str, admit_col:str, disch_col:str, death_col:str):
    """Label each visit 1 if the patient's death falls within the visit window.

    A positive label requires death_col to be non-null and to lie between
    admit_col and disch_col (inclusive). Visits missing either timestamp are
    returned separately as invalid.

    Returns (cohort, invalid).
    """
    invalid = df.loc[(df[admit_col].isna()) | (df[disch_col].isna())]
    cohort = df.loc[(~df[admit_col].isna()) & (~df[disch_col].isna())]

    # .copy() so the assignments below act on independent frames instead of
    # raising SettingWithCopyWarning on slices of `cohort`.
    pos_cohort = cohort[~cohort[death_col].isna()].copy()
    neg_cohort = cohort[cohort[death_col].isna()].copy()
    neg_cohort = neg_cohort.fillna(0)
    pos_cohort = pos_cohort.fillna(0)
    pos_cohort[death_col] = pd.to_datetime(pos_cohort[death_col])

    # Death must fall inside the visit window for a positive label; a recorded
    # death outside the window stays 0.
    pos_cohort['label'] = np.where((pos_cohort[death_col] >= pos_cohort[admit_col]) & (pos_cohort[death_col] <= pos_cohort[disch_col]), 1, 0)
    pos_cohort['label'] = pos_cohort['label'].astype("Int32")
    neg_cohort['label'] = 0

    cohort = pd.concat([pos_cohort, neg_cohort], axis=0)
    cohort = cohort.sort_values(by=[group_col, admit_col])
    print("[ MORTALITY LABELS FINISHED ]")
    return cohort, invalid
214
+
215
+
216
def get_case_ctrls(df:pd.DataFrame, gap:int, group_col:str, visit_col:str, admit_col:str, disch_col:str, valid_col:str, death_col:str, use_mort=False, use_admn=False, use_los=False) -> pd.DataFrame:
    """Dispatch cohort labelling according to the task flags set by extract().

    Parameters:
        df: dataframe with per-visit patient data
        gap: readmission window in days, or the LOS threshold in days
        group_col: patient identifier used to group visits (normally subject_id)
        visit_col: visit identifier (normally hadm_id or stay_id)
        admit_col: visit start timestamp column (normally admittime or intime)
        disch_col: visit end timestamp column (normally dischtime or outtime)
        valid_col: column with the patient's year matching the 2017-2019 anchor range
        death_col: date-of-death column
        use_mort / use_admn / use_los: exactly one selects the labelling strategy

    Returns (labelled_cohort, invalid_records) from the chosen partitioner.
    """
    if use_mort:
        return partition_by_mort(df, group_col, visit_col, admit_col, disch_col, death_col)

    if use_admn:
        # Readmission distances are compared against datetime columns, so the
        # integer day count becomes a timedelta first.
        window = datetime.timedelta(days=gap)
        case, ctrl, invalid = partition_by_readmit(df, window, group_col, visit_col, admit_col, disch_col, valid_col)

        # Readmitted visits are labelled 1, the rest 0.
        case['label'] = np.ones(case.shape[0]).astype(int)
        ctrl['label'] = np.zeros(ctrl.shape[0]).astype(int)
        return pd.concat([case, ctrl], axis=0), invalid

    if use_los:
        # death_col is deliberately passed in the valid_col slot here;
        # partition_by_los does not use that argument.
        return partition_by_los(df, gap, group_col, visit_col, admit_col, disch_col, death_col)
250
+
251
+
252
def extract_data(use_ICU:str, label:str, time:int, icd_code:str, root_dir, disease_label, cohort_output=None, summary_output=None):
    """Extract a labelled cohort from MIMIC-IV and save it, plus a text summary, to disk.

    Parameters:
        use_ICU: "ICU" to build the cohort from icu/icustays, any other value for hosp/admissions
        label: prediction task; must be exactly 'Mortality', 'Readmission' or 'Length of Stay'
        time: task parameter (readmission gap in days / LOS threshold in days)
        icd_code: admission-diagnosis ICD filter, or "No Disease Filter" for none
        root_dir: directory holding the MIMIC-IV tables and the data/ output tree
        disease_label: optional ICD cohort restricting readmission labels (empty string for none)
        cohort_output: name of labelled cohort output file (auto-generated when falsy)
        summary_output: name of summary output file (auto-generated when falsy)

    Returns the cohort output file name (suffixed with icd_code when filtered).
    """
    print("===========MIMIC-IV v2.0============")
    # Default output names encode the full task configuration.
    if not cohort_output:
        cohort_output="cohort_" + use_ICU.lower() + "_" + label.lower().replace(" ", "_") + "_" + str(time) + "_" + disease_label
    if not summary_output:
        summary_output="summary_" + use_ICU.lower() + "_" + label.lower().replace(" ", "_") + "_" + str(time) + "_" + disease_label

    if icd_code=="No Disease Filter":
        if len(disease_label):
            print(f"EXTRACTING FOR: | {use_ICU.upper()} | {label.upper()} DUE TO {disease_label.upper()} | {str(time)} | ")
        else:
            print(f"EXTRACTING FOR: | {use_ICU.upper()} | {label.upper()} | {str(time)} |")
    else:
        if len(disease_label):
            print(f"EXTRACTING FOR: | {use_ICU.upper()} | {label.upper()} DUE TO {disease_label.upper()} | ADMITTED DUE TO {icd_code.upper()} | {str(time)} |")
        else:
            print(f"EXTRACTING FOR: | {use_ICU.upper()} | {label.upper()} | ADMITTED DUE TO {icd_code.upper()} | {str(time)} |")

    cohort, invalid = None, None   # final labelled output / rejected records
    pts = None                     # per-visit patient table from get_visit_pts
    ICU=use_ICU                    # keep the original string for the summary text
    group_col, visit_col, admit_col, disch_col, death_col, adm_visit_col = "", "", "", "", "", ""

    # Exact string matches decide the task; e.g. '30 Day Readmission' would NOT
    # enable use_admn — callers must pass label='Readmission' with time=30.
    use_mort = label == "Mortality"
    use_admn=label=='Readmission'
    los=0
    use_los= label=='Length of Stay'

    if use_los:
        los=time
    use_ICU = use_ICU == "ICU"     # from here on use_ICU is a boolean
    use_disease=icd_code!="No Disease Filter"

    # Column names differ between the icustays and admissions tables.
    if use_ICU:
        group_col='subject_id'
        visit_col='stay_id'
        admit_col='intime'
        disch_col='outtime'
        death_col='dod'
        adm_visit_col='hadm_id'
    else:
        group_col='subject_id'
        visit_col='hadm_id'
        admit_col='admittime'
        disch_col='dischtime'
        death_col='dod'

    pts = get_visit_pts(
        mimic4_path=root_dir,
        group_col=group_col,
        visit_col=visit_col,
        admit_col=admit_col,
        disch_col=disch_col,
        adm_visit_col=adm_visit_col,
        use_mort=use_mort,
        use_los=use_los,
        los=los,
        use_admn=use_admn,
        disease_label=disease_label,
        use_ICU=use_ICU
    )

    # Columns written to the cohort csv; 'ethnicity' exists because the 'race'
    # column is renamed just before saving.
    cols = [group_col, visit_col, admit_col, disch_col, 'Age','gender','ethnicity','insurance','label']

    if use_mort:
        cols.append(death_col)
        cohort, invalid = get_case_ctrls(pts, None, group_col, visit_col, admit_col, disch_col,'min_valid_year', death_col, use_mort=True,use_admn=False,use_los=False)
    elif use_admn:
        interval = time  # readmission gap in days
        cohort, invalid = get_case_ctrls(pts, interval, group_col, visit_col, admit_col, disch_col,'min_valid_year', death_col, use_mort=False,use_admn=True,use_los=False)
    elif use_los:
        cohort, invalid = get_case_ctrls(pts, los, group_col, visit_col, admit_col, disch_col,'min_valid_year', death_col, use_mort=False,use_admn=False,use_los=True)

    if use_ICU:
        cols.append(adm_visit_col)

    if use_disease:
        # Restrict the cohort to admissions carrying the requested ICD code and
        # tag the output file names with it.
        hids=disease_cohort.extract_diag_cohort(cohort['hadm_id'],icd_code,root_dir+"/mimiciv/2.0/")
        cohort=cohort[cohort['hadm_id'].isin(hids['hadm_id'])]
        cohort_output=cohort_output+"_"+icd_code
        summary_output=summary_output+"_"+icd_code

    # save output
    cohort=cohort.rename(columns={"race":"ethnicity"})
    cohort[cols].to_csv(root_dir+"/data/cohort/"+cohort_output+".csv.gz", index=False, compression='gzip')
    print("[ COHORT SUCCESSFULLY SAVED ]")

    summary = "\n".join([
        f"{label} FOR {ICU} DATA",
        f"# Admission Records: {cohort.shape[0]}",
        f"# Patients: {cohort[group_col].nunique()}",
        f"# Positive cases: {cohort[cohort['label']==1].shape[0]}",
        f"# Negative cases: {cohort[cohort['label']==0].shape[0]}"
    ])

    # NOTE(review): the cohort csv goes under root_dir+"/data/cohort/" while the
    # summary goes under "./data/cohort/" — confirm whether these are meant to
    # be the same directory.
    with open(f"./data/cohort/{summary_output}.txt", "w") as f:
        f.write(summary)

    print("[ SUMMARY SUCCESSFULLY SAVED ]")
    print(summary)

    return cohort_output
372
+
373
+
374
if __name__ == '__main__':
    # Root directory containing the MIMIC-IV tables and the data/ output folder.
    # NOTE(review): adjust to the local MIMIC-IV installation before running.
    root_dir = "./"

    response = input('Extract all datasets? (y/n)').strip().lower()
    if response == 'y':
        # extract_data requires (use_ICU, label, time, icd_code, root_dir,
        # disease_label); the previous two-argument calls raised TypeError.
        # Labels such as "30 Day Readmission" also never matched the exact
        # label == 'Readmission' check inside extract_data — the window is
        # passed via `time` instead.
        extract_data("ICU", "Mortality", 0, "No Disease Filter", root_dir, "")
        extract_data("Non-ICU", "Mortality", 0, "No Disease Filter", root_dir, "")

        for days in (30, 60, 120):
            extract_data("ICU", "Readmission", days, "No Disease Filter", root_dir, "")
            extract_data("Non-ICU", "Readmission", days, "No Disease Filter", root_dir, "")