thbndi committed on
Commit
2905dfe
1 Parent(s): 69ba4e3

Update Mimic4Dataset.py

Browse files
Files changed (1) hide show
  1. Mimic4Dataset.py +65 -110
Mimic4Dataset.py CHANGED
@@ -3,43 +3,61 @@ import json
3
  import os
4
  import pandas as pd
5
  import datasets
 
 
6
 
7
  _DESCRIPTION = """\
8
  Dataset for mimic4 data, by default for the Mortality task.
9
  Available tasks are: Mortality, Length of Stay, Readmission, Phenotype.
10
  The data is extracted from the mimic4 database using this pipeline: 'https://github.com/healthylaife/MIMIC-IV-Data-Pipeline/tree/main'
11
- #TODO ADD DESCRIPTION COHORTS
12
  """
13
 
14
  _HOMEPAGE = "https://huggingface.co/datasets/thbndi/Mimic4Dataset"
15
  _CITATION = "https://proceedings.mlr.press/v193/gupta22a.html"
 
16
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
  class Mimic4Dataset(datasets.GeneratorBasedBuilder):
18
  VERSION = datasets.Version("1.0.0")
19
 
20
  BUILDER_CONFIGS = [
21
- datasets.BuilderConfig(
22
  name="Phenotype",
23
  version=VERSION,
24
- data_dir=os.path.abspath("./data/csv/Phenotype"),
25
  description="Dataset for mimic4 Phenotype task",
26
  ),
27
- datasets.BuilderConfig(
28
  name="Readmission",
29
  version=VERSION,
30
- data_dir=os.path.abspath("./data/csv/Readmission"),
31
  description="Dataset for mimic4 Readmission task",
32
  ),
33
- datasets.BuilderConfig(
34
  name="Length of Stay",
35
  version=VERSION,
36
- data_dir=os.path.abspath("./data/csv/Lenght_of_Stay"),
37
  description="Dataset for mimic4 Length of Stay task",
38
  ),
39
- datasets.BuilderConfig(
40
  name="Mortality",
41
  version=VERSION,
42
- data_dir=os.path.abspath("./data/csv/Mortality"),
43
  description="Dataset for mimic4 Mortality task",
44
  ),
45
  ]
@@ -53,33 +71,25 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
53
  {
54
  "gender": datasets.Value("string"),
55
  "ethnicity": datasets.Value("string"),
56
- "insurance": datasets.Value("string"),
57
  "age": datasets.Value("int32"),
58
- "COND": datasets.Sequence(
59
- {
60
- "cond" : datasets.Value("string"),
61
- "value" : datasets.Value("int32")
62
- }),
63
  "MEDS": datasets.Sequence(
64
  {
65
- "med" : datasets.Value("string"),
66
- "value" : datasets.Sequence(datasets.Value("int32"))
 
67
  }),
68
  "PROC": datasets.Sequence(
69
- {
70
- "proc" : datasets.Value("string"),
71
- "value" : datasets.Sequence(datasets.Value("int32"))
72
- }),
73
  "CHART": datasets.Sequence(
74
  {
75
- "chart" : datasets.Value("string"),
76
- "value" : datasets.Sequence(datasets.Value("int32"))
77
  }),
78
  "OUT": datasets.Sequence(
79
- {
80
- "out" : datasets.Value("string"),
81
- "value" : datasets.Sequence(datasets.Value("int32"))
82
- }),
83
  "label": datasets.ClassLabel(names=["0", "1"]),
84
  }
85
  )
@@ -88,100 +98,45 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
88
  features=features,
89
  homepage=_HOMEPAGE,
90
  citation=_CITATION,
 
91
  )
92
 
93
  def _split_generators(self, dl_manager):
94
- data_dir = self.config.data_dir
95
-
96
- # Collect file paths for all CSV files in the subfolders
97
- train_files = []
98
- for split_name in os.listdir(data_dir):
99
- split_dir = os.path.join(data_dir, split_name)
100
- if os.path.isdir(split_dir):
101
- for file_name in os.listdir(split_dir):
102
- if file_name.endswith(".csv"):
103
- file_path = os.path.join(split_dir, file_name)
104
- train_files.append(file_path)
105
-
106
- # Return a single SplitGenerator for the train split
107
  return [
108
- datasets.SplitGenerator(
109
- name=datasets.Split.TRAIN,
110
- gen_kwargs={
111
- "filepaths": train_files,
112
- "split": datasets.Split.TRAIN,
113
- },
114
- )
115
  ]
116
 
117
 
118
- def _generate_examples(self, filepaths, split):
119
- #each 3 successive files are the same admission (demographic, static, dynamic)
120
- labels = pd.read_csv("./data/csv/"+self.config.name +"labels.csv")
121
- labels_dict = dict(zip(labels['stay_id'], labels['label']))
122
- for i in range(0, len(filepaths), 3):
123
- file1, file2, file3 = filepaths[i:i+3]
124
- static_file = file1 if "static.csv" in file1 else file2 if "static.csv" in file2 else file3
125
- demographic_file = file1 if "demo.csv" in file1 else file2 if "demo.csv" in file2 else file3
126
- dynamic_file = file1 if "dynamic.csv" in file1 else file2 if "dynamic.csv" in file2 else file3
127
-
128
- #dynamic
129
- dyn = pd.read_csv(dynamic_file, header=[0, 1])
130
- meds = dyn['MEDS']
131
- proc = dyn['PROC']
132
- chart = dyn['CHART']
133
- out = dyn['OUT']
134
-
135
- #static
136
- stat = pd.read_csv(static_file, header=[0, 1])
137
- stat = stat['COND']
138
-
139
- #demo
140
- demo = pd.read_csv(demographic_file, header=0)
141
-
142
- #dict
143
- stat_dict = stat.iloc[0].to_dict()
144
- demo_dict = demo.iloc[0].to_dict()
145
- meds_dict = meds.to_dict(orient='list')
146
- proc_dict = proc.to_dict(orient='list')
147
- chart_dict = chart.to_dict(orient='list')
148
- out_dict = out.to_dict(orient='list')
149
-
150
- stat2 = {'cond': list(stat_dict.keys()), 'value': list(stat_dict.values())}
151
- meds = {'med': list(meds_dict.keys()), 'value': list(meds_dict.values())}
152
- procs = {'proc': list(proc_dict.keys()), 'value': list(proc_dict.values())}
153
- charts = {'chart': list(chart_dict.keys()), 'value': list(chart_dict.values())}
154
- outs = {'out': list(out_dict.keys()), 'value': list(out_dict.values())}
155
 
156
- #get stay_id which is the name of the folder containing the files
157
- stay_id = int(demographic_file.split("/")[-2])
158
- #get the label
159
- label = int(labels_dict[stay_id])
160
-
161
- yield stay_id, {
162
- "gender" : demo_dict['gender'],
163
- "ethnicity" : demo_dict['ethnicity'],
164
- "insurance" : demo_dict['insurance'],
165
- "age" : demo_dict['Age'],
166
  "MEDS" : {
167
- "med" : meds['med'],
168
- "value" : meds['value']
169
- },
170
- "PROC" : {
171
- "proc" : procs['proc'],
172
- "value" : procs['value']
173
  },
 
174
  "CHART" : {
175
- "chart" : charts['chart'],
176
- "value" : charts['value']
177
- },
178
- "OUT" : {
179
- "out" : outs['out'],
180
- "value" : outs['value']
181
- },
182
- "COND" : {
183
- "cond" : stat2['cond'],
184
- "value" : stat2['value']
185
  },
 
 
186
  "label" : label
187
  }
 
3
  import os
4
  import pandas as pd
5
  import datasets
6
+ import pickle
7
+ #import cohort
8
 
9
  _DESCRIPTION = """\
10
  Dataset for mimic4 data, by default for the Mortality task.
11
  Available tasks are: Mortality, Length of Stay, Readmission, Phenotype.
12
  The data is extracted from the mimic4 database using this pipeline: 'https://github.com/healthylaife/MIMIC-IV-Data-Pipeline/tree/main'
13
+ mimic path should have this form :
14
  """
15
 
16
  _HOMEPAGE = "https://huggingface.co/datasets/thbndi/Mimic4Dataset"
17
  _CITATION = "https://proceedings.mlr.press/v193/gupta22a.html"
18
+ _GITHUB = "https://github.com/healthylaife/MIMIC-IV-Data-Pipeline/tree/main"
19
 
20
+ class Mimic4DatasetConfig(datasets.BuilderConfig):
21
+ """BuilderConfig for Mimic4Dataset."""
22
+
23
+ def __init__(
24
+ self,
25
+ mimic_path,
26
+ #config,
27
+ **kwargs,
28
+ ):
29
+ super(Mimic4DatasetConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
30
+ self.mimic_path = mimic_path
31
+ #self.config = config
32
+ #cohort.task_cohort(self.task,self.mimic_path)
33
+
34
+
35
  class Mimic4Dataset(datasets.GeneratorBasedBuilder):
36
  VERSION = datasets.Version("1.0.0")
37
 
38
  BUILDER_CONFIGS = [
39
+ Mimic4DatasetConfig(
40
  name="Phenotype",
41
  version=VERSION,
42
+ data_dir=os.path.abspath("./data/dict"),
43
  description="Dataset for mimic4 Phenotype task",
44
  ),
45
+ Mimic4DatasetConfig(
46
  name="Readmission",
47
  version=VERSION,
48
+ data_dir=os.path.abspath("./data/dict"),
49
  description="Dataset for mimic4 Readmission task",
50
  ),
51
+ Mimic4DatasetConfig(
52
  name="Length of Stay",
53
  version=VERSION,
54
+ data_dir=os.path.abspath("./data/dict"),
55
  description="Dataset for mimic4 Length of Stay task",
56
  ),
57
+ Mimic4DatasetConfig(
58
  name="Mortality",
59
  version=VERSION,
60
+ data_dir=os.path.abspath("./data/dict"),
61
  description="Dataset for mimic4 Mortality task",
62
  ),
63
  ]
 
71
  {
72
  "gender": datasets.Value("string"),
73
  "ethnicity": datasets.Value("string"),
 
74
  "age": datasets.Value("int32"),
75
+ "COND": datasets.Sequence(datasets.Value("string")),
 
 
 
 
76
  "MEDS": datasets.Sequence(
77
  {
78
+ "signal" : { datasets.Value("int32") : datasets.Sequence(datasets.Value("int32")) },
79
+ "rate" : { datasets.Value("int32") : datasets.Sequence(datasets.Value("int32")) },
80
+ "amount" : { datasets.Value("int32") : datasets.Sequence(datasets.Value("int32")) }
81
  }),
82
  "PROC": datasets.Sequence(
83
+ {datasets.Value("int32") : datasets.Sequence(datasets.Value("int32"))}
84
+ ),
 
 
85
  "CHART": datasets.Sequence(
86
  {
87
+ "signal" : { datasets.Value("int32") : datasets.Sequence(datasets.Value("int32")) },
88
+ "val" : { datasets.Value("int32") : datasets.Sequence(datasets.Value("int32")) }
89
  }),
90
  "OUT": datasets.Sequence(
91
+ {datasets.Value("int32") : datasets.Sequence(datasets.Value("int32"))}
92
+ ),
 
 
93
  "label": datasets.ClassLabel(names=["0", "1"]),
94
  }
95
  )
 
98
  features=features,
99
  homepage=_HOMEPAGE,
100
  citation=_CITATION,
101
+ github=_GITHUB,
102
  )
103
 
104
  def _split_generators(self, dl_manager):
105
+ data_dir = self.config.data_dir + "/dataDic"
 
 
 
 
 
 
 
 
 
 
 
 
106
  return [
107
+ datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir}),
 
 
 
 
 
 
108
  ]
109
 
110
 
111
+ def _generate_examples(self, filepath):
112
+ with open(filepath, 'rb') as fp:
113
+ dataDic = pickle.load(fp)
114
+ for hid, data in dataDic.items():
115
+ proc_features = data['Proc']
116
+ chart_features = data['Chart']
117
+ meds_features = data['Med']
118
+ out_features = data['Out']
119
+ cond_features = data['Cond']['fids']
120
+ eth= data['ethnicity']
121
+ age = data['age']
122
+ gender = data['gender']
123
+ label = data['label']
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
124
 
125
+ yield hid, {
126
+ "gender" : gender,
127
+ "ethnicity" : eth,
128
+ "age" : age,
 
 
 
 
 
 
129
  "MEDS" : {
130
+ "signal" : meds_features['signal'],
131
+ "rate" : meds_features['rate'],
132
+ "amount" : meds_features['amount']
 
 
 
133
  },
134
+ "PROC" : proc_features,
135
  "CHART" : {
136
+ "signal" : chart_features['signal'],
137
+ "val" : chart_features['val']
 
 
 
 
 
 
 
 
138
  },
139
+ "OUT" : out_features,
140
+ "COND" : cond_features,
141
  "label" : label
142
  }