Update Mimic4Dataset.py

Mimic4Dataset.py  CHANGED  (+37 -12)
@@ -767,8 +767,8 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
     ###########################################################ENCODED##################################################################
 
     def _info_encoded(self,X_encoded):
-
-        features = datasets.Features(
+        keys=list(set(key for item in X_encoded.values() for key in item.keys()))
+        features = datasets.Features({key: self.map_dtype(X_encoded[key].dtype) for key in keys})
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
             features=features,
@@ -790,7 +790,8 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
         for i, row in df.iterrows():
             yield i, row.to_dict()
     #############################################################################################################################
-    def _info_deep(self,
+    def _info_deep(self,X_deep_dict):
+        keys= list(X_deep_dict.keys())
         columns = {col: self.map_dtype(X_deep[col].dtype) for col in X_deep.columns}
         features = datasets.Features(columns)
         return datasets.DatasetInfo(
@@ -804,15 +805,34 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
         data_dir = "./data/dict/"+self.config.name.replace(" ","_")
 
         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir+'/X_train_deep.
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_dir+'/X_val_deep.
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir+'/X_test_deep.
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir+'/X_train_deep.pkl'}),
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_dir+'/X_val_deep.pkl'}),
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir+'/X_test_deep.pkl'}),
         ]
 
     def _generate_examples_deep(self, filepath):
-
-
-
+        with open(filepath, 'rb') as fp:
+            dico = pickle.load(fp)
+        for key, data in dico.items():
+            proc_features = data['proc']
+            chart_features = data['chart']
+            meds_features = data['meds']
+            out_features = data['out']
+            cond_features = data['stat']
+            demo= data['demo']
+            label = data['y']
+            lab=data['lab']
+
+            yield int(key), {
+                'proc': proc_features,
+                'chart': chart_features,
+                'meds': meds_features,
+                'out': out_features,
+                'stat': cond_features,
+                'demo': demo,
+                'lab': lab,
+                'y': label
+            }
 
     #############################################################################################################################
     def _info(self):
@@ -827,14 +847,19 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
             X_test_encoded.to_csv(self.path+"/X_test_encoded.csv", index=False)
             X_val_encoded.to_csv(self.path+"/X_val_encoded.csv", index=False)
             return self._info_encoded(X_train_encoded)
+
        elif self.encoding == 'deep' :
            X_train_deep = generate_split_deep(self.path+'/train_data.pkl',self.config.name,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out)
            X_test_deep = generate_split_deep(self.path+'/test_data.pkl',self.config.name,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out)
            X_val_deep = generate_split_deep(self.path+'/val_data.pkl',self.config.name,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out)
 
-
-
-
+            with open(self.path+"/X_train_deep.pkl", 'wb') as f:
+                pickle.dump(X_train_deep, f)
+            with open(self.path+"/X_test_deep.pkl", 'wb') as f:
+                pickle.dump(X_test_deep, f)
+            with open(self.path+"/X_val_deep.pkl", 'wb') as f:
+                pickle.dump(X_val_deep, f)
+
            return self._info_deep(X_train_deep)
        else:
            return self._info_raw()
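For context, the new _generate_examples_deep reads one pickled dict per split and yields one example per stay key. A minimal sketch of a compatible file is below; the key set ('proc', 'chart', 'meds', 'out', 'stat', 'demo', 'lab', 'y') comes from the diff, while the file name and the toy list values are illustrative assumptions, not the real feature matrices produced by generate_split_deep.

    import pickle

    # Toy stand-in for one split produced by generate_split_deep (values are assumptions).
    example_split = {
        "0": {
            'proc': [0, 1, 0], 'chart': [36.6, 80.0], 'meds': [1, 0], 'out': [0],
            'stat': [1, 0, 1], 'demo': [65, 1], 'lab': [7.4, 140.0], 'y': 0,
        },
    }

    with open("X_train_deep.pkl", "wb") as f:      # hypothetical local path
        pickle.dump(example_split, f)

    # Reading it back mirrors the generator: one (int key, feature dict) pair per entry.
    with open("X_train_deep.pkl", "rb") as f:
        for key, data in pickle.load(f).items():
            print(int(key), sorted(data.keys()))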
|
|
767 |
###########################################################ENCODED##################################################################
|
768 |
|
769 |
def _info_encoded(self,X_encoded):
|
770 |
+
keys=list(set(key for item in X_encoded.values() for key in item.keys()))
|
771 |
+
features = datasets.Features({key: self.map_dtype(X_encoded[key].dtype) for key in keys})
|
772 |
return datasets.DatasetInfo(
|
773 |
description=_DESCRIPTION,
|
774 |
features=features,
|
|
|
790 |
for i, row in df.iterrows():
|
791 |
yield i, row.to_dict()
|
792 |
#############################################################################################################################
|
793 |
+
def _info_deep(self,X_deep_dict):
|
794 |
+
keys= list(X_deep_dict.keys())
|
795 |
columns = {col: self.map_dtype(X_deep[col].dtype) for col in X_deep.columns}
|
796 |
features = datasets.Features(columns)
|
797 |
return datasets.DatasetInfo(
|
|
|
805 |
data_dir = "./data/dict/"+self.config.name.replace(" ","_")
|
806 |
|
807 |
return [
|
808 |
+
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir+'/X_train_deep.pkl'}),
|
809 |
+
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_dir+'/X_val_deep.pkl'}),
|
810 |
+
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir+'/X_test_deep.pkl'}),
|
811 |
]
|
812 |
|
813 |
def _generate_examples_deep(self, filepath):
|
814 |
+
with open(filepath, 'rb') as fp:
|
815 |
+
dico = pickle.load(fp)
|
816 |
+
for key, data in dico.items():
|
817 |
+
proc_features = data['proc']
|
818 |
+
chart_features = data['chart']
|
819 |
+
meds_features = data['meds']
|
820 |
+
out_features = data['out']
|
821 |
+
cond_features = data['stat']
|
822 |
+
demo= data['demo']
|
823 |
+
label = data['y']
|
824 |
+
lab=data['lab']
|
825 |
+
|
826 |
+
yield int(key), {
|
827 |
+
'proc': proc_features,
|
828 |
+
'chart': chart_features,
|
829 |
+
'meds': meds_features,
|
830 |
+
'out': out_features,
|
831 |
+
'stat': cond_features,
|
832 |
+
'demo': demo,
|
833 |
+
'lab': lab,
|
834 |
+
'y': label
|
835 |
+
}
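One thing worth flagging in the diff: _info_deep now receives X_deep_dict and computes keys, but the unchanged next line still builds columns from X_deep.columns, which only exists for a DataFrame. A hedged sketch of one possible reconciliation is below; it assumes every per-stay field is a flat list of floats and 'y' is an integer label, and it is an illustration, not the code in this commit.

    import datasets

    def _info_deep_from_dict(X_deep_dict, description=""):
        # Take one sample dict (e.g. the entry for stay "0") to discover the field names.
        first = next(iter(X_deep_dict.values()))
        features = datasets.Features({
            key: (datasets.Value('int64') if key == 'y'
                  else datasets.Sequence(datasets.Value('float32')))   # assumes flat float fields
            for key in first.keys()
        })
        return datasets.DatasetInfo(description=description, features=features)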
|
836 |
|
837 |
#############################################################################################################################
|
838 |
def _info(self):
|
|
|
847 |
X_test_encoded.to_csv(self.path+"/X_test_encoded.csv", index=False)
|
848 |
X_val_encoded.to_csv(self.path+"/X_val_encoded.csv", index=False)
|
849 |
return self._info_encoded(X_train_encoded)
|
850 |
+
|
851 |
elif self.encoding == 'deep' :
|
852 |
X_train_deep = generate_split_deep(self.path+'/train_data.pkl',self.config.name,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out)
|
853 |
X_test_deep = generate_split_deep(self.path+'/test_data.pkl',self.config.name,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out)
|
854 |
X_val_deep = generate_split_deep(self.path+'/val_data.pkl',self.config.name,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out)
|
855 |
|
856 |
+
with open(self.path+"/X_train_deep.pkl", 'wb') as f:
|
857 |
+
pickle.dump(X_train_deep, f)
|
858 |
+
with open(self.path+"/X_test_deep.pkl", 'wb') as f:
|
859 |
+
pickle.dump(X_test_deep, f)
|
860 |
+
with open(self.path+"/X_val_deep.pkl", 'wb') as f:
|
861 |
+
pickle.dump(X_val_deep, f)
|
862 |
+
|
863 |
return self._info_deep(X_train_deep)
|
864 |
else:
|
865 |
return self._info_raw()
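Finally, a rough usage sketch under stated assumptions: the builder script is loaded from a local path, "Mortality" stands in for whatever config names the repository actually defines, and encoding="deep" assumes the builder forwards that keyword to self.encoding.

    from datasets import load_dataset

    ds = load_dataset(
        "./Mimic4Dataset.py",        # local path to this builder script
        "Mortality",                 # hypothetical config name
        encoding="deep",             # assumed builder kwarg backing self.encoding
        trust_remote_code=True,
    )
    print(ds["train"][0].keys())     # expected fields: proc, chart, meds, out, stat, demo, lab, y

With the 'deep' encoding, the splits are materialized as the X_*_deep.pkl files written in _info and then read back, one example per stay key, by _generate_examples_deep.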
|