# Mimic4Dataset.py
import csv
import json
import os
import pandas as pd
import datasets
import pickle
#import cohort
_DESCRIPTION = """\
Dataset for MIMIC-IV data, by default for the Mortality task.
Available tasks are: Mortality, Length of Stay, Readmission, Phenotype.
The data is extracted from the MIMIC-IV database using this pipeline: 'https://github.com/healthylaife/MIMIC-IV-Data-Pipeline/tree/main'
The mimic_path should have this form:
"""
_HOMEPAGE = "https://huggingface.co/datasets/thbndi/Mimic4Dataset"
_CITATION = "https://proceedings.mlr.press/v193/gupta22a.html"
_GITHUB = "https://github.com/healthylaife/MIMIC-IV-Data-Pipeline/tree/main"
class Mimic4DatasetConfig(datasets.BuilderConfig):
"""BuilderConfig for Mimic4Dataset."""
def __init__(
self,
mimic_path,
#config,
**kwargs,
):
super().__init__(**kwargs)
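        # Root path of the raw MIMIC-IV files (see _DESCRIPTION). load_dataset forwards
        # it here through its config kwargs; it appears intended for the cohort-extraction
        # call that is commented out below.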
        self.mimic_path = mimic_path
#self.config = config
#cohort.task_cohort(self.task,self.mimic_path)
class Mimic4Dataset(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
Mimic4DatasetConfig(
name="Phenotype",
version=VERSION,
data_dir=os.path.abspath("./data/dict/cohort_icu_readmission_30_I50"),
description="Dataset for mimic4 Phenotype task",
mimic_path = None
),
Mimic4DatasetConfig(
name="Readmission",
version=VERSION,
data_dir=os.path.abspath("./data/dict"),
description="Dataset for mimic4 Readmission task",
mimic_path = None
),
Mimic4DatasetConfig(
name="Length of Stay",
version=VERSION,
data_dir=os.path.abspath("./data/dict"),
description="Dataset for mimic4 Length of Stay task",
mimic_path = None
),
Mimic4DatasetConfig(
name="Mortality",
version=VERSION,
data_dir=os.path.abspath("./data/dict"),
description="Dataset for mimic4 Mortality task",
mimic_path = None
),
]
DEFAULT_CONFIG_NAME = "Mortality"
def _info(self):
features = datasets.Features(
{
"gender": datasets.Value("string"),
"ethnicity": datasets.Value("string"),
"age": datasets.Value("int32"),
"COND": datasets.Sequence(datasets.Value("string")),
"MEDS": datasets.Sequence(
{
"signal" : { datasets.Value("int32") : datasets.Sequence(datasets.Value("int32")) },
"rate" : { datasets.Value("int32") : datasets.Sequence(datasets.Value("int32")) },
"amount" : { datasets.Value("int32") : datasets.Sequence(datasets.Value("int32")) }
}),
"PROC": datasets.Sequence(
{datasets.Value("int32") : datasets.Sequence(datasets.Value("int32"))}
),
"CHART": datasets.Sequence(
{
"signal" : { datasets.Value("int32") : datasets.Sequence(datasets.Value("int32")) },
"val" : { datasets.Value("int32") : datasets.Sequence(datasets.Value("int32")) }
}),
"OUT": datasets.Sequence(
{datasets.Value("int32") : datasets.Sequence(datasets.Value("int32"))}
),
"label": datasets.ClassLabel(names=["0", "1"]),
}
)
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            # The extraction-pipeline repository is kept in the module-level _GITHUB
            # constant; datasets.DatasetInfo has no corresponding field.
        )
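
    # Rough shape of one yielded example (a sketch inferred from _generate_examples and
    # the pipeline's pickled output; the inner dictionaries of MEDS/PROC/CHART/OUT map an
    # item id to its per-timestep value sequence, and all concrete values shown here are
    # hypothetical):
    #
    #     {
    #         "gender": "M", "ethnicity": "WHITE", "age": 63,
    #         "COND": ["I50", ...],
    #         "MEDS": {"signal": {...}, "rate": {...}, "amount": {...}},
    #         "PROC": {...},
    #         "CHART": {"signal": {...}, "val": {...}},
    #         "OUT": {...},
    #         "label": 0,
    #     }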
    def _split_generators(self, dl_manager):
        # dataDic is the pickled cohort dictionary written by the extraction pipeline
        # into the task-specific data_dir; only a train split is generated from it.
        data_dir = os.path.join(self.config.data_dir, "dataDic")
        mimic = self.config.mimic_path
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir}),
        ]
    def _generate_examples(self, filepath):
        # The pickle holds a dictionary keyed by stay/admission id (hid), with one
        # feature dictionary per visit as produced by the MIMIC-IV pipeline.
        with open(filepath, 'rb') as fp:
            dataDic = pickle.load(fp)
for hid, data in dataDic.items():
proc_features = data['Proc']
chart_features = data['Chart']
meds_features = data['Med']
out_features = data['Out']
cond_features = data['Cond']['fids']
eth= data['ethnicity']
age = data['age']
gender = data['gender']
label = data['label']
yield hid, {
"gender" : gender,
"ethnicity" : eth,
"age" : age,
"MEDS" : {
"signal" : meds_features['signal'],
"rate" : meds_features['rate'],
"amount" : meds_features['amount']
},
"PROC" : proc_features,
"CHART" : {
"signal" : chart_features['signal'],
"val" : chart_features['val']
},
"OUT" : out_features,
"COND" : cond_features,
"label" : label
}
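

# Quick local check (a sketch): it runs only when this file is executed directly and
# assumes the extraction pipeline has already written the pickled cohort under
# ./data/dict; it is never executed when the builder is imported by `datasets`.
if __name__ == "__main__":
    # "Mortality" is the default config; depending on the installed `datasets` version,
    # trust_remote_code=True may additionally be required for a script-based builder.
    dataset = datasets.load_dataset(os.path.abspath(__file__), "Mortality")
    example = dataset["train"][0]
    print(example["gender"], example["age"], example["label"])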