|
import os |
|
import pandas as pd |
|
import datasets |
|
import sys |
|
import pickle |
|
import shutil |
|
from urllib.request import urlretrieve |
|
from sklearn.model_selection import train_test_split |
|
from sklearn.preprocessing import LabelEncoder |
|
import yaml |
|
from git import Repo |
|
from .dataset_utils import vocab, concat_data, generate_deep, generate_ml, generate_text |
|
from .task_cohort import create_cohort
|
_DESCRIPTION = """\ |
|
Dataset for mimic4 data, by default for the Mortality task. |
|
Available tasks are: Mortality, Length of Stay, Readmission, Phenotype. |
|
The data is extracted from the mimic4 database using this pipeline: 'https://github.com/healthylaife/MIMIC-IV-Data-Pipeline/tree/main' |
|
mimic path should have this form : "path/to/mimic4data/from/username/mimiciv/2.2" |
|
If you choose a Custom task provide a configuration file for the Time series. |
|
Currently working with Mimic-IV ICU Data. |
|
""" |
|
_BASE_URL = "https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main" |
|
_HOMEPAGE = "https://huggingface.co/datasets/thbndi/Mimic4Dataset" |
|
|
|
_CITATION = "https://proceedings.mlr.press/v193/gupta22a.html" |
|
_GIT_URL = "https://github.com/healthylaife/MIMIC-IV-Data-Pipeline" |
|
|
|
_DATA_GEN = f"{_BASE_URL}/data_generation_icu_modify.py" |
|
_DATA_GEN_HOSP= f"{_BASE_URL}/data_generation_modify.py" |
|
_DAY_INT= f"{_BASE_URL}/day_intervals_cohort_v22.py" |
|
_CONFIG_URLS = {'los' : f"{_BASE_URL}/config/los.config", |
|
'mortality' : f"{_BASE_URL}/config/mortality.config", |
|
'phenotype' : f"{_BASE_URL}/config/phenotype.config", |
|
'readmission' : f"{_BASE_URL}/config/readmission.config" |
|
} |
|
|
|
|
|
class Mimic4DatasetConfig(datasets.BuilderConfig): |
|
"""BuilderConfig for Mimic4Dataset.""" |
|
|
|
def __init__( |
|
self, |
|
**kwargs, |
|
): |
|
super().__init__(**kwargs) |
|
|
|
|
|
class Mimic4Dataset(datasets.GeneratorBasedBuilder): |
|
"""Create Mimic4Dataset dataset from Mimic-IV data stored in user machine.""" |
|
VERSION = datasets.Version("1.1.0") |
|
|
|
def __init__(self, **kwargs): |
|
self.mimic_path = kwargs.pop("mimic_path", None) |
|
self.encoding = kwargs.pop("encoding",'concat') |
|
self.config_path = kwargs.pop("config_path",None) |
|
self.test_size = kwargs.pop("test_size",0.2) |
|
self.val_size = kwargs.pop("val_size",0.1) |
|
self.generate_cohort = kwargs.pop("generate_cohort",True) |
|
|
|
        self.concat = (self.encoding == 'concat')
|
|
|
super().__init__(**kwargs) |
|
|
|
|
|
BUILDER_CONFIGS = [ |
|
Mimic4DatasetConfig( |
|
name="Phenotype", |
|
version=VERSION, |
|
description="Dataset for mimic4 Phenotype task" |
|
), |
|
Mimic4DatasetConfig( |
|
name="Readmission", |
|
version=VERSION, |
|
description="Dataset for mimic4 Readmission task" |
|
), |
|
Mimic4DatasetConfig( |
|
name="Length of Stay", |
|
version=VERSION, |
|
description="Dataset for mimic4 Length of Stay task" |
|
), |
|
Mimic4DatasetConfig( |
|
name="Mortality", |
|
version=VERSION, |
|
description="Dataset for mimic4 Mortality task" |
|
), |
|
] |
|
|
|
DEFAULT_CONFIG_NAME = "Mortality" |
|
|
|
def init_cohort(self): |
|
        if self.config_path is None:

            if self.config.name == 'Phenotype' : self.config_path = _CONFIG_URLS['phenotype']

            elif self.config.name == 'Readmission' : self.config_path = _CONFIG_URLS['readmission']

            elif self.config.name == 'Length of Stay' : self.config_path = _CONFIG_URLS['los']

            elif self.config.name == 'Mortality' : self.config_path = _CONFIG_URLS['mortality']
|
|
|
version = self.mimic_path.split('/')[-1] |
|
mimic_folder= self.mimic_path.split('/')[-2] |
|
mimic_complete_path='/'+mimic_folder+'/'+version |
|
|
|
current_directory = os.getcwd() |
|
if os.path.exists(os.path.dirname(current_directory)+'/MIMIC-IV-Data-Pipeline'): |
|
dir =os.path.dirname(current_directory) |
|
os.chdir(dir) |
|
else: |
|
|
|
            # an empty string must be handled before indexing dir[-1], which would raise an IndexError
            dir = self.mimic_path.replace(mimic_complete_path,'')

            if dir=='':

                dir="./"

            elif dir[-1]!='/':

                dir=dir+'/'
|
parent_dir = os.path.dirname(self.mimic_path) |
|
os.chdir(parent_dir) |
|
|
|
|
|
repo_url='https://github.com/healthylaife/MIMIC-IV-Data-Pipeline' |
|
|
|
path_bench = './MIMIC-IV-Data-Pipeline' |
|
if not os.path.exists(path_bench): |
|
|
|
|
|
Repo.clone_from(repo_url, path_bench) |
|
print("Repository cloned. \n") |
|
os.makedirs(path_bench+'/'+'mimic-iv') |
|
shutil.move(version,path_bench+'/'+'mimic-iv') |
|
|
|
os.chdir(path_bench) |
|
self.mimic_path = './'+'mimic-iv'+'/'+version |
|
|
|
|
|
|
|
        if self.config_path.startswith('http'):
|
c = self.config_path.split('/')[-1] |
|
file_path, head = urlretrieve(self.config_path,c) |
|
else : |
|
file_path = self.config_path |
|
if not os.path.exists('./config'): |
|
os.makedirs('config') |
|
|
|
|
|
self.conf='./config/'+file_path.split('/')[-1] |
|
if not os.path.exists(self.conf): |
|
shutil.move(file_path,'./config') |
|
with open(self.conf) as f: |
|
config = yaml.safe_load(f) |
|
|
|
|
|
timeW = config['timeWindow'] |
|
self.timeW=int(timeW.split()[1]) |
|
self.bucket = config['timebucket'] |
|
self.predW = config['predW'] |
|
|
|
self.data_icu = config['icu_no_icu']=='ICU' |
|
|
|
if self.data_icu: |
|
self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out, self.feat_lab = config['diagnosis'], config['chart'], config['proc'], config['meds'], config['output'], False |
|
else: |
|
self.feat_cond, self.feat_lab, self.feat_proc, self.feat_meds, self.feat_chart, self.feat_out = config['diagnosis'], config['lab'], config['proc'], config['meds'], False, False |
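
        # A sketch of the expected task-config keys, inferred from the reads
        # above (the actual files live under config/ in the dataset repo):
        #
        #   timeWindow: 'Last 72'   -> self.timeW == 72
        #   timebucket: 1           -> size of each time bucket
        #   predW: 2                -> prediction window
        #   icu_no_icu: 'ICU'       -> ICU vs. hospital (non-ICU) data
        #   diagnosis/chart/proc/meds/output/lab: booleans toggling each feature group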
|
|
|
|
|
|
|
if not os.path.exists('./model/data_generation_icu_modify.py'): |
|
file_path, head = urlretrieve(_DATA_GEN, "data_generation_icu_modify.py") |
|
shutil.move(file_path, './model') |
|
|
|
if not os.path.exists('./model/data_generation_modify.py'): |
|
file_path, head = urlretrieve(_DATA_GEN_HOSP, "data_generation_modify.py") |
|
shutil.move(file_path, './model') |
|
|
|
if not os.path.exists('./preprocessing/day_intervals_preproc/day_intervals_cohort_v22.py'): |
|
file_path, head = urlretrieve(_DAY_INT, "day_intervals_cohort_v22.py") |
|
shutil.move(file_path, './preprocessing/day_intervals_preproc') |
|
|
|
data_dir = "./data/dict/"+self.config.name.replace(" ","_")+"/dataDic" |
|
sys.path.append(path_bench) |
|
config = self.config_path.split('/')[-1] |
|
|
|
|
|
if self.generate_cohort: |
|
create_cohort(self.config.name.replace(" ","_"),self.mimic_path,config) |
|
|
|
|
|
with open(data_dir, 'rb') as fp: |
|
dataDic = pickle.load(fp) |
|
data = pd.DataFrame.from_dict(dataDic) |
|
|
|
dict_dir = "./data/dict/"+self.config.name.replace(" ","_") |
|
|
|
data=data.T |
|
train_data, test_data = train_test_split(data, test_size=self.test_size, random_state=42) |
|
if self.val_size > 0 : |
|
train_data, val_data = train_test_split(train_data, test_size=self.val_size, random_state=42) |
|
val_dic = val_data.to_dict('index') |
|
val_path = dict_dir+'/val_data.pkl' |
|
with open(val_path, 'wb') as f: |
|
pickle.dump(val_dic, f) |
|
|
|
train_dic = train_data.to_dict('index') |
|
test_dic = test_data.to_dict('index') |
|
|
|
train_path = dict_dir+'/train_data.pkl' |
|
test_path = dict_dir+'/test_data.pkl' |
|
|
|
with open(train_path, 'wb') as f: |
|
pickle.dump(train_dic, f) |
|
with open(test_path, 'wb') as f: |
|
pickle.dump(test_dic, f) |
|
return dict_dir |
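
    # After init_cohort runs, ./data/dict/<Task_Name>/ holds (a sketch of the
    # layout produced above): the pipeline's dataDic cohort pickle plus the
    # train_data.pkl / test_data.pkl splits, and val_data.pkl when val_size > 0.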
|
|
|
|
|
    def verif_dim_tensor(self, proc, out, chart, meds, lab, interv):

        # every enabled feature tensor must contain exactly one entry per time interval
        checks = [(self.feat_proc, proc), (self.feat_out, out), (self.feat_chart, chart), (self.feat_meds, meds), (self.feat_lab, lab)]

        return all(len(feat) == interv for enabled, feat in checks if enabled)
|
|
|
def save_features(self,concat_cols,dyn_df,cond_df,demo): |
|
|
|
df_feats = pd.DataFrame(columns=['feature','description']) |
|
icd = pd.read_csv(self.mimic_path+'/hosp/d_icd_diagnoses.csv.gz',compression='gzip', header=0) |
|
items= pd.read_csv(self.mimic_path+'/icu/d_items.csv.gz',compression='gzip', header=0) |
|
|
|
        if self.encoding == 'concat':

            feats = concat_cols.copy()

            df_feats['feature'] = feats

            # iterrows() yields copies, so descriptions must be written back with .at
            for idx, data in df_feats.iterrows():

                txt = (items[items['itemid'] == int(data['feature'].split('_')[0])]['label']).to_string(index=False)

                df_feats.at[idx, 'description'] = txt + ' at interval ' + data['feature'].split('_')[1]

        else:

            feats = list(dyn_df.columns.droplevel(0))

            # populate the feature column first, otherwise the loop below iterates over an empty frame
            df_feats['feature'] = feats

            for idx, data in df_feats.iterrows():

                df_feats.at[idx, 'description'] = (items[items['itemid'] == int(data['feature'])]['label']).to_string(index=False)
|
|
|
for diag in list(cond_df.columns): |
|
df_feats.loc[len(df_feats)] = [diag,icd[icd['icd_code'] == diag]['long_title'].to_string(index=False)] |
|
|
|
df_feats.loc[len(df_feats)]='Age' |
|
df_feats.loc[len(df_feats)]='gender' |
|
df_feats.loc[len(df_feats)]='ethnicity' |
|
df_feats.loc[len(df_feats)]='insurance' |
|
feats.extend(list(cond_df.columns)) |
|
feats.extend(list(demo.columns)) |
|
|
|
path = './data/dict/'+self.config.name.replace(" ","_")+'/features_description_'+self.encoding+'.csv' |
|
df_feats.to_csv(path,index=False) |
|
feat_tocsv=False |
|
return feat_tocsv, feats |
|
|
|
|
|
def _info_raw(self): |
|
features = datasets.Features( |
|
{ |
|
"label": datasets.ClassLabel(num_classes=2,names=["0", "1"]), |
|
"gender": datasets.Value("string"), |
|
"ethnicity": datasets.Value("string"), |
|
"insurance": datasets.Value("string"), |
|
"age": datasets.Value("int32"), |
|
"COND": datasets.Sequence(datasets.Value("string")), |
|
"MEDS": { |
|
"signal": |
|
{ |
|
"id": datasets.Sequence(datasets.Value("int32")), |
|
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))) |
|
} |
|
, |
|
"rate": |
|
{ |
|
"id": datasets.Sequence(datasets.Value("int32")), |
|
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))) |
|
} |
|
, |
|
"amount": |
|
{ |
|
"id": datasets.Sequence(datasets.Value("int32")), |
|
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))) |
|
} |
|
|
|
}, |
|
"PROC": { |
|
"id": datasets.Sequence(datasets.Value("int32")), |
|
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))) |
|
}, |
|
"CHART/LAB": |
|
{ |
|
"signal" : { |
|
"id": datasets.Sequence(datasets.Value("int32")), |
|
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))) |
|
}, |
|
"val" : { |
|
"id": datasets.Sequence(datasets.Value("int32")), |
|
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))) |
|
}, |
|
}, |
|
"OUT": { |
|
"id": datasets.Sequence(datasets.Value("int32")), |
|
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))) |
|
}, |
|
|
|
} |
|
) |
|
return datasets.DatasetInfo( |
|
description=_DESCRIPTION, |
|
features=features, |
|
homepage=_HOMEPAGE, |
|
citation=_CITATION, |
|
) |
|
|
|
def _generate_examples_raw(self, filepath): |
|
with open(filepath, 'rb') as fp: |
|
dataDic = pickle.load(fp) |
|
for hid, data in dataDic.items(): |
|
proc_features = data['Proc'] |
|
meds_features = data['Med'] |
|
out_features = data['Out'] |
|
cond_features = data['Cond']['fids'] |
|
eth= data['ethnicity'] |
|
age = data['age'] |
|
gender = data['gender'] |
|
label = data['label'] |
|
insurance=data['insurance'] |
|
|
|
items = list(proc_features.keys()) |
|
values =[proc_features[i] for i in items ] |
|
procs = {"id" : items, |
|
"value": values} |
|
|
|
items_outs = list(out_features.keys()) |
|
values_outs =[out_features[i] for i in items_outs ] |
|
outs = {"id" : items_outs, |
|
"value": values_outs} |
|
|
|
if self.data_icu: |
|
chart_features = data['Chart'] |
|
else: |
|
chart_features = data['Lab'] |
|
|
|
|
|
if ('signal' in chart_features): |
|
items_chart_sig = list(chart_features['signal'].keys()) |
|
values_chart_sig =[chart_features['signal'][i] for i in items_chart_sig ] |
|
chart_sig = {"id" : items_chart_sig, |
|
"value": values_chart_sig} |
|
else: |
|
chart_sig = {"id" : [], |
|
"value": []} |
|
|
|
if ('val' in chart_features): |
|
items_chart_val = list(chart_features['val'].keys()) |
|
values_chart_val =[chart_features['val'][i] for i in items_chart_val ] |
|
chart_val = {"id" : items_chart_val, |
|
"value": values_chart_val} |
|
else: |
|
chart_val = {"id" : [], |
|
"value": []} |
|
|
|
charts = {"signal" : chart_sig, |
|
"val" : chart_val} |
|
|
|
|
|
if ('signal' in meds_features): |
|
items_meds_sig = list(meds_features['signal'].keys()) |
|
values_meds_sig =[meds_features['signal'][i] for i in items_meds_sig ] |
|
meds_sig = {"id" : items_meds_sig, |
|
"value": values_meds_sig} |
|
else: |
|
meds_sig = {"id" : [], |
|
"value": []} |
|
|
|
if ('rate' in meds_features): |
|
items_meds_rate = list(meds_features['rate'].keys()) |
|
values_meds_rate =[meds_features['rate'][i] for i in items_meds_rate ] |
|
meds_rate = {"id" : items_meds_rate, |
|
"value": values_meds_rate} |
|
else: |
|
meds_rate = {"id" : [], |
|
"value": []} |
|
|
|
if ('amount' in meds_features): |
|
items_meds_amount = list(meds_features['amount'].keys()) |
|
values_meds_amount =[meds_features['amount'][i] for i in items_meds_amount ] |
|
meds_amount = {"id" : items_meds_amount, |
|
"value": values_meds_amount} |
|
else: |
|
meds_amount = {"id" : [], |
|
"value": []} |
|
|
|
meds = {"signal" : meds_sig, |
|
"rate" : meds_rate, |
|
"amount" : meds_amount} |
|
|
|
|
|
yield int(hid), { |
|
"label" : label, |
|
"gender" : gender, |
|
"ethnicity" : eth, |
|
"insurance" : insurance, |
|
"age" : age, |
|
"COND" : cond_features, |
|
"PROC" : procs, |
|
"CHART/LAB" : charts, |
|
"OUT" : outs, |
|
"MEDS" : meds |
|
} |
|
|
|
|
|
|
|
|
|
|
|
def _info_encoded(self): |
|
features = datasets.Features( |
|
{ |
|
"label": datasets.ClassLabel(num_classes=2,names=["0", "1"]), |
|
"features" : datasets.Sequence(datasets.Value("float32")), |
|
"features_names" : datasets.Sequence(datasets.Value("string")), |
|
} |
|
) |
|
return datasets.DatasetInfo( |
|
description=_DESCRIPTION, |
|
features=features, |
|
homepage=_HOMEPAGE, |
|
citation=_CITATION, |
|
) |
|
|
|
def _generate_examples_encoded(self, filepath): |
|
path= './data/dict/'+self.config.name.replace(" ","_")+'/ethVocab' |
|
with open(path, 'rb') as fp: |
|
ethVocab = pickle.load(fp) |
|
|
|
path= './data/dict/'+self.config.name.replace(" ","_")+'/insVocab' |
|
with open(path, 'rb') as fp: |
|
insVocab = pickle.load(fp) |
|
|
|
genVocab = ['<PAD>', 'M', 'F'] |
|
gen_encoder = LabelEncoder() |
|
eth_encoder = LabelEncoder() |
|
ins_encoder = LabelEncoder() |
|
gen_encoder.fit(genVocab) |
|
eth_encoder.fit(ethVocab) |
|
ins_encoder.fit(insVocab) |
|
with open(filepath, 'rb') as fp: |
|
dico = pickle.load(fp) |
|
|
|
df = pd.DataFrame.from_dict(dico, orient='index') |
|
        # write the feature-description csv only once, on the first generated example
        feat_tocsv = True
|
for i, data in df.iterrows(): |
|
dyn_df,cond_df,demo=concat_data(data,self.interval,self.feat_cond,self.feat_proc,self.feat_out, self.feat_chart, self.feat_meds,self.feat_lab,self.condDict, self.procDict, self.outDict, self.chartDict, self.medDict) |
|
dyn=dyn_df.copy() |
|
dyn.columns=dyn.columns.droplevel(0) |
|
concat_cols = [f"{col}_{t}" for t in range(dyn.shape[0]) for col in dyn.columns] |
|
demo['gender']=gen_encoder.transform(demo['gender']) |
|
demo['ethnicity']=eth_encoder.transform(demo['ethnicity']) |
|
demo['insurance']=ins_encoder.transform(demo['insurance']) |
|
label = data['label'] |
|
demo=demo.drop(['label'],axis=1) |
|
if feat_tocsv: |
|
feat_tocsv, feats = self.save_features(concat_cols,dyn_df,cond_df,demo) |
|
X= generate_ml(dyn_df,cond_df,demo,concat_cols,self.concat) |
|
X=X.values[0] |
|
|
|
            # the trailing '+ 4' accounts for the demographic columns (gender, ethnicity, insurance, age)
            size_concat = self.size_cond + self.size_proc * self.interval + self.size_meds * self.interval + self.size_out * self.interval + self.size_chart * self.interval + self.size_lab * self.interval + 4

            size_aggreg = self.size_cond + self.size_proc + self.size_meds + self.size_out + self.size_chart + self.size_lab + 4
|
|
|
if ((self.concat and len(X)==size_concat) or ((not self.concat) and len(X)==size_aggreg)): |
|
yield int(i), { |
|
"label": label, |
|
"features": X, |
|
"features_names" : feats |
|
} |
|
|
|
|
|
def _info_deep(self): |
|
features = datasets.Features( |
|
{ |
|
"label": datasets.ClassLabel(num_classes=2,names=["0", "1"]), |
|
"DEMO": datasets.Sequence(datasets.Value("int64")), |
|
"COND" : datasets.Sequence(datasets.Value("int64")), |
|
"MEDS" : datasets.Array2D(shape=(None, self.size_meds), dtype='int64') , |
|
"PROC" : datasets.Array2D(shape=(None, self.size_proc), dtype='int64') , |
|
"CHART/LAB" : datasets.Array2D(shape=(None, self.size_chart), dtype='int64') , |
|
"OUT" : datasets.Array2D(shape=(None, self.size_out), dtype='int64') , |
|
|
|
} |
|
) |
|
return datasets.DatasetInfo( |
|
description=_DESCRIPTION, |
|
features=features, |
|
homepage=_HOMEPAGE, |
|
citation=_CITATION, |
|
) |
|
|
|
|
|
def _generate_examples_deep(self, filepath): |
|
with open(filepath, 'rb') as fp: |
|
dico = pickle.load(fp) |
|
|
|
for key, data in dico.items(): |
|
stat, demo, meds, chart, out, proc, lab, y = generate_deep(data,self.interval, self.config.name.replace(" ","_"), self.feat_cond, self.feat_proc, self.feat_out, self.feat_chart, self.feat_meds,self.feat_lab,self.condDict, self.procDict, self.outDict, self.chartDict, self.medDict, self.eth_vocab,self.gender_vocab,self.age_vocab,self.ins_vocab) |
|
if self.verif_dim_tensor(proc, out, chart, meds, lab, self.interval): |
|
if self.data_icu: |
|
yield int(key), { |
|
'label': y, |
|
'DEMO': demo, |
|
'COND': stat, |
|
'MEDS': meds, |
|
'PROC': proc, |
|
'CHART/LAB': chart, |
|
'OUT': out, |
|
} |
|
else: |
|
yield int(key), { |
|
'label': y, |
|
'DEMO': demo, |
|
'COND': stat, |
|
'MEDS': meds, |
|
'PROC': proc, |
|
'CHART/LAB': lab, |
|
'OUT': out, |
|
} |
|
|
|
def _info_text(self): |
|
features = datasets.Features( |
|
{ |
|
"label": datasets.ClassLabel(num_classes=2,names=["0", "1"]), |
|
"text" : datasets.Value(dtype='string', id=None), |
|
} |
|
) |
|
return datasets.DatasetInfo( |
|
description=_DESCRIPTION, |
|
features=features, |
|
homepage=_HOMEPAGE, |
|
citation=_CITATION, |
|
) |
|
|
|
def _generate_examples_text(self, filepath): |
|
icd = pd.read_csv(self.mimic_path+'/hosp/d_icd_diagnoses.csv.gz',compression='gzip', header=0) |
|
items= pd.read_csv(self.mimic_path+'/icu/d_items.csv.gz',compression='gzip', header=0) |
|
with open(filepath, 'rb') as fp: |
|
dico = pickle.load(fp) |
|
|
|
for key, data in dico.items(): |
|
cond_text,chart_text,meds_text,proc_text,out_text = generate_text(data,icd,items, self.feat_cond, self.feat_chart, self.feat_meds, self.feat_proc, self.feat_out) |
|
text= cond_text+chart_text+meds_text+proc_text+out_text |
|
|
|
yield int(key),{ |
|
'label' : data['label'], |
|
'text': text |
|
} |
|
|
|
|
|
def _info(self): |
|
self.path = self.init_cohort() |
|
self.interval = (self.timeW//self.bucket) |
|
self.size_cond, self.size_proc, self.size_meds, self.size_out, self.size_chart, self.size_lab, self.eth_vocab,self.gender_vocab,self.age_vocab,self.ins_vocab,self.condDict,self.procDict,self.medDict,self.outDict,self.chartDict,self.labDict=vocab(self.config.name.replace(" ","_"),self.feat_cond,self.feat_proc,self.feat_out,self.feat_chart,self.feat_meds,self.feat_lab) |
|
if (self.encoding == 'concat' or self.encoding =='aggreg'): |
|
return self._info_encoded() |
|
|
|
elif self.encoding == 'tensor' : |
|
return self._info_deep() |
|
|
|
elif self.encoding == 'text' : |
|
return self._info_text() |
|
|
|
else: |
|
return self._info_raw() |
|
|
|
def _split_generators(self, dl_manager): |
|
data_dir = "./data/dict/"+self.config.name.replace(" ","_") |
|
if self.val_size > 0 : |
|
return [ |
|
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir+'/train_data.pkl'}), |
|
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_dir+'/val_data.pkl'}), |
|
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir+'/test_data.pkl'}), |
|
] |
|
else : |
|
return [ |
|
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir+'/train_data.pkl'}), |
|
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir+'/test_data.pkl'}), |
|
] |
|
|
|
def _generate_examples(self, filepath): |
|
if (self.encoding == 'concat' or self.encoding == 'aggreg'): |
|
yield from self._generate_examples_encoded(filepath) |
|
|
|
elif self.encoding == 'tensor' : |
|
yield from self._generate_examples_deep(filepath) |
|
|
|
elif self.encoding == 'text' : |
|
yield from self._generate_examples_text(filepath) |
|
|
|
else : |
|
yield from self._generate_examples_raw(filepath) |