"""TODO: Add a description here.""" |

import csv
import os
from typing import Dict, Iterator, List, Tuple

import numpy as np

import datasets
import SimpleITK as sitk
|
|
def import_csv_data(filepath: str) -> List[Dict[str, str]]: |
    """Import all rows of a CSV file as a list of dicts keyed by column name."""
    results = []
    with open(filepath, encoding='utf-8') as f:
        reader = csv.DictReader(f)
        for line in reader:
            results.append(line)
    return results
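
# Each row returned by import_csv_data is a plain dict of strings keyed by the
# CSV header, e.g. (hypothetical values) {"Patient": "1", "IVD label": "3", ...}
# for the radiological gradings file.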
|
_CITATION = """\ |
|
@InProceedings{huggingface:dataset, |
|
title = {A great new dataset}, |
|
author={huggingface, Inc. |
|
}, |
|
year={2020} |
|
} |
|
""" |
|
_DESCRIPTION = """\ |
|
This new dataset is designed to solve this great NLP task and is crafted with a lot of care. |
|
""" |
|
|
|
_HOMEPAGE = "https://zenodo.org/records/10159290" |
|
|
|
_LICENSE = """Creative Commons Attribution 4.0 International License \ |
|
(https://creativecommons.org/licenses/by/4.0/legalcode)""" |
|
_URLS = {
    "first_domain": {
        "images": "https://zenodo.org/records/10159290/files/images.zip",
        "masks": "https://zenodo.org/records/10159290/files/masks.zip",
        "overview": "https://zenodo.org/records/10159290/files/overview.csv",
        "gradings": "https://zenodo.org/records/10159290/files/radiological_gradings.csv",
    }
}
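
# Note: the loader below assumes that images.zip and masks.zip each extract to a
# directory containing an "images/" or "masks/" sub-folder of .mha volumes,
# which is why _generate_examples joins an extra 'images' / 'masks' path
# component onto the extracted archive paths.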
|
|
|
class SPIDER(datasets.GeneratorBasedBuilder): |
    """SPIDER dataset loader: lumbar spine MRI scans with segmentation masks, metadata, and radiological gradings."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
        datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
    ]

    DEFAULT_CONFIG_NAME = "first_domain"

    def _info(self):
        if self.config.name == "first_domain":
            features = datasets.Features(
                {
                    "sentence": datasets.Value("string"),
                    "option1": datasets.Value("string"),
                    "answer": datasets.Value("string"),
                }
            )
        else:
            features = datasets.Features(
                {
                    "sentence": datasets.Value("string"),
                    "option2": datasets.Value("string"),
                    "second_domain_answer": datasets.Value("string"),
                }
            )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
|
|
|
def _split_generators(self, dl_manager): |
        urls = _URLS[self.config.name]
        paths_dict = dl_manager.download_and_extract(urls)

        # All three splits share the same downloaded archives; the actual
        # train / validate / test partition of patients happens inside
        # _generate_examples using a fixed random seed.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "paths_dict": paths_dict,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "paths_dict": paths_dict,
                    "split": "validate",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "paths_dict": paths_dict,
                    "split": "test",
                },
            ),
        ]
|
    def _generate_examples(
        self,
        paths_dict: Dict[str, str],
        split: str = 'train',
        validate_share: float = 0.3,
        test_share: float = 0.2,
        raw_image: bool = True,
        numeric_array: bool = True,
        metadata: bool = True,
        rad_gradings: bool = True,
    ) -> Iterator[Tuple[str, Dict]]:
        """Yield (key, example) tuples for the requested split.

        The key exists for legacy (tfds) reasons and is not important in
        itself, but it must be unique for each example; here it is the scan id
        (the image file name without its .mha extension).

        Args:
            paths_dict: Paths to the downloaded/extracted image and mask
                archives, the overview CSV, and the radiological gradings CSV.
            split: One of "train", "validate", or "test".
            validate_share: Fraction of patients assigned to the validation split.
            test_share: Fraction of patients assigned to the test split.
            raw_image: If True, include the SimpleITK image in each example.
            numeric_array: If True, include the image as a NumPy array.
            metadata: If True, include the scan's row from the overview CSV.
            rad_gradings: If True, include the patient's radiological gradings.

        Yields:
            (scan_id, example) tuples, where example is a dict containing
            "patient_id", "scan_type", and the requested optional fields.
        """
        np.random.seed(9999)
        N_PATIENTS = 257
        TRAIN_SHARE = 1.0 - validate_share - test_share

        scan_types = ['t1', 't2', 't2_SPACE']

        # Deterministically partition patient ids 1..N_PATIENTS into
        # train/dev/test groups; the fixed seed keeps the partition identical
        # across the separate calls made for each split.
        partition = np.random.choice(
            ['train', 'dev', 'test'],
            p=[TRAIN_SHARE, validate_share, test_share],
            size=N_PATIENTS,
        )
        patient_ids = np.arange(N_PATIENTS) + 1
        train_ids = set(patient_ids[partition == 'train'])
        validate_ids = set(patient_ids[partition == 'dev'])
        test_ids = set(patient_ids[partition == 'test'])
        assert len(train_ids.union(validate_ids, test_ids)) == N_PATIENTS

        # Load the two CSV files that accompany the image archives.
        overview_data = import_csv_data(paths_dict['overview'])
        grades_data = import_csv_data(paths_dict['gradings'])

        # Index overview rows by scan file name (without extension).
        overview_dict = {}
        for item in overview_data:
            key = item['new_file_name']
            overview_dict[key] = item

        # Group radiological grading rows by patient id.
        grades_dict = {}
        for patient_id in patient_ids:
            patient_grades = [
                x for x in grades_data if x['Patient'] == str(patient_id)
            ]
            if patient_grades:
                grades_dict[str(patient_id)] = patient_grades

        # List the .mha volumes inside the extracted archives.
        image_files = [
            file for file in os.listdir(os.path.join(paths_dict['images'], 'images'))
            if file.endswith('.mha')
        ]
        assert len(image_files) > 0, "No image files found--check directory path."

        mask_files = [
            file for file in os.listdir(os.path.join(paths_dict['masks'], 'masks'))
            if file.endswith('.mha')
        ]
        assert len(mask_files) > 0, "No mask files found--check directory path."

        # Keep only the supported scan types.
        image_files = [
            file for file in image_files
            if any(scan_type in file for scan_type in scan_types)
        ]
        mask_files = [
            file for file in mask_files
            if any(scan_type in file for scan_type in scan_types)
        ]

        # Select the patient ids belonging to the requested split.
        if split == 'train':
            subset_ids = train_ids
        elif split == 'validate':
            subset_ids = validate_ids
        elif split == 'test':
            subset_ids = test_ids
        else:
            raise ValueError(
                f'Split argument "{split}" is not recognized. '
                'Please enter one of ["train", "validate", "test"]'
            )

        # Keep only files whose leading patient id is in the subset. Matching
        # the full prefix avoids, e.g., patient 1 also matching "12_t1.mha".
        subset_id_strings = {str(patient_id) for patient_id in subset_ids}
        image_files = [
            file for file in image_files
            if file.split('_')[0] in subset_id_strings
        ]
        mask_files = [
            file for file in mask_files
            if file.split('_')[0] in subset_id_strings
        ]
        assert len(image_files) == len(mask_files), (
            "The number of image files does not match the number of mask "
            "files--verify subsetting operation."
        )

        # Shuffle the example order (reproducible because of the seed set above).
        np.random.shuffle(image_files)

        for example in image_files:

            scan_id = example.replace('.mha', '')
            patient_id = scan_id.split('_')[0]
            scan_type = '_'.join(scan_id.split('_')[1:])

            # Read the image volume and convert it to a NumPy array.
            image_path = os.path.join(paths_dict['images'], 'images', example)
            image = sitk.ReadImage(image_path)
            image_array = sitk.GetArrayFromImage(image)

            # Per-scan metadata from the overview CSV.
            image_overview = overview_dict[scan_id]

            # Per-patient radiological gradings, keyed by intervertebral disc (IVD) label.
            patient_grades_dict = {}
            for item in grades_dict[patient_id]:
                key = f'IVD{item["IVD label"]}'
                value = {k: v for k, v in item.items() if k not in ['Patient', 'IVD label']}
                patient_grades_dict[key] = value

            # Assemble the example with the requested fields.
            return_dict = {'patient_id': patient_id, 'scan_type': scan_type}
            if raw_image:
                return_dict['raw_image'] = image
            if numeric_array:
                return_dict['numeric_array'] = image_array
            if metadata:
                return_dict['metadata'] = image_overview
            if rad_gradings:
                return_dict['rad_gradings'] = patient_grades_dict

            yield scan_id, return_dict
|
|
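
# Example usage (a minimal sketch, not executed by this loader; it assumes the
# script is saved locally as "spider.py" and that the installed `datasets`
# version still supports loading local dataset scripts):
#
#     import datasets
#     spider = datasets.load_dataset("spider.py", name="first_domain", split="train")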