# --- Dataset card metadata (non-code header from the hosting page, kept as comments) ---
# Datasets / Tasks: Image Classification
# Sub-tasks: multi-label-image-classification
# Languages: English
# Size: 100B<n<1T
# License: see _LICENSE below
# File size: 7,205 Bytes (revision bca91fa)
import pydicom
from PIL import Image
import numpy as np
import io
import datasets
import gdown
import re
import s3fs
import random
# Download the s5cmd manifest that lists the S3 location of every CT series.
# NOTE(review): this runs at import time — loading the module triggers a network
# download and writes 'manifest_file.s5cmd' into the working directory.
manifest_url = "https://drive.google.com/uc?id=1JBkQTXeieyN9_6BGdTF_DDlFFyZrGyU6"
manifest_file = gdown.download(manifest_url, 'manifest_file.s5cmd', quiet=False)
# Anonymous S3 filesystem handle (public bucket); shared by the builder below.
fs = s3fs.S3FileSystem(anon=True)
# Human-readable summary shown on the dataset card.
_DESCRIPTION = """
This dataset, curated from the comprehensive collection by the National Cancer Institute (NCI)
and hosted on AWS, contains over 900,000 colon CT images, along with the corresponding patients'
information. It is designed to help researchers in developing advanced machine learning models
for in-depth studies in colon cancer.
"""
_HOMEPAGE = "https://imaging.datacommons.cancer.gov/"
_LICENSE = "https://fairsharing.org/FAIRsharing.0b5a1d"
# Raw string so the BibTeX accent escape {\'e} survives verbatim; in the
# original non-raw literal Python collapsed \' to ', corrupting the citation.
_CITATION = r"""@article{fedorov2021nci,
title={NCI imaging data commons},
author={Fedorov, Andrey and Longabaugh, William JR and Pot, David
and Clunie, David A and Pieper, Steve and Aerts, Hugo JWL and
Homeyer, Andr{\'e} and Lewis, Rob and Akbarzadeh, Afshin and
Bontempi, Dennis and others},
journal={Cancer research},
volume={81},
number={16},
pages={4188--4193},
year={2021},
publisher={AACR}
}
"""
class ColonCancerCTDataset(datasets.GeneratorBasedBuilder):
    """Builder for the NCI colon-cancer CT collection hosted on AWS.

    The script downloads an s5cmd manifest from the dataset homepage, extracts
    the S3 path of every DICOM series it lists, expands each series into its
    individual DICOM files, and shuffles/partitions those files into 70/15/15
    train/validation/test splits. Each example contains the slice rendered as a
    PNG plus selected DICOM header fields: image type, study/series dates and
    descriptions, manufacturer, patient sex, age, pregnancy status, and the
    body part examined.
    """

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Return the DatasetInfo with feature typings, homepage, license, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "ImageType": datasets.Sequence(datasets.Value("string")),
                    "StudyDate": datasets.Value("string"),
                    "SeriesDate": datasets.Value("string"),
                    "Manufacturer": datasets.Value("string"),
                    "StudyDescription": datasets.Value("string"),
                    "SeriesDescription": datasets.Value("string"),
                    "PatientSex": datasets.Value("string"),
                    "PatientAge": datasets.Value("string"),
                    "PregnancyStatus": datasets.Value("string"),
                    "BodyPartExamined": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Collect every DICOM file path from the manifest and split 70/15/15.

        The manifest is a list of `cp s3://... <dest>` commands; the S3 series
        directory is extracted from each line (its trailing '/*' stripped), then
        each directory is expanded into the individual DICOM object keys.
        """
        series_paths = []
        with open(manifest_file, "r") as handle:
            for line in handle:
                match = re.search(r"cp (s3://[\S]+) .", line)
                if match:
                    # Drop the trailing '/*' so the path names the directory itself.
                    series_paths.append(match.group(1)[:-2])

        file_paths = []
        for series in series_paths:
            for entry in fs.ls(series):
                file_paths.append(fs.info(entry)["Key"])

        # NOTE(review): the shuffle is unseeded, so split membership differs on
        # every run — consider random.Random(seed).shuffle for reproducibility.
        random.shuffle(file_paths)

        train_size = int(0.7 * len(file_paths))
        val_size = int(0.15 * len(file_paths))
        train_paths = file_paths[:train_size]
        val_paths = file_paths[train_size:train_size + val_size]
        test_paths = file_paths[train_size + val_size:]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"paths": train_paths, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"paths": val_paths, "split": "dev"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"paths": test_paths, "split": "test"},
            ),
        ]

    def _generate_examples(self, paths, split):
        """Yield (key, example) pairs; the key is the file's S3 path.

        Each DICOM file is streamed from S3, its pixel data converted to an
        8-bit PNG, and selected header fields copied into the example dict.
        """
        for path in paths:
            with fs.open(path, "rb") as f:
                dicom_data = pydicom.dcmread(f)
                pixel_array = dicom_data.pixel_array

            # MONOCHROME1 stores inverted grayscale; flip so high values are bright.
            if dicom_data.PhotometricInterpretation == "MONOCHROME1":
                pixel_array = np.max(pixel_array) - pixel_array

            # Scale 16-bit (or other) depths down to 8-bit for PNG output.
            if pixel_array.dtype != np.uint8:
                peak = np.max(pixel_array)
                if peak > 0:
                    pixel_array = (pixel_array / peak * 255).astype(np.uint8)
                else:
                    # All-zero slice: the original divided by zero here.
                    pixel_array = pixel_array.astype(np.uint8)

            if pixel_array.ndim == 2:
                im = Image.fromarray(pixel_array, mode="L")  # grayscale
            elif pixel_array.ndim == 3 and pixel_array.shape[2] in (3, 4):
                # Match the mode to the channel count; the original forced
                # mode="RGB" even for 4-channel arrays, which PIL rejects.
                mode = "RGB" if pixel_array.shape[2] == 3 else "RGBA"
                im = Image.fromarray(pixel_array, mode=mode)
            else:
                raise ValueError("Unsupported DICOM image format")

            with io.BytesIO() as buffer:
                im.save(buffer, format="PNG")
                png_image = buffer.getvalue()

            # BUGFIX: the original fetched PregnancyStatus with default "" and
            # then compared against None, so the None branch was unreachable and
            # every example reported "Yes". Use a None default so an absent tag
            # maps to "None" and a present tag maps to "Yes".
            pregnancy = dicom_data.get("PregnancyStatus")
            pregnancy_status = "None" if pregnancy is None else "Yes"

            yield path, {
                "image": png_image,
                # The feature is Sequence(string); default to an empty list
                # (not "") and materialize pydicom's MultiValue as a list.
                "ImageType": list(dicom_data.get("ImageType", [])),
                "StudyDate": dicom_data.get("StudyDate", ""),
                "SeriesDate": dicom_data.get("SeriesDate", ""),
                "Manufacturer": dicom_data.get("Manufacturer", ""),
                "StudyDescription": dicom_data.get("StudyDescription", ""),
                "SeriesDescription": dicom_data.get("SeriesDescription", ""),
                "PatientSex": dicom_data.get("PatientSex", ""),
                "PatientAge": dicom_data.get("PatientAge", ""),
                "PregnancyStatus": pregnancy_status,
                "BodyPartExamined": dicom_data.get("BodyPartExamined", ""),
            }