YuxuanZhang888 committed on
Commit
bca91fa
1 Parent(s): d3dd21c

Upload ColonCancerCTDatasetScript.py

Files changed (1)
  1. ColonCancerCTDatasetScript.py +171 -0
ColonCancerCTDatasetScript.py ADDED
@@ -0,0 +1,171 @@
+ import pydicom
+ from PIL import Image
+ import numpy as np
+ import io
+ import datasets
+ import gdown
+ import re
+ import s3fs
+ import random
+
+ manifest_url = "https://drive.google.com/uc?id=1JBkQTXeieyN9_6BGdTF_DDlFFyZrGyU6"
+ manifest_file = gdown.download(manifest_url, 'manifest_file.s5cmd', quiet=False)
+ fs = s3fs.S3FileSystem(anon=True)
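+ # The manifest is downloaded once at import time, and the S3 filesystem is created with
+ # anonymous credentials, since the imaging data is served from a publicly readable bucket.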
+
+ _DESCRIPTION = """
+ This dataset, curated from the comprehensive collection by the National Cancer Institute (NCI)
+ and hosted on AWS, contains over 900,000 colon CT images along with the corresponding patients'
+ information. It is designed to help researchers develop advanced machine learning models
+ for in-depth studies of colon cancer.
+ """
+ _HOMEPAGE = "https://imaging.datacommons.cancer.gov/"
+ _LICENSE = "https://fairsharing.org/FAIRsharing.0b5a1d"
+ _CITATION = """\
+ @article{fedorov2021nci,
+   title={NCI imaging data commons},
+   author={Fedorov, Andrey and Longabaugh, William JR and Pot, David
+           and Clunie, David A and Pieper, Steve and Aerts, Hugo JWL and
+           Homeyer, Andr{\'e} and Lewis, Rob and Akbarzadeh, Afshin and
+           Bontempi, Dennis and others},
+   journal={Cancer research},
+   volume={81},
+   number={16},
+   pages={4188--4193},
+   year={2021},
+   publisher={AACR}
+ }
+ """
+
+ class ColonCancerCTDataset(datasets.GeneratorBasedBuilder):
+     """This dataset script retrieves the dataset using a manifest file from the original dataset's
+     homepage. The file lists the S3 paths for each series of CT images and metadata, guiding the download
+     from AWS. After processing the original content, this dataset will contain the colonography image,
+     image type, study date, series date, manufacturer details, study description, series description,
+     body part examined, and patient demographics including sex, age, and pregnancy status.
+     """
+     VERSION = datasets.Version("1.1.0")
+
+     def _info(self):
+         """Returns DatasetInfo."""
+         # This method specifies the datasets.DatasetInfo object, which contains the dataset's
+         # description and the feature types of each column.
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "image": datasets.Image(),
+                     "ImageType": datasets.Sequence(datasets.Value('string')),
+                     "StudyDate": datasets.Value('string'),
+                     "SeriesDate": datasets.Value('string'),
+                     "Manufacturer": datasets.Value('string'),
+                     "StudyDescription": datasets.Value('string'),
+                     "SeriesDescription": datasets.Value('string'),
+                     "PatientSex": datasets.Value('string'),
+                     "PatientAge": datasets.Value('string'),
+                     "PregnancyStatus": datasets.Value('string'),
+                     "BodyPartExamined": datasets.Value('string'),
+                 }),
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns a list of SplitGenerators."""
+         # This method extracts the S3 paths of the data and defines the splits by shuffling
+         # and randomly partitioning the paths listed in the manifest file.
+         s3_series_paths = []
+         s3_individual_paths = []
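+         # Each manifest line is expected to be an s5cmd copy command of the form
+         #     cp s3://<bucket>/<series-uuid>/* .
+         # (bucket and series id shown as placeholders); the regex below captures the S3 prefix.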
+         with open(manifest_file, 'r') as file:
+             for line in file:
+                 match = re.search(r'cp (s3://[\S]+) .', line)
+                 if match:
+                     s3_series_paths.append(match.group(1)[:-2])  # Deleting the '/*' in directories
+         for series in s3_series_paths:
+             for content in fs.ls(series):
+                 s3_individual_paths.append(fs.info(content)['Key'])  # Retrieve the individual DICOM file's S3 path
+
+         random.shuffle(s3_individual_paths)  # Randomly shuffles the paths for partitioning
+
+         # Define the split sizes
+         train_size = int(0.7 * len(s3_individual_paths))
+         val_size = int(0.15 * len(s3_individual_paths))
+         # Split the paths into train, validation, and test sets
+         train_paths = s3_individual_paths[:train_size]
+         val_paths = s3_individual_paths[train_size:train_size + val_size]
+         test_paths = s3_individual_paths[train_size + val_size:]
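+         # This yields roughly a 70/15/15 train/validation/test partition. random.shuffle is not
+         # seeded here, so the exact assignment of files to splits can differ between runs.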
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "paths": train_paths,
+                     "split": "train"
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "paths": val_paths,
+                     "split": "dev"
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "paths": test_paths,
+                     "split": "test"
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, paths, split):
+         """Yields examples."""
+         # This method will yield examples, i.e. rows in the dataset.
+         for path in paths:
+             key = path
+             with fs.open(path, 'rb') as f:
+                 dicom_data = pydicom.dcmread(f)
+                 pixel_array = dicom_data.pixel_array
+                 # Converting pixel array into PNG image
+                 # Adjust for MONOCHROME1 to invert the grayscale values
+                 if dicom_data.PhotometricInterpretation == "MONOCHROME1":
+                     pixel_array = np.max(pixel_array) - pixel_array
+                 # Normalize or scale 16-bit or other depth images to 8-bit
+                 if pixel_array.dtype != np.uint8:
+                     pixel_array = (np.divide(pixel_array, np.max(pixel_array)) * 255).astype(np.uint8)
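+                 # Note: this is a simple max-value scaling; it does not apply the DICOM
+                 # RescaleSlope/RescaleIntercept or any window/level settings, and it assumes
+                 # a nonzero maximum pixel value.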
+                 # Convert to a PIL image (grayscale or color)
+                 if pixel_array.ndim == 2:
+                     im = Image.fromarray(pixel_array, mode="L")  # L mode is for grayscale
+                 elif pixel_array.ndim == 3 and pixel_array.shape[2] == 3:
+                     im = Image.fromarray(pixel_array, mode="RGB")
+                 elif pixel_array.ndim == 3 and pixel_array.shape[2] == 4:
+                     im = Image.fromarray(pixel_array, mode="RGBA").convert("RGB")
+                 else:
+                     raise ValueError("Unsupported DICOM image format")
+                 with io.BytesIO() as output:
+                     im.save(output, format="PNG")
+                     png_image = output.getvalue()
+                 # Extracting metadata
+                 ImageType = list(dicom_data.get("ImageType", []))
+                 StudyDate = dicom_data.get("StudyDate", "")
+                 SeriesDate = dicom_data.get("SeriesDate", "")
+                 Manufacturer = dicom_data.get("Manufacturer", "")
+                 StudyDescription = dicom_data.get("StudyDescription", "")
+                 SeriesDescription = dicom_data.get("SeriesDescription", "")
+                 PatientSex = dicom_data.get("PatientSex", "")
+                 PatientAge = dicom_data.get("PatientAge", "")
+                 # Record the pregnancy status as a string, or "None" when the tag is absent or empty
+                 PregnancyStatus = dicom_data.get("PregnancyStatus", None)
+                 if PregnancyStatus is None:
+                     PregnancyStatus = "None"
+                 else:
+                     PregnancyStatus = str(PregnancyStatus)
+                 BodyPartExamined = dicom_data.get("BodyPartExamined", "")
+                 yield key, {"image": png_image,
+                             "ImageType": ImageType,
+                             "StudyDate": StudyDate,
+                             "SeriesDate": SeriesDate,
+                             "Manufacturer": Manufacturer,
+                             "StudyDescription": StudyDescription,
+                             "SeriesDescription": SeriesDescription,
+                             "PatientSex": PatientSex,
+                             "PatientAge": PatientAge,
+                             "PregnancyStatus": PregnancyStatus,
+                             "BodyPartExamined": BodyPartExamined}