# Copyright 2022 Cristóbal Alcázar
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NIH Chest X-ray Dataset"""


import os
import datasets

from requests import get
from pandas import read_csv

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{Wang_2017,
	doi = {10.1109/cvpr.2017.369},
	url = {https://doi.org/10.1109%2Fcvpr.2017.369},
	year = 2017,
	month = {jul},
	publisher = {{IEEE}},
	author = {Xiaosong Wang and Yifan Peng and Le Lu and Zhiyong Lu and Mohammadhadi Bagheri and Ronald M. Summers},
	title = {{ChestX}-Ray8: Hospital-Scale Chest X-Ray Database and Benchmarks on Weakly-Supervised Classification and Localization of Common Thorax Diseases},
	booktitle = {2017 {IEEE} Conference on Computer Vision and Pattern Recognition ({CVPR})}
}
"""


_DESCRIPTION = """\
The NIH Chest X-ray dataset consists of 112,120 de-identified chest X-ray images of 30,805 unique patients. The images are provided in PNG format.

The data is provided by the NIH Clinical Center and is available through the NIH download site: https://nihcc.app.box.com/v/ChestXray-NIHCC
"""


_HOMEPAGE = "https://nihcc.app.box.com/v/chestxray-nihcc"


_REPO = "https://huggingface.co/datasets/alkzar90/NIH-Chest-X-ray-dataset/resolve/main/data"


_IMAGE_URLS = [
	f"{_REPO}/images/images_001.zip",
	f"{_REPO}/images/images_002.zip",
	f"{_REPO}/images/images_003.zip",
	f"{_REPO}/images/images_004.zip",
	f"{_REPO}/images/images_005.zip",
	f"{_REPO}/images/images_006.zip",
	f"{_REPO}/images/images_007.zip",
	f"{_REPO}/images/images_008.zip",
	f"{_REPO}/images/images_009.zip",
    f"{_REPO}/images/images_010.zip",
	f"{_REPO}/images/images_011.zip",
	f"{_REPO}/images/images_012.zip"
	#'https://huggingface.co/datasets/alkzar90/NIH-Chest-X-ray-dataset/resolve/main/dummy/0.0.0/images_001.tar.gz',
	#'https://huggingface.co/datasets/alkzar90/NIH-Chest-X-ray-dataset/resolve/main/dummy/0.0.0/images_002.tar.gz'
]


_URLS = {
	"train_val_list": f"{_REPO}/train_val_list.txt",
	"test_list": f"{_REPO}/test_list.txt",
	"labels": f"{_REPO}/Data_Entry_2017_v2020.csv",
	"BBox": f"{_REPO}/BBox_List_2017.csv",
	"image_urls": _IMAGE_URLS
}


_LABEL2IDX = {"No Finding": 0,
	     "Atelectasis": 1,
	     "Cardiomegaly": 2,
	     "Effusion": 3,
	     "Infiltration": 4,
	     "Mass": 5,
	     "Nodule": 6,
	     "Pneumonia": 7,
	     "Pneumothorax": 8,
  	     "Consolidation": 9,
	     "Edema": 10,
	     "Emphysema": 11,
	     "Fibrosis": 12,
	     "Pleural_Thickening": 13,
	     "Hernia": 14}


_NAMES = list(_LABEL2IDX.keys())


class ChestXray14Config(datasets.BuilderConfig):
	"""NIH Image Chest X-ray14 configuration."""
	
	def __init__(self, name, **kwargs):
	    super(ChestXray14Config, self).__init__(
		version=datasets.Version("1.0.0"),
		name=name,
		description="NIH ChestX-ray14",
		**kwargs,
	    )



class ChestXray14(datasets.GeneratorBasedBuilder):
	"""NIH Image Chest X-ray14 dataset."""


	BUILDER_CONFIGS = [
		ChestXray14Config("image-classification"),
		ChestXray14Config("object-detection"),
	]

	def _info(self):
		if self.config.name == "image-classification":
		    features = datasets.Features(
                       {
		        "image": datasets.Image(),
  		        "labels": datasets.features.Sequence(
                                     datasets.features.ClassLabel(
                                        num_classes=len(_NAMES),
                                        names=_NAMES
                                     )
                                 ),
                       }
                    )
		    keys = ("image", "labels")


		if self.config.name == "object-detection":
		    features = datasets.Features(
                       {
			"image_id": datasets.Value("string"),
			"patient_id": datasets.Value("int32"),
		        "image": datasets.Image(),
			"width": datasets.Value("int32"),
			"height": datasets.Value("int32"),
                       }
                    )
		    object_dict = {
			"image_id": datasets.Value("string"), 
			"area": datasets.Value("int64"),
			"bbox": datasets.Sequence(datasets.Value("float32"), length=4),
			}
		    features["objects"] = [object_dict]
		    keys = ("image", "objects")



		return datasets.DatasetInfo(
		    description=_DESCRIPTION,
		    features=features,
		    supervised_keys=keys,
		    homepage=_HOMEPAGE,
		    citation=_CITATION,
		)


	def _split_generators(self, dl_manager):
		# Get the image names that belong to the train-val dataset
		logger.info("Downloading the train_val_list image names")
		train_val_list = get(_URLS['train_val_list']).iter_lines()
		train_val_list = set([x.decode('UTF8') for x in train_val_list])
		logger.info(f"Check train_val_list: {train_val_list}")

		# Create lists to store the image file paths for each split
		train_files = []
		test_files = []
		
		# Download and extract the image zip archives
		data_files = dl_manager.download_and_extract(_URLS["image_urls"])

		# Iterate through the extracted image folders and check whether each
		# image belongs to the train set or the test set

		for batch in data_files:
		  logger.info(f"Batch for data_files: {batch}")
		  path_files = dl_manager.iter_files(batch)
		  for img in path_files:
		    if os.path.basename(img) in train_val_list:
		      train_files.append(img)
		    else:
		      test_files.append(img)
		
		return [
		    datasets.SplitGenerator(
			name=datasets.Split.TRAIN,
			gen_kwargs={
				"files": train_files
			}

		    ),
		    datasets.SplitGenerator(
			name=datasets.Split.TEST,
			gen_kwargs={
				"files": test_files
			}
		    )
		]

	def _generate_examples(self, files):

		if self.config.name == "image-classification":
		    # Read csv with image labels
		    label_csv = read_csv(_URLS["labels"])
		    for i, path in enumerate(files):
		        file_name = os.path.basename(path)
		        if file_name.endswith(".png"):
		            # The file name is the image id used to look up the
		            # "Finding Labels" column of the corresponding CSV row
		            image_labels = label_csv[label_csv["Image Index"] == file_name]["Finding Labels"].values[0].split("|")
		            yield i, {
		                "image": path,
		                "labels": image_labels,
		            }
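
		# The "object-detection" config is declared in _info() but never handled
		# here. The block below is a hedged, illustrative sketch of how it could
		# be generated; it is NOT the author's original implementation. It assumes
		# the BBox_List_2017.csv header yields the pandas columns "Image Index",
		# "Bbox [x", "y", "w" and "h]", and that the released PNGs are 1024x1024.
		if self.config.name == "object-detection":
		    # Read the bounding-box annotations once; most images have no boxes
		    bbox_csv = read_csv(_URLS["BBox"])
		    for i, path in enumerate(files):
		        file_name = os.path.basename(path)
		        if not file_name.endswith(".png"):
		            continue
		        # Collect every annotated box for this image
		        objects = []
		        for _, row in bbox_csv[bbox_csv["Image Index"] == file_name].iterrows():
		            x, y, w, h = row["Bbox [x"], row["y"], row["w"], row["h]"]
		            objects.append({
		                "image_id": file_name,
		                "area": int(w * h),
		                "bbox": [float(x), float(y), float(w), float(h)],
		            })
		        yield i, {
		            "image_id": file_name,
		            # File names follow the pattern <patient_id>_<follow_up>.png
		            "patient_id": int(file_name.split("_")[0]),
		            "image": path,
		            "width": 1024,
		            "height": 1024,
		            "objects": objects,
		        }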