# NIH-Chest-X-ray-dataset.py
import os
import datasets
from datasets.tasks import ImageClassification
from requests import get
from pandas import read_csv
logger = datasets.logging.get_logger(__name__)
_HOMEPAGE = "https://nihcc.app.box.com/v/ChestXray-NIHCC"
_CITATION = """\
@ONLINE {wang2017chestxray,
author="Xiaosong Wang, Yifan Peng, Le Lu, Zhiyong Lu, Mohammadhadi Bagheri, Ronald Summers",
title="ChestX-ray8: Hospital-scale Chest X-ray Database and Benchmarks on Weakly-Supervised Classification and Localization of Common Thorax Diseases",
month="January",
year="2017",
url="https://nihcc.app.box.com/v/ChestXray-NIHCC"
}
"""
_DESCRIPTION = """\
The NIH Chest X-ray dataset consists of over 100,000 de-identified chest X-ray images in PNG format.
The data is provided by the NIH Clinical Center and is available through the NIH download site: https://nihcc.app.box.com/v/ChestXray-NIHCC
"""
# Original NIH Box download links for the full image archive (kept for
# reference; the builder below downloads the Hub-hosted batches in _IMAGE_URLS).
_IMAGE_URLS2 = [
'https://nihcc.box.com/shared/static/vfk49d74nhbxq3nqjg0900w5nvkorp5c.gz',
'https://nihcc.box.com/shared/static/i28rlmbvmfjbl8p2n3ril0pptcmcu9d1.gz',
'https://nihcc.box.com/shared/static/f1t00wrtdk94satdfb9olcolqx20z2jp.gz',
'https://nihcc.box.com/shared/static/0aowwzs5lhjrceb3qp67ahp0rd1l1etg.gz',
'https://nihcc.box.com/shared/static/v5e3goj22zr6h8tzualxfsqlqaygfbsn.gz',
'https://nihcc.box.com/shared/static/asi7ikud9jwnkrnkj99jnpfkjdes7l6l.gz',
'https://nihcc.box.com/shared/static/jn1b4mw4n6lnh74ovmcjb8y48h8xj07n.gz',
'https://nihcc.box.com/shared/static/tvpxmn7qyrgl0w8wfh9kqfjskv6nmm1j.gz',
'https://nihcc.box.com/shared/static/upyy3ml7qdumlgk2rfcvlb9k6gvqq2pj.gz',
'https://nihcc.box.com/shared/static/l6nilvfa9cg3s28tqv1qc1olm3gnz54p.gz',
'https://nihcc.box.com/shared/static/hhq8fkdgvcari67vfhs7ppg2w6ni4jze.gz',
'https://nihcc.box.com/shared/static/ioqwiy20ihqwyr8pf4c24eazhh281pbu.gz'
]
# Image batches re-hosted on the Hugging Face Hub that the builder downloads.
_IMAGE_URLS = [
    "https://huggingface.co/datasets/alkzar90/NIH-Chest-X-ray-dataset/resolve/main/data/images/images_001.zip",
    "https://huggingface.co/datasets/alkzar90/NIH-Chest-X-ray-dataset/resolve/main/data/images/images_003.zip",
    "https://huggingface.co/datasets/alkzar90/NIH-Chest-X-ray-dataset/resolve/main/data/images/images_004.zip",
    #'https://huggingface.co/datasets/alkzar90/NIH-Chest-X-ray-dataset/resolve/main/dummy/0.0.0/images_001.tar.gz',
    #'https://huggingface.co/datasets/alkzar90/NIH-Chest-X-ray-dataset/resolve/main/dummy/0.0.0/images_002.tar.gz'
]
_URLS = {
'train_val_list': 'https://huggingface.co/datasets/alkzar90/NIH-Chest-X-ray-dataset/raw/main/data/train_val_list.txt',
'test_list': 'https://huggingface.co/datasets/alkzar90/NIH-Chest-X-ray-dataset/raw/main/data/test_list.txt',
'labels': 'https://huggingface.co/datasets/alkzar90/NIH-Chest-X-ray-dataset/raw/main/data/Data_Entry_2017_v2020.csv',
'image_urls': _IMAGE_URLS
}
_LABEL2IDX = {'No Finding': 0,
'Atelectasis': 1,
'Cardiomegaly': 2,
'Effusion': 3,
'Infiltration': 4,
'Mass': 5,
'Nodule': 6,
'Pneumonia': 7,
'Pneumothorax': 8,
'Consolidation': 9,
'Edema': 10,
'Emphysema': 11,
'Fibrosis': 12,
'Pleural_Thickening': 13,
'Hernia': 14}
_NAMES = list(_LABEL2IDX.keys())
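# Each "Finding Labels" entry in the CSV is a pipe-separated string that maps
# onto these ids, e.g. (illustrative example, not a row from the actual CSV):
#
#   labels = "Effusion|Infiltration".split('|')   # ['Effusion', 'Infiltration']
#   ids = [_LABEL2IDX[name] for name in labels]   # [3, 4]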
class XChest(datasets.GeneratorBasedBuilder):
    """NIH Chest X-ray dataset."""

    VERSION = datasets.Version("0.0.0")
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"image_file_path": datasets.Value("string"),
"image": datasets.Image(),
#"labels": datasets.features.ClassLabel(names=_NAMES),
"labels": datasets.features.Sequence(
datasets.features.ClassLabel(num_classes=len(_NAMES),
names=_NAMES)
)
}
),
supervised_keys=("image", "labels"),
homepage=_HOMEPAGE,
citation=_CITATION,
#task_templates=[ImageClassification(image_column="image",
# label_column="labels")],
)
    def _split_generators(self, dl_manager):
        # Get the image names that belong to the train/val split
        logger.info("Downloading the train_val_list image names")
        train_val_list = get(_URLS['train_val_list']).iter_lines()
        train_val_list = set([x.decode('UTF8') for x in train_val_list])
        logger.info(f"Number of train/val image names: {len(train_val_list)}")

        # Create lists to store the image paths for each split
        train_files = []
        test_files = []

        # Download and extract the image batches
        data_files = dl_manager.download_and_extract(_URLS['image_urls'])

        # Iterate through the extracted image folders and assign each file
        # to the train split or the test split
        for batch in data_files:
            logger.info(f"Batch for data_files: {batch}")
            path_files = dl_manager.iter_files(batch)
            for img in path_files:
                if os.path.basename(img) in train_val_list:
                    train_files.append(img)
                else:
                    test_files.append(img)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
'files': iter(train_files)
}
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
'files': iter(test_files)
}
)
]
    def _generate_examples(self, files):
        # Read the CSV that maps each image file name to its finding labels
        label_csv = read_csv(_URLS['labels'])

        for i, path in enumerate(files):
            file_name = os.path.basename(path)
            if file_name.endswith(".png"):
                # Filter the CSV row for this image and split the
                # pipe-separated finding labels into a list
                image_labels = label_csv[label_csv['Image Index'] == file_name]['Finding Labels'].values[0].split('|')
                yield i, {
                    "image_file_path": path,
                    "image": path,
                    "labels": image_labels,
                }
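# A minimal post-processing sketch (assuming the dataset has been loaded with
# `load_dataset` as shown near the top of this file): turn the variable-length
# label ids emitted above into a fixed-size multi-hot vector for multi-label
# training.
#
#   import numpy as np
#
#   def to_multi_hot(example):
#       vec = np.zeros(len(_NAMES), dtype=np.float32)
#       vec[example["labels"]] = 1.0
#       example["multi_hot"] = vec
#       return example
#
#   # ds = ds.map(to_multi_hot)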