# Copyright 2022 for msynth dataset
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Custom dataset-builder for msynth dataset
'''
import os
import re

import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@article{sizikova2023knowledge,
title={Knowledge-based in silico models and dataset for the comparative evaluation of mammography AI for a range of breast characteristics, lesion conspicuities and doses},
author={Sizikova, Elena and Saharkhiz, Niloufar and Sharma, Diksha and Lago, Miguel and Sahiner, Berkman and Delfino, Jana G. and Badano, Aldo},
journal={Advances in Neural Information Processing Systems},
volume={},
pages={16764--16778},
year={2023}
}
"""
_DESCRIPTION = """\
M-SYNTH is a synthetic digital mammography (DM) dataset with four breast fibroglandular density distributions imaged using Monte Carlo x-ray simulations with the publicly available Virtual Imaging Clinical Trial for Regulatory Evaluation (VICTRE) toolkit.
Curated by: Elena Sizikova, Niloufar Saharkhiz, Diksha Sharma, Miguel Lago, Berkman Sahiner, Jana Gut Delfino, Aldo Badano
License: Creative Commons 1.0 Universal License (CC0)
"""
_HOMEPAGE = "link to the dataset description page (FDA/CDRH/OSEL/DIDSR/VICTRE_project)"
_REPO = "https://huggingface.co/datasets/didsr/msynth/resolve/main/data"
# Setting parameters for the URLs
_LESIONDENSITY = ["1.0", "1.06", "1.1"]
_DOSE = ["20%", "40%", "60%", "80%", "100%"]
_DENSITY = ["fatty", "dense", "hetero", "scattered"]
_SIZE = ["5.0", "7.0", "9.0"]
_DETECTOR = 'SIM'
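# Example usage (a sketch; assumes this script is loaded from the Hub repo
# "didsr/msynth" and that remote code execution is enabled):
#
#   from datasets import load_dataset
#   ds = load_dataset("didsr/msynth", "metadata", trust_remote_code=True)
#
# The "device_data" and "segmentation_mask" configs also accept the
# lesion_density / dose / density / size parameters defined above, e.g.:
#
#   ds = load_dataset("didsr/msynth", "device_data",
#                     lesion_density=["1.0"], dose=["100%"],
#                     density=["fatty"], size=["5.0"],
#                     trust_remote_code=True)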
_DOSETABLE = {
"dense": {
"20%": '1.73e09',
"40%": '3.47e09',
"60%": '5.20e09',
"80%": '6.94e09',
"100%": '8.67e09'
},
"hetero": {
"20%": '2.04e09',
"40%": '4.08e09',
"60%": '6.12e09',
"80%": '8.16e09',
"100%": '1.02e10'
},
"scattered": {
"20%": '4.08e09',
"40%": '8.16e09',
"60%": '1.22e10',
"80%": '1.63e10',
"100%": '2.04e10'
},
"fatty": {
"20%": '4.44e09',
"40%": '8.88e09',
"60%": '1.33e10',
"80%": '1.78e10',
"100%": '2.22e10'
}
}
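# Example: lesion density "1.0", dose "100%", breast density "dense", and
# size "5.0" resolve (via _DOSETABLE and _split_generators below) to:
#   <_REPO>/device_data_VICTREPhantoms_spic_1.0/8.67e09/dense/2/5.0/SIM.zip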
# Links to the metadata archive and the project README
_URLS = {
"meta_data": f"{_REPO}/metadata/bounds.zip",
"read_me": f"{_REPO}/README.md"
}
# Define the labels or classes in your dataset
#_NAMES = ["raw", "mhd", "dicom", "loc"]
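# Maps each split's `name` kwarg to the sub-directory that holds its files
# inside the extracted archives: "SIM" for images and masks, "bounds" for
# the metadata .npy files.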
DATA_DIR = {"all_data": "SIM", "seg": "SIM", "info": "bounds"}
class msynthConfig(datasets.BuilderConfig):
"""msynth dataset"""
lesion_density = _LESIONDENSITY
dose = _DOSE
density = _DENSITY
size = _SIZE
def __init__(self, name, **kwargs):
super(msynthConfig, self).__init__(
version=datasets.Version("1.0.0"),
name=name,
description="msynth",
**kwargs,
)
class msynth(datasets.GeneratorBasedBuilder):
"""msynth dataset."""
DEFAULT_WRITER_BATCH_SIZE = 256
BUILDER_CONFIGS = [
msynthConfig("device_data"),
msynthConfig("segmentation_mask"),
msynthConfig("metadata"),
]
def _info(self):
if self.config.name == "device_data":
# Define dataset features and keys
features = datasets.Features(
{
"Raw": datasets.Value("string"),
"mhd": datasets.Value("string"),
"loc": datasets.Value("string"),
"dcm": datasets.Value("string"),
"density": datasets.Value("string"),
"mass_radius": datasets.Value("float32")
}
)
#keys = ("image", "metadata")
elif self.config.name == "segmentation_mask":
# Define features and keys
features = datasets.Features(
{
"Raw": datasets.Value("string"),
"mhd": datasets.Value("string"),
"loc": datasets.Value("string"),
"density": datasets.Value("string"),
"mass_radius": datasets.Value("string")
}
)
elif self.config.name == "metadata":
# Define features and keys
features = datasets.Features(
{
"fatty": datasets.Value("string"),
"dense": datasets.Value("string"),
"hetero": datasets.Value("string"),
"scattered": datasets.Value("string")
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
)
    def _split_generators(self, dl_manager: datasets.DownloadManager):
# Setting up the **config_kwargs parameters
if self.config.lesion_density == "all":
self.config.lesion_density = _LESIONDENSITY
if self.config.dose == "all":
self.config.dose = _DOSE
if self.config.density == "all":
self.config.density = _DENSITY
if self.config.size == "all":
self.config.size = _SIZE
if self.config.name == "device_data":
file_name = []
for ld in self.config.lesion_density:
for ds in self.config.dose:
for den in self.config.density:
value = _DOSETABLE[den][ds]
for sz in self.config.size:
                            temp_name = (
                                "device_data_VICTREPhantoms_spic_"
                                + ld
                                + "/"
                                + value
                                + "/"
                                + den
                                + "/2/"
                                + sz
                                + "/"
                                + _DETECTOR
                                + ".zip"
                            )
                            file_name.append(_REPO + "/" + temp_name)
# Downloading the data files
            data_dir = []
            for url in file_name:
                try:
                    # Attempt to download the file
                    temp_down_file = dl_manager.download_and_extract(url)
                    data_dir.append(temp_down_file)
                except Exception as e:
                    # If a download fails (e.g., file not found), log the
                    # error and skip this URL
                    logger.error(f"Failed to download {url}: {e}")
return [
datasets.SplitGenerator(
name="device_data",
                gen_kwargs={
                    "files": data_dir,
                    "name": "all_data",
                },
),
]
elif self.config.name == "segmentation_mask":
seg_file_name = []
for den in self.config.density:
for sz in self.config.size:
                    temp_name = (
                        "segmentation_masks"
                        + "/"
                        + den
                        + "/2/"
                        + sz
                        + "/"
                        + _DETECTOR
                        + ".zip"
                    )
                    seg_file_name.append(_REPO + "/" + temp_name)
# Downloading the files
            seg_dir = []
            for url in seg_file_name:
                try:
                    # Attempt to download the file
                    temp_down_file = dl_manager.download_and_extract(url)
                    seg_dir.append(temp_down_file)
                except Exception as e:
                    # If a download fails (e.g., file not found), log the
                    # error and skip this URL
                    logger.error(f"Failed to download {url}: {e}")
return [
datasets.SplitGenerator(
name="segmentation_mask",
                gen_kwargs={
                    "files": seg_dir,
                    "name": "seg",
                },
),
]
elif self.config.name == "metadata":
meta_dir = dl_manager.download_and_extract(_URLS['meta_data'])
return [
datasets.SplitGenerator(
name="metadata",
gen_kwargs={
"files": meta_dir,
"name": "info",
},
),
]
def get_all_file_paths(self, root_directory):
file_paths = [] # List to store file paths
# Walk through the directory and its subdirectories using os.walk
for folder, _, files in os.walk(root_directory):
for file in files:
if file.endswith('.raw'):
# Get the full path of the file
file_path = os.path.join(folder, file)
file_paths.append(file_path)
return file_paths
def get_support_file_path(self, raw_file_path, ext):
folder_path = os.path.dirname(raw_file_path)
# Use os.path.basename() to extract the filename
raw_file_name = os.path.basename(raw_file_path)
# Use os.path.splitext() to split the filename into root and extension
root, extension = os.path.splitext(raw_file_name)
if ext == "dcm":
supp_file_name = f"000.{ext}"
file_path = os.path.join(folder_path,"DICOM_dm",supp_file_name)
else:
supp_file_name = f"{root}.{ext}"
file_path = os.path.join(folder_path, supp_file_name)
if os.path.isfile(file_path):
return file_path
else:
return "Not available for this raw file"
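    # For example (illustrative paths):
    #   get_support_file_path(".../SIM/img.raw", "mhd") -> ".../SIM/img.mhd"
    #   get_support_file_path(".../SIM/img.raw", "dcm") -> ".../SIM/DICOM_dm/000.dcm"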
def sort_file_paths(self, file_paths):
digit_numbers = []
for file_path in file_paths:
for word in _DENSITY:
if word in file_path:
if self.config.name == "device_data":
pattern = rf"{word}.(\d+\.)(\d+)"
elif self.config.name == "segmentation_mask":
pattern = rf"{word}.(\d+)"
match = re.search(pattern, file_path)
if self.config.name == "device_data":
digit_numbers.append(int(match.group(2)))
elif self.config.name == "segmentation_mask":
digit_numbers.append(int(match.group(1)))
break
# Sort the list of numbers while keeping track of the original indices
sorted_numbers_with_indices = sorted(enumerate(digit_numbers), key=lambda x: x[1])
# Extract the sorted numbers and their original indices
sorted_indices, sorted_numbers = zip(*sorted_numbers_with_indices)
# Sort the file paths
sorted_file_paths = [file_paths[i] for i in sorted_indices]
return sorted_file_paths
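    # sort_file_paths orders paths by a numeric id parsed from the file name:
    # for device_data, the digits after "<density>.<n>."; for
    # segmentation_mask, the digits right after "<density>." (file-name
    # layout assumed from the regexes above).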
def _generate_examples(self, files, name):
if self.config.name == "device_data":
key = 0
data_dir = []
for folder in files:
tmp_dir = []
tmp_dir = self.get_all_file_paths(os.path.join(folder, DATA_DIR[name]))
data_dir = data_dir + tmp_dir
data_dir = self.sort_file_paths(data_dir)
for path in data_dir:
res_dic = {}
for word in _DENSITY:
if word in path:
breast_density = word
pattern = rf"(\d+\.\d+)_{word}"
match = re.search(pattern, path)
matched_text = match.group(1)
break
# Get image id to filter the respective row of the csv
image_id = os.path.basename(path)
# Use os.path.splitext() to split the filename into root and extension
root, extension = os.path.splitext(image_id)
# Get the extension without the dot
                image_labels = extension.lstrip(".")
                res_dic["Raw"] = path
                res_dic["mhd"] = self.get_support_file_path(path, "mhd")
                res_dic["loc"] = self.get_support_file_path(path, "loc")
                res_dic["dcm"] = self.get_support_file_path(path, "dcm")
                res_dic["density"] = breast_density
                # Cast so the value matches the float32 "mass_radius" feature
                res_dic["mass_radius"] = float(matched_text)
yield key, res_dic
key += 1
if self.config.name == "segmentation_mask":
key = 0
data_dir = []
examples = []
for folder in files:
tmp_dir = []
tmp_dir = self.get_all_file_paths(os.path.join(folder, DATA_DIR[name]))
data_dir = data_dir + tmp_dir
data_dir = self.sort_file_paths(data_dir)
            # Insert a "None" placeholder after each mask path
            new_data_dir = []
            count = 1
            loc = 0
            while loc < len(data_dir):
                if count % 2 == 1:
                    new_data_dir.append(data_dir[loc])
                    loc += 1
                else:
                    new_data_dir.append("None")
                count += 1
            new_data_dir.append("None")
for path in new_data_dir:
res_dic = {}
if path == "None":
res_dic["Raw"] = "None"
res_dic["mhd"] = "None"
res_dic["loc"] = "None"
res_dic["density"] = "None"
res_dic["mass_radius"] = "None"
else:
for word in _DENSITY:
if word in path:
breast_density = word
pattern = rf"(\d+\.\d+)_{word}"
match = re.search(pattern, path)
matched_text = match.group(1)
break
# Get image id to filter the respective row of the csv
image_id = os.path.basename(path)
# Use os.path.splitext() to split the filename into root and extension
root, extension = os.path.splitext(image_id)
# Get the extension without the dot
image_labels = extension.lstrip(".")
res_dic["Raw"] = path
res_dic["mhd"] = self.get_support_file_path(path, "mhd")
res_dic["loc"] = self.get_support_file_path(path, "loc")
res_dic["density"] = breast_density
res_dic["mass_radius"] = matched_text
examples.append(res_dic)
for example in examples:
yield key, {**example}
key += 1
examples = []
        if self.config.name == "metadata":
            key = 0
            meta_dir = os.path.join(files, DATA_DIR[name])
            res_dic = {
                "fatty": os.path.join(meta_dir, 'bounds_fatty.npy'),
                "dense": os.path.join(meta_dir, 'bounds_dense.npy'),
                "hetero": os.path.join(meta_dir, 'bounds_hetero.npy'),
                "scattered": os.path.join(meta_dir, 'bounds_scattered.npy')
            }
            yield key, res_dic
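# Consuming the metadata split (a sketch; the columns hold file paths to
# NumPy arrays, and `allow_pickle` is an assumption about how the bounds
# files were saved):
#
#   import numpy as np
#   from datasets import load_dataset
#   meta = load_dataset("didsr/msynth", "metadata", trust_remote_code=True)
#   bounds = np.load(meta["metadata"][0]["fatty"], allow_pickle=True)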