Commit of spleen_ct_segmentation_v0.1.0 from Project-MONAI/model-zoo/hosting_storage_v1
Files changed:

- .gitattributes +1 -0
- README.md +56 -0
- configs/evaluate.json +77 -0
- configs/inference.json +153 -0
- configs/logging.conf +21 -0
- configs/metadata.json +78 -0
- configs/multi_gpu_train.json +36 -0
- configs/train.json +288 -0
- docs/README.md +56 -0
- docs/license.txt +6 -0
- models/model.pt +3 -0
- models/model.ts +3 -0
.gitattributes
CHANGED
```
@@ -29,3 +29,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+models/model.ts filter=lfs diff=lfs merge=lfs -text
```
README.md
ADDED
# Description
A pre-trained model for volumetric (3D) segmentation of the spleen from CT images.

# Model Overview
This model is trained with the pipeline of the "Medical Segmentation Decathlon Challenge 2018" runner-up [1], using the UNet architecture [2] with 32 training images and 9 validation images.

## Data
The training dataset is Task09_Spleen.tar from http://medicaldecathlon.com/.

## Training configuration
The training was performed on GPUs with at least 12 GB of memory.

Actual model input: 96 x 96 x 96

## Input and output formats
Input: 1-channel CT image

Output: 2 channels: label 1 is spleen; label 0 is everything else

## Scores
This model achieves the following Dice score on the validation data (our own split of the training dataset):

Mean Dice = 0.96

## Commands example
Execute training:

```
python -m monai.bundle run training --meta_file configs/metadata.json --config_file configs/train.json --logging_file configs/logging.conf
```

Override the `train` config to execute multi-GPU training:

```
torchrun --standalone --nnodes=1 --nproc_per_node=2 -m monai.bundle run training --meta_file configs/metadata.json --config_file "['configs/train.json','configs/multi_gpu_train.json']" --logging_file configs/logging.conf
```

Override the `train` config to execute evaluation with the trained model:

```
python -m monai.bundle run evaluating --meta_file configs/metadata.json --config_file "['configs/train.json','configs/evaluate.json']" --logging_file configs/logging.conf
```

Execute inference:

```
python -m monai.bundle run evaluating --meta_file configs/metadata.json --config_file configs/inference.json --logging_file configs/logging.conf
```
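The configs can also be consumed programmatically. Below is a minimal sketch (not part of this bundle) that parses `configs/inference.json` with `monai.bundle.ConfigParser` and instantiates the network definition; it assumes MONAI 0.9+ is installed and that it is run from the bundle root.

```
# A minimal sketch, assuming MONAI >= 0.9 and the bundle root as working directory.
from monai.bundle import ConfigParser

parser = ConfigParser()
parser.read_config("configs/inference.json")

# "network_def" is resolved into an actual UNet instance; "@"/"$" references
# inside the config are evaluated on demand by the parser.
network = parser.get_parsed_content("network_def")
print(network)
```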
# Disclaimer
This is an example, not to be used for diagnostic purposes.

# References
[1] Xia, Yingda, et al. "3D Semi-Supervised Learning with Uncertainty-Aware Multi-View Co-Training." arXiv preprint arXiv:1811.12506 (2018). https://arxiv.org/abs/1811.12506.

[2] Kerfoot E., Clough J., Oksuz I., Lee J., King A.P., Schnabel J.A. (2019) Left-Ventricle Quantification Using Residual U-Net. In: Pop M. et al. (eds) Statistical Atlases and Computational Models of the Heart. Atrial Segmentation and LV Quantification Challenges. STACOM 2018. Lecture Notes in Computer Science, vol 11395. Springer, Cham. https://doi.org/10.1007/978-3-030-12029-0_40
configs/evaluate.json
ADDED
```
{
    "validate#postprocessing": {
        "_target_": "Compose",
        "transforms": [
            {
                "_target_": "Activationsd",
                "keys": "pred",
                "softmax": true
            },
            {
                "_target_": "Invertd",
                "keys": ["pred", "label"],
                "transform": "@validate#preprocessing",
                "orig_keys": "image",
                "meta_key_postfix": "meta_dict",
                "nearest_interp": [false, true],
                "to_tensor": true
            },
            {
                "_target_": "AsDiscreted",
                "keys": ["pred", "label"],
                "argmax": [true, false],
                "to_onehot": 2
            },
            {
                "_target_": "SaveImaged",
                "keys": "pred",
                "meta_keys": "pred_meta_dict",
                "output_dir": "@output_dir",
                "resample": false,
                "squeeze_end_dims": true
            }
        ]
    },
    "validate#handlers": [
        {
            "_target_": "CheckpointLoader",
            "load_path": "$@ckpt_dir + '/model.pt'",
            "load_dict": {"model": "@network"}
        },
        {
            "_target_": "StatsHandler",
            "iteration_log": false
        },
        {
            "_target_": "MetricsSaver",
            "save_dir": "@output_dir",
            "metrics": ["val_mean_dice", "val_acc"],
            "metric_details": ["val_mean_dice"],
            "batch_transform": "$monai.handlers.from_engine(['image_meta_dict'])",
            "summary_ops": "*"
        }
    ],
    "evaluating": [
        "$setattr(torch.backends.cudnn, 'benchmark', True)",
        "$@validate#evaluator.run()"
    ]
}
```
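This file is not a standalone config: as in the README command above, it is passed after `configs/train.json` so that its top-level ids (`validate#postprocessing`, `validate#handlers`, `evaluating`) replace the ones defined there. A rough sketch of that layering using only the standard library (the real merge is performed by `monai.bundle`):

```
# Approximate the config layering with plain JSON merging; illustrative only.
import json

with open("configs/train.json") as f:
    config = json.load(f)
with open("configs/evaluate.json") as f:
    config.update(json.load(f))  # later files win for matching top-level ids

print(sorted(k for k in config if k.startswith("validate")))
```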
configs/inference.json
ADDED
```
{
    "imports": ["$import glob", "$import os"],
    "bundle_root": "/workspace/data/tutorials/modules/bundle/spleen_segmentation",
    "output_dir": "$@bundle_root + '/eval'",
    "dataset_dir": "/workspace/data/Task09_Spleen",
    "datalist": "$list(sorted(glob.glob(@dataset_dir + '/imagesTs/*.nii.gz')))",
    "device": "$torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')",
    "network_def": {
        "_target_": "UNet",
        "spatial_dims": 3,
        "in_channels": 1,
        "out_channels": 2,
        "channels": [16, 32, 64, 128, 256],
        "strides": [2, 2, 2, 2],
        "num_res_units": 2,
        "norm": "batch"
    },
    "network": "$@network_def.to(@device)",
    "preprocessing": {
        "_target_": "Compose",
        "transforms": [
            {"_target_": "LoadImaged", "keys": "image"},
            {"_target_": "EnsureChannelFirstd", "keys": "image"},
            {"_target_": "Orientationd", "keys": "image", "axcodes": "RAS"},
            {"_target_": "Spacingd", "keys": "image", "pixdim": [1.5, 1.5, 2.0], "mode": "bilinear"},
            {"_target_": "ScaleIntensityRanged", "keys": "image", "a_min": -57, "a_max": 164, "b_min": 0, "b_max": 1, "clip": true},
            {"_target_": "EnsureTyped", "keys": "image"}
        ]
    },
    "dataset": {
        "_target_": "Dataset",
        "data": "$[{'image': i} for i in @datalist]",
        "transform": "@preprocessing"
    },
    "dataloader": {
        "_target_": "DataLoader",
        "dataset": "@dataset",
        "batch_size": 1,
        "shuffle": false,
        "num_workers": 4
    },
    "inferer": {
        "_target_": "SlidingWindowInferer",
        "roi_size": [96, 96, 96],
        "sw_batch_size": 4,
        "overlap": 0.5
    },
    "postprocessing": {
        "_target_": "Compose",
        "transforms": [
            {"_target_": "Activationsd", "keys": "pred", "softmax": true},
            {"_target_": "Invertd", "keys": "pred", "transform": "@preprocessing", "orig_keys": "image", "meta_key_postfix": "meta_dict", "nearest_interp": false, "to_tensor": true},
            {"_target_": "AsDiscreted", "keys": "pred", "argmax": true},
            {"_target_": "SaveImaged", "keys": "pred", "meta_keys": "pred_meta_dict", "output_dir": "@output_dir"}
        ]
    },
    "handlers": [
        {
            "_target_": "CheckpointLoader",
            "load_path": "$@bundle_root + '/models/model.pt'",
            "load_dict": {"model": "@network"}
        },
        {"_target_": "StatsHandler", "iteration_log": false}
    ],
    "evaluator": {
        "_target_": "SupervisedEvaluator",
        "device": "@device",
        "val_data_loader": "@dataloader",
        "network": "@network",
        "inferer": "@inferer",
        "postprocessing": "@postprocessing",
        "val_handlers": "@handlers",
        "amp": true
    },
    "evaluating": [
        "$setattr(torch.backends.cudnn, 'benchmark', True)",
        "$@evaluator.run()"
    ]
}
```
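For a quick standalone check of the sliding-window setup this config describes, the same inference can be sketched directly against the bundled TorchScript export (`models/model.ts`). This is illustrative only; the bundle itself drives it through `SupervisedEvaluator`, and the dummy input below stands in for a preprocessed CT volume.

```
# Illustrative sketch only; assumes MONAI is installed.
import torch
from monai.inferers import SlidingWindowInferer

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = torch.jit.load("models/model.ts", map_location=device).eval()

inferer = SlidingWindowInferer(roi_size=(96, 96, 96), sw_batch_size=4, overlap=0.5)

# Dummy 1-channel volume already scaled to [0, 1], as ScaleIntensityRanged would produce.
image = torch.rand(1, 1, 160, 160, 96, device=device)
with torch.no_grad():
    pred = inferer(inputs=image, network=model)  # (1, 2, 160, 160, 96) logits
print(pred.shape)
```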
configs/logging.conf
ADDED
```
[loggers]
keys=root

[handlers]
keys=consoleHandler

[formatters]
keys=fullFormatter

[logger_root]
level=INFO
handlers=consoleHandler

[handler_consoleHandler]
class=StreamHandler
level=INFO
formatter=fullFormatter
args=(sys.stdout,)

[formatter_fullFormatter]
format=%(asctime)s - %(name)s - %(levelname)s - %(message)s
```
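This is a standard `logging.config` file; the bundle CLI applies it via `--logging_file`. Outside the CLI the same configuration can be loaded with the standard library:

```
# Load the bundle's logging configuration in a plain Python script.
import logging
import logging.config

logging.config.fileConfig("configs/logging.conf", disable_existing_loggers=False)
logging.getLogger(__name__).info("logging configured from configs/logging.conf")
```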
configs/metadata.json
ADDED
```
{
    "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_20220324.json",
    "version": "0.1.0",
    "changelog": {
        "0.1.0": "complete the model package",
        "0.0.1": "initialize the model package structure"
    },
    "monai_version": "0.9.0",
    "pytorch_version": "1.10.0",
    "numpy_version": "1.21.2",
    "optional_packages_version": {
        "nibabel": "3.2.1",
        "pytorch-ignite": "0.4.8"
    },
    "task": "Decathlon spleen segmentation",
    "description": "A pre-trained model for volumetric (3D) segmentation of the spleen from CT image",
    "authors": "MONAI team",
    "copyright": "Copyright (c) MONAI Consortium",
    "data_source": "Task09_Spleen.tar from http://medicaldecathlon.com/",
    "data_type": "nibabel",
    "image_classes": "single channel data, intensity scaled to [0, 1]",
    "label_classes": "single channel data, 1 is spleen, 0 is everything else",
    "pred_classes": "2 channels OneHot data, channel 1 is spleen, channel 0 is background",
    "eval_metrics": {
        "mean_dice": 0.96
    },
    "intended_use": "This is an example, not to be used for diagnostic purposes",
    "references": [
        "Xia, Yingda, et al. '3D Semi-Supervised Learning with Uncertainty-Aware Multi-View Co-Training.' arXiv preprint arXiv:1811.12506 (2018). https://arxiv.org/abs/1811.12506.",
        "Kerfoot E., Clough J., Oksuz I., Lee J., King A.P., Schnabel J.A. (2019) Left-Ventricle Quantification Using Residual U-Net. In: Pop M. et al. (eds) Statistical Atlases and Computational Models of the Heart. Atrial Segmentation and LV Quantification Challenges. STACOM 2018. Lecture Notes in Computer Science, vol 11395. Springer, Cham. https://doi.org/10.1007/978-3-030-12029-0_40"
    ],
    "network_data_format": {
        "inputs": {
            "image": {
                "type": "image",
                "format": "hounsfield",
                "modality": "CT",
                "num_channels": 1,
                "spatial_shape": [96, 96, 96],
                "dtype": "float32",
                "value_range": [0, 1],
                "is_patch_data": true,
                "channel_def": {"0": "image"}
            }
        },
        "outputs": {
            "pred": {
                "type": "image",
                "format": "segmentation",
                "num_channels": 2,
                "spatial_shape": [96, 96, 96],
                "dtype": "float32",
                "value_range": [0, 1],
                "is_patch_data": true,
                "channel_def": {"0": "background", "1": "spleen"}
            }
        }
    }
}
```
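The metadata pins the framework versions the bundle was built against; a small sketch for comparing them with the local environment before running the bundle:

```
# Read the bundle metadata and print the pinned vs. installed versions.
import json

import monai
import torch

with open("configs/metadata.json") as f:
    meta = json.load(f)

print("bundle version:", meta["version"])
print("expected monai / pytorch:", meta["monai_version"], "/", meta["pytorch_version"])
print("installed monai / pytorch:", monai.__version__, "/", torch.__version__)
```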
configs/multi_gpu_train.json
ADDED
```
{
    "device": "$torch.device(f'cuda:{dist.get_rank()}')",
    "network": {
        "_target_": "torch.nn.parallel.DistributedDataParallel",
        "module": "$@network_def.to(@device)",
        "device_ids": ["@device"]
    },
    "train#sampler": {
        "_target_": "DistributedSampler",
        "dataset": "@train#dataset",
        "even_divisible": true,
        "shuffle": true
    },
    "train#dataloader#sampler": "@train#sampler",
    "train#dataloader#shuffle": false,
    "train#trainer#train_handlers": "$@train#handlers[: -2 if dist.get_rank() > 0 else None]",
    "validate#sampler": {
        "_target_": "DistributedSampler",
        "dataset": "@validate#dataset",
        "even_divisible": false,
        "shuffle": false
    },
    "validate#dataloader#sampler": "@validate#sampler",
    "validate#evaluator#val_handlers": "$None if dist.get_rank() > 0 else @validate#handlers",
    "training": [
        "$import torch.distributed as dist",
        "$dist.init_process_group(backend='nccl')",
        "$torch.cuda.set_device(@device)",
        "$monai.utils.set_determinism(seed=123)",
        "$setattr(torch.backends.cudnn, 'benchmark', True)",
        "$@train#trainer.run()",
        "$dist.destroy_process_group()"
    ]
}
```
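This config is layered on top of `configs/train.json` (see the torchrun command in the README); its `$`-expressions correspond to the usual one-process-per-GPU DDP setup. A hedged plain-PyTorch sketch of the same steps, for orientation only (the bundle evaluates them from the config itself):

```
# Illustrative equivalent of the "training" expressions above when launched by torchrun.
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel


def wrap_for_ddp(network_def: torch.nn.Module) -> torch.nn.Module:
    dist.init_process_group(backend="nccl")           # one process per GPU
    device = torch.device(f"cuda:{dist.get_rank()}")  # single-node rank == local GPU index
    torch.cuda.set_device(device)
    return DistributedDataParallel(network_def.to(device), device_ids=[device])
```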
configs/train.json
ADDED
```
{
    "imports": ["$import glob", "$import os", "$import ignite"],
    "bundle_root": "/workspace/data/tutorials/modules/bundle/spleen_segmentation",
    "ckpt_dir": "$@bundle_root + '/models'",
    "output_dir": "$@bundle_root + '/eval'",
    "dataset_dir": "/workspace/data/Task09_Spleen",
    "images": "$list(sorted(glob.glob(@dataset_dir + '/imagesTr/*.nii.gz')))",
    "labels": "$list(sorted(glob.glob(@dataset_dir + '/labelsTr/*.nii.gz')))",
    "device": "$torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')",
    "network_def": {
        "_target_": "UNet",
        "spatial_dims": 3,
        "in_channels": 1,
        "out_channels": 2,
        "channels": [16, 32, 64, 128, 256],
        "strides": [2, 2, 2, 2],
        "num_res_units": 2,
        "norm": "batch"
    },
    "network": "$@network_def.to(@device)",
    "loss": {
        "_target_": "DiceCELoss",
        "to_onehot_y": true,
        "softmax": true,
        "squared_pred": true,
        "batch": true
    },
    "optimizer": {
        "_target_": "torch.optim.Adam",
        "params": "$@network.parameters()",
        "lr": 0.0001
    },
    "train": {
        "deterministic_transforms": [
            {"_target_": "LoadImaged", "keys": ["image", "label"]},
            {"_target_": "EnsureChannelFirstd", "keys": ["image", "label"]},
            {"_target_": "Orientationd", "keys": ["image", "label"], "axcodes": "RAS"},
            {"_target_": "Spacingd", "keys": ["image", "label"], "pixdim": [1.5, 1.5, 2.0], "mode": ["bilinear", "nearest"]},
            {"_target_": "ScaleIntensityRanged", "keys": "image", "a_min": -57, "a_max": 164, "b_min": 0, "b_max": 1, "clip": true},
            {"_target_": "EnsureTyped", "keys": ["image", "label"]}
        ],
        "random_transforms": [
            {
                "_target_": "RandCropByPosNegLabeld",
                "keys": ["image", "label"],
                "label_key": "label",
                "spatial_size": [96, 96, 96],
                "pos": 1,
                "neg": 1,
                "num_samples": 4,
                "image_key": "image",
                "image_threshold": 0
            }
        ],
        "preprocessing": {
            "_target_": "Compose",
            "transforms": "$@train#deterministic_transforms + @train#random_transforms"
        },
        "dataset": {
            "_target_": "CacheDataset",
            "data": "$[{'image': i, 'label': l} for i, l in zip(@images[:-9], @labels[:-9])]",
            "transform": "@train#preprocessing",
            "cache_rate": 1.0,
            "num_workers": 4
        },
        "dataloader": {
            "_target_": "DataLoader",
            "dataset": "@train#dataset",
            "batch_size": 2,
            "shuffle": true,
            "num_workers": 4
        },
        "inferer": {
            "_target_": "SimpleInferer"
        },
        "postprocessing": {
            "_target_": "Compose",
            "transforms": [
                {"_target_": "Activationsd", "keys": "pred", "softmax": true},
                {"_target_": "AsDiscreted", "keys": ["pred", "label"], "argmax": [true, false], "to_onehot": 2}
            ]
        },
        "handlers": [
            {
                "_target_": "ValidationHandler",
                "validator": "@validate#evaluator",
                "epoch_level": true,
                "interval": 5
            },
            {
                "_target_": "StatsHandler",
                "tag_name": "train_loss",
                "output_transform": "$monai.handlers.from_engine(['loss'], first=True)"
            },
            {
                "_target_": "TensorBoardStatsHandler",
                "log_dir": "@output_dir",
                "tag_name": "train_loss",
                "output_transform": "$monai.handlers.from_engine(['loss'], first=True)"
            }
        ],
        "key_metric": {
            "train_accuracy": {
                "_target_": "ignite.metrics.Accuracy",
                "output_transform": "$monai.handlers.from_engine(['pred', 'label'])"
            }
        },
        "trainer": {
            "_target_": "SupervisedTrainer",
            "max_epochs": 100,
            "device": "@device",
            "train_data_loader": "@train#dataloader",
            "network": "@network",
            "loss_function": "@loss",
            "optimizer": "@optimizer",
            "inferer": "@train#inferer",
            "postprocessing": "@train#postprocessing",
            "key_train_metric": "@train#key_metric",
            "train_handlers": "@train#handlers",
            "amp": true
        }
    },
    "validate": {
        "preprocessing": {
            "_target_": "Compose",
            "transforms": "%train#deterministic_transforms"
        },
        "dataset": {
            "_target_": "CacheDataset",
            "data": "$[{'image': i, 'label': l} for i, l in zip(@images[-9:], @labels[-9:])]",
            "transform": "@validate#preprocessing",
            "cache_rate": 1.0
        },
        "dataloader": {
            "_target_": "DataLoader",
            "dataset": "@validate#dataset",
            "batch_size": 1,
            "shuffle": false,
            "num_workers": 4
        },
        "inferer": {
            "_target_": "SlidingWindowInferer",
            "roi_size": [96, 96, 96],
            "sw_batch_size": 4,
            "overlap": 0.5
        },
        "postprocessing": "%train#postprocessing",
        "handlers": [
            {"_target_": "StatsHandler", "iteration_log": false},
            {"_target_": "TensorBoardStatsHandler", "log_dir": "@output_dir", "iteration_log": false},
            {
                "_target_": "CheckpointSaver",
                "save_dir": "@ckpt_dir",
                "save_dict": {"model": "@network"},
                "save_key_metric": true,
                "key_metric_filename": "model.pt"
            }
        ],
        "key_metric": {
            "val_mean_dice": {
                "_target_": "MeanDice",
                "include_background": false,
                "output_transform": "$monai.handlers.from_engine(['pred', 'label'])"
            }
        },
        "additional_metrics": {
            "val_accuracy": {
                "_target_": "ignite.metrics.Accuracy",
                "output_transform": "$monai.handlers.from_engine(['pred', 'label'])"
            }
        },
        "evaluator": {
            "_target_": "SupervisedEvaluator",
            "device": "@device",
            "val_data_loader": "@validate#dataloader",
            "network": "@network",
            "inferer": "@validate#inferer",
            "postprocessing": "@validate#postprocessing",
            "key_val_metric": "@validate#key_metric",
            "additional_metrics": "@validate#additional_metrics",
            "val_handlers": "@validate#handlers",
            "amp": true
        }
    },
    "training": [
        "$monai.utils.set_determinism(seed=123)",
        "$setattr(torch.backends.cudnn, 'benchmark', True)",
        "$@train#trainer.run()"
    ]
}
```
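The `$`-expressions for `images`/`labels` and the two `CacheDataset` entries encode the 32/9 train-validation split mentioned in the README. In plain Python the same split looks like this (paths as configured above):

```
# Sketch of the data split encoded in the "$..." expressions:
# the last 9 image/label pairs are held out for validation.
import glob

dataset_dir = "/workspace/data/Task09_Spleen"
images = sorted(glob.glob(dataset_dir + "/imagesTr/*.nii.gz"))
labels = sorted(glob.glob(dataset_dir + "/labelsTr/*.nii.gz"))

train_files = [{"image": i, "label": l} for i, l in zip(images[:-9], labels[:-9])]
val_files = [{"image": i, "label": l} for i, l in zip(images[-9:], labels[-9:])]
print(len(train_files), "training pairs,", len(val_files), "validation pairs")
```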
docs/README.md
ADDED
# Description
A pre-trained model for volumetric (3D) segmentation of the spleen from CT images.

# Model Overview
This model is trained with the pipeline of the "Medical Segmentation Decathlon Challenge 2018" runner-up [1], using the UNet architecture [2] with 32 training images and 9 validation images.

## Data
The training dataset is Task09_Spleen.tar from http://medicaldecathlon.com/.

## Training configuration
The training was performed on GPUs with at least 12 GB of memory.

Actual model input: 96 x 96 x 96

## Input and output formats
Input: 1-channel CT image

Output: 2 channels: label 1 is spleen; label 0 is everything else

## Scores
This model achieves the following Dice score on the validation data (our own split of the training dataset):

Mean Dice = 0.96

## Commands example
Execute training:

```
python -m monai.bundle run training --meta_file configs/metadata.json --config_file configs/train.json --logging_file configs/logging.conf
```

Override the `train` config to execute multi-GPU training:

```
torchrun --standalone --nnodes=1 --nproc_per_node=2 -m monai.bundle run training --meta_file configs/metadata.json --config_file "['configs/train.json','configs/multi_gpu_train.json']" --logging_file configs/logging.conf
```

Override the `train` config to execute evaluation with the trained model:

```
python -m monai.bundle run evaluating --meta_file configs/metadata.json --config_file "['configs/train.json','configs/evaluate.json']" --logging_file configs/logging.conf
```

Execute inference:

```
python -m monai.bundle run evaluating --meta_file configs/metadata.json --config_file configs/inference.json --logging_file configs/logging.conf
```

# Disclaimer
This is an example, not to be used for diagnostic purposes.

# References
[1] Xia, Yingda, et al. "3D Semi-Supervised Learning with Uncertainty-Aware Multi-View Co-Training." arXiv preprint arXiv:1811.12506 (2018). https://arxiv.org/abs/1811.12506.

[2] Kerfoot E., Clough J., Oksuz I., Lee J., King A.P., Schnabel J.A. (2019) Left-Ventricle Quantification Using Residual U-Net. In: Pop M. et al. (eds) Statistical Atlases and Computational Models of the Heart. Atrial Segmentation and LV Quantification Challenges. STACOM 2018. Lecture Notes in Computer Science, vol 11395. Springer, Cham. https://doi.org/10.1007/978-3-030-12029-0_40
docs/license.txt
ADDED
```
Third Party Licenses
-----------------------------------------------------------------------

/*********************************************************************/
i. Medical Segmentation Decathlon
   http://medicaldecathlon.com/
```
models/model.pt
ADDED
```
version https://git-lfs.github.com/spec/v1
oid sha256:aeb453bda5be3653f3eec5795de5c5435c41e4b712e7d39e2d44f2461aab7ac8
size 19303897
```
models/model.ts
ADDED
```
version https://git-lfs.github.com/spec/v1
oid sha256:1bfeacbda35620f7a8edd7a5b75dc34255a234bb516dfd5c8df1408191c5159a
size 19398019
```
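Both model files are stored as Git LFS pointers: `models/model.pt` holds the trained weights loaded by `CheckpointLoader`, and `models/model.ts` is the TorchScript export used earlier for standalone inference. A hedged sketch of loading the weights into a freshly built network; the architecture mirrors `network_def` in the configs, and the exact checkpoint layout depends on how `CheckpointSaver` wrote it:

```
# Sketch only; assumes MONAI is installed and the checkpoint is either a raw
# state dict or a {"model": state_dict} mapping.
import torch
from monai.networks.nets import UNet

net = UNet(
    spatial_dims=3,
    in_channels=1,
    out_channels=2,
    channels=(16, 32, 64, 128, 256),
    strides=(2, 2, 2, 2),
    num_res_units=2,
    norm="batch",
)
ckpt = torch.load("models/model.pt", map_location="cpu")
state_dict = ckpt.get("model", ckpt) if isinstance(ckpt, dict) else ckpt
net.load_state_dict(state_dict)
net.eval()
```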