farrell236
committed on
Commit
•
0db6a31
1
Parent(s):
29935ef
Upload Model
Browse files- .gitattributes +5 -0
- Dockerfile +13 -0
- README.md +153 -0
- inference.sh +9 -0
- nnUNet_preprocessed/.gitkeep +0 -0
- nnUNet_raw_data_base/.gitkeep +0 -0
- nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_0/debug.json +87 -0
- nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_0/model_final_checkpoint.model +3 -0
- nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_0/model_final_checkpoint.model.pkl +3 -0
- nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_0/progress.png +3 -0
- nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_1/debug.json +87 -0
- nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_1/model_final_checkpoint.model +3 -0
- nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_1/model_final_checkpoint.model.pkl +3 -0
- nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_1/progress.png +3 -0
- nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_2/model_final_checkpoint.model +3 -0
- nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_2/model_final_checkpoint.model.pkl +3 -0
- nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_2/progress.png +3 -0
- nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_3/model_final_checkpoint.model +3 -0
- nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_3/model_final_checkpoint.model.pkl +3 -0
- nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_3/progress.png +3 -0
- nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_4/model_final_checkpoint.model +3 -0
- nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_4/model_final_checkpoint.model.pkl +3 -0
- nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_4/progress.png +3 -0
- nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/plans.pkl +3 -0
- nnunet_predict.py +52 -0
.gitattributes
CHANGED
@@ -32,3 +32,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
32 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
33 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
34 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
32 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
33 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
34 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
35 |
+
nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_0/progress.png filter=lfs diff=lfs merge=lfs -text
|
36 |
+
nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_1/progress.png filter=lfs diff=lfs merge=lfs -text
|
37 |
+
nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_2/progress.png filter=lfs diff=lfs merge=lfs -text
|
38 |
+
nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_3/progress.png filter=lfs diff=lfs merge=lfs -text
|
39 |
+
nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_4/progress.png filter=lfs diff=lfs merge=lfs -text
|
Dockerfile
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
FROM nvidia/cuda:11.6.2-base-ubuntu20.04

# Avoid interactive prompts (e.g. tzdata) during package installation.
ENV DEBIAN_FRONTEND=noninteractive

# Install Python in a single layer and drop the apt cache to keep the image
# small.  `apt-get` is preferred over `apt` in scripts (stable CLI).
RUN apt-get update && \
    apt-get upgrade -y && \
    apt-get install -y --no-install-recommends python3 python3-pip && \
    rm -rf /var/lib/apt/lists/*

# nnU-Net inference dependencies; --no-cache-dir avoids bloating the layer.
RUN pip install --no-cache-dir torch torchvision torchaudio nnunet matplotlib

# nnU-Net locates trained models through this environment variable.
ENV RESULTS_FOLDER="/root/results"

COPY inference.sh inference.sh
COPY nnUNet_trained_models /root/results

RUN chmod +x inference.sh
|
README.md
CHANGED
@@ -1,3 +1,156 @@
|
|
1 |
---
|
2 |
license: mit
|
3 |
---
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
---
|
2 |
license: mit
|
3 |
---
|
4 |
+
|
5 |
+
# Ascites Segmentation with nnUNet
|
6 |
+
|
7 |
+
## Method 1: Run Inference using `nnunet_predict.py`
|
8 |
+
|
9 |
+
1. Install the latest version of [nnUNet](https://github.com/MIC-DKFZ/nnUNet#installation) and [PyTorch](https://pytorch.org/get-started/locally/).
|
10 |
+
|
11 |
+
```shell
|
12 |
+
user@machine:~/ascites_segmentation$ pip install torch torchvision torchaudio nnunet matplotlib
|
13 |
+
```
|
14 |
+
|
15 |
+
2. Run inference with command:
|
16 |
+
|
17 |
+
```shell
|
18 |
+
user@machine:~/ascites_segmentation$ python nnunet_predict.py -i file_list.txt -t TMP_DIR -o OUTPUT_FOLDER -m /path/to/nnunet/model_weights
|
19 |
+
```
|
20 |
+
|
21 |
+
```shell
|
22 |
+
usage: nnunet_predict.py [-h] [-i INPUT_LIST] -t TMP_FOLDER -o OUTPUT_FOLDER -m MODEL [-v]
|
23 |
+
|
24 |
+
Inference using nnU-Net predict_from_folder Python API
|
25 |
+
|
26 |
+
optional arguments:
|
27 |
+
-h, --help show this help message and exit
|
28 |
+
-i INPUT_LIST, --input_list INPUT_LIST
|
29 |
+
Input image file_list.txt
|
30 |
+
-t TMP_FOLDER, --tmp_folder TMP_FOLDER
|
31 |
+
Temporary folder
|
32 |
+
-o OUTPUT_FOLDER, --output_folder OUTPUT_FOLDER
|
33 |
+
Output Segmentation folder
|
34 |
+
-m MODEL, --model MODEL
|
35 |
+
Trained Model
|
36 |
+
-v, --verbose Verbose Output
|
37 |
+
```
|
38 |
+
|
39 |
+
|
40 |
+
|
41 |
+
N.B.
|
42 |
+
- `model_weights` folder should contain `fold_0`, `fold_1`, etc...
|
43 |
+
- WARNING: the program will try to create file links first, but will fall back to copying the files if linking fails
|
44 |
+
|
45 |
+
|
46 |
+
## Method 2: Run Inference using `nnUNet_predict` from shell
|
47 |
+
|
48 |
+
1. Install the latest version of [nnUNet](https://github.com/MIC-DKFZ/nnUNet#installation) and [PyTorch](https://pytorch.org/get-started/locally/).
|
49 |
+
|
50 |
+
```shell
|
51 |
+
user@machine:~/ascites_segmentation$ pip install torch torchvision torchaudio nnunet matplotlib
|
52 |
+
```
|
53 |
+
|
54 |
+
2. Place checkpoints in directory tree:
|
55 |
+
|
56 |
+
```shell
|
57 |
+
user@machine:~/ascites_segmentation$ tree .
|
58 |
+
.
|
59 |
+
├── nnUNet_preprocessed
|
60 |
+
├── nnUNet_raw_data_base
|
61 |
+
└── nnUNet_trained_models
|
62 |
+
└── nnUNet
|
63 |
+
└── 3d_fullres
|
64 |
+
└── Task505_TCGA-OV
|
65 |
+
└── nnUNetTrainerV2__nnUNetPlansv2.1
|
66 |
+
├── fold_0
|
67 |
+
│ ├── debug.json
|
68 |
+
│ ├── model_final_checkpoint.model
|
69 |
+
│ ├── model_final_checkpoint.model.pkl
|
70 |
+
│ └── progress.png
|
71 |
+
├── fold_1
|
72 |
+
│ ├── debug.json
|
73 |
+
│ ├── model_final_checkpoint.model
|
74 |
+
│ ├── model_final_checkpoint.model.pkl
|
75 |
+
│ └── progress.png
|
76 |
+
├── fold_2
|
77 |
+
│ ├── model_final_checkpoint.model
|
78 |
+
│ ├── model_final_checkpoint.model.pkl
|
79 |
+
│ └── progress.png
|
80 |
+
├── fold_3
|
81 |
+
│ ├── model_final_checkpoint.model
|
82 |
+
│ ├── model_final_checkpoint.model.pkl
|
83 |
+
│ └── progress.png
|
84 |
+
├── fold_4
|
85 |
+
│ ├── model_final_checkpoint.model
|
86 |
+
│ ├── model_final_checkpoint.model.pkl
|
87 |
+
│ └── progress.png
|
88 |
+
└── plans.pkl
|
89 |
+
```
|
90 |
+
|
91 |
+
3. Setup environment variables so that nnU-Net knows where to find trained models:
|
92 |
+
|
93 |
+
```shell
|
94 |
+
user@machine:~/ascites_segmentation$ export nnUNet_raw_data_base="/absolute/path/to/nnUNet_raw_data_base"
|
95 |
+
user@machine:~/ascites_segmentation$ export nnUNet_preprocessed="/absolute/path/to/nnUNet_preprocessed"
|
96 |
+
user@machine:~/ascites_segmentation$ export RESULTS_FOLDER="/absolute/path/to/nnUNet_trained_models"
|
97 |
+
```
|
98 |
+
|
99 |
+
4. Run inference with command:
|
100 |
+
|
101 |
+
```shell
|
102 |
+
user@machine:~/ascites_segmentation$ nnUNet_predict -i INPUT_FOLDER -o OUTPUT_FOLDER -t 505 -m 3d_fullres -f N --save_npz
|
103 |
+
```
|
104 |
+
|
105 |
+
where:
|
106 |
+
- `-i`: input folder of `.nii.gz` scans to predict. NB, filename needs to end with `_0000.nii.gz` to tell nnU-Net only one kind of modality
|
107 |
+
- `-o`: output folder to store predicted segmentations, automatically created if it does not exist
|
108 |
+
- `-t 505`: (do not change) Ascites pretrained task ID
|
109 |
+
- `-m 3d_fullres`: (do not change) Ascites pretrained model configuration
|
110 |
+
- `N`: Ascites pretrained model fold, can be `[0, 1, 2, 3, 4]`
|
111 |
+
- `--save_npz`: save softmax scores, required for ensembling multiple folds
|
112 |
+
|
113 |
+
### Optional [Additional] Inference Steps
|
114 |
+
|
115 |
+
a. use `nnUNet_find_best_configuration` to automatically get the inference commands needed to run the trained model on data.
|
116 |
+
|
117 |
+
b. ensemble predictions using `nnUNet_ensemble` by running:
|
118 |
+
|
119 |
+
```shell
|
120 |
+
user@machine:~/ascites_segmentation$ nnUNet_ensemble -f FOLDER1 FOLDER2 ... -o OUTPUT_FOLDER -pp POSTPROCESSING_FILE
|
121 |
+
```
|
122 |
+
|
123 |
+
where `FOLDER1` and `FOLDER2` are predicted outputs by nnUNet (requires `--save_npz` when running `nnUNet_predict`).
|
124 |
+
|
125 |
+
## Method 3: Docker Inference
|
126 |
+
|
127 |
+
Requires `nvidia-docker` to be installed on the system ([Installation Guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html)). This `nnunet_docker` predicts ascites with all 5 trained folds and ensembles output to a single prediction.
|
128 |
+
|
129 |
+
1. Build the `nnunet_docker` image from `Dockerfile`:
|
130 |
+
|
131 |
+
```shell
|
132 |
+
user@machine:~/ascites_segmentation$ sudo docker build -t nnunet_docker .
|
133 |
+
```
|
134 |
+
|
135 |
+
2. Run docker image on test volumes:
|
136 |
+
|
137 |
+
```shell
|
138 |
+
user@machine:~/ascites_segmentation$ sudo docker run \
|
139 |
+
--gpus 0 \
|
140 |
+
--volume /absolute/path/to/INPUT_FOLDER:/tmp/INPUT_FOLDER \
|
141 |
+
--volume /absolute/path/to/OUTPUT_FOLDER:/tmp/OUTPUT_FOLDER \
|
142 |
+
nnunet_docker /bin/sh inference.sh
|
143 |
+
```
|
144 |
+
|
145 |
+
|
146 |
+
|
147 |
+
- `--gpus` parameter:
|
148 |
+
- `0, 1, 2, ..., n` for integer number of GPUs
|
149 |
+
- `all` for all available GPUs on the system
|
150 |
+
- `'"device=2,3"'` for specific GPU with ID
|
151 |
+
|
152 |
+
- `--volume` parameter
|
153 |
+
- `/absolute/path/to/INPUT_FOLDER` and `/absolute/path/to/OUTPUT_FOLDER` folders on the host system needs to be specified
|
154 |
+
- `INPUT_FOLDER` contains all `.nii.gz` volumes to be predicted
|
155 |
+
- predicted results will be written to `OUTPUT_FOLDER`
|
156 |
+
|
inference.sh
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/bin/bash
# Predict ascites segmentation with all five trained folds, then ensemble the
# per-fold softmax outputs into a single prediction in /tmp/OUTPUT_FOLDER/.
#
# Abort immediately if any fold fails, so we never ensemble partial output.
set -e

for fold in 0 1 2 3 4; do
    nnUNet_predict -i /tmp/INPUT_FOLDER/ -o "output_fold_${fold}" -t 505 -m 3d_fullres -f "${fold}" --save_npz
done

nnUNet_ensemble -f output_fold_0 output_fold_1 output_fold_2 output_fold_3 output_fold_4 -o /tmp/OUTPUT_FOLDER/
|
nnUNet_preprocessed/.gitkeep
ADDED
File without changes
|
nnUNet_raw_data_base/.gitkeep
ADDED
File without changes
|
nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_0/debug.json
ADDED
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"all_tr_losses": "[]",
|
3 |
+
"all_val_eval_metrics": "[]",
|
4 |
+
"all_val_losses": "[]",
|
5 |
+
"all_val_losses_tr_mode": "[]",
|
6 |
+
"also_val_in_tr_mode": "False",
|
7 |
+
"amp_grad_scaler": "None",
|
8 |
+
"base_num_features": "32",
|
9 |
+
"basic_generator_patch_size": "[ 32 263 263]",
|
10 |
+
"batch_dice": "True",
|
11 |
+
"batch_size": "2",
|
12 |
+
"best_MA_tr_loss_for_patience": "None",
|
13 |
+
"best_epoch_based_on_MA_tr_loss": "None",
|
14 |
+
"best_val_eval_criterion_MA": "None",
|
15 |
+
"classes": "[1]",
|
16 |
+
"conv_per_stage": "2",
|
17 |
+
"data_aug_params": "{'selected_data_channels': None, 'selected_seg_channels': [0], 'do_elastic': False, 'elastic_deform_alpha': (0.0, 200.0), 'elastic_deform_sigma': (9.0, 13.0), 'p_eldef': 0.2, 'do_scaling': True, 'scale_range': (0.7, 1.4), 'independent_scale_factor_for_each_axis': False, 'p_independent_scale_per_axis': 1, 'p_scale': 0.2, 'do_rotation': True, 'rotation_x': (-3.141592653589793, 3.141592653589793), 'rotation_y': (-0.5235987755982988, 0.5235987755982988), 'rotation_z': (-0.5235987755982988, 0.5235987755982988), 'rotation_p_per_axis': 1, 'p_rot': 0.2, 'random_crop': False, 'random_crop_dist_to_border': None, 'do_gamma': True, 'gamma_retain_stats': True, 'gamma_range': (0.7, 1.5), 'p_gamma': 0.3, 'do_mirror': True, 'mirror_axes': (0, 1, 2), 'dummy_2D': True, 'mask_was_used_for_normalization': OrderedDict([(0, False)]), 'border_mode_data': 'constant', 'all_segmentation_labels': None, 'move_last_seg_chanel_to_data': False, 'cascade_do_cascade_augmentations': False, 'cascade_random_binary_transform_p': 0.4, 'cascade_random_binary_transform_p_per_label': 1, 'cascade_random_binary_transform_size': (1, 8), 'cascade_remove_conn_comp_p': 0.2, 'cascade_remove_conn_comp_max_size_percent_threshold': 0.15, 'cascade_remove_conn_comp_fill_with_other_class_p': 0.0, 'do_additive_brightness': False, 'additive_brightness_p_per_sample': 0.15, 'additive_brightness_p_per_channel': 0.5, 'additive_brightness_mu': 0.0, 'additive_brightness_sigma': 0.1, 'num_threads': 12, 'num_cached_per_thread': 2, 'patch_size_for_spatialtransform': array([ 32, 224, 224])}",
|
18 |
+
"dataset_directory": "/data/houbb/nnunet/nnUNet_preprocessed/Task505_TCGA-OV",
|
19 |
+
"deep_supervision_scales": "[[1, 1, 1], [1.0, 0.5, 0.5], [1.0, 0.25, 0.25], [0.5, 0.125, 0.125], [0.25, 0.0625, 0.0625]]",
|
20 |
+
"deterministic": "False",
|
21 |
+
"dl_tr": "<nnunet.training.dataloading.dataset_loading.DataLoader3D object at 0x2aab60629a90>",
|
22 |
+
"dl_val": "<nnunet.training.dataloading.dataset_loading.DataLoader3D object at 0x2aab60840a60>",
|
23 |
+
"do_dummy_2D_aug": "True",
|
24 |
+
"ds_loss_weights": "[0.53333333 0.26666667 0.13333333 0.06666667 0. ]",
|
25 |
+
"epoch": "0",
|
26 |
+
"experiment_name": "nnUNetTrainerV2",
|
27 |
+
"fold": "0",
|
28 |
+
"folder_with_preprocessed_data": "/data/houbb/nnunet/nnUNet_preprocessed/Task505_TCGA-OV/nnUNetData_plans_v2.1_stage1",
|
29 |
+
"fp16": "True",
|
30 |
+
"gt_niftis_folder": "/data/houbb/nnunet/nnUNet_preprocessed/Task505_TCGA-OV/gt_segmentations",
|
31 |
+
"inference_pad_border_mode": "constant",
|
32 |
+
"inference_pad_kwargs": "{'constant_values': 0}",
|
33 |
+
"init_args": "('/data/houbb/nnunet/nnUNet_preprocessed/Task505_TCGA-OV/nnUNetPlansv2.1_plans_3D.pkl', 0, '/data/houbb/nnunet/nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1', '/data/houbb/nnunet/nnUNet_preprocessed/Task505_TCGA-OV', True, 1, True, False, True)",
|
34 |
+
"initial_lr": "0.01",
|
35 |
+
"log_file": "/data/houbb/nnunet/nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_0/training_log_2023_3_6_15_19_10.txt",
|
36 |
+
"lr_scheduler": "None",
|
37 |
+
"lr_scheduler_eps": "0.001",
|
38 |
+
"lr_scheduler_patience": "30",
|
39 |
+
"lr_threshold": "1e-06",
|
40 |
+
"max_num_epochs": "1000",
|
41 |
+
"min_region_size_per_class": "None",
|
42 |
+
"min_size_per_class": "None",
|
43 |
+
"net_conv_kernel_sizes": "[[1, 3, 3], [1, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]]",
|
44 |
+
"net_num_pool_op_kernel_sizes": "[[1, 2, 2], [1, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]]",
|
45 |
+
"net_pool_per_axis": "[3, 5, 5]",
|
46 |
+
"normalization_schemes": "OrderedDict([(0, 'CT')])",
|
47 |
+
"num_batches_per_epoch": "250",
|
48 |
+
"num_classes": "2",
|
49 |
+
"num_input_channels": "1",
|
50 |
+
"num_val_batches_per_epoch": "50",
|
51 |
+
"online_eval_fn": "[]",
|
52 |
+
"online_eval_foreground_dc": "[]",
|
53 |
+
"online_eval_fp": "[]",
|
54 |
+
"online_eval_tp": "[]",
|
55 |
+
"only_keep_largest_connected_component": "None",
|
56 |
+
"optimizer": "SGD (\nParameter Group 0\n dampening: 0\n foreach: None\n lr: 0.01\n maximize: False\n momentum: 0.99\n nesterov: True\n weight_decay: 3e-05\n)",
|
57 |
+
"output_folder": "/data/houbb/nnunet/nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_0",
|
58 |
+
"output_folder_base": "/data/houbb/nnunet/nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1",
|
59 |
+
"oversample_foreground_percent": "0.33",
|
60 |
+
"pad_all_sides": "None",
|
61 |
+
"patch_size": "[ 32 224 224]",
|
62 |
+
"patience": "50",
|
63 |
+
"pin_memory": "True",
|
64 |
+
"plans_file": "/data/houbb/nnunet/nnUNet_preprocessed/Task505_TCGA-OV/nnUNetPlansv2.1_plans_3D.pkl",
|
65 |
+
"regions_class_order": "None",
|
66 |
+
"save_best_checkpoint": "True",
|
67 |
+
"save_every": "50",
|
68 |
+
"save_final_checkpoint": "True",
|
69 |
+
"save_intermediate_checkpoints": "True",
|
70 |
+
"save_latest_only": "True",
|
71 |
+
"stage": "1",
|
72 |
+
"threeD": "True",
|
73 |
+
"tr_gen": "<batchgenerators.dataloading.multi_threaded_augmenter.MultiThreadedAugmenter object at 0x2aab60860e50>",
|
74 |
+
"train_loss_MA": "None",
|
75 |
+
"train_loss_MA_alpha": "0.93",
|
76 |
+
"train_loss_MA_eps": "0.0005",
|
77 |
+
"transpose_backward": "[0, 1, 2]",
|
78 |
+
"transpose_forward": "[0, 1, 2]",
|
79 |
+
"unpack_data": "True",
|
80 |
+
"use_mask_for_norm": "OrderedDict([(0, False)])",
|
81 |
+
"use_progress_bar": "False",
|
82 |
+
"val_eval_criterion_MA": "None",
|
83 |
+
"val_eval_criterion_alpha": "0.9",
|
84 |
+
"val_gen": "<batchgenerators.dataloading.multi_threaded_augmenter.MultiThreadedAugmenter object at 0x2aab60860070>",
|
85 |
+
"was_initialized": "True",
|
86 |
+
"weight_decay": "3e-05"
|
87 |
+
}
|
nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_0/model_final_checkpoint.model
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:56644bf1f0b2f32355a157a7382f328fb21baae7c50a03764b27a0c4422146cd
|
3 |
+
size 248004539
|
nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_0/model_final_checkpoint.model.pkl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:a628a985ecf0ea47a291512e0f73153fd3c74ece2bc1ad568d5506a6afb29206
|
3 |
+
size 124440
|
nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_0/progress.png
ADDED
Git LFS Details
|
nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_1/debug.json
ADDED
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"all_tr_losses": "[]",
|
3 |
+
"all_val_eval_metrics": "[]",
|
4 |
+
"all_val_losses": "[]",
|
5 |
+
"all_val_losses_tr_mode": "[]",
|
6 |
+
"also_val_in_tr_mode": "False",
|
7 |
+
"amp_grad_scaler": "None",
|
8 |
+
"base_num_features": "32",
|
9 |
+
"basic_generator_patch_size": "[ 32 263 263]",
|
10 |
+
"batch_dice": "True",
|
11 |
+
"batch_size": "2",
|
12 |
+
"best_MA_tr_loss_for_patience": "None",
|
13 |
+
"best_epoch_based_on_MA_tr_loss": "None",
|
14 |
+
"best_val_eval_criterion_MA": "None",
|
15 |
+
"classes": "[1]",
|
16 |
+
"conv_per_stage": "2",
|
17 |
+
"data_aug_params": "{'selected_data_channels': None, 'selected_seg_channels': [0], 'do_elastic': False, 'elastic_deform_alpha': (0.0, 200.0), 'elastic_deform_sigma': (9.0, 13.0), 'p_eldef': 0.2, 'do_scaling': True, 'scale_range': (0.7, 1.4), 'independent_scale_factor_for_each_axis': False, 'p_independent_scale_per_axis': 1, 'p_scale': 0.2, 'do_rotation': True, 'rotation_x': (-3.141592653589793, 3.141592653589793), 'rotation_y': (-0.5235987755982988, 0.5235987755982988), 'rotation_z': (-0.5235987755982988, 0.5235987755982988), 'rotation_p_per_axis': 1, 'p_rot': 0.2, 'random_crop': False, 'random_crop_dist_to_border': None, 'do_gamma': True, 'gamma_retain_stats': True, 'gamma_range': (0.7, 1.5), 'p_gamma': 0.3, 'do_mirror': True, 'mirror_axes': (0, 1, 2), 'dummy_2D': True, 'mask_was_used_for_normalization': OrderedDict([(0, False)]), 'border_mode_data': 'constant', 'all_segmentation_labels': None, 'move_last_seg_chanel_to_data': False, 'cascade_do_cascade_augmentations': False, 'cascade_random_binary_transform_p': 0.4, 'cascade_random_binary_transform_p_per_label': 1, 'cascade_random_binary_transform_size': (1, 8), 'cascade_remove_conn_comp_p': 0.2, 'cascade_remove_conn_comp_max_size_percent_threshold': 0.15, 'cascade_remove_conn_comp_fill_with_other_class_p': 0.0, 'do_additive_brightness': False, 'additive_brightness_p_per_sample': 0.15, 'additive_brightness_p_per_channel': 0.5, 'additive_brightness_mu': 0.0, 'additive_brightness_sigma': 0.1, 'num_threads': 12, 'num_cached_per_thread': 2, 'patch_size_for_spatialtransform': array([ 32, 224, 224])}",
|
18 |
+
"dataset_directory": "/data/houbb/nnunet/nnUNet_preprocessed/Task505_TCGA-OV",
|
19 |
+
"deep_supervision_scales": "[[1, 1, 1], [1.0, 0.5, 0.5], [1.0, 0.25, 0.25], [0.5, 0.125, 0.125], [0.25, 0.0625, 0.0625]]",
|
20 |
+
"deterministic": "False",
|
21 |
+
"dl_tr": "<nnunet.training.dataloading.dataset_loading.DataLoader3D object at 0x2aab60629a90>",
|
22 |
+
"dl_val": "<nnunet.training.dataloading.dataset_loading.DataLoader3D object at 0x2aab60840a60>",
|
23 |
+
"do_dummy_2D_aug": "True",
|
24 |
+
"ds_loss_weights": "[0.53333333 0.26666667 0.13333333 0.06666667 0. ]",
|
25 |
+
"epoch": "0",
|
26 |
+
"experiment_name": "nnUNetTrainerV2",
|
27 |
+
"fold": "1",
|
28 |
+
"folder_with_preprocessed_data": "/data/houbb/nnunet/nnUNet_preprocessed/Task505_TCGA-OV/nnUNetData_plans_v2.1_stage1",
|
29 |
+
"fp16": "True",
|
30 |
+
"gt_niftis_folder": "/data/houbb/nnunet/nnUNet_preprocessed/Task505_TCGA-OV/gt_segmentations",
|
31 |
+
"inference_pad_border_mode": "constant",
|
32 |
+
"inference_pad_kwargs": "{'constant_values': 0}",
|
33 |
+
"init_args": "('/data/houbb/nnunet/nnUNet_preprocessed/Task505_TCGA-OV/nnUNetPlansv2.1_plans_3D.pkl', 1, '/data/houbb/nnunet/nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1', '/data/houbb/nnunet/nnUNet_preprocessed/Task505_TCGA-OV', True, 1, True, False, True)",
|
34 |
+
"initial_lr": "0.01",
|
35 |
+
"log_file": "/data/houbb/nnunet/nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_1/training_log_2023_3_6_15_23_41.txt",
|
36 |
+
"lr_scheduler": "None",
|
37 |
+
"lr_scheduler_eps": "0.001",
|
38 |
+
"lr_scheduler_patience": "30",
|
39 |
+
"lr_threshold": "1e-06",
|
40 |
+
"max_num_epochs": "1000",
|
41 |
+
"min_region_size_per_class": "None",
|
42 |
+
"min_size_per_class": "None",
|
43 |
+
"net_conv_kernel_sizes": "[[1, 3, 3], [1, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]]",
|
44 |
+
"net_num_pool_op_kernel_sizes": "[[1, 2, 2], [1, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]]",
|
45 |
+
"net_pool_per_axis": "[3, 5, 5]",
|
46 |
+
"normalization_schemes": "OrderedDict([(0, 'CT')])",
|
47 |
+
"num_batches_per_epoch": "250",
|
48 |
+
"num_classes": "2",
|
49 |
+
"num_input_channels": "1",
|
50 |
+
"num_val_batches_per_epoch": "50",
|
51 |
+
"online_eval_fn": "[]",
|
52 |
+
"online_eval_foreground_dc": "[]",
|
53 |
+
"online_eval_fp": "[]",
|
54 |
+
"online_eval_tp": "[]",
|
55 |
+
"only_keep_largest_connected_component": "None",
|
56 |
+
"optimizer": "SGD (\nParameter Group 0\n dampening: 0\n foreach: None\n lr: 0.01\n maximize: False\n momentum: 0.99\n nesterov: True\n weight_decay: 3e-05\n)",
|
57 |
+
"output_folder": "/data/houbb/nnunet/nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_1",
|
58 |
+
"output_folder_base": "/data/houbb/nnunet/nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1",
|
59 |
+
"oversample_foreground_percent": "0.33",
|
60 |
+
"pad_all_sides": "None",
|
61 |
+
"patch_size": "[ 32 224 224]",
|
62 |
+
"patience": "50",
|
63 |
+
"pin_memory": "True",
|
64 |
+
"plans_file": "/data/houbb/nnunet/nnUNet_preprocessed/Task505_TCGA-OV/nnUNetPlansv2.1_plans_3D.pkl",
|
65 |
+
"regions_class_order": "None",
|
66 |
+
"save_best_checkpoint": "True",
|
67 |
+
"save_every": "50",
|
68 |
+
"save_final_checkpoint": "True",
|
69 |
+
"save_intermediate_checkpoints": "True",
|
70 |
+
"save_latest_only": "True",
|
71 |
+
"stage": "1",
|
72 |
+
"threeD": "True",
|
73 |
+
"tr_gen": "<batchgenerators.dataloading.multi_threaded_augmenter.MultiThreadedAugmenter object at 0x2aab60860c10>",
|
74 |
+
"train_loss_MA": "None",
|
75 |
+
"train_loss_MA_alpha": "0.93",
|
76 |
+
"train_loss_MA_eps": "0.0005",
|
77 |
+
"transpose_backward": "[0, 1, 2]",
|
78 |
+
"transpose_forward": "[0, 1, 2]",
|
79 |
+
"unpack_data": "True",
|
80 |
+
"use_mask_for_norm": "OrderedDict([(0, False)])",
|
81 |
+
"use_progress_bar": "False",
|
82 |
+
"val_eval_criterion_MA": "None",
|
83 |
+
"val_eval_criterion_alpha": "0.9",
|
84 |
+
"val_gen": "<batchgenerators.dataloading.multi_threaded_augmenter.MultiThreadedAugmenter object at 0x2aab60860370>",
|
85 |
+
"was_initialized": "True",
|
86 |
+
"weight_decay": "3e-05"
|
87 |
+
}
|
nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_1/model_final_checkpoint.model
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:960d42b0ce9a6dc9cb9f2bd1466b77b309dbc2c88d4603d9142d279a400b4816
|
3 |
+
size 248004347
|
nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_1/model_final_checkpoint.model.pkl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:3877369906a58ea40e8ba93bc61b55557c413d5ba0c1c3e5d348b955cbd15a59
|
3 |
+
size 124440
|
nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_1/progress.png
ADDED
Git LFS Details
|
nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_2/model_final_checkpoint.model
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:dc631cca47f1312fb67e05cc72734ff1efb6403945eaecbb8b98983661bba303
|
3 |
+
size 248004667
|
nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_2/model_final_checkpoint.model.pkl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:1225873cf4566e40fb775e58084c42a1c7fc826d29c77e117dbf71909b884104
|
3 |
+
size 124440
|
nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_2/progress.png
ADDED
Git LFS Details
|
nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_3/model_final_checkpoint.model
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:5eeb73292862ef9e00e3efe0a8790a51cc9ec48c979deeceac16f5bfe83982b6
|
3 |
+
size 248004731
|
nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_3/model_final_checkpoint.model.pkl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:37825c0116208da93671ddd1fd7dd6e0aa2195f1d63594c0f0d92a7ca554fa94
|
3 |
+
size 124440
|
nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_3/progress.png
ADDED
Git LFS Details
|
nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_4/model_final_checkpoint.model
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:a2516e80b12354bda0aafed366307ef707f7e203488d8ec18a1367f2f67d7dc5
|
3 |
+
size 248004475
|
nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_4/model_final_checkpoint.model.pkl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:cf068fd718b62f38878729a1a8667d73dcd1abe8ec1d6dc4de3fc5d499d9b701
|
3 |
+
size 124440
|
nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/fold_4/progress.png
ADDED
Git LFS Details
|
nnUNet_trained_models/nnUNet/3d_fullres/Task505_TCGA-OV/nnUNetTrainerV2__nnUNetPlansv2.1/plans.pkl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:9e337a85325a1b318c215a551e395a96ba9fa7e89fce8ad4a3a112e71a479e2e
|
3 |
+
size 123970
|
nnunet_predict.py
ADDED
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Run nnU-Net inference on a list of NIfTI volumes via the Python API.

Reads a text file of input image paths, stages them (hard link or copy) into a
uniquely-suffixed temporary folder with the `_0000` modality suffix nnU-Net
expects, runs `predict_from_folder`, and always cleans the temporary folder up.
"""
import argparse
import os
import random
import shutil

from nnunet.inference.predict import predict_from_folder


if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='Inference using nnU-Net predict_from_folder Python API')
    # `required=True` added: the original accepted a missing -i and then
    # crashed on `open(None)`; argparse now reports a clean usage error.
    parser.add_argument('-i', '--input_list', help='Input image file_list.txt', required=True)
    parser.add_argument('-t', '--tmp_folder', help='Temporary folder', required=True)
    parser.add_argument('-o', '--output_folder', help='Output Segmentation folder', required=True)
    parser.add_argument('-m', '--model', help='Trained Model', required=True)
    parser.add_argument('-v', '--verbose', help='Verbose Output', action='store_true', default=False)
    args = vars(parser.parse_args())

    # Append a random hex suffix so concurrent runs do not collide on the same
    # temporary folder (32 bits of randomness vs. the original 8, which gave
    # only 256 possible suffixes).
    args['tmp_folder'] += f'_{random.getrandbits(32):08x}'

    # Create the temporary directory, including any missing parent folders.
    os.makedirs(args['tmp_folder'])

    try:
        # Read the input file list: one image path per line.
        with open(args['input_list']) as f:
            image_list = f.read().splitlines()

        # nnU-Net expects single-modality inputs to end in `_0000.nii.gz`;
        # build a destination path inside tmp_folder for every image.
        image_list_link = [os.path.join(args['tmp_folder'], os.path.basename(x).replace('.nii.gz', '_0000.nii.gz'))
                           for x in image_list]

        # Hard-link each image into tmp_folder (cheap), falling back to a full
        # copy when linking fails (e.g. across filesystems).  OSError replaces
        # the original bare `except:`, which also swallowed KeyboardInterrupt.
        for src, dst in zip(image_list, image_list_link):
            try:
                os.link(src, dst)
            except OSError:
                shutil.copyfile(src, dst)

        # Run nnU-Net inference on the staged folder.
        predict_from_folder(args['model'], args['tmp_folder'], args['output_folder'], folds=None, save_npz=False,
                            num_threads_preprocessing=6, num_threads_nifti_save=2,
                            lowres_segmentations=None, part_id=0, num_parts=1, tta=False,
                            overwrite_existing=False, mode="fastest", overwrite_all_in_gpu=None,
                            mixed_precision=True, step_size=0.5, checkpoint_name="model_final_checkpoint")
    finally:
        # Always delete the temporary folder, even when prediction raised —
        # the original leaked tmp_folder on any failure.
        shutil.rmtree(args['tmp_folder'])
|