{
    "imports": [
        "$import torch",
        "$from datetime import datetime",
        "$from pathlib import Path"
    ],
    "bundle_root": ".",
    "model_dir": "$@bundle_root + '/models'",
    "dataset_dir": "/workspace/data/medical",
    "output_dir": "$@bundle_root + '/output'",
    "create_output_dir": "$Path(@output_dir).mkdir(exist_ok=True)",
    "device": "$torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')",
    "output_orig_postfix": "orig",
    "output_recon_postfix": "recon",
    "channel": 0,
    "spacing": [
        1.1,
        1.1,
        1.1
    ],
    "spatial_dims": 3,
    "image_channels": 1,
    "latent_channels": 8,
    "infer_patch_size": [
        144,
        176,
        112
    ],
    "autoencoder_def": {
        "_target_": "generative.networks.nets.AutoencoderKL",
        "spatial_dims": "@spatial_dims",
        "in_channels": "@image_channels",
        "out_channels": "@image_channels",
        "latent_channels": "@latent_channels",
        "num_channels": [
            64,
            128,
            256
        ],
        "num_res_blocks": 2,
        "norm_num_groups": 32,
        "norm_eps": 1e-06,
        "attention_levels": [
            false,
            false,
            false
        ],
        "with_encoder_nonlocal_attn": false,
        "with_decoder_nonlocal_attn": false
    },
    "load_autoencoder_path": "$@bundle_root + '/models/model_autoencoder.pt'",
    "load_autoencoder": "$@autoencoder_def.load_state_dict(torch.load(@load_autoencoder_path))",
    "autoencoder": "$@autoencoder_def.to(@device)",
    "preprocessing_transforms": [
        {
            "_target_": "LoadImaged",
            "keys": "image"
        },
        {
            "_target_": "EnsureChannelFirstd",
            "keys": "image"
        },
        {
            "_target_": "Lambdad",
            "keys": "image",
            "func": "$lambda x: x[@channel, :, :, :]"
        },
        {
            "_target_": "AddChanneld",
            "keys": "image"
        },
        {
            "_target_": "EnsureTyped",
            "keys": "image"
        },
        {
            "_target_": "Orientationd",
            "keys": "image",
            "axcodes": "RAS"
        },
        {
            "_target_": "Spacingd",
            "keys": "image",
            "pixdim": "@spacing",
            "mode": "bilinear"
        }
    ],
    "crop_transforms": [
        {
            "_target_": "CenterSpatialCropd",
            "keys": "image",
            "roi_size": "@infer_patch_size"
        }
    ],
    "final_transforms": [
        {
            "_target_": "ScaleIntensityRangePercentilesd",
            "keys": "image",
            "lower": 0,
            "upper": 99.5,
            "b_min": 0,
            "b_max": 1
        }
    ],
    "preprocessing": {
        "_target_": "Compose",
        "transforms": "$@preprocessing_transforms + @crop_transforms + @final_transforms"
    },
    "dataset": {
        "_target_": "monai.apps.DecathlonDataset",
        "root_dir": "@dataset_dir",
        "task": "Task01_BrainTumour",
        "section": "validation",
        "cache_rate": 0.0,
        "num_workers": 8,
        "download": false,
        "transform": "@preprocessing"
    },
    "dataloader": {
        "_target_": "DataLoader",
        "dataset": "@dataset",
        "batch_size": 1,
        "shuffle": true,
        "num_workers": 0
    },
    "saver_orig": {
        "_target_": "SaveImage",
        "_requires_": "@create_output_dir",
        "output_dir": "@output_dir",
        "output_postfix": "@output_orig_postfix",
        "resample": false,
        "padding_mode": "zeros"
    },
    "saver_recon": {
        "_target_": "SaveImage",
        "_requires_": "@create_output_dir",
        "output_dir": "@output_dir",
        "output_postfix": "@output_recon_postfix",
        "resample": false,
        "padding_mode": "zeros"
    },
    "input_img": "$monai.utils.first(@dataloader)['image'].to(@device)",
    "recon_img": "$@autoencoder(@input_img)[0][0]",
    "run": [
        "$@load_autoencoder",
        "$@saver_orig(@input_img[0])",
        "$@saver_recon(@recon_img)"
    ]
}
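
A minimal driver sketch (not part of the bundle itself) showing one way such a config is typically executed through monai.bundle.ConfigParser. The file location configs/inference.json is an assumption for illustration; substitute the actual path of this config.

# Hypothetical driver for the config above, assuming it is saved as
# configs/inference.json and that models/model_autoencoder.pt exists
# (see load_autoencoder_path).
from monai.bundle import ConfigParser

parser = ConfigParser()
parser.read_config("configs/inference.json")  # assumed path to this file
parser["bundle_root"] = "."  # entries can be overridden before resolution

# Resolving "run" evaluates its expressions in order: load the autoencoder
# weights, save the original cropped patch with the "orig" postfix, then
# save the reconstruction with the "recon" postfix under @output_dir.
parser.get_parsed_content("run")

With a recent MONAI release, the same section can usually be executed from the command line with "python -m monai.bundle run --config_file configs/inference.json".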