{
  "4": {
    "inputs": {
      "ckpt_name": "sd_xl_base_1.0.safetensors"
    },
    "class_type": "CheckpointLoaderSimple"
  },
  "5": {
    "inputs": {
      "width": 1024,
      "height": 1024,
      "batch_size": 1
    },
    "class_type": "EmptyLatentImage"
  },
  "6": {
    "inputs": {
      "text": "a photo of a cat",
      "clip": ["4", 1]
    },
    "class_type": "CLIPTextEncode"
  },
  "10": {
    "inputs": {
      "add_noise": "enable",
      "noise_seed": 42,
      "steps": 20,
      "cfg": 7.5,
      "sampler_name": "euler",
      "scheduler": "normal",
      "start_at_step": 0,
      "end_at_step": 16,
      "return_with_leftover_noise": "enable",
      "model": ["4", 0],
      "positive": ["6", 0],
      "negative": ["15", 0],
      "latent_image": ["5", 0]
    },
    "class_type": "KSamplerAdvanced"
  },
  "12": {
    "inputs": {
      "samples": ["14", 0],
      "vae": ["4", 2]
    },
    "class_type": "VAEDecode"
  },
  "13": {
    "inputs": {
      "filename_prefix": "test_inference",
      "images": ["12", 0]
    },
    "class_type": "SaveImage"
  },
  "14": {
    "inputs": {
      "add_noise": "disable",
      "noise_seed": 42,
      "steps": 20,
      "cfg": 7.5,
      "sampler_name": "euler",
      "scheduler": "normal",
      "start_at_step": 16,
      "end_at_step": 10000,
      "return_with_leftover_noise": "disable",
      "model": ["16", 0],
      "positive": ["17", 0],
      "negative": ["20", 0],
      "latent_image": ["10", 0]
    },
    "class_type": "KSamplerAdvanced"
  },
  "15": {
    "inputs": {
      "conditioning": ["6", 0]
    },
    "class_type": "ConditioningZeroOut"
  },
  "16": {
    "inputs": {
      "ckpt_name": "sd_xl_refiner_1.0.safetensors"
    },
    "class_type": "CheckpointLoaderSimple"
  },
  "17": {
    "inputs": {
      "text": "a photo of a cat",
      "clip": ["16", 1]
    },
    "class_type": "CLIPTextEncode"
  },
  "20": {
    "inputs": {
      "text": "",
      "clip": ["16", 1]
    },
    "class_type": "CLIPTextEncode"
  }
}