Upload 9 files
- scripts/custom_code.py +42 -0
- scripts/img2imgalt.py +183 -0
- scripts/loopback.py +83 -0
- scripts/outpainting_mk_2.py +262 -0
- scripts/poor_mans_outpainting.py +147 -0
- scripts/prompt_matrix.py +87 -0
- scripts/prompts_from_file.py +55 -0
- scripts/sd_upscale.py +97 -0
- scripts/xy_grid.py +313 -0
scripts/custom_code.py
ADDED
@@ -0,0 +1,42 @@
import modules.scripts as scripts
import gradio as gr

from modules.processing import Processed
from modules.shared import opts, cmd_opts, state


class Script(scripts.Script):

    def title(self):
        return "Custom code"

    def show(self, is_img2img):
        return cmd_opts.allow_code

    def ui(self, is_img2img):
        code = gr.Textbox(label="Python code", visible=False, lines=1)

        return [code]

    def run(self, p, code):
        assert cmd_opts.allow_code, '--allow-code option must be enabled'

        display_result_data = [[], -1, ""]

        def display(imgs, s=display_result_data[1], i=display_result_data[2]):
            display_result_data[0] = imgs
            display_result_data[1] = s
            display_result_data[2] = i

        from types import ModuleType
        compiled = compile(code, '', 'exec')
        module = ModuleType("testmodule")
        module.__dict__.update(globals())
        module.p = p
        module.display = display
        exec(compiled, module.__dict__)

        return Processed(p, *display_result_data)
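
Note: the script above executes whatever is typed into the "Python code" textbox inside a throwaway module whose namespace receives this file's globals plus `p` (the current processing job) and `display`. A minimal sketch of the kind of snippet a user might paste (hypothetical; anything beyond `p` and `display` must be imported by the snippet itself):

    p.prompt = p.prompt + ", watercolor"       # tweak the job before running it
    from modules import processing
    processed = processing.process_images(p)   # run the pipeline with the modified settings
    display(processed.images, processed.seed, processed.info)  # hand results back to the script
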
scripts/img2imgalt.py
ADDED
@@ -0,0 +1,183 @@
from collections import namedtuple

import numpy as np
from tqdm import trange

import modules.scripts as scripts
import gradio as gr

from modules import processing, shared, sd_samplers, prompt_parser
from modules.processing import Processed
from modules.shared import opts, cmd_opts, state

import torch
import k_diffusion as K

from PIL import Image
from torch import autocast
from einops import rearrange, repeat


def find_noise_for_image(p, cond, uncond, cfg_scale, steps):
    x = p.init_latent

    s_in = x.new_ones([x.shape[0]])
    dnw = K.external.CompVisDenoiser(shared.sd_model)
    sigmas = dnw.get_sigmas(steps).flip(0)

    shared.state.sampling_steps = steps

    for i in trange(1, len(sigmas)):
        shared.state.sampling_step += 1

        x_in = torch.cat([x] * 2)
        sigma_in = torch.cat([sigmas[i] * s_in] * 2)
        cond_in = torch.cat([uncond, cond])

        c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)]
        t = dnw.sigma_to_t(sigma_in)

        eps = shared.sd_model.apply_model(x_in * c_in, t, cond=cond_in)
        denoised_uncond, denoised_cond = (x_in + eps * c_out).chunk(2)

        denoised = denoised_uncond + (denoised_cond - denoised_uncond) * cfg_scale

        d = (x - denoised) / sigmas[i]
        dt = sigmas[i] - sigmas[i - 1]

        x = x + d * dt

        sd_samplers.store_latent(x)

        # This shouldn't be necessary, but solved some VRAM issues
        del x_in, sigma_in, cond_in, c_out, c_in, t,
        del eps, denoised_uncond, denoised_cond, denoised, d, dt

    shared.state.nextjob()

    return x / x.std()


Cached = namedtuple("Cached", ["noise", "cfg_scale", "steps", "latent", "original_prompt", "original_negative_prompt", "sigma_adjustment"])


# Based on changes suggested by briansemrau in https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/736
def find_noise_for_image_sigma_adjustment(p, cond, uncond, cfg_scale, steps):
    x = p.init_latent

    s_in = x.new_ones([x.shape[0]])
    dnw = K.external.CompVisDenoiser(shared.sd_model)
    sigmas = dnw.get_sigmas(steps).flip(0)

    shared.state.sampling_steps = steps

    for i in trange(1, len(sigmas)):
        shared.state.sampling_step += 1

        x_in = torch.cat([x] * 2)
        sigma_in = torch.cat([sigmas[i - 1] * s_in] * 2)
        cond_in = torch.cat([uncond, cond])

        c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)]

        if i == 1:
            t = dnw.sigma_to_t(torch.cat([sigmas[i] * s_in] * 2))
        else:
            t = dnw.sigma_to_t(sigma_in)

        eps = shared.sd_model.apply_model(x_in * c_in, t, cond=cond_in)
        denoised_uncond, denoised_cond = (x_in + eps * c_out).chunk(2)

        denoised = denoised_uncond + (denoised_cond - denoised_uncond) * cfg_scale

        if i == 1:
            d = (x - denoised) / (2 * sigmas[i])
        else:
            d = (x - denoised) / sigmas[i - 1]

        dt = sigmas[i] - sigmas[i - 1]
        x = x + d * dt

        sd_samplers.store_latent(x)

        # This shouldn't be necessary, but solved some VRAM issues
        del x_in, sigma_in, cond_in, c_out, c_in, t,
        del eps, denoised_uncond, denoised_cond, denoised, d, dt

    shared.state.nextjob()

    return x / sigmas[-1]


class Script(scripts.Script):
    def __init__(self):
        self.cache = None

    def title(self):
        return "img2img alternative test"

    def show(self, is_img2img):
        return is_img2img

    def ui(self, is_img2img):
        original_prompt = gr.Textbox(label="Original prompt", lines=1)
        original_negative_prompt = gr.Textbox(label="Original negative prompt", lines=1)
        cfg = gr.Slider(label="Decode CFG scale", minimum=0.0, maximum=15.0, step=0.1, value=1.0)
        st = gr.Slider(label="Decode steps", minimum=1, maximum=150, step=1, value=50)
        randomness = gr.Slider(label="Randomness", minimum=0.0, maximum=1.0, step=0.01, value=0.0)
        sigma_adjustment = gr.Checkbox(label="Sigma adjustment for finding noise for image", value=False)
        return [original_prompt, original_negative_prompt, cfg, st, randomness, sigma_adjustment]

    def run(self, p, original_prompt, original_negative_prompt, cfg, st, randomness, sigma_adjustment):
        p.batch_size = 1
        p.batch_count = 1

        def sample_extra(conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
            lat = (p.init_latent.cpu().numpy() * 10).astype(int)

            same_params = self.cache is not None and self.cache.cfg_scale == cfg and self.cache.steps == st \
                and self.cache.original_prompt == original_prompt \
                and self.cache.original_negative_prompt == original_negative_prompt \
                and self.cache.sigma_adjustment == sigma_adjustment
            same_everything = same_params and self.cache.latent.shape == lat.shape and np.abs(self.cache.latent-lat).sum() < 100

            if same_everything:
                rec_noise = self.cache.noise
            else:
                shared.state.job_count += 1
                cond = p.sd_model.get_learned_conditioning(p.batch_size * [original_prompt])
                uncond = p.sd_model.get_learned_conditioning(p.batch_size * [original_negative_prompt])
                if sigma_adjustment:
                    rec_noise = find_noise_for_image_sigma_adjustment(p, cond, uncond, cfg, st)
                else:
                    rec_noise = find_noise_for_image(p, cond, uncond, cfg, st)
                self.cache = Cached(rec_noise, cfg, st, lat, original_prompt, original_negative_prompt, sigma_adjustment)

            rand_noise = processing.create_random_tensors(p.init_latent.shape[1:], [p.seed + x + 1 for x in range(p.init_latent.shape[0])])

            combined_noise = ((1 - randomness) * rec_noise + randomness * rand_noise) / ((randomness**2 + (1-randomness)**2) ** 0.5)

            sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, p.sampler_index, p.sd_model)

            sigmas = sampler.model_wrap.get_sigmas(p.steps)

            noise_dt = combined_noise - (p.init_latent / sigmas[0])

            p.seed = p.seed + 1

            return sampler.sample_img2img(p, p.init_latent, noise_dt, conditioning, unconditional_conditioning)

        p.sample = sample_extra

        p.extra_generation_params["Decode prompt"] = original_prompt
        p.extra_generation_params["Decode negative prompt"] = original_negative_prompt
        p.extra_generation_params["Decode CFG scale"] = cfg
        p.extra_generation_params["Decode steps"] = st
        p.extra_generation_params["Randomness"] = randomness
        p.extra_generation_params["Sigma Adjustment"] = sigma_adjustment

        processed = processing.process_images(p)

        return processed
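
A quick standalone check of the `combined_noise` line above: blending two independent unit-variance noise sources with weights (1 - r) and r, then dividing by sqrt((1 - r)^2 + r^2), keeps the result at unit variance, which is what the sampler expects. A self-contained sketch:

    import numpy as np

    r = 0.3                            # the "Randomness" slider
    rec = np.random.randn(1_000_000)   # stand-in for the recovered noise
    rand = np.random.randn(1_000_000)  # stand-in for the fresh random noise

    combined = ((1 - r) * rec + r * rand) / ((r**2 + (1 - r)**2) ** 0.5)
    print(round(combined.std(), 3))    # ~1.0: the blend preserves unit variance
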
scripts/loopback.py
ADDED
@@ -0,0 +1,83 @@
import numpy as np
from tqdm import trange

import modules.scripts as scripts
import gradio as gr

from modules import processing, shared, sd_samplers, images
from modules.processing import Processed
from modules.sd_samplers import samplers
from modules.shared import opts, cmd_opts, state

class Script(scripts.Script):
    def title(self):
        return "Loopback"

    def show(self, is_img2img):
        return is_img2img

    def ui(self, is_img2img):
        loops = gr.Slider(minimum=1, maximum=32, step=1, label='Loops', value=4)
        denoising_strength_change_factor = gr.Slider(minimum=0.9, maximum=1.1, step=0.01, label='Denoising strength change factor', value=1)

        return [loops, denoising_strength_change_factor]

    def run(self, p, loops, denoising_strength_change_factor):
        processing.fix_seed(p)
        batch_count = p.n_iter
        p.extra_generation_params = {
            "Denoising strength change factor": denoising_strength_change_factor,
        }

        p.batch_size = 1
        p.n_iter = 1

        output_images, info = None, None
        initial_seed = None
        initial_info = None

        grids = []
        all_images = []
        state.job_count = loops * batch_count

        initial_color_corrections = [processing.setup_color_correction(p.init_images[0])]

        for n in range(batch_count):
            history = []

            for i in range(loops):
                p.n_iter = 1
                p.batch_size = 1
                p.do_not_save_grid = True

                if opts.img2img_color_correction:
                    p.color_corrections = initial_color_corrections

                state.job = f"Iteration {i + 1}/{loops}, batch {n + 1}/{batch_count}"

                processed = processing.process_images(p)

                if initial_seed is None:
                    initial_seed = processed.seed
                    initial_info = processed.info

                init_img = processed.images[0]

                p.init_images = [init_img]
                p.seed = processed.seed + 1
                p.denoising_strength = min(max(p.denoising_strength * denoising_strength_change_factor, 0.1), 1)
                history.append(processed.images[0])

            grid = images.image_grid(history, rows=1)
            if opts.grid_save:
                images.save_image(grid, p.outpath_grids, "grid", initial_seed, p.prompt, opts.grid_format, info=info, short_filename=not opts.grid_extended_filename, grid=True, p=p)

            grids.append(grid)
            all_images += history

        if opts.return_grid:
            all_images = grids + all_images

        processed = Processed(p, all_images, initial_seed, initial_info)

        return processed
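
The denoising strength change factor above compounds once per loop and is clamped to [0.1, 1]. A standalone sketch of the schedule it produces, with hypothetical starting values:

    strength, factor = 0.6, 0.95  # e.g. img2img strength 0.6, change factor 0.95
    for i in range(8):
        strength = min(max(strength * factor, 0.1), 1)
        print(f"loop {i + 1}: denoising_strength = {strength:.3f}")
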
scripts/outpainting_mk_2.py
ADDED
@@ -0,0 +1,262 @@
import math

import numpy as np
import skimage

import modules.scripts as scripts
import gradio as gr
from PIL import Image, ImageDraw

from modules import images, processing, devices
from modules.processing import Processed, process_images
from modules.shared import opts, cmd_opts, state


# this function is taken from https://github.com/parlance-zz/g-diffuser-bot
def get_matched_noise(_np_src_image, np_mask_rgb, noise_q=1, color_variation=0.05):
    # helper fft routines that keep ortho normalization and auto-shift before and after fft
    def _fft2(data):
        if data.ndim > 2:  # has channels
            out_fft = np.zeros((data.shape[0], data.shape[1], data.shape[2]), dtype=np.complex128)
            for c in range(data.shape[2]):
                c_data = data[:, :, c]
                out_fft[:, :, c] = np.fft.fft2(np.fft.fftshift(c_data), norm="ortho")
                out_fft[:, :, c] = np.fft.ifftshift(out_fft[:, :, c])
        else:  # one channel
            out_fft = np.zeros((data.shape[0], data.shape[1]), dtype=np.complex128)
            out_fft[:, :] = np.fft.fft2(np.fft.fftshift(data), norm="ortho")
            out_fft[:, :] = np.fft.ifftshift(out_fft[:, :])

        return out_fft

    def _ifft2(data):
        if data.ndim > 2:  # has channels
            out_ifft = np.zeros((data.shape[0], data.shape[1], data.shape[2]), dtype=np.complex128)
            for c in range(data.shape[2]):
                c_data = data[:, :, c]
                out_ifft[:, :, c] = np.fft.ifft2(np.fft.fftshift(c_data), norm="ortho")
                out_ifft[:, :, c] = np.fft.ifftshift(out_ifft[:, :, c])
        else:  # one channel
            out_ifft = np.zeros((data.shape[0], data.shape[1]), dtype=np.complex128)
            out_ifft[:, :] = np.fft.ifft2(np.fft.fftshift(data), norm="ortho")
            out_ifft[:, :] = np.fft.ifftshift(out_ifft[:, :])

        return out_ifft

    def _get_gaussian_window(width, height, std=3.14, mode=0):
        window_scale_x = float(width / min(width, height))
        window_scale_y = float(height / min(width, height))

        window = np.zeros((width, height))
        x = (np.arange(width) / width * 2. - 1.) * window_scale_x
        for y in range(height):
            fy = (y / height * 2. - 1.) * window_scale_y
            if mode == 0:
                window[:, y] = np.exp(-(x ** 2 + fy ** 2) * std)
            else:
                window[:, y] = (1 / ((x ** 2 + 1.) * (fy ** 2 + 1.))) ** (std / 3.14)  # hey wait a minute that's not gaussian

        return window

    def _get_masked_window_rgb(np_mask_grey, hardness=1.):
        np_mask_rgb = np.zeros((np_mask_grey.shape[0], np_mask_grey.shape[1], 3))
        if hardness != 1.:
            hardened = np_mask_grey[:] ** hardness
        else:
            hardened = np_mask_grey[:]
        for c in range(3):
            np_mask_rgb[:, :, c] = hardened[:]
        return np_mask_rgb

    width = _np_src_image.shape[0]
    height = _np_src_image.shape[1]
    num_channels = _np_src_image.shape[2]

    np_src_image = _np_src_image[:] * (1. - np_mask_rgb)
    np_mask_grey = (np.sum(np_mask_rgb, axis=2) / 3.)
    img_mask = np_mask_grey > 1e-6
    ref_mask = np_mask_grey < 1e-3

    windowed_image = _np_src_image * (1. - _get_masked_window_rgb(np_mask_grey))
    windowed_image /= np.max(windowed_image)
    windowed_image += np.average(_np_src_image) * np_mask_rgb  # rather than leave the masked area black, we get better results from fft by filling the average unmasked color

    src_fft = _fft2(windowed_image)  # get feature statistics from masked src img
    src_dist = np.absolute(src_fft)
    src_phase = src_fft / src_dist

    # create a generator with a static seed to make outpainting deterministic / only follow global seed
    rng = np.random.default_rng(0)

    noise_window = _get_gaussian_window(width, height, mode=1)  # start with simple gaussian noise
    noise_rgb = rng.random((width, height, num_channels))
    noise_grey = (np.sum(noise_rgb, axis=2) / 3.)
    noise_rgb *= color_variation  # the colorfulness of the starting noise is blended to greyscale with a parameter
    for c in range(num_channels):
        noise_rgb[:, :, c] += (1. - color_variation) * noise_grey

    noise_fft = _fft2(noise_rgb)
    for c in range(num_channels):
        noise_fft[:, :, c] *= noise_window
    noise_rgb = np.real(_ifft2(noise_fft))
    shaped_noise_fft = _fft2(noise_rgb)
    shaped_noise_fft[:, :, :] = np.absolute(shaped_noise_fft[:, :, :]) ** 2 * (src_dist ** noise_q) * src_phase  # perform the actual shaping

    brightness_variation = 0.  # color_variation # todo: temporarily tying brightness variation to color variation for now
    contrast_adjusted_np_src = _np_src_image[:] * (brightness_variation + 1.) - brightness_variation * 2.

    # scikit-image is used for histogram matching, very convenient!
    shaped_noise = np.real(_ifft2(shaped_noise_fft))
    shaped_noise -= np.min(shaped_noise)
    shaped_noise /= np.max(shaped_noise)
    shaped_noise[img_mask, :] = skimage.exposure.match_histograms(shaped_noise[img_mask, :] ** 1., contrast_adjusted_np_src[ref_mask, :], channel_axis=1)
    shaped_noise = _np_src_image[:] * (1. - np_mask_rgb) + shaped_noise * np_mask_rgb

    matched_noise = shaped_noise[:]

    return np.clip(matched_noise, 0., 1.)


class Script(scripts.Script):
    def title(self):
        return "Outpainting mk2"

    def show(self, is_img2img):
        return is_img2img

    def ui(self, is_img2img):
        if not is_img2img:
            return None

        info = gr.HTML("<p style=\"margin-bottom:0.75em\">Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8</p>")

        pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128)
        mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8, visible=False)
        direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'])
        noise_q = gr.Slider(label="Fall-off exponent (lower=higher detail)", minimum=0.0, maximum=4.0, step=0.01, value=1.0)
        color_variation = gr.Slider(label="Color variation", minimum=0.0, maximum=1.0, step=0.01, value=0.05)

        return [info, pixels, mask_blur, direction, noise_q, color_variation]

    def run(self, p, _, pixels, mask_blur, direction, noise_q, color_variation):
        initial_seed_and_info = [None, None]

        process_width = p.width
        process_height = p.height

        p.mask_blur = mask_blur*4
        p.inpaint_full_res = False
        p.inpainting_fill = 1
        p.do_not_save_samples = True
        p.do_not_save_grid = True

        left = pixels if "left" in direction else 0
        right = pixels if "right" in direction else 0
        up = pixels if "up" in direction else 0
        down = pixels if "down" in direction else 0

        init_img = p.init_images[0]
        target_w = math.ceil((init_img.width + left + right) / 64) * 64
        target_h = math.ceil((init_img.height + up + down) / 64) * 64

        if left > 0:
            left = left * (target_w - init_img.width) // (left + right)

        if right > 0:
            right = target_w - init_img.width - left

        if up > 0:
            up = up * (target_h - init_img.height) // (up + down)

        if down > 0:
            down = target_h - init_img.height - up

        init_image = p.init_images[0]

        state.job_count = (1 if left > 0 else 0) + (1 if right > 0 else 0) + (1 if up > 0 else 0) + (1 if down > 0 else 0)

        def expand(init, expand_pixels, is_left=False, is_right=False, is_top=False, is_bottom=False):
            is_horiz = is_left or is_right
            is_vert = is_top or is_bottom
            pixels_horiz = expand_pixels if is_horiz else 0
            pixels_vert = expand_pixels if is_vert else 0

            res_w = init.width + pixels_horiz
            res_h = init.height + pixels_vert
            process_res_w = math.ceil(res_w / 64) * 64
            process_res_h = math.ceil(res_h / 64) * 64

            img = Image.new("RGB", (process_res_w, process_res_h))
            img.paste(init, (pixels_horiz if is_left else 0, pixels_vert if is_top else 0))
            mask = Image.new("RGB", (process_res_w, process_res_h), "white")
            draw = ImageDraw.Draw(mask)
            draw.rectangle((
                expand_pixels + mask_blur if is_left else 0,
                expand_pixels + mask_blur if is_top else 0,
                mask.width - expand_pixels - mask_blur if is_right else res_w,
                mask.height - expand_pixels - mask_blur if is_bottom else res_h,
            ), fill="black")

            np_image = (np.asarray(img) / 255.0).astype(np.float64)
            np_mask = (np.asarray(mask) / 255.0).astype(np.float64)
            noised = get_matched_noise(np_image, np_mask, noise_q, color_variation)
            out = Image.fromarray(np.clip(noised * 255., 0., 255.).astype(np.uint8), mode="RGB")

            target_width = min(process_width, init.width + pixels_horiz) if is_horiz else img.width
            target_height = min(process_height, init.height + pixels_vert) if is_vert else img.height

            crop_region = (
                0 if is_left else out.width - target_width,
                0 if is_top else out.height - target_height,
                target_width if is_left else out.width,
                target_height if is_top else out.height,
            )

            image_to_process = out.crop(crop_region)
            mask = mask.crop(crop_region)

            p.width = target_width if is_horiz else img.width
            p.height = target_height if is_vert else img.height
            p.init_images = [image_to_process]
            p.image_mask = mask

            latent_mask = Image.new("RGB", (p.width, p.height), "white")
            draw = ImageDraw.Draw(latent_mask)
            draw.rectangle((
                expand_pixels + mask_blur * 2 if is_left else 0,
                expand_pixels + mask_blur * 2 if is_top else 0,
                mask.width - expand_pixels - mask_blur * 2 if is_right else res_w,
                mask.height - expand_pixels - mask_blur * 2 if is_bottom else res_h,
            ), fill="black")
            p.latent_mask = latent_mask

            proc = process_images(p)
            proc_img = proc.images[0]

            if initial_seed_and_info[0] is None:
                initial_seed_and_info[0] = proc.seed
                initial_seed_and_info[1] = proc.info

            out.paste(proc_img, (0 if is_left else out.width - proc_img.width, 0 if is_top else out.height - proc_img.height))
            out = out.crop((0, 0, res_w, res_h))
            return out

        img = init_image

        if left > 0:
            img = expand(img, left, is_left=True)
        if right > 0:
            img = expand(img, right, is_right=True)
        if up > 0:
            img = expand(img, up, is_top=True)
        if down > 0:
            img = expand(img, down, is_bottom=True)

        res = Processed(p, [img], initial_seed_and_info[0], initial_seed_and_info[1])

        if opts.samples_save:
            images.save_image(img, p.outpath_samples, "", res.seed, p.prompt, opts.grid_format, info=res.info, p=p)

        return res
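
The sizing logic in run() rounds the expanded canvas up to a multiple of 64, then redistributes the requested border pixels so the sides sum to exactly the rounded size. A standalone sketch with hypothetical numbers:

    import math

    width, height = 512, 512  # hypothetical init image
    pixels = 128              # "Pixels to expand"
    left = right = pixels     # both horizontal directions selected
    up = down = 0

    target_w = math.ceil((width + left + right) / 64) * 64
    target_h = math.ceil((height + up + down) / 64) * 64

    if left > 0:
        left = left * (target_w - width) // (left + right)
    if right > 0:
        right = target_w - width - left

    print(target_w, target_h, left, right)  # 768 512 128 128
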
scripts/poor_mans_outpainting.py
ADDED
@@ -0,0 +1,147 @@
import math

import modules.scripts as scripts
import gradio as gr
from PIL import Image, ImageDraw

from modules import images, processing, devices
from modules.processing import Processed, process_images
from modules.shared import opts, cmd_opts, state


class Script(scripts.Script):
    def title(self):
        return "Poor man's outpainting"

    def show(self, is_img2img):
        return is_img2img

    def ui(self, is_img2img):
        if not is_img2img:
            return None

        pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128)
        mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, visible=False)
        inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index", visible=False)
        direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'])

        return [pixels, mask_blur, inpainting_fill, direction]

    def run(self, p, pixels, mask_blur, inpainting_fill, direction):
        initial_seed = None
        initial_info = None

        p.mask_blur = mask_blur * 2
        p.inpainting_fill = inpainting_fill
        p.inpaint_full_res = False

        left = pixels if "left" in direction else 0
        right = pixels if "right" in direction else 0
        up = pixels if "up" in direction else 0
        down = pixels if "down" in direction else 0

        init_img = p.init_images[0]
        target_w = math.ceil((init_img.width + left + right) / 64) * 64
        target_h = math.ceil((init_img.height + up + down) / 64) * 64

        if left > 0:
            left = left * (target_w - init_img.width) // (left + right)
        if right > 0:
            right = target_w - init_img.width - left

        if up > 0:
            up = up * (target_h - init_img.height) // (up + down)

        if down > 0:
            down = target_h - init_img.height - up

        img = Image.new("RGB", (target_w, target_h))
        img.paste(init_img, (left, up))

        mask = Image.new("L", (img.width, img.height), "white")
        draw = ImageDraw.Draw(mask)
        draw.rectangle((
            left + (mask_blur * 2 if left > 0 else 0),
            up + (mask_blur * 2 if up > 0 else 0),
            mask.width - right - (mask_blur * 2 if right > 0 else 0),
            mask.height - down - (mask_blur * 2 if down > 0 else 0)
        ), fill="black")

        latent_mask = Image.new("L", (img.width, img.height), "white")
        latent_draw = ImageDraw.Draw(latent_mask)
        latent_draw.rectangle((
            left + (mask_blur//2 if left > 0 else 0),
            up + (mask_blur//2 if up > 0 else 0),
            mask.width - right - (mask_blur//2 if right > 0 else 0),
            mask.height - down - (mask_blur//2 if down > 0 else 0)
        ), fill="black")

        devices.torch_gc()

        grid = images.split_grid(img, tile_w=p.width, tile_h=p.height, overlap=pixels)
        grid_mask = images.split_grid(mask, tile_w=p.width, tile_h=p.height, overlap=pixels)
        grid_latent_mask = images.split_grid(latent_mask, tile_w=p.width, tile_h=p.height, overlap=pixels)

        p.n_iter = 1
        p.batch_size = 1
        p.do_not_save_grid = True
        p.do_not_save_samples = True

        work = []
        work_mask = []
        work_latent_mask = []
        work_results = []

        for (y, h, row), (_, _, row_mask), (_, _, row_latent_mask) in zip(grid.tiles, grid_mask.tiles, grid_latent_mask.tiles):
            for tiledata, tiledata_mask, tiledata_latent_mask in zip(row, row_mask, row_latent_mask):
                x, w = tiledata[0:2]

                if x >= left and x+w <= img.width - right and y >= up and y+h <= img.height - down:
                    continue

                work.append(tiledata[2])
                work_mask.append(tiledata_mask[2])
                work_latent_mask.append(tiledata_latent_mask[2])

        batch_count = len(work)
        print(f"Poor man's outpainting will process a total of {len(work)} images tiled as {len(grid.tiles[0][2])}x{len(grid.tiles)}.")

        state.job_count = batch_count

        for i in range(batch_count):
            p.init_images = [work[i]]
            p.image_mask = work_mask[i]
            p.latent_mask = work_latent_mask[i]

            state.job = f"Batch {i + 1} out of {batch_count}"
            processed = process_images(p)

            if initial_seed is None:
                initial_seed = processed.seed
                initial_info = processed.info

            p.seed = processed.seed + 1
            work_results += processed.images

        image_index = 0
        for y, h, row in grid.tiles:
            for tiledata in row:
                x, w = tiledata[0:2]

                if x >= left and x+w <= img.width - right and y >= up and y+h <= img.height - down:
                    continue

                tiledata[2] = work_results[image_index] if image_index < len(work_results) else Image.new("RGB", (p.width, p.height))
                image_index += 1

        combined_image = images.combine_grid(grid)

        if opts.samples_save:
            images.save_image(combined_image, p.outpath_samples, "", initial_seed, p.prompt, opts.grid_format, info=initial_info, p=p)

        processed = Processed(p, [combined_image], initial_seed, initial_info)

        return processed
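
The tile filter in run() skips any tile that lies entirely inside the pasted original image, since such a tile contains no newly created border pixels to outpaint. A standalone restatement of that condition, with hypothetical geometry:

    def needs_outpainting(x, y, w, h, img_w, img_h, left, up, right, down):
        # skip when the tile sits fully inside the original (unexpanded) region
        fully_inside = x >= left and x + w <= img_w - right and y >= up and y + h <= img_h - down
        return not fully_inside

    # 768x512 canvas, 128px added on each horizontal side:
    print(needs_outpainting(0, 0, 512, 512, 768, 512, 128, 0, 128, 0))    # True: covers the new left border
    print(needs_outpainting(128, 0, 512, 512, 768, 512, 128, 0, 128, 0))  # False: all original content
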
scripts/prompt_matrix.py
ADDED
@@ -0,0 +1,87 @@
import math
from collections import namedtuple
from copy import copy
import random

import modules.scripts as scripts
import gradio as gr

from modules import images
from modules.processing import process_images, Processed
from modules.shared import opts, cmd_opts, state
import modules.processing  # needed for modules.processing.fix_seed below
import modules.sd_samplers


def draw_xy_grid(xs, ys, x_label, y_label, cell):
    res = []

    ver_texts = [[images.GridAnnotation(y_label(y))] for y in ys]
    hor_texts = [[images.GridAnnotation(x_label(x))] for x in xs]

    first_processed = None

    state.job_count = len(xs) * len(ys)

    for iy, y in enumerate(ys):
        for ix, x in enumerate(xs):
            state.job = f"{ix + iy * len(xs) + 1} out of {len(xs) * len(ys)}"

            processed = cell(x, y)
            if first_processed is None:
                first_processed = processed

            res.append(processed.images[0])

    grid = images.image_grid(res, rows=len(ys))
    grid = images.draw_grid_annotations(grid, res[0].width, res[0].height, hor_texts, ver_texts)

    first_processed.images = [grid]

    return first_processed


class Script(scripts.Script):
    def title(self):
        return "Prompt matrix"

    def ui(self, is_img2img):
        put_at_start = gr.Checkbox(label='Put variable parts at start of prompt', value=False)

        return [put_at_start]

    def run(self, p, put_at_start):
        modules.processing.fix_seed(p)

        original_prompt = p.prompt[0] if type(p.prompt) == list else p.prompt

        all_prompts = []
        prompt_matrix_parts = original_prompt.split("|")
        combination_count = 2 ** (len(prompt_matrix_parts) - 1)
        for combination_num in range(combination_count):
            selected_prompts = [text.strip().strip(',') for n, text in enumerate(prompt_matrix_parts[1:]) if combination_num & (1 << n)]

            if put_at_start:
                selected_prompts = selected_prompts + [prompt_matrix_parts[0]]
            else:
                selected_prompts = [prompt_matrix_parts[0]] + selected_prompts

            all_prompts.append(", ".join(selected_prompts))

        p.n_iter = math.ceil(len(all_prompts) / p.batch_size)
        p.do_not_save_grid = True

        print(f"Prompt matrix will create {len(all_prompts)} images using a total of {p.n_iter} batches.")

        p.prompt = all_prompts
        p.seed = [p.seed for _ in all_prompts]
        p.prompt_for_display = original_prompt
        processed = process_images(p)

        grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2))
        grid = images.draw_prompt_matrix(grid, p.width, p.height, prompt_matrix_parts)
        processed.images.insert(0, grid)

        if opts.grid_save:
            images.save_image(processed.images[0], p.outpath_grids, "prompt_matrix", prompt=original_prompt, seed=processed.seed, grid=True, p=p)

        return processed
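
The combination loop above treats each "|"-separated part after the first as one bit of a counter, so N variable parts yield 2**N prompts. A standalone sketch with a hypothetical prompt:

    original_prompt = "a landscape|sunset|oil painting"
    parts = original_prompt.split("|")

    for combination_num in range(2 ** (len(parts) - 1)):
        selected = [text.strip().strip(',') for n, text in enumerate(parts[1:]) if combination_num & (1 << n)]
        print(", ".join([parts[0]] + selected))

    # a landscape
    # a landscape, sunset
    # a landscape, oil painting
    # a landscape, sunset, oil painting
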
scripts/prompts_from_file.py
ADDED
@@ -0,0 +1,55 @@
import math
import os
import sys
import traceback

import modules.scripts as scripts
import gradio as gr

from modules.processing import Processed, process_images
from PIL import Image
from modules.shared import opts, cmd_opts, state


class Script(scripts.Script):
    def title(self):
        return "Prompts from file or textbox"

    def ui(self, is_img2img):
        # This checkbox would look nicer as two tabs, but there are two problems:
        # 1) There is a bug in Gradio 3.3 that prevents visibility from working on Tabs
        # 2) Even with Gradio 3.3.1, returning a control (like Tabs) that can't be used as input
        #    causes an AttributeError: 'Tabs' object has no attribute 'preprocess' assert,
        #    due to the way Script assumes all controls returned can be used as inputs.
        # Therefore, there's no good way to use grouping components right now,
        # so we will use a checkbox! :)
        checkbox_txt = gr.Checkbox(label="Show Textbox", value=False)
        file = gr.File(label="File with inputs", type='bytes')
        prompt_txt = gr.TextArea(label="Prompts")
        checkbox_txt.change(fn=lambda x: [gr.File.update(visible = not x), gr.TextArea.update(visible = x)], inputs=[checkbox_txt], outputs=[file, prompt_txt])
        return [checkbox_txt, file, prompt_txt]

    def run(self, p, checkbox_txt, data: bytes, prompt_txt: str):
        if checkbox_txt:
            lines = [x.strip() for x in prompt_txt.splitlines()]
        else:
            lines = [x.strip() for x in data.decode('utf8', errors='ignore').split("\n")]
        lines = [x for x in lines if len(x) > 0]

        img_count = len(lines) * p.n_iter
        batch_count = math.ceil(img_count / p.batch_size)
        loop_count = math.ceil(batch_count / p.n_iter)
        print(f"Will process {img_count} images in {batch_count} batches.")

        p.do_not_save_grid = True

        state.job_count = batch_count

        images = []
        for loop_no in range(loop_count):
            state.job = f"{loop_no + 1} out of {loop_count}"
            p.prompt = lines[loop_no*p.batch_size:(loop_no+1)*p.batch_size] * p.n_iter
            proc = process_images(p)
            images += proc.images

        return Processed(p, images, p.seed, "")
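
The batching math above turns the prompt list into batch_size-sized slices, each repeated n_iter times per job. A standalone sketch with hypothetical inputs:

    import math

    lines = ["a cat", "a dog", "a bird", "a fish", "a horse"]
    batch_size, n_iter = 2, 1

    batch_count = math.ceil(len(lines) * n_iter / batch_size)
    loop_count = math.ceil(batch_count / n_iter)

    for loop_no in range(loop_count):
        prompts = lines[loop_no * batch_size:(loop_no + 1) * batch_size] * n_iter
        print(f"job {loop_no + 1}/{loop_count}: {prompts}")
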
scripts/sd_upscale.py
ADDED
@@ -0,0 +1,97 @@
import math

import modules.scripts as scripts
import gradio as gr
from PIL import Image

from modules import processing, shared, sd_samplers, images, devices
from modules.processing import Processed
from modules.shared import opts, cmd_opts, state


class Script(scripts.Script):
    def title(self):
        return "SD upscale"

    def show(self, is_img2img):
        return is_img2img

    def ui(self, is_img2img):
        info = gr.HTML("<p style=\"margin-bottom:0.75em\">Will upscale the image to twice the dimensions; use width and height sliders to set tile size</p>")
        overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, visible=False)
        upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index", visible=False)

        return [info, overlap, upscaler_index]

    def run(self, p, _, overlap, upscaler_index):
        processing.fix_seed(p)
        upscaler = shared.sd_upscalers[upscaler_index]

        p.extra_generation_params["SD upscale overlap"] = overlap
        p.extra_generation_params["SD upscale upscaler"] = upscaler.name

        initial_info = None
        seed = p.seed

        init_img = p.init_images[0]

        if upscaler.name != "None":
            img = upscaler.scaler.upscale(init_img, 2, upscaler.data_path)
        else:
            img = init_img

        devices.torch_gc()

        grid = images.split_grid(img, tile_w=p.width, tile_h=p.height, overlap=overlap)

        batch_size = p.batch_size
        upscale_count = p.n_iter
        p.n_iter = 1
        p.do_not_save_grid = True
        p.do_not_save_samples = True

        work = []

        for y, h, row in grid.tiles:
            for tiledata in row:
                work.append(tiledata[2])

        batch_count = math.ceil(len(work) / batch_size)
        state.job_count = batch_count * upscale_count

        print(f"SD upscaling will process a total of {len(work)} images tiled as {len(grid.tiles[0][2])}x{len(grid.tiles)} per upscale in a total of {state.job_count} batches.")

        result_images = []
        for n in range(upscale_count):
            start_seed = seed + n
            p.seed = start_seed

            work_results = []
            for i in range(batch_count):
                p.batch_size = batch_size
                p.init_images = work[i*batch_size:(i+1)*batch_size]

                state.job = f"Batch {i + 1 + n * batch_count} out of {state.job_count}"
                processed = processing.process_images(p)

                if initial_info is None:
                    initial_info = processed.info

                p.seed = processed.seed + 1
                work_results += processed.images

            image_index = 0
            for y, h, row in grid.tiles:
                for tiledata in row:
                    tiledata[2] = work_results[image_index] if image_index < len(work_results) else Image.new("RGB", (p.width, p.height))
                    image_index += 1

            combined_image = images.combine_grid(grid)
            result_images.append(combined_image)

            if opts.samples_save:
                images.save_image(combined_image, p.outpath_samples, "", start_seed, p.prompt, opts.samples_format, info=initial_info, p=p)

        processed = Processed(p, result_images, seed, initial_info)

        return processed
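
A rough feel for the job count above: the upscaled image is retiled at the UI width/height with the chosen overlap. Assuming images.split_grid() advances by (tile - overlap) per step (an assumption about its internals, which are not shown here), the tile count works out as in this sketch:

    import math

    upscaled_w = upscaled_h = 1024  # hypothetical 512x512 source, upscaled 2x
    tile_w = tile_h = 512           # UI width/height sliders
    overlap = 64

    # assumption: split_grid steps by (tile - overlap) pixels per tile
    cols = math.ceil((upscaled_w - overlap) / (tile_w - overlap))
    rows = math.ceil((upscaled_h - overlap) / (tile_h - overlap))
    print(f"{cols}x{rows} tiles -> {cols * rows} img2img calls per upscale pass")
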
scripts/xy_grid.py
ADDED
@@ -0,0 +1,313 @@
from collections import namedtuple
from copy import copy
from itertools import permutations, chain
import random
import csv
from io import StringIO
from PIL import Image
import numpy as np

import modules.scripts as scripts
import gradio as gr

from modules import images
from modules.processing import process_images, Processed
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.processing  # needed for modules.processing.fix_seed below
import modules.sd_samplers
import modules.sd_models
import re


def apply_field(field):
    def fun(p, x, xs):
        setattr(p, field, x)

    return fun


def apply_prompt(p, x, xs):
    p.prompt = p.prompt.replace(xs[0], x)
    p.negative_prompt = p.negative_prompt.replace(xs[0], x)


def apply_order(p, x, xs):
    token_order = []

    # Initially grab the tokens from the prompt, so they can be replaced in order of earliest seen
    for token in x:
        token_order.append((p.prompt.find(token), token))

    token_order.sort(key=lambda t: t[0])

    prompt_parts = []

    # Split the prompt up, taking out the tokens
    for _, token in token_order:
        n = p.prompt.find(token)
        prompt_parts.append(p.prompt[0:n])
        p.prompt = p.prompt[n + len(token):]

    # Rebuild the prompt with the tokens in the order we want
    prompt_tmp = ""
    for idx, part in enumerate(prompt_parts):
        prompt_tmp += part
        prompt_tmp += x[idx]
    p.prompt = prompt_tmp + p.prompt


samplers_dict = {}
for i, sampler in enumerate(modules.sd_samplers.samplers):
    samplers_dict[sampler.name.lower()] = i
    for alias in sampler.aliases:
        samplers_dict[alias.lower()] = i


def apply_sampler(p, x, xs):
    sampler_index = samplers_dict.get(x.lower(), None)
    if sampler_index is None:
        raise RuntimeError(f"Unknown sampler: {x}")

    p.sampler_index = sampler_index


def apply_checkpoint(p, x, xs):
    info = modules.sd_models.get_closet_checkpoint_match(x)
    assert info is not None, f'Checkpoint for {x} not found'
    modules.sd_models.reload_model_weights(shared.sd_model, info)


def apply_hypernetwork(p, x, xs):
    hn = shared.hypernetworks.get(x, None)
    opts.data["sd_hypernetwork"] = hn.name if hn is not None else 'None'


def format_value_add_label(p, opt, x):
    if type(x) == float:
        x = round(x, 8)

    return f"{opt.label}: {x}"


def format_value(p, opt, x):
    if type(x) == float:
        x = round(x, 8)
    return x


def format_value_join_list(p, opt, x):
    return ", ".join(x)


def do_nothing(p, x, xs):
    pass


def format_nothing(p, opt, x):
    return ""


def str_permutations(x):
    """dummy function for specifying it in AxisOption's type when you want to get a list of permutations"""
    return x


AxisOption = namedtuple("AxisOption", ["label", "type", "apply", "format_value"])
AxisOptionImg2Img = namedtuple("AxisOptionImg2Img", ["label", "type", "apply", "format_value"])


axis_options = [
    AxisOption("Nothing", str, do_nothing, format_nothing),
    AxisOption("Seed", int, apply_field("seed"), format_value_add_label),
    AxisOption("Var. seed", int, apply_field("subseed"), format_value_add_label),
    AxisOption("Var. strength", float, apply_field("subseed_strength"), format_value_add_label),
    AxisOption("Steps", int, apply_field("steps"), format_value_add_label),
    AxisOption("CFG Scale", float, apply_field("cfg_scale"), format_value_add_label),
    AxisOption("Prompt S/R", str, apply_prompt, format_value),
    AxisOption("Prompt order", str_permutations, apply_order, format_value_join_list),
    AxisOption("Sampler", str, apply_sampler, format_value),
    AxisOption("Checkpoint name", str, apply_checkpoint, format_value),
    AxisOption("Hypernetwork", str, apply_hypernetwork, format_value),
    AxisOption("Sigma Churn", float, apply_field("s_churn"), format_value_add_label),
    AxisOption("Sigma min", float, apply_field("s_tmin"), format_value_add_label),
    AxisOption("Sigma max", float, apply_field("s_tmax"), format_value_add_label),
    AxisOption("Sigma noise", float, apply_field("s_noise"), format_value_add_label),
    AxisOption("Eta", float, apply_field("eta"), format_value_add_label),
    AxisOptionImg2Img("Denoising", float, apply_field("denoising_strength"), format_value_add_label),  # as it is now all AxisOptionImg2Img items must go after AxisOption ones
]


def draw_xy_grid(p, xs, ys, x_labels, y_labels, cell, draw_legend):
    res = []

    ver_texts = [[images.GridAnnotation(y)] for y in y_labels]
    hor_texts = [[images.GridAnnotation(x)] for x in x_labels]

    first_processed = None

    state.job_count = len(xs) * len(ys) * p.n_iter

    for iy, y in enumerate(ys):
        for ix, x in enumerate(xs):
            state.job = f"{ix + iy * len(xs) + 1} out of {len(xs) * len(ys)}"

            processed = cell(x, y)
            if first_processed is None:
                first_processed = processed

            try:
                res.append(processed.images[0])
            except:
                res.append(Image.new(res[0].mode, res[0].size))

    grid = images.image_grid(res, rows=len(ys))
    if draw_legend:
        grid = images.draw_grid_annotations(grid, res[0].width, res[0].height, hor_texts, ver_texts)

    first_processed.images = [grid]

    return first_processed


re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")
re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:\.\d*)?)\s*-\s*([+-]?\s*\d+(?:\.\d*)?)(?:\s*\(([+-]\d+(?:\.\d*)?)\s*\))?\s*")

re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*\])?\s*")
re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:\.\d*)?)\s*-\s*([+-]?\s*\d+(?:\.\d*)?)(?:\s*\[(\d+(?:\.\d*)?)\s*\])?\s*")


class Script(scripts.Script):
    def title(self):
        return "X/Y plot"

    def ui(self, is_img2img):
        current_axis_options = [x for x in axis_options if type(x) == AxisOption or type(x) == AxisOptionImg2Img and is_img2img]

        with gr.Row():
            x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, visible=False, type="index", elem_id="x_type")
            x_values = gr.Textbox(label="X values", visible=False, lines=1)

        with gr.Row():
            y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[4].label, visible=False, type="index", elem_id="y_type")
            y_values = gr.Textbox(label="Y values", visible=False, lines=1)

        draw_legend = gr.Checkbox(label='Draw legend', value=True)
        no_fixed_seeds = gr.Checkbox(label='Keep -1 for seeds', value=False)

        return [x_type, x_values, y_type, y_values, draw_legend, no_fixed_seeds]

    def run(self, p, x_type, x_values, y_type, y_values, draw_legend, no_fixed_seeds):
        modules.processing.fix_seed(p)
        p.batch_size = 1

        initial_hn = opts.sd_hypernetwork

        def process_axis(opt, vals):
            if opt.label == 'Nothing':
                return [0]

            valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals)))]

            if opt.type == int:
                valslist_ext = []

                for val in valslist:
                    m = re_range.fullmatch(val)
                    mc = re_range_count.fullmatch(val)
                    if m is not None:
                        start = int(m.group(1))
                        end = int(m.group(2))+1
                        step = int(m.group(3)) if m.group(3) is not None else 1

                        valslist_ext += list(range(start, end, step))
                    elif mc is not None:
                        start = int(mc.group(1))
                        end = int(mc.group(2))
                        num = int(mc.group(3)) if mc.group(3) is not None else 1

                        valslist_ext += [int(x) for x in np.linspace(start=start, stop=end, num=num).tolist()]
                    else:
                        valslist_ext.append(val)

                valslist = valslist_ext
            elif opt.type == float:
                valslist_ext = []

                for val in valslist:
                    m = re_range_float.fullmatch(val)
                    mc = re_range_count_float.fullmatch(val)
                    if m is not None:
                        start = float(m.group(1))
                        end = float(m.group(2))
                        step = float(m.group(3)) if m.group(3) is not None else 1

                        valslist_ext += np.arange(start, end + step, step).tolist()
                    elif mc is not None:
                        start = float(mc.group(1))
                        end = float(mc.group(2))
                        num = int(mc.group(3)) if mc.group(3) is not None else 1

                        valslist_ext += np.linspace(start=start, stop=end, num=num).tolist()
                    else:
                        valslist_ext.append(val)

                valslist = valslist_ext
            elif opt.type == str_permutations:
                valslist = list(permutations(valslist))

            valslist = [opt.type(x) for x in valslist]

            return valslist

        x_opt = axis_options[x_type]
        xs = process_axis(x_opt, x_values)

        y_opt = axis_options[y_type]
        ys = process_axis(y_opt, y_values)

        def fix_axis_seeds(axis_opt, axis_list):
            if axis_opt.label == 'Seed':
                return [int(random.randrange(4294967294)) if val is None or val == '' or val == -1 else val for val in axis_list]
            else:
                return axis_list

        if not no_fixed_seeds:
            xs = fix_axis_seeds(x_opt, xs)
            ys = fix_axis_seeds(y_opt, ys)

        if x_opt.label == 'Steps':
            total_steps = sum(xs) * len(ys)
        elif y_opt.label == 'Steps':
            total_steps = sum(ys) * len(xs)
        else:
            total_steps = p.steps * len(xs) * len(ys)

        print(f"X/Y plot will create {len(xs) * len(ys) * p.n_iter} images on a {len(xs)}x{len(ys)} grid. (Total steps to process: {total_steps * p.n_iter})")
        shared.total_tqdm.updateTotal(total_steps * p.n_iter)

        def cell(x, y):
            pc = copy(p)
            x_opt.apply(pc, x, xs)
            y_opt.apply(pc, y, ys)

            return process_images(pc)

        processed = draw_xy_grid(
            p,
            xs=xs,
            ys=ys,
            x_labels=[x_opt.format_value(p, x_opt, x) for x in xs],
            y_labels=[y_opt.format_value(p, y_opt, y) for y in ys],
            cell=cell,
            draw_legend=draw_legend
        )

        if opts.grid_save:
            images.save_image(processed.images[0], p.outpath_grids, "xy_grid", prompt=p.prompt, seed=processed.seed, grid=True, p=p)

        # restore checkpoint in case it was changed by axes
        modules.sd_models.reload_model_weights(shared.sd_model)

        opts.data["sd_hypernetwork"] = initial_hn

        return processed
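
The axis-value grammar accepted by process_axis() above supports plain comma-separated lists plus two range forms. A standalone sketch of the integer branch, reusing the same integer-range regexes:

    import re
    import numpy as np

    re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")
    re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*\])?\s*")

    def expand_int_axis(val):
        m = re_range.fullmatch(val)
        mc = re_range_count.fullmatch(val)
        if m is not None:    # "start-end (+step)"
            start, end = int(m.group(1)), int(m.group(2)) + 1
            step = int(m.group(3)) if m.group(3) is not None else 1
            return list(range(start, end, step))
        elif mc is not None: # "start-end [count]"
            start, end = int(mc.group(1)), int(mc.group(2))
            num = int(mc.group(3)) if mc.group(3) is not None else 1
            return [int(x) for x in np.linspace(start=start, stop=end, num=num).tolist()]
        return [val]

    print(expand_int_axis("1-5"))       # [1, 2, 3, 4, 5]
    print(expand_int_axis("1-5 (+2)"))  # [1, 3, 5]
    print(expand_int_axis("1-10 [5]"))  # [1, 3, 5, 7, 10]
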