Upload outpaint_region.py

outpaint_region.py (ADDED, +290 -0)
@@ -0,0 +1,290 @@
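# Overview (summary added for readability; not part of the uploaded file): this appears to be
# a script for the AUTOMATIC1111 stable-diffusion-webui img2img tab. It registers an
# "Outpaint Canvas Region" entry that lets the user pick the top-left corner of a region to
# outpaint, either with the sliders or by clicking on a pop-up canvas, pastes the source image
# onto an enlarged black base, seeds the empty area with color/frequency-matched noise,
# inpaints that region, and pastes the result back into the enlarged base image.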
import math

import numpy as np
import skimage

import modules.scripts as scripts
import gradio as gr
from PIL import Image, ImageDraw

from modules import images, processing, devices
from modules.processing import Processed, process_images
from modules.shared import opts, cmd_opts, state

# this function is taken from https://github.com/parlance-zz/g-diffuser-bot
def get_matched_noise(_np_src_image, np_mask_rgb, noise_q=1, color_variation=0.05):
    # helper fft routines that keep ortho normalization and auto-shift before and after fft
    def _fft2(data):
        if data.ndim > 2:  # has channels
            out_fft = np.zeros((data.shape[0], data.shape[1], data.shape[2]), dtype=np.complex128)
            for c in range(data.shape[2]):
                c_data = data[:, :, c]
                out_fft[:, :, c] = np.fft.fft2(np.fft.fftshift(c_data), norm="ortho")
                out_fft[:, :, c] = np.fft.ifftshift(out_fft[:, :, c])
        else:  # one channel
            out_fft = np.zeros((data.shape[0], data.shape[1]), dtype=np.complex128)
            out_fft[:, :] = np.fft.fft2(np.fft.fftshift(data), norm="ortho")
            out_fft[:, :] = np.fft.ifftshift(out_fft[:, :])

        return out_fft

    def _ifft2(data):
        if data.ndim > 2:  # has channels
            out_ifft = np.zeros((data.shape[0], data.shape[1], data.shape[2]), dtype=np.complex128)
            for c in range(data.shape[2]):
                c_data = data[:, :, c]
                out_ifft[:, :, c] = np.fft.ifft2(np.fft.fftshift(c_data), norm="ortho")
                out_ifft[:, :, c] = np.fft.ifftshift(out_ifft[:, :, c])
        else:  # one channel
            out_ifft = np.zeros((data.shape[0], data.shape[1]), dtype=np.complex128)
            out_ifft[:, :] = np.fft.ifft2(np.fft.fftshift(data), norm="ortho")
            out_ifft[:, :] = np.fft.ifftshift(out_ifft[:, :])

        return out_ifft

    def _get_gaussian_window(width, height, std=3.14, mode=0):
        window_scale_x = float(width / min(width, height))
        window_scale_y = float(height / min(width, height))

        window = np.zeros((width, height))
        x = (np.arange(width) / width * 2. - 1.) * window_scale_x
        for y in range(height):
            fy = (y / height * 2. - 1.) * window_scale_y
            if mode == 0:
                window[:, y] = np.exp(-(x ** 2 + fy ** 2) * std)
            else:
                window[:, y] = (1 / ((x ** 2 + 1.) * (fy ** 2 + 1.))) ** (std / 3.14)  # hey wait a minute that's not gaussian

        return window

    def _get_masked_window_rgb(np_mask_grey, hardness=1.):
        np_mask_rgb = np.zeros((np_mask_grey.shape[0], np_mask_grey.shape[1], 3))
        if hardness != 1.:
            hardened = np_mask_grey[:] ** hardness
        else:
            hardened = np_mask_grey[:]
        for c in range(3):
            np_mask_rgb[:, :, c] = hardened[:]
        return np_mask_rgb

    width = _np_src_image.shape[0]
    height = _np_src_image.shape[1]
    num_channels = _np_src_image.shape[2]

    np_src_image = _np_src_image[:] * (1. - np_mask_rgb)
    np_mask_grey = (np.sum(np_mask_rgb, axis=2) / 3.)
    img_mask = np_mask_grey > 1e-6
    ref_mask = np_mask_grey < 1e-3

    windowed_image = _np_src_image * (1. - _get_masked_window_rgb(np_mask_grey))
    windowed_image /= np.max(windowed_image)
    windowed_image += np.average(_np_src_image) * np_mask_rgb  # / (1.-np.average(np_mask_rgb))  # rather than leave the masked area black, we get better results from fft by filling the average unmasked color

    src_fft = _fft2(windowed_image)  # get feature statistics from masked src img
    src_dist = np.absolute(src_fft)
    src_phase = src_fft / src_dist

    # create a generator with a static seed to make outpainting deterministic / only follow global seed
    rng = np.random.default_rng(0)

    noise_window = _get_gaussian_window(width, height, mode=1)  # start with simple gaussian noise
    noise_rgb = rng.random((width, height, num_channels))
    noise_grey = (np.sum(noise_rgb, axis=2) / 3.)
    noise_rgb *= color_variation  # the colorfulness of the starting noise is blended to greyscale with a parameter
    for c in range(num_channels):
        noise_rgb[:, :, c] += (1. - color_variation) * noise_grey

    noise_fft = _fft2(noise_rgb)
    for c in range(num_channels):
        noise_fft[:, :, c] *= noise_window
    noise_rgb = np.real(_ifft2(noise_fft))
    shaped_noise_fft = _fft2(noise_rgb)
    shaped_noise_fft[:, :, :] = np.absolute(shaped_noise_fft[:, :, :]) ** 2 * (src_dist ** noise_q) * src_phase  # perform the actual shaping

    brightness_variation = 0.  # color_variation  # todo: temporarily tieing brightness variation to color variation for now
    contrast_adjusted_np_src = _np_src_image[:] * (brightness_variation + 1.) - brightness_variation * 2.

    # scikit-image is used for histogram matching, very convenient!
    shaped_noise = np.real(_ifft2(shaped_noise_fft))
    shaped_noise -= np.min(shaped_noise)
    shaped_noise /= np.max(shaped_noise)
    shaped_noise[img_mask, :] = skimage.exposure.match_histograms(shaped_noise[img_mask, :] ** 1., contrast_adjusted_np_src[ref_mask, :], channel_axis=1)
    shaped_noise = _np_src_image[:] * (1. - np_mask_rgb) + shaped_noise * np_mask_rgb

    matched_noise = shaped_noise[:]

    return np.clip(matched_noise, 0., 1.)

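# Illustration only (not part of the original script): get_matched_noise() can be exercised on
# its own. The sketch below is kept as comments so it never runs inside the webui; it assumes a
# hypothetical 128x128 float RGB array whose right half is to be outpainted. The mask is 1.0
# where new content should be generated and 0.0 where the source pixels are kept, matching how
# run() below calls the function.
#
#     demo_rng = np.random.default_rng(1)
#     demo_src = np.zeros((128, 128, 3), dtype=np.float64)
#     demo_src[:, :64, :] = demo_rng.random((128, 64, 3))   # known left half
#     demo_mask = np.zeros_like(demo_src)
#     demo_mask[:, 64:, :] = 1.0                             # region to outpaint
#     demo_out = get_matched_noise(demo_src, demo_mask, noise_q=1, color_variation=0.05)
#     # demo_out holds the source pixels with the masked half filled by matched noise.
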
class Script(scripts.Script):
    def title(self):
        return "Outpaint Canvas Region"

    def show(self, is_img2img):
        return is_img2img

    def ui(self, is_img2img):
        if not is_img2img:
            return None

        canvasButton = gr.Button("Show/Hide Canvas")
        leftcoord = gr.Slider(label="Left start coord", minimum=-400, maximum=2048, step=1, value=0, elem_id="leftCoord")
        topcoord = gr.Slider(label="Top start coord", minimum=-400, maximum=2048, step=1, value=0, elem_id="topCoord")
        dummy = gr.Slider(label="unused", minimum=-1, maximum=1, step=1, value=0)

        canvasButton.click(None, [], dummy, _js="(x) => { let grap = document.body.children[0];\
            let tabDiv = grap.shadowRoot.getElementById('tab_img2img');\
            let img2imgDiv = grap.shadowRoot.getElementById('img2img_image');\
            let imgB64 = img2imgDiv.children[2].children[0].children[1].src;\
            let canvDiv = grap.shadowRoot.getElementById('outDrawCanvasDiv');\
            let canv = grap.shadowRoot.getElementById('outDrawCanvas');\
            console.info('run', canvDiv);\
            if (!canvDiv) {\
                canvDiv = document.createElement('div');\
                canvDiv.id = 'outDrawCanvasDiv';\
                canv = document.createElement('canvas');\
                canv.id = 'outDrawCanvas';\
                canvDiv.append(canv);\
                tabDiv.append(canvDiv);\
                canvDiv.style.display = 'none';\
                canvDiv.style.position = 'absolute';\
                canvDiv.style.left = '50px';\
                canvDiv.style.right = '50px';\
                canvDiv.style.top = '50px';\
                canvDiv.style.bottom = '50px';\
                canvDiv.style.zIndex = '1000';\
                canvDiv.style.background = '#d0d0d0';\
                canvDiv.style.overflow = 'auto';\
                canv.onclick = function(event) {\
                    event.stopPropagation();\
                    let rect = canv.getBoundingClientRect();\
                    let x = event.clientX - rect.left;\
                    let y = event.clientY - rect.top;\
                    if (x > canv.width - 512 || y > canv.height - 512) return;\
                    let ctx = canv.getContext('2d');\
                    ctx.fillStyle = 'black';\
                    ctx.fillRect(0, 0, canv.width, canv.height);\
                    ctx.drawImage(canv.storeImage, 400, 400, canv.width - 800, canv.height - 800);\
                    ctx.beginPath();\
                    ctx.lineWidth = '2';\
                    ctx.strokeStyle = 'white';\
                    ctx.rect(x, y, 512, 512);\
                    ctx.stroke();\
                    grap.shadowRoot.getElementById('leftCoord').getElementsByTagName('input')[0].value = x - 400;\
                    grap.shadowRoot.getElementById('leftCoord').getElementsByTagName('input')[1].value = x - 400;\
                    grap.shadowRoot.getElementById('topCoord').getElementsByTagName('input')[0].value = y - 400;\
                    grap.shadowRoot.getElementById('topCoord').getElementsByTagName('input')[1].value = y - 400;\
                    grap.shadowRoot.getElementById('leftCoord').getElementsByTagName('input')[0].dispatchEvent(new Event('input'));\
                    grap.shadowRoot.getElementById('topCoord').getElementsByTagName('input')[0].dispatchEvent(new Event('input'));\
                }\
            }\
            console.info(canvDiv.style.display);\
            if (canvDiv.style.display !== 'none') {\
                canvDiv.style.display = 'none';\
                return 0;\
            }\
            if (canv && imgB64) {\
                let ctx = canv.getContext('2d');\
                let image = new Image();\
                image.onload = function() {\
                    console.info('onLoad');\
                    canv.width = this.width;\
                    canv.height = this.height;\
                    ctx.drawImage(this, 0, 0);\
                    let pixelData = ctx.getImageData(0, 0, canv.width, canv.height).data;\
                    let firstX = 9999;\
                    let firstY = 9999;\
                    let lastX = 0;\
                    let lastY = 0;\
                    /* getImageData() returns RGBA, so the pixel stride is 4 bytes */\
                    for (let y = 0; y < this.height; y = y + 10) {\
                        for (let x = 0; x < this.width; x++) {\
                            if (pixelData[y*this.width*4 + x*4] || pixelData[y*this.width*4 + x*4 + 1] || pixelData[y*this.width*4 + x*4 + 2]) {\
                                if (x < firstX) firstX = x;\
                                if (x > lastX) lastX = x;\
                            }\
                        }\
                    }\
                    for (let x = 0; x < this.width; x = x + 10) {\
                        for (let y = 0; y < this.height; y++) {\
                            if (pixelData[y*this.width*4 + x*4] || pixelData[y*this.width*4 + x*4 + 1] || pixelData[y*this.width*4 + x*4 + 2]) {\
                                if (y < firstY) firstY = y;\
                                if (y > lastY) lastY = y;\
                            }\
                        }\
                    }\
                    if (lastX < firstX || lastY < firstY) return 0;\
                    canv.width = (lastX - firstX) + 800;\
                    canv.style.width = canv.width + 'px';\
                    canv.height = (lastY - firstY) + 800;\
                    canv.style.height = canv.height + 'px';\
                    ctx.fillStyle = 'black';\
                    ctx.fillRect(0, 0, canv.width, canv.height);\
                    ctx.drawImage(image, 400, 400, (lastX - firstX), (lastY - firstY));\
                    canvDiv.style.display = 'block';\
                    canvDiv.style.position = 'fixed';\
                    canvDiv.style.left = '400px';\
                    canvDiv.style.width = 'calc(100% - 400px)';\
                    canvDiv.style.top = '0px';\
                    canvDiv.style.height = '100%';\
                    canv.storeImage = this;\
                };\
                console.info('loading image');\
                image.src = imgB64;\
            };\
            return 0}")
        return [leftcoord, topcoord, canvasButton, dummy]
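
    # Note on the coordinate convention (added explanation): the pop-up canvas pads the source
    # image with a 400 px border on every side, and the click handler stores (x - 400, y - 400)
    # in the "leftCoord"/"topCoord" sliders, so negative slider values mean the outpainted
    # region starts above or to the left of the original image. The selection rectangle drawn
    # on the canvas is hardcoded to 512x512 and appears to assume the img2img width/height are
    # set to 512; run() itself uses p.width/p.height.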

    def run(self, p, leftcoord, topcoord, canvasButton, dummy):
        initial_seed = None
        initial_info = None
        p.mask_blur = 0
        p.inpaint_full_res = False
        p.do_not_save_samples = True
        p.do_not_save_grid = True
        origInBaseLeft = 0
        origInBaseTop = 0
        workItemLeft = leftcoord
        workItemTop = topcoord
        newwidth = p.init_images[0].width
        newheight = p.init_images[0].height
        if leftcoord < 0:
            newwidth = newwidth - leftcoord
            origInBaseLeft = -leftcoord
            workItemLeft = 0
        if topcoord < 0:
            newheight = newheight - topcoord
            origInBaseTop = -topcoord
            workItemTop = 0
        if leftcoord + p.width > newwidth:
            newwidth = leftcoord + p.width
        if topcoord + p.height > newheight:
            newheight = topcoord + p.height
        newBase = Image.new("RGB", (newwidth, newheight), "black")
        newBase.paste(p.init_images[0], (origInBaseLeft, origInBaseTop))
        workItem = Image.new("RGB", (p.width, p.height))
        region = newBase.crop((workItemLeft, workItemTop, workItemLeft + p.width, workItemTop + p.height))
        workItem.paste(region, (0, 0))
        workData = np.array(workItem).astype(np.float32) / 255.0
        mask = Image.new("L", (p.width, p.height), color=255)
        maskData = np.array(mask)
        for y in range(p.height):
            for x in range(p.width):
                if workData[y][x][0] + workData[y][x][1] + workData[y][x][2] > 0.001:
                    maskData[y][x] = 0
        p.image_mask = Image.fromarray(maskData, mode="L")
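        # The per-pixel loop above could be replaced by an equivalent vectorized form, e.g.
        # (sketch with the same semantics: the mask stays 255 where the work image is still black):
        #     maskData = np.where(workData.sum(axis=2) > 0.001, 0, 255).astype(np.uint8)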
        np_image = (np.asarray(workItem) / 255.0).astype(np.float64)
        np_mask = (np.asarray(p.image_mask.convert('RGB')) / 255.0).astype(np.float64)
        noised = get_matched_noise(np_image, np_mask)
        workItem = Image.fromarray(np.clip(noised * 255., 0., 255.).astype(np.uint8), mode="RGB")
        workImages = []
        for n in range(p.batch_size):
            workImages.append(workItem)
        p.init_images = workImages
        p.latent_mask = None
        proc = process_images(p)
        results = []
        for n in range(p.batch_size):
            proc_img = proc.images[n]
            final_image = newBase.copy()
            final_image.paste(proc_img, (workItemLeft, workItemTop))
            proc.images[n] = final_image
        return proc