Update ComfyUI/custom_nodes/ComfyUI-BrushNet/brushnet_nodes.py
ComfyUI/custom_nodes/ComfyUI-BrushNet/brushnet_nodes.py
CHANGED
@@ -487,7 +487,12 @@ class BlendInpaint:
         blurred = []
         for i in range(inpaint.shape[0]):
             height, width, _ = original[i].shape
-            x0, y0, cut_width, cut_height = origin[i]
+
+            if origin is not None:
+                x0, y0, cut_width, cut_height = origin[i]
+            else:
+                x0, y0 = 0, 0
+                cut_width, cut_height = width, height
 
             # Ensure cut dimensions don't exceed original image dimensions
             cut_width = min(cut_width, width - x0)
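With this hunk, BlendInpaint no longer requires a crop origin from CutForInpaint: when `origin` is `None`, the cut region defaults to the full frame. Below is a minimal standalone sketch of that fallback, assuming `origin` is either `None` or an indexable sequence of `(x0, y0, cut_width, cut_height)` tuples; `resolve_crop` is a hypothetical helper for illustration, not part of the node.

    def resolve_crop(origin, i, width, height):
        # With an origin from CutForInpaint, reuse its crop box for image i.
        if origin is not None:
            x0, y0, cut_width, cut_height = origin[i]
        else:
            # No upstream cut: blend over the whole image.
            x0, y0 = 0, 0
            cut_width, cut_height = width, height
        # Clamp so the box stays inside the image (mirroring the clamping
        # that follows the hunk; the height clamp is presumed symmetric).
        cut_width = min(cut_width, width - x0)
        cut_height = min(cut_height, height - y0)
        return x0, y0, cut_width, cut_height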
@@ -518,7 +523,7 @@ class BlendInpaint:
 
 
 
-def scale_mask_and_image(image, mask, width, height):
+def scale_mask_and_image(image, mask, width, height, side_margin):
     h0, w0 = mask.shape
     iy, ix = (mask == 1).nonzero(as_tuple=True)
 
@@ -541,7 +546,7 @@ def scale_mask_and_image(image, mask, width, height):
         new_mask_height = mask_height
         new_mask_width = mask_height * aspect_ratio
 
-    margin = 0
+    margin = side_margin/100.0
    cut_width = int(new_mask_width * (1 + 2 * margin))
    cut_height = int(new_mask_height * (1 + 2 * margin))
 
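For orientation, the margin arithmetic in this hunk: `side_margin` arrives as a percentage and is applied to both sides of the mask's bounding box, so the cut region scales by `1 + 2 * margin`. A worked example with illustrative numbers:

    side_margin = 10                  # the node's side_margin_percent input
    margin = side_margin / 100.0      # 0.1 -> 10% padding on each side
    new_mask_width, new_mask_height = 300, 200
    cut_width = int(new_mask_width * (1 + 2 * margin))    # int(300 * 1.2) == 360
    cut_height = int(new_mask_height * (1 + 2 * margin))  # int(200 * 1.2) == 240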
@@ -572,8 +577,7 @@ class CutForInpaint:
                 {
                     "image": ("IMAGE",),
                     "mask": ("MASK",),
-                    "width": ("INT", {"default": 512, "min": 64, "max": 2048}),
-                    "height": ("INT", {"default": 512, "min": 64, "max": 2048}),
+                    "side_margin_percent": ("INT", {"default": 10, "min": 0, "max": 1000})
                 },
             }
 
@@ -583,12 +587,12 @@ class CutForInpaint:
 
     FUNCTION = "cut_for_inpaint"
 
-    def cut_for_inpaint(self, image: torch.Tensor, mask: torch.Tensor, width: int, height: int):
+    def cut_for_inpaint(self, image: torch.Tensor, mask: torch.Tensor, side_margin_percent: int):
         ret = []
         msk = []
         org = []
         for i in range(image.shape[0]):
-            cut_image, cut_mask, (x0, y0, cut_width, cut_height) = scale_mask_and_image(image[i], mask[i], width, height)
+            cut_image, cut_mask, (x0, y0, cut_width, cut_height) = scale_mask_and_image(image[i], mask[i], 512, 512, side_margin_percent)
             ret.append(cut_image)
             msk.append(cut_mask)
             org.append(torch.IntTensor([x0, y0, cut_width, cut_height]))
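The crop target is now fixed at 512x512 inside `cut_for_inpaint`, with only the margin exposed as a node input. A hedged usage sketch, assuming the usual ComfyUI tensor layouts ((B, H, W, C) float images, (B, H, W) masks with 1s marking the inpaint region); the exact return packaging of the three lists is not shown in this diff.

    import torch

    node = CutForInpaint()

    # A 1024x768 frame with a rectangular inpaint region marked in the mask.
    image = torch.rand(1, 768, 1024, 3)   # (B, H, W, C), ComfyUI convention
    mask = torch.zeros(1, 768, 1024)      # (B, H, W)
    mask[0, 300:500, 400:700] = 1.0

    # Only the margin is exposed now; the cut is rescaled toward 512x512.
    result = node.cut_for_inpaint(image, mask, side_margin_percent=10)
    # The per-image origins collected in `org` are what BlendInpaint consumes
    # to paste the inpainted 512x512 patch back at the right place.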
@@ -723,31 +727,6 @@ def prepare_image(image, mask):
     return (masked_image, mask)
 
 
-# Get origin of the mask
-def cut_with_mask(mask, width, height):
-    iy, ix = (mask == 1).nonzero(as_tuple=True)
-    h0, w0 = mask.shape
-
-    if iy.numel() == 0:
-        x_c, y_c = w0 / 2.0, h0 / 2.0
-        mask_width, mask_height = 0, 0
-    else:
-        x_min, x_max = ix.min().item(), ix.max().item()
-        y_min, y_max = iy.min().item(), iy.max().item()
-        x_c, y_c = (x_min + x_max) / 2.0, (y_min + y_max) / 2.0
-        mask_width, mask_height = x_max - x_min + 1, y_max - y_min + 1
-
-    cut_width = max(width, mask_width * 1.4)  # 140% of mask width
-    cut_height = max(height, mask_height * 1.4)  # 140% of mask height
-
-    cut_width = min(cut_width, w0)
-    cut_height = min(cut_height, h0)
-
-    x0 = max(0, min(w0 - cut_width, x_c - cut_width / 2))
-    y0 = max(0, min(h0 - cut_height, y_c - cut_height / 2))
-
-    return (int(x0), int(y0), int(cut_width), int(cut_height))
-
 
 # Prepare conditioning_latents
 @torch.inference_mode()
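For reference, the removed `cut_with_mask` padded the mask bounding box to a fixed 140% per axis (a factor of 1.4, i.e. 20% extra on each side). The same padding remains expressible through the surviving `scale_mask_and_image` path, since `1 + 2 * (side_margin_percent / 100)` equals 1.4 at `side_margin_percent = 20`.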