Skip to content

Commit 1af83f6

Browse files
authored
Add files via upload
1 parent 75f67fa commit 1af83f6

File tree

4 files changed

+576
-99
lines changed

4 files changed

+576
-99
lines changed

AILab_ImageMaskTools.py

Lines changed: 312 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
# ComfyUI-RMBG v2.4.0
1+
# ComfyUI-RMBG v2.5.0
22
#
33
# This node facilitates background removal using various models, including RMBG-2.0, INSPYRENET, BEN, BEN2, and BIREFNET-HR.
44
# It utilizes advanced deep learning techniques to process images and generate accurate masks for background removal.
@@ -11,11 +11,11 @@
1111
# - Preview: A universal preview tool for both images and masks.
1212
# - ImagePreview: A specialized preview tool for images.
1313
# - MaskPreview: A specialized preview tool for masks.
14+
#
15+
# 2. Image and Mask Processing Nodes:
16+
# - MaskOverlay: A node for overlaying a mask on an image.
1417
# - LoadImage: A node for loading images with some Frequently used options.
15-
#
16-
# 2. Conversion Node:
1718
# - ImageMaskConvert: Converts between image and mask formats and extracts masks from image channels.
18-
# - ColorInput: A node for inputting colors in various formats.
1919
#
2020
# 3. Mask Processing Nodes:
2121
# - MaskEnhancer: Refines masks through techniques such as blur, smoothing, expansion/contraction, and hole filling.
@@ -28,6 +28,9 @@
2828
# - ICLoRAConcat: Concatenates images with a mask using IC LoRA.
2929
# - CropObject: Crops an image to the object in the image.
3030
# - ImageCompare: Compares two images and returns a mask of the differences.
31+
#
32+
# 5. Input Nodes:
33+
# - ColorInput: A node for inputting colors in various formats.
3134

3235
# These nodes are crafted to streamline common image and mask operations within ComfyUI workflows.
3336

@@ -42,6 +45,9 @@
4245
from PIL import Image, ImageFilter, ImageOps, ImageSequence, ImageChops, ImageDraw, ImageFont
4346
import torchvision.transforms.functional as T
4447
from comfy.utils import common_upscale
48+
import torch.nn.functional as F
49+
from comfy import model_management
50+
from comfy_extras.nodes_mask import ImageCompositeMasked
4551
from scipy import ndimage
4652

4753
# Utility functions
@@ -214,6 +220,91 @@ def preview(self, image=None, mask=None, prompt=None, extra_pnginfo=None):
214220
"result": (image if image is not None else None, mask if mask is not None else None)
215221
}
216222

223+
# Mask overlay node
class AILab_MaskOverlay(AILab_PreviewBase):
    """Overlay a colored, semi-transparent mask on an image and preview the result.

    Both inputs are optional: with only an image the image itself is previewed;
    with only a mask the mask is previewed as a grayscale image; with both, the
    mask is tinted with `mask_color` and composited over the image at
    `mask_opacity`. Always returns (IMAGE, MASK) for downstream use.
    """

    def __init__(self):
        super().__init__()
        # Random lowercase suffix keeps preview filenames unique per node instance.
        # (Fixed: the original alphabet string was typo'd — duplicate letters, missing 'w'.)
        self.prefix_append = "_preview_" + ''.join(random.choice("abcdefghijklmnopqrstuvwxyz") for x in range(5))
        self.compress_level = 4

    @classmethod
    def INPUT_TYPES(s):
        tooltips = {
            "mask_opacity": "Control mask opacity (0.0-1.0)",
            "mask_color": "Color for the mask overlay",
            "image": "Input image (RGBA will be converted to RGB)",
            "mask": "Input mask"
        }

        return {
            "required": {
                "mask_opacity": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": tooltips["mask_opacity"]}),
                "mask_color": ("COLOR", {"default": "#0000FF", "tooltip": tooltips["mask_color"]}),
            },
            "optional": {
                "image": ("IMAGE", {"tooltip": tooltips["image"]}),
                "mask": ("MASK", {"tooltip": tooltips["mask"]}),
            },
            "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    RETURN_NAMES = ("IMAGE", "MASK")
    FUNCTION = "execute"
    CATEGORY = "🧪AILab/🖼️IMAGE"
    OUTPUT_NODE = True

    def hex_to_rgb(self, hex_color):
        """Convert a '#RRGGBB' hex color code to an (r, g, b) tuple in the 0-1 range."""
        hex_color = hex_color.lstrip('#')
        r = int(hex_color[0:2], 16) / 255.0
        g = int(hex_color[2:4], 16) / 255.0
        b = int(hex_color[4:6], 16) / 255.0
        return r, g, b

    def ensure_rgb(self, image):
        """Drop the alpha channel if the image tensor is RGBA (last dim == 4)."""
        if image.shape[-1] == 4:
            rgb_image = image[..., :3]
            return rgb_image
        return image

    def execute(self, mask_opacity, mask_color, filename_prefix="ComfyUI", image=None, mask=None, prompt=None, extra_pnginfo=None):
        """Compose the preview, save it for the UI, and return (image, mask).

        Falls back to a 64x64 empty image / zero mask when the respective
        input is missing so downstream nodes always receive valid tensors.
        """
        if image is not None:
            image = self.ensure_rgb(image)

        preview = None

        if mask is not None and image is None:
            # Mask only: replicate the single channel to 3 channels for display.
            # NOTE(review): assumes mask is (..., H, W) — reshaped to (B, H, W, 3).
            preview = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3)
        elif mask is None and image is not None:
            preview = image
        elif mask is not None and image is not None:
            # Scale the mask by opacity, then build a solid-color image the same
            # size as the mask and composite it over the input image.
            mask_adjusted = mask * mask_opacity
            mask_image = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3).clone()

            r, g, b = self.hex_to_rgb(mask_color)
            mask_image[:, :, :, 0] = r
            mask_image[:, :, :, 1] = g
            mask_image[:, :, :, 2] = b

            # ImageCompositeMasked is used as an unbound helper; passing `self`
            # of a different class works because composite() ignores instance state.
            preview, = ImageCompositeMasked.composite(self, image, mask_image, 0, 0, True, mask_adjusted)

        if preview is None:
            preview = empty_image(64, 64)

        if mask is None:
            mask = torch.zeros((1, 64, 64))

        # Save preview for display
        result = self.save_image(preview, filename_prefix, prompt, extra_pnginfo)

        # Return both the image and mask for further processing
        return {
            "ui": result["ui"] if "ui" in result else {},
            "result": (preview, mask)
        }
307+
217308
# Mask preview node
218309
class AILab_MaskPreview(AILab_PreviewBase):
219310
def __init__(self):
@@ -1334,10 +1425,222 @@ def get_color(self, preset, color):
13341425
except Exception as e:
13351426
raise RuntimeError(f"Invalid color format: {color}. Please use format like #FF0000 or #F00")
13361427

1428+
# Image Mask Resize node
class AILab_ImageMaskResize:
    """Resize an image (and optional mask) with several aspect-ratio strategies.

    Modes:
      - stretch:   resize to exactly (width, height), ignoring aspect ratio
      - resize:    scale to fit within (width, height), keeping aspect ratio
      - pad:       scale to fit, then pad to the target size with a solid color
      - pad_edge:  scale to fit, then pad using the mean edge colors
      - crop:      crop to the target aspect ratio, then resize

    Returns the resized image, resized mask (or a zero mask if none was given),
    and the final width/height.
    """
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]

    @classmethod
    def INPUT_TYPES(s):
        tooltips = {
            "image": "Input image to resize",
            "width": "Target width in pixels (0 to keep original width)",
            "height": "Target height in pixels (0 to keep original height)",
            "scale_by": "Scale image by this factor (ignored if width or height > 0)",
            "upscale_method": "Method used for resizing the image",
            "resize_mode": "How to handle aspect ratio: stretch (ignore ratio), resize (maintain ratio by scaling), pad/pad_edge (maintain ratio with padding), crop (maintain ratio by cropping)",
            "pad_color": "Color to use for padding when resize_mode is set to pad",
            "crop_position": "Position to crop from when resize_mode is set to crop",
            "divisible_by": "Make dimensions divisible by this value (useful for some models that require specific dimensions)",
            "mask": "Optional mask to resize along with the image",
            "device": "Device to perform resizing on (CPU or GPU)"
        }

        return {
            "required": {
                "image": ("IMAGE", {"tooltip": tooltips["image"]}),
                "width": ("INT", { "default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1, "tooltip": tooltips["width"] }),
                "height": ("INT", { "default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1, "tooltip": tooltips["height"] }),
                "scale_by": ("FLOAT", { "default": 1.0, "min": 0.01, "max": 8.0, "step": 0.01, "tooltip": tooltips["scale_by"] }),
                "upscale_method": (s.upscale_methods, {"tooltip": tooltips["upscale_method"]}),
                "resize_mode": (["stretch", "resize", "pad", "pad_edge", "crop"], { "default": "stretch", "tooltip": tooltips["resize_mode"] }),
                "pad_color": ("COLOR", { "default": "#FFFFFF", "tooltip": tooltips["pad_color"] }),
                "crop_position": (["center", "top", "bottom", "left", "right"], { "default": "center", "tooltip": tooltips["crop_position"] }),
                "divisible_by": ("INT", { "default": 2, "min": 0, "max": 512, "step": 1, "tooltip": tooltips["divisible_by"] }),
            },
            "optional" : {
                "mask": ("MASK", {"tooltip": tooltips["mask"]}),
                "device": (["cpu", "gpu"], {"default": "cpu", "tooltip": tooltips["device"]}),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK", "INT", "INT",)
    RETURN_NAMES = ("IMAGE", "MASK", "WIDTH", "HEIGHT",)
    FUNCTION = "resize"
    CATEGORY = "🧪AILab/🖼️IMAGE"

    def resize(self, image, width, height, scale_by, upscale_method, resize_mode, pad_color, crop_position, divisible_by, device="cpu", mask=None):
        """Resize `image` (B, H, W, C) and optionally `mask` (B, H, W) per `resize_mode`."""
        B, H, W, C = image.shape

        if device == "gpu":
            if upscale_method == "lanczos":
                raise Exception("Lanczos is not supported on the GPU")
            device = model_management.get_torch_device()
        else:
            device = torch.device("cpu")

        # Resolve target dimensions: explicit width/height win over scale_by;
        # a 0 keeps the original dimension.
        if width == 0 and height == 0:
            if scale_by != 1.0:
                width = int(W * scale_by)
                height = int(H * scale_by)
            else:
                width = W
                height = H
        elif width == 0:
            width = W
        elif height == 0:
            height = H

        new_width = width
        new_height = height

        # Bug fix: pads must be initialized — in pad/pad_edge mode with target
        # dims equal to the source dims, the branch below never assigned them,
        # causing an UnboundLocalError at the padding step.
        pad_left = pad_right = pad_top = pad_bottom = 0

        if resize_mode == "resize" or resize_mode.startswith("pad"):
            if width != W or height != H:
                # Preserve aspect ratio: scale by the limiting dimension.
                if width == W and height != H:
                    ratio = height / H
                    new_width = round(W * ratio)
                    new_height = height
                elif height == H and width != W:
                    ratio = width / W
                    new_height = round(H * ratio)
                    new_width = width
                else:
                    ratio = min(width / W, height / H)
                    new_width = round(W * ratio)
                    new_height = round(H * ratio)

                if resize_mode.startswith("pad"):
                    # Center the scaled content; remainders go to right/bottom.
                    pad_left = (width - new_width) // 2
                    pad_right = width - new_width - pad_left
                    pad_top = (height - new_height) // 2
                    pad_bottom = height - new_height - pad_top

                width = new_width
                height = new_height

        width = max(1, width)
        height = max(1, height)

        if divisible_by > 1:
            width = width - (width % divisible_by) if width >= divisible_by else divisible_by
            height = height - (height % divisible_by) if height >= divisible_by else divisible_by

        out_image = image.clone().to(device)
        if mask is not None:
            out_mask = mask.clone().to(device)

        if resize_mode == "crop":
            # Crop the source to the target aspect ratio before resizing.
            old_width = W
            old_height = H
            old_aspect = old_width / old_height
            new_aspect = width / height

            if old_aspect > new_aspect:
                crop_w = round(old_height * new_aspect)
                crop_h = old_height
            else:
                crop_w = old_width
                crop_h = round(old_width / new_aspect)

            if crop_position == "center":
                x = (old_width - crop_w) // 2
                y = (old_height - crop_h) // 2
            elif crop_position == "top":
                x = (old_width - crop_w) // 2
                y = 0
            elif crop_position == "bottom":
                x = (old_width - crop_w) // 2
                y = old_height - crop_h
            elif crop_position == "left":
                x = 0
                y = (old_height - crop_h) // 2
            elif crop_position == "right":
                x = old_width - crop_w
                y = (old_height - crop_h) // 2

            out_image = out_image.narrow(-2, x, crop_w).narrow(-3, y, crop_h)
            if mask is not None:
                out_mask = out_mask.narrow(-1, x, crop_w).narrow(-2, y, crop_h)

        if (width != W or height != H) or (width != out_image.shape[2] or height != out_image.shape[1]):
            # common_upscale expects channels-first; move channels back afterwards.
            out_image = common_upscale(out_image.movedim(-1,1), width, height, upscale_method, crop="disabled").movedim(1,-1)

            if mask is not None:
                if upscale_method == "lanczos":
                    # Lanczos needs a 3-channel input; replicate then take one channel.
                    out_mask = common_upscale(out_mask.unsqueeze(1).repeat(1, 3, 1, 1), width, height, upscale_method, crop="disabled").movedim(1,-1)[:, :, :, 0]
                else:
                    out_mask = common_upscale(out_mask.unsqueeze(1), width, height, upscale_method, crop="disabled").squeeze(1)

        if resize_mode.startswith("pad"):
            if pad_left > 0 or pad_right > 0 or pad_top > 0 or pad_bottom > 0:
                padded_width = width + pad_left + pad_right
                padded_height = height + pad_top + pad_bottom
                if divisible_by > 1:
                    # Grow right/bottom padding so the padded size stays divisible.
                    width_remainder = padded_width % divisible_by
                    height_remainder = padded_height % divisible_by
                    if width_remainder > 0:
                        extra_width = divisible_by - width_remainder
                        pad_right += extra_width
                    if height_remainder > 0:
                        extra_height = divisible_by - height_remainder
                        pad_bottom += extra_height

                hex_color = fix_color_format(pad_color)
                r, g, b = tuple(int(hex_color[i:i+2], 16) for i in (1, 3, 5))
                color = f"{r}, {g}, {b}"

                B, H, W, C = out_image.shape
                padded_width = W + pad_left + pad_right
                padded_height = H + pad_top + pad_bottom

                bg_color = [int(x.strip())/255.0 for x in color.split(",")]
                if len(bg_color) == 1:
                    bg_color = bg_color * 3
                bg_color = torch.tensor(bg_color, dtype=out_image.dtype, device=out_image.device)

                padded_image = torch.zeros((B, padded_height, padded_width, C), dtype=out_image.dtype, device=out_image.device)

                # Renamed loop variable (was `b`, shadowing the blue channel above).
                for bi in range(B):
                    if resize_mode == "pad_edge":
                        # Fill each padding band with the mean color of the nearest edge.
                        top_edge = out_image[bi, 0, :, :]
                        bottom_edge = out_image[bi, H-1, :, :]
                        left_edge = out_image[bi, :, 0, :]
                        right_edge = out_image[bi, :, W-1, :]

                        padded_image[bi, :pad_top, :, :] = top_edge.mean(dim=0)
                        padded_image[bi, pad_top+H:, :, :] = bottom_edge.mean(dim=0)
                        padded_image[bi, :, :pad_left, :] = left_edge.mean(dim=0)
                        padded_image[bi, :, pad_left+W:, :] = right_edge.mean(dim=0)
                    else:
                        padded_image[bi, :, :, :] = bg_color.unsqueeze(0).unsqueeze(0)

                    padded_image[bi, pad_top:pad_top+H, pad_left:pad_left+W, :] = out_image[bi]

                if mask is not None:
                    # Padding a mask with zeros keeps the padded region unmasked.
                    padded_mask = F.pad(
                        out_mask,
                        (pad_left, pad_right, pad_top, pad_bottom),
                        mode='constant',
                        value=0
                    )
                    out_mask = padded_mask

                out_image = padded_image

        final_width = out_image.shape[2]
        final_height = out_image.shape[1]

        # Create a default (all-zero) mask if none was provided
        if mask is None:
            out_mask = torch.zeros((B, final_height, final_width), device=torch.device("cpu"), dtype=torch.float32)
        else:
            out_mask = out_mask.cpu()

        return (out_image.cpu(), out_mask, final_width, final_height)
1638+
13371639
# Node class mappings
13381640
NODE_CLASS_MAPPINGS = {
13391641
"AILab_LoadImage": AILab_LoadImage,
13401642
"AILab_Preview": AILab_Preview,
1643+
"AILab_MaskOverlay": AILab_MaskOverlay,
13411644
"AILab_ImagePreview": AILab_ImagePreview,
13421645
"AILab_MaskPreview": AILab_MaskPreview,
13431646
"AILab_ImageMaskConvert": AILab_ImageMaskConvert,
@@ -1350,13 +1653,15 @@ def get_color(self, preset, color):
13501653
"AILab_ICLoRAConcat": AILab_ICLoRAConcat,
13511654
"AILab_CropObject": AILab_CropObject,
13521655
"AILab_ImageCompare": AILab_ImageCompare,
1353-
"AILab_ColorInput": AILab_ColorInput
1656+
"AILab_ColorInput": AILab_ColorInput,
1657+
"AILab_ImageMaskResize": AILab_ImageMaskResize
13541658
}
13551659

13561660
# Node display name mappings
13571661
NODE_DISPLAY_NAME_MAPPINGS = {
13581662
"AILab_LoadImage": "Load Image (RMBG) 🖼️",
13591663
"AILab_Preview": "Image / Mask Preview (RMBG) 🖼️🎭",
1664+
"AILab_MaskOverlay": "Mask Overlay (RMBG) 🖼️🎭",
13601665
"AILab_ImagePreview": "Image Preview (RMBG) 🖼️",
13611666
"AILab_MaskPreview": "Mask Preview (RMBG) 🎭",
13621667
"AILab_ImageMaskConvert": "Image/Mask Converter (RMBG) 🖼️🎭",
@@ -1369,5 +1674,6 @@ def get_color(self, preset, color):
13691674
"AILab_ICLoRAConcat": "IC LoRA Concat (RMBG) 🖼️🎭",
13701675
"AILab_CropObject": "Crop To Object (RMBG) 🖼️🎭",
13711676
"AILab_ImageCompare": "Image Compare (RMBG) 🖼️🖼️",
1372-
"AILab_ColorInput": "Color Input (RMBG) 🎨"
1677+
"AILab_ColorInput": "Color Input (RMBG) 🎨",
1678+
"AILab_ImageMaskResize": "Image Mask Resize (RMBG) 🖼️🎭"
13731679
}

0 commit comments

Comments
 (0)