diff --git a/modules/impact/core.py b/modules/impact/core.py index 801b71fb..c0638e82 100644 --- a/modules/impact/core.py +++ b/modules/impact/core.py @@ -255,7 +255,7 @@ def enhance_detail(image, model, clip, vae, guide_size, guide_size_for_bbox, max refiner_ratio=None, refiner_model=None, refiner_clip=None, refiner_positive=None, refiner_negative=None, control_net_wrapper=None, cycle=1, inpaint_model=False, noise_mask_feather=0, scheduler_func=None, - vae_tiled_encode=False, vae_tiled_decode=False): + vae_tiled_encode=False, vae_tiled_decode=False, return_by_cycle_step=False): if noise_mask is not None: noise_mask = utils.tensor_gaussian_blur_mask(noise_mask, noise_mask_feather) @@ -277,6 +277,9 @@ def enhance_detail(image, model, clip, vae, guide_size, guide_size_for_bbox, max elif 'pooled_output' in positive[0][1]: del positive[0][1]['pooled_output'] + refined_latents_by_step:list[torch.Tensor] = [] + refined_images_by_step:list[torch.Tensor] = [] + h = image.shape[1] w = image.shape[2] @@ -363,6 +366,7 @@ def enhance_detail(image, model, clip, vae, guide_size, guide_size_for_bbox, max sampler_opt = detailer_hook.get_custom_sampler() # ksampler + for i in range(0, cycle): if detailer_hook is not None: if detailer_hook is not None: @@ -384,42 +388,54 @@ def enhance_detail(image, model, clip, vae, guide_size, guide_size_for_bbox, max refined_latent, denoise2, refiner_ratio, refiner_model, refiner_clip, refiner_positive, refiner_negative, noise=noise, scheduler_func=scheduler_func, sampler_opt=sampler_opt) + if return_by_cycle_step: refined_latents_by_step.append(refined_latent) + if detailer_hook is not None: refined_latent = detailer_hook.pre_decode(refined_latent) + if return_by_cycle_step: refined_latents_by_step = [detailer_hook.pre_decode(latent) for latent in refined_latents_by_step] # non-latent downscale - latent downscale cause bad quality start = time.time() if vae_tiled_decode: (refined_image,) = nodes.VAEDecodeTiled().decode(vae, refined_latent, 512) 
# using default settings + if return_by_cycle_step: refined_images_by_step = [nodes.VAEDecodeTiled().decode(vae, refined_latent, 512)[0] for refined_latent in refined_latents_by_step] logging.info(f"[Impact Pack] vae decoded (tiled) in {time.time() - start:.1f}s") else: try: refined_image = vae.decode(refined_latent['samples']) + if return_by_cycle_step: refined_images_by_step = [vae.decode(refined_latent['samples']) for refined_latent in refined_latents_by_step] except Exception: # usually an out-of-memory exception from the decode, so try a tiled approach logging.warning(f"[Impact Pack] failed after {time.time() - start:.1f}s, doing vae.decode_tiled 64...") refined_image = vae.decode_tiled(refined_latent["samples"], tile_x=64, tile_y=64, ) + if return_by_cycle_step: refined_images_by_step = [vae.decode_tiled(refined_latent["samples"], tile_x=64, tile_y=64) for refined_latent in refined_latents_by_step] logging.info(f"[Impact Pack] vae decoded in {time.time() - start:.1f}s") else: # skipped refined_image = upscaled_image + if return_by_cycle_step: refined_images_by_step = [upscaled_image] * cycle if detailer_hook is not None: refined_image = detailer_hook.post_decode(refined_image) + if return_by_cycle_step: refined_images_by_step = [detailer_hook.post_decode(refined_image) for refined_image in refined_images_by_step] # downscale # workaround: support WAN as an i2i model if len(refined_image.shape) == 5: refined_image = refined_image.squeeze(0) + if return_by_cycle_step: refined_images_by_step = [ i.squeeze(0) for i in refined_images_by_step ] refined_image = utils.tensor_resize(refined_image, w, h) + if return_by_cycle_step: refined_images_by_step = [ utils.tensor_resize(i,w,h) for i in refined_images_by_step ] # prevent mixing of device refined_image = refined_image.cpu() + if return_by_cycle_step: refined_images_by_step = [ i.cpu() for i in refined_images_by_step ] # don't convert to latent - latent break image # preserving pil is much better + if return_by_cycle_step:
return refined_image, cnet_pils, refined_images_by_step return refined_image, cnet_pils diff --git a/modules/impact/impact_pack.py b/modules/impact/impact_pack.py index 5de7119f..3f9cc2e8 100644 --- a/modules/impact/impact_pack.py +++ b/modules/impact/impact_pack.py @@ -238,6 +238,7 @@ def INPUT_TYPES(s): "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), + "return_by_cycle_step": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled", "tooltip":"Return a batch of images by cycle steps"}), }, "optional": { "detailer_hook": ("DETAILER_HOOK",), @@ -264,7 +265,7 @@ def get_core_module(): def do_detail(image, segs, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, force_inpaint, wildcard_opt=None, detailer_hook=None, refiner_ratio=None, refiner_model=None, refiner_clip=None, refiner_positive=None, refiner_negative=None, - cycle=1, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None, tiled_encode=False, tiled_decode=False): + cycle=1, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None, tiled_encode=False, tiled_decode=False, return_by_cycle_step=False): if len(image) > 1: raise Exception('[Impact Pack] ERROR: DetailerForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') @@ -303,6 +304,8 @@ def do_detail(image, segs, model, clip, vae, guide_size, guide_size_for_bbox, ma if not (isinstance(model, str) and model == "DUMMY") and noise_mask_feather > 0 and 'denoise_mask_function' not in model.model_options: model = nodes_differential_diffusion.DifferentialDiffusion().execute(model)[0] + if return_by_cycle_step: images_by_step = [ image.cpu().clone() for _ in range(cycle) ] + for i, seg in 
enumerate(ordered_segs): cropped_image = utils.crop_ndarray4(image.cpu().numpy(), seg.crop_region) # Never use seg.cropped_image to handle overlapping area cropped_image = utils.to_tensor(cropped_image) @@ -359,7 +362,7 @@ def do_detail(image, segs, model, clip, vae, guide_size, guide_size_for_bbox, ma orig_cropped_image = cropped_image.clone() if not (isinstance(model, str) and model == "DUMMY"): - enhanced_image, cnet_pils = core.enhance_detail(cropped_image, model, clip, vae, guide_size, guide_size_for_bbox, max_size, + result = core.enhance_detail(cropped_image, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seg.bbox, seg_seed, steps, cfg, sampler_name, scheduler, cropped_positive, cropped_negative, denoise, cropped_mask, force_inpaint, wildcard_opt=wildcard_item, wildcard_opt_concat_mode=wildcard_concat_mode, @@ -369,7 +372,12 @@ def do_detail(image, segs, model, clip, vae, guide_size, guide_size_for_bbox, ma refiner_negative=refiner_negative, control_net_wrapper=seg.control_net_wrapper, cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func=scheduler_func_opt, vae_tiled_encode=tiled_encode, - vae_tiled_decode=tiled_decode) + vae_tiled_decode=tiled_decode, return_by_cycle_step=return_by_cycle_step) + if return_by_cycle_step: + enhanced_image, cnet_pils, by_step = result + else: + enhanced_image, cnet_pils = result + else: enhanced_image = cropped_image cnet_pils = None @@ -385,6 +393,10 @@ def do_detail(image, segs, model, clip, vae, guide_size, guide_size_for_bbox, ma utils.tensor_paste(image, enhanced_image, (seg.crop_region[0], seg.crop_region[1]), mask) # this code affecting to `cropped_image`. 
enhanced_list.append(enhanced_image) + if return_by_cycle_step and not (isinstance(model, str) and model == "DUMMY"): + for step_i, step_img in enumerate(by_step): + utils.tensor_paste(images_by_step[step_i], step_img.cpu(), (seg.crop_region[0], seg.crop_region[1]), mask) + if detailer_hook is not None: image = detailer_hook.post_paste(image) @@ -406,24 +418,31 @@ def do_detail(image, segs, model, clip, vae, guide_size, guide_size_for_bbox, ma new_segs.append(new_seg) image_tensor = utils.tensor_convert_rgb(image) + if return_by_cycle_step: + if len(ordered_segs): + images_tensors_by_step = [ utils.tensor_convert_rgb(im) for im in images_by_step ] + else: + images_tensors_by_step = [ image_tensor, ] cropped_list.sort(key=lambda x: x.shape, reverse=True) enhanced_list.sort(key=lambda x: x.shape, reverse=True) enhanced_alpha_list.sort(key=lambda x: x.shape, reverse=True) - return image_tensor, cropped_list, enhanced_list, enhanced_alpha_list, cnet_pil_list, (segs[0], new_segs) + returnable_image_tensor = torch.cat(images_tensors_by_step, dim=0) if return_by_cycle_step else image_tensor + + return returnable_image_tensor, cropped_list, enhanced_list, enhanced_alpha_list, cnet_pil_list, (segs[0], new_segs) def doit(self, image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, force_inpaint, wildcard, cycle=1, detailer_hook=None, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None, - tiled_encode=False, tiled_decode=False): + tiled_encode=False, tiled_decode=False, return_by_cycle_step=False): enhanced_img, *_ = \ DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, force_inpaint, wildcard, detailer_hook, cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, - scheduler_func_opt=scheduler_func_opt, tiled_encode=tiled_encode, tiled_decode=tiled_decode) +
scheduler_func_opt=scheduler_func_opt, tiled_encode=tiled_encode, tiled_decode=tiled_decode, return_by_cycle_step=return_by_cycle_step) return (enhanced_img, ) @@ -454,6 +473,7 @@ def INPUT_TYPES(s): "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), + "return_by_cycle_step": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled", "tooltip":"Return a batch of images by cycle steps"}), "max_retries": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), }, "optional": { @@ -481,7 +501,8 @@ def get_core_module(): def do_detail(image, segs, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, force_inpaint, wildcard_opt=None, detailer_hook=None, refiner_ratio=None, refiner_model=None, refiner_clip=None, refiner_positive=None, refiner_negative=None, - cycle=1, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None, tiled_encode=False, tiled_decode=False, max_retries=1): + cycle=1, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None, tiled_encode=False, tiled_decode=False, + return_by_cycle_step=False, max_retries=1): if len(image) > 1: raise Exception('[Impact Pack] ERROR: DetailerForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') @@ -592,7 +613,7 @@ def do_detail(image, segs, model, clip, vae, guide_size, guide_size_for_bbox, ma refiner_negative=refiner_negative, control_net_wrapper=seg.control_net_wrapper, cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func=scheduler_func_opt, vae_tiled_encode=tiled_encode, - vae_tiled_decode=tiled_decode) + vae_tiled_decode=tiled_decode, return_by_cycle_step=return_by_cycle_step) if detailer_hook is None or not 
detailer_hook.should_retry_patch(enhanced_image): break @@ -679,6 +700,7 @@ def INPUT_TYPES(s): "refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}), "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), + "return_by_cycle_step": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled", "tooltip":"Return a batch of images by cycle steps"}), }, "optional": { "detailer_hook": ("DETAILER_HOOK",), @@ -704,7 +726,7 @@ def doit(self, image, segs, guide_size, guide_size_for, max_size, seed, steps, c denoise, feather, noise_mask, force_inpaint, basic_pipe, wildcard, refiner_ratio=None, detailer_hook=None, refiner_basic_pipe_opt=None, cycle=1, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None, - tiled_encode=False, tiled_decode=False): + tiled_encode=False, tiled_decode=False, return_by_cycle_step=False): if len(image) > 1: raise Exception('[Impact Pack] ERROR: DetailerForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') @@ -723,7 +745,7 @@ def doit(self, image, segs, guide_size, guide_size_for, max_size, seed, steps, c refiner_ratio=refiner_ratio, refiner_model=refiner_model, refiner_clip=refiner_clip, refiner_positive=refiner_positive, refiner_negative=refiner_negative, cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func_opt=scheduler_func_opt, - tiled_encode=tiled_encode, tiled_decode=tiled_decode) + tiled_encode=tiled_encode, tiled_decode=tiled_decode, return_by_cycle_step=return_by_cycle_step) # set fallback image if len(cnet_pil_list) == 0: @@ -772,6 +794,7 @@ def INPUT_TYPES(s): "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), + "return_by_cycle_step": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled", 
"tooltip":"Return a batch of images by cycle steps"}), }, "optional": { "sam_model_opt": ("SAM_MODEL", ), @@ -801,7 +824,7 @@ def enhance_face(image, model, clip, vae, guide_size, guide_size_for_bbox, max_s sam_mask_hint_use_negative, drop_size, bbox_detector, segm_detector=None, sam_model_opt=None, wildcard_opt=None, detailer_hook=None, refiner_ratio=None, refiner_model=None, refiner_clip=None, refiner_positive=None, refiner_negative=None, cycle=1, - inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None, tiled_encode=False, tiled_decode=False): + inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None, tiled_encode=False, tiled_decode=False, return_by_cycle_step=False): # make default prompt as 'face' if empty prompt for CLIPSeg bbox_detector.setAux('face') @@ -834,7 +857,7 @@ def enhance_face(image, model, clip, vae, guide_size, guide_size_for_bbox, max_s refiner_clip=refiner_clip, refiner_positive=refiner_positive, refiner_negative=refiner_negative, cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, - scheduler_func_opt=scheduler_func_opt, tiled_encode=tiled_encode, tiled_decode=tiled_decode) + scheduler_func_opt=scheduler_func_opt, tiled_encode=tiled_encode, tiled_decode=tiled_decode, return_by_cycle_step=return_by_cycle_step) else: enhanced_img = image cropped_enhanced = [] @@ -861,7 +884,7 @@ def doit(self, image, model, clip, vae, guide_size, guide_size_for, max_size, se sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold, sam_mask_hint_use_negative, drop_size, bbox_detector, wildcard, cycle=1, sam_model_opt=None, segm_detector_opt=None, detailer_hook=None, inpaint_model=False, noise_mask_feather=0, - scheduler_func_opt=None, tiled_encode=False, tiled_decode=False): + scheduler_func_opt=None, tiled_encode=False, tiled_decode=False, return_by_cycle_step=False): result_img = None result_mask = None @@ -880,7 +903,7 @@ def doit(self, image, model, clip, vae, 
guide_size, guide_size_for, max_size, se sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold, sam_mask_hint_use_negative, drop_size, bbox_detector, segm_detector_opt, sam_model_opt, wildcard, detailer_hook, cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func_opt=scheduler_func_opt, - tiled_encode=tiled_encode, tiled_decode=tiled_decode) + tiled_encode=tiled_encode, tiled_decode=tiled_decode, return_by_cycle_step=return_by_cycle_step) result_img = torch.cat((result_img, enhanced_img), dim=0) if result_img is not None else enhanced_img result_mask = torch.cat((result_mask, mask), dim=0) if result_mask is not None else mask @@ -1664,6 +1687,7 @@ def INPUT_TYPES(s): "refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}), "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), + "return_by_cycle_step": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled", "tooltip":"Return a batch of images by cycle steps"}), }, "optional": { "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), @@ -1688,7 +1712,7 @@ def doit(self, image, detailer_pipe, guide_size, guide_size_for, max_size, seed, sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold, sam_mask_hint_use_negative, drop_size, refiner_ratio=None, cycle=1, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None, - tiled_encode=False, tiled_decode=False): + tiled_encode=False, tiled_decode=False, return_by_cycle_step=False): result_img = None result_mask = None @@ -1712,7 +1736,7 @@ def doit(self, image, detailer_pipe, guide_size, guide_size_for, max_size, seed, refiner_ratio=refiner_ratio, refiner_model=refiner_model, refiner_clip=refiner_clip, refiner_positive=refiner_positive, refiner_negative=refiner_negative, cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, 
scheduler_func_opt=scheduler_func_opt, - tiled_encode=tiled_encode, tiled_decode=tiled_decode) + tiled_encode=tiled_encode, tiled_decode=tiled_decode, return_by_cycle_step=return_by_cycle_step) result_img = torch.cat((result_img, enhanced_img), dim=0) if result_img is not None else enhanced_img result_mask = torch.cat((result_mask, mask), dim=0) if result_mask is not None else mask @@ -1759,6 +1783,7 @@ def INPUT_TYPES(s): "batch_size": ("INT", {"default": 1, "min": 1, "max": 100}), "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), + "return_by_cycle_step": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled", "tooltip":"Return a batch of images by cycle steps"}), }, "optional": { "refiner_basic_pipe_opt": ("BASIC_PIPE", ), @@ -1784,7 +1809,7 @@ def doit(self, image, mask, basic_pipe, guide_size, guide_size_for, max_size, ma seed, steps, cfg, sampler_name, scheduler, denoise, feather, crop_factor, drop_size, refiner_ratio, batch_size, cycle=1, refiner_basic_pipe_opt=None, detailer_hook=None, inpaint_model=False, noise_mask_feather=0, - bbox_fill=False, contour_fill=True, scheduler_func_opt=None): + bbox_fill=False, contour_fill=True, scheduler_func_opt=None, return_by_cycle_step=False): if len(image) > 1: raise Exception('[Impact Pack] ERROR: MaskDetailer does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') @@ -1815,7 +1840,8 @@ def doit(self, image, mask, basic_pipe, guide_size, guide_size_for, max_size, ma force_inpaint=True, wildcard_opt=None, detailer_hook=detailer_hook, refiner_ratio=refiner_ratio, refiner_model=refiner_model, refiner_clip=refiner_clip, refiner_positive=refiner_positive, refiner_negative=refiner_negative, - cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func_opt=scheduler_func_opt) + cycle=cycle, inpaint_model=inpaint_model, 
noise_mask_feather=noise_mask_feather, + scheduler_func_opt=scheduler_func_opt, return_by_cycle_step=return_by_cycle_step) else: enhanced_img, cropped_enhanced, cropped_enhanced_alpha = image, [], [] @@ -1848,7 +1874,8 @@ class DetailerForEachTest(DetailerForEach): def doit(self, image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, force_inpaint, wildcard, detailer_hook=None, - cycle=1, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None, tiled_encode=False, tiled_decode=False): + cycle=1, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None, tiled_encode=False, tiled_decode=False, + return_by_cycle_step=False): if len(image) > 1: raise Exception('[Impact Pack] ERROR: DetailerForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') @@ -1858,7 +1885,8 @@ def doit(self, image, segs, model, clip, vae, guide_size, guide_size_for, max_si cfg, sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, force_inpaint, wildcard, detailer_hook, cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, - scheduler_func_opt=scheduler_func_opt, tiled_encode=tiled_encode, tiled_decode=tiled_decode) + scheduler_func_opt=scheduler_func_opt, tiled_encode=tiled_encode, tiled_decode=tiled_decode, + return_by_cycle_step=return_by_cycle_step) # set fallback image if len(cropped) == 0: @@ -1890,7 +1918,7 @@ class DetailerForEachTestPipe(DetailerForEachPipe): def doit(self, image, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, denoise, feather, noise_mask, force_inpaint, basic_pipe, wildcard, cycle=1, refiner_ratio=None, detailer_hook=None, refiner_basic_pipe_opt=None, inpaint_model=False, noise_mask_feather=0, - scheduler_func_opt=None, 
tiled_encode=False, tiled_decode=False): + scheduler_func_opt=None, tiled_encode=False, tiled_decode=False, return_by_cycle_step=False): if len(image) > 1: raise Exception('[Impact Pack] ERROR: DetailerForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') @@ -1910,7 +1938,8 @@ def doit(self, image, segs, guide_size, guide_size_for, max_size, seed, steps, c refiner_clip=refiner_clip, refiner_positive=refiner_positive, refiner_negative=refiner_negative, cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, - scheduler_func_opt=scheduler_func_opt, tiled_encode=tiled_encode, tiled_decode=tiled_decode) + scheduler_func_opt=scheduler_func_opt, tiled_encode=tiled_encode, tiled_decode=tiled_decode, + return_by_cycle_step=return_by_cycle_step) # set fallback image if len(cropped) == 0: