from __future__ import annotations

from typing import TYPE_CHECKING

import einops
import torch
from diffusers import UNet2DConditionModel

from invokeai.backend.stable_diffusion.extension_callback_type import ExtensionCallbackType
from invokeai.backend.stable_diffusion.extensions.base import ExtensionBase, callback

if TYPE_CHECKING:
    from invokeai.backend.stable_diffusion.denoise_context import DenoiseContext


class InpaintExt(ExtensionBase):
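    """Inpainting support for normal (non-inpainting) UNet models.

    At every denoising step, latent regions where the mask is 0 are overwritten with the original
    latents re-noised to the current timestep, so only regions where the mask is 1 are regenerated.
    """
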
    def __init__(
        self,
        mask: torch.Tensor,
        is_gradient_mask: bool,
    ):
        super().__init__()
        self.mask = mask
        self.is_gradient_mask = is_gradient_mask
        # Populated by init_tensors() (the PRE_DENOISE_LOOP callback) before the denoise loop runs.
        self.noise: torch.Tensor | None = None

    @staticmethod
    def _is_normal_model(unet: UNet2DConditionModel) -> bool:
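        # A standard Stable Diffusion UNet takes 4 latent input channels; inpainting-specific
        # UNets take 9 (latents + masked-image latents + mask).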
        return unet.conv_in.in_channels == 4

    def _apply_mask(self, ctx: DenoiseContext, latents: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
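        """Blend the original latents, noised to timestep t, into the regions where the mask is 0."""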
        batch_size = latents.size(0)
        mask = einops.repeat(self.mask, "b c h w -> (repeat b) c h w", repeat=batch_size)
        if t.dim() == 0:
            # Some schedulers expect t to be one-dimensional.
            # TODO: file diffusers bug about inconsistency?
            t = einops.repeat(t, "-> batch", batch=batch_size)
        # Noise shouldn't be re-randomized between steps here. The multistep schedulers
        # get very confused about what is happening from step to step when we do that.
        mask_latents = ctx.scheduler.add_noise(ctx.inputs.orig_latents, self.noise, t)
        # TODO: Do we need to also apply scheduler.scale_model_input? Or is add_noise appropriately scaled already?
        # mask_latents = self.scheduler.scale_model_input(mask_latents, t)
        mask_latents = einops.repeat(mask_latents, "b c h w -> (repeat b) c h w", repeat=batch_size)
        if self.is_gradient_mask:
            threshold = t.item() / ctx.scheduler.config.num_train_timesteps
            mask_bool = mask > threshold  # I don't know when mask got inverted, but it did
            masked_input = torch.where(mask_bool, latents, mask_latents)
        else:
            masked_input = torch.lerp(mask_latents.to(dtype=latents.dtype), latents, mask.to(dtype=latents.dtype))
        return masked_input

    @callback(ExtensionCallbackType.PRE_DENOISE_LOOP)
    def init_tensors(self, ctx: DenoiseContext):
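        """Validate the model, move the mask to the working device/dtype, and prepare the noise tensor."""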
        if not self._is_normal_model(ctx.unet):
            raise ValueError("InpaintExt should only be used with normal (non-inpainting) models!")

        self.mask = self.mask.to(device=ctx.latents.device, dtype=ctx.latents.dtype)

        self.noise = ctx.inputs.noise
        if self.noise is None:
            self.noise = torch.randn(
                ctx.latents.shape,
                dtype=torch.float32,
                device="cpu",
                generator=torch.Generator(device="cpu").manual_seed(ctx.seed),
            ).to(device=ctx.latents.device, dtype=ctx.latents.dtype)

    # TODO: order value
    @callback(ExtensionCallbackType.PRE_STEP, order=-100)
    def apply_mask_to_initial_latents(self, ctx: DenoiseContext):
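        """Before each step, overwrite the mask==0 region of the latents with the re-noised original latents."""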
        ctx.latents = self._apply_mask(ctx, ctx.latents, ctx.timestep)

    # TODO: order value
    # TODO: redo this with preview events rewrite
    @callback(ExtensionCallbackType.POST_STEP, order=-100)
    def apply_mask_to_step_output(self, ctx: DenoiseContext):
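        """Apply the mask to the step's predicted clean sample, which schedulers expose under different names."""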
        timestep = ctx.scheduler.timesteps[-1]
        if hasattr(ctx.step_output, "denoised"):
            ctx.step_output.denoised = self._apply_mask(ctx, ctx.step_output.denoised, timestep)
        elif hasattr(ctx.step_output, "pred_original_sample"):
            ctx.step_output.pred_original_sample = self._apply_mask(
                ctx, ctx.step_output.pred_original_sample, timestep
            )
        else:
            ctx.step_output.pred_original_sample = self._apply_mask(ctx, ctx.step_output.prev_sample, timestep)

    # TODO: should an order value be used here?
    # Restore the unmasked part after the last step is completed.
    @callback(ExtensionCallbackType.POST_DENOISE_LOOP)
    def restore_unmasked(self, ctx: DenoiseContext):
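        """After the final step, copy the original latents back into the region where the mask is 0."""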
        if self.is_gradient_mask:
            ctx.latents = torch.where(self.mask > 0, ctx.latents, ctx.inputs.orig_latents)
        else:
            ctx.latents = torch.lerp(ctx.inputs.orig_latents, ctx.latents, self.mask)