Skip to content

Commit c9081a8

Browse files
authored
[Fix bugs] pipeline_controlnet_sd_xl.py (#6653)
* Update pipeline_controlnet_sd_xl.py
* Update pipeline_controlnet_xs_sd_xl.py
1 parent 0eb68d9 commit c9081a8

File tree

2 files changed

+0
-10
lines changed

2 files changed

+0
-10
lines changed

examples/research_projects/controlnetxs/pipeline_controlnet_xs_sd_xl.py

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1041,11 +1041,6 @@ def __call__(
10411041
step_idx = i // getattr(self.scheduler, "order", 1)
10421042
callback(step_idx, t, latents)
10431043

1044-
# manually for max memory savings
1045-
if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:
1046-
self.upcast_vae()
1047-
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1048-
10491044
if not output_type == "latent":
10501045
# make sure the VAE is in float32 mode, as it overflows in float16
10511046
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast

src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1404,11 +1404,6 @@ def __call__(
14041404
step_idx = i // getattr(self.scheduler, "order", 1)
14051405
callback(step_idx, t, latents)
14061406

1407-
# manually for max memory savings
1408-
if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:
1409-
self.upcast_vae()
1410-
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1411-
14121407
if not output_type == "latent":
14131408
# make sure the VAE is in float32 mode, as it overflows in float16
14141409
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast

0 commit comments

Comments
 (0)