
Commit 6ea4c47

chore: ruff
1 parent 91f91aa · commit 6ea4c47

File tree

4 files changed: +21 -5 lines changed


invokeai/app/invocations/cogview4_image_to_latents.py

Lines changed: 3 additions & 1 deletion
@@ -75,7 +75,9 @@ def invoke(self, context: InvocationContext) -> LatentsOutput:
         assert isinstance(vae_info.model, AutoencoderKL)

         estimated_working_memory = self._estimate_working_memory(image_tensor, vae_info.model)
-        latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor, estimated_working_memory=estimated_working_memory)
+        latents = self.vae_encode(
+            vae_info=vae_info, image_tensor=image_tensor, estimated_working_memory=estimated_working_memory
+        )

         latents = latents.to("cpu")
         name = context.tensors.save(tensor=latents)

invokeai/app/invocations/flux_vae_encode.py

Lines changed: 3 additions & 1 deletion
@@ -71,7 +71,9 @@ def invoke(self, context: InvocationContext) -> LatentsOutput:

         context.util.signal_progress("Running VAE")
         estimated_working_memory = self._estimate_working_memory(image_tensor, vae_info.model)
-        latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor, estimated_working_memory=estimated_working_memory)
+        latents = self.vae_encode(
+            vae_info=vae_info, image_tensor=image_tensor, estimated_working_memory=estimated_working_memory
+        )

         latents = latents.to("cpu")
         name = context.tensors.save(tensor=latents)

invokeai/app/invocations/image_to_latents.py

Lines changed: 12 additions & 2 deletions
@@ -86,7 +86,12 @@ def _estimate_working_memory(

     @staticmethod
     def vae_encode(
-        vae_info: LoadedModel, upcast: bool, tiled: bool, image_tensor: torch.Tensor, tile_size: int = 0, estimated_working_memory: int = 0
+        vae_info: LoadedModel,
+        upcast: bool,
+        tiled: bool,
+        image_tensor: torch.Tensor,
+        tile_size: int = 0,
+        estimated_working_memory: int = 0,
     ) -> torch.Tensor:
         with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
             assert isinstance(vae, (AutoencoderKL, AutoencoderTiny))
@@ -156,7 +161,12 @@ def invoke(self, context: InvocationContext) -> LatentsOutput:

         context.util.signal_progress("Running VAE encoder")
         latents = self.vae_encode(
-            vae_info=vae_info, upcast=self.fp32, tiled=self.tiled, image_tensor=image_tensor, tile_size=self.tile_size, estimated_working_memory=estimated_working_memory
+            vae_info=vae_info,
+            upcast=self.fp32,
+            tiled=self.tiled,
+            image_tensor=image_tensor,
+            tile_size=self.tile_size,
+            estimated_working_memory=estimated_working_memory,
         )

         latents = latents.to("cpu")

invokeai/app/invocations/sd3_image_to_latents.py

Lines changed: 3 additions & 1 deletion
@@ -71,7 +71,9 @@ def invoke(self, context: InvocationContext) -> LatentsOutput:
         assert isinstance(vae_info.model, AutoencoderKL)

         estimated_working_memory = self._estimate_working_memory(image_tensor, vae_info.model)
-        latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor, estimated_working_memory=estimated_working_memory)
+        latents = self.vae_encode(
+            vae_info=vae_info, image_tensor=image_tensor, estimated_working_memory=estimated_working_memory
+        )

         latents = latents.to("cpu")
         name = context.tensors.save(tensor=latents)
