Skip to content

Commit 3f349d3

Browse files
Committed: "Fixed linter issues"
1 parent 5307368 · commit 3f349d3

File tree

3 files changed

+16
-7
lines changed

3 files changed

+16
-7
lines changed

examples/models/stable_diffusion/model.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -173,7 +173,11 @@ def get_dummy_inputs(self):
173173

174174
unet_inputs = (
175175
torch.randn(
176-
batch_size, latent_channels, latent_height, latent_width, dtype=self.dtype
176+
batch_size,
177+
latent_channels,
178+
latent_height,
179+
latent_width,
180+
dtype=self.dtype,
177181
),
178182
torch.tensor([981]), # Random timestep
179183
torch.randn(batch_size, text_seq_len, text_embed_dim, dtype=self.dtype),

examples/openvino/stable_diffusion/export_lcm.py

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -9,15 +9,16 @@
99
import argparse
1010
import logging
1111
import os
12-
import sys
1312

1413
import torch
15-
from torch.export import export
1614

1715
from executorch.backends.openvino.partitioner import OpenvinoPartitioner
16+
from executorch.examples.models.stable_diffusion.model import ( # type: ignore[import-untyped]
17+
LCMModelLoader,
18+
)
1819
from executorch.exir import ExecutorchBackendConfig, to_edge_transform_and_lower
1920
from executorch.exir.backend.backend_details import CompileSpec
20-
from executorch.examples.models.stable_diffusion.model import LCMModelLoader
21+
from torch.export import export
2122

2223
# Configure logging
2324
logging.basicConfig(level=logging.INFO)
@@ -48,7 +49,9 @@ def export_text_encoder(self, output_path: str, device: str = "CPU") -> bool:
4849
dummy_inputs = self.model_loader.get_dummy_inputs()
4950

5051
# Export to ATEN graph
51-
exported_program = export(text_encoder_wrapper, dummy_inputs["text_encoder"])
52+
exported_program = export(
53+
text_encoder_wrapper, dummy_inputs["text_encoder"]
54+
)
5255

5356
# Configure OpenVINO compilation
5457
compile_spec = [CompileSpec("device", device.encode())]

examples/openvino/stable_diffusion/openvino_lcm.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -111,7 +111,9 @@ def encode_prompt(self, prompt: str):
111111
exec_time = time.time() - exec_start
112112
self.exec_time += exec_time
113113

114-
logger.info(f"Text encoder - Load: {load_time:.3f}s, Execute: {exec_time:.3f}s")
114+
logger.info(
115+
f"Text encoder - Load: {load_time:.3f}s, Execute: {exec_time:.3f}s"
116+
)
115117
return embeddings
116118
except Exception as e:
117119
logger.error(f"Failed to encode prompt: {e}")
@@ -262,7 +264,7 @@ def generate_image(
262264
total_time = time.time() - total_start
263265

264266
logger.info("=" * 60)
265-
logger.info(f"✓ Generation completed!")
267+
logger.info("✓ Generation completed!")
266268
logger.info(f" Total time: {total_time:.3f}s")
267269
logger.info(f" Total load time: {self.models_load_time:.3f}s")
268270
logger.info(f" Total Inference time: {self.exec_time:.3f}s")

0 commit comments

Comments (0)