diff --git a/src/diffusers/pipelines/ltx/pipeline_ltx.py b/src/diffusers/pipelines/ltx/pipeline_ltx.py
index 96d41bb3224b..d65c0b1f6a8b 100644
--- a/src/diffusers/pipelines/ltx/pipeline_ltx.py
+++ b/src/diffusers/pipelines/ltx/pipeline_ltx.py
@@ -186,16 +186,22 @@ def __init__(
             scheduler=scheduler,
         )

-        self.vae_spatial_compression_ratio = self.vae.spatial_compression_ratio if hasattr(self, "vae") else 32
-        self.vae_temporal_compression_ratio = self.vae.temporal_compression_ratio if hasattr(self, "vae") else 8
-        self.transformer_spatial_patch_size = self.transformer.config.patch_size if hasattr(self, "transformer") else 1
+        self.vae_spatial_compression_ratio = (
+            self.vae.spatial_compression_ratio if getattr(self, "vae", None) is not None else 32
+        )
+        self.vae_temporal_compression_ratio = (
+            self.vae.temporal_compression_ratio if getattr(self, "vae", None) is not None else 8
+        )
+        self.transformer_spatial_patch_size = (
+            self.transformer.config.patch_size if getattr(self, "transformer", None) is not None else 1
+        )
         self.transformer_temporal_patch_size = (
-            self.transformer.config.patch_size_t if hasattr(self, "transformer") else 1
+            self.transformer.config.patch_size_t if getattr(self, "transformer", None) is not None else 1
         )

         self.video_processor = VideoProcessor(vae_scale_factor=self.vae_spatial_compression_ratio)
         self.tokenizer_max_length = (
-            self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 128
+            self.tokenizer.model_max_length if getattr(self, "tokenizer", None) is not None else 128
         )

     def _get_t5_prompt_embeds(
diff --git a/src/diffusers/pipelines/ltx/pipeline_ltx_image2video.py b/src/diffusers/pipelines/ltx/pipeline_ltx_image2video.py
index 71fd725c915b..f8b6d4873a7c 100644
--- a/src/diffusers/pipelines/ltx/pipeline_ltx_image2video.py
+++ b/src/diffusers/pipelines/ltx/pipeline_ltx_image2video.py
@@ -205,16 +205,22 @@ def __init__(
             scheduler=scheduler,
         )

-        self.vae_spatial_compression_ratio = self.vae.spatial_compression_ratio if hasattr(self, "vae") else 32
-        self.vae_temporal_compression_ratio = self.vae.temporal_compression_ratio if hasattr(self, "vae") else 8
-        self.transformer_spatial_patch_size = self.transformer.config.patch_size if hasattr(self, "transformer") else 1
+        self.vae_spatial_compression_ratio = (
+            self.vae.spatial_compression_ratio if getattr(self, "vae", None) is not None else 32
+        )
+        self.vae_temporal_compression_ratio = (
+            self.vae.temporal_compression_ratio if getattr(self, "vae", None) is not None else 8
+        )
+        self.transformer_spatial_patch_size = (
+            self.transformer.config.patch_size if getattr(self, "transformer", None) is not None else 1
+        )
         self.transformer_temporal_patch_size = (
-            self.transformer.config.patch_size_t if hasattr(self, "transformer") else 1
+            self.transformer.config.patch_size_t if getattr(self, "transformer", None) is not None else 1
         )

         self.video_processor = VideoProcessor(vae_scale_factor=self.vae_spatial_compression_ratio)
         self.tokenizer_max_length = (
-            self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 128
+            self.tokenizer.model_max_length if getattr(self, "tokenizer", None) is not None else 128
         )

         self.default_height = 512
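
For context on why the patch swaps `hasattr(...)` for `getattr(..., None) is not None`: register_modules assigns each passed component as an attribute even when the caller supplies None, so `hasattr(self, "vae")` stays True and the fallback defaults (32, 8, 1, 128) were never reached, while the subsequent attribute access dereferenced None. The sketch below is a minimal, self-contained illustration of that pattern; `_StubPipeline` and its `vae` argument are hypothetical stand-ins, not diffusers classes.

# Minimal sketch, assuming register_modules-style attribute assignment.
class _StubPipeline:
    def __init__(self, vae=None):
        # The attribute always exists, even when the component is None.
        self.vae = vae

        # Old check: hasattr(self, "vae") is True here, so the old code would
        # dereference None (AttributeError) instead of using the 32 fallback.
        # New check: treat a None component the same as a missing one.
        self.vae_spatial_compression_ratio = (
            self.vae.spatial_compression_ratio if getattr(self, "vae", None) is not None else 32
        )

print(_StubPipeline().vae_spatial_compression_ratio)  # -> 32, no AttributeError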