
Commit f862bae

fix tests

Parent: b6e8fba

2 files changed, 5 insertions(+), 5 deletions(-)

src/diffusers/models/attention_processor.py (1 addition, 1 deletion)

@@ -786,8 +786,8 @@ def __init__(
         in_channels: int,
         out_channels: int,
         num_attention_heads: Optional[int] = None,
-        mult: float = 1.0,
         attention_head_dim: int = 8,
+        mult: float = 1.0,
         norm_type: str = "batch_norm",
         kernel_sizes: Tuple[int, ...] = (5,),
         eps: float = 1e-15,
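The only change here is the position of `mult` in the `__init__` signature. Keyword callers are unaffected, but anything constructing the module positionally binds arguments by signature order, so with `attention_head_dim` ahead of `mult` the fourth positional value is now read as the head dimension. A minimal sketch of why the order matters, using a hypothetical class name and values rather than the diffusers class:

# Minimal sketch, assuming positional construction in the failing tests
# (hypothetical class name and values; not the diffusers implementation).
class LinearAttentionLike:
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        num_attention_heads=None,
        attention_head_dim: int = 8,
        mult: float = 1.0,
    ):
        self.attention_head_dim = attention_head_dim
        self.mult = mult

# With attention_head_dim ahead of mult, the fourth positional argument
# is interpreted as the head dimension rather than the channel multiplier.
attn = LinearAttentionLike(4, 4, None, 16)
assert attn.attention_head_dim == 16
assert attn.mult == 1.0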

src/diffusers/models/autoencoders/autoencoder_dc.py (4 additions, 4 deletions)

@@ -631,8 +631,8 @@ def tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[Decod
         raise NotImplementedError("`tiled_decode` has not been implemented for AutoencoderDC.")

     def forward(self, sample: torch.Tensor, return_dict: bool = True) -> torch.Tensor:
-        z = self.encode(sample)
-        dec = self.decode(z)
+        z = self.encode(sample, return_dict=False)[0]
+        decoded = self.decode(z, return_dict=False)[0]
         if not return_dict:
-            return (dec,)
-        return DecoderOutput(sample=dec)
+            return (decoded,)
+        return DecoderOutput(sample=decoded)
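`encode` and `decode` return output objects by default, so the old `forward` handed an output wrapper to `decode` instead of the latent tensor. Requesting a plain tuple with `return_dict=False` and taking element `[0]` keeps the chain tensor-to-tensor. A minimal sketch of this unwrapping pattern, using stand-in classes rather than the real diffusers modules:

from dataclasses import dataclass

import torch


@dataclass
class DecoderOutput:
    sample: torch.Tensor


class TinyAutoencoder(torch.nn.Module):
    # Stand-in model: encode/decode are identity maps; only the return-type
    # handling mirrors the fixed forward above.

    def encode(self, x: torch.Tensor, return_dict: bool = True):
        latent = x  # real model: run the encoder here
        return DecoderOutput(sample=latent) if return_dict else (latent,)

    def decode(self, z: torch.Tensor, return_dict: bool = True):
        decoded = z  # real model: run the decoder here
        return DecoderOutput(sample=decoded) if return_dict else (decoded,)

    def forward(self, sample: torch.Tensor, return_dict: bool = True):
        z = self.encode(sample, return_dict=False)[0]        # tensor, not an output object
        decoded = self.decode(z, return_dict=False)[0]
        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded)


out = TinyAutoencoder()(torch.randn(1, 3, 8, 8))
assert isinstance(out, DecoderOutput)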
