Skip to content

Commit 3f79467

Browse files
committed
Ruff format
1 parent 2c2ec8f commit 3f79467

File tree

1 file changed

+3
-5
lines changed

1 file changed

+3
-5
lines changed

invokeai/backend/stable_diffusion/diffusion/conditioning_data.py

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
import math
44
from dataclasses import dataclass
55
from enum import Enum
6-
from typing import TYPE_CHECKING, List, Optional, Union
6+
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
77

88
import torch
99

@@ -231,7 +231,7 @@ def _concat_conditionings_for_batch(
231231
conditionings: List[torch.Tensor],
232232
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
233233
"""Concatenate provided conditioning tensors to one batched tensor.
234-
If tensors have different sizes then pad them with zeros and create
234+
If tensors have different sizes then pad them with zeros and create
235235
encoder_attention_mask to exclude padding from attention.
236236
237237
Args:
@@ -242,9 +242,7 @@ def _concat_conditionings_for_batch(
242242
if any(c.shape[1] != max_len for c in conditionings):
243243
encoder_attention_masks = [None] * len(conditionings)
244244
for i in range(len(conditionings)):
245-
conditionings[i], encoder_attention_masks[i] = cls._pad_conditioning(
246-
conditionings[i], max_len
247-
)
245+
conditionings[i], encoder_attention_masks[i] = cls._pad_conditioning(conditionings[i], max_len)
248246
encoder_attention_mask = torch.cat(encoder_attention_masks)
249247

250248
return torch.cat(conditionings), encoder_attention_mask

0 commit comments

Comments
 (0)