Skip to content

Commit 3107fe0

Browse files
committed
fix many type hint errors
1 parent 68d7db3 commit 3107fe0

9 files changed

+10
-10
lines changed

src/diffusers/models/transformers/auraflow_transformer_2d.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -472,7 +472,7 @@ def forward(
472472
timestep: torch.LongTensor = None,
473473
attention_kwargs: Optional[Dict[str, Any]] = None,
474474
return_dict: bool = True,
475-
) -> Union[torch.Tensor, Transformer2DModelOutput]:
475+
) -> Union[Tuple[torch.Tensor], Transformer2DModelOutput]:
476476
if attention_kwargs is not None:
477477
attention_kwargs = attention_kwargs.copy()
478478
lora_scale = attention_kwargs.pop("scale", 1.0)

src/diffusers/models/transformers/cogvideox_transformer_3d.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -441,7 +441,7 @@ def forward(
441441
image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
442442
attention_kwargs: Optional[Dict[str, Any]] = None,
443443
return_dict: bool = True,
444-
) -> Union[torch.Tensor, Transformer2DModelOutput]:
444+
) -> Union[Tuple[torch.Tensor], Transformer2DModelOutput]:
445445
if attention_kwargs is not None:
446446
attention_kwargs = attention_kwargs.copy()
447447
lora_scale = attention_kwargs.pop("scale", 1.0)

src/diffusers/models/transformers/consisid_transformer_3d.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -691,7 +691,7 @@ def forward(
691691
id_cond: Optional[torch.Tensor] = None,
692692
id_vit_hidden: Optional[torch.Tensor] = None,
693693
return_dict: bool = True,
694-
) -> Union[torch.Tensor, Transformer2DModelOutput]:
694+
) -> Union[Tuple[torch.Tensor], Transformer2DModelOutput]:
695695
if attention_kwargs is not None:
696696
attention_kwargs = attention_kwargs.copy()
697697
lora_scale = attention_kwargs.pop("scale", 1.0)

src/diffusers/models/transformers/lumina_nextdit2d.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15-
from typing import Any, Dict, Optional, Union
15+
from typing import Any, Dict, Optional, Union, Tuple
1616

1717
import torch
1818
import torch.nn as nn
@@ -297,7 +297,7 @@ def forward(
297297
image_rotary_emb: torch.Tensor,
298298
cross_attention_kwargs: Dict[str, Any] = None,
299299
return_dict=True,
300-
) -> Union[torch.Tensor, Transformer2DModelOutput]:
300+
) -> Union[Tuple[torch.Tensor], Transformer2DModelOutput]:
301301
"""
302302
Forward pass of LuminaNextDiT.
303303

src/diffusers/models/transformers/transformer_bria.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -588,7 +588,7 @@ def forward(
588588
return_dict: bool = True,
589589
controlnet_block_samples=None,
590590
controlnet_single_block_samples=None,
591-
) -> Union[torch.FloatTensor, Transformer2DModelOutput]:
591+
) -> Union[Tuple[torch.Tensor], Transformer2DModelOutput]:
592592
"""
593593
The [`BriaTransformer2DModel`] forward method.
594594

src/diffusers/models/transformers/transformer_cogview3plus.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -293,7 +293,7 @@ def forward(
293293
target_size: torch.Tensor,
294294
crop_coords: torch.Tensor,
295295
return_dict: bool = True,
296-
) -> Union[torch.Tensor, Transformer2DModelOutput]:
296+
) -> Union[Tuple[torch.Tensor], Transformer2DModelOutput]:
297297
"""
298298
The [`CogView3PlusTransformer2DModel`] forward method.
299299

src/diffusers/models/transformers/transformer_cogview4.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -717,7 +717,7 @@ def forward(
717717
image_rotary_emb: Optional[
718718
Union[Tuple[torch.Tensor, torch.Tensor], List[Tuple[torch.Tensor, torch.Tensor]]]
719719
] = None,
720-
) -> Union[torch.Tensor, Transformer2DModelOutput]:
720+
) -> Union[Tuple[torch.Tensor], Transformer2DModelOutput]:
721721
if attention_kwargs is not None:
722722
attention_kwargs = attention_kwargs.copy()
723723
lora_scale = attention_kwargs.pop("scale", 1.0)

src/diffusers/models/transformers/transformer_hidream_image.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -786,7 +786,7 @@ def forward(
786786
attention_kwargs: Optional[Dict[str, Any]] = None,
787787
return_dict: bool = True,
788788
**kwargs,
789-
) -> Union[torch.Tensor, Transformer2DModelOutput]:
789+
) -> Union[Tuple[torch.Tensor], Transformer2DModelOutput]:
790790
encoder_hidden_states = kwargs.get("encoder_hidden_states", None)
791791

792792
if encoder_hidden_states is not None:

src/diffusers/models/transformers/transformer_hunyuan_video_framepack.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -216,7 +216,7 @@ def forward(
216216
indices_latents_history_4x: Optional[torch.Tensor] = None,
217217
attention_kwargs: Optional[Dict[str, Any]] = None,
218218
return_dict: bool = True,
219-
) -> Union[torch.Tensor, Transformer2DModelOutput]:
219+
) -> Union[Tuple[torch.Tensor], Transformer2DModelOutput]:
220220
if attention_kwargs is not None:
221221
attention_kwargs = attention_kwargs.copy()
222222
lora_scale = attention_kwargs.pop("scale", 1.0)

0 commit comments

Comments (0)