Skip to content

Commit 95c8148

Browse files
committed
make fix-copies
1 parent 2436b3f commit 95c8148

File tree

2 files changed

+19
-23
lines changed

src/diffusers/utils/dummy_pt_objects.py

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,25 @@
22
from ..utils import DummyObject, requires_backends
33

44

5+
class PyramidAttentionBroadcastConfig(metaclass=DummyObject):
    """Import-time placeholder for ``PyramidAttentionBroadcastConfig`` when ``torch`` is absent.

    Auto-generated (``make fix-copies``) dummy: instantiating it or calling
    its alternate constructors raises an informative ImportError via
    ``requires_backends`` naming the missing "torch" backend, instead of a
    bare AttributeError/ImportError at import time.
    """

    # Backend(s) whose absence this dummy reports.
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
18+
19+
20+
def apply_pyramid_attention_broadcast(*args, **kwargs):
    """Import-time placeholder for ``apply_pyramid_attention_broadcast`` when ``torch`` is absent.

    Calling it raises an informative ImportError via ``requires_backends``
    naming the missing "torch" backend.
    """
    requires_backends(apply_pyramid_attention_broadcast, ["torch"])
524
class AllegroTransformer3DModel(metaclass=DummyObject):
625
_backends = ["torch"]
726

src/diffusers/utils/dummy_torch_and_transformers_objects.py

Lines changed: 0 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -1277,21 +1277,6 @@ def from_pretrained(cls, *args, **kwargs):
12771277
requires_backends(cls, ["torch", "transformers"])
12781278

12791279

1280-
class PyramidAttentionBroadcastConfig(metaclass=DummyObject):
1281-
_backends = ["torch", "transformers"]
1282-
1283-
def __init__(self, *args, **kwargs):
1284-
requires_backends(self, ["torch", "transformers"])
1285-
1286-
@classmethod
1287-
def from_config(cls, *args, **kwargs):
1288-
requires_backends(cls, ["torch", "transformers"])
1289-
1290-
@classmethod
1291-
def from_pretrained(cls, *args, **kwargs):
1292-
requires_backends(cls, ["torch", "transformers"])
1293-
1294-
12951280
class ReduxImageEncoder(metaclass=DummyObject):
12961281
_backends = ["torch", "transformers"]
12971282

@@ -2550,11 +2535,3 @@ def from_config(cls, *args, **kwargs):
25502535
@classmethod
25512536
def from_pretrained(cls, *args, **kwargs):
25522537
requires_backends(cls, ["torch", "transformers"])
2553-
2554-
2555-
def apply_pyramid_attention_broadcast(*args, **kwargs):
2556-
requires_backends(apply_pyramid_attention_broadcast, ["torch", "transformers"])
2557-
2558-
2559-
def apply_pyramid_attention_broadcast_on_module(*args, **kwargs):
2560-
requires_backends(apply_pyramid_attention_broadcast_on_module, ["torch", "transformers"])

0 commit comments

Comments
 (0)