Skip to content

Commit 0d26c14

Browse files
committed
feedback
1 parent b28f3eb commit 0d26c14

File tree

2 files changed

+0
-93
lines changed

2 files changed

+0
-93
lines changed

src/diffusers/__init__.py

Lines changed: 0 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -160,15 +160,6 @@
160160
"apply_pyramid_attention_broadcast",
161161
]
162162
)
163-
_import_structure["image_processor"] = [
164-
"IPAdapterMaskProcessor",
165-
"PixArtImageProcessor",
166-
"VaeImageProcessor",
167-
"VaeImageProcessorLDM3D",
168-
]
169-
_import_structure["video_processor"] = [
170-
"VideoProcessor",
171-
]
172163
_import_structure["models"].extend(
173164
[
174165
"AllegroTransformer3DModel",
@@ -840,12 +831,6 @@
840831
apply_layer_skip,
841832
apply_pyramid_attention_broadcast,
842833
)
843-
from .image_processor import (
844-
IPAdapterMaskProcessor,
845-
PixArtImageProcessor,
846-
VaeImageProcessor,
847-
VaeImageProcessorLDM3D,
848-
)
849834
from .models import (
850835
AllegroTransformer3DModel,
851836
AsymmetricAutoencoderKL,
@@ -1005,8 +990,6 @@
1005990
VQDiffusionScheduler,
1006991
)
1007992
from .training_utils import EMAModel
1008-
from .video_processor import VideoProcessor
1009-
1010993
try:
1011994
if not (is_torch_available() and is_scipy_available()):
1012995
raise OptionalDependencyNotAvailable()

src/diffusers/utils/dummy_pt_objects.py

Lines changed: 0 additions & 76 deletions
Original file line numberDiff line numberDiff line change
@@ -242,67 +242,6 @@ def apply_layer_skip(*args, **kwargs):
242242
def apply_pyramid_attention_broadcast(*args, **kwargs):
243243
requires_backends(apply_pyramid_attention_broadcast, ["torch"])
244244

245-
246-
class IPAdapterMaskProcessor(metaclass=DummyObject):
247-
_backends = ["torch"]
248-
249-
def __init__(self, *args, **kwargs):
250-
requires_backends(self, ["torch"])
251-
252-
@classmethod
253-
def from_config(cls, *args, **kwargs):
254-
requires_backends(cls, ["torch"])
255-
256-
@classmethod
257-
def from_pretrained(cls, *args, **kwargs):
258-
requires_backends(cls, ["torch"])
259-
260-
261-
class PixArtImageProcessor(metaclass=DummyObject):
262-
_backends = ["torch"]
263-
264-
def __init__(self, *args, **kwargs):
265-
requires_backends(self, ["torch"])
266-
267-
@classmethod
268-
def from_config(cls, *args, **kwargs):
269-
requires_backends(cls, ["torch"])
270-
271-
@classmethod
272-
def from_pretrained(cls, *args, **kwargs):
273-
requires_backends(cls, ["torch"])
274-
275-
276-
class VaeImageProcessor(metaclass=DummyObject):
277-
_backends = ["torch"]
278-
279-
def __init__(self, *args, **kwargs):
280-
requires_backends(self, ["torch"])
281-
282-
@classmethod
283-
def from_config(cls, *args, **kwargs):
284-
requires_backends(cls, ["torch"])
285-
286-
@classmethod
287-
def from_pretrained(cls, *args, **kwargs):
288-
requires_backends(cls, ["torch"])
289-
290-
291-
class VaeImageProcessorLDM3D(metaclass=DummyObject):
292-
_backends = ["torch"]
293-
294-
def __init__(self, *args, **kwargs):
295-
requires_backends(self, ["torch"])
296-
297-
@classmethod
298-
def from_config(cls, *args, **kwargs):
299-
requires_backends(cls, ["torch"])
300-
301-
@classmethod
302-
def from_pretrained(cls, *args, **kwargs):
303-
requires_backends(cls, ["torch"])
304-
305-
306245
class AllegroTransformer3DModel(metaclass=DummyObject):
307246
_backends = ["torch"]
308247

@@ -2523,18 +2462,3 @@ def from_config(cls, *args, **kwargs):
25232462
@classmethod
25242463
def from_pretrained(cls, *args, **kwargs):
25252464
requires_backends(cls, ["torch"])
2526-
2527-
2528-
class VideoProcessor(metaclass=DummyObject):
2529-
_backends = ["torch"]
2530-
2531-
def __init__(self, *args, **kwargs):
2532-
requires_backends(self, ["torch"])
2533-
2534-
@classmethod
2535-
def from_config(cls, *args, **kwargs):
2536-
requires_backends(cls, ["torch"])
2537-
2538-
@classmethod
2539-
def from_pretrained(cls, *args, **kwargs):
2540-
requires_backends(cls, ["torch"])

0 commit comments

Comments (0)