Skip to content

Commit 2cd908e

Browse files
committed
feedback
1 parent 0dc0888 commit 2cd908e

File tree

2 files changed

+0
-93
lines changed

2 files changed

+0
-93
lines changed

src/diffusers/__init__.py

Lines changed: 0 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -160,15 +160,6 @@
160160
"apply_pyramid_attention_broadcast",
161161
]
162162
)
163-
_import_structure["image_processor"] = [
164-
"IPAdapterMaskProcessor",
165-
"PixArtImageProcessor",
166-
"VaeImageProcessor",
167-
"VaeImageProcessorLDM3D",
168-
]
169-
_import_structure["video_processor"] = [
170-
"VideoProcessor",
171-
]
172163
_import_structure["models"].extend(
173164
[
174165
"AllegroTransformer3DModel",
@@ -834,12 +825,6 @@
834825
apply_layer_skip,
835826
apply_pyramid_attention_broadcast,
836827
)
837-
from .image_processor import (
838-
IPAdapterMaskProcessor,
839-
PixArtImageProcessor,
840-
VaeImageProcessor,
841-
VaeImageProcessorLDM3D,
842-
)
843828
from .models import (
844829
AllegroTransformer3DModel,
845830
AsymmetricAutoencoderKL,
@@ -996,8 +981,6 @@
996981
VQDiffusionScheduler,
997982
)
998983
from .training_utils import EMAModel
999-
from .video_processor import VideoProcessor
1000-
1001984
try:
1002985
if not (is_torch_available() and is_scipy_available()):
1003986
raise OptionalDependencyNotAvailable()

src/diffusers/utils/dummy_pt_objects.py

Lines changed: 0 additions & 76 deletions
Original file line numberDiff line numberDiff line change
@@ -242,67 +242,6 @@ def apply_layer_skip(*args, **kwargs):
242242
def apply_pyramid_attention_broadcast(*args, **kwargs):
243243
requires_backends(apply_pyramid_attention_broadcast, ["torch"])
244244

245-
246-
class IPAdapterMaskProcessor(metaclass=DummyObject):
247-
_backends = ["torch"]
248-
249-
def __init__(self, *args, **kwargs):
250-
requires_backends(self, ["torch"])
251-
252-
@classmethod
253-
def from_config(cls, *args, **kwargs):
254-
requires_backends(cls, ["torch"])
255-
256-
@classmethod
257-
def from_pretrained(cls, *args, **kwargs):
258-
requires_backends(cls, ["torch"])
259-
260-
261-
class PixArtImageProcessor(metaclass=DummyObject):
262-
_backends = ["torch"]
263-
264-
def __init__(self, *args, **kwargs):
265-
requires_backends(self, ["torch"])
266-
267-
@classmethod
268-
def from_config(cls, *args, **kwargs):
269-
requires_backends(cls, ["torch"])
270-
271-
@classmethod
272-
def from_pretrained(cls, *args, **kwargs):
273-
requires_backends(cls, ["torch"])
274-
275-
276-
class VaeImageProcessor(metaclass=DummyObject):
277-
_backends = ["torch"]
278-
279-
def __init__(self, *args, **kwargs):
280-
requires_backends(self, ["torch"])
281-
282-
@classmethod
283-
def from_config(cls, *args, **kwargs):
284-
requires_backends(cls, ["torch"])
285-
286-
@classmethod
287-
def from_pretrained(cls, *args, **kwargs):
288-
requires_backends(cls, ["torch"])
289-
290-
291-
class VaeImageProcessorLDM3D(metaclass=DummyObject):
292-
_backends = ["torch"]
293-
294-
def __init__(self, *args, **kwargs):
295-
requires_backends(self, ["torch"])
296-
297-
@classmethod
298-
def from_config(cls, *args, **kwargs):
299-
requires_backends(cls, ["torch"])
300-
301-
@classmethod
302-
def from_pretrained(cls, *args, **kwargs):
303-
requires_backends(cls, ["torch"])
304-
305-
306245
class AllegroTransformer3DModel(metaclass=DummyObject):
307246
_backends = ["torch"]
308247

@@ -2478,18 +2417,3 @@ def from_config(cls, *args, **kwargs):
24782417
@classmethod
24792418
def from_pretrained(cls, *args, **kwargs):
24802419
requires_backends(cls, ["torch"])
2481-
2482-
2483-
class VideoProcessor(metaclass=DummyObject):
2484-
_backends = ["torch"]
2485-
2486-
def __init__(self, *args, **kwargs):
2487-
requires_backends(self, ["torch"])
2488-
2489-
@classmethod
2490-
def from_config(cls, *args, **kwargs):
2491-
requires_backends(cls, ["torch"])
2492-
2493-
@classmethod
2494-
def from_pretrained(cls, *args, **kwargs):
2495-
requires_backends(cls, ["torch"])

0 commit comments

Comments (0)