|
36 | 36 | "configuration_utils": ["ConfigMixin"], |
37 | 37 | "guiders": [], |
38 | 38 | "hooks": [], |
39 | | - "image_processor": [ |
40 | | - "VaeImageProcessor", |
41 | | - "VaeImageProcessorLDM3D", |
42 | | - "PixArtImageProcessor", |
43 | | - "IPAdapterMaskProcessor", |
44 | | - ], |
45 | 39 | "video_processor": [ |
46 | 40 | "VideoProcessor", |
47 | 41 | ], |
|
169 | 163 | "apply_pyramid_attention_broadcast", |
170 | 164 | ] |
171 | 165 | ) |
| 166 | + _import_structure["image_processor"] = [ |
| 167 | + "IPAdapterMaskProcessor", |
| 168 | + "PixArtImageProcessor", |
| 169 | + "VaeImageProcessor", |
| 170 | + "VaeImageProcessorLDM3D", |
| 171 | + ] |
172 | 172 | _import_structure["models"].extend( |
173 | 173 | [ |
174 | 174 | "AllegroTransformer3DModel", |
|
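The hunk above moves the `image_processor` entries out of the unconditional import table and registers them only once the torch check has passed. A minimal sketch of the surrounding guard, assuming the optional-dependency pattern (`OptionalDependencyNotAvailable`, `dummy_pt_objects`) that diffusers uses elsewhere in this file but which is not shown in the diff:

```python
# Hypothetical sketch of the guard around the added hunk; names marked below
# are assumptions based on the library's usual optional-dependency pattern.
from .utils import OptionalDependencyNotAvailable, is_torch_available  # assumed helpers

_import_structure = {}  # in the real file this dict is already partly populated above

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Without torch, placeholder objects are registered instead (assumed module name).
    from .utils import dummy_pt_objects  # noqa: F401

    _import_structure["utils.dummy_pt_objects"] = [
        name for name in dir(dummy_pt_objects) if not name.startswith("_")
    ]
else:
    # With torch available, the image processors join the lazy-import table,
    # matching the lines added in this diff.
    _import_structure["image_processor"] = [
        "IPAdapterMaskProcessor",
        "PixArtImageProcessor",
        "VaeImageProcessor",
        "VaeImageProcessorLDM3D",
    ]
```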
840 | 840 | apply_layer_skip, |
841 | 841 | apply_pyramid_attention_broadcast, |
842 | 842 | ) |
| 843 | + from .image_processor import ( |
| 844 | + IPAdapterMaskProcessor, |
| 845 | + PixArtImageProcessor, |
| 846 | + VaeImageProcessor, |
| 847 | + VaeImageProcessorLDM3D, |
| 848 | + ) |
843 | 849 | from .models import ( |
844 | 850 | AllegroTransformer3DModel, |
845 | 851 | AsymmetricAutoencoderKL, |
|
958 | 964 | ScoreSdeVePipeline, |
959 | 965 | StableDiffusionMixin, |
960 | 966 | ) |
961 | | - from .image_processor import ( |
962 | | - VaeImageProcessor, |
963 | | - VaeImageProcessorLDM3D, |
964 | | - PixArtImageProcessor, |
965 | | - IPAdapterMaskProcessor, |
966 | | - ) |
967 | | - from .video_processor import VideoProcessor |
968 | 967 | from .quantizers import DiffusersQuantizer |
969 | 968 | from .schedulers import ( |
970 | 969 | AmusedScheduler, |
|
1006 | 1005 | VQDiffusionScheduler, |
1007 | 1006 | ) |
1008 | 1007 | from .training_utils import EMAModel |
| 1008 | + from .video_processor import VideoProcessor |
1009 | 1009 |
|
1010 | 1010 | try: |
1011 | 1011 | if not (is_torch_available() and is_scipy_available()): |
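Taken together, the relocated entries feed the same two halves of the lazy-import machinery: `_import_structure` drives runtime name resolution, while the direct `from .image_processor import ...` and `from .video_processor import VideoProcessor` lines added under the type-checking branch keep IDEs and type checkers aware of the symbols. A rough sketch of how the file's tail wires this up, assuming the usual `_LazyModule` helper from `.utils` (not part of this diff):

```python
# Hypothetical sketch of the end of diffusers/__init__.py; _LazyModule and its
# signature are assumptions based on the library's standard lazy-import pattern.
import sys
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Type checkers follow the eager imports edited above, e.g.
    # `from .image_processor import VaeImageProcessor, ...`.
    pass
else:
    from .utils import _LazyModule  # assumed helper

    # At runtime the module object is replaced by a lazy proxy that resolves
    # names from the _import_structure table on first attribute access.
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,  # the dict built up in the hunks above
        module_spec=__spec__,
    )
```

The practical effect of keeping the image processors behind the torch guard is that `import diffusers` stays cheap and does not pull in torch-dependent modules when torch is absent.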
|