     ComponentsManager,
     LCMScheduler,
     ModularPipeline,
-    StableDiffusionXLPipeline,
+    StableDiffusionXLAutoBlocks,
+    StableDiffusionXLModularPipeline,
 )
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
     TEXT_TO_IMAGE_IMAGE_PARAMS,
     TEXT_TO_IMAGE_PARAMS,
 )
-from ..test_pipelines_common import (
-    IPAdapterTesterMixin,
-    PipelineLatentTesterMixin,
-    PipelineTesterMixin,
-    SDFunctionTesterMixin,
+from ..test_modular_pipelines_common import (
+    ModularIPAdapterTesterMixin,
+    ModularPipelineTesterMixin,
 )
 
 
 enable_full_determinism()
 
 
 class StableDiffusionXLModularPipelineFastTests(
-    SDFunctionTesterMixin,
-    IPAdapterTesterMixin,
-    PipelineLatentTesterMixin,
-    PipelineTesterMixin,
+    ModularIPAdapterTesterMixin,
+    ModularPipelineTesterMixin,
     unittest.TestCase,
 ):
-    pipeline_class = StableDiffusionXLPipeline
-    params = (TEXT_TO_IMAGE_PARAMS | IMAGE_INPAINTING_PARAMS) - {"guidance_scale"}
+    pipeline_class = StableDiffusionXLModularPipeline
+    pipeline_blocks_class = StableDiffusionXLAutoBlocks
+    repo = "hf-internal-testing/tiny-sdxl-modular"
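+    # NOTE: prompt_embeds / negative_prompt_embeds are presumably computed inside the
+    # text-encoder block here, so they are excluded from the direct input params below.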
+    params = (TEXT_TO_IMAGE_PARAMS | IMAGE_INPAINTING_PARAMS) - {
+        "guidance_scale",
+        "prompt_embeds",
+        "negative_prompt_embeds",
+    }
     batch_params = TEXT_TO_IMAGE_BATCH_PARAMS | IMAGE_INPAINTING_BATCH_PARAMS
     image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
-    test_layerwise_casting = False
-    test_group_offloading = False
+
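+    # Helper: build the pipeline from the auto-blocks preset, then load the
+    # pretrained components of the tiny test repo in the requested dtype.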
+    def get_pipeline(self, components_manager=None, torch_dtype=torch.float32):
+        pipeline = self.pipeline_blocks_class().init_pipeline(self.repo, components_manager=components_manager)
+        pipeline.load_default_components(torch_dtype=torch_dtype)
+        return pipeline
 
     def get_dummy_inputs(self, device, seed=0):
         if str(device).startswith("mps"):
@@ -78,7 +84,7 @@ def get_dummy_inputs(self, device, seed=0):
 
     def test_stable_diffusion_xl_euler(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
-        sd_pipe = ModularPipeline.from_pretrained("hf-internal-testing/tiny-sd-pipe")
+        sd_pipe = self.get_pipeline()
         sd_pipe = sd_pipe.to(device)
         sd_pipe.set_progress_bar_config(disable=None)
 
@@ -87,13 +93,17 @@ def test_stable_diffusion_xl_euler(self):
         image_slice = image[0, -3:, -3:, -1]
 
         assert image.shape == (1, 64, 64, 3)
-        expected_slice = np.array([0.5388, 0.5452, 0.4694, 0.4583, 0.5253, 0.4832, 0.5288, 0.5035, 0.47])
+        expected_slice = np.array(
+            [0.5966781, 0.62939394, 0.48465094, 0.51573336, 0.57593524, 0.47035995, 0.53410417, 0.51436996, 0.47313565]
+        )
 
-        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
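+        # surface the observed and expected slices in the failure message for easier debugging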
+        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, (
+            f"image_slice: {image_slice.flatten()}, expected_slice: {expected_slice.flatten()}"
+        )
 
     def test_stable_diffusion_xl_euler_lcm(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
-        sd_pipe = ModularPipeline.from_pretrained("hf-internal-testing/tiny-sd-pipe")
+        sd_pipe = self.get_pipeline()
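+        # swap the default scheduler for LCMScheduler, leaving all other components in place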
         sd_pipe.update_components(scheduler=LCMScheduler.from_config(sd_pipe.scheduler.config))
         sd_pipe = sd_pipe.to(device)
         sd_pipe.set_progress_bar_config(disable=None)
@@ -103,41 +113,23 @@ def test_stable_diffusion_xl_euler_lcm(self):
         image_slice = image[0, -3:, -3:, -1]
 
         assert image.shape == (1, 64, 64, 3)
-        expected_slice = np.array([0.4917, 0.6555, 0.4348, 0.5219, 0.7324, 0.4855, 0.5168, 0.5447, 0.5156])
-
-        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
-
-    def test_stable_diffusion_xl_euler_lcm_custom_timesteps(self):
-        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
-        sd_pipe = ModularPipeline.from_pretrained("hf-internal-testing/tiny-sd-pipe")
-        sd_pipe.update_components(scheduler=LCMScheduler.from_config(sd_pipe.scheduler.config))
-        sd_pipe = sd_pipe.to(device)
-        sd_pipe.set_progress_bar_config(disable=None)
-
-        inputs = self.get_dummy_inputs(device)
-        del inputs["num_inference_steps"]
-        inputs["timesteps"] = [999, 499]
-        image = sd_pipe(**inputs, output="images")
-        image_slice = image[0, -3:, -3:, -1]
-
-        assert image.shape == (1, 64, 64, 3)
-        expected_slice = np.array([0.4917, 0.6555, 0.4348, 0.5219, 0.7324, 0.4855, 0.5168, 0.5447, 0.5156])
+        expected_slice = np.array(
+            [0.6880376, 0.6511651, 0.587455, 0.61763, 0.55432945, 0.52064973, 0.5783733, 0.54915607, 0.5460011]
+        )
 
-        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
+        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, (
+            f"image_slice: {image_slice.flatten()}, expected_slice: {expected_slice.flatten()}"
+        )
 
     @require_torch_accelerator
     def test_stable_diffusion_xl_offloads(self):
         pipes = []
-        sd_pipe = ModularPipeline.from_pretrained(
-            "hf-internal-testing/tiny-sd-pipe",
-        ).to(torch_device)
+        sd_pipe = self.get_pipeline().to(torch_device)
         pipes.append(sd_pipe)
 
         cm = ComponentsManager()
         cm.enable_auto_cpu_offload(device=torch_device)
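+        # with auto CPU offload enabled, the ComponentsManager keeps idle components
+        # on CPU and moves them to the accelerator only while they are in use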
-        sd_pipe = ModularPipeline.from_pretrained("hf-internal-testing/tiny-sd-pipe", components_manager=cm).to(
-            torch_device
-        )
+        sd_pipe = self.get_pipeline(components_manager=cm)
         pipes.append(sd_pipe)
 
         image_slices = []
@@ -148,21 +140,20 @@ def test_stable_diffusion_xl_offloads(self):
             image_slices.append(image[0, -3:, -3:, -1].flatten())
 
         assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
-        assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
 
     def test_stable_diffusion_xl_multi_prompts(self):
-        sd_pipe = ModularPipeline.from_pretrained("hf-internal-testing/tiny-sd-pipe").to(torch_device)
+        sd_pipe = self.get_pipeline().to(torch_device)
 
         # forward with single prompt
         inputs = self.get_dummy_inputs(torch_device)
         output = sd_pipe(**inputs, output="images")
-        image_slice_1 = output.images[0, -3:, -3:, -1]
+        image_slice_1 = output[0, -3:, -3:, -1]
 
         # forward with same prompt duplicated
         inputs = self.get_dummy_inputs(torch_device)
         inputs["prompt_2"] = inputs["prompt"]
         output = sd_pipe(**inputs, output="images")
-        image_slice_2 = output.images[0, -3:, -3:, -1]
+        image_slice_2 = output[0, -3:, -3:, -1]
 
         # ensure the results are equal
         assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@@ -171,7 +162,7 @@ def test_stable_diffusion_xl_multi_prompts(self):
         inputs = self.get_dummy_inputs(torch_device)
         inputs["prompt_2"] = "different prompt"
         output = sd_pipe(**inputs, output="images")
-        image_slice_3 = output.images[0, -3:, -3:, -1]
+        image_slice_3 = output[0, -3:, -3:, -1]
 
         # ensure the results are not equal
         assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4
@@ -180,14 +171,14 @@ def test_stable_diffusion_xl_multi_prompts(self):
         inputs = self.get_dummy_inputs(torch_device)
         inputs["negative_prompt"] = "negative prompt"
         output = sd_pipe(**inputs, output="images")
-        image_slice_1 = output.images[0, -3:, -3:, -1]
+        image_slice_1 = output[0, -3:, -3:, -1]
 
         # forward with same negative_prompt duplicated
         inputs = self.get_dummy_inputs(torch_device)
         inputs["negative_prompt"] = "negative prompt"
         inputs["negative_prompt_2"] = inputs["negative_prompt"]
         output = sd_pipe(**inputs, output="images")
-        image_slice_2 = output.images[0, -3:, -3:, -1]
+        image_slice_2 = output[0, -3:, -3:, -1]
 
         # ensure the results are equal
         assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@@ -197,15 +188,14 @@ def test_stable_diffusion_xl_multi_prompts(self):
         inputs["negative_prompt"] = "negative prompt"
         inputs["negative_prompt_2"] = "different negative prompt"
         output = sd_pipe(**inputs, output="images")
-        image_slice_3 = output.images[0, -3:, -3:, -1]
+        image_slice_3 = output[0, -3:, -3:, -1]
 
         # ensure the results are not equal
         assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4
 
     def test_stable_diffusion_xl_negative_conditions(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
-        sd_pipe = ModularPipeline.from_pretrained("hf-internal-testing/tiny-sd-pipe").to(torch_device)
-        sd_pipe = sd_pipe.to(device)
+        sd_pipe = self.get_pipeline().to(device)
         sd_pipe.set_progress_bar_config(disable=None)
 
         inputs = self.get_dummy_inputs(device)
@@ -225,21 +215,24 @@ def test_stable_diffusion_xl_negative_conditions(self):
 
     def test_stable_diffusion_xl_save_from_pretrained(self):
         pipes = []
-        sd_pipe = ModularPipeline.from_pretrained("hf-internal-testing/tiny-sd-pipe").to(torch_device)
+        sd_pipe = self.get_pipeline().to(torch_device)
         pipes.append(sd_pipe)
 
         with tempfile.TemporaryDirectory() as tmpdirname:
             sd_pipe.save_pretrained(tmpdirname)
             sd_pipe = ModularPipeline.from_pretrained(tmpdirname).to(torch_device)
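+            # from_pretrained restores the pipeline definition only; the model
+            # components themselves still have to be loaded explicitly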
+            sd_pipe.load_default_components(torch_dtype=torch.float32)
+            sd_pipe.to(torch_device)
             pipes.append(sd_pipe)
 
         image_slices = []
         for pipe in pipes:
-            pipe.unet.set_default_attn_processor()
-
             inputs = self.get_dummy_inputs(torch_device)
             image = pipe(**inputs, output="images")
 
             image_slices.append(image[0, -3:, -3:, -1].flatten())
 
         assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
+
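+    # batched generation and repeated single-sample generation should agree within tolerance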
+    def test_inference_batch_single_identical(self):
+        super().test_inference_batch_single_identical(expected_max_diff=3e-3)