from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
+    DEISMultistepScheduler,
+    DPMSolverMultistepScheduler,
+    EulerDiscreteScheduler,
    StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
@@ -45,14 +48,15 @@ class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTes
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
-            block_out_channels=(32, 64),
+            block_out_channels=(4, 8),
            layers_per_block=2,
-            sample_size=32,
+            sample_size=8,
+            norm_num_groups=1,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
-            cross_attention_dim=32,
+            cross_attention_dim=8,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
@@ -63,7 +67,8 @@ def get_dummy_components(self):
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
-            block_out_channels=[32, 64],
+            block_out_channels=[4, 8],
+            norm_num_groups=1,
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
@@ -74,11 +79,11 @@ def get_dummy_components(self):
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
-            hidden_size=32,
+            hidden_size=8,
+            num_hidden_layers=2,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
-            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
@@ -108,13 +113,35 @@ def get_dummy_inputs(self, device, seed=0):
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
-            "output_type": "numpy",
+            "output_type": "np",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

+    @unittest.skip("Not necessary to test here.")
+    def test_xformers_attention_forwardGenerator_pass(self):
+        pass
+
+    def test_pipeline_different_schedulers(self):
+        pipeline = self.pipeline_class(**self.get_dummy_components())
+        inputs = self.get_dummy_inputs("cpu")
+
+        expected_image_size = (16, 16, 3)
+        for scheduler_cls in [DDIMScheduler, DEISMultistepScheduler, DPMSolverMultistepScheduler]:
+            pipeline.scheduler = scheduler_cls.from_config(pipeline.scheduler.config)
+            image = pipeline(**inputs).images[0]
+
+            shape = image.shape
+            assert shape == expected_image_size
+
+        pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config)
+
+        with self.assertRaises(ValueError):
+            # Karras schedulers are not supported
+            image = pipeline(**inputs).images[0]
+

@nightly
@require_torch_gpu
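For reference, the new test_pipeline_different_schedulers test exercises the standard diffusers pattern of swapping a pipeline's scheduler via from_config. A minimal sketch of the same pattern outside the test suite (not part of this commit; the "runwayml/stable-diffusion-v1-5" checkpoint, prompt, and parameter values are illustrative assumptions):

    import torch
    from diffusers import DPMSolverMultistepScheduler, StableDiffusionSAGPipeline

    # Load the SAG pipeline, then replace its default scheduler with a multistep
    # solver built from the existing scheduler's config (same call the test uses).
    pipe = StableDiffusionSAGPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
    ).to("cuda")
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

    # sag_scale enables self-attention guidance on top of classifier-free guidance.
    image = pipe("a photo of an astronaut", sag_scale=0.75, num_inference_steps=25).images[0]
    image.save("astronaut_sag.png")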