@@ -4,8 +4,15 @@
 import torch
 from transformers import AutoTokenizer, T5EncoderModel
 
-from diffusers import AutoencoderKL, ChromaPipeline, ChromaTransformer2DModel, FlowMatchEulerDiscreteScheduler
-from diffusers.utils.testing_utils import torch_device
+from diffusers import (
+    AutoencoderKL,
+    ChromaPipeline,
+    ChromaTransformer2DModel,
+    FlowMatchEulerDiscreteScheduler,
+)
+from diffusers.utils.testing_utils import (
+    torch_device,
+)
 
 from ..test_pipelines_common import (
     FluxIPAdapterTesterMixin,
@@ -22,9 +29,6 @@ class ChromaPipelineFastTests(
 ):
     pipeline_class = ChromaPipeline
     params = frozenset(["prompt", "height", "width", "guidance_scale", "prompt_embeds"])
-
-    pipeline_class = ChromaPipeline
-    params = frozenset(["prompt", "negative_prompt", "height", "width", "guidance_scale", "prompt_embeds"])
     batch_params = frozenset(["prompt"])
 
     # there is no xformers processor for Flux
@@ -39,14 +43,13 @@ def get_dummy_components(self, num_layers: int = 1, num_single_layers: int = 1):
             in_channels=4,
             num_layers=num_layers,
             num_single_layers=num_single_layers,
-            attention_head_dim=4,
-            num_attention_heads=4,
+            attention_head_dim=16,
+            num_attention_heads=2,
             joint_attention_dim=32,
             axes_dims_rope=[4, 4, 8],
-            approximator_in_factor=1,
             approximator_hidden_dim=32,
-            approximator_out_dim=64,
-            approximator_layers=5,
+            approximator_layers=1,
+            approximator_num_channels=16,
         )
 
         torch.manual_seed(0)