
Commit aeef340

fix import; remove lumina2 integration test

1 parent: 75e5c31

2 files changed: +2 -67 lines


src/diffusers/models/transformers/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -21,6 +21,7 @@
 from .transformer_flux import FluxTransformer2DModel
 from .transformer_hunyuan_video import HunyuanVideoTransformer3DModel
 from .transformer_ltx import LTXVideoTransformer3DModel
+from .transformer_lumina2 import Lumina2Transformer2DModel
 from .transformer_mochi import MochiTransformer3DModel
 from .transformer_sd3 import SD3Transformer2DModel
 from .transformer_temporal import TransformerTemporalModel
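
For context, the added line re-exports the Lumina2 transformer from the subpackage, which is the import the commit message refers to. A minimal sketch of the import this enables (assuming a diffusers build that ships transformer_lumina2):

# Sketch only: checks that the re-export added above resolves.
# Assumes a diffusers build that includes transformer_lumina2.
from diffusers.models.transformers import Lumina2Transformer2DModel

# The class is also reachable from the package root, which is the path
# the test file below uses:
# from diffusers import Lumina2Transformer2DModel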

tests/pipelines/lumina2/test_pipeline_lumina2.py

Lines changed: 1 addition & 67 deletions
@@ -1,8 +1,6 @@
-import gc
 import unittest
 
 import numpy as np
-import pytest
 import torch
 from transformers import AutoTokenizer, GemmaConfig, GemmaForCausalLM
 
@@ -12,12 +10,7 @@
     Lumina2Text2ImgPipeline,
     Lumina2Transformer2DModel,
 )
-from diffusers.utils.testing_utils import (
-    nightly,
-    numpy_cosine_similarity_distance,
-    require_big_gpu_with_torch_cuda,
-    torch_device,
-)
+from diffusers.utils.testing_utils import torch_device
 
 from ..test_pipelines_common import PipelineTesterMixin
 
@@ -151,62 +144,3 @@ def test_lumina_prompt_embeds(self):
 
         max_diff = np.abs(output_with_prompt - output_with_embeds).max()
         assert max_diff < 1e-4
-
-
-@nightly
-@require_big_gpu_with_torch_cuda
-@pytest.mark.big_gpu_with_torch_cuda
-class Lumina2Text2ImgPipelineSlowTests(unittest.TestCase):
-    pipeline_class = Lumina2Text2ImgPipeline
-    repo_id = "Alpha-VLLM/Lumina-Image-2.0"
-
-    def setUp(self):
-        super().setUp()
-        gc.collect()
-        torch.cuda.empty_cache()
-
-    def tearDown(self):
-        super().tearDown()
-        gc.collect()
-        torch.cuda.empty_cache()
-
-    def get_inputs(self, device, seed=0):
-        if str(device).startswith("mps"):
-            generator = torch.manual_seed(seed)
-        else:
-            generator = torch.Generator(device="cpu").manual_seed(seed)
-
-        return {
-            "prompt": "A photo of a cat",
-            "num_inference_steps": 2,
-            "guidance_scale": 5.0,
-            "output_type": "np",
-            "generator": generator,
-        }
-
-    def test_lumina_inference(self):
-        pipe = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.bfloat16)
-        pipe.enable_model_cpu_offload()
-
-        inputs = self.get_inputs(torch_device)
-        image = pipe(**inputs).images[0]
-        image_slice = image[0, :10, :10]
-        expected_slice = np.array(
-            [
-                [0.17773438, 0.18554688, 0.22070312],
-                [0.046875, 0.06640625, 0.10351562],
-                [0.0, 0.0, 0.02148438],
-                [0.0, 0.0, 0.0],
-                [0.0, 0.0, 0.0],
-                [0.0, 0.0, 0.0],
-                [0.0, 0.0, 0.0],
-                [0.0, 0.0, 0.0],
-                [0.0, 0.0, 0.0],
-                [0.0, 0.0, 0.0],
-            ],
-            dtype=np.float32,
-        )
-
-        max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten())
-
-        assert max_diff < 1e-4
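
For reference, the deleted Lumina2Text2ImgPipelineSlowTests class exercised full-pipeline inference against the Hub checkpoint. A minimal ad-hoc sketch of the same check, reconstructed from the removed test (it downloads the Alpha-VLLM/Lumina-Image-2.0 checkpoint and needs a large CUDA GPU):

# Ad-hoc sketch of the coverage the removed nightly test provided;
# not part of the test suite.
import torch
from diffusers import Lumina2Text2ImgPipeline

pipe = Lumina2Text2ImgPipeline.from_pretrained(
    "Alpha-VLLM/Lumina-Image-2.0", torch_dtype=torch.bfloat16
)
pipe.enable_model_cpu_offload()  # offload weights to fit on a single GPU

image = pipe(
    prompt="A photo of a cat",
    num_inference_steps=2,
    guidance_scale=5.0,
    output_type="np",
    generator=torch.Generator(device="cpu").manual_seed(0),
).images[0]
print(image.shape)  # numpy image array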
