Commit 75f2598

Commit message: up
Parent commit: 35744eb

File tree: 3 files changed (+29, -8 lines)

src/diffusers/models/transformers/transformer_qwenimage.py

Lines changed: 2 additions & 1 deletion
@@ -251,9 +251,10 @@ def _compute_video_freqs(self, frame, height, width, idx=0):

         freqs = torch.cat([freqs_frame, freqs_height, freqs_width], dim=-1).reshape(seq_lens, -1)
         freqs = freqs.clone().contiguous()
-
+
         return freqs

+
 class QwenDoubleStreamAttnProcessor2_0:
     """
     Attention processor for Qwen double-stream architecture, matching DoubleStreamLayerMegatron logic. This processor
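
The hunk above only touches whitespace around the return, but the clone-before-return idiom it preserves is worth noting: assuming _compute_video_freqs is memoized (which the freqs.clone().contiguous() line strongly suggests), returning a clone hands each caller an independent, dense tensor instead of a view into the cached one. A minimal sketch of the pattern, with hypothetical names (_cached_freqs and compute_freqs are illustrative, not part of this module):

import functools

import torch


@functools.lru_cache(maxsize=None)
def _cached_freqs(seq_len: int) -> torch.Tensor:
    # Hypothetical stand-in for an expensive rotary-frequency computation.
    return torch.arange(seq_len, dtype=torch.float32)


def compute_freqs(seq_len: int) -> torch.Tensor:
    freqs = _cached_freqs(seq_len)
    # Without .clone(), every caller would share the cached tensor, so an
    # in-place edit (e.g. freqs.mul_(2)) would corrupt later cache hits;
    # .contiguous() additionally guarantees a dense layout after reshapes.
    return freqs.clone().contiguous()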

src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py

Lines changed: 1 addition & 1 deletion
@@ -258,7 +258,7 @@ def _get_qwen_prompt_embeds(
         template = self.prompt_template_encode
         drop_idx = self.prompt_template_encode_start_idx
         txt = [template.format(e) for e in prompt]
-
+
         model_inputs = self.processor(
             text=txt,
             images=image,
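
For orientation: the txt line in this hunk wraps each raw prompt in the pipeline's chat template before the processor tokenizes text and image together, and drop_idx records how many leading template tokens to strip from the encoder output afterwards. A rough sketch of that flow, with a made-up template string and index (the real prompt_template_encode and start index are attributes of the pipeline, not the values shown here):

# Hypothetical template and start index, for illustration only.
template = "<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n"
drop_idx = 3

prompt = ["turn the sky pink", "add a red hat"]
txt = [template.format(e) for e in prompt]  # one templated string per prompt

# txt is then passed with the edit image to the processor, and the first
# drop_idx hidden-state positions are discarded so only the user prompt,
# not the template scaffolding, conditions the transformer.
print(txt[0])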

tests/pipelines/qwenimage/test_qwenimage_edit.py

Lines changed: 26 additions & 6 deletions
@@ -13,8 +13,9 @@
 # limitations under the License.

 import unittest
-import pytest
+
 import numpy as np
+import pytest
 import torch
 from PIL import Image
 from transformers import Qwen2_5_VLConfig, Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer, Qwen2VLProcessor
@@ -27,7 +28,7 @@
 )
 from diffusers.utils.testing_utils import enable_full_determinism, torch_device

-from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
+from ..pipeline_params import TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin, to_np


@@ -57,7 +58,7 @@ class QwenImageEditPipelineFastTests(PipelineTesterMixin, unittest.TestCase):

     def get_dummy_components(self):
         tiny_ckpt_id = "hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration"
-
+
         torch.manual_seed(0)
         transformer = QwenImageTransformer2DModel(
             patch_size=2,
@@ -162,8 +163,27 @@ def test_inference(self):
         self.assertEqual(generated_image.shape, (3, 32, 32))

         expected_slice = torch.tensor(
-            [[0.5637, 0.6341, 0.6001, 0.5620, 0.5794, 0.5498, 0.5757, 0.6389, 0.4174,
-            0.3597, 0.5649, 0.4894, 0.4969, 0.5255, 0.4083, 0.4986]])
+            [
+                [
+                    0.5637,
+                    0.6341,
+                    0.6001,
+                    0.5620,
+                    0.5794,
+                    0.5498,
+                    0.5757,
+                    0.6389,
+                    0.4174,
+                    0.3597,
+                    0.5649,
+                    0.4894,
+                    0.4969,
+                    0.5255,
+                    0.4083,
+                    0.4986,
+                ]
+            ]
+        )
         # fmt: on

         generated_slice = generated_image.flatten()
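
On the hunk above: the change is purely formatting (the inline 1x16 expected_slice is expanded to one value per line, with the # fmt: on context line implying an # fmt: off above the tensor). For context, a self-contained sketch of the comparison pattern the test appears to use; the head-and-tail slicing is an assumption inferred from the generated_slice line, and the real test's tolerance may differ:

import torch

# Stand-in for the 3x32x32 image produced by the pipeline in test_inference.
generated_image = torch.rand(3, 32, 32)

generated_slice = generated_image.flatten()
# Assumed slicing: first 8 and last 8 values, matching the 16 recorded numbers.
generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]])

expected_slice = torch.tensor(
    [0.5637, 0.6341, 0.6001, 0.5620, 0.5794, 0.5498, 0.5757, 0.6389,
     0.4174, 0.3597, 0.5649, 0.4894, 0.4969, 0.5255, 0.4083, 0.4986]
)

# Random inputs will not match the recorded slice; in the real test the
# pipeline is seeded so this comparison is deterministic.
print(torch.allclose(generated_slice, expected_slice, atol=1e-3))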
@@ -240,4 +260,4 @@ def test_vae_tiling(self, expected_diff_max: float = 0.2):

     @pytest.mark.xfail(condition=True, reason="Preconfigured embeddings need to be revisited.", strict=True)
     def test_encode_prompt_works_in_isolation(self, extra_required_param_value_dict=None, atol=1e-4, rtol=1e-4):
-        super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict, atol, rtol)
+        super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict, atol, rtol)
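
A note on the xfail marker kept in this last hunk (the removed and added lines are identical, which usually indicates a trailing-newline fix): with strict=True, pytest turns an unexpected pass into a suite failure, so the expectation cannot silently go stale once the preconfigured embeddings are revisited. A minimal illustration, with a hypothetical test name:

import pytest


@pytest.mark.xfail(condition=True, reason="Preconfigured embeddings need to be revisited.", strict=True)
def test_example():
    # Expected to fail today; under strict=True an unexpected pass is
    # reported as FAILED rather than silently recorded as XPASS.
    raise AssertionError("known-broken path")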
