
Commit 270c63f

Merge branch 'txt_seq_lens' of https://github.com/kashif/diffusers into txt_seq_lens
2 parents 2ef38e2 + 61f5265

File tree

src/diffusers/models/controlnets/controlnet_qwenimage.py
src/diffusers/models/transformers/transformer_qwenimage.py
tests/models/transformers/test_models_transformer_qwenimage.py

3 files changed: +25 −8 lines changed


src/diffusers/models/controlnets/controlnet_qwenimage.py

Lines changed: 4 additions & 4 deletions

@@ -174,9 +174,9 @@ def forward(
         if txt_seq_lens is not None:
             deprecate(
                 "txt_seq_lens",
-                "0.37.0",
+                "0.39.0",
                 "Passing `txt_seq_lens` to `QwenImageControlNetModel.forward()` is deprecated and will be removed in "
-                "version 0.37.0. The text sequence length is now automatically inferred from `encoder_hidden_states` "
+                "version 0.39.0. The text sequence length is now automatically inferred from `encoder_hidden_states` "
                 "and `encoder_hidden_states_mask`.",
                 standard_warn=False,
             )

@@ -291,9 +291,9 @@ def forward(
         if txt_seq_lens is not None:
             deprecate(
                 "txt_seq_lens",
-                "0.37.0",
+                "0.39.0",
                 "Passing `txt_seq_lens` to `QwenImageMultiControlNetModel.forward()` is deprecated and will be "
-                "removed in version 0.37.0. The text sequence length is now automatically inferred from "
+                "removed in version 0.39.0. The text sequence length is now automatically inferred from "
                 "`encoder_hidden_states` and `encoder_hidden_states_mask`.",
                 standard_warn=False,
             )
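Per the deprecation message above, the text sequence length is now inferred from `encoder_hidden_states` and `encoder_hidden_states_mask` rather than passed in. A minimal sketch of what that inference could look like, assuming a 0/1 mask of shape (batch, txt_seq_len); the helper name `infer_txt_seq_lens` is illustrative only, not the actual diffusers implementation:

import torch

def infer_txt_seq_lens(encoder_hidden_states, encoder_hidden_states_mask=None):
    # Without a mask, every position of encoder_hidden_states counts as text.
    if encoder_hidden_states_mask is None:
        batch_size, seq_len, _ = encoder_hidden_states.shape
        return torch.full((batch_size,), seq_len, dtype=torch.long)
    # With a mask, each sample's length is its number of unmasked positions.
    return encoder_hidden_states_mask.sum(dim=1).to(torch.long)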

src/diffusers/models/transformers/transformer_qwenimage.py

Lines changed: 4 additions & 4 deletions

@@ -253,8 +253,8 @@ def forward(
         if txt_seq_lens is not None:
             deprecate(
                 "txt_seq_lens",
-                "0.37.0",
-                "Passing `txt_seq_lens` is deprecated and will be removed in version 0.37.0. "
+                "0.39.0",
+                "Passing `txt_seq_lens` is deprecated and will be removed in version 0.39.0. "
                 "Please use `max_txt_seq_len` instead. "
                 "The new parameter accepts a single int or tensor value representing the maximum text sequence length.",
                 standard_warn=False,

@@ -907,8 +907,8 @@ def forward(
         if txt_seq_lens is not None:
             deprecate(
                 "txt_seq_lens",
-                "0.37.0",
-                "Passing `txt_seq_lens` is deprecated and will be removed in version 0.37.0. "
+                "0.39.0",
+                "Passing `txt_seq_lens` is deprecated and will be removed in version 0.39.0. "
                 "Please use `encoder_hidden_states_mask` instead. "
                 "The mask-based approach is more flexible and supports variable-length sequences.",
                 standard_warn=False,
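Callers migrating off `txt_seq_lens` here are pointed to `max_txt_seq_len` (first hunk) or `encoder_hidden_states_mask` (second hunk). A hedged sketch of building such a mask from the per-prompt lengths a caller may still have on hand; `lengths_to_mask` is an illustrative helper, not part of diffusers:

import torch

def lengths_to_mask(txt_seq_lens, max_len):
    # Row i is 1 at positions [0, txt_seq_lens[i]) and 0 afterwards.
    positions = torch.arange(max_len).unsqueeze(0)        # shape (1, max_len)
    lengths = torch.as_tensor(txt_seq_lens).unsqueeze(1)  # shape (batch, 1)
    return (positions < lengths).to(torch.long)           # shape (batch, max_len)

# Example: lengths_to_mask([5, 3], max_len=6)
# tensor([[1, 1, 1, 1, 1, 0],
#         [1, 1, 1, 0, 0, 0]])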

tests/models/transformers/test_models_transformer_qwenimage.py

Lines changed: 17 additions & 0 deletions

@@ -145,6 +145,23 @@ def test_infers_text_seq_len_from_mask(self):
         self.assertIsNone(per_sample_len_none)
         self.assertIsNone(normalized_mask_none)

+    def test_deprecated_txt_seq_lens_warning(self):
+        """Test that passing the deprecated txt_seq_lens parameter raises a FutureWarning."""
+        init_dict, inputs = self.prepare_init_args_and_inputs_for_common()
+        model = self.model_class(**init_dict).to(torch_device)
+        model.eval()
+
+        # Add the deprecated txt_seq_lens parameter
+        inputs["txt_seq_lens"] = [inputs["encoder_hidden_states"].shape[1]]
+
+        with self.assertWarns(FutureWarning) as warning:
+            with torch.no_grad():
+                _ = model(**inputs)
+
+        # Verify the warning message mentions the deprecated parameter
+        self.assertIn("txt_seq_lens", str(warning.warning))
+        self.assertIn("deprecated", str(warning.warning).lower())
+
     def test_non_contiguous_attention_mask(self):
         """Test that non-contiguous masks work correctly (e.g., [1, 0, 1, 0, 1, 0, 0])"""
         init_dict, inputs = self.prepare_init_args_and_inputs_for_common()