Skip to content

Commit 4e9de5c

Browse files
authored
fix ut error in idefics3 and qwen2.5-omni (#1182)
1 parent 0f57e51 commit 4e9de5c

File tree

2 files changed

+7
-12
lines changed

2 files changed

+7
-12
lines changed

tests/transformers_tests/models/idefics3/test_modeling_idefics3.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -112,7 +112,7 @@ def get_config(self):
112112
max_position_embeddings=self.max_position_embeddings,
113113
use_cache=self.use_cache,
114114
attn_implementation=self.attn_implementation,
115-
torch_dtype=self.torch_dtype, # ??
115+
torch_dtype=self.torch_dtype,
116116
)
117117
vision_config = Idefics3VisionConfig(
118118
hidden_size=self.hidden_size,
@@ -123,12 +123,14 @@ def get_config(self):
123123
image_size=64,
124124
patch_size=32,
125125
attn_implementation=self.attn_implementation,
126+
torch_dtype=self.torch_dtype,
126127
)
127128
config = Idefics3Config(
128129
use_cache=self.use_cache,
129130
vision_config=vision_config,
130131
text_config=text_config,
131132
attn_implementation=self.attn_implementation,
133+
torch_dtype=self.torch_dtype,
132134
)
133135

134136
return config

tests/transformers_tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py

Lines changed: 4 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -10,9 +10,7 @@
1010
# In cases where models have unique initialization procedures or require testing with specialized output formats,
1111
# it is necessary to develop distinct, dedicated test cases.
1212

13-
# NOTE: need to install transformers by
14-
# pip install git+https://github.com/huggingface/transformers@f742a644ca32e65758c3adb36225aef1731bd2a8
15-
# or download https://github.com/huggingface/transformers/archive/f742a644ca32e65758c3adb36225aef1731bd2a8.zip
13+
# NOTE: need to install transformers by `pip install transformers>=4.52.0`
1614

1715
import inspect
1816

@@ -39,9 +37,8 @@
3937
)
4038
from tests.transformers_tests.models.modeling_common import ids_numpy
4139

42-
DTYPE_AND_THRESHOLDS = {"fp32": 5e-4, "fp16": 5e-4, "bf16": 6e-3} # Thinker
43-
# DTYPE_AND_THRESHOLDS = {"fp32": 5e-4, "fp16": 5e-4, "bf16": 7e-3} # Talker
44-
MODES = [1] # TODO: graph mode
40+
DTYPE_AND_THRESHOLDS = {"fp32": 5e-2, "fp16": 5e-4, "bf16": 5e-2}
41+
MODES = [1]
4542

4643

4744
class Qwen2_5_OmniModelTester:
@@ -111,10 +108,6 @@ def prepare_config_and_inputs(self):
111108
lm_labels = ids_numpy([self.batch_size, self.decoder_seq_length], self.vocab_size)
112109

113110
thinker_config, talker_config, token2wav_config = self.get_config()
114-
# config = self.get_large_model_config()
115-
# thinker_config = config.thinker_config.text_config
116-
# talker_config = config.talker_config
117-
# token2wav_config = config.token2wav_config
118111

119112
return (
120113
thinker_config,
@@ -189,7 +182,7 @@ def get_config(self):
189182
T5_CASES = [
190183
[
191184
"Qwen2_5OmniThinkerTextModel",
192-
"transformers.models.qwen2_5_omni.Qwen2_5OmniThinkerModel", # NOTE: name is different from latest version
185+
"transformers.models.qwen2_5_omni.Qwen2_5OmniThinkerTextModel", # NOTE: name is different
193186
"mindone.transformers.Qwen2_5OmniThinkerTextModel",
194187
(thinker_config,),
195188
{},

0 commit comments

Comments
 (0)