Commit 13ecbd8 ("disable")
1 parent de3bf91

File tree: 5 files changed (+15, -90 lines)

_doc/recipes/plot_dynamic_shapes_max.py
Lines changed: 3 additions & 1 deletion

@@ -10,6 +10,8 @@
 in the exported program is something very aggreessive. Here is a case where
 it takes a wrong decision and how to get around it.
 
+**This bug was fixed after 4/24/2025**.
+
 Wrong Model
 +++++++++++
 """
@@ -183,4 +185,4 @@ def forward(self, x, y, fact):
 # is hidden in a custom operator.
 
 
-doc.plot_legend("dynamic shapes\nworkaround\nmax(d1, d2)", "dynamic shapes", "yellow")
+doc.plot_legend("max(d1, d2)\nwith d1, d2 dimensions", "dynamic shapes", "green")
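For context, here is a minimal sketch of the pattern this recipe exercises: an
output dimension equal to max(d1, d2) for two dynamic dimensions d1 and d2. The
model and dimension names are illustrative, not taken from the recipe.

import torch

class MaxPad(torch.nn.Module):
    "Hypothetical model: pads both inputs to the longer of their last dimensions."

    def forward(self, x, y):
        n = max(x.shape[1], y.shape[1])  # max of two dynamic dimensions
        return torch.nn.functional.pad(x, (0, n - x.shape[1])) + torch.nn.functional.pad(
            y, (0, n - y.shape[1])
        )

batch = torch.export.Dim("batch", min=1, max=1024)
d1 = torch.export.Dim("d1", min=1, max=4096)
d2 = torch.export.Dim("d2", min=1, max=4096)
# The exporter may specialize or fail here because it cannot decide max(d1, d2)
# symbolically; the recipe's workaround hides the comparison in a custom operator.
ep = torch.export.export(
    MaxPad(),
    (torch.randn(2, 5), torch.randn(2, 7)),
    dynamic_shapes=({0: batch, 1: d1}, {0: batch, 1: d2}),
)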

_unittests/ut_tasks/try_tasks.py
Lines changed: 4 additions & 0 deletions

@@ -125,6 +125,10 @@ def test_text_generation_phi4_mini(self):
         print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))
 
     @never_test()
+    @unittest.skip(
+        reason="AttributeError: 'Phi4MMModel' object has no attribute "
+        "'prepare_inputs_for_generation'"
+    )
     def test_text_generation_phi4_moe(self):
         # clear&&NEVERTEST=1 python _unittests/ut_tasks/try_tasks.py -k phi4_moe
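As a reminder of what the added decorator does: unittest.skip marks the test so
the runner reports it as skipped and the failing body never executes. A minimal
self-contained sketch of the same pattern (the class name is a placeholder):

import unittest

class TryTasks(unittest.TestCase):
    @unittest.skip(
        reason="AttributeError: 'Phi4MMModel' object has no attribute "
        "'prepare_inputs_for_generation'"
    )
    def test_text_generation_phi4_moe(self):
        raise AssertionError("never executed; the test is reported as skipped")

if __name__ == "__main__":
    unittest.main()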

onnx_diagnostic/tasks/automatic_speech_recognition.py
Lines changed: 1 addition & 1 deletion

@@ -36,7 +36,7 @@ def get_inputs(
     **kwargs,  # unused
 ):
     """
-    Generates inputs for task ``text2text-generation``.
+    Generates inputs for task ``automatic-speech-recognition``.
     Example:
 
     ::

onnx_diagnostic/tasks/image_text_to_text.py
Lines changed: 1 addition & 1 deletion

@@ -35,7 +35,7 @@ def get_inputs(
     **kwargs,  # unused
 ):
     """
-    Generates input for task ``text-generation``.
+    Generates input for task ``image-text-to-text``.
 
     :param model: model to get the missing information
     :param config: configuration used to generate the model
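Both docstring fixes touch per-task get_inputs helpers. A plausible call
pattern, inferred from the signatures visible in this commit and assuming
image_text_to_text exposes the same random_input_kwargs(config) ->
(kwargs, get_inputs) pair as the MoE module below; model and config are
placeholders:

from onnx_diagnostic.tasks.image_text_to_text import random_input_kwargs

kwargs, fct = random_input_kwargs(config)  # config: a transformers configuration, or None
data = fct(model, config, **kwargs)        # model: used to fill in missing information
inputs, dynamic_shapes = data["inputs"], data["dynamic_shapes"]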
Lines changed: 6 additions & 87 deletions

@@ -1,7 +1,7 @@
 from typing import Any, Callable, Dict, Optional, Tuple
 import torch
-from ..helpers.cache_helper import make_dynamic_cache
-from ..helpers.config_helper import update_config, check_hasattr, _pick
+# from ..helpers.cache_helper import make_dynamic_cache
+from ..helpers.config_helper import update_config  # , check_hasattr, _pick
 
 
 __TASK__ = "MoE"
 
@@ -43,7 +43,7 @@ def get_inputs(
     **kwargs,  # unused
 ):
     """
-    Generates input for task ``text-generation``.
+    Generates input for task ``MoE``.
 
     :param model: model to get the missing information
     :param config: configuration used to generate the model
@@ -59,55 +59,7 @@ def get_inputs(
     :param dynamic_rope: use dynamic rope (see :class:`transformers.LlamaConfig`)
     :return: dictionary
     """
-    batch = torch.export.Dim("batch", min=1, max=1024)
-    seq_length = "seq_length"  # torch.export.Dim("seq_length", min=1, max=4096)
-    cache_length = "cache_length"  # torch.export.Dim("cache_length", min=1, max=4096)
-    images = "images"  # torch.export.Dim("images", min=1, max=4096)
-
-    shapes = {
-        "input_ids": {0: batch, 1: seq_length},
-        "attention_mask": {
-            0: batch,
-            1: "cache+seq",  # cache_length + seq_length
-        },
-        "position_ids": {
-            0: batch,
-            1: "cache+seq",  # cache_length + seq_length
-        },
-        "past_key_values": [
-            [{0: batch, 2: cache_length} for _ in range(num_hidden_layers)],
-            [{0: batch, 2: cache_length} for _ in range(num_hidden_layers)],
-        ],
-        "pixel_values": {0: batch, 1: images},
-        "image_attention_mask": {0: batch, 1: seq_length, 2: images},
-    }
-    inputs = dict(
-        input_ids=torch.randint(0, dummy_max_token_id, (batch_size, sequence_length2)).to(
-            torch.int64
-        ),
-        attention_mask=torch.ones((batch_size, sequence_length + sequence_length2)).to(
-            torch.int64
-        ),
-        position_ids=torch.arange(sequence_length, sequence_length + sequence_length2)
-        .to(torch.int64)
-        .expand((batch_size, -1)),
-        past_key_values=make_dynamic_cache(
-            [
-                (
-                    torch.randn(batch_size, num_key_value_heads, sequence_length, head_dim),
-                    torch.randn(batch_size, num_key_value_heads, sequence_length, head_dim),
-                )
-                for i in range(num_hidden_layers)
-            ]
-        ),
-        image_attention_mask=torch.ones((batch_size, sequence_length2, n_images)).to(
-            torch.int64
-        ),
-        pixel_values=torch.ones((batch_size, n_images, num_channels, width, height)).to(
-            torch.int64
-        ),
-    )
-    return dict(inputs=inputs, dynamic_shapes=shapes)
+    raise NotImplementedError(f"get_inputs not yet implemented for task {__TASK__!r}.")
 
 
 def random_input_kwargs(config: Any) -> Tuple[Dict[str, Any], Callable]:
@@ -116,39 +68,6 @@ def random_input_kwargs(config: Any) -> Tuple[Dict[str, Any], Callable]:
 
     If the configuration is None, the function selects typical dimensions.
     """
-    if config is not None:
-        check_hasattr(
-            config,
-            "vocab_size",
-            "hidden_size",
-            "num_attention_heads",
-            ("num_key_value_heads", "num_attention_heads"),
-            "intermediate_size",
-            "hidden_size",
-            "vision_config",
-            "audio_processor",
-        )
-        check_hasattr(config.vision_config, "image_size", "num_channels")
-    kwargs = dict(
-        batch_size=2,
-        sequence_length=30,
-        sequence_length2=3,
-        head_dim=(
-            16
-            if config is None
-            else getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
-        ),
-        dummy_max_token_id=31999 if config is None else config.vocab_size - 1,
-        num_hidden_layers=4 if config is None else config.num_hidden_layers,
-        num_key_value_heads=(
-            8
-            if config is None
-            else _pick(config, "num_key_value_heads", "num_attention_heads")
-        ),
-        intermediate_size=1024 if config is None else config.intermediate_size,
-        hidden_size=512 if config is None else config.hidden_size,
-        width=224 if config is None else config.vision_config.image_size,
-        height=224 if config is None else config.vision_config.image_size,
-        num_channels=3 if config is None else config.vision_config.num_channels,
+    raise NotImplementedError(
+        f"random_input_kwargs not yet implemented for task {__TASK__!r}."
     )
-    return kwargs, get_inputs
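Since both MoE helpers now raise, code that enumerates task modules can guard
against the disabled task. A small sketch, with random_input_kwargs standing in
for the function in the MoE module above:

try:
    kwargs, fct = random_input_kwargs(config)  # raises for the disabled MoE task
except NotImplementedError as exc:
    # e.g. "random_input_kwargs not yet implemented for task 'MoE'."
    print(f"task skipped: {exc}")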
