
Commit 519fa95

Add option to disable patches for torch in command line validate (#203)
* Add option to disable patches for torch in command line validate
* changelogs
* changelogs
* nhl
* 2
* ut
1 parent 945e778 commit 519fa95

20 files changed, +168 −41 lines

.github/workflows/ci.yml

Lines changed: 7 additions & 7 deletions
@@ -101,28 +101,28 @@ jobs:
       run: python -m pip freeze

     - name: tiny-llm torch.export.export
-      run: PYTHONPATH=. python _unittests/ut_torch_models/test_tiny_llms.py
+      run: PYTHONPATH=. UNITTEST_GOING=1 python _unittests/ut_torch_models/test_tiny_llms.py

     - name: tiny-llm onnx
-      run: PYTHONPATH=. python _unittests/ut_torch_models/test_tiny_llms_onnx.py
+      run: PYTHONPATH=. UNITTEST_GOING=1 python _unittests/ut_torch_models/test_tiny_llms_onnx.py
       continue-on-error: true # connectivity issues

     - name: tiny-llm example
-      run: PYTHONPATH=. python _doc/examples/plot_export_tiny_llm.py
+      run: PYTHONPATH=. UNITTEST_GOING=1 python _doc/examples/plot_export_tiny_llm.py
       continue-on-error: true # connectivity issues

     - name: tiny-llm bypass
-      run: PYTHONPATH=. python _doc/examples/plot_export_tiny_llm_patched.py
+      run: PYTHONPATH=. UNITTEST_GOING=1 python _doc/examples/plot_export_tiny_llm_patched.py
       continue-on-error: true # connectivity issues

     - name: run tests bypassed
-      run: PYTHONPATH=. python _unittests/ut_torch_models/test_tiny_llms_bypassed.py
+      run: PYTHONPATH=. UNITTEST_GOING=1 python _unittests/ut_torch_models/test_tiny_llms_bypassed.py

     - name: test image_classification
-      run: PYTHONPATH=. python _unittests/ut_tasks/test_tasks_image_classification.py
+      run: PYTHONPATH=. UNITTEST_GOING=1 python _unittests/ut_tasks/test_tasks_image_classification.py

     - name: test zero_shot_image_classification
-      run: PYTHONPATH=. python _unittests/ut_tasks/test_tasks_zero_shot_image_classification.py
+      run: PYTHONPATH=. UNITTEST_GOING=1 python _unittests/ut_tasks/test_tasks_zero_shot_image_classification.py

     - name: run tests
       run: |
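
The only functional change in the workflow is that each test command is now prefixed with UNITTEST_GOING=1. A minimal sketch of the check this variable feeds, as used by the helpers and tests changed below (the constant name here is only illustrative):

    import os

    # "1" means the unit tests are running; helpers then pick smaller default model sizes.
    UNITTEST_GOING = os.environ.get("UNITTEST_GOING", "0") == "1"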

CHANGELOGS.rst

Lines changed: 5 additions & 1 deletion
@@ -4,7 +4,11 @@ Change Logs
 0.7.7
 +++++

-
+* :pr:`205`: add in_channels in image_text_to_text
+* :pr:`204`: switch default num_hidden_layers to 4
+* :pr:`203`: Add option to disable patches for torch in command line validate
+* :pr:`202`: add models DeepseekV3ForCausalLM, Gemma3ForCausalLM, Glm4vMoeForConditionalGeneration
+* :pr:`201`: switch CI to 4.55.4
 * :pr:`200`: fixes patches for 4.55.1+, DynamicCache is no longer registered by default,
   this code moved to executorch.py in transformers
 * :pr:`199`: delete hidden_size and num_attention_heads modification in a config

_unittests/ut_tasks/test_tasks.py

Lines changed: 6 additions & 0 deletions
@@ -1,3 +1,4 @@
+import os
 import unittest
 import torch
 from onnx_diagnostic.ext_test_case import (
@@ -13,6 +14,11 @@


 class TestTasks(ExtTestCase):
+    def test_unittest_going(self):
+        assert (
+            os.environ.get("UNITTEST_GOING", "0") == "1"
+        ), "UNITTEST_GOING=1 must be defined for these tests"
+
     @hide_stdout()
     def test_text2text_generation(self):
         mid = "sshleifer/tiny-marian-en-de"

onnx_diagnostic/_command_lines_parser.py

Lines changed: 56 additions & 3 deletions
@@ -306,7 +306,7 @@ def __call__(self, parser, namespace, values, option_string=None):
             value = split_items[1]

             if value in ("True", "true", "False", "false"):
-                d[key] = bool(value)
+                d[key] = value in ("True", "true")
                 continue
             try:
                 d[key] = int(value)
@@ -323,6 +323,54 @@
         setattr(namespace, self.dest, d)


+class _BoolOrParseDictPatch(argparse.Action):
+    def __call__(self, parser, namespace, values, option_string=None):
+
+        if not values:
+            return
+        if len(values) == 1 and values[0] in (
+            "True",
+            "False",
+            "true",
+            "false",
+            "0",
+            "1",
+            0,
+            1,
+        ):
+            setattr(namespace, self.dest, values[0] in ("True", "true", 1, "1"))
+            return
+        d = getattr(namespace, self.dest) or {}
+        if not isinstance(d, dict):
+            d = {
+                "patch_sympy": d,
+                "patch_torch": d,
+                "patch_transformers": d,
+                "patch_diffusers": d,
+            }
+        for item in values:
+            split_items = item.split("=", 1)
+            key = split_items[0].strip()  # we remove blanks around keys, as is logical
+            value = split_items[1]
+
+            if value in ("True", "true", "False", "false"):
+                d[key] = value in ("True", "true")
+                continue
+            try:
+                d[key] = int(value)
+                continue
+            except (TypeError, ValueError):
+                pass
+            try:
+                d[key] = float(value)
+                continue
+            except (TypeError, ValueError):
+                pass
+            d[key] = _parse_json(value)
+
+        setattr(namespace, self.dest, d)
+
+
 def get_parser_validate() -> ArgumentParser:
     parser = ArgumentParser(
         prog="validate",
@@ -383,8 +431,13 @@ def get_parser_validate() -> ArgumentParser:
     parser.add_argument(
         "--patch",
         default=True,
-        action=BooleanOptionalAction,
-        help="Applies patches before exporting.",
+        action=_BoolOrParseDictPatch,
+        nargs="*",
+        help="Applies patches before exporting, it can be a boolean "
+        "to enable to disable the patches or be more finetuned. It is possible to "
+        "disable patch for torch by adding "
+        '--patch "patch_sympy=False" --patch "patch_torch=False", '
+        "default is True.",
     )
     parser.add_argument(
         "--rewrite",

onnx_diagnostic/helpers/config_helper.py

Lines changed: 10 additions & 0 deletions
@@ -1,6 +1,7 @@
 import functools
 import importlib
 import inspect
+import os
 import re
 from typing import Any, Callable, Dict, Optional, Tuple, Union
 import transformers
@@ -110,3 +111,12 @@ def config_class_from_architecture(arch: str, exc: bool = False) -> Optional[typ
     )
     cls_name = unique.pop()
     return getattr(transformers, cls_name)
+
+
+def default_num_hidden_layers():
+    """
+    Returns the default number of layers.
+    It is lower when the unit tests are running
+    when ``UNITTEST_GOING=1``.
+    """
+    return 2 if os.environ.get("UNITTEST_GOING", "0") == "1" else 4
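
This helper is what the task modules below import under the alias nhl. A small sketch of its behaviour, assuming the package is installed:

    import os
    from onnx_diagnostic.helpers.config_helper import default_num_hidden_layers

    os.environ["UNITTEST_GOING"] = "1"
    print(default_num_hidden_layers())  # 2 while the unit tests are running

    os.environ.pop("UNITTEST_GOING")
    print(default_num_hidden_layers())  # 4 otherwise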

onnx_diagnostic/tasks/automatic_speech_recognition.py

Lines changed: 6 additions & 2 deletions
@@ -2,7 +2,11 @@
 import torch
 import transformers
 from ..helpers.cache_helper import make_dynamic_cache, make_encoder_decoder_cache
-from ..helpers.config_helper import update_config, check_hasattr
+from ..helpers.config_helper import (
+    update_config,
+    check_hasattr,
+    default_num_hidden_layers as nhl,
+)

 __TASK__ = "automatic-speech-recognition"

@@ -15,7 +19,7 @@ def reduce_model_config(config: Any) -> Dict[str, Any]:
     if hasattr(config, "decoder_layers"):
         config.decoder_layers = min(config.decoder_layers, 2)
     if hasattr(config, "num_hidden_layers"):
-        config.num_hidden_layers = min(config.num_hidden_layers, 4)
+        config.num_hidden_layers = min(config.num_hidden_layers, nhl())
     update_config(config, kwargs)
     return kwargs

onnx_diagnostic/tasks/feature_extraction.py

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,20 @@
11
from typing import Any, Callable, Dict, Optional, Tuple
22
import torch
3-
from ..helpers.config_helper import update_config, check_hasattr
3+
from ..helpers.config_helper import (
4+
update_config,
5+
check_hasattr,
6+
default_num_hidden_layers as nhl,
7+
)
48
from ..helpers.cache_helper import make_dynamic_cache, make_encoder_decoder_cache
59

10+
611
__TASK__ = "feature-extraction"
712

813

914
def reduce_model_config(config: Any) -> Dict[str, Any]:
1015
"""Reduces a model size."""
1116
check_hasattr(config, "num_hidden_layers")
12-
kwargs = dict(num_hidden_layers=min(config.num_hidden_layers, 4))
17+
kwargs = dict(num_hidden_layers=min(config.num_hidden_layers, nhl()))
1318
update_config(config, kwargs)
1419
return kwargs
1520

onnx_diagnostic/tasks/fill_mask.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,10 @@
11
from typing import Any, Callable, Dict, Optional, Tuple
22
import torch
3-
from ..helpers.config_helper import update_config, check_hasattr
3+
from ..helpers.config_helper import (
4+
update_config,
5+
check_hasattr,
6+
default_num_hidden_layers as nhl,
7+
)
48

59
__TASK__ = "fill-mask"
610

@@ -9,7 +13,7 @@ def reduce_model_config(config: Any) -> Dict[str, Any]:
913
"""Reduces a model size."""
1014
check_hasattr(config, "num_attention_heads", "num_hidden_layers")
1115
kwargs = dict(
12-
num_hidden_layers=min(config.num_hidden_layers, 4),
16+
num_hidden_layers=min(config.num_hidden_layers, nhl()),
1317
num_attention_heads=min(config.num_attention_heads, 4),
1418
)
1519
update_config(config, kwargs)

onnx_diagnostic/tasks/image_classification.py

Lines changed: 6 additions & 2 deletions
@@ -1,6 +1,10 @@
 from typing import Any, Callable, Dict, Optional, Tuple
 import torch
-from ..helpers.config_helper import update_config, check_hasattr
+from ..helpers.config_helper import (
+    update_config,
+    check_hasattr,
+    default_num_hidden_layers as nhl,
+)

 __TASK__ = "image-classification"

@@ -17,7 +21,7 @@ def reduce_model_config(config: Any) -> Dict[str, Any]:
     check_hasattr(config, ("num_hidden_layers", "hidden_sizes"))
     kwargs = dict(
         num_hidden_layers=(
-            min(config.num_hidden_layers, 4)
+            min(config.num_hidden_layers, nhl())
             if hasattr(config, "num_hidden_layers")
             else len(config.hidden_sizes)
         )

onnx_diagnostic/tasks/image_text_to_text.py

Lines changed: 7 additions & 2 deletions
@@ -1,7 +1,12 @@
 from typing import Any, Callable, Dict, Optional, Tuple
 import torch
 from ..helpers.cache_helper import make_dynamic_cache, make_hybrid_cache
-from ..helpers.config_helper import update_config, check_hasattr, _pick
+from ..helpers.config_helper import (
+    update_config,
+    check_hasattr,
+    _pick,
+    default_num_hidden_layers as nhl,
+)

 __TASK__ = "image-text-to-text"

@@ -10,7 +15,7 @@ def reduce_model_config(config: Any) -> Dict[str, Any]:
     """Reduces a model size."""
     kwargs: Dict[str, Any] = {}
     if hasattr(config, "num_hidden_layers"):
-        config.num_hidden_layers = min(config.num_hidden_layers, 4)
+        config.num_hidden_layers = min(config.num_hidden_layers, nhl())
     if hasattr(config, "mm_tokens_per_image"):
         config.mm_tokens_per_image = min(config.mm_tokens_per_image, 2)
     if hasattr(config, "vision_config"):
