
Commit 431618e

Fixed ONNX opset version
1 parent 01ac364 commit 431618e

7 files changed (+50 -22 lines)


CHANGELOG.md

Lines changed: 1 addition & 0 deletions
@@ -25,6 +25,7 @@ limitations under the License.
 - change: Removed PyTorch dependencies from `nav.profile`
 - change: Collect all Python packages in status instead of filtered list
 - change: Use default throughput cutoff threshold for max batch size heuristic when `None` provided in configuration
+- change: Updated default ONNX opset to 20 for Torch >= 2.5
 - fix: Exception is raised with Python >=3.11 due to wrong dataclass initialization
 - fix: Removed option from ExportOption removed from Torch 2.5
 - fix: Improved preprocessing stage in Torch based runners

model_navigator/configuration/constants.py

Lines changed: 6 additions & 5 deletions
@@ -60,23 +60,24 @@
 NAVIGATOR_USE_MULTIPROCESSING = "NAVIGATOR_USE_MULTIPROCESSING"

 # ONNX Opset
-_DEFAULT_ONNX_OPSET_2_4 = 17
-_DEFAULT_ONNX_OPSET_2_5 = 22
+_DEFAULT_ONNX_OPSET_TORCH_2_4 = 17
+_DEFAULT_ONNX_OPSET_TORCH_2_5 = 20
+_DEFAULT_ONNX_OPSET = 18


 def default_onnx_opset():
     """Dynamically set default ONNX opset based on Torch version."""
     from model_navigator.frameworks import is_torch_available

     if not is_torch_available():
-        return _DEFAULT_ONNX_OPSET_2_4
+        return _DEFAULT_ONNX_OPSET

     from model_navigator.frameworks import _TORCH_VERSION

     if _TORCH_VERSION >= Version("2.5.0"):
-        return _DEFAULT_ONNX_OPSET_2_5
+        return _DEFAULT_ONNX_OPSET_TORCH_2_5

-    return _DEFAULT_ONNX_OPSET_2_4
+    return _DEFAULT_ONNX_OPSET_TORCH_2_4


 DEFAULT_ONNX_OPSET = default_onnx_opset()
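
For reference, a consolidated view of the module after this commit, reconstructed from the diff above rather than copied verbatim from the repository; the `Version` import is assumed to come from `packaging.version`:

    # Sketch of model_navigator/configuration/constants.py after this commit
    # (reconstructed from the diff above; the Version import is an assumption).
    from packaging.version import Version

    _DEFAULT_ONNX_OPSET_TORCH_2_4 = 17
    _DEFAULT_ONNX_OPSET_TORCH_2_5 = 20
    _DEFAULT_ONNX_OPSET = 18


    def default_onnx_opset():
        """Dynamically set default ONNX opset based on Torch version."""
        from model_navigator.frameworks import is_torch_available

        if not is_torch_available():
            return _DEFAULT_ONNX_OPSET  # no Torch installed: opset 18

        from model_navigator.frameworks import _TORCH_VERSION

        if _TORCH_VERSION >= Version("2.5.0"):
            return _DEFAULT_ONNX_OPSET_TORCH_2_5  # Torch >= 2.5: opset 20

        return _DEFAULT_ONNX_OPSET_TORCH_2_4  # older Torch: opset 17


    DEFAULT_ONNX_OPSET = default_onnx_opset()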

tests/unit/base/test_package_optimize.py

Lines changed: 2 additions & 2 deletions
@@ -18,7 +18,7 @@

 import pytest

-from model_navigator.configuration import Format, JitType, OptimizationProfile, TorchScriptConfig
+from model_navigator.configuration import DEFAULT_ONNX_OPSET, Format, JitType, OptimizationProfile, TorchScriptConfig
 from model_navigator.exceptions import ModelNavigatorEmptyPackageError, ModelNavigatorMissingSourceModelError
 from model_navigator.package import _get_model_configs, _update_config, optimize
 from model_navigator.runners.registry import runner_registry
@@ -75,7 +75,7 @@ def test_update_config_returns_updated_custom_config_when_defaults_is_true():
     torch_script_config = config.custom_configs["TorchScript"]
     tensorrt_config = config.custom_configs["TensorRT"]

-    assert onnx_config.opset == 17  # pytype: disable=attribute-error
+    assert onnx_config.opset == DEFAULT_ONNX_OPSET  # pytype: disable=attribute-error
     assert torch_script_config.jit_type == (JitType.SCRIPT, JitType.TRACE)  # pytype: disable=attribute-error
     assert tensorrt_config.trt_profiles is None  # pytype: disable=attribute-error

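
Because the expected opset now depends on the installed Torch version, checks compare against the exported constant rather than a literal value. A minimal sketch of that pattern, assuming only the `DEFAULT_ONNX_OPSET` re-export used in the test's new import:

    from model_navigator.configuration import DEFAULT_ONNX_OPSET

    # Per this commit: 20 with Torch >= 2.5, 17 with an older Torch, 18 when Torch is absent.
    assert DEFAULT_ONNX_OPSET in (17, 18, 20)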

tests/unit/base/test_pipelines_builders_find_device_max_batch_size.py

Lines changed: 21 additions & 5 deletions
@@ -19,7 +19,7 @@
     TensorRTPrecisionMode,
 )
 from model_navigator.configuration.common_config import CommonConfig
-from model_navigator.configuration.constants import DEFAULT_MAX_WORKSPACE_SIZE
+from model_navigator.configuration.constants import DEFAULT_MAX_WORKSPACE_SIZE, DEFAULT_ONNX_OPSET
 from model_navigator.configuration.model.model_config import (
     ONNXModelConfig,
     TensorFlowSavedModelConfig,
@@ -65,7 +65,11 @@ def test_find_device_max_batch_size_builder_return_execution_unit_when_torch_fra
                 inference_mode=True,
             )
         ],
-        Format.ONNX: [ONNXModelConfig(opset=17, dynamic_axes={}, dynamo_export=False, graph_surgeon_optimization=True)],
+        Format.ONNX: [
+            ONNXModelConfig(
+                opset=DEFAULT_ONNX_OPSET, dynamic_axes={}, dynamo_export=False, graph_surgeon_optimization=True
+            )
+        ],
         Format.TENSORRT: [
             TensorRTModelConfig(
                 precision=TensorRTPrecision.FP16,
@@ -103,7 +107,11 @@ def test_find_device_max_batch_size_builder_return_execution_unit_when_tensorflo

     models_config = {
         Format.TF_SAVEDMODEL: [TensorFlowSavedModelConfig(enable_xla=False, jit_compile=False)],
-        Format.ONNX: [ONNXModelConfig(opset=17, dynamic_axes={}, dynamo_export=False, graph_surgeon_optimization=True)],
+        Format.ONNX: [
+            ONNXModelConfig(
+                opset=DEFAULT_ONNX_OPSET, dynamic_axes={}, dynamo_export=False, graph_surgeon_optimization=True
+            )
+        ],
         Format.TENSORRT: [
             TensorRTModelConfig(
                 precision=TensorRTPrecision.FP16,
@@ -142,7 +150,11 @@ def test_find_device_max_batch_size_builder_return_execution_unit_when_jax_frame

     models_config = {
         Format.TF_SAVEDMODEL: [TensorFlowSavedModelConfig(enable_xla=True, jit_compile=True)],
-        Format.ONNX: [ONNXModelConfig(opset=17, dynamic_axes={}, dynamo_export=False, graph_surgeon_optimization=True)],
+        Format.ONNX: [
+            ONNXModelConfig(
+                opset=DEFAULT_ONNX_OPSET, dynamic_axes={}, dynamo_export=False, graph_surgeon_optimization=True
+            )
+        ],
         Format.TENSORRT: [
             TensorRTModelConfig(
                 precision=TensorRTPrecision.FP16,
@@ -180,7 +192,11 @@ def test_find_device_max_batch_size_builder_return_execution_unit_when_onnx_fram
     )

     models_config = {
-        Format.ONNX: [ONNXModelConfig(opset=17, dynamic_axes={}, dynamo_export=False, graph_surgeon_optimization=True)],
+        Format.ONNX: [
+            ONNXModelConfig(
+                opset=DEFAULT_ONNX_OPSET, dynamic_axes={}, dynamo_export=False, graph_surgeon_optimization=True
+            )
+        ],
         Format.TENSORRT: [
             TensorRTModelConfig(
                 precision=TensorRTPrecision.FP16,

tests/unit/base/test_utils_config_helpers.py

Lines changed: 6 additions & 2 deletions
@@ -20,7 +20,7 @@
     TensorRTProfile,
 )
 from model_navigator.configuration.common_config import CommonConfig
-from model_navigator.configuration.constants import DEFAULT_MAX_WORKSPACE_SIZE
+from model_navigator.configuration.constants import DEFAULT_MAX_WORKSPACE_SIZE, DEFAULT_ONNX_OPSET
 from model_navigator.configuration.model.model_config import (
     ONNXModelConfig,
     TensorRTModelConfig,
@@ -70,7 +70,11 @@ def test_do_find_device_max_batch_size_return_false_when_no_adaptive_formats():
     )

     models_config = {
-        Format.ONNX: [ONNXModelConfig(opset=17, dynamic_axes={}, dynamo_export=False, graph_surgeon_optimization=True)],
+        Format.ONNX: [
+            ONNXModelConfig(
+                opset=DEFAULT_ONNX_OPSET, dynamic_axes={}, dynamo_export=False, graph_surgeon_optimization=True
+            )
+        ],
         Format.TORCH: [TorchModelConfig(autocast=False, inference_mode=True)],
     }


tests/unit/tensorflow/test_pipelines_builders_graph_surgeon.py

Lines changed: 3 additions & 3 deletions
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and

 from model_navigator.commands.optimize.graph_surgeon import GraphSurgeonOptimize
-from model_navigator.configuration import DeviceKind, Format, OptimizationProfile
+from model_navigator.configuration import DEFAULT_ONNX_OPSET, DeviceKind, Format, OptimizationProfile
 from model_navigator.configuration.common_config import CommonConfig
 from model_navigator.configuration.model.model_config import ONNXModelConfig
 from model_navigator.frameworks import Framework
@@ -34,7 +34,7 @@ def test_tensorflow_conversion_builder_return_graph_surgeon_optimization_when_en
     models_config = {
         Format.ONNX: [
             ONNXModelConfig(
-                opset=17,
+                opset=DEFAULT_ONNX_OPSET,
                 dynamic_axes={},
                 dynamo_export=False,
                 graph_surgeon_optimization=True,
@@ -61,7 +61,7 @@ def test_tensorflow_conversion_builder_does_not_return_graph_surgeon_optimizatio
     models_config = {
         Format.ONNX: [
             ONNXModelConfig(
-                opset=17,
+                opset=DEFAULT_ONNX_OPSET,
                 dynamic_axes={},
                 dynamo_export=False,
                 graph_surgeon_optimization=False,

tests/unit/torch/test_pipelines_builders_graph_surgeon.py

Lines changed: 11 additions & 5 deletions
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and

 from model_navigator.commands.optimize.graph_surgeon import GraphSurgeonOptimize
-from model_navigator.configuration import DeviceKind, Format, JitType, OptimizationProfile
+from model_navigator.configuration import DEFAULT_ONNX_OPSET, DeviceKind, Format, JitType, OptimizationProfile
 from model_navigator.configuration.common_config import CommonConfig
 from model_navigator.configuration.model.model_config import ONNXModelConfig, TorchScriptModelConfig
 from model_navigator.frameworks import Framework
@@ -32,7 +32,11 @@ def test_torch_export_builder_return_graph_surgeon_optimization_when_enabled():
     )

     models_config = {
-        Format.ONNX: [ONNXModelConfig(opset=17, dynamic_axes={}, dynamo_export=False, graph_surgeon_optimization=True)],
+        Format.ONNX: [
+            ONNXModelConfig(
+                opset=DEFAULT_ONNX_OPSET, dynamic_axes={}, dynamo_export=False, graph_surgeon_optimization=True
+            )
+        ],
     }
     pipeline = torch_export_builder(config=config, models_config=models_config)
     assert len(pipeline.execution_units) == 2
@@ -53,7 +57,9 @@ def test_torch_export_builder_does_not_return_graph_surgeon_optimization_when_di

     models_config = {
         Format.ONNX: [
-            ONNXModelConfig(opset=17, dynamic_axes={}, dynamo_export=False, graph_surgeon_optimization=False)
+            ONNXModelConfig(
+                opset=DEFAULT_ONNX_OPSET, dynamic_axes={}, dynamo_export=False, graph_surgeon_optimization=False
+            )
         ],
     }
     pipeline = torch_export_builder(config=config, models_config=models_config)
@@ -75,7 +81,7 @@ def test_torch_conversion_builder_return_graph_surgeon_optimization_when_enabled
     models_config = {
         Format.ONNX: [
             ONNXModelConfig(
-                opset=17,
+                opset=DEFAULT_ONNX_OPSET,
                 dynamic_axes={},
                 dynamo_export=False,
                 graph_surgeon_optimization=True,
@@ -105,7 +111,7 @@ def test_torch_conversion_builder_does_not_return_graph_surgeon_optimization_whe
     models_config = {
         Format.ONNX: [
             ONNXModelConfig(
-                opset=17,
+                opset=DEFAULT_ONNX_OPSET,
                 dynamic_axes={},
                 dynamo_export=False,
                 graph_surgeon_optimization=False,
