
Commit c1d0d3e

NXP backend: Remove residual onnx references (#14457)
### Summary

This PR removes old references to ONNX that were left over from the rapid early stages of development.

### Test plan

No new features are added. Correct behavior is verified by all the existing tests.
1 parent: 0786faa · commit: c1d0d3e

13 files changed: +141 / -902 lines

backends/nxp/backend/ir/conversion_config.py

Lines changed: 1 addition & 11 deletions
@@ -1,4 +1,4 @@
-# Copyright 2024 NXP
+# Copyright 2024-2025 NXP
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
@@ -14,7 +14,6 @@ def __init__(self, args: dict | None = None):
         :param args: Optional dictionary with conversion arguments. Unknown arguments are ignored.
         """
         self.keep_io_format: bool = False
-        self.skip_shape_inference: bool = False
         self.allow_inputs_stripping: bool = True
         self.qdq_aware_conversion: bool = True
         self.symbolic_dimensions_mapping: dict[str, int] | None = None
@@ -46,15 +45,6 @@ def __repr__(self):
         return "ConversionConfig[" + ", ".join(attrs) + "]"
 
 
-class SkipShapeInferenceConfig(ConversionConfig):
-
-    def __init__(self):
-        """
-        Conversion config shortcut with disabled shape inference.
-        """
-        super().__init__({"skip_shape_inference": True})
-
-
 class QDQAwareConfig(ConversionConfig):
 
     def __init__(self):
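
For reference, a minimal usage sketch of the remaining `ConversionConfig` surface after this removal. The import path and the behavior of applying known keys from the `args` dict are assumptions inferred from the docstring and the removed `SkipShapeInferenceConfig` shortcut, not verified here:

```python
# Hedged sketch: assumes the module path below and that ConversionConfig
# applies recognized keys from `args` to its attributes (unknown keys are
# ignored, per the docstring in the diff above).
from executorch.backends.nxp.backend.ir.conversion_config import ConversionConfig

config = ConversionConfig({"keep_io_format": True})
# A leftover {"skip_shape_inference": True} entry would now simply be ignored
# as an unknown argument instead of toggling shape inference.
print(config)  # -> ConversionConfig[...]
```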

backends/nxp/backend/ir/converter/builder/model_builder.py

Lines changed: 13 additions & 34 deletions
@@ -1,6 +1,6 @@
 #
 # Copyright 2023 Martin Pavella
-# Copyright 2023-2024 NXP
+# Copyright 2023-2025 NXP
 #
 # License: MIT
 # See the LICENSE_MIT for more details.
@@ -795,29 +795,8 @@ def _remove_tensor_with_name(self, name):
 
     def append_new_tensor(self, t_tensor: tflite_model.Tensor, overwrite: bool = False):
         """Append the TFLite tensor 't_tensor' to the 'SubGraph.tensors' and register it."""
-
-        if t_tensor.name in self._tensor_name_map.keys():
-            """Tensor has already been added. Sometimes however, ONNX models
-            will have tensors in their 'inputs' or 'outputs', which don't
-            belong there and are in fact static. I this case we need to
-            overwrite the existing tensors."""
-
-            if overwrite:
-                self._remove_tensor_with_name(t_tensor.name)
-
-                # If the tenor previously appeared in ONNX 'inputs' or 'outputs',
-                # the old version MUST be removed from there.
-                self._remove_input_with_name(t_tensor.name)
-                self._remove_output_with_name(t_tensor.name)
-
-                self.get_tensors().append(t_tensor)
-                self._tensor_name_map[t_tensor.name] = t_tensor
-            else:
-                logger.w(f"Tensor '{t_tensor.name}' is already in the tensors!")
-
-        else:
-            self._tensor_name_map[t_tensor.name] = t_tensor
-            self.get_tensors().append(t_tensor)
+        self._tensor_name_map[t_tensor.name] = t_tensor
+        self.get_tensors().append(t_tensor)
 
     def append_new_buffer(self, buffer: tflite_model.Buffer):
         """Append the 'buffer' to the 'model.buffers'."""
@@ -1515,7 +1494,7 @@ def prepare_dynamic_tensor_for_correct_broadcasting_with_channels_first_tensors(
             # Prepend a partial identity, to keep leading dimensions unchanged.
             revert_perm = list(range(rank_diff)) + list(revert_perm)
 
-            # Now add a permutation to convert the extended ONNX shape to a TFLite shape
+            # Now add a permutation to convert the extended ExecuTorch shape to a TFLite shape
             to_tflite_perm = (
                 translator.create_channels_first_to_channels_last_permutation(
                     output_rank
@@ -1579,37 +1558,37 @@ def prepare_static_tensor_for_correct_broadcasting_with_channels_first_tensors(
 
             original_shape = translator.dims_to_channels_first(
                 shape
-            )  # Same shape as in the ONNX model
+            )  # Same shape as in the ExecuTorch model
 
             # Prepend 1s to the shape
-            extended_onnx_shape = [1] * rank_diff + original_shape
+            extended_executorch_shape = [1] * rank_diff + original_shape
 
             # Convert the full shape to TFLite format
-            tflite_shape = translator.dims_to_channels_last(extended_onnx_shape)
+            tflite_shape = translator.dims_to_channels_last(extended_executorch_shape)
             tensor.shape = tflite_model.Shape(tflite_shape)
 
             # Statically transpose the data
             data = translator.convert_data_to_channels_first(
                 data
-            )  # To the same shape as in the ONNX model
-            data = data.reshape(extended_onnx_shape)  # Extend with leading 1s
+            )  # To the same shape as in the ExecuTorch model
+            data = data.reshape(extended_executorch_shape)  # Extend with leading 1s
             tensor.tmp_buffer.data = translator.convert_data_to_channels_last(
                 data
             )  # Convert to TFLite format
 
             assert tflite_shape == list(tensor.tmp_buffer.data.shape)
 
         else:
-            # The tensor is the same as in the ONNX model.
+            # The tensor is the same as in the ExecuTorch model.
 
-            extended_onnx_shape = [1] * rank_diff + shape
+            extended_executorch_shape = [1] * rank_diff + shape
 
             # Convert the full shape to TFLite format
-            tflite_shape = translator.dims_to_channels_last(extended_onnx_shape)
+            tflite_shape = translator.dims_to_channels_last(extended_executorch_shape)
             tensor.shape = tflite_model.Shape(tflite_shape)
 
             # Statically transpose the data
-            data = data.reshape(extended_onnx_shape)  # Extend with leading 1s
+            data = data.reshape(extended_executorch_shape)  # Extend with leading 1s
             tensor.tmp_buffer.data = translator.convert_data_to_channels_last(
                 data
             )  # Convert to TFLite format
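
To make the shape handling in the hunks above concrete, here is a small NumPy sketch of extending a static tensor with leading 1s and moving it from a channels-first to a channels-last layout. The helper below is a stand-in for the translator utilities, and the target rank of 4 is an assumption chosen for the example:

```python
import numpy as np


def dims_to_channels_last(dims):
    # Stand-in for the translator helper: [N, C, *spatial] -> [N, *spatial, C].
    return [dims[0]] + list(dims[2:]) + [dims[1]]


shape = [3, 1]                              # static tensor shape in the model
rank_diff = 4 - len(shape)                  # target rank 4 assumed here
extended_shape = [1] * rank_diff + shape    # prepend 1s -> [1, 1, 3, 1]
tflite_shape = dims_to_channels_last(extended_shape)  # -> [1, 3, 1, 1]

data = np.arange(3).reshape(shape)          # example data
data = data.reshape(extended_shape)         # extend with leading 1s
data = np.transpose(data, [0, 2, 3, 1])     # channels-first -> channels-last
assert list(data.shape) == tflite_shape
```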

backends/nxp/backend/ir/converter/conversion/common.py

Lines changed: 6 additions & 57 deletions
@@ -1,6 +1,6 @@
 #
 # Copyright 2023 Martin Pavella
-# Copyright 2023-2024 NXP
+# Copyright 2023-2025 NXP
 #
 # License: MIT
 # See the LICENSE_MIT for more details.
@@ -12,7 +12,7 @@
     'conversion/builtin/' directory.
 """
 
-from typing import Any, List, MutableSequence, Optional
+from typing import List, MutableSequence, Optional
 
 import executorch.backends.nxp.backend.ir.logger as logger
 from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model
@@ -22,28 +22,8 @@
     max_pool_2d_options,
     transpose_conv_options,
 )
-from torch.fx import Node
-
-
-def exactly_one_is_none(obj1: Optional, obj2: Optional) -> bool:
-    """Determine if exactly 1 of the arguments is None, or not."""
-    return (obj1 is None and obj2 is not None) or (obj1 is not None and obj2 is None)
-
-
-def contains_duplicates(list_to_check: List[Any]) -> bool:
-    """Determine if given list has duplicate elements or not."""
-    return len(list_to_check) != len(set(list_to_check))
-
-
-def clamp(val: int, start: int, end: int) -> int:
-    """Clamp an int value between start and end (inclusive) and return it."""
-    if val < start:
-        return start
-
-    elif val > end:
-        return end
 
-    return val
+from torch.fx import Node
 
 
 def try_get_input(t_op: tflite_model.Operator, idx: int) -> tflite_model.Tensor | None:
@@ -62,11 +42,6 @@ def try_get_input(t_op: tflite_model.Operator, idx: int) -> tflite_model.Tensor
 
     tensor = t_op.tmp_inputs[idx]
 
-    if tensor.name == "":
-        # ONNX allows the name "" for optional tensors. It indicates that the tensor should be ignored, and a default
-        # value should be used. Just like if the tensor was omitted altogether.
-        return None
-
     return tensor
 
 
@@ -101,7 +76,7 @@ def assign_2d_strides(options: StridedOptions, strides: Optional[List[int]]):
     If 'strides' is None, assign 1s.
 
     :param options: TFLite AveragePool2D, Conv2D, MaxPool2D or TransposeConv options object.
-    :param strides: An optional list of ONNX strides attribute.
+    :param strides: An optional list of ExecuTorch strides attribute.
     """
 
     if strides is None:
@@ -115,8 +90,8 @@ def assign_2d_strides(options: StridedOptions, strides: Optional[List[int]]):
 
     else:
         logger.e(
-            logger.Code.INVALID_ONNX_OPERATOR_ATTRIBUTE,
-            f"ONNX operator has invalid 'strides' attribute! ('{strides}')",
+            logger.Code.INVALID_OPERATOR_ATTRIBUTE,
+            f"ExecuTorch operator has invalid 'strides' attribute! ('{strides}')",
         )
 
 
@@ -188,32 +163,6 @@ def node_uses_shape_broadcasting(node: Node) -> bool:
     )
 
 
-def uses_multiple_input_types(t_op: tflite_model.Operator) -> bool:
-    """Determine if the input tensors of given TFLite operator use different data types or not.
-
-    :param t_op: TFLite operator with 'tmp_inputs' initialized.
-    :return: True, if any two input tensors have a different data type.
-             False, if all input tensors use the same data type.
-    """
-
-    if t_op.tmp_inputs is None:
-        logger.e(
-            logger.Code.INTERNAL_ERROR,
-            "common.uses_multiple_input_types(): 'tmp_inputs' are None!",
-        )
-
-    if len(t_op.tmp_inputs) == 0:
-        logger.e(
-            logger.Code.INTERNAL_ERROR,
-            "common.uses_multiple_input_types(): Operator has no inputs!",
-        )
-
-    first_input_type = t_op.tmp_inputs[0].type
-    return any(
-        input_tensor.type != first_input_type for input_tensor in t_op.tmp_inputs[1:]
-    )
-
-
 class OpsList:
     """
     Holder of TFLite operator (middle_op) that can be prefixed (pre_ops) of suffixed (post_ops)
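
A behavioral sketch of what `try_get_input` is expected to do after the hunk above: the ONNX convention that an empty tensor name marks an omitted optional input no longer applies. The bounds check and the stand-in classes below are assumptions made for illustration, not the real `tflite_model` types:

```python
# Illustrative stand-ins; the real types live in tflite_generator.tflite_model.
class _Tensor:
    def __init__(self, name):
        self.name = name


class _Operator:
    def __init__(self, tmp_inputs):
        self.tmp_inputs = tmp_inputs


def try_get_input(t_op, idx):
    # Missing (out-of-range) optional inputs are still reported as None
    # (bounds check assumed from the function's purpose, not shown in the hunk).
    if idx >= len(t_op.tmp_inputs):
        return None
    # An empty name ("") no longer means "ignored tensor" as it did under ONNX.
    return t_op.tmp_inputs[idx]


op = _Operator([_Tensor("weights"), _Tensor("")])
assert try_get_input(op, 1).name == ""   # previously this would have returned None
assert try_get_input(op, 5) is None
```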
