1 | 1 | # |
2 | 2 | # Copyright 2023 Martin Pavella |
3 | | -# Copyright 2023-2024 NXP |
| 3 | +# Copyright 2023-2025 NXP |
4 | 4 | # |
5 | 5 | # License: MIT |
6 | 6 | # See the LICENSE_MIT for more details. |
@@ -795,29 +795,8 @@ def _remove_tensor_with_name(self, name): |
795 | 795 |
796 | 796 | def append_new_tensor(self, t_tensor: tflite_model.Tensor, overwrite: bool = False): |
797 | 797 | """Append the TFLite tensor 't_tensor' to the 'SubGraph.tensors' and register it.""" |
798 | | - |
799 | | - if t_tensor.name in self._tensor_name_map.keys(): |
800 | | - """Tensor has already been added. Sometimes however, ONNX models |
801 | | - will have tensors in their 'inputs' or 'outputs', which don't |
802 | | - belong there and are in fact static. In this case we need to |
803 | | - overwrite the existing tensors.""" |
804 | | - |
805 | | - if overwrite: |
806 | | - self._remove_tensor_with_name(t_tensor.name) |
807 | | - |
808 | | - # If the tensor previously appeared in ONNX 'inputs' or 'outputs', |
809 | | - # the old version MUST be removed from there. |
810 | | - self._remove_input_with_name(t_tensor.name) |
811 | | - self._remove_output_with_name(t_tensor.name) |
812 | | - |
813 | | - self.get_tensors().append(t_tensor) |
814 | | - self._tensor_name_map[t_tensor.name] = t_tensor |
815 | | - else: |
816 | | - logger.w(f"Tensor '{t_tensor.name}' is already in the tensors!") |
817 | | - |
818 | | - else: |
819 | | - self._tensor_name_map[t_tensor.name] = t_tensor |
820 | | - self.get_tensors().append(t_tensor) |
| 798 | + self._tensor_name_map[t_tensor.name] = t_tensor |
| 799 | + self.get_tensors().append(t_tensor) |
821 | 800 |
822 | 801 | def append_new_buffer(self, buffer: tflite_model.Buffer): |
823 | 802 | """Append the 'buffer' to the 'model.buffers'.""" |
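
For illustration, here is a standalone sketch of the registration pattern that the simplified `append_new_tensor` above now follows. The class and field names below are hypothetical stand-ins, not the project's builder or `tflite_model` API; the point is that one list preserves tensor order, one dict provides name lookup, and duplicate-name handling is now the caller's responsibility:

```python
# Hypothetical stand-in classes; only the registration pattern mirrors the
# simplified append_new_tensor above.
from dataclasses import dataclass, field


@dataclass
class _Tensor:
    name: str


@dataclass
class _SubGraphSketch:
    tensors: list = field(default_factory=list)            # ordered tensor list
    _tensor_name_map: dict = field(default_factory=dict)   # name -> tensor

    def append_new_tensor(self, t_tensor: _Tensor) -> None:
        # Register under its name and append; callers must ensure unique names.
        self._tensor_name_map[t_tensor.name] = t_tensor
        self.tensors.append(t_tensor)


sg = _SubGraphSketch()
sg.append_new_tensor(_Tensor("weights"))
assert sg._tensor_name_map["weights"] is sg.tensors[0]
```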
@@ -1515,7 +1494,7 @@ def prepare_dynamic_tensor_for_correct_broadcasting_with_channels_first_tensors( |
1515 | 1494 | # Prepend a partial identity, to keep leading dimensions unchanged. |
1516 | 1495 | revert_perm = list(range(rank_diff)) + list(revert_perm) |
1517 | 1496 |
1518 | | - # Now add a permutation to convert the extended ONNX shape to a TFLite shape |
| 1497 | + # Now add a permutation to convert the extended ExecuTorch shape to a TFLite shape |
1519 | 1498 | to_tflite_perm = ( |
1520 | 1499 | translator.create_channels_first_to_channels_last_permutation( |
1521 | 1500 | output_rank |
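
For reference, a minimal sketch of what a channels-first to channels-last permutation for a given rank typically looks like. The helper body below is an assumption written for illustration; the project's `translator.create_channels_first_to_channels_last_permutation` remains the source of truth:

```python
# Assumed form of a channels-first -> channels-last permutation (illustrative
# only): axis 0 stays, the channel axis 1 moves to the end, spatial axes shift.
def channels_first_to_channels_last_permutation(rank: int) -> list[int]:
    return [0] + list(range(2, rank)) + [1]


assert channels_first_to_channels_last_permutation(4) == [0, 2, 3, 1]  # NCHW -> NHWC
assert channels_first_to_channels_last_permutation(5) == [0, 2, 3, 4, 1]
```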
@@ -1579,37 +1558,37 @@ def prepare_static_tensor_for_correct_broadcasting_with_channels_first_tensors( |
1579 | 1558 |
1580 | 1559 | original_shape = translator.dims_to_channels_first( |
1581 | 1560 | shape |
1582 | | - ) # Same shape as in the ONNX model |
| 1561 | + ) # Same shape as in the ExecuTorch model |
1583 | 1562 |
1584 | 1563 | # Prepend 1s to the shape |
1585 | | - extended_onnx_shape = [1] * rank_diff + original_shape |
| 1564 | + extended_executorch_shape = [1] * rank_diff + original_shape |
1586 | 1565 |
1587 | 1566 | # Convert the full shape to TFLite format |
1588 | | - tflite_shape = translator.dims_to_channels_last(extended_onnx_shape) |
| 1567 | + tflite_shape = translator.dims_to_channels_last(extended_executorch_shape) |
1589 | 1568 | tensor.shape = tflite_model.Shape(tflite_shape) |
1590 | 1569 |
1591 | 1570 | # Statically transpose the data |
1592 | 1571 | data = translator.convert_data_to_channels_first( |
1593 | 1572 | data |
1594 | | - ) # To the same shape as in the ONNX model |
1595 | | - data = data.reshape(extended_onnx_shape) # Extend with leading 1s |
| 1573 | + ) # To the same shape as in the ExecuTorch model |
| 1574 | + data = data.reshape(extended_executorch_shape) # Extend with leading 1s |
1596 | 1575 | tensor.tmp_buffer.data = translator.convert_data_to_channels_last( |
1597 | 1576 | data |
1598 | 1577 | ) # Convert to TFLite format |
1599 | 1578 |
1600 | 1579 | assert tflite_shape == list(tensor.tmp_buffer.data.shape) |
1601 | 1580 |
1602 | 1581 | else: |
1603 | | - # The tensor is the same as in the ONNX model. |
| 1582 | + # The tensor is the same as in the ExecuTorch model. |
1604 | 1583 |
1605 | | - extended_onnx_shape = [1] * rank_diff + shape |
| 1584 | + extended_executorch_shape = [1] * rank_diff + shape |
1606 | 1585 |
1607 | 1586 | # Convert the full shape to TFLite format |
1608 | | - tflite_shape = translator.dims_to_channels_last(extended_onnx_shape) |
| 1587 | + tflite_shape = translator.dims_to_channels_last(extended_executorch_shape) |
1609 | 1588 | tensor.shape = tflite_model.Shape(tflite_shape) |
1610 | 1589 |
1611 | 1590 | # Statically transpose the data |
1612 | | - data = data.reshape(extended_onnx_shape) # Extend with leading 1s |
| 1591 | + data = data.reshape(extended_executorch_shape) # Extend with leading 1s |
1613 | 1592 | tensor.tmp_buffer.data = translator.convert_data_to_channels_last( |
1614 | 1593 | data |
1615 | 1594 | ) # Convert to TFLite format |
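
A hedged NumPy walk-through of the second branch above (the tensor already has its ExecuTorch, channels-first shape) for a concrete case. The helper bodies and shapes are assumptions made for illustration and stand in for the project's `translator` functions:

```python
import numpy as np


def dims_to_channels_last(dims: list[int]) -> list[int]:
    # Assumed behaviour: e.g. [1, 3, 8, 8] (NCHW-style) -> [1, 8, 8, 3] (NHWC-style).
    return [dims[0]] + dims[2:] + [dims[1]]


def convert_data_to_channels_last(data: np.ndarray) -> np.ndarray:
    # Assumed behaviour: move the channel axis (index 1) to the end.
    rank = data.ndim
    return data.transpose([0] + list(range(2, rank)) + [1])


shape = [3, 8, 8]                                      # static operand, channels first
rank_diff = 1                                          # broadcast output has rank 4
extended_executorch_shape = [1] * rank_diff + shape    # [1, 3, 8, 8]
tflite_shape = dims_to_channels_last(extended_executorch_shape)  # [1, 8, 8, 3]

data = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
data = data.reshape(extended_executorch_shape)         # extend with leading 1s
data = convert_data_to_channels_last(data)             # convert to TFLite format
assert list(data.shape) == tflite_shape
```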