
Commit 2fa2d48

Merge branch 'main' into export-D80754181
2 parents: 09baae4 + 2100527

24 files changed: +994 -109 lines

.ci/scripts/unittest-macos-cmake.sh
Lines changed: 1 addition & 0 deletions

@@ -11,3 +11,4 @@ ${CONDA_RUN} pytest -n auto --cov=./ --cov-report=xml
 # Run gtest
 LLVM_PROFDATA="xcrun llvm-profdata" LLVM_COV="xcrun llvm-cov" \
   ${CONDA_RUN} test/run_oss_cpp_tests.sh
+${CONDA_RUN} test/check_for_installed_private_headers_in_cmake_out.sh

.github/workflows/periodic.yml
Lines changed: 5 additions & 2 deletions

@@ -11,6 +11,8 @@ on:
     branches:
       - release/*
   workflow_dispatch:
+  pull_request:
+    types: [opened, synchronize, reopened, labeled, unlabeled]
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}-${{ github.event.schedule }}
@@ -32,10 +34,11 @@ jobs:
           python-version: '3.10'
       - name: Extract the list of models to test
         id: gather-models
+        env:
+          EFFECTIVE_EVENT: ${{ github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 'ciflow/periodic') && 'schedule' || github.event_name }}
         run: |
           set -eux
-
-          PYTHONPATH="${PWD}" python .ci/scripts/gather_test_models.py --event "${GITHUB_EVENT_NAME}"
+          PYTHONPATH="${PWD}" python .ci/scripts/gather_test_models.py --event "${EFFECTIVE_EVENT}"
 
   test-models-linux:
     name: test-models-linux
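
Note: the EFFECTIVE_EVENT value uses the `cond && x || y` idiom that GitHub Actions expressions rely on as a ternary, so a pull request carrying the ciflow/periodic label is treated as a scheduled run when gathering models. A rough Python model of that logic (the function name is hypothetical, for intuition only; the real evaluation happens in the Actions runner):

def effective_event(event_name: str, pr_labels: list[str]) -> str:
    # Mirrors `github.event_name == 'pull_request'
    # && contains(labels, 'ciflow/periodic') && 'schedule' || github.event_name`.
    if event_name == "pull_request" and "ciflow/periodic" in pr_labels:
        return "schedule"
    return event_name

assert effective_event("pull_request", ["ciflow/periodic"]) == "schedule"
assert effective_event("pull_request", []) == "pull_request"
assert effective_event("schedule", []) == "schedule"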

CMakeLists.txt
Lines changed: 13 additions & 0 deletions

@@ -485,24 +485,29 @@ install(
   DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/executorch/runtime/core
   FILES_MATCHING
   PATTERN "*.h"
+  PATTERN "testing_util" EXCLUDE
 )
 install(
   DIRECTORY runtime/executor/
   DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/executorch/runtime/executor
   FILES_MATCHING
   PATTERN "*.h"
+  PATTERN "test" EXCLUDE
+  PATTERN "platform_memory_allocator.h" EXCLUDE
 )
 install(
   DIRECTORY runtime/kernel/
   DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/executorch/runtime/kernel
   FILES_MATCHING
   PATTERN "*.h"
+  PATTERN "test" EXCLUDE
 )
 install(
   DIRECTORY runtime/platform/
   DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/executorch/runtime/platform
   FILES_MATCHING
   PATTERN "*.h"
+  PATTERN "test" EXCLUDE
 )
 install(
   DIRECTORY extension/kernel_util/
@@ -587,11 +592,15 @@ endif()
 
 if(EXECUTORCH_BUILD_EXTENSION_DATA_LOADER)
   add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/data_loader)
+  if(NOT WIN32)
+    set(data_loader_exclude_pattern "*mman_windows.h")
+  endif()
   install(
     DIRECTORY extension/data_loader/
     DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/executorch/extension/data_loader
     FILES_MATCHING
     PATTERN "*.h"
+    PATTERN ${data_loader_exclude_pattern} EXCLUDE
   )
   list(APPEND _executorch_extensions extension_data_loader)
 endif()
@@ -871,6 +880,10 @@ if(EXECUTORCH_BUILD_WASM)
   add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/wasm)
 endif()
 
+if(EXECUTORCH_BUILD_TOKENIZERS_WASM)
+  add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/wasm/tokenizers)
+endif()
+
 if(EXECUTORCH_BUILD_EXTENSION_TRAINING)
   add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/training)
   list(APPEND _executorch_extensions extension_training)
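
Note: the new EXCLUDE patterns keep test-only and private headers out of the installed include tree, which is what the new CI check above (check_for_installed_private_headers_in_cmake_out.sh) enforces. A loose fnmatch-based sketch of the intended effect, with made-up paths; CMake's PATTERN matching has its own semantics, so this is intuition only:

from fnmatch import fnmatch

def would_install(path: str) -> bool:
    # Approximates FILES_MATCHING PATTERN "*.h" plus the new EXCLUDE patterns.
    if not fnmatch(path, "*.h"):
        return False
    excludes = ("*/test/*", "*/testing_util/*", "*platform_memory_allocator.h")
    return not any(fnmatch(path, pattern) for pattern in excludes)

print(would_install("runtime/executor/method.h"))                     # True
print(would_install("runtime/executor/platform_memory_allocator.h"))  # False
print(would_install("runtime/kernel/test/some_test_header.h"))        # False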

backends/apple/coreml/compiler/torch_ops.py
Lines changed: 41 additions & 17 deletions

@@ -47,24 +47,48 @@ def split_copy(context, node):
     split(context, node)
 
 
-@register_torch_op(
-    torch_alias=[
-        "dim_order_ops::_to_dim_order_copy",
-        "dim_order_ops._to_dim_order_copy",
-    ],
-    override=False,
-)
-def _to_dim_order_copy(context, node):
-    dim_order = _get_kwinputs(context, node, "dim_order", default=[None])[0]
-    node.kwinputs.pop("dim_order")
+def is_fbcode():
+    return not hasattr(_torch.version, "git_version")
 
-    # In CoreML, dim_order.val will be an ndarray, so we convert it to a list
-    dim_order = [int(d) for d in dim_order.val]
-    memory_format = get_memory_format(dim_order)
-    assert (
-        memory_format == _torch.contiguous_format
-    ), "Only contiguous memory format is supported in CoreML"
-    to(context, node)
+
+if not is_fbcode():
+    from coremltools.converters.mil.frontend.torch.dim_order_ops import (
+        _empty_dim_order,
+        _to_dim_order_copy,
+    )
+
+    # This is a temporary hack to register the alias "dim_order_ops._to_dim_order_copy",
+    # which was missed by coremltools
+    @register_torch_op(torch_alias=["dim_order_ops._to_dim_order_copy"], override=False)
+    def _to_dim_order_copy_TMP_EXECUTORCH_ALIAS_HACK(context, node):
+        _to_dim_order_copy(context, node)
+
+    # This is a temporary hack to register the alias "dim_order_ops._empty_dim_order",
+    # which was missed by coremltools
+    @register_torch_op(torch_alias=["dim_order_ops._empty_dim_order"], override=False)
+    def _empty_dim_order_TMP_EXECUTORCH_ALIAS_HACK(context, node):
+        _empty_dim_order(context, node)
+
+else:
+    # TODO: remove this case when fbcode updates to coremltools 9.0
+    @register_torch_op(
+        torch_alias=[
+            "dim_order_ops::_to_dim_order_copy",
+            "dim_order_ops._to_dim_order_copy",
+        ],
+        override=False,
+    )
+    def _to_dim_order_copy(context, node):
+        dim_order = _get_kwinputs(context, node, "dim_order", default=[None])[0]
+        node.kwinputs.pop("dim_order")
+
+        # In CoreML, dim_order.val will be an ndarray, so we convert it to a list
+        dim_order = [int(d) for d in dim_order.val]
+        memory_format = get_memory_format(dim_order)
+        assert (
+            memory_format == _torch.contiguous_format
+        ), "Only contiguous memory format is supported in CoreML"
+        to(context, node)
 
 
 # https://github.com/apple/coremltools/pull/2558
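
Note: the branch taken above hinges on is_fbcode(): OSS builds of PyTorch expose torch.version.git_version, while Meta-internal fbcode builds do not. A self-contained sketch of the same detection, assuming only that torch is importable, to see which branch your environment would take:

import torch

def is_fbcode() -> bool:
    # Same check as the diff: fbcode builds of torch lack a git_version.
    return not hasattr(torch.version, "git_version")

print("fbcode build" if is_fbcode() else "OSS build -> use coremltools dim_order_ops")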

backends/apple/coreml/scripts/install_requirements.sh
Lines changed: 1 addition & 1 deletion

@@ -12,7 +12,7 @@ SCRIPT_DIR_PATH="$(
 
 # TODO(jathu): remove the need to fetch coremltools to build deps for coreml_executor_runner.
 # Keep this version in sync with: pyproject.toml
-COREMLTOOLS_VERSION="8.3"
+COREMLTOOLS_VERSION="9.0b1"
 
 red=`tput setaf 1`
 green=`tput setaf 2`

backends/nxp/backend/ir/converter/conversion/common.py
Lines changed: 12 additions & 19 deletions

@@ -70,29 +70,22 @@ def try_get_input(t_op: tflite_model.Operator, idx: int) -> tflite_model.Tensor
     return tensor
 
 
-def extend_1d_pads_to_2d(onnx_1d_pads: MutableSequence):
-    """Extend the onnx 'pads' operator attribute that represents padding for a 1D kernel to 2D, by adding '0's."""
-    if onnx_1d_pads is not None:
-        onnx_1d_pads.insert(1, 0)
-        onnx_1d_pads.append(0)
+def extend_1d_padding_to_2d(tflite_1d_padding: MutableSequence):
+    """Extend the PyTorch 'padding' operator attribute that represents padding for a 1D kernel to 2D, by adding '0's."""
+    if tflite_1d_padding is not None:
+        tflite_1d_padding.append(0)
 
 
-def extend_1d_strides_to_2d(onnx_1d_strides: MutableSequence):
-    """Extend the onnx 'strides' operator attribute that represents strides for a 1D kernel to 2D, by adding '1'."""
-    if onnx_1d_strides is not None:
-        onnx_1d_strides.append(1)
+def extend_1d_stride_to_2d(tflite_1d_stride: MutableSequence):
+    """Extend the PyTorch 'stride' operator attribute that represents stride for a 1D kernel to 2D, by adding '1'."""
+    if tflite_1d_stride is not None:
+        tflite_1d_stride.append(1)
 
 
-def extend_1d_dilations_to_2d(onnx_1d_dilations: MutableSequence):
-    """Extend the onnx 'dilations' operator attribute that represents dilations for a 1D kernel to 2D, by adding '1'."""
-    if onnx_1d_dilations is not None:
-        onnx_1d_dilations.append(1)
-
-
-def extend_1d_kernel_shape_to_2d(onnx_1d_kernel_shape: MutableSequence):
-    """Extend the onnx 1D 'kernel_shape' operator attribute to 2D, by adding '1'."""
-    if onnx_1d_kernel_shape is not None:
-        onnx_1d_kernel_shape.append(1)
+def extend_1d_dilation_to_2d(tflite_1d_dilation: MutableSequence):
+    """Extend the PyTorch 'dilation' operator attribute that represents dilation for a 1D kernel to 2D, by adding '1'."""
+    if tflite_1d_dilation is not None:
+        tflite_1d_dilation.append(1)
 
 
 StridedOptions = (
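
Note: the renamed helpers follow PyTorch's 1D conv attribute convention (single-element lists) rather than ONNX's double-ended pads, which is why the padding helper now appends a single 0 instead of inserting at both ends. A minimal usage sketch, assuming the module path shown in this diff; the lists are mutated in place:

from executorch.backends.nxp.backend.ir.converter.conversion.common import (
    extend_1d_dilation_to_2d,
    extend_1d_padding_to_2d,
    extend_1d_stride_to_2d,
)

stride, padding, dilation = [2], [1], [1]
extend_1d_stride_to_2d(stride)      # [2] -> [2, 1]: stride 1 on the dummy dim
extend_1d_padding_to_2d(padding)    # [1] -> [1, 0]: no padding on the dummy dim
extend_1d_dilation_to_2d(dilation)  # [1] -> [1, 1]: dilation 1 on the dummy dim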

backends/nxp/backend/ir/converter/node_converters/ops_converters/convolution_converter.py
Lines changed: 109 additions & 3 deletions

@@ -14,6 +14,7 @@
 from executorch.backends.nxp.backend.ir.converter.conversion import (
     aten_translator,
     common,
+    translator,
 )
 from executorch.backends.nxp.backend.ir.converter.conversion.common import try_get_input
 from executorch.backends.nxp.backend.ir.converter.conversion.translator import (
@@ -40,6 +41,7 @@
 from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options import (
     conv_2d_options,
     depthwise_conv_2d_options,
+    reshape_options,
 )
 from torch.fx import Node
 from torch.nn import Parameter
@@ -94,13 +96,15 @@ def _is_supported_in_IR(
         parameters_mapping: dict[str, Parameter],
         custom_delegation_options: CustomDelegationOptions,
     ) -> bool:
+        input_tensor_rank = len(node.meta["val"].shape)
+        dimensions = input_tensor_rank - 2
         is_transposed = node.args[6]
         output_padding = node.args[7]
 
         if is_transposed:
             return False
 
-        if output_padding != [0, 0]:
+        if output_padding != [0] * dimensions:
             return False
 
         if input_tensor_safe(node, 2) is None:
@@ -125,7 +129,107 @@ def _get_convolution_arguments(
         _, _, _, stride, padding, dilation, transposed, out_padding, groups = (
             conv_node.args
         )
-        return stride, padding, dilation, transposed, out_padding, groups
+        return (
+            list(stride),
+            list(padding),
+            list(dilation),
+            transposed,
+            out_padding,
+            groups,
+        )
+
+    def _convert_1d_conv(
+        self, t_op: tflite_model.Operator, conv_params: ConvParameters
+    ) -> list[tflite_model.Operator]:
+        """Convert the 'Conv' operator with a 1D kernel to TFLite 'Conv2D'.
+        TFLite doesn't support 1D convolution, but this behaviour can be represented using
+        Reshape -> Conv2D -> Reshape.
+        The first Reshape introduces a 4th dimension with size 1. The second Reshape removes the temporary dimension.
+        """
+        # -- Calculate the shapes for equivalent 2D convolution --
+        conv_2d_input_shape = translator.nhc_dimensions_to_nhwc(
+            t_op.tmp_inputs[0].shape.vector
+        )
+        conv_2d_weight_shape = translator.nhc_dimensions_to_nhwc(
+            t_op.tmp_inputs[1].shape.vector
+        )
+        conv_2d_output_shape = translator.nhc_dimensions_to_nhwc(
+            t_op.tmp_outputs[0].shape.vector
+        )
+
+        # -- Generate tensors taking part in the conversion --
+        reshape1_input = t_op.tmp_inputs[0]
+
+        reshape1_output = self.builder.duplicate_tensor(
+            reshape1_input, name_suffix="_4D_"
+        )
+        reshape1_output.shape = tflite_model.Shape(conv_2d_input_shape)
+
+        reshape2_input = self.builder.duplicate_tensor(
+            t_op.tmp_outputs[0], name_suffix="_4D_"
+        )
+        reshape2_input.shape = tflite_model.Shape(conv_2d_output_shape)
+
+        reshape2_output = t_op.tmp_outputs[0]
+
+        pre_reshapes = []
+
+        # Extend the weights tensor to 4D
+        weights_tensor = t_op.tmp_inputs[1]
+        if tensor_has_data(weights_tensor):
+            # Do it statically
+            weights_tensor.shape = tflite_model.Shape(conv_2d_weight_shape)
+            weights_tensor.tmp_buffer.data = weights_tensor.tmp_buffer.data.reshape(
+                conv_2d_weight_shape
+            )
+
+        else:
+            # Add a Reshape before the weights tensor
+            new_weights_tensor = self.builder.duplicate_tensor(
+                weights_tensor, name_suffix="_4D_"
+            )
+            new_weights_tensor.shape = tflite_model.Shape(conv_2d_weight_shape)
+
+            weight_reshape = tflite_model.Operator(
+                builtin_options=reshape_options.Reshape(conv_2d_weight_shape)
+            )
+            weight_reshape.tmp_inputs = [weights_tensor]
+            weight_reshape.tmp_outputs = [new_weights_tensor]
+
+            pre_reshapes.append(weight_reshape)
+
+            # Save the new weights tensor, to assign it later.
+            weights_tensor = new_weights_tensor
+
+        # -- Create the new operators --
+        reshape1 = tflite_model.Operator(
+            builtin_options=reshape_options.Reshape(conv_2d_input_shape)
+        )
+        reshape1.tmp_inputs = [reshape1_input]
+        reshape1.tmp_outputs = [reshape1_output]
+        pre_reshapes.append(reshape1)
+
+        reshape2 = tflite_model.Operator(
+            builtin_options=reshape_options.Reshape(reshape2_output.shape.vector)
+        )
+        reshape2.tmp_inputs = [reshape2_input]
+        reshape2.tmp_outputs = [reshape2_output]
+
+        # Assign the new input and output of the Conv2D
+        t_op.tmp_inputs = [reshape1_output, weights_tensor] + t_op.tmp_inputs[
+            2:
+        ]  # Add bias as well, if present
+        t_op.tmp_outputs = [reshape2_input]
+
+        # Extend all Conv attributes to 2D
+        common.extend_1d_stride_to_2d(conv_params.stride)
+        common.extend_1d_dilation_to_2d(conv_params.dilation)
+        common.extend_1d_padding_to_2d(conv_params.padding)
+
+        # Convert the now 2D Conv
+        converted_conv_ops = self._convert_2d_conv(t_op, conv_params)
+
+        return pre_reshapes + converted_conv_ops + [reshape2]
 
     # noinspection PyPep8Naming
     def _convert_unpadded_2D(
@@ -266,7 +370,9 @@ def convert(self, node: Node):
         conv_params = ConvParameters(stride, padding, dilation, groups)
 
         rank = t_op.tmp_inputs[1].shape.len()
-        if rank == 4:  # Conv2D
+        if rank == 3:  # Conv1D
+            ops_to_add = self._convert_1d_conv(t_op, conv_params)
+        elif rank == 4:  # Conv2D
             ops_to_add = self._convert_2d_conv(t_op, conv_params)
         else:
             raise NotImplementedError(
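
Note: the Reshape -> Conv2D -> Reshape trick works because a 1D convolution is exactly a 2D convolution over a dummy spatial dimension of size 1, with identity stride/padding/dilation on that dimension. A plain-PyTorch sanity check of the equivalence (NCW/NCHW layout here for simplicity; the TFLite path uses NHWC and places the dummy dimension per nhc_dimensions_to_nhwc, but the idea is the same):

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 16)  # (N, C, W)
w = torch.randn(8, 3, 5)   # (C_out, C_in, K)

y_1d = F.conv1d(x, w, stride=2, padding=1, dilation=1)

# Insert a dummy H dimension of size 1 and extend stride/padding/dilation
# with the identity values (stride 1, padding 0, dilation 1) on that dim.
y_2d = F.conv2d(
    x.unsqueeze(2), w.unsqueeze(2), stride=(1, 2), padding=(0, 1), dilation=(1, 1)
).squeeze(2)

assert torch.allclose(y_1d, y_2d, atol=1e-5)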

backends/nxp/tests/executorch_pipeline.py
Lines changed: 2 additions & 2 deletions

@@ -104,7 +104,7 @@ def to_quantized_edge_program(
 
 
 def to_quantized_executorch_program(
-    model: torch.nn.Module, input_shapes: tuple[int] | list[tuple[int]]
+    model: torch.nn.Module, input_shapes: tuple[int, ...] | list[tuple[int, ...]]
 ) -> ExecutorchProgramManager:
     edge_program_manager = to_quantized_edge_program(model, input_shapes)
 
@@ -114,7 +114,7 @@ def to_quantized_executorch_program(
 
 
 def to_edge_program(
-    model: nn.Module, input_shapes: tuple[int] | list[tuple[int]]
+    model: nn.Module, input_shapes: tuple[int, ...] | list[tuple[int, ...]]
 ) -> EdgeProgramManager:
     if isinstance(input_shapes, list):
         assert all(isinstance(input_shape, tuple) for input_shape in input_shapes), (
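
Note: this annotation fix matters because in Python typing `tuple[int]` means a fixed-length 1-tuple, so multi-dimensional shapes like (1, 3, 128) never matched the declared type; `tuple[int, ...]` is the variable-length homogeneous form. For illustration (the `describe` helper is hypothetical):

def describe(shape: tuple[int, ...]) -> str:
    # A type checker rejects (1, 3, 128) against tuple[int] (a 1-tuple),
    # but accepts it against tuple[int, ...].
    return f"{len(shape)}-D input of shape {shape}"

print(describe((128,)))       # 1-D input of shape (128,)
print(describe((1, 3, 128)))  # 3-D input of shape (1, 3, 128)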
