15 changes: 13 additions & 2 deletions backends/cortex_m/test/build_test_runner.sh
@@ -14,9 +14,20 @@ et_root_dir=$(realpath "${script_dir}/../../..")
 build_executorch="${et_root_dir}/backends/arm/scripts/build_executorch.sh"
 ${build_executorch}
 
-# Build executor runner with all portable ops selected and semi hosting
+# Build executor runner with selected aten ops and semi hosting
 build_dir="${et_root_dir}/arm_test"
 build_executor_runner="${et_root_dir}/backends/arm/scripts/build_executor_runner.sh"
 build_root_test_dir="${et_root_dir}/arm_test/arm_semihosting_executor_runner_corstone-300"
 
-${build_executor_runner} --pte=semihosting --target=ethos-u55-128 --output="${build_root_test_dir}"
+select_ops_list="\
+aten::add.out,\
+aten::clamp.out,\
+aten::convolution.out,\
+aten::div.out,\
+aten::mean.out,\
+aten::mul.out,\
+aten::relu.out,\
+aten::view_copy.out,\
+dim_order_ops::_to_dim_order_copy.out"
+
+${build_executor_runner} --pte=semihosting --target=ethos-u55-128 --output="${build_root_test_dir}" --select_ops_list="${select_ops_list}"
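For reference, a minimal Python sketch (not part of this change) that assembles the same --select_ops_list value and invokes the build script; the script path, flags, and op names are taken from the lines above, with paths given relative to the ExecuTorch root:

import subprocess

# aten ops the cortex_m test models still run through the selected portable kernels.
selected_ops = [
    "aten::add.out",
    "aten::clamp.out",
    "aten::convolution.out",
    "aten::div.out",
    "aten::mean.out",
    "aten::mul.out",
    "aten::relu.out",
    "aten::view_copy.out",
    "dim_order_ops::_to_dim_order_copy.out",
]

subprocess.run(
    [
        "backends/arm/scripts/build_executor_runner.sh",
        "--pte=semihosting",
        "--target=ethos-u55-128",
        "--output=arm_test/arm_semihosting_executor_runner_corstone-300",
        "--select_ops_list=" + ",".join(selected_ops),
    ],
    check=True,
)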
4 changes: 4 additions & 0 deletions backends/cortex_m/test/models/__init__.py
@@ -0,0 +1,4 @@
# Copyright 2025 Arm Limited and/or its affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
74 changes: 74 additions & 0 deletions backends/cortex_m/test/models/test_mobilenet_v3.py
@@ -0,0 +1,74 @@
# Copyright 2025 Arm Limited and/or its affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import pytest
import torch

from executorch.backends.cortex_m.test.tester import CortexMTester, McuTestCase
from torchvision import models


# TODO: Update as more ops are converted to CMSIS-NN ops.
ops_before_transforms: dict[str, int] = {
"executorch_exir_dialects_edge__ops_aten_add_Tensor": 34,
"executorch_exir_dialects_edge__ops_aten_addmm_default": 2,
"executorch_exir_dialects_edge__ops_aten_clamp_default": 56,
"executorch_exir_dialects_edge__ops_aten_convolution_default": 52,
"executorch_exir_dialects_edge__ops_aten_div_Tensor": 28,
"executorch_exir_dialects_edge__ops_aten_mean_dim": 10,
"executorch_exir_dialects_edge__ops_aten_mul_Tensor": 28,
"executorch_exir_dialects_edge__ops_aten_permute_copy_default": 2,
"executorch_exir_dialects_edge__ops_aten_relu_default": 14,
"executorch_exir_dialects_edge__ops_aten_view_copy_default": 1,
"executorch_exir_dialects_edge__ops_dim_order_ops__to_dim_order_copy_default": 56,
"executorch_exir_dialects_edge__ops_quantized_decomposed_dequantize_per_tensor_default": 178,
"executorch_exir_dialects_edge__ops_quantized_decomposed_quantize_per_tensor_default": 109,
}
ops_after_transforms: dict[str, int] = {
"executorch_exir_dialects_edge__ops_aten_add_Tensor": 28, # Not lowered due to broadcasting
"executorch_exir_dialects_edge__ops_aten_addmm_default": 0,
"executorch_exir_dialects_edge__ops_cortex_m_quantized_add_default": 6,
"executorch_exir_dialects_edge__ops_cortex_m_quantized_linear_default": 2,
"executorch_exir_dialects_edge__ops_aten_clamp_default": 56,
"executorch_exir_dialects_edge__ops_aten_convolution_default": 52,
"executorch_exir_dialects_edge__ops_aten_div_Tensor": 28,
"executorch_exir_dialects_edge__ops_aten_mean_dim": 10,
"executorch_exir_dialects_edge__ops_aten_mul_Tensor": 28,
"executorch_exir_dialects_edge__ops_aten_permute_copy_default": 0,
"executorch_exir_dialects_edge__ops_aten_relu_default": 14,
"executorch_exir_dialects_edge__ops_aten_view_copy_default": 1,
"executorch_exir_dialects_edge__ops_dim_order_ops__to_dim_order_copy_default": 56,
"executorch_exir_dialects_edge__ops_quantized_decomposed_dequantize_per_tensor_default": 0,
"executorch_exir_dialects_edge__ops_quantized_decomposed_quantize_per_tensor_default": 0,
"executorch_exir_dialects_edge__ops_cortex_m_dequantize_per_tensor_default": 162,
"executorch_exir_dialects_edge__ops_cortex_m_quantize_per_tensor_default": 101,
}
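# Editorial note on the counts above (not part of the committed file): after the
# cortex_m transforms, the 2 aten addmm nodes become 2 cortex_m quantized_linear
# nodes, 6 of the 34 aten add nodes become cortex_m quantized_add (the remaining
# 28 broadcast and stay as aten adds), and the decomposed quantize/dequantize ops
# are rewritten to their cortex_m counterparts; the lower quantize/dequantize
# counts presumably reflect q/dq pairs folded into the fused quantized ops.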

example_input = torch.randn(1, 3, 224, 224)


test_cases = {
"mobilenet_v3_small": McuTestCase(
model=models.mobilenet_v3_small(weights=None),
example_inputs=(example_input,),
),
}


@pytest.mark.skip("Skip until the add + linear fixes are upstreamed.")
def test_dialect_mv3(test_case: McuTestCase) -> None:
tester = CortexMTester(test_case.model, test_case.example_inputs)
tester.test_dialect(
ops_before_transforms,
ops_after_transforms,
qtol=1,
)


@pytest.mark.skip("Skip until the add + linear fixes are upstreamed.")
def test_implementation_mv3(test_case: McuTestCase) -> None:
tester = CortexMTester(test_case.model, test_case.example_inputs)
tester.test_implementation(qtol=1)
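As an editorial aside, the two tests above take a test_case argument but no parametrization appears in this diff; a minimal sketch of how they could be driven over the module-level test_cases dict with plain pytest follows (the repository may instead use its own parametrize helper, so treat the decorator wiring as an assumption):

import pytest

from executorch.backends.cortex_m.test.tester import CortexMTester, McuTestCase


# Hypothetical wiring: reuses test_cases, ops_before_transforms and
# ops_after_transforms defined earlier in this file.
@pytest.mark.parametrize("test_case", test_cases.values(), ids=test_cases.keys())
def test_dialect_mv3_parametrized(test_case: McuTestCase) -> None:
    tester = CortexMTester(test_case.model, test_case.example_inputs)
    tester.test_dialect(ops_before_transforms, ops_after_transforms, qtol=1)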
98 changes: 98 additions & 0 deletions backends/cortex_m/test/ops/test_lstm.py
@@ -0,0 +1,98 @@
# Copyright 2025 Arm Limited and/or its affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.


import pytest
import torch
from executorch.backends.cortex_m.test.tester import (
CortexMTester,
McuTestCase,
ramp_tensor,
)


class CortexMLSTM(torch.nn.Module):
ops_before_transforms = {
"executorch_exir_dialects_edge__ops_aten_full_default": 2,
"executorch_exir_dialects_edge__ops_aten_squeeze_copy_dims": 4,
"executorch_exir_dialects_edge__ops_aten_unsqueeze_copy_default": 2,
"executorch_exir_dialects_edge__ops_aten_view_copy_default": 6,
"executorch_exir_dialects_edge__ops_aten_permute_copy_default": 3,
"executorch_exir_dialects_edge__ops_aten_addmm_default": 3,
"executorch_exir_dialects_edge__ops_aten_slice_copy_Tensor": 2,
"executorch_exir_dialects_edge__ops_aten_add_Tensor": 4,
"executorch_exir_dialects_edge__ops_aten_split_with_sizes_copy_default": 2,
"executorch_exir_dialects_edge__ops_aten_sigmoid_default": 6,
"executorch_exir_dialects_edge__ops_aten_tanh_default": 4,
"executorch_exir_dialects_edge__ops_aten_mul_Tensor": 6,
"executorch_exir_dialects_edge__ops_aten_cat_default": 1,
}

ops_after_transforms = {}

def __init__(self, input_size: int = 4, hidden_size: int = 3) -> None:
super().__init__()
self.lstm = torch.nn.LSTM(input_size=input_size, hidden_size=hidden_size)

def forward(self, x: torch.Tensor) -> torch.Tensor:
y, _ = self.lstm(x)
return y


class CortexMQuantizableLSTM(torch.nn.Module):
ops_before_transforms = {
"executorch_exir_dialects_edge__ops_aten_add_Tensor": 4,
"executorch_exir_dialects_edge__ops_aten_addmm_default": 4,
"executorch_exir_dialects_edge__ops_aten_cat_default": 1,
"executorch_exir_dialects_edge__ops_aten_full_default": 1,
"executorch_exir_dialects_edge__ops_aten_mul_Tensor": 6,
"executorch_exir_dialects_edge__ops_aten_permute_copy_default": 4,
"executorch_exir_dialects_edge__ops_aten_select_copy_int": 2,
"executorch_exir_dialects_edge__ops_aten_sigmoid_default": 6,
"executorch_exir_dialects_edge__ops_aten_split_with_sizes_copy_default": 2,
"executorch_exir_dialects_edge__ops_aten_squeeze_copy_dims": 1,
"executorch_exir_dialects_edge__ops_aten_tanh_default": 4,
"executorch_exir_dialects_edge__ops_aten_view_copy_default": 1,
"executorch_exir_dialects_edge__ops_quantized_decomposed_dequantize_per_tensor_default": 34,
"executorch_exir_dialects_edge__ops_quantized_decomposed_quantize_per_tensor_default": 27,
}

ops_after_transforms = {}

def __init__(self, input_size: int = 4, hidden_size: int = 3) -> None:
super().__init__()
self.lstm = torch.ao.nn.quantizable.LSTM(
input_size=input_size, hidden_size=hidden_size
)

def forward(self, x: torch.Tensor) -> torch.Tensor:
y, _ = self.lstm(x)
return y


test_cases = {
"lstm_fp32": McuTestCase(
model=CortexMLSTM(),
example_inputs=(ramp_tensor(-1, 1, (2, 1, 4)),),
),
"lstm_quantizable": McuTestCase(
model=CortexMQuantizableLSTM(),
example_inputs=(ramp_tensor(-1, 1, (2, 1, 4)),),
),
}


@pytest.mark.skip("Not implemented yet.")
def test_dialect_lstm(test_case: McuTestCase) -> None:
tester = CortexMTester(test_case.model, test_case.example_inputs)
tester.test_dialect(
test_case.model.ops_before_transforms, test_case.model.ops_after_transforms
)


@pytest.mark.skip("Not implemented yet.")
def test_implementation_lstm(test_case: McuTestCase) -> None:
tester = CortexMTester(test_case.model, test_case.example_inputs)
tester.test_implementation()
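ramp_tensor is imported from the tester module but its definition is not part of this diff; the sketch below is an assumed stand-in (evenly spaced values reshaped to the requested shape), shown only to make the example inputs above concrete:

import math

import torch


def ramp_tensor_sketch(start: float, end: float, shape: tuple[int, ...]) -> torch.Tensor:
    # Assumed behaviour: values linearly spaced from start to end, reshaped to `shape`.
    return torch.linspace(start, end, steps=math.prod(shape)).reshape(shape)


# Stand-in for ramp_tensor(-1, 1, (2, 1, 4)) used by the LSTM test cases above.
example_input = ramp_tensor_sketch(-1.0, 1.0, (2, 1, 4))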