13 changes: 9 additions & 4 deletions backends/arm/quantizer/quantization_annotator.py
@@ -334,6 +334,7 @@ def _match_pattern(
torch.ops.aten.transpose.Dimname,
torch.ops.aten.transpose.int,
torch.ops.aten.transpose_copy.int,
+ torch.ops.aten.t_copy.default,
torch.ops.aten.tile.default,
torch.ops.aten.flip.default,
torch.ops.aten.chunk.default,
@@ -513,7 +514,8 @@ def any_or_hardtanh_min_zero(n: Node):
quant_properties.quant_inputs = [
_QuantProperty(0, input_act_qspec),
_QuantProperty(
- 1, input_act_qspec if node.args[0] == node.args[1] else shared_qspec # type: ignore[arg-type]
+ 1,
+ input_act_qspec if node.args[0] == node.args[1] else shared_qspec, # type: ignore[arg-type]
),
]
quant_properties.quant_output = _QuantProperty(0, shared_qspec) # type: ignore[arg-type]
@@ -532,7 +534,8 @@ def any_or_hardtanh_min_zero(n: Node):
)
quant_properties.quant_inputs = [_QuantProperty(0, input_qspec)] # type: ignore[arg-type]
quant_properties.quant_output = _QuantProperty(
- 0, SharedQuantizationSpec((node.args[0], node)) # type: ignore[arg-type]
+ 0,
+ SharedQuantizationSpec((node.args[0], node)), # type: ignore[arg-type]
)
elif node.target in (
torch.ops.aten.cat.default,
@@ -565,7 +568,8 @@ def any_or_hardtanh_min_zero(n: Node):
elif node.target in _one_to_one_shared_input_qspec:
quant_properties.quant_inputs = [_QuantProperty(0, input_act_qspec)]
quant_properties.quant_output = _QuantProperty(
- 0, SharedQuantizationSpec((node.args[0], node)) # type: ignore[arg-type]
+ 0,
+ SharedQuantizationSpec((node.args[0], node)), # type: ignore[arg-type]
)
elif node.target in [
torch.ops.aten.eq.Tensor,
@@ -578,7 +582,8 @@ def any_or_hardtanh_min_zero(n: Node):
quant_properties.quant_inputs = [
_QuantProperty(0, input_act_qspec),
_QuantProperty(
- 1, input_act_qspec if node.args[0] == node.args[1] else shared_qspec # type: ignore[arg-type]
+ 1,
+ input_act_qspec if node.args[0] == node.args[1] else shared_qspec, # type: ignore[arg-type]
),
]
quant_properties.quant_output = None
126 changes: 126 additions & 0 deletions backends/arm/test/ops/test_rsub.py
@@ -0,0 +1,126 @@
# Copyright 2025 Arm Limited and/or its affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from typing import Tuple

import torch

from executorch.backends.arm.test import common
from executorch.backends.arm.test.tester.test_pipeline import (
EthosU55PipelineINT,
EthosU85PipelineINT,
TosaPipelineFP,
TosaPipelineINT,
VgfPipeline,
)

rsub_test_data = {
"rand_2D_4x4": lambda: (torch.rand(4, 4), 2),
"rand_3D_4x4x4": lambda: (torch.rand(4, 2, 2), 1.5),
"rand_4D_2x2x4x4": lambda: (torch.rand(2, 2, 4, 4), -1.1),
"rand_4D_big_small": lambda: (
(10e30) * torch.randn(1, 20, 30, 40),
-0.25,
),
"zero": lambda: (torch.rand(4, 4), 0),
# "swapped": lambda: (2, torch.rand(4, 4)), # torch.rsub(Scalar, Tensor) is not supported as it is not supported in eager mode.
}


class Rsub(torch.nn.Module):
aten_op = "torch.ops.aten.rsub.Scalar"
exir_op = "executorch_exir_dialects_edge__ops_aten_sub_Tensor"

def forward(self, x: torch.Tensor, y: int):
return torch.rsub(x, y)


input_t1 = Tuple[torch.Tensor, torch.Tensor]


@common.parametrize("test_data", rsub_test_data)
def test_rsub_scalar_tosa_FP(test_data):
pipeline = TosaPipelineFP[input_t1](
Rsub(),
test_data(),
aten_op=Rsub.aten_op,
exir_op=Rsub.exir_op,
use_to_edge_transform_and_lower=False,
)
pipeline.run()


@common.parametrize("test_data", rsub_test_data)
def test_rsub_scalar_tosa_INT(test_data):
"""Test Subtraction (TOSA INT)"""
pipeline = TosaPipelineINT[input_t1](
Rsub(),
test_data(),
aten_op="torch.ops.aten.sub.Tensor",
exir_op=Rsub.exir_op,
use_to_edge_transform_and_lower=False,
qtol=0,
)
pipeline.run()


@common.parametrize("test_data", rsub_test_data)
@common.XfailIfNoCorstone300
def test_rsub_scalar_u55_INT(test_data):
"""Test Subtraction on Ethos-U55 (FVP Mode)"""
pipeline = EthosU55PipelineINT[input_t1](
Rsub(),
test_data(),
aten_ops="torch.ops.aten.sub.Tensor",
exir_ops=Rsub.exir_op,
run_on_fvp=True,
use_to_edge_transform_and_lower=False,
)
pipeline.run()


@common.parametrize("test_data", rsub_test_data)
@common.XfailIfNoCorstone320
def test_rsub_scalar_u85_INT(test_data):
"""Test Subtraction on Ethos-U85 (FVP Mode)"""
pipeline = EthosU85PipelineINT[input_t1](
Rsub(),
test_data(),
aten_ops="torch.ops.aten.sub.Tensor",
exir_ops=Rsub.exir_op,
run_on_fvp=True,
use_to_edge_transform_and_lower=False,
)
pipeline.run()


@common.parametrize("test_data", rsub_test_data)
@common.SkipIfNoModelConverter
def test_rsub_scalar_vgf_FP(test_data: Tuple[torch.Tensor]):
"""Test Subtraction (VGF FP)"""
pipeline = VgfPipeline[input_t1](
Rsub(),
test_data(),
Rsub.aten_op,
Rsub.exir_op,
tosa_version="TOSA-1.0+FP",
use_to_edge_transform_and_lower=False,
)
pipeline.run()


@common.parametrize("test_data", rsub_test_data)
@common.SkipIfNoModelConverter
def test_rsub_scalar_vgf_INT(test_data: Tuple[torch.Tensor]):
"""Test Subtraction (VGF INT)"""
pipeline = VgfPipeline[input_t1](
Rsub(),
test_data(),
aten_op="torch.ops.aten.sub.Tensor",
exir_op=Rsub.exir_op,
tosa_version="TOSA-1.0+INT",
use_to_edge_transform_and_lower=False,
)
pipeline.run()
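
The rsub tests above rely on the identity behind the lowering they check: torch.rsub(x, y) computes y - x, so aten.rsub.Scalar is expected to appear as the edge sub operator after export (the exir_op checked in the pipelines). A minimal standalone sketch of that behaviour, assuming only standard PyTorch eager semantics:

import torch

x = torch.rand(4, 4)
y = 2.0
# rsub reverses the operands: rsub(x, y) == y - x (with the default alpha=1)
assert torch.allclose(torch.rsub(x, y), y - x)
# The swapped form rsub(Scalar, Tensor) is not supported in eager mode,
# which is why the "swapped" entry in rsub_test_data above is commented out.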
1 change: 1 addition & 0 deletions backends/arm/test/ops/test_sub.py
@@ -149,6 +149,7 @@ def test_sub_tensor_tosa_INT_2(test_data: Tuple[torch.Tensor, torch.Tensor]):
@common.parametrize("test_data", sub_tan_test_data)
def test_sub_tensor_tosa_INT_3(test_data: Tuple[torch.Tensor, torch.Tensor]):
"""Test Two-Operand Subtraction (TOSA INT)"""
+ # This test has only been added to the TOSA INT profile to catch quantization-induced errors.
pipeline = TosaPipelineINT[input_t2](
SubTan(), test_data(), aten_op, exir_op, qtol=0
)
115 changes: 115 additions & 0 deletions backends/arm/test/ops/test_t_copy.py
@@ -0,0 +1,115 @@
# Copyright 2025 Arm Limited and/or its affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from typing import Tuple

import torch

from executorch.backends.arm.test import common
from executorch.backends.arm.test.tester.test_pipeline import (
EthosU55PipelineINT,
EthosU85PipelineINT,
TosaPipelineFP,
TosaPipelineINT,
VgfPipeline,
)

test_data_suite = {
# test_name: (test_data,)
"rand": lambda: (torch.rand(2, 3),),
"rand_multiplied": lambda: (torch.rand(3, 4) * 10,),
"ones": lambda: (torch.ones(5, 10),),
"randn": lambda: (torch.randn(1, 10) * 2,),
}


class TCopy(torch.nn.Module):
aten_op = "torch.ops.aten.t_copy.default"
exir_op = "executorch_exir_dialects_edge__ops_aten_permute_copy_default"

def forward(self, x: torch.Tensor):
return torch.t_copy(x)


input_t1 = Tuple[torch.Tensor]


@common.parametrize("test_data", test_data_suite)
def test_t_tosa_FP(test_data: Tuple):
pipeline = TosaPipelineFP[input_t1](
TCopy(),
test_data(),
aten_op=TCopy.aten_op,
exir_op=TCopy.exir_op,
use_to_edge_transform_and_lower=False,
)

pipeline.run()


@common.parametrize("test_data", test_data_suite)
def test_t_tosa_INT(test_data: Tuple):
pipeline = TosaPipelineINT[input_t1](
TCopy(),
test_data(),
aten_op=TCopy.aten_op,
exir_op=TCopy.exir_op,
use_to_edge_transform_and_lower=False,
)
pipeline.run()


@common.XfailIfNoCorstone300
@common.parametrize("test_data", test_data_suite)
def test_t_u55_INT(test_data: Tuple):
pipeline = EthosU55PipelineINT[input_t1](
TCopy(),
test_data(),
aten_ops=TCopy.aten_op,
exir_ops=[],
use_to_edge_transform_and_lower=True,
)
pipeline.run()


@common.XfailIfNoCorstone320
@common.parametrize("test_data", test_data_suite)
def test_t_u85_INT(test_data: Tuple):
pipeline = EthosU85PipelineINT[input_t1](
TCopy(),
test_data(),
aten_ops=TCopy.aten_op,
exir_ops=TCopy.exir_op,
use_to_edge_transform_and_lower=False,
)
pipeline.run()


@common.parametrize("test_data", test_data_suite)
@common.SkipIfNoModelConverter
def test_t_vgf_FP(test_data: Tuple):
pipeline = VgfPipeline[input_t1](
TCopy(),
test_data(),
aten_op=TCopy.aten_op,
exir_op=TCopy.exir_op,
tosa_version="TOSA-1.0+FP",
use_to_edge_transform_and_lower=False,
)
pipeline.run()


@common.parametrize("test_data", test_data_suite)
@common.SkipIfNoModelConverter
def test_t_vgf_INT(test_data: Tuple):
pipeline = VgfPipeline[input_t1](
TCopy(),
test_data(),
aten_op=TCopy.aten_op,
exir_op=TCopy.exir_op,
tosa_version="TOSA-1.0+INT",
use_to_edge_transform_and_lower=False,
)
pipeline.run()
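
Likewise, the t_copy tests rely on torch.t_copy being the out-of-place copy variant of Tensor.t(): it transposes dimensions 0 and 1 of a tensor with at most two dimensions, which is why the edge-dialect check above targets permute_copy. A minimal standalone sketch, assuming only standard PyTorch eager semantics:

import torch

x = torch.rand(2, 3)
# t_copy returns a transposed copy rather than a view
assert torch.equal(torch.t_copy(x), x.t())
assert torch.t_copy(x).shape == (3, 2)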