60 changes: 60 additions & 0 deletions backends/arm/test/ops/test_upsample_bilinear2d.py
@@ -10,11 +10,13 @@

from executorch.backends.arm.test.tester.test_pipeline import (
EthosU85PipelineBI,
OpNotSupportedPipeline,
TosaPipelineBI,
TosaPipelineMI,
)

aten_op = "torch.ops.aten.upsample_bilinear2d.vec"
exir_op = "executorch_exir_dialects_edge__ops_aten_upsample_bilinear2d_vec"
input_t1 = Tuple[torch.Tensor] # Input x

test_data_suite_tosa = {
@@ -57,6 +59,10 @@
"rand_one_and_half_size": (torch.rand(2, 4, 8, 3), (12, 4), None, False),
}

test_data_u55 = {
"rand_double_size": (torch.rand(2, 4, 8, 3), (16, 6), None, True),
}


class UpsamplingBilinear2d(torch.nn.Module):
def __init__(
@@ -189,6 +195,60 @@ def test_upsample_bilinear2d_vec_tosa_BI_Upsample(
pipeline.run()


@common.parametrize("test_data", test_data_u55)
@common.XfailIfNoCorstone300
def test_upsample_bilinear2d_vec_U55_BI_Upsample_not_delegated(
test_data: torch.Tensor,
):
test_data, size, scale_factor, compare_outputs = test_data
pipeline = OpNotSupportedPipeline[input_t1](
Upsample(size, scale_factor),
(test_data,),
{exir_op: 1},
n_expected_delegates=0,
quantize=True,
u55_subset=True,
)

pipeline.run()


@common.parametrize("test_data", test_data_u55)
@common.XfailIfNoCorstone300
def test_upsample_bilinear2d_vec_U55_BI_Interpolate_not_delegated(
test_data: torch.Tensor,
):
test_data, size, scale_factor, compare_outputs = test_data
pipeline = OpNotSupportedPipeline[input_t1](
Interpolate(size, scale_factor),
(test_data,),
{exir_op: 1},
n_expected_delegates=0,
quantize=True,
u55_subset=True,
)

pipeline.run()


@common.parametrize("test_data", test_data_u55)
@common.XfailIfNoCorstone300
def test_upsample_bilinear2d_vec_U55_BI_UpsamplingBilinear2d_not_delegated(
test_data: torch.Tensor,
):
test_data, size, scale_factor, compare_outputs = test_data
pipeline = OpNotSupportedPipeline[input_t1](
UpsamplingBilinear2d(size, scale_factor),
(test_data,),
{exir_op: 1},
n_expected_delegates=0,
quantize=True,
u55_subset=True,
)

pipeline.run()


@common.parametrize("test_data", test_data_suite_Uxx)
@common.XfailIfNoCorstone320
def test_upsample_bilinear2d_vec_U85_BI_Upsample(test_data: input_t1):
60 changes: 60 additions & 0 deletions backends/arm/test/ops/test_upsample_nearest2d.py
@@ -9,11 +9,13 @@
from executorch.backends.arm.test import common

from executorch.backends.arm.test.tester.test_pipeline import (
OpNotSupportedPipeline,
TosaPipelineBI,
TosaPipelineMI,
)

aten_op = "torch.ops.aten.upsample_nearest2d.vec"
exir_op = "executorch_exir_dialects_edge__ops_aten_upsample_nearest2d_vec"
input_t1 = Tuple[torch.Tensor] # Input x

test_data_suite = {
@@ -40,6 +42,10 @@
"rand_one_and_half_size": lambda: (torch.rand(2, 4, 8, 3), (12, 4), None, False),
}

test_data_u55 = {
"rand_double_size": lambda: (torch.rand(2, 4, 8, 3), (16, 6), None, True),
}

test_data_suite_dynamic = {
# (test_name, test_data, size, scale_factor, compare_outputs)
"rand_double_scale": lambda: (torch.rand(2, 4, 8, 3), None, 2.0, False),
@@ -170,6 +176,59 @@ def test_upsample_nearest2d_vec_tosa_BI_nearest(test_data: torch.Tensor):
)
if not compare_outputs:
pipeline.pop_stage(-1)
pipeline.run()


@common.parametrize("test_data", test_data_u55)
@common.XfailIfNoCorstone300
def test_upsample_nearest2d_vec_U55_BI_Upsample_not_delegated(
test_data: torch.Tensor,
):
test_data, size, scale_factor, compare_outputs = test_data()
pipeline = OpNotSupportedPipeline[input_t1](
Upsample(size, scale_factor),
(test_data,),
{exir_op: 1},
n_expected_delegates=0,
quantize=True,
u55_subset=True,
)

pipeline.run()


@common.parametrize("test_data", test_data_u55)
@common.XfailIfNoCorstone300
def test_upsample_nearest2d_vec_U55_BI_Interpolate_not_delegated(
test_data: torch.Tensor,
):
test_data, size, scale_factor, compare_outputs = test_data()
pipeline = OpNotSupportedPipeline[input_t1](
Interpolate(size, scale_factor),
(test_data,),
{exir_op: 1},
n_expected_delegates=0,
quantize=True,
u55_subset=True,
)

pipeline.run()


@common.parametrize("test_data", test_data_u55)
@common.XfailIfNoCorstone300
def test_upsample_nearest2d_vec_U55_BI_UpsamplingNearest2d_not_delegated(
test_data: torch.Tensor,
):
test_data, size, scale_factor, compare_outputs = test_data()
pipeline = OpNotSupportedPipeline[input_t1](
UpsamplingNearest2d(size, scale_factor),
(test_data,),
{exir_op: 1},
n_expected_delegates=0,
quantize=True,
u55_subset=True,
)

pipeline.run()

@@ -327,4 +386,5 @@ def test_upsample_nearest2d_dynamic_BI_upsample(test_data: torch.Tensor):
)
if not compare_outputs:
pipeline.pop_stage(-1)

pipeline.run()
12 changes: 10 additions & 2 deletions backends/arm/tosa_partitioner.py
@@ -174,10 +174,18 @@ def filter_fn(node: torch.fx.Node) -> bool:

ops_to_not_decompose = [
torch.ops.aten.linear.default,
torch.ops.aten.upsample_bilinear2d.vec,
torch.ops.aten.upsample_nearest2d.vec,
torch.ops.aten.eye.default,
torch.ops.aten.linspace.default,
] + ops_to_not_decompose_if_quant_op

tosa_spec = get_tosa_spec(self.delegation_spec.compile_specs)
if not tosa_spec.is_U55_subset:
# The TOSA "RESIZE" operator is not supported on U55. Since
# upsample_bilinear2d and upsample_nearest2d lower to RESIZE, they cannot
# be delegated on U55. If we still marked them as not-to-decompose there,
# export would fail with an error that the operator was never decomposed,
# and it could run on neither CPU nor NPU. So only keep them whole
# (undecomposed) for non-U55 targets.
ops_to_not_decompose.append(torch.ops.aten.upsample_nearest2d.vec)
ops_to_not_decompose.append(torch.ops.aten.upsample_bilinear2d.vec)

return (ops_to_not_decompose, filter_fn)
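
To make the guard concrete, below is a minimal, hypothetical sketch (not part of this PR) of a module whose forward is expected to lower to aten.upsample_bilinear2d.vec via torch.nn.functional.interpolate. With the change above, that op is no longer marked as not-to-decompose when targeting the U55 subset, so it can fall back to the non-delegated path (decomposition / portable CPU kernels) instead of failing export as an undelegatable TOSA RESIZE.

import torch
import torch.nn.functional as F


class BilinearUpsample(torch.nn.Module):
    # Hypothetical example module: its forward is expected to lower to
    # aten.upsample_bilinear2d.vec, the op that is only added to
    # ops_to_not_decompose when the target is *not* the U55 subset.
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Double both spatial dimensions, mirroring the "rand_double_size"
        # test case above ((2, 4, 8, 3) -> (2, 4, 16, 6)).
        return F.interpolate(x, scale_factor=2.0, mode="bilinear")


if __name__ == "__main__":
    # Eager-mode sanity check of the shapes used in the U55 tests.
    out = BilinearUpsample()(torch.rand(2, 4, 8, 3))
    print(out.shape)  # torch.Size([2, 4, 16, 6])

A model along these lines, exported with the U55 compile spec, is what the OpNotSupportedPipeline tests above exercise: the upsample op stays out of the delegate (n_expected_delegates=0) while still appearing once as the edge op.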