
Commit 530d4a1

Upsample Bilinear 2d
Differential Revision: D60323281
Pull Request resolved: #4535
1 parent: 92edd04

6 files changed, +20 -16 lines

backends/xnnpack/operators/op_static_resize_bilinear_2d.py

Lines changed: 3 additions & 3 deletions
@@ -4,7 +4,7 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
-from typing import cast, Dict, List
+from typing import cast, Dict
 
 import torch
 from executorch.backends.xnnpack.operators.node_visitor import (
@@ -23,7 +23,7 @@
 
 @register_node_visitor
 class StaticResizeBilinear2DVisitor(NodeVisitor):
-    target = "aten.upsample_bilinear2d.default"
+    target = "aten.upsample_bilinear2d.vec"
 
     def __init__(self, *args) -> None:
         super().__init__(*args)
@@ -44,7 +44,7 @@ def define_node(
         # output
         output_id = vals_to_ids[node]
 
-        new_size = cast(List[int], node.args[1])
+        new_size = node.meta["val"].shape[-2:]
 
         flags = XNN_FLAG_ALIGN_CORNERS if cast(bool, node.args[2]) else 0
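
Why the visitor now reads the size from node.meta rather than node.args: for the .vec overload the output_size argument is optional and can be None when the caller passed a scale_factor instead, while the FakeTensor stored in node.meta["val"] always carries the resolved output shape. A minimal sketch (not part of this commit; the Upsample module is hypothetical) showing the difference:

```python
import torch
import torch.nn.functional as F

class Upsample(torch.nn.Module):  # hypothetical example module
    def forward(self, x):
        # No explicit size: upsample_bilinear2d.vec is traced with
        # output_size=None and scale_factors=[2.0, 2.0].
        return F.interpolate(x, scale_factor=2.0, mode="bilinear")

ep = torch.export.export(Upsample(), (torch.randn(1, 3, 8, 8),))
for node in ep.graph.nodes:
    if node.op == "call_function" and "upsample_bilinear2d" in str(node.target):
        print(node.args[1])                 # may be None here
        print(node.meta["val"].shape[-2:])  # always the concrete (H, W)
```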

backends/xnnpack/partition/config/__init__.py

Lines changed: 2 additions & 0 deletions
@@ -32,6 +32,7 @@
     ReLUConfig,
     SigmoidConfig,
     SoftmaxConfig,
+    UpsampleBilinear2dConfig,
 )
 from executorch.backends.xnnpack.partition.config.node_configs import (
     BatchNormConfig,
@@ -66,6 +67,7 @@
     PermuteConfig,
     # EluConfig, # Waiting for PyTorch Pin Update
     ReLUConfig,
+    UpsampleBilinear2dConfig,
     # Quantization Op Configs
     QuantizedPerTensorConfig,
     DeQuantizedPerTensorConfig,

backends/xnnpack/partition/config/generic_node_configs.py

Lines changed: 10 additions & 0 deletions
@@ -261,3 +261,13 @@ def supported_precision_types(self) -> List[ConfigPrecisionType]:
 
     def get_original_aten(self) -> Optional[torch._ops.OpOverload]:
         return torch.ops.aten.max_pool2d.default
+
+
+class UpsampleBilinear2dConfig(GenericNodePartitionerConfig):
+    target_name = "upsample_bilinear2d.vec"
+
+    def supported_precision_types(self) -> List[ConfigPrecisionType]:
+        return [ConfigPrecisionType.FP32]
+
+    def get_original_aten(self) -> Optional[torch._ops.OpOverload]:
+        return torch.ops.aten.upsample_bilinear2d.vec
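
With the new config registered above, FP32 graphs containing aten.upsample_bilinear2d.vec can be claimed by the XNNPACK partitioner. A minimal end-to-end sketch (assuming the standard ExecuTorch lowering flow; the Upsample module is illustrative):

```python
import torch
import torch.nn.functional as F
from executorch.backends.xnnpack.partition.xnnpack_partitioner import XnnpackPartitioner
from executorch.exir import to_edge_transform_and_lower

class Upsample(torch.nn.Module):  # illustrative module
    def forward(self, x):
        return F.interpolate(x, size=(16, 16), mode="bilinear")

ep = torch.export.export(Upsample(), (torch.randn(1, 3, 8, 8),))
edge = to_edge_transform_and_lower(ep, partitioner=[XnnpackPartitioner()])
# The upsample op should now appear inside an executorch_call_delegate call.
edge.exported_program().graph_module.print_readable()
```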

backends/xnnpack/passes/channels_last_tagged_reshape_pass.py

Lines changed: 1 addition & 1 deletion
@@ -44,7 +44,7 @@ class ChannelsLastTaggedReshapePass(XNNPACKPass):
     # Set of ops that require memory format to be channels last (NHWC)
     memory_sensitive_ops_nhwc = {
         exir_ops.edge.aten.convolution.default,
-        exir_ops.edge.aten.upsample_bilinear2d.default,
+        exir_ops.edge.aten.upsample_bilinear2d.vec,
         exir_ops.edge.aten.mean.dim,
         exir_ops.edge.aten.max_pool2d.default,
         exir_ops.edge.aten.amax.default,
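
For context, memory_sensitive_ops_nhwc lists the ops XNNPACK expects in channels-last (NHWC) layout, so this pass inserts layout conversions around them; the renamed .vec overload keeps bilinear upsampling in that set. A quick illustration of what channels-last means for a 4D tensor (independent of this diff):

```python
import torch

x = torch.randn(1, 3, 8, 8)                  # contiguous, NCHW strides
y = x.to(memory_format=torch.channels_last)  # same shape, NHWC strides
print(x.stride())  # (192, 64, 8, 1) -> width is innermost
print(y.stride())  # (192, 1, 24, 3) -> channels are innermost (NHWC)
```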

backends/xnnpack/passes/convert_to_upsample_bilinear2d.py

Lines changed: 1 addition & 1 deletion
@@ -36,7 +36,7 @@ def create_upsample_bilinear_2d(
         with graph_module.graph.inserting_before(output):
             upsample_node = graph_module.graph.create_node(
                 "call_function",
-                exir_ops.edge.aten.upsample_bilinear2d.default,
+                exir_ops.edge.aten.upsample_bilinear2d.vec,
                 # TODO(T166527012): Using output_h and output_w here only works with static shapes
                 args=(input_node, [output_h, output_w], align_corners, None),
             )
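
The trailing None in the args tuple is the scale_factors parameter of the .vec overload, whose signature differs from .default (which takes a required output_size plus optional per-axis scales). The exact schemas can be checked at runtime; a small snippet to do so:

```python
import torch

# Prints the ATen schemas of both overloads, e.g. the optional
# SymInt[]? output_size and float[]? scale_factors of the .vec variant.
print(torch.ops.aten.upsample_bilinear2d.default._schema)
print(torch.ops.aten.upsample_bilinear2d.vec._schema)
```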

backends/xnnpack/test/ops/bilinear2d.py

Lines changed: 3 additions & 11 deletions
@@ -83,8 +83,7 @@ def test_fp32_static_resize_bilinear2d(self):
         (
             Tester(self.StaticResizeBilinear2dModule(), example_inputs)
             .export()
-            .to_edge()
-            .partition()
+            .to_edge_transform_and_lower()
             .check_not(self.ops)
             .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
             .to_executorch()
@@ -97,8 +96,7 @@ def test_fp32_static_resize_bilinear2d_with_align_corners(self):
         (
             Tester(self.StaticResizeBilinear2dModuleWithAlignCorners(), example_inputs)
             .export()
-            .to_edge()
-            .partition()
+            .to_edge_transform_and_lower()
             .check_not(self.ops)
             .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
             .to_executorch()
@@ -112,13 +110,7 @@ def test_fp32_static_resize_bilinear2d_antialiased(self):
         (
             Tester(self.Bilinear2dAntiAlias(), example_inputs)
             .export()
-            .to_edge()
-            .check_count(
-                {
-                    "executorch_exir_dialects_edge__ops_aten__upsample_bilinear2d_aa_default": 2
-                }
-            )
-            .partition()
+            .to_edge_transform_and_lower()
             .check_count(
                 {
                     "executorch_exir_dialects_edge__ops_aten__upsample_bilinear2d_aa_default": 2
