Skip to content

Commit 0dc2bd3

Browse files
authored
Fix XNN partitioning dynamic upsample with constant scales
Differential Revision: D66611750 Pull Request resolved: #7131
1 parent 5f0a14a commit 0dc2bd3

File tree

2 files changed

+36
-0
lines changed

2 files changed

+36
-0
lines changed

backends/xnnpack/partition/config/generic_node_configs.py

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -303,6 +303,21 @@ def get_original_aten(self) -> Optional[torch._ops.OpOverload]:
303303
class UpsampleBilinear2dConfig(GenericNodePartitionerConfig):
304304
target_name = "upsample_bilinear2d.vec"
305305

306+
def check_constraints(self, node: torch.fx.Node, ep: ExportedProgram) -> bool:
    """
    Reject nodes whose output shape is dynamic.

    XNNPACK lowers this op to static_resize_bilinear, which cannot handle
    dynamic output sizes, so any node whose FakeTensor ``val`` metadata
    contains a symbolic dimension must stay un-partitioned.
    """
    if not self.check_common_constraints(node, ep):
        return False

    # A SymInt in the output shape means the size is only known at runtime.
    has_dynamic_output = False
    if "val" in node.meta:
        has_dynamic_output = any(
            isinstance(dim, torch.SymInt) for dim in node.meta["val"].shape
        )
    if has_dynamic_output:
        why(node, reason="dynamic output sizes are not supported")
        return False
    return True
320+
306321
def supported_precision_types(self) -> List[ConfigPrecisionType]:
    """FP32 is the only precision this partitioner config accepts."""
    return [ConfigPrecisionType.FP32]
308323

backends/xnnpack/test/ops/bilinear2d.py

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88

99
import torch
1010
from executorch.backends.xnnpack.test.tester import Tester
11+
from executorch.backends.xnnpack.test.tester.tester import Export
1112

1213

1314
class TestUpsampleBilinear2d(unittest.TestCase):
@@ -118,3 +119,23 @@ def test_fp32_static_resize_bilinear2d_antialiased(self):
118119
)
119120
.check_not(["torch.ops.higher_order.executorch_call_delegate"])
120121
)
122+
123+
def test_fp32_bilinear2d_dynamic_bilinear2d_not_partitioned(self):
    """
    Verify that upsample_bilinear2d ops with dynamic output sizes are not partitioned.
    """
    sample_input = (torch.randn(2, 3, 4, 5),)
    # Mark height and width as dynamic so the exported graph carries SymInt sizes.
    height_dim = torch.export.Dim("h", min=1, max=10)
    width_dim = torch.export.Dim("w", min=1, max=12)
    shape_spec = {"x": {2: height_dim, 3: width_dim}}

    tester = Tester(self.StaticResizeBilinear2dModule(), sample_input)
    tester = tester.export(Export(shape_spec))
    tester = tester.to_edge_transform_and_lower()
    # NOTE The decomposition is partially delegated. This will need to be replaced
    # with the aten upsample op once decomp is removed.
    tester.check("executorch_exir_dialects_edge__ops_aten_index_Tensor")

0 commit comments

Comments
 (0)