diff --git a/backends/qualcomm/_passes/remove_redundancy.py b/backends/qualcomm/_passes/remove_redundancy.py
index 07b13d4dd67..2a6baa1236f 100644
--- a/backends/qualcomm/_passes/remove_redundancy.py
+++ b/backends/qualcomm/_passes/remove_redundancy.py
@@ -22,6 +22,7 @@ def __init__(self):
             exir_ops.edge.aten.clone.default: self._default_condition,
             torch.ops.aten.alias.default: self._default_condition,
             exir_ops.edge.aten.alias.default: self._default_condition,
+            exir_ops.edge.aten.alias_copy.default: self._default_condition,
             exir_ops.edge.aten.lift_fresh_copy.default: self._default_condition,
             # remove this target if '_skip_dim_order' is set to False
             exir_ops.edge.dim_order_ops._to_dim_order_copy.default: self._dim_order_op_condition,
diff --git a/backends/qualcomm/tests/models.py b/backends/qualcomm/tests/models.py
index adf6e256f54..71edb6303a1 100644
--- a/backends/qualcomm/tests/models.py
+++ b/backends/qualcomm/tests/models.py
@@ -10,6 +10,17 @@
 # module with related operator only
 
 
+# Ensure alias_copy is removed in remove_redundancy pass
+class Alias(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.relu = torch.nn.ReLU()
+
+    def forward(self, x):
+        alias_x = torch.ops.aten.alias.default(x)
+        return self.relu(alias_x)
+
+
 class And(torch.nn.Module):
     def __init__(self, pos, neg):
         super().__init__()
diff --git a/backends/qualcomm/tests/test_qnn_delegate.py b/backends/qualcomm/tests/test_qnn_delegate.py
index 74c85b773c2..3373166928c 100644
--- a/backends/qualcomm/tests/test_qnn_delegate.py
+++ b/backends/qualcomm/tests/test_qnn_delegate.py
@@ -118,6 +118,11 @@ def test_qnn_backend_adaptive_avg_pool2d(self):
         sample_input = (torch.randn(1, 512, 7, 7),)
         self.lower_module_and_test_output(module, sample_input)
 
+    def test_qnn_backend_alias(self):
+        module = Alias()  # noqa: F405
+        sample_input = (torch.randn(1, 10),)
+        self.lower_module_and_test_output(module, sample_input)
+
     def test_qnn_backend_amax(self):
         modules = [AMax(dim=1, keepdim=False), AMax(dim=1, keepdim=True)]  # noqa: F405
         sample_input = (torch.randn(4, 4),)
@@ -1156,6 +1161,12 @@ def test_qnn_backend_adaptive_avg_pool2d(self):
         module = self.get_qdq_module(module, sample_input)
         self.lower_module_and_test_output(module, sample_input)
 
+    def test_qnn_backend_alias(self):
+        module = Alias()  # noqa: F405
+        sample_input = (torch.randn(1, 10),)
+        module = self.get_qdq_module(module, sample_input)
+        self.lower_module_and_test_output(module, sample_input)
+
     def test_qnn_backend_amax(self):
         modules = [AMax(dim=1, keepdim=False), AMax(dim=1, keepdim=True)]  # noqa: F405
         sample_input = (torch.randn(4, 4),)
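
For reviewers who want to see the graph the new tests exercise, below is a minimal standalone sketch (not part of this diff) that exports the `Alias` module and prints the call_function targets of the resulting edge-dialect graph. Depending on the PyTorch/ExecuTorch version, the aliasing node surfaces as `aten.alias.default` or `aten.alias_copy.default`, which is why `RemoveRedundancy` now registers both. The `to_edge` entry point from `executorch.exir` is used here as an assumption; the PR's own tests go through `lower_module_and_test_output` instead.

```python
# Minimal repro sketch, not taken from this PR: export the Alias module added in
# backends/qualcomm/tests/models.py and list the call_function targets in the
# edge dialect, to confirm which aliasing op the RemoveRedundancy pass must strip.
import torch
from executorch.exir import to_edge


class Alias(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        alias_x = torch.ops.aten.alias.default(x)
        return self.relu(alias_x)


if __name__ == "__main__":
    ep = torch.export.export(Alias(), (torch.randn(1, 10),))
    edge = to_edge(ep)
    # Depending on the toolchain version, expect aten.alias.default or
    # aten.alias_copy.default alongside relu in this listing.
    for node in edge.exported_program().graph.nodes:
        if node.op == "call_function":
            print(node.target)
```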