
Commit 69ec0bb

cccclai authored and facebook-github-bot committed

Add logical and op

Summary: As title, add the logical_and op for an internal model.

Rollback Plan:

Differential Revision: D80122607
1 parent 76a4062 commit 69ec0bb
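
For orientation (a sketch, not part of the commit): a module that calls torch.logical_and is traced with torch.export, and edge lowering rewrites the call into exir_ops.edge.aten.logical_and.default, which is the node each file below teaches the Qualcomm backend to handle. Minimal example, assuming only a stock PyTorch install:

import torch

class LogicalAnd(torch.nn.Module):
    def forward(self, x, y):
        return torch.logical_and(x, y)

example = (torch.tensor([True, False]), torch.tensor([True, True]))
# torch.export records the aten-level graph; ExecuTorch's to_edge() would
# then produce the edge op that the pass and builders below match on.
exported = torch.export.export(LogicalAnd(), example)
print(exported.graph)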

File tree

6 files changed: 92 additions, 0 deletions


backends/qualcomm/_passes/layout_transform.py

Lines changed: 1 addition & 0 deletions

@@ -89,6 +89,7 @@ class LayoutTransform(ExportPass):
     exir_ops.edge.aten.le.Tensor,
     exir_ops.edge.aten.linear.default,
     exir_ops.edge.aten.log.default,
+    exir_ops.edge.aten.logical_and.default,
     exir_ops.edge.aten.logical_not.default,
     exir_ops.edge.aten.lt.Scalar,
     exir_ops.edge.aten.lt.Tensor,

backends/qualcomm/builders/__init__.py

Lines changed: 2 additions & 0 deletions

@@ -52,6 +52,7 @@
     op_linear,
     op_log,
     op_log_softmax,
+    op_logical_and,
     op_logical_not,
     op_lt,
     op_matmul,
@@ -148,6 +149,7 @@
     op_le,
     op_linear,
     op_log,
+    op_logical_and,
     op_logical_not,
     op_log_softmax,
     op_lt,
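
The two hunks above follow the package's usual import-then-register pattern: naming op_logical_and in both lists (presumably the imports and the exported-module list, judging by the hunk offsets) makes the module load, which runs its @register_node_visitor decorator. A self-contained sketch of that registry mechanism, with illustrative names rather than the backend's actual API:

from typing import Dict, List, Type

_NODE_VISITORS: Dict[str, Type] = {}

def register_node_visitor(cls: Type) -> Type:
    # Record the visitor class under every aten target it claims.
    for target in cls.target:
        _NODE_VISITORS[target] = cls
    return cls

@register_node_visitor
class And:
    target: List[str] = ["aten.logical_and.default"]

# Merely importing the defining module populates the registry.
assert "aten.logical_and.default" in _NODE_VISITORS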
backends/qualcomm/builders/op_logical_and.py

Lines changed: 68 additions & 0 deletions

@@ -0,0 +1,68 @@
# Copyright (c) Qualcomm Innovation Center, Inc.
# All rights reserved
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict

import executorch.backends.qualcomm.python.PyQnnWrapperAdaptor as PyQnnWrapper

import torch

from .node_visitor import NodeVisitor
from .node_visitor_manager import register_node_visitor
from .qnn_constants import OpElementWiseAnd, QNN_OP_PACKAGE_NAME_QTI_AISW


@register_node_visitor
class And(NodeVisitor):
    target = ["aten.logical_and.default"]

    def __init__(self, *args) -> None:
        super().__init__(*args)

    def define_node(
        self,
        node: torch.fx.Node,
        nodes_to_wrappers: Dict[torch.fx.Node, PyQnnWrapper.TensorWrapper],
    ) -> PyQnnWrapper.PyQnnOpWrapper:
        # Wrap both operands of aten.logical_and as QNN input tensors.
        input_node_1 = self.get_node(node.args[0])
        input_tensor_1 = self.get_tensor(input_node_1, node)
        input_tensor_wrapper_1 = self.define_tensor(
            input_node_1,
            node,
            input_tensor_1,
            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
        )

        input_node_2 = self.get_node(node.args[1])
        input_tensor_2 = self.get_tensor(input_node_2, node)
        input_tensor_wrapper_2 = self.define_tensor(
            input_node_2,
            node,
            input_tensor_2,
            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
        )

        output_tensor = self.get_tensor(node, node)
        output_tensor_wrapper = self.define_tensor(
            node,
            node,
            output_tensor,
            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
        )

        # Emit a single QNN ElementWiseAnd op with two inputs and one output.
        logical_and_op = PyQnnWrapper.PyQnnOpWrapper(
            node.name,
            QNN_OP_PACKAGE_NAME_QTI_AISW,
            OpElementWiseAnd.op_name,
        )
        logical_and_op.AddInputTensors(
            [input_tensor_wrapper_1, input_tensor_wrapper_2]
        )
        logical_and_op.AddOutputTensors([output_tensor_wrapper])

        return logical_and_op
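
Design note, inferred from the imports rather than stated in the commit: the visitor mirrors the single-input op_logical_not builder, wiring two NATIVE input tensor wrappers and one output wrapper into one QNN ElementWiseAnd node; no op parameters are needed since this is a pure elementwise boolean AND.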

backends/qualcomm/builders/qnn_constants.py

Lines changed: 5 additions & 0 deletions
@@ -200,6 +200,11 @@ class OpElementWiseNeuron:
     param_beta: str = "beta"


+@dataclass(init=False, frozen=True)
+class OpElementWiseAnd:
+    op_name: str = "ElementWiseAnd"
+
+
 @dataclass(init=False, frozen=True)
 class OpElementWiseNot:
     op_name: str = "ElementWiseNot"

backends/qualcomm/tests/models.py

Lines changed: 8 additions & 0 deletions
@@ -1108,6 +1108,14 @@ def forward(self, x):
         return torch.log(x)


+class LogicalAnd(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, x, y):
+        return torch.logical_and(x, y)
+
+
 class LogicalNot(torch.nn.Module):
     def __init__(self):
         super().__init__()
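
A quick eager-mode check of what the new test module computes (a sketch, not part of the commit):

import torch

x = torch.tensor([True, False, True, False])
y = torch.tensor([True, True, False, False])
# Identical to what LogicalAnd().forward(x, y) above returns:
print(torch.logical_and(x, y))  # tensor([ True, False, False, False])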

backends/qualcomm/tests/test_qnn_delegate.py

Lines changed: 8 additions & 0 deletions
@@ -856,6 +856,14 @@ def test_qnn_backend_log(self):
         sample_input = (torch.rand([1, 2, 3, 4]),)
         self.lower_module_and_test_output(module, sample_input)

+    def test_qnn_backend_logical_and(self):
+        module = LogicalAnd()  # noqa: F405
+        input1 = torch.tensor([True, False, True, False])
+        input2 = torch.tensor([True, True, False, False])
+        sample_input = (input1, input2)
+        self.lower_module_and_test_output(module, sample_input)
+
     def test_qnn_backend_logical_not(self):
         module = LogicalNot()  # noqa: F405
         sample_input = (torch.rand([1, 2, 3, 4]),)
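
Hedged note on running the new case: the QNN delegate tests execute on a Qualcomm device through the QNN SDK, so the invocation depends on the local setup. Roughly along these lines, with the serial, SoC, and build-folder flags being assumptions to verify against the backend README:

python backends/qualcomm/tests/test_qnn_delegate.py \
    -k test_qnn_backend_logical_and \
    -s <device_serial> -m SM8550 -b build-android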
