Skip to content

Commit e5f94da

Browse files
authored
Qualcomm AI Engine Direct - op enablement sign asin xor floor_divide binary (#13675)
Summary • Enable sign, asin, xor, floor_divide, binary • test cases Test plan: python backends/qualcomm/tests/test_qnn_delegate.py -k TestQNNQuantizedOperators.test_qnn_backend_ -s $DEVICE_SERIAL -m SM8650 -b build-android/ ; python backends/qualcomm/tests/test_qnn_delegate.py -k TestQNNFloatingPointOperators.test_qnn_backend_ -s $DEVICE_SERIAL -m SM8650 -b build-android/
1 parent f099cfb commit e5f94da

File tree

14 files changed

+530
-11
lines changed

14 files changed

+530
-11
lines changed

backends/qualcomm/_passes/layout_transform.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -64,8 +64,10 @@ class LayoutTransform(ExportPass):
6464
exir_ops.edge.aten.add.Tensor,
6565
exir_ops.edge.aten.amax.default,
6666
exir_ops.edge.aten.amin.default,
67+
exir_ops.edge.aten.asin.default,
6768
exir_ops.edge.aten.atan.default,
6869
exir_ops.edge.aten.bitwise_or.Tensor,
70+
exir_ops.edge.aten.bitwise_xor.Tensor,
6971
exir_ops.edge.aten.bmm.default,
7072
exir_ops.edge.aten.bitwise_and.Tensor,
7173
exir_ops.edge.aten.cat.default,
@@ -78,6 +80,7 @@ class LayoutTransform(ExportPass):
7880
exir_ops.edge.aten.eq.Tensor,
7981
exir_ops.edge.aten.exp.default,
8082
exir_ops.edge.aten.floor.default,
83+
exir_ops.edge.aten.floor_divide.default,
8184
exir_ops.edge.aten.full.default,
8285
exir_ops.edge.aten.full_like.default,
8386
exir_ops.edge.aten.ge.Tensor,
@@ -107,6 +110,7 @@ class LayoutTransform(ExportPass):
107110
exir_ops.edge.aten.relu.default,
108111
exir_ops.edge.aten.round.default,
109112
exir_ops.edge.aten.sigmoid.default,
113+
exir_ops.edge.aten.sign.default,
110114
exir_ops.edge.aten.split_with_sizes.default,
111115
exir_ops.edge.aten.split_with_sizes_copy.default,
112116
exir_ops.edge.aten.sqrt.default,

backends/qualcomm/_passes/lift_constant_scalar_operands.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,7 @@ class TensorOpInfo:
5454
aten.where.ScalarOther: TensorOpInfo(aten.where.self, False, True),
5555
aten.where.Scalar: TensorOpInfo(aten.where.self, False, True),
5656
aten.masked_fill.Scalar: TensorOpInfo(aten.masked_fill.Tensor, False, False),
57+
aten.bitwise_xor.Scalar: TensorOpInfo(aten.bitwise_xor.Tensor, False, False),
5758
}
5859

5960

@@ -64,6 +65,7 @@ class TensorOpInfo:
6465
aten.arange.default,
6566
aten.scalar_tensor.default,
6667
aten.elu.default,
68+
aten.hardtanh.default,
6769
}
6870

6971

backends/qualcomm/builders/README.md

Lines changed: 14 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -360,7 +360,12 @@ The operator now should be functional for Qualcomm backends. For operator to wor
360360
## Operator Support Status
361361
Please help update following table if you are contributing new operators:
362362

363-
| Operators | HTP - 82/116 Enabled |
363+
+ ✓ = Supported
364+
+ ✗ = Not Supported
365+
+ 🚫 = Deprecated, supported with other QNN Ops
366+
367+
368+
| Operators | HTP - 90/116 Enabled |
364369
|-----------|---------|
365370
| Argmax | ✓ |
366371
| Argmin | ✓ |
@@ -381,16 +386,16 @@ Please help update following table if you are contributing new operators:
381386
| ElementWiseAbs | ✓ |
382387
| ElementWiseAdd | ✓ |
383388
| ElementWiseAnd | ✓ |
384-
| ElementWiseAsin | ✗ |
389+
| ElementWiseAsin | ✓ |
385390
| ElementWiseAtan | ✓ |
386-
| ElementWiseBinary | ✗ |
391+
| ElementWiseBinary | ✓ |
387392
| ElementWiseCeil | ✓ |
388393
| ElementWiseCos | ✓ |
389394
| ElementWiseDivide | ✓ |
390395
| ElementWiseEqual | ✓ |
391396
| ElementWiseExp | ✓ |
392397
| ElementWiseFloor | ✓ |
393-
| ElementWiseFloorDiv | ✗ |
398+
| ElementWiseFloorDiv | ✓ |
394399
| ElementWiseGreater | ✓ |
395400
| ElementWiseGreaterEqual | ✓ |
396401
| ElementWiseLess | ✓ |
@@ -408,13 +413,13 @@ Please help update following table if you are contributing new operators:
408413
| ElementWiseRound | ✓ |
409414
| ElementWiseRsqrt | ✓ |
410415
| ElementWiseSelect | ✓ |
411-
| ElementWiseSign | ✗ |
416+
| ElementWiseSign | ✓ |
412417
| ElementWiseSin | ✓ |
413418
| ElementWiseSquaredDifference | ✗ |
414419
| ElementWiseSquareRoot | ✓ |
415420
| ElementWiseSubtract | ✓ |
416421
| ElementWiseUnary | ✗ |
417-
| ElementWiseXor | ✗ |
422+
| ElementWiseXor | ✓ |
418423
| Elu | ✓ |
419424
| ExpandDims | ✓ |
420425
| ExtractGlimpse | ✗ |
@@ -452,11 +457,11 @@ Please help update following table if you are contributing new operators:
452457
| ReduceMin | ✓ |
453458
| ReduceSum | ✓ |
454459
| Relu | ✓ |
455-
| Relu1 | ✗ |
456-
| Relu6 | ✗ |
460+
| Relu1 | 🚫 |
461+
| Relu6 | 🚫 |
457462
| ReluMinMax | ✓ |
458463
| Reshape | ✓ |
459-
| Resize | ✗ |
464+
| Resize | ✓ |
460465
| ResizeBilinear | ✓ |
461466
| ResizeNearestNeighbor | ✓ |
462467
| RoiAlign | ✗ |

backends/qualcomm/builders/__init__.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,9 +15,11 @@
1515
op_arange,
1616
op_argmax,
1717
op_argmin,
18+
op_asin,
1819
op_atan,
1920
op_avg_pool2d,
2021
op_batch_norm,
22+
op_binary,
2123
op_bmm,
2224
op_cat,
2325
op_ceil,
@@ -79,6 +81,7 @@
7981
op_scalar_tensor,
8082
op_select_copy,
8183
op_sigmoid,
84+
op_sign,
8285
op_sin,
8386
op_skip_ops,
8487
op_slice_copy,
@@ -99,6 +102,7 @@
99102
op_upsample_bilinear2d,
100103
op_upsample_nearest2d,
101104
op_where,
105+
op_xor,
102106
)
103107

104108
__all__ = [
@@ -112,9 +116,11 @@
112116
op_arange,
113117
op_argmax,
114118
op_argmin,
119+
op_asin,
115120
op_atan,
116121
op_avg_pool2d,
117122
op_batch_norm,
123+
op_binary,
118124
op_bmm,
119125
op_cat,
120126
op_ceil,
@@ -176,6 +182,7 @@
176182
op_scalar_tensor,
177183
op_select_copy,
178184
op_sigmoid,
185+
op_sign,
179186
op_sin,
180187
op_skip_ops,
181188
op_slice_copy,
@@ -196,4 +203,5 @@
196203
op_upsample_bilinear2d,
197204
op_upsample_nearest2d,
198205
op_where,
206+
op_xor,
199207
]
Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,56 @@
1+
# Copyright (c) Qualcomm Innovation Center, Inc.
2+
# All rights reserved
3+
#
4+
# This source code is licensed under the BSD-style license found in the
5+
# LICENSE file in the root directory of this source tree.
6+
from typing import Dict
7+
8+
import executorch.backends.qualcomm.python.PyQnnWrapperAdaptor as PyQnnWrapper
9+
import torch
10+
11+
from .node_visitor import NodeVisitor
12+
from .node_visitor_manager import register_node_visitor
13+
14+
from .qnn_constants import OpElementWiseAsin, QNN_OP_PACKAGE_NAME_QTI_AISW
15+
16+
17+
@register_node_visitor
class asin(NodeVisitor):
    """Visitor that lowers ``aten.asin.default`` to QNN's ElementWiseAsin op.

    A unary elementwise op: one input tensor wrapper in, one output tensor
    wrapper out, no scalar parameters.
    """

    target = ["aten.asin.default"]

    def __init__(self, *args) -> None:
        super().__init__(*args)

    def define_node(
        self,
        node: torch.fx.Node,
        nodes_to_wrappers: Dict[torch.fx.Node, PyQnnWrapper.TensorWrapper],
    ) -> PyQnnWrapper.PyQnnOpWrapper:
        # Wrap the single input, then the output, as native QNN tensors.
        src_node = self.get_node(node.args[0])
        src_wrapper = self.define_tensor(
            src_node,
            node,
            self.get_tensor(src_node, node),
            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
        )

        dst_wrapper = self.define_tensor(
            node,
            node,
            self.get_tensor(node, node),
            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
        )

        # Assemble the QNN op and attach its I/O tensors.
        op_wrapper = PyQnnWrapper.PyQnnOpWrapper(
            node.name,
            QNN_OP_PACKAGE_NAME_QTI_AISW,
            OpElementWiseAsin.op_name,
        )
        op_wrapper.AddInputTensors([src_wrapper])
        op_wrapper.AddOutputTensors([dst_wrapper])

        return op_wrapper
Lines changed: 84 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,84 @@
1+
# Copyright (c) Qualcomm Innovation Center, Inc.
2+
# All rights reserved
3+
#
4+
# This source code is licensed under the BSD-style license found in the
5+
# LICENSE file in the root directory of this source tree.
6+
import warnings
7+
from typing import Dict
8+
9+
import executorch.backends.qualcomm.python.PyQnnWrapperAdaptor as PyQnnWrapper
10+
import numpy as np
11+
import torch
12+
from executorch.backends.qualcomm.utils.constants import QCOM_DATA
13+
from executorch.exir.dialects._ops import ops as exir_ops
14+
15+
from .node_visitor import NodeVisitor
16+
from .node_visitor_manager import register_node_visitor
17+
from .qnn_constants import OpElementWiseBinary, QNN_OP_PACKAGE_NAME_QTI_AISW
18+
19+
20+
# Refer to QnnOpDef.h for the value.
21+
QNN_BINARY_OPERATOR = {
22+
exir_ops.edge.aten.floor_divide.default: 4,
23+
}
24+
25+
26+
@register_node_visitor
class Binary(NodeVisitor):
    """Visitor that lowers binary elementwise ops to QNN's generic
    ElementWiseBinary op.

    The concrete arithmetic operation is selected via a uint32 scalar
    parameter whose value is looked up in ``QNN_BINARY_OPERATOR`` (values
    defined in QnnOpDef.h). Currently only ``aten.floor_divide.default``
    is mapped.
    """

    target = ["aten.floor_divide.default"]

    def __init__(self, *args) -> None:
        super().__init__(*args)

    def define_node(
        self,
        node: torch.fx.Node,
        nodes_to_wrappers: Dict[torch.fx.Node, PyQnnWrapper.TensorWrapper],
    ) -> PyQnnWrapper.PyQnnOpWrapper:
        # Validate the target BEFORE building any tensor wrappers: the
        # original late check wasted work and, worse, registered wrappers
        # into the shared nodes_to_wrappers dict on the failure path.
        if node.target not in QNN_BINARY_OPERATOR:
            warnings.warn(
                "[QNN Delegate Op Builder]: This binary operator is not yet supported.",
                stacklevel=1,
            )
            return None

        out_tensor = self.get_tensor(node, node)
        output_tensor_wrapper = self.define_tensor(
            node,
            node,
            out_tensor,
            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
        )
        binary_output_tensors = [output_tensor_wrapper]

        # ElementWiseBinary always takes exactly two operands.
        binary_input_tensors = []
        for index in range(2):
            input_node = self.get_node(node.args[index])
            input_tensor = self.get_tensor(input_node, node)
            tensor_type = PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE

            input_tensor_wrapper = self.define_tensor(
                input_node,
                node,
                input_tensor,
                tensor_type,
                nodes_to_wrappers,
            )
            binary_input_tensors.append(input_tensor_wrapper)

        binary_op = PyQnnWrapper.PyQnnOpWrapper(
            node.name,
            QNN_OP_PACKAGE_NAME_QTI_AISW,
            OpElementWiseBinary.op_name,
        )
        binary_op.AddInputTensors(binary_input_tensors)
        binary_op.AddOutputTensors(binary_output_tensors)

        # Scalar param selecting which binary operation QNN performs.
        binary_op.AddScalarParam(
            OpElementWiseBinary.param_operation,
            PyQnnWrapper.Qnn_DataType_t.QNN_DATATYPE_UINT_32,
            {QCOM_DATA: np.uint32(QNN_BINARY_OPERATOR[node.target])},
        )

        return binary_op

backends/qualcomm/builders/op_ne.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616

1717
@register_node_visitor
1818
class NotEqual(NodeVisitor):
19-
target = ["aten.ne.Tensor", "aten.ne.Scalar"]
19+
target = ["aten.ne.Tensor"]
2020

2121
def __init__(self, *args) -> None:
2222
super().__init__(*args)
Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,56 @@
1+
# Copyright (c) Qualcomm Innovation Center, Inc.
2+
# All rights reserved
3+
#
4+
# This source code is licensed under the BSD-style license found in the
5+
# LICENSE file in the root directory of this source tree.
6+
from typing import Dict
7+
8+
import executorch.backends.qualcomm.python.PyQnnWrapperAdaptor as PyQnnWrapper
9+
10+
import torch
11+
12+
from .node_visitor import NodeVisitor
13+
from .node_visitor_manager import register_node_visitor
14+
from .qnn_constants import OpElementWiseSign, QNN_OP_PACKAGE_NAME_QTI_AISW
15+
16+
17+
@register_node_visitor
class Sign(NodeVisitor):
    """Visitor that lowers ``aten.sign.default`` to QNN's ElementWiseSign op.

    A unary elementwise op: one input tensor wrapper in, one output tensor
    wrapper out, no scalar parameters.
    """

    target = ["aten.sign.default"]

    def __init__(self, *args) -> None:
        super().__init__(*args)

    def define_node(
        self,
        node: torch.fx.Node,
        nodes_to_wrappers: Dict[torch.fx.Node, PyQnnWrapper.TensorWrapper],
    ) -> PyQnnWrapper.PyQnnOpWrapper:
        # Wrap the single input, then the output, as native QNN tensors.
        src_node = self.get_node(node.args[0])
        src_wrapper = self.define_tensor(
            src_node,
            node,
            self.get_tensor(src_node, node),
            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
        )

        dst_wrapper = self.define_tensor(
            node,
            node,
            self.get_tensor(node, node),
            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
        )

        # Assemble the QNN op and attach its I/O tensors.
        op_wrapper = PyQnnWrapper.PyQnnOpWrapper(
            node.name,
            QNN_OP_PACKAGE_NAME_QTI_AISW,
            OpElementWiseSign.op_name,
        )
        op_wrapper.AddInputTensors([src_wrapper])
        op_wrapper.AddOutputTensors([dst_wrapper])

        return op_wrapper

0 commit comments

Comments
 (0)