 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
+from typing import cast, Tuple
+
 import torch
+
 from executorch.backends.qualcomm.builders.utils import get_parameter, set_parameter
 from executorch.backends.qualcomm.utils.constants import QCOM_REQUANTIZE
 from executorch.exir.pass_base import ExportPass, PassResult
+from torch._guards import detect_fake_mode
 
 from .utils import append_qdq, copy_meta
 
 
-class ConvertConv1dToConv2d(ExportPass):
+class CanonicalizeConv(ExportPass):
     """
-    Conv1d is not supported by QNN.
-    Change it to input -> unsqueeze -> conv2d -> squeeze -> output
+    1. QNN does not support dilation on TransposeConvND.
+       Dilate the kernel manually to get a math-equivalent operation.
+    2. Conv1d is not supported by QNN.
+       Change it to input -> unsqueeze -> conv2d -> squeeze -> output.
     """
 
     def __init__(self, edge_program: torch.export.ExportedProgram):
-        super(ConvertConv1dToConv2d, self).__init__()
+        super(CanonicalizeConv, self).__init__()
         self.edge_program = edge_program
-        self.conv_op_map = {
+        self.conv1d_op_map = {
             torch.ops.aten.conv1d.default: torch.ops.aten.conv2d.default,
             torch.ops.aten.conv_transpose1d.default: torch.ops.aten.conv_transpose2d.input,
         }
+        self.transpose_conv_set = {
+            torch.ops.aten.conv_transpose1d.default,
+            torch.ops.aten.conv_transpose2d.input,
+        }
+
+    def dilate(self, tensor, dilation):
+        # e.g. for a 3x3 kernel with dilation == (2, 3):
+        #              1, 0, 0, 2, 0, 0, 3
+        # 1, 2, 3      0, 0, 0, 0, 0, 0, 0
+        # 4, 5, 6  --> 4, 0, 0, 5, 0, 0, 6
+        # 7, 8, 9      0, 0, 0, 0, 0, 0, 0
+        #              7, 0, 0, 8, 0, 0, 9
+        i, o, *k = tensor.shape
+        new_k = [dim + (dim - 1) * (s - 1) for s, dim in zip(dilation, k)]
+        new_tensor = torch.zeros((i, o, *new_k), dtype=tensor.dtype)
+        indexing = (...,) + tuple(slice(None, None, d) for d in dilation)
+        new_tensor[indexing] = tensor
+        return new_tensor
 
     def call(self, graph_module: torch.fx.GraphModule):
         graph = graph_module.graph
+        # condition 1
+        for node in graph.nodes:
+            # arg order (https://docs.pytorch.org/docs/stable/generated/torch.nn.functional.conv_transpose2d.html)
+            # > input, weight, bias, stride, padding, output_padding, groups, dilation
+            if node.target in self.transpose_conv_set and len(node.args) > 7:
+                dilation = cast(Tuple[int, ...], node.args[7])
+                # dilate kernel in advance
+                filter_arg = node.args[1]
+                filter_node = (
+                    # fp graph
+                    filter_arg
+                    if filter_arg.op == "placeholder"
+                    # qdq graph
+                    else node.args[1].args[0]
+                )
+                filter_tensor = self.dilate(
+                    get_parameter(filter_node, self.edge_program),
+                    dilation,
+                )
+                # update tensor meta for kernel node
+                fake_mode = detect_fake_mode(filter_node.meta["val"])
+                converter = fake_mode.fake_tensor_converter
+                filter_node.meta["val"] = converter.from_real_tensor(
+                    fake_mode, filter_tensor
+                )
+                # update kernel
+                set_parameter(
+                    (
+                        torch.nn.Parameter(filter_tensor)
+                        if filter_tensor.dtype == torch.float
+                        else filter_tensor
+                    ),
+                    filter_node,
+                    self.edge_program,
+                )
+                # drop the dilation arg; with the pre-dilated kernel the op
+                # stays math-equivalent when the graph runs on CPU
+                node.args = node.args[0:-1]
+
+        # condition 2
         for node in graph.nodes:
-            if node.target in self.conv_op_map:
+            if node.target in self.conv1d_op_map:
                 input_node = node.args[0]
                 with graph_module.graph.inserting_after(input_node):
                     unsqueeze_op = torch.ops.aten.unsqueeze_copy.default
@@ -108,7 +172,7 @@ def call(self, graph_module: torch.fx.GraphModule):
                     )
                     conv2d_node = graph.create_node(
                         "call_function",
-                        self.conv_op_map[node.target],
+                        self.conv1d_op_map[node.target],
                         conv_args,
                     )
                     conv2d_node.meta = copy_meta(
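
As a sanity check of condition 1: pre-dilating the kernel and dropping the dilation argument should yield a math-equivalent transposed convolution. Below is a minimal standalone sketch, not part of the commit; it reimplements the dilate helper with plain torch.nn.functional calls, and the shapes, stride, and padding are arbitrary.

import torch
import torch.nn.functional as F

def dilate(tensor, dilation):
    # same zero-insertion trick as CanonicalizeConv.dilate
    i, o, *k = tensor.shape
    new_k = [dim + (dim - 1) * (s - 1) for s, dim in zip(dilation, k)]
    new_tensor = torch.zeros((i, o, *new_k), dtype=tensor.dtype)
    indexing = (...,) + tuple(slice(None, None, d) for d in dilation)
    new_tensor[indexing] = tensor
    return new_tensor

x = torch.randn(1, 4, 8, 8)   # (N, C_in, H, W)
w = torch.randn(4, 2, 3, 3)   # (C_in, C_out, kH, kW) -- transposed-conv layout
dilation = (2, 3)
ref = F.conv_transpose2d(x, w, stride=2, padding=1, dilation=dilation)
out = F.conv_transpose2d(x, dilate(w, dilation), stride=2, padding=1)
assert torch.allclose(ref, out, atol=1e-6)

Both calls perform the same multiplications and additions, so the outputs match up to floating-point tolerance.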
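For condition 2, the rewrite lifts the 1D operator into a rank-4 form QNN can consume. Here is a minimal sketch of the same math; again not part of the commit: the pass inserts unsqueeze_copy/squeeze_copy graph nodes rather than calling functional ops, and the shapes below are made up.

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 16)  # (N, C, L)
w = torch.randn(8, 3, 5)   # (C_out, C_in, k)
ref = F.conv1d(x, w, padding=2)

# unsqueeze a unit height dim on input and kernel, run conv2d, squeeze it back
x4 = x.unsqueeze(2)        # (N, C, 1, L)
w4 = w.unsqueeze(2)        # (C_out, C_in, 1, k)
out = F.conv2d(x4, w4, padding=(0, 2)).squeeze(2)
assert torch.allclose(ref, out, atol=1e-6)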