-
Notifications
You must be signed in to change notification settings - Fork 752
[XNNPACK] Support 2d Transposed Convolution in XNNPACK delegate #7514
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 3 commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -83,11 +83,16 @@ def is_dynamically_quantized(self, node: torch.fx.Node) -> bool: | |
| return is_dynamic_qdq(node) | ||
|
|
||
| def is_supported_quant_op(self, node: torch.fx.Node) -> bool: | ||
| return ( | ||
| node.op == "call_function" | ||
| and cast(torch._ops.OpOverload, node.target).name() | ||
| in SUPPORTED_IMPLICIT_Q_DQ_OP_NAMES_SET | ||
| ) | ||
| if node.op != "call_function": | ||
| return False | ||
|
|
||
| op_name = cast(torch._ops.OpOverload, node.target).name() | ||
|
|
||
| # Weight and Input should both be quantized | ||
| if op_name == exir_ops.edge.aten.convolution.default.name(): | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. What was the reasoning for this? I imagine it should've returned true in the previous implementation as well?
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. The issue arises when a non-quantized operation is interleaved between two quantized operations. That operation also matches the dequantize-op-quantize pattern. However, an operation with quantized input and float weight is not supported by XNNPACK. |
||
| return is_dequant(node.args[1]) | ||
|
|
||
| return op_name in SUPPORTED_IMPLICIT_Q_DQ_OP_NAMES_SET | ||
|
|
||
| def is_supported_quant_module(self, node: torch.fx.Node) -> bool: | ||
| is_supported = ( | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -9,6 +9,7 @@ | |
| from typing import cast, List, Optional, Tuple | ||
|
|
||
| import torch | ||
| from executorch.backends.xnnpack.operators.quant_params import QuantParams | ||
| from executorch.backends.xnnpack.partition.config.xnnpack_config import ( | ||
| ConfigPrecisionType, | ||
| XNNPartitionerConfig, | ||
|
|
@@ -317,7 +318,7 @@ def __init__(self, **kwargs): | |
|
|
||
| def check_constraints(self, node: torch.fx.Node, ep: ExportedProgram) -> bool: | ||
| """ | ||
| Currently we have no support for convolution 3d and transposed convolution | ||
| Currently we have no support for convolution 3d | ||
| """ | ||
| if not super().check_constraints(node, ep): | ||
| return False | ||
|
|
@@ -327,11 +328,24 @@ def check_constraints(self, node: torch.fx.Node, ep: ExportedProgram) -> bool: | |
| why(node, "Only support 1D + 2D Conv") | ||
| return False # Only support 1D + 2D Conv | ||
|
|
||
| transposed = cast(bool, node.args[6]) | ||
| if transposed: | ||
| why(node, "Transposed Conv is not supported") | ||
| return False # Currently don't support transposed conv | ||
| kernel_node = get_input_node(node, 1) | ||
digantdesai marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| weight_quant_params = QuantParams.from_weights(kernel_node, ep) | ||
|
|
||
| is_transpose = node.args[6] | ||
| groups = cast(int, node.args[8]) | ||
|
|
||
| if ( | ||
| is_transpose | ||
| and weight_quant_params is not None | ||
| and weight_quant_params.per_channel | ||
| and (groups > 1 or weight_quant_params.axis != 1) | ||
| ): | ||
| why( | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Pretty neat way of checking this constraint |
||
| node, | ||
| "XNNPACK does not support per input channel quantization " | ||
| "for transpose convolutions with groups > 1", | ||
| ) | ||
| return False | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Let's add a test which triggers this constraint? |
||
| return True | ||
|
|
||
| def supported_precision_types(self): | ||
|
|
||
Uh oh!
There was an error while loading. Please reload this page.