# Copyright 2025 Arm Limited and/or its affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import torch
from executorch.exir.pass_base import ExportPass


class DecomposeLinearVectorNormPass(ExportPass):
    """
    Decompose aten.linalg_vector_norm.default into primitive ops.

    This pass must run before quantization so the decomposed graph can be
    annotated; by default aten.linalg_vector_norm is only decomposed later,
    during legalization to Edge IR.

    Supported decompositions:

        p == 1:  out = REDUCE_SUM(ABS(x), dims, keepdim)
        p == 2:  out = SQRT(REDUCE_SUM(MUL(x, x), dims, keepdim))

    Arbitrary p is rejected: the general form
    POW(REDUCE_SUM(POW(ABS(x), p), dims, keepdim), 1/p) would require
    wrapping p in a tensor of a known dtype, which the FX graph does not
    provide at this point.
    """

    # Target op(s) this pass rewrites; everything else passes through.
    torch_linalg_vector_norm = (torch.ops.aten.linalg_vector_norm.default,)

    def call_operator(self, op, args, kwargs, meta):
        # Pass through any op we are not responsible for.
        if op not in self.torch_linalg_vector_norm:
            return super().call_operator(op, args, kwargs, meta)

        # Positional schema of aten.linalg_vector_norm.default:
        #   args[0] -> input tensor
        #   args[1] -> norm order p (optional, defaults to 2.0)
        #   args[2] -> dims to reduce over (must be present)
        #   args[3] -> keepdim flag (optional, defaults to False)
        x = args[0]
        p = args[1] if len(args) > 1 else 2.0
        dim_arg = args[2] if len(args) > 2 else None
        keep_dims = args[3] if len(args) > 3 else False

        # Guard clauses: only p in {1, 2} (incl. 1.0/2.0 via numeric
        # equality) and an explicit reduction dim are supported.
        if p not in (1, 2):
            raise ValueError(
                f"The order of {p}\n"
                f"is not supported for linalg_vector_norm operator"
            )
        if dim_arg is None:
            raise ValueError("The norm_dim for linalg_vector_norm is None.")

        # Normalize a scalar dim into a one-element list for sum.dim_IntList.
        reduce_dims = [dim_arg] if isinstance(dim_arg, int) else list(dim_arg)

        # Both decompositions share the shape REDUCE_SUM(f(x)); only f and
        # the optional final sqrt differ between p == 1 and p == 2.
        if p == 1:
            pre_sum = super().call_operator(
                torch.ops.aten.abs.default, (x,), {}, meta
            )
        else:  # p == 2
            pre_sum = super().call_operator(
                torch.ops.aten.mul.Tensor, (x, x), {}, meta
            )

        summed = super().call_operator(
            torch.ops.aten.sum.dim_IntList, (pre_sum, reduce_dims, keep_dims), {}, meta
        )

        if p == 1:
            return summed
        # p == 2: finish with the square root of the summed squares.
        return super().call_operator(torch.ops.aten.sqrt.default, (summed,), {}, meta)
0 commit comments