# Copyright 2025 Arm Limited and/or its affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# The TOSA BITWISE_AND, BITWISE_OR, and BITWISE_XOR don't handle bool inputs.
# When a targeted op receives boolean tensors, we promote them to an integer type before
# invocation and cast the result back to the expected dtype afterwards.

from typing import Set, Type

import torch

from executorch.backends.arm._passes.arm_pass import ArmPass
from executorch.exir.dialects._ops import ops as exir_ops
from executorch.exir.pass_base import ExportPass
class PromoteBoolOperandsPass(ArmPass):
    """Promote boolean operands to the appropriate integer dtype for unsupported ops.

    TOSA's bitwise ops (and the lowering used for mul) do not accept bool
    tensors, so bool inputs are cast up to an integer dtype before the op is
    emitted; when *every* operand was bool, the op result is cast back to bool
    so downstream consumers see the original dtype.
    """

    _passes_required_after: Set[Type[ExportPass]] = set()

    targeted_ops = {
        exir_ops.edge.aten.bitwise_and.Tensor,
        exir_ops.edge.aten.bitwise_or.Tensor,
        exir_ops.edge.aten.bitwise_xor.Tensor,
        exir_ops.edge.aten.mul.Tensor,
    }

    def call_operator(self, op, args, kwargs, meta):
        """Rewrite `op` so none of its tensor operands are bool.

        Non-targeted ops, and targeted ops without any bool operand, pass
        through untouched.
        """
        if op not in self.targeted_ops:
            return super().call_operator(op, args, kwargs, meta)

        input_dtypes = [a.data.dtype for a in args]
        if torch.bool not in input_dtypes:
            return super().call_operator(op, args, kwargs, meta)

        # Borrow the first non-bool operand dtype if one exists; otherwise
        # fall back to an op-specific default (mul needs int32 headroom,
        # bitwise ops are fine in int8).
        promoted_dtype = next(
            (dt for dt in input_dtypes if dt != torch.bool), None
        )
        if promoted_dtype is None:
            promoted_dtype = (
                torch.int32 if op == exir_ops.edge.aten.mul.Tensor else torch.int8
            )

        call = super().call_operator
        cast_op = exir_ops.edge.dim_order_ops._to_dim_order_copy.default

        # Cast only the bool operands; everything else is forwarded as-is.
        promoted_args = []
        for arg, dtype in zip(args, input_dtypes):
            if dtype == torch.bool:
                promoted_args.append(
                    call(cast_op, (arg,), {"dtype": promoted_dtype}, meta)
                )
            else:
                promoted_args.append(arg)

        result = call(op, tuple(promoted_args), kwargs, meta)

        # When every input was bool the caller expects a bool result, so cast
        # the op output back down; mixed-dtype cases keep the promoted dtype.
        if all(dtype == torch.bool for dtype in input_dtypes):
            result = call(cast_op, (result,), {"dtype": torch.bool}, meta)
        return result