# Copyright 2025 Arm Limited and/or its affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.


from typing import Callable, List, Optional

import torch

from executorch.backends.arm._passes.arm_pass_utils import get_first_fake_tensor

from executorch.backends.arm.quantizer.quantization_config import QuantizationConfig
from executorch.backends.cortex_m.passes.cortex_m_pass_manager import CortexMPassManager
from executorch.backends.cortex_m.quantizer.operator_configs import (
    INT8_BINARY_OPS_OPERATOR_CONFIG,
    INT8_LINEAR_OPERATOR_CONFIG,
)
from torch._ops import OpOverload
from torch.fx import GraphModule, Node
from torchao.quantization.pt2e.quantizer import (
    ComposableQuantizer,
    QuantizationAnnotation,
    Quantizer,
)
from torchao.quantization.pt2e.quantizer.quantizer import Q_ANNOTATION_KEY


class CortexMQuantizer(ComposableQuantizer):

    def broadcasting_filter(self, node: Optional[Node]) -> bool:
        """
        Filter function to exclude nodes that perform broadcasting.
        """
        if node is None:
            return False
        if node.target not in [torch.ops.aten.add.Tensor]:
            return False

        if len(node.all_input_nodes) == 2:
            t1 = get_first_fake_tensor(node.all_input_nodes[0])
            t2 = get_first_fake_tensor(node.all_input_nodes[1])
            return t1.shape != t2.shape

        return False
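
    # Example: for a torch.ops.aten.add.Tensor node with input shapes (2, 3)
    # and (1, 3), the shapes differ, so the filter returns True and the add is
    # skipped during annotation; with matching shapes it returns False and the
    # add is annotated.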

    def __init__(self) -> None:
        quantizers: List[OperatorConfigQuantizer] = [
            OperatorConfigQuantizer(
                INT8_BINARY_OPS_OPERATOR_CONFIG, filter_fn=self.broadcasting_filter
            ),
            OperatorConfigQuantizer(INT8_LINEAR_OPERATOR_CONFIG),
        ]
        super().__init__(quantizers)

    def validate(self, model: GraphModule) -> bool:
        return True

    def transform_for_annotation(self, model: GraphModule) -> GraphModule:
        pass_manager = CortexMPassManager(None)
        return pass_manager.transform_for_annotation(model)
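

# A minimal usage sketch (illustrative only; assumes the standard torchao PT2E
# flow, and `MyModule`/`example_inputs` are placeholders — the exact export
# entry point may vary across versions):
#
#     from torchao.quantization.pt2e.quantize_pt2e import convert_pt2e, prepare_pt2e
#
#     exported = torch.export.export_for_training(MyModule(), example_inputs).module()
#     quantizer = CortexMQuantizer()
#     prepared = prepare_pt2e(exported, quantizer)
#     prepared(*example_inputs)  # calibrate with representative data
#     quantized = convert_pt2e(prepared)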


class OperatorConfigQuantizer(Quantizer):
    """
    Quantizes a graph according to an OperatorConfig.

    Args:
        operator_config (OperatorConfig): The operator config to use for quantization.
        filter_fn (Callable): Negative filter function. If it returns True for any
            node in a candidate pattern, the pattern is skipped. Can be used, for
            example, to exclude particular targets or modules.
    """

    def __init__(
        self,
        operator_config: QuantizationConfig,
        filter_fn: Callable[[Node], bool] = lambda node: False,
    ) -> None:
        self.operator_config = operator_config
        self.filter_fn = filter_fn

    def check_node(self, node: Optional[Node], target: OpOverload) -> bool:
        """
        Return True if the node is a valid match for the given target.
        """
        if node is None:
            return False
        if node.target != target:
            return False
        if node.meta.get("quantizer_matched", False):
            return False
        if self.filter_fn(node):
            return False

        return True

    def check_pattern(
        self, node: Optional[Node], pattern: List[OpOverload]
    ) -> Optional[List[Node]]:
        """
        Return the matched nodes if the user chain of the given node matches the
        given pattern, otherwise None. Matching starts at the first user of `node`
        and follows the chain of first users, one node per pattern element.
        """
        match: List[Node] = []
        node = list(node.users)[0] if node and len(node.users) > 0 else None

        for pattern_target in pattern:
            if self.check_node(node, pattern_target):
                match.append(node)
                node = list(node.users)[0] if len(node.users) > 0 else None
            else:
                return None

        return match

    def match_patterns(
        self, model: GraphModule, patterns: List[List[OpOverload]]
    ) -> List[List[Node]]:
        """
        Match all given patterns in the graph and return the list of matches.
        Each node can be part of at most one match; larger patterns are
        prioritized. Currently only linear patterns (single chains) are
        supported.
        """
        # Prefer longer patterns; sort a copy to avoid mutating the caller's list.
        patterns = sorted(patterns, key=len, reverse=True)
        matches: List[List[Node]] = []
        for pattern in patterns:
            for node in model.graph.nodes:
                potential_match = self.check_pattern(node, pattern)
                if potential_match:
                    matches.append(potential_match)
                    for matched_node in potential_match:
                        matched_node.meta["quantizer_matched"] = True

        return matches
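
    # Example: given the patterns [[aten.linear.default], [aten.linear.default,
    # aten.relu.default]], the longer pattern is matched first, so a linear
    # followed by a relu is consumed as one (linear, relu) match and only
    # stand-alone linears remain for the single-op pattern.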

    def is_parameter(self, node: Node, model: GraphModule) -> bool:
        """Return True if the given node is a parameter of the model."""
        try:
            _ = model.get_parameter(node.target)
            return True
        except Exception:
            return False

    def is_weight(self, node: Node, params: List[Node], model: GraphModule) -> bool:
        """Return True if node is the first of the given parameters (the weight)."""
        return len(params) > 0 and node == params[0]

    def is_bias(self, node: Node, params: List[Node], model: GraphModule) -> bool:
        """Return True if node is the second of the given parameters (the bias)."""
        return len(params) == 2 and node == params[1]

    def annotate_match(
        self, match: List[Node], config: QuantizationConfig, model: GraphModule
    ) -> None:
        """
        Annotate a matched pattern according to the given quantization config. The
        following assumptions are made:

        - All operators have either no parameters, only a weight, or a weight and a bias.
        - Tensors that are the first parameter of an operator are annotated as weights.
        - Tensors that are the second parameter of an operator are annotated as biases.
        - All other tensors going into the matched pattern are annotated as input
          activations.
        - All outputs leaving the matched pattern are annotated as output activations.
        """
        for node in match:
            input_qspec_map = {}
            output_qspec = None

            params = [n for n in node.all_input_nodes if self.is_parameter(n, model)]
            # Check that the assumptions on the number of parameters hold to avoid
            # silent errors.
            assert (
                0 <= len(params) <= 2
            ), f"{self.__class__.__name__} expected 0 params, 1 param (weight), or 2 params (weight, bias), but got {len(params)} for node {node}."

            for input_node in node.all_input_nodes:
                if self.is_weight(input_node, params, model):
                    input_qspec_map[input_node] = config.weight if config else None
                elif self.is_bias(input_node, params, model):
                    # The bias qspec is derived from the input and weight qspecs.
                    input_qspec_map[input_node] = config.bias(node) if config else None
                elif input_node not in match:
                    input_qspec_map[input_node] = (
                        config.input_activation if config else None
                    )

            # Only annotate an output qspec on nodes whose outputs leave the match.
            if all(user not in match for user in node.users):
                output_qspec = config.output_activation if config else None

            node.meta[Q_ANNOTATION_KEY] = QuantizationAnnotation(
                input_qspec_map, output_qspec
            )
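
    # Example: for a matched linear with weight and bias parameters, the
    # activation input maps to config.input_activation, the weight to
    # config.weight, the bias to the derived config.bias(node) qspec, and the
    # node's output (which leaves the match) to config.output_activation.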

    def annotate(self, model: GraphModule) -> None:
        matches = self.match_patterns(model, self.operator_config.operators)
        for match in matches:
            self.annotate_match(match, self.operator_config.config, model)

    def validate(self, model: GraphModule) -> bool:
        return True