# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from dataclasses import dataclass
from typing import Literal

import torch
import torch.nn as nn
from torchtitan.config import Configurable
from torchtitan.tools.logging import logger


class QATConverter(Configurable):
    """Replace nn.Linear with FakeQuantizedLinear for quantization-aware training.

    Uses torchao's FakeQuantizedLinear to simulate low-bit (int4 or int8) weight
    quantization during training. The fake quantization is applied in the forward
    pass, so the model learns to compensate for quantization error.

    When composed with LoRA (QATConverter listed before LoRAConverter in converters),
    LoRA will inherit from FakeQuantizedLinear so base weights are fake-quantized
    while LoRA adapters stay full-precision.
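
    Example (illustrative sketch; constructing the converter directly rather than
    through the torchtitan config system):

        config = QATConverter.Config(dtype="int4", group_size=128)
        converter = QATConverter(config)
        converter.convert(model)  # swaps every nn.Linear in place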
    """

    @dataclass(kw_only=True, slots=True)
    class Config(Configurable.Config):
        dtype: Literal["int4", "int8"] = "int4"
        """Data type for fake quantization. Supported: 'int4', 'int8'."""

        group_size: int = 256
        """Group size for per-group weight quantization.
        Must divide in_features of all Linear layers in the model."""

    def __init__(self, config: Config, **kwargs):
        self.dtype = config.dtype
        self.group_size = config.group_size
        logger.info(
            f"QAT enabled (dtype={self.dtype}, group_size={self.group_size})"
        )

    def convert(self, model: nn.Module) -> None:
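        # Imported here so torchao is only required when this converter is enabled.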
        from torchao.quantization.qat import FakeQuantizedLinear, IntxFakeQuantizeConfig

        dtype_map = {
            "int4": torch.int4,
            "int8": torch.int8,
        }
        torch_dtype = dtype_map[self.dtype]

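        # Weight-only fake quantization: no activation_config is passed below,
        # so activations keep their original dtype.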
        weight_config = IntxFakeQuantizeConfig(
            dtype=torch_dtype,
            group_size=self.group_size,
            is_symmetric=True,
        )

        def _replace_recursive(parent: nn.Module) -> None:
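            # Depth-first walk over the module tree; nn.Linear modules are leaves,
            # so a swapped layer is never descended into again.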
            for name, child in list(parent.named_children()):
                if isinstance(child, nn.Linear):
                    fq = FakeQuantizedLinear.from_linear(
                        child, weight_config=weight_config
                    )
                    setattr(parent, name, fq)
                else:
                    _replace_recursive(child)

        _replace_recursive(model)
        logger.info(
            "Swapped to FakeQuantizedLinear layers "
            f"(dtype={self.dtype}, group_size={self.group_size})"
        )

    def post_optimizer_hook(self, model: nn.Module | list[nn.Module]) -> None:
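        # Nothing to do here: fake quantization keeps no per-step state that
        # needs refreshing after the optimizer step.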
        pass