from types import SimpleNamespace

import pytest
import torch
from torch import nn

from llmcompressor.modeling.offset_norm import (
    CalibrationOffsetNorm,
    NormCalibrationModule,
    norm_calibration_context,
)
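
# The module under test converts "offset" norms (Gemma-style, computing
# `norm(x) * (1 + weight)`) to the standard convention (`norm(x) * weight`)
# for calibration, then converts them back. A sketch of the intended flow,
# where `run_calibration` is a hypothetical stand-in for the caller's
# calibration loop:
#
#     with norm_calibration_context(model):
#         run_calibration(model)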

# ---------------------------------------------------------------------------
# Mock offset-norm module matching Gemma's (1 + weight) convention
# ---------------------------------------------------------------------------


class FakeGemmaRMSNorm(nn.Module):
    """Minimal mock matching the GemmaRMSNorm forward: output * (1 + weight)."""

    def __init__(self, dim, eps=1e-6, dtype=torch.bfloat16):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.zeros(dim, dtype=dtype))

    def _norm(self, x):
        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)

    def forward(self, x):
        output = self._norm(x.float())
        output = output * (1.0 + self.weight.float())
        return output.type_as(x)


# Patch class name so the registry picks it up
FakeGemmaRMSNorm.__name__ = "GemmaRMSNorm"
FakeGemmaRMSNorm.__qualname__ = "GemmaRMSNorm"
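
# NOTE: the registry is assumed to key on the (lowercase) class name, which is
# why patching __name__ above is enough for NormCalibrationModule to treat the
# mock as a GemmaRMSNorm (see TestNormRegistration below).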


# ---------------------------------------------------------------------------
# Tests
# ---------------------------------------------------------------------------


@pytest.mark.unit
class TestCalibrationOffsetNormInit:
    """Test that __init__ converts weights and stores dtype."""

    def test_weight_conversion(self):
        original = FakeGemmaRMSNorm(dim=4)
        original.weight.data = torch.tensor([0.1, -0.05, 0.0, 0.2])
        calib = CalibrationOffsetNorm(original, config=None)

        expected = torch.tensor([1.1, 0.95, 1.0, 1.2])
        assert torch.allclose(calib.weight.data, expected)

    def test_dtype_stored(self):
        original = FakeGemmaRMSNorm(dim=4, dtype=torch.bfloat16)
        calib = CalibrationOffsetNorm(original, config=None)

        assert calib._orig_dtype == torch.bfloat16
        assert calib.weight.dtype == torch.bfloat16


@pytest.mark.unit
class TestCalibrationOffsetNormForward:
    """Test that forward produces the same result as the original."""

    def test_output_matches_original(self):
        original = FakeGemmaRMSNorm(dim=8, dtype=torch.float32)
        original.weight.data = torch.randn(8) * 0.1
        calib = CalibrationOffsetNorm(original, config=None)

        x = torch.randn(2, 4, 8)
        original_out = original(x)
        calib_out = calib(x)

        assert torch.allclose(original_out, calib_out, atol=1e-5)


@pytest.mark.unit
class TestCalibrationOffsetNormRestore:
    """Test that restore reconverts weights correctly."""

    def test_restore_roundtrip(self):
        original = FakeGemmaRMSNorm(dim=4, dtype=torch.bfloat16)
        original.weight.data = torch.tensor(
            [0.1, -0.05, 0.0, 0.2], dtype=torch.bfloat16
        )
        saved = original.weight.data.clone()

        calib = CalibrationOffsetNorm(original, config=None)
        calib.restore(original)

        assert original.weight.dtype == torch.bfloat16
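        # Loose tolerance: bfloat16 carries only ~2-3 significant decimal
        # digits, so the offset -> standard -> offset round-trip is lossy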
        assert torch.allclose(original.weight.data.float(), saved.float(), atol=2e-2)

    def test_restore_after_smoothing(self):
        original = FakeGemmaRMSNorm(dim=4, dtype=torch.float32)
        original.weight.data = torch.tensor([0.1, -0.05, 0.0, 0.2])

        calib = CalibrationOffsetNorm(original, config=None)
        # Simulate a modifier dividing weights by scales=2
        calib.weight.data.div_(2.0)
        calib.restore(original)

        # Standard weight after smoothing: [1.1, 0.95, 1.0, 1.2] / 2
        # = [0.55, 0.475, 0.5, 0.6]
        # Restored offset weight: standard - 1
        # = [-0.45, -0.525, -0.5, -0.4]
        expected = torch.tensor([-0.45, -0.525, -0.5, -0.4])
        assert torch.allclose(original.weight.data, expected, atol=1e-5)

        # Verify: 1 + restored_weight == smoothed standard weight
        effective = 1.0 + original.weight.data
        expected_effective = torch.tensor([0.55, 0.475, 0.5, 0.6])
        assert torch.allclose(effective, expected_effective, atol=1e-5)


@pytest.mark.unit
class TestNormRegistration:
    """Test that registered norms are detected and standard norms are not."""

    def test_gemma_detected(self):
        """GemmaRMSNorm (and aliases) should be in the registry."""
        names = NormCalibrationModule.registered_names()
        aliases = NormCalibrationModule.registered_aliases()
        all_registered = names + aliases
        for name in [
            "gemmarmsnorm",
            "gemma2rmsnorm",
            "gemma3rmsnorm",
            "qwen3nextrmsnorm",
        ]:
            assert name in all_registered, f"{name} not in registry"

    def test_standard_norm_not_detected(self):
        """Standard LayerNorm should not be in the registry."""
        registered = NormCalibrationModule.registered_names()
        assert "layernorm" not in registered
        assert "rmsnorm" not in registered
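        # Standard norms already apply the plain `weight * x` convention, so
        # (presumably) they need no offset conversion and stay unregistered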


@pytest.mark.unit
class TestNormCalibrationContext:
    """Test that norm_calibration_context replaces and restores modules."""

    def test_modules_replaced_inside_context(self):
        """Offset norms should be replaced with CalibrationOffsetNorm inside."""
        layer = nn.Module()
        layer.input_layernorm = FakeGemmaRMSNorm(dim=8, dtype=torch.float32)
        layer.post_attention_layernorm = FakeGemmaRMSNorm(dim=8, dtype=torch.float32)

        model = nn.Module()
        model.layer = layer
        model.config = SimpleNamespace(hidden_size=8)

        with norm_calibration_context(model):
            assert isinstance(layer.input_layernorm, CalibrationOffsetNorm)
            assert isinstance(layer.post_attention_layernorm, CalibrationOffsetNorm)

    def test_modules_restored_after_context(self):
        """Original modules should be restored with correct weights."""
        layer = nn.Module()
        layer.input_layernorm = FakeGemmaRMSNorm(dim=4, dtype=torch.bfloat16)
        layer.input_layernorm.weight.data = torch.tensor(
            [0.1, -0.05, 0.0, 0.2], dtype=torch.bfloat16
        )
        saved = layer.input_layernorm.weight.data.clone()

        model = nn.Module()
        model.layer = layer
        model.config = SimpleNamespace(hidden_size=4)

        with norm_calibration_context(model):
            pass

        assert isinstance(layer.input_layernorm, FakeGemmaRMSNorm)
        assert layer.input_layernorm.weight.dtype == torch.bfloat16
        assert torch.allclose(
            layer.input_layernorm.weight.data.float(), saved.float(), atol=2e-2
        )

    def test_weights_updated_after_smoothing(self):
        """Weights modified inside the context should be reflected after."""
        layer = nn.Module()
        layer.norm = FakeGemmaRMSNorm(dim=4, dtype=torch.float32)
        layer.norm.weight.data = torch.tensor([0.1, -0.05, 0.0, 0.2])

        model = nn.Module()
        model.layer = layer
        model.config = SimpleNamespace(hidden_size=4)

        with norm_calibration_context(model):
            # Simulate modifier dividing weights by scales=2
            layer.norm.weight.data.div_(2.0)

        # Standard weight was [1.1, 0.95, 1.0, 1.2] / 2 = [0.55, 0.475, 0.5, 0.6]
        # Restored offset weight: standard - 1 = [-0.45, -0.525, -0.5, -0.4]
        expected = torch.tensor([-0.45, -0.525, -0.5, -0.4])
        assert torch.allclose(layer.norm.weight.data, expected, atol=1e-5)
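

# To run just these tests (path is illustrative; adjust to the repo layout):
#   pytest tests/llmcompressor/modeling/test_offset_norm.py -m unit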