Skip to content

Commit cd6518a

Browse files
irenabirenab
authored and committed
add tests
1 parent bae7a45 commit cd6518a

File tree

2 files changed

+184
-0
lines changed

2 files changed

+184
-0
lines changed
Lines changed: 94 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,94 @@
1+
# Copyright 2025 Sony Semiconductor Israel, Inc. All rights reserved.
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
# ==============================================================================
15+
import keras
16+
import numpy as np
17+
import pytest
18+
from mct_quantizers import KerasActivationQuantizationHolder, KerasQuantizationWrapper
19+
20+
from model_compression_toolkit.core.common.mixed_precision.set_layer_to_bitwidth import \
21+
set_activation_quant_layer_to_bitwidth, set_weights_quant_layer_to_bitwidth
22+
from model_compression_toolkit.core.keras.constants import KERNEL
23+
from model_compression_toolkit.core.keras.mixed_precision.configurable_activation_quantizer import \
24+
ConfigurableActivationQuantizer
25+
from model_compression_toolkit.core.keras.mixed_precision.configurable_weights_quantizer import \
26+
ConfigurableWeightsQuantizer
27+
from tests_pytest._test_util.graph_builder_utils import build_nbits_qc
28+
from tests_pytest.keras_tests.keras_test_util.keras_test_mixin import KerasFwMixin
29+
30+
31+
class TestConfigureQLayer(KerasFwMixin):
    """Tests for configuring Keras mixed-precision quantization layers to a specific bitwidth index."""

    @pytest.mark.parametrize('ind', [None, 0, 1, 2])
    def test_configure_activation(self, ind):
        """ Test correct activation quantizer is set and applied. """
        def quant_fn(nbits, *args, **kwargs):
            # Fake quantization builder: scales the input by nbits so the test can
            # observe which candidate config was activated.
            return lambda x: x*nbits

        abits = [8, 4, 2]
        quantizer = ConfigurableActivationQuantizer(node_q_cfg=[
            build_nbits_qc(abit, activation_quantization_fn=quant_fn) for abit in abits
        ])
        layer = KerasActivationQuantizationHolder(quantizer)
        set_activation_quant_layer_to_bitwidth(layer, ind, self.fw_impl)
        assert quantizer.active_quantization_config_index == ind
        x = np.random.rand(100)
        y = layer(x)
        if ind is None:
            # No active config index => quantization disabled, output equals input.
            assert np.allclose(x, y)
        else:
            assert np.allclose(x*abits[ind], y)

    @pytest.mark.parametrize('ind', [None, 0, 1, 2])
    def test_configure_weights(self, ind):
        """ Test correct weights quantizer is set and applied. """
        inp = keras.layers.Input(shape=(16, 16, 3))
        out = keras.layers.Conv2D(8, kernel_size=5)(inp)
        model = keras.Model(inp, out)
        inner_layer = model.layers[1]
        # Snapshot float weights before wrapping, for building the reference model below.
        orig_weight = inner_layer.kernel.numpy()
        orig_bias = inner_layer.bias.numpy()

        wbits = [8, 4, 2]
        qcs = [build_nbits_qc(w_attr={KERNEL: (wbit, True)}) for wbit in wbits]
        for qc in qcs:
            attr_cfg = qc.weights_quantization_cfg.get_attr_config(KERNEL)
            attr_cfg.weights_channels_axis = (0,)
            # Fake quantization fn: scales the kernel by nbits so the active config is observable.
            attr_cfg.weights_quantization_fn = lambda x, nbits, *args: x*nbits
        quantizer = ConfigurableWeightsQuantizer(
            node_q_cfg=qcs,
            float_weights=inner_layer.kernel.numpy(),
            kernel_attr=KERNEL
        )
        layer = KerasQuantizationWrapper(inner_layer, {KERNEL: quantizer})

        set_weights_quant_layer_to_bitwidth(layer, ind, self.fw_impl)

        assert quantizer.active_quantization_config_index == ind
        x = np.random.rand(1, 16, 16, 3).astype(np.float32)
        y = layer(x)
        # check that correct quantizer was indeed applied by applying quantization function to kernel manually
        # and comparing the outputs
        ref_inp = keras.layers.Input(shape=(16, 16, 3))
        # Build the expected kernel without in-place ops: the original `weight *= wbits[ind]`
        # was an aliased in-place multiply that silently mutated orig_weight.
        weight = orig_weight if ind is None else orig_weight * wbits[ind]
        ref_out = keras.layers.Conv2D(8, kernel_size=5, kernel_initializer=keras.initializers.Constant(weight),
                                      bias_initializer=keras.initializers.Constant(orig_bias))(ref_inp)
        ref_model = keras.Model(ref_inp, ref_out)
        ref_layer = ref_model.layers[1]
        y_ref = ref_layer(x)
        assert np.allclose(y, y_ref)

        # check that can be configured and run again
        set_weights_quant_layer_to_bitwidth(layer, 1, self.fw_impl)
        layer(x)
Lines changed: 90 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,90 @@
1+
# Copyright 2025 Sony Semiconductor Israel, Inc. All rights reserved.
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
# ==============================================================================
15+
import pytest
16+
import torch
17+
from mct_quantizers import PytorchActivationQuantizationHolder, PytorchQuantizationWrapper
18+
from mct_quantizers.pytorch.quantizer_utils import to_torch_tensor
19+
20+
from model_compression_toolkit.core.common.mixed_precision.set_layer_to_bitwidth import \
21+
set_activation_quant_layer_to_bitwidth, set_weights_quant_layer_to_bitwidth
22+
from model_compression_toolkit.core.pytorch.constants import KERNEL
23+
from model_compression_toolkit.core.pytorch.mixed_precision.configurable_activation_quantizer import \
24+
ConfigurableActivationQuantizer
25+
from model_compression_toolkit.core.pytorch.mixed_precision.configurable_weights_quantizer import \
26+
ConfigurableWeightsQuantizer
27+
from model_compression_toolkit.core.pytorch.pytorch_device_config import get_working_device
28+
from tests_pytest._test_util.graph_builder_utils import build_nbits_qc
29+
from tests_pytest.pytorch_tests.torch_test_util.torch_test_mixin import TorchFwMixin
30+
31+
32+
class TestConfigureQLayer(TorchFwMixin):
    """Tests for configuring PyTorch mixed-precision quantization layers to a specific bitwidth index."""

    @pytest.mark.parametrize('ind', [None, 0, 1, 2])
    def test_configure_activation(self, ind):
        """ Test correct activation quantizer is set and applied. """
        def quant_fn(nbits, *args, **kwargs):
            # Fake quantization builder: scales the input by nbits so the test can
            # observe which candidate config was activated.
            return lambda x: x*nbits

        abits = [8, 4, 2]
        quantizer = ConfigurableActivationQuantizer(node_q_cfg=[
            build_nbits_qc(abit, activation_quantization_fn=quant_fn) for abit in abits
        ])
        layer = PytorchActivationQuantizationHolder(quantizer)
        set_activation_quant_layer_to_bitwidth(layer, ind, self.fw_impl)
        assert quantizer.active_quantization_config_index == ind
        x = torch.rand(100)
        y = layer(x)
        if ind is None:
            # No active config index => quantization disabled, output equals input.
            assert torch.equal(x, y)
        else:
            assert torch.allclose(x*abits[ind], y)

    @pytest.mark.parametrize('ind', [None, 0, 1, 2])
    def test_configure_weights(self, ind):
        """ Test correct weights quantizer is set and applied. """
        inner_layer = torch.nn.Conv2d(3, 8, kernel_size=5).to(get_working_device())
        # Snapshot float weights before wrapping, for building the reference layer below.
        orig_weight = inner_layer.weight.clone()
        orig_bias = inner_layer.bias.clone()

        wbits = [8, 4, 2]
        qcs = [build_nbits_qc(w_attr={KERNEL: (wbit, True)}) for wbit in wbits]
        for qc in qcs:
            attr_cfg = qc.weights_quantization_cfg.get_attr_config(KERNEL)
            attr_cfg.weights_channels_axis = (0,)
            # Fake quantization fn: scales the kernel by nbits so the active config is observable.
            attr_cfg.weights_quantization_fn = lambda x, nbits, *args: x*nbits

        quantizer = ConfigurableWeightsQuantizer(
            node_q_cfg=qcs,
            float_weights=inner_layer.weight,
            kernel_attr=KERNEL
        )
        layer = PytorchQuantizationWrapper(inner_layer, {KERNEL: quantizer})

        set_weights_quant_layer_to_bitwidth(layer, ind, self.fw_impl)

        assert quantizer.active_quantization_config_index == ind
        x = to_torch_tensor(torch.rand((1, 3, 16, 16), dtype=torch.float32))
        y = layer(x)
        # check that correct quantizer was indeed applied by applying quantization function to kernel manually
        # and comparing the outputs
        ref_layer = torch.nn.Conv2d(3, 8, kernel_size=5).to(get_working_device())
        # Clone so the in-place `*=` below cannot mutate orig_weight through shared storage:
        # the original assigned orig_weight.data directly, aliasing the saved snapshot.
        ref_layer.weight.data = orig_weight.data.clone()
        ref_layer.bias.data = orig_bias.data.clone()
        if ind is not None:
            ref_layer.weight.data *= wbits[ind]
        y_ref = ref_layer(x)
        assert torch.allclose(y, y_ref)

        # check that can be configured and run again
        set_weights_quant_layer_to_bitwidth(layer, 1, self.fw_impl)
        layer(x)

0 commit comments

Comments
 (0)