Skip to content

Commit 5aa0e02

Browse files
Add ptq+mixed precision, gptq test
1 parent b36a372 commit 5aa0e02

File tree

3 files changed

+228
-108
lines changed

3 files changed

+228
-108
lines changed

tests_pytest/pytorch_tests/e2e_tests/test_ptq_for_1d_tensor.py

Lines changed: 0 additions & 105 deletions
This file was deleted.
Lines changed: 165 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,165 @@
1+
# Copyright 2026 Sony Semiconductor Solutions, Inc. All rights reserved.
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
# ==============================================================================
15+
import model_compression_toolkit as mct
16+
import torch
17+
import torch.nn as nn
18+
import pytest
19+
20+
# This test checks whether an ActivationQuantizationHolder can be attached to a layer that accepts 1D tensor input.
21+
# These layers were selected from operators supported by the SDSP converter.
22+
23+
class Model(nn.Module):
    """Conv -> ReLU backbone that additionally applies one operator, selected by
    ``name``, to a 1D parameter tensor and adds the result to the activation map.

    The operator names were selected from operators supported by the SDSP
    converter; each operator receives the 1D parameter ``self.tensor``.

    Args:
        name: Key of the operator to apply (must be a key of ``_OPS``).

    Raises:
        ValueError: in ``forward`` when ``name`` is not a supported operator.
    """

    # name -> callable applied to the 1D parameter tensor.
    # The original if/elif chain left `const` unbound for an unknown name,
    # which surfaced as a confusing UnboundLocalError; this table makes the
    # failure explicit and keeps forward() flat. The exact torch calls of the
    # original are preserved so traced node names stay unchanged.
    _OPS = {
        'add': lambda t: torch.add(t, 1),
        'relu6': lambda t: torch.nn.functional.relu6(t),
        'relu': lambda t: torch.nn.functional.relu(t),
        'sigmoid': lambda t: torch.nn.functional.sigmoid(t),
        'eq': lambda t: torch.eq(t, 1),
        'leaky_relu': lambda t: torch.nn.functional.leaky_relu(t),
        'mul': lambda t: torch.mul(t, 1),
        'sub': lambda t: torch.sub(t, 1),
        'div': lambda t: torch.div(t, 1),
        'softmax': lambda t: torch.nn.functional.softmax(t),
        'tanh': lambda t: torch.nn.functional.tanh(t),
        'negative': lambda t: torch.negative(t),
        'abs': lambda t: torch.abs(t),
        'sqrt': lambda t: torch.sqrt(t),
        'sum': lambda t: torch.sum(t),
        'rsqrt': lambda t: torch.rsqrt(t),
        'silu': lambda t: torch.nn.functional.silu(t),
        'hardswish': lambda t: torch.nn.functional.hardswish(t),
        'hardsigmoid': lambda t: torch.nn.functional.hardsigmoid(t),
        'pow': lambda t: torch.pow(t, 1),
        'gelu': lambda t: torch.nn.functional.gelu(t),
        'cos': lambda t: torch.cos(t),
        'sin': lambda t: torch.sin(t),
        'exp': lambda t: torch.exp(t),
    }

    def __init__(self, name):
        super().__init__()
        self.name = name
        self.conv = nn.Conv2d(3, 3, kernel_size=3, padding=1)
        self.relu = nn.ReLU()
        self.tensor = nn.Parameter(2.0 * torch.ones([1]))  # 1D tensor

    def forward(self, x):
        x = self.conv(x)
        x = self.relu(x)

        try:
            op = self._OPS[self.name]
        except KeyError:
            raise ValueError(f"Unsupported operator name: {self.name!r}") from None
        const = op(self.tensor)

        y = x + const
        return y
87+
88+
def representative_data_gen():
    """Yield a single calibration batch: one random (1, 3, 8, 8) input tensor."""
    batch = torch.randn(1, 3, 8, 8)
    yield [batch]
90+
91+
@pytest.mark.parametrize("layer", [
    'add', 'relu6', 'relu', 'sigmoid', 'eq', 'leaky_relu', 'mul', 'sub', 'div', 'softmax',
    'tanh', 'negative', 'abs', 'sqrt', 'sum', 'rsqrt', 'silu', 'hardswish', 'hardsigmoid',
    'pow', 'gelu', 'cos', 'sin', 'exp'
])
def test_ptq_1d_tensor(layer):
    """PTQ should attach an ActivationQuantizationHolder to an op fed a 1D tensor."""
    float_model = Model(name=layer)

    tpc = mct.get_target_platform_capabilities("6.0")
    quantized_model, _ = mct.ptq.pytorch_post_training_quantization(
        float_model,
        representative_data_gen=representative_data_gen,
        target_platform_capabilities=tpc)

    # NOTE(review): abs/sum/pow holders carry a "_1" suffix — presumably a
    # node-name collision inside MCT; matches the original expectation.
    suffix = '_1' if layer in ('abs', 'sum', 'pow') else ''
    assert hasattr(quantized_model, f'{layer}{suffix}_activation_holder_quantizer')
111+
112+
113+
@pytest.mark.parametrize("layer", [
    'add', 'relu6', 'relu', 'sigmoid', 'eq', 'leaky_relu', 'mul', 'sub', 'div', 'softmax',
    'tanh', 'negative', 'abs', 'sqrt', 'sum', 'rsqrt', 'silu', 'hardswish', 'hardsigmoid',
    'pow', 'gelu', 'cos', 'sin', 'exp'
])
def test_ptq_mixed_precision_1d_tensor(layer):
    """Mixed-precision PTQ should keep the activation holder on the 1D-tensor op."""
    float_model = Model(name=layer)

    tpc = mct.get_target_platform_capabilities("6.0")
    mp_config = mct.core.MixedPrecisionQuantizationConfig(num_of_images=1,
                                                          use_hessian_based_scores=False)
    core_config = mct.core.CoreConfig(mixed_precision_config=mp_config)

    ru_data = mct.core.pytorch_resource_utilization_data(
        float_model,
        representative_data_gen,
        core_config,
        target_platform_capabilities=tpc)
    # Constrain weights memory below the single-precision requirement so the
    # mixed-precision search is actually exercised.
    target_ru = mct.core.ResourceUtilization(ru_data.weights_memory * 0.9)

    quantized_model, _ = mct.ptq.pytorch_post_training_quantization(
        float_model,
        representative_data_gen,
        target_resource_utilization=target_ru,
        core_config=core_config,
        target_platform_capabilities=tpc)

    # NOTE(review): abs/sum/pow holders carry a "_1" suffix — presumably a
    # node-name collision inside MCT; matches the original expectation.
    suffix = '_1' if layer in ('abs', 'sum', 'pow') else ''
    assert hasattr(quantized_model, f'{layer}{suffix}_activation_holder_quantizer')
142+
143+
144+
@pytest.mark.parametrize("layer", [
    'add', 'relu6', 'relu', 'sigmoid', 'eq', 'leaky_relu', 'mul', 'sub', 'div', 'softmax',
    'tanh', 'negative', 'abs', 'sqrt', 'sum', 'rsqrt', 'silu', 'hardswish', 'hardsigmoid',
    'pow', 'gelu', 'cos', 'sin', 'exp'
])
def test_gptq_1d_tensor(layer):
    """GPTQ should attach an ActivationQuantizationHolder to an op fed a 1D tensor."""
    float_model = Model(name=layer)

    tpc = mct.get_target_platform_capabilities("6.0")
    gptq_config = mct.gptq.get_pytorch_gptq_config(n_epochs=5)
    quantized_model, _ = mct.gptq.pytorch_gradient_post_training_quantization(
        float_model,
        representative_data_gen,
        gptq_config=gptq_config,
        target_platform_capabilities=tpc)

    # NOTE(review): abs/sum/pow holders carry a "_1" suffix — presumably a
    # node-name collision inside MCT; matches the original expectation.
    suffix = '_1' if layer in ('abs', 'sum', 'pow') else ''
    assert hasattr(quantized_model, f'{layer}{suffix}_activation_holder_quantizer')

tests_pytest/pytorch_tests/e2e_tests/test_ptq_for_scalar.py renamed to tests_pytest/pytorch_tests/e2e_tests/test_quantization_for_scalar.py

Lines changed: 63 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,14 +20,19 @@
2020
# This test checks whether an ActivationQuantizationHolder can be attached to a layer that accepts scalar input.
2121
# These layers were selected from operators supported by the SDSP converter.
2222

23-
class ScalarModel(nn.Module):
23+
class Model(nn.Module):
2424

2525
def __init__(self, name):
2626
super().__init__()
2727
self.name = name
28+
self.conv = nn.Conv2d(3, 3, kernel_size=3, padding=1)
29+
self.relu = nn.ReLU()
2830
self.scalar = nn.Parameter(2.0 * torch.ones([])) # Scalar
2931

3032
def forward(self, x):
33+
x = self.conv(x)
34+
x = self.relu(x)
35+
3136
if self.name == 'add':
3237
const = torch.add(self.scalar, 1)
3338
elif self.name == 'relu6':
@@ -88,9 +93,9 @@ def representative_data_gen():
8893
'tanh', 'negative', 'abs', 'sqrt', 'sum', 'rsqrt', 'silu', 'hardswish', 'hardsigmoid',
8994
'pow', 'gelu', 'cos', 'sin', 'exp'
9095
])
91-
def test_scalar_layer(layer):
96+
def test_ptq_scalar(layer):
9297

93-
float_model = ScalarModel(name=layer)
98+
float_model = Model(name=layer)
9499

95100
tpc = mct.get_target_platform_capabilities("6.0")
96101
quantized_model, _ = mct.ptq.pytorch_post_training_quantization(float_model,
@@ -102,4 +107,59 @@ def test_scalar_layer(layer):
102107
else:
103108
activation_holder = f'{layer}_activation_holder_quantizer'
104109

110+
assert hasattr(quantized_model, activation_holder)
111+
112+
113+
@pytest.mark.parametrize("layer", [
    'add', 'relu6', 'relu', 'sigmoid', 'eq', 'leaky_relu', 'mul', 'sub', 'div', 'softmax',
    'tanh', 'negative', 'abs', 'sqrt', 'sum', 'rsqrt', 'silu', 'hardswish', 'hardsigmoid',
    'pow', 'gelu', 'cos', 'sin', 'exp'
])
def test_ptq_mixed_precision_scalar(layer):
    """Mixed-precision PTQ should keep the activation holder on the scalar-input op."""
    float_model = Model(name=layer)

    tpc = mct.get_target_platform_capabilities("6.0")
    mp_config = mct.core.MixedPrecisionQuantizationConfig(num_of_images=1,
                                                          use_hessian_based_scores=False)
    core_config = mct.core.CoreConfig(mixed_precision_config=mp_config)

    ru_data = mct.core.pytorch_resource_utilization_data(
        float_model,
        representative_data_gen,
        core_config,
        target_platform_capabilities=tpc)
    # Constrain weights memory below the single-precision requirement so the
    # mixed-precision search is actually exercised.
    target_ru = mct.core.ResourceUtilization(ru_data.weights_memory * 0.9)

    quantized_model, _ = mct.ptq.pytorch_post_training_quantization(
        float_model,
        representative_data_gen,
        target_resource_utilization=target_ru,
        core_config=core_config,
        target_platform_capabilities=tpc)

    # NOTE(review): abs/sum/pow holders carry a "_1" suffix — presumably a
    # node-name collision inside MCT; matches the original expectation.
    suffix = '_1' if layer in ('abs', 'sum', 'pow') else ''
    assert hasattr(quantized_model, f'{layer}{suffix}_activation_holder_quantizer')
142+
143+
144+
@pytest.mark.parametrize("layer", [
    'add', 'relu6', 'relu', 'sigmoid', 'eq', 'leaky_relu', 'mul', 'sub', 'div', 'softmax',
    'tanh', 'negative', 'abs', 'sqrt', 'sum', 'rsqrt', 'silu', 'hardswish', 'hardsigmoid',
    'pow', 'gelu', 'cos', 'sin', 'exp'
])
def test_gptq_scalar(layer):
    """GPTQ should attach an ActivationQuantizationHolder to the scalar-input op."""
    float_model = Model(name=layer)

    tpc = mct.get_target_platform_capabilities("6.0")
    gptq_config = mct.gptq.get_pytorch_gptq_config(n_epochs=5)
    quantized_model, _ = mct.gptq.pytorch_gradient_post_training_quantization(
        float_model,
        representative_data_gen,
        gptq_config=gptq_config,
        target_platform_capabilities=tpc)

    # NOTE(review): abs/sum/pow holders carry a "_1" suffix — presumably a
    # node-name collision inside MCT; matches the original expectation.
    suffix = '_1' if layer in ('abs', 'sum', 'pow') else ''
    assert hasattr(quantized_model, f'{layer}{suffix}_activation_holder_quantizer')

0 commit comments

Comments
 (0)