1+ # Copyright 2026 Sony Semiconductor Solutions, Inc. All rights reserved.
2+ #
3+ # Licensed under the Apache License, Version 2.0 (the "License");
4+ # you may not use this file except in compliance with the License.
5+ # You may obtain a copy of the License at
6+ #
7+ # http://www.apache.org/licenses/LICENSE-2.0
8+ #
9+ # Unless required by applicable law or agreed to in writing, software
10+ # distributed under the License is distributed on an "AS IS" BASIS,
11+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+ # See the License for the specific language governing permissions and
13+ # limitations under the License.
14+ # ==============================================================================
15+ import model_compression_toolkit as mct
16+ import torch
17+ import torch .nn as nn
18+ import pytest
19+
20+ # This test checks whether an ActivationQuantizationHolder can be attached to a layer that accepts 1D tensor input.
21+ # These layers were selected from operators supported by the SDSP converter.
22+
class Model(nn.Module):
    """Toy network that applies one named op to a 1D constant tensor.

    A conv/relu trunk processes the 4D input; the op selected by ``name`` is
    applied to ``self.tensor`` (a 1D parameter) inside ``forward`` so the
    tracer records it, and its result is added to the trunk output. This
    forces an ActivationQuantizationHolder to be attached to a layer whose
    input is a 1D tensor.

    Args:
        name: Key into ``_OPS`` selecting which op to apply to the 1D tensor.

    Raises:
        ValueError: If ``name`` is not a supported op.
    """

    # Maps op name -> callable applied to the 1D constant inside forward().
    # The callables are invoked inside forward (not __init__) so the ops are
    # part of the traced graph. Ops taking a second operand use 1, matching
    # the original elif chain.
    _OPS = {
        'add': lambda t: torch.add(t, 1),
        'relu6': torch.nn.functional.relu6,
        'relu': torch.nn.functional.relu,
        'sigmoid': torch.nn.functional.sigmoid,
        'eq': lambda t: torch.eq(t, 1),
        'leaky_relu': torch.nn.functional.leaky_relu,
        'mul': lambda t: torch.mul(t, 1),
        'sub': lambda t: torch.sub(t, 1),
        'div': lambda t: torch.div(t, 1),
        'softmax': torch.nn.functional.softmax,
        'tanh': torch.nn.functional.tanh,
        'negative': torch.negative,
        'abs': torch.abs,
        'sqrt': torch.sqrt,
        'sum': torch.sum,
        'rsqrt': torch.rsqrt,
        'silu': torch.nn.functional.silu,
        'hardswish': torch.nn.functional.hardswish,
        'hardsigmoid': torch.nn.functional.hardsigmoid,
        'pow': lambda t: torch.pow(t, 1),
        'gelu': torch.nn.functional.gelu,
        'cos': torch.cos,
        'sin': torch.sin,
        'exp': torch.exp,
    }

    def __init__(self, name):
        super().__init__()
        # Fail fast on an unknown op instead of hitting a confusing
        # NameError for 'const' inside forward().
        if name not in self._OPS:
            raise ValueError(f"Unsupported layer name: {name}")
        self.name = name
        self.conv = nn.Conv2d(3, 3, kernel_size=3, padding=1)
        self.relu = nn.ReLU()
        self.tensor = nn.Parameter(2.0 * torch.ones([1]))  # 1D tensor

    def forward(self, x):
        x = self.conv(x)
        x = self.relu(x)
        # Apply the selected op to the 1D parameter; the result broadcasts
        # against the 4D trunk output.
        const = self._OPS[self.name](self.tensor)
        y = x + const
        return y
87+
def representative_data_gen():
    """Yield one random batch shaped like the model input: (1, 3, 8, 8)."""
    batch = torch.randn(1, 3, 8, 8)
    yield [batch]
90+
@pytest.mark.parametrize("layer", [
    'add', 'relu6', 'relu', 'sigmoid', 'eq', 'leaky_relu', 'mul', 'sub', 'div', 'softmax',
    'tanh', 'negative', 'abs', 'sqrt', 'sum', 'rsqrt', 'silu', 'hardswish', 'hardsigmoid',
    'pow', 'gelu', 'cos', 'sin', 'exp'
])
def test_ptq_1d_tensor(layer):
    """PTQ: an activation holder quantizer must be attached to the 1D-input op."""
    float_model = Model(name=layer)

    tpc = mct.get_target_platform_capabilities("6.0")
    quantized_model, _ = mct.ptq.pytorch_post_training_quantization(
        float_model,
        representative_data_gen=representative_data_gen,
        target_platform_capabilities=tpc)

    # NOTE(review): these three ops show up with a '_1' suffix in the
    # quantized model's attribute names — presumably a naming collision
    # during graph tracing; verify against the converter's naming scheme.
    suffix = '_1' if layer in ('abs', 'sum', 'pow') else ''
    holder_name = f'{layer}{suffix}_activation_holder_quantizer'
    assert hasattr(quantized_model, holder_name)
111+
112+
@pytest.mark.parametrize("layer", [
    'add', 'relu6', 'relu', 'sigmoid', 'eq', 'leaky_relu', 'mul', 'sub', 'div', 'softmax',
    'tanh', 'negative', 'abs', 'sqrt', 'sum', 'rsqrt', 'silu', 'hardswish', 'hardsigmoid',
    'pow', 'gelu', 'cos', 'sin', 'exp'
])
def test_ptq_mixed_precision_1d_tensor(layer):
    """Mixed-precision PTQ: the 1D-input op still gets an activation holder."""
    float_model = Model(name=layer)

    tpc = mct.get_target_platform_capabilities("6.0")
    mp_config = mct.core.MixedPrecisionQuantizationConfig(
        num_of_images=1, use_hessian_based_scores=False)
    core_config = mct.core.CoreConfig(mixed_precision_config=mp_config)
    ru_data = mct.core.pytorch_resource_utilization_data(
        float_model,
        representative_data_gen,
        core_config,
        target_platform_capabilities=tpc)
    # Constrain weights memory to 90% of the float model's footprint so the
    # mixed-precision search is actually exercised.
    resource_utilization = mct.core.ResourceUtilization(ru_data.weights_memory * 0.9)
    quantized_model, _ = mct.ptq.pytorch_post_training_quantization(
        float_model,
        representative_data_gen,
        target_resource_utilization=resource_utilization,
        core_config=core_config,
        target_platform_capabilities=tpc)

    # NOTE(review): '_1' suffix for these ops — presumably a tracing name
    # collision; verify against the converter's naming scheme.
    suffix = '_1' if layer in ('abs', 'sum', 'pow') else ''
    holder_name = f'{layer}{suffix}_activation_holder_quantizer'
    assert hasattr(quantized_model, holder_name)
142+
143+
@pytest.mark.parametrize("layer", [
    'add', 'relu6', 'relu', 'sigmoid', 'eq', 'leaky_relu', 'mul', 'sub', 'div', 'softmax',
    'tanh', 'negative', 'abs', 'sqrt', 'sum', 'rsqrt', 'silu', 'hardswish', 'hardsigmoid',
    'pow', 'gelu', 'cos', 'sin', 'exp'
])
def test_gptq_1d_tensor(layer):
    """GPTQ: the 1D-input op still gets an activation holder quantizer."""
    float_model = Model(name=layer)

    tpc = mct.get_target_platform_capabilities("6.0")
    gptq_config = mct.gptq.get_pytorch_gptq_config(n_epochs=5)
    quantized_model, _ = mct.gptq.pytorch_gradient_post_training_quantization(
        float_model,
        representative_data_gen,
        gptq_config=gptq_config,
        target_platform_capabilities=tpc)

    # NOTE(review): '_1' suffix for these ops — presumably a tracing name
    # collision; verify against the converter's naming scheme.
    suffix = '_1' if layer in ('abs', 'sum', 'pow') else ''
    holder_name = f'{layer}{suffix}_activation_holder_quantizer'
    assert hasattr(quantized_model, holder_name)