Commit b1c0c5e

irenab authored and committed
remove min_threshold from NodeActivationQuantizationConfig and from trainable activation quantizer config
1 parent ef9214d · commit b1c0c5e

12 files changed: 9 additions & 36 deletions

model_compression_toolkit/core/common/quantization/node_quantization_config.py

Lines changed: 2 additions & 12 deletions
@@ -101,14 +101,6 @@ def __init__(self, op_cfg: OpQuantizationConfig):
         # TODO irena: computed by compute_activation_bias_correction. shouldnt really be here
         self.activation_bias_correction_term = None
 
-        # TODO irena remove along with set_qc. Keeping for eq and hash to work without set_qc being called
-        self.min_threshold = None
-
-    def set_qc(self, qc: QuantizationConfig):
-        """ TODO irena: temporary keep all the attributes as before not to break all code at once.
-        Eventually all of them should be removed from here. """
-        self.min_threshold = qc.min_threshold
-
     @property
     def enable_activation_quantization(self):
         return self.quant_mode == ActivationQuantizationMode.QUANT
@@ -148,14 +140,12 @@ def __eq__(self, other: Any) -> bool:
 
         return self.activation_quantization_method == other.activation_quantization_method and \
                self.activation_n_bits == other.activation_n_bits and \
-               self.quant_mode == other.quant_mode and \
-               self.min_threshold == other.min_threshold
+               self.quant_mode == other.quant_mode
 
     def __hash__(self):
         return hash((self.activation_quantization_method,
                      self.activation_n_bits,
-                     self.quant_mode,
-                     self.min_threshold))
+                     self.quant_mode))
 
 
 class WeightsAttrQuantizationConfig:

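Why the __eq__ and __hash__ hunks move together: these node configs are deduplicated through sets and dict keys, so both methods must be derived from the same fields once min_threshold is gone. A minimal sketch of that invariant, using a simplified stand-in class rather than the actual MCT config:

    from dataclasses import dataclass

    # Simplified stand-in for NodeActivationQuantizationConfig after this commit:
    # a frozen dataclass derives __eq__ and __hash__ from the same three fields,
    # so equal configs always collapse to a single set entry / dict key.
    @dataclass(frozen=True)
    class ActCfgSketch:
        quantization_method: str  # stand-in for activation_quantization_method
        n_bits: int               # stand-in for activation_n_bits
        quant_mode: str

    a = ActCfgSketch('POWER_OF_TWO', 8, 'QUANT')
    b = ActCfgSketch('POWER_OF_TWO', 8, 'QUANT')
    assert a == b and hash(a) == hash(b)
    assert len({a, b}) == 1  # deduplication relies on eq and hash agreeing
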
model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py

Lines changed: 1 addition & 1 deletion
@@ -64,7 +64,7 @@ def compute_activation_qparams(quant_cfg: QuantizationConfig,
         node_activation_quant_cfg.activation_n_bits,
         min_value,
         max_value,
-        min_threshold=node_activation_quant_cfg.min_threshold,
+        min_threshold=quant_cfg.min_threshold,
         quant_error_method=quant_cfg.activation_error_method,
         is_signed=signed
     )

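This hunk is the flip side of the removal: min_threshold is a global knob on the user-facing QuantizationConfig, so compute_activation_qparams now reads it off quant_cfg directly instead of a per-node copy. A rough sketch of the pattern, with simplified signatures and an illustrative default value (not necessarily MCT's):

    from dataclasses import dataclass

    @dataclass
    class QuantizationConfig:            # global, user-facing config (simplified)
        min_threshold: float = 2 ** -28  # illustrative default, not necessarily MCT's

    @dataclass
    class NodeActivationQuantCfg:        # per-node config: no min_threshold copy anymore
        activation_n_bits: int = 8

    def compute_activation_qparams_sketch(quant_cfg: QuantizationConfig,
                                          node_cfg: NodeActivationQuantCfg,
                                          max_value: float) -> dict:
        # Floor the selected threshold with the single global knob, read
        # straight from quant_cfg rather than from a duplicated node attribute.
        threshold = max(max_value, quant_cfg.min_threshold)
        return {'threshold': threshold, 'n_bits': node_cfg.activation_n_bits}

    # With max_value = 0.0 the threshold floors at min_threshold:
    print(compute_activation_qparams_sketch(QuantizationConfig(),
                                            NodeActivationQuantCfg(), 0.0))
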
model_compression_toolkit/core/graph_prep_runner.py

Lines changed: 0 additions & 1 deletion
@@ -158,7 +158,6 @@ def get_finalized_graph(initial_graph: Graph,
     # not to break all the code at once. Eventually we need to handle quant_config directly, without injecting into candidates.
     # TODO 2: Also we adjust candidates for single precision, which we shouldn't do here.
     def update(qc):
-        qc.activation_quantization_cfg.set_qc(quant_config)
         qc.weights_quantization_cfg.set_qc(quant_config)
         for attr_cfg in qc.weights_quantization_cfg.get_all_weight_attrs_configs().values():
             attr_cfg.weights_error_method = quant_config.weights_error_method

model_compression_toolkit/trainable_infrastructure/common/get_quantizer_config.py

Lines changed: 0 additions & 1 deletion
@@ -76,7 +76,6 @@ def get_trainable_quantizer_activation_config(
         final_cfg.activation_n_bits,
         final_cfg.activation_quantization_params,
         final_cfg.enable_activation_quantization,
-        final_cfg.min_threshold,
         activation_quantization_candidates)
 
 

model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py

Lines changed: 0 additions & 3 deletions
@@ -44,7 +44,6 @@ def __init__(self,
                  activation_n_bits: int,
                  activation_quantization_params: Dict,
                  enable_activation_quantization: bool,
-                 min_threshold: float,
                  activation_quantization_candidates: List[TrainableQuantizerCandidateConfig] = None,
                  ):
         """
@@ -55,13 +54,11 @@
             activation_n_bits (int): Number of bits to quantize the activations.
             activation_quantization_params (Dict): Dictionary that contains activation quantization params.
             enable_activation_quantization (bool): Whether to quantize the layer's activations or not.
-            min_threshold (float): Minimum threshold to use during thresholds selection.
         """
         self.activation_quantization_method = activation_quantization_method
         self.activation_n_bits = activation_n_bits
         self.activation_quantization_params = activation_quantization_params
         self.enable_activation_quantization = enable_activation_quantization
-        self.min_threshold = min_threshold
         self.activation_bits_candidates = activation_quantization_candidates
 
 

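Because min_threshold is gone from the constructor signature, any caller still passing it now fails loudly with a TypeError, which is why every call site in the test files below is updated in the same commit. A tiny stand-in demonstrating the failure mode (simplified class, not the real TrainableQuantizerActivationConfig):

    class TrainableActCfgSketch:  # simplified stand-in, not the real class
        def __init__(self, method, n_bits, params, enable_quantization):
            self.activation_quantization_method = method
            self.activation_n_bits = n_bits
            self.activation_quantization_params = params
            self.enable_activation_quantization = enable_quantization

    cfg = TrainableActCfgSketch('POWER_OF_TWO', 8, {}, True)  # new call shape: fine
    try:
        TrainableActCfgSketch('POWER_OF_TWO', 8, {}, True, min_threshold=0)  # old shape
    except TypeError as err:
        print(err)  # ...got an unexpected keyword argument 'min_threshold'
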
model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py

Lines changed: 1 addition & 2 deletions
@@ -83,7 +83,6 @@ def config_deserialization(in_config: dict) -> Union[TrainableQuantizerWeightsCo
         return TrainableQuantizerActivationConfig(activation_quantization_method=QuantizationMethod(in_config[C.ACTIVATION_QUANTIZATION_METHOD]),
                                                   activation_n_bits=in_config[C.ACTIVATION_N_BITS],
                                                   activation_quantization_params=in_config[C.ACTIVATION_QUANTIZATION_PARAMS],
-                                                  enable_activation_quantization=in_config[C.ENABLE_ACTIVATION_QUANTIZATION],
-                                                  min_threshold=in_config[C.MIN_THRESHOLD])
+                                                  enable_activation_quantization=in_config[C.ENABLE_ACTIVATION_QUANTIZATION])
     else:
         raise NotImplemented # pragma: no cover

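One practical note on the deserialization hunk: dumps written before this commit may still carry a MIN_THRESHOLD entry, and since the deserializer only reads the keys it needs, a stale entry is silently ignored rather than rejected. A quick illustration with abbreviated key names (hypothetical, not the real MCT constants):

    def deserialize_activation_cfg(in_config: dict) -> dict:
        # Pull out only the fields the new constructor accepts; extra keys
        # (e.g. a stale 'min_threshold' from an old dump) are simply skipped.
        return {'activation_quantization_method': in_config['method'],
                'activation_n_bits': in_config['n_bits'],
                'activation_quantization_params': in_config['params'],
                'enable_activation_quantization': in_config['enabled']}

    old_dump = {'method': 'UNIFORM', 'n_bits': 8, 'params': {},
                'enabled': True, 'min_threshold': 0.0}  # pre-commit dump
    assert 'min_threshold' not in deserialize_activation_cfg(old_dump)
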
tests/keras_tests/non_parallel_tests/test_lp_search_bitwidth.py

Lines changed: 0 additions & 1 deletion
@@ -74,7 +74,6 @@ def dummy_representative_dataset():
     for node in graph.nodes:
         # TODO irena remove set_qc:
         for c in node.quantization_cfg.candidates_quantization_cfg:
-            c.activation_quantization_cfg.set_qc(core_config.quantization_config)
             c.weights_quantization_cfg.set_qc(core_config.quantization_config)
             for attr_cfg in c.weights_quantization_cfg.get_all_weight_attrs_configs().values():
                 attr_cfg.weights_error_method = core_config.quantization_config.weights_error_method

tests/keras_tests/trainable_infrastructure_tests/base_keras_trainable_infra_test.py

Lines changed: 1 addition & 2 deletions
@@ -123,5 +123,4 @@ def get_activation_quantization_config(self):
         return TrainableQuantizerActivationConfig(activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
                                                   activation_n_bits=8,
                                                   activation_quantization_params={},
-                                                  enable_activation_quantization=True,
-                                                  min_threshold=0)
+                                                  enable_activation_quantization=True)

tests/keras_tests/trainable_infrastructure_tests/trainable_keras/test_keras_base_quantizer.py

Lines changed: 1 addition & 5 deletions
@@ -27,9 +27,6 @@
 
 class TestKerasBaseWeightsQuantizer(BaseKerasTrainableInfrastructureTest):
 
-    def __init__(self, unit_test):
-        super().__init__(unit_test)
-
     def get_weights_quantization_config(self):
         return TrainableQuantizerWeightsConfig(weights_quantization_method=QuantizationMethod.UNIFORM,
                                                weights_n_bits=8,
@@ -68,8 +65,7 @@ def get_activation_quantization_config(self):
         return TrainableQuantizerActivationConfig(activation_quantization_method=QuantizationMethod.UNIFORM,
                                                   activation_n_bits=8,
                                                   activation_quantization_params={},
-                                                  enable_activation_quantization=True,
-                                                  min_threshold=0)
+                                                  enable_activation_quantization=True)
 
     def run_test(self):
         with self.unit_test.assertRaises(Exception) as e:

tests/pytorch_tests/trainable_infrastructure_tests/base_pytorch_trainable_infra_test.py

Lines changed: 2 additions & 2 deletions
@@ -110,5 +110,5 @@ def get_activation_quantization_config(self, quant_method=QuantizationMethod.POW
         return TrainableQuantizerActivationConfig(activation_quantization_method=quant_method,
                                                   activation_n_bits=8,
                                                   activation_quantization_params=activation_quant_params or {},
-                                                  enable_activation_quantization=True,
-                                                  min_threshold=0)
+                                                  enable_activation_quantization=True)
+