Skip to content

Commit bceaea2

Browse files
Dev adding fln actq holder init in model (#25)
* add test code for FLN
* modified the model
* add space
* minor corrections
* fix fw_info in test code
* add FLN_NO_QUANT test
* change 16bit for FLN
* Resolve test errors
* Revert "Resolve test errors" (this reverts commit 0e77645)
* Support for QuantizationConfig changes
* Added an argument to the GradientPTQConfig function.
* Change description format
* Revert "Change description format" (this reverts commit c83f4a4)

---------

Co-authored-by: kawakami-masaki0 <kawakami.masaki@jp.panasonic.com>
1 parent a56eb16 commit bceaea2

2 files changed

Lines changed: 9 additions & 4 deletions

File tree

tests/external_tests/keras_tests/models_tests/test_networks_runner.py

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -97,15 +97,21 @@ def compare(self, inputs_list, quantized_model, qc, tpc):
9797
self.unit_test.assertTrue(False, f'fail TFLite convertion with the following error: {error_msg}')
9898

9999
def run_network(self, inputs_list, qc, tpc):
100+
from model_compression_toolkit.gptq.common.gptq_config import GradualActivationQuantizationConfig
100101
def representative_data_gen():
101102
for _ in range(self.num_calibration_iter):
102103
yield inputs_list
103104

104105
core_config = mct.core.CoreConfig(quantization_config=qc)
105106
if self.gptq:
106-
arc = mct.gptq.GradientPTQConfig(n_epochs=2, optimizer=tf.keras.optimizers.Adam(
107-
learning_rate=0.0001), optimizer_rest=tf.keras.optimizers.Adam(
108-
learning_rate=0.0001), loss=multiple_tensors_mse_loss)
107+
arc = mct.gptq.GradientPTQConfig(n_epochs=2,
108+
optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
109+
optimizer_rest=tf.keras.optimizers.Adam(learning_rate=0.0001),
110+
loss=multiple_tensors_mse_loss,
111+
train_bias=True,
112+
hessian_weights_config=None,
113+
gradual_activation_quantization_config= GradualActivationQuantizationConfig(),
114+
regularization_factor=1)
109115

110116
ptq_model, quantization_info = mct.gptq.keras_gradient_post_training_quantization(
111117
self.model_float,

tests_pytest/pytorch_tests/unit_tests/core/back2framework/test_pytorch_model_builder_fln.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,6 @@ def build_qc(q_mode=ActivationQuantizationMode.QUANT):
5959
signedness=Signedness.AUTO
6060
)
6161
a_qcfg = NodeActivationQuantizationConfig(op_cfg=op_cfg)
62-
a_qcfg.set_qc(QuantizationConfig())
6362
a_qcfg.quant_mode = q_mode
6463
w_qcfg = NodeWeightsQuantizationConfig(op_cfg=op_cfg,
6564
weights_channels_axis=ChannelAxisMapping(0, 1),

0 commit comments

Comments (0)