
Commit 4a96efe

nutsiepully authored and tensorflower-gardener committed
Update Quantizer params to match new scheme.
Quantizer params now match the defaults for the new quantization spec at https://www.tensorflow.org/lite/performance/quantization_spec. Note that this does not include special-case handling of per-axis operations.

PiperOrigin-RevId: 278751220
1 parent 062500a commit 4a96efe
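
To make the change concrete, here is a minimal, self-contained Python sketch (not part of the commit; the helper names are illustrative, not tfmot API) of how the new defaults map onto the TFLite quantization spec: symmetric, narrow-range weight quantization uses int8 in [-127, 127] with the zero point pinned to 0, while asymmetric activation quantization uses the full [-128, 127] range with a floating zero point.

def weight_quant_params(w_min, w_max, num_bits=8):
    # Symmetric + narrow_range: the int8 range is [-127, 127] (narrow range
    # drops -128), so the zero point is exactly 0, as the spec requires
    # for weights.
    qmax = 2 ** (num_bits - 1) - 1        # 127
    qmin = -qmax                          # -127
    scale = max(abs(w_min), abs(w_max)) / qmax
    return scale, 0, qmin, qmax

def activation_quant_params(a_min, a_max, num_bits=8):
    # Asymmetric + full range: the int8 range is [-128, 127] and the zero
    # point shifts so that real 0.0 is exactly representable.
    qmin = -(2 ** (num_bits - 1))         # -128
    qmax = 2 ** (num_bits - 1) - 1        # 127
    a_min, a_max = min(a_min, 0.0), max(a_max, 0.0)  # range must contain 0
    scale = (a_max - a_min) / (qmax - qmin)
    zero_point = int(round(qmin - a_min / scale))
    return scale, zero_point, qmin, qmax

print(weight_quant_params(-0.5, 0.25))    # zero_point is always 0
print(activation_quant_params(0.0, 6.0))  # e.g. a ReLU6 output range

This is why the diff below flips symmetric to True only for the weight quantizers: the activation quantizers stay asymmetric (symmetric=False, narrow_range=False) per the spec.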

File tree

2 files changed: +3 -3 lines changed


tensorflow_model_optimization/python/core/quantization/keras/layers/conv_batchnorm.py

Lines changed: 2 additions & 2 deletions
@@ -261,7 +261,7 @@ def __init__(
     if self.is_quantized:
       # TODO(b/142132535): update when we move to new quantization scheme.
       self.weight_quantizer = quantizers.LastValueQuantizer(
-          num_bits=8, per_axis=False, symmetric=False, narrow_range=True)
+          num_bits=8, per_axis=False, symmetric=True, narrow_range=True)
 
       self.activation_quantizer = quantizers.MovingAverageQuantizer(
           num_bits=8, per_axis=False, symmetric=False, narrow_range=False)
@@ -444,7 +444,7 @@ def __init__(
     self.is_quantized = is_quantized
     if self.is_quantized:
       self.weight_quantizer = quantizers.LastValueQuantizer(
-          num_bits=8, per_axis=False, symmetric=False, narrow_range=True)
+          num_bits=8, per_axis=False, symmetric=True, narrow_range=True)
 
       self.activation_quantizer = quantizers.MovingAverageQuantizer(
           num_bits=8, per_axis=False, symmetric=False, narrow_range=False)

tensorflow_model_optimization/python/core/quantization/keras/tflite/tflite_quantize_registry.py

Lines changed: 1 addition & 1 deletion
@@ -261,7 +261,7 @@ def __init__(self, weight_attrs, activation_attrs, quantize_output):
     self.weight_quantizer = quantizers.LastValueQuantizer(
         num_bits=8, per_axis=False, symmetric=True, narrow_range=True)
     self.activation_quantizer = quantizers.MovingAverageQuantizer(
-        num_bits=8, per_axis=False, symmetric=True, narrow_range=False)
+        num_bits=8, per_axis=False, symmetric=False, narrow_range=False)
 
   def get_weights_and_quantizers(self, layer):
     return [(getattr(layer, weight_attr), self.weight_quantizer)
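
For reference, a short usage sketch of the updated defaults. The constructor calls are taken verbatim from the diff above; the import path is an assumption that the internal quantizers module is importable at this location in this revision.

from tensorflow_model_optimization.python.core.quantization.keras import quantizers

# Weights: symmetric int8, narrow range (zero point pinned to 0).
weight_quantizer = quantizers.LastValueQuantizer(
    num_bits=8, per_axis=False, symmetric=True, narrow_range=True)

# Activations: asymmetric int8 over the full [-128, 127] range.
activation_quantizer = quantizers.MovingAverageQuantizer(
    num_bits=8, per_axis=False, symmetric=False, narrow_range=False)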
