
Commit 8b2bd1f

Xhark authored and tensorflower-gardener committed
Add tanh as supported activation in QAT
PiperOrigin-RevId: 377211156
1 parent f15ad8e commit 8b2bd1f

File tree

3 files changed (+3 / -3 lines)


tensorflow_model_optimization/python/core/quantization/keras/default_8bit/default_8bit_quantize_registry.py

Lines changed: 1 addition & 1 deletion

@@ -489,7 +489,7 @@ def get_output_quantizers(self, layer):
       # 'relu' should generally get fused into the previous layer.
       return [quantizers.MovingAverageQuantizer(
           num_bits=8, per_axis=False, symmetric=False, narrow_range=False)]
-    elif layer.activation.__name__ in ['linear', 'softmax', 'sigmoid']:
+    elif layer.activation.__name__ in ['linear', 'softmax', 'sigmoid', 'tanh']:
       return []
 
     raise ValueError('Activation {} not supported by '
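
The practical effect of this one-line change is that a layer with a tanh activation no longer falls through to the ValueError when the default 8-bit registry computes output quantizers. A minimal sketch of the new behavior, using the standard tfmot entry point (the layer sizes here are purely illustrative):

import tensorflow as tf
import tensorflow_model_optimization as tfmot

# A toy model using tanh; before this commit, quantizing it with the
# default 8-bit scheme raised "Activation tanh not supported ...".
model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation='tanh', input_shape=(8,)),
    tf.keras.layers.Dense(1),
])

# After this commit, the default registry accepts tanh and QAT
# annotation/wrapping proceeds end to end.
qat_model = tfmot.quantization.keras.quantize_model(model)
qat_model.summary()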

tensorflow_model_optimization/python/core/quantization/keras/default_8bit/default_8bit_quantize_registry_test.py

Lines changed: 1 addition & 1 deletion

@@ -476,7 +476,7 @@ def testRaisesErrorUnsupportedActivation(self):
     )
 
     with self.assertRaises(ValueError):
-      quantize_config.get_output_quantizers(keras.layers.Activation('tanh'))
+      quantize_config.get_output_quantizers(keras.layers.Activation('swish'))
 
     with self.assertRaises(ValueError):
      quantize_config.get_output_quantizers(
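
Since tanh no longer raises, the negative-path test swaps in 'swish', which remains unsupported by the default 8-bit registry. A hypothetical companion assertion for the new positive path (not part of this commit), written against the same quantize_config fixture whose construction is elided in the diff above, might read:

    # Hypothetical (not in this commit): tanh is now supported, so the
    # registry returns no output quantizers instead of raising.
    self.assertEqual(
        [],
        quantize_config.get_output_quantizers(keras.layers.Activation('tanh')))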

tensorflow_model_optimization/python/core/quantization/keras/quantize_aware_activation.py

Lines changed: 1 addition & 1 deletion

@@ -77,7 +77,7 @@ class QuantizeAwareActivation(object):
   # on inclusion. Verify in TFLite before enabling.
 
   # These activations should be quantized prior to the activation being applied.
-  _PRE_QUANT_ACTIVATIONS = frozenset({'softmax', 'sigmoid'})
+  _PRE_QUANT_ACTIVATIONS = frozenset({'softmax', 'sigmoid', 'tanh'})
 
   # These activations should be quantized after the activation has been applied.
   _POST_QUANT_ACTIVATIONS = frozenset({'linear', 'relu'})
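
The pre/post split matters because tanh, like sigmoid and softmax, has a fixed output range ([-1, 1] for tanh), so the converter can hard-code the output quantization parameters and only the activation's input needs fake-quant; relu and linear outputs have no fixed range and are quantized after application. A conceptual sketch of that placement logic, with a hypothetical fake_quant helper standing in for the real quantizer machinery (the min/max of -6/6 is an arbitrary illustration):

import tensorflow as tf

_PRE_QUANT_ACTIVATIONS = frozenset({'softmax', 'sigmoid', 'tanh'})
_POST_QUANT_ACTIVATIONS = frozenset({'linear', 'relu'})

def fake_quant(x):
  # Stand-in for the registry's MovingAverageQuantizer; the 8-bit range
  # here is chosen purely for illustration.
  return tf.quantization.fake_quant_with_min_max_vars(
      x, min=-6.0, max=6.0, num_bits=8)

def quantize_aware_apply(activation, x):
  """Conceptual placement of fake-quant around an activation."""
  name = activation.__name__
  if name in _PRE_QUANT_ACTIVATIONS:
    # Quantize the input; the output range is fixed (e.g. tanh -> [-1, 1]),
    # so the converter can fix the output quantization parameters.
    return activation(fake_quant(x))
  elif name in _POST_QUANT_ACTIVATIONS:
    # Quantize the result; the output range must be observed during training.
    return fake_quant(activation(x))
  raise ValueError('Activation {} not supported.'.format(name))

# Example: y = quantize_aware_apply(tf.math.tanh, tf.random.normal([4, 8]))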
