Commit 4f2545a

nutsiepully authored and tensorflower-gardener committed
Removing activation quantize code.
Code to apply transformations will now go into a separate interface, which will be added in future CLs. Wrapping using QuantizeAwareActivation will be done in the QuantizeWrapper. Removing this code before adding the features back in the correct place.

PiperOrigin-RevId: 263803284
1 parent fb389ff commit 4f2545a
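For context on where this functionality is headed: instead of quantize_apply() walking the cloned model and swapping each layer's activation for a QuantizeAwareActivation (the graph-level pass removed below), the commit message says the wrapping will be handled by the quantize wrapper itself. The snippet below is only a rough, self-contained sketch of that idea under assumptions of mine: QuantizeWrapperSketch, FakeQuantActivation, and the fixed [-6, 6] fake-quant range are illustrative stand-ins, not the interface this or later CLs add.

import tensorflow as tf
from tensorflow import keras


class FakeQuantActivation:
  """Stand-in for QuantizeAwareActivation: fake-quantizes the activation output."""

  def __init__(self, activation, num_bits=8):
    # keras.activations.get accepts both names ('relu') and callables.
    self.activation = keras.activations.get(activation)
    self.num_bits = num_bits

  def __call__(self, inputs):
    outputs = self.activation(inputs)
    # Fixed [-6, 6] range is a placeholder; a real implementation tracks ranges.
    return tf.quantization.fake_quant_with_min_max_args(
        outputs, min=-6.0, max=6.0, num_bits=self.num_bits)


class QuantizeWrapperSketch(keras.layers.Wrapper):
  """Illustrative wrapper that swaps the wrapped layer's activation at build time."""

  def build(self, input_shape):
    activation = getattr(self.layer, 'activation', None)
    if activation is not None and activation is not keras.activations.linear:
      # The wrapper, not a separate model transformation, installs the
      # quantize-aware activation.
      self.layer.activation = FakeQuantActivation(activation)
    super().build(input_shape)

  def call(self, inputs):
    return self.layer(inputs)


# Usage: wrap the layer directly; no graph rewrite of the built model is needed.
inputs = keras.Input(shape=(20,))
outputs = QuantizeWrapperSketch(keras.layers.Dense(10, activation='relu'))(inputs)
model = keras.Model(inputs, outputs)
print(type(model.layers[1].layer.activation).__name__)  # FakeQuantActivation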

File tree

2 files changed: +0 additions, -86 deletions

tensorflow_model_optimization/python/core/quantization/keras/quantize_emulate.py
tensorflow_model_optimization/python/core/quantization/keras/quantize_emulate_test.py

tensorflow_model_optimization/python/core/quantization/keras/quantize_emulate.py

Lines changed: 0 additions & 43 deletions
@@ -168,24 +168,6 @@ def _clone_model_with_weights(model_to_clone):
 
     return cloned_model
 
-  def _quantize_activation(activation, parent_class, quantize_params):
-    try:
-      return quantize_aware_activation.QuantizeAwareActivation(
-          activation.__name__, parent_class, **quantize_params)
-    except TypeError:
-      # Non-standard activation. Could be a custom callable, or an advanced
-      # activation. Simply return the original activation for now.
-      # TODO(pulkitb): Determine how to handle custom activations and advanced
-      # activations.
-      return activation
-
-  def _get_quantize_activation_params(layer):
-    quant_params = layer.get_quantize_params()
-    # narrow_range is not relevant to quantizing activations.
-    quant_params.pop('narrow_range')
-
-    return quant_params
-
   def _apply_quantization(quant_annotate_layer):
     return QuantizeEmulateWrapper(
         quant_annotate_layer.layer,
@@ -195,32 +177,7 @@ def _apply_quantization(quant_annotate_layer):
   # model without modifying the weights of the original model.
   model_copy = _clone_model_with_weights(model)
 
-  # Apply all graph level transformations.
-  replace_map = {}
-
-  # Replace activations in layers with QuantAwareActivation.
-  # Dense(activation='relu') -> Dense(activation=QuantAwareActivation('relu'))
-  # TODO(pulkitb): Not all layers (LSTMs) have just activation. Add
-  # generic handling for all layers.
-  for layer in model_copy.layers:
-    if isinstance(layer, quant_annotate.QuantizeAnnotate) and \
-        (layer.layer.activation is not None and
-         layer.layer.activation != keras.activations.linear):
-      quantized_layer = _apply_quantization(layer)
-
-      quantized_layer.layer.activation = _quantize_activation(
-          layer.layer.activation, layer.layer.__class__,
-          _get_quantize_activation_params(layer))
-
-      replace_map[layer] = quantized_layer
-
-  # TODO(pulkitb): Transform [Dense(), ReLU()] to be quant aware.
-
   def _add_quant_emulate_wrapper(layer):  # pylint: disable=missing-docstring
-    # Quantized layer has been constructed during graph transformation. Return.
-    if layer in replace_map:
-      return replace_map[layer]
-
     if not isinstance(layer, quant_annotate.QuantizeAnnotate):
       return layer
 

tensorflow_model_optimization/python/core/quantization/keras/quantize_emulate_test.py

Lines changed: 0 additions & 43 deletions
@@ -32,7 +32,6 @@
 quantize_annotate = quantize_emulate.quantize_annotate
 QuantizeEmulate = quantize_emulate.QuantizeEmulate
 QuantizeEmulateWrapper = quantize_emulate_wrapper.QuantizeEmulateWrapper
-QuantizeAwareActivation = quantize_aware_activation.QuantizeAwareActivation
 
 
 class QuantizeEmulateTest(test.TestCase):
@@ -270,48 +269,6 @@ def testAppliesQuantizationToAnnotatedModel_Functional(self):
 
     self._assert_model_emulated(model, quantized_model)
 
-  # Transformation Tests
-
-  def testQuantizesActivationsWithinLayer_Sequential(self):
-    quant_params = {'num_bits': 8, 'symmetric': True}
-    model = keras.Sequential([
-        quantize_annotate(
-            keras.layers.Conv2D(32, 5, activation='relu'),
-            input_shape=(28, 28, 1))
-    ])
-
-    quantized_model = quantize_emulate.quantize_apply(model)
-
-    # We expect activation to be modified.
-    self._assert_model_emulated(model, quantized_model, ['activation'])
-
-    conv_layer = quantized_model.layers[0].layer
-    self.assertIsInstance(conv_layer.activation, QuantizeAwareActivation)
-    self.assertEqual(
-        keras.activations.get('relu'), conv_layer.activation.activation)
-    self.assertEqual(keras.layers.Conv2D, conv_layer.activation.parent_layer)
-    self.assertEqual(quant_params, conv_layer.activation.get_quantize_params())
-
-  def testQuantizesActivationsWithinLayer_Functional(self):
-    quant_params = {'num_bits': 8, 'symmetric': True}
-
-    inputs = keras.Input(shape=(28, 28, 1))
-    results = quantize_annotate(
-        keras.layers.Conv2D(32, 5, activation='relu'))(inputs)
-    model = keras.Model(inputs=inputs, outputs=results)
-
-    quantized_model = quantize_emulate.quantize_apply(model)
-
-    # We expect activation to be modified.
-    self._assert_model_emulated(model, quantized_model, ['activation'])
-
-    conv_layer = quantized_model.layers[1].layer
-    self.assertIsInstance(conv_layer.activation, QuantizeAwareActivation)
-    self.assertEqual(
-        keras.activations.get('relu'), conv_layer.activation.activation)
-    self.assertEqual(keras.layers.Conv2D, conv_layer.activation.parent_layer)
-    self.assertEqual(quant_params, conv_layer.activation.get_quantize_params())
-
 
 if __name__ == '__main__':
   test.main()
