@@ -24,6 +24,7 @@
 from tensorflow_model_optimization.python.core.quantization.keras import quantize_aware_activation
 from tensorflow_model_optimization.python.core.quantization.keras import quantize_layer
 from tensorflow_model_optimization.python.core.quantization.keras import quantizers
+from tensorflow_model_optimization.python.core.quantization.keras import utils as quantize_utils
 from tensorflow_model_optimization.python.core.quantization.keras.default_8bit import default_8bit_quantize_configs
 from tensorflow_model_optimization.python.core.quantization.keras.default_8bit import default_8bit_quantize_registry
 from tensorflow_model_optimization.python.core.quantization.keras.graph_transformations import transforms
@@ -67,13 +68,17 @@ def _get_params(conv_layer, bn_layer, relu_layer=None):
       list(conv_layer['config'].items()) + list(bn_layer['config'].items()))
 
   if relu_layer is not None:
-    params['post_activation'] = keras.layers.deserialize(relu_layer)
+    params['post_activation'] = quantize_utils.deserialize_layer(
+        relu_layer, use_legacy_format=True
+    )
 
   return params
 
 
 def _get_layer_node(fused_layer, weights):
-  layer_config = keras.layers.serialize(fused_layer)
+  layer_config = quantize_utils.serialize_layer(
+      fused_layer, use_legacy_format=True
+  )
   layer_config['name'] = layer_config['config']['name']
   # This config tracks which layers get quantized, and whether they have a
   # custom QuantizeConfig.
@@ -118,7 +123,10 @@ def _replace(self, bn_layer_node, conv_layer_node):
       return bn_layer_node
 
     conv_layer_node.layer['config']['activation'] = (
-        keras.activations.serialize(quantize_aware_activation.NoOpActivation()))
+        quantize_utils.serialize_activation(
+            quantize_aware_activation.NoOpActivation(), use_legacy_format=True
+        )
+    )
     bn_layer_node.metadata['quantize_config'] = (
         default_8bit_quantize_configs.Default8BitOutputQuantizeConfig())
 
@@ -180,7 +188,10 @@ def _replace(self, relu_layer_node, bn_layer_node, conv_layer_node):
       return relu_layer_node
 
     conv_layer_node.layer['config']['activation'] = (
-        keras.activations.serialize(quantize_aware_activation.NoOpActivation()))
+        quantize_utils.serialize_activation(
+            quantize_aware_activation.NoOpActivation(), use_legacy_format=True
+        )
+    )
     bn_layer_node.metadata['quantize_config'] = (
         default_8bit_quantize_configs.NoOpQuantizeConfig())
 
@@ -261,7 +272,10 @@ def _replace(self, bn_layer_node, dense_layer_node):
       return bn_layer_node
 
     dense_layer_node.layer['config']['activation'] = (
-        keras.activations.serialize(quantize_aware_activation.NoOpActivation()))
+        quantize_utils.serialize_activation(
+            quantize_aware_activation.NoOpActivation(), use_legacy_format=True
+        )
+    )
     bn_layer_node.metadata['quantize_config'] = (
         default_8bit_quantize_configs.Default8BitOutputQuantizeConfig())
 
@@ -297,7 +311,10 @@ def _replace(self, relu_layer_node, bn_layer_node, dense_layer_node):
       return relu_layer_node
 
     dense_layer_node.layer['config']['activation'] = (
-        keras.activations.serialize(quantize_aware_activation.NoOpActivation()))
+        quantize_utils.serialize_activation(
+            quantize_aware_activation.NoOpActivation(), use_legacy_format=True
+        )
+    )
     bn_layer_node.metadata['quantize_config'] = (
         default_8bit_quantize_configs.NoOpQuantizeConfig())
 
@@ -408,7 +425,9 @@ def replacement(self, match_layer):
     else:
       spatial_dim = 2
 
-    sepconv2d_layer_config = keras.layers.serialize(sepconv2d_layer)
+    sepconv2d_layer_config = quantize_utils.serialize_layer(
+        sepconv2d_layer, use_legacy_format=True
+    )
     sepconv2d_layer_config['name'] = sepconv2d_layer.name
 
     # Needed to ensure these new layers are considered for quantization.
@@ -420,15 +439,19 @@ def replacement(self, match_layer):
     expand_layer = tf.keras.layers.Lambda(
         lambda x: tf.expand_dims(x, spatial_dim),
         name=self._get_name('sepconv1d_expand'))
-    expand_layer_config = keras.layers.serialize(expand_layer)
+    expand_layer_config = quantize_utils.serialize_layer(
+        expand_layer, use_legacy_format=True
+    )
     expand_layer_config['name'] = expand_layer.name
     expand_layer_metadata = {
         'quantize_config': default_8bit_quantize_configs.NoOpQuantizeConfig()}
 
     squeeze_layer = tf.keras.layers.Lambda(
         lambda x: tf.squeeze(x, [spatial_dim]),
         name=self._get_name('sepconv1d_squeeze'))
-    squeeze_layer_config = keras.layers.serialize(squeeze_layer)
+    squeeze_layer_config = quantize_utils.serialize_layer(
+        squeeze_layer, use_legacy_format=True
+    )
     squeeze_layer_config['name'] = squeeze_layer.name
     squeeze_layer_metadata = {
         'quantize_config': default_8bit_quantize_configs.NoOpQuantizeConfig()}
@@ -493,7 +516,9 @@ def replacement(self, match_layer):
     )
     dconv_weights = collections.OrderedDict()
     dconv_weights['depthwise_kernel:0'] = sepconv_weights[0]
-    dconv_layer_config = keras.layers.serialize(dconv_layer)
+    dconv_layer_config = quantize_utils.serialize_layer(
+        dconv_layer, use_legacy_format=True
+    )
     dconv_layer_config['name'] = dconv_layer.name
     # Needed to ensure these new layers are considered for quantization.
     dconv_metadata = {'quantize_config': None}
@@ -521,7 +546,9 @@ def replacement(self, match_layer):
     conv_weights['kernel:0'] = sepconv_weights[1]
     if sepconv_layer['config']['use_bias']:
       conv_weights['bias:0'] = sepconv_weights[2]
-    conv_layer_config = keras.layers.serialize(conv_layer)
+    conv_layer_config = quantize_utils.serialize_layer(
+        conv_layer, use_legacy_format=True
+    )
     conv_layer_config['name'] = conv_layer.name
     # Needed to ensure these new layers are considered for quantization.
     conv_metadata = {'quantize_config': None}
@@ -588,7 +615,9 @@ def replacement(self, match_layer):
     quant_layer = quantize_layer.QuantizeLayer(
         quantizers.AllValuesQuantizer(
             num_bits=8, per_axis=False, symmetric=False, narrow_range=False))
-    layer_config = keras.layers.serialize(quant_layer)
+    layer_config = quantize_utils.serialize_layer(
+        quant_layer, use_legacy_format=True
+    )
     layer_config['name'] = quant_layer.name
 
     quant_layer_node = LayerNode(
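
Note: the `quantize_utils` module itself is not part of this diff. As a rough sketch, assuming its purpose is to keep emitting the legacy Keras serialization format on Keras 2.13+ (where `tf.keras.layers.serialize`/`deserialize` and `tf.keras.activations.serialize` gained a `use_legacy_format` kwarg) while still working on older releases that reject the flag, the shim could look like the following. The helper names mirror the call sites above, but the bodies are illustrative, not the actual module:

```python
# Illustrative sketch only, not the real tensorflow_model_optimization utils.
import inspect

import tensorflow as tf


def _accepts_legacy_flag(fn):
  """Returns True if the installed Keras serializer takes `use_legacy_format`."""
  # Keras 2.13 added the kwarg; earlier releases raise on unknown kwargs.
  return 'use_legacy_format' in inspect.getfullargspec(fn).args


def serialize_layer(layer, use_legacy_format=False):
  if _accepts_legacy_flag(tf.keras.layers.serialize):
    return tf.keras.layers.serialize(layer, use_legacy_format=use_legacy_format)
  return tf.keras.layers.serialize(layer)


def deserialize_layer(config, use_legacy_format=False):
  if _accepts_legacy_flag(tf.keras.layers.deserialize):
    return tf.keras.layers.deserialize(
        config, use_legacy_format=use_legacy_format)
  return tf.keras.layers.deserialize(config)


def serialize_activation(activation, use_legacy_format=False):
  if _accepts_legacy_flag(tf.keras.activations.serialize):
    return tf.keras.activations.serialize(
        activation, use_legacy_format=use_legacy_format)
  return tf.keras.activations.serialize(activation)
```

Probing the serializer's signature at call time keeps a single code path working across Keras versions, which is why the transforms above can pass `use_legacy_format=True` unconditionally: the legacy format preserves the flat config dict layout that these graph transforms index into (e.g. `layer_config['config']['name']`).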