 from __future__ import print_function
 
 import tensorflow as tf
-
-from tensorflow.python.framework import dtypes
-from tensorflow.python.keras import activations
-from tensorflow.python.keras import backend as K
-from tensorflow.python.keras import initializers
-from tensorflow.python.keras.layers import convolutional
-from tensorflow.python.keras.layers import serialization
 from tensorflow.python.keras.utils import conv_utils
-from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import math_ops
-from tensorflow.python.ops import nn
-from tensorflow.python.ops import nn_ops
 from tensorflow_model_optimization.python.core.keras import utils
 from tensorflow_model_optimization.python.core.quantization.keras import quantizers
 from tensorflow_model_optimization.python.core.quantization.keras.default_8bit import default_8bit_quantizers
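Editor's note: this hunk drops the `tensorflow.python.*` (TF-internal) imports the file no longer needs, keeping only `conv_utils` and `math_ops`; the remaining hunks rewrite each call site against public `tf.*` endpoints. As a quick spot-check (not part of the commit; assumes TF >= 2.0), the public endpoints used below behave like the private ops they replace:

```python
import tensorflow as tf

# Public endpoints substituted for private modules in this change:
#   math_ops.rsqrt / math_ops.subtract -> tf.math.rsqrt / tf.math.subtract
#   array_ops.reshape                  -> tf.reshape
#   nn.bias_add / nn_ops.conv2d        -> tf.nn.bias_add / tf.compat.v1.nn.conv2d
#   keras backend / activations        -> tf.keras.backend / tf.keras.activations
x = tf.constant([1.0, 4.0, 16.0])
print(tf.math.rsqrt(x).numpy())                      # [1.   0.5  0.25]
print(tf.math.subtract(x, 1.0).numpy())              # [ 0.  3. 15.]
print(tf.reshape(x, (3, 1)).shape)                   # (3, 1)
print(tf.keras.activations.get('relu')(-x).numpy())  # [0. 0. 0.]
```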
@@ -51,19 +41,19 @@ def _build_for_quantization(self):
 
     self.optimizer_step = self.add_weight(
         'optimizer_step',
-        initializer=initializers.Constant(-1),
-        dtype=dtypes.int32,
+        initializer=tf.compat.v1.keras.initializers.constant(-1),
+        dtype=tf.int32,
         trainable=False)
 
     # TODO(alanchiao): re-explore if we can handle this with
     # QuantizeAwareActivation.
     self._activation_min_var = self.add_variable(  # pylint: disable=protected-access
         'activation_min',
-        initializer=initializers.Constant(-6.0),
+        initializer=tf.compat.v1.keras.initializers.constant(-6.0),
         trainable=False)
     self._activation_max_var = self.add_variable(  # pylint: disable=protected-access
         'activation_max',
-        initializer=initializers.Constant(6.0),
+        initializer=tf.compat.v1.keras.initializers.constant(6.0),
         trainable=False)
 
   def _apply_weight_quantizer(self, training, folded_conv_kernel):
@@ -112,10 +102,10 @@ def _from_config(cls_initializer, config):
     config.pop('use_bias')
     is_advanced_activation = 'class_name' in config['post_activation']
     if is_advanced_activation:
-      config['post_activation'] = serialization.deserialize(
+      config['post_activation'] = tf.keras.layers.deserialize(
           config['post_activation'])
     else:
-      config['post_activation'] = activations.deserialize(
+      config['post_activation'] = tf.keras.activations.deserialize(
           config['post_activation'])
 
     return cls_initializer(**config)
@@ -138,7 +128,8 @@ def _get_config(self, conv_config):
       serialized_activation = keras.utils.serialize_keras_object(
           self.post_activation)
     else:
-      serialized_activation = activations.serialize(self.post_activation)
+      serialized_activation = tf.keras.activations.serialize(
+          self.post_activation)
     config = {
         'is_quantized': self.is_quantized,
         'post_activation': serialized_activation
@@ -149,7 +140,7 @@ def _get_config(self, conv_config):
         list(config.items()))
 
 
-class _ConvBatchNorm2D(_ConvBatchNormMixin, convolutional.Conv2D):
+class _ConvBatchNorm2D(_ConvBatchNormMixin, tf.keras.layers.Convolution2D):
   """Layer for emulating the folding of batch normalization into Conv during serving.
 
   Implements the emulation, as described in https://arxiv.org/abs/1712.05877.
@@ -255,7 +246,7 @@ def __init__(
     )
 
     # Named as post_activation to not conflict with Layer self.activation.
-    self.post_activation = activations.get(post_activation)
+    self.post_activation = tf.keras.activations.get(post_activation)
 
     self.is_quantized = is_quantized
     if self.is_quantized:
@@ -276,20 +267,20 @@ def build(self, input_shape):
 
   def call(self, inputs, training=None):
     if training is None:
-      training = K.learning_phase()
+      training = tf.keras.backend.learning_phase()
 
     conv_out = super(_ConvBatchNorm2D, self).call(inputs)
 
     # Not all the computations in the batchnorm need to happen,
     # but this avoids duplicating code (e.g. moving_average).
     self.batchnorm.call(conv_out)
 
-    folded_conv_kernel_multiplier = self.batchnorm.gamma * math_ops.rsqrt(
+    folded_conv_kernel_multiplier = self.batchnorm.gamma * tf.math.rsqrt(
         self.batchnorm.moving_variance + self.batchnorm.epsilon)
     folded_conv_kernel = math_ops.mul(
         folded_conv_kernel_multiplier, self.kernel, name='folded_conv_kernel')
 
-    folded_conv_bias = math_ops.subtract(
+    folded_conv_bias = tf.math.subtract(
         self.batchnorm.beta,
         self.batchnorm.moving_mean * folded_conv_kernel_multiplier,
         name='folded_conv_bias')
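Editor's note: the folding computed in this hunk follows the scheme from the paper linked in the class docstring: scale the conv kernel by gamma / sqrt(moving_variance + epsilon) and fold the remaining batch-norm terms into a bias. A minimal NumPy sketch (not part of the commit; a 1x1 conv is modeled here as a matmul) checking that identity:

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((8, 4))                   # activations
w = rng.standard_normal((4, 3))                   # kernel, 3 output channels
gamma, beta = rng.standard_normal(3), rng.standard_normal(3)
mean, var, eps = rng.standard_normal(3), rng.random(3) + 0.5, 1e-3

# Conv followed by inference-mode batch norm.
y_bn = gamma * (x @ w - mean) / np.sqrt(var + eps) + beta

# Folded form: scaled kernel plus a folded bias, mirroring the layer above.
multiplier = gamma / np.sqrt(var + eps)           # folded_conv_kernel_multiplier
w_folded = w * multiplier                         # folded_conv_kernel
b_folded = beta - mean * multiplier               # folded_conv_bias
y_folded = x @ w_folded + b_folded

assert np.allclose(y_bn, y_folded)
```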
@@ -313,7 +304,7 @@ def call(self, inputs, training=None):
     if not isinstance(op_padding, (list, tuple)):
       op_padding = op_padding.upper()
 
-    folded_conv_out = nn_ops.conv2d(
+    folded_conv_out = tf.compat.v1.nn.conv2d(
         inputs,
         folded_conv_kernel,
         strides=self.strides,
@@ -328,13 +319,13 @@ def call(self, inputs, training=None):
     if self.data_format == 'channels_first':
       if self.rank == 1:
         # nn.bias_add does not accept a 1D input tensor.
-        bias = array_ops.reshape(folded_conv_bias, (1, self.filters, 1))
+        bias = tf.reshape(folded_conv_bias, (1, self.filters, 1))
         folded_conv_out += bias
       else:
-        outputs = nn.bias_add(
+        outputs = tf.nn.bias_add(
             folded_conv_out, folded_conv_bias, data_format='NCHW')
     else:
-      outputs = nn.bias_add(
+      outputs = tf.nn.bias_add(
           folded_conv_out, folded_conv_bias, data_format='NHWC')
 
     if self.post_activation is not None:
@@ -353,7 +344,7 @@ def from_config(cls, config):
 
 
 class _DepthwiseConvBatchNorm2D(_ConvBatchNormMixin,
-                                convolutional.DepthwiseConv2D):
+                                tf.keras.layers.DepthwiseConv2D):
   """Layer for emulating the folding of batch normalization into DepthwiseConv during serving.
 
   See ConvBatchNorm2D for detailed comments.
@@ -439,7 +430,7 @@ def __init__(
         virtual_batch_size=virtual_batch_size,
         adjustment=adjustment,
     )
-    self.post_activation = activations.get(post_activation)
+    self.post_activation = tf.keras.activations.get(post_activation)
 
     self.is_quantized = is_quantized
     if self.is_quantized:
@@ -460,16 +451,16 @@ def build(self, input_shape):
 
   def call(self, inputs, training=None):
     if training is None:
-      training = K.learning_phase()
+      training = tf.keras.backend.learning_phase()
 
     conv_out = super(_DepthwiseConvBatchNorm2D, self).call(inputs)
 
     self.batchnorm.call(conv_out)
 
-    folded_conv_kernel_multiplier = self.batchnorm.gamma * math_ops.rsqrt(
+    folded_conv_kernel_multiplier = self.batchnorm.gamma * tf.math.rsqrt(
         self.batchnorm.moving_variance + self.batchnorm.epsilon)
 
-    folded_conv_bias = math_ops.subtract(
+    folded_conv_bias = tf.math.subtract(
         self.batchnorm.beta,
         self.batchnorm.moving_mean * folded_conv_kernel_multiplier,
         name='folded_conv_bias')
@@ -478,8 +469,8 @@ def call(self, inputs, training=None):
         self.depthwise_kernel.get_shape().as_list()[2],
         self.depthwise_kernel.get_shape().as_list()[3]
     ]
-    folded_conv_kernel_multiplier = array_ops.reshape(
-        folded_conv_kernel_multiplier, depthwise_weights_shape)
+    folded_conv_kernel_multiplier = tf.reshape(folded_conv_kernel_multiplier,
+                                               depthwise_weights_shape)
 
     folded_conv_kernel = math_ops.mul(
         folded_conv_kernel_multiplier,
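Editor's note: the reshape in this hunk is needed because a DepthwiseConv2D kernel has shape (kernel_h, kernel_w, in_channels, depth_multiplier), while the batch-norm multiplier carries one value per output channel (in_channels * depth_multiplier values); reshaping it to (in_channels, depth_multiplier) lets it broadcast against the last two kernel axes. A small NumPy sketch of that broadcast (not part of the commit):

```python
import numpy as np

kh, kw, in_ch, dm = 3, 3, 8, 2
depthwise_kernel = np.ones((kh, kw, in_ch, dm))
multiplier = np.arange(in_ch * dm, dtype=float)    # one scale per output channel

# Reshape to (in_channels, depth_multiplier) so it broadcasts over (kh, kw).
folded_kernel = depthwise_kernel * multiplier.reshape(in_ch, dm)
assert folded_kernel.shape == depthwise_kernel.shape
```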
@@ -495,7 +486,7 @@ def call(self, inputs, training=None):
     # backend.conv2d is.
     #
     # From DepthwiseConv2D layer call() function.
-    folded_conv_out = K.depthwise_conv2d(
+    folded_conv_out = tf.keras.backend.depthwise_conv2d(
         inputs,
         folded_conv_kernel,
         strides=self.strides,
@@ -504,7 +495,7 @@ def call(self, inputs, training=None):
         data_format=self.data_format,
     )
 
-    outputs = K.bias_add(
+    outputs = tf.keras.backend.bias_add(
         folded_conv_out, folded_conv_bias, data_format=self.data_format)
 
     if self.post_activation is not None: