# Dependency imports

- from tensorflow.python.eager import context
- from tensorflow.python.framework import ops
- from tensorflow.python.framework import tensor_shape
- from tensorflow.python.layers import base
- from tensorflow.python.ops import array_ops
- from tensorflow.python.ops import init_ops
- from tensorflow.python.ops import math_ops
- from tensorflow.python.ops import nn
+ import tensorflow as tf

from tensorflow_compression.python.layers import parameterizers

_default_gamma_param = parameterizers.NonnegativeParameterizer()


- class GDN(base.Layer):
+ class GDN(tf.keras.layers.Layer):
  """Generalized divisive normalization layer.

  Based on the papers:

@@ -69,30 +62,44 @@ class GDN(base.Layer):
      the division is replaced by multiplication).
    rectify: Boolean. If `True`, apply a `relu` nonlinearity to the inputs
      before calculating GDN response.
-   gamma_init: The gamma matrix will be initialized as the identity matrix
-     multiplied with this value. If set to zero, the layer is effectively
-     initialized to the identity operation, since beta is initialized as one.
-     A good default setting is somewhere between 0 and 0.5.
+   gamma_init: Float. The gamma matrix will be initialized as the identity
+     matrix multiplied with this value. If set to zero, the layer is
+     effectively initialized to the identity operation, since beta is
+     initialized as one. A good default setting is somewhere between 0 and 0.5.
    data_format: Format of input tensor. Currently supports `'channels_first'`
      and `'channels_last'`.
-   beta_parameterizer: Reparameterization for beta parameter. Defaults to
-     `NonnegativeParameterizer` with a minimum value of `1e-6`.
-   gamma_parameterizer: Reparameterization for gamma parameter. Defaults to
-     `NonnegativeParameterizer` with a minimum value of `0`.
+   beta_parameterizer: `Parameterizer` object for beta parameter. Defaults
+     to `NonnegativeParameterizer` with a minimum value of 1e-6.
+   gamma_parameterizer: `Parameterizer` object for gamma parameter.
+     Defaults to `NonnegativeParameterizer` with a minimum value of 0.
    activity_regularizer: Regularizer function for the output.
-   trainable: Boolean, if `True`, also add variables to the graph collection
-     `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
-   name: String, the name of the layer. Layers with the same name will
-     share weights, but to avoid mistakes we require `reuse=True` in such
-     cases.
+   trainable: Boolean. Whether the layer should be trained.
+   name: String. The name of the layer.
+   dtype: `DType` of the layer's inputs (default of `None` means use the type
+     of the first input).

- Properties:
+ Read-only properties:
    inverse: Boolean, whether GDN is computed (`True`) or IGDN (`False`).
    rectify: Boolean, whether to apply `relu` before normalization or not.
-   data_format: Format of input tensor. Currently supports `'channels_first'`
-     and `'channels_last'`.
+   gamma_init: See above.
+   data_format: See above.
+   activity_regularizer: See above.
+   name: See above.
+   dtype: See above.
    beta: The beta parameter as defined above (1D `Tensor`).
    gamma: The gamma parameter as defined above (2D `Tensor`).
+   trainable_variables: List of trainable variables.
+   non_trainable_variables: List of non-trainable variables.
+   variables: List of all variables of this layer, trainable and non-trainable.
+   updates: List of update ops of this layer.
+   losses: List of losses added by this layer.
+
+ Mutable properties:
+   beta_parameterizer: See above.
+   gamma_parameterizer: See above.
+   trainable: Boolean. Whether the layer should be trained.
+   input_spec: Optional `InputSpec` object specifying the constraints on inputs
+     that can be accepted by the layer.
  """

  def __init__(self,
@@ -102,87 +109,122 @@ def __init__(self,
               data_format="channels_last",
               beta_parameterizer=_default_beta_param,
               gamma_parameterizer=_default_gamma_param,
-              activity_regularizer=None,
-              trainable=True,
-              name=None,
               **kwargs):
-   super(GDN, self).__init__(trainable=trainable, name=name,
-                             activity_regularizer=activity_regularizer,
-                             **kwargs)
-   self.inverse = bool(inverse)
-   self.rectify = bool(rectify)
+   super(GDN, self).__init__(**kwargs)
+   self._inverse = bool(inverse)
+   self._rectify = bool(rectify)
    self._gamma_init = float(gamma_init)
-   self.data_format = data_format
+   self._data_format = str(data_format)
    self._beta_parameterizer = beta_parameterizer
    self._gamma_parameterizer = gamma_parameterizer
-   self._channel_axis()  # trigger ValueError early
-   self.input_spec = base.InputSpec(min_ndim=2)
+
+   if self.data_format not in ("channels_first", "channels_last"):
+     raise ValueError("Unknown data format: '{}'.".format(self.data_format))
+
+   self.input_spec = tf.keras.layers.InputSpec(min_ndim=2)
+
+ @property
+ def inverse(self):
+   return self._inverse
+
+ @property
+ def rectify(self):
+   return self._rectify
+
+ @property
+ def gamma_init(self):
+   return self._gamma_init
+
+ @property
+ def data_format(self):
+   return self._data_format
+
+ @property
+ def beta_parameterizer(self):
+   return self._beta_parameterizer
+
+ @beta_parameterizer.setter
+ def beta_parameterizer(self, val):
+   if self.built:
+     raise RuntimeError(
+         "Can't set `beta_parameterizer` once layer has been built.")
+   self._beta_parameterizer = val
+
+ @property
+ def gamma_parameterizer(self):
+   return self._gamma_parameterizer
+
+ @gamma_parameterizer.setter
+ def gamma_parameterizer(self, val):
+   if self.built:
+     raise RuntimeError(
+         "Can't set `gamma_parameterizer` once layer has been built.")
+   self._gamma_parameterizer = val
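
  # Sketch of the setter contract above (illustrative only, not part of this
  # diff): parameterizers can be swapped out only before the first call
  # builds the layer.
  #
  #   layer = GDN()
  #   layer.beta_parameterizer = parameterizers.NonnegativeParameterizer()
  #   y = layer(tf.zeros((1, 8)))     # builds the layer with 8 channels
  #   # Assigning layer.beta_parameterizer here would raise RuntimeError.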

  def _channel_axis(self):
-   try:
-     return {"channels_first": 1, "channels_last": -1}[self.data_format]
-   except KeyError:
-     raise ValueError("Unsupported `data_format` for GDN layer: {}.".format(
-         self.data_format))
+   return {"channels_first": 1, "channels_last": -1}[self.data_format]

  def build(self, input_shape):
    channel_axis = self._channel_axis()
-   input_shape = tensor_shape.TensorShape(input_shape)
+   input_shape = tf.TensorShape(input_shape)
    num_channels = input_shape[channel_axis].value
    if num_channels is None:
      raise ValueError("The channel dimension of the inputs to `GDN` "
                       "must be defined.")
    self._input_rank = input_shape.ndims
-   self.input_spec = base.InputSpec(ndim=input_shape.ndims,
-                                    axes={channel_axis: num_channels})
+   self.input_spec = tf.keras.layers.InputSpec(
+       ndim=input_shape.ndims, axes={channel_axis: num_channels})

-   self.beta = self._beta_parameterizer(
+   # Sorry, lint, but these objects really are callable...
+   # pylint:disable=not-callable
+   self.beta = self.beta_parameterizer(
        name="beta", shape=[num_channels], dtype=self.dtype,
-       getter=self.add_variable, initializer=init_ops.Ones())
+       getter=self.add_variable, initializer=tf.initializers.ones())

-   self.gamma = self._gamma_parameterizer(
+   self.gamma = self.gamma_parameterizer(
        name="gamma", shape=[num_channels, num_channels], dtype=self.dtype,
        getter=self.add_variable,
-       initializer=init_ops.Identity(gain=self._gamma_init))
+       initializer=tf.initializers.identity(gain=self._gamma_init))
+   # pylint:enable=not-callable

    self.built = True
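
  # Shape sketch (illustrative only, not part of this diff): for an input
  # with C channels, build() creates a beta vector of shape [C] and a gamma
  # matrix of shape [C, C].
  #
  #   layer = GDN()
  #   layer.build(tf.TensorShape([None, 32, 32, 8]))  # channels_last, C = 8
  #   # layer.beta has shape [8]; layer.gamma has shape [8, 8] and is
  #   # initialized to gamma_init times the identity matrix.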

  def call(self, inputs):
-   inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
+   inputs = tf.convert_to_tensor(inputs, dtype=self.dtype)
    ndim = self._input_rank

    if self.rectify:
-     inputs = nn.relu(inputs)
+     inputs = tf.nn.relu(inputs)

    # Compute normalization pool.
    if ndim == 2:
-     norm_pool = math_ops.matmul(math_ops.square(inputs), self.gamma)
-     norm_pool = nn.bias_add(norm_pool, self.beta)
+     norm_pool = tf.linalg.matmul(tf.math.square(inputs), self.gamma)
+     norm_pool = tf.nn.bias_add(norm_pool, self.beta)
    elif self.data_format == "channels_last" and ndim <= 5:
      shape = self.gamma.shape.as_list()
-     gamma = array_ops.reshape(self.gamma, (ndim - 2) * [1] + shape)
-     norm_pool = nn.convolution(math_ops.square(inputs), gamma, "VALID")
-     norm_pool = nn.bias_add(norm_pool, self.beta)
+     gamma = tf.reshape(self.gamma, (ndim - 2) * [1] + shape)
+     norm_pool = tf.nn.convolution(tf.math.square(inputs), gamma, "VALID")
+     norm_pool = tf.nn.bias_add(norm_pool, self.beta)
    else:  # generic implementation
      # This puts channels in the last dimension regardless of input.
-     norm_pool = math_ops.tensordot(
-         math_ops.square(inputs), self.gamma, [[self._channel_axis()], [0]])
+     norm_pool = tf.linalg.tensordot(
+         tf.math.square(inputs), self.gamma, [[self._channel_axis()], [0]])
      norm_pool += self.beta
      if self.data_format == "channels_first":
        # Return to channels_first format if necessary.
        axes = list(range(ndim - 1))
        axes.insert(1, ndim - 1)
-       norm_pool = array_ops.transpose(norm_pool, axes)
+       norm_pool = tf.transpose(norm_pool, axes)

    if self.inverse:
-     norm_pool = math_ops.sqrt(norm_pool)
+     norm_pool = tf.math.sqrt(norm_pool)
    else:
-     norm_pool = math_ops.rsqrt(norm_pool)
+     norm_pool = tf.math.rsqrt(norm_pool)
    outputs = inputs * norm_pool

-   if not context.executing_eagerly():
+   if not tf.executing_eagerly():
      outputs.set_shape(self.compute_output_shape(inputs.shape))
    return outputs
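
  # Reference sketch (illustrative only, not part of this diff): per the
  # papers cited in the docstring, call() computes
  #   y_c = x_c / sqrt(beta_c + sum_j gamma_jc * x_j**2),
  # with the division replaced by multiplication when inverse=True. A NumPy
  # equivalent of the 2-D branch above, for x of shape [batch, C]:
  #
  #   import numpy as np
  #
  #   def gdn_reference(x, beta, gamma, inverse=False):
  #     norm_pool = np.square(x) @ gamma + beta   # [batch, C]
  #     norm_pool = np.sqrt(norm_pool) if inverse else 1 / np.sqrt(norm_pool)
  #     return x * norm_pool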

  def compute_output_shape(self, input_shape):
-   return tensor_shape.TensorShape(input_shape)
+   return tf.TensorShape(input_shape)