22
22
# Dependency imports
23
23
24
24
import numpy as np
25
+ import tensorflow as tf
25
26
26
- from tensorflow .python .framework import dtypes
27
- from tensorflow .python .ops import array_ops
28
- from tensorflow .python .ops import math_ops
29
- from tensorflow .python .ops import variables
30
27
from tensorflow .python .platform import test
31
- from tensorflow .python .training import gradient_descent
32
28
33
- from tensorflow_compression . python . layers import entropy_models
29
+ import tensorflow_compression as tfc
34
30
35
31
36
32
class EntropyBottleneckTest (test .TestCase ):
37
33
38
34
def test_noise (self ):
39
35
# Tests that the noise added is uniform noise between -0.5 and 0.5.
40
- inputs = array_ops .placeholder (dtypes .float32 , (None , 1 ))
41
- layer = entropy_models .EntropyBottleneck ()
36
+ inputs = tf .placeholder (tf .float32 , (None , 1 ))
37
+ layer = tfc .EntropyBottleneck ()
42
38
noisy , _ = layer (inputs , training = True )
43
39
with self .test_session () as sess :
44
- sess .run (variables .global_variables_initializer ())
40
+ sess .run (tf .global_variables_initializer ())
45
41
values = np .linspace (- 50 , 50 , 100 )[:, None ]
46
42
noisy , = sess .run ([noisy ], {inputs : values })
47
43
self .assertFalse (np .allclose (values , noisy , rtol = 0 , atol = .49 ))
@@ -50,14 +46,14 @@ def test_noise(self):
50
46
def test_quantization (self ):
51
47
# Tests that inputs are quantized to full integer values, even after
52
48
# quantiles have been updated.
53
- inputs = array_ops .placeholder (dtypes .float32 , (None , 1 ))
54
- layer = entropy_models .EntropyBottleneck (optimize_integer_offset = False )
49
+ inputs = tf .placeholder (tf .float32 , (None , 1 ))
50
+ layer = tfc .EntropyBottleneck (optimize_integer_offset = False )
55
51
quantized , _ = layer (inputs , training = False )
56
- opt = gradient_descent .GradientDescentOptimizer (learning_rate = 1 )
52
+ opt = tf . train .GradientDescentOptimizer (learning_rate = 1 )
57
53
self .assertTrue (len (layer .losses ) == 1 )
58
54
step = opt .minimize (layer .losses [0 ])
59
55
with self .test_session () as sess :
60
- sess .run (variables .global_variables_initializer ())
56
+ sess .run (tf .global_variables_initializer ())
61
57
sess .run (step )
62
58
values = np .linspace (- 50 , 50 , 100 )[:, None ]
63
59
quantized , = sess .run ([quantized ], {inputs : values })
@@ -67,14 +63,14 @@ def test_quantization_optimized_offset(self):
67
63
# Tests that inputs are not quantized to full integer values after quantiles
68
64
# have been updated. However, the difference between input and output should
69
65
# be between -0.5 and 0.5, and the offset must be consistent.
70
- inputs = array_ops .placeholder (dtypes .float32 , (None , 1 ))
71
- layer = entropy_models .EntropyBottleneck (optimize_integer_offset = True )
66
+ inputs = tf .placeholder (tf .float32 , (None , 1 ))
67
+ layer = tfc .EntropyBottleneck (optimize_integer_offset = True )
72
68
quantized , _ = layer (inputs , training = False )
73
- opt = gradient_descent .GradientDescentOptimizer (learning_rate = 1 )
69
+ opt = tf . train .GradientDescentOptimizer (learning_rate = 1 )
74
70
self .assertTrue (len (layer .losses ) == 1 )
75
71
step = opt .minimize (layer .losses [0 ])
76
72
with self .test_session () as sess :
77
- sess .run (variables .global_variables_initializer ())
73
+ sess .run (tf .global_variables_initializer ())
78
74
sess .run (step )
79
75
values = np .linspace (- 50 , 50 , 100 )[:, None ]
80
76
quantized , = sess .run ([quantized ], {inputs : values })
@@ -86,17 +82,17 @@ def test_quantization_optimized_offset(self):
86
82
def test_codec (self ):
87
83
# Tests that inputs are compressed and decompressed correctly, and quantized
88
84
# to full integer values, even after quantiles have been updated.
89
- inputs = array_ops .placeholder (dtypes .float32 , (1 , None , 1 ))
90
- layer = entropy_models .EntropyBottleneck (
85
+ inputs = tf .placeholder (tf .float32 , (1 , None , 1 ))
86
+ layer = tfc .EntropyBottleneck (
91
87
data_format = "channels_last" , init_scale = 60 ,
92
88
optimize_integer_offset = False )
93
89
bitstrings = layer .compress (inputs )
94
- decoded = layer .decompress (bitstrings , array_ops .shape (inputs )[1 :])
95
- opt = gradient_descent .GradientDescentOptimizer (learning_rate = 1 )
90
+ decoded = layer .decompress (bitstrings , tf .shape (inputs )[1 :])
91
+ opt = tf . train .GradientDescentOptimizer (learning_rate = 1 )
96
92
self .assertTrue (len (layer .losses ) == 1 )
97
93
step = opt .minimize (layer .losses [0 ])
98
94
with self .test_session () as sess :
99
- sess .run (variables .global_variables_initializer ())
95
+ sess .run (tf .global_variables_initializer ())
100
96
sess .run (step )
101
97
self .assertTrue (len (layer .updates ) == 1 )
102
98
sess .run (layer .updates [0 ])
@@ -109,17 +105,17 @@ def test_codec_optimized_offset(self):
109
105
# quantized to full integer values after quantiles have been updated.
110
106
# However, the difference between input and output should be between -0.5
111
107
# and 0.5, and the offset must be consistent.
112
- inputs = array_ops .placeholder (dtypes .float32 , (1 , None , 1 ))
113
- layer = entropy_models .EntropyBottleneck (
108
+ inputs = tf .placeholder (tf .float32 , (1 , None , 1 ))
109
+ layer = tfc .EntropyBottleneck (
114
110
data_format = "channels_last" , init_scale = 60 ,
115
111
optimize_integer_offset = True )
116
112
bitstrings = layer .compress (inputs )
117
- decoded = layer .decompress (bitstrings , array_ops .shape (inputs )[1 :])
118
- opt = gradient_descent .GradientDescentOptimizer (learning_rate = 1 )
113
+ decoded = layer .decompress (bitstrings , tf .shape (inputs )[1 :])
114
+ opt = tf . train .GradientDescentOptimizer (learning_rate = 1 )
119
115
self .assertTrue (len (layer .losses ) == 1 )
120
116
step = opt .minimize (layer .losses [0 ])
121
117
with self .test_session () as sess :
122
- sess .run (variables .global_variables_initializer ())
118
+ sess .run (tf .global_variables_initializer ())
123
119
sess .run (step )
124
120
self .assertTrue (len (layer .updates ) == 1 )
125
121
sess .run (layer .updates [0 ])
@@ -133,13 +129,13 @@ def test_codec_optimized_offset(self):
133
129
def test_codec_clipping (self ):
134
130
# Tests that inputs are compressed and decompressed correctly, and clipped
135
131
# to the expected range.
136
- inputs = array_ops .placeholder (dtypes .float32 , (1 , None , 1 ))
137
- layer = entropy_models .EntropyBottleneck (
132
+ inputs = tf .placeholder (tf .float32 , (1 , None , 1 ))
133
+ layer = tfc .EntropyBottleneck (
138
134
data_format = "channels_last" , init_scale = 40 )
139
135
bitstrings = layer .compress (inputs )
140
- decoded = layer .decompress (bitstrings , array_ops .shape (inputs )[1 :])
136
+ decoded = layer .decompress (bitstrings , tf .shape (inputs )[1 :])
141
137
with self .test_session () as sess :
142
- sess .run (variables .global_variables_initializer ())
138
+ sess .run (tf .global_variables_initializer ())
143
139
self .assertTrue (len (layer .updates ) == 1 )
144
140
sess .run (layer .updates [0 ])
145
141
values = np .linspace (- 50 , 50 , 100 )[None , :, None ]
@@ -150,15 +146,15 @@ def test_codec_clipping(self):
150
146
def test_channels_last (self ):
151
147
# Test the layer with more than one channel and multiple input dimensions,
152
148
# with the channels in the last dimension.
153
- inputs = array_ops .placeholder (dtypes .float32 , (None , None , None , 2 ))
154
- layer = entropy_models .EntropyBottleneck (
149
+ inputs = tf .placeholder (tf .float32 , (None , None , None , 2 ))
150
+ layer = tfc .EntropyBottleneck (
155
151
data_format = "channels_last" , init_scale = 50 )
156
152
noisy , _ = layer (inputs , training = True )
157
153
quantized , _ = layer (inputs , training = False )
158
154
bitstrings = layer .compress (inputs )
159
- decoded = layer .decompress (bitstrings , array_ops .shape (inputs )[1 :])
155
+ decoded = layer .decompress (bitstrings , tf .shape (inputs )[1 :])
160
156
with self .test_session () as sess :
161
- sess .run (variables .global_variables_initializer ())
157
+ sess .run (tf .global_variables_initializer ())
162
158
self .assertTrue (len (layer .updates ) == 1 )
163
159
sess .run (layer .updates [0 ])
164
160
values = 5 * np .random .normal (size = (7 , 5 , 3 , 2 ))
@@ -171,15 +167,15 @@ def test_channels_last(self):
171
167
def test_channels_first (self ):
172
168
# Test the layer with more than one channel and multiple input dimensions,
173
169
# with the channel dimension right after the batch dimension.
174
- inputs = array_ops .placeholder (dtypes .float32 , (None , 3 , None , None ))
175
- layer = entropy_models .EntropyBottleneck (
170
+ inputs = tf .placeholder (tf .float32 , (None , 3 , None , None ))
171
+ layer = tfc .EntropyBottleneck (
176
172
data_format = "channels_first" , init_scale = 50 )
177
173
noisy , _ = layer (inputs , training = True )
178
174
quantized , _ = layer (inputs , training = False )
179
175
bitstrings = layer .compress (inputs )
180
- decoded = layer .decompress (bitstrings , array_ops .shape (inputs )[1 :])
176
+ decoded = layer .decompress (bitstrings , tf .shape (inputs )[1 :])
181
177
with self .test_session () as sess :
182
- sess .run (variables .global_variables_initializer ())
178
+ sess .run (tf .global_variables_initializer ())
183
179
self .assertTrue (len (layer .updates ) == 1 )
184
180
sess .run (layer .updates [0 ])
185
181
values = 5 * np .random .normal (size = (2 , 3 , 5 , 7 ))
@@ -193,13 +189,13 @@ def test_compress(self):
193
189
# Test compression and decompression, and produce test data for
194
190
# `test_decompress`. If you set the constant at the end to `True`, this test
195
191
# will fail and the log will contain the new test data.
196
- inputs = array_ops .placeholder (dtypes .float32 , (2 , 3 , 10 ))
197
- layer = entropy_models .EntropyBottleneck (
192
+ inputs = tf .placeholder (tf .float32 , (2 , 3 , 10 ))
193
+ layer = tfc .EntropyBottleneck (
198
194
data_format = "channels_first" , filters = (), init_scale = 2 )
199
195
bitstrings = layer .compress (inputs )
200
- decoded = layer .decompress (bitstrings , array_ops .shape (inputs )[1 :])
196
+ decoded = layer .decompress (bitstrings , tf .shape (inputs )[1 :])
201
197
with self .test_session () as sess :
202
- sess .run (variables .global_variables_initializer ())
198
+ sess .run (tf .global_variables_initializer ())
203
199
self .assertTrue (len (layer .updates ) == 1 )
204
200
sess .run (layer .updates [0 ])
205
201
values = 5 * np .random .uniform (size = (2 , 3 , 10 )) - 2.5
@@ -236,54 +232,54 @@ def test_compress(self):
236
232
def test_decompress(self):
  """Decoding bitstrings produced by an earlier revision must still work.

  This pins the on-disk bitstring format: `self.bitstrings`,
  `self.quantized_cdf`, and `self.expected` are fixtures captured from a
  previous version of the codec (presumably written by `test_compress` —
  see that test's comment; TODO confirm).
  """
  # Graph inputs are all fed at run time; shapes are left dynamic.
  coded = tf.placeholder(tf.string)
  shape_ph = tf.placeholder(tf.int32)
  cdf_ph = tf.placeholder(tf.int32)
  layer = tfc.EntropyBottleneck(
      data_format="channels_first", filters=(), dtype=tf.float32)
  layer.build(self.expected.shape)
  # Override the layer's internal CDF table with the recorded fixture so
  # decoding uses exactly the distribution the data was encoded with.
  layer._quantized_cdf = cdf_ph
  decoded = layer.decompress(coded, shape_ph[1:])
  feed = {
      coded: self.bitstrings,
      shape_ph: self.expected.shape,
      cdf_ph: self.quantized_cdf,
  }
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    decoded, = sess.run([decoded], feed)
    self.assertAllClose(self.expected, decoded, rtol=0, atol=1e-6)
253
249
254
250
def test_build_decompress(self):
  """`decompress` as the very first call must trigger the layer build."""
  # No prior call to the layer or to `compress`: build has to happen
  # inside `decompress` itself, using the explicit `channels` argument.
  strings = tf.placeholder(tf.string)
  shape_ph = tf.placeholder(tf.int32, shape=[3])
  layer = tfc.EntropyBottleneck(dtype=tf.float32)
  layer.decompress(strings, shape_ph[1:], channels=5)
  self.assertTrue(layer.built)
261
257
262
258
def test_pmf_normalization(self):
  """Each channel's probability mass function must sum to one."""
  layer = tfc.EntropyBottleneck(dtype=tf.float32)
  # Build for a 10-channel input; no data is fed, we only inspect the
  # internally maintained `_pmf` variable.
  layer.build((None, 10))
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    pmf, = sess.run([layer._pmf])
    totals = np.sum(pmf, axis=-1)
    # One total per channel, each expected to be ~1.
    self.assertAllClose(np.ones(10), totals, rtol=0, atol=1e-6)
270
266
271
267
def test_visualize(self):
  """The summary op returned by `visualize` can be built and evaluated."""
  layer = tfc.EntropyBottleneck(dtype=tf.float32)
  layer.build((None, 10))
  summary_op = layer.visualize()
  # Only checks that evaluation succeeds; the summary contents are not
  # inspected here.
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run([summary_op])
279
275
280
276
def test_normalization (self ):
281
277
# Test that densities are normalized correctly.
282
- inputs = array_ops .placeholder (dtypes .float32 , (None , 1 ))
283
- layer = entropy_models .EntropyBottleneck (filters = (2 ,))
278
+ inputs = tf .placeholder (tf .float32 , (None , 1 ))
279
+ layer = tfc .EntropyBottleneck (filters = (2 ,))
284
280
_ , likelihood = layer (inputs , training = True )
285
281
with self .test_session () as sess :
286
- sess .run (variables .global_variables_initializer ())
282
+ sess .run (tf .global_variables_initializer ())
287
283
x = np .repeat (np .arange (- 200 , 201 ), 1000 )[:, None ]
288
284
likelihood , = sess .run ([likelihood ], {inputs : x })
289
285
self .assertEqual (x .shape , likelihood .shape )
@@ -292,16 +288,16 @@ def test_normalization(self):
292
288
293
289
def test_entropy_estimates (self ):
294
290
# Test that entropy estimates match actual range coding.
295
- inputs = array_ops .placeholder (dtypes .float32 , (1 , None , 1 ))
296
- layer = entropy_models .EntropyBottleneck (
291
+ inputs = tf .placeholder (tf .float32 , (1 , None , 1 ))
292
+ layer = tfc .EntropyBottleneck (
297
293
filters = (2 , 3 ), data_format = "channels_last" )
298
294
_ , likelihood = layer (inputs , training = True )
299
- diff_entropy = math_ops .reduce_sum (math_ops .log (likelihood )) / - np .log (2 )
295
+ diff_entropy = tf .reduce_sum (tf .log (likelihood )) / - np .log (2 )
300
296
_ , likelihood = layer (inputs , training = False )
301
- disc_entropy = math_ops .reduce_sum (math_ops .log (likelihood )) / - np .log (2 )
297
+ disc_entropy = tf .reduce_sum (tf .log (likelihood )) / - np .log (2 )
302
298
bitstrings = layer .compress (inputs )
303
299
with self .test_session () as sess :
304
- sess .run (variables .global_variables_initializer ())
300
+ sess .run (tf .global_variables_initializer ())
305
301
self .assertTrue (len (layer .updates ) == 1 )
306
302
sess .run (layer .updates [0 ])
307
303
diff_entropy , disc_entropy , bitstrings = sess .run (
0 commit comments