 from __future__ import division
 from __future__ import print_function
 
-import numpy as np
+# Dependency imports
 
-from tensorflow_compression.python.layers import entropybottleneck
+import numpy as np
 
 from tensorflow.python.framework import dtypes
 from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import math_ops
 from tensorflow.python.ops import variables
 from tensorflow.python.platform import test
 from tensorflow.python.training import gradient_descent
 
+from tensorflow_compression.python.layers import entropy_models
+
 
 class EntropyBottleneckTest(test.TestCase):
 
   def test_noise(self):
     # Tests that the noise added is uniform noise between -0.5 and 0.5.
     inputs = array_ops.placeholder(dtypes.float32, (None, 1))
-    layer = entropybottleneck.EntropyBottleneck()
+    layer = entropy_models.EntropyBottleneck()
     noisy, _ = layer(inputs, training=True)
     with self.test_session() as sess:
       sess.run(variables.global_variables_initializer())
@@ -49,7 +51,7 @@ def test_quantization(self):
     # Tests that inputs are quantized to full integer values, even after
     # quantiles have been updated.
     inputs = array_ops.placeholder(dtypes.float32, (None, 1))
-    layer = entropybottleneck.EntropyBottleneck(optimize_integer_offset=False)
+    layer = entropy_models.EntropyBottleneck(optimize_integer_offset=False)
     quantized, _ = layer(inputs, training=False)
     opt = gradient_descent.GradientDescentOptimizer(learning_rate=1)
     self.assertTrue(len(layer.losses) == 1)
@@ -66,7 +68,7 @@ def test_quantization_optimized_offset(self):
     # have been updated. However, the difference between input and output should
     # be between -0.5 and 0.5, and the offset must be consistent.
     inputs = array_ops.placeholder(dtypes.float32, (None, 1))
-    layer = entropybottleneck.EntropyBottleneck(optimize_integer_offset=True)
+    layer = entropy_models.EntropyBottleneck(optimize_integer_offset=True)
     quantized, _ = layer(inputs, training=False)
     opt = gradient_descent.GradientDescentOptimizer(learning_rate=1)
     self.assertTrue(len(layer.losses) == 1)
@@ -85,7 +87,7 @@ def test_codec(self):
     # Tests that inputs are compressed and decompressed correctly, and quantized
     # to full integer values, even after quantiles have been updated.
     inputs = array_ops.placeholder(dtypes.float32, (1, None, 1))
-    layer = entropybottleneck.EntropyBottleneck(
+    layer = entropy_models.EntropyBottleneck(
         data_format="channels_last", init_scale=60,
         optimize_integer_offset=False)
     bitstrings = layer.compress(inputs)
@@ -108,7 +110,7 @@ def test_codec_optimized_offset(self):
     # However, the difference between input and output should be between -0.5
     # and 0.5, and the offset must be consistent.
     inputs = array_ops.placeholder(dtypes.float32, (1, None, 1))
-    layer = entropybottleneck.EntropyBottleneck(
+    layer = entropy_models.EntropyBottleneck(
         data_format="channels_last", init_scale=60,
         optimize_integer_offset=True)
     bitstrings = layer.compress(inputs)
@@ -132,7 +134,7 @@ def test_codec_clipping(self):
     # Tests that inputs are compressed and decompressed correctly, and clipped
     # to the expected range.
     inputs = array_ops.placeholder(dtypes.float32, (1, None, 1))
-    layer = entropybottleneck.EntropyBottleneck(
+    layer = entropy_models.EntropyBottleneck(
         data_format="channels_last", init_scale=40)
     bitstrings = layer.compress(inputs)
     decoded = layer.decompress(bitstrings, array_ops.shape(inputs)[1:])
@@ -149,7 +151,7 @@ def test_channels_last(self):
     # Test the layer with more than one channel and multiple input dimensions,
     # with the channels in the last dimension.
     inputs = array_ops.placeholder(dtypes.float32, (None, None, None, 2))
-    layer = entropybottleneck.EntropyBottleneck(
+    layer = entropy_models.EntropyBottleneck(
         data_format="channels_last", init_scale=50)
     noisy, _ = layer(inputs, training=True)
     quantized, _ = layer(inputs, training=False)
@@ -170,7 +172,7 @@ def test_channels_first(self):
     # Test the layer with more than one channel and multiple input dimensions,
     # with the channel dimension right after the batch dimension.
     inputs = array_ops.placeholder(dtypes.float32, (None, 3, None, None))
-    layer = entropybottleneck.EntropyBottleneck(
+    layer = entropy_models.EntropyBottleneck(
         data_format="channels_first", init_scale=50)
     noisy, _ = layer(inputs, training=True)
     quantized, _ = layer(inputs, training=False)
@@ -192,7 +194,7 @@ def test_compress(self):
     # `test_decompress`. If you set the constant at the end to `True`, this test
     # will fail and the log will contain the new test data.
     inputs = array_ops.placeholder(dtypes.float32, (2, 3, 10))
-    layer = entropybottleneck.EntropyBottleneck(
+    layer = entropy_models.EntropyBottleneck(
         data_format="channels_first", filters=(), init_scale=2)
     bitstrings = layer.compress(inputs)
     decoded = layer.decompress(bitstrings, array_ops.shape(inputs)[1:])
@@ -237,7 +239,7 @@ def test_decompress(self):
     bitstrings = array_ops.placeholder(dtypes.string)
     input_shape = array_ops.placeholder(dtypes.int32)
     quantized_cdf = array_ops.placeholder(dtypes.int32)
-    layer = entropybottleneck.EntropyBottleneck(
+    layer = entropy_models.EntropyBottleneck(
         data_format="channels_first", filters=(), dtype=dtypes.float32)
     layer.build(self.expected.shape)
     layer._quantized_cdf = quantized_cdf
@@ -253,13 +255,13 @@ def test_build_decompress(self):
     # Test that layer can be built when `decompress` is the first call to it.
     bitstrings = array_ops.placeholder(dtypes.string)
     input_shape = array_ops.placeholder(dtypes.int32, shape=[3])
-    layer = entropybottleneck.EntropyBottleneck(dtype=dtypes.float32)
+    layer = entropy_models.EntropyBottleneck(dtype=dtypes.float32)
     layer.decompress(bitstrings, input_shape[1:], channels=5)
     self.assertTrue(layer.built)
 
   def test_pmf_normalization(self):
     # Test that probability mass functions are normalized correctly.
-    layer = entropybottleneck.EntropyBottleneck(dtype=dtypes.float32)
+    layer = entropy_models.EntropyBottleneck(dtype=dtypes.float32)
     layer.build((None, 10))
     with self.test_session() as sess:
       sess.run(variables.global_variables_initializer())
@@ -268,7 +270,7 @@ def test_pmf_normalization(self):
 
   def test_visualize(self):
     # Test that summary op can be constructed.
-    layer = entropybottleneck.EntropyBottleneck(dtype=dtypes.float32)
+    layer = entropy_models.EntropyBottleneck(dtype=dtypes.float32)
     layer.build((None, 10))
     summary = layer.visualize()
     with self.test_session() as sess:
@@ -278,7 +280,7 @@ def test_visualize(self):
   def test_normalization(self):
     # Test that densities are normalized correctly.
     inputs = array_ops.placeholder(dtypes.float32, (None, 1))
-    layer = entropybottleneck.EntropyBottleneck(filters=(2,))
+    layer = entropy_models.EntropyBottleneck(filters=(2,))
     _, likelihood = layer(inputs, training=True)
     with self.test_session() as sess:
       sess.run(variables.global_variables_initializer())
@@ -291,7 +293,7 @@ def test_normalization(self):
   def test_entropy_estimates(self):
     # Test that entropy estimates match actual range coding.
     inputs = array_ops.placeholder(dtypes.float32, (1, None, 1))
-    layer = entropybottleneck.EntropyBottleneck(
+    layer = entropy_models.EntropyBottleneck(
         filters=(2, 3), data_format="channels_last")
     _, likelihood = layer(inputs, training=True)
     diff_entropy = math_ops.reduce_sum(math_ops.log(likelihood)) / -np.log(2)
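For context only, a minimal usage sketch of the renamed module, assembled from the calls exercised in the tests above. It is an illustrative assumption (TF 1.x graph mode, tensorflow_compression installed, public tf API instead of the internal op modules), not part of this change.

# Illustrative sketch; assumed setup: TF 1.x graph mode, tensorflow_compression
# installed. Mirrors the API calls shown in the tests above.
import numpy as np
import tensorflow as tf

from tensorflow_compression.python.layers import entropy_models

inputs = tf.placeholder(tf.float32, (1, None, 1))
layer = entropy_models.EntropyBottleneck(data_format="channels_last")

# Training path: noisy surrogate values and their likelihoods.
noisy, likelihood = layer(inputs, training=True)
# Differential-entropy estimate in bits, as in test_entropy_estimates.
est_bits = tf.reduce_sum(tf.log(likelihood)) / -np.log(2)

# Inference path: range-code to bit strings, then decode back.
bitstrings = layer.compress(inputs)
decoded = layer.decompress(bitstrings, tf.shape(inputs)[1:])

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  x = np.random.uniform(-10, 10, (1, 100, 1)).astype(np.float32)
  print(sess.run(est_bits, {inputs: x}))
  print(sess.run(decoded, {inputs: x}).shape)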