Commit ee7254a

Author: Johannes Ballé (committed)

Converted unit tests to import top-level modules.

1 parent 1329dff · commit ee7254a

File tree: 8 files changed, +108 / -122 lines changed
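
The change follows a single pattern across all eight test files: deep imports from tensorflow.python.* and tensorflow_compression.python.layers.* are replaced by the two top-level packages, and symbols are then referenced through the public namespaces. A minimal sketch of the converted preamble, assembled from the diffs below (not an additional file in this commit):

# Before: deep, per-module imports
#   from tensorflow.python.ops import array_ops
#   from tensorflow_compression.python.layers import entropy_models
# After: top-level public packages only
import tensorflow as tf
import tensorflow_compression as tfc

inputs = tf.placeholder(tf.float32, (None, 1))  # was array_ops.placeholder(dtypes.float32, ...)
layer = tfc.EntropyBottleneck()                 # was entropy_models.EntropyBottleneck()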

tensorflow_compression/python/layers/entropy_models_test.py
Lines changed: 60 additions & 64 deletions

@@ -22,26 +22,22 @@
 # Dependency imports

 import numpy as np
+import tensorflow as tf

-from tensorflow.python.framework import dtypes
-from tensorflow.python.ops import array_ops
-from tensorflow.python.ops import math_ops
-from tensorflow.python.ops import variables
 from tensorflow.python.platform import test
-from tensorflow.python.training import gradient_descent

-from tensorflow_compression.python.layers import entropy_models
+import tensorflow_compression as tfc


 class EntropyBottleneckTest(test.TestCase):

   def test_noise(self):
     # Tests that the noise added is uniform noise between -0.5 and 0.5.
-    inputs = array_ops.placeholder(dtypes.float32, (None, 1))
-    layer = entropy_models.EntropyBottleneck()
+    inputs = tf.placeholder(tf.float32, (None, 1))
+    layer = tfc.EntropyBottleneck()
     noisy, _ = layer(inputs, training=True)
     with self.test_session() as sess:
-      sess.run(variables.global_variables_initializer())
+      sess.run(tf.global_variables_initializer())
       values = np.linspace(-50, 50, 100)[:, None]
       noisy, = sess.run([noisy], {inputs: values})
       self.assertFalse(np.allclose(values, noisy, rtol=0, atol=.49))
@@ -50,14 +46,14 @@ def test_noise(self):
   def test_quantization(self):
     # Tests that inputs are quantized to full integer values, even after
     # quantiles have been updated.
-    inputs = array_ops.placeholder(dtypes.float32, (None, 1))
-    layer = entropy_models.EntropyBottleneck(optimize_integer_offset=False)
+    inputs = tf.placeholder(tf.float32, (None, 1))
+    layer = tfc.EntropyBottleneck(optimize_integer_offset=False)
     quantized, _ = layer(inputs, training=False)
-    opt = gradient_descent.GradientDescentOptimizer(learning_rate=1)
+    opt = tf.train.GradientDescentOptimizer(learning_rate=1)
     self.assertTrue(len(layer.losses) == 1)
     step = opt.minimize(layer.losses[0])
     with self.test_session() as sess:
-      sess.run(variables.global_variables_initializer())
+      sess.run(tf.global_variables_initializer())
       sess.run(step)
       values = np.linspace(-50, 50, 100)[:, None]
       quantized, = sess.run([quantized], {inputs: values})
@@ -67,14 +63,14 @@ def test_quantization_optimized_offset(self):
     # Tests that inputs are not quantized to full integer values after quantiles
     # have been updated. However, the difference between input and output should
     # be between -0.5 and 0.5, and the offset must be consistent.
-    inputs = array_ops.placeholder(dtypes.float32, (None, 1))
-    layer = entropy_models.EntropyBottleneck(optimize_integer_offset=True)
+    inputs = tf.placeholder(tf.float32, (None, 1))
+    layer = tfc.EntropyBottleneck(optimize_integer_offset=True)
     quantized, _ = layer(inputs, training=False)
-    opt = gradient_descent.GradientDescentOptimizer(learning_rate=1)
+    opt = tf.train.GradientDescentOptimizer(learning_rate=1)
     self.assertTrue(len(layer.losses) == 1)
     step = opt.minimize(layer.losses[0])
     with self.test_session() as sess:
-      sess.run(variables.global_variables_initializer())
+      sess.run(tf.global_variables_initializer())
       sess.run(step)
       values = np.linspace(-50, 50, 100)[:, None]
       quantized, = sess.run([quantized], {inputs: values})
@@ -86,17 +82,17 @@ def test_quantization_optimized_offset(self):
   def test_codec(self):
     # Tests that inputs are compressed and decompressed correctly, and quantized
     # to full integer values, even after quantiles have been updated.
-    inputs = array_ops.placeholder(dtypes.float32, (1, None, 1))
-    layer = entropy_models.EntropyBottleneck(
+    inputs = tf.placeholder(tf.float32, (1, None, 1))
+    layer = tfc.EntropyBottleneck(
         data_format="channels_last", init_scale=60,
         optimize_integer_offset=False)
     bitstrings = layer.compress(inputs)
-    decoded = layer.decompress(bitstrings, array_ops.shape(inputs)[1:])
-    opt = gradient_descent.GradientDescentOptimizer(learning_rate=1)
+    decoded = layer.decompress(bitstrings, tf.shape(inputs)[1:])
+    opt = tf.train.GradientDescentOptimizer(learning_rate=1)
     self.assertTrue(len(layer.losses) == 1)
     step = opt.minimize(layer.losses[0])
     with self.test_session() as sess:
-      sess.run(variables.global_variables_initializer())
+      sess.run(tf.global_variables_initializer())
       sess.run(step)
       self.assertTrue(len(layer.updates) == 1)
       sess.run(layer.updates[0])
@@ -109,17 +105,17 @@ def test_codec_optimized_offset(self):
     # quantized to full integer values after quantiles have been updated.
     # However, the difference between input and output should be between -0.5
     # and 0.5, and the offset must be consistent.
-    inputs = array_ops.placeholder(dtypes.float32, (1, None, 1))
-    layer = entropy_models.EntropyBottleneck(
+    inputs = tf.placeholder(tf.float32, (1, None, 1))
+    layer = tfc.EntropyBottleneck(
         data_format="channels_last", init_scale=60,
         optimize_integer_offset=True)
     bitstrings = layer.compress(inputs)
-    decoded = layer.decompress(bitstrings, array_ops.shape(inputs)[1:])
-    opt = gradient_descent.GradientDescentOptimizer(learning_rate=1)
+    decoded = layer.decompress(bitstrings, tf.shape(inputs)[1:])
+    opt = tf.train.GradientDescentOptimizer(learning_rate=1)
     self.assertTrue(len(layer.losses) == 1)
     step = opt.minimize(layer.losses[0])
     with self.test_session() as sess:
-      sess.run(variables.global_variables_initializer())
+      sess.run(tf.global_variables_initializer())
       sess.run(step)
       self.assertTrue(len(layer.updates) == 1)
       sess.run(layer.updates[0])
@@ -133,13 +129,13 @@ def test_codec_optimized_offset(self):
   def test_codec_clipping(self):
     # Tests that inputs are compressed and decompressed correctly, and clipped
     # to the expected range.
-    inputs = array_ops.placeholder(dtypes.float32, (1, None, 1))
-    layer = entropy_models.EntropyBottleneck(
+    inputs = tf.placeholder(tf.float32, (1, None, 1))
+    layer = tfc.EntropyBottleneck(
         data_format="channels_last", init_scale=40)
     bitstrings = layer.compress(inputs)
-    decoded = layer.decompress(bitstrings, array_ops.shape(inputs)[1:])
+    decoded = layer.decompress(bitstrings, tf.shape(inputs)[1:])
     with self.test_session() as sess:
-      sess.run(variables.global_variables_initializer())
+      sess.run(tf.global_variables_initializer())
       self.assertTrue(len(layer.updates) == 1)
       sess.run(layer.updates[0])
       values = np.linspace(-50, 50, 100)[None, :, None]
@@ -150,15 +146,15 @@ def test_codec_clipping(self):
   def test_channels_last(self):
     # Test the layer with more than one channel and multiple input dimensions,
     # with the channels in the last dimension.
-    inputs = array_ops.placeholder(dtypes.float32, (None, None, None, 2))
-    layer = entropy_models.EntropyBottleneck(
+    inputs = tf.placeholder(tf.float32, (None, None, None, 2))
+    layer = tfc.EntropyBottleneck(
         data_format="channels_last", init_scale=50)
     noisy, _ = layer(inputs, training=True)
     quantized, _ = layer(inputs, training=False)
     bitstrings = layer.compress(inputs)
-    decoded = layer.decompress(bitstrings, array_ops.shape(inputs)[1:])
+    decoded = layer.decompress(bitstrings, tf.shape(inputs)[1:])
     with self.test_session() as sess:
-      sess.run(variables.global_variables_initializer())
+      sess.run(tf.global_variables_initializer())
       self.assertTrue(len(layer.updates) == 1)
       sess.run(layer.updates[0])
       values = 5 * np.random.normal(size=(7, 5, 3, 2))
@@ -171,15 +167,15 @@ def test_channels_last(self):
   def test_channels_first(self):
     # Test the layer with more than one channel and multiple input dimensions,
     # with the channel dimension right after the batch dimension.
-    inputs = array_ops.placeholder(dtypes.float32, (None, 3, None, None))
-    layer = entropy_models.EntropyBottleneck(
+    inputs = tf.placeholder(tf.float32, (None, 3, None, None))
+    layer = tfc.EntropyBottleneck(
         data_format="channels_first", init_scale=50)
     noisy, _ = layer(inputs, training=True)
     quantized, _ = layer(inputs, training=False)
     bitstrings = layer.compress(inputs)
-    decoded = layer.decompress(bitstrings, array_ops.shape(inputs)[1:])
+    decoded = layer.decompress(bitstrings, tf.shape(inputs)[1:])
     with self.test_session() as sess:
-      sess.run(variables.global_variables_initializer())
+      sess.run(tf.global_variables_initializer())
       self.assertTrue(len(layer.updates) == 1)
       sess.run(layer.updates[0])
       values = 5 * np.random.normal(size=(2, 3, 5, 7))
@@ -193,13 +189,13 @@ def test_compress(self):
     # Test compression and decompression, and produce test data for
     # `test_decompress`. If you set the constant at the end to `True`, this test
     # will fail and the log will contain the new test data.
-    inputs = array_ops.placeholder(dtypes.float32, (2, 3, 10))
-    layer = entropy_models.EntropyBottleneck(
+    inputs = tf.placeholder(tf.float32, (2, 3, 10))
+    layer = tfc.EntropyBottleneck(
         data_format="channels_first", filters=(), init_scale=2)
     bitstrings = layer.compress(inputs)
-    decoded = layer.decompress(bitstrings, array_ops.shape(inputs)[1:])
+    decoded = layer.decompress(bitstrings, tf.shape(inputs)[1:])
     with self.test_session() as sess:
-      sess.run(variables.global_variables_initializer())
+      sess.run(tf.global_variables_initializer())
       self.assertTrue(len(layer.updates) == 1)
       sess.run(layer.updates[0])
       values = 5 * np.random.uniform(size=(2, 3, 10)) - 2.5
@@ -236,54 +232,54 @@ def test_compress(self):
   def test_decompress(self):
     # Test that decompression of values compressed with a previous version
     # works, i.e. that the file format doesn't change across revisions.
-    bitstrings = array_ops.placeholder(dtypes.string)
-    input_shape = array_ops.placeholder(dtypes.int32)
-    quantized_cdf = array_ops.placeholder(dtypes.int32)
-    layer = entropy_models.EntropyBottleneck(
-        data_format="channels_first", filters=(), dtype=dtypes.float32)
+    bitstrings = tf.placeholder(tf.string)
+    input_shape = tf.placeholder(tf.int32)
+    quantized_cdf = tf.placeholder(tf.int32)
+    layer = tfc.EntropyBottleneck(
+        data_format="channels_first", filters=(), dtype=tf.float32)
     layer.build(self.expected.shape)
     layer._quantized_cdf = quantized_cdf
     decoded = layer.decompress(bitstrings, input_shape[1:])
     with self.test_session() as sess:
-      sess.run(variables.global_variables_initializer())
+      sess.run(tf.global_variables_initializer())
       decoded, = sess.run([decoded], {
          bitstrings: self.bitstrings, input_shape: self.expected.shape,
          quantized_cdf: self.quantized_cdf})
       self.assertAllClose(self.expected, decoded, rtol=0, atol=1e-6)

   def test_build_decompress(self):
     # Test that layer can be built when `decompress` is the first call to it.
-    bitstrings = array_ops.placeholder(dtypes.string)
-    input_shape = array_ops.placeholder(dtypes.int32, shape=[3])
-    layer = entropy_models.EntropyBottleneck(dtype=dtypes.float32)
+    bitstrings = tf.placeholder(tf.string)
+    input_shape = tf.placeholder(tf.int32, shape=[3])
+    layer = tfc.EntropyBottleneck(dtype=tf.float32)
     layer.decompress(bitstrings, input_shape[1:], channels=5)
     self.assertTrue(layer.built)

   def test_pmf_normalization(self):
     # Test that probability mass functions are normalized correctly.
-    layer = entropy_models.EntropyBottleneck(dtype=dtypes.float32)
+    layer = tfc.EntropyBottleneck(dtype=tf.float32)
     layer.build((None, 10))
     with self.test_session() as sess:
-      sess.run(variables.global_variables_initializer())
+      sess.run(tf.global_variables_initializer())
       pmf, = sess.run([layer._pmf])
       self.assertAllClose(np.ones(10), np.sum(pmf, axis=-1), rtol=0, atol=1e-6)

   def test_visualize(self):
     # Test that summary op can be constructed.
-    layer = entropy_models.EntropyBottleneck(dtype=dtypes.float32)
+    layer = tfc.EntropyBottleneck(dtype=tf.float32)
     layer.build((None, 10))
     summary = layer.visualize()
     with self.test_session() as sess:
-      sess.run(variables.global_variables_initializer())
+      sess.run(tf.global_variables_initializer())
       sess.run([summary])

   def test_normalization(self):
     # Test that densities are normalized correctly.
-    inputs = array_ops.placeholder(dtypes.float32, (None, 1))
-    layer = entropy_models.EntropyBottleneck(filters=(2,))
+    inputs = tf.placeholder(tf.float32, (None, 1))
+    layer = tfc.EntropyBottleneck(filters=(2,))
     _, likelihood = layer(inputs, training=True)
     with self.test_session() as sess:
-      sess.run(variables.global_variables_initializer())
+      sess.run(tf.global_variables_initializer())
       x = np.repeat(np.arange(-200, 201), 1000)[:, None]
       likelihood, = sess.run([likelihood], {inputs: x})
       self.assertEqual(x.shape, likelihood.shape)
@@ -292,16 +288,16 @@ def test_normalization(self):

   def test_entropy_estimates(self):
     # Test that entropy estimates match actual range coding.
-    inputs = array_ops.placeholder(dtypes.float32, (1, None, 1))
-    layer = entropy_models.EntropyBottleneck(
+    inputs = tf.placeholder(tf.float32, (1, None, 1))
+    layer = tfc.EntropyBottleneck(
         filters=(2, 3), data_format="channels_last")
     _, likelihood = layer(inputs, training=True)
-    diff_entropy = math_ops.reduce_sum(math_ops.log(likelihood)) / -np.log(2)
+    diff_entropy = tf.reduce_sum(tf.log(likelihood)) / -np.log(2)
     _, likelihood = layer(inputs, training=False)
-    disc_entropy = math_ops.reduce_sum(math_ops.log(likelihood)) / -np.log(2)
+    disc_entropy = tf.reduce_sum(tf.log(likelihood)) / -np.log(2)
     bitstrings = layer.compress(inputs)
     with self.test_session() as sess:
-      sess.run(variables.global_variables_initializer())
+      sess.run(tf.global_variables_initializer())
       self.assertTrue(len(layer.updates) == 1)
       sess.run(layer.updates[0])
       diff_entropy, disc_entropy, bitstrings = sess.run(
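
For orientation, this is the compress/decompress round trip exercised by the codec tests above, written against the top-level API only. A runnable sketch assuming TF 1.x graph mode and tensorflow_compression at this revision; the shape and init_scale are taken from test_codec, the feed/fetch pattern from test_codec_clipping:

import numpy as np
import tensorflow as tf
import tensorflow_compression as tfc

inputs = tf.placeholder(tf.float32, (1, None, 1))
layer = tfc.EntropyBottleneck(data_format="channels_last", init_scale=60)
bitstrings = layer.compress(inputs)
decoded = layer.decompress(bitstrings, tf.shape(inputs)[1:])

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(layer.updates[0])  # refresh the CDF tables used by the range coder
  values = np.linspace(-50, 50, 100)[None, :, None]
  # Feeding `inputs` drives both compression and decompression in one run.
  decoded_values, = sess.run([decoded], {inputs: values})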

tensorflow_compression/python/layers/gdn_test.py
Lines changed: 2 additions & 3 deletions

@@ -23,15 +23,14 @@

 import numpy as np
 import tensorflow as tf
-
-from tensorflow_compression.python.layers import gdn
+import tensorflow_compression as tfc


 class GDNTest(tf.test.TestCase):

   def _run_gdn(self, x, shape, inverse, rectify, data_format):
     inputs = tf.placeholder(tf.float32, shape)
-    layer = gdn.GDN(
+    layer = tfc.GDN(
         inverse=inverse, rectify=rectify, data_format=data_format)
     outputs = layer(inputs)
     with self.test_session() as sess:
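
The same top-level access applies to GDN. A minimal usage sketch under the same TF 1.x assumptions; the input shape and argument values here are illustrative, not taken from the test:

import numpy as np
import tensorflow as tf
import tensorflow_compression as tfc

inputs = tf.placeholder(tf.float32, (None, 32, 32, 3))
layer = tfc.GDN(inverse=False, rectify=False, data_format="channels_last")
outputs = layer(inputs)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  x = np.random.uniform(size=(1, 32, 32, 3)).astype(np.float32)
  y = sess.run(outputs, {inputs: x})  # GDN preserves the input shape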

tensorflow_compression/python/layers/parameterizers_test.py
Lines changed: 5 additions & 6 deletions

@@ -23,8 +23,7 @@

 import numpy as np
 import tensorflow as tf
-
-from tensorflow_compression.python.layers import parameterizers
+import tensorflow_compression as tfc


 class ParameterizersTest(tf.test.TestCase):
@@ -41,31 +40,31 @@ def _test_parameterizer(self, param, init, shape):
   def test_static_parameterizer(self):
     shape = (1, 2, 3, 4)
     var = self._test_parameterizer(
-        parameterizers.StaticParameterizer(tf.initializers.zeros()),
+        tfc.StaticParameterizer(tf.initializers.zeros()),
         tf.initializers.random_uniform(), shape)
     self.assertEqual(var.shape, shape)
     self.assertAllClose(var, np.zeros(shape), rtol=0, atol=1e-7)

   def test_rdft_parameterizer(self):
     shape = (3, 4, 2, 1)
     var = self._test_parameterizer(
-        parameterizers.RDFTParameterizer(),
+        tfc.RDFTParameterizer(),
         tf.initializers.ones(), shape)
     self.assertEqual(var.shape, shape)
     self.assertAllClose(var, np.ones(shape), rtol=0, atol=1e-6)

   def test_nonnegative_parameterizer(self):
     shape = (1, 2, 3, 4)
     var = self._test_parameterizer(
-        parameterizers.NonnegativeParameterizer(),
+        tfc.NonnegativeParameterizer(),
         tf.initializers.random_uniform(), shape)
     self.assertEqual(var.shape, shape)
     self.assertTrue(np.all(var >= 0))

   def test_positive_parameterizer(self):
     shape = (1, 2, 3, 4)
     var = self._test_parameterizer(
-        parameterizers.NonnegativeParameterizer(minimum=.1),
+        tfc.NonnegativeParameterizer(minimum=.1),
         tf.initializers.random_uniform(), shape)
     self.assertEqual(var.shape, shape)
     self.assertTrue(np.all(var >= .1))

tensorflow_compression/python/layers/signal_test.py
Lines changed: 10 additions & 13 deletions

@@ -26,9 +26,8 @@
 import tensorflow as tf

 from tensorflow.python.platform import test
-from tensorflow_compression.python.layers import initializers
-from tensorflow_compression.python.layers import parameterizers
-from tensorflow_compression.python.layers import signal
+
+import tensorflow_compression as tfc


 class SignalTest(tf.test.TestCase):
@@ -98,14 +97,13 @@ def run_valid(self, batch, input_support, channels, filters, kernel_support,
     # Create kernel array.
     kernel = np.random.randint(16, size=kernel_support + (channels, filters))
     kernel = kernel.astype(np.float32)
-    tf_kernel = parameterizers.StaticParameterizer(
-        tf.constant_initializer(kernel))
+    tf_kernel = tfc.StaticParameterizer(tf.constant_initializer(kernel))

     # Run SignalConv* layer.
     layer_class = {
-        3: signal.SignalConv1D,
-        4: signal.SignalConv2D,
-        5: signal.SignalConv3D,
+        3: tfc.SignalConv1D,
+        4: tfc.SignalConv2D,
+        5: tfc.SignalConv3D,
     }[inputs.ndim]
     layer = layer_class(
         filters, kernel_support, corr=corr, strides_down=strides_down,
@@ -148,14 +146,13 @@ def run_same(self, batch, input_support, channels, filters, kernel_support,

     # Create kernel array. This is an identity kernel, so the outputs should
     # be equal to the inputs except for up- and downsampling.
-    tf_kernel = parameterizers.StaticParameterizer(
-        initializers.IdentityInitializer())
+    tf_kernel = tfc.StaticParameterizer(tfc.IdentityInitializer())

     # Run SignalConv* layer.
     layer_class = {
-        3: signal.SignalConv1D,
-        4: signal.SignalConv2D,
-        5: signal.SignalConv3D,
+        3: tfc.SignalConv1D,
+        4: tfc.SignalConv2D,
+        5: tfc.SignalConv3D,
     }[inputs.ndim]
     layer = layer_class(
         1, kernel_support, corr=corr, strides_down=strides_down,
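
Likewise for the SignalConv layers and parameterizers. A sketch of the identity-kernel setup from run_same above, using only top-level names; the concrete input shape and kernel support are illustrative, and kernel_parameterizer is assumed to be the keyword this version uses to inject the parameterizer:

import numpy as np
import tensorflow as tf
import tensorflow_compression as tfc

inputs = tf.placeholder(tf.float32, (1, 8, 8, 1))
layer = tfc.SignalConv2D(
    1, (3, 3), corr=True,
    kernel_parameterizer=tfc.StaticParameterizer(tfc.IdentityInitializer()))
outputs = layer(inputs)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  x = np.random.uniform(size=(1, 8, 8, 1)).astype(np.float32)
  # With an identity kernel, outputs reproduce the inputs up to
  # boundary/padding effects.
  y = sess.run(outputs, {inputs: x})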
