
Commit ee7bfc0

Merge branch 'master' into clusterable_layer
2 parents 417aa54 + c35fc4c commit ee7bfc0

15 files changed: +316 additions, -32 deletions


RELEASE.md

Lines changed: 8 additions & 0 deletions
@@ -30,6 +30,14 @@ Keras pruning API:
 * Tested against TensorFlow 1.14.0, 2.0.0, and nightly, and Python 3.
 
 
+# TensorFlow Model Optimization next release TBD
+
+Keras clustering API:
+
+* Added *ClusteringSummaries* to create additional output for the clustering
+  progress for TensorBoard.
+* Tested against TensorFlow 1.14.0, 2.0.0, and nightly, and Python 3.
+
 # TensorFlow Model Optimization 0.5.0
 
 TFMOT 0.5.0 adds some additional features for Quantization Aware Training. QAT
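For context on the ClusteringSummaries entry above, here is a minimal usage sketch. It assumes the callback accepts a TensorBoard-style log_dir argument and is exported as tfmot.clustering.keras.ClusteringSummaries (per the __init__.py change further down); the model and hyperparameters are illustrative, not part of this commit.

```python
import tensorflow as tf
import tensorflow_model_optimization as tfmot

# Illustrative model; any clusterable Keras model works the same way.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation='relu', input_shape=(10,)),
    tf.keras.layers.Dense(1),
])

clustered_model = tfmot.clustering.keras.cluster_weights(
    model,
    number_of_clusters=8,
    cluster_centroids_init=tfmot.clustering.keras.CentroidInitialization.LINEAR)
clustered_model.compile(optimizer='adam', loss='mse')

# Assumed signature: the new callback writes clustering progress to TensorBoard
# alongside the usual training summaries.
callbacks = [tfmot.clustering.keras.ClusteringSummaries(log_dir='/tmp/cluster_logs')]
clustered_model.fit(tf.random.normal((32, 10)), tf.random.normal((32, 1)),
                    epochs=2, callbacks=callbacks)
```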

tensorflow_model_optimization/python/core/api/clustering/keras/__init__.py

Lines changed: 2 additions & 0 deletions
@@ -23,4 +23,6 @@
 
 from tensorflow_model_optimization.python.core.clustering.keras.cluster_config import CentroidInitialization
 from tensorflow_model_optimization.python.core.clustering.keras.clustering_algorithm import AbstractClusteringAlgorithm
+from tensorflow_model_optimization.python.core.clustering.keras.clustering_callbacks import ClusteringSummaries
+
 # pylint: enable=g-bad-import-order

tensorflow_model_optimization/python/core/clustering/keras/BUILD

Lines changed: 1 addition & 0 deletions
@@ -12,6 +12,7 @@ py_library(
     srcs_version = "PY3",
     deps = [
         ":cluster",
+        ":clustering_callbacks",
         "//tensorflow_model_optimization/python/core/clustering/keras/experimental",
     ],
 )

tensorflow_model_optimization/python/core/internal/tensor_encoding/utils/tf_utils.py

Lines changed: 128 additions & 0 deletions
@@ -311,6 +311,10 @@ def pack_into_int(value, input_bitrange, target_bitrange):
   """
   # TODO(b/161433177): Provide a general solution without memory overhead.
   # Special cases implemented without extra memory overhead.
+  if input_bitrange == 6 and target_bitrange == 28:
+    return _pack_into_int_6_28(value)
+  if input_bitrange == 7 and target_bitrange == 28:
+    return _pack_into_int_7_28(value)
   if input_bitrange == 8 and target_bitrange == 28:
     return _pack_into_int_8_28(value)
   if input_bitrange == 12 and target_bitrange == 28:
@@ -349,6 +353,10 @@ def unpack_from_int(value, original_bitrange, target_bitrange, shape):
   """
   # TODO(b/161433177): Provide a general solution without memory overhead.
   # Special cases implemented without extra memory overhead.
+  if original_bitrange == 6 and target_bitrange == 28:
+    return _unpack_from_int_6_28(value, shape)
+  if original_bitrange == 7 and target_bitrange == 28:
+    return _unpack_from_int_7_28(value, shape)
   if original_bitrange == 8 and target_bitrange == 28:
     return _unpack_from_int_8_28(value, shape)
   if original_bitrange == 12 and target_bitrange == 28:
@@ -386,6 +394,85 @@ def _expand_to_binary_form(value, input_bits):
   return tf.reshape(bits, [-1])
 
 
+def _pack_into_int_6_28(value):
+  """Implementation of `pack_into_int` for specific bitranges.
+
+  This method corresponds to `(input_bitrange, target_bitrange)` from the
+  `pack_into_int` method equal to `(6, 28)`. This method relies on the fact that
+  14 values in the 6-bit bitrange can be packed into 3 values in the 28-bit bitrange
+  (14 = least_common_multiple(6, 28) / 6).
+
+  It reshapes the input into a matrix of 14 columns and performs operations on the
+  columns of the matrix, thus vectorizing the operations and avoiding the memory
+  overhead of an earlier general implementation.
+
+  Args:
+    value: An integer Tensor to be packed with values in [0, 2**6 - 1].
+
+  Returns:
+    An integer Tensor representing `value` of the same dtype as `value`.
+  """
+  value = tf.reshape(value, [-1])
+  extra_zeros = tf.zeros(tf.math.mod(-tf.shape(value), 14), value.dtype)
+  val = tf.reshape(tf.concat([value, extra_zeros], 0), [-1, 14])
+
+  a = (val[:, 0] +
+       val[:, 1] * 2**6 +
+       val[:, 2] * 2**12 +
+       val[:, 3] * 2**18 +
+       tf.math.mod(val[:, 4], 2**4) * 2**24)
+  b = (tf.math.floordiv(val[:, 4], 2**4) +
+       val[:, 5] * 2**2 +
+       val[:, 6] * 2**8 +
+       val[:, 7] * 2**14 +
+       val[:, 8] * 2**20 +
+       tf.math.mod(val[:, 9], 2**2) * 2**26)
+  c = (tf.math.floordiv(val[:, 9], 2**2) +
+       val[:, 10] * 2**4 +
+       val[:, 11] * 2**10 +
+       val[:, 12] * 2**16 +
+       val[:, 13] * 2**22)
+
+  packed_val = tf.reshape(tf.stack([a, b, c], 1), [-1, 1])
+  if extra_zeros.shape[0] in [5, 6, 7, 8, 9]:
+    # We added an unnecessary product of zeros to the representation.
+    packed_val = tf.slice(packed_val, [0, 0], [packed_val.shape[0] - 1, 1])
+  if extra_zeros.shape[0] in [10, 11, 12, 13]:
+    # We added two unnecessary products of zeros to the representation.
+    packed_val = tf.slice(packed_val, [0, 0], [packed_val.shape[0] - 2, 1])
+  return packed_val
+
+
+def _pack_into_int_7_28(value):
+  """Implementation of `pack_into_int` for specific bitranges.
+
+  This method corresponds to `(input_bitrange, target_bitrange)` from the
+  `pack_into_int` method equal to `(7, 28)`. This method relies on the fact that
+  4 values in the 7-bit bitrange can be packed into 1 value in the 28-bit bitrange
+  (4 = least_common_multiple(7, 28) / 7).
+
+  It reshapes the input into a matrix of 4 columns and performs operations on the
+  columns of the matrix, thus vectorizing the operations and avoiding the memory
+  overhead of an earlier general implementation.
+
+  Args:
+    value: An integer Tensor to be packed with values in [0, 2**7 - 1].
+
+  Returns:
+    An integer Tensor representing `value` of the same dtype as `value`.
+  """
+  value = tf.reshape(value, [-1])
+  extra_zeros = tf.zeros(tf.math.mod(-tf.shape(value), 4), value.dtype)
+  val = tf.reshape(tf.concat([value, extra_zeros], 0), [-1, 4])
+
+  packed_val = (val[:, 0] +
+                val[:, 1] * 2**7 +
+                val[:, 2] * 2**14 +
+                val[:, 3] * 2**21)
+
+  return tf.reshape(packed_val, [-1, 1])
+
+
 def _pack_into_int_8_28(value):
   """Implementation of `pack_into_int` for specific bitranges.
 
@@ -466,6 +553,47 @@ def _pack_into_int_12_28(value):
   return packed_val
 
 
+def _unpack_from_int_6_28(value, shape):
+  """Inverse operation of `_pack_into_int_6_28`."""
+  value = tf.reshape(value, [-1])
+  extra_zeros = tf.zeros(tf.math.mod(-tf.shape(value), 3), value.dtype)
+  val = tf.reshape(tf.concat([value, extra_zeros], 0), [-1, 3])
+
+  a = tf.math.mod(val[:, 0], 2**6)
+  b = tf.math.mod(tf.math.floordiv(val[:, 0], 2**6), 2**6)
+  c = tf.math.mod(tf.math.floordiv(val[:, 0], 2**12), 2**6)
+  d = tf.math.mod(tf.math.floordiv(val[:, 0], 2**18), 2**6)
+  e = tf.math.floordiv(val[:, 0], 2**24) + tf.math.mod(val[:, 1], 2**2) * 2**4
+  f = tf.math.mod(tf.math.floordiv(val[:, 1], 2**2), 2**6)
+  g = tf.math.mod(tf.math.floordiv(val[:, 1], 2**8), 2**6)
+  h = tf.math.mod(tf.math.floordiv(val[:, 1], 2**14), 2**6)
+  i = tf.math.mod(tf.math.floordiv(val[:, 1], 2**20), 2**6)
+  j = tf.math.floordiv(val[:, 1], 2**26) + tf.math.mod(val[:, 2], 2**4) * 2**2
+  k = tf.math.mod(tf.math.floordiv(val[:, 2], 2**4), 2**6)
+  l = tf.math.mod(tf.math.floordiv(val[:, 2], 2**10), 2**6)
+  m = tf.math.mod(tf.math.floordiv(val[:, 2], 2**16), 2**6)
+  n = tf.math.mod(tf.math.floordiv(val[:, 2], 2**22), 2**6)
+
+  unpacked_val = tf.reshape(
+      tf.stack([a, b, c, d, e, f, g, h, i, j, k, l, m, n], 1), [-1,])
+  unpacked_val = tf.slice(unpacked_val, [0], [tf.reduce_prod(shape)])
+  return tf.reshape(unpacked_val, shape)
+
+
+def _unpack_from_int_7_28(value, shape):
+  """Inverse operation of `_pack_into_int_7_28`."""
+  val = tf.reshape(value, [-1, 1])
+
+  a = tf.math.mod(val[:, 0], 2**7)
+  b = tf.math.mod(tf.math.floordiv(val[:, 0], 2**7), 2**7)
+  c = tf.math.mod(tf.math.floordiv(val[:, 0], 2**14), 2**7)
+  d = tf.math.mod(tf.math.floordiv(val[:, 0], 2**21), 2**7)
+
+  unpacked_val = tf.reshape(tf.stack([a, b, c, d], 1), [-1,])
+  unpacked_val = tf.slice(unpacked_val, [0], [tf.reduce_prod(shape)])
+  return tf.reshape(unpacked_val, shape)
+
+
 def _unpack_from_int_8_28(value, shape):
   """Inverse operation of `_pack_into_int_8_28`."""
   value = tf.reshape(value, [-1])
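The two new pack/unpack pairs above slot into the existing special-case dispatch in pack_into_int and unpack_from_int. A minimal round-trip sketch of those entry points, using the 6-bit values from the new tests below (assumes TF 2.x eager execution and the module path shown in this file's header):

```python
import tensorflow as tf
from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils import tf_utils

# 17 six-bit values in [0, 63]. Since 14 * 6 = 3 * 28 bits, every 14 inputs
# fill exactly three 28-bit words, so 17 values need ceil(17 * 6 / 28) = 4
# packed integers.
values = tf.constant(
    [50, 19, 51, 59, 10, 53, 36, 44, 31, 44, 31, 10, 31, 56, 49, 48, 35])
packed = tf_utils.pack_into_int(values, input_bitrange=6, target_bitrange=28)
restored = tf_utils.unpack_from_int(
    packed, original_bitrange=6, target_bitrange=28, shape=(17,))
assert bool(tf.reduce_all(tf.equal(values, restored)))  # packing is lossless
```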

tensorflow_model_optimization/python/core/internal/tensor_encoding/utils/tf_utils_test.py

Lines changed: 40 additions & 2 deletions
@@ -375,7 +375,8 @@ def test_unpack_from_int_different_outputs(self):
         packed_value, original_bitrange=4, target_bitrange=28, shape=(2,))
     self.assertAllEqual([9, 0], self.evaluate(unpacked_value))
 
-  @parameterized.parameters([(1, 28), (2, 28), (8, 28), (12, 28)])
+  @parameterized.parameters([(1, 28), (2, 28), (6, 28), (7, 28), (8, 28),
+                             (12, 28)])
   def test_boundary_conditions(self, input_bitrange, target_bitrange):
     max_v = 2**input_bitrange - 1
     input_value = tf.constant(
@@ -390,7 +391,8 @@ def test_boundary_conditions(self, input_bitrange, target_bitrange):
 
     self.assertAllEqual(self.evaluate(value), self.evaluate(unpacked_value))
 
-  @parameterized.parameters([(1, 28), (2, 28), (8, 28), (12, 28)])
+  @parameterized.parameters([(1, 28), (2, 28), (6, 28), (7, 28), (8, 28),
+                             (12, 28)])
   def test_random_input(self, input_bitrange, target_bitrange):
     # Tests that packing/unpacking amounts to identity, regardless of the input.
     num_elements = np.random.randint(low=1, high=50)
@@ -407,6 +409,42 @@ def test_random_input(self, input_bitrange, target_bitrange):
     except:  # pylint: disable=bare-except
       self.fail(f'Random input test failed with input value: {value}')
 
+  def test_pack_into_int_special_case_6_28(self):
+    value = tf.constant(
+        [50, 19, 51, 59, 10, 53, 36, 44, 31, 44, 31, 10, 31, 56, 49, 48, 35])
+    packed_value = tf_utils.pack_into_int(
+        value, input_bitrange=6, target_bitrange=28)
+    expected_packed_value = tf.constant([[183448818], [33236180], [236923387],
+                                         [146481]])
+    self.assertAllEqual(self.evaluate(expected_packed_value),
+                        self.evaluate(packed_value))
+
+  def test_unpack_from_int_special_case_6_28(self):
+    packed_value = tf.constant([[183448818], [33236180], [236923387], [146481]])
+    unpacked_value = tf_utils.unpack_from_int(
+        packed_value, original_bitrange=6, target_bitrange=28, shape=(17,))
+    expected_unpacked_value = tf.constant(
+        [50, 19, 51, 59, 10, 53, 36, 44, 31, 44, 31, 10, 31, 56, 49, 48, 35])
+    self.assertAllEqual(self.evaluate(expected_unpacked_value),
+                        self.evaluate(unpacked_value))
+
+  def test_pack_into_int_special_case_7_28(self):
+    value = tf.constant([117, 86, 42, 69, 9, 70, 66, 8, 112, 116])
+    packed_value = tf_utils.pack_into_int(
+        value, input_bitrange=7, target_bitrange=28)
+    expected_packed_value = tf.constant([[145402741], [17867529], [14960]])
+    self.assertAllEqual(self.evaluate(expected_packed_value),
+                        self.evaluate(packed_value))
+
+  def test_unpack_from_int_special_case_7_28(self):
+    packed_value = tf.constant([[145402741], [17867529], [14960]])
+    unpacked_value = tf_utils.unpack_from_int(
+        packed_value, original_bitrange=7, target_bitrange=28, shape=(10,))
+    expected_unpacked_value = tf.constant(
+        [117, 86, 42, 69, 9, 70, 66, 8, 112, 116])
+    self.assertAllEqual(self.evaluate(expected_unpacked_value),
+                        self.evaluate(unpacked_value))
+
   def test_pack_into_int_special_case_8_28(self):
     value = tf.constant([38, 147, 1, 201, 205, 36, 155, 78, 163, 98])
     packed_value = tf_utils.pack_into_int(
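The expected constants in the new 7-to-28-bit tests can be reproduced with plain integer arithmetic, since each packed word is just four 7-bit payloads shifted into place. A quick verification sketch (not part of the test file):

```python
# First packed word of test_pack_into_int_special_case_7_28:
vals = [117, 86, 42, 69]
packed = vals[0] + vals[1] * 2**7 + vals[2] * 2**14 + vals[3] * 2**21
assert packed == 145402741  # matches expected_packed_value[0] above
```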

tensorflow_model_optimization/python/core/quantization/keras/default_8bit/default_8bit_quantize_registry.py

Lines changed: 14 additions & 1 deletion
@@ -92,7 +92,6 @@ class Default8BitQuantizeRegistry(
       # layers.DepthwiseConv2D is supported and handled in code below.
 
       # _QuantizeInfo(layers.Conv3D, ['kernel'], ['activation']),
-      # _QuantizeInfo(layers.Conv2DTranspose, ['kernel'], ['activation']),
       # _QuantizeInfo(layers.Conv3DTranspose, ['kernel'], ['activation']),
       _no_quantize(layers.Cropping1D),
       _no_quantize(layers.Cropping2D),
@@ -198,6 +197,9 @@ def __init__(self):
     self._layer_quantize_map[
         layers.DepthwiseConv2D] = Default8BitConvQuantizeConfig(
             ['depthwise_kernel'], ['activation'], False)
+    self._layer_quantize_map[layers.Conv2DTranspose] = \
+        Default8BitConvTransposeQuantizeConfig(
+            ['kernel'], ['activation'], False)
 
   def _is_supported_layer(self, layer_class):
     return layer_class in self._layer_quantize_map
@@ -509,6 +511,17 @@ def __init__(self, weight_attrs, activation_attrs, quantize_output):
     )
 
 
+class Default8BitConvTransposeQuantizeConfig(Default8BitQuantizeConfig):
+  """QuantizeConfig for Conv2DTranspose layers."""
+
+  def __init__(self, weight_attrs, activation_attrs, quantize_output):
+    super(Default8BitConvTransposeQuantizeConfig,
+          self).__init__(weight_attrs, activation_attrs, quantize_output)
+
+    self.weight_quantizer = default_8bit_quantizers.Default8BitConvTransposeWeightsQuantizer(
+    )
+
+
 def _types_dict():
   return {
       'Default8BitQuantizeConfig':
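Registering Conv2DTranspose in the default 8-bit registry means the standard quantize_model entry point can now annotate it without a user-supplied QuantizeConfig. A minimal sketch (layer sizes are illustrative and mirror the model added to the numerical test below):

```python
import tensorflow as tf
import tensorflow_model_optimization as tfmot

inputs = tf.keras.Input(shape=(32, 32, 3))
outputs = tf.keras.layers.Conv2DTranspose(
    2, kernel_size=(3, 3), strides=(2, 2))(inputs)
model = tf.keras.Model(inputs, outputs)

# Conv2DTranspose previously had no entry in the default registry (see the
# commented-out _QuantizeInfo removed above) and needed a custom QuantizeConfig;
# with this commit the registry supplies Default8BitConvTransposeQuantizeConfig.
quantized_model = tfmot.quantization.keras.quantize_model(model)
quantized_model.summary()
```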

tensorflow_model_optimization/python/core/quantization/keras/default_8bit/default_8bit_quantizers.py

Lines changed: 17 additions & 0 deletions
@@ -41,3 +41,20 @@ def build(self, tensor_shape, name, layer):
         trainable=False)
 
     return {'min_var': min_weight, 'max_var': max_weight}
+
+
+class Default8BitConvTransposeWeightsQuantizer(quantizers.LastValueQuantizer):
+  """Quantizer for handling weights in Conv2DTranspose layers."""
+
+  def __init__(self):
+    """Construct LastValueQuantizer with params specific for TFLite Conv2DTranspose."""
+
+    super(Default8BitConvTransposeWeightsQuantizer, self).__init__(
+        num_bits=8, per_axis=False, symmetric=True, narrow_range=True)
+
+  def __call__(self, inputs, training, weights, **kwargs):
+    outputs = tf.transpose(inputs, (0, 1, 3, 2))
+    outputs = super(Default8BitConvTransposeWeightsQuantizer,
+                    self).__call__(outputs, training, weights, **kwargs)
+    outputs = tf.transpose(outputs, (0, 1, 3, 2))
+    return outputs
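The transposes in Default8BitConvTransposeWeightsQuantizer exist because Keras stores Conv2DTranspose kernels with the channel axes swapped relative to Conv2D: Conv2D kernels are (height, width, in_channels, out_channels) while Conv2DTranspose kernels are (height, width, out_channels, in_channels). The quantizer swaps axes 2 and 3 so the fake-quant sees output channels on the last axis, then restores the layer's native layout. A small shape check (illustrative only):

```python
import tensorflow as tf

conv = tf.keras.layers.Conv2D(filters=8, kernel_size=3)
conv.build((None, 32, 32, 3))
print(conv.kernel.shape)    # (3, 3, 3, 8): (h, w, in_channels, out_channels)

conv_t = tf.keras.layers.Conv2DTranspose(filters=8, kernel_size=3)
conv_t.build((None, 32, 32, 3))
print(conv_t.kernel.shape)  # (3, 3, 8, 3): (h, w, out_channels, in_channels)

# tf.transpose(kernel, (0, 1, 3, 2)) maps one layout onto the other, which is
# what the new quantizer does around the LastValueQuantizer call.
```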

tensorflow_model_optimization/python/core/quantization/keras/default_8bit/quantize_numerical_test.py

Lines changed: 8 additions & 0 deletions
@@ -153,6 +153,13 @@ def _get_upsampling2d_bilinear_model(self):
     x = tf.keras.layers.UpSampling2D(size=(1, 5), interpolation='bilinear')(i)
     return tf.keras.Model(i, x)
 
+  def _get_conv2d_transpose_model(self):
+    i = tf.keras.Input(shape=(32, 32, 3))
+    x = tf.keras.layers.Conv2DTranspose(
+        2, kernel_size=(3, 3), strides=(2, 2))(
+            i)
+    return tf.keras.Model(i, x)
+
   @parameterized.parameters([
       _get_single_conv_model, _get_single_dense_model,
       _get_single_conv_relu_model, _get_stacked_convs_model,
@@ -165,6 +172,7 @@ def _get_upsampling2d_bilinear_model(self):
       # TODO(tfmot): There are gaps between ResizeBilinear with FakeQuant and
       # TFLite quantized ResizeBilinear op. It has a bit more quantization
      # error than other ops in this test now.
+      _get_conv2d_transpose_model,
   ])
   def testModelEndToEnd(self, model_fn):
     # 1. Check whether quantized model graph can be constructed.
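The end-to-end test exercises both the quantize-aware Keras graph and its TFLite counterpart; adding _get_conv2d_transpose_model to the parameter list covers the newly registered layer on that path. Roughly the kind of conversion step involved, as a hedged sketch with assumed converter flags (not the test's actual code):

```python
import tensorflow as tf
import tensorflow_model_optimization as tfmot

def quantize_and_convert(model):
  quantized_model = tfmot.quantization.keras.quantize_model(model)
  # With quantization-aware training, the fake-quant ops carry the ranges the
  # TFLite converter needs in order to emit a quantized model.
  converter = tf.lite.TFLiteConverter.from_keras_model(quantized_model)
  converter.optimizations = [tf.lite.Optimize.DEFAULT]
  return converter.convert()
```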

tensorflow_model_optimization/python/core/quantization/keras/quantize_functional_test.py

Lines changed: 0 additions & 1 deletion
@@ -298,7 +298,6 @@ class QuantizeFullIntegerModelTest(tf.test.TestCase, parameterized.TestCase):
           layers.UpSampling3D,
           # Not done since not registered since not per-axis yet.
           layers.Conv1D,
-          layers.Conv2DTranspose,
       ]
   ])
   def testQuantizeSingleLayer_ProducesFullIntegerModel_TF2(

tensorflow_model_optimization/python/core/sparsity/keras/BUILD

Lines changed: 1 addition & 0 deletions
@@ -91,6 +91,7 @@ py_library(
         # numpy dep1,
         # tensorflow dep1,
         # python/keras/utils:generic_utils tensorflow dep2,
+        "//tensorflow_model_optimization/python/core/keras:compat",
         "//tensorflow_model_optimization/python/core/keras:utils",
     ],
 )
