Commit 66773c2

Fix typos, mark visibility private for internal files.
Change-Id: I02b34dd4c91dccc0abcde2fa7bf6193bf5615b61
1 parent bfa7b3c commit 66773c2

3 files changed: 18 additions & 16 deletions

tensorflow_model_optimization/python/core/quantization/keras/prune_preserve/BUILD

Lines changed: 2 additions & 0 deletions
@@ -21,6 +21,7 @@ py_library(
         "prune_preserve_quantize_registry.py",
     ],
     srcs_version = "PY3",
+    visibility = ["//visibility:private"],
     deps = [
         # tensorflow dep1,
         "//tensorflow_model_optimization/python/core/quantization/keras/default_8bit:default_8bit_quantizers",
@@ -34,6 +35,7 @@ py_test(
         "prune_preserve_quantize_registry_test.py",
     ],
     python_version = "PY3",
+    visibility = ["//visibility:private"],
     deps = [
         ":prune_preserve_quantize_registry",
         # tensorflow dep1,

tensorflow_model_optimization/python/core/quantization/keras/prune_preserve/prune_preserve_quantize_registry.py

Lines changed: 15 additions & 15 deletions
@@ -90,9 +90,9 @@ def __init__(self):
 
     self._config_quantizer_map = {
         'Default8BitQuantizeConfig':
-            PrunePerserveDefault8BitWeightsQuantizer(),
+            PrunePreserveDefault8BitWeightsQuantizer(),
         'Default8BitConvQuantizeConfig':
-            PrunePerserveDefault8BitConvWeightsQuantizer(),
+            PrunePreserveDefault8BitConvWeightsQuantizer(),
     }
 
   @classmethod
@@ -224,7 +224,7 @@ def get_quantize_config(self, layer):
     Returns:
       Returns the quantization config with sparsity preserve weight_quantizer.
     """
-    quantize_config = default_8bit_quantize_registry.QuantizeRegistry(
+    quantize_config = default_8bit_quantize_registry.Default8BitQuantizeRegistry(
     ).get_quantize_config(layer)
     prune_aware_quantize_config = super(
         Default8bitPrunePreserveQuantizeRegistry,
@@ -233,10 +233,10 @@ def get_quantize_config(self, layer):
     return prune_aware_quantize_config
 
 
-class PrunePerserveDefaultWeightsQuantizer(quantizers.LastValueQuantizer):
+class PrunePreserveDefaultWeightsQuantizer(quantizers.LastValueQuantizer):
   """Quantize weights while preserve sparsity."""
   def __init__(self, num_bits, per_axis, symmetric, narrow_range):
-    """PrunePerserveDefaultWeightsQuantizer
+    """PrunePreserveDefaultWeightsQuantizer
 
     Args:
       num_bits: Number of bits for quantization
@@ -249,7 +249,7 @@ def __init__(self, num_bits, per_axis, symmetric, narrow_range):
         range has 0 as the centre.
     """
 
-    super(PrunePerserveDefaultWeightsQuantizer, self).__init__(
+    super(PrunePreserveDefaultWeightsQuantizer, self).__init__(
         num_bits=num_bits,
         per_axis=per_axis,
         symmetric=symmetric,
@@ -276,7 +276,7 @@ def build(self, tensor_shape, name, layer):
     """
     result = self._build_sparsity_mask(name, layer)
     result.update(
-        super(PrunePerserveDefaultWeightsQuantizer,
+        super(PrunePreserveDefaultWeightsQuantizer,
               self).build(tensor_shape, name, layer))
     return result
 
@@ -308,27 +308,27 @@ def __call__(self, inputs, training, weights, **kwargs):
     )
 
 
-class PrunePerserveDefault8BitWeightsQuantizer(
-    PrunePerserveDefaultWeightsQuantizer):
-  """PrunePerserveWeightsQuantizer for default 8bit weights"""
+class PrunePreserveDefault8BitWeightsQuantizer(
+    PrunePreserveDefaultWeightsQuantizer):
+  """PrunePreserveWeightsQuantizer for default 8bit weights"""
   def __init__(self):
-    super(PrunePerserveDefault8BitWeightsQuantizer,
+    super(PrunePreserveDefault8BitWeightsQuantizer,
           self).__init__(num_bits=8,
                          per_axis=False,
                          symmetric=True,
                          narrow_range=True)
 
 
-class PrunePerserveDefault8BitConvWeightsQuantizer(
-    PrunePerserveDefaultWeightsQuantizer,
+class PrunePreserveDefault8BitConvWeightsQuantizer(
+    PrunePreserveDefaultWeightsQuantizer,
     default_8bit_quantizers.Default8BitConvWeightsQuantizer,
 ):
-  """PrunePerserveWeightsQuantizer for default 8bit Conv2D/DepthwiseConv2D weights"""
+  """PrunePreserveWeightsQuantizer for default 8bit Conv2D/DepthwiseConv2D weights"""
   def __init__(self):
     default_8bit_quantizers.Default8BitConvWeightsQuantizer.__init__(self)
 
   def build(self, tensor_shape, name, layer):
-    result = PrunePerserveDefaultWeightsQuantizer._build_sparsity_mask(
+    result = PrunePreserveDefaultWeightsQuantizer._build_sparsity_mask(
         self, name, layer)
     result.update(
         default_8bit_quantizers.Default8BitConvWeightsQuantizer.build(

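The renamed PrunePreserve* quantizers above preserve sparsity by building a mask from the pruned (zeroed) weights, via _build_sparsity_mask, so that those weights stay zero through quantization. The snippet below is a conceptual sketch of that idea only, not the library's implementation; the helper name quantize_preserving_sparsity and the use of tf.quantization.fake_quant_with_min_max_args with a fixed range are illustrative assumptions.

```python
# Conceptual sketch only: illustrates the sparsity-preserving idea, not the
# actual PrunePreserveDefaultWeightsQuantizer (which builds its mask in build()
# and reuses LastValueQuantizer for the quantization itself).
import tensorflow as tf


def quantize_preserving_sparsity(weights, quantize_fn):
  """Quantizes `weights` while keeping pruned (zero) entries at exactly zero."""
  # Mask of the currently non-zero weights; pruned positions contribute 0.
  sparsity_mask = tf.cast(tf.math.not_equal(weights, 0.0), weights.dtype)
  # Quantize as usual, then re-apply the mask so pruned weights remain zero.
  return quantize_fn(weights) * sparsity_mask


# Hypothetical usage: 8-bit symmetric fake-quantization over a fixed range.
w = tf.constant([[0.0, 0.5], [-0.25, 0.0]])
q = quantize_preserving_sparsity(
    w,
    lambda x: tf.quantization.fake_quant_with_min_max_args(
        x, min=-1.0, max=1.0, num_bits=8, narrow_range=True))
```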
tensorflow_model_optimization/python/core/quantization/keras/prune_preserve/prune_preserve_quantize_registry_test.py

Lines changed: 1 addition & 1 deletion
@@ -108,7 +108,7 @@ def testRaisesError_Unsupported_QuantizeConfigWithLayer(self):
 class PrunePreserveDefault8bitQuantizeRegistryTest(tf.test.TestCase):
   def setUp(self):
     super(PrunePreserveDefault8bitQuantizeRegistryTest, self).setUp()
-    self.default_8bit_quantize_registry = default_8bit_quantize_registry.QuantizeRegistry(
+    self.default_8bit_quantize_registry = default_8bit_quantize_registry.Default8BitQuantizeRegistry(
     )
     self.prune_registry = prune_registry.PruneRegistry()
     self.prune_preserve_quantize_registry = prune_preserve_quantize_registry.PrunePreserveQuantizeRegistry(

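For context, a minimal usage sketch of the renamed registry follows. It is an assumption-laden illustration, not documented API: the import path mirrors the file path in this commit, the no-argument constructor and the supports() check are inferred from the base registry interface rather than shown in the diff, and this commit marks these BUILD targets //visibility:private, i.e. internal.

```python
# Minimal sketch, not documented API: the module path follows the file path in
# this commit, and the no-arg constructor and supports() helper are assumptions
# inferred from the base registry interface. These targets are also marked
# //visibility:private in this commit, i.e. internal to the library.
import tensorflow as tf

from tensorflow_model_optimization.python.core.quantization.keras.prune_preserve import (
    prune_preserve_quantize_registry)

registry = (
    prune_preserve_quantize_registry.Default8bitPrunePreserveQuantizeRegistry())

layer = tf.keras.layers.Dense(10)
if registry.supports(layer):  # assumed to mirror the base QuantizeRegistry API
  # Per the docstring in the diff above: returns the default 8-bit quantize
  # config for the layer, with its weight quantizer replaced by a
  # sparsity-preserving PrunePreserve* quantizer.
  quantize_config = registry.get_quantize_config(layer)
```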