Skip to content

Commit 5ef40db

Browse files
Johannes Ballé and copybara-github
authored and committed
Replaces "Arguments" with "Args" to be consistent with TF PR #45420.
PiperOrigin-RevId: 352505520 Change-Id: I3810dde4fb012701febe61ec1b27e8198916cba6
1 parent 5b890d8 commit 5ef40db

File tree

15 files changed

+61
-61
lines changed

15 files changed

+61
-61
lines changed

models/hific/archs.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -654,7 +654,7 @@ def call(self, latents, image_shape, mode: ModelMode) -> FactorizedPriorInfo:
654654
def estimate_entropy(entropy_model, inputs, spatial_shape=None) -> EntropyInfo:
655655
"""Compresses `inputs` with the given entropy model and estimates entropy.
656656
657-
Arguments:
657+
Args:
658658
entropy_model: An `EntropyModel` instance.
659659
inputs: The input tensor to be fed to the entropy model.
660660
spatial_shape: Shape of the input image (HxW). Must be provided for

models/toy_sources/compression_model.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ def ndim_source(self):
4646
def quantize(self, x):
4747
"""Determines an equivalent vector quantizer for `x`.
4848
49-
Arguments:
49+
Args:
5050
x: A batch of source vectors.
5151
5252
Returns:
@@ -61,7 +61,7 @@ def quantize(self, x):
6161
def train_losses(self, x):
6262
"""Computes the training losses for `x`.
6363
64-
Arguments:
64+
Args:
6565
x: A batch of source vectors.
6666
6767
Returns:
@@ -74,7 +74,7 @@ def train_losses(self, x):
7474
def test_losses(self, x):
7575
"""Computes the rate and distortion for each element of `x`.
7676
77-
Arguments:
77+
Args:
7878
x: A batch of source vectors.
7979
8080
Returns:

models/toy_sources/sawbridge.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ def __init__(self, index_points, stationary=True, order=1,
1212
name="sawbridge"):
1313
"""Initializer.
1414
15-
Arguments:
15+
Args:
1616
index_points: 1-D `Tensor` representing the locations at which to evaluate
1717
the process. The intent is that all locations are in [0,1], but the
1818
process has a natural extrapolation outside this range so no error is

tensorflow_compression/python/distributions/deep_factorized.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -69,7 +69,7 @@ def __init__(self,
6969
allow_nan_stats=False, dtype=tf.float32, name="DeepFactorized"):
7070
"""Initializer.
7171
72-
Arguments:
72+
Args:
7373
batch_shape: Iterable of integers. The desired batch shape for the
7474
`Distribution` (rightmost dimensions which are assumed independent, but
7575
not identically distributed).
@@ -164,7 +164,7 @@ def _broadcast_inputs(self, inputs):
164164
def _logits_cumulative(self, inputs):
165165
"""Evaluate logits of the cumulative densities.
166166
167-
Arguments:
167+
Args:
168168
inputs: The values at which to evaluate the cumulative densities.
169169
170170
Returns:

tensorflow_compression/python/distributions/helpers.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@ def estimate_tails(func, target, shape, dtype):
4242
This operation is vectorized. The tensor shape of `x` is given by `shape`, and
4343
`target` must have a shape that is broadcastable to the output of `func(x)`.
4444
45-
Arguments:
45+
Args:
4646
func: A callable that computes cumulative distribution function, survival
4747
function, or similar.
4848
target: The desired target value.
@@ -107,7 +107,7 @@ def quantization_offset(distribution):
107107
Note the offset is always in the range [-.5, .5] as it is assumed to be
108108
combined with a round quantizer.
109109
110-
Arguments:
110+
Args:
111111
distribution: A `tfp.distributions.Distribution` object.
112112
113113
Returns:
@@ -142,7 +142,7 @@ def lower_tail(distribution, tail_mass):
142142
functionality of the range coder implementation (using a Golomb-like
143143
universal code).
144144
145-
Arguments:
145+
Args:
146146
distribution: A `tfp.distributions.Distribution` object.
147147
tail_mass: Float between 0 and 1. Desired probability mass for the tails.
148148
@@ -178,7 +178,7 @@ def upper_tail(distribution, tail_mass):
178178
functionality of the range coder implementation (using a Golomb-like
179179
universal code).
180180
181-
Arguments:
181+
Args:
182182
distribution: A `tfp.distributions.Distribution` object.
183183
tail_mass: Float between 0 and 1. Desired probability mass for the tails.
184184

tensorflow_compression/python/distributions/round_adapters.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ class MonotonicAdapter(tfp.distributions.Distribution):
4444
def __init__(self, base, name="MonotonicAdapter"):
4545
"""Initializer.
4646
47-
Arguments:
47+
Args:
4848
base: A `tfp.distributions.Distribution` object representing a
4949
continuous-valued random variable.
5050
name: String. A name for this distribution.
@@ -196,7 +196,7 @@ class NoisyRoundAdapter(uniform_noise.UniformNoiseAdapter):
196196
def __init__(self, base, name="NoisyRoundAdapter"):
197197
"""Initializer.
198198
199-
Arguments:
199+
Args:
200200
base: A `tfp.distributions.Distribution` object representing a
201201
continuous-valued random variable.
202202
name: String. A name for this distribution.
@@ -225,7 +225,7 @@ class SoftRoundAdapter(MonotonicAdapter):
225225
def __init__(self, base, alpha, name="SoftRoundAdapter"):
226226
"""Initializer.
227227
228-
Arguments:
228+
Args:
229229
base: A `tfp.distributions.Distribution` object representing a
230230
continuous-valued random variable.
231231
alpha: Float or tf.Tensor. Controls smoothness of the approximation.
@@ -247,7 +247,7 @@ class NoisySoftRoundAdapter(uniform_noise.UniformNoiseAdapter):
247247
def __init__(self, base, alpha, name="NoisySoftRoundAdapter"):
248248
"""Initializer.
249249
250-
Arguments:
250+
Args:
251251
base: A `tfp.distributions.Distribution` object representing a
252252
continuous-valued random variable.
253253
alpha: Float or tf.Tensor. Controls smoothness of soft round.

tensorflow_compression/python/distributions/uniform_noise.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ def _logsum_expbig_minus_expsmall(big, small):
3636
This assumes `small <= big` and arguments that can be broadcast against each
3737
other.
3838
39-
Arguments:
39+
Args:
4040
big: Floating-point `tf.Tensor`.
4141
small: Floating-point `tf.Tensor`.
4242
@@ -73,7 +73,7 @@ class UniformNoiseAdapter(tfp.distributions.Distribution):
7373
def __init__(self, base, name="UniformNoiseAdapter"):
7474
"""Initializer.
7575
76-
Arguments:
76+
Args:
7777
base: A `tfp.distributions.Distribution` object representing a
7878
continuous-valued random variable.
7979
name: String. A name for this distribution.

tensorflow_compression/python/entropy_models/continuous_base.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ def __init__(
5151
):
5252
"""Initializes the instance.
5353
54-
Arguments:
54+
Args:
5555
prior: A `tfp.distributions.Distribution` object. A density model fitting
5656
the marginal distribution of the bottleneck data with additive uniform
5757
noise, which is shared a priori between the sender and the receiver. For
@@ -226,7 +226,7 @@ def _build_tables(self, prior):
226226
`compression=True`, and then distribute the model to a sender and a
227227
receiver.
228228
229-
Arguments:
229+
Args:
230230
prior: The `tfp.distributions.Distribution` object (see initializer).
231231
"""
232232
# TODO(jonycgn, relational): Consider not using offset when soft quantization
@@ -343,7 +343,7 @@ def get_config(self):
343343
def from_config(cls, config):
344344
"""Instantiates an entropy model from a configuration dictionary.
345345
346-
Arguments:
346+
Args:
347347
config: A `dict`, typically the output of `get_config`.
348348
349349
Returns:

tensorflow_compression/python/entropy_models/continuous_batched.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -83,7 +83,7 @@ def __init__(self,
8383
no_variables=False):
8484
"""Initializes the instance.
8585
86-
Arguments:
86+
Args:
8787
prior: A `tfp.distributions.Distribution` object. A density model fitting
8888
the marginal distribution of the bottleneck data with additive uniform
8989
noise, which is shared a priori between the sender and the receiver. For
@@ -167,7 +167,7 @@ def __call__(self, bottleneck, training=True):
167167
"""Perturbs a tensor with (quantization) noise and estimates bitcost.
168168
169169
170-
Arguments:
170+
Args:
171171
bottleneck: `tf.Tensor` containing the data to be compressed. Must have at
172172
least `self.coding_rank` dimensions, and the innermost dimensions must
173173
be broadcastable to `self.prior_shape`.
@@ -209,7 +209,7 @@ def quantize(self, bottleneck):
209209
The gradient of this rounding operation is overridden with the identity
210210
(straight-through gradient estimator).
211211
212-
Arguments:
212+
Args:
213213
bottleneck: `tf.Tensor` containing the data to be quantized. The innermost
214214
dimensions must be broadcastable to `self.prior_shape`.
215215
@@ -231,7 +231,7 @@ def compress(self, bottleneck):
231231
i.e. are compressed into one string each. Any additional dimensions to the
232232
left are treated as batch dimensions.
233233
234-
Arguments:
234+
Args:
235235
bottleneck: `tf.Tensor` containing the data to be compressed. Must have at
236236
least `self.coding_rank` dimensions, and the innermost dimensions must
237237
be broadcastable to `self.prior_shape`.
@@ -279,7 +279,7 @@ def decompress(self, strings, broadcast_shape):
279279
Reconstructs the quantized tensor from bit strings produced by `compress()`.
280280
It is necessary to provide a part of the output shape in `broadcast_shape`.
281281
282-
Arguments:
282+
Args:
283283
strings: `tf.Tensor` containing the compressed bit strings.
284284
broadcast_shape: Iterable of ints. The part of the output tensor shape
285285
between the shape of `strings` on the left and
@@ -338,7 +338,7 @@ def get_config(self):
338338
def from_config(cls, config):
339339
"""Instantiates an entropy model from a configuration dictionary.
340340
341-
Arguments:
341+
Args:
342342
config: A `dict`, typically the output of `get_config`.
343343
344344
Returns:

tensorflow_compression/python/entropy_models/continuous_indexed.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -140,7 +140,7 @@ def __init__(self,
140140
no_variables=False):
141141
"""Initializes the instance.
142142
143-
Arguments:
143+
Args:
144144
prior_fn: A callable returning a `tfp.distributions.Distribution` object,
145145
typically a `Distribution` class or factory function. This is a density
146146
model fitting the marginal distribution of the bottleneck data with
@@ -285,7 +285,7 @@ def __call__(self, bottleneck, indexes, training=True):
285285
"""Perturbs a tensor with (quantization) noise and estimates bitcost.
286286
287287
288-
Arguments:
288+
Args:
289289
bottleneck: `tf.Tensor` containing the data to be compressed.
290290
indexes: `tf.Tensor` specifying the scalar distribution for each element
291291
in `bottleneck`. See class docstring for examples.
@@ -340,7 +340,7 @@ def quantize(self, bottleneck, indexes):
340340
The gradient of this rounding operation is overridden with the identity
341341
(straight-through gradient estimator).
342342
343-
Arguments:
343+
Args:
344344
bottleneck: `tf.Tensor` containing the data to be quantized.
345345
indexes: `tf.Tensor` specifying the scalar distribution for each element
346346
in `bottleneck`. See class docstring for examples.
@@ -365,7 +365,7 @@ def compress(self, bottleneck, indexes):
365365
i.e. are compressed into one string each. Any additional dimensions to the
366366
left are treated as batch dimensions.
367367
368-
Arguments:
368+
Args:
369369
bottleneck: `tf.Tensor` containing the data to be compressed.
370370
indexes: `tf.Tensor` specifying the scalar distribution for each element
371371
in `bottleneck`. See class docstring for examples.
@@ -412,7 +412,7 @@ def decompress(self, strings, indexes):
412412
413413
Reconstructs the quantized tensor from bit strings produced by `compress()`.
414414
415-
Arguments:
415+
Args:
416416
strings: `tf.Tensor` containing the compressed bit strings.
417417
indexes: `tf.Tensor` specifying the scalar distribution for each output
418418
element. See class docstring for examples.
@@ -478,7 +478,7 @@ def __init__(self, prior_fn, num_scales, scale_fn, coding_rank,
478478
range_coder_precision=12, no_variables=False):
479479
"""Initializes the instance.
480480
481-
Arguments:
481+
Args:
482482
prior_fn: A callable returning a `tfp.distributions.Distribution` object,
483483
typically a `Distribution` class or factory function. This is a density
484484
model fitting the marginal distribution of the bottleneck data with
@@ -527,7 +527,7 @@ def __init__(self, prior_fn, num_scales, scale_fn, coding_rank,
527527
def __call__(self, bottleneck, scale_indexes, loc=None, training=True):
528528
"""Perturbs a tensor with (quantization) noise and estimates bitcost.
529529
530-
Arguments:
530+
Args:
531531
bottleneck: `tf.Tensor` containing the data to be compressed.
532532
scale_indexes: `tf.Tensor` indexing the scale parameter for each element
533533
in `bottleneck`. Must have the same shape as `bottleneck`.
@@ -568,7 +568,7 @@ def quantize(self, bottleneck, scale_indexes, loc=None):
568568
The gradient of this rounding operation is overridden with the identity
569569
(straight-through gradient estimator).
570570
571-
Arguments:
571+
Args:
572572
bottleneck: `tf.Tensor` containing the data to be quantized.
573573
scale_indexes: `tf.Tensor` indexing the scale parameter for each element
574574
in `bottleneck`. Must have the same shape as `bottleneck`.
@@ -598,7 +598,7 @@ def compress(self, bottleneck, scale_indexes, loc=None):
598598
i.e. are compressed into one string each. Any additional dimensions to the
599599
left are treated as batch dimensions.
600600
601-
Arguments:
601+
Args:
602602
bottleneck: `tf.Tensor` containing the data to be compressed.
603603
scale_indexes: `tf.Tensor` indexing the scale parameter for each element
604604
in `bottleneck`. Must have the same shape as `bottleneck`.
@@ -622,7 +622,7 @@ def decompress(self, strings, scale_indexes, loc=None):
622622
623623
Reconstructs the quantized tensor from bit strings produced by `compress()`.
624624
625-
Arguments:
625+
Args:
626626
strings: `tf.Tensor` containing the compressed bit strings.
627627
scale_indexes: `tf.Tensor` indexing the scale parameter for each output
628628
element.

0 commit comments

Comments (0)