@@ -44,7 +44,7 @@ def round_st(inputs, offset=None):
44
44
45
45
46
46
def soft_round (x , alpha , eps = 1e-3 ):
47
- """Differentiable approximation to round() .
47
+ """Differentiable approximation to ` round` .
48
48
49
49
Larger alphas correspond to closer approximations of the round function.
50
50
If alpha is close to zero, this function reduces to the identity.
@@ -55,12 +55,12 @@ def soft_round(x, alpha, eps=1e-3):
55
55
> https://arxiv.org/abs/2006.09952
56
56
57
57
Args:
58
- x: tf.Tensor. Inputs to the rounding function.
59
- alpha: Float or tf.Tensor. Controls smoothness of the approximation.
60
- eps: Float. Threshold below which soft_round() will return identity.
58
+    x: `tf.Tensor`. Inputs to the rounding function.
59
+    alpha: Float or `tf.Tensor`. Controls smoothness of the approximation.
60
+    eps: Float. Threshold below which `soft_round` will return identity.
61
61
62
62
Returns:
63
- tf.Tensor
63
+    `tf.Tensor`
64
64
"""
65
65
# This guards the gradient of tf.where below against NaNs, while maintaining
66
66
# correctness, as for alpha < eps the result is ignored.
@@ -76,21 +76,21 @@ def soft_round(x, alpha, eps=1e-3):
76
76
77
77
78
78
def soft_round_inverse (y , alpha , eps = 1e-3 ):
79
- """Inverse of soft_round() .
79
+ """Inverse of ` soft_round` .
80
80
81
81
This is described in Sec. 4.1. in the paper
82
82
> "Universally Quantized Neural Compression"<br />
83
83
> Eirikur Agustsson & Lucas Theis<br />
84
84
> https://arxiv.org/abs/2006.09952
85
85
86
86
Args:
87
- y: tf.Tensor. Inputs to this function.
88
- alpha: Float or tf.Tensor. Controls smoothness of the approximation.
89
- eps: Float. Threshold below which soft_round() is assumed to equal the
87
+    y: `tf.Tensor`. Inputs to this function.
88
+    alpha: Float or `tf.Tensor`. Controls smoothness of the approximation.
89
+    eps: Float. Threshold below which `soft_round` is assumed to equal the
90
90
identity function.
91
91
92
92
Returns:
93
- tf.Tensor
93
+    `tf.Tensor`
94
94
"""
95
95
# This guards the gradient of tf.where below against NaNs, while maintaining
96
96
# correctness, as for alpha < eps the result is ignored.
@@ -108,11 +108,11 @@ def soft_round_inverse(y, alpha, eps=1e-3):
108
108
return tf .where (alpha < eps , y , m + r , name = "soft_round_inverse" )
109
109
110
110
111
def soft_round_conditional_mean(y, alpha):
  """Conditional mean of inputs given noisy soft rounded values.

  Computes g(z) = E[Y | s(Y) + U = z] where s is the soft-rounding function,
  U is uniform between -0.5 and 0.5 and Y is considered uniform when truncated
  to the interval [z-0.5, z+0.5].

  This is described in Sec. 4.1. in the paper
  > "Universally Quantized Neural Compression"<br />
  > Eirikur Agustsson & Lucas Theis<br />
  > https://arxiv.org/abs/2006.09952

  Args:
    y: `tf.Tensor`. Inputs to this function.
    alpha: Float or `tf.Tensor`. Controls smoothness of the approximation.

  Returns:
    The conditional mean, of same shape as `y`.
  """
  # Shift by -0.5 to undo the centered uniform noise, invert the
  # soft-rounding, then shift back; this realizes g(z) in closed form.
  return soft_round_inverse(y - .5, alpha) + .5
0 commit comments