From 8c1970e3ecb8ec803cbb9d79dcf16ade07eb1e13 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Ball=C3=A9?= Date: Mon, 30 May 2022 11:09:54 -0700 Subject: [PATCH] Documentation tweaks. PiperOrigin-RevId: 451898744 Change-Id: I10b1f0900fd578c00cc5f4062cc34e98cbffda19 --- README.md | 3 +- .../python/distributions/deep_factorized.py | 2 +- .../python/distributions/round_adapters.py | 4 +-- tensorflow_compression/python/ops/__init__.py | 2 +- .../python/ops/round_ops.py | 30 +++++++++---------- 5 files changed, 21 insertions(+), 20 deletions(-) diff --git a/README.md b/README.md index 421f5d8..cc78b7f 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,8 @@ docs](https://www.tensorflow.org/api_docs/python/tfc) for details): - Additional TensorFlow functions and Keras layers that are useful in the context of learned data compression, such as methods to numerically find quantiles of density functions, take expectations with respect to dithering - noise, convolution layers with more flexible padding options, and an + noise, convolution layers with more flexible padding options and support for + reparameterizing kernels and biases in the Fourier domain, and an implementation of generalized divisive normalization (GDN). 
diff --git a/tensorflow_compression/python/distributions/deep_factorized.py b/tensorflow_compression/python/distributions/deep_factorized.py index ca2a897..fa11c57 100644 --- a/tensorflow_compression/python/distributions/deep_factorized.py +++ b/tensorflow_compression/python/distributions/deep_factorized.py @@ -261,7 +261,7 @@ def _parameter_properties(cls, dtype=tf.float32, num_classes=None): class NoisyDeepFactorized(uniform_noise.UniformNoiseAdapter): - """DeepFactorized that is convolved with uniform noise.""" + """`DeepFactorized` that is convolved with uniform noise.""" def __init__(self, name="NoisyDeepFactorized", **kwargs): super().__init__(DeepFactorized(**kwargs), name=name) diff --git a/tensorflow_compression/python/distributions/round_adapters.py b/tensorflow_compression/python/distributions/round_adapters.py index 4d1ac88..21a7c4a 100644 --- a/tensorflow_compression/python/distributions/round_adapters.py +++ b/tensorflow_compression/python/distributions/round_adapters.py @@ -214,7 +214,7 @@ def __init__(self, base, name="NoisyRoundAdapter"): class NoisyRoundedDeepFactorized(NoisyRoundAdapter): - """Rounded DeepFactorized + uniform noise.""" + """Rounded `DeepFactorized` + uniform noise.""" def __init__(self, name="NoisyRoundedDeepFactorized", **kwargs): prior = deep_factorized.DeepFactorized(**kwargs) @@ -276,7 +276,7 @@ def __init__(self, alpha=5.0, name="NoisySoftRoundedNormal", **kwargs): class NoisySoftRoundedDeepFactorized(NoisySoftRoundAdapter): - """Soft rounded deep factorized distribution + uniform noise.""" + """Soft rounded `DeepFactorized` + uniform noise.""" def __init__(self, alpha=5.0, diff --git a/tensorflow_compression/python/ops/__init__.py b/tensorflow_compression/python/ops/__init__.py index ee86f30..0bad7e9 100644 --- a/tensorflow_compression/python/ops/__init__.py +++ b/tensorflow_compression/python/ops/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the 
License. # ============================================================================== -"""Operations.""" +"""TensorFlow operations and functions.""" from tensorflow_compression.python.ops.gen_ops import * from tensorflow_compression.python.ops.math_ops import * diff --git a/tensorflow_compression/python/ops/round_ops.py b/tensorflow_compression/python/ops/round_ops.py index c521bc3..8316961 100644 --- a/tensorflow_compression/python/ops/round_ops.py +++ b/tensorflow_compression/python/ops/round_ops.py @@ -44,7 +44,7 @@ def round_st(inputs, offset=None): def soft_round(x, alpha, eps=1e-3): - """Differentiable approximation to round(). + """Differentiable approximation to `round`. Larger alphas correspond to closer approximations of the round function. If alpha is close to zero, this function reduces to the identity. @@ -55,12 +55,12 @@ def soft_round(x, alpha, eps=1e-3): > https://arxiv.org/abs/2006.09952 Args: - x: tf.Tensor. Inputs to the rounding function. - alpha: Float or tf.Tensor. Controls smoothness of the approximation. - eps: Float. Threshold below which soft_round() will return identity. + x: `tf.Tensor`. Inputs to the rounding function. + alpha: Float or `tf.Tensor`. Controls smoothness of the approximation. + eps: Float. Threshold below which `soft_round` will return identity. Returns: - tf.Tensor + `tf.Tensor` """ # This guards the gradient of tf.where below against NaNs, while maintaining # correctness, as for alpha < eps the result is ignored. @@ -76,7 +76,7 @@ def soft_round(x, alpha, eps=1e-3): def soft_round_inverse(y, alpha, eps=1e-3): - """Inverse of soft_round(). + """Inverse of `soft_round`. This is described in Sec. 4.1. in the paper > "Universally Quantized Neural Compression"
@@ -84,13 +84,13 @@ def soft_round_inverse(y, alpha, eps=1e-3): > https://arxiv.org/abs/2006.09952 Args: - y: tf.Tensor. Inputs to this function. - alpha: Float or tf.Tensor. Controls smoothness of the approximation. - eps: Float. Threshold below which soft_round() is assumed to equal the + y: `tf.Tensor`. Inputs to this function. + alpha: Float or `tf.Tensor`. Controls smoothness of the approximation. + eps: Float. Threshold below which `soft_round` is assumed to equal the identity function. Returns: - tf.Tensor + `tf.Tensor` """ # This guards the gradient of tf.where below against NaNs, while maintaining # correctness, as for alpha < eps the result is ignored. @@ -108,11 +108,11 @@ def soft_round_inverse(y, alpha, eps=1e-3): return tf.where(alpha < eps, y, m + r, name="soft_round_inverse") -def soft_round_conditional_mean(inputs, alpha): +def soft_round_conditional_mean(y, alpha): """Conditional mean of inputs given noisy soft rounded values. Computes g(z) = E[Y | s(Y) + U = z] where s is the soft-rounding function, - U is uniform between -0.5 and 0.5 and `Y` is considered uniform when truncated + U is uniform between -0.5 and 0.5 and Y is considered uniform when truncated to the interval [z-0.5, z+0.5]. This is described in Sec. 4.1. in the paper @@ -121,10 +121,10 @@ def soft_round_conditional_mean(y, alpha): > https://arxiv.org/abs/2006.09952 Args: - inputs: The input tensor. - alpha: The softround alpha. + y: `tf.Tensor`. Inputs to this function. + alpha: Float or `tf.Tensor`. Controls smoothness of the approximation. Returns: - The conditional mean, of same shape as `inputs`. + The conditional mean, of same shape as `y`. """ - return soft_round_inverse(inputs - .5, alpha) + .5 + return soft_round_inverse(y - .5, alpha) + .5