Commit 5717745

Fix docs for sphynx
1 parent 59064b1 commit 5717745

File tree

5 files changed: +158, -142 lines

bayesflow/amortizers.py

Lines changed: 7 additions & 5 deletions
@@ -36,9 +36,9 @@ class AmortizedTarget(ABC):
     """An abstract interface for an amortized learned distribution. Children should
     implement the following public methods:

-    - compute_loss(self, input_dict, **kwargs)
-    - sample(input_dict, **kwargs)
-    - log_prob(input_dict, **kwargs)
+    1. ``compute_loss(self, input_dict, **kwargs)``
+    2. ``sample(input_dict, **kwargs)``
+    3. ``log_prob(input_dict, **kwargs)``
     """

     @abstractmethod
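For orientation, a minimal sketch of a child class satisfying this interface; the class name and the method bodies are illustrative placeholders, not part of this commit:

from bayesflow.amortizers import AmortizedTarget
import tensorflow as tf

class MyAmortizedTarget(AmortizedTarget):

    def compute_loss(self, input_dict, **kwargs):
        # A typical maximum-likelihood loss: negative mean log-density.
        return -tf.reduce_mean(self.log_prob(input_dict, **kwargs))

    def sample(self, input_dict, n_samples=100, **kwargs):
        # Draw n_samples from the amortized distribution (placeholder).
        raise NotImplementedError

    def log_prob(self, input_dict, **kwargs):
        # Evaluate the learned log-density (placeholder).
        raise NotImplementedError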
@@ -77,6 +77,8 @@ class AmortizedPosterior(tf.keras.Model, AmortizedTarget):
     [3] Jaini, P., Kobyzev, I., Yu, Y., & Brubaker, M. (2020, November).
     Tails of Lipschitz triangular flows.
     In International Conference on Machine Learning (pp. 4673-4681). PMLR.
+
+    Serves as an interface for learning ``p(parameters | data, context)``.
     """

     def __init__(self, inference_net, summary_net=None, latent_dist=None, latent_is_dynamic=False,
@@ -410,7 +412,7 @@ def _determine_summary_loss(self, loss_fun):
 
 class AmortizedLikelihood(tf.keras.Model, AmortizedTarget):
     """An interface for a surrogate model of a simulator, or an implicit likelihood
-    ``p(params | data, context).''
+    ``p(data | parameters, context)``.
     """

     def __init__(self, surrogate_net, latent_dist=None, **kwargs):
@@ -471,7 +473,7 @@ def call_loop(self, input_list, **kwargs):
         -------
         net_out or (net_out, summary_out) : tuple of tf.Tensor
             the outputs of ``inference_net(theta, summary_net(x, c_s), c_d)``, usually a latent variable and
-            log(det(Jacobian)), that is a tuple ``(z, log_det_J).
+            log(det(Jacobian)), that is a tuple ``(z, log_det_J)``.
         """

         outputs = []
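As a reminder of why the ``(z, log_det_J)`` pair is the useful output: under the change-of-variables formula, the model's log-density is recovered as ``log p(theta | .) = log p_z(z) + log_det_J``. A minimal sketch, assuming a standard-normal latent (tensorflow-probability is an assumption here, not something this diff imports):

import tensorflow as tf
import tensorflow_probability as tfp

# Standard-normal latent over a 4-dimensional space (illustrative choice).
latent_dist = tfp.distributions.MultivariateNormalDiag(loc=tf.zeros(4))

def log_prob_from_flow_output(z, log_det_J):
    # Change-of-variables: log p(theta | .) = log p_z(z) + log|det J|.
    return latent_dist.log_prob(z) + log_det_J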

bayesflow/inference_networks.py

Lines changed: 2 additions & 2 deletions
@@ -178,8 +178,8 @@ def forward(self, targets, condition, **kwargs):
 
     @tf.function
     def inverse(self, z, condition, **kwargs):
-        """Performs a reverse pass through the chain. Assumes only used
-        in inference mode, so **kwargs contains `training=False`."""
+        """Performs a reverse pass through the chain. Assumes that it is only used
+        in inference mode, so ``**kwargs`` contains ``training=False``."""

         # Add noise to target if using SoftFlow, use explicitly
         # not in call(), since methods are public
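A hedged usage sketch of the inverse pass this docstring describes; ``chain``, ``z_samples``, and ``conditions`` are placeholders, not objects defined in this diff:

# Inference-time inverse pass: the kwargs carry training=False so that
# training-only behavior (e.g., SoftFlow noise) is disabled.
theta_samples = chain.inverse(z_samples, conditions, training=False)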

bayesflow/mcmc.py

Lines changed: 44 additions & 42 deletions
@@ -27,31 +27,31 @@
 
 class MCMCSurrogateLikelihood:
     """An interface to provide likelihood evaluation and gradient estimation of a pre-trained
-    `AmortizedLikelihood` instance, which can be used in tandem with (HMC)-MCMC, as implemented,
-    for instance, in `PyMC3`.
+    ``AmortizedLikelihood`` instance, which can be used in tandem with (HMC)-MCMC, as implemented,
+    for instance, in ``PyMC3``.
     """

     @tf.function
     def __init__(self, amortized_likelihood, configurator=None, likelihood_postprocessor=None,
                  grad_postprocessor=None):
-        """Creates in instance of the surrogate likelihood using a pre-trained `AmortizedLikelihood` instance.
+        """Creates an instance of the surrogate likelihood using a pre-trained ``AmortizedLikelihood`` instance.

         Parameters
         ----------
         amortized_likelihood : bayesflow.amortized_inference.AmortizedLikelihood
             A pre-trained (invertible) inference network which processes the outputs of the generative model.
         configurator : callable, optional, default: None
-            A function that takes the input to the `log_likelihood` and `log_likelihood_grad`
+            A function that takes the input to the ``log_likelihood`` and ``log_likelihood_grad``
             calls and converts them to a dictionary containing the following mandatory keys,
             if DEFAULT_KEYS unchanged:
-            `observables` - the variables over which a condition density is learned (i.e., the observables)
-            `conditions` - the conditioning variables that the directly passed to the inference network
+            ``observables`` - the variables over which a conditional density is learned (i.e., the observables)
+            ``conditions`` - the conditioning variables that are directly passed to the inference network
             default: Return the first parameter - has to be a dictionary with the mentioned characteristics
         likelihood_postprocessor : callable, optional, default: None
             A function that takes the likelihood for each observable as an input. Can be used for aggregation
             default: sum all likelihood values and return a single value.
         grad_postprocessor : callable, optional, default: None
-            A function that takes the gradient for each value in `conditions` as returned by the preprocessor
+            A function that takes the gradient for each value in ``conditions`` as returned by the preprocessor
             default: Leave the values unchanged.
         """

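To make the configurator contract concrete, a sketch of a custom configurator and how it would be passed in; all names besides the two mandatory dictionary keys (assuming DEFAULT_KEYS is unchanged) are hypothetical:

import numpy as np

def my_configurator(params, data):
    # Map the raw call arguments onto the two mandatory keys.
    return {
        'observables': np.asarray(data, dtype=np.float32),
        'conditions': np.asarray(params, dtype=np.float32),
    }

surrogate = MCMCSurrogateLikelihood(trained_amortized_likelihood,
                                    configurator=my_configurator)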
@@ -84,15 +84,16 @@ def log_likelihood(self, *args, **kwargs):
 
         Parameters
         ----------
-        The parameters as expected by `configurator`. For the default configurator,
+        The parameters as expected by ``configurator``. For the default configurator,
         the first parameter has to be a dictionary containing the following mandatory keys,
         if DEFAULT_KEYS unchanged:
-        `observables` - the variables over which a condition density is learned (i.e., the observables)
-        `conditions` - the conditioning variables that the directly passed to the inference network
+        ``observables`` - the variables over which a conditional density is learned (i.e., the observables)
+        ``conditions`` - the conditioning variables that are directly passed to the inference network
+
         Returns
         -------
-        out :
-            The output as returned by `likelihood_postprocessor`. For the default postprocessor,
+        out : np.ndarray
+            The output as returned by ``likelihood_postprocessor``. For the default postprocessor,
             this is the total log-likelihood given by the sum of all log-likelihood values.
         """

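With the default configurator, the first positional argument is that dictionary itself; a short sketch, with placeholder arrays:

total_log_lik = surrogate.log_likelihood({
    'observables': observed_data,    # placeholder array of observations
    'conditions': candidate_params,  # placeholder parameter values
})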
@@ -103,21 +104,22 @@ def log_likelihood(self, *args, **kwargs):
 
     def log_likelihood_grad(self, *args, **kwargs):
         """Calculates the gradient of the surrogate likelihood with respect to
-        every parameter in `conditions`.
+        every parameter in ``conditions``.

         Parameters
         ----------
-        The parameters as expected by `configurator`. For the default configurator,
+        The parameters as expected by ``configurator``. For the default configurator,
         the first parameter has to be a dictionary containing the following mandatory keys,
-        if DEFAULT_KEYS unchanged:
-        `observables` - the variables over which a condition density is learned (i.e., the observables)
-        `conditions` - the conditioning variables that the directly passed to the inference network
+        if ``DEFAULT_KEYS`` unchanged:
+        ``observables`` - the variables over which a conditional density is learned (i.e., the observables)
+        ``conditions`` - the conditioning variables that are directly passed to the inference network
+
         Returns
         -------
-        out :
-            The output as returned by `grad_postprocessor`. For the default postprocessor,
-            this is an array containing the derivative with respect to each value in `conditions`
-            as returned by `configurator`.
+        out : np.ndarray
+            The output as returned by ``grad_postprocessor``. For the default postprocessor,
+            this is an array containing the derivative with respect to each value in ``conditions``
+            as returned by ``configurator``.
         """

         input_dict = self.configurator(*args, **kwargs)
@@ -130,14 +132,14 @@ def log_likelihood_grad(self, *args, **kwargs):
 
     @tf.function
     def _log_likelihood_grad(self, input_dict, **kwargs):
-        """Calculates the gradient with respect to every parameter contained in `input_dict`.
+        """Calculates the gradient with respect to every parameter contained in ``input_dict``.

         Parameters
         ----------
         input_dict : dict
             Input dictionary containing the following mandatory keys, if DEFAULT_KEYS unchanged:
-            `observables` - tf.constant: the variables over which a condition density is learned (i.e., the observables)
-            `conditions` - tf.Variable: the conditioning variables that the directly passed to the inference network
+            ``observables`` - tf.constant: the variables over which a conditional density is learned (i.e., the observables)
+            ``conditions`` - tf.Variable: the conditioning variables that are directly passed to the inference network

         Returns
         -------
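For readers wondering how such a gradient can be obtained, a minimal sketch of the standard TensorFlow mechanism (differentiate the summed log-likelihood with respect to the conditions via a gradient tape); this is illustrative, not necessarily the method's exact body:

import tensorflow as tf

def log_likelihood_grad_sketch(amortized_likelihood, input_dict):
    # tf.Variable inputs are watched automatically by GradientTape.
    conditions = tf.Variable(input_dict['conditions'])
    with tf.GradientTape() as tape:
        log_lik = tf.reduce_sum(
            amortized_likelihood.log_prob(
                {'observables': input_dict['observables'],
                 'conditions': conditions}
            )
        )
    return tape.gradient(log_lik, conditions)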
@@ -155,7 +157,7 @@ class _LogLikGrad(at.Op):
     """Custom log-likelihood operator, based on:
     https://www.pymc.io/projects/examples/en/latest/case_studies/blackbox_external_likelihood_numpy.html#aesara-op-with-grad

-    This Op will execute the `log_lik_grad` function, supplying the gradient
+    This Op will execute the ``log_lik_grad`` function, supplying the gradient
     for the custom surrogate likelihood function.
     """

@@ -169,7 +171,7 @@ def __init__(self, log_lik_grad, observables, default_type=np.float64):
         ----------
         log_lik_grad : callable
             The object that provides the gradient of the log-likelihood.
-        observables : tf.constant, the shape depends on `log_lik_grad`.
+        observables : tf.constant, the shape depends on ``log_lik_grad``.
             The variables over which a conditional density is learned (i.e., the observables).
         default_type : np.dtype, optional, default: np.float64
             The default float type to use for the gradient vectors.
@@ -180,15 +182,15 @@ def __init__(self, log_lik_grad, observables, default_type=np.float64):
         self.default_type = default_type

     def perform(self, node, inputs, outputs):
-        """Computes gradients with respect to `inputs` (corresponding to the parameters of a model).
+        """Computes gradients with respect to ``inputs`` (corresponding to the parameters of a model).

         Parameters
         ----------
-        node : The symbolic `aesara.graph.basic.Apply` node that represents this computation.
+        node : The symbolic ``aesara.graph.basic.Apply`` node that represents this computation.
         inputs : Immutable sequence of non-symbolic/numeric inputs. These are the values of each
-            `Variable` in `node.inputs`.
+            ``Variable`` in ``node.inputs``.
         outputs : List of mutable single-element lists (do not change the length of these lists).
-            Each sub-list corresponds to value of each `Variable` in `node.outputs`.
+            Each sub-list corresponds to the value of each ``Variable`` in ``node.outputs``.
             The primary purpose of this method is to set the values of these sub-lists.
         """

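A compact sketch of the Op pattern this docstring describes, following the linked PyMC "blackbox likelihood" recipe; ``log_lik_grad`` stands for whatever callable supplies the gradient:

import aesara.tensor as at
import numpy as np

class GradOpSketch(at.Op):
    itypes = [at.dvector]  # parameter vector in
    otypes = [at.dvector]  # gradient vector out

    def __init__(self, log_lik_grad):
        self.log_lik_grad = log_lik_grad

    def perform(self, node, inputs, outputs):
        (theta,) = inputs
        # Write the result into the mutable single-element output list.
        outputs[0][0] = np.asarray(self.log_lik_grad(theta), dtype=np.float64)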
@@ -208,28 +210,28 @@ class PyMCSurrogateLikelihood(at.Op, MCMCSurrogateLikelihood):
 
     def __init__(self, amortized_likelihood, observables, configurator=None, likelihood_postprocessor=None,
                  grad_postprocessor=None, default_pymc_type=np.float64, default_tf_type=np.float32):
-        """A custom surrogate likelihood function for integration with `PyMC3`, to be used with pymc.Potential
+        """A custom surrogate likelihood function for integration with ``PyMC3``, to be used with pymc.Potential.

         Parameters
         ----------
         amortized_likelihood : bayesflow.amortized_inference.AmortizedLikelihood
             A pre-trained (invertible) inference network which processes the outputs of the generative model.
         observables :
             The "observed" data that will be passed to the configurator.
-            For the default `configurator`, an np.array of shape (N, x_dim).
+            For the default ``configurator``, an np.array of shape (N, x_dim).
         configurator : callable, optional, default None
             A function that takes the input to the log_likelihood and log_likelihood_grad
             calls and converts it to a dictionary containing the following mandatory keys,
             if DEFAULT_KEYS unchanged:
-            `observables` - the variables over which a condition density is learned (i.e., the observables)
-            `conditions` - the conditioning variables that the directly passed to the inference network
-            default behavior: convert `observables` to shape (1, N, x_dim),
+            ``observables`` - the variables over which a conditional density is learned (i.e., the observables)
+            ``conditions`` - the conditioning variables that are directly passed to the inference network
+            default behavior: convert ``observables`` to shape (1, N, x_dim),
             expand parameters of shape (cond_dim) to shape (1, N, cond_dim)
         likelihood_postprocessor : callable, optional, default: None
             A function that takes the likelihood for each observable, can be used for aggregation
             default behavior: sum all likelihoods and return a single value
         grad_postprocessor : callable, optional, default: None
-            A function that takes the gradient for each value in `conditions` as returned by the preprocessor
+            A function that takes the gradient for each value in ``conditions`` as returned by the preprocessor
             default behavior: Reduce shape from (1, N, cond_dim) to (cond_dim) by summing the corresponding values
         default_pymc_type : np.dtype, optional, default: np.float64
             The default float type to use for numpy arrays as required by PyMC.
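A sketch of the default shape handling described above: observables of shape (N, x_dim) become (1, N, x_dim), and a parameter vector of shape (cond_dim,) is tiled to (1, N, cond_dim); the function name is illustrative:

import numpy as np

def default_configurator_sketch(params, observables):
    obs = observables[np.newaxis, ...]          # (N, x_dim) -> (1, N, x_dim)
    n_obs = observables.shape[0]
    cond = np.tile(params[np.newaxis, np.newaxis, :], (1, n_obs, 1))
    return {'observables': obs, 'conditions': cond}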
@@ -265,15 +267,15 @@ def _default_grad_postprocessor(self, grads):
         return tf.reduce_sum(grads[0], axis=0)

     def perform(self, node, inputs, outputs):
-        """Computes the log-likelihood of `inputs` (typically the parameter vector of a model).
+        """Computes the log-likelihood of ``inputs`` (typically the parameter vector of a model).

         Parameters
         ----------
-        node : The symbolic `aesara.graph.basic.Apply` node that represents this computation.
+        node : The symbolic ``aesara.graph.basic.Apply`` node that represents this computation.
         inputs : Immutable sequence of non-symbolic/numeric inputs. These are the values of each
-            `Variable` in `node.inputs`.
+            ``Variable`` in ``node.inputs``.
         outputs : List of mutable single-element lists (do not change the length of these lists).
-            Each sub-list corresponds to value of each `Variable` in `node.outputs`.
+            Each sub-list corresponds to the value of each ``Variable`` in ``node.outputs``.
             The primary purpose of this method is to set the values of these sub-lists.
         """

@@ -282,7 +284,7 @@ def perform(self, node, inputs, outputs):
         outputs[0][0] = np.array(logl, dtype=self.default_pymc_type)

     def grad(self, inputs, output_grads):
-        """Aggregates gradients with respect to `inputs` (typically the parameter vector)
+        """Aggregates gradients with respect to ``inputs`` (typically the parameter vector).

         Parameters
         ----------
@@ -291,7 +293,7 @@ def grad(self, inputs, output_grads):
 
         Returns
         -------
-        grads : The gradients with respect to each `Variable` in `inputs`.
+        grads : The gradients with respect to each ``Variable`` in ``inputs``.
         """

         # the method that calculates the gradients - it actually returns the