
Commit 9f653a6

Use rst code-block over code directive (#7882)
* use rst code-block

* Update pymc/distributions/multivariate.py

Co-authored-by: Ricardo Vieira <[email protected]>

* run pre-commit

---------

Co-authored-by: Ricardo Vieira <[email protected]>
1 parent 0960323 commit 9f653a6

12 files changed: 91 additions & 82 deletions

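For context on the rename this commit applies throughout the docstrings: ``code`` is the docutils spelling of the literal-code directive, while ``code-block`` is the spelling the Sphinx documentation uses, and the diffs below switch every occurrence to the latter. The extra churn in the Python examples (double quotes, spacing around operators) comes from the "run pre-commit" step named in the commit message. A minimal before/after sketch of the directive change (illustrative only, not taken from any of the changed files):

Before:

.. code:: python

   x = pm.Normal.dist(0.0, 1.0)

After:

.. code-block:: python

   x = pm.Normal.dist(0.0, 1.0)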

docs/source/contributing/developer_guide.md

Lines changed: 6 additions & 6 deletions
@@ -245,7 +245,7 @@ usually created in order to optimise performance. But getting a
 possible (see also in
 {ref}`doc <pymc_overview##Transformed-distributions-and-changes-of-variables>`):

-.. code:: python
+.. code-block:: python


 lognorm = Exp().apply(pm.Normal.dist(0., 1.))
@@ -262,15 +262,15 @@ Now, back to ``model.RV(...)`` - things returned from ``model.RV(...)``
 are PyTensor tensor variables, and it is clear from looking at
 ``TransformedRV``:

-.. code:: python
+.. code-block:: python

 class TransformedRV(TensorVariable):
 ...

 as for ``FreeRV`` and ``ObservedRV``, they are ``TensorVariable``\s with
 ``Factor`` as mixin:

-.. code:: python
+.. code-block:: python

 class FreeRV(Factor, TensorVariable):
 ...
@@ -283,7 +283,7 @@ distribution into a ``TransformedDistribution``, and then ``model.Var`` is
 called again to added the RV associated with the
 ``TransformedDistribution`` as a ``FreeRV``:

-.. code:: python
+.. code-block:: python

 ...
 self.transformed = model.Var(
@@ -295,13 +295,13 @@ only add one ``FreeRV``. In another word, you *cannot* do chain
 transformation by nested applying multiple transforms to a Distribution
 (however, you can use ``Chain`` transformation.

-.. code:: python
+.. code-block:: python

 z = pm.LogNormal.dist(mu=0., sigma=1., transform=tr.Log)
 z.transform # ==> pymc.distributions.transforms.Log


-.. code:: python
+.. code-block:: python

 z2 = Exp().apply(z)
 z2.transform is None # ==> True
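One practical upside of standardizing on ``code-block`` is that it is the directive under which Sphinx documents its highlighting options, such as ``:caption:`` and ``:emphasize-lines:``. A short illustrative sketch of those options (not part of this commit):

.. code-block:: python
   :caption: toy example
   :emphasize-lines: 2

   x = np.linspace(0, 1, 10)
   y = x ** 2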

pymc/distributions/discrete.py

Lines changed: 1 addition & 1 deletion
@@ -1357,7 +1357,7 @@ class OrderedProbit:

 Examples
 --------
-.. code:: python
+.. code-block:: python

 # Generate data for a simple 1 dimensional example problem
 n1_c = 300; n2_c = 300; n3_c = 300

pymc/distributions/multivariate.py

Lines changed: 33 additions & 33 deletions
@@ -1390,7 +1390,7 @@ class LKJCholeskyCov:

 Examples
 --------
-.. code:: python
+.. code-block:: python

 with pm.Model() as model:
 # Note that we access the distribution for the standard
@@ -1682,28 +1682,25 @@ class LKJCorr:

 Examples
 --------
-.. code:: python
+.. code-block:: python

 with pm.Model() as model:
-
 # Define the vector of fixed standard deviations
-sds = 3*np.ones(10)
+sds = 3 * np.ones(10)

-corr = pm.LKJCorr(
-    'corr', eta=4, n=10, return_matrix=True
-)
+corr = pm.LKJCorr("corr", eta=4, n=10, return_matrix=True)

 # Define a new MvNormal with the given correlation matrix
-vals = sds*pm.MvNormal('vals', mu=np.zeros(10), cov=corr, shape=10)
+vals = sds * pm.MvNormal("vals", mu=np.zeros(10), cov=corr, shape=10)

 # Or transform an uncorrelated normal distribution:
-vals_raw = pm.Normal('vals_raw', shape=10)
+vals_raw = pm.Normal("vals_raw", shape=10)
 chol = pt.linalg.cholesky(corr)
-vals = sds*pt.dot(chol,vals_raw)
+vals = sds * pt.dot(chol, vals_raw)

 # The matrix is internally still sampled as a upper triangular vector
 # If you want access to it in matrix form in the trace, add
-pm.Deterministic('corr_mat', corr)
+pm.Deterministic("corr_mat", corr)


 References
@@ -1797,7 +1794,7 @@ class MatrixNormal(Continuous):
 Define a matrixvariate normal variable for given row and column covariance
 matrices.

-.. code:: python
+.. code-block:: python

 import pymc as pm
 import numpy as np
@@ -1820,16 +1817,20 @@ class MatrixNormal(Continuous):
 constant, both the covariance and scaling could be learned as follows
 (see the docstring of `LKJCholeskyCov` for more information about this)

-.. code:: python
+.. code-block:: python

 # Setup data
-true_colcov = np.array([[1.0, 0.5, 0.1],
-                        [0.5, 1.0, 0.2],
-                        [0.1, 0.2, 1.0]])
+true_colcov = np.array(
+    [
+        [1.0, 0.5, 0.1],
+        [0.5, 1.0, 0.2],
+        [0.1, 0.2, 1.0],
+    ]
+)
 m = 3
 n = true_colcov.shape[0]
 true_scale = 3
-true_rowcov = np.diag([true_scale**(2*i) for i in range(m)])
+true_rowcov = np.diag([true_scale ** (2 * i) for i in range(m)])
 mu = np.zeros((m, n))
 true_kron = np.kron(true_rowcov, true_colcov)
 data = np.random.multivariate_normal(mu.flatten(), true_kron)
@@ -1838,13 +1839,12 @@ class MatrixNormal(Continuous):
 with pm.Model() as model:
 # Setup right cholesky matrix
 sd_dist = pm.HalfCauchy.dist(beta=2.5, shape=3)
-colchol,_,_ = pm.LKJCholeskyCov('colchol', n=3, eta=2,sd_dist=sd_dist)
+colchol, _, _ = pm.LKJCholeskyCov("colchol", n=3, eta=2, sd_dist=sd_dist)
 # Setup left covariance matrix
-scale = pm.LogNormal('scale', mu=np.log(true_scale), sigma=0.5)
-rowcov = pt.diag([scale**(2*i) for i in range(m)])
+scale = pm.LogNormal("scale", mu=np.log(true_scale), sigma=0.5)
+rowcov = pt.diag([scale ** (2 * i) for i in range(m)])

-vals = pm.MatrixNormal('vals', mu=mu, colchol=colchol, rowcov=rowcov,
-                       observed=data)
+vals = pm.MatrixNormal("vals", mu=mu, colchol=colchol, rowcov=rowcov, observed=data)
 """

 rv_op = matrixnormal
@@ -2010,30 +2010,30 @@ class KroneckerNormal(Continuous):
 Define a multivariate normal variable with a covariance
 :math:`K = K_1 \otimes K_2`

-.. code:: python
+.. code-block:: python

-K1 = np.array([[1., 0.5], [0.5, 2]])
-K2 = np.array([[1., 0.4, 0.2], [0.4, 2, 0.3], [0.2, 0.3, 1]])
+K1 = np.array([[1.0, 0.5], [0.5, 2]])
+K2 = np.array([[1.0, 0.4, 0.2], [0.4, 2, 0.3], [0.2, 0.3, 1]])
 covs = [K1, K2]
 N = 6
 mu = np.zeros(N)
 with pm.Model() as model:
-vals = pm.KroneckerNormal('vals', mu=mu, covs=covs, shape=N)
+vals = pm.KroneckerNormal("vals", mu=mu, covs=covs, shape=N)

 Efficiency gains are made by cholesky decomposing :math:`K_1` and
 :math:`K_2` individually rather than the larger :math:`K` matrix. Although
 only two matrices :math:`K_1` and :math:`K_2` are shown here, an arbitrary
 number of submatrices can be combined in this way. Choleskys and
 eigendecompositions can be provided instead

-.. code:: python
+.. code-block:: python

 chols = [np.linalg.cholesky(Ki) for Ki in covs]
 evds = [np.linalg.eigh(Ki) for Ki in covs]
 with pm.Model() as model:
-vals2 = pm.KroneckerNormal('vals2', mu=mu, chols=chols, shape=N)
+vals2 = pm.KroneckerNormal("vals2", mu=mu, chols=chols, shape=N)
 # or
-vals3 = pm.KroneckerNormal('vals3', mu=mu, evds=evds, shape=N)
+vals3 = pm.KroneckerNormal("vals3", mu=mu, evds=evds, shape=N)

 neither of which will be converted. Diagonal noise can also be added to
 the covariance matrix, :math:`K = K_1 \otimes K_2 + \sigma^2 I_N`.
@@ -2042,13 +2042,13 @@ class KroneckerNormal(Continuous):
 utilizing eigendecompositons of the submatrices behind the scenes [1].
 Thus,

-.. code:: python
+.. code-block:: python

 sigma = 0.1
 with pm.Model() as noise_model:
-vals = pm.KroneckerNormal('vals', mu=mu, covs=covs, sigma=sigma, shape=N)
-vals2 = pm.KroneckerNormal('vals2', mu=mu, chols=chols, sigma=sigma, shape=N)
-vals3 = pm.KroneckerNormal('vals3', mu=mu, evds=evds, sigma=sigma, shape=N)
+vals = pm.KroneckerNormal("vals", mu=mu, covs=covs, sigma=sigma, shape=N)
+vals2 = pm.KroneckerNormal("vals2", mu=mu, chols=chols, sigma=sigma, shape=N)
+vals3 = pm.KroneckerNormal("vals3", mu=mu, evds=evds, sigma=sigma, shape=N)

 are identical, with `covs` and `chols` each converted to
 eigendecompositions.

pymc/gp/cov.py

Lines changed: 1 addition & 1 deletion
@@ -956,7 +956,7 @@ class WrappedPeriodic(Covariance):
 In order to construct a kernel equivalent to the `Periodic` kernel you
 can do the following (though using `Periodic` will likely be a bit faster):

-.. code:: python
+.. code-block:: python

 exp_quad = pm.gp.cov.ExpQuad(1, ls=0.5)
 cov = pm.gp.cov.WrappedPeriodic(exp_quad, period=5)

pymc/gp/gp.py

Lines changed: 8 additions & 8 deletions
@@ -98,7 +98,7 @@ class Latent(Base):

 Examples
 --------
-.. code:: python
+.. code-block:: python

 # A one dimensional column vector of inputs.
 X = np.linspace(0, 1, 10)[:, None]
@@ -420,7 +420,7 @@ class Marginal(Base):

 Examples
 --------
-.. code:: python
+.. code-block:: python

 # A one dimensional column vector of inputs.
 X = np.linspace(0, 1, 10)[:, None]
@@ -691,7 +691,7 @@ class MarginalApprox(Marginal):

 Examples
 --------
-.. code:: python
+.. code-block:: python

 # A one dimensional column vector of inputs.
 X = np.linspace(0, 1, 10)[:, None]
@@ -931,7 +931,7 @@ class LatentKron(Base):

 Examples
 --------
-.. code:: python
+.. code-block:: python

 # One dimensional column vectors of inputs
 X1 = np.linspace(0, 1, 10)[:, None]
@@ -1034,7 +1034,7 @@ def conditional(self, name, Xnew, jitter=JITTER_DEFAULT, **kwargs):
 1, and 4, respectively, then `Xnew` must have 7 columns and a
 covariance between the prediction points

-.. code:: python
+.. code-block:: python

 cov_func(Xnew) = cov_func1(Xnew[:, :2]) * cov_func1(Xnew[:, 2:3]) * cov_func1(Xnew[:, 3:])

@@ -1086,13 +1086,13 @@ class MarginalKron(Base):

 Examples
 --------
-.. code:: python
+.. code-block:: python

 # One dimensional column vectors of inputs
 X1 = np.linspace(0, 1, 10)[:, None]
 X2 = np.linspace(0, 2, 5)[:, None]
 Xs = [X1, X2]
-y = np.random.randn(len(X1)*len(X2)) # toy data
+y = np.random.randn(len(X1) * len(X2)) # toy data
 with pm.Model() as model:
 # Specify the covariance functions for each Xi
 cov_func1 = pm.gp.cov.ExpQuad(1, ls=0.1) # Must accept X1 without error
@@ -1234,7 +1234,7 @@ def conditional(self, name, Xnew, pred_noise=False, diag=False, **kwargs):
 1, and 4, respectively, then `Xnew` must have 7 columns and a
 covariance between the prediction points

-.. code:: python
+.. code-block:: python

 cov_func(Xnew) = cov_func1(Xnew[:, :2]) * cov_func1(Xnew[:, 2:3]) * cov_func1(Xnew[:, 3:])


pymc/gp/hsgp_approx.py

Lines changed: 5 additions & 6 deletions
@@ -215,7 +215,7 @@ class HSGP(Base):

 Examples
 --------
-.. code:: python
+.. code-block:: python

 # A three dimensional column vector of inputs.
 X = np.random.rand(100, 3)
@@ -357,7 +357,7 @@ def prior_linearized(self, X: TensorLike):

 Examples
 --------
-.. code:: python
+.. code-block:: python

 # A one dimensional column vector of inputs.
 X = np.linspace(0, 10, 100)[:, None]
@@ -544,7 +544,7 @@ class HSGPPeriodic(Base):

 Examples
 --------
-.. code:: python
+.. code-block:: python

 # A three dimensional column vector of inputs.
 X = np.random.rand(100, 3)
@@ -640,7 +640,7 @@ def prior_linearized(self, X: TensorLike):

 Examples
 --------
-.. code:: python
+.. code-block:: python

 # A one dimensional column vector of inputs.
 X = np.linspace(0, 10, 100)[:, None]
@@ -665,8 +665,7 @@ def prior_linearized(self, X: TensorLike):

 # The (non-centered) GP approximation is given by
 f = pm.Deterministic(
-    "f",
-    phi_cos @ (psd * beta[:m]) + phi_sin[..., 1:] @ (psd[1:] * beta[m:])
+    "f", phi_cos @ (psd * beta[:m]) + phi_sin[..., 1:] @ (psd[1:] * beta[m:])
 )
 ...

