Commit 53cdb86

use rst code-block
1 parent 011fb35 commit 53cdb86

12 files changed: +85 -82 lines changed


docs/source/contributing/developer_guide.md

Lines changed: 6 additions & 6 deletions
@@ -245,7 +245,7 @@ usually created in order to optimise performance. But getting a
 possible (see also in
 {ref}`doc <pymc_overview##Transformed-distributions-and-changes-of-variables>`):

-.. code:: python
+.. code-block:: python

 lognorm = Exp().apply(pm.Normal.dist(0., 1.))
@@ -262,15 +262,15 @@ Now, back to ``model.RV(...)`` - things returned from ``model.RV(...)``
 are PyTensor tensor variables, and it is clear from looking at
 ``TransformedRV``:

-.. code:: python
+.. code-block:: python

 class TransformedRV(TensorVariable):
     ...

 as for ``FreeRV`` and ``ObservedRV``, they are ``TensorVariable``\s with
 ``Factor`` as mixin:

-.. code:: python
+.. code-block:: python

 class FreeRV(Factor, TensorVariable):
     ...
@@ -283,7 +283,7 @@ distribution into a ``TransformedDistribution``, and then ``model.Var`` is
 called again to added the RV associated with the
 ``TransformedDistribution`` as a ``FreeRV``:

-.. code:: python
+.. code-block:: python

 ...
 self.transformed = model.Var(
@@ -295,13 +295,13 @@ only add one ``FreeRV``. In another word, you *cannot* do chain
 transformation by nested applying multiple transforms to a Distribution
 (however, you can use ``Chain`` transformation.

-.. code:: python
+.. code-block:: python

 z = pm.LogNormal.dist(mu=0., sigma=1., transform=tr.Log)
 z.transform # ==> pymc.distributions.transforms.Log

-.. code:: python
+.. code-block:: python

 z2 = Exp().apply(z)
 z2.transform is None # ==> True
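The guide text in this hunk notes that nesting transforms does not compose them and points to ``Chain`` instead, but shows no ``Chain`` usage. As an illustrative aside (not part of this commit), here is a minimal sketch of that alternative, assuming ``Chain`` is exposed in ``pymc.distributions.transforms`` and accepts a list of transforms; the exact names and import path may differ between PyMC versions:

.. code-block:: python

    import pymc as pm
    import pymc.distributions.transforms as tr

    # Assumption: tr.Chain composes the listed transforms, as the guide text
    # above suggests; verify against the installed PyMC version.
    chained = tr.Chain([tr.log, tr.ordered])

    with pm.Model():
        # A single free RV is added, with both transforms applied in sequence,
        # e.g. positive values that must also be sorted. The ordered transform
        # needs strictly increasing initial values.
        cutpoints = pm.HalfNormal(
            "cutpoints", sigma=1.0, size=3, transform=chained, initval=[0.1, 0.5, 1.0]
        )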

pymc/distributions/discrete.py

Lines changed: 1 addition & 1 deletion
@@ -1357,7 +1357,7 @@ class OrderedProbit:

 Examples
 --------
-.. code:: python
+.. code-block:: python

 # Generate data for a simple 1 dimensional example problem
 n1_c = 300; n2_c = 300; n3_c = 300

pymc/distributions/multivariate.py

Lines changed: 27 additions & 33 deletions
@@ -1390,7 +1390,7 @@ class LKJCholeskyCov:

 Examples
 --------
-.. code:: python
+.. code-block:: python

 with pm.Model() as model:
     # Note that we access the distribution for the standard
@@ -1682,28 +1682,25 @@ class LKJCorr:

 Examples
 --------
-.. code:: python
+.. code-block:: python

 with pm.Model() as model:
-
     # Define the vector of fixed standard deviations
-    sds = 3*np.ones(10)
+    sds = 3 * np.ones(10)

-    corr = pm.LKJCorr(
-        'corr', eta=4, n=10, return_matrix=True
-    )
+    corr = pm.LKJCorr("corr", eta=4, n=10, return_matrix=True)

     # Define a new MvNormal with the given correlation matrix
-    vals = sds*pm.MvNormal('vals', mu=np.zeros(10), cov=corr, shape=10)
+    vals = sds * pm.MvNormal("vals", mu=np.zeros(10), cov=corr, shape=10)

     # Or transform an uncorrelated normal distribution:
-    vals_raw = pm.Normal('vals_raw', shape=10)
+    vals_raw = pm.Normal("vals_raw", shape=10)
     chol = pt.linalg.cholesky(corr)
-    vals = sds*pt.dot(chol,vals_raw)
+    vals = sds * pt.dot(chol, vals_raw)

     # The matrix is internally still sampled as a upper triangular vector
     # If you want access to it in matrix form in the trace, add
-    pm.Deterministic('corr_mat', corr)
+    pm.Deterministic("corr_mat", corr)


 References
@@ -1797,7 +1794,7 @@ class MatrixNormal(Continuous):
 Define a matrixvariate normal variable for given row and column covariance
 matrices.

-.. code:: python
+.. code-block:: python

 import pymc as pm
 import numpy as np
@@ -1820,16 +1817,14 @@ class MatrixNormal(Continuous):
 constant, both the covariance and scaling could be learned as follows
 (see the docstring of `LKJCholeskyCov` for more information about this)

-.. code:: python
+.. code-block:: python

 # Setup data
-true_colcov = np.array([[1.0, 0.5, 0.1],
-                        [0.5, 1.0, 0.2],
-                        [0.1, 0.2, 1.0]])
+true_colcov = np.array([[1.0, 0.5, 0.1], [0.5, 1.0, 0.2], [0.1, 0.2, 1.0]])
 m = 3
 n = true_colcov.shape[0]
 true_scale = 3
-true_rowcov = np.diag([true_scale**(2*i) for i in range(m)])
+true_rowcov = np.diag([true_scale ** (2 * i) for i in range(m)])
 mu = np.zeros((m, n))
 true_kron = np.kron(true_rowcov, true_colcov)
 data = np.random.multivariate_normal(mu.flatten(), true_kron)
@@ -1838,13 +1833,12 @@ class MatrixNormal(Continuous):
 with pm.Model() as model:
     # Setup right cholesky matrix
     sd_dist = pm.HalfCauchy.dist(beta=2.5, shape=3)
-    colchol,_,_ = pm.LKJCholeskyCov('colchol', n=3, eta=2,sd_dist=sd_dist)
+    colchol, _, _ = pm.LKJCholeskyCov("colchol", n=3, eta=2, sd_dist=sd_dist)
     # Setup left covariance matrix
-    scale = pm.LogNormal('scale', mu=np.log(true_scale), sigma=0.5)
-    rowcov = pt.diag([scale**(2*i) for i in range(m)])
+    scale = pm.LogNormal("scale", mu=np.log(true_scale), sigma=0.5)
+    rowcov = pt.diag([scale ** (2 * i) for i in range(m)])

-    vals = pm.MatrixNormal('vals', mu=mu, colchol=colchol, rowcov=rowcov,
-                           observed=data)
+    vals = pm.MatrixNormal("vals", mu=mu, colchol=colchol, rowcov=rowcov, observed=data)
 """

 rv_op = matrixnormal
@@ -2010,30 +2004,30 @@ class KroneckerNormal(Continuous):
 Define a multivariate normal variable with a covariance
 :math:`K = K_1 \otimes K_2`

-.. code:: python
+.. code-block:: python

-K1 = np.array([[1., 0.5], [0.5, 2]])
-K2 = np.array([[1., 0.4, 0.2], [0.4, 2, 0.3], [0.2, 0.3, 1]])
+K1 = np.array([[1.0, 0.5], [0.5, 2]])
+K2 = np.array([[1.0, 0.4, 0.2], [0.4, 2, 0.3], [0.2, 0.3, 1]])
 covs = [K1, K2]
 N = 6
 mu = np.zeros(N)
 with pm.Model() as model:
-    vals = pm.KroneckerNormal('vals', mu=mu, covs=covs, shape=N)
+    vals = pm.KroneckerNormal("vals", mu=mu, covs=covs, shape=N)

 Efficiency gains are made by cholesky decomposing :math:`K_1` and
 :math:`K_2` individually rather than the larger :math:`K` matrix. Although
 only two matrices :math:`K_1` and :math:`K_2` are shown here, an arbitrary
 number of submatrices can be combined in this way. Choleskys and
 eigendecompositions can be provided instead

-.. code:: python
+.. code-block:: python

 chols = [np.linalg.cholesky(Ki) for Ki in covs]
 evds = [np.linalg.eigh(Ki) for Ki in covs]
 with pm.Model() as model:
-    vals2 = pm.KroneckerNormal('vals2', mu=mu, chols=chols, shape=N)
+    vals2 = pm.KroneckerNormal("vals2", mu=mu, chols=chols, shape=N)
     # or
-    vals3 = pm.KroneckerNormal('vals3', mu=mu, evds=evds, shape=N)
+    vals3 = pm.KroneckerNormal("vals3", mu=mu, evds=evds, shape=N)

 neither of which will be converted. Diagonal noise can also be added to
 the covariance matrix, :math:`K = K_1 \otimes K_2 + \sigma^2 I_N`.
@@ -2042,13 +2036,13 @@
 utilizing eigendecompositons of the submatrices behind the scenes [1].
 Thus,

-.. code:: python
+.. code-block:: python

 sigma = 0.1
 with pm.Model() as noise_model:
-    vals = pm.KroneckerNormal('vals', mu=mu, covs=covs, sigma=sigma, shape=N)
-    vals2 = pm.KroneckerNormal('vals2', mu=mu, chols=chols, sigma=sigma, shape=N)
-    vals3 = pm.KroneckerNormal('vals3', mu=mu, evds=evds, sigma=sigma, shape=N)
+    vals = pm.KroneckerNormal("vals", mu=mu, covs=covs, sigma=sigma, shape=N)
+    vals2 = pm.KroneckerNormal("vals2", mu=mu, chols=chols, sigma=sigma, shape=N)
+    vals3 = pm.KroneckerNormal("vals3", mu=mu, evds=evds, sigma=sigma, shape=N)

 are identical, with `covs` and `chols` each converted to
 eigendecompositions.
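The efficiency argument in the ``KroneckerNormal`` docstring above rests on the fact that the Cholesky factor of a Kronecker product is the Kronecker product of the factors, so only the small submatrices ever need to be decomposed. A quick NumPy check of that identity, using the same ``K1`` and ``K2`` as the docstring (illustrative only, not part of this commit):

.. code-block:: python

    import numpy as np

    K1 = np.array([[1.0, 0.5], [0.5, 2]])
    K2 = np.array([[1.0, 0.4, 0.2], [0.4, 2, 0.3], [0.2, 0.3, 1]])

    # Cholesky factors of the 2x2 and 3x3 submatrices.
    L1, L2 = np.linalg.cholesky(K1), np.linalg.cholesky(K2)

    # np.linalg.cholesky(np.kron(K1, K2)) equals np.kron(L1, L2), so the full
    # 6x6 matrix never needs to be factored directly.
    assert np.allclose(np.kron(L1, L2), np.linalg.cholesky(np.kron(K1, K2)))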

pymc/gp/cov.py

Lines changed: 1 addition & 1 deletion
@@ -956,7 +956,7 @@ class WrappedPeriodic(Covariance):
 In order to construct a kernel equivalent to the `Periodic` kernel you
 can do the following (though using `Periodic` will likely be a bit faster):

-.. code:: python
+.. code-block:: python

 exp_quad = pm.gp.cov.ExpQuad(1, ls=0.5)
 cov = pm.gp.cov.WrappedPeriodic(exp_quad, period=5)

pymc/gp/gp.py

Lines changed: 8 additions & 8 deletions
@@ -117,7 +117,7 @@ class Latent(Base):

 Examples
 --------
-.. code:: python
+.. code-block:: python

 # A one dimensional column vector of inputs.
 X = np.linspace(0, 1, 10)[:, None]
@@ -439,7 +439,7 @@ class Marginal(Base):

 Examples
 --------
-.. code:: python
+.. code-block:: python

 # A one dimensional column vector of inputs.
 X = np.linspace(0, 1, 10)[:, None]
@@ -719,7 +719,7 @@ class MarginalApprox(Marginal):

 Examples
 --------
-.. code:: python
+.. code-block:: python

 # A one dimensional column vector of inputs.
 X = np.linspace(0, 1, 10)[:, None]
@@ -963,7 +963,7 @@ class LatentKron(Base):

 Examples
 --------
-.. code:: python
+.. code-block:: python

 # One dimensional column vectors of inputs
 X1 = np.linspace(0, 1, 10)[:, None]
@@ -1066,7 +1066,7 @@ def conditional(self, name, Xnew, jitter=JITTER_DEFAULT, **kwargs):
 1, and 4, respectively, then `Xnew` must have 7 columns and a
 covariance between the prediction points

-.. code:: python
+.. code-block:: python

 cov_func(Xnew) = cov_func1(Xnew[:, :2]) * cov_func1(Xnew[:, 2:3]) * cov_func1(Xnew[:, 3:])

@@ -1118,13 +1118,13 @@ class MarginalKron(Base):

 Examples
 --------
-.. code:: python
+.. code-block:: python

 # One dimensional column vectors of inputs
 X1 = np.linspace(0, 1, 10)[:, None]
 X2 = np.linspace(0, 2, 5)[:, None]
 Xs = [X1, X2]
-y = np.random.randn(len(X1)*len(X2)) # toy data
+y = np.random.randn(len(X1) * len(X2)) # toy data
 with pm.Model() as model:
     # Specify the covariance functions for each Xi
     cov_func1 = pm.gp.cov.ExpQuad(1, ls=0.1) # Must accept X1 without error
@@ -1266,7 +1266,7 @@ def conditional(self, name, Xnew, pred_noise=False, diag=False, **kwargs):
 1, and 4, respectively, then `Xnew` must have 7 columns and a
 covariance between the prediction points

-.. code:: python
+.. code-block:: python

 cov_func(Xnew) = cov_func1(Xnew[:, :2]) * cov_func1(Xnew[:, 2:3]) * cov_func1(Xnew[:, 3:])


pymc/gp/hsgp_approx.py

Lines changed: 5 additions & 6 deletions
@@ -215,7 +215,7 @@ class HSGP(Base):

 Examples
 --------
-.. code:: python
+.. code-block:: python

 # A three dimensional column vector of inputs.
 X = np.random.rand(100, 3)
@@ -357,7 +357,7 @@ def prior_linearized(self, X: TensorLike):

 Examples
 --------
-.. code:: python
+.. code-block:: python

 # A one dimensional column vector of inputs.
 X = np.linspace(0, 10, 100)[:, None]
@@ -544,7 +544,7 @@ class HSGPPeriodic(Base):

 Examples
 --------
-.. code:: python
+.. code-block:: python

 # A three dimensional column vector of inputs.
 X = np.random.rand(100, 3)
@@ -640,7 +640,7 @@ def prior_linearized(self, X: TensorLike):

 Examples
 --------
-.. code:: python
+.. code-block:: python

 # A one dimensional column vector of inputs.
 X = np.linspace(0, 10, 100)[:, None]
@@ -665,8 +665,7 @@ def prior_linearized(self, X: TensorLike):

 # The (non-centered) GP approximation is given by
 f = pm.Deterministic(
-    "f",
-    phi_cos @ (psd * beta[:m]) + phi_sin[..., 1:] @ (psd[1:] * beta[m:])
+    "f", phi_cos @ (psd * beta[:m]) + phi_sin[..., 1:] @ (psd[1:] * beta[m:])
 )
 ...

