@@ -1390,7 +1390,7 @@ class LKJCholeskyCov:
 
     Examples
     --------
-    .. code:: python
+    .. code-block:: python
 
         with pm.Model() as model:
             # Note that we access the distribution for the standard
@@ -1682,28 +1682,25 @@ class LKJCorr:
 
     Examples
     --------
-    .. code:: python
+    .. code-block:: python
 
         with pm.Model() as model:
-
             # Define the vector of fixed standard deviations
-            sds = 3*np.ones(10)
+            sds = 3 * np.ones(10)
 
-            corr = pm.LKJCorr(
-                'corr', eta=4, n=10, return_matrix=True
-            )
+            corr = pm.LKJCorr("corr", eta=4, n=10, return_matrix=True)
 
             # Define a new MvNormal with the given correlation matrix
-            vals = sds*pm.MvNormal('vals', mu=np.zeros(10), cov=corr, shape=10)
+            vals = sds * pm.MvNormal("vals", mu=np.zeros(10), cov=corr, shape=10)
 
             # Or transform an uncorrelated normal distribution:
-            vals_raw = pm.Normal('vals_raw', shape=10)
+            vals_raw = pm.Normal("vals_raw", shape=10)
             chol = pt.linalg.cholesky(corr)
-            vals = sds*pt.dot(chol,vals_raw)
+            vals = sds * pt.dot(chol, vals_raw)
 
             # The matrix is internally still sampled as an upper triangular vector
             # If you want access to it in matrix form in the trace, add
-            pm.Deterministic('corr_mat', corr)
+            pm.Deterministic("corr_mat", corr)
 
 
     References
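The transform in the LKJCorr example works because scaling a Cholesky-correlated standard normal reproduces the target covariance: if `z ~ N(0, I)` and `L @ L.T == corr`, then `diag(sds) @ L @ z` has covariance `diag(sds) @ corr @ diag(sds)`. A minimal NumPy sketch of that identity (the fixed equicorrelated `corr` below is an illustrative stand-in for an LKJ draw, not part of the diff):

    import numpy as np

    rng = np.random.default_rng(0)
    n = 10
    sds = 3 * np.ones(n)
    corr = np.full((n, n), 0.5)  # stand-in for one draw of an LKJ correlation matrix
    np.fill_diagonal(corr, 1.0)

    # Same transform as the docstring: sds * chol @ vals_raw
    chol = np.linalg.cholesky(corr)
    z = rng.standard_normal((n, 200_000))
    x = sds[:, None] * (chol @ z)

    # The empirical covariance matches diag(sds) @ corr @ diag(sds)
    assert np.allclose(np.cov(x), np.outer(sds, sds) * corr, atol=0.2)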
@@ -1797,7 +1794,7 @@ class MatrixNormal(Continuous):
     Define a matrixvariate normal variable for given row and column covariance
     matrices.
 
-    .. code:: python
+    .. code-block:: python
 
         import pymc as pm
         import numpy as np
@@ -1820,16 +1817,14 @@ class MatrixNormal(Continuous):
     constant, both the covariance and scaling could be learned as follows
     (see the docstring of `LKJCholeskyCov` for more information about this)
 
-    .. code:: python
+    .. code-block:: python
 
         # Setup data
-        true_colcov = np.array([[1.0, 0.5, 0.1],
-                                [0.5, 1.0, 0.2],
-                                [0.1, 0.2, 1.0]])
+        true_colcov = np.array([[1.0, 0.5, 0.1], [0.5, 1.0, 0.2], [0.1, 0.2, 1.0]])
         m = 3
         n = true_colcov.shape[0]
         true_scale = 3
-        true_rowcov = np.diag([true_scale**(2*i) for i in range(m)])
+        true_rowcov = np.diag([true_scale ** (2 * i) for i in range(m)])
         mu = np.zeros((m, n))
         true_kron = np.kron(true_rowcov, true_colcov)
         data = np.random.multivariate_normal(mu.flatten(), true_kron)
@@ -1838,13 +1833,12 @@ class MatrixNormal(Continuous):
         with pm.Model() as model:
             # Setup right cholesky matrix
             sd_dist = pm.HalfCauchy.dist(beta=2.5, shape=3)
-            colchol,_, _ = pm.LKJCholeskyCov('colchol', n=3, eta=2, sd_dist=sd_dist)
+            colchol, _, _ = pm.LKJCholeskyCov("colchol", n=3, eta=2, sd_dist=sd_dist)
             # Setup left covariance matrix
-            scale = pm.LogNormal('scale', mu=np.log(true_scale), sigma=0.5)
-            rowcov = pt.diag([scale**(2*i) for i in range(m)])
+            scale = pm.LogNormal("scale", mu=np.log(true_scale), sigma=0.5)
+            rowcov = pt.diag([scale ** (2 * i) for i in range(m)])
 
-            vals = pm.MatrixNormal('vals', mu=mu, colchol=colchol, rowcov=rowcov,
-                                   observed=data)
+            vals = pm.MatrixNormal("vals", mu=mu, colchol=colchol, rowcov=rowcov, observed=data)
     """
 
     rv_op = matrixnormal
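One detail worth seeing concretely in the example above: the synthetic data flatten `mu` in row-major (C) order and draw from `np.kron(true_rowcov, true_colcov)`. With that ordering, entry `(i*n + j, k*n + l)` of the Kronecker product is exactly `rowcov[i, k] * colcov[j, l]`, the covariance between matrix entries `X[i, j]` and `X[k, l]`. A quick illustrative check (not part of the diff):

    import numpy as np

    true_colcov = np.array([[1.0, 0.5, 0.1], [0.5, 1.0, 0.2], [0.1, 0.2, 1.0]])
    m = n = 3
    true_rowcov = np.diag([3.0 ** (2 * i) for i in range(m)])
    K = np.kron(true_rowcov, true_colcov)

    # np.kron entries are literally the products rowcov[i, k] * colcov[j, l]
    for i in range(m):
        for k in range(m):
            for j in range(n):
                for l in range(n):
                    assert K[i * n + j, k * n + l] == true_rowcov[i, k] * true_colcov[j, l]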
@@ -2010,30 +2004,30 @@ class KroneckerNormal(Continuous):
     Define a multivariate normal variable with a covariance
     :math:`K = K_1 \otimes K_2`
 
-    .. code:: python
+    .. code-block:: python
 
-        K1 = np.array([[1., 0.5], [0.5, 2]])
-        K2 = np.array([[1., 0.4, 0.2], [0.4, 2, 0.3], [0.2, 0.3, 1]])
+        K1 = np.array([[1.0, 0.5], [0.5, 2]])
+        K2 = np.array([[1.0, 0.4, 0.2], [0.4, 2, 0.3], [0.2, 0.3, 1]])
         covs = [K1, K2]
         N = 6
         mu = np.zeros(N)
         with pm.Model() as model:
-            vals = pm.KroneckerNormal('vals', mu=mu, covs=covs, shape=N)
+            vals = pm.KroneckerNormal("vals", mu=mu, covs=covs, shape=N)
 
     Efficiency gains are made by Cholesky decomposing :math:`K_1` and
     :math:`K_2` individually rather than the larger :math:`K` matrix. Although
     only two matrices :math:`K_1` and :math:`K_2` are shown here, an arbitrary
     number of submatrices can be combined in this way. Cholesky factors and
     eigendecompositions can be provided instead
 
-    .. code:: python
+    .. code-block:: python
 
         chols = [np.linalg.cholesky(Ki) for Ki in covs]
         evds = [np.linalg.eigh(Ki) for Ki in covs]
         with pm.Model() as model:
-            vals2 = pm.KroneckerNormal('vals2', mu=mu, chols=chols, shape=N)
+            vals2 = pm.KroneckerNormal("vals2", mu=mu, chols=chols, shape=N)
             # or
-            vals3 = pm.KroneckerNormal('vals3', mu=mu, evds=evds, shape=N)
+            vals3 = pm.KroneckerNormal("vals3", mu=mu, evds=evds, shape=N)
 
     neither of which will be converted. Diagonal noise can also be added to
     the covariance matrix, :math:`K = K_1 \otimes K_2 + \sigma^2 I_N`.
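The efficiency gains described above rest on a standard identity: the Cholesky factor of a Kronecker product is the Kronecker product of the Cholesky factors, since `kron(L1, L2)` is lower triangular with positive diagonal and `kron(L1, L2) @ kron(L1, L2).T == kron(K1, K2)`. A short NumPy sketch using the example's matrices (illustrative, not PyMC's internals):

    import numpy as np

    K1 = np.array([[1.0, 0.5], [0.5, 2.0]])
    K2 = np.array([[1.0, 0.4, 0.2], [0.4, 2.0, 0.3], [0.2, 0.3, 1.0]])

    # Factoring the full 6x6 matrix agrees with combining the two small factors,
    # so only the submatrices ever need to be decomposed.
    full = np.linalg.cholesky(np.kron(K1, K2))
    small = np.kron(np.linalg.cholesky(K1), np.linalg.cholesky(K2))
    assert np.allclose(full, small)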
@@ -2042,13 +2036,13 @@ class KroneckerNormal(Continuous):
     utilizing eigendecompositions of the submatrices behind the scenes [1].
     Thus,
 
-    .. code:: python
+    .. code-block:: python
 
         sigma = 0.1
         with pm.Model() as noise_model:
-            vals = pm.KroneckerNormal('vals', mu=mu, covs=covs, sigma=sigma, shape=N)
-            vals2 = pm.KroneckerNormal('vals2', mu=mu, chols=chols, sigma=sigma, shape=N)
-            vals3 = pm.KroneckerNormal('vals3', mu=mu, evds=evds, sigma=sigma, shape=N)
+            vals = pm.KroneckerNormal("vals", mu=mu, covs=covs, sigma=sigma, shape=N)
+            vals2 = pm.KroneckerNormal("vals2", mu=mu, chols=chols, sigma=sigma, shape=N)
+            vals3 = pm.KroneckerNormal("vals3", mu=mu, evds=evds, sigma=sigma, shape=N)
 
     are identical, with `covs` and `chols` each converted to
     eigendecompositions.
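Diagonal noise is cheap to absorb for the reason the docstring hints at: with eigendecompositions `K_i = Q_i @ diag(lam_i) @ Q_i.T`, the matrix `kron(Q1, Q2)` is orthogonal, so `kron(K1, K2) + sigma**2 * I` keeps the same eigenvectors and only shifts the eigenvalues. A sketch of that identity with the example's matrices (illustrative, not PyMC's implementation):

    import numpy as np

    K1 = np.array([[1.0, 0.5], [0.5, 2.0]])
    K2 = np.array([[1.0, 0.4, 0.2], [0.4, 2.0, 0.3], [0.2, 0.3, 1.0]])
    sigma = 0.1

    lam1, Q1 = np.linalg.eigh(K1)
    lam2, Q2 = np.linalg.eigh(K2)

    # Adding sigma**2 * I shifts the combined eigenvalues and leaves the
    # (orthogonal) eigenvector matrix kron(Q1, Q2) unchanged.
    Q = np.kron(Q1, Q2)
    lam = np.kron(lam1, lam2) + sigma**2

    noisy = np.kron(K1, K2) + sigma**2 * np.eye(Q.shape[0])
    assert np.allclose(Q @ np.diag(lam) @ Q.T, noisy)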