@@ -1390,7 +1390,7 @@ class LKJCholeskyCov:
1390
1390
1391
1391
Examples
1392
1392
--------
1393
- .. code:: python
1393
+ .. code-block:: python
1394
1394
1395
1395
with pm.Model() as model:
1396
1396
# Note that we access the distribution for the standard
@@ -1682,28 +1682,25 @@ class LKJCorr:
1682
1682
1683
1683
Examples
1684
1684
--------
1685
- .. code:: python
1685
+ .. code-block:: python
1686
1686
1687
1687
with pm.Model() as model:
1688
-
1689
1688
# Define the vector of fixed standard deviations
1690
- sds = 3* np.ones(10)
1689
+ sds = 3 * np.ones(10)
1691
1690
1692
- corr = pm.LKJCorr(
1693
- 'corr', eta=4, n=10, return_matrix=True
1694
- )
1691
+ corr = pm.LKJCorr("corr", eta=4, n=10, return_matrix=True)
1695
1692
1696
1693
# Define a new MvNormal with the given correlation matrix
1697
- vals = sds* pm.MvNormal(' vals' , mu=np.zeros(10), cov=corr, shape=10)
1694
+ vals = sds * pm.MvNormal("vals", mu=np.zeros(10), cov=corr, shape=10)
1698
1695
1699
1696
# Or transform an uncorrelated normal distribution:
1700
- vals_raw = pm.Normal(' vals_raw' , shape=10)
1697
+ vals_raw = pm.Normal("vals_raw", shape=10)
1701
1698
chol = pt.linalg.cholesky(corr)
1702
- vals = sds* pt.dot(chol,vals_raw)
1699
+ vals = sds * pt.dot(chol, vals_raw)
1703
1700
1704
1701
# The matrix is internally still sampled as a upper triangular vector
1705
1702
# If you want access to it in matrix form in the trace, add
1706
- pm.Deterministic(' corr_mat' , corr)
1703
+ pm.Deterministic("corr_mat", corr)
1707
1704
1708
1705
1709
1706
References
@@ -1797,7 +1794,7 @@ class MatrixNormal(Continuous):
1797
1794
Define a matrixvariate normal variable for given row and column covariance
1798
1795
matrices.
1799
1796
1800
- .. code:: python
1797
+ .. code-block:: python
1801
1798
1802
1799
import pymc as pm
1803
1800
import numpy as np
@@ -1820,16 +1817,20 @@ class MatrixNormal(Continuous):
1820
1817
constant, both the covariance and scaling could be learned as follows
1821
1818
(see the docstring of `LKJCholeskyCov` for more information about this)
1822
1819
1823
- .. code:: python
1820
+ .. code-block:: python
1824
1821
1825
1822
# Setup data
1826
- true_colcov = np.array([[1.0, 0.5, 0.1],
1827
- [0.5, 1.0, 0.2],
1828
- [0.1, 0.2, 1.0]])
1823
+ true_colcov = np.array(
1824
+ [
1825
+ [1.0, 0.5, 0.1],
1826
+ [0.5, 1.0, 0.2],
1827
+ [0.1, 0.2, 1.0],
1828
+ ]
1829
+ )
1829
1830
m = 3
1830
1831
n = true_colcov.shape[0]
1831
1832
true_scale = 3
1832
- true_rowcov = np.diag([true_scale**(2* i) for i in range(m)])
1833
+ true_rowcov = np.diag([true_scale ** (2 * i) for i in range(m)])
1833
1834
mu = np.zeros((m, n))
1834
1835
true_kron = np.kron(true_rowcov, true_colcov)
1835
1836
data = np.random.multivariate_normal(mu.flatten(), true_kron)
@@ -1838,13 +1839,12 @@ class MatrixNormal(Continuous):
1838
1839
with pm.Model() as model:
1839
1840
# Setup right cholesky matrix
1840
1841
sd_dist = pm.HalfCauchy.dist(beta=2.5, shape=3)
1841
- colchol,_, _ = pm.LKJCholeskyCov(' colchol' , n=3, eta=2,sd_dist=sd_dist)
1842
+ colchol, _, _ = pm.LKJCholeskyCov("colchol", n=3, eta=2, sd_dist=sd_dist)
1842
1843
# Setup left covariance matrix
1843
- scale = pm.LogNormal(' scale' , mu=np.log(true_scale), sigma=0.5)
1844
- rowcov = pt.diag([scale**(2* i) for i in range(m)])
1844
+ scale = pm.LogNormal("scale", mu=np.log(true_scale), sigma=0.5)
1845
+ rowcov = pt.diag([scale ** (2 * i) for i in range(m)])
1845
1846
1846
- vals = pm.MatrixNormal('vals', mu=mu, colchol=colchol, rowcov=rowcov,
1847
- observed=data)
1847
+ vals = pm.MatrixNormal("vals", mu=mu, colchol=colchol, rowcov=rowcov, observed=data)
1848
1848
"""
1849
1849
1850
1850
rv_op = matrixnormal
@@ -2010,30 +2010,30 @@ class KroneckerNormal(Continuous):
2010
2010
Define a multivariate normal variable with a covariance
2011
2011
:math:`K = K_1 \otimes K_2`
2012
2012
2013
- .. code:: python
2013
+ .. code-block:: python
2014
2014
2015
- K1 = np.array([[1., 0.5], [0.5, 2]])
2016
- K2 = np.array([[1., 0.4, 0.2], [0.4, 2, 0.3], [0.2, 0.3, 1]])
2015
+ K1 = np.array([[1.0, 0.5], [0.5, 2]])
2016
+ K2 = np.array([[1.0, 0.4, 0.2], [0.4, 2, 0.3], [0.2, 0.3, 1]])
2017
2017
covs = [K1, K2]
2018
2018
N = 6
2019
2019
mu = np.zeros(N)
2020
2020
with pm.Model() as model:
2021
- vals = pm.KroneckerNormal(' vals' , mu=mu, covs=covs, shape=N)
2021
+ vals = pm.KroneckerNormal("vals", mu=mu, covs=covs, shape=N)
2022
2022
2023
2023
Efficiency gains are made by cholesky decomposing :math:`K_1` and
2024
2024
:math:`K_2` individually rather than the larger :math:`K` matrix. Although
2025
2025
only two matrices :math:`K_1` and :math:`K_2` are shown here, an arbitrary
2026
2026
number of submatrices can be combined in this way. Choleskys and
2027
2027
eigendecompositions can be provided instead
2028
2028
2029
- .. code:: python
2029
+ .. code-block:: python
2030
2030
2031
2031
chols = [np.linalg.cholesky(Ki) for Ki in covs]
2032
2032
evds = [np.linalg.eigh(Ki) for Ki in covs]
2033
2033
with pm.Model() as model:
2034
- vals2 = pm.KroneckerNormal(' vals2' , mu=mu, chols=chols, shape=N)
2034
+ vals2 = pm.KroneckerNormal("vals2", mu=mu, chols=chols, shape=N)
2035
2035
# or
2036
- vals3 = pm.KroneckerNormal(' vals3' , mu=mu, evds=evds, shape=N)
2036
+ vals3 = pm.KroneckerNormal("vals3", mu=mu, evds=evds, shape=N)
2037
2037
2038
2038
neither of which will be converted. Diagonal noise can also be added to
2039
2039
the covariance matrix, :math:`K = K_1 \otimes K_2 + \sigma^2 I_N`.
@@ -2042,13 +2042,13 @@ class KroneckerNormal(Continuous):
2042
2042
utilizing eigendecompositons of the submatrices behind the scenes [1].
2043
2043
Thus,
2044
2044
2045
- .. code:: python
2045
+ .. code-block:: python
2046
2046
2047
2047
sigma = 0.1
2048
2048
with pm.Model() as noise_model:
2049
- vals = pm.KroneckerNormal(' vals' , mu=mu, covs=covs, sigma=sigma, shape=N)
2050
- vals2 = pm.KroneckerNormal(' vals2' , mu=mu, chols=chols, sigma=sigma, shape=N)
2051
- vals3 = pm.KroneckerNormal(' vals3' , mu=mu, evds=evds, sigma=sigma, shape=N)
2049
+ vals = pm.KroneckerNormal("vals", mu=mu, covs=covs, sigma=sigma, shape=N)
2050
+ vals2 = pm.KroneckerNormal("vals2", mu=mu, chols=chols, sigma=sigma, shape=N)
2051
+ vals3 = pm.KroneckerNormal("vals3", mu=mu, evds=evds, sigma=sigma, shape=N)
2052
2052
2053
2053
are identical, with `covs` and `chols` each converted to
2054
2054
eigendecompositions.
0 commit comments