@@ -3,7 +3,8 @@
 from pymc3 import Model, Normal, DiscreteUniform, Poisson, switch, Exponential
 from pymc3.theanof import inputvars
 from pymc3.variational import advi, advi_minibatch, sample_vp
-from pymc3.variational.advi import variational_gradient_estimate
+from pymc3.variational.advi import _calc_elbo, adagrad_optimizer
+from pymc3.theanof import CallableTensor
 from theano import function, shared
 import theano.tensor as tt
 
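Note: these import changes track a refactor of pymc3.variational.advi. The monolithic variational_gradient_estimate helper is replaced by _calc_elbo plus an explicit optimizer factory, and CallableTensor is used below to bind the ELBO graph's single free input to a shared variable so the function can be compiled with no explicit arguments. A minimal sketch of the binding pattern on a toy graph (the names x, y, and x_shared are illustrative, not from this commit):

    import numpy as np
    import theano
    import theano.tensor as tt
    from pymc3.theanof import CallableTensor

    x = tt.vector('x')                     # the graph's only free input
    y = (x ** 2).sum()                     # a tensor that depends on x

    x_shared = theano.shared(np.array([1., 2.]), 'x_shared')
    y_bound = CallableTensor(y)(x_shared)  # clone the graph with x replaced by x_shared

    f = theano.function([], y_bound)       # compiles without explicit inputs
    print(f())                             # 5.0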
@@ -17,20 +18,21 @@ def test_elbo():
     # Create a model for test
     with Model() as model:
         mu = Normal('mu', mu=mu0, sd=sigma)
-        y = Normal('y', mu=mu, sd=1, observed=y_obs)
+        Normal('y', mu=mu, sd=1, observed=y_obs)
 
     vars = inputvars(model.vars)
 
     # Create the ELBO tensor
-    grad, elbo, shared, uw = variational_gradient_estimate(
-        vars, model, n_mcsamples=10000, random_seed=1)
+    elbo, _ = _calc_elbo(vars, model, n_mcsamples=10000, random_seed=1)
 
     # Variational posterior parameters
     uw_ = np.array([1.88, np.log(1)])
 
     # Calculate the ELBO by Monte Carlo sampling
-    f = function([uw], elbo)
-    elbo_mc = f(uw_)
+    uw_shared = shared(uw_, 'uw_shared')
+    elbo = CallableTensor(elbo)(uw_shared)
+    f = function([], elbo)
+    elbo_mc = f()
 
     # Exact value
     elbo_true = (-0.5 * (
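For reference, the quantity under test is the evidence lower bound ELBO(u, w) = E_q[log p(y_obs, mu)] - E_q[log q(mu)] for the mean-field family q(mu) = N(u, exp(w)^2); in this conjugate normal model it has the closed form computed in elbo_true. A generic Monte Carlo estimator of the same quantity, sketched independently of pymc3's internals (this assumes w is the log standard deviation, matching uw_ = [1.88, log(1)] above, and that y_obs is a NumPy array):

    import numpy as np
    from scipy import stats

    def elbo_mc(u, w, y_obs, mu0, sigma, n_mcsamples=10000, seed=1):
        rng = np.random.RandomState(seed)
        mu = u + np.exp(w) * rng.randn(n_mcsamples)              # draws from q
        log_p = (stats.norm.logpdf(mu, mu0, sigma)               # log prior
                 + stats.norm.logpdf(y_obs[:, None], mu, 1).sum(axis=0))  # log likelihood
        log_q = stats.norm.logpdf(mu, u, np.exp(w))              # log q density
        return np.mean(log_p - log_q)                            # unbiased ELBO estimate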
@@ -107,7 +109,7 @@ def test_advi():
 
     with Model() as model:
         mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
-        x = Normal('x', mu=mu_, sd=sd, observed=data)
+        Normal('x', mu=mu_, sd=sd, observed=data)
 
     advi_fit = advi(
         model=model, n=1000, accurate_elbo=False, learning_rate=1e-1,
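Both test_advi and the new test_advi_optimizer below validate against the exact conjugate posterior. For n observations with known likelihood standard deviation sd and a Normal(mu0, sd0) prior on the mean, the posterior over mu is Gaussian, which is what the d and mu_post lines in the tests compute:

    \mu \mid y \sim \mathcal{N}\left(\mu_{\mathrm{post}},\, d^{-1}\right), \qquad
    d = \frac{n}{\mathrm{sd}^2} + \frac{1}{\mathrm{sd}_0^2}, \qquad
    \mu_{\mathrm{post}} = \frac{n\,\bar{y}/\mathrm{sd}^2 + \mu_0/\mathrm{sd}_0^2}{d}

The rtol assertions then compare the fitted mean against mu_post and the sampled spread against sqrt(1/d).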
@@ -120,6 +122,32 @@ def test_advi():
     np.testing.assert_allclose(np.mean(trace['mu']), mu_post, rtol=0.4)
     np.testing.assert_allclose(np.std(trace['mu']), np.sqrt(1. / d), rtol=0.4)
 
+def test_advi_optimizer():
+    n = 1000
+    sd0 = 2.
+    mu0 = 4.
+    sd = 3.
+    mu = -5.
+
+    data = sd * np.random.RandomState(0).randn(n) + mu
+
+    d = n / sd ** 2 + 1 / sd0 ** 2
+    mu_post = (n * np.mean(data) / sd ** 2 + mu0 / sd0 ** 2) / d
+
+    with Model() as model:
+        mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
+        Normal('x', mu=mu_, sd=sd, observed=data)
+
+    optimizer = adagrad_optimizer(learning_rate=0.1, epsilon=0.1)
+    advi_fit = advi(model=model, n=1000, optimizer=optimizer, random_seed=1)
+
+    np.testing.assert_allclose(advi_fit.means['mu'], mu_post, rtol=0.1)
+
+    trace = sample_vp(advi_fit, 10000, model)
+
+    np.testing.assert_allclose(np.mean(trace['mu']), mu_post, rtol=0.4)
+    np.testing.assert_allclose(np.std(trace['mu']), np.sqrt(1. / d), rtol=0.4)
+
 def test_advi_minibatch():
     n = 1000
     sd0 = 2.
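Condensed usage of the new optimizer hook, as exercised end to end in test_advi_optimizer (a recap of the test above, not additional API surface; model and constants as defined there):

    # Plug a preconfigured AdaGrad optimizer into advi() instead of the
    # default learning-rate setting, then draw from the fitted approximation.
    optimizer = adagrad_optimizer(learning_rate=0.1, epsilon=0.1)
    advi_fit = advi(model=model, n=1000, optimizer=optimizer, random_seed=1)
    trace = sample_vp(advi_fit, 10000, model)

The relatively large epsilon=0.1 presumably keeps early AdaGrad steps stable while the squared-gradient accumulator is still small.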