Skip to content

Commit bc38179

Browse files
committed
FIX Fix tests for refactoring of advi().
1 parent f34c0b3 commit bc38179

File tree

1 file changed

+35
-7
lines changed

1 file changed

+35
-7
lines changed

pymc3/tests/test_advi.py

Lines changed: 35 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -3,7 +3,8 @@
33
from pymc3 import Model, Normal, DiscreteUniform, Poisson, switch, Exponential
44
from pymc3.theanof import inputvars
55
from pymc3.variational import advi, advi_minibatch, sample_vp
6-
from pymc3.variational.advi import variational_gradient_estimate
6+
from pymc3.variational.advi import _calc_elbo, adagrad_optimizer
7+
from pymc3.theanof import CallableTensor
78
from theano import function, shared
89
import theano.tensor as tt
910

@@ -17,20 +18,21 @@ def test_elbo():
1718
# Create a model for test
1819
with Model() as model:
1920
mu = Normal('mu', mu=mu0, sd=sigma)
20-
y = Normal('y', mu=mu, sd=1, observed=y_obs)
21+
Normal('y', mu=mu, sd=1, observed=y_obs)
2122

2223
vars = inputvars(model.vars)
2324

2425
# Create variational gradient tensor
25-
grad, elbo, shared, uw = variational_gradient_estimate(
26-
vars, model, n_mcsamples=10000, random_seed=1)
26+
elbo, _ = _calc_elbo(vars, model, n_mcsamples=10000, random_seed=1)
2727

2828
# Variational posterior parameters
2929
uw_ = np.array([1.88, np.log(1)])
3030

3131
# Calculate elbo computed with MonteCarlo
32-
f = function([uw], elbo)
33-
elbo_mc = f(uw_)
32+
uw_shared = shared(uw_, 'uw_shared')
33+
elbo = CallableTensor(elbo)(uw_shared)
34+
f = function([], elbo)
35+
elbo_mc = f()
3436

3537
# Exact value
3638
elbo_true = (-0.5 * (
@@ -107,7 +109,7 @@ def test_advi():
107109

108110
with Model() as model:
109111
mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
110-
x = Normal('x', mu=mu_, sd=sd, observed=data)
112+
Normal('x', mu=mu_, sd=sd, observed=data)
111113

112114
advi_fit = advi(
113115
model=model, n=1000, accurate_elbo=False, learning_rate=1e-1,
@@ -120,6 +122,32 @@ def test_advi():
120122
np.testing.assert_allclose(np.mean(trace['mu']), mu_post, rtol=0.4)
121123
np.testing.assert_allclose(np.std(trace['mu']), np.sqrt(1. / d), rtol=0.4)
122124

125+
def test_advi_optimizer():
126+
n = 1000
127+
sd0 = 2.
128+
mu0 = 4.
129+
sd = 3.
130+
mu = -5.
131+
132+
data = sd * np.random.RandomState(0).randn(n) + mu
133+
134+
d = n / sd**2 + 1 / sd0**2
135+
mu_post = (n * np.mean(data) / sd**2 + mu0 / sd0**2) / d
136+
137+
with Model() as model:
138+
mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
139+
Normal('x', mu=mu_, sd=sd, observed=data)
140+
141+
optimizer = adagrad_optimizer(learning_rate=0.1, epsilon=0.1)
142+
advi_fit = advi(model=model, n=1000, optimizer=optimizer, random_seed=1)
143+
144+
np.testing.assert_allclose(advi_fit.means['mu'], mu_post, rtol=0.1)
145+
146+
trace = sample_vp(advi_fit, 10000, model)
147+
148+
np.testing.assert_allclose(np.mean(trace['mu']), mu_post, rtol=0.4)
149+
np.testing.assert_allclose(np.std(trace['mu']), np.sqrt(1. / d), rtol=0.4)
150+
123151
def test_advi_minibatch():
124152
n = 1000
125153
sd0 = 2.

0 commit comments

Comments (0)