Commit be00ad7

many small fixes related to simple defaults

1 parent 3fd5db4

9 files changed: +35, -30 lines


examples/logistic.py

Lines changed: 1 addition & 1 deletion
@@ -36,6 +36,6 @@ def tinvlogit(x):
 start = find_MAP()
 h = np.diag(approx_hess(start)) # find a good orientation using the hessian at the MAP

-step = HamiltonianMC(model, model.vars, h)
+step = HamiltonianMC(model.vars, h)

 trace = sample(3e2, step, start)
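The edit follows the pattern that runs through this whole commit: drop the explicit `model` argument and let the function resolve it, presumably through `modelcontext()` (see pymc/model.py below). A minimal sketch of the intended call pattern, reusing the `model`, `h`, and `start` defined earlier in examples/logistic.py (the `with model:` wrapper is an assumption about how the context default is meant to be supplied, not part of this diff):

    with model:
        step = HamiltonianMC(model.vars, h)  # model taken from the active context
        trace = sample(3e2, step, start)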

examples/stochastic_volatility.ipynb

Lines changed: 1 addition & 1 deletion
@@ -139,7 +139,7 @@
 "collapsed": false,
 "input": [
 "with model:\n",
-" start = find_MAP(model, vars = [s], fmin = optimize.fmin_l_bfgs_b)"
+" start = find_MAP(vars = [s], fmin = optimize.fmin_l_bfgs_b)"
 ],
 "language": "python",
 "metadata": {},

examples/stochastic_volatility.py

Lines changed: 18 additions & 14 deletions
@@ -14,15 +14,15 @@
 # <markdowncell>

 # Asset prices have time-varying volatility (variance of day over day `returns`). In some periods, returns are highly variable, and in others very stable. Stochastic volatility models model this with a latent volatility variable, modeled as a stochastic process. The following model is similar to the one described in the No-U-Turn Sampler paper, Hoffman (2011) p21.
-#
+#
 # $$ \sigma \sim Exponential(50) $$
-#
+#
 # $$ \nu \sim Exponential(.1) $$
-#
+#
 # $$ s_i \sim Normal(s_{i-1}, \sigma^{-2}) $$
-#
+#
 # $$ log(\frac{y_i}{y_{i-1}}) \sim t(\nu, 0, exp(-2 s_i)) $$
-#
+#
 # Here, $y$ is the daily return series and $s$ is the latent volatility process.

 # <markdowncell>
@@ -58,16 +58,16 @@

 # Fit Model
 # ------------
-# To get a decent scale for the Hamiltonian sampler, we find the hessian at a point. However, the 2nd derivatives for the degrees of freedom are negative and thus not very informative, so we make an educated guess. The interactions between `log_sigma`/`nu` and `s` are also not very useful, so we set them to zero.
-#
+# To get a decent scale for the Hamiltonian sampler, we find the hessian at a point. However, the 2nd derivatives for the degrees of freedom are negative and thus not very informative, so we make an educated guess. The interactions between `log_sigma`/`nu` and `s` are also not very useful, so we set them to zero.
+#
 # The hessian matrix is also very sparse, so we make it a sparse matrix for faster sampling.

 # <codecell>

 H = model.d2logpc()

-def hessian(point, nusd):
-    h = H(point)
+def hessian(point, nusd):
+    h = H(Point(point))
     h[1,1] = nusd**-2
     h[:2,2:] = h[2:,:2] = 0

@@ -80,7 +80,7 @@ def hessian(point, nusd):
 # <codecell>

 with model:
-    start = find_MAP(model, vars = [s], fmin = optimize.fmin_l_bfgs_b)
+    start = find_MAP(vars = [s], fmin = optimize.fmin_l_bfgs_b)

 # <markdowncell>

@@ -90,7 +90,7 @@ def hessian(point, nusd):

 with model:
     step = HamiltonianMC(model.vars, hessian(start, 6))
-    trace = sample(200, step, start)
+    trace = sample(200, step, start, trace = model.vars + [sigma])

     start2 = trace.point(-1)
     step = HamiltonianMC(model.vars, hessian(start2, 6), path_length = 4.)
@@ -99,15 +99,19 @@ def hessian(point, nusd):
 # <codecell>

 #figsize(12,6)
-plt.title(str(s))
-plt.plot(trace[s][::10].T,'b', alpha = .01);
+title(str(s))
+plot(trace[s][::10].T,'b', alpha = .01);

 #figsize(12,6)
 traceplot(trace, model.vars[:-1]);

+# <codecell>
+
+trace.samples.keys()
+
 # <markdowncell>

 # References
 # -------------
-# 1. Hoffman & Gelman. (2011). The No-U-Turn Sampler: Adaptively Setting Path Lengths in Hamiltonian Monte Carlo. http://arxiv.org/abs/1111.4246
+# 1. Hoffman & Gelman. (2011). The No-U-Turn Sampler: Adaptively Setting Path Lengths in Hamiltonian Monte Carlo. http://arxiv.org/abs/1111.4246
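The plotting line `plot(trace[s][::10].T, 'b', alpha = .01)` thins the stored draws by a factor of 10 and transposes them so each plotted series is one draw of the whole latent path. A self-contained illustration of that slicing with hypothetical shapes (plain numpy, nothing from the commit):

    import numpy as np
    draws = np.random.randn(200, 400)  # e.g. 200 draws of a length-400 s series
    thinned = draws[::10].T            # shape (400, 20): one column per kept draw
    print(thinned.shape)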

pymc/model.py

Lines changed: 2 additions & 2 deletions
@@ -40,7 +40,7 @@ def get_context(cls):

 def modelcontext(model):
     if model is None:
-        return Model.get_context()
+        return Model.get_context()
     return model

 class Model(Context):
@@ -87,7 +87,7 @@ def d2logpc(model, vars = None):
     @property
     def test_point(self):
         """Test point used to check that the model doesn't generate errors"""
-        return Point(self, ((var, var.tag.test_value) for var in self.vars))
+        return Point(((var, var.tag.test_value) for var in self.vars), model = self)

     @property
     def cont_vars(model):
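`modelcontext()` is the hinge for all the "simple defaults" in this commit: when a function receives `model=None`, it falls back to the innermost active `with model:` block. A self-contained sketch of that mechanism (simplified for illustration; the real `Context` class in pymc/model.py may differ):

    class Context(object):
        contexts = []  # stack of currently entered contexts

        def __enter__(self):
            type(self).contexts.append(self)
            return self

        def __exit__(self, *exc):
            type(self).contexts.pop()

        @classmethod
        def get_context(cls):
            return cls.contexts[-1]  # innermost active context

    class Model(Context):
        pass

    def modelcontext(model):
        # fall back to the context stack when no model is passed explicitly
        if model is None:
            return Model.get_context()
        return model

    m = Model()
    with m:
        assert modelcontext(None) is m  # default resolved from the with-block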

pymc/sample.py

Lines changed: 3 additions & 3 deletions
@@ -39,7 +39,7 @@ def sample(draws, step, start = None, trace = None, track_progress = True, model
     draws = int(draws)
     if start is None:
         start = trace[-1]
-    point = Point(model, start)
+    point = Point(start, model = model)

     if not hasattr(trace, 'record'):
         if trace is None:
@@ -66,7 +66,7 @@ def argsample(args):
     """ defined at top level so it can be pickled"""
     return sample(*args)

-def psample(draws, step, start, mtrace = None, threads = None, track = None, model = None):
+def psample(draws, step, start, mtrace = None, track = None, model = None, threads = None):
     """draw a number of samples using the given step method. Multiple step methods supported via compound step method
     returns the amount of time taken"""

@@ -86,7 +86,7 @@ def psample(draws, step, start, mtrace = None, threads = None, track = None, mod

     p = mp.Pool(threads)

-    argset = zip([model] *threads, [draws]*threads, [step]*threads, start, mtrace.traces)
+    argset = zip([draws]*threads, [step]*threads, start, mtrace.traces, [False]*threads, [model] *threads)

     traces = p.map(argsample, argset)

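The reordered `zip` matters because `argsample` calls `sample(*args)`: each per-thread tuple must line up positionally with `sample(draws, step, start, trace, track_progress, model)` as shown in the first hunk, with the new `[False]*threads` slot filling `track_progress`. A toy illustration with stand-in values (runnable plain Python, nothing pymc-specific):

    threads = 2
    draws, step, model = 100, 'step', 'model'
    start = [{'x': 0.}, {'x': 1.}]   # one start point per thread
    traces = ['trace0', 'trace1']    # one trace per thread
    argset = zip([draws]*threads, [step]*threads, start, traces,
                 [False]*threads, [model]*threads)
    for args in argset:
        print(args)  # (100, 'step', {'x': ...}, 'traceN', False, 'model')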

pymc/tuning/scaling.py

Lines changed: 2 additions & 2 deletions
@@ -24,7 +24,7 @@ def approx_hess(point, vars=None, model = None):
     if vars is None :
         vars = model.cont_vars

-    point = Point(model, point)
+    point = Point(point, model = model)

     bij = DictToArrayBijection(ArrayOrdering(vars), point)
     dlogp = bij.mapf(model.dlogpc(vars))
@@ -53,7 +53,7 @@ def find_hessian(point, vars = None, model = None):
     """
     model = modelcontext(model)
     H = model.d2logpc(vars)
-    return H(Point(model, point))
+    return H(Point(point, model = model))

 def trace_cov(trace, vars = None):
     """

pymc/tuning/starting.py

Lines changed: 2 additions & 2 deletions
@@ -11,7 +11,7 @@
 __all__ = ['find_MAP', 'scipyminimize']


-def find_MAP(start = None, vars=None, fmin = optimize.fmin_bfgs, return_raw = False, disp = False, model = None, model = None, *args, **kwargs):
+def find_MAP(start = None, vars=None, fmin = optimize.fmin_bfgs, return_raw = False, disp = False, model = None, *args, **kwargs):
     """
     Sets state to the local maximum a posteriori point given a model.
     Current default of fmin_Hessian does not deal well with optimizing close
@@ -37,7 +37,7 @@ def find_MAP(start = None, vars=None, fmin = optimize.fmin_bfgs, return_raw = Fa
     if vars is None:
         vars = model.cont_vars

-    start = Point(model, start)
+    start = Point(start, model = model)
     bij = DictToArrayBijection(ArrayOrdering(vars), start)

     logp = bij.mapf(model.logpc)
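The deleted second `model = None` in the old signature was not just redundant: Python rejects duplicate parameter names at compile time, so the old definition could never have been imported. A quick demonstration:

    try:
        exec("def f(model=None, model=None): pass")
    except SyntaxError as e:
        print(e)  # duplicate argument 'model' in function definition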

tests/test_distributions.py

Lines changed: 2 additions & 2 deletions
@@ -115,7 +115,7 @@ def check_int_to_1(model, value, domains):

     for a in its.product(*domains):
         a = a + (value.tag.test_value,)
-        pt = Point(dict( (str(var), val) for var,val in zip(model.vars, a)))
+        pt = Point(dict( (str(var), val) for var,val in zip(model.vars, a)), model = model)

         bij = DictToVarBijection(value,() , pt)

@@ -145,6 +145,6 @@ def check_dlogp(model, value, domains):
     ndlogp = Gradient(logp)

     for a in its.product(*domains):
-        pt = Point(dict( (str(var), val) for var,val in zip(model.vars, a)))
+        pt = Point(dict( (str(var), val) for var,val in zip(model.vars, a)), model = model)

         pt = bij.map(pt)

tests/test_sampling.py

Lines changed: 4 additions & 3 deletions
@@ -16,9 +16,10 @@ def test_sample():
     if test_parallel:
         test_samplers.append(psample)

-    for samplr in test_samplers:
-        for n in [0, 10, 1000]:
-            yield samplr, model, n, step, start
+    with model:
+        for samplr in test_samplers:
+            for n in [0, 10, 1000]:
+                yield samplr, n, step, start

