
Commit b15743d

Christopher Fonnesbeck committed:

First stab at revising Metropolis. Can now specify alternative proposals. Still need to add tuning.

1 parent 72c719e · commit b15743d

4 files changed (+124, −89 lines)


pymc/model.py

Lines changed: 41 additions & 46 deletions
@@ -1,23 +1,18 @@
-'''
-Created on Mar 7, 2011
-
-@author: johnsalvatier
-'''
 from point import *
 from types import *
 
 from theano import theano, tensor as t, function
 from theano.gof.graph import inputs
 
-import numpy as np
+import numpy as np
 from functools import wraps
 
-__all__ = ['Model', 'compilef', 'gradient', 'hessian', 'modelcontext', 'Point']
+__all__ = ['Model', 'compilef', 'gradient', 'hessian', 'modelcontext', 'Point']
 
 
 
-class Context(object):
-    def __enter__(self):
+class Context(object):
+    def __enter__(self):
         type(self).get_contexts().append(self)
         return self
 
@@ -28,7 +23,7 @@ def __exit__(self, typ, value, traceback):
     def get_contexts(cls):
         if not hasattr(cls, "contexts"):
             cls.contexts = []
-
+
         return cls.contexts
 
     @classmethod
@@ -38,45 +33,45 @@ def get_context(cls):
         except IndexError:
             raise TypeError("No context on context stack")
 
-def modelcontext(model):
+def modelcontext(model):
     if model is None:
-        return Model.get_context()
+        return Model.get_context()
     return model
 
 class Model(Context):
     """
-    Base class for encapsulation of the variables and
+    Base class for encapsulation of the variables and
     likelihood factors of a model.
     """
 
     def __init__(self):
         self.vars = []
         self.factors = []
-
+
     @property
     def logp(model):
         """
         log-probability of the model
-
+
         Parameters
         ----------
-
-        model : Model
+
+        model : Model
 
         Returns
         -------
 
         logp : Theano scalar
-
+
         """
         return t.add(*map(t.sum, model.factors))
 
     @property
-    def logpc(model):
+    def logpc(model):
         """Compiled log probability density function"""
         return compilef(model.logp)
 
-    def dlogpc(model, vars = None):
+    def dlogpc(model, vars = None):
         """Compiled log probability density gradient function"""
         return compilef(gradient(model.logp, vars))
 
@@ -92,7 +87,7 @@ def test_point(self):
     @property
     def cont_vars(model):
         """All the continuous variables in the model"""
-        return typefilter(model.vars, continuous_types)
+        return typefilter(model.vars, continuous_types)
 
     """
     these functions add random variables
@@ -108,8 +103,8 @@ def Var(model, name, dist):
         model.factors.append(dist.logp(var))
         return var
 
-    def TransformedVar(model, name, dist, trans):
-        tvar = model.Var(trans.name + '_' + name, trans.apply(dist))
+    def TransformedVar(model, name, dist, trans):
+        tvar = model.Var(trans.name + '_' + name, trans.apply(dist))
 
         return named(trans.backward(tvar),name), tvar
 
@@ -118,26 +113,26 @@ def AddPotential(model, potential):
 
 
 def Point(*args,**kwargs):
-    """
-    Build a point. Uses same args as dict() does.
-    Filters out variables not in the model. All keys are strings.
+    """
+    Build a point. Uses same args as dict() does.
+    Filters out variables not in the model. All keys are strings.
 
     Parameters
     ----------
-    *args, **kwargs
+    *args, **kwargs
         arguments to build a dict
     """
     if 'model' in kwargs :
         model = kwargs['model']
         del kwargs['model']
-    else:
+    else:
         model = Model.get_context()
 
     d = dict(*args, **kwargs)
     varnames = map(str, model.vars)
-    return dict((str(k),np.array(v))
-                for (k,v) in d.iteritems()
-                if str(k) in varnames)
+    return dict((str(k),np.array(v))
+                for (k,v) in d.iteritems()
+                if str(k) in varnames)
 
 
 def compilef(outs, mode = None):
@@ -148,42 +143,42 @@ def compilef(outs, mode = None):
     ----------
     outs : Theano variable or iterable of Theano variables
     mode : Theano compilation mode
-
+
     Returns
     -------
     Compiled Theano function
     """
     return PointFunc(
-        function(inputvars(outs), outs,
-                 allow_input_downcast = True,
+        function(inputvars(outs), outs,
+                 allow_input_downcast = True,
                  on_unused_input = 'ignore',
                  mode = mode)
     )
 
 def named(var, name):
-    var.name = name
+    var.name = name
     return var
 
 def as_iterargs(data):
-    if isinstance(data, tuple):
+    if isinstance(data, tuple):
         return data
     if hasattr(data, 'columns'): #data frames
-        return [np.asarray(data[c]) for c in data.columns]
+        return [np.asarray(data[c]) for c in data.columns]
     else:
         return [data]
 
-def makeiter(a):
+def makeiter(a):
     if isinstance(a, (tuple, list)):
         return a
     else :
         return [a]
 
-def inputvars(a):
+def inputvars(a):
     return [v for v in inputs(makeiter(a)) if isinstance(v, t.TensorVariable)]
 
 """
-Theano derivative functions
-"""
+Theano derivative functions
+"""
 
 def cont_inputs(f):
     return typefilter(inputvars(f), continuous_types)
@@ -193,7 +188,7 @@ def gradient1(f, v):
     return t.flatten(t.grad(f, v, disconnected_inputs='warn'))
 
 def gradient(f, vars = None):
-    if not vars:
+    if not vars:
         vars = cont_inputs(f)
 
     return t.concatenate([gradient1(f, v) for v in vars], axis = 0)
@@ -202,14 +197,14 @@ def jacobian1(f, v):
     """jacobian of f wrt v"""
     f = t.flatten(f)
     idx = t.arange(f.shape[0])
-
-    def grad_i(i):
+
+    def grad_i(i):
         return gradient1(f[i], v)
 
     return theano.map(grad_i, idx)[0]
 
 def jacobian(f, vars = None):
-    if not vars:
+    if not vars:
         vars = cont_inputs(f)
 
     return t.concatenate([jacobian1(f, v) for v in vars], axis = 1)
@@ -218,6 +213,6 @@ def hessian(f, vars = None):
     return -jacobian(gradient(f, vars), vars)
 
 
-#theano stuff
+#theano stuff
 theano.config.warn.sum_div_dimshuffle_bug = False
 theano.config.compute_test_value = 'raise'
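
The Point helper in this diff filters arbitrary dict arguments down to the model's variable names, stringifying each key and wrapping each value in an array. A standalone sketch of that filtering logic (illustrative only, not part of the commit; point_like is a made-up name, and it uses Python 3's items() where the diff uses iteritems()):

import numpy as np

def point_like(d, varnames):
    # Keep only entries whose stringified key names a model variable,
    # coercing each value to a numpy array, as Point does above.
    return dict((str(k), np.array(v)) for k, v in d.items() if str(k) in varnames)

print(point_like({'x': 0.5, 'junk': 99}, ['x']))  # {'x': array(0.5)}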

pymc/step_methods/arraystep.py

Lines changed: 13 additions & 7 deletions
@@ -1,32 +1,38 @@
-from ..core import *
-import numpy as np
+from ..core import *
+import numpy as np
 from numpy.random import uniform
 from numpy import log , isfinite
 
 __all__ = ['ArrayStep', 'metrop_select', 'SamplerHist']
 
+# TODO Add docstrings to ArrayStep
 class ArrayStep(object):
     def __init__(self, vars, fs, allvars = False):
         self.ordering = ArrayOrdering(vars)
         self.fs = fs
         self.allvars = allvars
-
+
     def step(self, point):
         bij = DictToArrayBijection(self.ordering, point)
-
-        inputs = map(bij.mapf, self.fs)
+
+        inputs = map(bij.mapf, self.fs)
         if self.allvars:
             inputs += [point]
 
         apoint = self.astep(bij.map(point), *inputs)
         return bij.rmap(apoint)
 
 def metrop_select(mr, q, q0):
+    # Perform rejection/acceptance step for Metropolis class samplers
+
+    # Compare acceptance ratio to uniform random number
     if isfinite(mr) and log(uniform()) < mr:
+        # Accept proposed value
         return q
-    else:
+    else:
+        # Reject proposed value
         return q0
-
+
 
 class SamplerHist(object):
     def __init__(self):
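
The comments added to metrop_select document the accept/reject rule: mr is a log acceptance ratio, so the proposal q is accepted with probability min(1, exp(mr)). A self-contained check of that behaviour (illustrative only, not part of the commit):

import numpy as np
from numpy.random import uniform

def metrop_select(mr, q, q0):
    # Accept q with probability min(1, exp(mr)), where mr is a log ratio
    if np.isfinite(mr) and np.log(uniform()) < mr:
        return q
    else:
        return q0

# With mr = log(0.5), q should be accepted about half the time
accepts = sum(metrop_select(np.log(0.5), 1, 0) for _ in range(100000))
print(accepts / 100000.0)  # ~0.5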

pymc/step_methods/metropolis.py

Lines changed: 47 additions & 15 deletions
@@ -1,31 +1,63 @@
-'''
-Created on Mar 7, 2011
-
-@author: johnsalvatier
-'''
 from numpy.linalg import cholesky
 
 from ..core import *
 from quadpotential import quad_potential
 
 from arraystep import *
+from numpy.random import normal, standard_cauchy, standard_exponential
 
 __all__ = ['Metropolis']
 
-# TODO Implement tuning for Metropolis step
+# Available proposal distributions for Metropolis
+def normal_proposal(s, n):
+    return lambda: normal(scale=s, size=n)
+
+def cauchy_proposal(s, n):
+    return lambda: standard_cauchy(size=n) * s
+
+def laplace_proposal(s, n):
+    return lambda: (standard_exponential(size=n) - standard_exponential(size=n)) * s
+
+
 class Metropolis(ArrayStep):
-    def __init__(self, vars, C, scaling=.25, is_cov = False, model = None):
+    """
+    Metropolis-Hastings sampling step
+
+    Parameters
+    ----------
+    vars : list
+        List of variables for sampler
+    S : standard deviation or covariance matrix
+        Some measure of variance to parameterize proposal distribution
+    proposal_dist : function
+        Function that returns zero-mean deviates when parameterized with
+        S (and n). Defaults to quad_potential.
+    scaling : scalar or array
+        Initial scale factor for proposal. Defaults to 1.
+    tune : bool
+        Flag for tuning. Defaults to True.
+    model : PyMC Model
+        Optional model for sampling step. Defaults to None (taken from context).
+
+    """
+    def __init__(self, vars, S, proposal_dist=quad_potential, scaling=1.,
+                 tune=True, model=None):
+
         model = modelcontext(model)
 
-        self.potential = quad_potential(C, not is_cov)
+        try:
+            self.proposal_dist = proposal_dist(S, n=len(vars))
+        except TypeError:
+            # quadpotential does not require n
+            self.proposal_dist = proposal_dist(S)
         self.scaling = scaling
+        self.tune = tune
         super(Metropolis,self).__init__(vars, [model.logpc])
-
+
     def astep(self, q0, logp):
 
-        delta = self.potential.random() * self.scaling
-
-        q = q0 + delta
-
-        return metrop_select(logp(q) - logp(q0),
-                             q, q0)
+        delta = self.proposal_dist() * self.scaling
+
+        q = q0 + delta
+
+        return metrop_select(logp(q) - logp(q0), q, q0)
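
All three built-in proposals above are symmetric (as is the quad_potential default), which is why astep can use the plain Metropolis ratio logp(q) - logp(q0) with no Hastings correction for the proposal density. Any factory with the same (S, n) signature returning a zero-mean sampler can be swapped in; here is a hedged sketch of a user-defined proposal (uniform_proposal and the commented Metropolis call are illustrative, not part of the commit):

from numpy.random import uniform

def uniform_proposal(s, n):
    # Zero-mean, symmetric uniform deviates on [-s, s], matching the
    # factory signature Metropolis expects for proposal_dist
    return lambda: uniform(low=-s, high=s, size=n)

draw = uniform_proposal(1.0, 3)
print(draw())  # three deviates in [-1, 1]

# Within an active model context (assumed), this would plug in as:
# step = Metropolis(model.vars, S=1.0, proposal_dist=uniform_proposal)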
