Skip to content

Commit ab437e4

Browse files
ColCarroll authored and springcoil committed
Bounded now works with array bounds (#1604)
* Bounded now works with array bounds
* Bound is now a function returning a type
1 parent d9e57e8 commit ab437e4

File tree

2 files changed

+70
-61
lines changed

2 files changed

+70
-61
lines changed

pymc3/distributions/continuous.py

Lines changed: 48 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -1114,31 +1114,55 @@ class Bounded(Continuous):
11141114

11151115
def __init__(self, distribution, lower, upper, transform='infer', *args, **kwargs):
    """Wrap `distribution` (a pymc3 distribution class) in lower/upper bounds.

    `transform='infer'` picks an interval/lowerbound/upperbound transform
    (and an in-support test value) from the bounds automatically.
    """
    # Instantiate the wrapped, unbounded distribution and mirror its
    # attributes onto this instance so Bounded quacks like it.
    self.dist = distribution.dist(*args, **kwargs)
    self.__dict__.update(self.dist.__dict__)
    # NOTE: captures *every* local name at this point (distribution, lower,
    # upper, transform, args, kwargs, even self) as attributes — so this
    # must run before any further locals are introduced.
    self.__dict__.update(locals())

    if hasattr(self.dist, 'mode'):
        self.mode = self.dist.mode

    # Infer a transform + test value from the bounds unless the caller
    # supplied an explicit transform.
    if transform == 'infer':
        self.transform, self.testval = self._infer(lower, upper)
11251125

1126-
default = self.dist.default()
1127-
1128-
if not np.isinf(lower) and not np.isinf(upper):
1129-
self.transform = transforms.interval(lower, upper)
1130-
if default <= lower or default >= upper:
1131-
self.testval = 0.5 * (upper + lower)
1126+
def _infer(self, lower, upper):
    """Infer proper transforms for the variable, and adjust the test value.

    In particular, this deals with the case where `lower` or `upper` may be
    +/-inf, an `ndarray`, or a `theano.tensor.TensorVariable` (for symbolic
    bounds the tag's test value is used for numeric reasoning).

    Returns
    -------
    (transform, testval) : the transform to apply (None when both bounds are
        infinite) and a test value lying strictly inside the bounds.
    """
    # Reduce symbolic bounds to concrete numbers via their test values.
    if isinstance(upper, tt.TensorVariable):
        _upper = upper.tag.test_value
    else:
        _upper = upper
    if isinstance(lower, tt.TensorVariable):
        _lower = lower.tag.test_value
    else:
        _lower = lower

    testval = self.dist.default()
    if not self._isinf(_lower) and not self._isinf(_upper):
        transform = transforms.interval(lower, upper)
        # np.any works for scalars and arrays alike — the previous
        # `(cond).any()` form failed for plain Python scalar bounds,
        # since a bare bool has no .any().
        if np.any(testval <= _lower) or np.any(testval >= _upper):
            testval = 0.5 * (_upper + _lower)
    elif not self._isinf(_lower) and self._isinf(_upper):
        transform = transforms.lowerbound(lower)
        if np.any(testval <= _lower):
            # Use the numeric bound `_lower`, not the possibly-symbolic
            # `lower`, so the test value stays concrete — consistent with
            # the `_upper - 1` in the upper-bound branch below.
            testval = _lower + 1
    elif self._isinf(_lower) and not self._isinf(_upper):
        transform = transforms.upperbound(upper)
        if np.any(testval >= _upper):
            testval = _upper - 1
    else:
        transform = None
    return transform, testval
11371157

1138-
if np.isinf(lower) and not np.isinf(upper):
1139-
self.transform = transforms.upperbound(upper)
1140-
if default >= upper:
1141-
self.testval = upper - 1
1158+
def _isinf(self, value):
1159+
"""Checks whether the value is +/-inf, or else returns 0 (even if an ndarray with
1160+
entries that are +/-inf)
1161+
"""
1162+
try:
1163+
return bool(np.isinf(value))
1164+
except ValueError:
1165+
return False
11421166

11431167
def _random(self, lower, upper, point=None, size=None):
11441168
samples = np.zeros(size).flatten()
@@ -1165,38 +1189,17 @@ def logp(self, value):
11651189
value >= self.lower, value <= self.upper)
11661190

11671191

1168-
class Bound(object):
1169-
R"""
1170-
Creates a new upper, lower or upper+lower bounded distribution
1171-
1172-
Parameters
1173-
----------
1174-
distribution : pymc3 distribution
1175-
Distribution to be transformed into a bounded distribution
1176-
lower : float (optional)
1177-
Lower bound of the distribution
1178-
upper : float (optional)
1179-
1180-
Example
1181-
-------
1182-
boundedNormal = pymc3.Bound(pymc3.Normal, lower=0.0)
1183-
par = boundedNormal(mu=0.0, sd=1.0, testval=1.0)
1184-
"""
1185-
1186-
def __init__(self, distribution, lower=-np.inf, upper=np.inf):
1187-
self.distribution = distribution
1188-
self.lower = lower
1189-
self.upper = upper
1190-
1191-
def __call__(self, *args, **kwargs):
1192-
first, args = args[0], args[1:]
1192+
def Bound(distribution, lower=-np.inf, upper=np.inf):
    R"""
    Create an upper-, lower-, or upper+lower-bounded version of a distribution.

    Parameters
    ----------
    distribution : pymc3 distribution
        Distribution to be transformed into a bounded distribution.
    lower : float or array-like, optional
        Lower bound of the distribution (default -inf: unbounded below).
    upper : float or array-like, optional
        Upper bound of the distribution (default +inf: unbounded above).

    Returns
    -------
    A *class* (not an instance); call it like the original distribution.

    Example
    -------
    boundedNormal = pymc3.Bound(pymc3.Normal, lower=0.0)
    par = boundedNormal('par', mu=0.0, sd=1.0, testval=1.0)
    """
    class _BoundedDist(Bounded):
        def __init__(self, *args, **kwargs):
            # First positional argument is the variable name; the rest are
            # forwarded to the wrapped distribution.
            first, args = args[0], args[1:]
            # Fixed: super() expects (class, instance) — the original
            # `super(self, _BoundedDist)` had them reversed and raised
            # TypeError whenever __init__ actually ran.
            super(_BoundedDist, self).__init__(first, distribution, lower,
                                               upper, *args, **kwargs)

        @classmethod
        def dist(cls, *args, **kwargs):
            return Bounded.dist(distribution, lower, upper, *args, **kwargs)

    return _BoundedDist
12001203

12011204

12021205
def StudentTpos(*args, **kwargs):

pymc3/examples/garch_example.py

Lines changed: 22 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
from pymc3 import Normal, sample, Model, Bound
1+
from pymc3 import Normal, sample, Model, Bound, summary
22
import theano.tensor as tt
33
import numpy as np
44

@@ -29,29 +29,35 @@
2929
r ~ normal(mu,sigma);
3030
}
3131
"""
32-
J = 8
33-
r = np.array([28, 8, -3, 7, -1, 1, 18, 12])
34-
sigma1 = np.array([15, 10, 16, 11, 9, 11, 10, 18])
35-
alpha0 = np.array([10, 10, 16, 8, 9, 11, 12, 18])
3632

37-
with Model() as garch:
38-
alpha1 = Normal('alpha1', 0, 1, shape=J)
39-
BoundedNormal = Bound(Normal, upper=(1 - alpha1))
40-
beta1 = BoundedNormal('beta1', 0, sd=1e6)
41-
mu = Normal('mu', 0, sd=1e6)
4233

43-
theta = tt.sqrt(alpha0 + alpha1 * tt.pow(r - mu, 2) +
44-
beta1 * tt.pow(sigma1, 2))
34+
def get_garch_model():
    """Build and return the GARCH(1,1) eight-schools-style pymc3 Model.

    Priors: alpha1 ~ N(0, 1) elementwise; beta1 ~ N(0, 1e6) bounded above
    by (1 - alpha1); mu ~ N(0, 1e6). Observed returns enter through a
    Normal likelihood with GARCH volatility theta.
    """
    observed_r = np.array([28, 8, -3, 7, -1, 1, 18, 12])
    prev_sigma = np.array([15, 10, 16, 11, 9, 11, 10, 18])
    base_var = np.array([10, 10, 16, 8, 9, 11, 12, 18])
    dims = observed_r.shape

    with Model() as garch:
        alpha1 = Normal('alpha1', mu=np.zeros(shape=dims),
                        sd=np.ones(shape=dims), shape=dims)
        # beta1 must stay below 1 - alpha1 (an array/symbolic upper bound).
        BoundedNormal = Bound(Normal, upper=(1 - alpha1))
        beta1 = BoundedNormal('beta1',
                              mu=np.zeros(shape=dims),
                              sd=1e6 * np.ones(shape=dims),
                              shape=dims)
        mu = Normal('mu', mu=np.zeros(shape=dims),
                    sd=1e6 * np.ones(shape=dims), shape=dims)
        theta = tt.sqrt(base_var + alpha1 * tt.pow(observed_r - mu, 2) +
                        beta1 * tt.pow(prev_sigma, 2))
        Normal('obs', mu, sd=theta, observed=observed_r)
    return garch
4752

4853

4954
def run(n=1000):
    """Sample the GARCH model and return the trace.

    Passing n == "short" draws only 50 samples (used by the test-suite).
    """
    draws = 50 if n == "short" else n
    with get_garch_model():
        trace = sample(draws, n_init=10000)
    return trace
5460

5561

5662
if __name__ == '__main__':
    # Sample with defaults and print a summary table of the trace.
    trace = run()
    print(summary(trace))

0 commit comments

Comments (0)