@@ -1,10 +1,12 @@
+from scipy import stats
 import theano.tensor as tt
 from theano import scan

 from pymc3.util import get_variable_name
 from .continuous import get_tau_sigma, Normal, Flat
 from . import multivariate
 from . import distribution
+from .shape_utils import to_tuple


 __all__ = [
@@ -166,17 +168,22 @@ def logp(self, value):


 class GaussianRandomWalk(distribution.Continuous):
-    R"""
-    Random Walk with Normal innovations
+    R"""Random Walk with Normal innovations

     Parameters
     ----------
     mu: tensor
         innovation drift, defaults to 0.0
+        For vector valued mu, first dimension must match shape of the random walk, and
+        the first element will be discarded (since there is no innovation in the first timestep)
     sigma : tensor
         sigma > 0, innovation standard deviation (only required if tau is not specified)
+        For vector valued sigma, first dimension must match shape of the random walk, and
+        the first element will be discarded (since there is no innovation in the first timestep)
     tau : tensor
         tau > 0, innovation precision (only required if sigma is not specified)
+        For vector valued tau, first dimension must match shape of the random walk, and
+        the first element will be discarded (since there is no innovation in the first timestep)
     init : distribution
         distribution for initial value (Defaults to Flat())
     """
@@ -187,9 +194,14 @@ def __init__(self, tau=None, init=Flat.dist(), sigma=None, mu=0.,
         if sd is not None:
             sigma = sd
         tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)
-        self.tau = tau = tt.as_tensor_variable(tau)
-        self.sigma = self.sd = sigma = tt.as_tensor_variable(sigma)
-        self.mu = mu = tt.as_tensor_variable(mu)
+        self.tau = tt.as_tensor_variable(tau)
+        sigma = tt.as_tensor_variable(sigma)
+        if sigma.ndim > 0:
+            sigma = sigma[:-1]
+        self.sigma = self.sd = sigma
+        self.mu = tt.as_tensor_variable(mu)
+        if self.mu.ndim > 0:
+            self.mu = self.mu[:-1]
         self.init = init
         self.mean = tt.as_tensor_variable(0.)

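A short shape sketch (not from the diff) of why __init__ trims vector-valued parameters: a walk of length N has only N - 1 innovations, so a per-timestep mu or sigma of length N is cut down by one element, mirroring the mu[:-1] and sigma[:-1] slices above. N = 5 here is arbitrary.

import numpy as np

N = 5
mu = np.arange(N, dtype=float)   # per-timestep drift, length N
mu_innov = mu[:-1]               # trimmed to length N - 1, as stored by __init__
assert mu_innov.shape == (N - 1,)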
@@ -206,15 +218,41 @@ def logp(self, x):
         -------
         TensorVariable
         """
-        sigma = self.sigma
-        mu = self.mu
-        init = self.init
+        if x.ndim > 0:
+            x_im1 = x[:-1]
+            x_i = x[1:]

-        x_im1 = x[:-1]
-        x_i = x[1:]
+            sigma = self.sigma
+            mu = self.mu
+
+            innov_like = Normal.dist(mu=x_im1 + mu, sigma=sigma).logp(x_i)
+            return self.init.logp(x[0]) + tt.sum(innov_like)
+        return self.init.logp(x)
+
+    def random(self, point=None, size=None):
+        """Draw random values from GaussianRandomWalk.

-        innov_like = Normal.dist(mu=x_im1 + mu, sigma=sigma).logp(x_i)
-        return init.logp(x[0]) + tt.sum(innov_like)
+        Parameters
+        ----------
+        point : dict, optional
+            Dict of variable values on which random values are to be
+            conditioned (uses default point if not specified).
+        size : int, optional
+            Desired size of random sample (returns one sample if not
+            specified).
+
+        Returns
+        -------
+        array
+        """
+        sigma, mu = distribution.draw_values([self.sigma, self.mu], point=point, size=size)
+        return distribution.generate_samples(self._random, sigma=sigma, mu=mu, size=size,
+                                             dist_shape=self.shape)
+
+    def _random(self, sigma, mu, size):
+        """Implement a Gaussian random walk as a cumulative sum of normals."""
+        rv = stats.norm(mu, sigma)
+        return rv.rvs(size).cumsum(axis=0)

     def _repr_latex_(self, name=None, dist=None):
         if dist is None:
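A scipy-only sketch (not part of the diff) of what _random computes for a single chain: draw iid Normal innovations and accumulate them. The length of 50 and the zero drift are arbitrary choices.

from scipy import stats

# One Gaussian random walk of length 50 with zero drift and unit
# innovation scale, built the way _random does it:
# rv.rvs(size).cumsum(axis=0).
innovations = stats.norm(0.0, 1.0).rvs(50)
walk = innovations.cumsum(axis=0)
assert walk.shape == (50,)

Inside a model, random() first resolves sigma and mu through distribution.draw_values, so they can themselves be random variables conditioned on point, and then hands _random to distribution.generate_samples along with the distribution's shape.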