@@ -125,22 +125,24 @@ class GaussianRandomWalkRV(RandomVariable):
     dtype = "floatX"
     _print_name = ("GaussianRandomWalk", "\\operatorname{GaussianRandomWalk}")
 
-    def make_node(self, rng, size, dtype, mu, sigma, init, steps):
+    def make_node(self, rng, size, dtype, mu, sigma, init_dist, steps):
         steps = at.as_tensor_variable(steps)
         if not steps.ndim == 0 or not steps.dtype.startswith("int"):
             raise ValueError("steps must be an integer scalar (ndim=0).")
 
         mu = at.as_tensor_variable(mu)
         sigma = at.as_tensor_variable(sigma)
-        init = at.as_tensor_variable(init)
+        init_dist = at.as_tensor_variable(init_dist)
 
         # Resize init distribution
         size = normalize_size_param(size)
         # If not explicit, size is determined by the shapes of mu, sigma, and init
-        init_size = size if not rv_size_is_none(size) else at.broadcast_shape(mu, sigma, init)
-        init = change_rv_size(init, init_size)
+        init_dist_size = (
+            size if not rv_size_is_none(size) else at.broadcast_shape(mu, sigma, init_dist)
+        )
+        init_dist = change_rv_size(init_dist, init_dist_size)
 
-        return super().make_node(rng, size, dtype, mu, sigma, init, steps)
+        return super().make_node(rng, size, dtype, mu, sigma, init_dist, steps)
 
     def _supp_shape_from_params(self, dist_params, reop_param_idx=0, param_shapes=None):
         steps = dist_params[3]
@@ -153,7 +155,7 @@ def rng_fn(
         rng: np.random.RandomState,
         mu: Union[np.ndarray, float],
         sigma: Union[np.ndarray, float],
-        init: float,
+        init_dist: Union[np.ndarray, float],
         steps: int,
         size: Tuple[int],
     ) -> np.ndarray:
@@ -170,16 +172,16 @@ def rng_fn(
         ----------
         rng: np.random.RandomState
             Numpy random number generator
-        mu: array_like
+        mu: array_like of float
             Random walk mean
-        sigma: np.ndarray
+        sigma: array_like of float
             Standard deviation of innovation (sigma > 0)
-        init: float
+        init_dist: array_like of float
             Initialization value for GaussianRandomWalk
         steps: int
             Length of random walk, must be greater than 1. Returned array will be of size+1 to
             account as first value is initial value
-        size: int
+        size: tuple of int
             The number of Random Walk time series generated
 
         Returns
@@ -196,7 +198,7 @@ def rng_fn(
         bcast_shape = np.broadcast_shapes(
             np.asarray(mu).shape,
             np.asarray(sigma).shape,
-            np.asarray(init).shape,
+            np.asarray(init_dist).shape,
         )
         dist_shape = (*bcast_shape, int(steps))
 
@@ -207,7 +209,7 @@ def rng_fn(
         # Add one dimension to the right, so that mu and sigma broadcast safely along
         # the steps dimension
         innovations = rng.normal(loc=mu[..., None], scale=sigma[..., None], size=dist_shape)
-        grw = np.concatenate([init[..., None], innovations], axis=-1)
+        grw = np.concatenate([init_dist[..., None], innovations], axis=-1)
         return np.cumsum(grw, axis=-1)
 
 
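The hunks above cover the NumPy-level forward simulation: broadcast mu, sigma and the initial value, draw `steps` Normal innovations, prepend the initial value, and cumulatively sum along the last axis. A minimal standalone sketch of that logic (illustrative values only; the standard-normal draw below stands in for a draw from init_dist):

import numpy as np

rng = np.random.default_rng(42)
mu, sigma, steps = 0.5, 1.0, 10

init_draw = rng.normal(0.0, 1.0)  # stand-in for one draw from init_dist
innovations = rng.normal(loc=mu, scale=sigma, size=steps)  # Normal(mu, sigma) increments
walk = np.cumsum(np.concatenate([[init_draw], innovations]))  # x_0, x_1, ..., x_steps

print(walk.shape)  # (steps + 1,)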
@@ -223,7 +225,7 @@ class GaussianRandomWalk(distribution.Continuous):
        innovation drift, defaults to 0.0
    sigma : tensor_like of float, optional
        sigma > 0, innovation standard deviation, defaults to 1.0
-    init : unnamed distribution
+    init_dist : unnamed distribution
        Univariate distribution of the initial value, created with the `.dist()` API.
        Defaults to a unit Normal.
 
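A minimal usage sketch for the renamed keyword, assuming a PyMC version that includes this change (the data and parameter values below are made up):

import numpy as np
import pymc as pm

y = np.cumsum(np.random.normal(size=50))  # toy random-walk observations

with pm.Model():
    # Unnamed distribution created with .dist(), as the docstring requires
    init_dist = pm.Normal.dist(mu=0.0, sigma=5.0)
    grw = pm.GaussianRandomWalk(
        "grw", mu=0.0, sigma=1.0, init_dist=init_dist, steps=49, observed=y
    )

With steps=49 the variable has length 50, since the first entry is the initial value.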
@@ -248,7 +250,7 @@ def __new__(cls, *args, steps=None, **kwargs):
 
     @classmethod
     def dist(
-        cls, mu=0.0, sigma=1.0, *, init=None, steps=None, size=None, **kwargs
+        cls, mu=0.0, sigma=1.0, *, init_dist=None, steps=None, size=None, **kwargs
     ) -> at.TensorVariable:
 
         mu = at.as_tensor_variable(floatX(mu))
@@ -263,27 +265,34 @@ def dist(
             raise ValueError("Must specify steps or shape parameter")
         steps = at.as_tensor_variable(intX(steps))
 
+        if "init" in kwargs:
+            warnings.warn(
+                "init parameter is now called init_dist. Using init will raise an error in a future release.",
+                FutureWarning,
+            )
+            init_dist = kwargs.pop("init")
+
         # If no scalar distribution is passed then initialize with a Normal of same mu and sigma
-        if init is None:
-            init = Normal.dist(0, 1)
+        if init_dist is None:
+            init_dist = Normal.dist(0, 1)
         else:
             if not (
-                isinstance(init, at.TensorVariable)
-                and init.owner is not None
-                and isinstance(init.owner.op, RandomVariable)
-                and init.owner.op.ndim_supp == 0
+                isinstance(init_dist, at.TensorVariable)
+                and init_dist.owner is not None
+                and isinstance(init_dist.owner.op, RandomVariable)
+                and init_dist.owner.op.ndim_supp == 0
             ):
                 raise TypeError("init must be a univariate distribution variable")
-            check_dist_not_registered(init)
+            check_dist_not_registered(init_dist)
 
         # Ignores logprob of init var because that's accounted for in the logp method
-        init = ignore_logprob(init)
+        init_dist = ignore_logprob(init_dist)
 
-        return super().dist([mu, sigma, init, steps], size=size, **kwargs)
+        return super().dist([mu, sigma, init_dist, steps], size=size, **kwargs)
 
-    def moment(rv, size, mu, sigma, init, steps):
+    def moment(rv, size, mu, sigma, init_dist, steps):
         grw_moment = at.zeros_like(rv)
-        grw_moment = at.set_subtensor(grw_moment[..., 0], moment(init))
+        grw_moment = at.set_subtensor(grw_moment[..., 0], moment(init_dist))
         # Add one dimension to the right, so that mu broadcasts safely along the steps
         # dimension
         grw_moment = at.set_subtensor(grw_moment[..., 1:], mu[..., None])
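The shim added at the top of dist() keeps old call sites working while steering users to the new keyword. A sketch of the behaviour it implies (assuming a PyMC build that includes this change):

import warnings
import pymc as pm

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # Deprecated keyword: redirected to init_dist and flagged with a FutureWarning
    grw = pm.GaussianRandomWalk.dist(mu=0.0, sigma=1.0, init=pm.Normal.dist(0, 1), steps=10)

assert any(issubclass(w.category, FutureWarning) for w in caught)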
@@ -293,13 +302,13 @@ def logp(
         value: at.Variable,
         mu: at.Variable,
         sigma: at.Variable,
-        init: at.Variable,
+        init_dist: at.Variable,
         steps: at.Variable,
     ) -> at.TensorVariable:
         """Calculate log-probability of Gaussian Random Walk distribution at specified value."""
 
         # Calculate initialization logp
-        init_logp = logp(init, value[..., 0])
+        init_logp = logp(init_dist, value[..., 0])
 
         # Make time series stationary around the mean value
         stationary_series = value[..., 1:] - value[..., :-1]
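The logp hunk factorizes the walk into the initial value's log-density plus independent Normal(mu, sigma) log-densities of the first differences. A conceptual NumPy/SciPy sketch of that decomposition (not the library code; the unit-Normal init mirrors the default init_dist):

import numpy as np
from scipy import stats

def grw_logp(value, mu, sigma, init_mu=0.0, init_sigma=1.0):
    # log p(x_0) under the initial distribution (unit Normal by default)
    init_logp = stats.norm.logpdf(value[..., 0], init_mu, init_sigma)
    # log p(x_t - x_{t-1}) under Normal(mu, sigma), summed over the steps
    increments = value[..., 1:] - value[..., :-1]
    innovation_logp = stats.norm.logpdf(increments, loc=mu, scale=sigma).sum(axis=-1)
    return init_logp + innovation_logp

x = np.cumsum(np.random.normal(0.2, 1.0, size=11))
print(grw_logp(x, mu=0.2, sigma=1.0))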
@@ -429,7 +438,7 @@ def dist(
                 "init parameter is now called init_dist. Using init will raise an error in a future release.",
                 FutureWarning,
             )
-            init_dist = kwargs["init"]
+            init_dist = kwargs.pop("init")
 
         ar_order = cls._get_ar_order(rhos=rhos, constant=constant, ar_order=ar_order)
         steps = get_steps(steps=steps, shape=kwargs.get("shape", None), step_shape_offset=ar_order)
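In the AR hunk, switching from kwargs["init"] to kwargs.pop("init") removes the deprecated key once it has been handled, presumably so it is not forwarded with the remaining **kwargs. A tiny self-contained sketch of why popping matters (hypothetical helper names, not from this diff):

def downstream(**kwargs):
    # Stand-in for a later call that receives the leftover **kwargs
    allowed = {"size", "shape"}
    unknown = set(kwargs) - allowed
    if unknown:
        raise TypeError(f"unexpected keyword arguments: {sorted(unknown)}")

def dist_shim(**kwargs):
    init_dist = kwargs.pop("init", None)  # pop: 'init' no longer lingers in kwargs
    downstream(**kwargs)
    return init_dist

dist_shim(init="legacy-value", size=3)  # fine; plain indexing would leave 'init' behind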