@@ -78,7 +78,7 @@ class AR(distribution.Continuous):
     Parameters
     ----------
     rho : tensor
-        Vector of autoregressive coefficients.
+        Tensor of autoregressive coefficients. The first dimension is the p lag.
     sd : float
         Standard deviation of innovation (sd > 0). (only required if tau is not specified)
     tau : float
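For reference, a minimal usage sketch of the generalized AR(p) distribution after this change. It assumes the class is exposed as `pm.AR`; the model, variable names, and simulated data are illustrative only, not part of the patch:

```python
import numpy as np
import pymc3 as pm

# Simulate an AR(2) series (illustrative data only).
T, true_rho = 200, np.array([0.6, -0.2])
y = np.zeros(T)
for t in range(2, T):
    y[t] = true_rho[0] * y[t - 1] + true_rho[1] * y[t - 2] + np.random.randn()

with pm.Model():
    # rho is now a tensor whose first dimension indexes the p lags;
    # per the new __init__, a plain Python list of scalars is also accepted.
    # With constant=True, rho[0] would act as the intercept and the
    # remaining entries as the lag coefficients.
    rho = pm.Normal('rho', mu=0., sd=1., shape=2)
    sd = pm.HalfNormal('sd', sd=1.)
    pm.AR('obs', rho=rho, sd=sd, observed=y)
```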
@@ -100,29 +100,30 @@ def __init__(self, rho, sd=None, tau=None,
 
         self.mean = tt.as_tensor_variable(0.)
 
-        rho = tt.as_tensor_variable(rho, ndim=1)
+        if isinstance(rho, list):
+            p = len(rho)
+        else:
+            try:
+                p = rho.shape.tag.test_value[0]
+            except AttributeError:
+                p = rho.shape[0]
+
         if constant:
-            self.p = rho.shape[0] - 1
+            self.p = p - 1
         else:
-            self.p = rho.shape[0]
+            self.p = p
 
         self.constant = constant
-        self.rho = rho
+        self.rho = rho = tt.as_tensor_variable(rho)
         self.init = init
 
     def logp(self, value):
-
-        y = value[self.p:]
-        results, _ = scan(lambda l, obs, p: obs[p - l:-l],
-                          outputs_info=None, sequences=[tt.arange(1, self.p + 1)],
-                          non_sequences=[value, self.p])
-        x = tt.stack(results)
-
         if self.constant:
-            y = y - self.rho[0]
-            eps = y - self.rho[1:].dot(x)
+            x = tt.add(*[self.rho[i + 1] * value[self.p - (i + 1):-(i + 1)] for i in range(self.p)])
+            eps = value[self.p:] - self.rho[0] - x
         else:
-            eps = y - self.rho.dot(x)
+            x = tt.add(*[self.rho[i] * value[self.p - (i + 1):-(i + 1)] for i in range(self.p)])
+            eps = value[self.p:] - x
 
         innov_like = Normal.dist(mu=0.0, tau=self.tau).logp(eps)
         init_like = self.init.logp(value[:self.p])
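The logp rewrite replaces the theano `scan` over lags with one shifted slice per lag, summed via `tt.add`. A minimal NumPy sketch (plain arrays standing in for the symbolic tensors; variable names are illustrative) showing that the slicing yields the same residuals as the old stack-and-dot construction:

```python
import numpy as np

rho = np.array([0.6, -0.2])    # AR(2) coefficients; rho[i] multiplies lag i + 1
value = np.random.randn(50)    # observed series
p = len(rho)

# New construction: one shifted slice per lag, summed elementwise.
x_new = sum(rho[i] * value[p - (i + 1):-(i + 1)] for i in range(p))
eps_new = value[p:] - x_new

# Old construction: stack the lagged slices and take a dot product.
lags = np.stack([value[p - l:-l] for l in range(1, p + 1)])
eps_old = value[p:] - rho.dot(lags)

assert np.allclose(eps_new, eps_old)
```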