1
1
import theano.tensor as tt
from theano import scan

from .multivariate import get_tau_cov, MvNormal, MvStudentT
from .continuous import Normal, Flat
from .distribution import Continuous

__all__ = [
    'AR1',
    'GaussianRandomWalk',
    'GARCH11',
    'EulerMaruyama',
    'MvGaussianRandomWalk',
    'MvStudentTRandomWalk'
]
8
16
9
17
10
18
class AR1 (Continuous ):
@@ -108,7 +116,8 @@ def __init__(self, omega=None, alpha_1=None, beta_1=None,
108
116
self .initial_vol = initial_vol
109
117
self .mean = 0
110
118
111
- def _get_volatility (self , x ):
119
+ def get_volatility (self , x ):
120
+ x = x [:- 1 ]
112
121
113
122
def volatility_update (x , vol , w , a , b ):
114
123
return tt .sqrt (w + a * tt .square (x ) + b * tt .square (vol ))
@@ -118,12 +127,11 @@ def volatility_update(x, vol, w, a, b):
118
127
outputs_info = [self .initial_vol ],
119
128
non_sequences = [self .omega , self .alpha_1 ,
120
129
self .beta_1 ])
121
- return vol
130
+ return tt . concatenate ( self . initial_vol , vol )
122
131
123
132
def logp (self , x ):
124
- vol = self ._get_volatility (x [:- 1 ])
125
- return (Normal .dist (0. , sd = self .initial_vol ).logp (x [0 ]) +
126
- tt .sum (Normal .dist (0 , sd = vol ).logp (x [1 :])))
133
+ vol = self .get_volatility (x )
134
+ return tt .sum (Normal .dist (0 , sd = vol ).logp (x ))
127
135
128
136
129
137
class EulerMaruyama (Continuous ):
@@ -151,3 +159,79 @@ def logp(self, x):
151
159
mu = xt + self .dt * f
152
160
sd = tt .sqrt (self .dt ) * g
153
161
return tt .sum (Normal .dist (mu = mu , sd = sd ).logp (x [1 :]))
162
+
163
+
164
class MvGaussianRandomWalk(Continuous):
    """
    Multivariate random walk whose innovations are multivariate Normal.

    Parameters
    ----------
    mu : tensor
        drift of each innovation step, defaults to 0.0
    cov : tensor
        positive-definite innovation covariance matrix
    tau : tensor
        positive-definite innovation precision matrix
        (alternative to specifying cov)
    init : distribution
        distribution of the initial value (defaults to Flat())
    """
    def __init__(self, mu=0., cov=None, tau=None, init=Flat.dist(),
                 *args, **kwargs):
        super(MvGaussianRandomWalk, self).__init__(*args, **kwargs)
        # get_tau_cov fills in whichever of tau/cov was not supplied.
        self.tau, self.cov = get_tau_cov(mu, tau=tau, cov=cov)
        self.mu = mu
        self.init = init
        self.mean = 0.

    def logp(self, x):
        """Initial-value log-density plus the sum of the step densities."""
        previous = x[:-1]
        current = x[1:]
        # Each step satisfies x[i] ~ MvNormal(x[i-1] + mu, tau).
        step_logps = MvNormal.dist(mu=previous + self.mu,
                                   tau=self.tau).logp(current)
        return self.init.logp(x[0]) + tt.sum(step_logps)
201
class MvStudentTRandomWalk(Continuous):
    """
    Multivariate random walk whose innovations are multivariate Student-T.

    Parameters
    ----------
    nu : degrees of freedom
    mu : tensor
        drift of each innovation step, defaults to 0.0
    cov : tensor
        positive-definite innovation covariance matrix
    tau : tensor
        positive-definite innovation precision matrix
        (alternative to specifying cov)
    init : distribution
        distribution of the initial value (defaults to Flat())
    """
    def __init__(self, nu, mu=0., cov=None, tau=None, init=Flat.dist(),
                 *args, **kwargs):
        super(MvStudentTRandomWalk, self).__init__(*args, **kwargs)
        # get_tau_cov fills in whichever of tau/cov was not supplied.
        self.tau, self.cov = get_tau_cov(mu, tau=tau, cov=cov)
        self.mu = mu
        self.nu = nu
        self.init = init
        self.mean = 0.

    def logp(self, x):
        """Initial-value log-density plus the sum of the step densities."""
        previous, current = x[:-1], x[1:]
        # Each step satisfies x[i] ~ MvStudentT(nu, cov, x[i-1] + mu).
        step_logps = MvStudentT.dist(self.nu, self.cov,
                                     mu=previous + self.mu).logp(current)
        return self.init.logp(x[0]) + tt.sum(step_logps)
0 commit comments