Skip to content

refactored GARCH and added Mv(Gaussian/StudentT)RandomWalk #1603

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 6 commits into from
Dec 15, 2016
Merged
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
97 changes: 91 additions & 6 deletions pymc3/distributions/timeseries.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,18 @@
import theano.tensor as tt
from theano import scan

from .multivariate import get_tau_cov, MvNormal, MvStudentT
from .continuous import Normal, Flat
from .distribution import Continuous

__all__ = ['AR1', 'GaussianRandomWalk', 'GARCH11', 'EulerMaruyama']
__all__ = [
'AR1',
'GaussianRandomWalk',
'GARCH11',
'EulerMaruyama',
'MvGaussianRandomWalk',
'MvStudentTRandomWalk'
]


class AR1(Continuous):
Expand Down Expand Up @@ -108,7 +116,8 @@ def __init__(self, omega=None, alpha_1=None, beta_1=None,
self.initial_vol = initial_vol
self.mean = 0

def get_volatility(self, x):
    """Compute the conditional volatility series aligned with ``x``.

    The last observation is dropped before the scan because each
    volatility term is driven by the *previous* observation; the
    initial volatility is then prepended so the returned series has
    the same length as ``x``.
    """
    x = x[:-1]

    def volatility_update(x, vol, w, a, b):
        # GARCH(1,1) recursion:
        # sigma_t = sqrt(w + a * x_{t-1}^2 + b * sigma_{t-1}^2)
        return tt.sqrt(w + a * tt.square(x) + b * tt.square(vol))

    vol, _ = scan(fn=volatility_update,
                  sequences=[x],
                  outputs_info=[self.initial_vol],
                  non_sequences=[self.omega, self.alpha_1,
                                 self.beta_1])
    # tt.concatenate expects a sequence of tensors; wrap the scalar
    # initial volatility so it joins the scan output as a 1-vector.
    return tt.concatenate([[self.initial_vol], vol])

def logp(self, x):
    """Log-likelihood of the series ``x`` under the GARCH(1,1) model.

    Each observation is modeled as Normal with mean 0 and standard
    deviation equal to the recursively computed conditional
    volatility (which already includes the initial volatility, so it
    aligns one-to-one with ``x``).
    """
    vol = self.get_volatility(x)
    return tt.sum(Normal.dist(0, sd=vol).logp(x))


class EulerMaruyama(Continuous):
Expand Down Expand Up @@ -151,3 +159,80 @@ def logp(self, x):
mu = xt + self.dt * f
sd = tt.sqrt(self.dt) * g
return tt.sum(Normal.dist(mu=mu, sd=sd).logp(x[1:]))


class MvGaussianRandomWalk(Continuous):
    """
    Multivariate random walk with multivariate-normal innovations.

    Parameters
    ----------
    mu : tensor
        innovation drift, defaults to 0.0
    cov : tensor
        pos def matrix, innovation covariance matrix
        (alternative to specifying tau)
    tau : tensor
        tau > 0, innovation precision
        (alternative to specifying cov)
    init : distribution
        distribution for the initial value (defaults to Flat())
    """
    def __init__(self, mu=0., cov=None, tau=None, init=None,
                 *args, **kwargs):
        super(MvGaussianRandomWalk, self).__init__(*args, **kwargs)
        tau, cov = get_tau_cov(mu, tau=tau, cov=cov)
        self.tau = tau
        self.cov = cov
        self.mu = mu
        # NOTE: build the default per instance; a default of
        # `init=Flat.dist()` would be evaluated once at class
        # definition and shared by every instance.
        self.init = init if init is not None else Flat.dist()
        self.mean = 0.

    def logp(self, x):
        """Log-likelihood: the first row under ``init`` plus one
        MvNormal innovation term per subsequent step."""
        tau = self.tau
        mu = self.mu
        init = self.init

        x_im1 = x[:-1]  # previous positions
        x_i = x[1:]     # current positions

        # Each step is MvNormal centered at the previous position
        # (plus drift), with innovation precision tau.
        innov_like = MvNormal.dist(mu=x_im1 + mu, tau=tau).logp(x_i)
        return init.logp(x[0]) + tt.sum(innov_like)


class MvStudentTRandomWalk(Continuous):
    """
    Multivariate random walk with multivariate Student-T innovations.

    Parameters
    ----------
    nu : int
        degrees of freedom
    mu : tensor
        innovation drift, defaults to 0.0
    cov : tensor
        pos def matrix, innovation covariance matrix
        (alternative to specifying tau)
    tau : tensor
        tau > 0, innovation precision
        (alternative to specifying cov)
    init : distribution
        distribution for the initial value (defaults to Flat())
    """
    def __init__(self, nu, mu=0., cov=None, tau=None, init=None,
                 *args, **kwargs):
        super(MvStudentTRandomWalk, self).__init__(*args, **kwargs)
        tau, cov = get_tau_cov(mu, tau=tau, cov=cov)
        self.tau = tau
        self.cov = cov
        self.mu = mu
        self.nu = nu
        # NOTE: build the default per instance; a default of
        # `init=Flat.dist()` would be evaluated once at class
        # definition and shared by every instance.
        self.init = init if init is not None else Flat.dist()
        self.mean = 0.

    def logp(self, x):
        """Log-likelihood: the first row under ``init`` plus one
        MvStudentT innovation term per subsequent step."""
        cov = self.cov
        mu = self.mu
        nu = self.nu
        init = self.init

        x_im1 = x[:-1]  # previous positions
        x_i = x[1:]     # current positions

        # Each step is MvStudentT centered at the previous position
        # (plus drift), with innovation covariance cov and nu dof.
        innov_like = MvStudentT.dist(nu, cov, mu=x_im1 + mu).logp(x_i)
        return init.logp(x[0]) + tt.sum(innov_like)