| 1 | -import warnings |
| 1 | +#!/usr/bin/env python |
| 2 | +# -*- coding: utf-8 -*- |
2 | 3 |
| 4 | +import warnings |
3 | 5 | import numpy as np
| 4 | -import theano.tensor as tt |
| 6 | +import scipy |
5 | 7 | import theano
| 8 | +import theano.tensor as tt |
6 | 9 |
7 | 10 | from scipy import stats
8 | 11 | from theano.tensor.nlinalg import det, matrix_inverse, trace, eigh
9 | 12 |
10 | 13 | from . import transforms
11 | 14 | from .distribution import Continuous, Discrete, draw_values, generate_samples
| 15 | +from ..model import Deterministic |
| 16 | +from .continuous import ChiSquared, Normal |
12 | 17 | from .special import gammaln, multigammaln
13 | 18 | from .dist_math import bound, logpow, factln
14 | 19 |
| 15 | -__all__ = ['MvNormal', 'Dirichlet', 'Multinomial', 'Wishart', 'LKJCorr'] |
| 20 | +__all__ = ['MvNormal', 'Dirichlet', 'Multinomial', 'Wishart', 'WishartBartlett', 'LKJCorr'] |
16 | 21 |
17 | 22 |
18 | 23 | class MvNormal(Continuous):
@@ -310,6 +315,65 @@ def logp(self, X):
310 | 315 | n > (p - 1))
311 | 316 |
312 | 317 |
| 318 | +def WishartBartlett(name, S, nu, is_cholesky=False, return_cholesky=False): |
| 319 | + """ |
| 320 | + Bartlett decomposition of the Wishart distribution. Because the Wishart |
| 321 | + distribution requires the matrix to be symmetric positive semi-definite, |
| 322 | + direct MCMC proposals for its elements are almost never acceptable. |
| 323 | + |
| 324 | + Instead, we can use the Bartlett decomposition, which samples a |
| 325 | + lower-triangular matrix. Specifically: |
| 326 | + |
| 327 | + If A ~ [[sqrt(c_1), 0, ...], |
| 328 | +         [z_21, sqrt(c_2), 0, ...], |
| 329 | +         [z_31, z_32, sqrt(c_3), ...]] |
| 330 | + with c_i ~ Chi²(nu - i + 1) and z_ij ~ N(0, 1), then |
| 331 | + L * A * A.T * L.T ~ Wishart(L * L.T, nu) |
| 332 | + |
| 333 | + See http://en.wikipedia.org/wiki/Wishart_distribution#Bartlett_decomposition |
| 334 | + for more information. |
| 335 | + |
| 336 | + :Parameters: |
| 337 | + S : ndarray |
| 338 | + p x p positive definite matrix |
| 339 | + Or: |
| 340 | + p x p lower-triangular matrix that is the Cholesky factor |
| 341 | + of the covariance matrix. |
| 342 | + nu : int |
| 343 | + Degrees of freedom, > dim(S). |
| 344 | + is_cholesky : bool (default=False) |
| 345 | + Input matrix S is already the lower-triangular Cholesky factor (i.e. the scale matrix is S * S.T). |
| 346 | + return_cholesky : bool (default=False) |
| 347 | + Only return the Cholesky factor of the sampled Wishart matrix. |
| 348 | + |
| 349 | + :Note: |
| 350 | + This is not a standard Distribution class but follows a similar |
| 351 | + interface. Besides the Wishart distribution, it will add the RVs |
| 352 | + c and z, which make up the matrix, to your model. |
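| | + |
| | + :Example: |
| | + A minimal sketch (assumes this module is used from PyMC3 imported as pm; |
| | + the 3 x 3 identity scale matrix and nu=5 are illustrative values only): |
| | + |
| | + >>> import numpy as np |
| | + >>> import pymc3 as pm |
| | + >>> prior_S = np.eye(3) |
| | + >>> with pm.Model(): |
| | + ...     cov = WishartBartlett('cov', prior_S, nu=5) |
| | + Added new variable c to model diagonal of Wishart. |
| | + Added new variable z to model off-diagonals of Wishart. |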
| 353 | + """ |
| 354 | + |
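| | + # Lower-triangular Cholesky factor of the scale matrix S (so S = L * L.T) |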
| 355 | + L = S if is_cholesky else scipy.linalg.cholesky(S, lower=True) |
| 356 | + |
| 357 | + diag_idx = np.diag_indices_from(S) |
| 358 | + tril_idx = np.tril_indices_from(S, k=-1) |
| 359 | + n_diag = len(diag_idx[0]) |
| 360 | + n_tril = len(tril_idx[0]) |
| 361 | + c = tt.sqrt(ChiSquared('c', nu - np.arange(2, 2 + n_diag), shape=n_diag)) |
| 362 | + print('Added new variable c to model diagonal of Wishart.') |
| 363 | + z = Normal('z', 0, 1, shape=n_tril) |
| 364 | + print('Added new variable z to model off-diagonals of Wishart.') |
| 365 | + # Construct the lower-triangular Bartlett factor A |
| 366 | + A = tt.zeros(S.shape, dtype=np.float32) |
| 367 | + A = tt.set_subtensor(A[diag_idx], c) |
| 368 | + A = tt.set_subtensor(A[tril_idx], z) |
| 369 | + |
| 370 | + # L * A * A.T * L.T ~ Wishart(L*L.T, nu) |
| 371 | + if return_cholesky: |
| 372 | +     return Deterministic(name, tt.dot(L, A)) |
| 373 | + else: |
| 374 | +     return Deterministic(name, tt.dot(tt.dot(tt.dot(L, A), A.T), L.T)) |
| 375 | + |
| 376 | + |
313 | 377 |
314 | 378 | class LKJCorr(Continuous):
315 | 379 | R"""