24
24
import numpy as np
25
25
import scipy
26
26
27
+ from aesara .assert_op import Assert
27
28
from aesara .graph .basic import Apply
28
29
from aesara .graph .op import Op
30
+ from aesara .sparse .basic import sp_sum
29
31
from aesara .tensor import gammaln , sigmoid
30
32
from aesara .tensor .nlinalg import det , eigh , matrix_inverse , trace
31
33
from aesara .tensor .random .basic import MultinomialRV , dirichlet , multivariate_normal
@@ -397,7 +399,6 @@ def __new__(cls, name, *args, **kwargs):
397
399
398
400
@classmethod
399
401
def dist (cls , a , ** kwargs ):
400
-
401
402
a = at .as_tensor_variable (a )
402
403
# mean = a / at.sum(a)
403
404
# mode = at.switch(at.all(a > 1), (a - 1) / at.sum(a - 1), np.nan)
@@ -491,7 +492,6 @@ class Multinomial(Discrete):
491
492
492
493
@classmethod
493
494
def dist (cls , n , p , * args , ** kwargs ):
494
-
495
495
p = p / at .sum (p , axis = - 1 , keepdims = True )
496
496
n = at .as_tensor_variable (n )
497
497
p = at .as_tensor_variable (p )
@@ -1925,6 +1925,81 @@ def _distr_parameters_for_repr(self):
1925
1925
return ["mu" ]
1926
1926
1927
1927
1928
class CARRV(RandomVariable):
    """RandomVariable op implementing sampling for the conditional
    autoregressive (CAR) distribution used by the ``CAR`` distribution class.
    """

    name = "car"
    # The support is a vector (ndim_supp=1); the parameters are
    # mu (vector), W (matrix), alpha (scalar), tau (scalar).
    ndim_supp = 1
    ndims_params = [1, 2, 0, 0]
    dtype = "floatX"
    _print_name = ("CAR", "\\operatorname{CAR}")

    def make_node(self, rng, size, dtype, mu, W, alpha, tau):
        """Convert and validate the parameters, then build the Apply node.

        ``W`` may be dense or sparse; a symbolic ``Assert`` checks at run
        time that it is a symmetric matrix.
        """
        mu = at.as_tensor_variable(floatX(mu))

        W = aesara.sparse.as_sparse_or_tensor_variable(floatX(W))
        if not W.ndim == 2:
            raise ValueError("W must be a matrix (ndim=2).")

        sparse = isinstance(W, aesara.sparse.SparseVariable)
        msg = "W must be a symmetric adjacency matrix."
        if sparse:
            # Elementwise |W - W.T| built as sgn(W - W.T) * (W - W.T);
            # its total sum is 0 iff W is symmetric.
            abs_diff = aesara.sparse.basic.mul(aesara.sparse.basic.sgn(W - W.T), W - W.T)
            W = Assert(msg)(W, at.isclose(aesara.sparse.basic.sp_sum(abs_diff), 0))
        else:
            W = Assert(msg)(W, at.allclose(W, W.T))

        tau = at.as_tensor_variable(floatX(tau))
        alpha = at.as_tensor_variable(floatX(alpha))

        return super().make_node(rng, size, dtype, mu, W, alpha, tau)

    def _infer_shape(self, size, dist_params, param_shapes=None):
        # Output shape is the requested size plus the shape of mu
        # (the first distribution parameter).
        shape = tuple(size) + tuple(dist_params[0].shape)
        return shape

    @classmethod
    def rng_fn(cls, rng: np.random.RandomState, mu, W, alpha, tau, size):
        """
        Sample from the CAR distribution (a Gaussian Markov random field
        with precision matrix Q = tau * (D - alpha * W)).

        Implementation of algorithm from paper
        Havard Rue, 2001. "Fast sampling of Gaussian Markov random fields,"
        Journal of the Royal Statistical Society Series B, Royal Statistical Society,
        vol. 63(2), pages 325-338. DOI: 10.1111/1467-9868.00288
        """
        if not scipy.sparse.issparse(W):
            W = scipy.sparse.csr_matrix(W)
        # Degree vector: column sums of the adjacency matrix.
        s = np.asarray(W.sum(axis=0))[0]
        D = scipy.sparse.diags(s)
        tau = scipy.sparse.csr_matrix(tau)
        alpha = scipy.sparse.csr_matrix(alpha)

        # Precision matrix of the CAR field.
        Q = tau.multiply(D - alpha.multiply(W))

        # Reorder Q to reduce bandwidth so the banded Cholesky stays cheap;
        # inv_perm undoes the permutation on the samples afterwards.
        perm_array = scipy.sparse.csgraph.reverse_cuthill_mckee(Q, symmetric_mode=True)
        inv_perm = np.argsort(perm_array)

        Q = Q[perm_array, :][:, perm_array]

        # Pack the upper diagonals of Q into the banded storage expected by
        # scipy.linalg.cholesky_banded (topmost row = furthest diagonal).
        Qb = Q.diagonal()
        u = 1
        while np.count_nonzero(Q.diagonal(u)) > 0:
            Qb = np.vstack((np.pad(Q.diagonal(u), (u, 0), constant_values=(0, 0)), Qb))
            u += 1

        L = scipy.linalg.cholesky_banded(Qb, lower=False)

        size = tuple(size or ())
        if size:
            mu = np.broadcast_to(mu, size + mu.shape)
        z = rng.normal(size=mu.shape)
        samples = np.empty(z.shape)
        for idx in np.ndindex(mu.shape[:-1]):
            # NOTE(review): cho_solve_banded solves Q x = z (using both
            # triangular factors), whereas Rue's algorithm samples by solving
            # the single triangular system L^T x = z to get x ~ N(0, Q^{-1}).
            # Verify against the reference that this produces the intended
            # covariance.
            samples[idx] = scipy.linalg.cho_solve_banded((L, False), z[idx]) + mu[idx][perm_array]
        # Undo the bandwidth-reducing permutation.
        samples = samples[..., inv_perm]
        return samples


# Singleton op instance used as CAR.rv_op.
car = CARRV()
2001
+
2002
+
1928
2003
class CAR (Continuous ):
1929
2004
r"""
1930
2005
Likelihood for a conditional autoregression. This is a special case of the
@@ -1966,45 +2041,13 @@ class CAR(Continuous):
1966
2041
"Generalized Hierarchical Multivariate CAR Models for Areal Data"
1967
2042
Biometrics, Vol. 61, No. 4 (Dec., 2005), pp. 950-961
1968
2043
"""
2044
+ rv_op = car
1969
2045
1970
- def __init__ (self , mu , W , alpha , tau , sparse = False , * args , ** kwargs ):
1971
- super ().__init__ (* args , ** kwargs )
1972
-
1973
- D = W .sum (axis = 0 )
1974
- d , _ = W .shape
1975
-
1976
- self .d = d
1977
- self .median = self .mode = self .mean = self .mu = at .as_tensor_variable (mu )
1978
- self .sparse = sparse
1979
-
1980
- if not W .ndim == 2 or not np .allclose (W , W .T ):
1981
- raise ValueError ("W must be a symmetric adjacency matrix." )
1982
-
1983
- if sparse :
1984
- W_sparse = scipy .sparse .csr_matrix (W )
1985
- self .W = aesara .sparse .as_sparse_variable (W_sparse )
1986
- else :
1987
- self .W = at .as_tensor_variable (W )
1988
-
1989
- # eigenvalues of D^−1/2 * W * D^−1/2
1990
- Dinv_sqrt = np .diag (1 / np .sqrt (D ))
1991
- DWD = np .matmul (np .matmul (Dinv_sqrt , W ), Dinv_sqrt )
1992
- self .lam = scipy .linalg .eigvalsh (DWD )
1993
- self .D = at .as_tensor_variable (D )
1994
-
1995
- tau = at .as_tensor_variable (tau )
1996
- if tau .ndim > 0 :
1997
- self .tau = tau [:, None ]
1998
- else :
1999
- self .tau = tau
2000
-
2001
- alpha = at .as_tensor_variable (alpha )
2002
- if alpha .ndim > 0 :
2003
- self .alpha = alpha [:, None ]
2004
- else :
2005
- self .alpha = alpha
2046
+ @classmethod
2047
+ def dist (cls , mu , W , alpha , tau , * args , ** kwargs ):
2048
+ return super ().dist ([mu , W , alpha , tau ], ** kwargs )
2006
2049
2007
    def logp(value, mu, W, alpha, tau):
        """
        Calculate log-probability of a CAR-distributed vector
        at specified value. This log probability function differs from
        the log probability of a full multivariate normal in that it
        never materializes the dense precision matrix Q = tau * (D - alpha * W).

        Parameters
        ----------
        value: array
            Value for which log-probability is calculated.
        mu: array
            Mean of the distribution.
        W: matrix (dense tensor or aesara sparse variable)
            Symmetric adjacency matrix.
        alpha: scalar or array
            Autoregression parameter; constrained to [-1, 1] by ``bound``.
        tau: scalar or array
            Precision parameter; constrained positive by ``bound``.

        Returns
        -------
        TensorVariable
        """
        # W may be dense or an aesara sparse variable; pick matching ops
        # for the row sums and matrix products below.
        sparse = isinstance(W, aesara.sparse.SparseVariable)

        if sparse:
            D = sp_sum(W, axis=0)
            Dinv_sqrt = at.diag(1 / at.sqrt(D))
            DWD = at.dot(aesara.sparse.dot(Dinv_sqrt, W), Dinv_sqrt)
        else:
            D = W.sum(axis=0)
            Dinv_sqrt = at.diag(1 / at.sqrt(D))
            DWD = at.dot(at.dot(Dinv_sqrt, W), Dinv_sqrt)
        # Eigenvalues of D^{-1/2} W D^{-1/2}; these feed the log-determinant
        # term of the precision matrix.
        lam = at.slinalg.eigvalsh(DWD, at.eye(DWD.shape[0]))

        d, _ = W.shape

        # Promote a single vector to a 1-row batch so the batched
        # expressions below apply uniformly.
        if value.ndim == 1:
            value = value[None, :]

        logtau = d * at.log(tau).sum()
        # log-determinant contribution: sum_i log(1 - alpha * lam_i).
        logdet = at.log(1 - alpha.T * lam[:, None]).sum()
        delta = value - mu

        if sparse:
            Wdelta = aesara.sparse.dot(delta, W)
        else:
            Wdelta = at.dot(delta, W)

        # Quadratic form delta^T Q delta, computed row-wise without
        # building Q: Q delta = tau * (D * delta - alpha * W delta).
        tau_dot_delta = D[None, :] * delta - alpha * Wdelta
        logquad = (tau * delta * tau_dot_delta).sum(axis=-1)
        return bound(
            0.5 * (logtau + logdet - logquad),
            at.all(alpha <= 1),
            at.all(alpha >= -1),
            tau > 0,
        )
2045
-
2046
- def random (self , point = None , size = None ):
2047
- raise NotImplementedError ("Sampling from a CAR distribution is not supported." )
2048
-
2049
- def _distr_parameters_for_repr (self ):
2050
- return ["mu" , "W" , "alpha" , "tau" ]
0 commit comments