@@ -1668,44 +1668,53 @@ class AsymmetricLaplace(Continuous):
The pdf of this distribution is
- ..math::
+ .. math::

      f(x|b,\kappa) =
          \left({\frac{b}{\kappa + 1/\kappa}}\right)\,e^{-x\,b\,s\,\kappa^{s}}
- where s = sgn(x)
+
+ where
+
+ .. math::
+
+     s = sgn(x)
+
+ ========  ========================
+ Support   :math:`x \in \mathbb{R}`
+ Mean      :math:`\mu-\frac{\kappa-1/\kappa}{b}`
+ Variance  :math:`\frac{1+\kappa^{4}}{b^{2}\kappa^{2}}`
+ ========  ========================
Parameters
----------
- b:
+ b: float
    Scale parameter (b > 0)
- kappa:
+ kappa: float
    Symmetry parameter (kappa > 0)
+ mu: float
+     Location parameter

- See also: https://en.wikipedia.org/wiki/Asymmetric_Laplace_distribution
+ See Also
+ --------
+ `Reference <https://en.wikipedia.org/wiki/Asymmetric_Laplace_distribution>`_

"""

- def __init__(self, b, kappa, testval=0.0, *args, **kwargs):
-     self.b = tt.as_tensor_variable(b)
-     self.kappa = tt.as_tensor_variable(kappa)
+ def __init__(self, b, kappa, mu=0, *args, **kwargs):
+     self.b = tt.as_tensor_variable(floatX(b))
+     self.kappa = tt.as_tensor_variable(floatX(kappa))
+     self.mu = mu = tt.as_tensor_variable(floatX(mu))
+     self.mean = self.mu - (self.kappa - 1 / self.kappa) / b
+     self.variance = (1 + self.kappa ** 4) / (self.kappa ** 2 * self.b ** 2)
- def _random(self, b, kappa, size=None):
-     if size is not None:
-         u = np.random.uniform(size=size)
-         x = -np.log((1 - u) * (1 + kappa ** 2)) / (kappa * b) * (
-             u > ((kappa ** 2) / (1 + kappa ** 2))
-         ) + kappa * np.log(u * (1 + kappa ** 2) / (kappa ** 2)) / b * (
-             u < ((kappa ** 2) / (1 + kappa ** 2))
-         )
-         return x
-
-     u = np.random.uniform()
-     if u > (kappa ** 2) / (1 + kappa ** 2):
-         x = -np.log((1 - u) * (1 + kappa ** 2)) / (kappa * b)
-     else:
-         x = kappa * np.log(u * (1 + kappa ** 2) / (kappa ** 2)) / b
+     super().__init__(*args, **kwargs)
-     return x
+ def _random(self, b, kappa, size=None):
+     u = np.random.uniform(size=size)
+     switch = kappa ** 2 / (1 + kappa ** 2)
+     non_positive_x = kappa * np.log(u * (1 / switch)) / b
+     positive_x = -np.log((1 - u) * (1 + kappa ** 2)) / (kappa * b)
+     draws = non_positive_x * (u <= switch) + positive_x * (u > switch)
+     return draws

def random(self, point=None, size=None):
    """
"""
@@ -1741,6 +1750,7 @@ def logp(self, value):
-------
TensorVariable
"""
+ value = value - self.mu
return bound(
    tt.log(self.b / (self.kappa + (self.kappa ** -1)))
    + (-value * self.b * tt.sgn(value) * (self.kappa ** tt.sgn(value))),
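
For context, a minimal usage sketch of the updated signature, assuming the class remains exposed as `pm.AsymmetricLaplace`; the data, priors, and sampler settings are illustrative and not part of the PR.

```python
import numpy as np
import pymc3 as pm

# Illustrative data: skewed, shifted residuals (not from the PR).
observed = np.random.default_rng(1).gumbel(loc=1.0, scale=0.5, size=200)

with pm.Model():
    b = pm.HalfNormal("b", sigma=2.0)
    kappa = pm.HalfNormal("kappa", sigma=2.0)
    mu = pm.Normal("mu", mu=0.0, sigma=10.0)

    # The new `mu` argument shifts the location; before this change the
    # distribution was centered at zero and accepted only `b` and `kappa`.
    pm.AsymmetricLaplace("y", b=b, kappa=kappa, mu=mu, observed=observed)

    trace = pm.sample(1000, tune=1000)
```
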