    vonmises,
)
from pytensor.tensor.random.op import RandomVariable
+from pytensor.tensor.random.utils import normalize_size_param
from pytensor.tensor.variable import TensorConstant

from pymc.logprob.abstract import _logprob_helper
from pymc.logprob.basic import icdf
+from pymc.pytensorf import normalize_rng_param

try:
    from polyagamma import polyagamma_cdf, polyagamma_pdf, random_polyagamma
@@ -73,7 +75,6 @@ def polyagamma_cdf(*args, **kwargs):

from scipy import stats
from scipy.interpolate import InterpolatedUnivariateSpline
-from scipy.special import expit

from pymc.distributions import transforms
from pymc.distributions.dist_math import (
@@ -90,8 +91,8 @@ def polyagamma_cdf(*args, **kwargs):
    normal_lcdf,
    zvalue,
)
-from pymc.distributions.distribution import DIST_PARAMETER_TYPES, Continuous
-from pymc.distributions.shape_utils import rv_size_is_none
+from pymc.distributions.distribution import DIST_PARAMETER_TYPES, Continuous, SymbolicRandomVariable
+from pymc.distributions.shape_utils import implicit_size_from_params, rv_size_is_none
from pymc.distributions.transforms import _default_transform
from pymc.math import invlogit, logdiffexp, logit
@@ -1236,20 +1237,28 @@ def icdf(value, alpha, beta):
        )


-class KumaraswamyRV(RandomVariable):
+class KumaraswamyRV(SymbolicRandomVariable):
    name = "kumaraswamy"
-    ndim_supp = 0
-    ndims_params = [0, 0]
-    dtype = "floatX"
+    signature = "[rng],[size],(),()->[rng],()"
    _print_name = ("Kumaraswamy", "\\operatorname{Kumaraswamy}")

    @classmethod
-    def rng_fn(cls, rng, a, b, size) -> np.ndarray:
-        u = rng.uniform(size=size)
-        return np.asarray((1 - (1 - u) ** (1 / b)) ** (1 / a))
+    def rv_op(cls, a, b, *, size=None, rng=None):
+        a = pt.as_tensor(a)
+        b = pt.as_tensor(b)
+        rng = normalize_rng_param(rng)
+        size = normalize_size_param(size)

+        if rv_size_is_none(size):
+            size = implicit_size_from_params(a, b, ndims_params=cls.ndims_params)

-kumaraswamy = KumaraswamyRV()
+        next_rng, u = uniform(size=size, rng=rng).owner.outputs
+        draws = (1 - (1 - u) ** (1 / b)) ** (1 / a)
+
+        return cls(
+            inputs=[rng, size, a, b],
+            outputs=[next_rng, draws],
+        )(rng, size, a, b)


class Kumaraswamy(UnitContinuous):
@@ -1296,13 +1305,11 @@ class Kumaraswamy(UnitContinuous):
        b > 0.
    """

-    rv_op = kumaraswamy
+    rv_type = KumaraswamyRV
+    rv_op = KumaraswamyRV.rv_op

    @classmethod
    def dist(cls, a: DIST_PARAMETER_TYPES, b: DIST_PARAMETER_TYPES, *args, **kwargs):
-        a = pt.as_tensor_variable(a)
-        b = pt.as_tensor_variable(b)
-
        return super().dist([a, b], *args, **kwargs)

    def support_point(rv, size, a, b):
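
A minimal sanity-check sketch for the new symbolic sampler (illustrative, not part of the diff; it assumes the public `pm.draw` and `pm.Kumaraswamy.dist` APIs). Since `rv_op` implements inverse-CDF sampling, pushing the draws back through the Kumaraswamy CDF F(x) = 1 - (1 - x**a)**b should give approximately uniform values:

import pymc as pm

a, b = 2.0, 3.0
draws = pm.draw(pm.Kumaraswamy.dist(a=a, b=b), draws=10_000, random_seed=1)

u = 1 - (1 - draws**a) ** b  # apply the Kumaraswamy CDF to each draw
assert 0.0 < draws.min() and draws.max() < 1.0
assert abs(u.mean() - 0.5) < 0.02  # uniform(0, 1) draws have mean 0.5
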
@@ -1533,24 +1540,32 @@ def icdf(value, mu, b):
        return check_icdf_parameters(res, b > 0, msg="b > 0")


-class AsymmetricLaplaceRV(RandomVariable):
+class AsymmetricLaplaceRV(SymbolicRandomVariable):
    name = "asymmetriclaplace"
-    ndim_supp = 0
-    ndims_params = [0, 0, 0]
-    dtype = "floatX"
+    signature = "[rng],[size],(),(),()->[rng],()"
    _print_name = ("AsymmetricLaplace", "\\operatorname{AsymmetricLaplace}")

    @classmethod
-    def rng_fn(cls, rng, b, kappa, mu, size=None) -> np.ndarray:
-        u = rng.uniform(size=size)
+    def rv_op(cls, b, kappa, mu, *, size=None, rng=None):
+        b = pt.as_tensor(b)
+        kappa = pt.as_tensor(kappa)
+        mu = pt.as_tensor(mu)
+        rng = normalize_rng_param(rng)
+        size = normalize_size_param(size)
+
+        if rv_size_is_none(size):
+            size = implicit_size_from_params(b, kappa, mu, ndims_params=cls.ndims_params)
+
+        next_rng, u = uniform(size=size, rng=rng).owner.outputs
        switch = kappa**2 / (1 + kappa**2)
-        non_positive_x = mu + kappa * np.log(u * (1 / switch)) / b
-        positive_x = mu - np.log((1 - u) * (1 + kappa**2)) / (kappa * b)
+        non_positive_x = mu + kappa * pt.log(u * (1 / switch)) / b
+        positive_x = mu - pt.log((1 - u) * (1 + kappa**2)) / (kappa * b)
        draws = non_positive_x * (u <= switch) + positive_x * (u > switch)
-        return np.asarray(draws)
-
-asymmetriclaplace = AsymmetricLaplaceRV()
+        return cls(
+            inputs=[rng, size, b, kappa, mu],
+            outputs=[next_rng, draws],
+        )(rng, size, b, kappa, mu)


class AsymmetricLaplace(Continuous):
@@ -1599,15 +1614,12 @@ class AsymmetricLaplace(Continuous):
        of interest.
    """

-    rv_op = asymmetriclaplace
+    rv_type = AsymmetricLaplaceRV
+    rv_op = AsymmetricLaplaceRV.rv_op

    @classmethod
    def dist(cls, kappa=None, mu=None, b=None, q=None, *args, **kwargs):
        kappa = cls.get_kappa(kappa, q)
-        b = pt.as_tensor_variable(b)
-        kappa = pt.as_tensor_variable(kappa)
-        mu = pt.as_tensor_variable(mu)
-
        return super().dist([b, kappa, mu], *args, **kwargs)

    @classmethod
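
For reference, the inverse transform used in `rv_op` above is the same formula the removed NumPy `rng_fn` used; a standalone sketch of that formula (illustrative only, not a PyMC code path):

import numpy as np

def asymmetric_laplace_draws(b, kappa, mu, size=None, rng=None):
    rng = np.random.default_rng() if rng is None else rng
    u = rng.uniform(size=size)
    switch = kappa**2 / (1 + kappa**2)  # branch point of the inverse CDF
    non_positive_x = mu + kappa * np.log(u / switch) / b
    positive_x = mu - np.log((1 - u) * (1 + kappa**2)) / (kappa * b)
    return np.where(u <= switch, non_positive_x, positive_x)
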
@@ -2475,7 +2487,6 @@ def dist(cls, nu, **kwargs):
        return Gamma.dist(alpha=nu / 2, beta=1 / 2, **kwargs)


-# TODO: Remove this once logp for multiplication is working!
class WeibullBetaRV(RandomVariable):
    name = "weibull"
    ndim_supp = 0
@@ -2597,19 +2608,22 @@ def icdf(value, alpha, beta):
        )


-class HalfStudentTRV(RandomVariable):
+class HalfStudentTRV(SymbolicRandomVariable):
    name = "halfstudentt"
-    ndim_supp = 0
-    ndims_params = [0, 0]
-    dtype = "floatX"
+    signature = "[rng],[size],(),()->[rng],()"
    _print_name = ("HalfStudentT", "\\operatorname{HalfStudentT}")

    @classmethod
-    def rng_fn(cls, rng, nu, sigma, size=None) -> np.ndarray:
-        return np.asarray(np.abs(stats.t.rvs(nu, scale=sigma, size=size, random_state=rng)))
+    def rv_op(cls, nu, sigma, *, size=None, rng=None) -> np.ndarray:
+        nu = pt.as_tensor(nu)
+        sigma = pt.as_tensor(sigma)
+        rng = normalize_rng_param(rng)
+        size = normalize_size_param(size)

+        next_rng, t_draws = t(df=nu, scale=sigma, size=size, rng=rng).owner.outputs
+        draws = pt.abs(t_draws)

-halfstudentt = HalfStudentTRV()
+        return cls(inputs=[rng, size, nu, sigma], outputs=[next_rng, draws])(rng, size, nu, sigma)


class HalfStudentT(PositiveContinuous):
@@ -2671,14 +2685,12 @@ class HalfStudentT(PositiveContinuous):
            x = pm.HalfStudentT('x', lam=4, nu=10)
    """

-    rv_op = halfstudentt
+    rv_type = HalfStudentTRV
+    rv_op = HalfStudentTRV.rv_op

    @classmethod
    def dist(cls, nu, sigma=None, lam=None, *args, **kwargs):
-        nu = pt.as_tensor_variable(nu)
        lam, sigma = get_tau_sigma(lam, sigma)
-        sigma = pt.as_tensor_variable(sigma)
-
        return super().dist([nu, sigma], *args, **kwargs)

    def support_point(rv, size, nu, sigma):
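
A short usage sketch (assuming the current `pm.draw` API): the graph above takes the absolute value of StudentT draws, so forward samples stay non-negative, and `lam` is still converted to `sigma` by `get_tau_sigma` in `dist`:

import pymc as pm

x = pm.HalfStudentT.dist(nu=10, lam=4)  # lam is converted to sigma in dist()
samples = pm.draw(x, draws=1_000, random_seed=0)
assert (samples >= 0).all()             # |t| draws are non-negative by construction
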
@@ -2710,19 +2722,29 @@ def logp(value, nu, sigma):
        )


-class ExGaussianRV(RandomVariable):
+class ExGaussianRV(SymbolicRandomVariable):
    name = "exgaussian"
-    ndim_supp = 0
-    ndims_params = [0, 0, 0]
-    dtype = "floatX"
+    signature = "[rng],[size],(),(),()->[rng],()"
    _print_name = ("ExGaussian", "\\operatorname{ExGaussian}")

    @classmethod
-    def rng_fn(cls, rng, mu, sigma, nu, size=None) -> np.ndarray:
-        return np.asarray(rng.normal(mu, sigma, size=size) + rng.exponential(scale=nu, size=size))
+    def rv_op(cls, mu, sigma, nu, *, size=None, rng=None):
+        mu = pt.as_tensor(mu)
+        sigma = pt.as_tensor(sigma)
+        nu = pt.as_tensor(nu)
+        rng = normalize_rng_param(rng)
+        size = normalize_size_param(size)

+        if rv_size_is_none(size):
+            size = implicit_size_from_params(mu, sigma, nu, ndims_params=cls.ndims_params)

-exgaussian = ExGaussianRV()
+        next_rng, normal_draws = normal(loc=mu, scale=sigma, size=size, rng=rng).owner.outputs
+        final_rng, exponential_draws = exponential(scale=nu, size=size, rng=next_rng).owner.outputs
+        draws = normal_draws + exponential_draws
+
+        return cls(inputs=[rng, size, mu, sigma, nu], outputs=[final_rng, draws])(
+            rng, size, mu, sigma, nu
+        )


class ExGaussian(Continuous):
@@ -2792,14 +2814,11 @@ class ExGaussian(Continuous):
        Vol. 4, No. 1, pp 35-45.
    """

-    rv_op = exgaussian
+    rv_type = ExGaussianRV
+    rv_op = ExGaussianRV.rv_op

    @classmethod
    def dist(cls, mu=0.0, sigma=None, nu=None, *args, **kwargs):
-        mu = pt.as_tensor_variable(mu)
-        sigma = pt.as_tensor_variable(sigma)
-        nu = pt.as_tensor_variable(nu)
-
        return super().dist([mu, sigma, nu], *args, **kwargs)

    def support_point(rv, size, mu, sigma, nu):
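
Note how `rv_op` above threads the updated `next_rng` from the Normal draw into the Exponential draw and returns `final_rng`, so the two noise sources stay independent and the RNG state advances correctly. A usage sketch, assuming `pm.draw` and `ExGaussian.dist` keep their current signatures:

import pymc as pm

x = pm.ExGaussian.dist(mu=0.0, sigma=1.0, nu=2.0)
samples = pm.draw(x, draws=20_000, random_seed=42)

# For an ExGaussian, E[X] = mu + nu, so the sample mean should sit near 2.0.
assert abs(samples.mean() - 2.0) < 0.1
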
@@ -3477,19 +3496,25 @@ def icdf(value, mu, s):
        )


-class LogitNormalRV(RandomVariable):
+class LogitNormalRV(SymbolicRandomVariable):
    name = "logit_normal"
-    ndim_supp = 0
-    ndims_params = [0, 0]
-    dtype = "floatX"
+    signature = "[rng],[size],(),()->[rng],()"
    _print_name = ("logitNormal", "\\operatorname{logitNormal}")

    @classmethod
-    def rng_fn(cls, rng, mu, sigma, size=None) -> np.ndarray:
-        return np.asarray(expit(stats.norm.rvs(loc=mu, scale=sigma, size=size, random_state=rng)))
+    def rv_op(cls, mu, sigma, *, size=None, rng=None):
+        mu = pt.as_tensor(mu)
+        sigma = pt.as_tensor(sigma)
+        rng = normalize_rng_param(rng)
+        size = normalize_size_param(size)

+        next_rng, normal_draws = normal(loc=mu, scale=sigma, size=size, rng=rng).owner.outputs
+        draws = pt.expit(normal_draws)

-logit_normal = LogitNormalRV()
+        return cls(
+            inputs=[rng, size, mu, sigma],
+            outputs=[next_rng, draws],
+        )(rng, size, mu, sigma)


class LogitNormal(UnitContinuous):
@@ -3540,15 +3565,12 @@ class LogitNormal(UnitContinuous):
        Defaults to 1.
    """

-    rv_op = logit_normal
+    rv_type = LogitNormalRV
+    rv_op = LogitNormalRV.rv_op

    @classmethod
    def dist(cls, mu=0, sigma=None, tau=None, **kwargs):
-        mu = pt.as_tensor_variable(mu)
-        tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)
-        sigma = pt.as_tensor_variable(sigma)
-        tau = pt.as_tensor_variable(tau)
-
+        _, sigma = get_tau_sigma(tau=tau, sigma=sigma)
        return super().dist([mu, sigma], **kwargs)

    def support_point(rv, size, mu, sigma):
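
Since the graph above applies `pt.expit` to Normal draws, mapping samples back through the logit should recover the underlying Normal. A check sketch, assuming `pm.draw` and `scipy.special.logit`:

import pymc as pm
from scipy.special import logit

x = pm.LogitNormal.dist(mu=0.5, sigma=0.3)
samples = pm.draw(x, draws=20_000, random_seed=7)

z = logit(samples)  # undo the expit applied in rv_op
assert abs(z.mean() - 0.5) < 0.02 and abs(z.std() - 0.3) < 0.02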