@@ -889,15 +889,14 @@ def test_half_flat(self):
         assert 0.0 == HalfFlat.dist().logcdf(np.inf).tag.test_value
         assert -np.inf == HalfFlat.dist().logcdf(-np.inf).tag.test_value
 
-    def test_normal_logp(self):
+    def test_normal(self):
         self.check_logp(
             Normal,
             R,
             {"mu": R, "sigma": Rplus},
             lambda value, mu, sigma: sp.norm.logpdf(value, mu, sigma),
             decimal=select_by_precision(float64=6, float32=1),
         )
-
         self.check_logcdf(
             Normal,
             R,
@@ -943,27 +942,6 @@ def test_chi_squared(self):
             lambda value, nu: sp.chi2.logpdf(value, df=nu),
         )
 
-    def test_wald_scipy_logp(self):
-        self.check_logp(
-            Wald,
-            Rplus,
-            {"mu": Rplus, "alpha": Rplus},
-            lambda value, mu, alpha: sp.invgauss.logpdf(value, mu=mu, loc=alpha),
-            decimal=select_by_precision(float64=6, float32=1),
-        )
-
-    @pytest.mark.xfail(
-        condition=(aesara.config.floatX == "float32"),
-        reason="Poor CDF in SciPy. See scipy/scipy#869 for details.",
-    )
-    def test_wald_scipy_logcdf(self):
-        self.check_logcdf(
-            Wald,
-            Rplus,
-            {"mu": Rplus, "alpha": Rplus},
-            lambda value, mu, alpha: sp.invgauss.logcdf(value, mu=mu, loc=alpha),
-        )
-
     @pytest.mark.parametrize(
         "value,mu,lam,phi,alpha,logp",
         [
@@ -983,7 +961,7 @@ def test_wald_scipy_logcdf(self):
             (50.0, 15.0, None, 0.666666, 10.0, -5.6481874),
         ],
     )
-    def test_wald(self, value, mu, lam, phi, alpha, logp):
+    def test_wald_logp_custom_points(self, value, mu, lam, phi, alpha, logp):
         # Log probabilities calculated using the dIG function from the R package gamlss.
         # See e.g., doi: 10.1111/j.1467-9876.2005.00510.x, or
         # http://www.gamlss.org/.
@@ -993,6 +971,27 @@ def test_wald(self, value, mu, lam, phi, alpha, logp):
         decimals = select_by_precision(float64=6, float32=1)
         assert_almost_equal(model.fastlogp(pt), logp, decimal=decimals, err_msg=str(pt))
 
+    def test_wald_logp(self):
+        self.check_logp(
+            Wald,
+            Rplus,
+            {"mu": Rplus, "alpha": Rplus},
+            lambda value, mu, alpha: sp.invgauss.logpdf(value, mu=mu, loc=alpha),
+            decimal=select_by_precision(float64=6, float32=1),
+        )
+
+    @pytest.mark.xfail(
+        condition=(aesara.config.floatX == "float32"),
+        reason="Poor CDF in SciPy. See scipy/scipy#869 for details.",
+    )
+    def test_wald_logcdf(self):
+        self.check_logcdf(
+            Wald,
+            Rplus,
+            {"mu": Rplus, "alpha": Rplus},
+            lambda value, mu, alpha: sp.invgauss.logcdf(value, mu=mu, loc=alpha),
+        )
+
     def test_beta(self):
         self.check_logp(
             Beta,
@@ -1381,23 +1380,34 @@ def test_binomial(self):
 
     # Too lazy to propagate decimal parameter through the whole chain of deps
     @pytest.mark.xfail(condition=(aesara.config.floatX == "float32"), reason="Fails on float32")
+    def test_beta_binomial_distribution(self):
+        self.checkd(
+            BetaBinomial,
+            Nat,
+            {"alpha": Rplus, "beta": Rplus, "n": NatSmall},
+        )
+
     @pytest.mark.skipif(
         condition=(SCIPY_VERSION < parse("1.4.0")), reason="betabinom is new in Scipy 1.4.0"
     )
-    def test_beta_binomial_logcdf(self):
-        self.check_logcdf(
+    def test_beta_binomial_logp(self):
+        self.check_logp(
             BetaBinomial,
             Nat,
             {"alpha": Rplus, "beta": Rplus, "n": NatSmall},
-            lambda value, alpha, beta, n: sp.betabinom.logcdf(value, a=alpha, b=beta, n=n),
+            lambda value, alpha, beta, n: sp.betabinom.logpmf(value, a=alpha, b=beta, n=n),
         )
 
     @pytest.mark.xfail(condition=(aesara.config.floatX == "float32"), reason="Fails on float32")
-    def test_beta_binomial_distribution(self):
-        self.checkd(
+    @pytest.mark.skipif(
+        condition=(SCIPY_VERSION < parse("1.4.0")), reason="betabinom is new in Scipy 1.4.0"
+    )
+    def test_beta_binomial_logcdf(self):
+        self.check_logcdf(
             BetaBinomial,
             Nat,
             {"alpha": Rplus, "beta": Rplus, "n": NatSmall},
+            lambda value, alpha, beta, n: sp.betabinom.logcdf(value, a=alpha, b=beta, n=n),
         )
 
     def test_beta_binomial_selfconsistency(self):
@@ -1407,17 +1417,6 @@ def test_beta_binomial_selfconsistency(self):
             {"alpha": Rplus, "beta": Rplus, "n": NatSmall},
         )
 
-    @pytest.mark.skipif(
-        condition=(SCIPY_VERSION < parse("1.4.0")), reason="betabinom is new in Scipy 1.4.0"
-    )
-    def test_beta_binomial_logp(self):
-        self.check_logp(
-            BetaBinomial,
-            Nat,
-            {"alpha": Rplus, "beta": Rplus, "n": NatSmall},
-            lambda value, alpha, beta, n: sp.betabinom.logpmf(value, a=alpha, b=beta, n=n),
-        )
-
     def test_bernoulli(self):
         self.check_logp(
             Bernoulli,
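
For readers unfamiliar with the check_logp / check_logcdf pattern that this diff reorganizes, the sketch below illustrates the kind of comparison these helpers presumably perform: evaluate a log-density implementation at a set of points and assert it matches a SciPy reference within a precision tolerance. The compare_logp_to_scipy helper and the normal_logp stand-in are hypothetical illustrations, not the project's actual test utilities.

# Hypothetical sketch, not the repository's actual check_logp helper.
import numpy as np
import scipy.stats as sp
from numpy.testing import assert_almost_equal


def compare_logp_to_scipy(logp_fn, scipy_logpdf, values, decimal=6, **params):
    """Evaluate both log-densities at each value and assert they agree to `decimal` places."""
    for value in values:
        expected = scipy_logpdf(value, **params)
        actual = logp_fn(value, **params)
        assert_almost_equal(actual, expected, decimal=decimal, err_msg=str(value))


# Example usage with the same SciPy reference as test_normal above; `normal_logp`
# is a placeholder for whatever evaluates the log-density under test.
def normal_logp(value, loc, scale):
    return sp.norm.logpdf(value, loc, scale)  # stand-in implementation, purely illustrative


compare_logp_to_scipy(normal_logp, sp.norm.logpdf, np.linspace(-3.0, 3.0, 7), loc=0.0, scale=1.0)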