
Commit 4a53c7e

Merge branch 'master' into merge_kwargs
2 parents 2fb6e44 + 2b5dd34 commit 4a53c7e

19 files changed: +698 -315 lines changed

.gitignore

Lines changed: 2 additions & 2 deletions
@@ -33,5 +33,5 @@ benchmarks/html/
 benchmarks/results/
 .pytest_cache/
 
-# VS Code editor
-.vscode
+# VSCode
+.vscode/

.mailmap

Lines changed: 0 additions & 10 deletions
@@ -20,14 +20,4 @@ Chris Fonnesbeck <[email protected]> Christopher Fonnesbeck <chris
 Chris Fonnesbeck <[email protected]> Christopher Fonnesbeck <[email protected]>
 Chris Fonnesbeck <[email protected]> Christopher Fonnesbeck <[email protected]>
 Chris Fonnesbeck <[email protected]> fonnesbeck <fonnesbeck@15d7aa0b-6f1a-0410-991a-d59f85d14984>
-
-John Salvatier <[email protected]> john salvatier <[email protected]>
-John Salvatier <[email protected]> jsalvatier <[email protected]>
 Thomas Wiecki <[email protected]> twiecki <[email protected]>
-David Huard <[email protected]> <davhua1@OURA-024.(none)>
-David Huard <[email protected]> <davhua1@OURA-035.(none)>
-David Huard <[email protected]> <david@huard.(none)>
-David Huard <[email protected]> david.huard <david.huard@15d7aa0b-6f1a-0410-991a-d59f85d14984>
-Anand Patil <[email protected]> anand.prabhakar.patil <anand.prabhakar.patil@15d7aa0b-6f1a-0410-991a-d59f85d14984>
-Chad Heyne <[email protected]> chadheyne <[email protected]>
-

RELEASE-NOTES.md

Lines changed: 1 addition & 0 deletions
@@ -51,6 +51,7 @@ This will be the last release to support Python 2.
 - Fixed `Rice` distribution, which inconsistently mixed two parametrizations (#3286).
 - `Rice` distribution now accepts multiple parameters and observations and is usable with NUTS (#3289).
 - `sample_posterior_predictive` no longer calls `draw_values` to initialize the shape of the ppc trace. This called could lead to `ValueError`'s when sampling the ppc from a model with `Flat` or `HalfFlat` prior distributions (Fix issue #3294).
+- Added explicit conversion to `floatX` and `int32` for the continuous and discrete probability distribution parameters (addresses issue #3223).
 
 
 ### Deprecations
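
The release-note bullet added above is terse, so here is a minimal sketch of the dtype problem the explicit `floatX` cast addresses (my illustration, not code from this commit): wrapping a parameter in `floatX(...)` before `tt.as_tensor_variable` normalizes integer or float64 inputs to `theano.config.floatX`.

# Illustration only, not from the commit; assumes theano and pymc3 are importable.
import numpy as np
import theano
import theano.tensor as tt
from pymc3.theanof import floatX

mu = np.array([0, 1, 2])                   # int64 input from user code
raw = tt.as_tensor_variable(mu)            # dtype stays int64
cast = tt.as_tensor_variable(floatX(mu))   # dtype becomes theano.config.floatX

print(raw.dtype, cast.dtype, theano.config.floatX)
# e.g.: int64 float64 float64 (or float32 when floatX is set to float32)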

docs/source/notebooks/GLM-logistic.ipynb

Lines changed: 76 additions & 79 deletions
Large diffs are not rendered by default.

docs/source/notebooks/GP-Kron.ipynb

Lines changed: 92 additions & 58 deletions
Large diffs are not rendered by default.

pymc3/data.py

Lines changed: 0 additions & 1 deletion
@@ -355,7 +355,6 @@ def check(t):
                        if t is not None else tt.arange(shp_end[i])
                        for i, t in enumerate(end)]
             slc = slc_begin + mid + slc_end
-            slc = slc
         else:
             raise TypeError('Unrecognized size type, %r' % batch_size)
         return pm.theanof.ix_(*slc)

pymc3/distributions/continuous.py

Lines changed: 49 additions & 50 deletions
@@ -425,7 +425,7 @@ def __init__(self, mu=0, sigma=None, tau=None, sd=None, **kwargs):
         self.sigma = self.sd = tt.as_tensor_variable(sigma)
         self.tau = tt.as_tensor_variable(tau)
 
-        self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(mu)
+        self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(floatX(mu))
         self.variance = 1. / self.tau
 
         assert_negative_support(sigma, 'sigma', 'Normal')
@@ -572,9 +572,9 @@ def __init__(self, mu=0, sigma=None, tau=None, lower=None, upper=None,
         tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)
         self.sigma = self.sd = tt.as_tensor_variable(sigma)
         self.tau = tt.as_tensor_variable(tau)
-        self.lower = tt.as_tensor_variable(lower) if lower is not None else lower
-        self.upper = tt.as_tensor_variable(upper) if upper is not None else upper
-        self.mu = tt.as_tensor_variable(mu)
+        self.lower = tt.as_tensor_variable(floatX(lower)) if lower is not None else lower
+        self.upper = tt.as_tensor_variable(floatX(upper)) if upper is not None else upper
+        self.mu = tt.as_tensor_variable(floatX(mu))
 
         if self.lower is None and self.upper is None:
             self._defaultval = mu
@@ -906,10 +906,10 @@ class Wald(PositiveContinuous):
     def __init__(self, mu=None, lam=None, phi=None, alpha=0., *args, **kwargs):
         super().__init__(*args, **kwargs)
         mu, lam, phi = self.get_mu_lam_phi(mu, lam, phi)
-        self.alpha = alpha = tt.as_tensor_variable(alpha)
-        self.mu = mu = tt.as_tensor_variable(mu)
-        self.lam = lam = tt.as_tensor_variable(lam)
-        self.phi = phi = tt.as_tensor_variable(phi)
+        self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))
+        self.mu = mu = tt.as_tensor_variable(floatX(mu))
+        self.lam = lam = tt.as_tensor_variable(floatX(lam))
+        self.phi = phi = tt.as_tensor_variable(floatX(phi))
 
         self.mean = self.mu + self.alpha
         self.mode = self.mu * (tt.sqrt(1. + (1.5 * self.mu / self.lam)**2)
@@ -1120,8 +1120,8 @@ def __init__(self, alpha=None, beta=None, mu=None, sigma=None,
         if sd is not None:
             sigma = sd
         alpha, beta = self.get_alpha_beta(alpha, beta, mu, sigma)
-        self.alpha = alpha = tt.as_tensor_variable(alpha)
-        self.beta = beta = tt.as_tensor_variable(beta)
+        self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))
+        self.beta = beta = tt.as_tensor_variable(floatX(beta))
 
         self.mean = self.alpha / (self.alpha + self.beta)
         self.variance = self.alpha * self.beta / (
@@ -1262,8 +1262,8 @@ class Kumaraswamy(UnitContinuous):
     def __init__(self, a, b, *args, **kwargs):
         super().__init__(*args, **kwargs)
 
-        self.a = a = tt.as_tensor_variable(a)
-        self.b = b = tt.as_tensor_variable(b)
+        self.a = a = tt.as_tensor_variable(floatX(a))
+        self.b = b = tt.as_tensor_variable(floatX(b))
 
         ln_mean = tt.log(b) + tt.gammaln(1 + 1 / a) + tt.gammaln(b) - tt.gammaln(1 + 1 / a + b)
         self.mean = tt.exp(ln_mean)
@@ -1374,7 +1374,7 @@ class Exponential(PositiveContinuous):
 
     def __init__(self, lam, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        self.lam = lam = tt.as_tensor_variable(lam)
+        self.lam = lam = tt.as_tensor_variable(floatX(lam))
         self.mean = 1. / self.lam
         self.median = self.mean * tt.log(2)
         self.mode = tt.zeros_like(self.lam)
@@ -1498,8 +1498,8 @@ class Laplace(Continuous):
 
     def __init__(self, mu, b, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        self.b = b = tt.as_tensor_variable(b)
-        self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(mu)
+        self.b = b = tt.as_tensor_variable(floatX(b))
+        self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(floatX(mu))
 
         self.variance = 2 * self.b**2
 
@@ -1639,7 +1639,7 @@ def __init__(self, mu=0, sigma=None, tau=None, sd=None, *args, **kwargs):
 
         tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)
 
-        self.mu = mu = tt.as_tensor_variable(mu)
+        self.mu = mu = tt.as_tensor_variable(floatX(mu))
         self.tau = tau = tt.as_tensor_variable(tau)
         self.sigma = self.sd = sigma = tt.as_tensor_variable(sigma)
 
@@ -1791,10 +1791,10 @@ class StudentT(Continuous):
 
     def __init__(self, nu, mu=0, lam=None, sigma=None, sd=None, *args, **kwargs):
         super().__init__(*args, **kwargs)
+        super(StudentT, self).__init__(*args, **kwargs)
         if sd is not None:
             sigma = sd
-
-        self.nu = nu = tt.as_tensor_variable(nu)
+        self.nu = nu = tt.as_tensor_variable(floatX(nu))
         lam, sigma = get_tau_sigma(tau=lam, sigma=sigma)
         self.lam = lam = tt.as_tensor_variable(lam)
         self.sigma = self.sd = sigma = tt.as_tensor_variable(sigma)
@@ -1923,8 +1923,8 @@ class Pareto(Continuous):
     """
 
     def __init__(self, alpha, m, transform='lowerbound', *args, **kwargs):
-        self.alpha = alpha = tt.as_tensor_variable(alpha)
-        self.m = m = tt.as_tensor_variable(m)
+        self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))
+        self.m = m = tt.as_tensor_variable(floatX(m))
 
         self.mean = tt.switch(tt.gt(alpha, 1), alpha *
                               m / (alpha - 1.), np.inf)
@@ -2061,8 +2061,8 @@ class Cauchy(Continuous):
 
     def __init__(self, alpha, beta, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        self.median = self.mode = self.alpha = tt.as_tensor_variable(alpha)
-        self.beta = tt.as_tensor_variable(beta)
+        self.median = self.mode = self.alpha = tt.as_tensor_variable(floatX(alpha))
+        self.beta = tt.as_tensor_variable(floatX(beta))
 
         assert_negative_support(beta, 'beta', 'Cauchy')
 
@@ -2171,8 +2171,7 @@ class HalfCauchy(PositiveContinuous):
     def __init__(self, beta, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self.mode = tt.as_tensor_variable(0)
-        self.median = tt.as_tensor_variable(beta)
-        self.beta = tt.as_tensor_variable(beta)
+        self.median = self.beta = tt.as_tensor_variable(floatX(beta))
 
         assert_negative_support(beta, 'beta', 'HalfCauchy')
 
@@ -2303,8 +2302,8 @@ def __init__(self, alpha=None, beta=None, mu=None, sigma=None,
             sigma = sd
 
         alpha, beta = self.get_alpha_beta(alpha, beta, mu, sigma)
-        self.alpha = alpha = tt.as_tensor_variable(alpha)
-        self.beta = beta = tt.as_tensor_variable(beta)
+        self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))
+        self.beta = beta = tt.as_tensor_variable(floatX(beta))
         self.mean = alpha / beta
         self.mode = tt.maximum((alpha - 1) / beta, 0)
         self.variance = alpha / beta**2
@@ -2438,8 +2437,8 @@ def __init__(self, alpha=None, beta=None, mu=None, sigma=None, sd=None,
             sigma = sd
 
         alpha, beta = InverseGamma._get_alpha_beta(alpha, beta, mu, sigma)
-        self.alpha = alpha = tt.as_tensor_variable(alpha)
-        self.beta = beta = tt.as_tensor_variable(beta)
+        self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))
+        self.beta = beta = tt.as_tensor_variable(floatX(beta))
 
         self.mean = self._calculate_mean()
         self.mode = beta / (alpha + 1.)
@@ -2567,7 +2566,7 @@ class ChiSquared(Gamma):
     """
 
     def __init__(self, nu, *args, **kwargs):
-        self.nu = nu = tt.as_tensor_variable(nu)
+        self.nu = nu = tt.as_tensor_variable(floatX(nu))
         super().__init__(alpha=nu / 2., beta=0.5, *args, **kwargs)
 
     def _repr_latex_(self, name=None, dist=None):
@@ -2625,8 +2624,8 @@ class Weibull(PositiveContinuous):
 
     def __init__(self, alpha, beta, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        self.alpha = alpha = tt.as_tensor_variable(alpha)
-        self.beta = beta = tt.as_tensor_variable(beta)
+        self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))
+        self.beta = beta = tt.as_tensor_variable(floatX(beta))
         self.mean = beta * tt.exp(gammaln(1 + 1. / alpha))
         self.median = beta * tt.exp(gammaln(tt.log(2)))**(1. / alpha)
         self.variance = (beta**2) * \
@@ -2788,7 +2787,7 @@ def __init__(self, nu=1, sigma=None, lam=None, sd=None,
         self.median = tt.as_tensor_variable(sigma)
         self.sigma = self.sd = tt.as_tensor_variable(sigma)
         self.lam = tt.as_tensor_variable(lam)
-        self.nu = nu = tt.as_tensor_variable(nu)
+        self.nu = nu = tt.as_tensor_variable(floatX(nu))
 
         assert_negative_support(sigma, 'sigma', 'HalfStudentT')
         assert_negative_support(lam, 'lam', 'HalfStudentT')
@@ -2923,9 +2922,9 @@ def __init__(self, mu=0., sigma=None, nu=None, sd=None,
         if sd is not None:
             sigma = sd
 
-        self.mu = mu = tt.as_tensor_variable(mu)
-        self.sigma = self.sd = sigma = tt.as_tensor_variable(sigma)
-        self.nu = nu = tt.as_tensor_variable(nu)
+        self.mu = mu = tt.as_tensor_variable(floatX(mu))
+        self.sigma = self.sd = sigma = tt.as_tensor_variable(floatX(sigma))
+        self.nu = nu = tt.as_tensor_variable(floatX(nu))
         self.mean = mu + nu
         self.variance = (sigma**2) + (nu**2)
 
@@ -3074,8 +3073,8 @@ def __init__(self, mu=0.0, kappa=None, transform='circular',
         if transform == 'circular':
            transform = transforms.Circular()
         super().__init__(transform=transform, *args, **kwargs)
-        self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(mu)
-        self.kappa = kappa = floatX(tt.as_tensor_variable(kappa))
+        self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(floatX(mu))
+        self.kappa = kappa = tt.as_tensor_variable(floatX(kappa))
 
         assert_negative_support(kappa, 'kappa', 'VonMises')
 
@@ -3199,11 +3198,11 @@ def __init__(self, mu=0.0, sigma=None, tau=None, alpha=1, sd=None,
             sigma = sd
 
         tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)
-        self.mu = mu = tt.as_tensor_variable(mu)
+        self.mu = mu = tt.as_tensor_variable(floatX(mu))
         self.tau = tt.as_tensor_variable(tau)
         self.sigma = self.sd = tt.as_tensor_variable(sigma)
 
-        self.alpha = alpha = tt.as_tensor_variable(alpha)
+        self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))
 
         self.mean = mu + self.sigma * (2 / np.pi)**0.5 * alpha / (1 + alpha**2)**0.5
         self.variance = self.sigma**2 * (1 - (2 * alpha**2) / ((1 + alpha**2) * np.pi))
@@ -3329,9 +3328,9 @@ class Triangular(BoundedContinuous):
 
     def __init__(self, lower=0, upper=1, c=0.5,
                  *args, **kwargs):
-        self.median = self.mean = self.c = c = tt.as_tensor_variable(c)
-        self.lower = lower = tt.as_tensor_variable(lower)
-        self.upper = upper = tt.as_tensor_variable(upper)
+        self.median = self.mean = self.c = c = tt.as_tensor_variable(floatX(c))
+        self.lower = lower = tt.as_tensor_variable(floatX(lower))
+        self.upper = upper = tt.as_tensor_variable(floatX(upper))
 
         super().__init__(lower=lower, upper=upper, *args, **kwargs)
 
@@ -3464,8 +3463,8 @@ class Gumbel(Continuous):
     """
 
    def __init__(self, mu=0, beta=1.0, **kwargs):
-        self.mu = tt.as_tensor_variable(mu)
-        self.beta = tt.as_tensor_variable(beta)
+        self.mu = tt.as_tensor_variable(floatX(mu))
+        self.beta = tt.as_tensor_variable(floatX(beta))
 
         assert_negative_support(beta, 'beta', 'Gumbel')
 
@@ -3580,9 +3579,9 @@ def __init__(self, nu=None, sigma=None, b=None, sd=None, *args, **kwargs):
            sigma = sd
 
         nu, b, sigma = self.get_nu_b(nu, b, sigma)
-        self.nu = nu = tt.as_tensor_variable(nu)
-        self.sigma = self.sd = sigma = tt.as_tensor_variable(sigma)
-        self.b = b = tt.as_tensor_variable(b)
+        self.nu = nu = tt.as_tensor_variable(floatX(nu))
+        self.sigma = self.sd = sigma = tt.as_tensor_variable(floatX(sigma))
+        self.b = b = tt.as_tensor_variable(floatX(b))
         self.mean = sigma * np.sqrt(np.pi / 2) * tt.exp((-nu**2 / (2 * sigma**2)) / 2) * ((1 - (-nu**2 / (2 * sigma**2)))
                                                                                           * tt.i0(-(-nu**2 / (2 * sigma**2)) / 2) - (-nu**2 / (2 * sigma**2)) * tt.i1(-(-nu**2 / (2 * sigma**2)) / 2))
         self.variance = 2 * sigma**2 + nu**2 - (np.pi * sigma**2 / 2) * (tt.exp((-nu**2 / (2 * sigma**2)) / 2) * ((1 - (-nu**2 / (
@@ -3693,8 +3692,8 @@ class Logistic(Continuous):
     def __init__(self, mu=0., s=1., *args, **kwargs):
         super().__init__(*args, **kwargs)
 
-        self.mu = tt.as_tensor_variable(mu)
-        self.s = tt.as_tensor_variable(s)
+        self.mu = tt.as_tensor_variable(floatX(mu))
+        self.s = tt.as_tensor_variable(floatX(s))
 
         self.mean = self.mode = mu
         self.variance = s**2 * np.pi**2 / 3.
@@ -3826,7 +3825,7 @@ class LogitNormal(UnitContinuous):
     def __init__(self, mu=0, sigma=None, tau=None, sd=None, **kwargs):
         if sd is not None:
             sigma = sd
-        self.mu = mu = tt.as_tensor_variable(mu)
+        self.mu = mu = tt.as_tensor_variable(floatX(mu))
         tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)
         self.sigma = self.sd = tt.as_tensor_variable(sigma)
         self.tau = tau = tt.as_tensor_variable(tau)
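
The pattern across these hunks is uniform: distribution parameters gain a `floatX(...)` cast before `tt.as_tensor_variable`. A hypothetical quick check of the effect (mine, not part of the commit), assuming pymc3 at this revision:

# Hypothetical usage check, not from the commit.
import pymc3 as pm

d = pm.Normal.dist(mu=0, sigma=1)  # plain Python ints as parameters
print(d.mu.dtype)                  # now matches theano.config.floatX, e.g. 'float64'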
