Commit a90c420

Rename cov_func/cov to scale_func/scale for TP/MvStudentT (#6068)
* Rename cov_func/cov to scale_func/scale for TP/MvStudentT
* Fixed syntax error
* Black formatting
* Pop optional cov argument from kwargs
* Replaced DeprecationWarning with FutureWarning
* Removed unused import
1 parent 193e867 commit a90c420

File tree

3 files changed: +40 −26 lines changed


pymc/distributions/multivariate.py

Lines changed: 21 additions & 14 deletions
@@ -352,42 +352,49 @@ class MvStudentT(Continuous):
     nu : tensor_like of float
         Degrees of freedom, should be a positive scalar.
     Sigma : tensor_like of float, optional
-        Covariance matrix. Use `cov` in new code.
+        Scale matrix. Use `scale` in new code.
     mu : tensor_like of float, optional
         Vector of means.
-    cov : tensor_like of float, optional
-        The covariance matrix.
+    scale : tensor_like of float, optional
+        The scale matrix.
     tau : tensor_like of float, optional
         The precision matrix.
     chol : tensor_like of float, optional
-        The cholesky factor of the covariance matrix.
+        The cholesky factor of the scale matrix.
     lower : bool, default=True
         Whether the cholesky factor is given as a lower triangular matrix.
     """
     rv_op = mv_studentt
 
     @classmethod
-    def dist(cls, nu, Sigma=None, mu=None, cov=None, tau=None, chol=None, lower=True, **kwargs):
+    def dist(cls, nu, Sigma=None, mu=None, scale=None, tau=None, chol=None, lower=True, **kwargs):
+        if kwargs.get("cov") is not None:
+            warnings.warn(
+                "Use the scale argument to specify the scale matrix. "
+                "cov will be removed in future versions.",
+                FutureWarning,
+            )
+            scale = kwargs.pop("cov")
         if Sigma is not None:
-            if cov is not None:
-                raise ValueError("Specify only one of cov and Sigma")
-            cov = Sigma
+            if scale is not None:
+                raise ValueError("Specify only one of scale and Sigma")
+            scale = Sigma
         nu = at.as_tensor_variable(floatX(nu))
         mu = at.as_tensor_variable(floatX(mu))
-        cov = quaddist_matrix(cov, chol, tau, lower)
+        scale = quaddist_matrix(scale, chol, tau, lower)
         # Aesara is stricter about the shape of mu, than PyMC used to be
-        mu = at.broadcast_arrays(mu, cov[..., -1])[0]
+        mu = at.broadcast_arrays(mu, scale[..., -1])[0]
 
-        return super().dist([nu, mu, cov], **kwargs)
+        return super().dist([nu, mu, scale], **kwargs)
 
-    def moment(rv, size, nu, mu, cov):
+    def moment(rv, size, nu, mu, scale):
         moment = mu
         if not rv_size_is_none(size):
             moment_size = at.concatenate([size, [mu.shape[-1]]])
             moment = at.full(moment_size, moment)
         return moment
 
-    def logp(value, nu, mu, cov):
+    def logp(value, nu, mu, scale):
         """
         Calculate log-probability of Multivariate Student's T distribution
         at specified value.
@@ -401,7 +408,7 @@ def logp(value, nu, mu, cov):
         -------
         TensorVariable
         """
-        quaddist, logdet, ok = quaddist_parse(value, mu, cov)
+        quaddist, logdet, ok = quaddist_parse(value, mu, scale)
         k = floatX(value.shape[-1])
 
         norm = gammaln((nu + k) / 2.0) - gammaln(nu / 2.0) - 0.5 * k * at.log(nu * np.pi)
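
For context, a minimal usage sketch of the renamed parameter (the 2x2 scale matrix and the argument values are illustrative, not taken from the diff): new code passes `scale=`, while the old `cov=` keyword is still accepted, emits a FutureWarning, and is forwarded to `scale` internally.

    import warnings

    import numpy as np
    import pymc as pm

    S = np.array([[2.0, 0.3], [0.3, 1.0]])  # illustrative 2x2 scale matrix

    # New spelling: pass the scale matrix via `scale`.
    x = pm.MvStudentT.dist(nu=5, mu=np.zeros(2), scale=S)

    # Old spelling: `cov` still works, but now warns before being
    # forwarded to `scale`.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        y = pm.MvStudentT.dist(nu=5, mu=np.zeros(2), cov=S)
    assert any(issubclass(w.category, FutureWarning) for w in caught)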

pymc/gp/gp.py

Lines changed: 11 additions & 4 deletions
@@ -250,8 +250,8 @@ class TP(Latent):
 
     Parameters
     ----------
-    cov_func : None, 2D array, or instance of Covariance
-        The covariance function. Defaults to zero.
+    scale_func : None, 2D array, or instance of Covariance
+        The scale function. Defaults to zero.
     mean_func : None, instance of Mean
         The mean function. Defaults to zero.
     nu : float
@@ -263,11 +263,18 @@ class TP(Latent):
         Processes as Alternatives to Gaussian Processes. arXiv preprint arXiv:1402.4306.
     """
 
-    def __init__(self, *, mean_func=Zero(), cov_func=Constant(0.0), nu=None):
+    def __init__(self, *, mean_func=Zero(), scale_func=Constant(0.0), cov_func=None, nu=None):
         if nu is None:
             raise ValueError("Student's T process requires a degrees of freedom parameter, 'nu'")
+        if cov_func is not None:
+            warnings.warn(
+                "Use the scale_func argument to specify the scale function. "
+                "cov_func will be removed in future versions.",
+                FutureWarning,
+            )
+            scale_func = cov_func
         self.nu = nu
-        super().__init__(mean_func=mean_func, cov_func=cov_func)
+        super().__init__(mean_func=mean_func, cov_func=scale_func)
 
     def __add__(self, other):
         raise TypeError("Student's T processes aren't additive")
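
The same pattern applies to `TP`. A short sketch (the kernel, lengthscale, and inputs are illustrative): new code passes `scale_func=`, while `cov_func=` still works but emits a FutureWarning and is forwarded to `scale_func`.

    import numpy as np
    import pymc as pm

    X = np.linspace(0, 1, 10)[:, None]  # illustrative inputs

    with pm.Model():
        scale_func = pm.gp.cov.ExpQuad(1, ls=0.1)

        # New spelling.
        tp = pm.gp.TP(scale_func=scale_func, nu=3)
        f = tp.prior("f", X)

        # Old spelling: still accepted, emits a FutureWarning, and is
        # forwarded to scale_func internally.
        tp_old = pm.gp.TP(cov_func=scale_func, nu=3)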

pymc/tests/test_gp.py

Lines changed: 8 additions & 8 deletions
@@ -1068,8 +1068,8 @@ def setup_method(self):
 
     def testTPvsLatent(self):
         with pm.Model() as model:
-            cov_func = pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3])
-            tp = pm.gp.TP(cov_func=cov_func, nu=self.nu)
+            scale_func = pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3])
+            tp = pm.gp.TP(scale_func=scale_func, nu=self.nu)
             f = tp.prior("f", self.X, reparameterize=False)
             p = tp.conditional("p", self.Xnew)
         assert tuple(f.shape.eval()) == (self.X.shape[0],)
@@ -1079,22 +1079,22 @@ def testTPvsLatent(self):
 
     def testTPvsLatentReparameterized(self):
         with pm.Model() as model:
-            cov_func = pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3])
-            tp = pm.gp.TP(cov_func=cov_func, nu=self.nu)
+            scale_func = pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3])
+            tp = pm.gp.TP(scale_func=scale_func, nu=self.nu)
             f = tp.prior("f", self.X, reparameterize=True)
             p = tp.conditional("p", self.Xnew)
         assert tuple(f.shape.eval()) == (self.X.shape[0],)
         assert tuple(p.shape.eval()) == (self.Xnew.shape[0],)
-        chol = np.linalg.cholesky(cov_func(self.X).eval())
+        chol = np.linalg.cholesky(scale_func(self.X).eval())
         f_rotated = np.linalg.solve(chol, self.y)
         tp_logp = model.compile_logp()({"f_rotated_": f_rotated, "p": self.pnew})
         npt.assert_allclose(self.gp_latent_logp, tp_logp, atol=0, rtol=1e-2)
 
     def testAdditiveTPRaises(self):
         with pm.Model() as model:
-            cov_func = pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3])
-            gp1 = pm.gp.TP(cov_func=cov_func, nu=10)
-            gp2 = pm.gp.TP(cov_func=cov_func, nu=10)
+            scale_func = pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3])
+            gp1 = pm.gp.TP(scale_func=scale_func, nu=10)
+            gp2 = pm.gp.TP(scale_func=scale_func, nu=10)
             with pytest.raises(Exception) as e_info:
                 gp1 + gp2