Skip to content

Commit 5246abd

Browse files
Default to pm.Data(mutable=False) as announced
1 parent c8525eb commit 5246abd

File tree

4 files changed

+32
-35
lines changed

pymc/data.py

Lines changed: 8 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -661,16 +661,14 @@ def Data(
661661
arr = convert_observed_data(value)
662662

663663
if mutable is None:
664-
major, minor = (int(v) for v in pm.__version__.split(".")[:2])
665-
mutable = major == 4 and minor < 1
666-
if mutable:
667-
warnings.warn(
668-
"The `mutable` kwarg was not specified. Currently it defaults to `pm.Data(mutable=True)`,"
669-
" which is equivalent to using `pm.MutableData()`."
670-
" In v4.1.0 the default will change to `pm.Data(mutable=False)`, equivalent to `pm.ConstantData`."
671-
" Set `pm.Data(..., mutable=False/True)`, or use `pm.ConstantData`/`pm.MutableData`.",
672-
FutureWarning,
673-
)
664+
warnings.warn(
665+
"The `mutable` kwarg was not specified. Before v4.1.0 it defaulted to `pm.Data(mutable=True)`,"
666+
" which is equivalent to using `pm.MutableData()`."
667+
" In v4.1.0 the default changed to `pm.Data(mutable=False)`, equivalent to `pm.ConstantData`."
668+
" Use `pm.ConstantData`/`pm.MutableData` or pass `pm.Data(..., mutable=False/True)` to avoid this warning.",
669+
UserWarning,
670+
)
671+
mutable = False
674672
if mutable:
675673
x = aesara.shared(arr, name, **kwargs)
676674
else:

pymc/step_methods/mlda.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -260,7 +260,7 @@ class MLDA(ArrayStepShared):
260260
sum of the quantity of interest after sampling. In order to use
261261
variance reduction, the user needs to do the following when defining
262262
the PyMC model (also demonstrated in the example notebook):
263-
- Include a `pm.Data()` variable with the name `Q` in the
263+
- Include a `pm.MutableData()` variable with the name `Q` in the
264264
model description of all levels.
265265
- Use an Aesara Op to calculate the forward model (or the
266266
combination of a forward model and a likelihood). This Op
@@ -286,7 +286,7 @@ class MLDA(ArrayStepShared):
286286
definition at all levels except the finest one, the
287287
extra variables mu_B and Sigma_B, which will capture
288288
the bias between different levels. All these variables
289-
should be instantiated using the pm.Data method.
289+
should be instantiated using the pm.MutableData method.
290290
- Use an Aesara Op to define the forward model (and
291291
optionally the likelihood) for all levels. The Op needs
292292
to store the result of each forward model calculation
@@ -401,12 +401,12 @@ def __init__(
401401
"the variable in the model definition"
402402
"for variance reduction to work or"
403403
"for storing the fine Q."
404-
"Use pm.Data() to define it."
404+
"Use pm.MutableData() to define it."
405405
)
406406
if not isinstance(self.model.Q, TensorSharedVariable):
407407
raise TypeError(
408408
"The variable 'Q' in the model definition is not of type "
409-
"'TensorSharedVariable'. Use pm.Data() to define the"
409+
"'TensorSharedVariable'. Use pm.MutableData() to define the"
410410
"variable."
411411
)
412412

@@ -427,15 +427,15 @@ def __init__(
427427
"variable 'mu_B'. You need to include"
428428
"the variable in the model definition"
429429
"for adaptive error model to work."
430-
"Use pm.Data() to define it."
430+
"Use pm.MutableData() to define it."
431431
)
432432
if not hasattr(self.model_below, "Sigma_B"):
433433
raise AttributeError(
434434
"Model below in hierarchy does not contain"
435435
"variable 'Sigma_B'. You need to include"
436436
"the variable in the model definition"
437437
"for adaptive error model to work."
438-
"Use pm.Data() to define it."
438+
"Use pm.MutableData() to define it."
439439
)
440440
if not (
441441
isinstance(self.model_below.mu_B, TensorSharedVariable)
@@ -444,7 +444,7 @@ def __init__(
444444
raise TypeError(
445445
"At least one of the variables 'mu_B' and 'Sigma_B' "
446446
"in the definition of the below model is not of type "
447-
"'TensorSharedVariable'. Use pm.Data() to define those "
447+
"'TensorSharedVariable'. Use pm.MutableData() to define those "
448448
"variables."
449449
)
450450

pymc/tests/test_data_container.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,6 @@
1818
import pytest
1919

2020
from aesara import shared
21-
from aesara.compile.sharedvalue import SharedVariable
2221
from aesara.tensor import TensorConstant
2322
from aesara.tensor.var import TensorVariable
2423

@@ -431,9 +430,9 @@ def test_data_kwargs(self):
431430

432431
def test_data_mutable_default_warning(self):
433432
with pm.Model():
434-
with pytest.warns(FutureWarning, match="`mutable` kwarg was not specified"):
433+
with pytest.warns(UserWarning, match="`mutable` kwarg was not specified"):
435434
data = pm.Data("x", [1, 2, 3])
436-
assert isinstance(data, SharedVariable)
435+
assert isinstance(data, TensorConstant)
437436
pass
438437

439438

pymc/tests/test_step.py

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@
2929
import pymc as pm
3030

3131
from pymc.aesaraf import floatX
32-
from pymc.data import Data
32+
from pymc.data import Data, MutableData
3333
from pymc.distributions import (
3434
Bernoulli,
3535
Beta,
@@ -1109,7 +1109,7 @@ def perform(self, node, inputs, outputs):
11091109
intercept = inputs[0][0]
11101110
x_coeff = inputs[0][1]
11111111

1112-
temp = intercept + x_coeff * x + self.pymc_model.bias.get_value()
1112+
temp = intercept + x_coeff * x + self.pymc_model.bias.data
11131113
with self.pymc_model:
11141114
set_data({"model_output": temp})
11151115
outputs[0][0] = np.array(temp)
@@ -1120,9 +1120,9 @@ def perform(self, node, inputs, outputs):
11201120

11211121
with Model() as coarse_model_0:
11221122
bias = Data("bias", 3.5 * np.ones(y.shape, dtype=p))
1123-
mu_B = Data("mu_B", -1.3 * np.ones(y.shape, dtype=p))
1124-
Sigma_B = Data("Sigma_B", np.zeros((y.shape[0], y.shape[0]), dtype=p))
1125-
model_output = Data("model_output", np.zeros(y.shape, dtype=p))
1123+
mu_B = MutableData("mu_B", -1.3 * np.ones(y.shape, dtype=p))
1124+
Sigma_B = MutableData("Sigma_B", np.zeros((y.shape[0], y.shape[0]), dtype=p))
1125+
model_output = MutableData("model_output", np.zeros(y.shape, dtype=p))
11261126
Sigma_e = Data("Sigma_e", s)
11271127

11281128
# Define priors
@@ -1140,9 +1140,9 @@ def perform(self, node, inputs, outputs):
11401140

11411141
with Model() as coarse_model_1:
11421142
bias = Data("bias", 2.2 * np.ones(y.shape, dtype=p))
1143-
mu_B = Data("mu_B", -2.2 * np.ones(y.shape, dtype=p))
1144-
Sigma_B = Data("Sigma_B", np.zeros((y.shape[0], y.shape[0]), dtype=p))
1145-
model_output = Data("model_output", np.zeros(y.shape, dtype=p))
1143+
mu_B = MutableData("mu_B", -2.2 * np.ones(y.shape, dtype=p))
1144+
Sigma_B = MutableData("Sigma_B", np.zeros((y.shape[0], y.shape[0]), dtype=p))
1145+
model_output = MutableData("model_output", np.zeros(y.shape, dtype=p))
11461146
Sigma_e = Data("Sigma_e", s)
11471147

11481148
# Define priors
@@ -1161,7 +1161,7 @@ def perform(self, node, inputs, outputs):
11611161
# fine model and inference
11621162
with Model() as model:
11631163
bias = Data("bias", np.zeros(y.shape, dtype=p))
1164-
model_output = Data("model_output", np.zeros(y.shape, dtype=p))
1164+
model_output = MutableData("model_output", np.zeros(y.shape, dtype=p))
11651165
Sigma_e = Data("Sigma_e", s)
11661166

11671167
# Define priors
@@ -1268,9 +1268,9 @@ def perform(self, node, inputs, outputs):
12681268

12691269
with Model() as coarse_model_0:
12701270
if aesara.config.floatX == "float32":
1271-
Q = Data("Q", np.float32(0.0))
1271+
Q = MutableData("Q", np.float32(0.0))
12721272
else:
1273-
Q = Data("Q", np.float64(0.0))
1273+
Q = MutableData("Q", np.float64(0.0))
12741274

12751275
# Define priors
12761276
intercept = Normal("Intercept", true_intercept, sigma=1)
@@ -1285,9 +1285,9 @@ def perform(self, node, inputs, outputs):
12851285

12861286
with Model() as coarse_model_1:
12871287
if aesara.config.floatX == "float32":
1288-
Q = Data("Q", np.float32(0.0))
1288+
Q = MutableData("Q", np.float32(0.0))
12891289
else:
1290-
Q = Data("Q", np.float64(0.0))
1290+
Q = MutableData("Q", np.float64(0.0))
12911291

12921292
# Define priors
12931293
intercept = Normal("Intercept", true_intercept, sigma=1)
@@ -1302,9 +1302,9 @@ def perform(self, node, inputs, outputs):
13021302

13031303
with Model() as model:
13041304
if aesara.config.floatX == "float32":
1305-
Q = Data("Q", np.float32(0.0))
1305+
Q = MutableData("Q", np.float32(0.0))
13061306
else:
1307-
Q = Data("Q", np.float64(0.0))
1307+
Q = MutableData("Q", np.float64(0.0))
13081308

13091309
# Define priors
13101310
intercept = Normal("Intercept", true_intercept, sigma=1)

0 commit comments

Comments (0)