diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b7ff1c319..d28105289 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -83,8 +83,6 @@ repos: examples/howto/custom_distribution.ipynb) entry: > (?x)(arviz-devs.github.io| - aesara.readthedocs.io| - aeppl.readthedocs.io| pymc-experimental.readthedocs.io| docs.pymc.io| numpy.org/doc| @@ -92,6 +90,7 @@ repos: docs.python.org| xarray.pydata.org python.arviz.org| + pytensor.readthedocs.io| docs.xarray.dev| www.pymc.io| docs.scipy.org/doc) diff --git a/examples/case_studies/GEV.ipynb b/examples/case_studies/GEV.ipynb index d84910559..f9cc50132 100644 --- a/examples/case_studies/GEV.ipynb +++ b/examples/case_studies/GEV.ipynb @@ -50,7 +50,7 @@ "import numpy as np\n", "import pymc as pm\n", "import pymc_experimental.distributions as pmx\n", - "import pytensor.tensor as at\n", + "import pytensor.tensor as pt\n", "\n", "from arviz.plots import plot_utils as azpu" ] diff --git a/examples/case_studies/GEV.myst.md b/examples/case_studies/GEV.myst.md index fd5b3c32f..eecc303c1 100644 --- a/examples/case_studies/GEV.myst.md +++ b/examples/case_studies/GEV.myst.md @@ -45,7 +45,7 @@ import matplotlib.pyplot as plt import numpy as np import pymc as pm import pymc_experimental.distributions as pmx -import pytensor.tensor as at +import pytensor.tensor as pt from arviz.plots import plot_utils as azpu ``` diff --git a/examples/case_studies/binning.ipynb b/examples/case_studies/binning.ipynb index 954897f0f..8c2a9c02c 100644 --- a/examples/case_studies/binning.ipynb +++ b/examples/case_studies/binning.ipynb @@ -72,7 +72,7 @@ "We are now in a position to sketch out a generative PyMC model:\n", "\n", "```python\n", - "import pytensor.tensor as at\n", + "import pytensor.tensor as pt\n", "\n", "with pm.Model() as model:\n", " # priors\n", @@ -81,7 +81,7 @@ " # generative process\n", " probs = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), cutpoints))\n", " probs = pm.math.concatenate([[0], probs, [1]])\n", - " probs = at.extra_ops.diff(probs)\n", + " probs = pt.extra_ops.diff(probs)\n", " # likelihood\n", " pm.Multinomial(\"counts\", p=probs, n=sum(counts), observed=counts)\n", "```\n", @@ -98,7 +98,7 @@ "simply concatenates the cumulative density at $-\\infty$ (which is zero) and at $\\infty$ (which is 1).\n", "The third line\n", "```python\n", - "probs = at.extra_ops.diff(probs)\n", + "probs = pt.extra_ops.diff(probs)\n", "```\n", "calculates the difference between consecutive cumulative densities to give the actual probability of a datum falling in any given bin.\n", "\n", @@ -125,7 +125,7 @@ "import numpy as np\n", "import pandas as pd\n", "import pymc as pm\n", - "import pytensor.tensor as at\n", + "import pytensor.tensor as pt\n", "import seaborn as sns\n", "\n", "warnings.filterwarnings(action=\"ignore\", category=UserWarning)" @@ -320,7 +320,7 @@ " mu = pm.Normal(\"mu\")\n", "\n", " probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d1))\n", - " probs1 = at.extra_ops.diff(pm.math.concatenate([[0], probs1, [1]]))\n", + " probs1 = pt.extra_ops.diff(pm.math.concatenate([[0], probs1, [1]]))\n", " pm.Multinomial(\"counts1\", p=probs1, n=c1.sum(), observed=c1.values)" ] }, @@ -841,7 +841,7 @@ " mu = pm.Normal(\"mu\")\n", "\n", " probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d2))\n", - " probs2 = at.extra_ops.diff(pm.math.concatenate([[0], probs2, [1]]))\n", + " probs2 = pt.extra_ops.diff(pm.math.concatenate([[0], probs2, [1]]))\n", " pm.Multinomial(\"counts2\", p=probs2, 
n=c2.sum(), observed=c2.values)" ] }, @@ -1238,11 +1238,11 @@ " mu = pm.Normal(\"mu\")\n", "\n", " probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d1))\n", - " probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n", + " probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n", " probs1 = pm.Deterministic(\"normal1_cdf\", probs1)\n", "\n", " probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d2))\n", - " probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n", + " probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n", " probs2 = pm.Deterministic(\"normal2_cdf\", probs2)\n", "\n", " pm.Multinomial(\"counts1\", p=probs1, n=c1.sum(), observed=c1.values)\n", @@ -1719,7 +1719,7 @@ " mu = pm.Normal(\"mu\")\n", " # study 1\n", " probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d1))\n", - " probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n", + " probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n", " probs1 = pm.Deterministic(\"normal1_cdf\", probs1)\n", " pm.Multinomial(\"counts1\", p=probs1, n=c1.sum(), observed=c1.values)\n", " # study 2\n", @@ -2149,12 +2149,12 @@ "\n", " # Study 1\n", " probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[0], sigma=sigma[0]), d1))\n", - " probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n", + " probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n", " probs1 = pm.Deterministic(\"normal1_cdf\", probs1, dims=\"bin1\")\n", "\n", " # Study 2\n", " probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[1], sigma=sigma[1]), d2))\n", - " probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n", + " probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n", " probs2 = pm.Deterministic(\"normal2_cdf\", probs2, dims=\"bin2\")\n", "\n", " # Likelihood\n", @@ -2392,12 +2392,12 @@ "\n", " # Study 1\n", " probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[0], sigma=sigma[0]), d1))\n", - " probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n", + " probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n", " probs1 = pm.Deterministic(\"normal1_cdf\", probs1, dims=\"bin1\")\n", "\n", " # Study 2\n", " probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[1], sigma=sigma[1]), d2))\n", - " probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n", + " probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n", " probs2 = pm.Deterministic(\"normal2_cdf\", probs2, dims=\"bin2\")\n", "\n", " # Likelihood\n", @@ -2927,12 +2927,12 @@ " \n", " # Study 1\n", " probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[0], sigma=sigma[0]), d1))\n", - " probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n", + " probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n", " probs1 = pm.Deterministic(\"normal1_cdf\", probs1, dims='bin1')\n", "\n", " # Study 2\n", " probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[1], sigma=sigma[1]), d2))\n", - " probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n", + " probs2 = 
pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n", " probs2 = pm.Deterministic(\"normal2_cdf\", probs2, dims='bin2')\n", "\n", " # Likelihood\n", @@ -3091,11 +3091,11 @@ " beta = pm.HalfNormal(\"beta\", 10)\n", "\n", " probs1 = pm.math.exp(pm.logcdf(pm.Gumbel.dist(mu=mu, beta=beta), d1))\n", - " probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n", + " probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n", " probs1 = pm.Deterministic(\"gumbel_cdf1\", probs1)\n", "\n", " probs2 = pm.math.exp(pm.logcdf(pm.Gumbel.dist(mu=mu, beta=beta), d2))\n", - " probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n", + " probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n", " probs2 = pm.Deterministic(\"gumbel_cdf2\", probs2)\n", "\n", " pm.Multinomial(\"counts1\", p=probs1, n=c1.sum(), observed=c1.values)\n", diff --git a/examples/case_studies/binning.myst.md b/examples/case_studies/binning.myst.md index 1fc0fac22..bad6f44b8 100644 --- a/examples/case_studies/binning.myst.md +++ b/examples/case_studies/binning.myst.md @@ -69,7 +69,7 @@ In ordinal regression, the cutpoints are treated as latent variables and the par We are now in a position to sketch out a generative PyMC model: ```python -import pytensor.tensor as at +import pytensor.tensor as pt with pm.Model() as model: # priors @@ -78,7 +78,7 @@ with pm.Model() as model: # generative process probs = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), cutpoints)) probs = pm.math.concatenate([[0], probs, [1]]) - probs = at.extra_ops.diff(probs) + probs = pt.extra_ops.diff(probs) # likelihood pm.Multinomial("counts", p=probs, n=sum(counts), observed=counts) ``` @@ -95,7 +95,7 @@ probs = pm.math.concatenate([[0], probs, [1]]) simply concatenates the cumulative density at $-\infty$ (which is zero) and at $\infty$ (which is 1). The third line ```python -probs = at.extra_ops.diff(probs) +probs = pt.extra_ops.diff(probs) ``` calculates the difference between consecutive cumulative densities to give the actual probability of a datum falling in any given bin. 
@@ -115,7 +115,7 @@ import matplotlib.pyplot as plt import numpy as np import pandas as pd import pymc as pm -import pytensor.tensor as at +import pytensor.tensor as pt import seaborn as sns warnings.filterwarnings(action="ignore", category=UserWarning) @@ -226,7 +226,7 @@ with pm.Model() as model1: mu = pm.Normal("mu") probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d1)) - probs1 = at.extra_ops.diff(pm.math.concatenate([[0], probs1, [1]])) + probs1 = pt.extra_ops.diff(pm.math.concatenate([[0], probs1, [1]])) pm.Multinomial("counts1", p=probs1, n=c1.sum(), observed=c1.values) ``` @@ -331,7 +331,7 @@ with pm.Model() as model2: mu = pm.Normal("mu") probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d2)) - probs2 = at.extra_ops.diff(pm.math.concatenate([[0], probs2, [1]])) + probs2 = pt.extra_ops.diff(pm.math.concatenate([[0], probs2, [1]])) pm.Multinomial("counts2", p=probs2, n=c2.sum(), observed=c2.values) ``` @@ -426,11 +426,11 @@ with pm.Model() as model3: mu = pm.Normal("mu") probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d1)) - probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])])) + probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])])) probs1 = pm.Deterministic("normal1_cdf", probs1) probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d2)) - probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])])) + probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])])) probs2 = pm.Deterministic("normal2_cdf", probs2) pm.Multinomial("counts1", p=probs1, n=c1.sum(), observed=c1.values) @@ -519,7 +519,7 @@ with pm.Model() as model4: mu = pm.Normal("mu") # study 1 probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d1)) - probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])])) + probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])])) probs1 = pm.Deterministic("normal1_cdf", probs1) pm.Multinomial("counts1", p=probs1, n=c1.sum(), observed=c1.values) # study 2 @@ -612,12 +612,12 @@ with pm.Model(coords=coords) as model5: # Study 1 probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[0], sigma=sigma[0]), d1)) - probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])])) + probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])])) probs1 = pm.Deterministic("normal1_cdf", probs1, dims="bin1") # Study 2 probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[1], sigma=sigma[1]), d2)) - probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])])) + probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])])) probs2 = pm.Deterministic("normal2_cdf", probs2, dims="bin2") # Likelihood @@ -645,12 +645,12 @@ with pm.Model(coords=coords) as model5: # Study 1 probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[0], sigma=sigma[0]), d1)) - probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])])) + probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])])) probs1 = pm.Deterministic("normal1_cdf", probs1, dims="bin1") # Study 2 probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[1], sigma=sigma[1]), d2)) - probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])])) + probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])])) 
probs2 = pm.Deterministic("normal2_cdf", probs2, dims="bin2") # Likelihood @@ -748,12 +748,12 @@ with pm.Model(coords=coords) as model5: # Study 1 probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[0], sigma=sigma[0]), d1)) - probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])])) + probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])])) probs1 = pm.Deterministic("normal1_cdf", probs1, dims='bin1') # Study 2 probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[1], sigma=sigma[1]), d2)) - probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])])) + probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])])) probs2 = pm.Deterministic("normal2_cdf", probs2, dims='bin2') # Likelihood @@ -855,11 +855,11 @@ with pm.Model() as model6: beta = pm.HalfNormal("beta", 10) probs1 = pm.math.exp(pm.logcdf(pm.Gumbel.dist(mu=mu, beta=beta), d1)) - probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])])) + probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])])) probs1 = pm.Deterministic("gumbel_cdf1", probs1) probs2 = pm.math.exp(pm.logcdf(pm.Gumbel.dist(mu=mu, beta=beta), d2)) - probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])])) + probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])])) probs2 = pm.Deterministic("gumbel_cdf2", probs2) pm.Multinomial("counts1", p=probs1, n=c1.sum(), observed=c1.values) diff --git a/examples/case_studies/blackbox_external_likelihood_numpy.ipynb b/examples/case_studies/blackbox_external_likelihood_numpy.ipynb index 8666a2b6b..aa46425a6 100644 --- a/examples/case_studies/blackbox_external_likelihood_numpy.ipynb +++ b/examples/case_studies/blackbox_external_likelihood_numpy.ipynb @@ -47,7 +47,7 @@ "import numpy as np\n", "import pymc as pm\n", "import pytensor\n", - "import pytensor.tensor as at\n", + "import pytensor.tensor as pt\n", "\n", "print(f\"Running on PyMC v{pm.__version__}\")" ] @@ -180,7 +180,7 @@ "outputs": [], "source": [ "# define a pytensor Op for our likelihood function\n", - "class LogLike(at.Op):\n", + "class LogLike(pt.Op):\n", "\n", " \"\"\"\n", " Specify what type of object will be passed and returned to the Op when it is\n", @@ -189,8 +189,8 @@ " log-likelihood)\n", " \"\"\"\n", "\n", - " itypes = [at.dvector] # expects a vector of parameter values when called\n", - " otypes = [at.dscalar] # outputs a single scalar value (the log likelihood)\n", + " itypes = [pt.dvector] # expects a vector of parameter values when called\n", + " otypes = [pt.dscalar] # outputs a single scalar value (the log likelihood)\n", "\n", " def __init__(self, loglike, data, x, sigma):\n", " \"\"\"\n", @@ -336,7 +336,7 @@ " c = pm.Uniform(\"c\", lower=-10.0, upper=10.0)\n", "\n", " # convert m and c to a tensor vector\n", - " theta = at.as_tensor_variable([m, c])\n", + " theta = pt.as_tensor_variable([m, c])\n", "\n", " # use a Potential to \"call\" the Op and include it in the logp computation\n", " pm.Potential(\"likelihood\", logl(theta))\n", @@ -426,10 +426,10 @@ "outputs": [], "source": [ "# define a pytensor Op for our likelihood function\n", - "class LogLikeWithGrad(at.Op):\n", + "class LogLikeWithGrad(pt.Op):\n", "\n", - " itypes = [at.dvector] # expects a vector of parameter values when called\n", - " otypes = [at.dscalar] # outputs a single scalar value (the log likelihood)\n", + " itypes = [pt.dvector] # expects a vector of 
parameter values when called\n", + " otypes = [pt.dscalar] # outputs a single scalar value (the log likelihood)\n", "\n", " def __init__(self, loglike, data, x, sigma):\n", " \"\"\"\n", @@ -473,15 +473,15 @@ " return [g[0] * self.logpgrad(theta)]\n", "\n", "\n", - "class LogLikeGrad(at.Op):\n", + "class LogLikeGrad(pt.Op):\n", "\n", " \"\"\"\n", " This Op will be called with a vector of values and also return a vector of\n", " values - the gradients in each dimension.\n", " \"\"\"\n", "\n", - " itypes = [at.dvector]\n", - " otypes = [at.dvector]\n", + " itypes = [pt.dvector]\n", + " otypes = [pt.dvector]\n", "\n", " def __init__(self, data, x, sigma):\n", " \"\"\"\n", @@ -609,7 +609,7 @@ " c = pm.Uniform(\"c\", lower=-10.0, upper=10.0)\n", "\n", " # convert m and c to a tensor vector\n", - " theta = at.as_tensor_variable([m, c])\n", + " theta = pt.as_tensor_variable([m, c])\n", "\n", " # use a Potential\n", " pm.Potential(\"likelihood\", logl(theta))\n", @@ -710,7 +710,7 @@ " c = pm.Uniform(\"c\", lower=-10.0, upper=10.0)\n", "\n", " # convert m and c to a tensor vector\n", - " theta = at.as_tensor_variable([m, c])\n", + " theta = pt.as_tensor_variable([m, c])\n", "\n", " # use a Normal distribution\n", " pm.Normal(\"likelihood\", mu=(m * x + c), sd=sigma, observed=data)\n", @@ -947,7 +947,7 @@ "pytensor.config.compute_test_value = \"ignore\"\n", "pytensor.config.exception_verbosity = \"high\"\n", "\n", - "var = at.dvector()\n", + "var = pt.dvector()\n", "test_grad_op = LogLikeGrad(data, x, sigma)\n", "test_grad_op_func = pytensor.function([var], test_grad_op(var))\n", "grad_vals = test_grad_op_func([mtrue, ctrue])\n", @@ -956,7 +956,7 @@ "\n", "# test the gradient called through LogLikeWithGrad\n", "test_gradded_op = LogLikeWithGrad(my_loglike, data, x, sigma)\n", - "test_gradded_op_grad = at.grad(test_gradded_op(var), var)\n", + "test_gradded_op_grad = pt.grad(test_gradded_op(var), var)\n", "test_gradded_op_grad_func = pytensor.function([var], test_gradded_op_grad)\n", "grad_vals_2 = test_gradded_op_grad_func([mtrue, ctrue])\n", "\n", diff --git a/examples/case_studies/blackbox_external_likelihood_numpy.myst.md b/examples/case_studies/blackbox_external_likelihood_numpy.myst.md index 6b83a6984..b3206f13f 100644 --- a/examples/case_studies/blackbox_external_likelihood_numpy.myst.md +++ b/examples/case_studies/blackbox_external_likelihood_numpy.myst.md @@ -32,7 +32,7 @@ import matplotlib.pyplot as plt import numpy as np import pymc as pm import pytensor -import pytensor.tensor as at +import pytensor.tensor as pt print(f"Running on PyMC v{pm.__version__}") ``` @@ -116,7 +116,7 @@ So, what we actually need to do is create a [PyTensor Op](http://deeplearning.ne ```{code-cell} ipython3 # define a pytensor Op for our likelihood function -class LogLike(at.Op): +class LogLike(pt.Op): """ Specify what type of object will be passed and returned to the Op when it is @@ -125,8 +125,8 @@ class LogLike(at.Op): log-likelihood) """ - itypes = [at.dvector] # expects a vector of parameter values when called - otypes = [at.dscalar] # outputs a single scalar value (the log likelihood) + itypes = [pt.dvector] # expects a vector of parameter values when called + otypes = [pt.dscalar] # outputs a single scalar value (the log likelihood) def __init__(self, loglike, data, x, sigma): """ @@ -189,7 +189,7 @@ with pm.Model(): c = pm.Uniform("c", lower=-10.0, upper=10.0) # convert m and c to a tensor vector - theta = at.as_tensor_variable([m, c]) + theta = pt.as_tensor_variable([m, c]) # use a Potential to "call" the 
Op and include it in the logp computation pm.Potential("likelihood", logl(theta)) @@ -245,10 +245,10 @@ It's not quite so simple! The `grad()` method itself requires that its inputs ar ```{code-cell} ipython3 # define a pytensor Op for our likelihood function -class LogLikeWithGrad(at.Op): +class LogLikeWithGrad(pt.Op): - itypes = [at.dvector] # expects a vector of parameter values when called - otypes = [at.dscalar] # outputs a single scalar value (the log likelihood) + itypes = [pt.dvector] # expects a vector of parameter values when called + otypes = [pt.dscalar] # outputs a single scalar value (the log likelihood) def __init__(self, loglike, data, x, sigma): """ @@ -292,15 +292,15 @@ class LogLikeWithGrad(at.Op): return [g[0] * self.logpgrad(theta)] -class LogLikeGrad(at.Op): +class LogLikeGrad(pt.Op): """ This Op will be called with a vector of values and also return a vector of values - the gradients in each dimension. """ - itypes = [at.dvector] - otypes = [at.dvector] + itypes = [pt.dvector] + otypes = [pt.dvector] def __init__(self, data, x, sigma): """ @@ -344,7 +344,7 @@ with pm.Model() as opmodel: c = pm.Uniform("c", lower=-10.0, upper=10.0) # convert m and c to a tensor vector - theta = at.as_tensor_variable([m, c]) + theta = pt.as_tensor_variable([m, c]) # use a Potential pm.Potential("likelihood", logl(theta)) @@ -365,7 +365,7 @@ with pm.Model() as pymodel: c = pm.Uniform("c", lower=-10.0, upper=10.0) # convert m and c to a tensor vector - theta = at.as_tensor_variable([m, c]) + theta = pt.as_tensor_variable([m, c]) # use a Normal distribution pm.Normal("likelihood", mu=(m * x + c), sd=sigma, observed=data) @@ -416,7 +416,7 @@ We can now check that the gradient Op works as expected. First, just create and pytensor.config.compute_test_value = "ignore" pytensor.config.exception_verbosity = "high" -var = at.dvector() +var = pt.dvector() test_grad_op = LogLikeGrad(data, x, sigma) test_grad_op_func = pytensor.function([var], test_grad_op(var)) grad_vals = test_grad_op_func([mtrue, ctrue]) @@ -425,7 +425,7 @@ print(f'Gradient returned by "LogLikeGrad": {grad_vals}') # test the gradient called through LogLikeWithGrad test_gradded_op = LogLikeWithGrad(my_loglike, data, x, sigma) -test_gradded_op_grad = at.grad(test_gradded_op(var), var) +test_gradded_op_grad = pt.grad(test_gradded_op(var), var) test_gradded_op_grad_func = pytensor.function([var], test_gradded_op_grad) grad_vals_2 = test_gradded_op_grad_func([mtrue, ctrue]) diff --git a/examples/case_studies/factor_analysis.ipynb b/examples/case_studies/factor_analysis.ipynb index cc019c28f..d68adb0e7 100644 --- a/examples/case_studies/factor_analysis.ipynb +++ b/examples/case_studies/factor_analysis.ipynb @@ -47,7 +47,7 @@ "import matplotlib\n", "import numpy as np\n", "import pymc as pm\n", - "import pytensor.tensor as at\n", + "import pytensor.tensor as pt\n", "import scipy as sp\n", "import seaborn as sns\n", "import xarray as xr\n", @@ -271,7 +271,7 @@ " W = pm.Normal(\"W\", dims=(\"observed_columns\", \"latent_columns\"))\n", " F = pm.Normal(\"F\", dims=(\"latent_columns\", \"rows\"))\n", " psi = pm.HalfNormal(\"psi\", 1.0)\n", - " X = pm.Normal(\"X\", mu=at.dot(W, F), sigma=psi, observed=Y, dims=(\"observed_columns\", \"rows\"))\n", + " X = pm.Normal(\"X\", mu=pt.dot(W, F), sigma=psi, observed=Y, dims=(\"observed_columns\", \"rows\"))\n", "\n", " trace = pm.sample(tune=2000, random_seed=RANDOM_SEED) # target_accept=0.9" ] @@ -342,11 +342,11 @@ "\n", " def set_(M, i_, v_):\n", " if mtype == \"pytensor\":\n", - " return 
at.set_subtensor(M[i_], v_)\n", + " return pt.set_subtensor(M[i_], v_)\n", " M[i_] = v_\n", " return M\n", "\n", - " out = at.zeros((d, k), dtype=float) if mtype == \"pytensor\" else np.zeros((d, k), dtype=float)\n", + " out = pt.zeros((d, k), dtype=float) if mtype == \"pytensor\" else np.zeros((d, k), dtype=float)\n", " if diag is None:\n", " idxs = np.tril_indices(d, m=k)\n", " out = set_(out, idxs, packed)\n", @@ -378,8 +378,8 @@ " # trick: the cumulative sum of z will be positive increasing\n", " z = pm.HalfNormal(\"W_z\", 1.0, dims=\"latent_columns\")\n", " b = pm.HalfNormal(\"W_b\", 1.0, shape=(n_od,), dims=\"packed_dim\")\n", - " L = expand_packed_block_triangular(d, k, b, at.ones(k))\n", - " W = pm.Deterministic(\"W\", at.dot(L, at.diag(at.extra_ops.cumsum(z))), dims=dim_names)\n", + " L = expand_packed_block_triangular(d, k, b, pt.ones(k))\n", + " W = pm.Deterministic(\"W\", pt.dot(L, pt.diag(pt.extra_ops.cumsum(z))), dims=dim_names)\n", " return W" ] }, @@ -480,7 +480,7 @@ " W = makeW(d, k, (\"observed_columns\", \"latent_columns\"))\n", " F = pm.Normal(\"F\", dims=(\"latent_columns\", \"rows\"))\n", " psi = pm.HalfNormal(\"psi\", 1.0)\n", - " X = pm.Normal(\"X\", mu=at.dot(W, F), sigma=psi, observed=Y, dims=(\"observed_columns\", \"rows\"))\n", + " X = pm.Normal(\"X\", mu=pt.dot(W, F), sigma=psi, observed=Y, dims=(\"observed_columns\", \"rows\"))\n", " trace = pm.sample(tune=2000) # target_accept=0.9\n", "\n", "for i in range(4):\n", @@ -579,7 +579,7 @@ " psi = pm.HalfNormal(\"psi\", 1.0)\n", " E = pm.Deterministic(\n", " \"cov\",\n", - " at.dot(W, at.transpose(W)) + psi * at.diag(at.ones(d)),\n", + " pt.dot(W, pt.transpose(W)) + psi * pt.diag(pt.ones(d)),\n", " dims=(\"observed_columns\", \"observed_columns2\"),\n", " )\n", " X = pm.MvNormal(\"X\", 0.0, cov=E, observed=Y_mb)\n", diff --git a/examples/case_studies/factor_analysis.myst.md b/examples/case_studies/factor_analysis.myst.md index cc811b814..66f23a692 100644 --- a/examples/case_studies/factor_analysis.myst.md +++ b/examples/case_studies/factor_analysis.myst.md @@ -36,7 +36,7 @@ import arviz as az import matplotlib import numpy as np import pymc as pm -import pytensor.tensor as at +import pytensor.tensor as pt import scipy as sp import seaborn as sns import xarray as xr @@ -120,7 +120,7 @@ with pm.Model(coords=coords) as PPCA: W = pm.Normal("W", dims=("observed_columns", "latent_columns")) F = pm.Normal("F", dims=("latent_columns", "rows")) psi = pm.HalfNormal("psi", 1.0) - X = pm.Normal("X", mu=at.dot(W, F), sigma=psi, observed=Y, dims=("observed_columns", "rows")) + X = pm.Normal("X", mu=pt.dot(W, F), sigma=psi, observed=Y, dims=("observed_columns", "rows")) trace = pm.sample(tune=2000, random_seed=RANDOM_SEED) # target_accept=0.9 ``` @@ -155,11 +155,11 @@ def expand_packed_block_triangular(d, k, packed, diag=None, mtype="pytensor"): def set_(M, i_, v_): if mtype == "pytensor": - return at.set_subtensor(M[i_], v_) + return pt.set_subtensor(M[i_], v_) M[i_] = v_ return M - out = at.zeros((d, k), dtype=float) if mtype == "pytensor" else np.zeros((d, k), dtype=float) + out = pt.zeros((d, k), dtype=float) if mtype == "pytensor" else np.zeros((d, k), dtype=float) if diag is None: idxs = np.tril_indices(d, m=k) out = set_(out, idxs, packed) @@ -181,8 +181,8 @@ def makeW(d, k, dim_names): # trick: the cumulative sum of z will be positive increasing z = pm.HalfNormal("W_z", 1.0, dims="latent_columns") b = pm.HalfNormal("W_b", 1.0, shape=(n_od,), dims="packed_dim") - L = expand_packed_block_triangular(d, k, b, at.ones(k)) - W 
= pm.Deterministic("W", at.dot(L, at.diag(at.extra_ops.cumsum(z))), dims=dim_names) + L = expand_packed_block_triangular(d, k, b, pt.ones(k)) + W = pm.Deterministic("W", pt.dot(L, pt.diag(pt.extra_ops.cumsum(z))), dims=dim_names) return W ``` @@ -193,7 +193,7 @@ with pm.Model(coords=coords) as PPCA_identified: W = makeW(d, k, ("observed_columns", "latent_columns")) F = pm.Normal("F", dims=("latent_columns", "rows")) psi = pm.HalfNormal("psi", 1.0) - X = pm.Normal("X", mu=at.dot(W, F), sigma=psi, observed=Y, dims=("observed_columns", "rows")) + X = pm.Normal("X", mu=pt.dot(W, F), sigma=psi, observed=Y, dims=("observed_columns", "rows")) trace = pm.sample(tune=2000) # target_accept=0.9 for i in range(4): @@ -223,7 +223,7 @@ with pm.Model(coords=coords) as PPCA_scaling: psi = pm.HalfNormal("psi", 1.0) E = pm.Deterministic( "cov", - at.dot(W, at.transpose(W)) + psi * at.diag(at.ones(d)), + pt.dot(W, pt.transpose(W)) + psi * pt.diag(pt.ones(d)), dims=("observed_columns", "observed_columns2"), ) X = pm.MvNormal("X", 0.0, cov=E, observed=Y_mb) diff --git a/examples/case_studies/hierarchical_partial_pooling.ipynb b/examples/case_studies/hierarchical_partial_pooling.ipynb index 146f5317c..a114d5f5d 100644 --- a/examples/case_studies/hierarchical_partial_pooling.ipynb +++ b/examples/case_studies/hierarchical_partial_pooling.ipynb @@ -65,7 +65,7 @@ "import numpy as np\n", "import pandas as pd\n", "import pymc as pm\n", - "import pytensor.tensor as at\n", + "import pytensor.tensor as pt\n", "\n", "%matplotlib inline" ] @@ -159,7 +159,7 @@ " phi = pm.Uniform(\"phi\", lower=0.0, upper=1.0)\n", "\n", " kappa_log = pm.Exponential(\"kappa_log\", lam=1.5)\n", - " kappa = pm.Deterministic(\"kappa\", at.exp(kappa_log))\n", + " kappa = pm.Deterministic(\"kappa\", pt.exp(kappa_log))\n", "\n", " theta = pm.Beta(\"theta\", alpha=phi * kappa, beta=(1.0 - phi) * kappa, dims=\"player_names\")\n", " y = pm.Binomial(\"y\", n=at_bats, p=theta, dims=\"player_names\", observed=hits)" diff --git a/examples/case_studies/hierarchical_partial_pooling.myst.md b/examples/case_studies/hierarchical_partial_pooling.myst.md index c69bde334..185dffde1 100644 --- a/examples/case_studies/hierarchical_partial_pooling.myst.md +++ b/examples/case_studies/hierarchical_partial_pooling.myst.md @@ -55,7 +55,7 @@ import matplotlib.pyplot as plt import numpy as np import pandas as pd import pymc as pm -import pytensor.tensor as at +import pytensor.tensor as pt %matplotlib inline ``` @@ -100,7 +100,7 @@ with pm.Model(coords=coords) as baseball_model: phi = pm.Uniform("phi", lower=0.0, upper=1.0) kappa_log = pm.Exponential("kappa_log", lam=1.5) - kappa = pm.Deterministic("kappa", at.exp(kappa_log)) + kappa = pm.Deterministic("kappa", pt.exp(kappa_log)) theta = pm.Beta("theta", alpha=phi * kappa, beta=(1.0 - phi) * kappa, dims="player_names") y = pm.Binomial("y", n=at_bats, p=theta, dims="player_names", observed=hits) diff --git a/examples/case_studies/multilevel_modeling.ipynb b/examples/case_studies/multilevel_modeling.ipynb index c573613f6..183c2b68d 100644 --- a/examples/case_studies/multilevel_modeling.ipynb +++ b/examples/case_studies/multilevel_modeling.ipynb @@ -90,7 +90,7 @@ "import numpy as np\n", "import pandas as pd\n", "import pymc as pm\n", - "import pytensor.tensor as at\n", + "import pytensor.tensor as pt\n", "import seaborn as sns\n", "import xarray as xr\n", "\n", @@ -2952,7 +2952,7 @@ " # population of varying effects:\n", " z = pm.Normal(\"z\", 0.0, 1.0, dims=(\"param\", \"county\"))\n", " alpha_beta_county = 
pm.Deterministic(\n", - " \"alpha_beta_county\", at.dot(chol, z).T, dims=(\"county\", \"param\")\n", + " \"alpha_beta_county\", pt.dot(chol, z).T, dims=(\"county\", \"param\")\n", " )\n", "\n", " # Expected value per county:\n", diff --git a/examples/case_studies/multilevel_modeling.myst.md b/examples/case_studies/multilevel_modeling.myst.md index 822a2bf33..3e98ba6a6 100644 --- a/examples/case_studies/multilevel_modeling.myst.md +++ b/examples/case_studies/multilevel_modeling.myst.md @@ -70,7 +70,7 @@ import matplotlib.pyplot as plt import numpy as np import pandas as pd import pymc as pm -import pytensor.tensor as at +import pytensor.tensor as pt import seaborn as sns import xarray as xr @@ -838,7 +838,7 @@ with pm.Model(coords=coords) as covariation_intercept_slope: # population of varying effects: z = pm.Normal("z", 0.0, 1.0, dims=("param", "county")) alpha_beta_county = pm.Deterministic( - "alpha_beta_county", at.dot(chol, z).T, dims=("county", "param") + "alpha_beta_county", pt.dot(chol, z).T, dims=("county", "param") ) # Expected value per county: diff --git a/examples/case_studies/putting_workflow.ipynb b/examples/case_studies/putting_workflow.ipynb index 6735c9bff..1557dd7a8 100644 --- a/examples/case_studies/putting_workflow.ipynb +++ b/examples/case_studies/putting_workflow.ipynb @@ -42,7 +42,7 @@ "import numpy as np\n", "import pandas as pd\n", "import pymc as pm\n", - "import pytensor.tensor as at\n", + "import pytensor.tensor as pt\n", "import scipy\n", "import scipy.stats as st\n", "import xarray as xr\n", @@ -801,7 +801,7 @@ "source": [ "def phi(x):\n", " \"\"\"Calculates the standard normal cumulative distribution function.\"\"\"\n", - " return 0.5 + 0.5 * at.erf(x / at.sqrt(2.0))\n", + " return 0.5 + 0.5 * pt.erf(x / pt.sqrt(2.0))\n", "\n", "\n", "with pm.Model() as angle_model:\n", @@ -812,7 +812,7 @@ " variance_of_shot = pm.HalfNormal(\"variance_of_shot\")\n", " p_goes_in = pm.Deterministic(\n", " \"p_goes_in\",\n", - " 2 * phi(at.arcsin((CUP_RADIUS - BALL_RADIUS) / distance_) / variance_of_shot) - 1,\n", + " 2 * phi(pt.arcsin((CUP_RADIUS - BALL_RADIUS) / distance_) / variance_of_shot) - 1,\n", " dims=\"obs_id\",\n", " )\n", " success = pm.Binomial(\"success\", n=tries_, p=p_goes_in, observed=successes_, dims=\"obs_id\")\n", @@ -1485,7 +1485,7 @@ " variance_of_distance = pm.HalfNormal(\"variance_of_distance\")\n", " p_good_angle = pm.Deterministic(\n", " \"p_good_angle\",\n", - " 2 * phi(at.arcsin((CUP_RADIUS - BALL_RADIUS) / distance_) / variance_of_shot) - 1,\n", + " 2 * phi(pt.arcsin((CUP_RADIUS - BALL_RADIUS) / distance_) / variance_of_shot) - 1,\n", " dims=\"obs_id\",\n", " )\n", " p_good_distance = pm.Deterministic(\n", @@ -1924,7 +1924,7 @@ "\n", " p_good_angle = pm.Deterministic(\n", " \"p_good_angle\",\n", - " 2 * phi(at.arcsin((CUP_RADIUS - BALL_RADIUS) / distance_) / variance_of_shot) - 1,\n", + " 2 * phi(pt.arcsin((CUP_RADIUS - BALL_RADIUS) / distance_) / variance_of_shot) - 1,\n", " dims=\"obs_id\",\n", " )\n", " p_good_distance = pm.Deterministic(\n", @@ -1938,7 +1938,7 @@ " p_success = pm.Normal(\n", " \"p_success\",\n", " mu=p,\n", - " sigma=at.sqrt(((p * (1 - p)) / tries_) + dispersion**2),\n", + " sigma=pt.sqrt(((p * (1 - p)) / tries_) + dispersion**2),\n", " observed=obs_prop_, # successes_ / tries_\n", " dims=\"obs_id\",\n", " )\n", diff --git a/examples/case_studies/putting_workflow.myst.md b/examples/case_studies/putting_workflow.myst.md index 2d63e9a39..3f21f15ee 100644 --- a/examples/case_studies/putting_workflow.myst.md +++ 
b/examples/case_studies/putting_workflow.myst.md @@ -37,7 +37,7 @@ import matplotlib.pyplot as plt import numpy as np import pandas as pd import pymc as pm -import pytensor.tensor as at +import pytensor.tensor as pt import scipy import scipy.stats as st import xarray as xr @@ -274,7 +274,7 @@ PyMC has $\Phi$ implemented, but it is pretty hidden (`pm.distributions.dist_mat ```{code-cell} ipython3 def phi(x): """Calculates the standard normal cumulative distribution function.""" - return 0.5 + 0.5 * at.erf(x / at.sqrt(2.0)) + return 0.5 + 0.5 * pt.erf(x / pt.sqrt(2.0)) with pm.Model() as angle_model: @@ -285,7 +285,7 @@ with pm.Model() as angle_model: variance_of_shot = pm.HalfNormal("variance_of_shot") p_goes_in = pm.Deterministic( "p_goes_in", - 2 * phi(at.arcsin((CUP_RADIUS - BALL_RADIUS) / distance_) / variance_of_shot) - 1, + 2 * phi(pt.arcsin((CUP_RADIUS - BALL_RADIUS) / distance_) / variance_of_shot) - 1, dims="obs_id", ) success = pm.Binomial("success", n=tries_, p=p_goes_in, observed=successes_, dims="obs_id") @@ -531,7 +531,7 @@ with pm.Model() as distance_angle_model: variance_of_distance = pm.HalfNormal("variance_of_distance") p_good_angle = pm.Deterministic( "p_good_angle", - 2 * phi(at.arcsin((CUP_RADIUS - BALL_RADIUS) / distance_) / variance_of_shot) - 1, + 2 * phi(pt.arcsin((CUP_RADIUS - BALL_RADIUS) / distance_) / variance_of_shot) - 1, dims="obs_id", ) p_good_distance = pm.Deterministic( @@ -643,7 +643,7 @@ with pm.Model() as disp_distance_angle_model: p_good_angle = pm.Deterministic( "p_good_angle", - 2 * phi(at.arcsin((CUP_RADIUS - BALL_RADIUS) / distance_) / variance_of_shot) - 1, + 2 * phi(pt.arcsin((CUP_RADIUS - BALL_RADIUS) / distance_) / variance_of_shot) - 1, dims="obs_id", ) p_good_distance = pm.Deterministic( @@ -657,7 +657,7 @@ with pm.Model() as disp_distance_angle_model: p_success = pm.Normal( "p_success", mu=p, - sigma=at.sqrt(((p * (1 - p)) / tries_) + dispersion**2), + sigma=pt.sqrt(((p * (1 - p)) / tries_) + dispersion**2), observed=obs_prop_, # successes_ / tries_ dims="obs_id", ) diff --git a/examples/case_studies/reinforcement_learning.ipynb b/examples/case_studies/reinforcement_learning.ipynb index 19b12631b..e76216fc7 100644 --- a/examples/case_studies/reinforcement_learning.ipynb +++ b/examples/case_studies/reinforcement_learning.ipynb @@ -53,7 +53,7 @@ "import numpy as np\n", "import pymc as pm\n", "import pytensor\n", - "import pytensor.tensor as at\n", + "import pytensor.tensor as pt\n", "import scipy\n", "\n", "from matplotlib.lines import Line2D" @@ -505,10 +505,10 @@ " This function updates the Q table according to the RL update rule.\n", " It will be called by pytensor.scan to do so recursevely, given the observed data and the alpha parameter\n", " This could have been replaced be the following lamba expression in the pytensor.scan fn argument:\n", - " fn=lamba action, reward, Qs, alpha: at.set_subtensor(Qs[action], Qs[action] + alpha * (reward - Qs[action]))\n", + " fn=lamba action, reward, Qs, alpha: pt.set_subtensor(Qs[action], Qs[action] + alpha * (reward - Qs[action]))\n", " \"\"\"\n", "\n", - " Qs = at.set_subtensor(Qs[action], Qs[action] + alpha * (reward - Qs[action]))\n", + " Qs = pt.set_subtensor(Qs[action], Qs[action] + alpha * (reward - Qs[action]))\n", " return Qs" ] }, @@ -521,14 +521,14 @@ "outputs": [], "source": [ "# Transform the variables into appropriate PyTensor objects\n", - "rewards_ = at.as_tensor_variable(rewards, dtype=\"int32\")\n", - "actions_ = at.as_tensor_variable(actions, dtype=\"int32\")\n", + "rewards_ = 
pt.as_tensor_variable(rewards, dtype=\"int32\")\n", + "actions_ = pt.as_tensor_variable(actions, dtype=\"int32\")\n", "\n", - "alpha = at.scalar(\"alpha\")\n", - "beta = at.scalar(\"beta\")\n", + "alpha = pt.scalar(\"alpha\")\n", + "beta = pt.scalar(\"beta\")\n", "\n", "# Initialize the Q table\n", - "Qs = 0.5 * at.ones((2,), dtype=\"float64\")\n", + "Qs = 0.5 * pt.ones((2,), dtype=\"float64\")\n", "\n", "# Compute the Q values for each trial\n", "Qs, _ = pytensor.scan(\n", @@ -537,11 +537,11 @@ "\n", "# Apply the softmax transformation\n", "Qs = Qs * beta\n", - "logp_actions = Qs - at.logsumexp(Qs, axis=1, keepdims=True)\n", + "logp_actions = Qs - pt.logsumexp(Qs, axis=1, keepdims=True)\n", "\n", "# Calculate the negative log likelihod of the observed actions\n", - "logp_actions = logp_actions[at.arange(actions_.shape[0] - 1), actions_[1:]]\n", - "neg_loglike = -at.sum(logp_actions)" + "logp_actions = logp_actions[pt.arange(actions_.shape[0] - 1), actions_[1:]]\n", + "neg_loglike = -pt.sum(logp_actions)" ] }, { @@ -602,22 +602,22 @@ "outputs": [], "source": [ "def pytensor_llik_td(alpha, beta, actions, rewards):\n", - " rewards = at.as_tensor_variable(rewards, dtype=\"int32\")\n", - " actions = at.as_tensor_variable(actions, dtype=\"int32\")\n", + " rewards = pt.as_tensor_variable(rewards, dtype=\"int32\")\n", + " actions = pt.as_tensor_variable(actions, dtype=\"int32\")\n", "\n", " # Compute the Qs values\n", - " Qs = 0.5 * at.ones((2,), dtype=\"float64\")\n", + " Qs = 0.5 * pt.ones((2,), dtype=\"float64\")\n", " Qs, updates = pytensor.scan(\n", " fn=update_Q, sequences=[actions, rewards], outputs_info=[Qs], non_sequences=[alpha]\n", " )\n", "\n", " # Apply the sotfmax transformation\n", " Qs = Qs[:-1] * beta\n", - " logp_actions = Qs - at.logsumexp(Qs, axis=1, keepdims=True)\n", + " logp_actions = Qs - pt.logsumexp(Qs, axis=1, keepdims=True)\n", "\n", " # Calculate the log likelihood of the observed actions\n", - " logp_actions = logp_actions[at.arange(actions.shape[0] - 1), actions[1:]]\n", - " return at.sum(logp_actions) # PyMC expects the standard log-likelihood" + " logp_actions = logp_actions[pt.arange(actions.shape[0] - 1), actions[1:]]\n", + " return pt.sum(logp_actions) # PyMC expects the standard log-likelihood" ] }, { @@ -799,21 +799,21 @@ "outputs": [], "source": [ "def right_action_probs(alpha, beta, actions, rewards):\n", - " rewards = at.as_tensor_variable(rewards, dtype=\"int32\")\n", - " actions = at.as_tensor_variable(actions, dtype=\"int32\")\n", + " rewards = pt.as_tensor_variable(rewards, dtype=\"int32\")\n", + " actions = pt.as_tensor_variable(actions, dtype=\"int32\")\n", "\n", " # Compute the Qs values\n", - " Qs = 0.5 * at.ones((2,), dtype=\"float64\")\n", + " Qs = 0.5 * pt.ones((2,), dtype=\"float64\")\n", " Qs, updates = pytensor.scan(\n", " fn=update_Q, sequences=[actions, rewards], outputs_info=[Qs], non_sequences=[alpha]\n", " )\n", "\n", " # Apply the sotfmax transformation\n", " Qs = Qs[:-1] * beta\n", - " logp_actions = Qs - at.logsumexp(Qs, axis=1, keepdims=True)\n", + " logp_actions = Qs - pt.logsumexp(Qs, axis=1, keepdims=True)\n", "\n", " # Return the probabilities for the right action, in the original scale\n", - " return at.exp(logp_actions[:, 1])" + " return pt.exp(logp_actions[:, 1])" ] }, { diff --git a/examples/case_studies/reinforcement_learning.myst.md b/examples/case_studies/reinforcement_learning.myst.md index 0cfd6fae7..1c9afba63 100644 --- a/examples/case_studies/reinforcement_learning.myst.md +++ 
b/examples/case_studies/reinforcement_learning.myst.md @@ -53,7 +53,7 @@ import matplotlib.pyplot as plt import numpy as np import pymc as pm import pytensor -import pytensor.tensor as at +import pytensor.tensor as pt import scipy from matplotlib.lines import Line2D @@ -328,10 +328,10 @@ def update_Q(action, reward, Qs, alpha): This function updates the Q table according to the RL update rule. It will be called by pytensor.scan to do so recursevely, given the observed data and the alpha parameter This could have been replaced be the following lamba expression in the pytensor.scan fn argument: - fn=lamba action, reward, Qs, alpha: at.set_subtensor(Qs[action], Qs[action] + alpha * (reward - Qs[action])) + fn=lamba action, reward, Qs, alpha: pt.set_subtensor(Qs[action], Qs[action] + alpha * (reward - Qs[action])) """ - Qs = at.set_subtensor(Qs[action], Qs[action] + alpha * (reward - Qs[action])) + Qs = pt.set_subtensor(Qs[action], Qs[action] + alpha * (reward - Qs[action])) return Qs ``` @@ -339,14 +339,14 @@ def update_Q(action, reward, Qs, alpha): :id: dHzhTy20g4vh # Transform the variables into appropriate PyTensor objects -rewards_ = at.as_tensor_variable(rewards, dtype="int32") -actions_ = at.as_tensor_variable(actions, dtype="int32") +rewards_ = pt.as_tensor_variable(rewards, dtype="int32") +actions_ = pt.as_tensor_variable(actions, dtype="int32") -alpha = at.scalar("alpha") -beta = at.scalar("beta") +alpha = pt.scalar("alpha") +beta = pt.scalar("beta") # Initialize the Q table -Qs = 0.5 * at.ones((2,), dtype="float64") +Qs = 0.5 * pt.ones((2,), dtype="float64") # Compute the Q values for each trial Qs, _ = pytensor.scan( @@ -355,11 +355,11 @@ Qs, _ = pytensor.scan( # Apply the softmax transformation Qs = Qs * beta -logp_actions = Qs - at.logsumexp(Qs, axis=1, keepdims=True) +logp_actions = Qs - pt.logsumexp(Qs, axis=1, keepdims=True) # Calculate the negative log likelihod of the observed actions -logp_actions = logp_actions[at.arange(actions_.shape[0] - 1), actions_[1:]] -neg_loglike = -at.sum(logp_actions) +logp_actions = logp_actions[pt.arange(actions_.shape[0] - 1), actions_[1:]] +neg_loglike = -pt.sum(logp_actions) ``` +++ {"id": "C9Ayn6-kzhPN"} @@ -389,22 +389,22 @@ The same result is obtained, so we can be confident that the PyTensor loop is wo :id: c70L4ZBT7QLr def pytensor_llik_td(alpha, beta, actions, rewards): - rewards = at.as_tensor_variable(rewards, dtype="int32") - actions = at.as_tensor_variable(actions, dtype="int32") + rewards = pt.as_tensor_variable(rewards, dtype="int32") + actions = pt.as_tensor_variable(actions, dtype="int32") # Compute the Qs values - Qs = 0.5 * at.ones((2,), dtype="float64") + Qs = 0.5 * pt.ones((2,), dtype="float64") Qs, updates = pytensor.scan( fn=update_Q, sequences=[actions, rewards], outputs_info=[Qs], non_sequences=[alpha] ) # Apply the sotfmax transformation Qs = Qs[:-1] * beta - logp_actions = Qs - at.logsumexp(Qs, axis=1, keepdims=True) + logp_actions = Qs - pt.logsumexp(Qs, axis=1, keepdims=True) # Calculate the log likelihood of the observed actions - logp_actions = logp_actions[at.arange(actions.shape[0] - 1), actions[1:]] - return at.sum(logp_actions) # PyMC expects the standard log-likelihood + logp_actions = logp_actions[pt.arange(actions.shape[0] - 1), actions[1:]] + return pt.sum(logp_actions) # PyMC expects the standard log-likelihood ``` ```{code-cell} ipython3 @@ -464,21 +464,21 @@ One reason why it's useful to use the Bernoulli likelihood is that one can then :id: pQdszDk_qYCX def right_action_probs(alpha, beta, actions, 
rewards): - rewards = at.as_tensor_variable(rewards, dtype="int32") - actions = at.as_tensor_variable(actions, dtype="int32") + rewards = pt.as_tensor_variable(rewards, dtype="int32") + actions = pt.as_tensor_variable(actions, dtype="int32") # Compute the Qs values - Qs = 0.5 * at.ones((2,), dtype="float64") + Qs = 0.5 * pt.ones((2,), dtype="float64") Qs, updates = pytensor.scan( fn=update_Q, sequences=[actions, rewards], outputs_info=[Qs], non_sequences=[alpha] ) # Apply the sotfmax transformation Qs = Qs[:-1] * beta - logp_actions = Qs - at.logsumexp(Qs, axis=1, keepdims=True) + logp_actions = Qs - pt.logsumexp(Qs, axis=1, keepdims=True) # Return the probabilities for the right action, in the original scale - return at.exp(logp_actions[:, 1]) + return pt.exp(logp_actions[:, 1]) ``` ```{code-cell} ipython3 diff --git a/examples/case_studies/rugby_analytics.ipynb b/examples/case_studies/rugby_analytics.ipynb index 7a3c05e4d..daeac5205 100644 --- a/examples/case_studies/rugby_analytics.ipynb +++ b/examples/case_studies/rugby_analytics.ipynb @@ -67,7 +67,7 @@ "import numpy as np\n", "import pandas as pd\n", "import pymc as pm\n", - "import pytensor.tensor as at\n", + "import pytensor.tensor as pt\n", "import seaborn as sns\n", "\n", "from matplotlib.ticker import StrMethodFormatter\n", @@ -796,10 +796,10 @@ " atts_star = pm.Normal(\"atts_star\", mu=0, sigma=sd_att, dims=\"team\")\n", " defs_star = pm.Normal(\"defs_star\", mu=0, sigma=sd_def, dims=\"team\")\n", "\n", - " atts = pm.Deterministic(\"atts\", atts_star - at.mean(atts_star), dims=\"team\")\n", - " defs = pm.Deterministic(\"defs\", defs_star - at.mean(defs_star), dims=\"team\")\n", - " home_theta = at.exp(intercept + home + atts[home_idx] + defs[away_idx])\n", - " away_theta = at.exp(intercept + atts[away_idx] + defs[home_idx])\n", + " atts = pm.Deterministic(\"atts\", atts_star - pt.mean(atts_star), dims=\"team\")\n", + " defs = pm.Deterministic(\"defs\", defs_star - pt.mean(defs_star), dims=\"team\")\n", + " home_theta = pt.exp(intercept + home + atts[home_idx] + defs[away_idx])\n", + " away_theta = pt.exp(intercept + atts[away_idx] + defs[home_idx])\n", "\n", " # likelihood of observed data\n", " home_points = pm.Poisson(\n", diff --git a/examples/case_studies/rugby_analytics.myst.md b/examples/case_studies/rugby_analytics.myst.md index 6a30835aa..03454b1df 100644 --- a/examples/case_studies/rugby_analytics.myst.md +++ b/examples/case_studies/rugby_analytics.myst.md @@ -52,7 +52,7 @@ import matplotlib.pyplot as plt import numpy as np import pandas as pd import pymc as pm -import pytensor.tensor as at +import pytensor.tensor as pt import seaborn as sns from matplotlib.ticker import StrMethodFormatter @@ -252,10 +252,10 @@ with pm.Model(coords=coords) as model: atts_star = pm.Normal("atts_star", mu=0, sigma=sd_att, dims="team") defs_star = pm.Normal("defs_star", mu=0, sigma=sd_def, dims="team") - atts = pm.Deterministic("atts", atts_star - at.mean(atts_star), dims="team") - defs = pm.Deterministic("defs", defs_star - at.mean(defs_star), dims="team") - home_theta = at.exp(intercept + home + atts[home_idx] + defs[away_idx]) - away_theta = at.exp(intercept + atts[away_idx] + defs[home_idx]) + atts = pm.Deterministic("atts", atts_star - pt.mean(atts_star), dims="team") + defs = pm.Deterministic("defs", defs_star - pt.mean(defs_star), dims="team") + home_theta = pt.exp(intercept + home + atts[home_idx] + defs[away_idx]) + away_theta = pt.exp(intercept + atts[away_idx] + defs[home_idx]) # likelihood of observed data home_points = 
pm.Poisson( diff --git a/examples/case_studies/wrapping_jax_function.ipynb b/examples/case_studies/wrapping_jax_function.ipynb index 52e5c4b3e..14c8cfc4f 100644 --- a/examples/case_studies/wrapping_jax_function.ipynb +++ b/examples/case_studies/wrapping_jax_function.ipynb @@ -25,7 +25,7 @@ "import numpy as np\n", "import pymc as pm\n", "import pytensor\n", - "import pytensor.tensor as at\n", + "import pytensor.tensor as pt\n", "\n", "from pytensor.graph import Apply, Op" ] @@ -601,14 +601,14 @@ " ):\n", " # Convert our inputs to symbolic variables\n", " inputs = [\n", - " at.as_tensor_variable(emission_observed),\n", - " at.as_tensor_variable(emission_signal),\n", - " at.as_tensor_variable(emission_noise),\n", - " at.as_tensor_variable(logp_initial_state),\n", - " at.as_tensor_variable(logp_transition),\n", + " pt.as_tensor_variable(emission_observed),\n", + " pt.as_tensor_variable(emission_signal),\n", + " pt.as_tensor_variable(emission_noise),\n", + " pt.as_tensor_variable(logp_initial_state),\n", + " pt.as_tensor_variable(logp_transition),\n", " ]\n", " # Define the type of the output returned by the wrapped JAX function\n", - " outputs = [at.dscalar()]\n", + " outputs = [pt.dscalar()]\n", " return Apply(self, inputs, outputs)\n", "\n", " def perform(self, node, inputs, outputs):\n", @@ -652,11 +652,11 @@ " logp_transition,\n", " ):\n", " inputs = [\n", - " at.as_tensor_variable(emission_observed),\n", - " at.as_tensor_variable(emission_signal),\n", - " at.as_tensor_variable(emission_noise),\n", - " at.as_tensor_variable(logp_initial_state),\n", - " at.as_tensor_variable(logp_transition),\n", + " pt.as_tensor_variable(emission_observed),\n", + " pt.as_tensor_variable(emission_signal),\n", + " pt.as_tensor_variable(emission_noise),\n", + " pt.as_tensor_variable(logp_initial_state),\n", + " pt.as_tensor_variable(logp_transition),\n", " ]\n", " # This `Op` will return one gradient per input. For simplicity, we assume\n", " # each output is of the same type as the input. 
In practice, you should use\n", @@ -774,7 +774,7 @@ "source": [ "# We define the symbolic `emission_signal` variable outside of the `Op`\n", "# so that we can request the gradient wrt to it\n", - "emission_signal_variable = at.as_tensor_variable(emission_signal_true)\n", + "emission_signal_variable = pt.as_tensor_variable(emission_signal_true)\n", "x = hmm_logp_op(\n", " emission_observed,\n", " emission_signal_variable,\n", @@ -782,7 +782,7 @@ " logp_initial_state_true,\n", " logp_transition_true,\n", ")\n", - "x_grad_wrt_emission_signal = at.grad(x, wrt=emission_signal_variable)\n", + "x_grad_wrt_emission_signal = pt.grad(x, wrt=emission_signal_variable)\n", "x_grad_wrt_emission_signal.eval()" ] }, @@ -811,10 +811,10 @@ " emission_noise = pm.HalfNormal(\"emission_noise\", 1)\n", "\n", " p_initial_state = pm.Dirichlet(\"p_initial_state\", np.ones(3))\n", - " logp_initial_state = at.log(p_initial_state)\n", + " logp_initial_state = pt.log(p_initial_state)\n", "\n", " p_transition = pm.Dirichlet(\"p_transition\", np.ones(3), size=3)\n", - " logp_transition = at.log(p_transition)\n", + " logp_transition = pt.log(p_transition)\n", "\n", " loglike = pm.Potential(\n", " \"hmm_loglike\",\n", @@ -1489,9 +1489,9 @@ " default_output = 0\n", "\n", " def make_node(self, *inputs):\n", - " inputs = [at.as_tensor_variable(inp) for inp in inputs]\n", + " inputs = [pt.as_tensor_variable(inp) for inp in inputs]\n", " # We now have one output for the function value, and one output for each gradient\n", - " outputs = [at.dscalar()] + [inp.type() for inp in inputs]\n", + " outputs = [pt.dscalar()] + [inp.type() for inp in inputs]\n", " return Apply(self, inputs, outputs)\n", "\n", " def perform(self, node, inputs, outputs):\n", @@ -1544,7 +1544,7 @@ } ], "source": [ - "emission_signal_variable = at.as_tensor_variable(emission_signal_true)\n", + "emission_signal_variable = pt.as_tensor_variable(emission_signal_true)\n", "# Only the first output is assigned to the variable `x`, due to `default_output=0`\n", "x = hmm_logp_value_grad_op(\n", " emission_observed,\n", @@ -1553,7 +1553,7 @@ " logp_initial_state_true,\n", " logp_transition_true,\n", ")\n", - "at.grad(x, emission_signal_variable).eval()" + "pt.grad(x, emission_signal_variable).eval()" ] }, { diff --git a/examples/case_studies/wrapping_jax_function.myst.md b/examples/case_studies/wrapping_jax_function.myst.md index 8b9147ded..45c6a9ed3 100644 --- a/examples/case_studies/wrapping_jax_function.myst.md +++ b/examples/case_studies/wrapping_jax_function.myst.md @@ -27,7 +27,7 @@ import matplotlib.pyplot as plt import numpy as np import pymc as pm import pytensor -import pytensor.tensor as at +import pytensor.tensor as pt from pytensor.graph import Apply, Op ``` @@ -347,14 +347,14 @@ class HMMLogpOp(Op): ): # Convert our inputs to symbolic variables inputs = [ - at.as_tensor_variable(emission_observed), - at.as_tensor_variable(emission_signal), - at.as_tensor_variable(emission_noise), - at.as_tensor_variable(logp_initial_state), - at.as_tensor_variable(logp_transition), + pt.as_tensor_variable(emission_observed), + pt.as_tensor_variable(emission_signal), + pt.as_tensor_variable(emission_noise), + pt.as_tensor_variable(logp_initial_state), + pt.as_tensor_variable(logp_transition), ] # Define the type of the output returned by the wrapped JAX function - outputs = [at.dscalar()] + outputs = [pt.dscalar()] return Apply(self, inputs, outputs) def perform(self, node, inputs, outputs): @@ -398,11 +398,11 @@ class HMMLogpGradOp(Op): logp_transition, ): inputs = [ - 
at.as_tensor_variable(emission_observed), - at.as_tensor_variable(emission_signal), - at.as_tensor_variable(emission_noise), - at.as_tensor_variable(logp_initial_state), - at.as_tensor_variable(logp_transition), + pt.as_tensor_variable(emission_observed), + pt.as_tensor_variable(emission_signal), + pt.as_tensor_variable(emission_noise), + pt.as_tensor_variable(logp_initial_state), + pt.as_tensor_variable(logp_transition), ] # This `Op` will return one gradient per input. For simplicity, we assume # each output is of the same type as the input. In practice, you should use @@ -460,7 +460,7 @@ It's also useful to check the gradient of our {class}`~pytensor.graph.op.Op` can ```{code-cell} ipython3 # We define the symbolic `emission_signal` variable outside of the `Op` # so that we can request the gradient wrt to it -emission_signal_variable = at.as_tensor_variable(emission_signal_true) +emission_signal_variable = pt.as_tensor_variable(emission_signal_true) x = hmm_logp_op( emission_observed, emission_signal_variable, @@ -468,7 +468,7 @@ x = hmm_logp_op( logp_initial_state_true, logp_transition_true, ) -x_grad_wrt_emission_signal = at.grad(x, wrt=emission_signal_variable) +x_grad_wrt_emission_signal = pt.grad(x, wrt=emission_signal_variable) x_grad_wrt_emission_signal.eval() ``` @@ -484,10 +484,10 @@ with pm.Model(rng_seeder=int(rng.integers(2**30))) as model: emission_noise = pm.HalfNormal("emission_noise", 1) p_initial_state = pm.Dirichlet("p_initial_state", np.ones(3)) - logp_initial_state = at.log(p_initial_state) + logp_initial_state = pt.log(p_initial_state) p_transition = pm.Dirichlet("p_transition", np.ones(3), size=3) - logp_transition = at.log(p_transition) + logp_transition = pt.log(p_transition) loglike = pm.Potential( "hmm_loglike", @@ -701,9 +701,9 @@ class HmmLogpValueGradOp(Op): default_output = 0 def make_node(self, *inputs): - inputs = [at.as_tensor_variable(inp) for inp in inputs] + inputs = [pt.as_tensor_variable(inp) for inp in inputs] # We now have one output for the function value, and one output for each gradient - outputs = [at.dscalar()] + [inp.type() for inp in inputs] + outputs = [pt.dscalar()] + [inp.type() for inp in inputs] return Apply(self, inputs, outputs) def perform(self, node, inputs, outputs): @@ -735,7 +735,7 @@ hmm_logp_value_grad_op = HmmLogpValueGradOp() We check again that we can take the gradient using PyTensor `grad` interface ```{code-cell} ipython3 -emission_signal_variable = at.as_tensor_variable(emission_signal_true) +emission_signal_variable = pt.as_tensor_variable(emission_signal_true) # Only the first output is assigned to the variable `x`, due to `default_output=0` x = hmm_logp_value_grad_op( emission_observed, @@ -744,7 +744,7 @@ x = hmm_logp_value_grad_op( logp_initial_state_true, logp_transition_true, ) -at.grad(x, emission_signal_variable).eval() +pt.grad(x, emission_signal_variable).eval() ``` ## Authors diff --git a/examples/causal_inference/excess_deaths.ipynb b/examples/causal_inference/excess_deaths.ipynb index 787cb791c..9cca2edc2 100644 --- a/examples/causal_inference/excess_deaths.ipynb +++ b/examples/causal_inference/excess_deaths.ipynb @@ -88,7 +88,7 @@ "import numpy as np\n", "import pandas as pd\n", "import pymc as pm\n", - "import pytensor.tensor as at\n", + "import pytensor.tensor as pt\n", "import seaborn as sns\n", "import xarray as xr" ] @@ -140,9 +140,9 @@ " def extend_axis(value, axis):\n", " n_out = value.shape[axis] + 1\n", " sum_vals = value.sum(axis, keepdims=True)\n", - " norm = sum_vals / (at.sqrt(n_out) + 
n_out)\n", - " fill_val = norm - sum_vals / at.sqrt(n_out)\n", - " out = at.concatenate([value, fill_val], axis=axis)\n", + " norm = sum_vals / (pt.sqrt(n_out) + n_out)\n", + " fill_val = norm - sum_vals / pt.sqrt(n_out)\n", + " out = pt.concatenate([value, fill_val], axis=axis)\n", " return out - norm\n", "\n", " dims_reduced = []\n", diff --git a/examples/causal_inference/excess_deaths.myst.md b/examples/causal_inference/excess_deaths.myst.md index c9e60f012..fdfab03f1 100644 --- a/examples/causal_inference/excess_deaths.myst.md +++ b/examples/causal_inference/excess_deaths.myst.md @@ -72,7 +72,7 @@ import matplotlib.pyplot as plt import numpy as np import pandas as pd import pymc as pm -import pytensor.tensor as at +import pytensor.tensor as pt import seaborn as sns import xarray as xr ``` @@ -104,9 +104,9 @@ def ZeroSumNormal(name, *, sigma=None, active_dims=None, dims, model=None): def extend_axis(value, axis): n_out = value.shape[axis] + 1 sum_vals = value.sum(axis, keepdims=True) - norm = sum_vals / (at.sqrt(n_out) + n_out) - fill_val = norm - sum_vals / at.sqrt(n_out) - out = at.concatenate([value, fill_val], axis=axis) + norm = sum_vals / (pt.sqrt(n_out) + n_out) + fill_val = norm - sum_vals / pt.sqrt(n_out) + out = pt.concatenate([value, fill_val], axis=axis) return out - norm dims_reduced = [] diff --git a/examples/conf.py b/examples/conf.py index b76491ac2..162623e33 100644 --- a/examples/conf.py +++ b/examples/conf.py @@ -226,7 +226,6 @@ def setup(app: Sphinx): # intersphinx mappings intersphinx_mapping = { - "aesara": ("https://aesara.readthedocs.io/en/latest/", None), "arviz": ("https://python.arviz.org/en/latest/", None), "bambi": ("https://bambinos.github.io/bambi", None), "einstats": ("https://einstats.python.arviz.org/en/latest/", None), @@ -234,6 +233,7 @@ def setup(app: Sphinx): "numpy": ("https://numpy.org/doc/stable/", None), "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None), "pymc": ("https://www.pymc.io/projects/docs/en/stable/", None), + "pytensor": ("https://pytensor.readthedocs.io/en/latest/", None), "pmx": ("https://www.pymc.io/projects/experimental/en/latest/", None), "scipy": ("https://docs.scipy.org/doc/scipy/reference/", None), "xarray": ("https://docs.xarray.dev/en/stable/", None), diff --git a/examples/gaussian_processes/GP-MeansAndCovs.ipynb b/examples/gaussian_processes/GP-MeansAndCovs.ipynb index a8ef7e51f..06faa0628 100644 --- a/examples/gaussian_processes/GP-MeansAndCovs.ipynb +++ b/examples/gaussian_processes/GP-MeansAndCovs.ipynb @@ -48,7 +48,7 @@ "import numpy as np\n", "import pymc as pm\n", "import pytensor\n", - "import pytensor.tensor as at\n", + "import pytensor.tensor as pt\n", "import scipy.stats as stats" ] }, @@ -230,7 +230,7 @@ } ], "source": [ - "const_func_vec = pm.gp.mean.Constant(at.ones(5))\n", + "const_func_vec = pm.gp.mean.Constant(pt.ones(5))\n", "\n", "print(const_func_vec(X).eval())" ] @@ -1375,7 +1375,7 @@ ], "source": [ "def warp_func(x, a, b, c):\n", - " return 1.0 + x + (a * at.tanh(b * (x - c)))\n", + " return 1.0 + x + (a * pt.tanh(b * (x - c)))\n", "\n", "\n", "a = 1.0\n", @@ -1466,7 +1466,7 @@ "source": [ "def mapping(x, T):\n", " c = 2.0 * np.pi * (1.0 / T)\n", - " u = at.concatenate((at.sin(c * x), at.cos(c * x)), 1)\n", + " u = pt.concatenate((pt.sin(c * x), pt.cos(c * x)), 1)\n", " return u\n", "\n", "\n", @@ -1750,7 +1750,7 @@ " w: transition width\n", " x0: transition location.\n", " \"\"\"\n", - " return (ls1 + ls2) / 2.0 - (ls1 - ls2) / 2.0 * at.tanh((x - x0) / w)\n", + " return (ls1 + ls2) / 2.0 
- (ls1 - ls2) / 2.0 * pt.tanh((x - x0) / w)\n", "\n", "\n", "ls1 = 0.05\n", diff --git a/examples/gaussian_processes/GP-MeansAndCovs.myst.md b/examples/gaussian_processes/GP-MeansAndCovs.myst.md index c37af6606..ac0e8ffb6 100644 --- a/examples/gaussian_processes/GP-MeansAndCovs.myst.md +++ b/examples/gaussian_processes/GP-MeansAndCovs.myst.md @@ -39,7 +39,7 @@ import matplotlib.pyplot as plt import numpy as np import pymc as pm import pytensor -import pytensor.tensor as at +import pytensor.tensor as pt import scipy.stats as stats ``` @@ -129,7 +129,7 @@ papermill: status: completed tags: [] --- -const_func_vec = pm.gp.mean.Constant(at.ones(5)) +const_func_vec = pm.gp.mean.Constant(pt.ones(5)) print(const_func_vec(X).eval()) ``` @@ -705,7 +705,7 @@ papermill: tags: [] --- def warp_func(x, a, b, c): - return 1.0 + x + (a * at.tanh(b * (x - c))) + return 1.0 + x + (a * pt.tanh(b * (x - c))) a = 1.0 @@ -764,7 +764,7 @@ tags: [] --- def mapping(x, T): c = 2.0 * np.pi * (1.0 / T) - u = at.concatenate((at.sin(c * x), at.cos(c * x)), 1) + u = pt.concatenate((pt.sin(c * x), pt.cos(c * x)), 1) return u @@ -928,7 +928,7 @@ def tanh_func(x, ls1, ls2, w, x0): w: transition width x0: transition location. """ - return (ls1 + ls2) / 2.0 - (ls1 - ls2) / 2.0 * at.tanh((x - x0) / w) + return (ls1 + ls2) / 2.0 - (ls1 - ls2) / 2.0 * pt.tanh((x - x0) / w) ls1 = 0.05 diff --git a/examples/gaussian_processes/MOGP-Coregion-Hadamard.ipynb b/examples/gaussian_processes/MOGP-Coregion-Hadamard.ipynb index ae2a399e4..102384dbb 100644 --- a/examples/gaussian_processes/MOGP-Coregion-Hadamard.ipynb +++ b/examples/gaussian_processes/MOGP-Coregion-Hadamard.ipynb @@ -37,7 +37,7 @@ "import numpy as np\n", "import pandas as pd\n", "import pymc as pm\n", - "import pytensor.tensor as at\n", + "import pytensor.tensor as pt\n", "\n", "from pymc.gp.util import plot_gp_dist" ] @@ -798,7 +798,7 @@ " # Get the ICM kernel\n", " W = pm.Normal(\"W\", mu=0, sigma=3, shape=(n_outputs, 2), initval=np.random.randn(n_outputs, 2))\n", " kappa = pm.Gamma(\"kappa\", alpha=1.5, beta=1, shape=n_outputs)\n", - " B = pm.Deterministic(\"B\", at.dot(W, W.T) + at.diag(kappa))\n", + " B = pm.Deterministic(\"B\", pt.dot(W, W.T) + pt.diag(kappa))\n", " cov_icm = get_icm(input_dim=2, kernel=kernel, B=B, active_dims=[1])\n", "\n", " # Define a Multi-output GP\n", diff --git a/examples/gaussian_processes/MOGP-Coregion-Hadamard.myst.md b/examples/gaussian_processes/MOGP-Coregion-Hadamard.myst.md index 30c2e0ebc..c2c2e2bfd 100644 --- a/examples/gaussian_processes/MOGP-Coregion-Hadamard.myst.md +++ b/examples/gaussian_processes/MOGP-Coregion-Hadamard.myst.md @@ -31,7 +31,7 @@ import matplotlib.pyplot as plt import numpy as np import pandas as pd import pymc as pm -import pytensor.tensor as at +import pytensor.tensor as pt from pymc.gp.util import plot_gp_dist ``` @@ -171,7 +171,7 @@ with pm.Model() as model: # Get the ICM kernel W = pm.Normal("W", mu=0, sigma=3, shape=(n_outputs, 2), initval=np.random.randn(n_outputs, 2)) kappa = pm.Gamma("kappa", alpha=1.5, beta=1, shape=n_outputs) - B = pm.Deterministic("B", at.dot(W, W.T) + at.diag(kappa)) + B = pm.Deterministic("B", pt.dot(W, W.T) + pt.diag(kappa)) cov_icm = get_icm(input_dim=2, kernel=kernel, B=B, active_dims=[1]) # Define a Multi-output GP diff --git a/examples/gaussian_processes/gaussian_process.ipynb b/examples/gaussian_processes/gaussian_process.ipynb index 7ad01a480..a6f7d40fd 100644 --- a/examples/gaussian_processes/gaussian_process.ipynb +++ b/examples/gaussian_processes/gaussian_process.ipynb @@ 
-41,7 +41,7 @@ "import matplotlib.pyplot as plt\n", "import numpy as np\n", "import pymc as pm\n", - "import pytensor.tensor as at\n", + "import pytensor.tensor as pt\n", "import seaborn as sns\n", "\n", "from xarray_einstats.stats import multivariate_normal\n", @@ -140,7 +140,7 @@ " D = squared_distance(x, x)\n", "\n", " # Squared exponential\n", - " sigma = at.fill_diagonal(eta_sq * at.exp(-rho_sq * D), eta_sq + sigma_sq)\n", + " sigma = pt.fill_diagonal(eta_sq * pt.exp(-rho_sq * D), eta_sq + sigma_sq)\n", "\n", " obs = pm.MvNormal(\"obs\", mu, sigma, observed=y)" ] @@ -196,18 +196,18 @@ "\n", "with gp_fit as gp:\n", " # Covariance matrices for prediction\n", - " sigma_pred = eta_sq * at.exp(-rho_sq * D_pred)\n", - " sigma_off_diag = eta_sq * at.exp(-rho_sq * D_off_diag)\n", + " sigma_pred = eta_sq * pt.exp(-rho_sq * D_pred)\n", + " sigma_off_diag = eta_sq * pt.exp(-rho_sq * D_off_diag)\n", "\n", " # Posterior mean\n", " mu_post = pm.Deterministic(\n", - " \"mu_post\", at.dot(at.dot(sigma_off_diag, pm.math.matrix_inverse(sigma)), y), dims=\"pred_id\"\n", + " \"mu_post\", pt.dot(pt.dot(sigma_off_diag, pm.math.matrix_inverse(sigma)), y), dims=\"pred_id\"\n", " )\n", " # Posterior covariance\n", " sigma_post = pm.Deterministic(\n", " \"sigma_post\",\n", " sigma_pred\n", - " - at.dot(at.dot(sigma_off_diag, pm.math.matrix_inverse(sigma)), sigma_off_diag.T),\n", + " - pt.dot(pt.dot(sigma_off_diag, pm.math.matrix_inverse(sigma)), sigma_off_diag.T),\n", " dims=(\"pred_id\", \"pred_id2\"),\n", " )" ] diff --git a/examples/gaussian_processes/gaussian_process.myst.md b/examples/gaussian_processes/gaussian_process.myst.md index 9f6e33913..51d3719f7 100644 --- a/examples/gaussian_processes/gaussian_process.myst.md +++ b/examples/gaussian_processes/gaussian_process.myst.md @@ -30,7 +30,7 @@ import arviz as az import matplotlib.pyplot as plt import numpy as np import pymc as pm -import pytensor.tensor as at +import pytensor.tensor as pt import seaborn as sns from xarray_einstats.stats import multivariate_normal @@ -104,7 +104,7 @@ with pm.Model() as gp_fit: D = squared_distance(x, x) # Squared exponential - sigma = at.fill_diagonal(eta_sq * at.exp(-rho_sq * D), eta_sq + sigma_sq) + sigma = pt.fill_diagonal(eta_sq * pt.exp(-rho_sq * D), eta_sq + sigma_sq) obs = pm.MvNormal("obs", mu, sigma, observed=y) ``` @@ -127,18 +127,18 @@ gp_fit.add_coords({"pred_id": xgrid, "pred_id2": xgrid}) with gp_fit as gp: # Covariance matrices for prediction - sigma_pred = eta_sq * at.exp(-rho_sq * D_pred) - sigma_off_diag = eta_sq * at.exp(-rho_sq * D_off_diag) + sigma_pred = eta_sq * pt.exp(-rho_sq * D_pred) + sigma_off_diag = eta_sq * pt.exp(-rho_sq * D_off_diag) # Posterior mean mu_post = pm.Deterministic( - "mu_post", at.dot(at.dot(sigma_off_diag, pm.math.matrix_inverse(sigma)), y), dims="pred_id" + "mu_post", pt.dot(pt.dot(sigma_off_diag, pm.math.matrix_inverse(sigma)), y), dims="pred_id" ) # Posterior covariance sigma_post = pm.Deterministic( "sigma_post", sigma_pred - - at.dot(at.dot(sigma_off_diag, pm.math.matrix_inverse(sigma)), sigma_off_diag.T), + - pt.dot(pt.dot(sigma_off_diag, pm.math.matrix_inverse(sigma)), sigma_off_diag.T), dims=("pred_id", "pred_id2"), ) ``` diff --git a/examples/generalized_linear_models/GLM-hierarchical-binomial-model.ipynb b/examples/generalized_linear_models/GLM-hierarchical-binomial-model.ipynb index 1c6c5a2e7..5c6a264e3 100644 --- a/examples/generalized_linear_models/GLM-hierarchical-binomial-model.ipynb +++ b/examples/generalized_linear_models/GLM-hierarchical-binomial-model.ipynb 
@@ -23,7 +23,7 @@ "import numpy as np\n", "import pandas as pd\n", "import pymc as pm\n", - "import pytensor.tensor as at\n", + "import pytensor.tensor as pt\n", "\n", "from scipy.special import gammaln" ] @@ -364,7 +364,7 @@ "source": [ "def logp_ab(value):\n", " \"\"\"prior density\"\"\"\n", - " return at.log(at.pow(at.sum(value), -5 / 2))\n", + " return pt.log(pt.pow(pt.sum(value), -5 / 2))\n", "\n", "\n", "with pm.Model(coords=coords) as model:\n", @@ -373,8 +373,8 @@ " ab = pm.HalfNormal(\"ab\", sigma=10, dims=\"param\")\n", " pm.Potential(\"p(a, b)\", logp_ab(ab))\n", "\n", - " X = pm.Deterministic(\"X\", at.log(ab[0] / ab[1]))\n", - " Z = pm.Deterministic(\"Z\", at.log(at.sum(ab)))\n", + " X = pm.Deterministic(\"X\", pt.log(ab[0] / ab[1]))\n", + " Z = pm.Deterministic(\"Z\", pt.log(pt.sum(ab)))\n", "\n", " theta = pm.Beta(\"theta\", alpha=ab[0], beta=ab[1], dims=\"obs_id\")\n", "\n", diff --git a/examples/generalized_linear_models/GLM-hierarchical-binomial-model.myst.md b/examples/generalized_linear_models/GLM-hierarchical-binomial-model.myst.md index 03e2288c2..9928e2012 100644 --- a/examples/generalized_linear_models/GLM-hierarchical-binomial-model.myst.md +++ b/examples/generalized_linear_models/GLM-hierarchical-binomial-model.myst.md @@ -23,7 +23,7 @@ import matplotlib.pyplot as plt import numpy as np import pandas as pd import pymc as pm -import pytensor.tensor as at +import pytensor.tensor as pt from scipy.special import gammaln ``` @@ -209,7 +209,7 @@ coords = { ```{code-cell} ipython3 def logp_ab(value): """prior density""" - return at.log(at.pow(at.sum(value), -5 / 2)) + return pt.log(pt.pow(pt.sum(value), -5 / 2)) with pm.Model(coords=coords) as model: @@ -218,8 +218,8 @@ with pm.Model(coords=coords) as model: ab = pm.HalfNormal("ab", sigma=10, dims="param") pm.Potential("p(a, b)", logp_ab(ab)) - X = pm.Deterministic("X", at.log(ab[0] / ab[1])) - Z = pm.Deterministic("Z", at.log(at.sum(ab))) + X = pm.Deterministic("X", pt.log(ab[0] / ab[1])) + Z = pm.Deterministic("Z", pt.log(pt.sum(ab))) theta = pm.Beta("theta", alpha=ab[0], beta=ab[1], dims="obs_id") diff --git a/examples/generalized_linear_models/GLM-robust.ipynb b/examples/generalized_linear_models/GLM-robust.ipynb index 509bb93b8..5107452d8 100644 --- a/examples/generalized_linear_models/GLM-robust.ipynb +++ b/examples/generalized_linear_models/GLM-robust.ipynb @@ -63,7 +63,7 @@ "import pandas as pd\n", "import pymc as pm\n", "import pytensor\n", - "import pytensor.tensor as at\n", + "import pytensor.tensor as pt\n", "import xarray as xr" ] }, diff --git a/examples/generalized_linear_models/GLM-robust.myst.md b/examples/generalized_linear_models/GLM-robust.myst.md index 397fc2d1a..0673621f7 100644 --- a/examples/generalized_linear_models/GLM-robust.myst.md +++ b/examples/generalized_linear_models/GLM-robust.myst.md @@ -53,7 +53,7 @@ import numpy as np import pandas as pd import pymc as pm import pytensor -import pytensor.tensor as at +import pytensor.tensor as pt import xarray as xr ``` diff --git a/examples/howto/api_quickstart.ipynb b/examples/howto/api_quickstart.ipynb index b8fe62b32..3ced3ebd3 100644 --- a/examples/howto/api_quickstart.ipynb +++ b/examples/howto/api_quickstart.ipynb @@ -33,7 +33,7 @@ "import matplotlib.pyplot as plt\n", "import numpy as np\n", "import pymc as pm\n", - "import pytensor.tensor as at" + "import pytensor.tensor as pt" ] }, { diff --git a/examples/howto/api_quickstart.myst.md b/examples/howto/api_quickstart.myst.md index 65069c64c..90928a116 100644 --- 
a/examples/howto/api_quickstart.myst.md +++ b/examples/howto/api_quickstart.myst.md @@ -25,7 +25,7 @@ import arviz as az import matplotlib.pyplot as plt import numpy as np import pymc as pm -import pytensor.tensor as at +import pytensor.tensor as pt ``` ```{code-cell} ipython3 diff --git a/examples/howto/custom_distribution.ipynb b/examples/howto/custom_distribution.ipynb index 855f93b21..59b332f86 100644 --- a/examples/howto/custom_distribution.ipynb +++ b/examples/howto/custom_distribution.ipynb @@ -179,7 +179,7 @@ "source": [ "## 3. Class Definition\n", "\n", - "Every PyMC3 distribution requires the following basic format. A few things to keep in mind:\n", + "Every PyMC3 distribution requires the following basic format. A few things to keep in mind:\n", "- Your class should have the parent class `pm.Discrete` if your distribution is discrete, or `pm.Continuous` if your distriution is continuous.\n", "- For continuous distributions you also have to define the default transform, or inherit from a more specific class like `PositiveContinuous` which specifies what the default transform should be.\n", "- You'll need specify at least one \"default value\" for the distribution during `init` such as `self.mode`, `self.median`, or `self.mean` (the latter only for continuous distributions). This is used by some samplers or other compound distributions." diff --git a/examples/howto/custom_distribution.myst.md b/examples/howto/custom_distribution.myst.md index 53c509346..24312f45b 100644 --- a/examples/howto/custom_distribution.myst.md +++ b/examples/howto/custom_distribution.myst.md @@ -146,7 +146,7 @@ def genpoisson_rvs(theta, lam, size=None): ## 3. Class Definition -Every PyMC3 distribution requires the following basic format. A few things to keep in mind: +Every PyMC3 distribution requires the following basic format. A few things to keep in mind: - Your class should have the parent class `pm.Discrete` if your distribution is discrete, or `pm.Continuous` if your distriution is continuous. - For continuous distributions you also have to define the default transform, or inherit from a more specific class like `PositiveContinuous` which specifies what the default transform should be. - You'll need specify at least one "default value" for the distribution during `init` such as `self.mode`, `self.median`, or `self.mean` (the latter only for continuous distributions). This is used by some samplers or other compound distributions. diff --git a/examples/howto/howto_debugging.ipynb b/examples/howto/howto_debugging.ipynb index 7fba7f357..8b78ba74f 100644 --- a/examples/howto/howto_debugging.ipynb +++ b/examples/howto/howto_debugging.ipynb @@ -63,7 +63,7 @@ "metadata": {}, "outputs": [], "source": [ - "import pytensor.tensor as at\n", + "import pytensor.tensor as pt\n", "\n", "from pytensor import function\n", "from pytensor.printing import Print" @@ -86,8 +86,8 @@ } ], "source": [ - "x = at.dvector(\"x\")\n", - "y = at.dvector(\"y\")\n", + "x = pt.dvector(\"x\")\n", + "y = pt.dvector(\"y\")\n", "func = function([x, y], 1 / (x - y))\n", "func([1, 2, 3], [1, 0, -1])" ] diff --git a/examples/howto/howto_debugging.myst.md b/examples/howto/howto_debugging.myst.md index 72c12391b..ab00419b9 100644 --- a/examples/howto/howto_debugging.myst.md +++ b/examples/howto/howto_debugging.myst.md @@ -45,15 +45,15 @@ RANDOM_SEED = 8927 Since `PyTensor` functions are compiled to C, you have to use `pytensor.printing.Print` class to print intermediate values (imported below as `Print`). Python `print` function will not work. 
Below is a simple example of using `Print`. For more information, see {ref}`Debugging PyTensor `. ```{code-cell} ipython3 -import pytensor.tensor as at +import pytensor.tensor as pt from pytensor import function from pytensor.printing import Print ``` ```{code-cell} ipython3 -x = at.dvector("x") -y = at.dvector("y") +x = pt.dvector("x") +y = pt.dvector("y") func = function([x, y], 1 / (x - y)) func([1, 2, 3], [1, 0, -1]) ``` diff --git a/examples/ode_models/ODE_API_shapes_and_benchmarking.ipynb b/examples/ode_models/ODE_API_shapes_and_benchmarking.ipynb index 6b465d060..e5376aeb8 100644 --- a/examples/ode_models/ODE_API_shapes_and_benchmarking.ipynb +++ b/examples/ode_models/ODE_API_shapes_and_benchmarking.ipynb @@ -150,8 +150,8 @@ " func=Chem.reaction, times=times, n_states=len(y0_true), n_theta=len(theta_true)\n", " )(y0=[s0, y0_true[1]], theta=[vmax, K_S], return_sens=False)\n", "\n", - " red_hat = y_hat.T[0][red]\n", - " blue_hat = y_hat.T[1][blue]\n", + " red_hat = y_hat.T[0][red]\n", + " blue_hat = y_hat.T[1][blue]\n", "\n", " Y_red = pm.Normal(\"Y_red\", mu=red_hat, sigma=sigma, observed=y_obs_1)\n", " Y_blue = pm.Normal(\"Y_blue\", mu=blue_hat, sigma=sigma, observed=y_obs_2)\n", diff --git a/examples/ode_models/ODE_API_shapes_and_benchmarking.myst.md b/examples/ode_models/ODE_API_shapes_and_benchmarking.myst.md index 57e02ca6d..9a841592c 100644 --- a/examples/ode_models/ODE_API_shapes_and_benchmarking.myst.md +++ b/examples/ode_models/ODE_API_shapes_and_benchmarking.myst.md @@ -109,8 +109,8 @@ def get_model(): func=Chem.reaction, times=times, n_states=len(y0_true), n_theta=len(theta_true) )(y0=[s0, y0_true[1]], theta=[vmax, K_S], return_sens=False) - red_hat = y_hat.T[0][red] - blue_hat = y_hat.T[1][blue] + red_hat = y_hat.T[0][red] + blue_hat = y_hat.T[1][blue] Y_red = pm.Normal("Y_red", mu=red_hat, sigma=sigma, observed=y_obs_1) Y_blue = pm.Normal("Y_blue", mu=blue_hat, sigma=sigma, observed=y_obs_2) diff --git a/examples/samplers/SMC2_gaussians.ipynb b/examples/samplers/SMC2_gaussians.ipynb index 9c9677a66..71fe0a67c 100644 --- a/examples/samplers/SMC2_gaussians.ipynb +++ b/examples/samplers/SMC2_gaussians.ipynb @@ -29,7 +29,7 @@ "import arviz as az\n", "import numpy as np\n", "import pymc as pm\n", - "import pytensor.tensor as at\n", + "import pytensor.tensor as pt\n", "\n", "print(f\"Running on PyMC v{pm.__version__}\")" ] @@ -133,16 +133,16 @@ "\n", "def two_gaussians(x):\n", " log_like1 = (\n", - " -0.5 * n * at.log(2 * np.pi)\n", - " - 0.5 * at.log(dsigma)\n", + " -0.5 * n * pt.log(2 * np.pi)\n", + " - 0.5 * pt.log(dsigma)\n", " - 0.5 * (x - mu1).T.dot(isigma).dot(x - mu1)\n", " )\n", " log_like2 = (\n", - " -0.5 * n * at.log(2 * np.pi)\n", - " - 0.5 * at.log(dsigma)\n", + " -0.5 * n * pt.log(2 * np.pi)\n", + " - 0.5 * pt.log(dsigma)\n", " - 0.5 * (x - mu2).T.dot(isigma).dot(x - mu2)\n", " )\n", - " return pm.math.logsumexp([at.log(w1) + log_like1, at.log(w2) + log_like2])" + " return pm.math.logsumexp([pt.log(w1) + log_like1, pt.log(w2) + log_like2])" ] }, { @@ -302,16 +302,16 @@ "\n", "def two_gaussians(x):\n", " log_like1 = (\n", - " -0.5 * n * at.log(2 * np.pi)\n", - " - 0.5 * at.log(dsigma)\n", + " -0.5 * n * pt.log(2 * np.pi)\n", + " - 0.5 * pt.log(dsigma)\n", " - 0.5 * (x - mu1).T.dot(isigma).dot(x - mu1)\n", " )\n", " log_like2 = (\n", - " -0.5 * n * at.log(2 * np.pi)\n", - " - 0.5 * at.log(dsigma)\n", + " -0.5 * n * pt.log(2 * np.pi)\n", + " - 0.5 * pt.log(dsigma)\n", " - 0.5 * (x - mu2).T.dot(isigma).dot(x - mu2)\n", " )\n", - " return 
pm.math.logsumexp([at.log(w1) + log_like1, at.log(w2) + log_like2])" + " return pm.math.logsumexp([pt.log(w1) + log_like1, pt.log(w2) + log_like2])" ] }, { diff --git a/examples/samplers/SMC2_gaussians.myst.md b/examples/samplers/SMC2_gaussians.myst.md index ed8be2d29..a756ba336 100644 --- a/examples/samplers/SMC2_gaussians.myst.md +++ b/examples/samplers/SMC2_gaussians.myst.md @@ -21,7 +21,7 @@ kernelspec: import arviz as az import numpy as np import pymc as pm -import pytensor.tensor as at +import pytensor.tensor as pt print(f"Running on PyMC v{pm.__version__}") ``` @@ -107,16 +107,16 @@ w2 = 1 - w1 # the other mode with 0.9 of the mass def two_gaussians(x): log_like1 = ( - -0.5 * n * at.log(2 * np.pi) - - 0.5 * at.log(dsigma) + -0.5 * n * pt.log(2 * np.pi) + - 0.5 * pt.log(dsigma) - 0.5 * (x - mu1).T.dot(isigma).dot(x - mu1) ) log_like2 = ( - -0.5 * n * at.log(2 * np.pi) - - 0.5 * at.log(dsigma) + -0.5 * n * pt.log(2 * np.pi) + - 0.5 * pt.log(dsigma) - 0.5 * (x - mu2).T.dot(isigma).dot(x - mu2) ) - return pm.math.logsumexp([at.log(w1) + log_like1, at.log(w2) + log_like2]) + return pm.math.logsumexp([pt.log(w1) + log_like1, pt.log(w2) + log_like2]) ``` ```{code-cell} ipython3 @@ -170,16 +170,16 @@ w2 = 1 - w1 # the other mode with 0.9 of the mass def two_gaussians(x): log_like1 = ( - -0.5 * n * at.log(2 * np.pi) - - 0.5 * at.log(dsigma) + -0.5 * n * pt.log(2 * np.pi) + - 0.5 * pt.log(dsigma) - 0.5 * (x - mu1).T.dot(isigma).dot(x - mu1) ) log_like2 = ( - -0.5 * n * at.log(2 * np.pi) - - 0.5 * at.log(dsigma) + -0.5 * n * pt.log(2 * np.pi) + - 0.5 * pt.log(dsigma) - 0.5 * (x - mu2).T.dot(isigma).dot(x - mu2) ) - return pm.math.logsumexp([at.log(w1) + log_like1, at.log(w2) + log_like2]) + return pm.math.logsumexp([pt.log(w1) + log_like1, pt.log(w2) + log_like2]) ``` ```{code-cell} ipython3 diff --git a/examples/survival_analysis/survival_analysis.ipynb b/examples/survival_analysis/survival_analysis.ipynb index 5eee9fe0b..6d241aa46 100644 --- a/examples/survival_analysis/survival_analysis.ipynb +++ b/examples/survival_analysis/survival_analysis.ipynb @@ -1226,7 +1226,7 @@ "\n", "ax.scatter(\n", " interval_bounds[last_period[(df.event.values == 1) & (df.metastasized == 1)]],\n", - " beta_hat.isel(intervals=last_period[(df.event.values == 1) & (df.metastasized == 1)]),\n", + " beta_hat.isel(intervals=last_period[(df.event.values == 1) & (df.metastasized == 1)]),\n", " color=\"C1\",\n", " zorder=10,\n", " label=\"Died, cancer metastasized\",\n", @@ -1234,7 +1234,7 @@ "\n", "ax.scatter(\n", " interval_bounds[last_period[(df.event.values == 0) & (df.metastasized == 1)]],\n", - " beta_hat.isel(intervals=last_period[(df.event.values == 0) & (df.metastasized == 1)]),\n", + " beta_hat.isel(intervals=last_period[(df.event.values == 0) & (df.metastasized == 1)]),\n", " color=\"C0\",\n", " zorder=10,\n", " label=\"Censored, cancer metastasized\",\n", diff --git a/examples/survival_analysis/survival_analysis.myst.md b/examples/survival_analysis/survival_analysis.myst.md index 42d75d2ce..2dc0c48dc 100644 --- a/examples/survival_analysis/survival_analysis.myst.md +++ b/examples/survival_analysis/survival_analysis.myst.md @@ -384,7 +384,7 @@ ax.step(interval_bounds[:-1], beta_hat, color="C0") ax.scatter( interval_bounds[last_period[(df.event.values == 1) & (df.metastasized == 1)]], - beta_hat.isel(intervals=last_period[(df.event.values == 1) & (df.metastasized == 1)]), + beta_hat.isel(intervals=last_period[(df.event.values == 1) & (df.metastasized == 1)]), color="C1", zorder=10, label="Died, cancer 
metastasized", @@ -392,7 +392,7 @@ ax.scatter( ax.scatter( interval_bounds[last_period[(df.event.values == 0) & (df.metastasized == 1)]], - beta_hat.isel(intervals=last_period[(df.event.values == 0) & (df.metastasized == 1)]), + beta_hpt.isel(intervals=last_period[(df.event.values == 0) & (df.metastasized == 1)]), color="C0", zorder=10, label="Censored, cancer metastasized", diff --git a/examples/variational_inference/bayesian_neural_network_advi.ipynb b/examples/variational_inference/bayesian_neural_network_advi.ipynb index d62234a36..bd0322c22 100644 --- a/examples/variational_inference/bayesian_neural_network_advi.ipynb +++ b/examples/variational_inference/bayesian_neural_network_advi.ipynb @@ -81,7 +81,7 @@ "import numpy as np\n", "import pymc as pm\n", "import pytensor\n", - "import pytensor.tensor as at\n", + "import pytensor.tensor as pt\n", "import seaborn as sns\n", "\n", "from sklearn.datasets import make_moons\n", diff --git a/examples/variational_inference/bayesian_neural_network_advi.myst.md b/examples/variational_inference/bayesian_neural_network_advi.myst.md index 4cd0f8881..df82af4f3 100644 --- a/examples/variational_inference/bayesian_neural_network_advi.myst.md +++ b/examples/variational_inference/bayesian_neural_network_advi.myst.md @@ -69,7 +69,7 @@ import matplotlib.pyplot as plt import numpy as np import pymc as pm import pytensor -import pytensor.tensor as at +import pytensor.tensor as pt import seaborn as sns from sklearn.datasets import make_moons