Replace at -> pt. #485

Merged: 5 commits, Dec 28, 2022
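The change itself is mechanical: every use of the old `at` alias for `pytensor.tensor` becomes `pt`. A minimal sketch of the pattern (the model below is illustrative only, not taken from the diff):

```python
import pymc as pm
import pytensor.tensor as pt  # previously: import pytensor.tensor as at

with pm.Model() as model:
    x = pm.Normal("x")
    # any tensor op formerly written at.<op> is now pt.<op>
    y = pm.Deterministic("y", pt.exp(x))
```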
2 changes: 1 addition & 1 deletion examples/case_studies/GEV.ipynb
@@ -50,7 +50,7 @@
"import numpy as np\n",
"import pymc as pm\n",
"import pymc_experimental.distributions as pmx\n",
"import pytensor.tensor as at\n",
"import pytensor.tensor as pt\n",
"\n",
"from arviz.plots import plot_utils as azpu"
]
2 changes: 1 addition & 1 deletion examples/case_studies/GEV.myst.md
@@ -45,7 +45,7 @@ import matplotlib.pyplot as plt
import numpy as np
import pymc as pm
import pymc_experimental.distributions as pmx
-import pytensor.tensor as at
+import pytensor.tensor as pt

from arviz.plots import plot_utils as azpu
```
34 changes: 17 additions & 17 deletions examples/case_studies/binning.ipynb
@@ -72,7 +72,7 @@
"We are now in a position to sketch out a generative PyMC model:\n",
"\n",
"```python\n",
"import pytensor.tensor as at\n",
"import pytensor.tensor as pt\n",
"\n",
"with pm.Model() as model:\n",
" # priors\n",
@@ -81,7 +81,7 @@
" # generative process\n",
" probs = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), cutpoints))\n",
" probs = pm.math.concatenate([[0], probs, [1]])\n",
" probs = at.extra_ops.diff(probs)\n",
" probs = pt.extra_ops.diff(probs)\n",
" # likelihood\n",
" pm.Multinomial(\"counts\", p=probs, n=sum(counts), observed=counts)\n",
"```\n",
@@ -98,7 +98,7 @@
"simply concatenates the cumulative density at $-\\infty$ (which is zero) and at $\\infty$ (which is 1).\n",
"The third line\n",
"```python\n",
"probs = at.extra_ops.diff(probs)\n",
"probs = pt.extra_ops.diff(probs)\n",
"```\n",
"calculates the difference between consecutive cumulative densities to give the actual probability of a datum falling in any given bin.\n",
"\n",
@@ -125,7 +125,7 @@
"import numpy as np\n",
"import pandas as pd\n",
"import pymc as pm\n",
"import pytensor.tensor as at\n",
"import pytensor.tensor as pt\n",
"import seaborn as sns\n",
"\n",
"warnings.filterwarnings(action=\"ignore\", category=UserWarning)"
@@ -320,7 +320,7 @@
" mu = pm.Normal(\"mu\")\n",
"\n",
" probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d1))\n",
" probs1 = at.extra_ops.diff(pm.math.concatenate([[0], probs1, [1]]))\n",
" probs1 = pt.extra_ops.diff(pm.math.concatenate([[0], probs1, [1]]))\n",
" pm.Multinomial(\"counts1\", p=probs1, n=c1.sum(), observed=c1.values)"
]
},
@@ -841,7 +841,7 @@
" mu = pm.Normal(\"mu\")\n",
"\n",
" probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d2))\n",
" probs2 = at.extra_ops.diff(pm.math.concatenate([[0], probs2, [1]]))\n",
" probs2 = pt.extra_ops.diff(pm.math.concatenate([[0], probs2, [1]]))\n",
" pm.Multinomial(\"counts2\", p=probs2, n=c2.sum(), observed=c2.values)"
]
},
@@ -1238,11 +1238,11 @@
" mu = pm.Normal(\"mu\")\n",
"\n",
" probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d1))\n",
" probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n",
" probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n",
" probs1 = pm.Deterministic(\"normal1_cdf\", probs1)\n",
"\n",
" probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d2))\n",
" probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n",
" probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n",
" probs2 = pm.Deterministic(\"normal2_cdf\", probs2)\n",
"\n",
" pm.Multinomial(\"counts1\", p=probs1, n=c1.sum(), observed=c1.values)\n",
@@ -1719,7 +1719,7 @@
" mu = pm.Normal(\"mu\")\n",
" # study 1\n",
" probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d1))\n",
" probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n",
" probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n",
" probs1 = pm.Deterministic(\"normal1_cdf\", probs1)\n",
" pm.Multinomial(\"counts1\", p=probs1, n=c1.sum(), observed=c1.values)\n",
" # study 2\n",
@@ -2149,12 +2149,12 @@
"\n",
" # Study 1\n",
" probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[0], sigma=sigma[0]), d1))\n",
" probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n",
" probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n",
" probs1 = pm.Deterministic(\"normal1_cdf\", probs1, dims=\"bin1\")\n",
"\n",
" # Study 2\n",
" probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[1], sigma=sigma[1]), d2))\n",
" probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n",
" probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n",
" probs2 = pm.Deterministic(\"normal2_cdf\", probs2, dims=\"bin2\")\n",
"\n",
" # Likelihood\n",
@@ -2392,12 +2392,12 @@
"\n",
" # Study 1\n",
" probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[0], sigma=sigma[0]), d1))\n",
" probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n",
" probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n",
" probs1 = pm.Deterministic(\"normal1_cdf\", probs1, dims=\"bin1\")\n",
"\n",
" # Study 2\n",
" probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[1], sigma=sigma[1]), d2))\n",
" probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n",
" probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n",
" probs2 = pm.Deterministic(\"normal2_cdf\", probs2, dims=\"bin2\")\n",
"\n",
" # Likelihood\n",
@@ -2927,12 +2927,12 @@
" \n",
" # Study 1\n",
" probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[0], sigma=sigma[0]), d1))\n",
" probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n",
" probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n",
" probs1 = pm.Deterministic(\"normal1_cdf\", probs1, dims='bin1')\n",
"\n",
" # Study 2\n",
" probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[1], sigma=sigma[1]), d2))\n",
" probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n",
" probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n",
" probs2 = pm.Deterministic(\"normal2_cdf\", probs2, dims='bin2')\n",
"\n",
" # Likelihood\n",
@@ -3091,11 +3091,11 @@
" beta = pm.HalfNormal(\"beta\", 10)\n",
"\n",
" probs1 = pm.math.exp(pm.logcdf(pm.Gumbel.dist(mu=mu, beta=beta), d1))\n",
" probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n",
" probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n",
" probs1 = pm.Deterministic(\"gumbel_cdf1\", probs1)\n",
"\n",
" probs2 = pm.math.exp(pm.logcdf(pm.Gumbel.dist(mu=mu, beta=beta), d2))\n",
" probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n",
" probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n",
" probs2 = pm.Deterministic(\"gumbel_cdf2\", probs2)\n",
"\n",
" pm.Multinomial(\"counts1\", p=probs1, n=c1.sum(), observed=c1.values)\n",
34 changes: 17 additions & 17 deletions examples/case_studies/binning.myst.md
@@ -69,7 +69,7 @@ In ordinal regression, the cutpoints are treated as latent variables and the par
We are now in a position to sketch out a generative PyMC model:

```python
-import pytensor.tensor as at
+import pytensor.tensor as pt

with pm.Model() as model:
# priors
@@ -78,7 +78,7 @@ with pm.Model() as model:
# generative process
probs = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), cutpoints))
probs = pm.math.concatenate([[0], probs, [1]])
-    probs = at.extra_ops.diff(probs)
+    probs = pt.extra_ops.diff(probs)
# likelihood
pm.Multinomial("counts", p=probs, n=sum(counts), observed=counts)
```
@@ -95,7 +95,7 @@ probs = pm.math.concatenate([[0], probs, [1]])
simply concatenates the cumulative density at $-\infty$ (which is zero) and at $\infty$ (which is 1).
The third line
```python
-probs = at.extra_ops.diff(probs)
+probs = pt.extra_ops.diff(probs)
```
calculates the difference between consecutive cumulative densities to give the actual probability of a datum falling in any given bin.
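As a concrete illustration of that step, a small sketch with made-up numbers (using `numpy.diff`, which behaves like `pt.extra_ops.diff` on constant arrays):

```python
import numpy as np

# hypothetical cumulative densities, padded with 0 at -inf and 1 at +inf
cdf = np.array([0.0, 0.2, 0.7, 1.0])
# consecutive differences give the per-bin probabilities
print(np.diff(cdf))  # [0.2 0.5 0.3]
```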

@@ -115,7 +115,7 @@ import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pymc as pm
-import pytensor.tensor as at
+import pytensor.tensor as pt
import seaborn as sns

warnings.filterwarnings(action="ignore", category=UserWarning)
@@ -226,7 +226,7 @@ with pm.Model() as model1:
mu = pm.Normal("mu")

probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d1))
-    probs1 = at.extra_ops.diff(pm.math.concatenate([[0], probs1, [1]]))
+    probs1 = pt.extra_ops.diff(pm.math.concatenate([[0], probs1, [1]]))
pm.Multinomial("counts1", p=probs1, n=c1.sum(), observed=c1.values)
```

@@ -331,7 +331,7 @@ with pm.Model() as model2:
mu = pm.Normal("mu")

probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d2))
-    probs2 = at.extra_ops.diff(pm.math.concatenate([[0], probs2, [1]]))
+    probs2 = pt.extra_ops.diff(pm.math.concatenate([[0], probs2, [1]]))
pm.Multinomial("counts2", p=probs2, n=c2.sum(), observed=c2.values)
```

@@ -426,11 +426,11 @@ with pm.Model() as model3:
mu = pm.Normal("mu")

probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d1))
-    probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))
+    probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))
probs1 = pm.Deterministic("normal1_cdf", probs1)

probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d2))
-    probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))
+    probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))
probs2 = pm.Deterministic("normal2_cdf", probs2)

pm.Multinomial("counts1", p=probs1, n=c1.sum(), observed=c1.values)
@@ -519,7 +519,7 @@ with pm.Model() as model4:
mu = pm.Normal("mu")
# study 1
probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d1))
-    probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))
+    probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))
probs1 = pm.Deterministic("normal1_cdf", probs1)
pm.Multinomial("counts1", p=probs1, n=c1.sum(), observed=c1.values)
# study 2
@@ -612,12 +612,12 @@ with pm.Model(coords=coords) as model5:

# Study 1
probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[0], sigma=sigma[0]), d1))
-    probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))
+    probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))
probs1 = pm.Deterministic("normal1_cdf", probs1, dims="bin1")

# Study 2
probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[1], sigma=sigma[1]), d2))
-    probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))
+    probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))
probs2 = pm.Deterministic("normal2_cdf", probs2, dims="bin2")

# Likelihood
@@ -645,12 +645,12 @@ with pm.Model(coords=coords) as model5:

# Study 1
probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[0], sigma=sigma[0]), d1))
-    probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))
+    probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))
probs1 = pm.Deterministic("normal1_cdf", probs1, dims="bin1")

# Study 2
probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[1], sigma=sigma[1]), d2))
-    probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))
+    probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))
probs2 = pm.Deterministic("normal2_cdf", probs2, dims="bin2")

# Likelihood
@@ -748,12 +748,12 @@ with pm.Model(coords=coords) as model5:

# Study 1
probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[0], sigma=sigma[0]), d1))
-    probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))
+    probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))
probs1 = pm.Deterministic("normal1_cdf", probs1, dims='bin1')

# Study 2
probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[1], sigma=sigma[1]), d2))
-    probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))
+    probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))
probs2 = pm.Deterministic("normal2_cdf", probs2, dims='bin2')

# Likelihood
@@ -855,11 +855,11 @@ with pm.Model() as model6:
beta = pm.HalfNormal("beta", 10)

probs1 = pm.math.exp(pm.logcdf(pm.Gumbel.dist(mu=mu, beta=beta), d1))
-    probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))
+    probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))
probs1 = pm.Deterministic("gumbel_cdf1", probs1)

probs2 = pm.math.exp(pm.logcdf(pm.Gumbel.dist(mu=mu, beta=beta), d2))
-    probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))
+    probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))
probs2 = pm.Deterministic("gumbel_cdf2", probs2)

pm.Multinomial("counts1", p=probs1, n=c1.sum(), observed=c1.values)
30 changes: 15 additions & 15 deletions examples/case_studies/blackbox_external_likelihood_numpy.ipynb
@@ -47,7 +47,7 @@
"import numpy as np\n",
"import pymc as pm\n",
"import pytensor\n",
"import pytensor.tensor as at\n",
"import pytensor.tensor as pt\n",
"\n",
"print(f\"Running on PyMC v{pm.__version__}\")"
]
@@ -180,7 +180,7 @@
"outputs": [],
"source": [
"# define a pytensor Op for our likelihood function\n",
"class LogLike(at.Op):\n",
"class LogLike(pt.Op):\n",
"\n",
" \"\"\"\n",
" Specify what type of object will be passed and returned to the Op when it is\n",
@@ -189,8 +189,8 @@
" log-likelihood)\n",
" \"\"\"\n",
"\n",
" itypes = [at.dvector] # expects a vector of parameter values when called\n",
" otypes = [at.dscalar] # outputs a single scalar value (the log likelihood)\n",
" itypes = [pt.dvector] # expects a vector of parameter values when called\n",
" otypes = [pt.dscalar] # outputs a single scalar value (the log likelihood)\n",
"\n",
" def __init__(self, loglike, data, x, sigma):\n",
" \"\"\"\n",
@@ -336,7 +336,7 @@
" c = pm.Uniform(\"c\", lower=-10.0, upper=10.0)\n",
"\n",
" # convert m and c to a tensor vector\n",
" theta = at.as_tensor_variable([m, c])\n",
" theta = pt.as_tensor_variable([m, c])\n",
"\n",
" # use a Potential to \"call\" the Op and include it in the logp computation\n",
" pm.Potential(\"likelihood\", logl(theta))\n",
@@ -426,10 +426,10 @@
"outputs": [],
"source": [
"# define a pytensor Op for our likelihood function\n",
"class LogLikeWithGrad(at.Op):\n",
"class LogLikeWithGrad(pt.Op):\n",
"\n",
" itypes = [at.dvector] # expects a vector of parameter values when called\n",
" otypes = [at.dscalar] # outputs a single scalar value (the log likelihood)\n",
" itypes = [pt.dvector] # expects a vector of parameter values when called\n",
" otypes = [pt.dscalar] # outputs a single scalar value (the log likelihood)\n",
"\n",
" def __init__(self, loglike, data, x, sigma):\n",
" \"\"\"\n",
@@ -473,15 +473,15 @@
" return [g[0] * self.logpgrad(theta)]\n",
"\n",
"\n",
"class LogLikeGrad(at.Op):\n",
"class LogLikeGrad(pt.Op):\n",
"\n",
" \"\"\"\n",
" This Op will be called with a vector of values and also return a vector of\n",
" values - the gradients in each dimension.\n",
" \"\"\"\n",
"\n",
" itypes = [at.dvector]\n",
" otypes = [at.dvector]\n",
" itypes = [pt.dvector]\n",
" otypes = [pt.dvector]\n",
"\n",
" def __init__(self, data, x, sigma):\n",
" \"\"\"\n",
@@ -609,7 +609,7 @@
" c = pm.Uniform(\"c\", lower=-10.0, upper=10.0)\n",
"\n",
" # convert m and c to a tensor vector\n",
" theta = at.as_tensor_variable([m, c])\n",
" theta = pt.as_tensor_variable([m, c])\n",
"\n",
" # use a Potential\n",
" pm.Potential(\"likelihood\", logl(theta))\n",
@@ -710,7 +710,7 @@
" c = pm.Uniform(\"c\", lower=-10.0, upper=10.0)\n",
"\n",
" # convert m and c to a tensor vector\n",
" theta = at.as_tensor_variable([m, c])\n",
" theta = pt.as_tensor_variable([m, c])\n",
"\n",
" # use a Normal distribution\n",
" pm.Normal(\"likelihood\", mu=(m * x + c), sd=sigma, observed=data)\n",
@@ -947,7 +947,7 @@
"pytensor.config.compute_test_value = \"ignore\"\n",
"pytensor.config.exception_verbosity = \"high\"\n",
"\n",
"var = at.dvector()\n",
"var = pt.dvector()\n",
"test_grad_op = LogLikeGrad(data, x, sigma)\n",
"test_grad_op_func = pytensor.function([var], test_grad_op(var))\n",
"grad_vals = test_grad_op_func([mtrue, ctrue])\n",
@@ -956,7 +956,7 @@
"\n",
"# test the gradient called through LogLikeWithGrad\n",
"test_gradded_op = LogLikeWithGrad(my_loglike, data, x, sigma)\n",
"test_gradded_op_grad = at.grad(test_gradded_op(var), var)\n",
"test_gradded_op_grad = pt.grad(test_gradded_op(var), var)\n",
"test_gradded_op_grad_func = pytensor.function([var], test_gradded_op_grad)\n",
"grad_vals_2 = test_gradded_op_grad_func([mtrue, ctrue])\n",
"\n",