
Commit 9df55cc

Update aliases to reflect package name (#547)
* Change `import pytensor.tensor as at` to `as pt` everywhere in the docs
* Change `import pytensor.tensor as at` to `as pt` everywhere
  Change `import pytensor.scalar as aes` to `as ps` everywhere
  Change `import pytensor.tensor.random as aer` to `as ptr` everywhere
  Change test variables with `_at` suffix or `at_` prefix to `_pt` and `pt_`, respectively
* More renaming
* Rename remaining instances of `aes` and `aer`
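In practice, the new aliases mirror the package initials. A minimal sketch of the convention this commit adopts (the old spellings are noted in comments; the `x_pt` variable is illustrative):

    import pytensor.tensor as pt           # was: import pytensor.tensor as at
    import pytensor.scalar as ps           # was: import pytensor.scalar as aes
    import pytensor.tensor.random as ptr   # was: import pytensor.tensor.random as aer

    # Renamed test variables follow the same pattern, e.g. `x_at` -> `x_pt`:
    x_pt = pt.vector("x")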
1 parent c38eea0 commit 9df55cc

File tree

156 files changed: +3663, −3667 lines


doc/extending/creating_a_numba_jax_op.rst

Lines changed: 3 additions & 3 deletions
@@ -135,16 +135,16 @@ Here's a small example of a test for :class:`Eye`:
 
 .. code:: python
 
-    import pytensor.tensor as at
+    import pytensor.tensor as pt
 
     def test_jax_Eye():
         """Test JAX conversion of the `Eye` `Op`."""
 
         # Create a symbolic input for `Eye`
-        x_at = at.scalar()
+        x_at = pt.scalar()
 
         # Create a variable that is the output of an `Eye` `Op`
-        eye_var = at.eye(x_at)
+        eye_var = pt.eye(x_at)
 
         # Create a PyTensor `FunctionGraph`
         out_fg = FunctionGraph(outputs=[eye_var])
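With the hunk applied, the snippet reads as follows (a runnable sketch; the `FunctionGraph` import lives outside the lines shown and is assumed here to come from `pytensor.graph.fg`):

    import pytensor.tensor as pt
    from pytensor.graph.fg import FunctionGraph

    def test_jax_Eye():
        """Test JAX conversion of the `Eye` `Op`."""
        # Create a symbolic input for `Eye`
        x_at = pt.scalar()

        # Create a variable that is the output of an `Eye` `Op`
        eye_var = pt.eye(x_at)

        # Create a PyTensor `FunctionGraph`
        out_fg = FunctionGraph(outputs=[eye_var])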

doc/extending/creating_an_op.rst

Lines changed: 5 additions & 5 deletions
@@ -786,7 +786,7 @@ signature:
 .. testcode:: asop
 
     import pytensor
-    import pytensor.tensor as at
+    import pytensor.tensor as pt
     import numpy as np
     from pytensor import function
     from pytensor.compile.ops import as_op
@@ -797,17 +797,17 @@ signature:
         return [ashp[:-1] + bshp[-1:]]
 
 
-    @as_op(itypes=[at.matrix, at.matrix],
-           otypes=[at.matrix], infer_shape=infer_shape_numpy_dot)
+    @as_op(itypes=[pt.matrix, pt.matrix],
+           otypes=[pt.matrix], infer_shape=infer_shape_numpy_dot)
     def numpy_dot(a, b):
         return np.dot(a, b)
 
 You can try it as follows:
 
 .. testcode:: asop
 
-    x = at.matrix()
-    y = at.matrix()
+    x = pt.matrix()
+    y = pt.matrix()
     f = function([x, y], numpy_dot(x, y))
     inp1 = np.random.random_sample((5, 4))
     inp2 = np.random.random_sample((4, 7))
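The hunk ends at the random inputs; a minimal sketch of the evaluation step that would follow (the `out` name is illustrative, not part of the diff):

    # (5, 4) @ (4, 7) -> (5, 7); `f` wraps `numpy_dot` built with `as_op`
    out = f(inp1, inp2)
    print(out.shape)  # expected: (5, 7)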

doc/extending/extending_pytensor_solution_1.py

Lines changed: 13 additions & 17 deletions
@@ -14,8 +14,8 @@
 
 class ProdOp(Op):
     def make_node(self, x, y):
-        x = at.as_tensor_variable(x)
-        y = at.as_tensor_variable(y)
+        x = pt.as_tensor_variable(x)
+        y = pt.as_tensor_variable(y)
         outdim = x.type.ndim
         output = TensorType(
             dtype=pytensor.scalar.upcast(x.dtype, y.dtype), shape=(None,) * outdim
@@ -39,8 +39,8 @@ def grad(self, inputs, output_grads):
 
 class SumDiffOp(Op):
     def make_node(self, x, y):
-        x = at.as_tensor_variable(x)
-        y = at.as_tensor_variable(y)
+        x = pt.as_tensor_variable(x)
+        y = pt.as_tensor_variable(y)
         outdim = x.type.ndim
         output1 = TensorType(
             dtype=pytensor.scalar.upcast(x.dtype, y.dtype), shape=(None,) * outdim
@@ -62,20 +62,16 @@ def infer_shape(self, fgraph, node, i0_shapes):
     def grad(self, inputs, output_grads):
         og1, og2 = output_grads
         if og1 is None:
-            og1 = at.zeros_like(og2)
+            og1 = pt.zeros_like(og2)
         if og2 is None:
-            og2 = at.zeros_like(og1)
+            og2 = pt.zeros_like(og1)
         return [og1 + og2, og1 - og2]
 
 
 # 3. Testing apparatus
-
-import numpy as np
-
 from tests import unittest_tools as utt
-from pytensor import tensor as at
+from pytensor import tensor as pt
 from pytensor.graph.basic import Apply
-from pytensor.graph.op import Op
 from pytensor.tensor.type import dmatrix, matrix
 
 
@@ -182,8 +178,8 @@ def infer_shape_numpy_dot(fgraph, node, input_shapes):
 
 
 @as_op(
-    itypes=[at.fmatrix, at.fmatrix],
-    otypes=[at.fmatrix],
+    itypes=[pt.fmatrix, pt.fmatrix],
+    otypes=[pt.fmatrix],
     infer_shape=infer_shape_numpy_dot,
 )
 def numpy_add(a, b):
@@ -197,17 +193,17 @@ def infer_shape_numpy_add_sub(fgraph, node, input_shapes):
 
 
 @as_op(
-    itypes=[at.fmatrix, at.fmatrix],
-    otypes=[at.fmatrix],
+    itypes=[pt.fmatrix, pt.fmatrix],
+    otypes=[pt.fmatrix],
     infer_shape=infer_shape_numpy_add_sub,
 )
 def numpy_add(a, b):
     return np.add(a, b)
 
 
 @as_op(
-    itypes=[at.fmatrix, at.fmatrix],
-    otypes=[at.fmatrix],
+    itypes=[pt.fmatrix, pt.fmatrix],
+    otypes=[pt.fmatrix],
     infer_shape=infer_shape_numpy_add_sub,
 )
 def numpy_sub(a, b):

doc/extending/graph_rewriting.rst

Lines changed: 11 additions & 11 deletions
@@ -443,7 +443,7 @@ The following is an example that distributes dot products across additions.
 .. code::
 
     import pytensor
-    import pytensor.tensor as at
+    import pytensor.tensor as pt
     from pytensor.graph.rewriting.kanren import KanrenRelationSub
     from pytensor.graph.rewriting.basic import EquilibriumGraphRewriter
     from pytensor.graph.rewriting.utils import rewrite_graph
@@ -462,7 +462,7 @@ The following is an example that distributes dot products across additions.
     )
 
     # Tell `kanren` that `add` is associative
-    fact(associative, at.add)
+    fact(associative, pt.add)
 
 
     def dot_distributeo(in_lv, out_lv):
@@ -473,13 +473,13 @@ The following is an example that distributes dot products across additions.
             # Make sure the input is a `_dot`
             eq(in_lv, etuple(_dot, A_lv, add_term_lv)),
             # Make sure the term being `_dot`ed is an `add`
-            heado(at.add, add_term_lv),
+            heado(pt.add, add_term_lv),
             # Flatten the associative pairings of `add` operations
             assoc_flatten(add_term_lv, add_flat_lv),
             # Get the flattened `add` arguments
             tailo(add_cdr_lv, add_flat_lv),
             # Add all the `_dot`ed arguments and set the output
-            conso(at.add, dot_cdr_lv, out_lv),
+            conso(pt.add, dot_cdr_lv, out_lv),
             # Apply the `_dot` to all the flattened `add` arguments
             mapo(lambda x, y: conso(_dot, etuple(A_lv, x), y), add_cdr_lv, dot_cdr_lv),
         )
@@ -490,10 +490,10 @@ The following is an example that distributes dot products across additions.
 
 Below, we apply `dot_distribute_rewrite` to a few example graphs. First we create a simple test graph:
 
->>> x_at = at.vector("x")
->>> y_at = at.vector("y")
->>> A_at = at.matrix("A")
+>>> x_at = pt.vector("x")
+>>> y_at = pt.vector("y")
+>>> A_at = pt.matrix("A")
 >>> test_at = A_at.dot(x_at + y_at)
 >>> print(pytensor.pprint(test_at))
 (A @ (x + y))
 
@@ -506,18 +506,18 @@ Next we apply the rewrite to the graph:
 We see that the dot product has been distributed, as desired. Now, let's try a
 few more test cases:
 
->>> z_at = at.vector("z")
->>> w_at = at.vector("w")
+>>> z_at = pt.vector("z")
+>>> w_at = pt.vector("w")
 >>> test_at = A_at.dot((x_at + y_at) + (z_at + w_at))
 >>> print(pytensor.pprint(test_at))
 (A @ ((x + y) + (z + w)))
 >>> res = rewrite_graph(test_at, include=[], custom_rewrite=dot_distribute_rewrite, clone=False)
 >>> print(pytensor.pprint(res))
 (((A @ x) + (A @ y)) + ((A @ z) + (A @ w)))
 
->>> B_at = at.matrix("B")
->>> w_at = at.vector("w")
+>>> B_at = pt.matrix("B")
+>>> w_at = pt.vector("w")
 >>> test_at = A_at.dot(x_at + (y_at + B_at.dot(z_at + w_at)))
 >>> print(pytensor.pprint(test_at))
 (A @ (x + (y + ((B @ z) + (B @ w)))))
 >>> res = rewrite_graph(test_at, include=[], custom_rewrite=dot_distribute_rewrite, clone=False)

doc/extending/graphstructures.rst

Lines changed: 3 additions & 3 deletions
@@ -28,10 +28,10 @@ The following illustrates these elements:
 
 .. testcode::
 
-    import pytensor.tensor as at
+    import pytensor.tensor as pt
 
-    x = at.dmatrix('x')
-    y = at.dmatrix('y')
+    x = pt.dmatrix('x')
+    y = pt.dmatrix('y')
     z = x + y
 
 **Diagram**

doc/extending/tips.rst

Lines changed: 2 additions & 2 deletions
@@ -20,10 +20,10 @@ simple function:
 
 .. code::
 
-    from pytensor import tensor as at
+    from pytensor import tensor as pt
 
     def sum_square_difference(a, b):
-        return at.sum((a - b)**2)
+        return pt.sum((a - b)**2)
 
 Even without taking PyTensor's rewrites into account, it is likely
 to work just as well as a custom implementation. It also supports all
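As a quick end-to-end illustration of the helper above (a sketch; the graph names and test values are illustrative, not part of the diff):

    import numpy as np
    from pytensor import function, tensor as pt

    def sum_square_difference(a, b):
        return pt.sum((a - b)**2)

    a = pt.dmatrix("a")
    b = pt.dmatrix("b")
    f = function([a, b], sum_square_difference(a, b))

    # nine elementwise differences of 1.0, squared and summed
    print(f(np.ones((3, 3)), np.zeros((3, 3))))  # 9.0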

doc/extending/unittest.rst

Lines changed: 6 additions & 6 deletions
@@ -98,13 +98,13 @@ Example:
 .. code-block:: python
 
     import numpy as np
-    import pytensor.tensor as at
+    import pytensor.tensor as pt
 
 
     def test_dot_validity():
-        a = at.dmatrix('a')
-        b = at.dmatrix('b')
-        c = at.dot(a, b)
+        a = pt.dmatrix('a')
+        b = pt.dmatrix('b')
+        c = pt.dot(a, b)
 
         c_fn = pytensor.function([a, b], [c])
 
@@ -187,7 +187,7 @@ symbolic variable:
 
     def test_verify_exprgrad():
         def fun(x,y,z):
-            return (x + at.cos(y)) / (4 * z)**2
+            return (x + pt.cos(y)) / (4 * z)**2
 
         x_val = np.asarray([[1], [1.1], [1.2]])
         y_val = np.asarray([0.1, 0.2])
@@ -207,7 +207,7 @@ Here is an example showing how to use :func:`verify_grad` on an :class:`Op` inst
     """
     a_val = np.asarray([[0,1,2],[3,4,5]], dtype='float64')
     rng = np.random.default_rng(42)
-    pytensor.gradient.verify_grad(at.Flatten(), [a_val], rng=rng)
+    pytensor.gradient.verify_grad(pt.Flatten(), [a_val], rng=rng)
 
 .. note::
 
doc/glossary.rst

Lines changed: 3 additions & 3 deletions
@@ -6,7 +6,7 @@ Glossary
 .. testsetup::
 
     import pytensor
-    import pytensor.tensor as at
+    import pytensor.tensor as pt
 
 .. glossary::
 
@@ -31,7 +31,7 @@ Glossary
         A variable with an immutable value.
        For example, when you type
 
-        >>> x = at.ivector()
+        >>> x = pt.ivector()
         >>> y = x + 3
 
         Then a `constant` is created to represent the ``3`` in the graph.
@@ -151,7 +151,7 @@ Glossary
         The main data structure you work with when using PyTensor.
         For example,
 
-        >>> x = at.ivector()
+        >>> x = pt.ivector()
         >>> y = -x**2
 
         ``x`` and ``y`` are both :class:`Variable`\s, i.e. instances of the :class:`Variable` class.

doc/introduction.rst

Lines changed: 3 additions & 3 deletions
@@ -66,11 +66,11 @@ its features, but it illustrates concretely what PyTensor is.
 .. code-block:: python
 
     import pytensor
-    from pytensor import tensor as at
+    from pytensor import tensor as pt
 
     # declare two symbolic floating-point scalars
-    a = at.dscalar()
-    b = at.dscalar()
+    a = pt.dscalar()
+    b = pt.dscalar()
 
     # create a simple expression
     c = a + b
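For context, a minimal sketch of compiling and evaluating the expression built above (the compilation step falls outside the hunk shown):

    import pytensor
    from pytensor import tensor as pt

    a = pt.dscalar()
    b = pt.dscalar()
    c = a + b

    # compile the graph into a callable and evaluate it
    f = pytensor.function([a, b], c)
    assert f(1.5, 2.5) == 4.0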

doc/library/compile/debugmode.rst

Lines changed: 2 additions & 2 deletions
@@ -28,10 +28,10 @@ a cluster.
 .. testcode::
 
     import pytensor
-    from pytensor import tensor as at
+    from pytensor import tensor as pt
     from pytensor.compile.debugmode import DebugMode
 
-    x = at.dscalar('x')
+    x = pt.dscalar('x')
 
     f = pytensor.function([x], 10*x, mode='DebugMode')
 

doc/library/compile/io.rst

Lines changed: 10 additions & 10 deletions
@@ -80,10 +80,10 @@ A non-None `value` argument makes an In() instance an optional parameter
 of the compiled function. For example, in the following code we are
 defining an arity-2 function ``inc``.
 
->>> import pytensor.tensor as at
+>>> import pytensor.tensor as pt
 >>> from pytensor import function
 >>> from pytensor.compile.io import In
->>> u, x, s = at.scalars('u', 'x', 's')
+>>> u, x, s = pt.scalars('u', 'x', 's')
 >>> inc = function([u, In(x, value=3), In(s, update=(s+x*u), value=10.0)], [])
 
 Since we provided a ``value`` for ``s`` and ``x``, we can call it with just a value for ``u`` like this:
@@ -183,8 +183,8 @@ method to access values by indexing a Function directly by typing
 To show some examples of these access methods...
 
 
->>> from pytensor import tensor as at, function
->>> a, b, c = at.scalars('xys') # set the internal names of graph nodes
+>>> from pytensor import tensor as pt, function
+>>> a, b, c = pt.scalars('xys') # set the internal names of graph nodes
 >>> # Note that the name of c is 's', not 'c'!
 >>> fn = function([a, b, ((c, c+a+b), 10.0)], [])
 
@@ -236,12 +236,12 @@ Every element of the inputs list will be upgraded to an In instance if necessary
 Example:
 
 >>> import pytensor
->>> from pytensor import tensor as at
+>>> from pytensor import tensor as pt
 >>> from pytensor.compile.io import In
->>> x = at.scalar()
->>> y = at.scalar('y')
->>> z = at.scalar('z')
->>> w = at.scalar('w')
+>>> x = pt.scalar()
+>>> y = pt.scalar('y')
+>>> z = pt.scalar('z')
+>>> w = pt.scalar('w')
 
 >>> fn = pytensor.function(inputs=[x, y, In(z, value=42), ((w, w+x), 0)],
 ...                        outputs=x + y + z)
@@ -308,7 +308,7 @@ If a list of ``Variable`` or ``Out`` instances is given as argument, then the co
 
 >>> import numpy
 >>> from pytensor.compile.io import Out
->>> x, y, s = at.matrices('xys')
+>>> x, y, s = pt.matrices('xys')
 
 >>> # print a list of 2 ndarrays
 >>> fn1 = pytensor.function([x], [x+x, Out((x+x).T, borrow=True)])

doc/library/compile/nanguardmode.rst

Lines changed: 3 additions & 3 deletions
@@ -25,12 +25,12 @@ of abnormal values: NaNs, Infs, and abnormally big values.
 
     import numpy as np
     import pytensor
-    import pytensor.tensor as at
+    import pytensor.tensor as pt
    from pytensor.compile.nanguardmode import NanGuardMode
 
-    x = at.matrix()
+    x = pt.matrix()
     w = pytensor.shared(np.random.standard_normal((5, 7)).astype(pytensor.config.floatX))
-    y = at.dot(x, w)
+    y = pt.dot(x, w)
     fun = pytensor.function(
         [x], y,
         mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True)
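A sketch of what the guard catches once `fun` is compiled as above (the `infa` input is illustrative; NanGuardMode raises an error as soon as a non-finite value flows through the function):

    import numpy as np

    # 100.0 ** 1000000 overflows to inf, so every entry of `infa` is infinite;
    # the (3, 5) shape matches the (5, 7) shared weight matrix `w`.
    infa = np.tile(
        (np.asarray(100.0) ** 1000000).astype(pytensor.config.floatX), (3, 5)
    )
    fun(infa)  # raises an error instead of silently returning inf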
