
Commit d913b96

Change `import pytensor.tensor as at` to `as pt` everywhere

Change `import pytensor.scalar as aes` to `as ps` everywhere
Change `import pytensor.tensor.random as aer` to `as ptr` everywhere
Change test variables with `_at` suffix or `at_` prefix to `_pt` and `pt_`, respectively
1 parent 619b74e commit d913b96


79 files changed: 2151 additions & 2153 deletions
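In practice, the renamed aliases from this commit look like the following minimal sketch (the variables here are illustrative, not taken from the diff):

    import pytensor
    import pytensor.tensor as pt           # was: import pytensor.tensor as at
    import pytensor.scalar as ps           # was: import pytensor.scalar as aes
    import pytensor.tensor.random as ptr   # was: import pytensor.tensor.random as aer

    x_pt = pt.vector("x")                  # test variables now use a `_pt` suffix
    y_pt = pt.sum(x_pt ** 2)
    f = pytensor.function([x_pt], y_pt)
    print(f([1.0, 2.0, 3.0]))              # 14.0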

doc/extending/creating_a_numba_jax_op.rst

Lines changed: 3 additions & 3 deletions
@@ -135,16 +135,16 @@ Here's a small example of a test for :class:`Eye`:

 .. code:: python

-    import pytensor.tensor as at
+    import pytensor.tensor as pt

     def test_jax_Eye():
         """Test JAX conversion of the `Eye` `Op`."""

         # Create a symbolic input for `Eye`
-        x_at = at.scalar()
+        x_at = pt.scalar()

         # Create a variable that is the output of an `Eye` `Op`
-        eye_var = at.eye(x_at)
+        eye_var = pt.eye(x_at)

         # Create an PyTensor `FunctionGraph`
         out_fg = FunctionGraph(outputs=[eye_var])

doc/extending/creating_an_op.rst

Lines changed: 5 additions & 5 deletions
@@ -786,7 +786,7 @@ signature:
 .. testcode:: asop

     import pytensor
-    import pytensor.tensor as at
+    import pytensor.tensor as pt
     import numpy as np
     from pytensor import function
     from pytensor.compile.ops import as_op
@@ -797,17 +797,17 @@ signature:
         return [ashp[:-1] + bshp[-1:]]


-    @as_op(itypes=[at.matrix, at.matrix],
-           otypes=[at.matrix], infer_shape=infer_shape_numpy_dot)
+    @as_op(itypes=[pt.matrix, pt.matrix],
+           otypes=[pt.matrix], infer_shape=infer_shape_numpy_dot)
     def numpy_dot(a, b):
         return np.dot(a, b)

 You can try it as follows:

 .. testcode:: asop

-    x = at.matrix()
-    y = at.matrix()
+    x = pt.matrix()
+    y = pt.matrix()
     f = function([x, y], numpy_dot(x, y))
     inp1 = np.random.random_sample((5, 4))
     inp2 = np.random.random_sample((4, 7))
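The hunk ends mid-example; as a hedged completion of the usage shown (not part of the diff), the compiled function could simply be called and checked against NumPy:

    out = f(inp1, inp2)
    assert np.allclose(out, np.dot(inp1, inp2))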

doc/extending/graph_rewriting.rst

Lines changed: 14 additions & 14 deletions
@@ -443,7 +443,7 @@ The following is an example that distributes dot products across additions.
 .. code::

     import pytensor
-    import pytensor.tensor as at
+    import pytensor.tensor as pt
     from pytensor.graph.rewriting.kanren import KanrenRelationSub
     from pytensor.graph.rewriting.basic import EquilibriumGraphRewriter
     from pytensor.graph.rewriting.utils import rewrite_graph
@@ -462,7 +462,7 @@ The following is an example that distributes dot products across additions.
     )

     # Tell `kanren` that `add` is associative
-    fact(associative, at.add)
+    fact(associative, pt.add)


     def dot_distributeo(in_lv, out_lv):
@@ -473,13 +473,13 @@ The following is an example that distributes dot products across additions.
         # Make sure the input is a `_dot`
         eq(in_lv, etuple(_dot, A_lv, add_term_lv)),
         # Make sure the term being `_dot`ed is an `add`
-        heado(at.add, add_term_lv),
+        heado(pt.add, add_term_lv),
         # Flatten the associative pairings of `add` operations
         assoc_flatten(add_term_lv, add_flat_lv),
         # Get the flattened `add` arguments
         tailo(add_cdr_lv, add_flat_lv),
         # Add all the `_dot`ed arguments and set the output
-        conso(at.add, dot_cdr_lv, out_lv),
+        conso(pt.add, dot_cdr_lv, out_lv),
         # Apply the `_dot` to all the flattened `add` arguments
         mapo(lambda x, y: conso(_dot, etuple(A_lv, x), y), add_cdr_lv, dot_cdr_lv),
     )
@@ -490,10 +490,10 @@ The following is an example that distributes dot products across additions.

 Below, we apply `dot_distribute_rewrite` to a few example graphs. First we create simple test graph:

->>> x_at = at.vector("x")
->>> y_at = at.vector("y")
->>> A_at = at.matrix("A")
->>> test_at = A_at.dot(x_at + y_at)
+>>> x_at = pt.vector("x")
+>>> y_at = pt.vector("y")
+>>> A_at = pt.matrix("A")
+>>> test_at = A_at.dot(x_at + y_at)
 >>> print(pytensor.pprint(test_at))
 (A @ (x + y))

@@ -506,18 +506,18 @@ Next we apply the rewrite to the graph:
 We see that the dot product has been distributed, as desired. Now, let's try a
 few more test cases:

->>> z_at = at.vector("z")
->>> w_at = at.vector("w")
->>> test_at = A_at.dot((x_at + y_at) + (z_at + w_at))
+>>> z_at = pt.vector("z")
+>>> w_at = pt.vector("w")
+>>> test_at = A_at.dot((x_at + y_at) + (z_at + w_at))
 >>> print(pytensor.pprint(test_at))
 (A @ ((x + y) + (z + w)))
 >>> res = rewrite_graph(test_at, include=[], custom_rewrite=dot_distribute_rewrite, clone=False)
 >>> print(pytensor.pprint(res))
 (((A @ x) + (A @ y)) + ((A @ z) + (A @ w)))

->>> B_at = at.matrix("B")
->>> w_at = at.vector("w")
->>> test_at = A_at.dot(x_at + (y_at + B_at.dot(z_at + w_at)))
+>>> B_at = pt.matrix("B")
+>>> w_at = pt.vector("w")
+>>> test_at = A_at.dot(x_at + (y_at + B_at.dot(z_at + w_at)))
 >>> print(pytensor.pprint(test_at))
 (A @ (x + (y + ((B @ z) + (B @ w)))))
 >>> res = rewrite_graph(test_at, include=[], custom_rewrite=dot_distribute_rewrite, clone=False)

doc/extending/graphstructures.rst

Lines changed: 3 additions & 3 deletions
@@ -28,10 +28,10 @@ The following illustrates these elements:

 .. testcode::

-    import pytensor.tensor as at
+    import pytensor.tensor as pt

-    x = at.dmatrix('x')
-    y = at.dmatrix('y')
+    x = pt.dmatrix('x')
+    y = pt.dmatrix('y')
     z = x + y

 **Diagram**
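Since this file documents graph structures, a short hedged follow-up that inspects the graph built above may be useful (names as in the snippet; the inspection lines are not part of the diff):

    import pytensor.tensor as pt

    x = pt.dmatrix('x')
    y = pt.dmatrix('y')
    z = x + y

    print(z.owner)         # the Apply node that produced `z`
    print(z.owner.op)      # the Op that was applied (an elementwise add)
    print(z.owner.inputs)  # [x, y]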

doc/extending/unittest.rst

Lines changed: 6 additions & 6 deletions
@@ -98,13 +98,13 @@ Example:
 .. code-block:: python

     import numpy as np
-    import pytensor.tensor as at
+    import pytensor.tensor as pt


     def test_dot_validity():
-        a = at.dmatrix('a')
-        b = at.dmatrix('b')
-        c = at.dot(a, b)
+        a = pt.dmatrix('a')
+        b = pt.dmatrix('b')
+        c = pt.dot(a, b)

         c_fn = pytensor.function([a, b], [c])

@@ -187,7 +187,7 @@ symbolic variable:

     def test_verify_exprgrad():
         def fun(x,y,z):
-            return (x + at.cos(y)) / (4 * z)**2
+            return (x + pt.cos(y)) / (4 * z)**2

         x_val = np.asarray([[1], [1.1], [1.2]])
         y_val = np.asarray([0.1, 0.2])
@@ -207,7 +207,7 @@ Here is an example showing how to use :func:`verify_grad` on an :class:`Op` inst
         """
         a_val = np.asarray([[0,1,2],[3,4,5]], dtype='float64')
         rng = np.random.default_rng(42)
-        pytensor.gradient.verify_grad(at.Flatten(), [a_val], rng=rng)
+        pytensor.gradient.verify_grad(pt.Flatten(), [a_val], rng=rng)

 .. note::
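For context, a self-contained hedged sketch of `verify_grad` under the new alias (using a plain callable rather than the `Op` instance above):

    import numpy as np
    import pytensor.tensor as pt
    from pytensor.gradient import verify_grad

    rng = np.random.default_rng(42)
    a_val = rng.normal(size=(2, 3))

    # numerically checks the gradient of the expression built by the callable
    verify_grad(lambda a: pt.sum(pt.cos(a)), [a_val], rng=rng)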

doc/library/compile/io.rst

Lines changed: 1 addition & 1 deletion
@@ -80,7 +80,7 @@ A non-None `value` argument makes an In() instance an optional parameter
 of the compiled function. For example, in the following code we are
 defining an arity-2 function ``inc``.

->>> import pytensor.tensor as at
+>>> import pytensor.tensor as pt
 >>> from pytensor import function
 >>> from pytensor.compile.io import In
 >>> u, x, s = pt.scalars('u', 'x', 's')
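The diff stops before the part of the example that actually uses `In`; a minimal hedged sketch of the idea (default values chosen here only for illustration) is:

    >>> f = function([u, In(x, value=3)], u + x)
    >>> float(f(1))      # `x` falls back to its default value of 3
    4.0
    >>> float(f(1, 10))  # the default can still be overridden positionally
    11.0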

doc/library/compile/nanguardmode.rst

Lines changed: 3 additions & 3 deletions
@@ -25,12 +25,12 @@ of abnormal values: NaNs, Infs, and abnormally big values.

     import numpy as np
     import pytensor
-    import pytensor.tensor as at
+    import pytensor.tensor as pt
     from pytensor.compile.nanguardmode import NanGuardMode

-    x = at.matrix()
+    x = pt.matrix()
     w = pytensor.shared(np.random.standard_normal((5, 7)).astype(pytensor.config.floatX))
-    y = at.dot(x, w)
+    y = pt.dot(x, w)
     fun = pytensor.function(
         [x], y,
         mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True)
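A hedged usage sketch of the `fun` compiled above (assuming NanGuardMode's default behaviour of raising as soon as an abnormal value is seen):

    import numpy as np

    infs = np.full((3, 5), np.inf, dtype=pytensor.config.floatX)
    try:
        fun(infs)
    except Exception as err:  # NanGuardMode aborts on the Infs flowing into the dot
        print("NanGuardMode flagged the input:", type(err).__name__)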

doc/library/d3viz/index.ipynb

Lines changed: 1 addition & 1 deletion
@@ -71,7 +71,7 @@
    "outputs": [],
    "source": [
     "import pytensor\n",
-    "import pytensor.tensor as at\n",
+    "import pytensor.tensor as pt\n",
     "import numpy as np"
    ]
   },

doc/library/scan.rst

Lines changed: 2 additions & 2 deletions
@@ -164,7 +164,7 @@ downcast** of the latter.

     import numpy as np
     import pytensor
-    import pytensor.tensor as at
+    import pytensor.tensor as pt

     up_to = pt.iscalar("up_to")

@@ -257,7 +257,7 @@ the following:
 .. testcode:: scan1

     import pytensor
-    import pytensor.tensor as at
+    import pytensor.tensor as pt
     import numpy as np

     rng = np.random.default_rng(203940)
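Only the imports change here; for orientation, a minimal hedged `scan` sketch in the new style (a cumulative sum, loosely following the surrounding doc example) might read:

    import numpy as np
    import pytensor
    import pytensor.tensor as pt

    up_to = pt.iscalar("up_to")
    seq = pt.arange(up_to)

    # accumulate a running sum over `seq`
    outputs, _ = pytensor.scan(
        fn=lambda t, acc: acc + t,
        sequences=seq,
        outputs_info=pt.as_tensor_variable(np.asarray(0, seq.dtype)),
    )
    triangular = pytensor.function([up_to], outputs[-1])
    print(triangular(5))  # 0 + 1 + 2 + 3 + 4 = 10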

doc/library/tensor/basic.rst

Lines changed: 5 additions & 5 deletions
@@ -10,7 +10,7 @@ Basic Tensor Functionality

     import numpy as np
     import pytensor
-    import pytensor.tensor as at
+    import pytensor.tensor as pt
     from pytensor.tensor.type import scalar, iscalar, TensorType, dmatrix, ivector, fmatrix
     from pytensor.tensor import set_subtensor, inc_subtensor, batched_dot
     from pytensor import shared
@@ -1220,7 +1220,7 @@ Casting

 .. testcode:: cast

-    import pytensor.tensor as at
+    import pytensor.tensor as pt
     x = pt.matrix()
     x_as_int = pt.cast(x, 'int32')

@@ -1256,7 +1256,7 @@ The six usual equality and inequality operators share the same interface.

 .. testcode:: oper

-    import pytensor.tensor as at
+    import pytensor.tensor as pt
     x,y = pt.dmatrices('x','y')
     z = pt.le(x,y)

@@ -1349,7 +1349,7 @@ Condition

 .. testcode:: switch

-    import pytensor.tensor as at
+    import pytensor.tensor as pt
     a,b = pt.dmatrices('a','b')
     x,y = pt.dmatrices('x','y')
     z = pt.switch(pt.lt(a,b), x, y)
@@ -1420,7 +1420,7 @@ Here is an example using the bit-wise ``and_`` via the ``&`` operator:

 .. testcode:: bitwise

-    import pytensor.tensor as at
+    import pytensor.tensor as pt
     x,y = pt.imatrices('x','y')
     z = x & y
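Pulling a few of the snippets above into one small runnable example under the new alias (a hedged sketch, not taken from the page):

    import numpy as np
    import pytensor
    import pytensor.tensor as pt

    a, b = pt.dmatrices('a', 'b')
    smaller = pt.switch(pt.lt(a, b), a, b)  # elementwise minimum via switch
    as_int = pt.cast(smaller, 'int32')      # cast the result, as in the Casting snippet

    f = pytensor.function([a, b], [smaller, as_int])
    print(f(np.eye(2), np.ones((2, 2))))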

pytensor/breakpoint.py

Lines changed: 4 additions & 4 deletions
@@ -36,11 +36,11 @@ class PdbBreakpoint(Op):
    .. code-block:: python

        import pytensor
-       import pytensor.tensor as at
+       import pytensor.tensor as pt
        from pytensor.breakpoint import PdbBreakpoint

-       input = at.fvector()
-       target = at.fvector()
+       input = pt.fvector()
+       target = pt.fvector()

        # Mean squared error between input and target
        mse = (input - target) ** 2
@@ -49,7 +49,7 @@ class PdbBreakpoint(Op):
        # than 100. The breakpoint will monitor the inputs, targets as well
        # as the individual error values
        breakpointOp = PdbBreakpoint("MSE too high")
-       condition = at.gt(mse.sum(), 100)
+       condition = pt.gt(mse.sum(), 100)
        mse, monitored_input, monitored_target = breakpointOp(condition, mse,
                                                              input, target)

pytensor/compile/builders.py

Lines changed: 5 additions & 5 deletions
@@ -5,7 +5,7 @@
 from functools import partial
 from typing import Optional, cast

-import pytensor.tensor as at
+import pytensor.tensor as pt
 from pytensor import function
 from pytensor.compile.function.pfunc import rebuild_collect_shared
 from pytensor.compile.mode import optdb
@@ -207,7 +207,7 @@ class OpFromGraph(Op, HasInnerGraph):

        from pytensor import function, tensor as at
        from pytensor.compile.builders import OpFromGraph
-       x, y, z = at.scalars('xyz')
+       x, y, z = pt.scalars('xyz')
        e = x + y * z
        op = OpFromGraph([x, y, z], [e])
        # op behaves like a normal pytensor op
@@ -223,7 +223,7 @@ class OpFromGraph(Op, HasInnerGraph):
        from pytensor import config, function, tensor as at
        from pytensor.compile.builders import OpFromGraph

-       x, y, z = at.scalars('xyz')
+       x, y, z = pt.scalars('xyz')
        s = pytensor.shared(np.random.random((2, 2)).astype(config.floatX))
        e = x + y * z + s
        op = OpFromGraph([x, y, z], [e])
@@ -238,7 +238,7 @@ class OpFromGraph(Op, HasInnerGraph):
        from pytensor import function, tensor as at, grad
        from pytensor.compile.builders import OpFromGraph

-       x, y, z = at.scalars('xyz')
+       x, y, z = pt.scalars('xyz')
        e = x + y * z
        def rescale_dy(inps, grads):
            x, y, z = inps
@@ -289,7 +289,7 @@ def _filter_grad_var(grad, inp):
         if hasattr(inp, "zeros_like"):
             return inp.zeros_like(), grad
         else:
-            return at.constant(0.0), grad
+            return pt.constant(0.0), grad
     else:
         return grad, None
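As a self-contained, hedged sketch of the `OpFromGraph` usage shown in the first docstring hunk, written entirely with the new `pt` alias:

    import pytensor
    import pytensor.tensor as pt
    from pytensor.compile.builders import OpFromGraph

    x, y, z = pt.scalars('x', 'y', 'z')
    e = x + y * z
    op = OpFromGraph([x, y, z], [e])

    # `op` behaves like a normal PyTensor Op
    out = op(x, y, z)
    fn = pytensor.function([x, y, z], out)
    print(fn(1.0, 2.0, 3.0))  # 1 + 2 * 3 = 7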

pytensor/graph/basic.py

Lines changed: 6 additions & 6 deletions
@@ -383,10 +383,10 @@ class Variable(Node, Generic[_TypeType, OptionalApplyType]):
    .. code-block:: python

        import pytensor
-       import pytensor.tensor as at
+       import pytensor.tensor as pt

-       a = at.constant(1.5)    # declare a symbolic constant
-       b = at.fscalar()        # declare a symbolic floating-point scalar
+       a = pt.constant(1.5)    # declare a symbolic constant
+       b = pt.fscalar()        # declare a symbolic floating-point scalar

        c = a + b               # create a simple expression

@@ -565,9 +565,9 @@ def eval(self, inputs_to_values=None):
        --------

        >>> import numpy as np
-       >>> import pytensor.tensor as at
-       >>> x = at.dscalar('x')
-       >>> y = at.dscalar('y')
+       >>> import pytensor.tensor as pt
+       >>> x = pt.dscalar('x')
+       >>> y = pt.dscalar('y')
        >>> z = x + y
        >>> np.allclose(z.eval({x : 16.3, y : 12.1}), 28.4)
        True

pytensor/graph/rewriting/kanren.py

Lines changed: 4 additions & 4 deletions
@@ -24,15 +24,15 @@ class KanrenRelationSub(NodeRewriter):

        from kanren import eq, conso, var

-       import pytensor.tensor as at
+       import pytensor.tensor as pt
        from pytensor.graph.rewriting.kanren import KanrenRelationSub


        def relation(in_lv, out_lv):
-           # A `kanren` goal that changes `at.log` terms to `at.exp`
+           # A `kanren` goal that changes `pt.log` terms to `pt.exp`
            cdr_lv = var()
-           return eq(conso(at.log, cdr_lv, in_lv),
-                     conso(at.exp, cdr_lv, out_lv))
+           return eq(conso(pt.log, cdr_lv, in_lv),
+                     conso(pt.exp, cdr_lv, out_lv))


        kanren_sub_opt = KanrenRelationSub(relation)
