
Commit 99d5ec4

More renaming
1 parent d913b96 commit 99d5ec4

54 files changed: +971 −973 lines (large commits hide some file diffs by default, so only part of the changeset appears below)
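This commit continues the project-wide rename of the PyTensor tensor-module alias from `at` (carried over from the Aesara fork's `aesara.tensor as at`) to `pt`; `checkpoints.py` additionally switches `pytensor.tensor.basic` to `ptb`. For orientation, a minimal before/after sketch (mine, not part of the commit):

    # old convention, being removed:
    #   from pytensor import tensor as at
    #   y = at.sum(x ** 2)

    # new convention, applied throughout this commit:
    from pytensor import tensor as pt

    x = pt.dvector("x")
    y = pt.sum(x ** 2)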

doc/extending/extending_pytensor_solution_1.py

Lines changed: 13 additions & 17 deletions
@@ -14,8 +14,8 @@
 
 class ProdOp(Op):
     def make_node(self, x, y):
-        x = at.as_tensor_variable(x)
-        y = at.as_tensor_variable(y)
+        x = pt.as_tensor_variable(x)
+        y = pt.as_tensor_variable(y)
         outdim = x.type.ndim
         output = TensorType(
             dtype=pytensor.scalar.upcast(x.dtype, y.dtype), shape=(None,) * outdim
@@ -39,8 +39,8 @@ def grad(self, inputs, output_grads):
 
 class SumDiffOp(Op):
     def make_node(self, x, y):
-        x = at.as_tensor_variable(x)
-        y = at.as_tensor_variable(y)
+        x = pt.as_tensor_variable(x)
+        y = pt.as_tensor_variable(y)
         outdim = x.type.ndim
         output1 = TensorType(
             dtype=pytensor.scalar.upcast(x.dtype, y.dtype), shape=(None,) * outdim
@@ -62,20 +62,16 @@ def infer_shape(self, fgraph, node, i0_shapes):
     def grad(self, inputs, output_grads):
         og1, og2 = output_grads
         if og1 is None:
-            og1 = at.zeros_like(og2)
+            og1 = pt.zeros_like(og2)
         if og2 is None:
-            og2 = at.zeros_like(og1)
+            og2 = pt.zeros_like(og1)
         return [og1 + og2, og1 - og2]


 # 3. Testing apparatus
-
-import numpy as np
-
 from tests import unittest_tools as utt
-from pytensor import tensor as at
+from pytensor import tensor as pt
 from pytensor.graph.basic import Apply
-from pytensor.graph.op import Op
 from pytensor.tensor.type import dmatrix, matrix


@@ -182,8 +178,8 @@ def infer_shape_numpy_dot(fgraph, node, input_shapes):


 @as_op(
-    itypes=[at.fmatrix, at.fmatrix],
-    otypes=[at.fmatrix],
+    itypes=[pt.fmatrix, pt.fmatrix],
+    otypes=[pt.fmatrix],
     infer_shape=infer_shape_numpy_dot,
 )
 def numpy_add(a, b):
@@ -197,17 +193,17 @@ def infer_shape_numpy_add_sub(fgraph, node, input_shapes):


 @as_op(
-    itypes=[at.fmatrix, at.fmatrix],
-    otypes=[at.fmatrix],
+    itypes=[pt.fmatrix, pt.fmatrix],
+    otypes=[pt.fmatrix],
     infer_shape=infer_shape_numpy_add_sub,
 )
 def numpy_add(a, b):
     return np.add(a, b)


 @as_op(
-    itypes=[at.fmatrix, at.fmatrix],
-    otypes=[at.fmatrix],
+    itypes=[pt.fmatrix, pt.fmatrix],
+    otypes=[pt.fmatrix],
     infer_shape=infer_shape_numpy_add_sub,
 )
 def numpy_sub(a, b):
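For context, a usage sketch (mine) of the renamed solution code; it assumes `ProdOp.perform` multiplies its inputs elementwise, as in the extending tutorial this file solves:

    import numpy as np
    import pytensor
    from pytensor import tensor as pt

    x = pt.dmatrix("x")
    y = pt.dmatrix("y")
    z = ProdOp()(x, y)            # apply the custom Op defined above
    f = pytensor.function([x, y], z)
    a = np.array([[1.0, 2.0], [3.0, 4.0]])
    print(f(a, a))                # elementwise product: [[1, 4], [9, 16]]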

doc/extending/tips.rst

Lines changed: 2 additions & 2 deletions
@@ -20,10 +20,10 @@ simple function:
 
 .. code::
 
-    from pytensor import tensor as at
+    from pytensor import tensor as pt
 
     def sum_square_difference(a, b):
-        return at.sum((a - b)**2)
+        return pt.sum((a - b)**2)
 
 Even without taking PyTensor's rewrites into account, it is likely
 to work just as well as a custom implementation. It also supports all
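A quick check (mine, not from the docs) that the renamed snippet compiles and runs:

    import numpy as np
    import pytensor
    from pytensor import tensor as pt

    def sum_square_difference(a, b):
        return pt.sum((a - b) ** 2)

    a, b = pt.dmatrices("a", "b")
    f = pytensor.function([a, b], sum_square_difference(a, b))
    print(f(np.ones((2, 2)), np.zeros((2, 2))))  # 4.0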

doc/introduction.rst

Lines changed: 1 addition & 1 deletion
@@ -66,7 +66,7 @@ its features, but it illustrates concretely what PyTensor is.
 .. code-block:: python
 
     import pytensor
-    from pytensor import tensor as at
+    from pytensor import tensor as pt
 
     # declare two symbolic floating-point scalars
     a = pt.dscalar()
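The hunk cuts off mid-example; the rest of the introduction snippet, under the new alias, runs like this (a sketch following the surrounding tutorial):

    import pytensor
    from pytensor import tensor as pt

    # declare two symbolic floating-point scalars
    a = pt.dscalar()
    b = pt.dscalar()

    # create a simple expression and compile it into a callable
    c = a + b
    f = pytensor.function([a, b], c)
    assert f(1.5, 2.5) == 4.0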

doc/library/compile/debugmode.rst

Lines changed: 2 additions & 2 deletions
@@ -28,10 +28,10 @@ a cluster.
 .. testcode::
 
     import pytensor
-    from pytensor import tensor as at
+    from pytensor import tensor as pt
     from pytensor.compile.debugmode import DebugMode
 
-    x = at.dscalar('x')
+    x = pt.dscalar('x')
 
     f = pytensor.function([x], 10*x, mode='DebugMode')
 
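Equivalently (a sketch, assuming DebugMode's default constructor arguments), the mode can be passed as an instance rather than as the string name:

    import pytensor
    from pytensor import tensor as pt
    from pytensor.compile.debugmode import DebugMode

    x = pt.dscalar('x')
    f = pytensor.function([x], 10 * x, mode=DebugMode())
    print(f(5.0))  # 50.0, after DebugMode's extra self-checks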

doc/library/compile/io.rst

Lines changed: 2 additions & 2 deletions
@@ -183,7 +183,7 @@ method to access values by indexing a Function directly by typing
 To show some examples of these access methods...
 
 
->>> from pytensor import tensor as at, function
+>>> from pytensor import tensor as pt, function
 >>> a, b, c = pt.scalars('xys') # set the internal names of graph nodes
 >>> # Note that the name of c is 's', not 'c'!
 >>> fn = function([a, b, ((c, c+a+b), 10.0)], [])
@@ -236,7 +236,7 @@ Every element of the inputs list will be upgraded to an In instance if necessary
 Example:
 
 >>> import pytensor
->>> from pytensor import tensor as at
+>>> from pytensor import tensor as pt
 >>> from pytensor.compile.io import In
 >>> x = pt.scalar()
 >>> y = pt.scalar('y')
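A hypothetical continuation (mine) of the second example: `In(y, value=1)` gives `y` a default value, so the compiled function can be called with `x` alone:

    fn = pytensor.function([x, In(y, value=1)], x + y)
    print(fn(2))  # falls back to the default y=1, printing 3.0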

doc/library/printing.rst

Lines changed: 1 addition & 1 deletion
@@ -52,7 +52,7 @@ PyTensor also provides :func:`pytensor.printing.pydotprint` that creates a png i
 1) The first is :func:`pytensor.pp`.
 
 >>> from pytensor import pp, grad
->>> from pytensor import tensor as at
+>>> from pytensor import tensor as pt
 >>> x = pt.dscalar('x')
 >>> y = x ** 2
 >>> gy = grad(y, x)
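Completing the example (a sketch, mine): `pp` renders the symbolic gradient graph as text; the exact string depends on the version:

    s = pp(gy)
    print(s)  # e.g. '((fill((x ** 2.0), 1.0) * 2.0) * (x ** (2.0 - 1.0)))'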

doc/library/scan.rst

Lines changed: 1 addition & 1 deletion
@@ -460,7 +460,7 @@ construct a function that computes one iteration step :
 .. testsetup:: scan3
 
     import pytensor
-    from pytensor import tensor as at
+    from pytensor import tensor as pt
 
 .. testcode:: scan3
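For context, the canonical scan example from this chapter under the new alias (a sketch of the repeated-multiplication "power" example):

    import pytensor
    from pytensor import tensor as pt

    A = pt.vector("A")
    k = pt.iscalar("k")

    # multiply A into the running result k times
    result, updates = pytensor.scan(
        fn=lambda prior_result, A: prior_result * A,
        outputs_info=pt.ones_like(A),
        non_sequences=A,
        n_steps=k,
    )
    power = pytensor.function([A, k], result[-1], updates=updates)
    print(power([2.0, 3.0], 3))  # [ 8. 27.]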

doc/sandbox/logistic_regression_example.rst

Lines changed: 10 additions & 10 deletions
@@ -13,25 +13,25 @@ BUT, YOU GOTTA RUN THIS CODE AND MAKE SURE IT STILL WORKS NICELY, HEY?
 def build_logistic_regression_model(n_in, n_out, l2_coef=30.0)
     # DECLARE SOME VARIABLES
 
-    import tensor as at
+    import pytensor.tensor as pt
 
-    x = at.matrix() #our points, one point per row
-    y = at.matrix() #store our labels as place codes (label 3 of 5 is vector [00100])
+    x = pt.matrix() #our points, one point per row
+    y = pt.matrix() #store our labels as place codes (label 3 of 5 is vector [00100])
 
-    w = at.matrix() #the linear transform to apply to our input points
-    b = at.vector() #a vector of biases, which make our transform affine instead of linear
+    w = pt.matrix() #the linear transform to apply to our input points
+    b = pt.vector() #a vector of biases, which make our transform affine instead of linear
 
-    stepsize = at.scalar('stepsize') # a stepsize for gradient descent
+    stepsize = pt.scalar('stepsize') # a stepsize for gradient descent
 
     # REGRESSION MODEL AND COSTS TO MINIMIZE
 
-    prediction = at.softmax(at.dot(x, w) + b)
-    cross_entropy = at.sum(y * at.log(prediction), axis=1)
-    cost = at.sum(cross_entropy) + l2_coef * at.sum(at.sum(w*w))
+    prediction = pt.softmax(pt.dot(x, w) + b)
+    cross_entropy = pt.sum(y * pt.log(prediction), axis=1)
+    cost = pt.sum(cross_entropy) + l2_coef * pt.sum(pt.sum(w*w))
 
     # GET THE GRADIENTS NECESSARY TO FIT OUR PARAMETERS
 
-    grad_w, grad_b = at.grad(cost, [w, b])
+    grad_w, grad_b = pt.grad(cost, [w, b])
 
     #
     # GET THE GRADIENTS NECESSARY TO FIT OUR PARAMETERS
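The page itself warns this sandbox code may be stale: as written, the `def` line is missing its colon, and the cross-entropy should be negated to give a cost worth minimizing. A corrected sketch (mine; `pytensor.tensor.special.softmax` and the minus sign are my assumptions about the intent):

    import pytensor.tensor as pt
    from pytensor.tensor.special import softmax

    x = pt.matrix("x")   # one point per row
    y = pt.matrix("y")   # labels as place codes
    w = pt.matrix("w")
    b = pt.vector("b")

    prediction = softmax(pt.dot(x, w) + b, axis=1)
    cross_entropy = -pt.sum(y * pt.log(prediction), axis=1)  # note the minus sign
    cost = pt.sum(cross_entropy) + 30.0 * pt.sum(w * w)
    grad_w, grad_b = pt.grad(cost, [w, b])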

doc/tutorial/conditions.rst

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@ IfElse vs Switch
 
 .. testcode::
 
-    from pytensor import tensor as at
+    from pytensor import tensor as pt
     from pytensor.ifelse import ifelse
     import pytensor, time, numpy
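The comparison this testcode sets up, as a standalone sketch (mine, following the tutorial): `switch` is elementwise and builds both branches into the graph, while `ifelse` is lazy and evaluates only the branch taken:

    import numpy as np
    import pytensor
    from pytensor import tensor as pt
    from pytensor.ifelse import ifelse

    a, b = pt.scalars("a", "b")
    x, y = pt.matrices("x", "y")

    z_switch = pt.switch(pt.lt(a, b), pt.mean(x), pt.mean(y))  # both branches built
    z_lazy = ifelse(pt.lt(a, b), pt.mean(x), pt.mean(y))       # one branch runs

    f = pytensor.function([a, b, x, y], [z_switch, z_lazy])
    print(f(1, 2, np.ones((2, 2)), np.zeros((2, 2))))  # [1.0, 1.0]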

pytensor/compile/builders.py

Lines changed: 3 additions & 3 deletions
@@ -205,7 +205,7 @@ class OpFromGraph(Op, HasInnerGraph):
 
     .. code-block:: python
 
-        from pytensor import function, tensor as at
+        from pytensor import function, tensor as pt
         from pytensor.compile.builders import OpFromGraph
         x, y, z = pt.scalars('xyz')
         e = x + y * z
@@ -220,7 +220,7 @@ class OpFromGraph(Op, HasInnerGraph):
 
         import numpy as np
         import pytensor
-        from pytensor import config, function, tensor as at
+        from pytensor import config, function, tensor as pt
         from pytensor.compile.builders import OpFromGraph
 
         x, y, z = pt.scalars('xyz')
@@ -235,7 +235,7 @@ class OpFromGraph(Op, HasInnerGraph):
 
     .. code-block:: python
 
-        from pytensor import function, tensor as at, grad
+        from pytensor import function, tensor as pt, grad
         from pytensor.compile.builders import OpFromGraph
 
         x, y, z = pt.scalars('xyz')
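Putting the first docstring example to work, a runnable sketch (mine):

    from pytensor import function, tensor as pt
    from pytensor.compile.builders import OpFromGraph

    x, y, z = pt.scalars('xyz')
    e = x + y * z
    op = OpFromGraph([x, y, z], [e])  # wrap the graph as a reusable Op
    e2 = op(x, y, z) + op(z, y, x)    # apply it like any built-in Op
    fn = function([x, y, z], e2)
    print(fn(1.0, 2.0, 3.0))          # (1 + 2*3) + (3 + 2*1) = 12.0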

pytensor/scan/checkpoints.py

Lines changed: 5 additions & 5 deletions
@@ -1,4 +1,4 @@
-import pytensor.tensor.basic as at
+import pytensor.tensor.basic as ptb
 from pytensor.scan.basic import scan
 from pytensor.tensor.basic import Join
 from pytensor.tensor.math import ceil, eq
@@ -117,12 +117,12 @@ def scan_checkpoints(
     n_steps = sequences[0].shape[0]
 
     # Compute the number of steps of the outer scan
-    o_n_steps = at.cast(ceil(n_steps / save_every_N), "int64")
+    o_n_steps = ptb.cast(ceil(n_steps / save_every_N), "int64")
 
     # Compute the number of steps of the inner scan
-    i_n_steps = save_every_N * at.ones((o_n_steps,), "int64")
+    i_n_steps = save_every_N * ptb.ones((o_n_steps,), "int64")
     mod = n_steps % save_every_N
-    last_n_steps = at.switch(eq(mod, 0), save_every_N, mod)
+    last_n_steps = ptb.switch(eq(mod, 0), save_every_N, mod)
     i_n_steps = set_subtensor(i_n_steps[-1], last_n_steps)
 
     # Pad the sequences if needed
@@ -131,7 +131,7 @@ def scan_checkpoints(
     join = Join(view=0)
     for i, s in enumerate(sequences):
         n = s.shape[0] % save_every_N
-        z = at.zeros((n, s.shape[1:]), dtype=s.dtype)
+        z = ptb.zeros((n, s.shape[1:]), dtype=s.dtype)
         sequences[i] = join(0, [s, z])
 
     # Establish the input variables of the outer scan
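For context, a usage sketch (mine) of the function this file implements; it assumes the scan-like signature documented on `scan_checkpoints`:

    import pytensor
    import pytensor.tensor as pt
    from pytensor.scan.checkpoints import scan_checkpoints

    seq = pt.vector("seq")
    # accumulate seq, but keep intermediate results for every 10th step only
    out, updates = scan_checkpoints(
        fn=lambda s, acc: acc + s,
        sequences=[seq],
        outputs_info=pt.constant(0.0),
        save_every_N=10,
    )
    f = pytensor.function([seq], out, updates=updates)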

pytensor/scan/op.py

Lines changed: 17 additions & 17 deletions
@@ -55,7 +55,7 @@
 
 import pytensor
 import pytensor.link.utils as link_utils
-from pytensor import tensor as at
+from pytensor import tensor as pt
 from pytensor.compile.builders import construct_nominal_fgraph, infer_shape
 from pytensor.compile.function.pfunc import pfunc
 from pytensor.compile.io import In, Out
@@ -2568,7 +2568,7 @@ def compute_all_gradients(known_grads):
         # mask inputs that get no gradients
         for dx in range(len(dC_dinps_t)):
             if not dC_dinps_t[dx]:
-                dC_dinps_t[dx] = at.zeros_like(diff_inputs[dx])
+                dC_dinps_t[dx] = pt.zeros_like(diff_inputs[dx])
             else:
                 disconnected_dC_dinps_t[dx] = False
         for Xt, Xt_placeholder in zip(diff_outputs[info.n_mit_mot_outs :], Xts):
@@ -2696,7 +2696,7 @@ def compute_all_gradients(known_grads):
         for idx, taps in enumerate(info.mit_mot_in_slices):
             if isinstance(dC_douts[idx].type, DisconnectedType):
                 out = outs[idx]
-                outer_inp_mitmot.append(at.zeros_like(out))
+                outer_inp_mitmot.append(pt.zeros_like(out))
             else:
                 outer_inp_mitmot.append(dC_douts[idx][::-1])
             mitmot_inp_taps.append([])
@@ -2723,7 +2723,7 @@ def compute_all_gradients(known_grads):
                     # We cannot use Null in the inner graph, so we
                     # use a zero tensor of the appropriate shape instead.
                     inner_out_mitmot.append(
-                        at.zeros(diff_inputs[ins_pos].shape, dtype=config.floatX)
+                        pt.zeros(diff_inputs[ins_pos].shape, dtype=config.floatX)
                     )
                     undefined_msg = dC_dinps_t[ins_pos].type.why_null
                 else:
@@ -2792,7 +2792,7 @@ def compute_all_gradients(known_grads):
                     # We cannot use Null in the inner graph, so we
                     # use a zero tensor of the appropriate shape instead.
                     inner_out_mitmot.append(
-                        at.zeros(diff_inputs[ins_pos].shape, dtype=config.floatX)
+                        pt.zeros(diff_inputs[ins_pos].shape, dtype=config.floatX)
                    )
                     undefined_msg = dC_dinps_t[ins_pos].type.why_null
                 else:
@@ -2834,11 +2834,11 @@ def compute_all_gradients(known_grads):
                     # floatX instead, as it is a dummy value that will not
                     # be used anyway.
                     outer_inp_mitmot.append(
-                        at.zeros(outs[idx + offset].shape, dtype=config.floatX)
+                        pt.zeros(outs[idx + offset].shape, dtype=config.floatX)
                     )
                 else:
                     outer_inp_mitmot.append(
-                        at.zeros(
+                        pt.zeros(
                             outs[idx + offset].shape, dtype=dC_dinps_t[ins_pos].dtype
                         )
                     )
@@ -2847,7 +2847,7 @@ def compute_all_gradients(known_grads):
                     # We cannot use Null in the inner graph, so we
                     # use a zero tensor of the appropriate shape instead.
                     inner_out_mitmot.append(
-                        at.zeros(diff_inputs[ins_pos].shape, dtype=config.floatX)
+                        pt.zeros(diff_inputs[ins_pos].shape, dtype=config.floatX)
                     )
                 else:
                     inner_out_mitmot.append(dC_dinps_t[ins_pos])
@@ -2887,7 +2887,7 @@ def compute_all_gradients(known_grads):
                 type_outs.append(vl.type.why_null)
                 # Replace the inner output with a zero tensor of
                 # the right shape
-                inner_out_sitsot[_p] = at.zeros(
+                inner_out_sitsot[_p] = pt.zeros(
                     diff_inputs[ins_pos + _p].shape, dtype=config.floatX
                 )
             elif through_shared:
@@ -2906,7 +2906,7 @@ def compute_all_gradients(known_grads):
                 type_outs.append(vl.type.why_null)
                 # Replace the inner output with a zero tensor of
                 # the right shape
-                inner_out_nitsot[_p] = at.zeros(
+                inner_out_nitsot[_p] = pt.zeros(
                     diff_inputs[_p].shape, dtype=config.floatX
                 )
 
@@ -2924,19 +2924,19 @@ def compute_all_gradients(known_grads):
             if isinstance(y.type, NullType):
                 # Cannot use dC_dXtm1s.dtype, so we use floatX instead.
                 outer_inp_sitsot.append(
-                    at.zeros(
+                    pt.zeros(
                         [grad_steps + 1] + [x.shape[i] for i in range(x.ndim)],
                         dtype=config.floatX,
                     )
                 )
                 # replace y by a zero tensor of the right shape
-                inner_inp_sitsot[_idx] = at.zeros(
+                inner_inp_sitsot[_idx] = pt.zeros(
                     diff_inputs[ins_pos + _idx].shape, dtype=config.floatX
                 )
 
             else:
                 outer_inp_sitsot.append(
-                    at.zeros(
+                    pt.zeros(
                         [grad_steps + 1] + [x.shape[i] for i in range(x.ndim)],
                         dtype=y.dtype,
                     )
@@ -3008,8 +3008,8 @@ def compute_all_gradients(known_grads):
                 shp = (n_zeros,)
                 if x.ndim > 1:
                     shp = shp + tuple(x.shape[i] for i in range(1, x.ndim))
-                z = at.zeros(shp, dtype=x.dtype)
-                x = at.concatenate([x[::-1], z], axis=0)
+                z = pt.zeros(shp, dtype=x.dtype)
+                x = pt.concatenate([x[::-1], z], axis=0)
                 gradients.append(x)
             else:
                 gradients.append(x[::-1])
@@ -3036,8 +3036,8 @@ def compute_all_gradients(known_grads):
                 shp = (n_zeros,)
                 if x.ndim > 1:
                     shp = shp + tuple(x.shape[i] for i in range(1, x.ndim))
-                z = at.zeros(shp, dtype=x.dtype)
-                x = at.concatenate([x[::-1], z], axis=0)
+                z = pt.zeros(shp, dtype=x.dtype)
+                x = pt.concatenate([x[::-1], z], axis=0)
                 gradients.append(x)
             else:
                 gradients.append(x[::-1])
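The last two hunks share one idiom: reverse a gradient sequence in time, then zero-pad it to the expected length. A standalone sketch (mine) of that idiom:

    import pytensor.tensor as pt

    x = pt.matrix("x")
    n_zeros = 3  # stand-in for the symbolic count in the real code

    shp = (n_zeros,) + tuple(x.shape[i] for i in range(1, x.ndim))
    z = pt.zeros(shp, dtype=x.dtype)
    padded = pt.concatenate([x[::-1], z], axis=0)  # time-reversed, zero-padded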
