Skip to content

Commit d3dd34e

Browse files
Armavica authored and ricardoV94 committed
Manual simplification of RUF005 fixes
1 parent 8aeda39 commit d3dd34e

30 files changed

+75
-81
lines changed

pytensor/breakpoint.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,7 @@ def make_node(self, condition, *monitored_vars):
9292
new_op.inp_types.append(monitored_vars[i].type)
9393

9494
# Build the Apply node
95-
inputs = [condition, *list(monitored_vars)]
95+
inputs = [condition, *monitored_vars]
9696
outputs = [inp.type() for inp in monitored_vars]
9797
return Apply(op=new_op, inputs=inputs, outputs=outputs)
9898

pytensor/graph/rewriting/basic.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1139,7 +1139,7 @@ def decorator(f):
11391139
if inplace:
11401140
dh_handler = dh.DestroyHandler
11411141
req = (
1142-
*tuple(requirements),
1142+
*requirements,
11431143
lambda fgraph: fgraph.attach_feature(dh_handler()),
11441144
)
11451145
rval = FromFunctionNodeRewriter(f, tracks, req)

pytensor/graph/rewriting/unify.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -283,7 +283,7 @@ def _convert(y):
283283
var_map[pattern] = v
284284
return v
285285
elif isinstance(y, tuple):
286-
return etuple(*tuple(_convert(e) for e in y))
286+
return etuple(*(_convert(e) for e in y))
287287
elif isinstance(y, (Number, np.ndarray)):
288288
from pytensor.tensor import as_tensor_variable
289289

pytensor/ifelse.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -397,7 +397,7 @@ def ifelse(
397397

398398
new_ifelse = IfElse(n_outs=len(then_branch), as_view=False, name=name)
399399

400-
ins = [condition, *list(then_branch), *list(else_branch)]
400+
ins = [condition, *then_branch, *else_branch]
401401
rval = new_ifelse(*ins, return_list=True)
402402

403403
if rval_type is None:

pytensor/link/c/type.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -508,8 +508,8 @@ def __hash__(self):
508508
(
509509
type(self),
510510
self.ctype,
511-
*tuple((k, self[k]) for k in sorted(self.keys())),
512-
*tuple((a, self.aliases[a]) for a in sorted(self.aliases.keys())),
511+
*((k, self[k]) for k in sorted(self.keys())),
512+
*((a, self.aliases[a]) for a in sorted(self.aliases.keys())),
513513
)
514514
)
515515

pytensor/link/numba/dispatch/elemwise.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -447,7 +447,7 @@ def jit_compile_reducer(
447447
def create_axis_apply_fn(fn, axis, ndim, dtype):
448448
axis = normalize_axis_index(axis, ndim)
449449

450-
reaxis_first = (*tuple(i for i in range(ndim) if i != axis), axis)
450+
reaxis_first = (*(i for i in range(ndim) if i != axis), axis)
451451

452452
@numba_basic.numba_njit(boundscheck=False)
453453
def axis_apply_fn(x):

pytensor/link/numba/dispatch/extra_ops.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ def numba_funcify_CumOp(op: CumOp, node: Apply, **kwargs):
4444
if axis < 0 or axis >= ndim:
4545
raise ValueError(f"Invalid axis {axis} for array with ndim {ndim}")
4646

47-
reaxis_first = (axis, *tuple(i for i in range(ndim) if i != axis))
47+
reaxis_first = (axis, *(i for i in range(ndim) if i != axis))
4848
reaxis_first_inv = tuple(np.argsort(reaxis_first))
4949

5050
if mode == "add":

pytensor/link/numba/dispatch/random.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -240,7 +240,7 @@ def create_numba_random_fn(
240240
np_global_env["numba_vectorize"] = numba_basic.numba_vectorize
241241

242242
unique_names = unique_name_generator(
243-
[np_random_fn_name, *list(np_global_env.keys()), "rng", "size", "dtype"],
243+
[np_random_fn_name, *np_global_env.keys(), "rng", "size", "dtype"],
244244
suffix_sep="_",
245245
)
246246

pytensor/link/numba/dispatch/scalar.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -115,7 +115,7 @@ def {scalar_op_fn_name}({input_names}):
115115
global_env.update(input_tmp_dtype_names)
116116

117117
unique_names = unique_name_generator(
118-
[scalar_op_fn_name, "scalar_func_numba", *list(global_env.keys())],
118+
[scalar_op_fn_name, "scalar_func_numba", *global_env.keys()],
119119
suffix_sep="_",
120120
)
121121

pytensor/scalar/basic.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -416,7 +416,7 @@ def dtype_specs(self):
416416
)
417417

418418
def upcast(self, *others):
419-
return upcast(*[x.dtype for x in [self, *list(others)]])
419+
return upcast(*[x.dtype for x in [self, *others]])
420420

421421
def make_variable(self, name=None):
422422
return ScalarVariable(self, None, name=name)
@@ -1501,7 +1501,7 @@ def c_code(self, node, name, inputs, outputs, sub):
15011501

15021502
def c_code_cache_version(self):
15031503
scalarop_version = super().c_code_cache_version()
1504-
return (*tuple(scalarop_version), 3)
1504+
return (*scalarop_version, 3)
15051505

15061506

15071507
isnan = IsNan()
@@ -1529,7 +1529,7 @@ def c_code(self, node, name, inputs, outputs, sub):
15291529

15301530
def c_code_cache_version(self):
15311531
scalarop_version = super().c_code_cache_version()
1532-
return (*tuple(scalarop_version), 3)
1532+
return (*scalarop_version, 3)
15331533

15341534

15351535
isinf = IsInf()

pytensor/scan/rewriting.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -203,7 +203,7 @@ def remove_constants_and_unused_inputs_scan(fgraph, node):
203203
allow_gc=op.allow_gc,
204204
)
205205
nw_outs = nwScan(*nw_outer, return_list=True)
206-
return dict([("remove", [node]), *list(zip(node.outputs, nw_outs))])
206+
return dict([("remove", [node]), *zip(node.outputs, nw_outs)])
207207
else:
208208
return False
209209

@@ -1664,7 +1664,7 @@ def save_mem_new_scan(fgraph, node):
16641664
)
16651665
else:
16661666
fslice = sanitize(cnf_slice[0])
1667-
nw_slice = (fslice, *tuple(old_slices[1:]))
1667+
nw_slice = (fslice, *old_slices[1:])
16681668

16691669
nw_pos = inv_compress_map[idx]
16701670

@@ -1711,7 +1711,7 @@ def save_mem_new_scan(fgraph, node):
17111711
sanitize(stop),
17121712
sanitize(cnf_slice[0].step),
17131713
),
1714-
*tuple(old_slices[1:]),
1714+
*old_slices[1:],
17151715
)
17161716

17171717
else:
@@ -1726,7 +1726,7 @@ def save_mem_new_scan(fgraph, node):
17261726
cnf_slice[0] - nw_steps - init_l[pos] + store_steps[pos]
17271727
)
17281728

1729-
nw_slice = (sanitize(position), *tuple(old_slices[1:]))
1729+
nw_slice = (sanitize(position), *old_slices[1:])
17301730
subtens = Subtensor(nw_slice)
17311731
sl_ins = get_slice_elements(
17321732
nw_slice, lambda entry: isinstance(entry, Variable)
@@ -2275,7 +2275,7 @@ def map_out(outer_i, inner_o, outer_o, seen):
22752275
new_outer_out_mit_mot.append(outer_omm)
22762276
na.outer_out_mit_mot = new_outer_out_mit_mot
22772277
if remove:
2278-
return dict([("remove", remove), *list(zip(node.outputs, na.outer_outputs))])
2278+
return dict([("remove", remove), *zip(node.outputs, na.outer_outputs)])
22792279
return na.outer_outputs
22802280

22812281

pytensor/tensor/basic.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2394,7 +2394,7 @@ def make_node(self, axis, *tensors):
23942394
"Only tensors with the same number of dimensions can be joined"
23952395
)
23962396

2397-
inputs = [as_tensor_variable(axis), *list(tensors)]
2397+
inputs = [as_tensor_variable(axis), *tensors]
23982398

23992399
if inputs[0].type.dtype not in int_dtypes:
24002400
raise TypeError(f"Axis value {inputs[0]} must be an integer type")
@@ -2854,7 +2854,7 @@ def flatten(x, ndim=1):
28542854
raise ValueError(f"ndim {ndim} out of bound [1, {_x.ndim + 1})")
28552855

28562856
if ndim > 1:
2857-
dims = (*tuple(_x.shape[: ndim - 1]), -1)
2857+
dims = (*_x.shape[: ndim - 1], -1)
28582858
else:
28592859
dims = (-1,)
28602860

@@ -4217,7 +4217,7 @@ def _make_along_axis_idx(arr_shape, indices, axis):
42174217
raise IndexError("`indices` must be an integer array")
42184218

42194219
shape_ones = (1,) * indices.ndim
4220-
dest_dims = [*list(range(axis)), None, *list(range(axis + 1, indices.ndim))]
4220+
dest_dims = [*range(axis), None, *range(axis + 1, indices.ndim)]
42214221

42224222
# build a fancy index, consisting of orthogonal aranges, with the
42234223
# requested index inserted at the right location

pytensor/tensor/conv/abstract_conv.py

Lines changed: 27 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -1883,23 +1883,23 @@ def frac_bilinear_upsampling(input, frac_ratio):
18831883
pad_kern = pt.concatenate(
18841884
(
18851885
pt.zeros(
1886-
(*tuple(kern.shape[:2]), pad[0], kern.shape[-1]),
1886+
(*kern.shape[:2], pad[0], kern.shape[-1]),
18871887
dtype=config.floatX,
18881888
),
18891889
kern,
18901890
pt.zeros(
1891-
(*tuple(kern.shape[:2]), double_pad[0] - pad[0], kern.shape[-1]),
1891+
(*kern.shape[:2], double_pad[0] - pad[0], kern.shape[-1]),
18921892
dtype=config.floatX,
18931893
),
18941894
),
18951895
axis=2,
18961896
)
18971897
pad_kern = pt.concatenate(
18981898
(
1899-
pt.zeros((*tuple(pad_kern.shape[:3]), pad[1]), dtype=config.floatX),
1899+
pt.zeros((*pad_kern.shape[:3], pad[1]), dtype=config.floatX),
19001900
pad_kern,
19011901
pt.zeros(
1902-
(*tuple(pad_kern.shape[:3]), double_pad[1] - pad[1]),
1902+
(*pad_kern.shape[:3], double_pad[1] - pad[1]),
19031903
dtype=config.floatX,
19041904
),
19051905
),
@@ -2520,7 +2520,7 @@ def perform(self, node, inp, out_):
25202520
(
25212521
img.shape[0],
25222522
img.shape[1],
2523-
*tuple(
2523+
*(
25242524
img.shape[i + 2] + pad[i][0] + pad[i][1]
25252525
for i in range(self.convdim)
25262526
),
@@ -2531,7 +2531,7 @@ def perform(self, node, inp, out_):
25312531
(
25322532
slice(None),
25332533
slice(None),
2534-
*tuple(
2534+
*(
25352535
slice(pad[i][0], img.shape[i + 2] + pad[i][0])
25362536
for i in range(self.convdim)
25372537
),
@@ -2584,8 +2584,8 @@ def perform(self, node, inp, out_):
25842584
axes_order = (
25852585
0,
25862586
1 + self.convdim,
2587-
*tuple(range(1, 1 + self.convdim)),
2588-
*tuple(range(2 + self.convdim, kern.ndim)),
2587+
*range(1, 1 + self.convdim),
2588+
*range(2 + self.convdim, kern.ndim),
25892589
)
25902590
kern = kern.transpose(axes_order)
25912591

@@ -2601,9 +2601,7 @@ def perform(self, node, inp, out_):
26012601
(
26022602
slice(None),
26032603
slice(None),
2604-
*tuple(
2605-
slice(None, None, self.subsample[i]) for i in range(self.convdim)
2606-
),
2604+
*(slice(None, None, self.subsample[i]) for i in range(self.convdim)),
26072605
)
26082606
]
26092607
o[0] = node.outputs[0].type.filter(conv_out)
@@ -2860,7 +2858,7 @@ def perform(self, node, inp, out_):
28602858
(
28612859
img.shape[0],
28622860
img.shape[1],
2863-
*tuple(
2861+
*(
28642862
img.shape[i + 2] + pad[i][0] + pad[i][1]
28652863
for i in range(self.convdim)
28662864
),
@@ -2871,7 +2869,7 @@ def perform(self, node, inp, out_):
28712869
(
28722870
slice(None),
28732871
slice(None),
2874-
*tuple(
2872+
*(
28752873
slice(pad[i][0], img.shape[i + 2] + pad[i][0])
28762874
for i in range(self.convdim)
28772875
),
@@ -2883,32 +2881,30 @@ def perform(self, node, inp, out_):
28832881
new_shape = (
28842882
topgrad.shape[0],
28852883
topgrad.shape[1],
2886-
*tuple(
2887-
img.shape[i + 2] - dil_shape[i] + 1 for i in range(self.convdim)
2888-
),
2884+
*(img.shape[i + 2] - dil_shape[i] + 1 for i in range(self.convdim)),
28892885
)
28902886
new_topgrad = np.zeros((new_shape), dtype=topgrad.dtype)
28912887
new_topgrad[
28922888
(
28932889
slice(None),
28942890
slice(None),
2895-
*tuple(
2891+
*(
28962892
slice(None, None, self.subsample[i])
28972893
for i in range(self.convdim)
28982894
),
28992895
)
29002896
] = topgrad
29012897
topgrad = new_topgrad
29022898

2903-
axes_order = (1, 0, *tuple(range(2, self.convdim + 2)))
2899+
axes_order = (1, 0, *range(2, self.convdim + 2))
29042900
topgrad = topgrad.transpose(axes_order)
29052901
img = img.transpose(axes_order)
29062902

29072903
def correct_for_groups(mat):
29082904
mshp0 = mat.shape[0] // self.num_groups
29092905
mshp1 = mat.shape[1] * self.num_groups
29102906
mat = mat.reshape((self.num_groups, mshp0) + mat.shape[1:])
2911-
mat = mat.transpose((1, 0, 2, *tuple(range(3, 3 + self.convdim))))
2907+
mat = mat.transpose((1, 0, 2, *range(3, 3 + self.convdim)))
29122908
mat = mat.reshape((mshp0, mshp1) + mat.shape[-self.convdim :])
29132909
return mat
29142910

@@ -2941,17 +2937,17 @@ def correct_for_groups(mat):
29412937
# to (nFilters, out_rows, out_cols, nChannels, kH, kW)
29422938
kern_axes = (
29432939
1,
2944-
*tuple(range(2, self.convdim + 2)),
2940+
*range(2, self.convdim + 2),
29452941
0,
2946-
*tuple(range(self.convdim + 2, kern.ndim)),
2942+
*range(self.convdim + 2, kern.ndim),
29472943
)
29482944
else:
29492945
flip_topgrad = flip_kern = (slice(None), slice(None)) + (
29502946
slice(None, None, -1),
29512947
) * self.convdim
29522948
topgrad = topgrad[flip_topgrad]
29532949
kern = self.conv(img, topgrad, mode="valid", num_groups=self.num_groups)
2954-
kern_axes = (1, 0, *tuple(range(2, self.convdim + 2)))
2950+
kern_axes = (1, 0, *range(2, self.convdim + 2))
29552951

29562952
kern = kern.transpose(kern_axes)
29572953

@@ -3249,7 +3245,7 @@ def perform(self, node, inp, out_):
32493245
new_shape = (
32503246
topgrad.shape[0],
32513247
topgrad.shape[1],
3252-
*tuple(
3248+
*(
32533249
shape[i] + pad[i][0] + pad[i][1] - dil_kernshp[i] + 1
32543250
for i in range(self.convdim)
32553251
),
@@ -3259,7 +3255,7 @@ def perform(self, node, inp, out_):
32593255
(
32603256
slice(None),
32613257
slice(None),
3262-
*tuple(
3258+
*(
32633259
slice(None, None, self.subsample[i])
32643260
for i in range(self.convdim)
32653261
),
@@ -3291,9 +3287,9 @@ def correct_for_groups(mat):
32913287
# for 2D -> (1, 2, 3, 0, 4, 5, 6)
32923288
mat = mat.transpose(
32933289
(
3294-
*tuple(range(1, 2 + self.convdim)),
3290+
*range(1, 2 + self.convdim),
32953291
0,
3296-
*tuple(range(2 + self.convdim, mat.ndim)),
3292+
*range(2 + self.convdim, mat.ndim),
32973293
)
32983294
)
32993295
mat = mat.reshape(
@@ -3303,7 +3299,7 @@ def correct_for_groups(mat):
33033299
+ mat.shape[-self.convdim :]
33043300
)
33053301
else:
3306-
mat = mat.transpose((1, 0, 2, *tuple(range(3, 3 + self.convdim))))
3302+
mat = mat.transpose((1, 0, 2, *range(3, 3 + self.convdim)))
33073303
mat = mat.reshape((mshp0, mshp1) + mat.shape[-self.convdim :])
33083304
return mat
33093305

@@ -3315,8 +3311,8 @@ def correct_for_groups(mat):
33153311
axes_order = (
33163312
1 + self.convdim,
33173313
0,
3318-
*tuple(range(1, 1 + self.convdim)),
3319-
*tuple(range(2 + self.convdim, kern.ndim)),
3314+
*range(1, 1 + self.convdim),
3315+
*range(2 + self.convdim, kern.ndim),
33203316
)
33213317
kern = kern.transpose(axes_order)
33223318
if not self.filter_flip:
@@ -3334,7 +3330,7 @@ def correct_for_groups(mat):
33343330
direction="backprop inputs",
33353331
)
33363332
else:
3337-
axes_order = (1, 0, *tuple(range(2, 2 + self.convdim)))
3333+
axes_order = (1, 0, *range(2, 2 + self.convdim))
33383334
kern = kern.transpose(axes_order)
33393335
flip_filters = (slice(None), slice(None)) + (
33403336
slice(None, None, -1),
@@ -3356,7 +3352,7 @@ def correct_for_groups(mat):
33563352
(
33573353
slice(None),
33583354
slice(None),
3359-
*tuple(
3355+
*(
33603356
slice(pad[i][0], img.shape[i + 2] - pad[i][1])
33613357
for i in range(self.convdim)
33623358
),

0 commit comments

Comments (0)