Skip to content

Commit 2fdd67a

Browse files
committed
Apply rule RUF059
1 parent f37ee84 commit 2fdd67a

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

70 files changed

+277
-273
lines changed

pytensor/compile/builders.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -122,7 +122,7 @@ def construct_nominal_fgraph(
122122
(
123123
local_inputs,
124124
local_outputs,
125-
(clone_d, update_d, update_expr, new_shared_inputs),
125+
(_clone_d, update_d, update_expr, new_shared_inputs),
126126
) = new
127127

128128
assert len(local_inputs) == len(inputs) + len(implicit_shared_inputs)

pytensor/compile/function/pfunc.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -591,7 +591,7 @@ def construct_pfunc_ins_and_outs(
591591
clone_inner_graphs=True,
592592
)
593593
input_variables, cloned_extended_outputs, other_stuff = output_vars
594-
clone_d, update_d, update_expr, shared_inputs = other_stuff
594+
clone_d, update_d, _update_expr, shared_inputs = other_stuff
595595

596596
# Recover only the clones of the original outputs
597597
if outputs is None:

pytensor/compile/mode.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -407,7 +407,7 @@ def register(self, *optimizations):
407407
optimizations.
408408
"""
409409

410-
link, opt = self.get_linker_optimizer(
410+
_link, opt = self.get_linker_optimizer(
411411
self.provided_linker, self.provided_optimizer
412412
)
413413
return self.clone(optimizer=opt.register(*optimizations))

pytensor/compile/profiling.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1323,7 +1323,7 @@ def print_stats(stats1, stats2):
13231323
)
13241324
print_stats(stats[1], stats[3])
13251325

1326-
(max_node_memory_size, _, _, _) = stats[0]
1326+
(_max_node_memory_size, _, _, _) = stats[0]
13271327
(new_max_node_memory_size, _, _, _) = stats[2]
13281328
print(
13291329
" Max peak memory if allow_gc=False (linker don't make a difference)",

pytensor/graph/fg.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -559,7 +559,7 @@ def remove_node(self, node: Apply, reason: str | None = None):
559559

560560
out_clients = clients.get(out, ())
561561
while out_clients:
562-
out_client, out_idx = out_clients.pop()
562+
out_client, _out_idx = out_clients.pop()
563563

564564
if isinstance(out_client.op, Output):
565565
self.remove_output(out_client.op.idx, remove_client=False)

pytensor/graph/rewriting/basic.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2827,7 +2827,7 @@ def local_recursive_function(
28272827
else:
28282828
out_index = 0
28292829

2830-
final_outs, rewritten_nodes = local_recursive_function(rewrites, out, {}, 0)
2830+
final_outs, _rewritten_nodes = local_recursive_function(rewrites, out, {}, 0)
28312831
return final_outs[out_index]
28322832

28332833

pytensor/link/basic.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -509,7 +509,7 @@ def make_thunk(self, **kwargs):
509509
kwargs.pop("input_storage", None)
510510
make_all += [x.make_all(**kwargs) for x in self.linkers[1:]]
511511

512-
fns, input_lists, output_lists, thunk_lists, order_lists = zip(
512+
_fns, input_lists, output_lists, thunk_lists, order_lists = zip(
513513
*make_all, strict=True
514514
)
515515

pytensor/link/c/basic.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1738,7 +1738,7 @@ def find_task(self, failure_code):
17381738
def __call__(self):
17391739
failure = self.run_cthunk(self.cthunk)
17401740
if failure:
1741-
task, taskname, id = self.find_task(failure)
1741+
task, _taskname, _id = self.find_task(failure)
17421742
try:
17431743
trace = task.trace
17441744
except AttributeError:
@@ -1980,7 +1980,7 @@ def make_thunk(self, **kwargs):
19801980
.make_all(**kwargs)
19811981
)
19821982
kwargs.pop("input_storage", None)
1983-
_f, i2, o2, thunks2, order2 = (
1983+
_f, i2, _o2, thunks2, order2 = (
19841984
OpWiseCLinker(schedule=self.schedule)
19851985
.accept(fgraph, no_recycling=no_recycling)
19861986
.make_all(**kwargs)

pytensor/link/c/cmodule.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1496,7 +1496,7 @@ def clear_unversioned(self, min_age=None):
14961496
# May happen for broken versioned keys.
14971497
continue
14981498
for key_idx, key in enumerate(key_data.keys):
1499-
version, rest = key
1499+
version, _rest = key
15001500
if version:
15011501
# Since the version is included in the module hash,
15021502
# it should not be possible to mix versioned and
@@ -2243,7 +2243,7 @@ def join_options(init_part):
22432243
if "target-cpu" in p:
22442244
opt = p.split()
22452245
if len(opt) == 2:
2246-
opt_name, opt_val = opt
2246+
_opt_name, opt_val = opt
22472247
new_flags[i] = f"-march={opt_val}"
22482248

22492249
# Some versions of GCC report the native arch
@@ -2474,7 +2474,7 @@ def linking_patch(lib_dirs: list[str], libs: list[str]) -> list[str]:
24742474
else:
24752475
# In explicit else because of https://github.com/python/mypy/issues/10773
24762476
def sort_key(lib):
2477-
name, *numbers, extension = lib.split(".")
2477+
_name, *numbers, extension = lib.split(".")
24782478
return (extension == "dll", tuple(map(int, numbers)))
24792479

24802480
patched_lib_ldflags = []

pytensor/link/c/op.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,7 @@ def is_f16(t):
8484
outputs = cl.make_thunk(
8585
input_storage=node_input_storage, output_storage=node_output_storage
8686
)
87-
thunk, node_input_filters, node_output_filters = outputs
87+
thunk, _node_input_filters, _node_output_filters = outputs
8888

8989
if compute_map is None:
9090
rval = is_cthunk_wrapper_type(thunk)

0 commit comments

Comments (0)