Skip to content

Commit eb58b8e

Browse files
committed
Run ruff-format
1 parent 443fcc5 commit eb58b8e

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

44 files changed

+133
-150
lines changed

pytensor/bin/pytensor_cache.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424

2525
def print_help(exit_status):
2626
if exit_status:
27-
print(f"command \"{' '.join(sys.argv)}\" not recognized")
27+
print(f'command "{" ".join(sys.argv)}" not recognized')
2828
print('Type "pytensor-cache" to print the cache location')
2929
print('Type "pytensor-cache help" to print this help')
3030
print('Type "pytensor-cache clear" to erase the cache')

pytensor/compile/function/pfunc.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -328,8 +328,7 @@ def clone_inputs(i):
328328
cloned_outputs = [] # TODO: get Function.__call__ to return None
329329
else:
330330
raise TypeError(
331-
"output must be an PyTensor Variable or Out "
332-
"instance (or list of them)",
331+
"output must be an PyTensor Variable or Out instance (or list of them)",
333332
outputs,
334333
)
335334

pytensor/compile/monitormode.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -45,8 +45,7 @@ def __init__(
4545
optimizer = config.optimizer
4646
if linker is not None and not isinstance(linker.mode, MonitorMode):
4747
raise Exception(
48-
"MonitorMode can only use its own linker! You "
49-
"should not provide one.",
48+
"MonitorMode can only use its own linker! You should not provide one.",
5049
linker,
5150
)
5251

pytensor/compile/profiling.py

Lines changed: 5 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -935,18 +935,14 @@ def count_running_memory(order, fgraph, nodes_mem, ignore_dmap=False):
935935
if dmap and idx2 in dmap:
936936
vidx = dmap[idx2]
937937
assert len(vidx) == 1, (
938-
"Here we only support the "
939-
"possibility to destroy one "
940-
"input"
938+
"Here we only support the possibility to destroy one input"
941939
)
942940
ins = node.inputs[vidx[0]]
943941
if vmap and idx2 in vmap:
944942
assert ins is None
945943
vidx = vmap[idx2]
946944
assert len(vidx) == 1, (
947-
"Here we only support the "
948-
"possibility to view one "
949-
"input"
945+
"Here we only support the possibility to view one input"
950946
)
951947
ins = node.inputs[vidx[0]]
952948
if ins is not None:
@@ -1093,9 +1089,7 @@ def min_memory_generator(executable_nodes, viewed_by, view_of):
10931089
assert ins is None
10941090
vidx = vmap[idx]
10951091
assert len(vidx) == 1, (
1096-
"Here we only support "
1097-
"the possibility to "
1098-
"view one input"
1092+
"Here we only support the possibility to view one input"
10991093
)
11001094
ins = node.inputs[vidx[0]]
11011095
if ins is not None:
@@ -1405,7 +1399,7 @@ def print_stats(stats1, stats2):
14051399
print(
14061400
(
14071401
f" ... (remaining {max(0, len(node_mem) - N)} Apply account for "
1408-
f"{sum_remaining:4d}B/{size_sum_dense :d}B ({p}) of the"
1402+
f"{sum_remaining:4d}B/{size_sum_dense:d}B ({p}) of the"
14091403
" Apply with dense outputs sizes)"
14101404
),
14111405
file=file,
@@ -1545,8 +1539,7 @@ def amdlibm_speed_up(op):
15451539
return True
15461540
elif s_op.__class__ not in scalar_op_amdlibm_no_speed_up:
15471541
print(
1548-
"We don't know if amdlibm will accelerate "
1549-
"this scalar op.",
1542+
"We don't know if amdlibm will accelerate this scalar op.",
15501543
s_op,
15511544
file=file,
15521545
)

pytensor/configdefaults.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -67,8 +67,7 @@ def _warn_cxx(val):
6767
"""We only support clang++ as otherwise we hit strange g++/OSX bugs."""
6868
if sys.platform == "darwin" and val and "clang++" not in val:
6969
_logger.warning(
70-
"Only clang++ is supported. With g++,"
71-
" we end up with strange g++/OSX bugs."
70+
"Only clang++ is supported. With g++, we end up with strange g++/OSX bugs."
7271
)
7372
return True
7473

pytensor/gradient.py

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -624,8 +624,7 @@ def grad(
624624

625625
if cost is not None and isinstance(cost.type, NullType):
626626
raise ValueError(
627-
"Can't differentiate a NaN cost. "
628-
f"Cost is NaN because {cost.type.why_null}"
627+
f"Can't differentiate a NaN cost. Cost is NaN because {cost.type.why_null}"
629628
)
630629

631630
if cost is not None and cost.type.ndim != 0:
@@ -2199,9 +2198,9 @@ def hessian(cost, wrt, consider_constant=None, disconnected_inputs="raise"):
21992198
sequences=pytensor.tensor.arange(expr.shape[0]),
22002199
non_sequences=[expr, input],
22012200
)
2202-
assert (
2203-
not updates
2204-
), "Scan has returned a list of updates; this should not happen."
2201+
assert not updates, (
2202+
"Scan has returned a list of updates; this should not happen."
2203+
)
22052204
hessians.append(hess)
22062205
return as_list_or_tuple(using_list, using_tuple, hessians)
22072206

pytensor/graph/basic.py

Lines changed: 8 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -427,19 +427,17 @@ class Variable(Node, Generic[_TypeType, OptionalApplyType]):
427427
428428
c = a + b # create a simple expression
429429
430-
f = pytensor.function(
431-
[b], [c]
432-
) # this works because a has a value associated with it already
430+
# this works because a has a value associated with it already
431+
f = pytensor.function([b], [c])
433432
434-
assert 4.0 == f(2.5) # bind 2.5 to an internal copy of b and evaluate an internal c
433+
# bind 2.5 to an internal copy of b and evaluate an internal c
434+
assert 4.0 == f(2.5)
435435
436-
pytensor.function(
437-
[a], [c]
438-
) # compilation error because b (required by c) is undefined
436+
# compilation error because b (required by c) is undefined
437+
pytensor.function([a], [c])
439438
440-
pytensor.function(
441-
[a, b], [c]
442-
) # compilation error because a is constant, it can't be an input
439+
# compilation error because a is constant, it can't be an input
440+
pytensor.function([a, b], [c])
443441
444442
445443
The python variables ``a, b, c`` all refer to instances of type

pytensor/graph/features.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -391,8 +391,7 @@ def __init__(self):
391391
def on_attach(self, fgraph):
392392
if hasattr(fgraph, "checkpoint") or hasattr(fgraph, "revert"):
393393
raise AlreadyThere(
394-
"History feature is already present or in"
395-
" conflict with another plugin."
394+
"History feature is already present or in conflict with another plugin."
396395
)
397396
self.history[fgraph] = []
398397
# Don't call unpickle here, as ReplaceValidate.on_attach()

pytensor/graph/rewriting/basic.py

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2975,10 +2975,7 @@ def check_stack_trace(f_or_fgraph, ops_to_check="last", bug_print="raise"):
29752975
raise ValueError("ops_to_check does not have the right type")
29762976

29772977
if not apply_nodes_to_check:
2978-
msg = (
2979-
"Provided op instances/classes are not in the graph or the "
2980-
"graph is empty"
2981-
)
2978+
msg = "Provided op instances/classes are not in the graph or the graph is empty"
29822979
if bug_print == "warn":
29832980
warnings.warn(msg)
29842981
elif bug_print == "raise":

pytensor/graph/rewriting/db.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -464,7 +464,7 @@ def query(self, *tags, position_cutoff: int | float | None = None, **kwtags):
464464
return ret
465465

466466
def print_summary(self, stream=sys.stdout):
467-
print(f"{self.__class__.__name__ } (id {id(self)})", file=stream)
467+
print(f"{self.__class__.__name__} (id {id(self)})", file=stream)
468468
positions = list(self.__position__.items())
469469

470470
def c(a, b):

0 commit comments

Comments (0)