
Commit ffb72e4

Armavica authored and twiecki committed
Enable ruff to format code in docstrings
1 parent 86c8a00 commit ffb72e4

19 files changed: +213 -144 lines changed


pyproject.toml

Lines changed: 3 additions & 0 deletions
@@ -124,6 +124,9 @@ testpaths = "tests/"
 line-length = 88
 exclude = ["doc/", "pytensor/_version.py"]
 
+[tool.ruff.format]
+docstring-code-format = true
+
 [tool.ruff.lint]
 select = ["C", "E", "F", "I", "UP", "W", "RUF", "PERF", "PTH", "ISC"]
 ignore = ["C408", "C901", "E501", "E741", "RUF012", "PERF203", "ISC001"]

pytensor/compile/builders.py

Lines changed: 11 additions & 6 deletions
@@ -190,7 +190,8 @@ class OpFromGraph(Op, HasInnerGraph):
 
 from pytensor import function, tensor as pt
 from pytensor.compile.builders import OpFromGraph
-x, y, z = pt.scalars('xyz')
+
+x, y, z = pt.scalars("xyz")
 e = x + y * z
 op = OpFromGraph([x, y, z], [e])
 # op behaves like a normal pytensor op
@@ -206,7 +207,7 @@ class OpFromGraph(Op, HasInnerGraph):
 from pytensor import config, function, tensor as pt
 from pytensor.compile.builders import OpFromGraph
 
-x, y, z = pt.scalars('xyz')
+x, y, z = pt.scalars("xyz")
 s = pytensor.shared(np.random.random((2, 2)).astype(config.floatX))
 e = x + y * z + s
 op = OpFromGraph([x, y, z], [e])
@@ -221,12 +222,16 @@ class OpFromGraph(Op, HasInnerGraph):
 from pytensor import function, tensor as pt, grad
 from pytensor.compile.builders import OpFromGraph
 
-x, y, z = pt.scalars('xyz')
+x, y, z = pt.scalars("xyz")
 e = x + y * z
+
+
 def rescale_dy(inps, outputs, out_grads):
     x, y, z = inps
-    g, = out_grads
-    return z*2
+    (g,) = out_grads
+    return z * 2
+
+
 op = OpFromGraph(
     [x, y, z],
     [e],
@@ -236,7 +241,7 @@ def rescale_dy(inps, outputs, out_grads):
 dx, dy, dz = grad(e2, [x, y, z])
 fn = function([x, y, z], [dx, dy, dz])
 # the gradient wrt y is now doubled
-fn(2., 3., 4.)  # [1., 8., 3.]
+fn(2.0, 3.0, 4.0)  # [1., 8., 3.]
 
 """
 
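For readers unfamiliar with `OpFromGraph`, here is a self-contained version of the first docstring example above; the continuation past the shown context (applying the op and compiling) is a sketch, and the printed value follows from the expression:

```python
from pytensor import function, tensor as pt
from pytensor.compile.builders import OpFromGraph

x, y, z = pt.scalars("xyz")
e = x + y * z
op = OpFromGraph([x, y, z], [e])

# op behaves like a normal pytensor op: apply it to inputs and compile a function.
e2 = op(x, y, z) + op(z, y, x)
fn = function([x, y, z], [e2])
print(fn(1.0, 2.0, 3.0))  # expected: [array(12.)] -- (1 + 2*3) + (3 + 2*1)
```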

pytensor/gradient.py

Lines changed: 16 additions & 15 deletions
@@ -692,25 +692,24 @@ def subgraph_grad(wrt, end, start=None, cost=None, details=False):
 
 .. code-block:: python
 
-x, t = pytensor.tensor.fvector('x'), pytensor.tensor.fvector('t')
-w1 = pytensor.shared(np.random.standard_normal((3,4)))
-w2 = pytensor.shared(np.random.standard_normal((4,2)))
-a1 = pytensor.tensor.tanh(pytensor.tensor.dot(x,w1))
-a2 = pytensor.tensor.tanh(pytensor.tensor.dot(a1,w2))
+x, t = pytensor.tensor.fvector("x"), pytensor.tensor.fvector("t")
+w1 = pytensor.shared(np.random.standard_normal((3, 4)))
+w2 = pytensor.shared(np.random.standard_normal((4, 2)))
+a1 = pytensor.tensor.tanh(pytensor.tensor.dot(x, w1))
+a2 = pytensor.tensor.tanh(pytensor.tensor.dot(a1, w2))
 cost2 = pytensor.tensor.sqr(a2 - t).sum()
 cost2 += pytensor.tensor.sqr(w2.sum())
 cost1 = pytensor.tensor.sqr(w1.sum())
 
-params = [[w2],[w1]]
-costs = [cost2,cost1]
+params = [[w2], [w1]]
+costs = [cost2, cost1]
 grad_ends = [[a1], [x]]
 
 next_grad = None
 param_grads = []
 for i in range(2):
     param_grad, next_grad = pytensor.subgraph_grad(
-        wrt=params[i], end=grad_ends[i],
-        start=next_grad, cost=costs[i]
+        wrt=params[i], end=grad_ends[i], start=next_grad, cost=costs[i]
     )
     next_grad = dict(zip(grad_ends[i], next_grad))
     param_grads.extend(param_grad)
@@ -1704,9 +1703,11 @@ def verify_grad(
 
 Examples
 --------
->>> verify_grad(pytensor.tensor.tanh,
-...             (np.asarray([[2, 3, 4], [-1, 3.3, 9.9]]),),
-...             rng=np.random.default_rng(23098))
+>>> verify_grad(
+...     pytensor.tensor.tanh,
+...     (np.asarray([[2, 3, 4], [-1, 3.3, 9.9]]),),
+...     rng=np.random.default_rng(23098),
+... )
 
 Parameters
 ----------
@@ -2342,9 +2343,9 @@ def grad_clip(x, lower_bound, upper_bound):
 Examples
 --------
 >>> x = pytensor.tensor.type.scalar()
->>> z = pytensor.gradient.grad(grad_clip(x, -1, 1)**2, x)
+>>> z = pytensor.gradient.grad(grad_clip(x, -1, 1) ** 2, x)
 >>> z2 = pytensor.gradient.grad(x**2, x)
->>> f = pytensor.function([x], outputs = [z, z2])
+>>> f = pytensor.function([x], outputs=[z, z2])
 >>> print(f(2.0))
 [array(1.), array(4.)]
 
@@ -2383,7 +2384,7 @@ def grad_scale(x, multiplier):
 >>> fprime = pytensor.function([x], fp)
 >>> print(fprime(2))  # doctest: +ELLIPSIS
 -0.416...
->>> f_inverse=grad_scale(fx, -1.)
+>>> f_inverse = grad_scale(fx, -1.0)
 >>> fpp = pytensor.grad(f_inverse, wrt=x)
 >>> fpprime = pytensor.function([x], fpp)
 >>> print(fpprime(2))  # doctest: +ELLIPSIS
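As a quick check that the reformatted doctest still runs, the `verify_grad` example can be executed standalone roughly like this (a sketch; `verify_grad` raises if the numerical and symbolic gradients of `tanh` disagree at the test point):

```python
import numpy as np

import pytensor.tensor as pt
from pytensor.gradient import verify_grad

# Numerically verify the gradient of tanh at a fixed test point, mirroring the
# reformatted doctest above; verify_grad raises an error if the check fails.
verify_grad(
    pt.tanh,
    (np.asarray([[2, 3, 4], [-1, 3.3, 9.9]]),),
    rng=np.random.default_rng(23098),
)
print("gradient check passed")
```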

pytensor/graph/basic.py

Lines changed: 19 additions & 13 deletions
@@ -399,18 +399,24 @@ class Variable(Node, Generic[_TypeType, OptionalApplyType]):
 import pytensor
 import pytensor.tensor as pt
 
-a = pt.constant(1.5)   # declare a symbolic constant
-b = pt.fscalar()       # declare a symbolic floating-point scalar
+a = pt.constant(1.5)  # declare a symbolic constant
+b = pt.fscalar()  # declare a symbolic floating-point scalar
 
-c = a + b      # create a simple expression
+c = a + b  # create a simple expression
 
-f = pytensor.function([b], [c])   # this works because a has a value associated with it already
+f = pytensor.function(
+    [b], [c]
+)  # this works because a has a value associated with it already
 
-assert 4.0 == f(2.5)   # bind 2.5 to an internal copy of b and evaluate an internal c
+assert 4.0 == f(2.5)  # bind 2.5 to an internal copy of b and evaluate an internal c
 
-pytensor.function([a], [c])   # compilation error because b (required by c) is undefined
+pytensor.function(
+    [a], [c]
+)  # compilation error because b (required by c) is undefined
 
-pytensor.function([a,b], [c])  # compilation error because a is constant, it can't be an input
+pytensor.function(
+    [a, b], [c]
+)  # compilation error because a is constant, it can't be an input
 
 
 The python variables ``a, b, c`` all refer to instances of type
@@ -587,10 +593,10 @@ def eval(
 
 >>> import numpy as np
 >>> import pytensor.tensor as pt
->>> x = pt.dscalar('x')
->>> y = pt.dscalar('y')
+>>> x = pt.dscalar("x")
+>>> y = pt.dscalar("y")
 >>> z = x + y
->>> np.allclose(z.eval({x : 16.3, y : 12.1}), 28.4)
+>>> np.allclose(z.eval({x: 16.3, y: 12.1}), 28.4)
 True
 
 We passed :meth:`eval` a dictionary mapping symbolic PyTensor
@@ -963,9 +969,9 @@ def explicit_graph_inputs(
 import pytensor.tensor as pt
 from pytensor.graph.basic import explicit_graph_inputs
 
-x = pt.vector('x')
+x = pt.vector("x")
 y = pt.constant(2)
-z = pt.mul(x*y)
+z = pt.mul(x * y)
 
 inputs = list(explicit_graph_inputs(z))
 f = pytensor.function(inputs, z)
@@ -1041,7 +1047,7 @@ def orphans_between(
 >>> from pytensor.graph.basic import orphans_between
 >>> from pytensor.tensor import scalars
 >>> x, y = scalars("xy")
->>> list(orphans_between([x], [(x+y)]))
+>>> list(orphans_between([x], [(x + y)]))
 [y]
 
 """

pytensor/link/c/interface.py

Lines changed: 3 additions & 3 deletions
@@ -30,7 +30,7 @@ def c_headers(self, **kwargs) -> list[str]:
 .. code-block:: python
 
 def c_headers(self, **kwargs):
-    return ['<iostream>', '<math.h>', '/full/path/to/header.h']
+    return ["<iostream>", "<math.h>", "/full/path/to/header.h"]
 
 
 """
@@ -54,7 +54,7 @@ def c_header_dirs(self, **kwargs) -> list[str]:
 .. code-block:: python
 
 def c_header_dirs(self, **kwargs):
-    return ['/usr/local/include', '/opt/weirdpath/src/include']
+    return ["/usr/local/include", "/opt/weirdpath/src/include"]
 
 """
 return []
@@ -134,7 +134,7 @@ def c_compile_args(self, **kwargs) -> list[str]:
 .. code-block:: python
 
 def c_compile_args(self, **kwargs):
-    return ['-ffast-math']
+    return ["-ffast-math"]
 
 """
 return []

pytensor/link/c/params_type.py

Lines changed: 52 additions & 31 deletions
@@ -29,7 +29,9 @@
 
 .. code-block:: python
 
-params_type = ParamsType(attr1=TensorType('int32', shape=(None, None)), attr2=ScalarType('float64'))
+params_type = ParamsType(
+    attr1=TensorType("int32", shape=(None, None)), attr2=ScalarType("float64")
+)
 
 If your op contains attributes ``attr1`` **and** ``attr2``, the default ``op.get_params()``
 implementation will automatically try to look for it and generate an appropriate Params object.
@@ -77,38 +79,48 @@ def __init__(value_attr1, value_attr2):
 from pytensor.link.c.params_type import ParamsType
 from pytensor.link.c.type import EnumType, EnumList
 
-wrapper = ParamsType(enum1=EnumList('CONSTANT_1', 'CONSTANT_2', 'CONSTANT_3'),
-                     enum2=EnumType(PI=3.14, EPSILON=0.001))
+wrapper = ParamsType(
+    enum1=EnumList("CONSTANT_1", "CONSTANT_2", "CONSTANT_3"),
+    enum2=EnumType(PI=3.14, EPSILON=0.001),
+)
 
 # Each enum constant is available as a wrapper attribute:
-print(wrapper.CONSTANT_1, wrapper.CONSTANT_2, wrapper.CONSTANT_3,
-      wrapper.PI, wrapper.EPSILON)
+print(
+    wrapper.CONSTANT_1,
+    wrapper.CONSTANT_2,
+    wrapper.CONSTANT_3,
+    wrapper.PI,
+    wrapper.EPSILON,
+)
 
 # For convenience, you can also look for a constant by name with
 # ``ParamsType.get_enum()`` method.
-pi = wrapper.get_enum('PI')
-epsilon = wrapper.get_enum('EPSILON')
-constant_2 = wrapper.get_enum('CONSTANT_2')
+pi = wrapper.get_enum("PI")
+epsilon = wrapper.get_enum("EPSILON")
+constant_2 = wrapper.get_enum("CONSTANT_2")
 print(pi, epsilon, constant_2)
 
 This implies that a ParamsType cannot contain different enum types with common enum names::
 
 # Following line will raise an error,
 # as there is a "CONSTANT_1" defined both in enum1 and enum2.
-wrapper = ParamsType(enum1=EnumList('CONSTANT_1', 'CONSTANT_2'),
-                     enum2=EnumType(CONSTANT_1=0, CONSTANT_3=5))
+wrapper = ParamsType(
+    enum1=EnumList("CONSTANT_1", "CONSTANT_2"),
+    enum2=EnumType(CONSTANT_1=0, CONSTANT_3=5),
+)
 
 If your enum types contain constant aliases, you can retrieve them from ParamsType
 with ``ParamsType.enum_from_alias(alias)`` method (see :class:`pytensor.link.c.type.EnumType`
 for more info about enumeration aliases).
 
 .. code-block:: python
 
-wrapper = ParamsType(enum1=EnumList('A', ('B', 'beta'), 'C'),
-                     enum2=EnumList(('D', 'delta'), 'E', 'F'))
+wrapper = ParamsType(
+    enum1=EnumList("A", ("B", "beta"), "C"), enum2=EnumList(("D", "delta"), "E", "F")
+)
 b1 = wrapper.B
-b2 = wrapper.get_enum('B')
-b3 = wrapper.enum_from_alias('beta')
+b2 = wrapper.get_enum("B")
+b3 = wrapper.enum_from_alias("beta")
 assert b1 == b2 == b3
 
 """
@@ -236,10 +248,13 @@ class Params(dict):
 
 from pytensor.link.c.params_type import ParamsType, Params
 from pytensor.scalar import ScalarType
+
 # You must create a ParamsType first:
-params_type = ParamsType(attr1=ScalarType('int32'),
-                         key2=ScalarType('float32'),
-                         field3=ScalarType('int64'))
+params_type = ParamsType(
+    attr1=ScalarType("int32"),
+    key2=ScalarType("float32"),
+    field3=ScalarType("int64"),
+)
 # Then you can create a Params object with
 # the params type defined above and values for attributes.
 params = Params(params_type, attr1=1, key2=2.0, field3=3)
@@ -491,11 +506,13 @@ def get_enum(self, key):
 from pytensor.link.c.type import EnumType, EnumList
 from pytensor.scalar import ScalarType
 
-wrapper = ParamsType(scalar=ScalarType('int32'),
-                     letters=EnumType(A=1, B=2, C=3),
-                     digits=EnumList('ZERO', 'ONE', 'TWO'))
-print(wrapper.get_enum('C'))  # 3
-print(wrapper.get_enum('TWO'))  # 2
+wrapper = ParamsType(
+    scalar=ScalarType("int32"),
+    letters=EnumType(A=1, B=2, C=3),
+    digits=EnumList("ZERO", "ONE", "TWO"),
+)
+print(wrapper.get_enum("C"))  # 3
+print(wrapper.get_enum("TWO"))  # 2
 
 # You can also directly do:
 print(wrapper.C)
@@ -520,17 +537,19 @@ def enum_from_alias(self, alias):
 from pytensor.link.c.type import EnumType, EnumList
 from pytensor.scalar import ScalarType
 
-wrapper = ParamsType(scalar=ScalarType('int32'),
-                     letters=EnumType(A=(1, 'alpha'), B=(2, 'beta'), C=3),
-                     digits=EnumList(('ZERO', 'nothing'), ('ONE', 'unit'), ('TWO', 'couple')))
-print(wrapper.get_enum('C'))  # 3
-print(wrapper.get_enum('TWO'))  # 2
-print(wrapper.enum_from_alias('alpha'))  # 1
-print(wrapper.enum_from_alias('nothing'))  # 0
+wrapper = ParamsType(
+    scalar=ScalarType("int32"),
+    letters=EnumType(A=(1, "alpha"), B=(2, "beta"), C=3),
+    digits=EnumList(("ZERO", "nothing"), ("ONE", "unit"), ("TWO", "couple")),
+)
+print(wrapper.get_enum("C"))  # 3
+print(wrapper.get_enum("TWO"))  # 2
+print(wrapper.enum_from_alias("alpha"))  # 1
+print(wrapper.enum_from_alias("nothing"))  # 0
 
 # For the following, alias 'C' is not defined, so the method looks for
 # a constant named 'C', and finds it.
-print(wrapper.enum_from_alias('C'))  # 3
+print(wrapper.enum_from_alias("C"))  # 3
 
 .. note::
 
@@ -567,12 +586,14 @@ def get_params(self, *objects, **kwargs) -> Params:
 from pytensor.tensor.type import dmatrix
 from pytensor.scalar import ScalarType
 
+
 class MyObject:
     def __init__(self):
         self.a = 10
         self.b = numpy.asarray([[1, 2, 3], [4, 5, 6]])
 
-params_type = ParamsType(a=ScalarType('int32'), b=dmatrix, c=ScalarType('bool'))
+
+params_type = ParamsType(a=ScalarType("int32"), b=dmatrix, c=ScalarType("bool"))
 
 o = MyObject()
 value_for_c = False
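Condensing the reformatted docstring examples above into one runnable snippet (a sketch; the printed values follow directly from the enum definitions shown):

```python
from pytensor.link.c.params_type import ParamsType
from pytensor.link.c.type import EnumList, EnumType
from pytensor.scalar import ScalarType

# Enum constants declared on the type become attributes of the wrapper and can
# also be looked up by name or by alias.
wrapper = ParamsType(
    scalar=ScalarType("int32"),
    letters=EnumType(A=(1, "alpha"), B=(2, "beta"), C=3),
    digits=EnumList(("ZERO", "nothing"), ("ONE", "unit"), ("TWO", "couple")),
)

print(wrapper.C)  # 3 (attribute access)
print(wrapper.get_enum("TWO"))  # 2 (lookup by constant name)
print(wrapper.enum_from_alias("alpha"))  # 1 (lookup by alias)
print(wrapper.enum_from_alias("C"))  # 3 (no such alias, falls back to the name)
```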
