
Commit d1aaa1d

Decompose Tridiagonal Solve into core steps
1 parent 249a69a commit d1aaa1d

File tree

7 files changed: +368 -26 lines changed

pytensor/link/numba/dispatch/linalg/solve/tridiagonal.py

Lines changed: 50 additions & 0 deletions
@@ -6,6 +6,7 @@
 from numpy import ndarray
 from scipy import linalg

+from pytensor.link.numba.dispatch import numba_funcify
 from pytensor.link.numba.dispatch.basic import numba_njit
 from pytensor.link.numba.dispatch.linalg._LAPACK import (
     _LAPACK,
@@ -20,6 +21,10 @@
     _solve_check,
     _trans_char_to_int,
 )
+from pytensor.tensor._linalg.solve.tridiagonal import (
+    LUFactorTridiagonal,
+    SolveLUFactorTridiagonal,
+)


 @numba_njit
@@ -297,3 +302,48 @@ def impl(
         return X

     return impl
+
+
+@numba_funcify.register(LUFactorTridiagonal)
+def numba_funcify_LUFactorTridiagonal(op: LUFactorTridiagonal, node, **kwargs):
+    overwrite_dl = op.overwrite_dl
+    overwrite_d = op.overwrite_d
+    overwrite_du = op.overwrite_du
+
+    @numba_njit(cache=False)
+    def lu_factor_tridiagonal(dl, d, du):
+        if not overwrite_dl:
+            dl = dl.copy()
+        if not overwrite_d:
+            d = d.copy()
+        if not overwrite_du:
+            du = du.copy()
+
+        dl, d, du, du2, ipiv, _ = _gttrf(dl, d, du)
+        return dl, d, du, du2, ipiv
+
+    return lu_factor_tridiagonal
+
+
+@numba_funcify.register(SolveLUFactorTridiagonal)
+def numba_funcify_SolveLUFactorTridiagonal(
+    op: SolveLUFactorTridiagonal, node, **kwargs
+):
+    overwrite_b = op.overwrite_b
+    transposed = op.transposed
+
+    @numba_njit(cache=False)
+    def solve_lu_factor_tridiagonal(dl, d, du, du2, ipiv, b):
+        x, _ = _gttrs(
+            dl,
+            d,
+            du,
+            du2,
+            ipiv,
+            b,
+            overwrite_b=overwrite_b,
+            trans=transposed,
+        )
+        return x
+
+    return solve_lu_factor_tridiagonal
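
The _gttrf and _gttrs helpers dispatched above wrap the same LAPACK gttrf/gttrs routines that SciPy exposes, so the intended numerics can be sanity-checked outside of Numba. A minimal sketch, not part of this commit, calling scipy.linalg.lapack directly with illustrative values:

import numpy as np
from scipy.linalg.lapack import dgttrf, dgttrs

n = 5
rng = np.random.default_rng(0)
dl = rng.normal(size=n - 1)       # subdiagonal
d = rng.normal(size=n) + 5.0      # main diagonal, shifted to be well conditioned
du = rng.normal(size=n - 1)       # superdiagonal
b = rng.normal(size=n)

# Factor once: returns modified diagonals, the second superdiagonal, and pivots.
dl_f, d_f, du_f, du2, ipiv, info = dgttrf(dl, d, du)
assert info == 0

# Solve any number of right-hand sides against the stored factorization.
x, info = dgttrs(dl_f, d_f, du_f, du2, ipiv, b)
assert info == 0

A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1)
np.testing.assert_allclose(A @ x, b)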

pytensor/tensor/_linalg/solve/rewriting.py

Lines changed: 9 additions & 1 deletion
@@ -4,6 +4,10 @@
 from pytensor.graph.rewriting.basic import copy_stack_trace, in2out, node_rewriter
 from pytensor.scan.op import Scan
 from pytensor.scan.rewriting import scan_seqopt1
+from pytensor.tensor._linalg.solve.tridiagonal import (
+    tridiagonal_lu_factor,
+    tridiagonal_lu_solve,
+)
 from pytensor.tensor.basic import atleast_Nd
 from pytensor.tensor.blockwise import Blockwise
 from pytensor.tensor.elemwise import DimShuffle
@@ -16,18 +20,22 @@
 def decompose_A(A, assume_a):
     if assume_a == "gen":
         return lu_factor(A, check_finite=False)
+    elif assume_a == "tridiagonal":
+        return tridiagonal_lu_factor(A)
     else:
         raise NotImplementedError


 def solve_lu_decomposed_system(A_decomp, b, b_ndim, assume_a, transposed=False):
     if assume_a == "gen":
         return lu_solve(A_decomp, b, b_ndim=b_ndim, trans=transposed)
+    elif assume_a == "tridiagonal":
+        return tridiagonal_lu_solve(A_decomp, b, b_ndim=b_ndim, transposed=transposed)
     else:
         raise NotImplementedError


-_SPLITTABLE_SOLVE_ASSUME_A = {"gen"}
+_SPLITTABLE_SOLVE_ASSUME_A = {"gen", "tridiagonal"}


 def _split_lu_solve_steps(fgraph, node, *, eager: bool):
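
With "tridiagonal" added to _SPLITTABLE_SOLVE_ASSUME_A, graphs that repeatedly solve against the same tridiagonal matrix become eligible for the split-solve rewrite: decompose_A runs once and each solve reuses the decomposition. A hedged sketch of the user-facing side, assuming the user-facing solve accepts assume_a="tridiagonal":

import pytensor
import pytensor.tensor as pt

A = pt.matrix("A")
b1 = pt.vector("b1")
b2 = pt.vector("b2")

# Both solves share A, so the rewrite can hoist a single tridiagonal_lu_factor(A)
# and turn each solve into a tridiagonal_lu_solve on the shared decomposition.
x1 = pt.linalg.solve(A, b1, assume_a="tridiagonal")
x2 = pt.linalg.solve(A, b2, assume_a="tridiagonal")

fn = pytensor.function([A, b1, b2], [x1, x2])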

pytensor/tensor/_linalg/solve/tridiagonal.py

Lines changed: 154 additions & 0 deletions

@@ -0,0 +1,154 @@
+import numpy as np
+from scipy.linalg import get_lapack_funcs
+
+from pytensor.graph import Apply, Op
+from pytensor.tensor.basic import as_tensor, diagonal
+from pytensor.tensor.blockwise import Blockwise
+from pytensor.tensor.type import tensor, vector
+
+
+class LUFactorTridiagonal(Op):
+    """Compute LU factorization of a tridiagonal matrix (lapack gttrf)"""
+
+    __props__ = (
+        "overwrite_dl",
+        "overwrite_d",
+        "overwrite_du",
+    )
+    gufunc_signature = "(dl),(d),(dl)->(dl),(d),(dl),(du2),(d)"
+
+    def __init__(self, overwrite_dl=False, overwrite_d=False, overwrite_du=False):
+        self.destroy_map = dm = {}
+        if overwrite_dl:
+            dm[0] = [0]
+        if overwrite_d:
+            dm[1] = [1]
+        if overwrite_du:
+            dm[2] = [2]
+        self.overwrite_dl = overwrite_dl
+        self.overwrite_d = overwrite_d
+        self.overwrite_du = overwrite_du
+        super().__init__()
+
+    def make_node(self, dl, d, du):
+        dl, d, du = map(as_tensor, (dl, d, du))
+
+        if not all(inp.type.ndim == 1 for inp in (dl, d, du)):
+            raise ValueError("Diagonals must be vectors")
+
+        ndl, nd, ndu = (inp.type.shape[-1] for inp in (dl, d, du))
+        n = (
+            ndl + 1
+            if ndl is not None
+            else (nd if nd is not None else (ndu + 1 if ndu is not None else None))
+        )
+        dummy_arrays = [np.zeros((), dtype=inp.type.dtype) for inp in (dl, d, du)]
+        out_dtype = get_lapack_funcs("gttrf", dummy_arrays).dtype
+        outputs = [
+            vector(shape=(None if n is None else (n - 1),), dtype=out_dtype),
+            vector(shape=(n,), dtype=out_dtype),
+            vector(shape=(None if n is None else n - 1,), dtype=out_dtype),
+            vector(shape=(None if n is None else n - 2,), dtype=out_dtype),
+            vector(shape=(n,), dtype=np.int32),
+        ]
+        return Apply(self, [dl, d, du], outputs)
+
+    def perform(self, node, inputs, output_storage):
+        gttrf = get_lapack_funcs("gttrf", dtype=node.outputs[0].type.dtype)
+        dl, d, du, du2, ipiv, _ = gttrf(
+            *inputs,
+            overwrite_dl=self.overwrite_dl,
+            overwrite_d=self.overwrite_d,
+            overwrite_du=self.overwrite_du,
+        )
+        output_storage[0][0] = dl
+        output_storage[1][0] = d
+        output_storage[2][0] = du
+        output_storage[3][0] = du2
+        output_storage[4][0] = ipiv
+
+
+class SolveLUFactorTridiagonal(Op):
+    """Solve a system of linear equations with a tridiagonal coefficient matrix (lapack gttrs)."""
+
+    __props__ = ("b_ndim", "overwrite_b", "transposed")
+
+    def __init__(self, b_ndim: int, transposed: bool, overwrite_b=False):
+        if b_ndim not in (1, 2):
+            raise ValueError("b_ndim must be 1 or 2")
+        if b_ndim == 1:
+            self.gufunc_signature = "(dl),(d),(dl),(du2),(d),(d)->(d)"
+        else:
+            self.gufunc_signature = "(dl),(d),(dl),(du2),(d),(d,rhs)->(d,rhs)"
+        if overwrite_b:
+            self.destroy_map = {0: [5]}
+        self.b_ndim = b_ndim
+        self.transposed = transposed
+        self.overwrite_b = overwrite_b
+        super().__init__()
+
+    def make_node(self, dl, d, du, du2, ipiv, b):
+        dl, d, du, du2, ipiv, b = map(as_tensor, (dl, d, du, du2, ipiv, b))
+
+        if b.type.ndim != self.b_ndim:
+            raise ValueError("Wrong number of dimensions for input b.")
+
+        if not all(inp.type.ndim == 1 for inp in (dl, d, du, du2, ipiv)):
+            raise ValueError("Inputs must be vectors")
+
+        ndl, nd, ndu, ndu2, nipiv = (
+            inp.type.shape[-1] for inp in (dl, d, du, du2, ipiv)
+        )
+        nb = b.type.shape[0]
+        n = (
+            ndl + 1
+            if ndl is not None
+            else (
+                nd
+                if nd is not None
+                else (
+                    ndu + 1
+                    if ndu is not None
+                    else (
+                        ndu2 + 2
+                        if ndu2 is not None
+                        else (nipiv if nipiv is not None else nb)
+                    )
+                )
+            )
+        )
+        dummy_arrays = [
+            np.zeros((), dtype=inp.type.dtype) for inp in (dl, d, du, du2, ipiv)
+        ]
+        # Seems to always be float64?
+        out_dtype = get_lapack_funcs("gttrs", dummy_arrays).dtype
+        if self.b_ndim == 1:
+            output_shape = (n,)
+        else:
+            output_shape = (n, b.type.shape[-1])
+
+        outputs = [tensor(shape=output_shape, dtype=out_dtype)]
+        return Apply(self, [dl, d, du, du2, ipiv, b], outputs)
+
+    def perform(self, node, inputs, output_storage):
+        gttrs = get_lapack_funcs("gttrs", dtype=node.outputs[0].type.dtype)
+        x, _ = gttrs(
+            *inputs,
+            overwrite_b=self.overwrite_b,
+            trans="N" if not self.transposed else "T",
+        )
+        output_storage[0][0] = x
+
+
+def tridiagonal_lu_factor(a):
+    # Return the decomposition of A implied by a tridiagonal solve
+    dl, d, du = (diagonal(a, offset=o, axis1=-2, axis2=-1) for o in (-1, 0, 1))
+    dl, d, du, du2, ipiv = Blockwise(LUFactorTridiagonal())(dl, d, du)
+    return dl, d, du, du2, ipiv
+
+
+def tridiagonal_lu_solve(a_diagonals, b, *, b_ndim: int, transposed: bool = False):
+    dl, d, du, du2, ipiv = a_diagonals
+    return Blockwise(SolveLUFactorTridiagonal(b_ndim=b_ndim, transposed=transposed))(
+        dl, d, du, du2, ipiv, b
+    )
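
A short end-to-end sketch of the two helper functions defined at the bottom of this file, using illustrative values (standard PyTensor compilation; the snippet itself is not part of the commit):

import numpy as np
import pytensor
import pytensor.tensor as pt
from pytensor.tensor._linalg.solve.tridiagonal import (
    tridiagonal_lu_factor,
    tridiagonal_lu_solve,
)

A = pt.matrix("A")
b = pt.vector("b")

diags = tridiagonal_lu_factor(A)              # (dl, d, du, du2, ipiv)
x = tridiagonal_lu_solve(diags, b, b_ndim=1)  # reuses the factorization

fn = pytensor.function([A, b], x)
A_val = np.diag(np.full(4, 3.0)) + np.diag(np.ones(3), 1) + np.diag(np.ones(3), -1)
b_val = np.arange(4.0)
np.testing.assert_allclose(A_val @ fn(A_val, b_val), b_val)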

pytensor/tensor/rewriting/subtensor.py

Lines changed: 81 additions & 0 deletions
@@ -19,12 +19,14 @@
 from pytensor.scalar import constant as scalar_constant
 from pytensor.tensor.basic import (
     Alloc,
+    ExtractDiag,
     Join,
     ScalarFromTensor,
     TensorFromScalar,
     alloc,
     cast,
     concatenate,
+    full,
     get_scalar_constant_value,
     get_underlying_scalar_constant_value,
     register_infer_shape,
@@ -1784,3 +1786,82 @@ def ravel_multidimensional_int_idx(fgraph, node):
     "numba",
     use_db_name_as_tag=False,  # Not included if only "specialize" is requested
 )
+
+
+@register_canonicalize
+@register_stabilize
+@register_specialize
+@node_rewriter([ExtractDiag])
+def extract_diag_of_diagonal_set_subtensor(fgraph, node):
+    def is_constant_arange(var) -> bool:
+        if not (isinstance(var, TensorConstant) and var.type.ndim == 1):
+            return False
+
+        data = var.data
+        start, stop = data[0], data[-1] + 1
+        return data.size == (stop - start) and (data == np.arange(start, stop)).all()
+
+    [diag_x] = node.inputs
+    if not (
+        diag_x.owner is not None
+        and isinstance(diag_x.owner.op, AdvancedIncSubtensor)
+        and diag_x.owner.op.set_instead_of_inc
+    ):
+        return None
+
+    x, y, *idxs = diag_x.owner.inputs
+
+    if not (
+        x.type.ndim >= 2
+        and None not in x.type.shape[-2:]
+        and x.type.shape[-2] == x.type.shape[-1]
+    ):
+        # For now we only support the rewrite when x has a static square shape
+        return None
+
+    op = node.op
+    if op.axis2 >= len(idxs):
+        return None
+
+    # Check that all non-axis indices are full slices
+    axis = {op.axis1, op.axis2}
+    if not all(is_full_slice(idx) for i, idx in enumerate(idxs) if i not in axis):
+        return None
+
+    # Check that the axis indices are the aranges we would expect from setting the diagonal
+    axis1_idx = idxs[op.axis1]
+    axis2_idx = idxs[op.axis2]
+    if not (is_constant_arange(axis1_idx) and is_constant_arange(axis2_idx)):
+        return None
+
+    dim_length = x.type.shape[-1]
+    offset = op.offset
+    start_stop1 = (axis1_idx.data[0], axis1_idx.data[-1] + 1)
+    start_stop2 = (axis2_idx.data[0], axis2_idx.data[-1] + 1)
+    orig_start1, orig_start2 = start_stop1[0], start_stop2[0]
+
+    if offset < 0:
+        # The logic for checking whether a diagonal is selected with a negative offset
+        # is the same as with a positive offset but with the axes swapped
+        start_stop1, start_stop2 = start_stop2, start_stop1
+        offset = -offset
+
+    start1, stop1 = start_stop1
+    start2, stop2 = start_stop2
+    if (
+        start1 == 0
+        and start2 == offset
+        and stop1 == dim_length - offset
+        and stop2 == dim_length
+    ):
+        # We are extracting the diagonal that was just written
+        if y.type.ndim == 0 or y.type.shape[-1] == 1:
+            # We may need to broadcast y
+            y = full((*x.shape[:-2], dim_length - offset), y, dtype=x.type.dtype)
+        return [y]
+    elif (orig_start2 - orig_start1) != op.offset:
+        # Some other diagonal was written; ignore it
+        return [op(x)]
+    else:
+        # A portion of the diagonal, but not the whole of it, was written; don't do anything
+        return None
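
A hedged illustration (hypothetical names and shapes) of the graph this rewrite targets: writing a full diagonal with set_subtensor and reading it straight back with diagonal.

import numpy as np
import pytensor.tensor as pt

x = pt.tensor("x", shape=(3, 3))
y = pt.vector("y", shape=(3,))

idx = np.arange(3)                    # constant arange indices, as the rewrite requires
z = pt.set_subtensor(x[idx, idx], y)  # AdvancedIncSubtensor with set_instead_of_inc
out = pt.diagonal(z)                  # ExtractDiag of the diagonal just written

# After canonicalization, `out` should be replaced by `y` directly,
# eliminating both the write and the read.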

pytensor/tensor/subtensor.py

Lines changed: 1 addition & 6 deletions
@@ -3021,12 +3021,7 @@ def make_node(self, x, y, *inputs):
         return Apply(
             self,
             (x, y, *new_inputs),
-            [
-                tensor(
-                    dtype=x.type.dtype,
-                    shape=tuple(1 if s == 1 else None for s in x.type.shape),
-                )
-            ],
+            [x.type()],
         )

     def perform(self, node, inputs, out_):
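
The practical effect of returning [x.type()] is that static shape information now survives an advanced set/inc, which is what lets the new ExtractDiag rewrite above see a static square shape after a set_subtensor. A small sketch with assumed shapes:

import numpy as np
import pytensor.tensor as pt

x = pt.tensor("x", shape=(3, 3))
y = pt.vector("y", shape=(3,))

z = pt.set_subtensor(x[np.arange(3), np.arange(3)], y)

# Previously the output type degraded to shape (None, None) unless a dim was 1;
# with [x.type()] it mirrors x's type exactly.
assert z.type.shape == (3, 3)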
