
Commit 39c8bf0

Merge pull request #311 from python-adaptive/coeff-cache

lazily evaluate the integrator coefficients

2 parents: 1e3d26a + ebae237

File tree: 3 files changed (+64 lines, -64 lines)
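This merge defers the linear algebra in integrator_coeffs.py (the Vandermonde-like matrices, their inverses and condition numbers, the downdate vectors) from import time to first use: the module-level assignments move into a function cached with functools.lru_cache, and a module-level __getattr__ (PEP 562, Python 3.7+) serves the names on demand. A minimal sketch of the pattern, with illustrative names (lazy_module, _expensive, value) that are not part of adaptive:

    # lazy_module.py -- a minimal sketch of the pattern used in this commit;
    # the names below are illustrative, not taken from the adaptive codebase.
    from functools import lru_cache


    @lru_cache(maxsize=None)
    def _expensive():
        """Run the costly computation once, on first access, and cache it."""
        value = sum(i * i for i in range(10**6))  # stand-in for the linear algebra
        return locals()  # expose every local name, as the real module does


    def __getattr__(attr):
        # PEP 562: called only for names not found in the module namespace, so
        # `import lazy_module` stays cheap and `lazy_module.value` pays the cost
        # exactly once (an unknown name raises KeyError in this sketch).
        return _expensive()[attr]

With this in place, importing the module does no numerical work; the first attribute access triggers _expensive() and every later lookup is a dictionary access on the cached result.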

adaptive/learner/integrator_coeffs.py

Lines changed: 38 additions & 27 deletions
@@ -2,6 +2,7 @@
 
 from collections import defaultdict
 from fractions import Fraction
+from functools import lru_cache
 
 import numpy as np
 import scipy.linalg
@@ -141,39 +142,49 @@ def calc_V(x, n):
     return np.array(V).T
 
 
-eps = np.spacing(1)
+@lru_cache(maxsize=None)
+def _coefficients():
+    """Compute the coefficients on demand, in order to avoid doing linear algebra on import."""
+    eps = np.spacing(1)
 
-# the nodes and Newton polynomials
-ns = (5, 9, 17, 33)
-xi = [-np.cos(np.linspace(0, np.pi, n)) for n in ns]
+    # the nodes and Newton polynomials
+    ns = (5, 9, 17, 33)
+    xi = [-np.cos(np.linspace(0, np.pi, n)) for n in ns]
 
-# Make `xi` perfectly anti-symmetric, important for splitting the intervals
-xi = [(row - row[::-1]) / 2 for row in xi]
+    # Make `xi` perfectly anti-symmetric, important for splitting the intervals
+    xi = [(row - row[::-1]) / 2 for row in xi]
 
-# Compute the Vandermonde-like matrix and its inverse.
-V = [calc_V(x, n) for x, n in zip(xi, ns)]
-V_inv = list(map(scipy.linalg.inv, V))
-Vcond = [scipy.linalg.norm(a, 2) * scipy.linalg.norm(b, 2) for a, b in zip(V, V_inv)]
+    # Compute the Vandermonde-like matrix and its inverse.
+    V = [calc_V(x, n) for x, n in zip(xi, ns)]
+    V_inv = list(map(scipy.linalg.inv, V))
+    Vcond = [
+        scipy.linalg.norm(a, 2) * scipy.linalg.norm(b, 2) for a, b in zip(V, V_inv)
+    ]
 
-# Compute the shift matrices.
-T_left, T_right = [V_inv[3] @ calc_V((xi[3] + a) / 2, ns[3]) for a in [-1, 1]]
+    # Compute the shift matrices.
+    T_left, T_right = [V_inv[3] @ calc_V((xi[3] + a) / 2, ns[3]) for a in [-1, 1]]
 
-# If the relative difference between two consecutive approximations is
-# lower than this value, the error estimate is considered reliable.
-# See section 6.2 of Pedro Gonnet's thesis.
-hint = 0.1
+    # If the relative difference between two consecutive approximations is
+    # lower than this value, the error estimate is considered reliable.
+    # See section 6.2 of Pedro Gonnet's thesis.
+    hint = 0.1
 
-# Smallest acceptable relative difference of points in a rule. This was chosen
-# such that no artifacts are apparent in plots of (i, log(a_i)), where a_i is
-# the sequence of estimates of the integral value of an interval and all its
-# ancestors..
-min_sep = 16 * eps
+    # Smallest acceptable relative difference of points in a rule. This was chosen
+    # such that no artifacts are apparent in plots of (i, log(a_i)), where a_i is
+    # the sequence of estimates of the integral value of an interval and all its
+    # ancestors..
+    min_sep = 16 * eps
 
-ndiv_max = 20
+    ndiv_max = 20
 
-# set-up the downdate matrix
-k = np.arange(ns[3])
-alpha = np.sqrt((k + 1) ** 2 / (2 * k + 1) / (2 * k + 3))
-gamma = np.concatenate([[0, 0], np.sqrt(k[2:] ** 2 / (4 * k[2:] ** 2 - 1))])
+    # set-up the downdate matrix
+    k = np.arange(ns[3])
+    alpha = np.sqrt((k + 1) ** 2 / (2 * k + 1) / (2 * k + 3))
+    gamma = np.concatenate([[0, 0], np.sqrt(k[2:] ** 2 / (4 * k[2:] ** 2 - 1))])
 
-b_def = calc_bdef(ns)
+    b_def = calc_bdef(ns)
+    return locals()
+
+
+def __getattr__(attr):
+    return _coefficients()[attr]
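After this change, importing adaptive.learner.integrator_coeffs no longer performs the matrix inversions; they run on the first attribute access and are cached from then on. A rough way to observe this (the timing scaffolding is illustrative, not part of the commit):

    import time

    import adaptive.learner.integrator_coeffs as coeff

    t0 = time.perf_counter()
    ns = coeff.ns  # first access: __getattr__ calls _coefficients() and caches it
    t1 = time.perf_counter()
    xi = coeff.xi  # later accesses: a plain lookup in the cached dict
    t2 = time.perf_counter()
    print(ns, t1 - t0, t2 - t1)  # the first delta should dominate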

adaptive/learner/integrator_learner.py

Lines changed: 23 additions & 34 deletions
@@ -10,37 +10,24 @@
 from scipy.linalg import norm
 from sortedcontainers import SortedSet
 
+import adaptive.learner.integrator_coeffs as coeff
 from adaptive.learner.base_learner import BaseLearner
 from adaptive.notebook_integration import ensure_holoviews
 from adaptive.utils import cache_latest, restore
 
-from .integrator_coeffs import (
-    T_left,
-    T_right,
-    V_inv,
-    Vcond,
-    alpha,
-    b_def,
-    eps,
-    gamma,
-    hint,
-    min_sep,
-    ndiv_max,
-    ns,
-    xi,
-)
-
 
 def _downdate(c, nans, depth):
     # This is algorithm 5 from the thesis of Pedro Gonnet.
-    b = b_def[depth].copy()
-    m = ns[depth] - 1
+    b = coeff.b_def[depth].copy()
+    m = coeff.ns[depth] - 1
     for i in nans:
-        b[m + 1] /= alpha[m]
-        xii = xi[depth][i]
-        b[m] = (b[m] + xii * b[m + 1]) / alpha[m - 1]
+        b[m + 1] /= coeff.alpha[m]
+        xii = coeff.xi[depth][i]
+        b[m] = (b[m] + xii * b[m + 1]) / coeff.alpha[m - 1]
         for j in range(m - 1, 0, -1):
-            b[j] = (b[j] + xii * b[j + 1] - gamma[j + 1] * b[j + 2]) / alpha[j - 1]
+            b[j] = (
+                b[j] + xii * b[j + 1] - coeff.gamma[j + 1] * b[j + 2]
+            ) / coeff.alpha[j - 1]
     b = b[1:]
 
     c[:m] -= c[m] / b[m] * b[:m]
@@ -62,7 +49,7 @@ def _zero_nans(fx):
 def _calc_coeffs(fx, depth):
     """Caution: this function modifies fx."""
     nans = _zero_nans(fx)
-    c_new = V_inv[depth] @ fx
+    c_new = coeff.V_inv[depth] @ fx
     if nans:
         fx[nans] = np.nan
         c_new = _downdate(c_new, nans, depth)
@@ -168,11 +155,11 @@ def T(self):
         left = self.a == self.parent.a
         right = self.b == self.parent.b
         assert left != right
-        return T_left if left else T_right
+        return coeff.T_left if left else coeff.T_right
 
     def refinement_complete(self, depth):
         """The interval has all the y-values to calculate the intergral."""
-        if len(self.data) < ns[depth]:
+        if len(self.data) < coeff.ns[depth]:
             return False
         return all(p in self.data for p in self.points(depth))
 
@@ -181,7 +168,7 @@ def points(self, depth=None):
             depth = self.depth
         a = self.a
         b = self.b
-        return (a + b) / 2 + (b - a) * xi[depth] / 2
+        return (a + b) / 2 + (b - a) * coeff.xi[depth] / 2
 
     def refine(self):
         self.depth += 1
@@ -234,7 +221,7 @@ def calc_ndiv(self):
        div = self.parent.c00 and self.c00 / self.parent.c00 > 2
        self.ndiv += div
 
-        if self.ndiv > ndiv_max and 2 * self.ndiv > self.rdepth:
+        if self.ndiv > coeff.ndiv_max and 2 * self.ndiv > self.rdepth:
            raise DivergentIntegralError
 
        if div:
@@ -243,7 +230,7 @@ def calc_ndiv(self):
 
     def update_ndiv_recursively(self):
         self.ndiv += 1
-        if self.ndiv > ndiv_max and 2 * self.ndiv > self.rdepth:
+        if self.ndiv > coeff.ndiv_max and 2 * self.ndiv > self.rdepth:
            raise DivergentIntegralError
 
        for child in self.children:
@@ -276,21 +263,23 @@ def complete_process(self, depth):
        if depth:
            # Refine
            c_diff = self.calc_err(c_old)
-            force_split = c_diff > hint * norm(self.c)
+            force_split = c_diff > coeff.hint * norm(self.c)
        else:
            # Split
            self.c00 = self.c[0]
 
            if self.parent.depth_complete is not None:
-                c_old = self.T[:, : ns[self.parent.depth_complete]] @ self.parent.c
+                c_old = (
+                    self.T[:, : coeff.ns[self.parent.depth_complete]] @ self.parent.c
+                )
                self.calc_err(c_old)
                self.calc_ndiv()
 
            for child in self.children:
                if child.depth_complete is not None:
                    child.calc_ndiv()
                if child.depth_complete == 0:
-                    c_old = child.T[:, : ns[self.depth_complete]] @ self.c
+                    c_old = child.T[:, : coeff.ns[self.depth_complete]] @ self.c
                    child.calc_err(c_old)
 
        if self.done_leaves is not None and not len(self.done_leaves):
@@ -320,7 +309,7 @@ def complete_process(self, depth):
            ival.done_leaves -= old_leaves
            ival = ival.parent
 
-        remove = self.err < (abs(self.igral) * eps * Vcond[depth])
+        remove = self.err < (abs(self.igral) * coeff.eps * coeff.Vcond[depth])
 
        return force_split, remove
 
@@ -496,8 +485,8 @@ def _fill_stack(self):
        points = ival.points()
 
        if (
-            points[1] - points[0] < points[0] * min_sep
-            or points[-1] - points[-2] < points[-2] * min_sep
+            points[1] - points[0] < points[0] * coeff.min_sep
+            or points[-1] - points[-2] < points[-2] * coeff.min_sep
        ):
            self.ivals.remove(ival)
        elif ival.depth == 3 or force_split:
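Note that the learner switches from the explicit from .integrator_coeffs import (...) block to import adaptive.learner.integrator_coeffs as coeff, presumably because a from-import would resolve every name through the new module __getattr__ at import time and compute the coefficients eagerly, defeating the laziness; accessing coeff.ns and friends at call time keeps the work deferred. For illustration (the variable n_max is hypothetical):

    # Eager: binds the value immediately, so the coefficients are computed at import time.
    from adaptive.learner.integrator_coeffs import ns

    # Lazy: binds only the module; coeff.ns is resolved (and cached) on first use.
    import adaptive.learner.integrator_coeffs as coeff

    n_max = coeff.ns[-1]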

adaptive/tests/test_cquad.py

Lines changed: 3 additions & 3 deletions
@@ -4,8 +4,8 @@
 import numpy as np
 import pytest
 
+import adaptive.learner.integrator_coeffs as coeff
 from adaptive.learner import IntegratorLearner
-from adaptive.learner.integrator_coeffs import ns
 from adaptive.learner.integrator_learner import DivergentIntegralError
 
 from .algorithm_4 import DivergentIntegralError as A4DivergentIntegralError
@@ -245,7 +245,7 @@ def test_removed_choose_mutiple_points_at_once():
     we should use the high-precision interval.
     """
     learner = IntegratorLearner(np.exp, bounds=(0, 1), tol=1e-15)
-    n = ns[-1] + 2 * (ns[0] - 2)  # first + two children (33+6=39)
+    n = coeff.ns[-1] + 2 * (coeff.ns[0] - 2)  # first + two children (33+6=39)
     xs, _ = learner.ask(n)
     for x in xs:
         learner.tell(x, learner.function(x))
@@ -257,7 +257,7 @@ def test_removed_ask_one_by_one():
     # This test should raise because integrating np.exp should be done
     # after the 33th point
     learner = IntegratorLearner(np.exp, bounds=(0, 1), tol=1e-15)
-    n = ns[-1] + 2 * (ns[0] - 2)  # first + two children (33+6=39)
+    n = coeff.ns[-1] + 2 * (coeff.ns[0] - 2)  # first + two children (33+6=39)
     for _ in range(n):
         xs, _ = learner.ask(1)
         for x in xs:

0 commit comments