Commit 2f2e80d

Merge branch 'test_loss_functions' into 'master'
test all the different loss functions in each test

See merge request qt/adaptive!135

2 parents d82fbae + d2d955e

2 files changed, +100 −43 lines changed
adaptive/learner/learner1D.py

Lines changed: 14 additions & 3 deletions
@@ -123,6 +123,7 @@ def triangle_loss(xs, ys):
 
 
 def curvature_loss_function(area_factor=1, euclid_factor=0.02, horizontal_factor=0.02):
+    # XXX: add a doc-string
     @uses_nth_neighbors(1)
     def curvature_loss(xs, ys):
         xs_middle = xs[1:3]
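
A hedged usage sketch (my addition, not part of the commit): curvature_loss_function() returns a loss callable which, exactly as the test table below does, is passed to Learner1D through the loss_per_interval keyword. The target function here is made up.

    # Hypothetical example function; only the keyword usage is the point.
    from adaptive import Learner1D
    from adaptive.learner.learner1D import curvature_loss_function

    learner = Learner1D(lambda x: x**2, bounds=(-1, 1),
                        loss_per_interval=curvature_loss_function())
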
@@ -227,6 +228,11 @@ def __init__(self, function, bounds, loss_per_interval=None):
         self.losses = {}
         self.losses_combined = {}
 
+        # When the scale changes by a factor 2, the losses are
+        # recomputed. This is tunable so that we can test
+        # the learner's behavior in the tests.
+        self._recompute_losses_factor = 2
+
         self.data = {}
         self.pending_points = set()
 
@@ -446,7 +452,7 @@ def tell(self, x, y):
         self._update_losses(x, real=True)
 
         # If the scale has increased enough, recompute all losses.
-        if self._scale[1] > 2 * self._oldscale[1]:
+        if self._scale[1] > self._recompute_losses_factor * self._oldscale[1]:
 
             for interval in self.losses:
                 self._update_interpolated_loss_in_interval(*interval)
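
For context (a sketch of my own, not from the diff): with the default factor of 2, all losses are only recomputed once the y-scale has doubled, whereas the tests below set the factor to 1 so that any increase of the scale triggers a full recomputation.

    # How the new knob is used in the tests below; the function is made up.
    from adaptive import Learner1D

    learner = Learner1D(lambda x: x**3, bounds=(-1, 1))
    learner._recompute_losses_factor = 1  # recompute losses on every scale increase
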
@@ -562,8 +568,13 @@ def _ask_points_without_adding(self, n):
         def finite_loss(loss, xs):
             # If the loss is infinite we return the
             # distance between the two points.
-            return (loss if not math.isinf(loss)
-                    else (xs[1] - xs[0]) / self._scale[0])
+            if math.isinf(loss):
+                loss = (xs[1] - xs[0]) / self._scale[0]
+
+            # We round the loss to 12 digits so that losses
+            # that are equal up to numerical precision are
+            # considered equal.
+            return round(loss, ndigits=12)
 
         quals = [(-finite_loss(loss, x), x, 1)
                  for x, loss in self.losses_combined.items()]
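
To illustrate the rounding (my example, not part of the commit): two losses that differ only by floating-point noise now compare equal, so the ordering of candidate intervals no longer depends on that noise.

    # Losses separated only by float noise become identical after rounding.
    a = 0.1 + 0.2   # 0.30000000000000004
    b = 0.3
    assert a != b
    assert round(a, ndigits=12) == round(b, ndigits=12)
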

adaptive/tests/test_learners.py

Lines changed: 86 additions & 40 deletions
@@ -15,18 +15,39 @@
 import pytest
 import scipy.spatial
 
-from ..learner import (AverageLearner, BalancingLearner, DataSaver,
+import adaptive
+from adaptive.learner import (AverageLearner, BalancingLearner, DataSaver,
                               IntegratorLearner, Learner1D, Learner2D, LearnerND)
-from ..runner import simple
+from adaptive.runner import simple
 
 
 try:
     import skopt
-    from ..learner import SKOptLearner
+    from adaptive.learner import SKOptLearner
 except ModuleNotFoundError:
     SKOptLearner = None
 
 
+LOSS_FUNCTIONS = {
+    Learner1D: ('loss_per_interval', (
+        adaptive.learner.learner1D.default_loss,
+        adaptive.learner.learner1D.uniform_loss,
+        adaptive.learner.learner1D.curvature_loss_function(),
+    )),
+    Learner2D: ('loss_per_triangle', (
+        adaptive.learner.learner2D.default_loss,
+        adaptive.learner.learner2D.uniform_loss,
+        adaptive.learner.learner2D.minimize_triangle_surface_loss,
+        adaptive.learner.learner2D.resolution_loss_function(),
+    )),
+    LearnerND: ('loss_per_simplex', (
+        adaptive.learner.learnerND.default_loss,
+        adaptive.learner.learnerND.std_loss,
+        adaptive.learner.learnerND.uniform_loss,
+    )),
+}
+
+
 def generate_random_parametrization(f):
     """Return a realization of 'f' with parameters bound to random values.
 
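
Reading this table (an illustration of mine, not part of the diff): each entry pairs a constructor keyword with the loss callables the tests should exercise. The Learner2D entry, for instance, means that every test parametrized for Learner2D also runs once per loss passed as loss_per_triangle. The ring function below is a made-up stand-in for the test functions defined later in this file.

    import adaptive
    from adaptive import Learner2D

    def ring(xy):
        x, y = xy
        return x**2 + y**2

    # The Learner2D entry of LOSS_FUNCTIONS, written out.
    losses = (adaptive.learner.learner2D.default_loss,
              adaptive.learner.learner2D.uniform_loss,
              adaptive.learner.learner2D.minimize_triangle_surface_loss,
              adaptive.learner.learner2D.resolution_loss_function())
    learners = [Learner2D(ring, bounds=[(-1, 1), (-1, 1)], loss_per_triangle=loss)
                for loss in losses]
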
@@ -74,7 +95,6 @@ def maybe_skip(learner):
 # All parameters except the first must be annotated with a callable that
 # returns a random value for that parameter.
 
-
 @learn_with(Learner1D, bounds=(-1, 1))
 def quadratic(x, m: uniform(0, 10), b: uniform(0, 1)):
     return m * x**2 + b
@@ -108,20 +128,33 @@ def gaussian(n):
 
 # Decorators for tests.
 
-def run_with(*learner_types):
+
+# Create a sequence of learner parameters by adding all
+# possible loss functions to an existing parameter set.
+def add_loss_to_params(learner_type, existing_params):
+    if learner_type not in LOSS_FUNCTIONS:
+        return [existing_params]
+    loss_param, loss_functions = LOSS_FUNCTIONS[learner_type]
+    loss_params = [{loss_param: f} for f in loss_functions]
+    return [dict(**existing_params, **lp) for lp in loss_params]
+
+
+def run_with(*learner_types, with_all_loss_functions=True):
     pars = []
     for l in learner_types:
         has_marker = isinstance(l, tuple)
         if has_marker:
             marker, l = l
         for f, k in learner_function_combos[l]:
-            # Check if learner was marked with our `xfail` decorator
-            # XXX: doesn't work when feeding kwargs to xfail.
-            if has_marker:
-                pars.append(pytest.param(l, f, dict(k),
-                                         marks=[marker]))
-            else:
-                pars.append((l, f, dict(k)))
+            ks = add_loss_to_params(l, k) if with_all_loss_functions else [k]
+            for k in ks:
+                # Check if learner was marked with our `xfail` decorator
+                # XXX: doesn't work when feeding kwargs to xfail.
+                if has_marker:
+                    pars.append(pytest.param(l, f, dict(k),
+                                             marks=[marker]))
+                else:
+                    pars.append((l, f, dict(k)))
     return pytest.mark.parametrize('learner_type, f, learner_kwargs', pars)
 
 
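To make the expansion concrete (a hypothetical call of mine, reusing the Learner1D bounds from elsewhere in this file): add_loss_to_params turns one kwargs dict into one dict per loss function, and run_with then emits one pytest parametrization per resulting dict. Learner types without an entry in LOSS_FUNCTIONS pass through unchanged.

    # Inside test_learners.py this would evaluate to three parameter sets:
    params = add_loss_to_params(Learner1D, {'bounds': (-1, 1)})
    # [{'bounds': (-1, 1), 'loss_per_interval': default_loss},
    #  {'bounds': (-1, 1), 'loss_per_interval': uniform_loss},
    #  {'bounds': (-1, 1), 'loss_per_interval': <curvature loss closure>}]
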
@@ -196,22 +229,19 @@ def f(x):
     simple(learner, goal=lambda l: l.npoints > 10)
 
 
-@run_with(xfail(Learner1D), Learner2D, LearnerND)
+@run_with(Learner1D, Learner2D, LearnerND)
 def test_adding_existing_data_is_idempotent(learner_type, f, learner_kwargs):
     """Adding already existing data is an idempotent operation.
 
     Either it is idempotent, or it is an error.
     This is the only sane behaviour.
-
-    This test will fail for the Learner1D because the losses are normalized by
-    _scale which is updated after every point. After one iteration of adding
-    points, the _scale could be different from what it was when calculating
-    the losses of the intervals. Readding the points a second time means
-    that the losses are now all normalized by the correct _scale.
     """
     f = generate_random_parametrization(f)
     learner = learner_type(f, **learner_kwargs)
     control = learner_type(f, **learner_kwargs)
+    if learner_type is Learner1D:
+        learner._recompute_losses_factor = 1
+        control._recompute_losses_factor = 1
 
     N = random.randint(10, 30)
     control.ask(N)
@@ -265,14 +295,11 @@ def test_adding_non_chosen_data(learner_type, f, learner_kwargs):
     assert set(pls) == set(cpls)
 
 
-@run_with(xfail(Learner1D), xfail(Learner2D), xfail(LearnerND), AverageLearner)
+@run_with(Learner1D, xfail(Learner2D), xfail(LearnerND), AverageLearner)
 def test_point_adding_order_is_irrelevant(learner_type, f, learner_kwargs):
     """The order of calls to 'tell' between calls to 'ask'
     is arbitrary.
 
-    This test will fail for the Learner1D for the same reason as described in
-    the doc-string in `test_adding_existing_data_is_idempotent`.
-
     This test will fail for the Learner2D because
     `interpolate.interpnd.estimate_gradients_2d_global` will give different
     outputs based on the order of the triangles and values in
@@ -282,6 +309,10 @@ def test_point_adding_order_is_irrelevant(learner_type, f, learner_kwargs):
     learner = learner_type(f, **learner_kwargs)
     control = learner_type(f, **learner_kwargs)
 
+    if learner_type is Learner1D:
+        learner._recompute_losses_factor = 1
+        control._recompute_losses_factor = 1
+
     N = random.randint(10, 30)
     control.ask(N)
     xs, _ = learner.ask(N)
@@ -353,7 +384,7 @@ def test_learner_performance_is_invariant_under_scaling(learner_type, f, learner_kwargs):
     learner = learner_type(lambda x: yscale * f(np.array(x) / xscale),
                            **l_kwargs)
 
-    npoints = random.randrange(1000, 2000)
+    npoints = random.randrange(300, 500)
 
     for n in range(npoints):
         cxs, _ = control.ask(1)
@@ -366,10 +397,11 @@ def test_learner_performance_is_invariant_under_scaling(learner_type, f, learner_kwargs):
         assert np.allclose(xs_unscaled, cxs)
 
     # Check if the losses are close
-    assert abs(learner.loss() - control.loss()) / learner.loss() < 1e-11
+    assert math.isclose(learner.loss(), control.loss(), rel_tol=1e-10)
 
 
-@run_with(Learner1D, Learner2D, LearnerND, AverageLearner)
+@run_with(Learner1D, Learner2D, LearnerND, AverageLearner,
+          with_all_loss_functions=False)
 def test_balancing_learner(learner_type, f, learner_kwargs):
     """Test if the BalancingLearner works with the different types of learners."""
     learners = [learner_type(generate_random_parametrization(f), **learner_kwargs)
@@ -403,19 +435,22 @@ def test_balancing_learner(learner_type, f, learner_kwargs):
 
 
 @run_with(Learner1D, Learner2D, LearnerND, AverageLearner,
-          maybe_skip(SKOptLearner), IntegratorLearner)
+          maybe_skip(SKOptLearner), IntegratorLearner,
+          with_all_loss_functions=False)
 def test_saving(learner_type, f, learner_kwargs):
     f = generate_random_parametrization(f)
     learner = learner_type(f, **learner_kwargs)
     control = learner_type(f, **learner_kwargs)
+    if learner_type is Learner1D:
+        learner._recompute_losses_factor = 1
+        control._recompute_losses_factor = 1
     simple(learner, lambda l: l.npoints > 100)
     fd, path = tempfile.mkstemp()
     try:
         learner.save(path)
         control.load(path)
-        if learner_type is not Learner1D:
-            # Because different scales result in differnt losses
-            np.testing.assert_almost_equal(learner.loss(), control.loss())
+
+        np.testing.assert_almost_equal(learner.loss(), control.loss())
 
         # Try if the control is runnable
         simple(control, lambda l: l.npoints > 200)
@@ -424,24 +459,29 @@ def test_saving(learner_type, f, learner_kwargs):
 
 
 @run_with(Learner1D, Learner2D, LearnerND, AverageLearner,
-          maybe_skip(SKOptLearner), IntegratorLearner)
+          maybe_skip(SKOptLearner), IntegratorLearner,
+          with_all_loss_functions=False)
 def test_saving_of_balancing_learner(learner_type, f, learner_kwargs):
     f = generate_random_parametrization(f)
     learner = BalancingLearner([learner_type(f, **learner_kwargs)])
     control = BalancingLearner([learner_type(f, **learner_kwargs)])
 
+    if learner_type is Learner1D:
+        for l, c in zip(learner.learners, control.learners):
+            l._recompute_losses_factor = 1
+            c._recompute_losses_factor = 1
+
     simple(learner, lambda l: l.learners[0].npoints > 100)
     folder = tempfile.mkdtemp()
 
     def fname(learner):
         return folder + 'test'
 
     try:
-        learner.save(fname)
-        control.load(fname)
-        if learner_type is not Learner1D:
-            # Because different scales result in differnt losses
-            np.testing.assert_almost_equal(learner.loss(), control.loss())
+        learner.save(fname=fname)
+        control.load(fname=fname)
+
+        np.testing.assert_almost_equal(learner.loss(), control.loss())
 
         # Try if the control is runnable
         simple(control, lambda l: l.learners[0].npoints > 200)
@@ -450,21 +490,27 @@ def fname(learner):
 
 
 @run_with(Learner1D, Learner2D, LearnerND, AverageLearner,
-          maybe_skip(SKOptLearner), IntegratorLearner)
+          maybe_skip(SKOptLearner), IntegratorLearner,
+          with_all_loss_functions=False)
 def test_saving_with_datasaver(learner_type, f, learner_kwargs):
     f = generate_random_parametrization(f)
     g = lambda x: {'y': f(x), 't': random.random()}
     arg_picker = operator.itemgetter('y')
     learner = DataSaver(learner_type(g, **learner_kwargs), arg_picker)
     control = DataSaver(learner_type(g, **learner_kwargs), arg_picker)
+
+    if learner_type is Learner1D:
+        learner.learner._recompute_losses_factor = 1
+        control.learner._recompute_losses_factor = 1
+
     simple(learner, lambda l: l.npoints > 100)
     fd, path = tempfile.mkstemp()
     try:
         learner.save(path)
         control.load(path)
-        if learner_type is not Learner1D:
-            # Because different scales result in differnt losses
-            np.testing.assert_almost_equal(learner.loss(), control.loss())
+
+        np.testing.assert_almost_equal(learner.loss(), control.loss())
+
         assert learner.extra_data == control.extra_data
 
         # Try if the control is runnable
