@@ -229,22 +229,19 @@ def f(x):
     simple(learner, goal=lambda l: l.npoints > 10)
 
 
-@run_with(xfail(Learner1D), Learner2D, LearnerND)
+@run_with(Learner1D, Learner2D, LearnerND)
 def test_adding_existing_data_is_idempotent(learner_type, f, learner_kwargs):
     """Adding already existing data is an idempotent operation.
 
     Either it is idempotent, or it is an error.
     This is the only sane behaviour.
-
-    This test will fail for the Learner1D because the losses are normalized by
-    _scale which is updated after every point. After one iteration of adding
-    points, the _scale could be different from what it was when calculating
-    the losses of the intervals. Readding the points a second time means
-    that the losses are now all normalized by the correct _scale.
     """
     f = generate_random_parametrization(f)
     learner = learner_type(f, **learner_kwargs)
     control = learner_type(f, **learner_kwargs)
+    if learner_type is Learner1D:
+        learner._recompute_losses_factor = 1
+        control._recompute_losses_factor = 1
 
     N = random.randint(10, 30)
     control.ask(N)
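Note: the docstring removed above explains the old xfail. Learner1D normalizes its interval losses by _scale, which is updated after every point, so a loss can depend on when it was computed. The fix sets the private _recompute_losses_factor attribute to 1 so that (assuming the attribute works as its name and the removed docstring suggest) all losses are renormalized on every scale update rather than only after the scale has grown by the default factor of 2. A minimal sketch of the behaviour the tests now rely on, assuming the adaptive package and this private attribute:

import adaptive

def f(x):
    return x**3

learner = adaptive.Learner1D(f, bounds=(-1, 1))
control = adaptive.Learner1D(f, bounds=(-1, 1))

# Recompute all interval losses whenever the scale changes, instead of
# only once it has grown past the default factor of 2.
learner._recompute_losses_factor = 1
control._recompute_losses_factor = 1

xs = [-1, -0.5, 0, 0.5, 1]
for x in xs:
    control.tell(x, f(x))
for x in xs + xs:  # telling the same points twice should be a no-op
    learner.tell(x, f(x))

assert abs(learner.loss() - control.loss()) < 1e-12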
@@ -298,14 +295,11 @@ def test_adding_non_chosen_data(learner_type, f, learner_kwargs):
     assert set(pls) == set(cpls)
 
 
-@run_with(xfail(Learner1D), xfail(Learner2D), xfail(LearnerND), AverageLearner)
+@run_with(Learner1D, xfail(Learner2D), xfail(LearnerND), AverageLearner)
 def test_point_adding_order_is_irrelevant(learner_type, f, learner_kwargs):
     """The order of calls to 'tell' between calls to 'ask'
     is arbitrary.
 
-    This test will fail for the Learner1D for the same reason as described in
-    the doc-string in `test_adding_existing_data_is_idempotent`.
-
     This test will fail for the Learner2D because
     `interpolate.interpnd.estimate_gradients_2d_global` will give different
     outputs based on the order of the triangles and values in
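Note: the Learner2D and LearnerND cases stay xfailed because SciPy's global gradient estimation depends on the triangulation order, as the remaining docstring lines say. A hypothetical illustration of that sensitivity (estimate_gradients_2d_global is a private SciPy helper; this snippet only mirrors the docstring's claim and is not part of the test suite):

import numpy as np
from scipy.interpolate.interpnd import estimate_gradients_2d_global
from scipy.spatial import Delaunay

rng = np.random.RandomState(0)
points = rng.uniform(-1, 1, (30, 2))
values = np.sin(points[:, 0]) * points[:, 1]

# Estimate gradients twice, feeding the same points in a different order.
order = rng.permutation(len(points))
grad_a = estimate_gradients_2d_global(Delaunay(points), values, tol=1e-6)
grad_b = estimate_gradients_2d_global(Delaunay(points[order]), values[order], tol=1e-6)

# The estimates for the same physical points need not agree exactly:
print(np.abs(grad_a[order] - grad_b).max())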
@@ -315,6 +309,10 @@ def test_point_adding_order_is_irrelevant(learner_type, f, learner_kwargs):
     learner = learner_type(f, **learner_kwargs)
     control = learner_type(f, **learner_kwargs)
 
+    if learner_type is Learner1D:
+        learner._recompute_losses_factor = 1
+        control._recompute_losses_factor = 1
+
     N = random.randint(10, 30)
     control.ask(N)
     xs, _ = learner.ask(N)
@@ -443,14 +441,16 @@ def test_saving(learner_type, f, learner_kwargs):
     f = generate_random_parametrization(f)
     learner = learner_type(f, **learner_kwargs)
     control = learner_type(f, **learner_kwargs)
+    if learner_type is Learner1D:
+        learner._recompute_losses_factor = 1
+        control._recompute_losses_factor = 1
     simple(learner, lambda l: l.npoints > 100)
     fd, path = tempfile.mkstemp()
     try:
         learner.save(path)
         control.load(path)
-        if learner_type is not Learner1D:
-            # Because different scales result in differnt losses
-            np.testing.assert_almost_equal(learner.loss(), control.loss())
+
+        np.testing.assert_almost_equal(learner.loss(), control.loss())
 
         # Try if the control is runnable
         simple(control, lambda l: l.npoints > 200)
@@ -466,18 +466,22 @@ def test_saving_of_balancing_learner(learner_type, f, learner_kwargs):
     learner = BalancingLearner([learner_type(f, **learner_kwargs)])
     control = BalancingLearner([learner_type(f, **learner_kwargs)])
 
+    if learner_type is Learner1D:
+        for l, c in zip(learner.learners, control.learners):
+            l._recompute_losses_factor = 1
+            c._recompute_losses_factor = 1
+
     simple(learner, lambda l: l.learners[0].npoints > 100)
     folder = tempfile.mkdtemp()
 
     def fname(learner):
         return folder + 'test'
 
     try:
-        learner.save(fname)
-        control.load(fname)
-        if learner_type is not Learner1D:
-            # Because different scales result in differnt losses
-            np.testing.assert_almost_equal(learner.loss(), control.loss())
+        learner.save(fname=fname)
+        control.load(fname=fname)
+
+        np.testing.assert_almost_equal(learner.loss(), control.loss())
 
         # Try if the control is runnable
         simple(control, lambda l: l.learners[0].npoints > 200)
@@ -494,14 +498,19 @@ def test_saving_with_datasaver(learner_type, f, learner_kwargs):
     arg_picker = operator.itemgetter('y')
     learner = DataSaver(learner_type(g, **learner_kwargs), arg_picker)
     control = DataSaver(learner_type(g, **learner_kwargs), arg_picker)
+
+    if learner_type is Learner1D:
+        learner.learner._recompute_losses_factor = 1
+        control.learner._recompute_losses_factor = 1
+
     simple(learner, lambda l: l.npoints > 100)
     fd, path = tempfile.mkstemp()
     try:
         learner.save(path)
         control.load(path)
-        if learner_type is not Learner1D:
-            # Because different scales result in differnt losses
-            np.testing.assert_almost_equal(learner.loss(), control.loss())
+
+        np.testing.assert_almost_equal(learner.loss(), control.loss())
+
         assert learner.extra_data == control.extra_data
 
         # Try if the control is runnable