@@ -469,6 +469,9 @@ def test_learner_performance_is_invariant_under_scaling(
     """
     # for now we just scale X and Y by random factors
     f = generate_random_parametrization(f)
+    if learner_type is AverageLearner1D:
+        # no noise for AverageLearner1D to make it deterministic
+        f = ft.partial(f, sigma=0)
 
     control_kwargs = dict(learner_kwargs)
     control = learner_type(f, **control_kwargs)
@@ -478,7 +481,14 @@ def test_learner_performance_is_invariant_under_scaling(
 
     l_kwargs = dict(learner_kwargs)
     l_kwargs["bounds"] = xscale * np.array(l_kwargs["bounds"])
-    learner = learner_type(lambda x: yscale * f(np.array(x) / xscale), **l_kwargs)
+
+    def scale_x(x):
+        if isinstance(learner, AverageLearner1D):
+            seed, x = x
+            return (seed, x / xscale)
+        return np.array(x) / xscale
+
+    learner = learner_type(lambda x: yscale * f(scale_x(x)), **l_kwargs)
 
     if learner_type in [Learner1D, LearnerND, AverageLearner1D]:
         learner._recompute_losses_factor = 1
@@ -497,7 +507,7 @@ def test_learner_performance_is_invariant_under_scaling(
     learner.tell_many(xs, [learner.function(x) for x in xs])
 
     # Check whether the points returned are the same
-    xs_unscaled = np.array(xs) / xscale
+    xs_unscaled = [scale_x(x) for x in xs]
 
     assert np.allclose(xs_unscaled, cxs)
 
     # Check if the losses are close