@@ -16,10 +16,17 @@
import scipy.spatial

from ..learner import (AverageLearner, BalancingLearner, DataSaver,
-                       IntegratorLearner, Learner1D, Learner2D, LearnerND, SKOptLearner)
+                       IntegratorLearner, Learner1D, Learner2D, LearnerND)
from ..runner import simple


+try:
+    import skopt
+    from ..learner import SKOptLearner
+except ModuleNotFoundError:
+    SKOptLearner = None
+
+
def generate_random_parametrization(f):
    """Return a realization of 'f' with parameters bound to random values.

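The guarded import above is the standard optional-dependency pattern: attempt the import once at module load and bind the name to `None` on failure, so the rest of the module can test availability with a plain `is None` check instead of crashing at collection time. A minimal runnable sketch of the same guard, using scikit-optimize's real `Optimizer` class:

# Minimal sketch of the optional-import guard used above.  If scikit-optimize
# is not installed, the name is bound to None instead of raising at import time.
try:
    from skopt import Optimizer
except ModuleNotFoundError:
    Optimizer = None

print('skopt available:', Optimizer is not None)
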
@@ -60,6 +67,10 @@ def xfail(learner):
    return pytest.mark.xfail, learner


+def maybe_skip(learner):
+    return (pytest.mark.skip, learner) if learner is None else learner
+
+
# All parameters except the first must be annotated with a callable that
# returns a random value for that parameter.

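`maybe_skip` mirrors the existing `xfail` helper: given a learner class it passes it through untouched, and given `None` (the sentinel left by the failed `skopt` import) it returns a `(pytest.mark.skip, learner)` tuple in the same shape `run_with` already understands. A small sketch of both branches (the `DummyLearner` class is a hypothetical stand-in):

import pytest

def maybe_skip(learner):
    # None is the sentinel meaning "optional dependency missing".
    return (pytest.mark.skip, learner) if learner is None else learner

class DummyLearner:  # hypothetical stand-in for a real learner class
    pass

print(maybe_skip(DummyLearner))  # the class itself, no mark attached
print(maybe_skip(None))          # (skip mark, None) tuple for run_with
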
@@ -100,15 +111,15 @@ def gaussian(n):
def run_with(*learner_types):
    pars = []
    for l in learner_types:
-        is_xfail = isinstance(l, tuple)
-        if is_xfail:
-            xfail, l = l
+        has_marker = isinstance(l, tuple)
+        if has_marker:
+            marker, l = l
        for f, k in learner_function_combos[l]:
            # Check if learner was marked with our `xfail` decorator
            # XXX: doesn't work when feeding kwargs to xfail.
-            if is_xfail:
+            if has_marker:
                pars.append(pytest.param(l, f, dict(k),
-                                         marks=[pytest.mark.xfail]))
+                                         marks=[marker]))
            else:
                pars.append((l, f, dict(k)))
    return pytest.mark.parametrize('learner_type, f, learner_kwargs', pars)
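
With the rename from `is_xfail` to `has_marker`, `run_with` no longer hard-codes `pytest.mark.xfail`: it applies whatever mark the `(marker, learner)` tuple carries, so `xfail(...)` and `maybe_skip(...)` share one code path. Roughly, the decorator it builds is equivalent to the following (the learner and function names are illustrative placeholders):

import pytest

class SomeLearner:  # placeholder for an available learner class
    pass

def f(x):  # placeholder test function
    return x

# A plain tuple for an unmarked learner, and a pytest.param carrying the
# mark for a learner wrapped by xfail(...) or maybe_skip(...).
pars = [
    (SomeLearner, f, {}),
    pytest.param(None, f, {}, marks=[pytest.mark.skip]),
]
decorator = pytest.mark.parametrize('learner_type, f, learner_kwargs', pars)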
@@ -391,8 +402,8 @@ def test_balancing_learner(learner_type, f, learner_kwargs):
    assert all(l.npoints > 10 for l in learner.learners), [l.npoints for l in learner.learners]


-@run_with(Learner1D, Learner2D, LearnerND, AverageLearner, SKOptLearner,
-          IntegratorLearner)
+@run_with(Learner1D, Learner2D, LearnerND, AverageLearner,
+          maybe_skip(SKOptLearner), IntegratorLearner)
def test_saving(learner_type, f, learner_kwargs):
    f = generate_random_parametrization(f)
    learner = learner_type(f, **learner_kwargs)
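
The net effect on the saving tests: with scikit-optimize installed, the `SKOptLearner` cases run exactly as before; without it, `SKOptLearner` is `None` and its parametrizations are collected as skips instead of breaking the whole module at import time. A self-contained sketch of that collection behavior (the test name and body are illustrative):

import pytest

SKOptLearner = None  # what the guarded import leaves when skopt is missing

@pytest.mark.parametrize('learner_type', [
    pytest.param(SKOptLearner,
                 marks=[pytest.mark.skip] if SKOptLearner is None else []),
])
def test_sketch(learner_type):
    assert learner_type is not None  # never reached when the param is skipped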
@@ -412,8 +423,8 @@ def test_saving(learner_type, f, learner_kwargs):
    os.remove(path)


-@run_with(Learner1D, Learner2D, LearnerND, AverageLearner, SKOptLearner,
-          IntegratorLearner)
+@run_with(Learner1D, Learner2D, LearnerND, AverageLearner,
+          maybe_skip(SKOptLearner), IntegratorLearner)
def test_saving_of_balancing_learner(learner_type, f, learner_kwargs):
    f = generate_random_parametrization(f)
    learner = BalancingLearner([learner_type(f, **learner_kwargs)])
@@ -438,8 +449,8 @@ def test_saving_of_balancing_learner(learner_type, f, learner_kwargs):
    shutil.rmtree(folder)


-@run_with(Learner1D, Learner2D, LearnerND, AverageLearner, SKOptLearner,
-          IntegratorLearner)
+@run_with(Learner1D, Learner2D, LearnerND, AverageLearner,
+          maybe_skip(SKOptLearner), IntegratorLearner)
def test_saving_with_datasaver(learner_type, f, learner_kwargs):
    g = lambda x: {'y': f(x), 't': random.random()}
    f = generate_random_parametrization(f)