SKOptLearner = None


+LOSS_FUNCTIONS = {
+    Learner1D: ('loss_per_interval', (
+        adaptive.learner.learner1D.default_loss,
+        adaptive.learner.learner1D.uniform_loss,
+        adaptive.learner.learner1D.curvature_loss_function(),
+    )),
+    Learner2D: ('loss_per_triangle', (
+        adaptive.learner.learner2D.default_loss,
+        adaptive.learner.learner2D.uniform_loss,
+        adaptive.learner.learner2D.minimize_triangle_surface_loss,
+        adaptive.learner.learner2D.resolution_loss_function(),
+    )),
+    LearnerND: ('loss_per_simplex', (
+        adaptive.learner.learnerND.default_loss,
+        adaptive.learner.learnerND.std_loss,
+        adaptive.learner.learnerND.uniform_loss,
+    )),
+}
+
+
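Each LOSS_FUNCTIONS entry maps a learner class to the keyword argument its loss is passed under, plus the loss callables to sweep over in the tests. A minimal sketch of how one entry expands into learner kwargs (illustrative only; the actual expansion is done by add_loss_to_params further down in this diff):

    loss_kwarg, losses = LOSS_FUNCTIONS[Learner1D]
    # -> ('loss_per_interval', (default_loss, uniform_loss, curvature_loss_function()))
    variants = [dict(bounds=(-1, 1), **{loss_kwarg: loss}) for loss in losses]
    # -> three kwargs dicts for Learner1D, one per loss function
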
def generate_random_parametrization(f):
    """Return a realization of 'f' with parameters bound to random values.
@@ -75,38 +95,26 @@ def maybe_skip(learner):
# All parameters except the first must be annotated with a callable that
# returns a random value for that parameter.

-
-@learn_with(Learner1D, bounds=(-1, 1), loss_per_interval=adaptive.learner.learner1D.default_loss)
-@learn_with(Learner1D, bounds=(-1, 1), loss_per_interval=adaptive.learner.learner1D.uniform_loss)
-@learn_with(Learner1D, bounds=(-1, 1), loss_per_interval=adaptive.learner.learner1D.curvature_loss_function())
+@learn_with(Learner1D, bounds=(-1, 1))
def quadratic(x, m: uniform(0, 10), b: uniform(0, 1)):
    return m * x**2 + b


-@learn_with(Learner1D, bounds=(-1, 1), loss_per_interval=adaptive.learner.learner1D.default_loss)
-@learn_with(Learner1D, bounds=(-1, 1), loss_per_interval=adaptive.learner.learner1D.uniform_loss)
-@learn_with(Learner1D, bounds=(-1, 1), loss_per_interval=adaptive.learner.learner1D.curvature_loss_function())
+@learn_with(Learner1D, bounds=(-1, 1))
def linear_with_peak(x, d: uniform(-1, 1)):
    a = 0.01
    return x + a**2 / (a**2 + (x - d)**2)


-@learn_with(LearnerND, bounds=((-1, 1), (-1, 1)), loss_per_simplex=adaptive.learner.learnerND.default_loss)
-@learn_with(LearnerND, bounds=((-1, 1), (-1, 1)), loss_per_simplex=adaptive.learner.learnerND.std_loss)
-@learn_with(LearnerND, bounds=((-1, 1), (-1, 1)), loss_per_simplex=adaptive.learner.learnerND.uniform_loss)
-@learn_with(Learner2D, bounds=((-1, 1), (-1, 1)), loss_per_triangle=adaptive.learner.learner2D.default_loss)
-@learn_with(Learner2D, bounds=((-1, 1), (-1, 1)), loss_per_triangle=adaptive.learner.learner2D.uniform_loss)
-@learn_with(Learner2D, bounds=((-1, 1), (-1, 1)), loss_per_triangle=adaptive.learner.learner2D.minimize_triangle_surface_loss)
-@learn_with(Learner2D, bounds=((-1, 1), (-1, 1)), loss_per_triangle=adaptive.learner.learner2D.resolution_loss_function())
+@learn_with(LearnerND, bounds=((-1, 1), (-1, 1)))
+@learn_with(Learner2D, bounds=((-1, 1), (-1, 1)))
def ring_of_fire(xy, d: uniform(0.2, 1)):
    a = 0.2
    x, y = xy
    return x + math.exp(-(x**2 + y**2 - d**2)**2 / a**4)


-@learn_with(LearnerND, bounds=((-1, 1), (-1, 1), (-1, 1)), loss_per_simplex=adaptive.learner.learnerND.default_loss)
-@learn_with(LearnerND, bounds=((-1, 1), (-1, 1), (-1, 1)), loss_per_simplex=adaptive.learner.learnerND.std_loss)
-@learn_with(LearnerND, bounds=((-1, 1), (-1, 1), (-1, 1)), loss_per_simplex=adaptive.learner.learnerND.uniform_loss)
+@learn_with(LearnerND, bounds=((-1, 1), (-1, 1), (-1, 1)))
def sphere_of_fire(xyz, d: uniform(0.2, 1)):
    a = 0.2
    x, y, z = xyz
@@ -120,20 +128,33 @@ def gaussian(n):

# Decorators for tests.

+
+# Create a sequence of learner parameters by adding all
+# possible loss functions to an existing parameter set.
+def add_loss_to_params(learner_type, existing_params):
+    if learner_type not in LOSS_FUNCTIONS:
+        return [existing_params]
+    loss_param, loss_functions = LOSS_FUNCTIONS[learner_type]
+    loss_params = [{loss_param: f} for f in loss_functions]
+    return [dict(**existing_params, **lp) for lp in loss_params]
+
+
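To make the helper's behavior concrete, here is an illustrative sketch of what it returns; the values are the loss callables from LOSS_FUNCTIONS, and learner types without an entry (e.g. AverageLearner) pass their params through unchanged:

    add_loss_to_params(Learner1D, {'bounds': (-1, 1)})
    # -> [{'bounds': (-1, 1), 'loss_per_interval': default_loss},
    #     {'bounds': (-1, 1), 'loss_per_interval': uniform_loss},
    #     {'bounds': (-1, 1), 'loss_per_interval': curvature_loss_function()}]

    add_loss_to_params(AverageLearner, {'atol': 0.1})
    # -> [{'atol': 0.1}]   (no LOSS_FUNCTIONS entry, params unchanged)
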
def run_with(*learner_types):
    pars = []
    for l in learner_types:
        has_marker = isinstance(l, tuple)
        if has_marker:
            marker, l = l
        for f, k in learner_function_combos[l]:
-            # Check if learner was marked with our `xfail` decorator
-            # XXX: doesn't work when feeding kwargs to xfail.
-            if has_marker:
-                pars.append(pytest.param(l, f, dict(k),
-                                         marks=[marker]))
-            else:
-                pars.append((l, f, dict(k)))
+            ks = add_loss_to_params(l, k)
+            for k in ks:
+                # Check if learner was marked with our `xfail` decorator
+                # XXX: doesn't work when feeding kwargs to xfail.
+                if has_marker:
+                    pars.append(pytest.param(l, f, dict(k),
+                                             marks=[marker]))
+                else:
+                    pars.append((l, f, dict(k)))
    return pytest.mark.parametrize('learner_type, f, learner_kwargs', pars)
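
For context, a hypothetical test consuming this decorator could look as follows; the test name, body, and the adaptive.runner.simple call are illustrative, not part of this diff. The point is that each generated (learner_type, f, learner_kwargs) triple now also varies over the loss functions:

    @run_with(Learner1D, Learner2D, LearnerND)
    def test_can_run_with_every_loss(learner_type, f, learner_kwargs):
        # learner_kwargs includes bounds plus one loss function per parametrization
        learner = learner_type(generate_random_parametrization(f), **learner_kwargs)
        adaptive.runner.simple(learner, goal=lambda l: l.npoints > 10)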