@@ -21,44 +21,42 @@ def uses_nth_neighbors(n):
     Wraps loss functions to indicate that they expect intervals together
     with ``n`` nearest neighbors

-    The loss function is then guaranteed to receive the data of at least the
-    N nearest neighbors (``nth_neighbors``) in a dict that tells you what the
-    neighboring points of these are. And the `~adaptive.Learner1D` will
-    then make sure that the loss is updated whenever one of the
-    ``nth_neighbors`` changes.
+    The loss function will then receive the data of the N nearest neighbors
+    (``nth_neighbors``) along with the data of the interval itself.
+    The `~adaptive.Learner1D` will also make sure that the loss is updated
+    whenever one of the ``nth_neighbors`` changes.

     Examples
     --------

-    The next function is a part of the `get_curvature_loss` function.
+    The next function is a part of the `curvature_loss_function` function.

     >>> @uses_nth_neighbors(1)
-    ... def triangle_loss(interval, scale, data, neighbors):
-    ...     x_left, x_right = interval
-    ...     xs = [neighbors[x_left][0], x_left, x_right, neighbors[x_right][1]]
-    ...     # at the boundary, neighbors[<left boundary x>] is (None, <some other x>)
-    ...     xs = [x for x in xs if x is not None]
-    ...     if len(xs) <= 2:
-    ...         return (x_right - x_left) / scale[0]
+    ... def triangle_loss(xs, ys):
+    ...     xs = [x for x in xs if x is not None]
+    ...     ys = [y for y in ys if y is not None]
     ...
-    ...     y_scale = scale[1] or 1
-    ...     ys_scaled = [data[x] / y_scale for x in xs]
-    ...     xs_scaled = [x / scale[0] for x in xs]
-    ...     N = len(xs) - 2
-    ...     pts = [(x, y) for x, y in zip(xs_scaled, ys_scaled)]
-    ...     return sum(volume(pts[i:i+3]) for i in range(N)) / N
-
-    Or you may define a loss that favours the (local) minima of a function.
+    ...     if len(xs) == 2:  # we do not have enough points for a triangle
+    ...         return xs[1] - xs[0]
+    ...
+    ...     N = len(xs) - 2  # number of constructed triangles
+    ...     if isinstance(ys[0], Iterable):
+    ...         pts = [(x, *y) for x, y in zip(xs, ys)]
+    ...         vol = simplex_volume_in_embedding
+    ...     else:
+    ...         pts = [(x, y) for x, y in zip(xs, ys)]
+    ...         vol = volume
+    ...     return sum(vol(pts[i:i+3]) for i in range(N)) / N
+
+    Or you may define a loss that favours the (local) minima of a function,
+    assuming that you know your function will have a single float as output.

     >>> @uses_nth_neighbors(1)
-    ... def local_minima_resolving_loss(interval, scale, data, neighbors):
-    ...     x_left, x_right = interval
-    ...     n_left = neighbors[x_left][0]
-    ...     n_right = neighbors[x_right][1]
-    ...     loss = (x_right - x_left) / scale[0]
+    ... def local_minima_resolving_loss(xs, ys):
+    ...     loss = xs[2] - xs[1]  # the width of the interval of interest
     ...
-    ...     if not ((n_left is not None and data[x_left] > data[n_left])
-    ...         or (n_right is not None and data[x_right] > data[n_right])):
+    ...     if not ((ys[0] is not None and ys[0] > ys[1])
+    ...         or (ys[3] is not None and ys[3] > ys[2])):
     ...         return loss * 100
     ...
     ...     return loss
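
A note for reviewers who want to try the decorator above outside the docstring: a custom loss decorated with `uses_nth_neighbors` is plugged in through the ``loss_per_interval`` argument of `~adaptive.Learner1D`. A minimal sketch (the function ``f`` and the loss itself are made-up illustrations; only the public adaptive API is used, with the import path assumed to match the module this diff touches):

    import adaptive
    from adaptive.learner.learner1D import uses_nth_neighbors

    @uses_nth_neighbors(0)
    def width_loss(xs, ys):
        # with nth_neighbors == 0 only the interval itself is passed in
        return xs[1] - xs[0]

    def f(x):
        return x ** 2  # hypothetical function to learn

    learner = adaptive.Learner1D(f, bounds=(-1, 1), loss_per_interval=width_loss)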
@@ -68,9 +66,8 @@ def _wrapped(loss_per_interval):
         return loss_per_interval
     return _wrapped

-
 @uses_nth_neighbors(0)
-def uniform_loss(interval, scale, data, neighbors):
+def uniform_loss(xs, ys):
     """Loss function that samples the domain uniformly.

     Works with `~adaptive.Learner1D` only.
@@ -85,38 +82,36 @@ def uniform_loss(interval, scale, data, neighbors):
     ...                           loss_per_interval=uniform_sampling_1d)
     >>>
     """
-    x_left, x_right = interval
-    x_scale, _ = scale
-    dx = (x_right - x_left) / x_scale
+    dx = xs[1] - xs[0]
     return dx


 @uses_nth_neighbors(0)
-def default_loss(interval, scale, data, neighbors):
+def default_loss(xs, ys):
     """Calculate loss on a single interval.

     Currently returns the rescaled length of the interval. If one of the
     y-values is missing, returns 0 (so the intervals with missing data are
     never touched). This behavior should be improved later.
     """
-    x_left, x_right = interval
-    y_right, y_left = data[x_right], data[x_left]
-    x_scale, y_scale = scale
-    dx = (x_right - x_left) / x_scale
-    if y_scale == 0:
-        loss = dx
+    dx = xs[1] - xs[0]
+    if isinstance(ys[0], Iterable):
+        dy = [abs(a - b) for a, b in zip(*ys)]
+        return np.hypot(dx, dy).max()
     else:
-        dy = (y_right - y_left) / y_scale
-        try:
-            len(dy)
-            loss = np.hypot(dx, dy).max()
-        except TypeError:
-            loss = math.hypot(dx, dy)
-    return loss
+        dy = ys[1] - ys[0]
+        return np.hypot(dx, dy)


-def _loss_of_multi_interval(xs, ys):
-    N = len(xs) - 2
+@uses_nth_neighbors(1)
+def triangle_loss(xs, ys):
+    xs = [x for x in xs if x is not None]
+    ys = [y for y in ys if y is not None]
+
+    if len(xs) == 2:  # we do not have enough points for a triangle
+        return xs[1] - xs[0]
+
+    N = len(xs) - 2  # number of constructed triangles
     if isinstance(ys[0], Iterable):
         pts = [(x, *y) for x, y in zip(xs, ys)]
         vol = simplex_volume_in_embedding
@@ -126,27 +121,15 @@ def _loss_of_multi_interval(xs, ys):
     return sum(vol(pts[i:i+3]) for i in range(N)) / N


-@uses_nth_neighbors(1)
-def triangle_loss(interval, scale, data, neighbors):
-    x_left, x_right = interval
-    xs = [neighbors[x_left][0], x_left, x_right, neighbors[x_right][1]]
-    xs = [x for x in xs if x is not None]
-
-    if len(xs) <= 2:
-        return (x_right - x_left) / scale[0]
-    else:
-        y_scale = scale[1] or 1
-        ys_scaled = [data[x] / y_scale for x in xs]
-        xs_scaled = [x / scale[0] for x in xs]
-        return _loss_of_multi_interval(xs_scaled, ys_scaled)
-
-
-def get_curvature_loss(area_factor=1, euclid_factor=0.02, horizontal_factor=0.02):
+def curvature_loss_function(area_factor=1, euclid_factor=0.02, horizontal_factor=0.02):
     @uses_nth_neighbors(1)
-    def curvature_loss(interval, scale, data, neighbors):
-        triangle_loss_ = triangle_loss(interval, scale, data, neighbors)
-        default_loss_ = default_loss(interval, scale, data, neighbors)
-        dx = (interval[1] - interval[0]) / scale[0]
+    def curvature_loss(xs, ys):
+        xs_middle = xs[1:3]
+        ys_middle = ys[1:3]
+
+        triangle_loss_ = triangle_loss(xs, ys)
+        default_loss_ = default_loss(xs_middle, ys_middle)
+        dx = xs_middle[1] - xs_middle[0]
         return (area_factor * (triangle_loss_ ** 0.5)
                 + euclid_factor * default_loss_
                 + horizontal_factor * dx)
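
The factory renamed in this hunk returns a ready-to-use loss, so the new name would appear in user code roughly as below (a sketch, assuming `curvature_loss_function` is importable from the module this diff touches; ``f`` is a made-up function):

    import adaptive
    from adaptive.learner.learner1D import curvature_loss_function

    def f(x):
        return x ** 3 - x  # hypothetical function with some curvature

    # the factory arguments keep their defaults; tweak them to re-weight the three terms
    loss = curvature_loss_function(area_factor=1, euclid_factor=0.02, horizontal_factor=0.02)
    learner = adaptive.Learner1D(f, bounds=(-2, 2), loss_per_interval=loss)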
@@ -209,29 +192,24 @@ class Learner1D(BaseLearner):

     Notes
     -----
-    `loss_per_interval` takes 4 parameters: ``interval``, ``scale``,
-    ``data``, and ``neighbors``, and returns a scalar; the loss over
-    the interval.
-    interval : (float, float)
-        The bounds of the interval.
-    scale : (float, float)
-        The x and y scale over all the intervals, useful for rescaling the
-        interval loss.
-    data : dict(float → float)
-        A map containing evaluated function values. It is guaranteed
-        to have values for both of the points in 'interval'.
-    neighbors : dict(float → (float, float))
-        A map containing points as keys to its neighbors as a tuple.
-        At the left ``x_left`` and right ``x_right`` most boundary it has
-        ``x_left: (None, float)`` and ``x_right: (float, None)``.
-
-    The `loss_per_interval` function should also have
-    an attribute `nth_neighbors` that indicates how many of the neighboring
-    intervals to `interval` are used. If `loss_per_interval` doesn't
-    have such an attribute, it's assumed that it uses **no** neighboring
-    intervals. Also see the `uses_nth_neighbors` decorator.
-    **WARNING**: When modifying the `data` and `neighbors` data structures
-    the learner will behave in an undefined way.
+    `loss_per_interval` takes 2 parameters: ``xs`` and ``ys``, and returns a
+    scalar; the loss over the interval.
+    xs : tuple of floats
+        The x values of the interval; if `nth_neighbors` is greater than zero it
+        also contains the x-values of the neighbors of the interval, in ascending
+        order. The interval we want to know the loss of is then the middle
+        interval. If no neighbor is available (at the edges of the domain) then
+        `None` will take the place of the x-value of the neighbor.
+    ys : tuple of function values
+        The output values of the function when evaluated at the `xs`. This is
+        either a float or a tuple of floats in the case of vector output.
+
+
+    The `loss_per_interval` function may also have an attribute `nth_neighbors`
+    that indicates how many of the neighboring intervals to `interval` are used.
+    If `loss_per_interval` doesn't have such an attribute, it is assumed that it
+    uses **no** neighboring intervals. Also see the `uses_nth_neighbors`
+    decorator for more information.
     """

     def __init__(self, function, bounds, loss_per_interval=None):
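
To illustrate the two-parameter contract described in these Notes, here is a hypothetical loss (not part of this commit) that uses the ``None`` convention at the domain edges to sample the boundaries a little more densely:

    from adaptive.learner.learner1D import uses_nth_neighbors

    @uses_nth_neighbors(1)
    def edge_aware_loss(xs, ys):
        # with nth_neighbors == 1: xs == (x_left_neighbor, x_left, x_right, x_right_neighbor)
        # and the outer entries are None at the edges of the domain
        dx = xs[2] - xs[1]
        n_missing = sum(x is None for x in xs)
        return dx * (1 + n_missing)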
@@ -300,16 +278,41 @@ def loss(self, real=True):
         losses = self.losses if real else self.losses_combined
         return max(losses.values()) if len(losses) > 0 else float('inf')

+    def _scale_x(self, x):
+        if x is None:
+            return None
+        return x / self._scale[0]
+
+    def _scale_y(self, y):
+        if y is None:
+            return None
+        y_scale = self._scale[1] or 1
+        return y / y_scale
+
+    def _get_point_by_index(self, ind):
+        if ind < 0 or ind >= len(self.neighbors):
+            return None
+        return self.neighbors.keys()[ind]
+
     def _get_loss_in_interval(self, x_left, x_right):
         assert x_left is not None and x_right is not None

         if x_right - x_left < self._dx_eps:
             return 0

-        # we need to compute the loss for this interval
-        return self.loss_per_interval(
-            (x_left, x_right), self._scale, self.data, self.neighbors)
+        nn = self.nth_neighbors
+        i = self.neighbors.index(x_left)
+        start = i - nn
+        end = i + nn + 2
+
+        xs = [self._get_point_by_index(i) for i in range(start, end)]
+        ys = [self.data.get(x, None) for x in xs]

+        xs_scaled = tuple(self._scale_x(x) for x in xs)
+        ys_scaled = tuple(self._scale_y(y) for y in ys)
+
+        # we need to compute the loss for this interval
+        return self.loss_per_interval(xs_scaled, ys_scaled)

     def _update_interpolated_loss_in_interval(self, x_left, x_right):
         if x_left is None or x_right is None:
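
To make the new data flow concrete, the index arithmetic above can be mimicked in isolation (illustrative numbers, not taken from the diff): for ``nth_neighbors == 1`` and the leftmost interval, the requested range starts at ``-1``, so the missing neighbor shows up as ``None``, exactly the convention the loss functions expect.

    points = [0.0, 0.5, 1.0]   # sorted x-values known to the learner
    nn = 1                     # loss_per_interval.nth_neighbors
    i = points.index(0.0)      # loss of the interval (0.0, 0.5)
    xs = [points[j] if 0 <= j < len(points) else None
          for j in range(i - nn, i + nn + 2)]
    print(xs)                  # [None, 0.0, 0.5, 1.0]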
@@ -419,6 +422,9 @@ def tell(self, x, y):
         if x in self.data:
             # The point is already evaluated before
             return
+        if y is None:
+            raise TypeError("Y-value may not be None, use learner.tell_pending(x) "
+                            "to indicate that this value is currently being calculated")

         # either it is a float/int, if not, try casting to a np.array
         if not isinstance(y, (float, int)):
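
With this check in place, callers should report points that are still being computed through ``tell_pending`` instead of passing ``None``. A short usage sketch (``f`` is hypothetical):

    import adaptive

    def f(x):
        return x ** 2  # hypothetical function

    learner = adaptive.Learner1D(f, bounds=(-1, 1))
    learner.tell_pending(0.5)    # 0.5 is being computed; no y-value yet
    learner.tell(0.5, f(0.5))    # report the value once it is known
    # learner.tell(0.7, None)    # would now raise the TypeError added above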