@@ -49,7 +49,7 @@ tl;dr, one can use the following *loss functions* that
 + `adaptive.learner.learner2D.default_loss`
 + `adaptive.learner.learner2D.uniform_loss`
 + `adaptive.learner.learner2D.minimize_triangle_surface_loss`
-+ `adaptive.learner.learner2D.resolution_loss`
++ `adaptive.learner.learner2D.resolution_loss_function`
 
 
 Uniform sampling
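For orientation, a minimal sketch of how any of the losses listed in the hunk above is plugged into a learner; the toy function `f` is made up for this example and is not part of the changed docs:

    import adaptive
    from adaptive.learner.learner2D import uniform_loss

    def f(xy):
        # hypothetical example function; any scalar function of (x, y) works
        x, y = xy
        return x * y

    # each of the losses listed above is passed via the `loss_per_triangle` argument
    learner = adaptive.Learner2D(f, bounds=[(-1, 1), (-1, 1)], loss_per_triangle=uniform_loss)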
@@ -132,34 +132,23 @@ small (0 loss).
 
 %%opts EdgePaths (color='w') Image [logz=True colorbar=True]
 
-def resolution_loss(ip, min_distance=0, max_distance=1):
+def resolution_loss_function(min_distance=0, max_distance=1):
     """min_distance and max_distance should be in between 0 and 1
     because the total area is normalized to 1."""
+    def resolution_loss(ip):
+        from adaptive.learner.learner2D import default_loss, areas
+        loss = default_loss(ip)
 
-    from adaptive.learner.learner2D import areas, deviations
+        A = areas(ip)
+        # Setting the loss of triangles with a small area to zero such that they won't be chosen again
+        loss[A < min_distance**2] = 0
 
-    A = areas(ip)
-
-    # 'deviations' returns an array of shape '(n, len(ip))', where
-    # 'n' is the dimension of the output of the learned function.
-    # In this case we know that the learned function returns a scalar,
-    # so 'deviations' returns an array of shape '(1, len(ip))'.
-    # It represents the deviation of the function value from a linear estimate
-    # over each triangular subdomain.
-    dev = deviations(ip)[0]
-
-    # we add terms of the same dimension: dev == [distance], A == [distance**2]
-    loss = np.sqrt(A) * dev + A
-
-    # Setting the loss of triangles with a small area to zero such that they won't be chosen again
-    loss[A < min_distance**2] = 0
-
-    # Setting the loss of triangles larger than max_distance to infinite loss
-    loss[A > max_distance**2] = np.inf
-
-    return loss
+        # Setting the loss of triangles larger than max_distance to infinite loss
+        loss[A > max_distance**2] = np.inf
 
-loss = partial(resolution_loss, min_distance=0.01)
+        return loss
+    return resolution_loss
+loss = resolution_loss_function(min_distance=0.01)
 
 learner = adaptive.Learner2D(f_divergent_2d, [(-1, 1), (-1, 1)], loss_per_triangle=loss)
 runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.02)
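A note on the shape of this refactor: `loss_per_triangle` expects a callable taking only the interpolator `ip`, so the tunable `min_distance` and `max_distance` parameters are now captured in a closure by the `resolution_loss_function` factory rather than bound with `functools.partial` as before. Both approaches produce a one-argument loss; the factory simply keeps the parametrization next to the function it configures.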
@@ -169,4 +158,4 @@ Awesome! We zoom in on the singularity, but not at the expense of
 sampling the rest of the domain a reasonable amount.
 
 The above strategy is available as
-`adaptive.learner.learner2D.resolution_loss`.
+`adaptive.learner.learner2D.resolution_loss_function`.
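Since the strategy ships with the library, the hand-written factory above can be swapped for the built-in one. A minimal sketch, reusing `f_divergent_2d` from earlier in the tutorial:

    import adaptive
    from adaptive.learner.learner2D import resolution_loss_function

    # the built-in factory takes the same (min_distance, max_distance) parameters
    loss = resolution_loss_function(min_distance=0.01)
    learner = adaptive.Learner2D(f_divergent_2d, [(-1, 1), (-1, 1)], loss_per_triangle=loss)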