Skip to content

Commit d5a63a3

Browse files
committed
change resolution_loss to a factory function
1 parent b76ac11 commit d5a63a3

File tree

3 files changed

+27
-40
lines changed

3 files changed

+27
-40
lines changed

adaptive/learner/learner2D.py

Lines changed: 12 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -89,7 +89,7 @@ def uniform_loss(ip):
8989
return np.sqrt(areas(ip))
9090

9191

92-
def resolution_loss(ip, min_distance=0, max_distance=1):
92+
def resolution_loss_function(min_distance=0, max_distance=1):
9393
"""Loss function that is similar to the `default_loss` function, but you
9494
can set the maximum and minimum size of a triangle.
9595
@@ -104,27 +104,25 @@ def resolution_loss(ip, min_distance=0, max_distance=1):
104104
... x, y = xy
105105
... return x**2 + y**2
106106
>>>
107-
>>> from functools import partial
108-
>>> loss = partial(resolution_loss, min_distance=0.01)
107+
>>> loss = resolution_loss_function(min_distance=0.01, max_distance=1)
109108
>>> learner = adaptive.Learner2D(f,
110109
... bounds=[(-1, -1), (1, 1)],
111110
... loss_per_triangle=loss)
112111
>>>
113112
"""
114-
A = areas(ip)
115-
dev = np.sum(deviations(ip), axis=0)
116-
117-
# similar to the default_loss
118-
loss = np.sqrt(A) * dev + A
113+
def resolution_loss(ip):
114+
loss = default_loss(ip)
119115

120-
# Setting areas with a small area to zero such that they won't be chosen again
121-
loss[A < min_distance**2] = 0
116+
A = areas(ip)
117+
# Setting areas with a small area to zero such that they won't be chosen again
118+
loss[A < min_distance**2] = 0
122119

123-
# Setting triangles that have a size larger than max_distance to infinite loss
124-
# such that these triangles will be picked
125-
loss[A > max_distance**2] = np.inf
120+
# Setting triangles that have a size larger than max_distance to infinite loss
121+
# such that these triangles will be picked
122+
loss[A > max_distance**2] = np.inf
126123

127-
return loss
124+
return loss
125+
return resolution_loss
128126

129127

130128
def minimize_triangle_surface_loss(ip):

docs/source/reference/adaptive.learner.learner2D.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ Custom loss functions
1515

1616
.. autofunction:: adaptive.learner.learner2D.uniform_loss
1717

18-
.. autofunction:: adaptive.learner.learner2D.resolution_loss
18+
.. autofunction:: adaptive.learner.learner2D.resolution_loss_function
1919

2020

2121
Helper functions

docs/source/tutorial/tutorial.custom_loss.rst

Lines changed: 14 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@ tl;dr, one can use the following *loss functions* that
4949
+ `adaptive.learner.learner2D.default_loss`
5050
+ `adaptive.learner.learner2D.uniform_loss`
5151
+ `adaptive.learner.learner2D.minimize_triangle_surface_loss`
52-
+ `adaptive.learner.learner2D.resolution_loss`
52+
+ `adaptive.learner.learner2D.resolution_loss_function`
5353

5454

5555
Uniform sampling
@@ -132,34 +132,23 @@ small (0 loss).
132132

133133
%%opts EdgePaths (color='w') Image [logz=True colorbar=True]
134134

135-
def resolution_loss(ip, min_distance=0, max_distance=1):
135+
def resolution_loss_function(min_distance=0, max_distance=1):
136136
"""min_distance and max_distance should be in between 0 and 1
137137
because the total area is normalized to 1."""
138+
def resolution_loss(ip):
139+
from adaptive.learner.learner2D import default_loss, areas
140+
loss = default_loss(ip)
138141

139-
from adaptive.learner.learner2D import areas, deviations
142+
A = areas(ip)
143+
# Setting areas with a small area to zero such that they won't be chosen again
144+
loss[A < min_distance**2] = 0
140145

141-
A = areas(ip)
142-
143-
# 'deviations' returns an array of shape '(n, len(ip))', where
144-
# 'n' is the dimension of the output of the learned function
145-
# In this case we know that the learned function returns a scalar,
146-
# so 'deviations' returns an array of shape '(1, len(ip))'.
147-
# It represents the deviation of the function value from a linear estimate
148-
# over each triangular subdomain.
149-
dev = deviations(ip)[0]
150-
151-
# we add terms of the same dimension: dev == [distance], A == [distance**2]
152-
loss = np.sqrt(A) * dev + A
153-
154-
# Setting areas with a small area to zero such that they won't be chosen again
155-
loss[A < min_distance**2] = 0
156-
157-
# Setting triangles that have a size larger than max_distance to infinite loss
158-
loss[A > max_distance**2] = np.inf
159-
160-
return loss
146+
# Setting triangles that have a size larger than max_distance to infinite loss
147+
loss[A > max_distance**2] = np.inf
161148

162-
loss = partial(resolution_loss, min_distance=0.01)
149+
return loss
150+
return resolution_loss
151+
loss = resolution_loss_function(min_distance=0.01)
163152

164153
learner = adaptive.Learner2D(f_divergent_2d, [(-1, 1), (-1, 1)], loss_per_triangle=loss)
165154
runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.02)
@@ -169,4 +158,4 @@ Awesome! We zoom in on the singularity, but not at the expense of
169158
sampling the rest of the domain a reasonable amount.
170159

171160
The above strategy is available as
172-
`adaptive.learner.learner2D.resolution_loss`.
161+
`adaptive.learner.learner2D.resolution_loss_function`.

0 commit comments

Comments
 (0)