
Commit 30bccb7: Use new auto goal functionality
Parent: e7f2179

15 files changed: +60, -59 lines
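Every hunk below applies the same substitution: where a `Runner`, `BlockingRunner`, or `adaptive.runner.simple` call previously took a callable goal such as `lambda l: l.loss() < 0.01`, it now takes the loss threshold itself as a number. A minimal sketch of the before/after usage, assuming the auto-goal behavior this commit relies on (a bare float passed as `goal` is treated as a loss target):

```python
import adaptive


def peak(x, a=0.01):
    # Sharp feature that the 1D learner resolves adaptively.
    return x + a**2 / (a**2 + x**2)


learner = adaptive.Learner1D(peak, bounds=(-1, 1))

# Before: an explicit callable that inspects the learner's loss.
# adaptive.runner.simple(learner, goal=lambda l: l.loss() < 0.01)

# After (this commit): pass the loss threshold directly.
adaptive.runner.simple(learner, goal=0.01)
```

Goals that are not loss thresholds, such as the `npoints`-based callable in `adaptive/tests/test_balancing_learner.py`, are left as callables.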

README.md (1 addition, 1 deletion)

@@ -75,7 +75,7 @@ def peak(x, a=0.01):
 
 
 learner = Learner1D(peak, bounds=(-1, 1))
-runner = Runner(learner, goal=lambda l: l.loss() < 0.01)
+runner = Runner(learner, goal=0.01)
 runner.live_info()
 runner.live_plot()
 ```

adaptive/tests/test_average_learner.py (1 addition, 1 deletion)

@@ -61,7 +61,7 @@ def constant_function(seed):
 learner = AverageLearner(
 constant_function, atol=0.01, rtol=0.01, min_npoints=min_npoints
 )
-simple(learner, lambda l: l.loss() < 1)
+simple(learner, 1.0)
 assert learner.npoints >= max(2, min_npoints)

adaptive/tests/test_balancing_learner.py (3 additions, 3 deletions)

@@ -52,10 +52,10 @@ def test_ask_0(strategy):
 @pytest.mark.parametrize(
 "strategy, goal",
 [
-("loss", lambda l: l.loss() < 0.1),
-("loss_improvements", lambda l: l.loss() < 0.1),
+("loss", 0.1),
+("loss_improvements", 0.1),
 ("npoints", lambda bl: all(l.npoints > 10 for l in bl.learners)),
-("cycle", lambda l: l.loss() < 0.1),
+("cycle", 0.1),
 ],
 )
 def test_strategies(strategy, goal):

adaptive/tests/test_learnernd.py (3 additions, 3 deletions)

@@ -33,8 +33,8 @@ def test_interior_vs_bbox_gives_same_result():
 hull = scipy.spatial.ConvexHull(control._bounds_points)
 learner = LearnerND(f, bounds=hull)
 
-simple(control, goal=lambda l: l.loss() < 0.1)
-simple(learner, goal=lambda l: l.loss() < 0.1)
+simple(control, goal=0.1)
+simple(learner, goal=0.1)
 
 assert learner.data == control.data
 

@@ -47,4 +47,4 @@ def test_vector_return_with_a_flat_layer():
 h3 = lambda xy: np.array([0 * f(xy), g(xy)])  # noqa: E731
 for function in [h1, h2, h3]:
 learner = LearnerND(function, bounds=[(-1, 1), (-1, 1)])
-simple(learner, goal=lambda l: l.loss() < 0.1)
+simple(learner, goal=0.1)

docs/logo.py (1 addition, 1 deletion)

@@ -22,7 +22,7 @@ def ring(xy):
 return x + np.exp(-((x**2 + y**2 - 0.75**2) ** 2) / a**4)
 
 learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)])
-adaptive.runner.simple(learner, goal=lambda l: l.loss() < 0.01)
+adaptive.runner.simple(learner, goal=0.01)
 return learner
docs/source/logo.md (1 addition, 1 deletion)

@@ -110,7 +110,7 @@ def create_and_run_learner():
 return x + np.exp(-((x**2 + y**2 - 0.75**2) ** 2) / a**4)
 
 learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)])
-adaptive.runner.simple(learner, goal=lambda l: l.loss() < 0.005)
+adaptive.runner.simple(learner, goal=0.005)
 return learner

docs/source/tutorial/tutorial.AverageLearner.md (2 additions, 2 deletions)

@@ -45,8 +45,8 @@ def g(n):
 
 ```{code-cell} ipython3
 learner = adaptive.AverageLearner(g, atol=None, rtol=0.01)
-# `loss < 1` means that we reached the `rtol` or `atol`
-runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 1)
+# `loss < 1.0` means that we reached the `rtol` or `atol`
+runner = adaptive.Runner(learner, goal=1.0)
 ```
 
 ```{code-cell} ipython3

docs/source/tutorial/tutorial.BalancingLearner.md (2 additions, 2 deletions)

@@ -46,7 +46,7 @@ learners = [
 ]
 
 bal_learner = adaptive.BalancingLearner(learners)
-runner = adaptive.Runner(bal_learner, goal=lambda l: l.loss() < 0.01)
+runner = adaptive.Runner(bal_learner, goal=0.01)
 ```
 
 ```{code-cell} ipython3

@@ -86,7 +86,7 @@ learner = adaptive.BalancingLearner.from_product(
 jacobi, adaptive.Learner1D, dict(bounds=(0, 1)), combos
 )
 
-runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01)
+runner = adaptive.BlockingRunner(learner, goal=0.01)
 
 # The `cdims` will automatically be set when using `from_product`, so
 # `plot()` will return a HoloMap with correctly labeled sliders.

docs/source/tutorial/tutorial.Learner1D.md (3 additions, 3 deletions)

@@ -65,7 +65,7 @@ A {class}`~concurrent.futures.ProcessPoolExecutor` cannot be used on Windows for
 ```{code-cell} ipython3
 # The end condition is when the "loss" is less than 0.1. In the context of the
 # 1D learner this means that we will resolve features in 'func' with width 0.1 or wider.
-runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)
+runner = adaptive.Runner(learner, goal=0.01)
 ```
 
 ```{code-cell} ipython3

@@ -124,7 +124,7 @@ The `Learner1D` can be used for such functions:
 
 ```{code-cell} ipython3
 learner = adaptive.Learner1D(f_levels, bounds=(-1, 1))
-runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)
+runner = adaptive.Runner(learner, goal=0.01)
 ```
 
 ```{code-cell} ipython3

@@ -156,7 +156,7 @@ from adaptive.learner.learner1D import (
 
 curvature_loss = curvature_loss_function()
 learner = adaptive.Learner1D(f, bounds=(-1, 1), loss_per_interval=curvature_loss)
-runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)
+runner = adaptive.Runner(learner, goal=0.01)
 ```
 
 ```{code-cell} ipython3

docs/source/tutorial/tutorial.Learner2D.md (1 addition, 1 deletion)

@@ -46,7 +46,7 @@ learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)])
 ```
 
 ```{code-cell} ipython3
-runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)
+runner = adaptive.Runner(learner, goal=0.01)
 ```
 
 ```{code-cell} ipython3
