@@ -1,4 +1,5 @@
 import random
+from typing import Tuple, Union
 
 import numpy as np
 
@@ -7,7 +8,7 @@
 from adaptive.runner import simple
 
 
-def test_pending_loss_intervals():
+def test_pending_loss_intervals() -> None:
     # https://github.com/python-adaptive/adaptive/issues/40
     learner = Learner1D(lambda x: x, (0, 4))
 
@@ -21,7 +22,7 @@ def test_pending_loss_intervals():
     assert set(learner.losses_combined.keys()) == {(0, 1), (1, 2), (2, 3.5), (3.5, 4.0)}
 
 
-def test_loss_interpolation_for_unasked_point():
+def test_loss_interpolation_for_unasked_point() -> None:
     # https://github.com/python-adaptive/adaptive/issues/40
     learner = Learner1D(lambda x: x, (0, 4))
 
@@ -53,7 +54,7 @@ def test_loss_interpolation_for_unasked_point():
     }
 
 
-def test_first_iteration():
+def test_first_iteration() -> None:
     """Edge cases where we ask for a few points at the start."""
     learner = Learner1D(lambda x: None, (-1, 1))
     points, loss_improvements = learner.ask(2)
@@ -87,7 +88,7 @@ def test_first_iteration():
     assert points == [1]
 
 
-def test_loss_interpolation():
+def test_loss_interpolation() -> None:
     learner = Learner1D(lambda _: 0, bounds=(-1, 1))
 
     learner.tell(-1, 0)
@@ -104,7 +105,9 @@ def test_loss_interpolation():
     assert abs(expected_loss - loss) < 1e-15, (expected_loss, loss)
 
 
-def _run_on_discontinuity(x_0, bounds):
+def _run_on_discontinuity(
+    x_0: Union[int, float], bounds: Union[Tuple[int, int], Tuple[float, float]]
+) -> Learner1D:
     def f(x):
         return -1 if x < x_0 else +1
 
@@ -116,7 +119,7 @@ def f(x):
     return learner
 
 
-def test_termination_on_discontinuities():
+def test_termination_on_discontinuities() -> None:
 
     learner = _run_on_discontinuity(0, (-1, 1))
     smallest_interval = min(abs(a - b) for a, b in learner.losses.keys())
@@ -131,7 +134,7 @@ def test_termination_on_discontinuities():
     assert smallest_interval >= 0.5e3 * np.finfo(float).eps
 
 
-def test_order_adding_points():
+def test_order_adding_points() -> None:
     # and https://github.com/python-adaptive/adaptive/issues/41
     learner = Learner1D(lambda x: x, (0, 1))
     learner.tell_many([1, 0, 0.5], [0, 0, 0])
@@ -140,7 +143,7 @@ def test_order_adding_points():
     learner.ask(1)
 
 
-def test_adding_existing_point_passes_silently():
+def test_adding_existing_point_passes_silently() -> None:
     # See https://github.com/python-adaptive/adaptive/issues/42
     learner = Learner1D(lambda x: x, (0, 4))
     learner.tell(0, 0)
@@ -149,7 +152,7 @@ def test_adding_existing_point_passes_silently():
     learner.tell(1, 100)
 
 
-def test_loss_at_machine_precision_interval_is_zero():
+def test_loss_at_machine_precision_interval_is_zero() -> None:
     """The loss of an interval smaller than _dx_eps
     should be set to zero."""
 
@@ -166,11 +169,11 @@ def goal(l):
     assert learner.npoints != 1000
 
 
-def small_deviations(x):
+def small_deviations(x: float) -> Union[int, float]:
     return 0 if x <= 1 else 1 + 10 ** (-random.randint(12, 14))
 
 
-def test_small_deviations():
+def test_small_deviations() -> None:
     """This tests whether the Learner1D can handle small deviations.
     See https://gitlab.kwant-project.org/qt/adaptive/merge_requests/73 and
     https://github.com/python-adaptive/adaptive/issues/78."""
@@ -205,7 +208,7 @@ def test_small_deviations():
             break
 
 
-def test_uniform_sampling1D_v2():
+def test_uniform_sampling1D_v2() -> None:
     def check(known, expect):
         def f(x):
             return x
@@ -227,7 +230,7 @@ def f(x):
     check([-1, -0.5], {-0.75, 0.25, 1})
 
 
-def test_add_data_unordered():
+def test_add_data_unordered() -> None:
     # see https://github.com/python-adaptive/adaptive/issues/44
     learner = Learner1D(lambda x: x, bounds=(-1, 1))
     xs = [-1, 1, 0]
@@ -239,14 +242,14 @@ def test_add_data_unordered():
     learner.ask(3)
 
 
-def test_ask_does_not_return_known_points_when_returning_bounds():
+def test_ask_does_not_return_known_points_when_returning_bounds() -> None:
     learner = Learner1D(lambda x: None, (-1, 1))
     learner.tell(0, 0)
     points, _ = learner.ask(3)
     assert 0 not in points
 
 
-def test_tell_many():
+def test_tell_many() -> None:
     def f(x, offset=0.123214):
         a = 0.01
         return (
@@ -355,7 +358,7 @@ def _random_run(learner, learner2, scale_doubling=True):
     test_equal(learner, learner2)
 
 
-def test_curvature_loss():
+def test_curvature_loss() -> None:
     def f(x):
         return np.tanh(20 * x)
 
@@ -366,7 +369,7 @@ def f(x):
     assert learner.npoints > 100
 
 
-def test_curvature_loss_vectors():
+def test_curvature_loss_vectors() -> None:
     def f(x):
         return np.tanh(20 * x), np.tanh(20 * (x - 0.4))
 
@@ -377,7 +380,7 @@ def f(x):
     assert learner.npoints > 100
 
 
-def test_NaN_loss():
+def test_NaN_loss() -> None:
     # see https://github.com/python-adaptive/adaptive/issues/145
     def f(x):
         a = 0.01
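
Aside (not part of the diff): a minimal, self-contained sketch of the tell/ask cycle these tests exercise, built only from calls that appear in the hunks above (Learner1D, tell, tell_many, ask, and the blocking simple runner). The tanh target function and the 100-point goal are arbitrary stand-ins for illustration, not taken from this PR.

import numpy as np

from adaptive import Learner1D
from adaptive.runner import simple

# Build a learner on the interval [-1, 1]; any scalar function works here.
learner = Learner1D(np.tanh, bounds=(-1, 1))

learner.tell(-1.0, np.tanh(-1.0))  # feed one known point
points, loss_improvements = learner.ask(3)  # request 3 suggested points
learner.tell_many(points, [np.tanh(x) for x in points])

# Run the blocking runner until the learner holds more than 100 points.
simple(learner, goal=lambda l: l.npoints > 100)
assert learner.npoints > 100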