
Commit 7da7e81

Commit message: rerun with updated pre-commit filters
Parent: 3a5681d

16 files changed: +115 additions, -78 deletions

.pre-commit-config.yaml

Lines changed: 5 additions & 5 deletions
@@ -1,6 +1,6 @@
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v3.4.0
+    rev: v4.2.0
     hooks:
       - id: trailing-whitespace
       - id: end-of-file-fixer
@@ -9,19 +9,19 @@ repos:
       - id: debug-statements
       - id: check-ast
   - repo: https://github.com/ambv/black
-    rev: 20.8b1
+    rev: 22.3.0
     hooks:
       - id: black
   - repo: https://github.com/asottile/pyupgrade
-    rev: v2.11.0
+    rev: v2.32.1
     hooks:
       - id: pyupgrade
         args: ['--py37-plus']
   - repo: https://github.com/timothycrosley/isort
-    rev: 5.8.0
+    rev: 5.10.1
     hooks:
       - id: isort
   - repo: https://gitlab.com/pycqa/flake8
-    rev: 3.9.0
+    rev: 3.9.2
     hooks:
       - id: flake8
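
Most of the hunks below are a mechanical consequence of the version bumps above, in particular Black 20.8b1 → 22.3.0: Black's stable style since 22.1.0 drops the spaces around the ** (power) operator when both operands are simple (names, numeric literals, attribute access), but keeps them when an operand is a parenthesized expression. A minimal hand-written illustration of that rule; the names x, a, and offset are made up and not taken from this repository:

x, a, offset = 0.5, 0.01, 0.1  # made-up values for illustration
# Simple operands: Black >= 22 hugs the power operator.
y = x**2 + a**2
# A parenthesized base is not "simple", so the spaces around ** stay,
# which is why forms like (x - offset) ** 2 are unchanged in the hunks below.
z = (x - offset) ** 2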

adaptive/learner/average_learner.py

Lines changed: 2 additions & 2 deletions
@@ -93,7 +93,7 @@ def tell(self, n: int, value: Real) -> None:
         self.data[n] = value
         self.pending_points.discard(n)
         self.sum_f += value
-        self.sum_f_sq += value ** 2
+        self.sum_f_sq += value**2
         self.npoints += 1

     def tell_pending(self, n: int) -> None:
@@ -111,7 +111,7 @@ def std(self) -> Float:
         n = self.npoints
         if n < self.min_npoints:
             return np.inf
-        numerator = self.sum_f_sq - n * self.mean ** 2
+        numerator = self.sum_f_sq - n * self.mean**2
         if numerator < 0:
             # in this case the numerator ~ -1e-15
             return 0
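
For context on the std() hunk: the numerator uses the running-sums identity sum(f**2) - n * mean**2 for the sum of squared deviations, and round-off in that cancellation is what the existing "numerator ~ -1e-15" comment guards against. A quick self-contained check of the identity with illustrative values:

import numpy as np

values = [1.0, 2.0, 4.0]  # made-up sample
n = len(values)
mean = sum(values) / n
sum_f_sq = sum(v**2 for v in values)
numerator = sum_f_sq - n * mean**2  # same identity as in the std() hunk above
assert np.isclose(numerator, sum((v - mean) ** 2 for v in values))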

adaptive/learner/learner1D.py

Lines changed: 3 additions & 3 deletions
@@ -151,7 +151,7 @@ def curvature_loss(xs, ys):
         default_loss_ = default_loss(xs_middle, ys_middle)
         dx = xs_middle[1] - xs_middle[0]
         return (
-            area_factor * (triangle_loss_ ** 0.5)
+            area_factor * (triangle_loss_**0.5)
             + euclid_factor * default_loss_
             + horizontal_factor * dx
         )
@@ -536,10 +536,10 @@ def tell_many(self, xs: Sequence[float], ys: Sequence[Any], *, force=False) -> N
             self._oldscale = deepcopy(self._scale)

         # Find the intervals for which the losses should be calculated.
-        intervals, intervals_combined = [
+        intervals, intervals_combined = (
             [(x_m, x_r) for x_m, (x_l, x_r) in neighbors.items()][:-1]
             for neighbors in (self.neighbors, self.neighbors_combined)
-        ]
+        )

         # The the losses for the "real" intervals.
         self.losses = loss_manager(self._scale[0])
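
Aside from the ** respacing, the tell_many hunk replaces a two-element list comprehension with a parenthesized generator expression; tuple-style unpacking accepts any iterable, so both names are bound exactly as before and no intermediate list is built. A small standalone sketch of the same pattern, using made-up neighbor dictionaries:

# Unpacking a generator expression behaves like unpacking the equivalent list.
neighbors = {1: (0, 2), 2: (1, 3)}
neighbors_combined = {1: (0, 2), 2: (1, 3), 3: (2, 4)}
intervals, intervals_combined = (
    [(x_m, x_r) for x_m, (x_l, x_r) in nbrs.items()][:-1]
    for nbrs in (neighbors, neighbors_combined)
)
assert intervals == [(1, 2)]
assert intervals_combined == [(1, 2), (2, 3)]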

adaptive/learner/learner2D.py

Lines changed: 2 additions & 2 deletions
@@ -137,11 +137,11 @@ def resolution_loss(ip):

         A = areas(ip)
         # Setting areas with a small area to zero such that they won't be chosen again
-        loss[A < min_distance ** 2] = 0
+        loss[A < min_distance**2] = 0

         # Setting triangles that have a size larger than max_distance to infinite loss
         # such that these triangles will be picked
-        loss[A > max_distance ** 2] = np.inf
+        loss[A > max_distance**2] = np.inf

         return loss

adaptive/learner/triangulation.py

Lines changed: 1 addition & 1 deletion
@@ -616,7 +616,7 @@ def _relative_volume(self, simplex: Simplex) -> float:
         vertices = array(self.get_vertices(simplex))
         vectors = vertices[1:] - vertices[0]
         average_edge_length = mean(np_abs(vectors))
-        return self.volume(simplex) / (average_edge_length ** self.dim)
+        return self.volume(simplex) / (average_edge_length**self.dim)

     def add_point(
         self,

adaptive/notebook_integration.py

Lines changed: 2 additions & 2 deletions
@@ -186,7 +186,7 @@ def should_update(status):
             # i.e. we're offline for 12h, with an update_interval of 0.5s,
             # and without the reduced probability, we have buffer_size=86400.
             # With the correction this is np.log(86400) / np.log(1.1) = 119.2
-            return 1.1 ** buffer_size * random.random() < 1
+            return 1.1**buffer_size * random.random() < 1
         except Exception:
             # We catch any Exception because we are using a private API.
             return True
@@ -251,7 +251,7 @@ def _info_html(runner):

     overhead = runner.overhead()
     red_level = max(0, min(int(255 * overhead / 100), 255))
-    overhead_color = "#{:02x}{:02x}{:02x}".format(red_level, 255 - red_level, 0)
+    overhead_color = f"#{red_level:02x}{255 - red_level:02x}{0:02x}"

     info = [
         ("status", f'<font color="{color}">{status}</font>'),

adaptive/tests/algorithm_4.py

Lines changed: 2 additions & 2 deletions
@@ -420,7 +420,7 @@ def f0(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:


 def f7(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
-    return x ** -0.5
+    return x**-0.5


 def f24(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
@@ -430,7 +430,7 @@ def f24(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
 def f21(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
     y = 0
     for i in range(1, 4):
-        y += 1 / np.cosh(20 ** i * (x - 2 * i / 10))
+        y += 1 / np.cosh(20**i * (x - 2 * i / 10))
     return y

adaptive/tests/test_learner1d.py

Lines changed: 8 additions & 8 deletions
@@ -252,17 +252,17 @@ def test_tell_many():
     def f(x, offset=0.123214):
         a = 0.01
         return (
-            np.sin(x ** 2)
-            + np.sin(x ** 5)
-            + a ** 2 / (a ** 2 + (x - offset) ** 2)
-            + x ** 2
-            + 1e-5 * x ** 3
+            np.sin(x**2)
+            + np.sin(x**5)
+            + a**2 / (a**2 + (x - offset) ** 2)
+            + x**2
+            + 1e-5 * x**3
         )

     def f_vec(x, offset=0.123214):
         a = 0.01
-        y = x + a ** 2 / (a ** 2 + (x - offset) ** 2)
-        return [y, 0.5 * y, y ** 2]
+        y = x + a**2 / (a**2 + (x - offset) ** 2)
+        return [y, 0.5 * y, y**2]

     def assert_equal_dicts(d1, d2):
         xs1, ys1 = zip(*sorted(d1.items()))
@@ -385,7 +385,7 @@ def f(x):
         a = 0.01
         if random.random() < 0.2:
             return np.NaN
-        return x + a ** 2 / (a ** 2 + x ** 2)
+        return x + a**2 / (a**2 + x**2)

     learner = Learner1D(f, bounds=(-1, 1))
     simple(learner, lambda l: l.npoints > 100)

adaptive/tests/test_learners.py

Lines changed: 6 additions & 6 deletions
@@ -131,14 +131,14 @@ def maybe_skip(learner):

 @learn_with(Learner1D, bounds=(-1, 1))
 def quadratic(x, m: uniform(1, 4), b: uniform(0, 1)):
-    return m * x ** 2 + b
+    return m * x**2 + b


 @learn_with(Learner1D, bounds=(-1, 1))
 @learn_with(SequenceLearner, sequence=np.linspace(-1, 1, 201))
 def linear_with_peak(x, d: uniform(-1, 1)):
     a = 0.01
-    return x + a ** 2 / (a ** 2 + (x - d) ** 2)
+    return x + a**2 / (a**2 + (x - d) ** 2)


 @learn_with(LearnerND, bounds=((-1, 1), (-1, 1)))
@@ -147,15 +147,15 @@ def linear_with_peak(x, d: uniform(-1, 1)):
 def ring_of_fire(xy, d: uniform(0.2, 1)):
     a = 0.2
     x, y = xy
-    return x + math.exp(-((x ** 2 + y ** 2 - d ** 2) ** 2) / a ** 4)
+    return x + math.exp(-((x**2 + y**2 - d**2) ** 2) / a**4)


 @learn_with(LearnerND, bounds=((-1, 1), (-1, 1), (-1, 1)))
 @learn_with(SequenceLearner, sequence=np.random.rand(1000, 3))
 def sphere_of_fire(xyz, d: uniform(0.2, 0.5)):
     a = 0.2
     x, y, z = xyz
-    return x + math.exp(-((x ** 2 + y ** 2 + z ** 2 - d ** 2) ** 2) / a ** 4) + z ** 2
+    return x + math.exp(-((x**2 + y**2 + z**2 - d**2) ** 2) / a**4) + z**2


 @learn_with(SequenceLearner, sequence=range(1000))
@@ -172,7 +172,7 @@ def noisy_peak(
     offset: uniform(-0.6, -0.3),
 ):
     seed, x = seed_x
-    y = x ** 3 - x + 3 * peak_width ** 2 / (peak_width ** 2 + (x - offset) ** 2)
+    y = x**3 - x + 3 * peak_width**2 / (peak_width**2 + (x - offset) ** 2)
     noise = np.random.normal(0, sigma)
     return y + noise

@@ -264,7 +264,7 @@ def test_uniform_sampling2D(learner_type, f, learner_kwargs):
     ys, dy = np.linspace(*ybounds, int(n * r), retstep=True)

     distances, neighbors = tree.query(list(it.product(xs, ys)), k=1)
-    assert max(distances) < math.sqrt(dx ** 2 + dy ** 2)
+    assert max(distances) < math.sqrt(dx**2 + dy**2)


 @pytest.mark.parametrize(

adaptive/tests/test_pickling.py

Lines changed: 2 additions & 2 deletions
@@ -37,10 +37,10 @@ def goal_2(learner):


 def pickleable_f(x):
-    return hash(str(x)) / 2 ** 63
+    return hash(str(x)) / 2**63


-nonpickleable_f = lambda x: hash(str(x)) / 2 ** 63  # noqa: E731
+nonpickleable_f = lambda x: hash(str(x)) / 2**63  # noqa: E731


 def identity_function(x):
