Commit 4bc0352

Balandat authored and facebook-github-bot committed

Black upgrade (#439)
Summary: Pull Request resolved: #439

The parenthesis change is pretty disgusting, but I guess it is what it is.

Reviewed By: danielrjiang

Differential Revision: D21370208

fbshipit-source-id: bf2f7fe496c6bdecd50e4e3369527698aafc0c6d
1 parent: 86cac04 · commit: 4bc0352

13 files changed (+28 / -20 lines)
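For context, the "parenthesis change" mentioned in the summary shows up in most of the hunks below as the upgraded formatter wrapping the operand of a unary minus in parentheses whenever that operand is a function or method call. The following is a minimal sketch (not part of the commit; it assumes PyTorch is installed and borrows the variable name lb from the first diff) showing that the reformatting is purely cosmetic:

    import torch

    lb = torch.tensor([-1.5])
    # Pre-upgrade formatting: bare unary-minus operand
    m_old_style = -lb.clamp_max(0.0)
    # Post-upgrade formatting: operand wrapped in parentheses; the value is identical
    m_new_style = -(lb.clamp_max(0.0))
    assert torch.equal(m_old_style, m_new_style)  # tensor([1.5]) either way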

botorch/acquisition/utils.py

Lines changed: 1 addition & 1 deletion

@@ -143,7 +143,7 @@ def get_infeasible_cost(
     """
     posterior = model.posterior(X)
     lb = objective(posterior.mean - 6 * posterior.variance.clamp_min(0).sqrt()).min()
-    M = -lb.clamp_max(0.0)
+    M = -(lb.clamp_max(0.0))
     return M.item()

botorch/generation/gen.py

Lines changed: 1 addition & 1 deletion

@@ -203,7 +203,7 @@ def gen_candidates_torch(
         i += 1
         loss = -acquisition_function(candidates).sum()
         if verbose:
-            print("Iter: {} - Value: {:.3f}".format(i, -loss.item()))
+            print("Iter: {} - Value: {:.3f}".format(i, -(loss.item())))
         loss_trajectory.append(loss.item())
         param_trajectory["candidates"].append(candidates.clone())

botorch/models/gpytorch.py

Lines changed: 1 addition & 1 deletion

@@ -91,7 +91,7 @@ def _validate_tensor_args(
         )
         # Yvar may not have the same batch dimensions, but the trailing dimensions
         # of Yvar should be the same as the trailing dimensions of Y.
-        if Yvar is not None and Y.shape[-Yvar.dim() :] != Yvar.shape:
+        if Yvar is not None and Y.shape[-(Yvar.dim()) :] != Yvar.shape:
             raise BotorchTensorDimensionError(
                 "An explicit output dimension is required for observation noise."
                 f" Expected Yvar with shape: {Y.shape[-Yvar.dim() :]} (got"

botorch/models/pairwise_gp.py

Lines changed: 1 addition & 1 deletion

@@ -445,7 +445,7 @@ def _posterior_f(self, utility: Union[Tensor, np.ndarray]) -> Tensor:
             utility: A Tensor of shape `batch_size x n`
         """
         _, _, z_logcdf, _ = self._calc_z(utility, self.D, self.std_noise)
-        loss1 = -torch.sum(z_logcdf, dim=-1)
+        loss1 = -(torch.sum(z_logcdf, dim=-1))
         inv_prod = torch.cholesky_solve(utility.unsqueeze(-1), self.covar_chol)
         loss2 = 0.5 * (utility.unsqueeze(-2) @ inv_prod).squeeze(-1).squeeze(-1)
         loss = loss1 + loss2

botorch/test_functions/multi_fidelity.py

Lines changed: 2 additions & 2 deletions

@@ -101,7 +101,7 @@ def evaluate_true(self, X: Tensor) -> Tensor:
         )
         alpha1 = self.ALPHA[0] - 0.1 * (1 - X[..., 6])
         H = (
-            -torch.sum(self.ALPHA[1:] * torch.exp(-inner_sum)[..., 1:], dim=1)
+            -(torch.sum(self.ALPHA[1:] * torch.exp(-inner_sum)[..., 1:], dim=1))
             - alpha1 * torch.exp(-inner_sum)[..., 0]
         )
         return H

@@ -139,4 +139,4 @@ def evaluate_true(self, X: Tensor) -> Tensor:
         X_next = X[..., 1:-2]
         t1 = 100 * (X_next - X_curr ** 2 + 0.1 * (1 - X[..., -2:-1])) ** 2
         t2 = (X_curr - 1 + 0.1 * (1 - X[..., -1:]) ** 2) ** 2
-        return -(t1 + t2).sum(dim=-1)
+        return -((t1 + t2).sum(dim=-1))

botorch/test_functions/synthetic.py

Lines changed: 11 additions & 7 deletions

@@ -75,7 +75,7 @@ def __init__(
     def evaluate_true(self, X: Tensor) -> Tensor:
         a, b, c = self.a, self.b, self.c
         part1 = -a * torch.exp(-b / math.sqrt(self.dim) * torch.norm(X, dim=-1))
-        part2 = -torch.exp(torch.mean(torch.cos(c * X), dim=-1))
+        part2 = -(torch.exp(torch.mean(torch.cos(c * X), dim=-1)))
         return part1 + part2 + a + math.e

@@ -235,8 +235,8 @@ def __init__(
     def evaluate_true(self, X: Tensor) -> Tensor:
         part1 = torch.sum(X ** 2 / 4000.0, dim=1)
         d = X.shape[1]
-        part2 = -torch.prod(
-            torch.cos(X / torch.sqrt(X.new(range(1, d + 1))).view(1, -1))
+        part2 = -(
+            torch.prod(torch.cos(X / torch.sqrt(X.new(range(1, d + 1))).view(1, -1)))
         )
         return part1 + part2 + 1.0

@@ -325,7 +325,7 @@ def optimizers(self) -> Tensor:
     def evaluate_true(self, X: Tensor) -> Tensor:
         self.to(device=X.device, dtype=X.dtype)
         inner_sum = torch.sum(self.A * (X.unsqueeze(1) - 0.0001 * self.P) ** 2, dim=2)
-        H = -torch.sum(self.ALPHA * torch.exp(-inner_sum), dim=1)
+        H = -(torch.sum(self.ALPHA * torch.exp(-inner_sum), dim=1))
         if self.dim == 4:
             H = (1.1 + H) / 0.839
         return H

@@ -358,7 +358,9 @@ class HolderTable(SyntheticTestFunction):
     def evaluate_true(self, X: Tensor) -> Tensor:
         term = torch.abs(1 - torch.norm(X, dim=1) / math.pi)
-        return -torch.abs(torch.sin(X[..., 0]) * torch.cos(X[..., 1]) * torch.exp(term))
+        return -(
+            torch.abs(torch.sin(X[..., 0]) * torch.cos(X[..., 1]) * torch.exp(term))
+        )


 class Levy(SyntheticTestFunction):

@@ -431,8 +433,10 @@ def optimizers(self) -> Tensor:
     def evaluate_true(self, X: Tensor) -> Tensor:
         self.to(device=X.device, dtype=X.dtype)
         m = 10
-        return -torch.sum(
-            torch.sin(X) * torch.sin(self.i * X ** 2 / math.pi) ** (2 * m), dim=-1
+        return -(
+            torch.sum(
+                torch.sin(X) * torch.sin(self.i * X ** 2 / math.pi) ** (2 * m), dim=-1
+            )
         )

botorch/utils/constraints.py

Lines changed: 1 addition & 1 deletion

@@ -18,7 +18,7 @@
 def get_outcome_constraint_transforms(
-    outcome_constraints: Optional[Tuple[Tensor, Tensor]],
+    outcome_constraints: Optional[Tuple[Tensor, Tensor]]
 ) -> Optional[List[Callable[[Tensor], Tensor]]]:
     r"""Create outcome constraint callables from outcome constraint tensors.

botorch/utils/transforms.py

Lines changed: 1 addition & 1 deletion

@@ -227,7 +227,7 @@ def match_batch_shape(X: Tensor, Y: Tensor) -> Tensor:
         torch.Size([2, 6, 5, 3])

     """
-    return X.expand(X.shape[: -Y.dim()] + Y.shape[:-2] + X.shape[-2:])
+    return X.expand(X.shape[: -(Y.dim())] + Y.shape[:-2] + X.shape[-2:])


 def convert_to_target_pre_hook(module, *args):

scripts/validate_sphinx.py

Lines changed: 1 addition & 1 deletion

@@ -96,7 +96,7 @@ def validate_complete_sphinx(path_to_botorch: str) -> None:
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
-        description=("Validate that Sphinx documentation is complete.")
+        description="Validate that Sphinx documentation is complete."
     )
     parser.add_argument(
         "-p",

test/acquisition/test_active_learning.py

Lines changed: 1 addition & 1 deletion

@@ -53,7 +53,7 @@ def test_q_neg_int_post_variance(self):
             )
             X = torch.empty(1, 1, device=self.device, dtype=dtype)  # dummy
             val = qNIPV(X)
-            self.assertTrue(torch.allclose(val, -variance.mean(), atol=1e-4))
+            self.assertTrue(torch.allclose(val, -(variance.mean()), atol=1e-4))
             # batched model
             mean = torch.zeros(2, 4, 1, device=self.device, dtype=dtype)
             variance = torch.rand(2, 4, 1, device=self.device, dtype=dtype)
