Commit 2444d58

jduerholt authored and facebook-github-bot committed
Fix problem with polytope sampler (#1341)
Summary:

## Motivation

As discussed in issue #1225, the polytope sampler runs into problems if the variables live on very different scales. This PR fixes the issue using the approach mentioned in the issue above: the box bounds and the linear constraints are normalized to the unit cube before sampling, and the drawn samples are mapped back to the original bounds.

### Have you read the [Contributing Guidelines on pull requests](https://github.com/pytorch/botorch/blob/main/CONTRIBUTING.md#pull-requests)?

Yes

Pull Request resolved: #1341

Test Plan: Unit tests

Reviewed By: esantorella

Differential Revision: D38485035

Pulled By: Balandat

fbshipit-source-id: 8e6ed1c1b81f636d108becb1ab5cf2ca13294420
1 parent 2829e2a commit 2444d58
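For context, a minimal sketch of the kind of call this fix targets: drawing polytope samples when the variables have bounds of very different magnitudes, the setting from issue #1225. The bounds and constraint values below are illustrative only, not taken from the issue or this PR; `get_polytope_samples` and the `(indices, coefficients, rhs)` constraint encoding are the ones changed in this commit.

    import torch
    from botorch.utils.sampling import get_polytope_samples

    # x_0 lives in [0, 1] while x_1 lives in [0, 1000] -- very different scales.
    bounds = torch.tensor([[0.0, 0.0], [1.0, 1000.0]])
    # Encode x_0 + 0.001 * x_1 >= 0.5 as (indices, coefficients, rhs);
    # the index tensor must be integer-typed (torch.int64).
    inequality_constraints = [
        (torch.tensor([0, 1], dtype=torch.int64), torch.tensor([1.0, 0.001]), 0.5)
    ]
    # With this commit, constraints and bounds are normalized to the unit cube
    # internally and the returned samples are mapped back to the original bounds.
    samples = get_polytope_samples(
        n=16,
        bounds=bounds,
        inequality_constraints=inequality_constraints,
        seed=1234,
    )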

File tree

3 files changed: +62 -8 lines changed


botorch/utils/sampling.py

Lines changed: 35 additions & 4 deletions
@@ -830,6 +830,30 @@ def draw(self, n: int = 1, seed: Optional[int] = None) -> Tensor:
         return samples
 
 
+def normalize_linear_constraints(
+    bounds: Tensor, constraints: List[Tuple[Tensor, Tensor, float]]
+) -> List[Tuple[Tensor, Tensor, float]]:
+    r"""Normalize linear constraints to the unit cube.
+
+    Args:
+        bounds (Tensor): A `2 x d`-dim tensor containing the box bounds.
+        constraints (List[Tuple[Tensor, Tensor, float]]): A list of
+            tuples (indices, coefficients, rhs), with each tuple encoding
+            an inequality constraint of the form
+            `\sum_i (X[indices[i]] * coefficients[i]) >= rhs` or
+            `\sum_i (X[indices[i]] * coefficients[i]) = rhs`.
+    """
+
+    new_constraints = []
+    for index, coefficient, rhs in constraints:
+        lower, upper = bounds[:, index]
+        s = upper - lower
+        new_constraints.append(
+            (index, s * coefficient, (rhs - torch.dot(coefficient, lower)).item())
+        )
+    return new_constraints
+
+
 def get_polytope_samples(
     n: int,
     bounds: Tensor,
@@ -865,9 +889,11 @@ def get_polytope_samples(
     # create tensors representing linear inequality constraints
     # of the form Ax >= b.
     if inequality_constraints:
+        # normalize_linear_constraints is called to solve this issue:
+        # https://github.com/pytorch/botorch/issues/1225
         A, b = sparse_to_dense_constraints(
             d=bounds.shape[-1],
-            constraints=inequality_constraints,
+            constraints=normalize_linear_constraints(bounds, inequality_constraints),
         )
         # Note the inequality constraints are of the form Ax >= b,
         # but PolytopeSampler expects inequality constraints of the
@@ -876,19 +902,24 @@
     else:
         dense_inequality_constraints = None
     if equality_constraints:
+        # normalize_linear_constraints is called to solve this issue:
+        # https://github.com/pytorch/botorch/issues/1225
         dense_equality_constraints = sparse_to_dense_constraints(
             d=bounds.shape[-1],
-            constraints=equality_constraints,
+            constraints=normalize_linear_constraints(bounds, equality_constraints),
         )
     else:
         dense_equality_constraints = None
+    normalized_bounds = torch.zeros_like(bounds)
+    normalized_bounds[1, :] = 1.0
     polytope_sampler = HitAndRunPolytopeSampler(
+        bounds=normalized_bounds,
         inequality_constraints=dense_inequality_constraints,
-        bounds=bounds,
         equality_constraints=dense_equality_constraints,
         n_burnin=n_burnin,
     )
-    return polytope_sampler.draw(n=n * thinning, seed=seed)[::thinning]
+    samples = polytope_sampler.draw(n=n * thinning, seed=seed)[::thinning]
+    return bounds[0] + samples * (bounds[1] - bounds[0])
 
 
 def sparse_to_dense_constraints(
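To see why the rewritten coefficients and right-hand side in `normalize_linear_constraints` are correct: substituting the unit-cube variable `z = (x - lower) / (upper - lower)`, i.e. `x = lower + (upper - lower) * z`, into `sum_i coefficients[i] * X[indices[i]] >= rhs` gives `sum_i (upper_i - lower_i) * coefficients[i] * z_i >= rhs - sum_i coefficients[i] * lower_i`, which is exactly the tuple the new function builds. A small hand check of that transform, with illustrative numbers that are not taken from this PR:

    import torch

    # One variable in [0, 1] and one in [0, 1000]; constraint x_0 + 0.001 * x_1 >= 0.5.
    bounds = torch.tensor([[0.0, 0.0], [1.0, 1000.0]])
    index = torch.tensor([0, 1], dtype=torch.int64)
    coefficient = torch.tensor([1.0, 0.001])
    rhs = 0.5

    # Same steps as normalize_linear_constraints:
    lower, upper = bounds[:, index]
    s = upper - lower                                        # tensor([1., 1000.])
    new_coefficient = s * coefficient                        # tensor([1., 1.])
    new_rhs = (rhs - torch.dot(coefficient, lower)).item()   # 0.5
    # In unit-cube coordinates the constraint becomes z_0 + z_1 >= 0.5, so both
    # variables now contribute on the same scale to the sampler's geometry.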

test/optim/test_initializers.py

Lines changed: 2 additions & 2 deletions
@@ -275,14 +275,14 @@ def test_gen_batch_initial_conditions_constraints(self):
             bounds = torch.tensor([[0, 0], [1, 1]], device=self.device, dtype=dtype)
             inequality_constraints = [
                 (
-                    torch.tensor([1], device=self.device, dtype=dtype),
+                    torch.tensor([1], device=self.device, dtype=torch.int64),
                     torch.tensor([-4], device=self.device, dtype=dtype),
                     torch.tensor(-3, device=self.device, dtype=dtype),
                 )
             ]
             equality_constraints = [
                 (
-                    torch.tensor([0], device=self.device, dtype=dtype),
+                    torch.tensor([0], device=self.device, dtype=torch.int64),
                     torch.tensor([1], device=self.device, dtype=dtype),
                     torch.tensor(0.5, device=self.device, dtype=dtype),
                 )
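A note on the dtype change above: the constraint index tensors now have to be integer-typed because `normalize_linear_constraints` uses them to index the bounds tensor (`bounds[:, index]`), and PyTorch only accepts integer or boolean tensors for that kind of indexing. A standalone illustration of the requirement (values are illustrative, not part of the PR):

    import torch

    bounds = torch.tensor([[0.0, 0.0], [1.0, 1.0]])

    # An integer index tensor works with advanced indexing:
    print(bounds[:, torch.tensor([1], dtype=torch.int64)])

    # A float index tensor is rejected by PyTorch, which is why the tests switched
    # the constraint indices from the floating-point dtype to torch.int64:
    try:
        bounds[:, torch.tensor([1.0])]
    except (IndexError, RuntimeError) as exc:
        print(f"float index tensor rejected: {exc}")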

test/utils/test_sampling.py

Lines changed: 25 additions & 2 deletions
@@ -28,6 +28,7 @@
     get_polytope_samples,
     HitAndRunPolytopeSampler,
     manual_seed,
+    normalize_linear_constraints,
     PolytopeSampler,
     sample_hypersphere,
     sample_simplex,
@@ -289,6 +290,28 @@ def test_sparse_to_dense_constraints(self):
             expected_b = torch.tensor([[3.0]], **tkwargs)
             self.assertTrue(torch.equal(b, expected_b))
 
+    def test_normalize_linear_constraints(self):
+        tkwargs = {"device": self.device}
+        for dtype in (torch.float, torch.double):
+            tkwargs["dtype"] = dtype
+            constraints = [
+                (
+                    torch.tensor([1, 2, 0], dtype=torch.int64, device=self.device),
+                    torch.tensor([1.0, 1.0, 1.0], **tkwargs),
+                    1.0,
+                )
+            ]
+            bounds = torch.tensor(
+                [[0.1, 0.3, 0.1, 30.0], [0.6, 0.7, 0.7, 700.0]], **tkwargs
+            )
+            new_constraints = normalize_linear_constraints(bounds, constraints)
+            expected_coefficients = torch.tensor([0.4000, 0.6000, 0.5000], **tkwargs)
+            self.assertTrue(
+                torch.allclose(new_constraints[0][1], expected_coefficients)
+            )
+            expected_rhs = 0.5
+            self.assertAlmostEqual(new_constraints[0][-1], expected_rhs)
+
     def test_find_interior_point(self):
         # basic problem: 1 <= x_1 <= 2, 2 <= x_2 <= 3
         A = np.concatenate([np.eye(2), -np.eye(2)], axis=0)
@@ -318,14 +341,14 @@ def test_get_polytope_samples(self):
             bounds[1] = 1
             inequality_constraints = [
                (
-                    torch.tensor([3], **tkwargs),
+                    torch.tensor([3], dtype=torch.int64, device=self.device),
                     torch.tensor([-4], **tkwargs),
                     -3,
                 )
             ]
             equality_constraints = [
                 (
-                    torch.tensor([0], **tkwargs),
+                    torch.tensor([0], dtype=torch.int64, device=self.device),
                     torch.tensor([1], **tkwargs),
                     0.5,
                 )
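As a quick sanity check on the expected values in `test_normalize_linear_constraints`: for indices `[1, 2, 0]` the per-variable ranges taken from `bounds` are 0.7 - 0.3 = 0.4, 0.7 - 0.1 = 0.6, and 0.6 - 0.1 = 0.5, which with unit coefficients are exactly the expected normalized coefficients, and the new right-hand side is 1.0 - (1.0 * 0.3 + 1.0 * 0.1 + 1.0 * 0.1) = 0.5.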
