Commit e618743

saitcakmak authored and facebook-github-bot committed
Avoid division by zero in normalize & unnormalize when lower & upper bounds are equal (#2363)
Summary:
Pull Request resolved: #2363

When lower and upper bounds are equal for a dimension, set `upper = lower + 1` to avoid division by zero in `normalize`. The behavior is mirrored in `unnormalize` to preserve `X = unnormalize(normalize(X, bounds), bounds)`. This fixes some downstream issues observed in `get_chebyshev_objective`, and also lets us simplify `bounds` computation there.

Reviewed By: sdaulton

Differential Revision: D58168827

fbshipit-source-id: 37f29876fdab653c426a04b7c4a4122c8d41e497
1 parent 91b45b7 · commit e618743
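
As a quick illustration of the fixed behavior (a minimal sketch with arbitrary values, using the public `normalize`/`unnormalize` from `botorch.utils.transforms`):

import torch
from botorch.utils.transforms import normalize, unnormalize

# Bounds for d = 2; the second dimension is degenerate (lower == upper).
bounds = torch.tensor([[0.0, 2.0], [1.0, 2.0]])
X = torch.tensor([[0.5, 2.0], [0.25, 2.0]])

# Degenerate dimensions are shifted rather than scaled, so there is no
# division by zero and the round trip is exact.
X_norm = normalize(X, bounds=bounds)
assert not X_norm.isnan().any() and not X_norm.isinf().any()
assert torch.allclose(unnormalize(X_norm, bounds=bounds), X)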

File tree

4 files changed: +53 -9 lines changed

botorch/utils/multi_objective/scalarization.py
botorch/utils/transforms.py
test/utils/multi_objective/test_scalarization.py
test/utils/test_transforms.py

botorch/utils/multi_objective/scalarization.py

Lines changed: 2 additions & 8 deletions
@@ -95,14 +95,8 @@ def obj(Y: Tensor, X: Optional[Tensor] = None) -> Tensor:
             return -chebyshev_obj(Y=-Y)
 
         return obj
-    if Y.shape[-2] == 1:
-        # If there is only one observation, set the bounds to be
-        # [min(Y_m), min(Y_m) + 1] for each objective m. This ensures we do not
-        # divide by zero
-        Y_bounds = torch.cat([Y, Y + 1], dim=0)
-    else:
-        # Set the bounds to be [min(Y_m), max(Y_m)], for each objective m
-        Y_bounds = torch.stack([Y.min(dim=-2).values, Y.max(dim=-2).values])
+    # Set the bounds to be [min(Y_m), max(Y_m)], for each objective m.
+    Y_bounds = torch.stack([Y.min(dim=-2).values, Y.max(dim=-2).values])
 
     def obj(Y: Tensor, X: Optional[Tensor] = None) -> Tensor:
         # scale to [0,1]
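
With `normalize` now guarding against equal bounds, the one-observation special case above becomes unnecessary: degenerate `Y_bounds` with min == max can be passed straight through. A minimal sketch of that case, assuming the post-fix `normalize` (values arbitrary):

import torch
from botorch.utils.transforms import normalize

# A single observation makes min == max for every objective, so Y_bounds
# has identical lower and upper rows.
Y = torch.tensor([[1.0, 2.0]])
Y_bounds = torch.stack([Y.min(dim=-2).values, Y.max(dim=-2).values])

Y_scaled = normalize(Y, bounds=Y_bounds)  # shift only; no NaN or inf
assert not Y_scaled.isnan().any() and not Y_scaled.isinf().any()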

botorch/utils/transforms.py

Lines changed: 29 additions & 0 deletions
@@ -46,9 +46,32 @@ def standardize(Y: Tensor) -> Tensor:
     return (Y - Y.mean(dim=stddim, keepdim=True)) / Y_std
 
 
+def _update_constant_bounds(bounds: Tensor) -> Tensor:
+    r"""If the lower and upper bounds are identical for a dimension, set
+    the upper bound to lower bound + 1.
+
+    If any modification is needed, this will return a clone of the original
+    tensor to avoid in-place modification.
+
+    Args:
+        bounds: A `2 x d`-dim tensor of lower and upper bounds.
+
+    Returns:
+        A `2 x d`-dim tensor of updated lower and upper bounds.
+    """
+    if (constant_dims := (bounds[1] == bounds[0])).any():
+        bounds = bounds.clone()
+        bounds[1, constant_dims] = bounds[0, constant_dims] + 1
+    return bounds
+
+
 def normalize(X: Tensor, bounds: Tensor) -> Tensor:
     r"""Min-max normalize X w.r.t. the provided bounds.
 
+    NOTE: If the upper and lower bounds are identical for a dimension, that dimension
+    will not be scaled. Such dimensions will only be shifted as
+    `new_X[..., i] = X[..., i] - bounds[0, i]`. This avoids division by zero issues.
+
     Args:
         X: `... x d` tensor of data
         bounds: `2 x d` tensor of lower and upper bounds for each of the X's d
@@ -65,12 +88,17 @@ def normalize(X: Tensor, bounds: Tensor) -> Tensor:
     >>> bounds = torch.stack([torch.zeros(3), 0.5 * torch.ones(3)])
     >>> X_normalized = normalize(X, bounds)
     """
+    bounds = _update_constant_bounds(bounds=bounds)
     return (X - bounds[0]) / (bounds[1] - bounds[0])
 
 
 def unnormalize(X: Tensor, bounds: Tensor) -> Tensor:
     r"""Un-normalizes X w.r.t. the provided bounds.
 
+    NOTE: If the upper and lower bounds are identical for a dimension, that dimension
+    will not be scaled. Such dimensions will only be shifted as
+    `new_X[..., i] = X[..., i] + bounds[0, i]`, matching the behavior of `normalize`.
+
     Args:
         X: `... x d` tensor of data
         bounds: `2 x d` tensor of lower and upper bounds for each of the X's d
@@ -87,6 +115,7 @@ def unnormalize(X: Tensor, bounds: Tensor) -> Tensor:
     >>> bounds = torch.stack([torch.zeros(3), 0.5 * torch.ones(3)])
     >>> X = unnormalize(X_normalized, bounds)
     """
+    bounds = _update_constant_bounds(bounds=bounds)
     return X * (bounds[1] - bounds[0]) + bounds[0]
 
 
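
The helper's copy-on-write behavior can be exercised directly. A short sketch (note that `_update_constant_bounds` is a private helper introduced in this commit, so importing it is illustrative and subject to change):

import torch
from botorch.utils.transforms import _update_constant_bounds

bounds = torch.tensor([[0.0, 1.0], [0.0, 3.0]])  # first dimension is constant
updated = _update_constant_bounds(bounds=bounds)
assert updated is not bounds  # a clone is returned when any dim is modified
assert updated[1, 0] == bounds[0, 0] + 1  # upper set to lower + 1
assert torch.equal(updated[:, 1], bounds[:, 1])  # other dimensions untouched

b2 = torch.tensor([[0.0, 0.0], [1.0, 2.0]])
assert _update_constant_bounds(bounds=b2) is b2  # no constant dims: no clone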

test/utils/multi_objective/test_scalarization.py

Lines changed: 10 additions & 0 deletions
@@ -120,3 +120,13 @@ def test_get_chebyshev_scalarization(self):
                 + 0.05 * (weights * normalized_neg_Y_test).sum(dim=-1)
             )
             self.assertAllClose(Y_transformed, expected_Y_transformed)
+
+            # Test that it works when Y is constant in each dimension.
+            objective_transform = get_chebyshev_scalarization(
+                weights=weights, Y=torch.zeros(2, 2, **tkwargs), alpha=0.0
+            )
+            Y_transformed = objective_transform(Y_test)
+            self.assertFalse(Y_transformed.isnan().any())
+            self.assertFalse(Y_transformed.isinf().any())
+            expected_Y = -(-weights * Y_test).max(dim=-1).values
+            self.assertAllClose(Y_transformed, expected_Y)

test/utils/test_transforms.py

Lines changed: 12 additions & 1 deletion
@@ -59,7 +59,7 @@ def test_standardize(self):
 
 
 class TestNormalizeAndUnnormalize(BotorchTestCase):
-    def test_normalize_unnormalize(self):
+    def test_normalize_unnormalize(self) -> None:
         for dtype in (torch.float, torch.double):
             X = torch.tensor([0.0, 0.25, 0.5], device=self.device, dtype=dtype).view(
                 -1, 1
@@ -86,6 +86,17 @@ def test_normalize_unnormalize(self):
             self.assertTrue(torch.equal(X2_normalized, expected_X2_normalized))
             self.assertTrue(torch.equal(X2, unnormalize(X2_normalized, bounds=bounds2)))
 
+    def test_with_constant_bounds(self) -> None:
+        X = torch.rand(10, 2, dtype=torch.double)
+        # First dimension is constant, second has a range of 1.
+        # The transform should just add 1 to each dimension.
+        bounds = -torch.ones(2, 2, dtype=torch.double)
+        bounds[1, 1] = 0.0
+        X_normalized = normalize(X, bounds=bounds)
+        self.assertAllClose(X_normalized, X + 1)
+        X_unnormalized = unnormalize(X_normalized, bounds=bounds)
+        self.assertAllClose(X_unnormalized, X)
+
 
 class BMIMTestClass(BotorchTestCase):
     @t_batch_mode_transform(assert_output_shape=False)
