
Commit 92f0d1d

esantorella authored and facebook-github-bot committed
Generate test for NdarrayOptimizationClosure that surfaces failures that were otherwise flaky (#1505)
Summary:
Pull Request resolved: #1505

We have been seeing `Caused by ValueError: failed to initialize intent(inout) array -- expected elsize=8 but got 4` on occasion. Although this should be fixed by D41355824 (c7ed6ab), it was a frustratingly flaky error, happening ~1%-4% of the time in unit tests, so we wanted a test that errors consistently to guard against regressions.

The error happens as a result of NaNs being cast to the wrong dtype upon optimization failures in `NdarrayOptimizationClosure`. We didn't want to iterate on the flaky test, `test_fit.TestFitOther.test_fit_with_converter`, since it will likely be deleted soon, so I added a new test that creates this issue more directly.

Reviewed By: j-wilson

Differential Revision: D41386524

fbshipit-source-id: cc23cfd6857c924c240b6fcb33edd58827e384cd
1 parent c6595ed commit 92f0d1d
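To make the dtype issue described in the summary concrete, here is a minimal sketch (not BoTorch's actual code; the helper `fallback_on_failure` and its signature are invented for illustration). SciPy's Fortran L-BFGS-B routine expects 8-byte (float64) arrays, so producing the NaN fallback in the parameters' 4-byte float32 dtype is what yields `expected elsize=8 but got 4`.

```python
# Minimal sketch, not BoTorch's implementation: illustrates why the NaN
# fallback built after a failed closure evaluation must be float64 even when
# the torch parameters are float32.
import numpy as np
import torch


def fallback_on_failure(parameters: dict) -> tuple:
    """Hypothetical helper: NaN-filled (loss, gradient) ndarrays after a failure."""
    num_params = sum(p.numel() for p in parameters.values())
    # Buggy variant: inheriting the parameters' float32 dtype here is what
    # produced "failed to initialize intent(inout) array -- expected elsize=8
    # but got 4" once SciPy handed the arrays to its Fortran routine.
    value = np.full((), np.nan, dtype=np.float64)
    grad = np.full(num_params, np.nan, dtype=np.float64)
    return value, grad


params = {"x": torch.tensor([0.0], dtype=torch.float32)}
value, grad = fallback_on_failure(params)
assert str(value.dtype) == "float64" and str(grad.dtype) == "float64"
```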

File tree

2 files changed: +57 -21 lines changed

- test/optim/test_fit.py
- test/test_fit.py

test/optim/test_fit.py

Lines changed: 28 additions & 0 deletions
```diff
@@ -142,6 +142,34 @@ def _test_fit_gpytorch_mll_scipy(self, mll):
         )
         mock_closure.assert_called_once_with(ab="cd")
 
+    def test_fit_with_nans(self) -> None:
+        """Test the branch of NdarrayOptimizationClosure that handles errors."""
+
+        from botorch.optim.closures import NdarrayOptimizationClosure
+
+        def closure():
+            raise RuntimeError("singular")
+
+        for dtype in [torch.float32, torch.float64]:
+
+            parameters = {"x": torch.tensor([0.0], dtype=dtype)}
+
+            wrapper = NdarrayOptimizationClosure(closure=closure, parameters=parameters)
+
+            def _assert_np_array_is_float64_type(array) -> bool:
+                # e.g. "float32" in "torch.float32"
+                self.assertEqual(str(array.dtype), "float64")
+
+            _assert_np_array_is_float64_type(wrapper()[0])
+            _assert_np_array_is_float64_type(wrapper()[1])
+            _assert_np_array_is_float64_type(wrapper.state)
+            _assert_np_array_is_float64_type(wrapper._get_gradient_ndarray())
+
+            # Any mll will do
+            mll = next(iter(self.mlls.values()))
+            # will error if dtypes are wrong
+            fit.fit_gpytorch_mll_scipy(mll, closure=wrapper, parameters=parameters)
+
 
 class TestFitGPyTorchMLLTorch(BotorchTestCase):
     def setUp(self):
```

test/test_fit.py

Lines changed: 29 additions & 21 deletions
```diff
@@ -642,26 +642,34 @@ def mock_fit_gpytorch_mll(*args, **kwargs):
 
 
 class TestFitOther(BotorchTestCase):
-    def test_fit_with_converter(self):
+    def helper_fit_with_converter(self, dtype) -> None:
         # Check that sequential optimization using converter does not
         # break input transforms.
-        for dtype in (torch.float, torch.double):
-            tkwargs = {"device": self.device, "dtype": dtype}
-            X = torch.rand(5, 2, **tkwargs) * 10
-            Y = X**2
-            intf = Normalize(2)
-            model = SingleTaskGP(X, Y, input_transform=intf)
-            mll = ExactMarginalLogLikelihood(model.likelihood, model)
-            with patch(
-                f"{fit_gpytorch_mll.__module__}.batched_to_model_list",
-                wraps=batched_to_model_list,
-            ) as wrapped_converter, warnings.catch_warnings(record=True) as ws:
-                warnings.simplefilter("always", BotorchWarning)
-                fit_gpytorch_mll(mll)
-            # Check that MLL repacking succeeded.
-            self.assertFalse(
-                any("Training loss of repacked model" in str(w.message) for w in ws)
-            )
-            wrapped_converter.assert_called_once()
-            self.assertFalse(torch.allclose(intf.mins, torch.zeros(1, 2, **tkwargs)))
-            self.assertFalse(torch.allclose(intf.ranges, torch.ones(1, 2, **tkwargs)))
+        tkwargs = {"device": self.device, "dtype": dtype}
+        # Set the seed to a number that doesn't generate numerical
+        # issues (no NaNs)
+        torch.manual_seed(0)
+        X = torch.rand(5, 2, **tkwargs) * 10
+        Y = X**2
+        intf = Normalize(2)
+        model = SingleTaskGP(X, Y, input_transform=intf)
+        mll = ExactMarginalLogLikelihood(model.likelihood, model)
+        with patch(
+            f"{fit_gpytorch_mll.__module__}.batched_to_model_list",
+            wraps=batched_to_model_list,
+        ) as wrapped_converter, warnings.catch_warnings(record=True) as ws:
+            warnings.simplefilter("always", BotorchWarning)
+            fit_gpytorch_mll(mll)
+        # Check that MLL repacking succeeded.
+        self.assertFalse(
+            any("Training loss of repacked model" in str(w.message) for w in ws)
+        )
+        wrapped_converter.assert_called_once()
+        self.assertFalse(torch.allclose(intf.mins, torch.zeros(1, 2, **tkwargs)))
+        self.assertFalse(torch.allclose(intf.ranges, torch.ones(1, 2, **tkwargs)))
+
+    def test_fit_with_converter_float32(self) -> None:
+        self.helper_fit_with_converter(torch.float)
+
+    def test_fit_with_converter_float64(self) -> None:
+        self.helper_fit_with_converter(torch.double)
```
