Skip to content

Commit 5c33d69

Browse files
Balandat authored and facebook-github-bot committed
Properly handle observation_noise kwarg for BatchedMultiOutputGPyTorchModel (#182)
Summary: Fixes a bug where the `observation_noise` kwarg was disregarded for models of the class `BatchedMultiOutputGPyTorchModel`.

Pull Request resolved: #182

Test Plan: contbuild

Reviewed By: danielrjiang

Differential Revision: D15972893

Pulled By: Balandat

fbshipit-source-id: 0abeacbf999ec90184838ccd7fa8b5f0dae51334
1 parent e2c64fe commit 5c33d69

File tree

2 files changed

+37
-5
lines changed

2 files changed

+37
-5
lines changed

botorch/models/gpytorch.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -185,6 +185,8 @@ def posterior(
185185
X=X, original_batch_shape=self._input_batch_shape
186186
)
187187
mvn = self(X)
188+
if observation_noise:
189+
mvn = self.likelihood(mvn, X)
188190
mean_x = mvn.mean
189191
covar_x = mvn.covariance_matrix
190192
if self._num_outputs > 1:

test/models/test_gp_regression.py

Lines changed: 35 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@
1212
HeteroskedasticSingleTaskGP,
1313
SingleTaskGP,
1414
)
15+
from botorch.models.utils import add_output_dim
1516
from botorch.posteriors import GPyTorchPosterior
1617
from botorch.sampling import SobolQMCNormalSampler
1718
from gpytorch.kernels import MaternKernel, ScaleKernel
@@ -82,20 +83,39 @@ def test_gp(self, cuda=False):
8283
# test posterior
8384
# test non batch evaluation
8485
X = torch.rand(batch_shape + torch.Size([3, 1]), **tkwargs)
86+
expected_mean_shape = batch_shape + torch.Size([3, num_outputs])
8587
posterior = model.posterior(X)
8688
self.assertIsInstance(posterior, GPyTorchPosterior)
87-
self.assertEqual(
88-
posterior.mean.shape, batch_shape + torch.Size([3, num_outputs])
89+
self.assertEqual(posterior.mean.shape, expected_mean_shape)
90+
# test adding observation noise
91+
posterior_pred = model.posterior(X, observation_noise=True)
92+
self.assertIsInstance(posterior_pred, GPyTorchPosterior)
93+
self.assertEqual(posterior_pred.mean.shape, expected_mean_shape)
94+
pvar = posterior_pred.variance
95+
pvar_exp = _get_pvar_expected(posterior, model, X, num_outputs)
96+
self.assertTrue(
97+
torch.allclose(pvar, pvar_exp, rtol=1e-4, atol=1e-06)
8998
)
99+
90100
# test batch evaluation
91101
X = torch.rand(
92102
torch.Size([2]) + batch_shape + torch.Size([3, 1]), **tkwargs
93103
)
104+
expected_mean_shape = (
105+
torch.Size([2]) + batch_shape + torch.Size([3, num_outputs])
106+
)
107+
94108
posterior = model.posterior(X)
95109
self.assertIsInstance(posterior, GPyTorchPosterior)
96-
self.assertEqual(
97-
posterior.mean.shape,
98-
torch.Size([2]) + batch_shape + torch.Size([3, num_outputs]),
110+
self.assertEqual(posterior.mean.shape, expected_mean_shape)
111+
# test adding observation noise in batch mode
112+
posterior_pred = model.posterior(X, observation_noise=True)
113+
self.assertIsInstance(posterior_pred, GPyTorchPosterior)
114+
self.assertEqual(posterior_pred.mean.shape, expected_mean_shape)
115+
pvar = posterior_pred.variance
116+
pvar_exp = _get_pvar_expected(posterior, model, X, num_outputs)
117+
self.assertTrue(
118+
torch.allclose(pvar, pvar_exp, rtol=1e-4, atol=1e-06)
99119
)
100120

101121
def test_gp_cuda(self):
@@ -324,3 +344,13 @@ def test_condition_on_observations(self, cuda=False):
324344
def test_fantasize(self, cuda=False):
325345
with self.assertRaises(NotImplementedError):
326346
super().test_fantasize(cuda=cuda)
347+
348+
349+
def _get_pvar_expected(posterior, model, X, num_outputs):
350+
if num_outputs == 1:
351+
return model.likelihood(posterior.mvn, X).variance.unsqueeze(-1)
352+
X_, odi = add_output_dim(X=X, original_batch_shape=model._input_batch_shape)
353+
pvar_exp = model.likelihood(model(X_), X_).variance
354+
return torch.stack(
355+
[pvar_exp.select(dim=odi, index=i) for i in range(num_outputs)], dim=-1
356+
)

0 commit comments

Comments (0)