@@ -18,8 +18,9 @@
     fit_gpytorch_mll,
     get_fitted_map_saas_ensemble,
     get_fitted_map_saas_model,
+    logger,
 )
-from botorch.models import SaasFullyBayesianSingleTaskGP, SingleTaskGP
+from botorch.models import SingleTaskGP
 from botorch.models.map_saas import (
     add_saas_prior,
     AdditiveMapSaasSingleTaskGP,
@@ -299,93 +300,24 @@ def test_get_saas_model(self) -> None:
         self.assertTrue(loss < loss_short)
 
     def test_get_saas_ensemble(self) -> None:
-        for infer_noise, taus in itertools.product([True, False], [None, [0.1, 0.2]]):
-            tkwargs = {"device": self.device, "dtype": torch.double}
-            train_X, train_Y, _ = self._get_data_hardcoded(**tkwargs)
-            d = train_X.shape[-1]
-            train_Yvar = (
-                None
-                if infer_noise
-                else 0.1 * torch.arange(len(train_X), **tkwargs).unsqueeze(-1)
-            )
-            # Fit without specifying tau
-            with torch.random.fork_rng():
-                torch.manual_seed(0)
-                model = get_fitted_map_saas_ensemble(
-                    train_X=train_X,
-                    train_Y=train_Y,
-                    train_Yvar=train_Yvar,
-                    input_transform=Normalize(d=d),
-                    outcome_transform=Standardize(m=1),
-                    taus=taus,
-                )
-            self.assertIsInstance(model, SaasFullyBayesianSingleTaskGP)
-            num_taus = 4 if taus is None else len(taus)
-            self.assertEqual(
-                model.covar_module.base_kernel.lengthscale.shape,
-                torch.Size([num_taus, 1, d]),
-            )
-            self.assertEqual(model.batch_shape, torch.Size([num_taus]))
-            # Make sure the lengthscales are reasonable
-            self.assertGreater(
-                model.covar_module.base_kernel.lengthscale[..., 1:].min(), 50
-            )
-            self.assertLess(
-                model.covar_module.base_kernel.lengthscale[..., 0].max(), 10
-            )
-
-            # testing optimizer_options: short optimization run with maxiter = 3
-            with torch.random.fork_rng():
-                torch.manual_seed(0)
-                fit_gpytorch_mll_mock = mock.Mock(wraps=fit_gpytorch_mll)
-                with mock.patch(
-                    "botorch.fit.fit_gpytorch_mll",
-                    new=fit_gpytorch_mll_mock,
-                ):
-                    maxiter = 3
-                    model_short = get_fitted_map_saas_ensemble(
-                        train_X=train_X,
-                        train_Y=train_Y,
-                        train_Yvar=train_Yvar,
-                        input_transform=Normalize(d=d),
-                        outcome_transform=Standardize(m=1),
-                        taus=taus,
-                        optimizer_kwargs={"options": {"maxiter": maxiter}},
-                    )
-                    kwargs = fit_gpytorch_mll_mock.call_args.kwargs
-                    # fit_gpytorch_mll has "option" kwarg, not "optimizer_options"
-                    self.assertEqual(
-                        kwargs["optimizer_kwargs"]["options"]["maxiter"], maxiter
-                    )
-
-            # compute sum of marginal likelihoods of ensemble after short run
-            # NOTE: We can't put MLL in train mode here since
-            # SaasFullyBayesianSingleTaskGP requires NUTS for training.
-            mll_short = ExactMarginalLogLikelihood(
-                model=model_short, likelihood=model_short.likelihood
+        train_X, train_Y, _ = self._get_data_hardcoded(device=self.device)
+        with self.assertLogs(logger=logger, level="WARNING") as logs, mock.patch(
+            "botorch.fit.fit_gpytorch_mll"
+        ) as mock_fit:
+            model = get_fitted_map_saas_ensemble(
+                train_X=train_X,
+                train_Y=train_Y,
+                input_transform=Normalize(d=train_X.shape[-1]),
+                outcome_transform=Standardize(m=1, batch_shape=torch.Size([4])),
+                optimizer_kwargs={"options": {"maxiter": 3}},
             )
-            train_inputs = mll_short.model.train_inputs
-            train_targets = mll_short.model.train_targets
-            loss_short = -mll_short(model_short(*train_inputs), train_targets)
-            # compute sum of marginal likelihoods of ensemble after standard run
-            mll = ExactMarginalLogLikelihood(model=model, likelihood=model.likelihood)
-            # reusing train_inputs and train_targets, since the transforms are the same
-            loss = -mll(model(*train_inputs), train_targets)
-            # the longer running optimization should have smaller loss than the shorter
-            self.assertLess((loss - loss_short).max(), 0.0)
-
-            # test error message
-            with self.assertRaisesRegex(
-                ValueError, "if you only specify one value of tau"
-            ):
-                model_short = get_fitted_map_saas_ensemble(
-                    train_X=train_X,
-                    train_Y=train_Y,
-                    train_Yvar=train_Yvar,
-                    input_transform=Normalize(d=d),
-                    outcome_transform=Standardize(m=1),
-                    taus=[0.1],
-                )
+        self.assertTrue(
+            any("use EnsembleMapSaasGP instead" in output for output in logs.output)
+        )
+        self.assertEqual(
+            mock_fit.call_args.kwargs["optimizer_kwargs"], {"options": {"maxiter": 3}}
+        )
+        self.assertIsInstance(model, EnsembleMapSaasSingleTaskGP)
 
     def test_input_transform_in_train(self) -> None:
         train_X, train_Y, test_X = self._get_data()
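Note: the rewritten test above pins down the helper's new behavior. get_fitted_map_saas_ensemble now emits a WARNING-level log telling callers to "use EnsembleMapSaasGP instead", forwards optimizer_kwargs unchanged to fit_gpytorch_mll, and is expected to return an EnsembleMapSaasSingleTaskGP rather than a SaasFullyBayesianSingleTaskGP. A minimal sketch of the call pattern under test, assuming the helper is importable from botorch.fit and the transforms from botorch.models.transforms, as in this test module (the toy data is made up for illustration):

import torch

from botorch.fit import get_fitted_map_saas_ensemble
from botorch.models.transforms import Normalize, Standardize

# Toy training data in double precision, as in the test suite.
train_X = torch.rand(10, 4, dtype=torch.double)
train_Y = train_X.sum(dim=-1, keepdim=True)

# Still fits a model, but now logs a WARNING suggesting EnsembleMapSaasGP
# and returns an EnsembleMapSaasSingleTaskGP; optimizer_kwargs is passed
# straight through to fit_gpytorch_mll.
model = get_fitted_map_saas_ensemble(
    train_X=train_X,
    train_Y=train_Y,
    input_transform=Normalize(d=train_X.shape[-1]),
    # batch_shape of 4 matches the default ensemble size asserted in the test.
    outcome_transform=Standardize(m=1, batch_shape=torch.Size([4])),
    optimizer_kwargs={"options": {"maxiter": 100}},
)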
@@ -522,7 +454,7 @@ def test_batch_model_fitting(self) -> None:
 
     @mock_optimize
     def test_emsemble_map_saas(self) -> None:
-        train_X, train_Y, test_X = self._get_data()
+        train_X, train_Y, test_X = self._get_data(device=self.device)
         d = train_X.shape[-1]
         num_taus = 8
         for with_options in (False, True):
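For the direct-construction path that the warning points to, a sketch of how EnsembleMapSaasSingleTaskGP might be built and fit is below. The num_taus keyword mirrors the num_taus = 8 used in test_emsemble_map_saas and is an assumption by analogy with the tests above, as is the fit_gpytorch_mll fitting loop; none of this code is taken verbatim from the diff:

import torch

from botorch.fit import fit_gpytorch_mll
from botorch.models.map_saas import EnsembleMapSaasSingleTaskGP
from botorch.models.transforms import Normalize, Standardize
from gpytorch.mlls import ExactMarginalLogLikelihood

# Toy training data for illustration only.
train_X = torch.rand(20, 6, dtype=torch.double)
train_Y = train_X[:, :1].sin()
num_taus = 8  # mirrors the value used in test_emsemble_map_saas

# Build the ensemble directly instead of going through the deprecated
# get_fitted_map_saas_ensemble helper; one MAP-SAAS member per tau.
model = EnsembleMapSaasSingleTaskGP(
    train_X=train_X,
    train_Y=train_Y,
    num_taus=num_taus,  # assumed keyword, by analogy with the tests above
    input_transform=Normalize(d=train_X.shape[-1]),
    outcome_transform=Standardize(m=1, batch_shape=torch.Size([num_taus])),
)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_mll(mll)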