|
17 | 17 | fit_gpytorch_mll,
|
18 | 18 | get_fitted_map_saas_ensemble,
|
19 | 19 | get_fitted_map_saas_model,
|
| 20 | + logger, |
20 | 21 | )
|
21 |
| -from botorch.models import SaasFullyBayesianSingleTaskGP, SingleTaskGP |
| 22 | +from botorch.models import SingleTaskGP |
22 | 23 | from botorch.models.map_saas import (
|
23 | 24 | add_saas_prior,
|
24 | 25 | AdditiveMapSaasSingleTaskGP,
|
@@ -292,93 +293,24 @@ def test_get_saas_model(self) -> None:
|
292 | 293 | self.assertTrue(loss < loss_short)
|
293 | 294 |
|
def test_get_saas_ensemble(self) -> None:
    """Smoke-test the deprecated ``get_fitted_map_saas_ensemble`` entry point.

    Checks three things: (a) a WARNING-level log is emitted that points users
    at the replacement model ("use EnsembleMapSaasGP instead"), (b) the
    ``optimizer_kwargs`` argument is forwarded verbatim to
    ``botorch.fit.fit_gpytorch_mll``, and (c) the returned model is an
    ``EnsembleMapSaasSingleTaskGP``.
    """
    train_X, train_Y, _ = self._get_data_hardcoded(device=self.device)
    # Capture deprecation logging; patch out the real fitting routine so this
    # test only exercises argument wiring, not optimization.
    with self.assertLogs(logger=logger, level="WARNING") as captured:
        with mock.patch("botorch.fit.fit_gpytorch_mll") as fit_mock:
            model = get_fitted_map_saas_ensemble(
                train_X=train_X,
                train_Y=train_Y,
                input_transform=Normalize(d=train_X.shape[-1]),
                # batch_shape of 4 presumably matches the ensemble's default
                # number of taus — TODO confirm against the factory's default.
                outcome_transform=Standardize(m=1, batch_shape=torch.Size([4])),
                optimizer_kwargs={"options": {"maxiter": 3}},
            )
    saw_deprecation_hint = any(
        "use EnsembleMapSaasGP instead" in line for line in captured.output
    )
    self.assertTrue(saw_deprecation_hint)
    # optimizer_kwargs must reach fit_gpytorch_mll unchanged.
    self.assertEqual(
        fit_mock.call_args.kwargs["optimizer_kwargs"], {"options": {"maxiter": 3}}
    )
    self.assertIsInstance(model, EnsembleMapSaasSingleTaskGP)
382 | 314 |
|
383 | 315 | def test_input_transform_in_train(self) -> None:
|
384 | 316 | train_X, train_Y, test_X = self._get_data()
|
@@ -515,7 +447,7 @@ def test_batch_model_fitting(self) -> None:
|
515 | 447 |
|
516 | 448 | @mock_optimize
|
517 | 449 | def test_emsemble_map_saas(self) -> None:
|
518 |
| - train_X, train_Y, test_X = self._get_data() |
| 450 | + train_X, train_Y, test_X = self._get_data(device=self.device) |
519 | 451 | d = train_X.shape[-1]
|
520 | 452 | num_taus = 8
|
521 | 453 | for with_options in (False, True):
|
|
0 commit comments