Commit 08ae7ae

VilockLi authored and facebook-github-bot committed
Updated Linear Truncated Kernel (#200)
Summary: Pull Request resolved: #200

The previous Linear Truncated Kernel would not optimize over the lengthscale parameters, because its Matern kernels were constructed on the fly in `forward` rather than registered with the module. Here we initialize the Matern kernels in the initialization function of the Linear Truncated Kernel. In this diff we also add a GP model that uses the Linear Truncated Kernel.

Reviewed By: Balandat

Differential Revision: D16069438

fbshipit-source-id: 446ef6717cf2163abf7c821d9b005cba5e2519a6
1 parent 0af4796 commit 08ae7ae
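
Why this matters: a kernel instantiated inside forward creates fresh torch.nn.Parameter objects on every call, so they are never registered on the enclosing GPyTorch module and the marginal-log-likelihood optimizer never sees them. A minimal sketch of the fixed pattern (illustrative only, not the BoTorch source; ToyKernel is a made-up name):

import torch
from gpytorch.kernels import Kernel, MaternKernel

class ToyKernel(Kernel):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Instantiating the sub-kernel here registers it as a submodule,
        # so its raw_lengthscale appears in named_parameters() and gets
        # updated when the model is fit.
        self.base = MaternKernel(nu=2.5)

    def forward(self, x1, x2, **params):
        return self.base(x1, x2)

k = ToyKernel()
print([name for name, _ in k.named_parameters()])
# ['base.raw_lengthscale']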

File tree

6 files changed: +202 −82 lines changed


botorch/models/fidelity/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -2,7 +2,7 @@
 
 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
 
-from .gp_regression_fidelity import SingleTaskMultiFidelityGP
+from .gp_regression_fidelity import SingleTaskGPLTKernel, SingleTaskMultiFidelityGP
 
 
-__all__ = ["SingleTaskMultiFidelityGP"]
+__all__ = ["SingleTaskMultiFidelityGP", "SingleTaskGPLTKernel"]
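
With this change, both models can be imported directly from the fidelity subpackage:

from botorch.models.fidelity import SingleTaskGPLTKernel, SingleTaskMultiFidelityGP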

botorch/models/fidelity/gp_regression_fidelity.py

Lines changed: 64 additions & 0 deletions
@@ -12,6 +12,9 @@
 from botorch.exceptions import UnsupportedError
 from botorch.models.fidelity_kernels.downsampling_kernel import DownsamplingKernel
 from botorch.models.fidelity_kernels.exponential_decay_kernel import ExpDecayKernel
+from botorch.models.fidelity_kernels.linear_truncated_fidelity import (
+    LinearTruncatedFidelityKernel,
+)
 from gpytorch.kernels.rbf_kernel import RBFKernel
 from gpytorch.kernels.scale_kernel import ScaleKernel
 from gpytorch.likelihoods.likelihood import Likelihood
@@ -103,3 +106,64 @@ def __init__(
         )
         super().__init__(train_X=train_X, train_Y=train_Y, covar_module=covar_module)
         self.to(train_X)
+
+
+class SingleTaskGPLTKernel(SingleTaskGP):
+    r"""A single-task multi-fidelity GP model with the Linear Truncated kernel.
+
+    A subclass of the SingleTaskGP model. By default, the last two dimensions of
+    train_X are the fidelity parameters: training iterations and training data
+    points.
+
+    Args:
+        train_X: A `n x (d + s)` or `batch_shape x n x (d + s)` (batch mode) tensor
+            of training features, where s is the dimension of the fidelity
+            parameters.
+        train_Y: A `n x (o)` or `batch_shape x n x (o)` (batch mode) tensor of
+            training observations.
+        nu: The smoothness parameter of the Matern kernel: either 1/2, 3/2, or 5/2.
+            Default: `2.5`
+        train_iteration_fidelity: An indicator of whether we have the training
+            iteration fidelity variable.
+        train_data_fidelity: An indicator of whether we have the downsampling
+            fidelity variable. If train_iteration_fidelity and train_data_fidelity
+            are both True, the last and second-to-last columns are treated as the
+            training data points fidelity parameter and the training iteration
+            number fidelity parameter, respectively. Otherwise, the last column of
+            train_X is treated as the fidelity parameter whose indicator is True.
+            We assume train_X has at least one fidelity parameter.
+        likelihood: A likelihood. If omitted, use a standard
+            GaussianLikelihood with inferred noise level.
+
+    Example:
+        >>> train_X = torch.rand(20, 4)
+        >>> train_Y = train_X.pow(2).sum(dim=-1)
+        >>> model = SingleTaskGPLTKernel(train_X, train_Y)
+    """
+
+    def __init__(
+        self,
+        train_X: Tensor,
+        train_Y: Tensor,
+        nu: float = 2.5,
+        train_iteration_fidelity: bool = True,
+        train_data_fidelity: bool = True,
+        likelihood: Optional[Likelihood] = None,
+    ) -> None:
+        if not train_iteration_fidelity and not train_data_fidelity:
+            raise UnsupportedError("You should have at least one fidelity parameter.")
+        train_X, train_Y, _ = self._set_dimensions(train_X=train_X, train_Y=train_Y)
+        kernel = LinearTruncatedFidelityKernel(
+            nu=nu,
+            dimension=train_X.shape[-1],
+            train_iteration_fidelity=train_iteration_fidelity,
+            train_data_fidelity=train_data_fidelity,
+            batch_shape=self._aug_batch_shape,
+            power_prior=GammaPrior(3.0, 3.0),
+        )
+        covar_module = ScaleKernel(
+            kernel,
+            batch_shape=self._aug_batch_shape,
+            outputscale_prior=GammaPrior(2.0, 0.15),
+        )
+        super().__init__(train_X=train_X, train_Y=train_Y, covar_module=covar_module)
+        self.to(train_X)
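
A usage sketch for the new model, extending the docstring's example with the standard BoTorch fitting loop (the fit via ExactMarginalLogLikelihood is an assumption about typical usage, not part of this diff):

import torch
from botorch import fit_gpytorch_model
from botorch.models.fidelity import SingleTaskGPLTKernel
from gpytorch.mlls import ExactMarginalLogLikelihood

# 2 design dimensions plus the 2 default fidelity columns
# (training iterations, training data points)
train_X = torch.rand(20, 4)
train_Y = train_X.pow(2).sum(dim=-1)
model = SingleTaskGPLTKernel(train_X, train_Y)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_model(mll)  # now also optimizes the Matern lengthscales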

botorch/models/fidelity_kernels/__init__.py

Lines changed: 2 additions & 1 deletion
@@ -4,6 +4,7 @@
 
 from .downsampling_kernel import DownsamplingKernel
 from .exponential_decay_kernel import ExpDecayKernel
+from .linear_truncated_fidelity import LinearTruncatedFidelityKernel
 
 
-__all__ = ["ExpDecayKernel", "DownsamplingKernel"]
+__all__ = ["ExpDecayKernel", "DownsamplingKernel", "LinearTruncatedFidelityKernel"]

botorch/models/fidelity_kernels/linear_truncated_fidelity.py

Lines changed: 62 additions & 53 deletions
@@ -44,6 +44,9 @@ class LinearTruncatedFidelityKernel(Kernel):
     We assume the last two dimensions of input `x` are the fidelity parameters.
 
     Args:
+        :attr:`dimension` (int):
+            The dimension of `x`. This is not needed if active_dims is specified.
+            Default: `3`
         :attr:`nu` (float):
             The smoothness parameter of the Matern kernel: either 1/2, 3/2, or 5/2.
             Default: `2.5`
@@ -73,6 +76,19 @@ class LinearTruncatedFidelityKernel(Kernel):
         :attr:`power_constraint` (Constraint, optional):
             Set this if you want to apply a constraint to the power parameter of the
             polynomial kernel. Default: `Positive`
+        :attr:`train_iteration_fidelity` (bool):
+            Set this to True if your data contains an iteration fidelity parameter.
+            Default: `True`
+        :attr:`train_data_fidelity` (bool):
+            Set this to True if your data contains a training data fidelity parameter.
+            Default: `True`
+        :attr:`covar_module_1` (Kernel):
+            Set this if you want a different kernel for the unbiased part.
+            Default: `MaternKernel`
+        :attr:`covar_module_2` (Kernel):
+            Set this if you want a different kernel for the biased part.
+            Default: `MaternKernel`
+
 
     Attributes:
         :attr:`lengthscale` (Tensor):
@@ -92,6 +108,7 @@ class LinearTruncatedFidelityKernel(Kernel):
 
     def __init__(
         self,
+        dimension: int = 3,
         nu: float = 2.5,
         train_iteration_fidelity: bool = True,
         train_data_fidelity: bool = True,
@@ -100,32 +117,29 @@ def __init__(
         power_constraint: Optional[Interval] = None,
         lengthscale_2_prior: Optional[Prior] = None,
         lengthscale_2_constraint: Optional[Interval] = None,
+        lengthscale_constraint: Optional[Interval] = None,
+        covar_module_1: Optional[Kernel] = None,
+        covar_module_2: Optional[Kernel] = None,
         **kwargs: Any,
     ):
         if not train_iteration_fidelity and not train_data_fidelity:
             raise UnsupportedError("You should have at least one fidelity parameter.")
         if nu not in {0.5, 1.5, 2.5}:
             raise ValueError("nu expected to be 0.5, 1.5, or 2.5")
-        super().__init__(has_lengthscale=True, **kwargs)
+        super().__init__(**kwargs)
         self.train_iteration_fidelity = train_iteration_fidelity
         self.train_data_fidelity = train_data_fidelity
         if power_constraint is None:
             power_constraint = Positive()
 
         if lengthscale_prior is None:
-            self.lengthscale_prior = GammaPrior(1.1, 1 / 20)
-        else:
-            self.lengthscale_prior = lengthscale_prior
+            lengthscale_prior = GammaPrior(3, 6)
 
         if lengthscale_2_prior is None:
-            self.lengthscale_2_prior = GammaPrior(5, 1 / 20)
-        else:
-            self.register_prior(
-                "lengthscale_2_prior",
-                lengthscale_2_prior,
-                lambda: self.lengthscale_2,
-                lambda v: self._set_lengthscale_2(v),
-            )
+            lengthscale_2_prior = GammaPrior(6, 2)
+
+        if lengthscale_constraint is None:
+            lengthscale_constraint = Positive()
 
         if lengthscale_2_constraint is None:
             lengthscale_2_constraint = Positive()
@@ -134,10 +148,6 @@ def __init__(
             name="raw_power",
             parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1)),
         )
-        self.register_parameter(
-            name="raw_lengthscale_2",
-            parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1)),
-        )
 
         if power_prior is not None:
             self.register_prior(
@@ -146,10 +156,35 @@ def __init__(
                 lambda: self.power,
                 lambda v: self._set_power(v),
             )
-        self.nu = nu
-        self.register_constraint("raw_lengthscale_2", lengthscale_2_constraint)
         self.register_constraint("raw_power", power_constraint)
 
+        m = self.train_iteration_fidelity + self.train_data_fidelity
+
+        if self.active_dims is not None:
+            dimension = len(self.active_dims)
+
+        if covar_module_1 is None:
+            self.covar_module_1 = MaternKernel(
+                nu=nu,
+                batch_shape=self.batch_shape,
+                lengthscale_prior=lengthscale_prior,
+                ard_num_dims=dimension - m,
+                lengthscale_constraint=lengthscale_constraint,
+            )
+        else:
+            self.covar_module_1 = covar_module_1
+
+        if covar_module_2 is None:
+            self.covar_module_2 = MaternKernel(
+                nu=nu,
+                batch_shape=self.batch_shape,
+                lengthscale_prior=lengthscale_2_prior,
+                ard_num_dims=dimension - m,
+                lengthscale_constraint=lengthscale_2_constraint,
+            )
+        else:
+            self.covar_module_2 = covar_module_2
+
     @property
     def power(self) -> torch.Tensor:
         return self.raw_power_constraint.transform(self.raw_power)
@@ -163,46 +198,20 @@ def _set_power(self, value: torch.Tensor) -> None:
             value = torch.as_tensor(value).to(self.raw_power)
         self.initialize(raw_power=self.raw_power_constraint.inverse_transform(value))
 
-    @property
-    def lengthscale_2(self) -> torch.Tensor:
-        return self.raw_lengthscale_2_constraint.transform(self.raw_lengthscale_2)
-
-    @lengthscale_2.setter
-    def lengthscale_2(self, value: torch.Tensor) -> None:
-        self._set_lengthscale_2(value)
-
-    def _set_lengthscale_2(self, value: torch.Tensor) -> None:
-        if not torch.is_tensor(value):
-            value = torch.as_tensor(value).to(self.raw_lengthscale_2)
-        self.initialize(
-            raw_lengthscale_2=self.raw_lengthscale_2_constraint.inverse_transform(value)
-        )
-
     def forward(self, x1: torch.Tensor, x2: torch.Tensor, **params) -> torch.Tensor:
-        m = self.train_iteration_fidelity + self.train_data_fidelity
         power = self.power.view(*self.batch_shape, 1, 1)
-        active_dimsM = list(range(x1.size()[-1] - m))
-        covar_module_1 = MaternKernel(
-            nu=self.nu,
-            batch_shape=self.batch_shape,
-            lengthscale_prior=self.lengthscale_prior,
-            active_dims=active_dimsM,
-            ard_num_dims=x1.shape[-1] - m,
-        )
-        covar_module_2 = MaternKernel(
-            nu=self.nu,
-            batch_shape=self.batch_shape,
-            lengthscale_prior=self.lengthscale_2_prior,
-            active_dims=active_dimsM,
-            ard_num_dims=x1.shape[-1] - m,
-        )
-        covar_0 = covar_module_1(x1, x2)
+
+        m = self.train_iteration_fidelity + self.train_data_fidelity
+        active_dimsM = list(range(x1.shape[-1] - m))
+        x1_ = x1[..., active_dimsM]
+        x2_ = x2[..., active_dimsM]
+        covar_0 = self.covar_module_1(x1_, x2_)
+        covar_1 = self.covar_module_2(x1_, x2_)
         x11_ = x1[..., -1].unsqueeze(-1)
         x21t_ = x2[..., -1].unsqueeze(-1).transpose(-1, -2)
-        covar_1 = covar_module_2(x1, x2)
         if self.train_iteration_fidelity and self.train_data_fidelity:
-            covar_2 = covar_module_2(x1, x2)
-            covar_3 = covar_module_2(x1, x2)
+            covar_2 = self.covar_module_2(x1_, x2_)
+            covar_3 = self.covar_module_2(x1_, x2_)
             x12_ = x1[..., -2].unsqueeze(-1)
             x22t_ = x2[..., -2].unsqueeze(-1).transpose(-1, -2)
             res = (
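
A quick way to check the effect of the refactor (a sketch assuming the default arguments; the exact parameter names come from GPyTorch's MaternKernel):

import torch
from botorch.models.fidelity_kernels import LinearTruncatedFidelityKernel

k = LinearTruncatedFidelityKernel(dimension=4)
# The two Matern sub-kernels are now registered submodules, so their
# lengthscales are visible to the optimizer alongside the power parameter.
print(sorted(name for name, _ in k.named_parameters()))
# e.g. ['covar_module_1.raw_lengthscale',
#       'covar_module_2.raw_lengthscale', 'raw_power']

x = torch.rand(10, 4)  # 2 design dims + 2 fidelity dims
K = k(x, x).evaluate()  # 10 x 10 covariance matrix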

test/models/fidelity/test_gp_regression_fidelity.py

Lines changed: 16 additions & 3 deletions
@@ -8,7 +8,10 @@
 import torch
 from botorch import fit_gpytorch_model
 from botorch.exceptions import UnsupportedError
-from botorch.models.fidelity.gp_regression_fidelity import SingleTaskMultiFidelityGP
+from botorch.models.fidelity.gp_regression_fidelity import (
+    SingleTaskGPLTKernel,
+    SingleTaskMultiFidelityGP,
+)
 from botorch.models.gp_regression import FixedNoiseGP
 from botorch.posteriors import GPyTorchPosterior
 from botorch.sampling import SobolQMCNormalSampler
@@ -45,6 +48,9 @@ def _get_random_data_with_fidelity(
 
 
 class TestSingleTaskGPFidelity(unittest.TestCase):
+    def _get_model(self):
+        return SingleTaskMultiFidelityGP
+
     def _get_model_and_data(
         self,
         train_iteration_fidelity,
@@ -66,14 +72,16 @@ def _get_model_and_data(
             "train_iteration_fidelity": train_iteration_fidelity,
             "train_data_fidelity": train_data_fidelity,
         }
-        model = SingleTaskMultiFidelityGP(**model_kwargs)
+        gp_model = self._get_model()
+        model = gp_model(**model_kwargs)
         return model, model_kwargs
 
     def test_exception_message(self, cuda=False):
         train_X = torch.rand(20, 4, device=torch.device("cuda" if cuda else "cpu"))
         train_Y = train_X.pow(2).sum(dim=-1)
+        gp_model = self._get_model()
         with self.assertRaises(UnsupportedError):
-            SingleTaskMultiFidelityGP(
+            gp_model(
                 train_X,
                 train_Y,
                 train_iteration_fidelity=False,
@@ -331,3 +339,8 @@ def test_fantasize(self, cuda=False):
     def test_fantasize_cuda(self):
         if torch.cuda.is_available():
             self.test_fantasize(cuda=True)
+
+
+class TestSingleTaskGPLTKernel(TestSingleTaskGPFidelity):
+    def _get_model(self):
+        return SingleTaskGPLTKernel
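
The test changes use a small template-method pattern: the base TestCase exposes the class under test through _get_model(), and the new subclass overrides only that hook, so every inherited test reruns against SingleTaskGPLTKernel. A generic sketch of the pattern (names illustrative):

import unittest

class BaseModelTest(unittest.TestCase):
    def _get_model(self):
        return dict  # stand-in for the default class under test

    def test_constructible(self):
        model_cls = self._get_model()
        self.assertIsNotNone(model_cls())

class OtherModelTest(BaseModelTest):
    def _get_model(self):
        return list  # every inherited test now runs against this class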
