
Commit c29712a

VilockLi authored and facebook-github-bot committed
Fidelity kernel for training iterations/training data points (#178)
Summary: Pull Request resolved: #178

We added the fidelity kernels from Section 5.3 of https://arxiv.org/abs/1903.04703.

Reviewed By: Balandat

Differential Revision: D15695576

fbshipit-source-id: ccdc33640dc5f266816c6d0198445d5b2635c837
1 parent ae507ad commit c29712a

File tree

6 files changed: 620 additions, 0 deletions

Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

from .downsampling_kernel import DownsamplingKernel
from .exponential_decay_kernel import ExpDecayKernel


__all__ = ["ExpDecayKernel", "DownsamplingKernel"]
Lines changed: 137 additions & 0 deletions
@@ -0,0 +1,137 @@
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

from typing import Optional

import torch
from gpytorch.constraints import Interval, Positive
from gpytorch.kernels import Kernel
from gpytorch.priors import Prior
from torch import Tensor


class DownsamplingKernel(Kernel):
    r"""
    Computes a covariance matrix based on the downsampling kernel between
    inputs :math:`\mathbf{x_1}` and :math:`\mathbf{x_2}` (we expect d = 1):

    .. math::
        \begin{equation*}
            k_\text{ds}(\mathbf{x_1}, \mathbf{x_2}) = c +
            (1 - \mathbf{x_1})^{1 + \delta} * (1 - \mathbf{x_2})^{1 + \delta},
        \end{equation*}

    where

    * :math:`c` is an :attr:`offset` parameter,
    * :math:`\delta` is a :attr:`power` parameter.

    Args:
        :attr:`power_constraint` (Constraint, optional):
            Constraint to place on the power parameter. Default: `Positive`.
        :attr:`power_prior` (:class:`gpytorch.priors.Prior`):
            Prior over the power parameter (default `None`).
        :attr:`offset_constraint` (Constraint, optional):
            Constraint to place on the offset parameter. Default: `Positive`.
        :attr:`active_dims` (list):
            List of data dimensions to operate on.
            `len(active_dims)` should equal `num_dimensions`.
    """

    def __init__(
        self,
        power_prior: Optional[Prior] = None,
        offset_prior: Optional[Prior] = None,
        power_constraint: Optional[Interval] = None,
        offset_constraint: Optional[Interval] = None,
        **kwargs
    ):
        super().__init__(**kwargs)

        if power_constraint is None:
            power_constraint = Positive()
        if offset_constraint is None:
            offset_constraint = Positive()

        self.register_parameter(
            name="raw_power",
            parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1)),
        )
        self.register_parameter(
            name="raw_offset",
            parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1)),
        )

        if power_prior is not None:
            self.register_prior(
                "power_prior",
                power_prior,
                lambda: self.power,
                lambda v: self._set_power(v),
            )
        self.register_constraint("raw_power", power_constraint)

        if offset_prior is not None:
            self.register_prior(
                "offset_prior",
                offset_prior,
                lambda: self.offset,
                lambda v: self._set_offset(v),
            )
        self.register_constraint("raw_offset", offset_constraint)

    @property
    def power(self) -> Tensor:
        return self.raw_power_constraint.transform(self.raw_power)

    @power.setter
    def power(self, value: Tensor) -> None:
        self._set_power(value)

    def _set_power(self, value: Tensor) -> None:
        if not torch.is_tensor(value):
            value = torch.as_tensor(value).to(self.raw_power)
        self.initialize(raw_power=self.raw_power_constraint.inverse_transform(value))

    @property
    def offset(self) -> Tensor:
        return self.raw_offset_constraint.transform(self.raw_offset)

    @offset.setter
    def offset(self, value: Tensor) -> None:
        self._set_offset(value)

    def _set_offset(self, value: Tensor) -> None:
        if not torch.is_tensor(value):
            value = torch.as_tensor(value).to(self.raw_offset)
        self.initialize(raw_offset=self.raw_offset_constraint.inverse_transform(value))

    def forward(
        self,
        x1: Tensor,
        x2: Tensor,
        diag: Optional[bool] = False,
        last_dim_is_batch: Optional[bool] = False,
        **params
    ) -> Tensor:
        offset = self.offset.view(*self.batch_shape, 1, 1)
        power = self.power.view(*self.batch_shape, 1, 1)
        if last_dim_is_batch:
            x1 = x1.transpose(-1, -2).unsqueeze(-1)
            x2 = x2.transpose(-1, -2).unsqueeze(-1)
        x1_ = 1 - x1
        x2_ = 1 - x2
        if diag:
            return (x1_ * x2_).sum(dim=-1).pow(power + 1) + offset

        if x1.dim() == 2 and x2.dim() == 2:
            # Non-batched case: fuse the offset addition into the matmul.
            return torch.addmm(
                offset, x1_.pow(power + 1), x2_.transpose(-2, -1).pow(power + 1)
            )
        else:
            return (
                torch.matmul(x1_.pow(power + 1), x2_.transpose(-2, -1).pow(power + 1))
                + offset
            )
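
For orientation, here is a minimal usage sketch of DownsamplingKernel, not part of the diff. The fidelity values, hyperparameter settings, and the GammaPrior choice are illustrative assumptions, and the kernel is assumed importable via the package __init__.py above.

import torch
from gpytorch.priors import GammaPrior

# Illustrative only: hyperparameter values are arbitrary choices.
kernel = DownsamplingKernel(power_prior=GammaPrior(3.0, 6.0))
kernel.power = 1.0   # delta = 1, so k(x1, x2) = c + (1 - x1)^2 * (1 - x2)^2
kernel.offset = 0.5  # c = 0.5

x = torch.tensor([[0.1], [0.5], [0.9]])  # three fidelities in [0, 1], d = 1
K = kernel(x, x).evaluate()              # 3 x 3 covariance matrix
# Sanity check: K[0, 0] = 0.5 + 0.9**2 * 0.9**2 = 1.1561

Since both inputs here are plain 2-d tensors, forward takes the torch.addmm fast path above.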
Lines changed: 122 additions & 0 deletions
@@ -0,0 +1,122 @@
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

from typing import Optional

import torch
from gpytorch.constraints import Interval, Positive
from gpytorch.kernels import Kernel
from gpytorch.priors import Prior
from torch import Tensor


class ExpDecayKernel(Kernel):
    r"""
    Computes a covariance matrix based on the exponential decay kernel
    between inputs :math:`\mathbf{x_1}` and :math:`\mathbf{x_2}` (we expect d = 1):

    .. math::
        \begin{equation*}
            k_\text{expdecay}(\mathbf{x_1}, \mathbf{x_2}) = w +
            \frac{\beta^{\alpha}}{(\mathbf{x_1} + \mathbf{x_2} + \beta)^{\alpha}},
        \end{equation*}

    where

    * :math:`w` is an :attr:`offset` parameter,
    * :math:`\beta` is a :attr:`lengthscale` parameter,
    * :math:`\alpha` is a :attr:`power` parameter.

    Args:
        :attr:`lengthscale_constraint` (Constraint, optional):
            Constraint to place on the lengthscale parameter. Default: `Positive`.
        :attr:`lengthscale_prior` (:class:`gpytorch.priors.Prior`):
            Prior over the lengthscale parameter (default `None`).
        :attr:`power_constraint` (Constraint, optional):
            Constraint to place on the power parameter. Default: `Positive`.
        :attr:`power_prior` (:class:`gpytorch.priors.Prior`):
            Prior over the power parameter (default `None`).
        :attr:`offset_constraint` (Constraint, optional):
            Constraint to place on the offset parameter. Default: `Positive`.
        :attr:`active_dims` (list):
            List of data dimensions to operate on.
            `len(active_dims)` should equal `num_dimensions`.
    """

    def __init__(
        self,
        power_prior: Optional[Prior] = None,
        offset_prior: Optional[Prior] = None,
        power_constraint: Optional[Interval] = None,
        offset_constraint: Optional[Interval] = None,
        **kwargs
    ):
        super().__init__(has_lengthscale=True, **kwargs)

        if power_constraint is None:
            power_constraint = Positive()
        if offset_constraint is None:
            offset_constraint = Positive()

        self.register_parameter(
            name="raw_power",
            parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1)),
        )
        self.register_parameter(
            name="raw_offset",
            parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1)),
        )

        if power_prior is not None:
            self.register_prior(
                "power_prior",
                power_prior,
                lambda: self.power,
                lambda v: self._set_power(v),
            )
        self.register_constraint("raw_power", power_constraint)

        if offset_prior is not None:
            self.register_prior(
                "offset_prior",
                offset_prior,
                lambda: self.offset,
                lambda v: self._set_offset(v),
            )
        self.register_constraint("raw_offset", offset_constraint)

    @property
    def power(self) -> Tensor:
        return self.raw_power_constraint.transform(self.raw_power)

    @power.setter
    def power(self, value: Tensor) -> None:
        self._set_power(value)

    def _set_power(self, value: Tensor) -> None:
        if not torch.is_tensor(value):
            value = torch.as_tensor(value).to(self.raw_power)
        self.initialize(raw_power=self.raw_power_constraint.inverse_transform(value))

    @property
    def offset(self) -> Tensor:
        return self.raw_offset_constraint.transform(self.raw_offset)

    @offset.setter
    def offset(self, value: Tensor) -> None:
        self._set_offset(value)

    def _set_offset(self, value: Tensor) -> None:
        if not torch.is_tensor(value):
            value = torch.as_tensor(value).to(self.raw_offset)
        self.initialize(raw_offset=self.raw_offset_constraint.inverse_transform(value))

    def forward(self, x1: Tensor, x2: Tensor, **params) -> Tensor:
        offset = self.offset.view(*self.batch_shape, 1, 1)
        power = self.power.view(*self.batch_shape, 1, 1)
        x1_ = x1.div(self.lengthscale)
        x2_ = x2.div(self.lengthscale)
        # For non-negative inputs, the distance between x1_ and -x2_ is the
        # scaled sum (x1 + x2) / beta, so (diff + 1)^(-power) recovers
        # beta^alpha / (x1 + x2 + beta)^alpha.
        diff = self.covar_dist(x1_, -x2_, **params)
        res = offset + (diff + 1).pow(-power)
        return res
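
A matching sketch for ExpDecayKernel, again illustrative rather than part of the commit: a quick numeric check that the forward pass reproduces w + beta^alpha / (x1 + x2 + beta)^alpha. All hyperparameter values below are assumptions.

import torch

# Illustrative only: assumes ExpDecayKernel is importable as above.
kernel = ExpDecayKernel()
kernel.lengthscale = 2.0  # beta = 2
kernel.power = 1.0        # alpha = 1
kernel.offset = 0.1       # w = 0.1

x = torch.tensor([[1.0], [3.0]])  # e.g. numbers of training iterations
K = kernel(x, x).evaluate()
# Sanity check: K[0, 1] = 0.1 + 2 / (1 + 3 + 2) = 0.4333...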
Lines changed: 3 additions & 0 deletions
@@ -0,0 +1,3 @@
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
