import math

import torch

from .kernel import Kernel


class PiecewisePolynomialKernel(Kernel):
    r"""
    Computes a covariance matrix based on the Piecewise Polynomial kernel
    between inputs :math:`\mathbf{x_1}` and :math:`\mathbf{x_2}`:

    .. math::

        \begin{align}
            r &= \left\Vert \mathbf{x_1} - \mathbf{x_2} \right\Vert \\
            j &= \lfloor \frac{D}{2} \rfloor + q + 1 \\
            K_{\text{ppD, 0}}(\mathbf{x_1}, \mathbf{x_2}) &= (1-r)^j_+ , \\
            K_{\text{ppD, 1}}(\mathbf{x_1}, \mathbf{x_2}) &= (1-r)^{j+1}_+ ((j + 1)r + 1), \\
            K_{\text{ppD, 2}}(\mathbf{x_1}, \mathbf{x_2}) &= (1-r)^{j+2}_+ (1 + (j+2)r +
                \frac{j^2 + 4j + 3}{3}r^2), \\
            K_{\text{ppD, 3}}(\mathbf{x_1}, \mathbf{x_2}) &= (1-r)^{j+3}_+
                (1 + (j+3)r + \frac{6j^2 + 36j + 45}{15}r^2 +
                \frac{j^3 + 9j^2 + 23j + 15}{15}r^3) \\
        \end{align}

    where :math:`K_{\text{ppD, q}}` is positive semidefinite in :math:`\mathbb{R}^{D}` and
    :math:`q` is the smoothness coefficient. See `Rasmussen and Williams (2006)`_ Equation 4.21.

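    For example, with :math:`q = 2` and inputs in :math:`D = 3` dimensions,
    :math:`j = \lfloor 3/2 \rfloor + 2 + 1 = 4`, so the covariance is
    :math:`(1 - r)^{6}_+ \left(1 + 6r + \frac{35}{3}r^2\right)`; in particular it is
    exactly zero whenever :math:`r \geq 1`, giving the kernel compact support.
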
    .. note:: This kernel does not have an `outputscale` parameter. To add a scaling parameter,
        decorate this kernel with a :class:`gpytorch.kernels.ScaleKernel`.

    :param q: (Default: 2) The smoothness parameter.
    :type q: int (0, 1, 2 or 3)
    :param ard_num_dims: (Default: `None`) Set this if you want a separate lengthscale for each
        input dimension. It should be `d` if :attr:`x1` is a `... x n x d` matrix.
    :type ard_num_dims: int, optional
    :param batch_shape: (Default: `None`) Set this if you want a separate lengthscale for each
        batch of input data. It should be `torch.Size([b1, b2])` for a `b1 x b2 x n x m` kernel output.
    :type batch_shape: torch.Size, optional
    :param active_dims: (Default: `None`) Set this if you want to
        compute the covariance of only a few input dimensions. The ints
        correspond to the indices of the dimensions.
    :type active_dims: Tuple(int), optional
    :param lengthscale_prior: (Default: `None`)
        Set this if you want to apply a prior to the lengthscale parameter.
    :type lengthscale_prior: ~gpytorch.priors.Prior, optional
    :param lengthscale_constraint: (Default: `Positive`) Set this if you want
        to apply a constraint to the lengthscale parameter.
    :type lengthscale_constraint: ~gpytorch.constraints.Positive, optional
    :param eps: (Default: 1e-6) The minimum value that the lengthscale can take (prevents divide by zero errors).
    :type eps: float, optional

    :var torch.Tensor lengthscale: The lengthscale parameter. Size/shape of parameter depends on the
        :attr:`ard_num_dims` and :attr:`batch_shape` arguments.

    .. _Rasmussen and Williams (2006):
        http://www.gaussianprocess.org/gpml/

    Example:
        >>> x = torch.randn(10, 5)
        >>> # Non-batch option
        >>> covar_module = gpytorch.kernels.ScaleKernel(
        ...     gpytorch.kernels.PiecewisePolynomialKernel(q=2)
        ... )
        >>> # Non-batch: ARD (different lengthscale for each input dimension)
        >>> covar_module = gpytorch.kernels.ScaleKernel(
        ...     gpytorch.kernels.PiecewisePolynomialKernel(q=2, ard_num_dims=5)
        ... )
        >>> covar = covar_module(x)  # Output: LazyTensor of size (10 x 10)
        >>> batch_x = torch.randn(2, 10, 5)
        >>> # Batch: different lengthscale for each batch
        >>> covar_module = gpytorch.kernels.ScaleKernel(
        ...     gpytorch.kernels.PiecewisePolynomialKernel(q=2, batch_shape=torch.Size([2]))
        ... )
        >>> covar = covar_module(batch_x)  # Output: LazyTensor of size (2 x 10 x 10)
    """
    has_lengthscale = True

    def __init__(self, q=2, **kwargs):
        super(PiecewisePolynomialKernel, self).__init__(**kwargs)
        if q not in {0, 1, 2, 3}:
            raise ValueError("q expected to be 0, 1, 2 or 3")
        self.q = q

    def fmax(self, r, j, q):
        # (1 - r)_+^{j + q}: clamping keeps the result on the same device/dtype
        # as r, unlike comparing against a freshly constructed CPU tensor.
        return torch.clamp_min(1 - r, 0).pow(j + q)

    def get_cov(self, r, j, q):
        # Polynomial factor multiplying (1 - r)_+^{j + q} for each smoothness q.
        if q == 0:
            return 1
        if q == 1:
            return (j + 1) * r + 1
        if q == 2:
            return 1 + (j + 2) * r + ((j ** 2 + 4 * j + 3) / 3.0) * r ** 2
        if q == 3:
            return (
                1
                + (j + 3) * r
                + ((6 * j ** 2 + 36 * j + 45) / 15.0) * r ** 2
                + ((j ** 3 + 9 * j ** 2 + 23 * j + 15) / 15.0) * r ** 3
            )

    def forward(self, x1, x2, last_dim_is_batch=False, diag=False, **params):
        x1_ = x1.div(self.lengthscale)
        x2_ = x2.div(self.lengthscale)
        if last_dim_is_batch:
            D = x1.shape[1]
        else:
            D = x1.shape[-1]
        # Exponent from Equation 4.21; a plain Python number avoids allocating
        # a CPU tensor that could clash with CUDA inputs downstream.
        j = math.floor(D / 2.0) + self.q + 1
        r = self.covar_dist(x1_, x2_, last_dim_is_batch=last_dim_is_batch, diag=diag, **params)
        cov_matrix = self.fmax(r, j, self.q) * self.get_cov(r, j, self.q)
        return cov_matrix
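

# A minimal sanity-check sketch, not part of the kernel class above: it assumes
# this module is run with the rest of gpytorch importable, and illustrates two
# documented properties of the kernel -- compact support (zero covariance once
# the scaled distance r reaches 1) and positive semidefiniteness. It calls
# forward() directly to get plain tensors rather than lazy ones.
if __name__ == "__main__":
    kernel = PiecewisePolynomialKernel(q=2)

    # Two points far apart relative to the lengthscale: r >= 1 between them,
    # so the off-diagonal covariance entries are exactly zero.
    far_apart = torch.tensor([[0.0], [100.0]])
    print(kernel.forward(far_apart, far_apart))

    # A random covariance matrix should have non-negative eigenvalues,
    # up to numerical round-off.
    x = torch.randn(10, 3)
    covar = kernel.forward(x, x)
    print(torch.linalg.eigvalsh(covar).min())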