20 changes: 16 additions & 4 deletions gpytorch/functions/__init__.py
@@ -64,7 +64,14 @@ def dsmm(sparse_mat, dense_mat):
 
 
 def exact_predictive_mean(
-    full_covar, full_mean, train_labels, num_train, likelihood, precomputed_cache=None, non_batch_train=False
+    full_covar,
+    full_mean,
+    train_inputs,
+    train_labels,
+    num_train,
+    likelihood,
+    precomputed_cache=None,
+    non_batch_train=False,
 ):
     """
     Computes the posterior predictive mean of a GP
@@ -73,6 +80,7 @@ def exact_predictive_mean(
         - full_covar ( (n+t) x (n+t) ) - the block prior covariance matrix of training and testing points
                                          [ K_XX, K_XX*; K_X*X, K_X*X* ]
         - full_mean (n + t) - the training and test prior means, stacked on top of each other
+        - train_inputs (:obj:`torch.tensor`) - the training data inputs
         - train_labels (n) - the training labels minus the training prior mean
         - noise (1) - the observed noise (from the likelihood)
         - precomputed_cache - speeds up subsequent computations (default: None)
@@ -88,17 +96,20 @@ def exact_predictive_mean(
 
     full_covar = NonLazyTensor(full_covar)
     return full_covar.exact_predictive_mean(
-        full_mean, train_labels, num_train, likelihood, precomputed_cache, non_batch_train
+        full_mean, train_inputs, train_labels, num_train, likelihood, precomputed_cache, non_batch_train
     )
 
 
-def exact_predictive_covar(full_covar, num_train, likelihood, precomputed_cache=None, non_batch_train=False):
+def exact_predictive_covar(
+    full_covar, train_inputs, num_train, likelihood, precomputed_cache=None, non_batch_train=False
+):
     """
     Computes the posterior predictive covariance of a GP
 
     Args:
         - full_covar ( (n+t) x (n+t) ) - the block prior covariance matrix of training and testing points
                                          [ K_XX, K_XX*; K_X*X, K_X*X* ]
+        - train_inputs (:obj:`torch.tensor`) - the training data inputs
         - num_train (int) - how many training points are there in the full covariance matrix
         - noise (1) - the observed noise (from the likelihood)
         - precomputed_cache - speeds up subsequent computations (default: None)
@@ -113,7 +124,8 @@ def exact_predictive_covar(full_covar, num_train, likelihood, precomputed_cache=
     from ..lazy.non_lazy_tensor import NonLazyTensor
 
     full_covar = NonLazyTensor(full_covar)
-    return full_covar.exact_predictive_covar(num_train, likelihood, precomputed_cache, non_batch_train)
+
+    return full_covar.exact_predictive_covar(train_inputs, num_train, likelihood, precomputed_cache, non_batch_train)
 
 
 def log_normal_cdf(x):
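Reviewer note: a minimal usage sketch of the updated functional interface, assuming a build with this change merged. The RBFKernel and GaussianLikelihood choices, tensor shapes, and variable names are illustrative only; both functions return a (result, cache) pair per the code above.

import torch
import gpytorch
from gpytorch.functions import exact_predictive_mean, exact_predictive_covar

train_x = torch.randn(100, 3)  # n = 100 training inputs
test_x = torch.randn(20, 3)    # t = 20 test inputs
train_y = torch.randn(100)     # training labels minus the training prior mean

full_x = torch.cat([train_x, test_x], dim=0)
full_covar = gpytorch.kernels.RBFKernel()(full_x, full_x).evaluate()  # (n+t) x (n+t)
full_mean = torch.zeros(full_x.size(0))                               # stacked prior means
likelihood = gpytorch.likelihoods.GaussianLikelihood()

# train_inputs now rides along so the likelihood can model input-dependent noise
pred_mean, mean_cache = exact_predictive_mean(
    full_covar, full_mean, train_x, train_y, train_x.size(0), likelihood
)
pred_covar, covar_cache = exact_predictive_covar(
    full_covar, train_x, train_x.size(0), likelihood
)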
6 changes: 3 additions & 3 deletions gpytorch/kernels/__init__.py
@@ -1,12 +1,12 @@
 #!/usr/bin/env python3
 
-from .kernel import Kernel, AdditiveKernel, ProductKernel
 from .additive_structure_kernel import AdditiveStructureKernel
 from .cosine_kernel import CosineKernel
-from .grid_kernel import GridKernel
 from .grid_interpolation_kernel import GridInterpolationKernel
+from .grid_kernel import GridKernel
 from .index_kernel import IndexKernel
 from .inducing_point_kernel import InducingPointKernel
+from .kernel import AdditiveKernel, Kernel, ProductKernel
 from .lcm_kernel import LCMKernel
 from .linear_kernel import LinearKernel
 from .matern_kernel import MaternKernel
@@ -18,6 +18,7 @@
 from .spectral_mixture_kernel import SpectralMixtureKernel
 from .white_noise_kernel import WhiteNoiseKernel
 
+
 __all__ = [
     "Kernel",
     "AdditiveKernel",
@@ -27,7 +28,6 @@
     "GridInterpolationKernel",
     "IndexKernel",
     "InducingPointKernel",
-    "InducingPointKernelAddedLossTerm",
     "LCMKernel",
     "LinearKernel",
     "MaternKernel",
3 changes: 2 additions & 1 deletion gpytorch/kernels/white_noise_kernel.py
@@ -1,6 +1,7 @@
 #!/usr/bin/env python3
 
 import torch
+
 from . import Kernel
 from ..lazy import DiagLazyTensor, ZeroLazyTensor
 
@@ -55,4 +56,4 @@ def forward(self, x1, x2, **params):
         elif x1.size(-2) == x2.size(-2) and x1.size(-2) == self.variances.size(1) and torch.equal(x1, x2):
             return DiagLazyTensor(self.variances.view(self.variances.size(0), -1))
         else:
-            return ZeroLazyTensor(x1.size(-3), x1.size(-2), x2.size(-2))
+            return ZeroLazyTensor(x1.size(-3), x1.size(-2), x2.size(-2), dtype=x1.dtype, device=x1.device)
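Reviewer note: forwarding dtype and device matters once inputs are not default-precision CPU tensors; previously the fall-through branch produced a ZeroLazyTensor that could materialize with a mismatched dtype or device. A small sketch, assuming the WhiteNoiseKernel constructor takes the variances tensor and that ZeroLazyTensor.evaluate honors the stored dtype and device:

import torch
from gpytorch.kernels import WhiteNoiseKernel

variances = torch.ones(1, 100, dtype=torch.float64)  # batch of 1, 100 points
kernel = WhiteNoiseKernel(variances)

x1 = torch.randn(1, 100, 3, dtype=torch.float64)
x2 = torch.randn(1, 50, 3, dtype=torch.float64)  # mismatched sizes hit the zero branch

res = kernel.forward(x1, x2).evaluate()
assert res.dtype == torch.float64 and res.shape == (1, 100, 50)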
21 changes: 16 additions & 5 deletions gpytorch/lazy/interpolated_lazy_tensor.py
@@ -363,7 +363,14 @@ def diag(self):
         return res
 
     def exact_predictive_mean(
-        self, full_mean, train_labels, num_train, likelihood, precomputed_cache=None, non_batch_train=False
+        self,
+        full_mean,
+        train_inputs,
+        train_labels,
+        num_train,
+        likelihood,
+        precomputed_cache=None,
+        non_batch_train=False,
     ):
         from ..distributions import MultivariateNormal
 
@@ -382,7 +389,7 @@ def exact_predictive_mean(
 
         train_mean = full_mean.narrow(-1, 0, train_train_covar.size(-1))
 
-        mvn = likelihood(MultivariateNormal(train_mean, train_train_covar))
+        mvn = likelihood(MultivariateNormal(train_mean, train_train_covar), train_inputs)
         train_mean, train_train_covar = mvn.mean, mvn.lazy_covariance_matrix
 
         train_train_covar_inv_labels = train_train_covar.inv_matmul((train_labels - train_mean).unsqueeze(-1))
@@ -422,11 +429,15 @@ def _exact_predictive_covar_inv_quad_form_root(self, precomputed_cache, test_tra
         res = left_interp(test_interp_indices, test_interp_values, precomputed_cache)
         return res
 
-    def exact_predictive_covar(self, num_train, likelihood, precomputed_cache=None, non_batch_train=False):
+    def exact_predictive_covar(
+        self, train_inputs, num_train, likelihood, precomputed_cache=None, non_batch_train=False
+    ):
         from ..distributions import MultivariateNormal
 
         if not beta_features.fast_pred_var.on() and not beta_features.fast_pred_samples.on():
-            return super(InterpolatedLazyTensor, self).exact_predictive_covar(num_train, likelihood, precomputed_cache)
+            return super(InterpolatedLazyTensor, self).exact_predictive_covar(
+                train_inputs, num_train, likelihood, precomputed_cache
+            )
 
         n_test = self.size(-2) - num_train
         train_interp_indices = self.left_interp_indices.narrow(-2, 0, num_train)
@@ -452,7 +463,7 @@ def exact_predictive_covar(self, num_train, likelihood, precomputed_cache=None,
         )
 
         grv = MultivariateNormal(torch.zeros(1), train_train_covar)
-        train_train_covar = likelihood(grv).lazy_covariance_matrix
+        train_train_covar = likelihood(grv, train_inputs).lazy_covariance_matrix
 
         # Get probe vectors for inverse root
         num_probe_vectors = beta_features.fast_pred_var.num_probe_vectors()
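Reviewer note: the reason train_inputs is now threaded into every likelihood call is that a heteroskedastic likelihood needs the inputs to produce per-point noise. A toy stand-in (not the library's class) illustrating the likelihood(mvn, train_inputs) call contract used above:

import torch
from gpytorch.distributions import MultivariateNormal
from gpytorch.lazy import DiagLazyTensor


class ToyHeteroskedasticLikelihood:
    # Noise is a function of the inputs rather than a single scalar.

    def __init__(self, noise_fn):
        self.noise_fn = noise_fn  # maps inputs (n, d) -> per-point variances (n,)

    def __call__(self, mvn, train_inputs=None):
        noise = self.noise_fn(train_inputs)
        covar = mvn.lazy_covariance_matrix + DiagLazyTensor(noise)
        return MultivariateNormal(mvn.mean, covar)


lik = ToyHeteroskedasticLikelihood(lambda x: 0.1 + x.pow(2).sum(-1))
mvn = MultivariateNormal(torch.zeros(5), torch.eye(5))
noisy_mvn = lik(mvn, torch.randn(5, 2))  # same call shape as in this diff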
32 changes: 26 additions & 6 deletions gpytorch/lazy/lazy_evaluated_kernel_tensor.py
@@ -44,6 +44,17 @@ def _quad_form_derivative(self, left_vecs, right_vecs):
     def _transpose_nonbatch(self):
         return self.__class__(self.kernel, self.x2, self.x1, **self.params)
 
+    def _batch_get_indices(self, batch_indices, left_indices, right_indices):
+        from ..kernels import Kernel
+
+        x1 = self.x1[batch_indices, left_indices, :].unsqueeze(0)
+        x2 = self.x2[batch_indices, right_indices, :].unsqueeze(0)
+        res = super(Kernel, self.kernel).__call__(x1.transpose(-1, -2), x2.transpose(-1, -2))
+        if isinstance(res, LazyTensor):
+            res = res.evaluate()
+        res = res.view(-1)
+        return res
+
     def _get_indices(self, left_indices, right_indices):
         from ..kernels import Kernel
 
@@ -166,25 +177,34 @@ def evaluate(self):
         return self.evaluate_kernel().evaluate()
 
     def exact_predictive_mean(
-        self, full_mean, train_labels, num_train, likelihood, precomputed_cache=None, non_batch_train=False
+        self,
+        full_mean,
+        train_inputs,
+        train_labels,
+        num_train,
+        likelihood,
+        precomputed_cache=None,
+        non_batch_train=False,
     ):
         if self.kernel.has_custom_exact_predictions:
             return self.evaluate_kernel().exact_predictive_mean(
-                full_mean, train_labels, num_train, likelihood, precomputed_cache, non_batch_train
+                full_mean, train_inputs, train_labels, num_train, likelihood, precomputed_cache, non_batch_train
             )
         else:
             return super(LazyEvaluatedKernelTensor, self).exact_predictive_mean(
-                full_mean, train_labels, num_train, likelihood, precomputed_cache, non_batch_train
+                full_mean, train_inputs, train_labels, num_train, likelihood, precomputed_cache, non_batch_train
             )
 
-    def exact_predictive_covar(self, num_train, likelihood, precomputed_cache=None, non_batch_train=False):
+    def exact_predictive_covar(
+        self, train_inputs, num_train, likelihood, precomputed_cache=None, non_batch_train=False
+    ):
         if self.kernel.has_custom_exact_predictions:
             return self.evaluate_kernel().exact_predictive_covar(
-                num_train, likelihood, precomputed_cache, non_batch_train
+                train_inputs, num_train, likelihood, precomputed_cache, non_batch_train
            )
         else:
             return super(LazyEvaluatedKernelTensor, self).exact_predictive_covar(
-                num_train, likelihood, precomputed_cache, non_batch_train
+                train_inputs, num_train, likelihood, precomputed_cache, non_batch_train
            )
 
     def repeat(self, *sizes):
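Reviewer note: the contract _batch_get_indices implements is a sparse gather: given index vectors b, i, j, it returns the 1-D tensor of kernel values k(x1[b_t, i_t], x2[b_t, j_t]) without materializing the full kernel matrix. A dense-tensor equivalent (the RBF closure here is illustrative, not the lazy code path):

import torch

def rbf(a, b):
    # (batch, n, d) x (batch, m, d) -> (batch, n, m); an illustrative RBF kernel
    return torch.exp(-(a.unsqueeze(-2) - b.unsqueeze(-3)).pow(2).sum(-1) / 2)

x1 = torch.randn(4, 10, 3)
x2 = torch.randn(4, 10, 3)
b = torch.tensor([0, 2, 3])  # batch indices
i = torch.tensor([1, 5, 9])  # left (row) indices
j = torch.tensor([0, 0, 7])  # right (column) indices

full = rbf(x1, x2)       # (4, 10, 10): the matrix the lazy tensor avoids building
picked = full[b, i, j]   # (3,): the entries _batch_get_indices returns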
22 changes: 18 additions & 4 deletions gpytorch/lazy/lazy_tensor.py
@@ -505,7 +505,14 @@ def evaluate_kernel(self):
         return self.representation_tree()(*self.representation())
 
     def exact_predictive_mean(
-        self, full_mean, train_labels, num_train, likelihood, precomputed_cache=None, non_batch_train=False
+        self,
+        full_mean,
+        train_inputs,
+        train_labels,
+        num_train,
+        likelihood,
+        precomputed_cache=None,
+        non_batch_train=False,
     ):
         """
         Computes the posterior predictive mean of a GP
@@ -514,6 +521,7 @@ def exact_predictive_mean(
 
         Args:
             full_mean (:obj:`torch.tensor`): the training and test prior means, stacked on top of each other
+            train_inputs (:obj:`torch.tensor`): the training data inputs
             train_labels (:obj:`torch.tensor`): the training labels minus the training prior mean
             noise (:obj:`torch.tensor`): the observed noise (from the likelihood)
             precomputed_cache (optional): speeds up subsequent computations (default: None)
@@ -537,7 +545,8 @@ def exact_predictive_mean(
         if non_batch_train and train_mean.dim() == 2:
             train_mean = train_mean[0]
             train_labels = train_labels[0]
-        mvn = likelihood(MultivariateNormal(train_mean, train_train_covar))
+        mvn = likelihood(MultivariateNormal(train_mean, train_train_covar), train_inputs)
+
         train_mean, train_train_covar = mvn.mean, mvn.lazy_covariance_matrix
 
         train_labels_offset = train_labels - train_mean
@@ -563,13 +572,16 @@ def exact_predictive_mean(
 
         return res, precomputed_cache.detach()
 
-    def exact_predictive_covar(self, num_train, likelihood, precomputed_cache=None, non_batch_train=False):
+    def exact_predictive_covar(
+        self, train_inputs, num_train, likelihood, precomputed_cache=None, non_batch_train=False
+    ):
         """
         Computes the posterior predictive covariance of a GP
         Assumes that self is the block prior covariance matrix of training and testing points
         [ K_XX, K_XX*; K_X*X, K_X*X* ]
 
         Args:
+            train_inputs (:obj:`torch.tensor`): the training data inputs
             num_train (int): The number of training points in the full covariance matrix
             noise (scalar): The observed noise (from the likelihood)
             precomputed_cache (optional): speeds up subsequent computations (default: None)
@@ -589,7 +601,9 @@ def exact_predictive_covar(self, num_train, likelihood, precomputed_cache=None,
         test_train_covar = self[num_train:, :num_train]
         test_test_covar = self[num_train:, num_train:]
 
-        train_train_covar = likelihood(MultivariateNormal(torch.zeros(1), train_train_covar)).lazy_covariance_matrix
+        train_train_covar = likelihood(
+            MultivariateNormal(torch.zeros(1), train_train_covar), train_inputs
+        ).lazy_covariance_matrix
         if not beta_features.fast_pred_var.on():
             from .matmul_lazy_tensor import MatmulLazyTensor
 
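Reviewer note: for orientation, the quantity this method computes, written densely. A hedged sketch of the standard GP posterior mean with a scalar noise for simplicity (the real implementation operates on LazyTensors, caches solves, and, per this PR, lets the likelihood make the noise depend on train_inputs):

import torch

def naive_exact_predictive_mean(full_covar, full_mean, train_labels, num_train, noise):
    # mu_* = mu_X* + K_X*X (K_XX + noise * I)^{-1} (y - mu_X)
    train_train = full_covar[:num_train, :num_train]
    test_train = full_covar[num_train:, :num_train]
    train_mean, test_mean = full_mean[:num_train], full_mean[num_train:]

    noisy = train_train + noise * torch.eye(num_train)
    alpha = torch.linalg.solve(noisy, (train_labels - train_mean).unsqueeze(-1))
    return test_mean + (test_train @ alpha).squeeze(-1)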
18 changes: 14 additions & 4 deletions gpytorch/likelihoods/__init__.py
@@ -1,15 +1,25 @@
 #!/usr/bin/env python3
 
 from .likelihood import Likelihood
-from .gaussian_likelihood import GaussianLikelihood
-from .multitask_gaussian_likelihood import MultitaskGaussianLikelihood
 from .bernoulli_likelihood import BernoulliLikelihood
+from .gaussian_likelihood import GaussianLikelihood, _GaussianLikelihoodBase
+from .multitask_gaussian_likelihood import (
+    MultitaskGaussianLikelihood,
+    MultitaskGaussianLikelihoodKronecker,
+    _MultitaskGaussianLikelihoodBase,
+)
+from .noise_models import HeteroskedasticNoise
 from .softmax_likelihood import SoftmaxLikelihood
 
+
 __all__ = [
-    "Likelihood",
+    "_GaussianLikelihoodBase",
+    "_MultitaskGaussianLikelihoodBase",
+    "BernoulliLikelihood",
     "GaussianLikelihood",
+    "HeteroskedasticNoise",
+    "Likelihood",
     "MultitaskGaussianLikelihood",
-    "BernoulliLikelihood",
+    "MultitaskGaussianLikelihoodKronecker",
     "SoftmaxLikelihood",
 ]
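Reviewer note: the new exports line up with the heteroskedastic-noise support in this PR. A composition sketch under assumptions: noise_gp is a hypothetical, already-trained GP that models noise as a function of the inputs, and only the first constructor argument of each class is relied on here.

from gpytorch.likelihoods import _GaussianLikelihoodBase, HeteroskedasticNoise


def make_heteroskedastic_likelihood(noise_gp):
    # noise_gp (hypothetical): trained GP mapping inputs to noise levels.
    # HeteroskedasticNoise wraps it as a noise-covariance module, and
    # _GaussianLikelihoodBase composes that module into a Gaussian likelihood,
    # which is why the call sites in this diff now pass train_inputs along.
    return _GaussianLikelihoodBase(HeteroskedasticNoise(noise_gp))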