@@ -44,6 +44,8 @@ def predict(self, Xnew, point=None, given=None, diag=False):
 @conditioned_vars(["X", "f"])
 class Latent(Base):
     R"""
+    Latent Gaussian process.
+
     The `gp.Latent` class is a direct implementation of a GP. No additive
     noise is assumed. It is called "Latent" because the underlying function
     values are treated as latent variables. It has a `prior` method and a
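For context, a minimal `gp.Latent` workflow looks roughly like the following. This is a sketch against the PyMC3 API shown in this diff; the data `X`, `y_obs`, the lengthscale prior, and the Bernoulli likelihood are illustrative assumptions, not part of the patch.

```python
import numpy as np
import pymc3 as pm

# Illustrative data: 50 one-dimensional inputs and binary responses.
X = np.linspace(0, 1, 50)[:, None]
y_obs = np.random.binomial(1, 0.5, size=50)

with pm.Model() as model:
    ls = pm.Gamma("ls", alpha=2, beta=2)      # lengthscale prior
    cov_func = pm.gp.cov.ExpQuad(1, ls=ls)

    # Latent GP: no additive noise is assumed; f itself is a latent variable.
    gp = pm.gp.Latent(cov_func=cov_func)
    f = gp.prior("f", X=X)

    # Any likelihood can then be placed over f, e.g. for binary data:
    y = pm.Bernoulli("y", p=pm.math.sigmoid(f), observed=y_obs)
```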
@@ -111,8 +113,10 @@ def _build_prior(self, name, X, reparameterize=True, **kwargs):

     def prior(self, name, X, reparameterize=True, **kwargs):
         R"""
-        Returns the GP prior distribution evaluated over the input
-        locations `X`. This is the prior probability over the space
+        Returns the GP prior distribution evaluated over the input
+        locations `X`.
+
+        This is the prior probability over the space
         of functions described by its mean and covariance function.

         .. math::
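The body of this `.. math::` directive lies outside the hunk. The prior it states is presumably the standard multivariate normal over the function values, in the notation used elsewhere in the file:

.. math::

   f \mid X \sim \text{MvNormal}\left( \mu(X),\, k(X, X') \right)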
@@ -163,8 +167,10 @@ def _build_conditional(self, Xnew, X, f, cov_total, mean_total):

     def conditional(self, name, Xnew, given={}, **kwargs):
         R"""
-        Returns the conditional distribution evaluated over new input
-        locations `Xnew`. Given a set of function values `f` that
+        Returns the conditional distribution evaluated over new input
+        locations `Xnew`.
+
+        Given a set of function values `f` that
         the GP prior was over, the conditional distribution over a
         set of new points, `f_*` is

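The formula this sentence trails into also falls outside the hunk. The standard noise-free GP conditional it refers to, written here for a zero mean function, is:

.. math::

   f_* \mid f \sim \text{MvNormal}\left( K(X_*, X) K(X, X)^{-1} f,\;
       K(X_*, X_*) - K(X_*, X) K(X, X)^{-1} K(X, X_*) \right)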
@@ -199,10 +205,11 @@ def conditional(self, name, Xnew, given={}, **kwargs):
 @conditioned_vars(["X", "f", "nu"])
 class TP(Latent):
     """
-    Implementation of a Student's T process prior. The usage is nearly
-    identical to that of `gp.Latent`. The differences are that it must
-    be initialized with a degrees of freedom parameter, and TP is not
-    additive. Given a mean and covariance function, and a degrees of
+    Student's T process prior.
+
+    The usage is nearly identical to that of `gp.Latent`. The differences
+    are that it must be initialized with a degrees of freedom parameter, and
+    TP is not additive. Given a mean and covariance function, and a degrees of
     freedom parameter, the function $f(x)$ is modeled as,

     .. math::
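Construction differs from `gp.Latent` only in the extra degrees-of-freedom argument. A sketch, continuing the earlier example; the keyword name `nu` is inferred from the `conditioned_vars` list above, and the rest is assumed boilerplate:

```python
with pm.Model() as model:
    cov_func = pm.gp.cov.ExpQuad(1, ls=0.1)

    # A Student's T process needs a degrees-of-freedom parameter nu.
    # Unlike GPs, TPs are not additive: tp1 + tp2 is not a TP.
    tp = pm.gp.TP(cov_func=cov_func, nu=3)
    f = tp.prior("f", X=X)
```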
@@ -245,8 +252,10 @@ def _build_prior(self, name, X, reparameterize=True, **kwargs):

     def prior(self, name, X, reparameterize=True, **kwargs):
         R"""
-        Returns the TP prior distribution evaluated over the input
-        locations `X`. This is the prior probability over the space
+        Returns the TP prior distribution evaluated over the input
+        locations `X`.
+
+        This is the prior probability over the space
         of functions described by its mean and covariance function.

         Parameters
@@ -283,8 +292,10 @@ def _build_conditional(self, Xnew, X, f):

     def conditional(self, name, Xnew, **kwargs):
         R"""
-        Returns the conditional distribution evaluated over new input
-        locations `Xnew`. Given a set of function values `f` that
+        Returns the conditional distribution evaluated over new input
+        locations `Xnew`.
+
+        Given a set of function values `f` that
         the TP prior was over, the conditional distribution over a
         set of new points, `f_*` is

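Note that, per the signature above, `TP.conditional` takes no `given` dictionary: since a TP is not additive, there is no additive decomposition to condition on. Usage is simply the following, with `Xnew` an assumed grid of prediction points and `tp` from the sketch above:

```python
Xnew = np.linspace(1, 2, 30)[:, None]  # assumed new input locations

with model:
    # Conditional distribution of the TP at the new points,
    # given the function values f placed by tp.prior above.
    fnew = tp.conditional("fnew", Xnew=Xnew)
```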
@@ -310,6 +321,8 @@ def conditional(self, name, Xnew, **kwargs):
 @conditioned_vars(["X", "y", "noise"])
 class Marginal(Base):
     R"""
+    Marginal Gaussian process.
+
     The `gp.Marginal` class is an implementation of the sum of a GP
     prior and additive noise. It has `marginal_likelihood`, `conditional`
     and `predict` methods. This GP implementation can be used to
@@ -335,7 +348,7 @@ class Marginal(Base):
         cov_func = pm.gp.cov.ExpQuad(1, ls=0.1)

         # Specify the GP. The default mean function is `Zero`.
-        gp = pm.gp.Latent(cov_func=cov_func)
+        gp = pm.gp.Marginal(cov_func=cov_func)

         # Place a GP prior over the function f.
         sigma = pm.HalfCauchy("sigma", beta=3)
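The docstring example this hunk corrects presumably continues along these lines; assembled as a whole, with `X` and `y` assumed to be the training data, it reads:

```python
with pm.Model() as model:
    cov_func = pm.gp.cov.ExpQuad(1, ls=0.1)

    # Specify the GP. The default mean function is `Zero`.
    gp = pm.gp.Marginal(cov_func=cov_func)

    sigma = pm.HalfCauchy("sigma", beta=3)
    # The noise is integrated out analytically, so y_ is the observed
    # marginal distribution, not a draw of the latent function.
    y_ = gp.marginal_likelihood("y", X=X, y=y, noise=sigma)
    trace = pm.sample(1000)
```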
@@ -363,9 +376,10 @@ def _build_marginal_likelihood(self, X, noise):

     def marginal_likelihood(self, name, X, y, noise, is_observed=True, **kwargs):
         R"""
-        Returns the marginal likelihood distribution, given the input
-        locations `X` and the data `y`. This is integral over the product of the GP
-        prior and a normal likelihood.
+        Returns the marginal likelihood distribution, given the input
+        locations `X` and the data `y`.
+
+        This is the integral over the product of the GP prior and a normal likelihood.

         .. math::
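Again the `.. math::` body is cut off by the diff context. The integral the docstring describes is the standard GP marginal likelihood, which has a closed form under a normal likelihood with noise variance :math:`\sigma^2`:

.. math::

   p(y \mid X) = \int p(y \mid f, X)\, p(f \mid X)\, df,
   \qquad
   y \mid X \sim \text{MvNormal}\left( \mu(X),\, K(X, X) + \sigma^2 I \right)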
@@ -445,10 +459,11 @@ def _build_conditional(self, Xnew, pred_noise, diag, X, y, noise,

     def conditional(self, name, Xnew, pred_noise=False, given={}, **kwargs):
         R"""
-        Returns the conditional distribution evaluated over new input
-        locations `Xnew`. Given a set of function values `f` that
-        the GP prior was over, the conditional distribution over a
-        set of new points, `f_*` is
+        Returns the conditional distribution evaluated over new input
+        locations `Xnew`.
+
+        Given a set of function values `f` that the GP prior was over, the
+        conditional distribution over a set of new points, `f_*` is:

         .. math::
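In use, the `pred_noise` flag switches between predicting the noise-free latent function and predicting new noisy observations; `given` is only needed when conditioning on components of an additive GP. A sketch, with `Xnew` and `model` assumed from earlier steps:

```python
Xnew = np.linspace(0, 2, 100)[:, None]  # assumed prediction grid

with model:
    # Distribution of the noise-free latent function at Xnew.
    f_pred = gp.conditional("f_pred", Xnew=Xnew)

    # Distribution of new *observations*: predictive noise included.
    y_pred = gp.conditional("y_pred", Xnew=Xnew, pred_noise=True)
```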
@@ -535,6 +550,8 @@ def predictt(self, Xnew, diag=False, pred_noise=False, given={}):
 @conditioned_vars(["X", "Xu", "y", "sigma"])
 class MarginalSparse(Marginal):
     R"""
+    Approximate marginal Gaussian process.
+
     The `gp.MarginalSparse` class is an implementation of the sum of a GP
     prior and additive noise. It has `marginal_likelihood`, `conditional`
     and `predict` methods. This GP implementation can be used to
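The sparse variant trades exactness for speed via a set of inducing point locations `Xu`. A sketch, assuming the FITC approximation (PyMC3 also offers "VFE" and "DTC") and an illustrative inducing-point initialization:

```python
# A subset of the inputs as inducing points (k-means is also common).
Xu = X[::5]

with pm.Model() as model:
    cov_func = pm.gp.cov.ExpQuad(1, ls=0.1)
    gp = pm.gp.MarginalSparse(cov_func=cov_func, approx="FITC")

    sigma = pm.HalfCauchy("sigma", beta=3)
    # Note the noise argument is named `sigma` here, not `noise`
    # as in Marginal (see the signature in the next hunk).
    y_ = gp.marginal_likelihood("y", X=X, Xu=Xu, y=y, sigma=sigma)
```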
@@ -638,7 +655,7 @@ def _build_marginal_likelihood_logp(self, X, Xu, y, sigma):

     def marginal_likelihood(self, name, X, Xu, y, sigma, is_observed=True, **kwargs):
         R"""
-        Returns the approximate marginal likelihood distribution, given the input
+        Returns the approximate marginal likelihood distribution, given the input
         locations `X`, inducing point locations `Xu`, data `y`, and white noise
         standard deviations `sigma`.
@@ -724,7 +741,7 @@ def _get_given_vals(self, given):

     def conditional(self, name, Xnew, pred_noise=False, given={}, **kwargs):
         R"""
-        Returns the approximate conditional distribution of the GP evaluated over
+        Returns the approximate conditional distribution of the GP evaluated over
         new input locations `Xnew`.

         Parameters
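After fitting, the approximate conditional is used the same way as `Marginal.conditional`. Drawing posterior-predictive samples might look like the following; `pm.sample_posterior_predictive` and its `var_names` argument are from later PyMC3 3.x releases, and `trace` and `Xnew` are assumed from earlier steps:

```python
with model:
    fnew = gp.conditional("fnew", Xnew=Xnew, pred_noise=False)
    # Sample fnew at Xnew using the posterior samples in `trace`.
    ppc = pm.sample_posterior_predictive(trace, var_names=["fnew"])
```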