@@ -312,6 +312,52 @@ def test_2dard(self):
312
312
Kd = theano .function ([], cov (X , diag = True ))()
313
313
npt .assert_allclose (np .diag (K ), Kd , atol = 1e-5 )
314
314
315
+ def test_inv_lengthscale (self ):
316
+ X = np .linspace (0 , 1 , 10 )[:, None ]
317
+ with pm .Model () as model :
318
+ cov = pm .gp .cov .ExpQuad (1 , ls_inv = 10 )
319
+ K = theano .function ([], cov (X ))()
320
+ npt .assert_allclose (K [0 , 1 ], 0.53940 , atol = 1e-3 )
321
+ K = theano .function ([], cov (X , X ))()
322
+ npt .assert_allclose (K [0 , 1 ], 0.53940 , atol = 1e-3 )
323
+ # check diagonal
324
+ Kd = theano .function ([], cov (X , diag = True ))()
325
+ npt .assert_allclose (np .diag (K ), Kd , atol = 1e-5 )
326
+
327
+
328
class TestWhiteNoise(object):
    def test_1d(self):
        """WhiteNoise: sigma**2 on the diagonal, zero off-diagonal, all zeros when predicting."""
        inputs = np.linspace(0, 1, 10)[:, None]
        with pm.Model() as model:
            cov = pm.gp.cov.WhiteNoise(sigma=0.5)
            gram = theano.function([], cov(inputs))()
            npt.assert_allclose(gram[0, 1], 0.0, atol=1e-3)
            npt.assert_allclose(gram[0, 0], 0.5 ** 2, atol=1e-3)
            # diag=True shortcut must match the dense diagonal
            diag_only = theano.function([], cov(inputs, diag=True))()
            npt.assert_allclose(np.diag(gram), diag_only, atol=1e-5)
            # prediction (two-argument) form
            gram = theano.function([], cov(inputs, inputs))()
            npt.assert_allclose(gram[0, 1], 0.0, atol=1e-3)
            # white noise predicting should return all zeros
            npt.assert_allclose(gram[0, 0], 0.0, atol=1e-3)
345
+
346
class TestConstant(object):
    def test_1d(self):
        """Constant covariance returns the same value for every pair of points."""
        inputs = np.linspace(0, 1, 10)[:, None]
        with pm.Model() as model:
            cov = pm.gp.cov.Constant(2.5)
            gram = theano.function([], cov(inputs))()
            npt.assert_allclose(gram[0, 1], 2.5, atol=1e-3)
            npt.assert_allclose(gram[0, 0], 2.5, atol=1e-3)
            # two-argument (cross-covariance) form behaves identically
            gram = theano.function([], cov(inputs, inputs))()
            npt.assert_allclose(gram[0, 1], 2.5, atol=1e-3)
            npt.assert_allclose(gram[0, 0], 2.5, atol=1e-3)
            # diag=True shortcut must match the dense diagonal
            diag_only = theano.function([], cov(inputs, diag=True))()
            npt.assert_allclose(np.diag(gram), diag_only, atol=1e-5)
315
361
316
362
class TestRatQuad (object ):
317
363
def test_1d (self ):
@@ -383,6 +429,20 @@ def test_1d(self):
383
429
npt .assert_allclose (np .diag (K ), Kd , atol = 1e-5 )
384
430
385
431
432
class TestPeriodic(object):
    def test_1d(self):
        """Periodic kernel in 1D matches a precomputed reference value."""
        inputs = np.linspace(0, 1, 10)[:, None]
        with pm.Model() as model:
            cov = pm.gp.cov.Periodic(1, 0.1, 0.1)
            gram = theano.function([], cov(inputs))()
            npt.assert_allclose(gram[0, 1], 0.00288, atol=1e-3)
            # two-argument form must agree with the one-argument form
            gram = theano.function([], cov(inputs, inputs))()
            npt.assert_allclose(gram[0, 1], 0.00288, atol=1e-3)
            # diag=True shortcut must match the dense diagonal
            diag_only = theano.function([], cov(inputs, diag=True))()
            npt.assert_allclose(np.diag(gram), diag_only, atol=1e-5)
386
446
class TestLinear (object ):
387
447
def test_1d (self ):
388
448
X = np .linspace (0 , 1 , 10 )[:, None ]
@@ -491,7 +551,7 @@ def setup_method(self):
491
551
cov_func = pm .gp .cov .ExpQuad (3 , [0.1 , 0.2 , 0.3 ])
492
552
mean_func = pm .gp .mean .Constant (0.5 )
493
553
gp = pm .gp .Marginal (mean_func , cov_func )
494
- f = gp .marginal_likelihood ("f" , X , y , noise = 0.0 )
554
+ f = gp .marginal_likelihood ("f" , X , y , noise = 0.0 , is_observed = False , observed = y )
495
555
p = gp .conditional ("p" , Xnew )
496
556
self .logp = model .logp ({"p" : pnew })
497
557
self .X = X
@@ -558,27 +618,28 @@ def testApproximations(self, approx):
558
618
approx_logp = model .logp ({"f" : self .y , "p" : self .pnew })
559
619
npt .assert_allclose (approx_logp , self .logp , atol = 0 , rtol = 1e-2 )
560
620
561
- def testPredictCov (self ):
621
+ @pytest .mark .parametrize ('approx' , ['FITC' , 'VFE' , 'DTC' ])
622
+ def testPredictVar (self , approx ):
562
623
with pm .Model () as model :
563
624
cov_func = pm .gp .cov .ExpQuad (3 , [0.1 , 0.2 , 0.3 ])
564
625
mean_func = pm .gp .mean .Constant (0.5 )
565
- gp = pm .gp .MarginalSparse (mean_func , cov_func , approx = "DTC" )
626
+ gp = pm .gp .MarginalSparse (mean_func , cov_func , approx = approx )
566
627
f = gp .marginal_likelihood ("f" , self .X , self .X , self .y , self .sigma )
567
- mu1 , cov1 = self .gp .predict (self .Xnew , pred_noise = True )
568
- mu2 , cov2 = gp .predict (self .Xnew , pred_noise = True )
628
+ mu1 , var1 = self .gp .predict (self .Xnew , diag = True )
629
+ mu2 , var2 = gp .predict (self .Xnew , diag = True )
569
630
npt .assert_allclose (mu1 , mu2 , atol = 0 , rtol = 1e-3 )
570
- npt .assert_allclose (cov1 , cov2 , atol = 0 , rtol = 1e-3 )
631
+ npt .assert_allclose (var1 , var2 , atol = 0 , rtol = 1e-3 )
571
632
572
- def testPredictVar (self ):
633
+ def testPredictCov (self ):
573
634
with pm .Model () as model :
574
635
cov_func = pm .gp .cov .ExpQuad (3 , [0.1 , 0.2 , 0.3 ])
575
636
mean_func = pm .gp .mean .Constant (0.5 )
576
637
gp = pm .gp .MarginalSparse (mean_func , cov_func , approx = "DTC" )
577
- f = gp .marginal_likelihood ("f" , self .X , self .X , self .y , self .sigma )
578
- mu1 , var1 = self .gp .predict (self .Xnew , diag = True )
579
- mu2 , var2 = gp .predict (self .Xnew , diag = True )
638
+ f = gp .marginal_likelihood ("f" , self .X , self .X , self .y , self .sigma , is_observed = False )
639
+ mu1 , cov1 = self .gp .predict (self .Xnew , pred_noise = True )
640
+ mu2 , cov2 = gp .predict (self .Xnew , pred_noise = True )
580
641
npt .assert_allclose (mu1 , mu2 , atol = 0 , rtol = 1e-3 )
581
- npt .assert_allclose (var1 , var2 , atol = 0 , rtol = 1e-3 )
642
+ npt .assert_allclose (cov1 , cov2 , atol = 0 , rtol = 1e-3 )
582
643
583
644
584
645
class TestGPAdditive (object ):
@@ -621,7 +682,7 @@ def testAdditiveMarginal(self):
621
682
622
683
@pytest .mark .parametrize ('approx' , ['FITC' , 'VFE' , 'DTC' ])
623
684
def testAdditiveMarginalSparse (self , approx ):
624
- Xu = np .random .randn (10 , 1 )
685
+ Xu = np .random .randn (10 , 3 )
625
686
sigma = 0.1
626
687
with pm .Model () as model1 :
627
688
gp1 = pm .gp .MarginalSparse (self .means [0 ], self .covs [0 ], approx = approx )
@@ -669,8 +730,8 @@ def testAdditiveLatent(self):
669
730
fp2 = gptot .conditional ("fp2" , self .Xnew )
670
731
671
732
fp = np .random .randn (self .Xnew .shape [0 ])
672
- npt .assert_allclose (fp1 .logp ({"fp1 " : fp }), fp2 . logp ({ "fp2 " : fp }), atol = 0 , rtol = 1e-2 )
673
-
733
+ npt .assert_allclose (fp1 .logp ({"fsum " : self . y , "fp1 " : fp }),
734
+ fp2 . logp ({ "fsum" : self . y , "fp2" : fp }), atol = 0 , rtol = 1e-2 )
674
735
675
736
def testAdditiveSparseRaises (self ):
676
737
# cant add different approximations
0 commit comments