+def chol_solve (l:LowerTriMat m Float) (b:m=>Float) : m=>Float =
+  b' = forward_substitute l b
+  u = transposeLowerToUpper l
+  backward_substitute u b'
+
 ' # Kernel ridge regression
 
 ' To learn a function $f_{true}: \mathcal{X} \to \mathbb R$
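(A note on the new helper above, not part of the patch itself: `chol_solve` is the standard two-triangular-solve routine. Given the Cholesky factor $L$ of a symmetric positive-definite matrix $A = L L^\top$, the system $A x = b$ is solved by

$$L z = b \;\;\text{(forward substitution)}, \qquad L^\top x = z \;\;\text{(backward substitution)},$$

which is exactly the `forward_substitute` / `transposeLowerToUpper` / `backward_substitute` sequence in the added definition.)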
@@ -40,7 +46,7 @@ ys : Nx=>Float = for i. trueFun xs.i + noise * randn (ixkey k2 i)
 -- Kernel ridge regression
 def regress (kernel: a -> a -> Float) (xs: Nx=>a) (ys: Nx=>Float) : a -> Float =
   gram = for i j. kernel xs.i xs.j + select (i==j) 0.0001 0.0
-  alpha = solve gram ys
+  alpha = solve' gram ys
   predict = \x. sum for i. alpha.i * kernel xs.i x
   predict
 
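(For context, again not part of the patch: the only functional change in this hunk is the solver used to compute `alpha`, `solve` → `solve'`; the regression itself is unchanged. With Gram matrix $K_{ij} = k(x_i, x_j)$ and the small ridge term $\lambda = 10^{-4}$ added to the diagonal via `select`, the fitted coefficients and predictor are

$$\alpha = (K + \lambda I)^{-1} y, \qquad \hat f(x) = \sum_i \alpha_i \, k(x_i, x),$$

which is what the `alpha` and `predict` lines compute.)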
@@ -59,3 +65,35 @@ preds = map predict xtest
 
 :html showPlot $ xyPlot xtest preds
 > <html output>
+
+' # Gaussian process regression
+
+' GP regression (kriging) works in a similar way. Compared with kernel ridge regression, GP regression assumes a Gaussian prior over functions.
+  This, combined with Bayes' rule, also gives the variance of each prediction.
+
+' In this implementation, the conjugate gradient solver is replaced with the
+  Cholesky solver from `lib/linalg.dx` for efficiency.
+
+def gp_regress (kernel: a -> a -> Float) (xs: n=>a) (ys: n=>Float)
+    : (a -> (Float&Float)) =
+  noise_var = 0.0001
+  gram = for i j. kernel xs.i xs.j
+  c = chol (gram + eye *. noise_var)
+  alpha = chol_solve c ys
+  predict = \x.
+    k' = for i. kernel xs.i x
+    mu = sum for i. alpha.i * k'.i
+    alpha' = chol_solve c k'
+    var = kernel x x + noise_var - sum for i. k'.i * alpha'.i
+    (mu, var)
+  predict
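(As a reading of the new code rather than part of the patch: `gp_regress` computes the usual GP posterior at a test point $x_*$,

$$\mu(x_*) = k_*^\top (K + \sigma_n^2 I)^{-1} y, \qquad \sigma^2(x_*) = k(x_*, x_*) + \sigma_n^2 - k_*^\top (K + \sigma_n^2 I)^{-1} k_*,$$

where $K$ is `gram`, $\sigma_n^2$ is `noise_var`, and $k_*$ is `k'`. The single Cholesky factor `c` is reused for both solves: `alpha` is $(K + \sigma_n^2 I)^{-1} y$, computed once outside `predict`, and `alpha'` is $(K + \sigma_n^2 I)^{-1} k_*$, computed per test point; the closure then returns the `(mu, var)` pair required by the `(Float&Float)` result type.)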