Commit 6907d71

Add some nothing
1 parent 0fb3d7f commit 6907d71

1 file changed: +30 −15 lines

examples/train-kernel-parameters/script.jl

Lines changed: 30 additions & 15 deletions
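
The change itself is mechanical: each trailing semicolon that previously suppressed a block's output is replaced by a `nothing #hide` line. For context (not part of the commit): in Literate.jl, a line ending in `#hide` is dropped from the rendered output, and because the hidden line evaluates to `nothing`, the generated example blocks print no return value. A minimal sketch of how such a script is rendered, with a hypothetical output directory:

using Literate

# Render the Literate script to Markdown. Lines ending in `#hide`, such as the
# `nothing #hide` lines added in this commit, are omitted from the rendered
# source, and each hidden `nothing` leaves no return value to display.
Literate.markdown("examples/train-kernel-parameters/script.jl", "docs/src/examples")
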
@@ -25,7 +25,8 @@ N = 50 # Number of samples
 x_train = rand(Uniform(xmin, xmax), N) # We sample 100 random samples
 σ = 0.1
 y_train = sinc.(x_train) + randn(N) * σ # We create a function and add some noise
-x_test = range(xmin - 0.1, xmax + 0.1; length=300);
+x_test = range(xmin - 0.1, xmax + 0.1; length=300)
+nothing #hide
 # Plot the data

 ## scatter(x_train, y_train; lab="data")
@@ -47,7 +48,8 @@ x_test = range(xmin - 0.1, xmax + 0.1; length=300);
 function kernelcall(θ)
     return (exp(θ[1]) * SqExponentialKernel() + exp(θ[2]) * Matern32Kernel()) ∘
            ScaleTransform(exp(θ[3]))
-end;
+end
+nothing #hide

 # From theory we know the prediction for a test set x given
 # the kernel parameters and normalization constant
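
That prediction, implemented by the function f in the next hunk, is the standard kernel ridge regression (equivalently, Gaussian process posterior mean) formula; writing σ² for the noise term exp(θ[4]) (notation used here only, not in the script):

$$\hat{y}(x_*) = K(x_*, X)\,\bigl(K(X, X) + \sigma^2 I\bigr)^{-1} y$$
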
@@ -56,28 +58,32 @@ function f(x, x_train, y_train, θ)
     k = kernelcall(θ[1:3])
     return kernelmatrix(k, x, x_train) *
            ((kernelmatrix(k, x_train) + exp(θ[4]) * I) \ y_train)
-end;
+end
+nothing #hide

 # We look how the prediction looks like
 # with starting parameters [1.0, 1.0, 1.0, 1.0] we get :

-ŷ = f(x_test, x_train, y_train, log.(ones(4)));
+ŷ = f(x_test, x_train, y_train, log.(ones(4)))
 ## scatter(x_train, y_train; lab="data")
 ## plot!(x_test, sinc; lab="true function")
 ## plot!(x_test, ŷ; lab="prediction")
+nothing #hide

 # We define the loss based on the L2 norm both
 # for the loss and the regularization

 function loss(θ)
     ŷ = f(x_train, x_train, y_train, θ)
     return sum(abs2, y_train - ŷ) + exp(θ[4]) * norm(ŷ)
-end;
+end
+nothing #hide

 # ### Training
 # Setting an initial value and initializing the optimizer:
 θ = log.([1.1, 0.1, 0.01, 0.001]) # Initial vector
-opt = Optimise.ADAGrad(0.5);
+opt = Optimise.ADAGrad(0.5)
+nothing #hide

 # The loss with our starting point:
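
Both f and loss above hinge on kernelmatrix from KernelFunctions.jl. A minimal standalone sketch of the kernel construction and the two matrices involved, with illustrative values rather than the script's parameters:

using KernelFunctions

# Same construction as kernelcall, with fixed illustrative parameters.
k = (2.0 * SqExponentialKernel() + 0.5 * Matern32Kernel()) ∘ ScaleTransform(3.0)

x_tr, x_te = rand(10), rand(4)
K_cross = kernelmatrix(k, x_te, x_tr)  # 4×10 matrix K(x_test, x_train)
K_train = kernelmatrix(k, x_tr)        # 10×10 matrix K(x_train, x_train)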

@@ -123,25 +129,30 @@ raw_initial_θ = (
     noise_var=positive(0.001),
 )

-flat_θ, unflatten = ParameterHandling.value_flatten(raw_initial_θ);
+flat_θ, unflatten = ParameterHandling.value_flatten(raw_initial_θ)
+nothing #hide

 function kernelcall(θ)
     return (θ.k1 * SqExponentialKernel() + θ.k2 * Matern32Kernel()) ∘
            ScaleTransform(θ.k3)
-end;
+end
+nothing #hide

 function f(x, x_train, y_train, θ)
     k = kernelcall(θ)
     return kernelmatrix(k, x, x_train) *
            ((kernelmatrix(k, x_train) + θ.noise_var * I) \ y_train)
-end;
+end
+nothing #hide

 function loss(θ)
     ŷ = f(x_train, x_train, y_train, θ)
     return sum(abs2, y_train - ŷ) + θ.noise_var * norm(ŷ)
-end;
+end
+nothing #hide

-initial_θ = ParameterHandling.value(raw_initial_θ);
+initial_θ = ParameterHandling.value(raw_initial_θ)
+nothing #hide

 # The loss with our starting point :
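
This ParameterHandling.jl version replaces the manual log/exp reparameterization: positive(...) records a positivity constraint, value_flatten turns the constrained named tuple into an unconstrained flat vector the optimizer can update freely, and the returned unflatten maps such a vector back to constrained values. A minimal sketch, with parameter names mirroring the ones above and illustrative values:

using ParameterHandling

raw = (k1=positive(1.0), k2=positive(1.0), k3=positive(1.0), noise_var=positive(0.001))

flat, unflatten = ParameterHandling.value_flatten(raw)
flat            # unconstrained Vector{Float64}, safe to step with gradient updates
unflatten(flat) # back to a NamedTuple of positive values, here ≈ the originals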

@@ -162,7 +173,8 @@ opt = Optimise.ADAGrad(0.5)
 for i in 1:25
     grads = (Zygote.gradient(loss ∘ unflatten, flat_θ))[1]
     Optimise.update!(opt, flat_θ, grads)
-end;
+end
+nothing #hide

 # Final loss

@@ -188,7 +200,8 @@ function f(x, x_train, y_train, θ)
     k = kernelc(θ[1:3])
     return kernelmatrix(k, x, x_train) *
            ((kernelmatrix(k, x_train) + (θ[4]) * I) \ y_train)
-end;
+end
+nothing #hide


 # We define the loss based on the L2 norm both
@@ -197,7 +210,8 @@ end;
 function loss(θ)
     ŷ = f(x_train, x_train, y_train, exp.(θ))
     return sum(abs2, y_train - ŷ) + exp(θ[4]) * norm(ŷ)
-end;
+end
+nothing #hide

 # ## Training the model

@@ -221,7 +235,8 @@ end
 for i in 1:25
     grads = only((Zygote.gradient(loss, θ))) # We compute the gradients given the kernel parameters and regularization
     Optimise.update!(opt, θ, grads)
-end;
+end
+nothing #hide

 # Final loss
 loss(θ)
