Commit 1d2d7c2

First WIP for examples
1 parent 4c14e26 commit 1d2d7c2

File tree: 3 files changed, +93 -0 lines changed

examples/deepkernellearning.jl

Lines changed: 35 additions & 0 deletions
@@ -0,0 +1,35 @@
using KernelFunctions
using MLDataUtils
using Zygote
using Flux
using Distributions, LinearAlgebra
using Plots

# Register the kernel and transform types so Flux can collect their parameters
Flux.@functor SqExponentialKernel
Flux.@functor KernelSum
Flux.@functor Matern32Kernel
Flux.@functor FunctionTransform

# Deep kernel: inputs are mapped through a neural network before the base kernel
neuralnet = Chain(Dense(1, 3), Dense(3, 2))
k = SqExponentialKernel(FunctionTransform(neuralnet))

# Noisy observations of sinc on [xmin, xmax]
xmin = -3; xmax = 3
x = range(xmin, xmax, length=100)
x_test = rand(Uniform(xmin, xmax), 200)
x, y = noisy_function(sinc, x; noise=0.1)
X = reshape(x, :, 1)
λ = [0.1] # log-regularization parameter

# Kernel ridge regression predictor: K(x, X) * (K(X, X) + exp(λ) * I)⁻¹ * y
f(x, k, λ) = kernelmatrix(k, x, X, obsdim=1) * inv(kernelmatrix(k, X, obsdim=1) + exp(λ[1]) * I) * y
f(X, k, 1.0)
# Mean squared error plus a norm penalty on the predictions
loss(k, λ) = f(X, k, λ) |> ŷ -> sum(abs2, y - ŷ) / length(y) + exp(λ[1]) * norm(ŷ)
loss(k, λ)

ps = Flux.params(k)
# push!(ps, λ)
opt = Flux.Momentum(1.0)
##
for i in 1:10
    grads = Zygote.gradient(() -> loss(k, λ), ps)
    Flux.Optimise.update!(opt, ps, grads)
    p = Plots.scatter(x, y, lab="data", title="Loss = $(loss(k, λ))")
    Plots.plot!(x, f(X, k, λ), lab="Prediction", lw=3.0)
    display(p)
end
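
To see what `FunctionTransform` is doing here, a minimal sanity check (my sketch, not part of this commit; the inputs x1 and x2 are hypothetical): the deep kernel between two points is just the base kernel evaluated on their network embeddings.

# Hypothetical check: evaluate the deep kernel on a pair of 1-D inputs
x1 = reshape([0.5], 1, 1)
x2 = reshape([-0.5], 1, 1)
kernelmatrix(k, x1, x2, obsdim=1) # 1×1 matrix: SqExponentialKernel on neuralnet(x1), neuralnet(x2)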

examples/kernelridgeregression.jl

Lines changed: 35 additions & 0 deletions
@@ -0,0 +1,35 @@
using KernelFunctions
using MLDataUtils
using Zygote
using Flux
using Distributions, LinearAlgebra
using Plots

# Register the kernel and transform types so Flux can collect their parameters
Flux.@functor SqExponentialKernel
Flux.@functor ScaleTransform
Flux.@functor KernelSum
Flux.@functor Matern32Kernel

# Noisy observations of sinc on [xmin, xmax]
xmin = -3; xmax = 3
x = range(xmin, xmax, length=100)
x_test = range(xmin, xmax, length=300)
x, y = noisy_function(sinc, x; noise=0.1)
X = reshape(x, :, 1)
X_test = reshape(x_test, :, 1)

k = SqExponentialKernel(1.0) #+ Matern32Kernel(2.0)
λ = [-1.0] # log-regularization parameter
# Kernel ridge regression predictor: K(x, X) * (K(X, X) + exp(λ) * I)⁻¹ * y
f(x, k, λ) = kernelmatrix(k, x, X, obsdim=1) * inv(kernelmatrix(k, X, obsdim=1) + exp(λ[1]) * I) * y
f(X, k, 1.0)
# Mean squared error plus a norm penalty on the predictions
loss(k, λ) = f(X, k, λ) |> ŷ -> sum(abs2, y - ŷ) / length(y) + exp(λ[1]) * norm(ŷ)
loss(k, λ)

ps = Flux.params(k)
push!(ps, λ) # also optimize the regularization parameter
opt = Flux.Momentum(0.1)
##
for i in 1:10
    grads = Zygote.gradient(() -> loss(k, λ), ps)
    Flux.Optimise.update!(opt, ps, grads)
    p = Plots.scatter(x, y, lab="data", title="Loss = $(loss(k, λ))")
    Plots.plot!(x_test, f(X_test, k, λ), lab="Prediction", lw=3.0)
    display(p)
end
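
As a side note, a numerically safer variant of the predictor (a sketch under the same definitions, not part of this commit) replaces the explicit inverse with a linear solve via `\`, avoiding the formation of `inv(...)`:

# Same predictor, solving the regularized linear system directly
f_solve(x, k, λ) = kernelmatrix(k, x, X, obsdim=1) * ((kernelmatrix(k, X, obsdim=1) + exp(λ[1]) * I) \ y)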

examples/svm.jl

Lines changed: 23 additions & 0 deletions
@@ -0,0 +1,23 @@
using KernelFunctions
using MLDataUtils
using Zygote
using Flux
using Distributions, LinearAlgebra
using Plots

N = 100 # Number of samples
μ = randn(2, 2) # Random centers
xgrid = range(-3, 3, length=100) # Create a grid
Xgrid = hcat(collect.(Iterators.product(xgrid, xgrid))...)' # Combine into a 2D grid
y = rand([-1, 1], N) # Select randomly between the two classes
X = zeros(N, 2)
X[y .== 1, :] = rand(MvNormal(μ[:, 1], I), count(y .== 1))' # Attribute samples to class 1
X[y .== -1, :] = rand(MvNormal(μ[:, 2], I), count(y .== -1))' # Attribute samples to class 2

k = SqExponentialKernel(2.0) # Create kernel function
λ = [-1.0] # log-regularization parameter (assumed value; undefined in the WIP)
f(x, k, λ) = kernelmatrix(k, x, X, obsdim=1) * inv(kernelmatrix(k, X, obsdim=1) + exp(λ[1]) * I) * y # Optimal prediction f
svmloss(k, λ) = f(X, k, λ) |> ŷ -> sum(max.(0.0, 1 .- y .* ŷ)) + exp(λ[1]) * norm(ŷ) # Total SVM (hinge) loss with regularization
pred = f(Xgrid, k, λ) # Compute prediction on a grid
contourf(xgrid, xgrid, reshape(pred, 100, 100)') # Reshape the flat predictions back onto the grid
scatter!(eachcol(X)..., color=y, lab="data")
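
A quick way to check the decision function, assuming the definitions above (a hypothetical addition, not in the commit): threshold the predictions at zero and compare against the labels.

# Fraction of training points on the correct side of the decision boundary
train_acc = count(sign.(f(X, k, λ)) .== y) / N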
