Commit 322bb24

improve gpu tests and some fixes
1 parent: 8fdf813

5 files changed: +68 -108 lines

src/featuredgraph.jl

Lines changed: 5 additions & 5 deletions
@@ -234,10 +234,10 @@ function LightGraphs.adjacency_matrix(fg::FeaturedGraph{<:ADJMAT_T}, T::DataType
     return dir == :out ? A : A'
 end
 
-function LightGraphs.degree(fg::FeaturedGraph{<:COO_T}; dir=:out)
+function LightGraphs.degree(fg::FeaturedGraph{<:COO_T}, T=Int; dir=:out)
     s, t = edge_index(fg)
-    degs = fill!(similar(s, eltype(s), fg.num_nodes), 0)
-    o = fill!(similar(s, eltype(s), fg.num_edges), 1)
+    degs = fill!(similar(s, T, fg.num_nodes), 0)
+    o = fill!(similar(s, Int, fg.num_edges), 1)
     if dir ∈ [:out, :both]
         NNlib.scatter!(+, degs, o, s)
     end
@@ -247,9 +247,9 @@ function LightGraphs.degree(fg::FeaturedGraph{<:COO_T}; dir=:out)
     return degs
 end
 
-function LightGraphs.degree(fg::FeaturedGraph{<:ADJMAT_T}; dir=:out)
+function LightGraphs.degree(fg::FeaturedGraph{<:ADJMAT_T}, T=Int; dir=:out)
     @assert dir ∈ (:in, :out)
-    A = graph(fg)
+    A = adjacency_matrix(fg, T)
     return dir == :out ? vec(sum(A, dims=2)) : vec(sum(A, dims=1))
 end
 
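
With the new element-type argument, degree can return its result directly in the floating-point type a caller needs, instead of inheriting eltype(s) from the edge indices. A minimal usage sketch, assuming the FeaturedGraph constructor and the LightGraphs.degree extension shown above (the import line is an assumption, not part of this diff):

    using GraphNeuralNetworks, LightGraphs   # assumed imports
    adj = [0 1 0 1
           1 0 1 0
           0 1 0 1
           1 0 1 0]
    fg = FeaturedGraph(adj, graph_type=:coo)
    d = degree(fg, Float32, dir=:in)   # Float32 degree vector
    c = 1 ./ sqrt.(d)                  # ready for GCN-style normalization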

src/layers/conv.jl

Lines changed: 8 additions & 10 deletions
@@ -34,22 +34,20 @@ end
 ## but cannot compute the normalized laplacian of sparse cuda matrices yet,
 ## therefore fallback to message passing framework on gpu for the time being
 
-function (l::GCNConv)(fg::FeaturedGraph, x::AbstractMatrix)
-    Ã = normalized_adjacency(fg, eltype(x); dir=:out, add_self_loops=true)
-    l.σ.(l.weight * x * Ã .+ l.bias)
-end
+# function (l::GCNConv)(fg::FeaturedGraph, x::AbstractMatrix)
+#     Ã = normalized_adjacency(fg, eltype(x); dir=:out, add_self_loops=true)
+#     l.σ.(l.weight * x * Ã .+ l.bias)
+# end
 
 message(l::GCNConv, xi, xj) = xj
 update(l::GCNConv, m, x) = m
 
-function (l::GCNConv)(fg::FeaturedGraph, x::CuMatrix)
+function (l::GCNConv)(fg::FeaturedGraph, x::AbstractMatrix{T}) where T
     fg = add_self_loops(fg; add_to_existing=true)
-    T = eltype(l.weight)
-    # cout = sqrt.(degree(fg, dir=:out))
-    cin = 1 ./ reshape(sqrt.(T.(degree(fg, dir=:in))), 1, :)
-    x = cin .* x
+    c = 1 ./ sqrt.(degree(fg, T, dir=:in))
+    x = x .* c'
     _, x = propagate(l, fg, nothing, x, nothing, +)
-    x = cin .* x
+    x = x .* c'
     return l.σ.(l.weight * x .+ l.bias)
 end
 
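
The rewritten forward pass avoids materializing Ã on the GPU: scaling the features by 1 ./ sqrt.(degree) both before and after propagate is algebraically the same as multiplying by the symmetrically normalized adjacency D^(-1/2) A D^(-1/2). A small self-contained dense-matrix check of that identity (plain Julia with LinearAlgebra and Test only; no package API assumed):

    using LinearAlgebra, Test

    A = Float32[0 1 0; 1 0 1; 0 1 0] + I   # adjacency with self-loops, mimicking add_self_loops
    c = 1 ./ sqrt.(vec(sum(A, dims=1)))    # 1/sqrt of (in-)degrees
    X = rand(Float32, 4, 3)                # features: channels × nodes
    Y = ((X .* c') * A) .* c'              # scale, aggregate neighbours, scale again
    @test Y ≈ X * (Diagonal(c) * A * Diagonal(c))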

test/cuda/layers/conv.jl

Lines changed: 19 additions & 91 deletions
@@ -7,115 +7,43 @@
            0 1 0 1
            1 0 1 0]
 
-    fg = FeaturedGraph(adj, graph_type=GRAPH_T) |> gpu
-
+    fg = FeaturedGraph(adj, graph_type=GRAPH_T)
+    X = rand(Float32, in_channel, N)
+
     @testset "GCNConv" begin
-        gc = GCNConv(in_channel=>out_channel) |> gpu
-        @test size(gc.weight) == (out_channel, in_channel)
-        @test size(gc.bias) == (out_channel,)
-        @test adjacency_matrix(gc.fg |> cpu) == adj
-
-        X = rand(in_channel, N) |> gpu
-        Y = gc(fg, X)
-        @test size(Y) == (out_channel, N)
-
-        g = Zygote.gradient(x -> sum(gc(x)), X)[1]
-        @test size(g) == size(X)
-
-        g = Zygote.gradient(model -> sum(model(X)), gc)[1]
-        @test size(g.weight) == size(gc.weight)
-        @test size(g.bias) == size(gc.bias)
+        m = GCNConv(in_channel => out_channel)
+        gpugradtest(m, fg, X)
     end
 
-
     @testset "ChebConv" begin
         k = 6
-        cc = ChebConv(in_channel=>out_channel, k) |> gpu
-        @test size(cc.weight) == (out_channel, in_channel, k)
-        @test size(cc.bias) == (out_channel,)
-        @test adjacency_matrix(cc.fg |> cpu) == adj
-        @test cc.k == k
-
-        @test_broken begin
-            X = rand(in_channel, N) |> gpu
-            Y = cc(fg, X)
-            @test size(Y) == (out_channel, N)
-
-            g = Zygote.gradient(x -> sum(cc(x)), X)[1]
-            @test size(g) == size(X)
-
-            g = Zygote.gradient(model -> sum(model(fg, X)), cc)[1]
-            @test size(g.weight) == size(cc.weight)
-            @test size(g.bias) == size(cc.bias)
-
-            true
-        end
+        m = ChebConv(in_channel => out_channel, k)
+        @test_broken gpugradtest(m, fg, X)
     end
 
     @testset "GraphConv" begin
-        gc = GraphConv(in_channel=>out_channel) |> gpu
-        @test size(gc.weight1) == (out_channel, in_channel)
-        @test size(gc.weight2) == (out_channel, in_channel)
-        @test size(gc.bias) == (out_channel,)
-
-        X = rand(in_channel, N) |> gpu
-        Y = gc(fg, X)
-        @test size(Y) == (out_channel, N)
-
-        g = Zygote.gradient(x -> sum(gc(fg, x)), X)[1]
-        @test size(g) == size(X)
-
-        g = Zygote.gradient(model -> sum(model(fg, X)), gc)[1]
-        @test size(g.weight1) == size(gc.weight1)
-        @test size(g.weight2) == size(gc.weight2)
-        @test size(g.bias) == size(gc.bias)
+        m = GraphConv(in_channel => out_channel)
+        gpugradtest(m, fg, X)
     end
 
     @testset "GATConv" begin
-        gat = GATConv(in_channel=>out_channel) |> gpu
-        @test size(gat.weight) == (out_channel, in_channel)
-        @test size(gat.bias) == (out_channel,)
-
-        X = rand(in_channel, N) |> gpu
-        Y = gat(fg, X)
-        @test size(Y) == (out_channel, N)
-
-        g = Zygote.gradient(x -> sum(gat(fg, x)), X)[1]
-        @test size(g) == size(X)
+        m = GATConv(in_channel => out_channel)
+        gpugradtest(m, fg, X)
+    end
 
-        g = Zygote.gradient(model -> sum(model(fg, X)), gat)[1]
-        @test size(g.weight) == size(gat.weight)
-        @test size(g.bias) == size(gat.bias)
-        @test size(g.a) == size(gat.a)
+    @testset "GINConv" begin
+        m = GINConv(Dense(in_channel, out_channel), eps=0.1f0)
+        gpugradtest(m, fg, X)
     end
 
     @testset "GatedGraphConv" begin
         num_layers = 3
-        ggc = GatedGraphConv(out_channel, num_layers) |> gpu
-        @test size(ggc.weight) == (out_channel, out_channel, num_layers)
-
-        X = rand(in_channel, N) |> gpu
-        Y = ggc(fg, X)
-        @test size(Y) == (out_channel, N)
-
-        g = Zygote.gradient(x -> sum(ggc(fg, x)), X)[1]
-        @test size(g) == size(X)
-
-        g = Zygote.gradient(model -> sum(model(fg, X)), ggc)[1]
-        @test size(g.weight) == size(ggc.weight)
+        m = GatedGraphConv(out_channel, num_layers)
+        gpugradtest(m, fg, X)
     end
 
     @testset "EdgeConv" begin
-        ec = EdgeConv(Dense(2*in_channel, out_channel)) |> gpu
-        X = rand(in_channel, N) |> gpu
-        Y = ec(fg, X)
-        @test size(Y) == (out_channel, N)
-
-        g = Zygote.gradient(x -> sum(ec(fg, x)), X)[1]
-        @test size(g) == size(X)
-
-        g = Zygote.gradient(model -> sum(model(fg, X)), ec)[1]
-        @test size(g.nn.weight) == size(ec.nn.weight)
-        @test size(g.nn.bias) == size(ec.nn.bias)
+        m = EdgeConv(Dense(2*in_channel, out_channel))
+        gpugradtest(m, fg, X)
     end
 end

test/cuda/test_utils.jl

Lines changed: 32 additions & 0 deletions
@@ -0,0 +1,32 @@
+function gpugradtest(m, fg, x::AbstractArray{T}) where T
+    m_gpu = m |> gpu
+    fg_gpu = fg |> gpu
+    x_gpu = x |> gpu
+    ps = Flux.params(m)
+    ps_gpu = Flux.params(m_gpu)
+
+    # output
+    y = m(fg, x)
+    y_gpu = m_gpu(fg_gpu, x_gpu)
+    @test y_gpu isa CuArray{T}
+    @test Array(y_gpu) ≈ y
+
+    # input gradient
+    gs = gradient(x -> sum(m(fg, x)), x)[1]
+    gs_gpu = gradient(x_gpu -> sum(m_gpu(fg_gpu, x_gpu)), x_gpu)[1]
+    @test gs_gpu isa CuArray{T}
+    @test Array(gs_gpu) ≈ gs
+
+    # model gradient
+    gs = gradient(() -> sum(m(fg, x)), Flux.params(m))
+    gs_gpu = gradient(() -> sum(m_gpu(fg_gpu, x_gpu)), Flux.params(m_gpu))
+    for (p, p_gpu) in zip(ps, ps_gpu)
+        if gs[p] == nothing
+            @test gs_gpu[p_gpu] == nothing
+        else
+            @test gs_gpu[p_gpu] isa CuArray{T}
+            @test Array(gs_gpu[p_gpu]) ≈ gs[p]
+        end
+    end
+    return true
+end
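
A hypothetical call site, mirroring how the rewritten test/cuda/layers/conv.jl drives this helper (the layer, graph and sizes below are illustrative, not taken from the commit):

    m  = GraphConv(3 => 5)                            # any layer callable as m(fg, x)
    fg = FeaturedGraph([0 1; 1 0], graph_type=:coo)
    X  = rand(Float32, 3, 2)
    gpugradtest(m, fg, X)   # compares CPU vs GPU: forward output, input gradient, parameter gradients

Since the helper returns true on success, a layer whose GPU path does not work yet can be wrapped as @test_broken gpugradtest(m, fg, X), as the ChebConv testset above does.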

test/runtests.jl

Lines changed: 4 additions & 2 deletions
@@ -11,6 +11,8 @@ using Zygote
 using Test
 CUDA.allowscalar(false)
 
+include("cuda/test_utils.jl")
+
 tests = [
     "featured_graph",
     "layers/msgpass",
@@ -22,12 +24,12 @@ tests = [
 !CUDA.functional() && @warn("CUDA unavailable, not testing GPU support")
 
 # Testing all graph types. :sparse is a bit broken at the moment
-@testset "GraphNeuralNetworks: graph format $graph_type" for graph_type in (:dense, :coo, :sparse)
+@testset "GraphNeuralNetworks: graph format $graph_type" for graph_type in (:coo, :sparse, :dense)
     global GRAPH_T = graph_type
     for t in tests
        include("$t.jl")
 
-        if CUDA.functional() && GRAPH_T != :sparse && isdir("cuda/$t.jl")
+        if CUDA.functional() && GRAPH_T != :sparse && isfile("cuda/$t.jl")
            include("cuda/$t.jl")
        end
    end
