Skip to content

Commit 1909c0c

Browse files
updates
1 parent 37f2d28 commit 1909c0c

File tree

5 files changed

+204
-27
lines changed

5 files changed

+204
-27
lines changed

GNNlib/Project.toml

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ version = "0.2.2"
77
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
88
DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
99
GNNGraphs = "aed8fd31-079b-4b5a-b342-a13352159b8c"
10+
GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527"
1011
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
1112
MLUtils = "f1d291b0-491e-4a28-83b9-f70985020b54"
1213
NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
@@ -18,15 +19,20 @@ AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
1819
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
1920

2021
[extensions]
21-
GNNlibCUDAExt = "CUDA"
2222
GNNlibAMDGPUExt = "AMDGPU"
23+
GNNlibCUDAExt = "CUDA"
24+
25+
# GPUArraysCore is not needed as a direct dependency
26+
# but pinning it to 0.1 avoids problems when we do Pkg.add("CUDA") in testing
27+
# See https://github.com/JuliaGPU/CUDA.jl/issues/2564
2328

2429
[compat]
2530
AMDGPU = "1"
2631
CUDA = "4, 5"
2732
ChainRulesCore = "1.24"
2833
DataStructures = "0.18"
2934
GNNGraphs = "1.0"
35+
GPUArraysCore = "0.1"
3036
LinearAlgebra = "1"
3137
MLUtils = "0.4"
3238
NNlib = "0.9"

GNNlib/test/Project.toml

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,10 @@
11
[deps]
2+
FiniteDifferences = "26cc04aa-876d-5657-8c51-4c34ba976000"
3+
Functors = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
24
GNNGraphs = "aed8fd31-079b-4b5a-b342-a13352159b8c"
35
GNNlib = "a6a84749-d869-43f8-aacc-be26a1996e48"
6+
GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527"
7+
Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6"
48
MLUtils = "f1d291b0-491e-4a28-83b9-f70985020b54"
59
NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
610
Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
@@ -10,3 +14,7 @@ SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
1014
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
1115
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
1216
TestItemRunner = "f8b46487-2199-4994-9208-9a1283c18c0a"
17+
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
18+
19+
[compat]
20+
GPUArraysCore = "0.1"

GNNlib/test/msgpass.jl

Lines changed: 21 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -1,33 +1,19 @@
1-
@testitem "msgpass" setup=[TestModuleNNlib] begin
1+
@testitem "msgpass" setup=[TestModuleGNNlib] begin
22
using .TestModuleGNNlib
33
#TODO test all graph types
4-
GRAPH_T = :coo
5-
in_channel = 10
6-
out_channel = 5
7-
num_V = 6
8-
num_E = 14
9-
T = Float32
10-
11-
adj = [0 1 0 0 0 0
12-
1 0 0 1 1 1
13-
0 0 0 0 0 1
14-
0 1 0 0 1 0
15-
0 1 0 1 0 1
16-
0 1 1 0 1 0]
17-
18-
X = rand(T, in_channel, num_V)
19-
E = rand(T, in_channel, num_E)
20-
21-
g = GNNGraph(adj, graph_type = GRAPH_T)
4+
g = TEST_GRAPHS[1]
5+
out_channel = 10
6+
num_V = g.num_nodes
7+
num_E = g.num_edges
228

239
@testset "propagate" begin
2410
function message(xi, xj, e)
2511
@test xi === nothing
2612
@test e === nothing
27-
ones(T, out_channel, size(xj, 2))
13+
ones(Float32, out_channel, size(xj, 2))
2814
end
2915

30-
m = propagate(message, g, +, xj = X)
16+
m = propagate(message, g, +, xj = g.x)
3117

3218
@test size(m) == (out_channel, num_V)
3319

@@ -40,7 +26,7 @@
4026
end
4127

4228
@testset "apply_edges" begin
43-
m = apply_edges(g, e = E) do xi, xj, e
29+
m = apply_edges(g, e = g.e) do xi, xj, e
4430
@test xi === nothing
4531
@test xj === nothing
4632
ones(out_channel, size(e, 2))
@@ -49,15 +35,15 @@
4935
@test m == ones(out_channel, num_E)
5036

5137
# With NamedTuple input
52-
m = apply_edges(g, xj = (; a = X, b = 2X), e = E) do xi, xj, e
38+
m = apply_edges(g, xj = (; a = g.x, b = 2g.x), e = g.e) do xi, xj, e
5339
@test xi === nothing
5440
@test xj.b == 2 * xj.a
5541
@test size(xj.a, 2) == size(xj.b, 2) == size(e, 2)
5642
ones(out_channel, size(e, 2))
5743
end
5844

5945
# NamedTuple output
60-
m = apply_edges(g, e = E) do xi, xj, e
46+
m = apply_edges(g, e = g.e) do xi, xj, e
6147
@test xi === nothing
6248
@test xj === nothing
6349
(; a = ones(out_channel, size(e, 2)))
@@ -85,7 +71,7 @@
8571
Adj = map(x -> x > 0 ? 1 : 0, A)
8672
X = rand(10, n)
8773

88-
g = GNNGraph(A, ndata = X, graph_type = GRAPH_T)
74+
g = GNNGraph(A, ndata = X, graph_type = :coo)
8975

9076
function spmm_copyxj_fused(g)
9177
propagate(copy_xj,
@@ -107,7 +93,7 @@
10793
Adj = map(x -> x > 0 ? 1 : 0, A)
10894
X = rand(10, n)
10995

110-
g = GNNGraph(A, ndata = X, edata = A.nzval, graph_type = GRAPH_T)
96+
g = GNNGraph(A, ndata = X, edata = A.nzval, graph_type = :coo)
11197

11298
function spmm_unfused(g)
11399
propagate((xi, xj, e) -> reshape(e, 1, :) .* xj,
@@ -139,3 +125,12 @@
139125
end
140126

141127
end
128+
129+
@testitem "msgpass GPU" setup=[TestModuleGNNlib] begin
130+
using .TestModuleGNNlib
131+
n, m = 10, 20
132+
g = rand_graph(n, m, graph_type = :coo)
133+
x = rand(Float32, 2, n)
134+
f(g, x) = propagate(copy_xj, g, +, xj = x)
135+
test_gradients(f, g, x; test_gpu=true, test_grad_f=false, compare_finite_diff=false)
136+
end

GNNlib/test/test_module.jl

Lines changed: 164 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,30 @@
11
@testmodule TestModuleGNNlib begin
22

3+
using Pkg
4+
5+
### GPU backends settings ############
6+
# tried to put this in __init__ but is not executed for some reason
7+
8+
## Uncomment below to change the default test settings
9+
ENV["GNN_TEST_CUDA"] = "true"
10+
# ENV["GNN_TEST_AMDGPU"] = "true"
11+
# ENV["GNN_TEST_Metal"] = "true"
12+
13+
to_test(backend) = get(ENV, "GNN_TEST_$(backend)", "false") == "true"
14+
has_dependecies(pkgs) = all(pkg -> haskey(Pkg.project().dependencies, pkg), pkgs)
15+
deps_dict = Dict(:CUDA => ["CUDA", "cuDNN"], :AMDGPU => ["AMDGPU"], :Metal => ["Metal"])
16+
17+
for (backend, deps) in deps_dict
18+
if to_test(backend)
19+
if !has_dependecies(deps)
20+
Pkg.add(deps)
21+
end
22+
@eval using $backend
23+
@eval $backend.allowscalar(false)
24+
end
25+
end
26+
######################################
27+
328
import Reexport: @reexport
429

530
@reexport using GNNlib
@@ -8,5 +33,144 @@ import Reexport: @reexport
833
@reexport using MLUtils
934
@reexport using SparseArrays
1035
@reexport using Test, Random, Statistics
36+
@reexport using MLDataDevices
37+
using Functors: fmapstructure_with_path
38+
using Graphs
39+
using ChainRulesTestUtils, FiniteDifferences
40+
using Zygote
41+
using SparseArrays
42+
43+
# from this module
44+
export D_IN, D_OUT, GRAPH_TYPES, TEST_GRAPHS,
45+
test_gradients, finitediff_withgradient,
46+
check_equal_leaves
47+
48+
49+
const D_IN = 3
50+
const D_OUT = 5
51+
52+
function finitediff_withgradient(f, x...)
53+
y = f(x...)
54+
# We set a range to avoid domain errors
55+
fdm = FiniteDifferences.central_fdm(5, 1, max_range=1e-2)
56+
return y, FiniteDifferences.grad(fdm, f, x...)
57+
end
58+
59+
function check_equal_leaves(a, b; rtol=1e-4, atol=1e-4)
60+
fmapstructure_with_path(a, b) do kp, x, y
61+
if x isa AbstractArray
62+
# @show kp
63+
@test x ≈ y rtol=rtol atol=atol
64+
# elseif x isa Number
65+
# @show kp
66+
# @test x ≈ y rtol=rtol atol=atol
67+
end
68+
end
69+
end
70+
71+
function test_gradients(
72+
f,
73+
graph::GNNGraph,
74+
xs...;
75+
rtol=1e-5, atol=1e-5,
76+
test_gpu = false,
77+
test_grad_f = true,
78+
test_grad_x = true,
79+
compare_finite_diff = true,
80+
loss = (f, g, xs...) -> mean(f(g, xs...)),
81+
)
82+
83+
if !test_gpu && !compare_finite_diff
84+
error("You should either compare finite diff vs CPU AD \
85+
or CPU AD vs GPU AD.")
86+
end
87+
88+
## Let's make sure first that the forward pass works.
89+
l = loss(f, graph, xs...)
90+
@test l isa Number
91+
if test_gpu
92+
gpu_dev = gpu_device(force=true)
93+
cpu_dev = cpu_device()
94+
graph_gpu = graph |> gpu_dev
95+
xs_gpu = xs |> gpu_dev
96+
f_gpu = f |> gpu_dev
97+
l_gpu = loss(f_gpu, graph_gpu, xs_gpu...)
98+
@test l_gpu isa Number
99+
end
100+
101+
if test_grad_x
102+
# Zygote gradient with respect to input.
103+
y, g = Zygote.withgradient((xs...) -> loss(f, graph, xs...), xs...)
104+
105+
if compare_finite_diff
106+
# Cast to Float64 to avoid precision issues.
107+
f64 = f |> Flux.f64
108+
xs64 = xs .|> Flux.f64
109+
y_fd, g_fd = finitediff_withgradient((xs...) -> loss(f64, graph, xs...), xs64...)
110+
@test y ≈ y_fd rtol=rtol atol=atol
111+
check_equal_leaves(g, g_fd; rtol, atol)
112+
end
113+
114+
if test_gpu
115+
# Zygote gradient with respect to input on GPU.
116+
y_gpu, g_gpu = Zygote.withgradient((xs...) -> loss(f_gpu, graph_gpu, xs...), xs_gpu...)
117+
@test get_device(g_gpu) == get_device(xs_gpu)
118+
@test y_gpu ≈ y rtol=rtol atol=atol
119+
check_equal_leaves(g_gpu |> cpu_dev, g; rtol, atol)
120+
end
121+
end
122+
123+
if test_grad_f
124+
# Zygote gradient with respect to f.
125+
y, g = Zygote.withgradient(f -> loss(f, graph, xs...), f)
126+
127+
if compare_finite_diff
128+
# Cast to Float64 to avoid precision issues.
129+
f64 = f |> Flux.f64
130+
ps, re = Flux.destructure(f64)
131+
y_fd, g_fd = finitediff_withgradient(ps -> loss(re(ps),graph, xs...), ps)
132+
g_fd = (re(g_fd[1]),)
133+
@test y ≈ y_fd rtol=rtol atol=atol
134+
check_equal_leaves(g, g_fd; rtol, atol)
135+
end
136+
137+
if test_gpu
138+
# Zygote gradient with respect to f on GPU.
139+
y_gpu, g_gpu = Zygote.withgradient(f -> loss(f,graph_gpu, xs_gpu...), f_gpu)
140+
# @test get_device(g_gpu) == get_device(xs_gpu)
141+
@test y_gpu ≈ y rtol=rtol atol=atol
142+
check_equal_leaves(g_gpu |> cpu_dev, g; rtol, atol)
143+
end
144+
end
145+
return true
146+
end
147+
148+
149+
function generate_test_graphs(graph_type)
150+
adj1 = [0 1 0 1
151+
1 0 1 0
152+
0 1 0 1
153+
1 0 1 0]
154+
155+
g1 = GNNGraph(adj1,
156+
ndata = rand(Float32, D_IN, 4);
157+
graph_type)
158+
159+
adj_single_vertex = [0 0 0 1
160+
0 0 0 0
161+
0 0 0 1
162+
1 0 1 0]
163+
164+
g_single_vertex = GNNGraph(adj_single_vertex,
165+
ndata = rand(Float32, D_IN, 4);
166+
graph_type)
167+
168+
return (g1, g_single_vertex)
169+
end
170+
171+
GRAPH_TYPES = [:coo, :dense, :sparse]
172+
TEST_GRAPHS = [generate_test_graphs(:coo)...,
173+
generate_test_graphs(:dense)...,
174+
generate_test_graphs(:sparse)...]
11175

12176
end # module

GraphNeuralNetworks/test/Project.toml

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@ ChainRulesTestUtils = "cdddcdb0-9152-4a09-a978-84456f9df70a"
33
FiniteDifferences = "26cc04aa-876d-5657-8c51-4c34ba976000"
44
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
55
Functors = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
6+
GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527"
67
GraphNeuralNetworks = "cffab07f-9bc2-4db1-8861-388f63bf7694"
78
Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6"
89
MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458"
@@ -13,3 +14,6 @@ Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
1314
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
1415
TestItemRunner = "f8b46487-2199-4994-9208-9a1283c18c0a"
1516
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
17+
18+
[compat]
19+
GPUArraysCore = "0.1"

0 commit comments

Comments
 (0)