
Commit 89f91b8

fix
1 parent 1909c0c commit 89f91b8

7 files changed: +131 −54 lines

GNNGraphs/src/transform.jl

Lines changed: 1 addition & 0 deletions
@@ -731,6 +731,7 @@ end
 Set `w` as edge weights in the returned graph.
 """
 function set_edge_weight(g::GNNGraph, w::AbstractVector)
+    # TODO preserve the representation instead of converting to COO
     s, t = edge_index(g)
     @assert length(w) == length(s)
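For orientation, a minimal sketch of how `set_edge_weight` is typically used (the graph and weight values below are illustrative, not part of this commit):

    using GNNGraphs
    g = GNNGraph([1, 2], [2, 3], ndata = rand(Float32, 2, 3))  # hypothetical 3-node path graph
    w = rand(Float32, g.num_edges)                             # one weight per edge
    gw = set_edge_weight(g, w)   # new graph carrying `w` as edge weights;
                                 # per the TODO above, the representation is currently converted to COO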

GNNGraphs/test/gnngraph.jl

Lines changed: 3 additions & 0 deletions
@@ -1,3 +1,6 @@
+# TODO test that the graph type is preserved
+# when constructing a GNNGraph from another
+
 @testset "Constructor: adjacency matrix" begin
     A = sprand(10, 10, 0.5)
     sA, tA, vA = findnz(A)

GNNlib/src/msgpass.jl

Lines changed: 1 addition & 1 deletion
@@ -184,7 +184,7 @@ xj_sub_xi(xi, xj, e) = xj .- xi
 """
     e_mul_xj(xi, xj, e) = reshape(e, (...)) .* xj
 
-Reshape `e` into broadcast compatible shape with `xj`
+Reshape `e` into a broadcast compatible shape with `xj`
 (by prepending singleton dimensions) then perform
 broadcasted multiplication.
 """

GNNlib/test/Project.toml

Lines changed: 2 additions & 0 deletions
@@ -1,10 +1,12 @@
 [deps]
 FiniteDifferences = "26cc04aa-876d-5657-8c51-4c34ba976000"
+Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
 Functors = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
 GNNGraphs = "aed8fd31-079b-4b5a-b342-a13352159b8c"
 GNNlib = "a6a84749-d869-43f8-aacc-be26a1996e48"
 GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527"
 Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6"
+MLDataDevices = "7e8f7934-dd98-4c1a-8fe8-92b47a384d40"
 MLUtils = "f1d291b0-491e-4a28-83b9-f70985020b54"
 NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
 Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"

GNNlib/test/msgpass.jl

Lines changed: 76 additions & 9 deletions
@@ -2,10 +2,11 @@
     using .TestModuleGNNlib
     #TODO test all graph types
     g = TEST_GRAPHS[1]
-    out_channel = 10
+    out_channel = size(g.x, 1)
     num_V = g.num_nodes
     num_E = g.num_edges
-
+    g = GNNGraph(g, edata = rand(Float32, size(g.x, 1), g.num_edges))
+
     @testset "propagate" begin
         function message(xi, xj, e)
             @test xi === nothing
@@ -20,7 +21,7 @@
         @testset "isolated nodes" begin
             x1 = rand(1, 6)
             g1 = GNNGraph(collect(1:5), collect(1:5), num_nodes = 6)
-            y1 = propagate((xi, xj, e) -> xj, g, +, xj = x1)
+            y1 = propagate((xi, xj, e) -> xj, g1, +, xj = x1)
             @test size(y1) == (1, 6)
         end
     end
@@ -123,14 +124,80 @@
             @test_throws AssertionError aggregate_neighbors(g, +, m)
         end
     end
+end
+
+@testitem "propagate" setup=[TestModuleGNNlib] begin
+    using .TestModuleGNNlib
+
+    @testset "copy_xj +" begin
+        for g in TEST_GRAPHS
+            f(g, x) = propagate(copy_xj, g, +, xj = x)
+            test_gradients(f, g, g.x; test_grad_f=false)
+        end
+    end
 
+    @testset "copy_xj mean" begin
+        for g in TEST_GRAPHS
+            f(g, x) = propagate(copy_xj, g, mean, xj = x)
+            test_gradients(f, g, g.x; test_grad_f=false)
+        end
+    end
+
+    @testset "e_mul_xj +" begin
+        for g in TEST_GRAPHS
+            e = rand(Float32, size(g.x, 1), g.num_edges)
+            f(g, x, e) = propagate(e_mul_xj, g, +; xj = x, e)
+            test_gradients(f, g, g.x, e; test_grad_f=false)
+        end
+    end
+
+    @testset "w_mul_xj +" begin
+        for g in TEST_GRAPHS
+            w = rand(Float32, g.num_edges)
+            function f(g, x, w)
+                g = set_edge_weight(g, w)
+                return propagate(w_mul_xj, g, +, xj = x)
+            end
+            test_gradients(f, g, g.x, w; test_grad_f=false)
+        end
+    end
 end
 
-@testitem "msgpass GPU" setup=[TestModuleGNNlib] begin
+@testitem "propagate GPU" setup=[TestModuleGNNlib] tags=[:gpu] begin
     using .TestModuleGNNlib
-    n, m = 10, 20
-    g = rand_graph(n, m, graph_type = :coo)
-    x = rand(Float32, 2, n)
-    f(g, x) = propagate(copy_xj, g, +, xj = x)
-    test_gradients(f, g, x; test_gpu=true, test_grad_f=false, compare_finite_diff=false)
+
+    @testset "copy_xj +" begin
+        for g in TEST_GRAPHS
+            f(g, x) = propagate(copy_xj, g, +, xj = x)
+            test_gradients(f, g, g.x; test_gpu=true, test_grad_f=false, compare_finite_diff=false)
+        end
+    end
+
+    @testset "copy_xj mean" begin
+        for g in TEST_GRAPHS
+            f(g, x) = propagate(copy_xj, g, mean, xj = x)
+            test_gradients(f, g, g.x; test_gpu=true, test_grad_f=false, compare_finite_diff=false)
+        end
+    end
+
+    @testset "e_mul_xj +" begin
+        for g in TEST_GRAPHS
+            e = rand(Float32, size(g.x, 1), g.num_edges)
+            f(g, x, e) = propagate(e_mul_xj, g, +; xj = x, e)
+            test_gradients(f, g, g.x, e; test_gpu=true, test_grad_f=false, compare_finite_diff=false)
+        end
+    end
+
+    @testset "w_mul_xj +" begin
+        for g in TEST_GRAPHS
+            w = rand(Float32, g.num_edges)
+            function f(g, x, w)
+                g = set_edge_weight(g, w)
+                return propagate(w_mul_xj, g, +, xj = x)
+            end
+            @test test_gradients(
+                f, g, g.x, w; test_gpu=true, test_grad_f=false, compare_finite_diff=false
+            ) broken=true
+        end
+    end
 end
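For readers skimming the new testsets, a rough CPU-only sketch of the relationship they exercise between `w_mul_xj` and `e_mul_xj` (the graph here is made up, and the final equivalence is stated as an expectation rather than something this commit asserts):

    using GNNGraphs, GNNlib
    g = rand_graph(6, 10)                           # hypothetical small graph
    x = rand(Float32, 3, g.num_nodes)
    w = rand(Float32, g.num_edges)
    gw = set_edge_weight(g, w)
    y1 = propagate(w_mul_xj, gw, +, xj = x)         # weights read from the graph
    y2 = propagate(e_mul_xj, g, +; xj = x, e = w)   # weights passed explicitly as edge features
    y1 ≈ y2                                         # expected to agree for vector weights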

GNNlib/test/test_module.jl

Lines changed: 22 additions & 19 deletions
@@ -6,7 +6,7 @@ using Pkg
 # tried to put this in __init__ but is not executed for some reason
 
 ## Uncomment below to change the default test settings
-ENV["GNN_TEST_CUDA"] = "true"
+# ENV["GNN_TEST_CUDA"] = "true"
 # ENV["GNN_TEST_AMDGPU"] = "true"
 # ENV["GNN_TEST_Metal"] = "true"
 
@@ -20,6 +20,9 @@ for (backend, deps) in deps_dict
             Pkg.add(deps)
         end
         @eval using $backend
+        if backend == :CUDA
+            @eval using cuDNN
+        end
         @eval $backend.allowscalar(false)
     end
 end
@@ -35,10 +38,9 @@ import Reexport: @reexport
 @reexport using Test, Random, Statistics
 @reexport using MLDataDevices
 using Functors: fmapstructure_with_path
-using Graphs
-using ChainRulesTestUtils, FiniteDifferences
-using Zygote
-using SparseArrays
+using FiniteDifferences: FiniteDifferences
+using Zygote: Zygote
+using Flux: Flux
 
 # from this module
 export D_IN, D_OUT, GRAPH_TYPES, TEST_GRAPHS,
@@ -60,10 +62,10 @@ function check_equal_leaves(a, b; rtol=1e-4, atol=1e-4)
     fmapstructure_with_path(a, b) do kp, x, y
         if x isa AbstractArray
             # @show kp
-            @test x ≈ y rtol=rtol atol=atol
+            @assert x ≈ y rtol=rtol atol=atol
         # elseif x isa Number
         #     @show kp
-        #     @test x ≈ y rtol=rtol atol=atol
+        #     @assert x ≈ y rtol=rtol atol=atol
         end
     end
 end
@@ -87,15 +89,15 @@ function test_gradients(
 
     ## Let's make sure first that the forward pass works.
     l = loss(f, graph, xs...)
-    @test l isa Number
+    @assert l isa Number
     if test_gpu
         gpu_dev = gpu_device(force=true)
         cpu_dev = cpu_device()
         graph_gpu = graph |> gpu_dev
         xs_gpu = xs |> gpu_dev
         f_gpu = f |> gpu_dev
         l_gpu = loss(f_gpu, graph_gpu, xs_gpu...)
-        @test l_gpu isa Number
+        @assert l_gpu isa Number
     end
 
     if test_grad_x
@@ -107,15 +109,15 @@ function test_gradients(
             f64 = f |> Flux.f64
             xs64 = xs .|> Flux.f64
             y_fd, g_fd = finitediff_withgradient((xs...) -> loss(f64, graph, xs...), xs64...)
-            @test y ≈ y_fd rtol=rtol atol=atol
+            @assert y ≈ y_fd rtol=rtol atol=atol
             check_equal_leaves(g, g_fd; rtol, atol)
         end
 
         if test_gpu
             # Zygote gradient with respect to input on GPU.
             y_gpu, g_gpu = Zygote.withgradient((xs...) -> loss(f_gpu, graph_gpu, xs...), xs_gpu...)
-            @test get_device(g_gpu) == get_device(xs_gpu)
-            @test y_gpu ≈ y rtol=rtol atol=atol
+            @assert get_device(g_gpu) == get_device(xs_gpu)
+            @assert y_gpu ≈ y rtol=rtol atol=atol
             check_equal_leaves(g_gpu |> cpu_dev, g; rtol, atol)
         end
     end
@@ -130,19 +132,20 @@ function test_gradients(
             ps, re = Flux.destructure(f64)
             y_fd, g_fd = finitediff_withgradient(ps -> loss(re(ps),graph, xs...), ps)
             g_fd = (re(g_fd[1]),)
-            @test y ≈ y_fd rtol=rtol atol=atol
+            @assert y ≈ y_fd rtol=rtol atol=atol
             check_equal_leaves(g, g_fd; rtol, atol)
         end
 
         if test_gpu
             # Zygote gradient with respect to f on GPU.
             y_gpu, g_gpu = Zygote.withgradient(f -> loss(f,graph_gpu, xs_gpu...), f_gpu)
-            # @test get_device(g_gpu) == get_device(xs_gpu)
-            @test y_gpu ≈ y rtol=rtol atol=atol
+            # @assert get_device(g_gpu) == get_device(xs_gpu)
+            @assert y_gpu ≈ y rtol=rtol atol=atol
             check_equal_leaves(g_gpu |> cpu_dev, g; rtol, atol)
         end
     end
-    return true
+    @test true # if we reach here, the test passed
+    return true # return true in case we want to put a @test_broken in the caller
 end
@@ -157,9 +160,9 @@ function generate_test_graphs(graph_type)
                                  graph_type)
 
     adj_single_vertex = [0 0 0 1
-                        0 0 0 0
-                        0 0 0 1
-                        1 0 1 0]
+                         0 0 0 0
+                         0 0 0 1
+                         1 0 1 0]
 
     g_single_vertex = GNNGraph(adj_single_vertex,
                                ndata = rand(Float32, D_IN, 4);
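Usage note: the backend selection above is driven entirely by environment variables, so a hypothetical way to opt in to the CUDA tests from a clean session (package name is illustrative) would be:

    ENV["GNN_TEST_CUDA"] = "true"   # read by test_module.jl when the suite starts
    using Pkg
    Pkg.test("GNNlib")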

GraphNeuralNetworks/test/test_module.jl

Lines changed: 26 additions & 25 deletions
@@ -3,25 +3,25 @@
 using Pkg
 
 ## Uncomment below to change the default test settings
-# ENV["GNN_TEST_CPU"] = "false"
 # ENV["GNN_TEST_CUDA"] = "true"
 # ENV["GNN_TEST_AMDGPU"] = "true"
 # ENV["GNN_TEST_Metal"] = "true"
 
-if get(ENV, "GNN_TEST_CUDA", "false") == "true"
-    Pkg.add(["CUDA", "cuDNN"])
-    using CUDA
-    CUDA.allowscalar(false)
-end
-if get(ENV, "GNN_TEST_AMDGPU", "false") == "true"
-    Pkg.add("AMDGPU")
-    using AMDGPU
-    AMDGPU.allowscalar(false)
-end
-if get(ENV, "GNN_TEST_Metal", "false") == "true"
-    Pkg.add("Metal")
-    using Metal
-    Metal.allowscalar(false)
+to_test(backend) = get(ENV, "GNN_TEST_$(backend)", "false") == "true"
+has_dependecies(pkgs) = all(pkg -> haskey(Pkg.project().dependencies, pkg), pkgs)
+deps_dict = Dict(:CUDA => ["CUDA", "cuDNN"], :AMDGPU => ["AMDGPU"], :Metal => ["Metal"])
+
+for (backend, deps) in deps_dict
+    if to_test(backend)
+        if !has_dependecies(deps)
+            Pkg.add(deps)
+        end
+        @eval using $backend
+        if backend == :CUDA
+            @eval using cuDNN
+        end
+        @eval $backend.allowscalar(false)
+    end
 end
 
 using GraphNeuralNetworks
@@ -62,10 +62,10 @@ function check_equal_leaves(a, b; rtol=1e-4, atol=1e-4)
     fmapstructure_with_path(a, b) do kp, x, y
         if x isa AbstractArray
             # @show kp
-            @test x ≈ y rtol=rtol atol=atol
+            @assert x ≈ y rtol=rtol atol=atol
         # elseif x isa Number
         #     @show kp
-        #     @test x ≈ y rtol=rtol atol=atol
+        #     @assert x ≈ y rtol=rtol atol=atol
         end
     end
 end
@@ -89,15 +89,15 @@ function test_gradients(
 
     ## Let's make sure first that the forward pass works.
     l = loss(f, graph, xs...)
-    @test l isa Number
+    @assert l isa Number
     if test_gpu
         gpu_dev = gpu_device(force=true)
         cpu_dev = cpu_device()
         graph_gpu = graph |> gpu_dev
         xs_gpu = xs |> gpu_dev
         f_gpu = f |> gpu_dev
         l_gpu = loss(f_gpu, graph_gpu, xs_gpu...)
-        @test l_gpu isa Number
+        @assert l_gpu isa Number
     end
 
     if test_grad_x
@@ -109,15 +109,15 @@ function test_gradients(
             f64 = f |> Flux.f64
             xs64 = xs .|> Flux.f64
             y_fd, g_fd = finitediff_withgradient((xs...) -> loss(f64, graph, xs...), xs64...)
-            @test y ≈ y_fd rtol=rtol atol=atol
+            @assert y ≈ y_fd rtol=rtol atol=atol
             check_equal_leaves(g, g_fd; rtol, atol)
         end
 
         if test_gpu
             # Zygote gradient with respect to input on GPU.
             y_gpu, g_gpu = Zygote.withgradient((xs...) -> loss(f_gpu, graph_gpu, xs...), xs_gpu...)
-            @test get_device(g_gpu) == get_device(xs_gpu)
-            @test y_gpu ≈ y rtol=rtol atol=atol
+            @assert get_device(g_gpu) == get_device(xs_gpu)
+            @assert y_gpu ≈ y rtol=rtol atol=atol
             check_equal_leaves(g_gpu |> cpu_dev, g; rtol, atol)
         end
     end
@@ -132,18 +132,19 @@ function test_gradients(
             ps, re = Flux.destructure(f64)
             y_fd, g_fd = finitediff_withgradient(ps -> loss(re(ps),graph, xs...), ps)
             g_fd = (re(g_fd[1]),)
-            @test y ≈ y_fd rtol=rtol atol=atol
+            @assert y ≈ y_fd rtol=rtol atol=atol
             check_equal_leaves(g, g_fd; rtol, atol)
         end
 
         if test_gpu
             # Zygote gradient with respect to f on GPU.
             y_gpu, g_gpu = Zygote.withgradient(f -> loss(f,graph_gpu, xs_gpu...), f_gpu)
-            # @test get_device(g_gpu) == get_device(xs_gpu)
-            @test y_gpu ≈ y rtol=rtol atol=atol
+            # @assert get_device(g_gpu) == get_device(xs_gpu)
+            @assert y_gpu ≈ y rtol=rtol atol=atol
             check_equal_leaves(g_gpu |> cpu_dev, g; rtol, atol)
         end
     end
+    @test true # if we reach here, the test passed
     return true
 end
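Since `test_gradients` now records `@test true` on success and still returns `true`, a caller can flag a known-failing configuration without losing the passing-side report. A hedged sketch of that caller-side pattern, mirroring the `w_mul_xj` GPU testset in msgpass.jl (the arguments are placeholders for whatever model, graph, and features the test uses):

    @test test_gradients(f, g, g.x, w; test_gpu=true, test_grad_f=false, compare_finite_diff=false) broken=true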
