
Commit eed49f6

move to testitems.jl
1 parent 59dcb86 commit eed49f6
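
Move the GraphNeuralNetworks.jl test suite to TestItems.jl: shared fixtures become a @testmodule, the per-layer @testsets in conv.jl and heteroconv.jl become standalone @testitems, and runtests.jl is reduced to a TestItemRunner.jl @run_package_tests call.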

File tree

5 files changed (+91 -94 lines)


GraphNeuralNetworks/Project.toml

Lines changed: 5 additions & 5 deletions
@@ -21,24 +21,22 @@ Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
 [weakdeps]
 CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
 
-# [extensions]
-# GraphNeuralNetworksCUDAExt = "CUDA"
-
 [compat]
 CUDA = "4, 5"
 ChainRulesCore = "1"
 Flux = "0.14"
 Functors = "0.4.1"
-Graphs = "1.12"
 GNNGraphs = "1.0"
 GNNlib = "0.2"
+Graphs = "1.12"
 LinearAlgebra = "1"
 MLUtils = "0.4"
 MacroTools = "0.5"
 NNlib = "0.9"
 Random = "1"
 Reexport = "1"
 Statistics = "1"
+TestItemRunner = "1.0.5"
 cuDNN = "1"
 julia = "1.10"
 
@@ -53,8 +51,10 @@ InlineStrings = "842dd82b-1e85-43dc-bf29-5d0ee9dffc48"
 MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458"
 SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
+TestItemRunner = "f8b46487-2199-4994-9208-9a1283c18c0a"
 Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
 cuDNN = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd"
 
 [targets]
-test = ["Test", "MLDatasets", "Adapt", "DataFrames", "InlineStrings", "SparseArrays", "Graphs", "Zygote", "FiniteDifferences", "ChainRulesTestUtils", "CUDA", "cuDNN"]
+test = ["Test", "TestItemRunner", "MLDatasets", "Adapt", "DataFrames", "InlineStrings",
+        "SparseArrays", "Graphs", "Zygote", "FiniteDifferences", "ChainRulesTestUtils", "CUDA", "cuDNN"]

GraphNeuralNetworks/test/layers/conv.jl

Lines changed: 63 additions & 38 deletions
@@ -1,8 +1,11 @@
-RTOL_LOW = 1e-2
-RTOL_HIGH = 1e-5
-ATOL_LOW = 1e-3
+@testsnippet TolSnippet begin
+    RTOL_LOW = 1e-2
+    RTOL_HIGH = 1e-5
+    ATOL_LOW = 1e-3
+end
 
-@testset "GCNConv" begin
+@testitem "GCNConv" setup=[TolSnippet, TestModule] begin
+    using .TestModule
     l = GCNConv(D_IN => D_OUT)
     for g in TEST_GRAPHS
         test_layer(l, g, rtol = RTOL_HIGH, outsize = (D_OUT, g.num_nodes))
@@ -16,7 +19,7 @@ ATOL_LOW = 1e-3
     l = GCNConv(D_IN => D_OUT, add_self_loops = false)
     test_layer(l, TEST_GRAPHS[1], rtol = RTOL_HIGH, outsize = (D_OUT, TEST_GRAPHS[1].num_nodes))
 
-    @testset "edge weights & custom normalization" begin
+    @testset "edge weights & custom normalization $GRAPH_T" for GRAPH_T in GRAPH_TYPES
         s = [2, 3, 1, 3, 1, 2]
         t = [1, 1, 2, 2, 3, 3]
         w = Float32[1, 2, 3, 4, 5, 6]
@@ -41,7 +44,7 @@ ATOL_LOW = 1e-3
     end
 
     @testset "conv_weight" begin
-        l = GraphNeuralNetworks.GCNConv(D_IN => D_OUT)
+        l = GraphNeuralNetworks.GCNConv(D_IN => D_OUT)
         w = zeros(Float32, D_OUT, D_IN)
         g1 = GNNGraph(TEST_GRAPHS[1], ndata = ones(Float32, D_IN, 4))
         @test l(g1, g1.ndata.x, conv_weight = w) == zeros(Float32, D_OUT, 4)
@@ -51,16 +54,16 @@ ATOL_LOW = 1e-3
     end
 end
 
-@testset "ChebConv" begin
+@testitem "ChebConv" setup=[TolSnippet, TestModule] begin
+    using .TestModule
     k = 2
     l = ChebConv(D_IN => D_OUT, k)
     @test size(l.weight) == (D_OUT, D_IN, k)
     @test size(l.bias) == (D_OUT,)
     @test l.k == k
     for g in TEST_GRAPHS
         g = add_self_loops(g)
-        test_layer(l, g, rtol = RTOL_HIGH, test_gpu = TEST_GPU,
-                   outsize = (D_OUT, g.num_nodes))
+        test_layer(l, g, rtol = RTOL_HIGH, outsize = (D_OUT, g.num_nodes))
     end
 
     @testset "bias=false" begin
@@ -69,7 +72,8 @@ end
     end
 end
 
-@testset "GraphConv" begin
+@testitem "GraphConv" setup=[TolSnippet, TestModule] begin
+    using .TestModule
     l = GraphConv(D_IN => D_OUT)
     for g in TEST_GRAPHS
         test_layer(l, g, rtol = RTOL_HIGH, outsize = (D_OUT, g.num_nodes))
@@ -86,7 +90,8 @@ end
     end
 end
 
-@testset "GATConv" begin
+@testitem "GATConv" setup=[TolSnippet, TestModule] begin
+    using .TestModule
     for heads in (1, 2), concat in (true, false)
         l = GATConv(D_IN => D_OUT; heads, concat, dropout=0)
         for g in TEST_GRAPHS
@@ -116,7 +121,8 @@ end
     end
 end
 
-@testset "GATv2Conv" begin
+@testitem "GATv2Conv" setup=[TolSnippet, TestModule] begin
+    using .TestModule
     for heads in (1, 2), concat in (true, false)
         l = GATv2Conv(D_IN => D_OUT, tanh; heads, concat, dropout=0)
         for g in TEST_GRAPHS
@@ -146,7 +152,8 @@ end
     end
 end
 
-@testset "GatedGraphConv" begin
+@testitem "GatedGraphConv" setup=[TolSnippet, TestModule] begin
+    using .TestModule
     num_layers = 3
     l = GatedGraphConv(D_OUT, num_layers)
     @test size(l.weight) == (D_OUT, D_OUT, num_layers)
@@ -156,14 +163,16 @@ end
     end
 end
 
-@testset "EdgeConv" begin
+@testitem "EdgeConv" setup=[TolSnippet, TestModule] begin
+    using .TestModule
     l = EdgeConv(Dense(2 * D_IN, D_OUT), aggr = +)
     for g in TEST_GRAPHS
         test_layer(l, g, rtol = RTOL_HIGH, outsize = (D_OUT, g.num_nodes))
     end
 end
 
-@testset "GINConv" begin
+@testitem "GINConv" setup=[TolSnippet, TestModule] begin
+    using .TestModule
     nn = Dense(D_IN, D_OUT)
 
     l = GINConv(nn, 0.01f0, aggr = mean)
@@ -174,7 +183,8 @@ end
     @test !in(:eps, Flux.trainable(l))
 end
 
-@testset "NNConv" begin
+@testitem "NNConv" setup=[TolSnippet, TestModule] begin
+    using .TestModule
     edim = 10
     nn = Dense(edim, D_OUT * D_IN)
 
@@ -185,7 +195,8 @@ end
     end
 end
 
-@testset "SAGEConv" begin
+@testitem "SAGEConv" setup=[TolSnippet, TestModule] begin
+    using .TestModule
     l = SAGEConv(D_IN => D_OUT)
     @test l.aggr == mean
 
@@ -195,14 +206,17 @@ end
     end
 end
 
-@testset "ResGatedGraphConv" begin
+@testitem "ResGatedGraphConv" setup=[TolSnippet, TestModule] begin
+    using .TestModule
     l = ResGatedGraphConv(D_IN => D_OUT, tanh, bias = true)
     for g in TEST_GRAPHS
         test_layer(l, g, rtol = RTOL_HIGH, outsize = (D_OUT, g.num_nodes))
     end
 end
 
-@testset "CGConv" begin
+@testitem "CGConv" setup=[TolSnippet, TestModule] begin
+    using .TestModule
+
     edim = 10
     l = CGConv((D_IN, edim) => D_OUT, tanh, residual = false, bias = true)
     for g in TEST_GRAPHS
@@ -217,7 +231,8 @@ end
     @test l1(g1, g1.ndata.x, nothing) == l1(g1).ndata.x
 end
 
-@testset "AGNNConv" begin
+@testitem "AGNNConv" setup=[TolSnippet, TestModule] begin
+    using .TestModule
     l = AGNNConv(trainable=false, add_self_loops=false)
     @test l.β == [1.0f0]
     @test l.add_self_loops == false
@@ -234,7 +249,8 @@ end
     end
 end
 
-@testset "MEGNetConv" begin
+@testitem "MEGNetConv" setup=[TolSnippet, TestModule] begin
+    using .TestModule
     l = MEGNetConv(D_IN => D_OUT, aggr = +)
     for g in TEST_GRAPHS
         g = GNNGraph(g, edata = rand(Float32, D_IN, g.num_edges))
@@ -244,7 +260,8 @@ end
     end
 end
 
-@testset "GMMConv" begin
+@testitem "GMMConv" setup=[TolSnippet, TestModule] begin
+    using .TestModule
     ein_channel = 10
     K = 5
     l = GMMConv((D_IN, ein_channel) => D_OUT, K = K)
@@ -254,7 +271,8 @@ end
     end
 end
 
-@testset "SGConv" begin
+@testitem "SGConv" setup=[TolSnippet, TestModule] begin
+    using .TestModule
     K = [1, 2, 3] # for different number of hops
     for k in K
         l = SGConv(D_IN => D_OUT, k, add_self_loops = true)
@@ -269,7 +287,8 @@ end
     end
 end
 
-@testset "TAGConv" begin
+@testitem "TAGConv" setup=[TolSnippet, TestModule] begin
+    using .TestModule
     K = [1, 2, 3]
     for k in K
         l = TAGConv(D_IN => D_OUT, k, add_self_loops = true)
@@ -284,20 +303,25 @@ end
     end
 end
 
-@testset "EGNNConv" begin
-    hin = 5
-    hout = 5
-    hidden = 5
-    l = EGNNConv(hin => hout, hidden)
-    g = rand_graph(10, 20, graph_type = GRAPH_T)
-    x = rand(Float32, D_IN, g.num_nodes)
-    h = randn(Float32, hin, g.num_nodes)
-    hnew, xnew = l(g, h, x)
-    @test size(hnew) == (hout, g.num_nodes)
-    @test size(xnew) == (D_IN, g.num_nodes)
+@testitem "EGNNConv" setup=[TolSnippet, TestModule] begin
+    using .TestModule
+    #TODO test gradient
+    @testset "EGNNConv $GRAPH_T" for GRAPH_T in GRAPH_TYPES
+        hin = 5
+        hout = 5
+        hidden = 5
+        l = EGNNConv(hin => hout, hidden)
+        g = rand_graph(10, 20, graph_type = GRAPH_T)
+        x = rand(Float32, D_IN, g.num_nodes)
+        h = randn(Float32, hin, g.num_nodes)
+        hnew, xnew = l(g, h, x)
+        @test size(hnew) == (hout, g.num_nodes)
+        @test size(xnew) == (D_IN, g.num_nodes)
+    end
 end
 
-@testset "TransformerConv" begin
+@testitem "TransformerConv" setup=[TolSnippet, TestModule] begin
+    using .TestModule
     ein = 2
     heads = 3
     # used like in Kool et al., 2019
@@ -306,7 +330,7 @@ end
                         batch_norm = false)
     # batch_norm=false here for tests to pass; true in paper
     for g in TEST_GRAPHS
-        g = GNNGraph(g, ndata = rand(Float32, D_IN * heads, g.num_nodes), graph_type = GRAPH_T)
+        g = GNNGraph(g, ndata = rand(Float32, D_IN * heads, g.num_nodes))
         test_layer(l, g, rtol = RTOL_LOW,
                    exclude_grad_fields = [:negative_slope],
                    outsize = (D_IN * heads, g.num_nodes))
@@ -331,7 +355,8 @@ end
     end
 end
 
-@testset "DConv" begin
+@testitem "DConv" setup=[TolSnippet, TestModule] begin
+    using .TestModule
     K = [1, 2, 3] # for different number of hops
     for k in K
         l = DConv(D_IN => D_OUT, k)
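
The migration above is mechanical: every top-level @testset becomes a @testitem. TestItems.jl compiles each item into its own module, so nothing leaks in from runtests.jl; shared tolerances (TolSnippet) and fixtures (TestModule) must be requested explicitly via setup. A minimal sketch of the shape, with a hypothetical layer name standing in for GCNConv and friends:

    @testitem "SomeConv" setup=[TolSnippet, TestModule] begin
        using .TestModule              # brings in D_IN, D_OUT, TEST_GRAPHS, test_layer, ...
        l = SomeConv(D_IN => D_OUT)    # hypothetical layer, for illustration only
        for g in TEST_GRAPHS           # fixtures now span :coo, :dense and :sparse graphs
            test_layer(l, g, rtol = RTOL_HIGH, outsize = (D_OUT, g.num_nodes))
        end
    end

Since the GRAPH_T global is gone, items that genuinely depend on the graph format (the GCNConv edge-weight tests and EGNNConv above) now loop over GRAPH_TYPES themselves.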

GraphNeuralNetworks/test/layers/heteroconv.jl

Lines changed: 2 additions & 1 deletion
@@ -1,4 +1,5 @@
-@testset "HeteroGraphConv" begin
+@testitem "HeteroGraphConv" setup=[TestModule] begin
+    using .TestModule
     d, n = 3, 5
     g = rand_bipartite_heterograph((n, 2*n), 15)
     hg = rand_bipartite_heterograph((2,3), 6)
GraphNeuralNetworks/test/runtests.jl

Lines changed: 2 additions & 49 deletions

@@ -1,50 +1,3 @@
-using CUDA
-using GraphNeuralNetworks
-using GNNGraphs: sort_edge_index
-using GNNGraphs: getn, getdata
-using Functors
-using Flux
-using Flux: gpu
-using LinearAlgebra, Statistics, Random
-using NNlib
-import MLUtils
-using SparseArrays
-using Graphs
-using Zygote
-using Test
-using MLDatasets
-using InlineStrings # not used but with the import we test #98 and #104
+using TestItemRunner
 
-CUDA.allowscalar(false)
-
-const ACUMatrix{T} = Union{CuMatrix{T}, CUDA.CUSPARSE.CuSparseMatrix{T}}
-
-ENV["DATADEPS_ALWAYS_ACCEPT"] = true # for MLDatasets
-
-include("test_utils.jl")
-
-tests = [
-    # "layers/basic",
-    "layers/conv",
-    # "layers/heteroconv",
-    # "layers/temporalconv",
-    # "layers/pool",
-    # "examples/node_classification_cora",
-    # "samplers"
-]
-
-!CUDA.functional() && @warn("CUDA unavailable, not testing GPU support")
-
-# @testset "GraphNeuralNetworks: graph format $graph_type" for graph_type in (:coo, :dense, :sparse)
-for graph_type in (:coo, :dense, :sparse)
-
-    @info "Testing graph format :$graph_type"
-    global GRAPH_T = graph_type
-    global TEST_GPU = CUDA.functional() && (GRAPH_T != :sparse)
-    global TEST_GRAPHS = generate_test_graphs(GRAPH_T)
-
-    @testset "$t" for t in tests
-        startswith(t, "examples") && GRAPH_T == :dense && continue # not testing :dense since it causes OutOfMemory on github's CI
-        include("$t.jl")
-    end
-end
+@run_package_tests
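
runtests.jl shrinks to the TestItemRunner.jl entry point: @run_package_tests scans the test directory for @testitem definitions and runs each one in isolation, replacing the old hand-maintained tests list and graph-format loop. Items can also be run selectively during development; a sketch, assuming TestItemRunner's documented filter keyword:

    using TestItemRunner
    # run only the items defined in conv.jl, skip everything else
    @run_package_tests filter = ti -> endswith(ti.filename, "conv.jl")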

GraphNeuralNetworks/test/test_utils.jl renamed to GraphNeuralNetworks/test/test_module.jl

Lines changed: 19 additions & 1 deletion
@@ -1,6 +1,16 @@
+@testmodule TestModule begin
+
+using GraphNeuralNetworks
+using Test
+using Statistics, Random
+using Flux, Functors
 using ChainRulesTestUtils, FiniteDifferences, Zygote, Adapt, CUDA
 CUDA.allowscalar(false)
 
+export Flux, gradient, Dense, Chain, relu # from other packages
+export mean, randn
+export D_IN, D_OUT, test_layer, ngradient, GRAPH_TYPES, TEST_GRAPHS
+
 const D_IN = 3
 const D_OUT = 5
 
@@ -27,7 +37,7 @@ end
 function test_layer(l, g::GNNGraph; atol = 1e-5, rtol = 1e-5,
                     exclude_grad_fields = [],
                     verbose = false,
-                    test_gpu = TEST_GPU,
+                    test_gpu = false,
                     outsize = nothing,
                     outtype = :node)
 
@@ -251,3 +261,11 @@ function generate_test_graphs(graph_type)
 
     return (g1, g_single_vertex)
 end
+
+GRAPH_TYPES = [:coo, :dense, :sparse]
+TEST_GRAPHS = [generate_test_graphs(:coo)...,
+               generate_test_graphs(:dense)...,
+               generate_test_graphs(:sparse)...]
+
+end # testmodule
+
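
The rename from test_utils.jl to test_module.jl matches the file's new role: a @testmodule is evaluated once per test run and shared by every item that lists it in setup, while a @testsnippet (like TolSnippet in conv.jl) is re-evaluated inside each item. A small sketch of the distinction, with hypothetical names:

    @testmodule Fixtures begin
        export BIG
        BIG = rand(100, 100)   # built once, reused by every item below
    end

    @testsnippet Tols begin
        RTOL = 1e-5            # re-run inside each item that lists it
    end

    @testitem "uses both" setup=[Fixtures, Tols] begin
        using .Fixtures        # unqualified BIG works because Fixtures exports it
        @test size(BIG) == (100, 100)
        @test RTOL < 1e-3
    end

The explicit export lines added here (Flux, mean, D_IN, test_layer, and the rest) are what let each test item get by with a single using .TestModule. Note also that test_gpu now defaults to false: the TEST_GPU global from the old runtests.jl no longer exists.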
