
Commit 530457c

move GraphNeuralNetworks.jl to TestItems.jl (#517)
1 parent e1910ca commit 530457c

File tree

10 files changed: +568 −599 lines

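The commit moves the GraphNeuralNetworks.jl test suite from plain `@testset` files to TestItems.jl test items executed by TestItemRunner.jl. A minimal sketch of that pattern, assuming a shared setup module named TestModule; the setup file itself is not part of this excerpt, so the names and contents below are illustrative only:

    # test/runtests.jl: discover and run every @testitem defined under test/
    using TestItemRunner
    @run_package_tests

    # Hypothetical shared setup, referenced by test items via `setup = [TestModule]`.
    using TestItems
    @testsetup module TestModule
        using Test, Flux, Graphs, GraphNeuralNetworks
        export GRAPH_TYPES, test_layer
        const GRAPH_TYPES = [:coo, :dense, :sparse]  # graph backends the items loop over (illustrative)
        test_layer(layer, g; kws...) = nothing       # stand-in for the repo's gradient-checking helper
    end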

GraphNeuralNetworks/Project.toml

Lines changed: 5 additions & 5 deletions
@@ -21,24 +21,22 @@ Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
 [weakdeps]
 CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
 
-# [extensions]
-# GraphNeuralNetworksCUDAExt = "CUDA"
-
 [compat]
 CUDA = "4, 5"
 ChainRulesCore = "1"
 Flux = "0.14"
 Functors = "0.4.1"
-Graphs = "1.12"
 GNNGraphs = "1.0"
 GNNlib = "0.2"
+Graphs = "1.12"
 LinearAlgebra = "1"
 MLUtils = "0.4"
 MacroTools = "0.5"
 NNlib = "0.9"
 Random = "1"
 Reexport = "1"
 Statistics = "1"
+TestItemRunner = "1.0.5"
 cuDNN = "1"
 julia = "1.10"
 
@@ -53,8 +51,10 @@ InlineStrings = "842dd82b-1e85-43dc-bf29-5d0ee9dffc48"
 MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458"
 SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
+TestItemRunner = "f8b46487-2199-4994-9208-9a1283c18c0a"
 Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
 cuDNN = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd"
 
 [targets]
-test = ["Test", "MLDatasets", "Adapt", "DataFrames", "InlineStrings", "SparseArrays", "Graphs", "Zygote", "FiniteDifferences", "ChainRulesTestUtils", "CUDA", "cuDNN"]
+test = ["Test", "TestItemRunner", "MLDatasets", "Adapt", "DataFrames", "InlineStrings",
+        "SparseArrays", "Graphs", "Zygote", "FiniteDifferences", "ChainRulesTestUtils", "CUDA", "cuDNN"]
Lines changed: 95 additions & 91 deletions
@@ -1,107 +1,111 @@
-using Flux
-using Flux: onecold, onehotbatch
-using Flux.Losses: logitcrossentropy
-using GraphNeuralNetworks
-using MLDatasets: Cora
-using Statistics, Random
-using CUDA
-CUDA.allowscalar(false)
+@testitem "Training Example" setup=[TestModule] begin
+    using .TestModule
+    using Flux
+    using Flux: onecold, onehotbatch
+    using Flux.Losses: logitcrossentropy
+    using GraphNeuralNetworks
+    using MLDatasets: Cora
+    using Statistics, Random
+    using CUDA
+    CUDA.allowscalar(false)
 
-function eval_loss_accuracy(X, y, ids, model, g)
-    ŷ = model(g, X)
-    l = logitcrossentropy(ŷ[:, ids], y[:, ids])
-    acc = mean(onecold(ŷ[:, ids]) .== onecold(y[:, ids]))
-    return (loss = round(l, digits = 4), acc = round(acc * 100, digits = 2))
-end
+    function eval_loss_accuracy(X, y, ids, model, g)
+        ŷ = model(g, X)
+        l = logitcrossentropy(ŷ[:, ids], y[:, ids])
+        acc = mean(onecold(ŷ[:, ids]) .== onecold(y[:, ids]))
+        return (loss = round(l, digits = 4), acc = round(acc * 100, digits = 2))
+    end
 
-# arguments for the `train` function
-Base.@kwdef mutable struct Args
-    η = 5.0f-3          # learning rate
-    epochs = 10         # number of epochs
-    seed = 17           # set seed > 0 for reproducibility
-    usecuda = false     # if true use cuda (if available)
-    nhidden = 64        # dimension of hidden features
-end
+    # arguments for the `train` function
+    Base.@kwdef mutable struct Args
+        η = 5.0f-3          # learning rate
+        epochs = 10         # number of epochs
+        seed = 17           # set seed > 0 for reproducibility
+        usecuda = false     # if true use cuda (if available)
+        nhidden = 64        # dimension of hidden features
+    end
 
-function train(Layer; verbose = false, kws...)
-    args = Args(; kws...)
-    args.seed > 0 && Random.seed!(args.seed)
+    function train(Layer; verbose = false, kws...)
+        args = Args(; kws...)
+        args.seed > 0 && Random.seed!(args.seed)
 
-    if args.usecuda && CUDA.functional()
-        device = Flux.gpu
-        args.seed > 0 && CUDA.seed!(args.seed)
-    else
-        device = Flux.cpu
-    end
+        if args.usecuda && CUDA.functional()
+            device = Flux.gpu
+            args.seed > 0 && CUDA.seed!(args.seed)
+        else
+            device = Flux.cpu
+        end
 
-    # LOAD DATA
-    dataset = Cora()
-    classes = dataset.metadata["classes"]
-    g = mldataset2gnngraph(dataset) |> device
-    X = g.ndata.features
-    y = onehotbatch(g.ndata.targets |> cpu, classes) |> device # remove when https://github.com/FluxML/Flux.jl/pull/1959 tagged
-    train_mask = g.ndata.train_mask
-    test_mask = g.ndata.test_mask
-    ytrain = y[:, train_mask]
+        # LOAD DATA
+        dataset = Cora()
+        classes = dataset.metadata["classes"]
+        g = mldataset2gnngraph(dataset) |> device
+        X = g.ndata.features
+        y = onehotbatch(g.ndata.targets |> cpu, classes) |> device # remove when https://github.com/FluxML/Flux.jl/pull/1959 tagged
+        train_mask = g.ndata.train_mask
+        test_mask = g.ndata.test_mask
+        ytrain = y[:, train_mask]
 
-    nin, nhidden, nout = size(X, 1), args.nhidden, length(classes)
+        nin, nhidden, nout = size(X, 1), args.nhidden, length(classes)
 
-    ## DEFINE MODEL
-    model = GNNChain(Layer(nin, nhidden),
-                     # Dropout(0.5),
-                     Layer(nhidden, nhidden),
-                     Dense(nhidden, nout)) |> device
+        ## DEFINE MODEL
+        model = GNNChain(Layer(nin, nhidden),
+                         # Dropout(0.5),
+                         Layer(nhidden, nhidden),
+                         Dense(nhidden, nout)) |> device
 
-    opt = Flux.setup(Adam(args.η), model)
+        opt = Flux.setup(Adam(args.η), model)
 
-    ## TRAINING
-    function report(epoch)
-        train = eval_loss_accuracy(X, y, train_mask, model, g)
-        test = eval_loss_accuracy(X, y, test_mask, model, g)
-        println("Epoch: $epoch Train: $(train) Test: $(test)")
-    end
+        ## TRAINING
+        function report(epoch)
+            train = eval_loss_accuracy(X, y, train_mask, model, g)
+            test = eval_loss_accuracy(X, y, test_mask, model, g)
+            println("Epoch: $epoch Train: $(train) Test: $(test)")
+        end
 
-    verbose && report(0)
-    @time for epoch in 1:(args.epochs)
-        grad = Flux.gradient(model) do model
-            ŷ = model(g, X)
-            logitcrossentropy(ŷ[:, train_mask], ytrain)
+        verbose && report(0)
+        @time for epoch in 1:(args.epochs)
+            grad = Flux.gradient(model) do model
+                ŷ = model(g, X)
+                logitcrossentropy(ŷ[:, train_mask], ytrain)
+            end
+            Flux.update!(opt, model, grad[1])
+            verbose && report(epoch)
         end
-        Flux.update!(opt, model, grad[1])
-        verbose && report(epoch)
-    end
 
-    train_res = eval_loss_accuracy(X, y, train_mask, model, g)
-    test_res = eval_loss_accuracy(X, y, test_mask, model, g)
-    return train_res, test_res
-end
+        train_res = eval_loss_accuracy(X, y, train_mask, model, g)
+        test_res = eval_loss_accuracy(X, y, test_mask, model, g)
+        return train_res, test_res
+    end
 
-function train_many(; usecuda = false)
-    for (layer, Layer) in [
-        ("GCNConv", (nin, nout) -> GCNConv(nin => nout, relu)),
-        ("ResGatedGraphConv", (nin, nout) -> ResGatedGraphConv(nin => nout, relu)),
-        ("GraphConv", (nin, nout) -> GraphConv(nin => nout, relu, aggr = mean)),
-        ("SAGEConv", (nin, nout) -> SAGEConv(nin => nout, relu)),
-        ("GATConv", (nin, nout) -> GATConv(nin => nout, relu)),
-        ("GINConv", (nin, nout) -> GINConv(Dense(nin, nout, relu), 0.01, aggr = mean)),
-        ("TransformerConv",
-         (nin, nout) -> TransformerConv(nin => nout, concat = false,
-                                        add_self_loops = true, root_weight = false,
-                                        heads = 2)),
-        ## ("ChebConv", (nin, nout) -> ChebConv(nin => nout, 2)), # not working on gpu
-        ## ("NNConv", (nin, nout) -> NNConv(nin => nout)), # needs edge features
-        ## ("GatedGraphConv", (nin, nout) -> GatedGraphConv(nout, 2)), # needs nin = nout
-        ## ("EdgeConv",(nin, nout) -> EdgeConv(Dense(2nin, nout, relu))), # Fits the training set but does not generalize well
-    ]
-        @show layer
-        @time train_res, test_res = train(Layer; usecuda, verbose = false)
-        # @show train_res, test_res
-        @test train_res.acc > 94
-        @test test_res.acc > 69
+    function train_many(; usecuda = false)
+        for (layer, Layer) in [
+            ("GCNConv", (nin, nout) -> GCNConv(nin => nout, relu)),
+            ("ResGatedGraphConv", (nin, nout) -> ResGatedGraphConv(nin => nout, relu)),
+            ("GraphConv", (nin, nout) -> GraphConv(nin => nout, relu, aggr = mean)),
+            ("SAGEConv", (nin, nout) -> SAGEConv(nin => nout, relu)),
+            ("GATConv", (nin, nout) -> GATConv(nin => nout, relu)),
+            ("GINConv", (nin, nout) -> GINConv(Dense(nin, nout, relu), 0.01, aggr = mean)),
+            ("TransformerConv",
+             (nin, nout) -> TransformerConv(nin => nout, concat = false,
+                                            add_self_loops = true, root_weight = false,
+                                            heads = 2)),
+            ## ("ChebConv", (nin, nout) -> ChebConv(nin => nout, 2)), # not working on gpu
+            ## ("NNConv", (nin, nout) -> NNConv(nin => nout)), # needs edge features
+            ## ("GatedGraphConv", (nin, nout) -> GatedGraphConv(nout, 2)), # needs nin = nout
+            ## ("EdgeConv",(nin, nout) -> EdgeConv(Dense(2nin, nout, relu))), # Fits the training set but does not generalize well
+        ]
+            @show layer
+            @time train_res, test_res = train(Layer; usecuda, verbose = false)
+            # @show train_res, test_res
+            @test train_res.acc > 94
+            @test test_res.acc > 69
+        end
     end
-end
 
-train_many(usecuda = false)
-if TEST_GPU
-    train_many(usecuda = true)
+    train_many(usecuda = false)
+    # #TODO
+    # if TEST_GPU
+    #     train_many(usecuda = true)
+    # end
 end
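Once the example is wrapped in a `@testitem`, it can be executed on its own rather than through the whole suite. A sketch of one way to do that with TestItemRunner's filter option, assuming the item name used above; treat the exact predicate fields as an assumption to check against the TestItemRunner docs:

    using TestItemRunner
    # Run only the "Training Example" item; the filter predicate receives each
    # test item's metadata (name, file, tags) and keeps the ones returning true.
    @run_package_tests filter = ti -> ti.name == "Training Example"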

GraphNeuralNetworks/test/layers/basic.jl

Lines changed: 54 additions & 49 deletions
@@ -1,57 +1,60 @@
-@testset "GNNChain" begin
-    n, din, d, dout = 10, 3, 4, 2
-    deg = 4
-
-    g = GNNGraph(random_regular_graph(n, deg),
-                 graph_type = GRAPH_T,
-                 ndata = randn(Float32, din, n))
-    x = g.ndata.x
-
-    gnn = GNNChain(GCNConv(din => d),
-                   LayerNorm(d),
-                   x -> tanh.(x),
-                   GraphConv(d => d, tanh),
-                   Dropout(0.5),
-                   Dense(d, dout))
-
-    testmode!(gnn)
-
-    test_layer(gnn, g, rtol = 1e-5, exclude_grad_fields = [:μ, :σ²])
-
-    @testset "constructor with names" begin
-        m = GNNChain(GCNConv(din => d),
-                     LayerNorm(d),
-                     x -> tanh.(x),
-                     Dense(d, dout))
-
-        m2 = GNNChain(enc = m,
-                      dec = DotDecoder())
-
-        @test m2[:enc] === m
-        @test m2(g, x) == m2[:dec](g, m2[:enc](g, x))
-    end
-
-    @testset "constructor with vector" begin
-        m = GNNChain(GCNConv(din => d),
-                     LayerNorm(d),
-                     x -> tanh.(x),
-                     Dense(d, dout))
-        m2 = GNNChain([m.layers...])
-        @test m2(g, x) == m(g, x)
-    end
-
-    @testset "Parallel" begin
-        AddResidual(l) = Parallel(+, identity, l)
-
-        gnn = GNNChain(GraphConv(din => d, tanh),
-                       LayerNorm(d),
-                       AddResidual(GraphConv(d => d, tanh)),
-                       BatchNorm(d),
-                       Dense(d, dout))
-
-        trainmode!(gnn)
-
-        test_layer(gnn, g, rtol = 1e-4, atol=1e-4, exclude_grad_fields = [:μ, :σ²])
+@testitem "GNNChain" setup=[TestModule] begin
+    using .TestModule
+    @testset "GNNChain $GRAPH_T" for GRAPH_T in GRAPH_TYPES
+        n, din, d, dout = 10, 3, 4, 2
+        deg = 4
+
+        g = GNNGraph(random_regular_graph(n, deg),
+                     graph_type = GRAPH_T,
+                     ndata = randn(Float32, din, n))
+        x = g.ndata.x
+
+        gnn = GNNChain(GCNConv(din => d),
+                       LayerNorm(d),
+                       x -> tanh.(x),
+                       GraphConv(d => d, tanh),
+                       Dropout(0.5),
+                       Dense(d, dout))
+
+        Flux.testmode!(gnn)
+
+        test_layer(gnn, g, rtol = 1e-5, exclude_grad_fields = [:μ, :σ²])
+
+        @testset "constructor with names" begin
+            m = GNNChain(GCNConv(din => d),
+                         LayerNorm(d),
+                         x -> tanh.(x),
+                         Dense(d, dout))
+
+            m2 = GNNChain(enc = m,
+                          dec = DotDecoder())
+
+            @test m2[:enc] === m
+            @test m2(g, x) == m2[:dec](g, m2[:enc](g, x))
+        end
+
+        @testset "constructor with vector" begin
+            m = GNNChain(GCNConv(din => d),
+                         LayerNorm(d),
+                         x -> tanh.(x),
+                         Dense(d, dout))
+            m2 = GNNChain([m.layers...])
+            @test m2(g, x) == m(g, x)
+        end
+
+        @testset "Parallel" begin
+            AddResidual(l) = Parallel(+, identity, l)
+
+            gnn = GNNChain(GraphConv(din => d, tanh),
+                           LayerNorm(d),
+                           AddResidual(GraphConv(d => d, tanh)),
+                           BatchNorm(d),
+                           Dense(d, dout))
+
+            Flux.trainmode!(gnn)
+
+            test_layer(gnn, g, rtol = 1e-4, atol=1e-4, exclude_grad_fields = [:μ, :σ²])
+        end
     end
 end
 
     @testset "Only graph input" begin
@@ -67,27 +70,29 @@
     end
 end
 
-@testset "WithGraph" begin
+@testitem "WithGraph" setup=[TestModule] begin
+    using .TestModule
     x = rand(Float32, 2, 3)
     g = GNNGraph([1, 2, 3], [2, 3, 1], ndata = x)
     model = SAGEConv(2 => 3)
     wg = WithGraph(model, g)
     # No need to feed the graph to `wg`
     @test wg(x) == model(g, x)
-    @test Flux.params(wg) == Flux.params(model)
+    @test Flux.trainables(wg) == Flux.trainables(model)
     g2 = GNNGraph([1, 1, 2, 3], [2, 4, 1, 1])
     x2 = rand(Float32, 2, 4)
     # WithGraph will ignore the internal graph if fed with a new one.
     @test wg(g2, x2) == model(g2, x2)
 
     wg = WithGraph(model, g, traingraph = false)
-    @test length(Flux.params(wg)) == length(Flux.params(model))
+    @test length(Flux.trainables(wg)) == length(Flux.trainables(model))
 
     wg = WithGraph(model, g, traingraph = true)
-    @test length(Flux.params(wg)) == length(Flux.params(model)) + length(Flux.params(g))
+    @test length(Flux.trainables(wg)) == length(Flux.trainables(model)) + length(Flux.trainables(g))
 end
 
-@testset "Flux restructure" begin
+@testitem "Flux.restructure" setup=[TestModule] begin
+    using .TestModule
     chain = GNNChain(GraphConv(2 => 2))
     params, restructure = Flux.destructure(chain)
     @test restructure(params) isa GNNChain
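Alongside the `@testitem` conversion, these hunks replace the deprecated implicit-parameter API `Flux.params` with `Flux.trainables`, which returns a vector of trainable parameter arrays. A minimal sketch of the behaviour the updated assertions rely on, using a plain Dense layer instead of the test's WithGraph wrapper (illustrative, not taken from the commit):

    using Flux

    m = Dense(2 => 3)
    ps = Flux.trainables(m)            # vector of trainable arrays
    @assert length(ps) == 2            # Dense carries two: weight and bias
    @assert any(p === m.weight for p in ps)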
