Commit 0390aee

fix gpu tests
1 parent a371365 commit 0390aee

File tree

3 files changed (+36, -24 lines):
- GraphNeuralNetworks/test/examples/node_classification_cora.jl
- GraphNeuralNetworks/test/layers/conv.jl
- GraphNeuralNetworks/test/test_module.jl


GraphNeuralNetworks/test/examples/node_classification_cora.jl

Lines changed: 20 additions & 18 deletions
@@ -1,14 +1,11 @@
-@testitem "Training Example" setup=[TestModule] begin
-    using .TestModule
+@testmodule TrainingExampleModule begin
     using Flux
     using Flux: onecold, onehotbatch
     using Flux.Losses: logitcrossentropy
     using GraphNeuralNetworks
     using MLDatasets: Cora
     using Statistics, Random
-    using CUDA
-    CUDA.allowscalar(false)
-
+
     function eval_loss_accuracy(X, y, ids, model, g)
         ŷ = model(g, X)
         l = logitcrossentropy(ŷ[:, ids], y[:, ids])
@@ -21,27 +18,27 @@
         η = 5.0f-3         # learning rate
         epochs = 10        # number of epochs
         seed = 17          # set seed > 0 for reproducibility
-        usecuda = false    # if true use cuda (if available)
+        use_gpu = false    # if true use gpu (if available)
         nhidden = 64       # dimension of hidden features
     end

     function train(Layer; verbose = false, kws...)
         args = Args(; kws...)
         args.seed > 0 && Random.seed!(args.seed)

-        if args.usecuda && CUDA.functional()
-            device = Flux.gpu
-            args.seed > 0 && CUDA.seed!(args.seed)
+        if args.use_gpu
+            device = gpu_device(force=true)
+            Random.seed!(default_device_rng(device))
         else
-            device = Flux.cpu
+            device = cpu_device()
         end

         # LOAD DATA
         dataset = Cora()
         classes = dataset.metadata["classes"]
         g = mldataset2gnngraph(dataset) |> device
         X = g.ndata.features
-        y = onehotbatch(g.ndata.targets |> cpu, classes) |> device # remove when https://github.com/FluxML/Flux.jl/pull/1959 tagged
+        y = onehotbatch(g.ndata.targets, classes)
         train_mask = g.ndata.train_mask
         test_mask = g.ndata.test_mask
         ytrain = y[:, train_mask]
@@ -78,7 +75,7 @@
         return train_res, test_res
     end

-    function train_many(; usecuda = false)
+    function train_many(; use_gpu = false)
         for (layer, Layer) in [
             ("GCNConv", (nin, nout) -> GCNConv(nin => nout, relu)),
             ("ResGatedGraphConv", (nin, nout) -> ResGatedGraphConv(nin => nout, relu)),
@@ -96,16 +93,21 @@
             ## ("EdgeConv",(nin, nout) -> EdgeConv(Dense(2nin, nout, relu))), # Fits the training set but does not generalize well
         ]
             @show layer
-            @time train_res, test_res = train(Layer; usecuda, verbose = false)
+            @time train_res, test_res = train(Layer; use_gpu, verbose = false)
             # @show train_res, test_res
             @test train_res.acc > 94
             @test test_res.acc > 69
         end
     end
+end # module
+
+@testitem "training example" setup=[TrainingExampleModule] begin
+    using .TrainingExampleModule
+    train_many()
+end

-train_many(usecuda = false)
-# #TODO
-# if TEST_GPU
-#     train_many(usecuda = true)
-# end
+@testitem "training example GPU" setup=[TrainingExampleModule] begin
+    using .TrainingExampleModule
+    train_many(use_gpu = true)
 end
+
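The substance of this diff is twofold: the training example is split into a reusable @testmodule plus separate CPU and GPU @testitems, and the hard-coded CUDA.jl handling (Flux.gpu, CUDA.seed!, CUDA.allowscalar) is replaced by Flux's backend-agnostic device API. Below is a minimal sketch of the new device-selection pattern; it assumes gpu_device/cpu_device as re-exported by recent Flux versions and default_device_rng from MLDataDevices.jl, and the helper name select_device is ours, not part of the test suite.

    using Flux, MLDataDevices, Random

    # Hypothetical helper mirroring the branch in `train` above
    # (here the seed is passed explicitly, which the diff leaves implicit).
    function select_device(use_gpu::Bool; seed::Int = 17)
        if use_gpu
            # force=true errors if no functional GPU backend (CUDA.jl, AMDGPU.jl,
            # Metal.jl) is loaded, instead of silently falling back to the CPU.
            device = gpu_device(force = true)
            # Seed the RNG that lives on the selected device.
            seed > 0 && Random.seed!(default_device_rng(device), seed)
        else
            device = cpu_device()
            seed > 0 && Random.seed!(seed)
        end
        return device
    end

    x = rand(Float32, 4, 4) |> select_device(false)  # devices act as functors that move data

The force=true choice suits a test suite: the GPU testitem should fail loudly when no GPU backend is available rather than quietly pass on the CPU.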
GraphNeuralNetworks/test/layers/conv.jl

Lines changed: 14 additions & 4 deletions
@@ -252,6 +252,16 @@ end
     end
 end

+@testitem "EdgeConv GPU" setup=[TolSnippet, TestModule] tags=[:gpu] begin
+    using .TestModule
+    l = EdgeConv(Dense(2 * D_IN, D_OUT), aggr = +)
+    for g in TEST_GRAPHS
+        g.graph isa AbstractSparseMatrix && continue
+        @test size(l(g, g.x)) == (D_OUT, g.num_nodes)
+        test_gradients(l, g, g.x, rtol = RTOL_HIGH, test_gpu = true, compare_finite_diff = false)
+    end
+end
+
 @testitem "GINConv" setup=[TolSnippet, TestModule] begin
     using .TestModule
     nn = Dense(D_IN, D_OUT)
@@ -289,17 +299,17 @@ end
     end
 end

-@testitem "NNConv" setup=[TolSnippet, TestModule] begin
+@testitem "NNConv GPU" setup=[TolSnippet, TestModule] tags=[:gpu] begin
     using .TestModule
     edim = 10
     nn = Dense(edim, D_OUT * D_IN)
-
     l = NNConv(D_IN => D_OUT, nn, tanh, bias = true, aggr = +)
     for g in TEST_GRAPHS
+        g.graph isa AbstractSparseMatrix && continue
         g = GNNGraph(g, edata = rand(Float32, edim, g.num_edges))
         @test size(l(g, g.x, g.e)) == (D_OUT, g.num_nodes)
-        test_gradients(l, g, g.x, g.e, rtol = RTOL_HIGH)
-    end
+        test_gradients(l, g, g.x, g.e, rtol = RTOL_HIGH, test_gpu = true, compare_finite_diff = false)
+    end
 end

 @testitem "SAGEConv" setup=[TolSnippet, TestModule] begin
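Both new testitems carry tags=[:gpu], so GPU coverage can be selected or excluded at run time, and both skip graphs backed by a sparse adjacency matrix (the `g.graph isa AbstractSparseMatrix && continue` guard), presumably because sparse adjacency support is incomplete on the GPU backends. A sketch of selecting tagged testitems, assuming ReTestItems.jl's runtests with its tags keyword and predicate form; the test path is illustrative:

    using ReTestItems

    # Run only the :gpu-tagged testitems, e.g. "EdgeConv GPU" and "NNConv GPU".
    runtests("GraphNeuralNetworks/test"; tags = [:gpu])

    # Or run everything except them via a shouldrun predicate.
    runtests(ti -> !(:gpu in ti.tags), "GraphNeuralNetworks/test")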

GraphNeuralNetworks/test/test_module.jl

Lines changed: 2 additions & 2 deletions
@@ -11,7 +11,7 @@ using Pkg

 ## Uncomment below to change the default test settings
 # ENV["GNN_TEST_CPU"] = "false"
-# ENV["GNN_TEST_CUDA"] = "true"
+ENV["GNN_TEST_CUDA"] = "true"
 # ENV["GNN_TEST_AMDGPU"] = "true"
 # ENV["GNN_TEST_Metal"] = "true"

@@ -24,7 +24,7 @@ if get(ENV, "GNN_TEST_AMDGPU", "false") == "true"
     Pkg.add("AMDGPU")
     using AMDGPU
 end
-if get(ENV, "GNN_TEST_Metal", "true") == "true"
+if get(ENV, "GNN_TEST_Metal", "false") == "true"
     Pkg.add("Metal")
     using Metal
 end
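These flags gate which GPU packages test_module.jl installs and loads at test time: the commit turns CUDA testing on and fixes the Metal default, which previously read "true" and so installed Metal even when the flag was unset. The gating pattern, generalized in a short sketch (maybe_load_backend is a hypothetical name, not part of the test suite):

    using Pkg

    # Install and load a GPU backend only when its ENV flag is explicitly "true".
    function maybe_load_backend(flag::AbstractString, pkg::AbstractString)
        if get(ENV, flag, "false") == "true"
            Pkg.add(pkg)                      # fetch the backend on demand
            Base.require(Main, Symbol(pkg))   # load it; unlike `using`, exports stay unimported
        end
    end

    maybe_load_backend("GNN_TEST_CUDA", "CUDA")
    maybe_load_backend("GNN_TEST_AMDGPU", "AMDGPU")
    maybe_load_backend("GNN_TEST_Metal", "Metal")

A single run can then opt in from the shell without editing the file, e.g. GNN_TEST_CUDA=true julia --project test/runtests.jl (path illustrative).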
