2 changes: 1 addition & 1 deletion .github/workflows/test_GraphNeuralNetworks.yml
@@ -15,7 +15,7 @@ jobs:
matrix:
version:
- '1.10' # Replace this with the minimum Julia version that your package supports.
# - '1' # '1' will automatically expand to the latest stable 1.x release of Julia.
- '1' # '1' will automatically expand to the latest stable 1.x release of Julia.
# - 'pre'
os:
- ubuntu-latest
221 changes: 0 additions & 221 deletions GNNGraphs/test/test_utils.jl
@@ -5,224 +5,3 @@ function ngradient(f, x...)
fdm = central_fdm(5, 1)
return FiniteDifferences.grad(fdm, f, x...)
end

const rule_config = Zygote.ZygoteRuleConfig()

# Using this until https://github.com/JuliaDiff/FiniteDifferences.jl/issues/188 is fixed
function FiniteDifferences.to_vec(x::Integer)
Integer_from_vec(v) = x
return Int[x], Integer_from_vec
end

# Test that the forward pass on cpu and gpu gives the same result.
# Also tests gradients on cpu and gpu, comparing them with
# finite-difference methods.
# Tests gradients with respect to the layer weights and to the input.
# If `g` has edge features, it is assumed that the layer can
# use them in the forward pass as `l(g, x, e)`;
# the gradient with respect to `e` is then tested as well.
# A usage sketch is given after this function definition.
function test_layer(l, g::GNNGraph; atol = 1e-5, rtol = 1e-5,
exclude_grad_fields = [],
verbose = false,
test_gpu = TEST_GPU,
outsize = nothing,
outtype = :node)

# TODO these give errors, probably some bugs in ChainRulesTestUtils
# test_rrule(rule_config, x -> l(g, x), x; rrule_f=rrule_via_ad, check_inferred=false)
# test_rrule(rule_config, l -> l(g, x), l; rrule_f=rrule_via_ad, check_inferred=false)

isnothing(node_features(g)) && error("Please add node data to the input graph")
fdm = central_fdm(5, 1)

x = node_features(g)
e = edge_features(g)
use_edge_feat = !isnothing(e)

x64, e64, l64, g64 = to64.([x, e, l, g]) # needed for accurate FiniteDifferences' grad
xgpu, egpu, lgpu, ggpu = gpu.([x, e, l, g])

f(l, g::GNNGraph) = l(g)
f(l, g::GNNGraph, x, e) = use_edge_feat ? l(g, x, e) : l(g, x)

loss(l, g::GNNGraph) =
if outtype == :node
sum(node_features(f(l, g)))
elseif outtype == :edge
sum(edge_features(f(l, g)))
elseif outtype == :graph
sum(graph_features(f(l, g)))
elseif outtype == :node_edge
gnew = f(l, g)
sum(node_features(gnew)) + sum(edge_features(gnew))
end

function loss(l, g::GNNGraph, x, e)
y = f(l, g, x, e)
if outtype == :node_edge
return sum(y[1]) + sum(y[2])
else
return sum(y)
end
end

# TEST OUTPUT
y = f(l, g, x, e)
if outtype == :node_edge
@assert y isa Tuple
@test eltype(y[1]) == eltype(x)
@test eltype(y[2]) == eltype(e)
@test all(isfinite, y[1])
@test all(isfinite, y[2])
if !isnothing(outsize)
@test size(y[1]) == outsize[1]
@test size(y[2]) == outsize[2]
end
else
@test eltype(y) == eltype(x)
@test all(isfinite, y)
if !isnothing(outsize)
@test size(y) == outsize
end
end

# test same output on different graph formats
gcoo = GNNGraph(g, graph_type = :coo)
ycoo = f(l, gcoo, x, e)
if outtype == :node_edge
@test ycoo[1] ≈ y[1]
@test ycoo[2] ≈ y[2]
else
@test ycoo ≈ y
end

g′ = f(l, g)
if outtype == :node
@test g′.ndata.x ≈ y
elseif outtype == :edge
@test g′.edata.e ≈ y
elseif outtype == :graph
@test g′.gdata.u ≈ y
elseif outtype == :node_edge
@test g′.ndata.x ≈ y[1]
@test g′.edata.e ≈ y[2]
else
@error "wrong outtype $outtype"
end
if test_gpu
ygpu = f(lgpu, ggpu, xgpu, egpu)
if outtype == :node_edge
@test ygpu[1] isa CuArray
@test eltype(ygpu[1]) == eltype(xgpu)
@test Array(ygpu[1]) ≈ y[1]
@test ygpu[2] isa CuArray
@test eltype(ygpu[2]) == eltype(xgpu)
@test Array(ygpu[2]) ≈ y[2]
else
@test ygpu isa CuArray
@test eltype(ygpu) == eltype(xgpu)
@test Array(ygpu) ≈ y
end
end

# TEST x INPUT GRADIENT
x̄ = gradient(x -> loss(l, g, x, e), x)[1]
x̄_fd = FiniteDifferences.grad(fdm, x64 -> loss(l64, g64, x64, e64), x64)[1]
@test eltype(x̄) == eltype(x)
@test x̄≈x̄_fd atol=atol rtol=rtol

if test_gpu
x̄gpu = gradient(xgpu -> loss(lgpu, ggpu, xgpu, egpu), xgpu)[1]
@test x̄gpu isa CuArray
@test eltype(x̄gpu) == eltype(x)
@test Array(x̄gpu)≈x̄ atol=atol rtol=rtol
end

# TEST e INPUT GRADIENT
if e !== nothing
verbose && println("Test e gradient cpu")
ē = gradient(e -> loss(l, g, x, e), e)[1]
ē_fd = FiniteDifferences.grad(fdm, e64 -> loss(l64, g64, x64, e64), e64)[1]
@test eltype(ē) == eltype(e)
@test ē≈ē_fd atol=atol rtol=rtol

if test_gpu
verbose && println("Test e gradient gpu")
ēgpu = gradient(egpu -> loss(lgpu, ggpu, xgpu, egpu), egpu)[1]
@test ēgpu isa CuArray
@test eltype(ēgpu) == eltype(ē)
@test Array(ēgpu)≈ē atol=atol rtol=rtol
end
end

# TEST LAYER GRADIENT - l(g, x, e)
l̄ = gradient(l -> loss(l, g, x, e), l)[1]
l̄_fd = FiniteDifferences.grad(fdm, l64 -> loss(l64, g64, x64, e64), l64)[1]
test_approx_structs(l, l̄, l̄_fd; atol, rtol, exclude_grad_fields, verbose)

if test_gpu
l̄gpu = gradient(lgpu -> loss(lgpu, ggpu, xgpu, egpu), lgpu)[1]
test_approx_structs(lgpu, l̄gpu, l̄; atol, rtol, exclude_grad_fields, verbose)
end

# TEST LAYER GRADIENT - l(g)
l̄ = gradient(l -> loss(l, g), l)[1]
test_approx_structs(l, l̄, l̄_fd; atol, rtol, exclude_grad_fields, verbose)

return true
end
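
For orientation, a minimal, hypothetical usage sketch of the `test_layer` helper removed above (the graph, layer, and sizes are illustrative assumptions, not taken from this PR):

# Sketch only: assumes GraphNeuralNetworks (which reexports GNNGraphs) is loaded
# and that TEST_GPU is defined in the test environment, as in the file above.
using GraphNeuralNetworks

din, dout, n, m = 3, 5, 10, 30
g = rand_graph(n, m, ndata = rand(Float32, din, n))  # random graph with node features
l = GCNConv(din => dout)

# Checks output eltype/size and compares Zygote gradients against finite differences.
test_layer(l, g; rtol = 1e-5, outsize = (dout, n), outtype = :node)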

function test_approx_structs(l, l̄, l̄fd; atol = 1e-5, rtol = 1e-5,
exclude_grad_fields = [],
verbose = false)
l̄ = l̄ isa Base.RefValue ? l̄[] : l̄ # Zygote wraps gradient of mutables in RefValue
l̄fd = l̄fd isa Base.RefValue ? l̄fd[] : l̄fd # Zygote wraps gradient of mutables in RefValue

for f in fieldnames(typeof(l))
f ∈ exclude_grad_fields && continue
verbose && println("Test gradient of field $f...")
x, g, gfd = getfield(l, f), getfield(l̄, f), getfield(l̄fd, f)
test_approx_structs(x, g, gfd; atol, rtol, exclude_grad_fields, verbose)
verbose && println("... field $f done!")
end
return true
end

function test_approx_structs(x, g::Nothing, gfd; atol, rtol, kws...)
# the finite-difference gradient has to be zero, if present
@test !(gfd isa AbstractArray) || isapprox(gfd, fill!(similar(gfd), 0); atol, rtol)
end

function test_approx_structs(x::Union{AbstractArray, Number},
g::Union{AbstractArray, Number}, gfd; atol, rtol, kws...)
@test eltype(g) == eltype(x)
if x isa CuArray
@test g isa CuArray
g = Array(g)
end
@test g≈gfd atol=atol rtol=rtol
end

"""
to32(m)

Convert the `eltype` of the model's float parameters to `Float32`.
Preserves integer arrays.
"""
to32(m) = _paramtype(Float32, m)

"""
to64(m)

Convert the `eltype` of the model's float parameters to `Float64`.
Preserves integer arrays (see the usage sketch after `_paramtype` below).
"""
to64(m) = _paramtype(Float64, m)

struct GNNEltypeAdaptor{T} end

Adapt.adapt_storage(::GNNEltypeAdaptor{T}, x::AbstractArray{<:AbstractFloat}) where T = convert(AbstractArray{T}, x)
Adapt.adapt_storage(::GNNEltypeAdaptor{T}, x::AbstractArray{<:Integer}) where T = x
Adapt.adapt_storage(::GNNEltypeAdaptor{T}, x::AbstractArray{<:Number}) where T = convert(AbstractArray{T}, x)

_paramtype(::Type{T}, m) where T = fmap(adapt(GNNEltypeAdaptor{T}()), m)
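
As a quick illustration of the removed conversion helpers above, a minimal sketch (Flux is assumed to be available in the test environment; the layer and named tuple are made up):

# Sketch only: relies on to64, GNNEltypeAdaptor, and _paramtype defined above.
using Flux

model = Dense(2 => 3)               # Flux initializes Float32 parameters by default
model64 = to64(model)               # float arrays are converted to Float64
@assert eltype(model64.weight) == Float64

# Integer arrays are preserved by the adaptor:
nt = (w = rand(Float32, 2, 2), idx = [1, 2, 3])
nt64 = to64(nt)
@assert eltype(nt64.w) == Float64 && eltype(nt64.idx) <: Integer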
1 change: 0 additions & 1 deletion GNNlib/src/layers/conv.jl
@@ -253,7 +253,6 @@ function gin_conv(l, g::AbstractGNNGraph, x)
xj, xi = expand_srcdst(g, x)

m = propagate(copy_xj, g, l.aggr, xj = xj)

return l.nn((1 .+ ofeltype(xi, l.ϵ)) .* xi .+ m)
end

8 changes: 3 additions & 5 deletions GraphNeuralNetworks/Project.toml
@@ -33,28 +33,26 @@ LinearAlgebra = "1"
MLUtils = "0.4"
MacroTools = "0.5"
NNlib = "0.9"
Pkg = "1"
Random = "1"
Reexport = "1"
Statistics = "1"
TestItemRunner = "1.0.5"
cuDNN = "1"
julia = "1.10"

[extras]
Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
ChainRulesTestUtils = "cdddcdb0-9152-4a09-a978-84456f9df70a"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
FiniteDifferences = "26cc04aa-876d-5657-8c51-4c34ba976000"
Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6"
InlineStrings = "842dd82b-1e85-43dc-bf29-5d0ee9dffc48"
MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458"
Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
TestItemRunner = "f8b46487-2199-4994-9208-9a1283c18c0a"
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
cuDNN = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd"

[targets]
test = ["Test", "TestItemRunner", "MLDatasets", "Adapt", "DataFrames", "InlineStrings",
"SparseArrays", "Graphs", "Zygote", "FiniteDifferences", "ChainRulesTestUtils", "CUDA", "cuDNN"]
test = ["Test", "TestItemRunner", "Pkg", "MLDatasets", "Adapt", "DataFrames", "InlineStrings", "SparseArrays", "Graphs", "Zygote", "FiniteDifferences", "ChainRulesTestUtils"]
40 changes: 22 additions & 18 deletions GraphNeuralNetworks/test/examples/node_classification_cora.jl
@@ -1,14 +1,13 @@
@testitem "Training Example" setup=[TestModule] begin
using .TestModule
@testmodule TrainingExampleModule begin
using Flux
using Flux: onecold, onehotbatch
using Flux.Losses: logitcrossentropy
using GraphNeuralNetworks
using MLDatasets: Cora
using Statistics, Random
using CUDA
CUDA.allowscalar(false)

using Test
ENV["DATADEPS_ALWAYS_ACCEPT"] = "true"
function eval_loss_accuracy(X, y, ids, model, g)
ŷ = model(g, X)
l = logitcrossentropy(ŷ[:, ids], y[:, ids])
@@ -21,27 +20,27 @@
η = 5.0f-3 # learning rate
epochs = 10 # number of epochs
seed = 17 # set seed > 0 for reproducibility
usecuda = false # if true use cuda (if available)
use_gpu = false # if true use gpu (if available)
nhidden = 64 # dimension of hidden features
end

function train(Layer; verbose = false, kws...)
args = Args(; kws...)
args.seed > 0 && Random.seed!(args.seed)

if args.usecuda && CUDA.functional()
device = Flux.gpu
args.seed > 0 && CUDA.seed!(args.seed)
if args.use_gpu
device = gpu_device(force=true)
Random.seed!(default_device_rng(device))
else
device = Flux.cpu
device = cpu_device()
end

# LOAD DATA
dataset = Cora()
classes = dataset.metadata["classes"]
g = mldataset2gnngraph(dataset) |> device
X = g.ndata.features
y = onehotbatch(g.ndata.targets |> cpu, classes) |> device # remove when https://github.com/FluxML/Flux.jl/pull/1959 tagged
y = onehotbatch(g.ndata.targets, classes)
train_mask = g.ndata.train_mask
test_mask = g.ndata.test_mask
ytrain = y[:, train_mask]
@@ -78,7 +77,7 @@
return train_res, test_res
end

function train_many(; usecuda = false)
function train_many(; use_gpu = false)
for (layer, Layer) in [
("GCNConv", (nin, nout) -> GCNConv(nin => nout, relu)),
("ResGatedGraphConv", (nin, nout) -> ResGatedGraphConv(nin => nout, relu)),
@@ -96,16 +95,21 @@
## ("EdgeConv",(nin, nout) -> EdgeConv(Dense(2nin, nout, relu))), # Fits the training set but does not generalize well
]
@show layer
@time train_res, test_res = train(Layer; usecuda, verbose = false)
@time train_res, test_res = train(Layer; use_gpu, verbose = false)
# @show train_res, test_res
@test train_res.acc > 94
@test test_res.acc > 69
end
end
end # module

@testitem "training example" setup=[TrainingExampleModule] begin
using .TrainingExampleModule
TrainingExampleModule.train_many()
end

train_many(usecuda = false)
# #TODO
# if TEST_GPU
# train_many(usecuda = true)
# end
@testitem "training example GPU" setup=[TrainingExampleModule] tags=[:gpu] begin
using .TrainingExampleModule
TrainingExampleModule.train_many(use_gpu = true)
end
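
For context on the new tags=[:gpu] annotation: with TestItemRunner, tagged test items are typically selected through a filter in the package test runner; an illustrative sketch (not part of this PR):

# Sketch only: run just the GPU-tagged test items.
using TestItemRunner

@run_package_tests filter = ti -> (:gpu in ti.tags)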

4 changes: 2 additions & 2 deletions GraphNeuralNetworks/test/layers/basic.jl
@@ -18,7 +18,7 @@

Flux.testmode!(gnn)

test_layer(gnn, g, rtol = 1e-5, exclude_grad_fields = [:μ, :σ²])
test_gradients(gnn, g, x, rtol = 1e-5)

@testset "constructor with names" begin
m = GNNChain(GCNConv(din => d),
@@ -53,7 +53,7 @@

Flux.trainmode!(gnn)

test_layer(gnn, g, rtol = 1e-4, atol=1e-4, exclude_grad_fields = [:μ, :σ²])
test_gradients(gnn, g, x, rtol = 1e-4, atol=1e-4)
end
end
