Commit d8d69ec

add buildkite workflow (#526)
1 parent 301602e commit d8d69ec

File tree: 11 files changed, +119 −54 lines

.buildkite/pipeline.yml

Lines changed: 25 additions & 0 deletions
@@ -0,0 +1,25 @@
+steps:
+  - label: "GNN CUDA"
+    plugins:
+      - JuliaCI/julia#v1:
+          version: "1"
+      - JuliaCI/julia-coverage#v1:
+          dirs:
+            - GraphNeuralNetworks/src
+    command: |
+      julia --color=yes --depwarn=yes --project=GraphNeuralNetworks/test -e '
+        import Pkg
+        dev_pkgs = Pkg.PackageSpec[]
+        for pkg in ("GNNGraphs", "GNNlib", "GraphNeuralNetworks")
+            push!(dev_pkgs, Pkg.PackageSpec(path=pkg));
+        end
+        Pkg.develop(dev_pkgs)
+        Pkg.add(["CUDA", "cuDNN"])
+        Pkg.test("GraphNeuralNetworks")'
+    agents:
+      queue: "juliagpu"
+      cuda: "*"
+    env:
+      GNN_TEST_CUDA: "true"
+      GNN_TEST_CPU: "false"
+    timeout_in_minutes: 60
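For local debugging, the same test run can be approximated outside Buildkite. A minimal sketch, assuming the monorepo root as the working directory and a CUDA-capable machine; it mirrors the pipeline's `command` and `env` blocks and is not part of the commit:

```julia
import Pkg

# Mirror the pipeline: activate the test environment and dev the local packages.
Pkg.activate("GraphNeuralNetworks/test")
dev_pkgs = Pkg.PackageSpec[]
for pkg in ("GNNGraphs", "GNNlib", "GraphNeuralNetworks")
    push!(dev_pkgs, Pkg.PackageSpec(path = pkg))
end
Pkg.develop(dev_pkgs)
Pkg.add(["CUDA", "cuDNN"])

# Mirror the pipeline's env block so only CUDA tests run.
ENV["GNN_TEST_CUDA"] = "true"
ENV["GNN_TEST_CPU"] = "false"

Pkg.test("GraphNeuralNetworks")
```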

GNNGraphs/test/test_utils.jl

Lines changed: 6 additions & 0 deletions
@@ -1,6 +1,12 @@
 using ChainRulesTestUtils, FiniteDifferences, Zygote, Adapt, CUDA
 CUDA.allowscalar(false)

+# Using this until https://github.com/JuliaDiff/FiniteDifferences.jl/issues/188 is fixed
+function FiniteDifferences.to_vec(x::Integer)
+    Integer_from_vec(v) = x
+    return Int[x], Integer_from_vec
+end
+
 function ngradient(f, x...)
     fdm = central_fdm(5, 1)
     return FiniteDifferences.grad(fdm, f, x...)
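For context, `to_vec` is how FiniteDifferences flattens a value into a vector of numbers it can perturb; the overload above round-trips an `Integer` unchanged, so integer arguments behave as constants instead of erroring. A minimal sketch with a hypothetical function `f`:

```julia
using FiniteDifferences

fdm = central_fdm(5, 1)
f(x, n) = sum(x .^ n)   # n is an integer exponent, not a real parameter
x = rand(3)

# Without the to_vec overload this call would fail on the Integer argument;
# with it, the Integer slot is effectively held constant.
gx, _ = FiniteDifferences.grad(fdm, f, x, 2)
gx ≈ 2 .* x   # true
```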

GraphNeuralNetworks/Project.toml

Lines changed: 0 additions & 27 deletions
@@ -6,10 +6,8 @@ version = "0.6.22"
 [deps]
 ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
 Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
-Functors = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
 GNNGraphs = "aed8fd31-079b-4b5a-b342-a13352159b8c"
 GNNlib = "a6a84749-d869-43f8-aacc-be26a1996e48"
-Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 MLUtils = "f1d291b0-491e-4a28-83b9-f70985020b54"
 MacroTools = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
@@ -18,41 +16,16 @@ Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 Reexport = "189a3867-3050-52da-a836-e630ba90ab69"
 Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"

-[weakdeps]
-CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
-
 [compat]
-CUDA = "4, 5"
 ChainRulesCore = "1"
 Flux = "0.14"
-Functors = "0.4.1"
 GNNGraphs = "1.0"
 GNNlib = "0.2"
-Graphs = "1.12"
 LinearAlgebra = "1"
 MLUtils = "0.4"
 MacroTools = "0.5"
 NNlib = "0.9"
-Pkg = "1"
 Random = "1"
 Reexport = "1"
 Statistics = "1"
-TestItemRunner = "1.0.5"
 julia = "1.10"
-
-[extras]
-Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
-ChainRulesTestUtils = "cdddcdb0-9152-4a09-a978-84456f9df70a"
-DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
-FiniteDifferences = "26cc04aa-876d-5657-8c51-4c34ba976000"
-Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6"
-InlineStrings = "842dd82b-1e85-43dc-bf29-5d0ee9dffc48"
-MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458"
-Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
-SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
-Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
-TestItemRunner = "f8b46487-2199-4994-9208-9a1283c18c0a"
-Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
-
-[targets]
-test = ["Test", "TestItemRunner", "Pkg", "MLDatasets", "Adapt", "DataFrames", "InlineStrings", "SparseArrays", "Graphs", "Zygote", "FiniteDifferences", "ChainRulesTestUtils"]

GraphNeuralNetworks/docs/src/dev.md

Lines changed: 38 additions & 6 deletions
@@ -1,30 +1,62 @@
 # Developer Notes

-## Develop and Managing the Monorepo
-
-### Development Enviroment
+## Development Environment
 GraphNeuralNetworks.jl is a package hosted in a monorepo that contains multiple packages.
-The GraphNeuralNetworks.jl package depends on GNNGraphs.jl, also hosted in the same monorepo.
+The GraphNeuralNetworks.jl package depends on GNNGraphs.jl and GNNlib.jl, also hosted in the same monorepo.
+In order to work against the local copies, `dev` them into the environment:

 ```julia
 pkg> activate .

 pkg> dev ./GNNGraphs
 ```

-### Add a New Layer
+## Add a New Layer

 To add a new graph convolutional layer and make it available in both the Flux-based frontend (GraphNeuralNetworks.jl) and the Lux-based frontend (GNNLux), you need to:
+
 1. Add the functional version to GNNlib
 2. Add the stateful version to GraphNeuralNetworks
 3. Add the stateless version to GNNLux
 4. Add the layer to the table in docs/api/conv.md

-### Versions and Tagging
+We suggest starting by implementing a self-contained Flux layer in GraphNeuralNetworks.jl and adding the corresponding tests. Then, once everything is working, move the implementation of the forward pass to GNNlib.jl. At that point, you can add the stateless version to GNNLux.jl.
+
+It can also be convenient to use the `@structdef` macro from [AutoStructs.jl](https://github.com/CarloLucibello/AutoStructs.jl) to generate the struct and its constructor simultaneously.
+For example, the Flux implementation of the [`MEGNetConv`](@ref) layer can be written as follows:
+
+```julia
+using Flux, GraphNeuralNetworks, AutoStructs
+
+@structdef function MEGNetConv(ch::Pair{Int, Int}; aggr = mean)
+    nin, nout = ch
+    ϕe = Chain(Dense(3nin, nout, relu),
+               Dense(nout, nout))
+
+    ϕv = Chain(Dense(nin + nout, nout, relu),
+               Dense(nout, nout))
+
+    return MEGNetConv(ϕe, ϕv, aggr)
+end
+
+Flux.@layer MEGNetConv
+
+function (l::MEGNetConv)(g::AbstractGraph, x::AbstractMatrix, e::AbstractMatrix)
+    ē = apply_edges(g, xi = x, xj = x, e = e) do xi, xj, e
+        l.ϕe(vcat(xi, xj, e))
+    end
+    xᵉ = aggregate_neighbors(g, l.aggr, ē)
+    x̄ = l.ϕv(vcat(x, xᵉ))
+    return x̄, ē
+end
+```
+
+## Versions and Tagging
 Each PR should update the version number in the Project.toml file of each involved package, as required by semantic versioning. For instance, when adding new features GNNGraphs could move from "1.17.5" to "1.18.0-DEV". The "DEV" suffix is removed when the package is tagged and released. Also pay attention to updating
 the compat bounds, e.g. GraphNeuralNetworks might require a newer version of GNNGraphs.

-### Generate Documentation Locally
+## Generate Documentation Locally
 For generating the documentation locally
 ```
 cd docs
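Returning to the `MEGNetConv` example in the dev.md diff above, a quick sanity check of the forward pass might look like this (a sketch; graph size and feature dimensions are arbitrary):

```julia
using Flux, GraphNeuralNetworks
using Statistics: mean

nin, nout = 3, 16
g = rand_graph(10, 30)              # toy graph: 10 nodes, 30 edges
x = rand(Float32, nin, 10)          # node features
e = rand(Float32, nin, 30)          # edge features (ϕe sees 3nin rows after vcat)
l = MEGNetConv(nin => nout)
x̄, ē = l(g, x, e)
size(x̄), size(ē)                   # ((16, 10), (16, 30))
```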

GraphNeuralNetworks/src/GraphNeuralNetworks.jl

Lines changed: 1 addition & 3 deletions
@@ -6,11 +6,9 @@ using Flux
 using Flux: glorot_uniform, leakyrelu, GRUCell, batch
 using MacroTools: @forward
 using NNlib
-using NNlib: scatter, gather
 using ChainRulesCore
-using Reexport
+using Reexport: @reexport
 using MLUtils: zeros_like
-using Graphs: Graphs

 using GNNGraphs: COO_T, ADJMAT_T, SPARSE_T,
     check_num_nodes, check_num_edges,

GraphNeuralNetworks/test/Project.toml

Lines changed: 15 additions & 0 deletions
@@ -0,0 +1,15 @@
+[deps]
+ChainRulesTestUtils = "cdddcdb0-9152-4a09-a978-84456f9df70a"
+FiniteDifferences = "26cc04aa-876d-5657-8c51-4c34ba976000"
+Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
+Functors = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
+GraphNeuralNetworks = "cffab07f-9bc2-4db1-8861-388f63bf7694"
+Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6"
+MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458"
+Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
+Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
+SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
+Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
+Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
+TestItemRunner = "f8b46487-2199-4994-9208-9a1283c18c0a"
+Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"

GraphNeuralNetworks/test/examples/node_classification_cora.jl

Lines changed: 2 additions & 1 deletion
@@ -108,7 +108,8 @@ end # module
     TrainingExampleModule.train_many()
 end

-@testitem "training example GPU" setup=[TrainingExampleModule] tags=[:gpu] begin
+@testitem "training example GPU" setup=[TestModule, TrainingExampleModule] tags=[:gpu] begin
+    using .TestModule # for loading gpu packages
     using .TrainingExampleModule
     TrainingExampleModule.train_many(use_gpu = true)
 end
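For readers unfamiliar with the mechanism: `setup=[...]` gives a `@testitem` access to shared `@testmodule` blocks, which must then be brought into scope with `using`. Roughly, following TestItems.jl conventions (a sketch; the real `TestModule` lives in test_module.jl):

```julia
using TestItems

# Defined once, evaluated once, shared by every test item that lists it.
@testmodule TestModule begin
    using Statistics: mean
    export mean
end

@testitem "uses shared setup" setup=[TestModule] begin
    using .TestModule
    @test mean([1, 2, 3]) == 2
end
```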

GraphNeuralNetworks/test/layers/conv.jl

Lines changed: 5 additions & 0 deletions
@@ -101,6 +101,7 @@
 k = 2
 l = ChebConv(D_IN => D_OUT, k)
 for g in TEST_GRAPHS
+    has_isolated_nodes(g) && continue
     g.graph isa AbstractSparseMatrix && continue
     @test size(l(g, g.x)) == (D_OUT, g.num_nodes)
     test_gradients(l, g, g.x, rtol = RTOL_LOW, test_gpu = true, compare_finite_diff = false)
@@ -377,6 +378,7 @@
 l = CGConv((D_IN, edim) => D_OUT, tanh, residual = false, bias = true)
 for g in TEST_GRAPHS
     g.graph isa AbstractSparseMatrix && continue
+    g = GNNGraph(g, edata = rand(Float32, edim, g.num_edges))
     @test size(l(g, g.x, g.e)) == (D_OUT, g.num_nodes)
     test_gradients(l, g, g.x, g.e, rtol = RTOL_HIGH, test_gpu = true, compare_finite_diff = false)
 end
@@ -432,6 +434,7 @@
 l = MEGNetConv(D_IN => D_OUT, aggr = +)
 for g in TEST_GRAPHS
     g.graph isa AbstractSparseMatrix && continue
+    g = GNNGraph(g, edata = rand(Float32, D_IN, g.num_edges))
     y = l(g, g.x, g.e)
     @test size(y[1]) == (D_OUT, g.num_nodes)
     @test size(y[2]) == (D_OUT, g.num_edges)
@@ -462,6 +465,7 @@
 l = GMMConv((D_IN, ein_channel) => D_OUT, K = K)
 for g in TEST_GRAPHS
     g.graph isa AbstractSparseMatrix && continue
+    g = GNNGraph(g, edata = rand(Float32, ein_channel, g.num_edges))
     y = l(g, g.x, g.e)
     test_gradients(l, g, g.x, g.e, rtol = RTOL_HIGH, test_gpu = true, compare_finite_diff = false)
 end
@@ -585,6 +589,7 @@
     bias_qkv = true)
 for g in TEST_GRAPHS
     g.graph isa AbstractSparseMatrix && continue
+    g = GNNGraph(g, edata = rand(Float32, ein, g.num_edges))
     @test size(l(g, g.x, g.e)) == (D_IN * heads, g.num_nodes)
     test_gradients(l, g, g.x, g.e, rtol = RTOL_LOW, test_gpu = true, compare_finite_diff = false)
 end
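The recurring `g = GNNGraph(g, edata = ...)` line rebuilds a test graph with fresh edge features of the dimension the layer under test expects. In isolation the pattern looks like this (a sketch with arbitrary sizes):

```julia
using GraphNeuralNetworks

g = rand_graph(6, 12)       # toy graph: 6 nodes, 12 edges
edim = 3                    # edge feature size a layer expects
g = GNNGraph(g, edata = rand(Float32, edim, g.num_edges))
size(g.e)                   # (3, 12): the new edge features, accessible as g.e
```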

GraphNeuralNetworks/test/runtests.jl

Lines changed: 5 additions & 1 deletion
@@ -4,12 +4,16 @@ using TestItemRunner
 ## for how to run the tests within VS Code.
 ## See test_module.jl for the test infrastructure.

-## Uncomment below to change the default test settings
+## Uncomment below and in test_module.jl to change the default test settings
 # ENV["GNN_TEST_CPU"] = "false"
 # ENV["GNN_TEST_CUDA"] = "true"
 # ENV["GNN_TEST_AMDGPU"] = "true"
 # ENV["GNN_TEST_Metal"] = "true"

+# The only available tag at the moment is :gpu.
+# Tests not tagged with :gpu are considered to be CPU tests.
+# Tests tagged with :gpu should run on all GPU backends.
+
 if get(ENV, "GNN_TEST_CPU", "true") == "true"
     @run_package_tests filter = ti -> :gpu ∉ ti.tags
 end
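The hunk shows only the CPU branch; presumably the GPU branches select the complementary set of test items. A sketch of what such a branch could look like (an assumption, not shown in this diff):

```julia
# Hypothetical CUDA branch: run only the items tagged :gpu.
if get(ENV, "GNN_TEST_CUDA", "false") == "true"
    @run_package_tests filter = ti -> :gpu ∈ ti.tags
end
```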

GraphNeuralNetworks/test/test_module.jl

Lines changed: 9 additions & 6 deletions
@@ -3,9 +3,11 @@
 using GraphNeuralNetworks
 using Test
 using Statistics, Random
-using Flux, Functors
+using Flux
+using Functors: fmapstructure_with_path
 using Graphs
-using ChainRulesTestUtils, FiniteDifferences, Zygote, Adapt
+using ChainRulesTestUtils, FiniteDifferences
+using Zygote
 using SparseArrays
 using Pkg

@@ -16,22 +18,23 @@ using Pkg
 # ENV["GNN_TEST_Metal"] = "true"

 if get(ENV, "GNN_TEST_CUDA", "false") == "true"
-    Pkg.add(["CUDA", "cuDNN"])
+    # Pkg.add(["CUDA", "cuDNN"])
     using CUDA
     CUDA.allowscalar(false)
 end
 if get(ENV, "GNN_TEST_AMDGPU", "false") == "true"
-    Pkg.add("AMDGPU")
+    # Pkg.add("AMDGPU")
     using AMDGPU
     AMDGPU.allowscalar(false)
 end
 if get(ENV, "GNN_TEST_Metal", "false") == "true"
-    Pkg.add("Metal")
+    # Pkg.add("Metal")
     using Metal
     Metal.allowscalar(false)
 end

-# from Bse
+
+# from Base
 export mean, randn, SparseArrays, AbstractSparseMatrix

 # from other packages
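The narrowed import `using Functors: fmapstructure_with_path` pulls in just one helper, presumably used for structural comparisons in the tests. A minimal sketch of what it does (hypothetical usage, assuming Functors ≥ 0.4.9, where the `*_with_path` variants exist):

```julia
using Flux
using Functors: fmapstructure_with_path

m = Chain(Dense(2 => 3, relu), Dense(3 => 1))

# Map every leaf together with its key path, returning a plain nested
# structure rather than reconstructed layers; here we record array shapes.
shapes = fmapstructure_with_path(m) do path, x
    x isa AbstractArray ? size(x) : x
end
```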
