Commit 14ab918 ("initial commit")
Parent: 7a8b79b

22 files changed: +2590 additions, -11 deletions

Project.toml

Lines changed: 25 additions & 3 deletions
```diff
@@ -1,13 +1,35 @@
 name = "GraphNeuralNetworks"
-uuid = "b59e9cf9-92e9-4253-8889-0456869b82e0"
-authors = ["Carlo Lucibello <[email protected]> and contributors"]
+uuid = "cffab07f-9bc2-4db1-8861-388f63bf7694"
+authors = ["Carlo Lucibello and contributors"]
 version = "0.1.0"
 
+[deps]
+CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
+ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
+DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
+Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
+KrylovKit = "0b1a1467-8014-51b9-945f-bf0ae24f4b77"
+LightGraphs = "093fc24a-ae57-5d10-9952-331d41423f4d"
+LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
+NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
+NNlibCUDA = "a00861dc-f156-4864-bf3c-e6376f28a68d"
+Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
+SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
+Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
+
 [compat]
+CUDA = "3.3"
+DataStructures = "0.18"
+Flux = "0.12"
+KrylovKit = "0.5"
+LightGraphs = "1.3"
+NNlib = "0.7"
+NNlibCUDA = "0.1"
 julia = "1.6"
 
 [extras]
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
+Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
 
 [targets]
-test = ["Test"]
+test = ["Test", "Zygote"]
```

README.md

Lines changed: 62 additions & 5 deletions
```diff
@@ -1,6 +1,63 @@
-# GraphNeuralNetworks
+# GraphNeuralNetworks.jl
 
-[![Stable](https://img.shields.io/badge/docs-stable-blue.svg)](https://CarloLucibello.github.io/GraphNeuralNetworks.jl/stable)
-[![Dev](https://img.shields.io/badge/docs-dev-blue.svg)](https://CarloLucibello.github.io/GraphNeuralNetworks.jl/dev)
-[![Build Status](https://github.com/CarloLucibello/GraphNeuralNetworks.jl/workflows/CI/badge.svg)](https://github.com/CarloLucibello/GraphNeuralNetworks.jl/actions)
-[![Coverage](https://codecov.io/gh/CarloLucibello/GraphNeuralNetworks.jl/branch/master/graph/badge.svg)](https://codecov.io/gh/CarloLucibello/GraphNeuralNetworks.jl)
+[![](https://img.shields.io/badge/docs-stable-blue.svg)](https://fluxml.ai/GraphNeuralNetworks.jl/stable)
+[![](https://img.shields.io/badge/docs-dev-blue.svg)](https://fluxml.ai/GraphNeuralNetworks.jl/dev)
+![](https://github.com/CarloLucibello/GraphNeuralNetworks.jl/actions/workflows/ci.yml/badge.svg)
+[![codecov](https://codecov.io/gh/FluxML/GraphNeuralNetworks.jl/branch/master/graph/badge.svg)](https://codecov.io/gh/CarloLucibello/GraphNeuralNetworks.jl)
+
+GraphNeuralNetworks is a geometric deep learning library for [Flux](https://github.com/FluxML/Flux.jl). The library aims to be compatible with packages from the [JuliaGraphs](https://github.com/JuliaGraphs) ecosystem and supports GPU acceleration through [CUDA](https://github.com/JuliaGPU/CUDA.jl). The message passing scheme is implemented as a flexible framework and fused with the Graph Network block scheme. GraphNeuralNetworks is also compatible with other packages that compose with Flux.
+
+Suggestions, issues and pull requests are welcome.
+
+## Installation
+
+```julia
+]add GraphNeuralNetworks
+```
+
+## Features
+
+* Extends the Flux deep learning framework in Julia and is compatible with Flux layers.
+* CUDA GPU support via CUDA.jl.
+* Integration with the existing JuliaGraphs ecosystem.
+* Support for generic graph neural network architectures.
+* Support for variable graph inputs, useful when diverse graph structures are fed to the same model.
+
+## Featured Graphs
+
+GraphNeuralNetworks handles graph data (the topology plus node/edge/global features)
+thanks to the type `FeaturedGraph`.
+
+A `FeaturedGraph` can be constructed out of
+adjacency matrices, adjacency lists, LightGraphs' types...
+
+```julia
+fg = FeaturedGraph(adj_list)
+```
+
+## Graph convolutional layers
+
+Construct a GCN layer:
+
+```julia
+GCNConv([fg,] input_dim => output_dim, relu)
+```
+
+## Use it as you use Flux
+
+```julia
+model = Chain(GCNConv(fg, 1024 => 512, relu),
+              Dropout(0.5),
+              GCNConv(fg, 512 => 128),
+              Dense(128, 10))
+
+## Loss
+loss(x, y) = logitcrossentropy(model(x), y)
+accuracy(x, y) = mean(onecold(model(x)) .== onecold(y))
+
+## Training
+ps = Flux.params(model)
+train_data = [(train_X, train_y)]
+opt = ADAM(0.01)
+evalcb() = @show(accuracy(train_X, train_y))
+
+Flux.train!(loss, ps, train_data, opt, cb=throttle(evalcb, 10))
+```
```
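For reference, the README snippets above combine into the runnable sketch below. This is not code from the commit itself: it only reuses the `FeaturedGraph(g, nf=...)` constructor and the `layer(fg)` call that appear in `perf/perf.jl` further down, plus the `GCNConv(input_dim => output_dim, relu)` form from the README; the graph and feature sizes are arbitrary.

```julia
using Flux, GraphNeuralNetworks, LightGraphs

g = erdos_renyi(100, 0.1)        # a random LightGraphs graph
X = randn(Float32, 16, nv(g))    # 16 features per node, nodes as columns
fg = FeaturedGraph(g, nf=X)      # wrap topology + node features together

layer = GCNConv(16 => 8, relu)   # the fg-less constructor from the README
out = layer(fg)                  # apply the layer, as perf/perf.jl does
```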

perf/perf.jl

Lines changed: 101 additions & 0 deletions
```diff
@@ -0,0 +1,101 @@
+using Flux, GraphNeuralNetworks, LightGraphs, BenchmarkTools, CUDA
+using DataFrames, Statistics, JLD2, SparseArrays
+
+# Treat a failed (missing) benchmark as infinitely slow / free in ratios.
+BenchmarkTools.ratio(::Missing, x) = Inf
+BenchmarkTools.ratio(x, ::Missing) = 0.0
+BenchmarkTools.ratio(::Missing, ::Missing) = missing
+
+adjlist(g) = [neighbors(g, i) for i in 1:nv(g)]
+
+function run_single_benchmark(N, c, D, CONV; gtype=:lightgraph)
+    @assert gtype ∈ [:lightgraph, :adjlist, :dense, :sparse]
+    g = erdos_renyi(N, c / (N-1), seed=17)
+    if gtype == :adjlist
+        g = adjlist(g)
+    elseif gtype == :dense
+        g = Array(adjacency_matrix(g))
+    elseif gtype == :sparse
+        g = adjacency_matrix(g) # LightGraphs returns sparse adjacency matrices
+    end
+
+    res = Dict()
+    X = randn(Float32, D, N)
+    res["FG"] = @benchmark FeaturedGraph($g, nf=$X)
+
+    fg = FeaturedGraph(g, nf=X)
+    fg_gpu = fg |> gpu
+
+    m = CONV(D => D)
+    m_gpu = m |> gpu
+    try
+        res["CPU"] = @benchmark $m($fg)
+    catch
+        res["CPU"] = missing
+    end
+
+    try
+        res["GPU"] = @benchmark CUDA.@sync($m_gpu($fg_gpu)) teardown=(GC.gc(); CUDA.reclaim())
+    catch
+        res["GPU"] = missing
+    end
+
+    return res
+end
+
+"""
+    run_benchmarks(;
+        Ns = [10, 100, 1000, 10000],
+        c = 6,
+        D = 100)
+
+Benchmark GNN layers on Erdős-Rényi random graphs
+with mean degree `c` and number of nodes in the list `Ns`.
+`D` is the number of node features.
+"""
+function run_benchmarks(;
+        Ns = [10, 100, 1000, 10000],
+        c = 6.0,
+        D = 100)
+
+    df = DataFrame(N=Int[], c=Float64[], layer=String[], gtype=Symbol[],
+                   time_fg=Any[], time_cpu=Any[], time_gpu=Any[]) |> allowmissing
+
+    for gtype in [:lightgraph, :adjlist, :dense, :sparse]
+        for N in Ns
+            println("## GRAPH_TYPE = $gtype N = $N")
+            for CONV in [GCNConv, GraphConv, GATConv]
+                res = run_single_benchmark(N, c, D, CONV; gtype)
+                row = (; N = N,
+                         c = c,
+                         layer = "$CONV",
+                         gtype = gtype,
+                         time_fg = median(res["FG"]),
+                         time_cpu = ismissing(res["CPU"]) ? missing : median(res["CPU"]),
+                         time_gpu = ismissing(res["GPU"]) ? missing : median(res["GPU"]),
+                       )
+                push!(df, row)
+            end
+        end
+    end
+    df.gpu_to_cpu = ratio.(df.time_gpu, df.time_cpu)
+    sort!(df, [:layer, :N, :c, :gtype])
+    return df
+end
+
+# df = run_benchmarks()
+# for g in groupby(df, :layer); println(g, "\n"); end
+
+# @save "perf/perf_master_20210803_carlo.jld2" dfmaster=df
+## or
+# @save "perf/perf_pr.jld2" dfpr=df
+
+
+function compare(dfpr, dfmaster; on=[:N, :c, :layer])
+    df = outerjoin(dfpr, dfmaster; on=on, makeunique=true, renamecols = :_pr => :_master)
+    df.pr_to_master_cpu = ratio.(df.time_cpu_pr, df.time_cpu_master)
+    df.pr_to_master_gpu = ratio.(df.time_gpu_pr, df.time_gpu_master)
+    return df[:, [:N, :c, :gtype_pr, :gtype_master, :layer, :pr_to_master_cpu, :pr_to_master_gpu]]
+end
+
+# @load "perf/perf_pr.jld2" dfpr
+# @load "perf/perf_master.jld2" dfmaster
+# compare(dfpr, dfmaster)
```
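The commented lines above outline the intended PR-vs-master comparison; spelled out end to end it would look like the sketch below. The JLD2 file names come from those comments; the reduced `Ns` is arbitrary, just to keep the run short.

```julia
# On master: run the benchmarks and save the baseline.
df = run_benchmarks(Ns = [10, 100])
@save "perf/perf_master.jld2" dfmaster = df

# On the PR branch: run again, save, then compare.
df = run_benchmarks(Ns = [10, 100])
@save "perf/perf_pr.jld2" dfpr = df

@load "perf/perf_master.jld2" dfmaster
@load "perf/perf_pr.jld2" dfpr
compare(dfpr, dfmaster)   # ratios > 1 mean the PR is slower than master
```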

src/GraphNeuralNetworks.jl

Lines changed: 67 additions & 1 deletion
```diff
@@ -1,5 +1,71 @@
 module GraphNeuralNetworks
 
-# Write your package code here.
+using NNlib: similar
+using LinearAlgebra: similar, fill!
+using Statistics: mean
+using LinearAlgebra
+using SparseArrays
+import KrylovKit
+using CUDA
+using Flux
+using Flux: glorot_uniform, leakyrelu, GRUCell, @functor
+using NNlib, NNlibCUDA
+using ChainRulesCore
+import LightGraphs
+using LightGraphs: AbstractGraph, outneighbors, inneighbors, is_directed, ne, nv,
+                   adjacency_matrix, degree
+
+export
+    # featuredgraph
+    FeaturedGraph,
+    edge_index,
+    node_feature, edge_feature, global_feature,
+    adjacency_list, normalized_laplacian, scaled_laplacian,
+    add_self_loops,
+
+    # from LightGraphs
+    adjacency_matrix,
+
+    # layers/msgpass
+    MessagePassing,
+
+    # layers/conv
+    GCNConv,
+    ChebConv,
+    GraphConv,
+    GATConv,
+    GatedGraphConv,
+    EdgeConv,
+    GINConv,
+
+    # layers/pool
+    GlobalPool,
+    LocalPool,
+    TopKPool,
+    topk_index,
+
+    # models
+    GAE,
+    VGAE,
+    InnerProductDecoder,
+    VariationalEncoder,
+    summarize,
+    sample,
+
+    # layers/misc
+    bypass_graph
+
+
+include("featuredgraph.jl")
+include("graph_conversions.jl")
+include("utils.jl")
+
+include("layers/msgpass.jl")
+
+include("layers/conv.jl")
+include("layers/pool.jl")
+include("models.jl")
+include("layers/misc.jl")
+
 
 end
```
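Since Zygote joins the test targets in this commit, the natural smoke test is differentiating through one exported layer. The sketch below assumes, as in the GeometricFlux lineage of this API, that applying a layer to a `FeaturedGraph` returns a new `FeaturedGraph` whose updated features are read back with the exported `node_feature`; that return convention is an assumption, not something this diff shows.

```julia
using Flux, GraphNeuralNetworks, LightGraphs

g  = erdos_renyi(20, 0.3)
fg = FeaturedGraph(g, nf = randn(Float32, 8, nv(g)))

layer = GCNConv(8 => 4)
ps = Flux.params(layer)

# ASSUMPTION: layer(fg) isa FeaturedGraph; node_feature extracts the output.
gs = gradient(() -> sum(node_feature(layer(fg))), ps)
```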
