
Commit 992e5bc

Add custom _adjacency_matrix for propagate on CUDA COO graphs
- Leave the public adjacency_matrix interface uniform, always returning a sparse adjacency matrix.
- Implement a custom _adjacency_matrix for the propagate copy_xj specialization on CUDA COO graphs, converting to dense when that is more efficient.
1 parent 4beb3f5 commit 992e5bc
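
As a rough illustration of the first point, here is a minimal sketch of the public interface this commit leaves untouched (rand_graph and the Flux.gpu transfer are assumed from the wider GNNGraphs.jl API and are not part of this diff):

using CUDA, Flux, GNNGraphs

g = rand_graph(10, 30) |> Flux.gpu   # GNNGraph backed by CUDA COO arrays

# The public interface stays uniform: always a sparse adjacency matrix,
# whether or not the graph is coalesced.
A = adjacency_matrix(g, Float32)

The dense-vs-sparse choice is moved entirely into the internal _adjacency_matrix helper used by the propagate specialization in the second file below.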

2 files changed: 29 additions & 19 deletions

GNNGraphs/ext/GNNGraphsCUDAExt.jl

Lines changed: 9 additions & 17 deletions
@@ -14,20 +14,6 @@ const CUDA_COO_T = Tuple{T, T, V} where {T <: AnyCuArray{<:Integer}, V <: Union{

 GNNGraphs._rand_dense_vector(A::CUMAT_T) = CUDA.randn(size(A, 1))

-function Graphs.adjacency_matrix(g::GNNGraph{<:CUDA_COO_T}, T::DataType = eltype(g); dir = :out,
-                                 weighted = true)
-    @debug "Using CUDA adjacency_matrix for GNNGraph"
-    if !g.is_coalesced
-        # Revisit after
-        # https://github.com/JuliaGPU/CUDA.jl/issues/1113
-        A, n, m = GNNGraphs.to_dense(g.graph, T; num_nodes = g.num_nodes, weighted) # if not coalesced, construction of sparse matrix is slow
-    else
-        A, n, m = GNNGraphs.to_sparse(g.graph, T; num_nodes = g.num_nodes, weighted)
-    end
-    @assert size(A) == (n, n)
-    return dir == :out ? A : A'
-end
-
 # Transform

 GNNGraphs.dense_zeros_like(a::CUMAT_T, T::Type, sz = size(a)) = CUDA.zeros(T, sz)
@@ -54,18 +40,24 @@ end
 # Convert

 function GNNGraphs.to_sparse(coo::CUDA_COO_T, T = nothing; dir = :out, num_nodes = nothing,
-                             weighted = true)
+                             weighted = true, is_coalesced = false)
     s, t, eweight = coo
-    @debug "Using CUDA to_sparse for COO"
+    @debug "Using CUDA to_sparse for COO with is_coalesced=$is_coalesced"
     T = T === nothing ? (eweight === nothing ? eltype(s) : eltype(eweight)) : T

     if eweight === nothing || !weighted
         eweight = fill!(similar(s, T), 1)
     end

     num_nodes::Int = isnothing(num_nodes) ? max(maximum(s), maximum(t)) : num_nodes
-    A = CUDA.CUSPARSE.CuSparseMatrixCOO{T,eltype(s)}(s, t, eweight, (num_nodes, num_nodes)) # create sparse matrix in COO format

+    # if coalesced build directly sparse coo matrix
+    if is_coalesced
+        A = CUDA.CUSPARSE.CuSparseMatrixCOO{T,eltype(s)}(s, t, eweight, (num_nodes, num_nodes))
+    else
+        A = sparse(s, t, eweight, num_nodes, num_nodes)
+    end
+
     num_edges::Int = nnz(A)
     if eltype(A) != T
         A = T.(A)
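
A minimal sketch of how the new is_coalesced keyword changes the construction path (to_sparse is an internal helper, and the raw CuVector COO inputs below are assumptions for illustration only):

using CUDA, GNNGraphs

s = cu([1, 2, 3]); t = cu([2, 3, 1]); w = cu(Float32[1, 1, 1])

# Coalesced input (sorted, no duplicate edges): the arrays are wrapped directly
# in a CuSparseMatrixCOO, skipping any sorting work.
A1, n, m = GNNGraphs.to_sparse((s, t, w), Float32; num_nodes = 3, is_coalesced = true)

# Possibly uncoalesced input: falls back to the generic sparse(s, t, w, n, n)
# constructor, which handles unsorted entries on the GPU.
A2, n, m = GNNGraphs.to_sparse((s, t, w), Float32; num_nodes = 3)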

GNNlib/ext/GNNlibCUDAExt.jl

Lines changed: 20 additions & 2 deletions
@@ -3,7 +3,9 @@ module GNNlibCUDAExt
 using CUDA
 using Random, Statistics, LinearAlgebra
 using GNNlib: GNNlib, propagate, copy_xj, e_mul_xj, w_mul_xj
-using GNNGraphs: GNNGraph, COO_T, SPARSE_T, adjacency_matrix
+using GNNGraphs: GNNGraph, COO_T, SPARSE_T
+
+const CUDA_COO_T = Tuple{T, T, V} where {T <: AnyCuArray{<:Integer}, V <: Union{Nothing, AnyCuArray}}

 ###### PROPAGATE SPECIALIZATIONS ####################

@@ -13,7 +15,7 @@ using GNNGraphs: GNNGraph, COO_T, SPARSE_T, adjacency_matrix
 function GNNlib.propagate(::typeof(copy_xj), g::GNNGraph{<:COO_T}, ::typeof(+),
                           xi, xj::AnyCuMatrix, e)
     @debug "Using CUDA propagate for copy_xj"
-    A = adjacency_matrix(g, eltype(xj); weighted = false)
+    A = _adjacency_matrix(g, eltype(xj); weighted = false)

     return xj * A
 end
@@ -45,4 +47,20 @@ end

 # Flux.Zygote.@nograd compute_degree

+## CUSTOM ADJACENCY_MATRIX IMPLEMENTATION FOR CUDA COO GRAPHS, returning dense matrix when not coalesced, more efficient
+
+function _adjacency_matrix(g::GNNGraph{<:CUDA_COO_T}, T::DataType = eltype(g); dir = :out,
+                           weighted = true)
+    @debug "Using CUDA _adjacency_matrix for COO GNNGraph"
+    if !g.is_coalesced
+        # Revisit after
+        # https://github.com/JuliaGPU/CUDA.jl/issues/1113
+        A, n, m = GNNGraphs.to_dense(g.graph, T; num_nodes = g.num_nodes, weighted) # if not coalesced, construction of sparse matrix is slow
+    else
+        A, n, m = GNNGraphs.to_sparse(g.graph, T; num_nodes = g.num_nodes, weighted, is_coalesced = true)
+    end
+    @assert size(A) == (n, n)
+    return dir == :out ? A : A'
+end
+
 end #module
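
For context, a sketch of how the new specialization is exercised end to end (a sketch only: rand_graph, the Flux.gpu transfer, and the keyword form of propagate are taken from the wider GraphNeuralNetworks.jl API, not from this diff):

using CUDA, Flux, GNNGraphs
using GNNlib: propagate, copy_xj

g_cpu = rand_graph(100, 400)
x_cpu = randn(Float32, 8, 100)            # one feature column per node
g, x = Flux.gpu(g_cpu), Flux.gpu(x_cpu)

# On the GPU this dispatches to the specialization above and computes
# xj * _adjacency_matrix(g), using a dense matrix when g is not coalesced.
y = propagate(copy_xj, g, +; xj = x)

# Generic gather/scatter path on the CPU as a reference.
y_ref = propagate(copy_xj, g_cpu, +; xj = x_cpu)
@assert Array(y) ≈ y_ref

The only user-visible difference should be performance; the dense fallback for uncoalesced graphs stays an implementation detail hidden behind _adjacency_matrix.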
