
Commit 7040206

fallback to dense adjmatrix on gpu
1 parent 3004d93 commit 7040206

4 files changed: +34 −36 lines

src/GraphNeuralNetworks.jl

Lines changed: 1 addition & 1 deletion

@@ -1,7 +1,7 @@
 module GraphNeuralNetworks

 using Statistics: mean
-using LinearAlgebra
+using LinearAlgebra, Random
 using SparseArrays
 import KrylovKit
 using Base: tail

src/gnngraph.jl

Lines changed: 4 additions & 2 deletions

@@ -348,8 +348,10 @@ function scaled_laplacian(g::GNNGraph, T::DataType=Float32; dir=:out)
 end

 # _eigmax(A) = eigmax(Symmetric(A)) # Doesn't work on sparse arrays
-_eigmax(A) = KrylovKit.eigsolve(Symmetric(A), 1, :LR)[1][1] # also eigs(A, x0, nev, mode) available
-
+function _eigmax(A)
+    x0 = randn!(similar(A, float(eltype(A)), size(A, 1)))
+    KrylovKit.eigsolve(Symmetric(A), x0, 1, :LR)[1][1] # also eigs(A, x0, nev, mode) available
+end
 # Eigenvalues for cuarray don't seem to be well supported.
 # https://github.com/JuliaGPU/CUDA.jl/issues/154
 # https://discourse.julialang.org/t/cuda-eigenvalues-of-a-sparse-matrix/46851/5
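Note: the change supplies an explicit Krylov starting vector instead of letting KrylovKit pick a default. Because x0 is built with similar(A, ...), it takes A's array type, so for a GPU matrix the start vector lives on the GPU as well; this is also why Random is now imported at the top level (for randn!). A minimal CPU sketch of the same call pattern, with a hypothetical name eigmax_krylov:

using LinearAlgebra, Random
import KrylovKit

# Same pattern as the new _eigmax: similar(A, ...) allocates the start
# vector with A's array type (a CuVector when A is a GPU matrix), and
# :LR asks for the eigenvalue with the largest real part.
eigmax_krylov(A) = KrylovKit.eigsolve(
    Symmetric(A),
    randn!(similar(A, float(eltype(A)), size(A, 1))),  # device-matched x0
    1, :LR)[1][1]

B = rand(20, 20); B = B + B'   # dense symmetric test matrix
@assert eigmax_krylov(B) ≈ eigmax(Symmetric(B))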

src/graph_conversions.jl

Lines changed: 12 additions & 4 deletions

@@ -104,30 +104,38 @@ function to_sparse(A::ADJMAT_T, T::DataType=eltype(A); dir=:out, num_nodes=nothing)
     @assert dir ∈ [:out, :in]
     num_nodes = size(A, 1)
     @assert num_nodes == size(A, 2)
-    num_edges = round(Int, sum(A))
+    num_edges = A isa AbstractSparseMatrix ? nnz(A) : count(!=(0), A)
     if dir == :in
         A = A'
     end
     if T != eltype(A)
         A = T.(A)
     end
-    return sparse(A), num_nodes, num_edges
+    if !(A isa AbstractSparseMatrix)
+        A = sparse(A)
+    end
+    return A, num_nodes, num_edges
 end

 function to_sparse(adj_list::ADJLIST_T, T::DataType=Int; dir=:out, num_nodes=nothing)
     coo, num_nodes, num_edges = to_coo(adj_list; dir, num_nodes)
-    to_sparse(coo; dir, num_nodes)
+    return to_sparse(coo; dir, num_nodes)
 end

 function to_sparse(coo::COO_T, T::DataType=Int; dir=:out, num_nodes=nothing)
     s, t, eweight = coo
+    s isa CuVector && return to_dense(coo, T; dir, num_nodes)
     eweight = isnothing(eweight) ? fill!(similar(s, T), 1) : eweight
     num_nodes = isnothing(num_nodes) ? max(maximum(s), maximum(t)) : num_nodes
     A = sparse(s, t, eweight, num_nodes, num_nodes)
     num_edges = length(s)
-    A, num_nodes, num_edges
+    return A, num_nodes, num_edges
 end

+# _sparse(I, J, V, m, n) = sparse(I, J, V, m, n)
+
+# _sparse(I::CuVector, J::CuVector, V::CuVector, m, n)
+
 @non_differentiable to_coo(x...)
 @non_differentiable to_dense(x...)
 @non_differentiable to_sparse(x...)
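Note: this is the change the commit title refers to. Constructing a CUDA sparse matrix from COO index vectors isn't supported here, so when s is a CuVector the COO path falls back to a dense adjacency matrix via to_dense; the commented-out _sparse stubs mark a possible future dispatch point. The num_edges fix also counts nonzero entries (nnz for sparse, count(!=(0), A) for dense) instead of summing entries, which was wrong for weighted adjacency matrices. A CPU stand-in for the fallback control flow, with a hypothetical name adjmat_from_coo:

using SparseArrays

# Keep the sparse path where `sparse` is available; otherwise build a
# dense matrix the way to_dense would, accumulating duplicate (i, j)
# pairs with + exactly like `sparse` does.
function adjmat_from_coo(s, t, w, n; dense_fallback::Bool=false)
    dense_fallback || return sparse(s, t, w, n, n)
    A = zeros(eltype(w), n, n)
    for (i, j, v) in zip(s, t, w)
        A[i, j] += v
    end
    return A
end

s, t, w = [1, 2, 3], [2, 3, 1], Float32[1, 1, 1]
@assert adjmat_from_coo(s, t, w, 3; dense_fallback=true) ==
        Matrix(adjmat_from_coo(s, t, w, 3))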

test/cuda/gnngraph.jl

Lines changed: 17 additions & 29 deletions

@@ -1,6 +1,8 @@
+const ACUMatrix{T} = Union{CuMatrix{T}, CUDA.CUSPARSE.CuSparseMatrix{T}}
+
 @testset "cuda/gnngraph" begin
-    s = [1,1,2,3,4,5,5,5]
-    t = [2,5,3,2,1,4,3,1]
+    s = [1,1,2,3,4]
+    t = [2,5,3,5,5]
     s, t = [s; t], [t; s] #symmetrize
     g = GNNGraph(s, t, graph_type=GRAPH_T)
     g_gpu = g |> gpu

@@ -15,41 +17,27 @@
     end

     @testset "adjacency_matrix" begin
-        function test_adj()
-            mat = adjacency_matrix(g)
-            mat_gpu = adjacency_matrix(g_gpu)
-            @test mat_gpu isa CuMatrix{Int}
-            true
-        end
+        # See https://github.com/JuliaGPU/CUDA.jl/pull/1093

-        if GRAPH_T == :coo
-            # See https://github.com/JuliaGPU/CUDA.jl/pull/1093
-            @test_broken test_adj()
-        else
-            test_adj()
-        end
+        mat = adjacency_matrix(g)
+        mat_gpu = adjacency_matrix(g_gpu)
+        @test mat_gpu isa ACUMatrix{Int}
+        @test Array(mat_gpu) == mat
     end

     @testset "normalized_laplacian" begin
-        function test_normlapl()
-            mat = normalized_laplacian(g)
-            mat_gpu = normalized_laplacian(g_gpu)
-            @test mat_gpu isa CuMatrix{Float32}
-            true
-        end
-        if GRAPH_T == :coo
-            @test_broken test_normlapl()
-        else
-            test_normlapl()
-        end
+        mat = normalized_laplacian(g)
+        mat_gpu = normalized_laplacian(g_gpu)
+        @test mat_gpu isa ACUMatrix{Float32}
+        @test Array(mat_gpu) == mat
     end

     @testset "scaled_laplacian" begin
-        @test_broken begin
+        @test_broken begin
             mat = scaled_laplacian(g)
             mat_gpu = scaled_laplacian(g_gpu)
-            @test mat_gpu isa CuMatrix{Float32}
-            true
+            @test mat_gpu isa ACUMatrix{Float32}
+            @test Array(mat_gpu) == mat
         end
     end
-end
+end
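Note: with the dense fallback in place, adjacency_matrix(g_gpu) may come back as a dense CuMatrix or a CuSparseMatrix depending on GRAPH_T, so the tests now assert against the ACUMatrix union and compare values after copying to the host with Array. A CPU analogue of the alias, with a hypothetical name AnyMatrix:

using SparseArrays

# One isa-test that accepts both dense and sparse matrices of a given
# element type, mirroring what ACUMatrix does for CUDA arrays.
const AnyMatrix{T} = Union{Matrix{T}, SparseMatrixCSC{T}}

@assert rand(Float32, 2, 2) isa AnyMatrix{Float32}
@assert sparse(rand(Float32, 2, 2)) isa AnyMatrix{Float32}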
