Skip to content

Commit 59dcb86

Browse files
cleanup
1 parent 5f3141d commit 59dcb86

File tree

5 files changed

+58
-58
lines changed

5 files changed

+58
-58
lines changed

GraphNeuralNetworks/test/layers/basic.jl

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -74,17 +74,17 @@ end
7474
wg = WithGraph(model, g)
7575
# No need to feed the graph to `wg`
7676
@test wg(x) == model(g, x)
77-
@test Flux.params(wg) == Flux.params(model)
77+
@test Flux.trainables(wg) == Flux.trainables(model)
7878
g2 = GNNGraph([1, 1, 2, 3], [2, 4, 1, 1])
7979
x2 = rand(Float32, 2, 4)
8080
# WithGraph will ignore the internal graph if fed with a new one.
8181
@test wg(g2, x2) == model(g2, x2)
8282

8383
wg = WithGraph(model, g, traingraph = false)
84-
@test length(Flux.params(wg)) == length(Flux.params(model))
84+
@test length(Flux.trainables(wg)) == length(Flux.trainables(model))
8585

8686
wg = WithGraph(model, g, traingraph = true)
87-
@test length(Flux.params(wg)) == length(Flux.params(model)) + length(Flux.params(g))
87+
@test length(Flux.trainables(wg)) == length(Flux.trainables(model)) + length(Flux.trainables(g))
8888
end
8989

9090
@testset "Flux restructure" begin

GraphNeuralNetworks/test/layers/conv.jl

Lines changed: 38 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -4,17 +4,17 @@ ATOL_LOW = 1e-3
44

55
@testset "GCNConv" begin
66
l = GCNConv(D_IN => D_OUT)
7-
for g in test_graphs
7+
for g in TEST_GRAPHS
88
test_layer(l, g, rtol = RTOL_HIGH, outsize = (D_OUT, g.num_nodes))
99
end
1010

1111
l = GCNConv(D_IN => D_OUT, tanh, bias = false)
12-
for g in test_graphs
12+
for g in TEST_GRAPHS
1313
test_layer(l, g, rtol = RTOL_HIGH, outsize = (D_OUT, g.num_nodes))
1414
end
1515

1616
l = GCNConv(D_IN => D_OUT, add_self_loops = false)
17-
test_layer(l, g1, rtol = RTOL_HIGH, outsize = (D_OUT, g1.num_nodes))
17+
test_layer(l, TEST_GRAPHS[1], rtol = RTOL_HIGH, outsize = (D_OUT, TEST_GRAPHS[1].num_nodes))
1818

1919
@testset "edge weights & custom normalization" begin
2020
s = [2, 3, 1, 3, 1, 2]
@@ -57,39 +57,39 @@ end
5757
@test size(l.weight) == (D_OUT, D_IN, k)
5858
@test size(l.bias) == (D_OUT,)
5959
@test l.k == k
60-
for g in test_graphs
60+
for g in TEST_GRAPHS
6161
g = add_self_loops(g)
6262
test_layer(l, g, rtol = RTOL_HIGH, test_gpu = TEST_GPU,
6363
outsize = (D_OUT, g.num_nodes))
6464
end
6565

6666
@testset "bias=false" begin
67-
@test length(Flux.params(ChebConv(2 => 3, 3))) == 2
68-
@test length(Flux.params(ChebConv(2 => 3, 3, bias = false))) == 1
67+
@test length(Flux.trainables(ChebConv(2 => 3, 3))) == 2
68+
@test length(Flux.trainables(ChebConv(2 => 3, 3, bias = false))) == 1
6969
end
7070
end
7171

7272
@testset "GraphConv" begin
7373
l = GraphConv(D_IN => D_OUT)
74-
for g in test_graphs
74+
for g in TEST_GRAPHS
7575
test_layer(l, g, rtol = RTOL_HIGH, outsize = (D_OUT, g.num_nodes))
7676
end
7777

7878
l = GraphConv(D_IN => D_OUT, tanh, bias = false, aggr = mean)
79-
for g in test_graphs
79+
for g in TEST_GRAPHS
8080
test_layer(l, g, rtol = RTOL_HIGH, outsize = (D_OUT, g.num_nodes))
8181
end
8282

8383
@testset "bias=false" begin
84-
@test length(Flux.params(GraphConv(2 => 3))) == 3
85-
@test length(Flux.params(GraphConv(2 => 3, bias = false))) == 2
84+
@test length(Flux.trainables(GraphConv(2 => 3))) == 3
85+
@test length(Flux.trainables(GraphConv(2 => 3, bias = false))) == 2
8686
end
8787
end
8888

8989
@testset "GATConv" begin
9090
for heads in (1, 2), concat in (true, false)
9191
l = GATConv(D_IN => D_OUT; heads, concat, dropout=0)
92-
for g in test_graphs
92+
for g in TEST_GRAPHS
9393
test_layer(l, g, rtol = RTOL_LOW,
9494
exclude_grad_fields = [:negative_slope, :dropout],
9595
outsize = (concat ? heads * D_OUT : D_OUT,
@@ -100,26 +100,26 @@ end
100100
@testset "edge features" begin
101101
ein = 3
102102
l = GATConv((D_IN, ein) => D_OUT, add_self_loops = false, dropout=0)
103-
g = GNNGraph(g1, edata = rand(Float32, ein, g1.num_edges))
103+
g = GNNGraph(TEST_GRAPHS[1], edata = rand(Float32, ein, TEST_GRAPHS[1].num_edges))
104104
test_layer(l, g, rtol = RTOL_LOW,
105105
exclude_grad_fields = [:negative_slope, :dropout],
106106
outsize = (D_OUT, g.num_nodes))
107107
end
108108

109109
@testset "num params" begin
110110
l = GATConv(2 => 3, add_self_loops = false)
111-
@test length(Flux.params(l)) == 3
111+
@test length(Flux.trainables(l)) == 3
112112
l = GATConv((2, 4) => 3, add_self_loops = false)
113-
@test length(Flux.params(l)) == 4
113+
@test length(Flux.trainables(l)) == 4
114114
l = GATConv((2, 4) => 3, add_self_loops = false, bias = false)
115-
@test length(Flux.params(l)) == 3
115+
@test length(Flux.trainables(l)) == 3
116116
end
117117
end
118118

119119
@testset "GATv2Conv" begin
120120
for heads in (1, 2), concat in (true, false)
121121
l = GATv2Conv(D_IN => D_OUT, tanh; heads, concat, dropout=0)
122-
for g in test_graphs
122+
for g in TEST_GRAPHS
123123
test_layer(l, g, rtol = RTOL_LOW, atol=ATOL_LOW,
124124
exclude_grad_fields = [:negative_slope, :dropout],
125125
outsize = (concat ? heads * D_OUT : D_OUT,
@@ -130,19 +130,19 @@ end
130130
@testset "edge features" begin
131131
ein = 3
132132
l = GATv2Conv((D_IN, ein) => D_OUT, add_self_loops = false, dropout=0)
133-
g = GNNGraph(g1, edata = rand(Float32, ein, g1.num_edges))
133+
g = GNNGraph(TEST_GRAPHS[1], edata = rand(Float32, ein, TEST_GRAPHS[1].num_edges))
134134
test_layer(l, g, rtol = RTOL_LOW, atol=ATOL_LOW,
135135
exclude_grad_fields = [:negative_slope, :dropout],
136136
outsize = (D_OUT, g.num_nodes))
137137
end
138138

139139
@testset "num params" begin
140140
l = GATv2Conv(2 => 3, add_self_loops = false)
141-
@test length(Flux.params(l)) == 5
141+
@test length(Flux.trainables(l)) == 5
142142
l = GATv2Conv((2, 4) => 3, add_self_loops = false)
143-
@test length(Flux.params(l)) == 6
143+
@test length(Flux.trainables(l)) == 6
144144
l = GATv2Conv((2, 4) => 3, add_self_loops = false, bias = false)
145-
@test length(Flux.params(l)) == 4
145+
@test length(Flux.trainables(l)) == 4
146146
end
147147
end
148148

@@ -151,14 +151,14 @@ end
151151
l = GatedGraphConv(D_OUT, num_layers)
152152
@test size(l.weight) == (D_OUT, D_OUT, num_layers)
153153

154-
for g in test_graphs
154+
for g in TEST_GRAPHS
155155
test_layer(l, g, rtol = RTOL_HIGH, outsize = (D_OUT, g.num_nodes))
156156
end
157157
end
158158

159159
@testset "EdgeConv" begin
160160
l = EdgeConv(Dense(2 * D_IN, D_OUT), aggr = +)
161-
for g in test_graphs
161+
for g in TEST_GRAPHS
162162
test_layer(l, g, rtol = RTOL_HIGH, outsize = (D_OUT, g.num_nodes))
163163
end
164164
end
@@ -167,7 +167,7 @@ end
167167
nn = Dense(D_IN, D_OUT)
168168

169169
l = GINConv(nn, 0.01f0, aggr = mean)
170-
for g in test_graphs
170+
for g in TEST_GRAPHS
171171
test_layer(l, g, rtol = RTOL_HIGH, outsize = (D_OUT, g.num_nodes))
172172
end
173173

@@ -179,7 +179,7 @@ end
179179
nn = Dense(edim, D_OUT * D_IN)
180180

181181
l = NNConv(D_IN => D_OUT, nn, tanh, bias = true, aggr = +)
182-
for g in test_graphs
182+
for g in TEST_GRAPHS
183183
g = GNNGraph(g, edata = rand(Float32, edim, g.num_edges))
184184
test_layer(l, g, rtol = RTOL_HIGH, outsize = (D_OUT, g.num_nodes))
185185
end
@@ -190,28 +190,29 @@ end
190190
@test l.aggr == mean
191191

192192
l = SAGEConv(D_IN => D_OUT, tanh, bias = false, aggr = +)
193-
for g in test_graphs
193+
for g in TEST_GRAPHS
194194
test_layer(l, g, rtol = RTOL_HIGH, outsize = (D_OUT, g.num_nodes))
195195
end
196196
end
197197

198198
@testset "ResGatedGraphConv" begin
199199
l = ResGatedGraphConv(D_IN => D_OUT, tanh, bias = true)
200-
for g in test_graphs
200+
for g in TEST_GRAPHS
201201
test_layer(l, g, rtol = RTOL_HIGH, outsize = (D_OUT, g.num_nodes))
202202
end
203203
end
204204

205205
@testset "CGConv" begin
206206
edim = 10
207207
l = CGConv((D_IN, edim) => D_OUT, tanh, residual = false, bias = true)
208-
for g in test_graphs
208+
for g in TEST_GRAPHS
209209
g = GNNGraph(g, edata = rand(Float32, edim, g.num_edges))
210210
test_layer(l, g, rtol = RTOL_HIGH, outsize = (D_OUT, g.num_nodes))
211211
end
212212

213213
# no edge features
214214
l1 = CGConv(D_IN => D_OUT, tanh, residual = false, bias = true)
215+
g1 = TEST_GRAPHS[1]
215216
@test l1(g1, g1.ndata.x) == l1(g1).ndata.x
216217
@test l1(g1, g1.ndata.x, nothing) == l1(g1).ndata.x
217218
end
@@ -228,14 +229,14 @@ end
228229
@test l.add_self_loops == true
229230
@test l.trainable == true
230231
Flux.trainable(l) == (; β = [1f0])
231-
for g in test_graphs
232+
for g in TEST_GRAPHS
232233
test_layer(l, g, rtol = RTOL_HIGH, outsize = (D_IN, g.num_nodes))
233234
end
234235
end
235236

236237
@testset "MEGNetConv" begin
237238
l = MEGNetConv(D_IN => D_OUT, aggr = +)
238-
for g in test_graphs
239+
for g in TEST_GRAPHS
239240
g = GNNGraph(g, edata = rand(Float32, D_IN, g.num_edges))
240241
test_layer(l, g, rtol = RTOL_LOW,
241242
outtype = :node_edge,
@@ -247,7 +248,7 @@ end
247248
ein_channel = 10
248249
K = 5
249250
l = GMMConv((D_IN, ein_channel) => D_OUT, K = K)
250-
for g in test_graphs
251+
for g in TEST_GRAPHS
251252
g = GNNGraph(g, edata = rand(Float32, ein_channel, g.num_edges))
252253
test_layer(l, g, rtol = RTOL_HIGH, outsize = (D_OUT, g.num_nodes))
253254
end
@@ -257,12 +258,12 @@ end
257258
K = [1, 2, 3] # for different number of hops
258259
for k in K
259260
l = SGConv(D_IN => D_OUT, k, add_self_loops = true)
260-
for g in test_graphs
261+
for g in TEST_GRAPHS
261262
test_layer(l, g, rtol = RTOL_HIGH, outsize = (D_OUT, g.num_nodes))
262263
end
263264

264265
l = SGConv(D_IN => D_OUT, k, add_self_loops = true)
265-
for g in test_graphs
266+
for g in TEST_GRAPHS
266267
test_layer(l, g, rtol = RTOL_HIGH, outsize = (D_OUT, g.num_nodes))
267268
end
268269
end
@@ -272,12 +273,12 @@ end
272273
K = [1, 2, 3]
273274
for k in K
274275
l = TAGConv(D_IN => D_OUT, k, add_self_loops = true)
275-
for g in test_graphs
276+
for g in TEST_GRAPHS
276277
test_layer(l, g, rtol = RTOL_HIGH, outsize = (D_OUT, g.num_nodes))
277278
end
278279

279280
l = TAGConv(D_IN => D_OUT, k, add_self_loops = true)
280-
for g in test_graphs
281+
for g in TEST_GRAPHS
281282
test_layer(l, g, rtol = RTOL_HIGH, outsize = (D_OUT, g.num_nodes))
282283
end
283284
end
@@ -313,7 +314,7 @@ end
313314
# used like in Shi et al., 2021
314315
l = TransformerConv((D_IN, ein) => D_IN; heads, gating = true,
315316
bias_qkv = true)
316-
for g in test_graphs
317+
for g in TEST_GRAPHS
317318
g = GNNGraph(g, edata = rand(Float32, ein, g.num_edges))
318319
test_layer(l, g, rtol = RTOL_LOW,
319320
exclude_grad_fields = [:negative_slope],
@@ -323,7 +324,7 @@ end
323324
l = TransformerConv(D_IN => D_IN; heads, concat = false,
324325
bias_root = false,
325326
root_weight = false)
326-
for g in test_graphs
327+
for g in TEST_GRAPHS
327328
test_layer(l, g, rtol = RTOL_LOW,
328329
exclude_grad_fields = [:negative_slope],
329330
outsize = (D_IN, g.num_nodes))
@@ -334,7 +335,7 @@ end
334335
K = [1, 2, 3] # for different number of hops
335336
for k in K
336337
l = DConv(D_IN => D_OUT, k)
337-
for g in test_graphs
338+
for g in TEST_GRAPHS
338339
test_layer(l, g, rtol = RTOL_HIGH, outsize = (D_OUT, g.num_nodes))
339340
end
340341
end

GraphNeuralNetworks/test/layers/pool.jl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@ end
2929
fgate = Dense(chin, 1)
3030
ffeat = Dense(chin, chout)
3131
p = GlobalAttentionPool(fgate, ffeat)
32-
@test length(Flux.params(p)) == 4
32+
@test length(Flux.trainables(p)) == 4
3333

3434
g = Flux.batch([GNNGraph(random_regular_graph(n, 4),
3535
ndata = rand(Float32, chin, n),

GraphNeuralNetworks/test/runtests.jl

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -24,13 +24,13 @@ ENV["DATADEPS_ALWAYS_ACCEPT"] = true # for MLDatasets
2424
include("test_utils.jl")
2525

2626
tests = [
27-
"layers/basic",
27+
# "layers/basic",
2828
"layers/conv",
29-
"layers/heteroconv",
30-
"layers/temporalconv",
31-
"layers/pool",
32-
"examples/node_classification_cora",
33-
"samplers"
29+
# "layers/heteroconv",
30+
# "layers/temporalconv",
31+
# "layers/pool",
32+
# "examples/node_classification_cora",
33+
# "samplers"
3434
]
3535

3636
!CUDA.functional() && @warn("CUDA unavailable, not testing GPU support")
@@ -41,7 +41,8 @@ for graph_type in (:coo, :dense, :sparse)
4141
@info "Testing graph format :$graph_type"
4242
global GRAPH_T = graph_type
4343
global TEST_GPU = CUDA.functional() && (GRAPH_T != :sparse)
44-
44+
global TEST_GRAPHS = generate_test_graphs(GRAPH_T)
45+
4546
@testset "$t" for t in tests
4647
startswith(t, "examples") && GRAPH_T == :dense && continue # not testing :dense since causes OutOfMememory on github's CI
4748
include("$t.jl")

GraphNeuralNetworks/test/test_utils.jl

Lines changed: 8 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,9 @@
11
using ChainRulesTestUtils, FiniteDifferences, Zygote, Adapt, CUDA
22
CUDA.allowscalar(false)
33

4+
const D_IN = 3
5+
const D_OUT = 5
6+
47
function ngradient(f, x...)
58
fdm = central_fdm(5, 1)
69
return FiniteDifferences.grad(fdm, f, x...)
@@ -227,29 +230,24 @@ Adapt.adapt_storage(::GNNEltypeAdaptor{T}, x::AbstractArray{<:Number}) where T =
227230

228231
_paramtype(::Type{T}, m) where T = fmap(adapt(GNNEltypeAdaptor{T}()), m)
229232

230-
const D_IN = 3
231-
const D_OUT = 5
232-
233-
function generate_test_graphs()
233+
function generate_test_graphs(graph_type)
234234
adj1 = [0 1 0 1
235235
1 0 1 0
236236
0 1 0 1
237237
1 0 1 0]
238238

239239
g1 = GNNGraph(adj1,
240-
ndata = rand(Float32, D_IN, 4),
241-
graph_type = GRAPH_T)
240+
ndata = rand(Float32, D_IN, 4);
241+
graph_type)
242242

243243
adj_single_vertex = [0 0 0 1
244244
0 0 0 0
245245
0 0 0 1
246246
1 0 1 0]
247247

248248
g_single_vertex = GNNGraph(adj_single_vertex,
249-
ndata = rand(Float32, D_IN, 4),
250-
graph_type = GRAPH_T)
249+
ndata = rand(Float32, D_IN, 4);
250+
graph_type)
251251

252252
return (g1, g_single_vertex)
253253
end
254-
255-
const TEST_GRAPHS = generate_test_graphs()

0 commit comments

Comments
 (0)