Skip to content

Commit c15b750

Browse files
cleanup
1 parent e457ae5 commit c15b750

File tree

3 files changed

+127
-127
lines changed

3 files changed

+127
-127
lines changed

docs/src/models.md

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -90,14 +90,14 @@ The `GNNChain` only propagates the graph and the node features. More complex sce
9090
A `GNNChain` opportunely propagates the graph into the branches created by the `Flux.Parallel` layer:
9191

9292
```julia
93-
AddResidual(l) = Parallel(+, identity, l)
93+
AddResidual(l) = Parallel(+, identity, l) # implementing a skip/residual connection
9494

95-
model = GNNChain( AddResidual(ResGatedGraphConv(din => d, relu)),
96-
BatchNorm(d),
95+
model = GNNChain( ResGatedGraphConv(din => d, relu),
96+
AddResidual(ResGatedGraphConv(d => d, relu)),
97+
AddResidual(ResGatedGraphConv(d => d, relu)),
9798
AddResidual(ResGatedGraphConv(d => d, relu)),
98-
BatchNorm(d),
9999
GlobalPooling(mean),
100100
Dense(d, dout))
101101

102102
y = model(g, X) # output size: (dout, g.num_graphs)
103-
```
103+
```

test/layers/conv.jl

Lines changed: 118 additions & 118 deletions
Original file line numberDiff line numberDiff line change
@@ -24,131 +24,131 @@
2424

2525
test_graphs = [g1, g_single_vertex]
2626

27-
# @testset "GCNConv" begin
28-
# l = GCNConv(in_channel => out_channel)
29-
# for g in test_graphs
30-
# test_layer(l, g, rtol=1e-5, outsize=(out_channel, g.num_nodes))
31-
# end
32-
33-
# l = GCNConv(in_channel => out_channel, tanh, bias=false)
34-
# for g in test_graphs
35-
# test_layer(l, g, rtol=1e-5, outsize=(out_channel, g.num_nodes))
36-
# end
37-
38-
# l = GCNConv(in_channel => out_channel, add_self_loops=false)
39-
# test_layer(l, g1, rtol=1e-5, outsize=(out_channel, g1.num_nodes))
40-
# end
41-
42-
# @testset "ChebConv" begin
43-
# k = 3
44-
# l = ChebConv(in_channel => out_channel, k)
45-
# @test size(l.weight) == (out_channel, in_channel, k)
46-
# @test size(l.bias) == (out_channel,)
47-
# @test l.k == k
48-
# for g in test_graphs
49-
# g = add_self_loops(g)
50-
# test_layer(l, g, rtol=1e-5, test_gpu=false, outsize=(out_channel, g.num_nodes))
51-
# if TEST_GPU
52-
# @test_broken test_layer(l, g, rtol=1e-5, test_gpu=true, outsize=(out_channel, g.num_nodes))
53-
# end
54-
# end
27+
@testset "GCNConv" begin
28+
l = GCNConv(in_channel => out_channel)
29+
for g in test_graphs
30+
test_layer(l, g, rtol=1e-5, outsize=(out_channel, g.num_nodes))
31+
end
32+
33+
l = GCNConv(in_channel => out_channel, tanh, bias=false)
34+
for g in test_graphs
35+
test_layer(l, g, rtol=1e-5, outsize=(out_channel, g.num_nodes))
36+
end
37+
38+
l = GCNConv(in_channel => out_channel, add_self_loops=false)
39+
test_layer(l, g1, rtol=1e-5, outsize=(out_channel, g1.num_nodes))
40+
end
41+
42+
@testset "ChebConv" begin
43+
k = 3
44+
l = ChebConv(in_channel => out_channel, k)
45+
@test size(l.weight) == (out_channel, in_channel, k)
46+
@test size(l.bias) == (out_channel,)
47+
@test l.k == k
48+
for g in test_graphs
49+
g = add_self_loops(g)
50+
test_layer(l, g, rtol=1e-5, test_gpu=false, outsize=(out_channel, g.num_nodes))
51+
if TEST_GPU
52+
@test_broken test_layer(l, g, rtol=1e-5, test_gpu=true, outsize=(out_channel, g.num_nodes))
53+
end
54+
end
5555

56-
# @testset "bias=false" begin
57-
# @test length(Flux.params(ChebConv(2=>3, 3))) == 2
58-
# @test length(Flux.params(ChebConv(2=>3, 3, bias=false))) == 1
59-
# end
60-
# end
61-
62-
# @testset "GraphConv" begin
63-
# l = GraphConv(in_channel => out_channel)
64-
# for g in test_graphs
65-
# test_layer(l, g, rtol=1e-5, outsize=(out_channel, g.num_nodes))
66-
# end
67-
68-
# l = GraphConv(in_channel => out_channel, relu, bias=false, aggr=mean)
69-
# for g in test_graphs
70-
# test_layer(l, g, rtol=1e-5, outsize=(out_channel, g.num_nodes))
71-
# end
56+
@testset "bias=false" begin
57+
@test length(Flux.params(ChebConv(2=>3, 3))) == 2
58+
@test length(Flux.params(ChebConv(2=>3, 3, bias=false))) == 1
59+
end
60+
end
61+
62+
@testset "GraphConv" begin
63+
l = GraphConv(in_channel => out_channel)
64+
for g in test_graphs
65+
test_layer(l, g, rtol=1e-5, outsize=(out_channel, g.num_nodes))
66+
end
67+
68+
l = GraphConv(in_channel => out_channel, relu, bias=false, aggr=mean)
69+
for g in test_graphs
70+
test_layer(l, g, rtol=1e-5, outsize=(out_channel, g.num_nodes))
71+
end
7272

73-
# @testset "bias=false" begin
74-
# @test length(Flux.params(GraphConv(2=>3))) == 3
75-
# @test length(Flux.params(GraphConv(2=>3, bias=false))) == 2
76-
# end
77-
# end
78-
79-
# @testset "GATConv" begin
80-
81-
# for heads in (1, 2), concat in (true, false)
82-
# l = GATConv(in_channel => out_channel; heads, concat)
83-
# for g in test_graphs
84-
# test_layer(l, g, rtol=1e-4,
85-
# outsize=(concat ? heads*out_channel : out_channel, g.num_nodes))
86-
# end
87-
# end
88-
89-
# @testset "bias=false" begin
90-
# @test length(Flux.params(GATConv(2=>3))) == 3
91-
# @test length(Flux.params(GATConv(2=>3, bias=false))) == 2
92-
# end
93-
# end
94-
95-
# @testset "GatedGraphConv" begin
96-
# num_layers = 3
97-
# l = GatedGraphConv(out_channel, num_layers)
98-
# @test size(l.weight) == (out_channel, out_channel, num_layers)
99-
100-
# for g in test_graphs
101-
# test_layer(l, g, rtol=1e-5, outsize=(out_channel, g.num_nodes))
102-
# end
103-
# end
104-
105-
# @testset "EdgeConv" begin
106-
# l = EdgeConv(Dense(2*in_channel, out_channel), aggr=+)
107-
# for g in test_graphs
108-
# test_layer(l, g, rtol=1e-5, outsize=(out_channel, g.num_nodes))
109-
# end
110-
# end
111-
112-
# @testset "GINConv" begin
113-
# nn = Dense(in_channel, out_channel)
114-
# eps = 0.001f0
115-
# l = GINConv(nn, eps=eps)
116-
# for g in test_graphs
117-
# test_layer(l, g, rtol=1e-5, outsize=(out_channel, g.num_nodes), exclude_grad_fields=[:eps])
118-
# end
73+
@testset "bias=false" begin
74+
@test length(Flux.params(GraphConv(2=>3))) == 3
75+
@test length(Flux.params(GraphConv(2=>3, bias=false))) == 2
76+
end
77+
end
78+
79+
@testset "GATConv" begin
80+
81+
for heads in (1, 2), concat in (true, false)
82+
l = GATConv(in_channel => out_channel; heads, concat)
83+
for g in test_graphs
84+
test_layer(l, g, rtol=1e-4,
85+
outsize=(concat ? heads*out_channel : out_channel, g.num_nodes))
86+
end
87+
end
88+
89+
@testset "bias=false" begin
90+
@test length(Flux.params(GATConv(2=>3))) == 3
91+
@test length(Flux.params(GATConv(2=>3, bias=false))) == 2
92+
end
93+
end
94+
95+
@testset "GatedGraphConv" begin
96+
num_layers = 3
97+
l = GatedGraphConv(out_channel, num_layers)
98+
@test size(l.weight) == (out_channel, out_channel, num_layers)
99+
100+
for g in test_graphs
101+
test_layer(l, g, rtol=1e-5, outsize=(out_channel, g.num_nodes))
102+
end
103+
end
104+
105+
@testset "EdgeConv" begin
106+
l = EdgeConv(Dense(2*in_channel, out_channel), aggr=+)
107+
for g in test_graphs
108+
test_layer(l, g, rtol=1e-5, outsize=(out_channel, g.num_nodes))
109+
end
110+
end
111+
112+
@testset "GINConv" begin
113+
nn = Dense(in_channel, out_channel)
114+
eps = 0.001f0
115+
l = GINConv(nn, eps=eps)
116+
for g in test_graphs
117+
test_layer(l, g, rtol=1e-5, outsize=(out_channel, g.num_nodes), exclude_grad_fields=[:eps])
118+
end
119119

120-
# @test !in(:eps, Flux.trainable(l))
121-
# end
120+
@test !in(:eps, Flux.trainable(l))
121+
end
122122

123-
# @testset "NNConv" begin
124-
# edim = 10
125-
# nn = Dense(edim, out_channel * in_channel)
123+
@testset "NNConv" begin
124+
edim = 10
125+
nn = Dense(edim, out_channel * in_channel)
126126

127-
# l = NNConv(in_channel => out_channel, nn)
128-
# for g in test_graphs
129-
# g = GNNGraph(g, edata=rand(T, edim, g.num_edges))
130-
# test_layer(l, g, rtol=1e-5, outsize=(out_channel, g.num_nodes))
131-
# end
127+
l = NNConv(in_channel => out_channel, nn)
128+
for g in test_graphs
129+
g = GNNGraph(g, edata=rand(T, edim, g.num_edges))
130+
test_layer(l, g, rtol=1e-5, outsize=(out_channel, g.num_nodes))
131+
end
132132

133-
# l = NNConv(in_channel => out_channel, nn, tanh, bias=false, aggr=mean)
134-
# for g in test_graphs
135-
# g = GNNGraph(g, edata=rand(T, edim, g.num_edges))
136-
# test_layer(l, g, rtol=1e-5, outsize=(out_channel, g.num_nodes))
137-
# end
138-
# end
139-
140-
# @testset "SAGEConv" begin
141-
# l = SAGEConv(in_channel => out_channel)
142-
# @test l.aggr == mean
143-
# for g in test_graphs
144-
# test_layer(l, g, rtol=1e-5, outsize=(out_channel, g.num_nodes))
145-
# end
133+
l = NNConv(in_channel => out_channel, nn, tanh, bias=false, aggr=mean)
134+
for g in test_graphs
135+
g = GNNGraph(g, edata=rand(T, edim, g.num_edges))
136+
test_layer(l, g, rtol=1e-5, outsize=(out_channel, g.num_nodes))
137+
end
138+
end
139+
140+
@testset "SAGEConv" begin
141+
l = SAGEConv(in_channel => out_channel)
142+
@test l.aggr == mean
143+
for g in test_graphs
144+
test_layer(l, g, rtol=1e-5, outsize=(out_channel, g.num_nodes))
145+
end
146146

147-
# l = SAGEConv(in_channel => out_channel, tanh, bias=false, aggr=+)
148-
# for g in test_graphs
149-
# test_layer(l, g, rtol=1e-5, outsize=(out_channel, g.num_nodes))
150-
# end
151-
# end
147+
l = SAGEConv(in_channel => out_channel, tanh, bias=false, aggr=+)
148+
for g in test_graphs
149+
test_layer(l, g, rtol=1e-5, outsize=(out_channel, g.num_nodes))
150+
end
151+
end
152152

153153

154154
@testset "ResGatedGraphConv" begin

test/runtests.jl

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -16,12 +16,12 @@ ENV["DATADEPS_ALWAYS_ACCEPT"] = true # for MLDatasets
1616
include("test_utils.jl")
1717

1818
tests = [
19-
# "gnngraph",
20-
# "msgpass",
19+
"gnngraph",
20+
"msgpass",
2121
"layers/basic",
2222
"layers/conv",
23-
# "layers/pool",
24-
# "examples/node_classification_cora",
23+
"layers/pool",
24+
"examples/node_classification_cora",
2525
]
2626

2727
!CUDA.functional() && @warn("CUDA unavailable, not testing GPU support")

0 commit comments

Comments
 (0)