Skip to content

Commit 87f3c60

Browse files
`@functor` -> `@layer` (#484)
* `@functor` -> `@layer` * reinstate tests
1 parent 6b58b75 commit 87f3c60

16 files changed

+49
-53
lines changed

GNNlib/src/msgpass.jl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ struct GNNConv <: GNNLayer
4545
σ
4646
end
4747
48-
Flux.@functor GNNConv
48+
Flux.@layer GNNConv
4949
5050
function GNNConv(ch::Pair{Int,Int}, σ=identity)
5151
in, out = ch

docs/src/messagepassing.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -109,7 +109,7 @@ struct GCN{A<:AbstractMatrix, B, F} <: GNNLayer
109109
σ::F
110110
end
111111

112-
Flux.@functor GCN # allow gpu movement, select trainable params etc...
112+
Flux.@layer GCN # allow gpu movement, select trainable params etc...
113113

114114
function GCN(ch::Pair{Int,Int}, σ=identity)
115115
in, out = ch

docs/src/models.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ and the *implicit modeling* style based on [`GNNChain`](@ref), more concise but
1313
In the explicit modeling style, the model is created according to the following steps:
1414

1515
1. Define a new type for your model (`GNN` in the example below). Layers and submodels are fields.
16-
2. Apply `Flux.@functor` to the new type to make it Flux's compatible (parameters' collection, gpu movement, etc...)
16+
2. Apply `Flux.@layer` to the new type to make it compatible with Flux (parameters' collection, gpu movement, etc...)
1717
3. Optionally define a convenience constructor for your model.
1818
4. Define the forward pass by implementing the call method for your type.
1919
5. Instantiate the model.
@@ -30,7 +30,7 @@ struct GNN # step 1
3030
dense
3131
end
3232

33-
Flux.@functor GNN # step 2
33+
Flux.@layer GNN # step 2
3434

3535
function GNN(din::Int, d::Int, dout::Int) # step 3
3636
GNN(GCNConv(din => d),

docs/tutorials/introductory_tutorials/gnn_intro_pluto.jl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -182,7 +182,7 @@ begin
182182
layers::NamedTuple
183183
end
184184

185-
Flux.@functor GCN # provides parameter collection, gpu movement and more
185+
Flux.@layer GCN # provides parameter collection, gpu movement and more
186186

187187
function GCN(num_features, num_classes)
188188
layers = (conv1 = GCNConv(num_features => 4),

docs/tutorials/introductory_tutorials/node_classification_pluto.jl

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -138,7 +138,7 @@ begin
138138
layers::NamedTuple
139139
end
140140

141-
Flux.@functor MLP
141+
Flux.@layer :expand MLP
142142

143143
function MLP(num_features, num_classes, hidden_channels; drop_rate = 0.5)
144144
layers = (hidden = Dense(num_features => hidden_channels),
@@ -235,7 +235,7 @@ begin
235235
layers::NamedTuple
236236
end
237237

238-
Flux.@functor GCN # provides parameter collection, gpu movement and more
238+
Flux.@layer GCN # provides parameter collection, gpu movement and more
239239

240240
function GCN(num_features, num_classes, hidden_channels; drop_rate = 0.5)
241241
layers = (conv1 = GCNConv(num_features => hidden_channels),

docs/tutorials_broken/temporal_graph_classification_pluto.jl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -117,7 +117,7 @@ begin
117117
dense::Dense
118118
end
119119

120-
Flux.@functor GenderPredictionModel
120+
Flux.@layer GenderPredictionModel
121121

122122
function GenderPredictionModel(; nfeatures = 103, nhidden = 128, activation = relu)
123123
mlp = Chain(Dense(nfeatures, nhidden, activation), Dense(nhidden, nhidden, activation))

examples/graph_classification_temporalbrains.jl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -62,7 +62,7 @@ struct GenderPredictionModel
6262
dense::Dense
6363
end
6464

65-
Flux.@functor GenderPredictionModel
65+
Flux.@layer GenderPredictionModel
6666

6767
function GenderPredictionModel(; nfeatures = 103, nhidden = 128, activation = relu)
6868
mlp = Chain(Dense(nfeatures, nhidden, activation), Dense(nhidden, nhidden, activation))

notebooks/gnn_intro.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -354,7 +354,7 @@
354354
" layers::NamedTuple\n",
355355
"end\n",
356356
"\n",
357-
"Flux.@functor GCN # provides parameter collection, gpu movement and more\n",
357+
"Flux.@layer :expand GCN # provides parameter collection, gpu movement and more\n",
358358
"\n",
359359
"function GCN(num_features, num_classes)\n",
360360
" layers = (conv1 = GCNConv(num_features => 4),\n",

notebooks/graph_classification_solved.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -857,7 +857,7 @@
857857
"\tact::F\n",
858858
"end\n",
859859
"\n",
860-
"Flux.@functor MyConv\n",
860+
"Flux.@layer MyConv\n",
861861
"\n",
862862
"function MyConv((nin, nout)::Pair, act=identity)\n",
863863
"\tW1 = Flux.glorot_uniform(nout, nin)\n",

src/GraphNeuralNetworks.jl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ module GraphNeuralNetworks
33
using Statistics: mean
44
using LinearAlgebra, Random
55
using Flux
6-
using Flux: glorot_uniform, leakyrelu, GRUCell, @functor, batch
6+
using Flux: glorot_uniform, leakyrelu, GRUCell, batch
77
using MacroTools: @forward
88
using NNlib
99
using NNlib: scatter, gather

0 commit comments

Comments
 (0)