@@ -919,6 +919,74 @@ function Base.show(io::IO, l::GATConv)
     print(io, ")")
 end
 
+@doc raw"""
+    GATv2Conv(in => out, σ = identity; heads = 1, concat = true, negative_slope = 0.2, init_weight = glorot_uniform, init_bias = zeros32, use_bias = true, add_self_loops = true, dropout = 0.0)
+    GATv2Conv((in, ein) => out, ...)
+
+GATv2 attentional layer from the paper [How Attentive are Graph Attention Networks?](https://arxiv.org/abs/2105.14491).
+
+Implements the operation
+```math
+\mathbf{x}_i' = \sum_{j \in N(i) \cup \{i\}} \alpha_{ij} W_1 \mathbf{x}_j
+```
+where the attention coefficients ``\alpha_{ij}`` are given by
+```math
+\alpha_{ij} = \frac{1}{z_i} \exp(\mathbf{a}^T LeakyReLU(W_2 \mathbf{x}_i + W_1 \mathbf{x}_j))
+```
+with ``z_i`` a normalization factor.
+
+If `ein > 0` is given, edge features of dimension `ein` are expected in the forward pass
+and the attention coefficients are calculated as
+```math
+\alpha_{ij} = \frac{1}{z_i} \exp(\mathbf{a}^T LeakyReLU(W_3 \mathbf{e}_{j\to i} + W_2 \mathbf{x}_i + W_1 \mathbf{x}_j)).
+```
+
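+As a rough standalone illustration of the attention formula above (not the layer's
+internal implementation; `W1`, `W2` and `a` stand in for the learned parameters):
+```julia
+using LinearAlgebra
+
+leakyrelu(x, slope = 0.2f0) = max.(x, slope .* x)
+
+din, dout = 3, 5
+W1, W2 = randn(Float32, dout, din), randn(Float32, dout, din)
+a = randn(Float32, dout)
+
+xi = randn(Float32, din)                  # features of node i
+xjs = [randn(Float32, din) for _ in 1:4]  # features of j ∈ N(i) ∪ {i}
+
+logits = [dot(a, leakyrelu(W2 * xi + W1 * xj)) for xj in xjs]
+α = exp.(logits) ./ sum(exp.(logits))     # softmax over the neighborhood, i.e. 1/z_i
+xi_new = sum(α[k] * (W1 * xjs[k]) for k in eachindex(xjs))
+```
+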
+# Arguments
+
+- `in`: The dimension of input node features.
+- `ein`: The dimension of input edge features. Default 0 (i.e. no edge features passed in the forward).
+- `out`: The dimension of output node features.
+- `σ`: Activation function. Default `identity`.
+- `heads`: Number of attention heads. Default `1`.
+- `concat`: Concatenate layer output or not. If not, layer output is averaged over the heads. Default `true`.
+- `negative_slope`: The negative-slope parameter of the LeakyReLU activation. Default `0.2`.
+- `add_self_loops`: Add self loops to the graph before performing the convolution. Default `true`.
+- `dropout`: Dropout probability on the normalized attention coefficients. Default `0.0`.
+- `init_weight`: Weight initializer. Default `glorot_uniform`.
+- `init_bias`: Bias initializer. Default `zeros32`.
+- `use_bias`: Add learnable bias. Default `true`.
+
+# Examples
+
+```julia
+using GNNLux, Lux, Random
+
+# initialize random number generator
+rng = Random.default_rng()
+
+# create data
+s = [1,1,2,3]
+t = [2,3,1,1]
+in_channel = 3
+out_channel = 5
+ein = 3
+g = GNNGraph(s, t)
+x = randn(rng, Float32, in_channel, g.num_nodes)
+
+# create layer
+l = GATv2Conv((in_channel, ein) => out_channel, add_self_loops = false)
+
+# setup layer
+ps, st = LuxCore.setup(rng, l)
+
+# edge features
+e = randn(rng, Float32, ein, length(s))
+
+# forward pass
+y, st = l(g, x, e, ps, st)
+```
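+
+With `heads > 1` and `concat = true` the per-head outputs are concatenated, so the
+output should have `out_channel * heads` rows (a sketch assuming the usual GAT
+concatenation semantics):
+```julia
+l2 = GATv2Conv(in_channel => out_channel, heads = 4, add_self_loops = false)
+ps2, st2 = LuxCore.setup(rng, l2)
+y2, _ = l2(g, x, ps2, st2)  # expected size: (out_channel * 4, g.num_nodes)
+```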
989+ """
 @concrete struct GATv2Conv <: GNNLayer
     dense_i
     dense_j