1 | 1 | import tensorflow as tf
2 | 2 | from layers import GraphConvLayer
3 | 3 |
4 | 4 | class GCN(tf.keras.Model):
5 | | -
6 | | -     def __init__(self, features_dim, num_layers, hidden_dim, num_classes, dropout_rate, bias=True):
| 5 | +     """Graph convolutional network for semi-supervised node classification.
| 6 | +
| 7 | +     Args:
| 8 | +         features_dim (int): Dimension of input features
| 9 | +         num_layers (int): Number of GNN layers
| 10 | +         hidden_dim (list): List of hidden layer dimensions
| 11 | +         num_classes (int): Total number of classes
| 12 | +         dropout_prob (float): Dropout probability
| 13 | +         bias (bool): Whether a bias term is added to the GCN layers
| 14 | +     """
| 15 | +
| 16 | +     def __init__(self, **kwargs):
7 | 17 |         super(GCN, self).__init__()
8 | | -
9 | | -         self.num_layers = num_layers
10 | | -         self.bias = bias
11 | | -
| 18 | +
| 19 | +         for key, item in kwargs.items():
| 20 | +             setattr(self, key, item)
| 21 | +
12 | 22 |         self.gc = []
13 | 23 |         # input layer
14 | 24 |         single_gc = tf.keras.Sequential()
15 | | -         single_gc.add(GraphConvLayer({"input_dim": features_dim,
16 | | -                                       "output_dim": hidden_dim[0],
17 | | -                                       "bias": bias}))
| 25 | +         single_gc.add(GraphConvLayer(input_dim=self.features_dim,
| 26 | +                                      output_dim=self.hidden_dim[0],
| 27 | +                                      bias=self.bias))
18 | 28 |         single_gc.add(tf.keras.layers.ReLU())
19 | | -         single_gc.add(tf.keras.layers.Dropout(dropout_rate))
| 29 | +         single_gc.add(tf.keras.layers.Dropout(self.dropout_prob))
20 | 30 |         self.gc.append(single_gc)
21 | 31 |
22 | 32 |         # hidden layers
23 | | -         for i in range(0, num_layers-2):
| 33 | +         for i in range(0, self.num_layers-2):
24 | 34 |             single_gc = tf.keras.Sequential()
25 | | -             single_gc.add(GraphConvLayer({"input_dim": hidden_dim[i],
26 | | -                                           "output_dim": hidden_dim[i+1],
27 | | -                                           "bias": bias}))
| 35 | +             single_gc.add(GraphConvLayer(input_dim=self.hidden_dim[i],
| 36 | +                                          output_dim=self.hidden_dim[i+1],
| 37 | +                                          bias=self.bias))
28 | 38 |             single_gc.add(tf.keras.layers.ReLU())
29 | | -             single_gc.add(tf.keras.layers.Dropout(dropout_rate))
| 39 | +             single_gc.add(tf.keras.layers.Dropout(self.dropout_prob))
30 | 40 |             self.gc.append(single_gc)
31 | 41 |
32 | 42 |         # output layer
33 | | -         self.classifier = GraphConvLayer({"input_dim": hidden_dim[-1],
34 | | -                                           "output_dim": num_classes,
35 | | -                                           "bias": bias})
| 43 | +         self.classifier = GraphConvLayer(input_dim=self.hidden_dim[-1],
| 44 | +                                          output_dim=self.num_classes,
| 45 | +                                          bias=self.bias)
36 | 46 |
37 | 47 |     def call(self, inputs):
38 | 48 |         features, adj = inputs[0], inputs[1]
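The diff is truncated here; the rest of call is not shown in this view. A minimal sketch of how the forward pass might continue, assuming each GraphConvLayer block accepts a [features, adj] pair (the actual call signature lives in layers.py and is not part of this commit):

    def call(self, inputs):
        features, adj = inputs[0], inputs[1]
        # Run the input/hidden GraphConv -> ReLU -> Dropout blocks in order,
        # threading the updated node features through while reusing adj.
        for single_gc in self.gc:
            features = single_gc([features, adj])
        # The output layer maps the last hidden features to per-class logits.
        return self.classifier([features, adj])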
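For reference, a hypothetical instantiation matching the new keyword-argument constructor. Note that every documented argument must be supplied, since the constructor only stores whatever keys it receives via setattr. The dimensions below are illustrative (roughly Cora-sized), not from this commit:

    model = GCN(features_dim=1433,   # input feature size per node
                num_layers=2,        # input block + output layer; hidden loop runs num_layers-2 times
                hidden_dim=[16],     # one entry per non-output layer
                num_classes=7,
                dropout_prob=0.5,
                bias=True)
    logits = model([features, adj])  # features: node feature matrix, adj: (normalized) adjacency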