appnp.py
"""
APPNP implementation in DGL.
References
----------
Paper: https://arxiv.org/abs/1810.05997
Author's code: https://github.com/klicperajo/ppnp
"""

import torch.nn as nn
from dgl.nn.pytorch.conv import APPNPConv


class APPNP(nn.Module):
    def __init__(
        self,
        g,
        in_feats,
        hiddens,
        n_classes,
        activation,
        feat_drop,
        edge_drop,
        alpha,
        k,
    ):
        super(APPNP, self).__init__()
        self.g = g
        self.layers = nn.ModuleList()
        # input layer
        self.layers.append(nn.Linear(in_feats, hiddens[0]))
        # hidden layers
        for i in range(1, len(hiddens)):
            self.layers.append(nn.Linear(hiddens[i - 1], hiddens[i]))
        # output layer
        self.layers.append(nn.Linear(hiddens[-1], n_classes))
        self.activation = activation
        # fall back to an identity function when no feature dropout is requested
        if feat_drop:
            self.feat_drop = nn.Dropout(feat_drop)
        else:
            self.feat_drop = lambda x: x
        # k power-iteration steps of personalized PageRank with teleport
        # probability alpha; edge_drop randomly drops edges during training
        self.propagate = APPNPConv(k, alpha, edge_drop)
        self.reset_parameters()

    def reset_parameters(self):
        for layer in self.layers:
            layer.reset_parameters()

    def forward(self, features):
        # prediction step: an MLP on the raw node features
        h = features
        h = self.feat_drop(h)
        h = self.activation(self.layers[0](h))
        for layer in self.layers[1:-1]:
            h = self.activation(layer(h))
        h = self.layers[-1](self.feat_drop(h))
        # propagation step: smooth the predictions over the graph
        h = self.propagate(self.g, h)
        return h
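

# A minimal usage sketch (not part of the original file): it assumes the
# Cora dataset shipped with DGL and hyperparameters close to the paper's
# defaults (hiddens=[64], alpha=0.1, k=10); adjust them for other datasets.
if __name__ == "__main__":
    import dgl
    import torch.nn.functional as F
    from dgl.data import CoraGraphDataset

    dataset = CoraGraphDataset()
    g = dataset[0]
    g = dgl.add_self_loop(g)  # self-loops keep the normalized adjacency well-defined

    model = APPNP(
        g,
        in_feats=g.ndata["feat"].shape[1],
        hiddens=[64],
        n_classes=dataset.num_classes,
        activation=F.relu,
        feat_drop=0.5,
        edge_drop=0.5,
        alpha=0.1,
        k=10,
    )
    logits = model(g.ndata["feat"])  # shape: (num_nodes, n_classes)
    print(logits.shape)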