# model.py
import math

import torch
import torch.nn as nn
import torch.nn.functional as F


class Dense(nn.Module):
    """Linear layer with an optional BatchNorm1d "bias" transform and a
    residual connection when the input and output widths match."""

    def __init__(self, in_features, out_features, bias='none'):
        super(Dense, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features))
        # 'bias' selects what is applied after the matrix product:
        # 'bn' means BatchNorm1d, anything else means identity.
        if bias == 'bn':
            self.bias = nn.BatchNorm1d(out_features)
        else:
            self.bias = nn.Identity()
        self.reset_parameters()

    def reset_parameters(self):
        # Uniform initialization scaled by the square root of the fan-out.
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)

    def forward(self, input):
        output = torch.mm(input, self.weight)
        output = self.bias(output)
        if self.in_features == self.out_features:
            # Residual connection, only possible when the shapes agree.
            output = output + input
        return output


class GnnBP(nn.Module):
    """Plain MLP classifier: one input layer, nlayers - 2 hidden layers,
    and a linear output layer, with dropout before every layer."""

    def __init__(self, nfeat, nlayers, nhidden, nclass, dropout, bias):
        super(GnnBP, self).__init__()
        self.fcs = nn.ModuleList()
        self.fcs.append(Dense(nfeat, nhidden, bias))
        for _ in range(nlayers - 2):
            self.fcs.append(Dense(nhidden, nhidden, bias))
        # The output layer keeps the default bias mode ('none'), so no
        # BatchNorm is applied to the logits.
        self.fcs.append(Dense(nhidden, nclass))
        self.act_fn = nn.ReLU()
        self.dropout = dropout

    def forward(self, x):
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.act_fn(self.fcs[0](x))
        for fc in self.fcs[1:-1]:
            x = F.dropout(x, self.dropout, training=self.training)
            x = self.act_fn(fc(x))
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.fcs[-1](x)
        return x


class Gnn(nn.Module):
    """Two-layer MLP baseline. nlayers, nhidden, dropout, and bias are
    accepted for interface compatibility but are unused, as is is_dec."""

    def __init__(self, nfeat, nlayers, nhidden, nclass, dropout, bias):
        super(Gnn, self).__init__()
        self.feature_layers = nn.Sequential(
            nn.Linear(nfeat, 128),
            nn.ReLU(),
            # nn.Linear(256, 128),
            # nn.ReLU(),
            nn.Linear(128, nclass))

    def forward(self, x, is_dec=False):
        enc = self.feature_layers(x)
        return enc


if __name__ == '__main__':
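    # Minimal smoke test, a sketch only: the feature width, class count,
    # and hyperparameters below are illustrative assumptions, not values
    # taken from this repo.
    x = torch.randn(8, 32)
    model = GnnBP(nfeat=32, nlayers=4, nhidden=64, nclass=7,
                  dropout=0.5, bias='bn')
    print(model(x).shape)     # expected: torch.Size([8, 7])
    baseline = Gnn(nfeat=32, nlayers=4, nhidden=64, nclass=7,
                   dropout=0.5, bias='none')
    print(baseline(x).shape)  # expected: torch.Size([8, 7])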