Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add GBP (Graph Neural Networks via Bidirectional Propagation) #124

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
109 changes: 109 additions & 0 deletions cogdl/models/nn/gbp.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,109 @@
import torch.nn as nn
import torch
import math
import torch.nn.functional as F

from .. import BaseModel, register_model


class Dense(nn.Module):
    r"""
    GBP layer: https://arxiv.org/pdf/2010.15421.pdf

    A linear projection (no additive bias) followed by an optional
    BatchNorm1d "bias" op, with a residual connection whenever the input
    and output dimensions match.

    Args:
        in_features (int): size of each input sample.
        out_features (int): size of each output sample.
        bias (str): "bn" applies :class:`nn.BatchNorm1d` after the
            projection; any other value applies no transform.
    """

    def __init__(self, in_features, out_features, bias="none"):
        super(Dense, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features))

        if bias == "bn":
            self.bias = nn.BatchNorm1d(out_features)
        else:
            # nn.Identity instead of a bare lambda: a lambda is not
            # picklable (breaks torch.save of the whole model) and is not
            # registered as a submodule, so it is invisible to .to()/.eval().
            self.bias = nn.Identity()

        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize the weight uniformly in [-stdv, stdv].

        stdv is 1/sqrt(out_features), as in the reference implementation.
        """
        stdv = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)

    def forward(self, input):
        output = torch.mm(input, self.weight)
        output = self.bias(output)

        # Residual connection, only possible when shapes line up.
        if self.in_features == self.out_features:
            output = output + input

        return output

    def __repr__(self):
        return self.__class__.__name__ + " (" + str(self.in_features) + " -> " + str(self.out_features) + ")"


@register_model("gbp")
class GnnBP(BaseModel):
    r"""
    The GBP model from the `"Scalable Graph Neural Networks via Bidirectional
    Propagation"
    <https://arxiv.org/pdf/2010.15421.pdf>`_ paper

    Args:
        num_features (int) : Number of input features.
        num_layers (int) : the number of hidden layers
        hidden_size (int) : The dimension of node representation.
        num_classes (int) : Number of classes.
        dropout (float) : Dropout rate for model training.
        bias (str) : bias
    """

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        # fmt: off
        parser.add_argument("--num-features", type=int)
        parser.add_argument("--num-layers", type=int, default=2)
        parser.add_argument("--hidden-size", type=int, default=64)
        parser.add_argument("--num-classes", type=int)
        parser.add_argument("--dropout", type=float, default=0.5)
        parser.add_argument("--alpha", type=float, default=0.1, help='decay factor')
        parser.add_argument("--rmax", type=float, default=1e-5, help='threshold.')
        parser.add_argument("--rrz", type=float, default=0.0, help='r.')
        parser.add_argument("--bias", default='none')
        # fmt: on

    @classmethod
    def build_model_from_args(cls, args):
        """Construct the model from parsed command-line arguments."""
        return cls(args.num_features, args.num_layers, args.hidden_size, args.num_classes, args.dropout, args.bias)

    def __init__(self, num_features, num_layers, hidden_size, num_classes, dropout, bias):
        super(GnnBP, self).__init__()

        # Build the MLP: input projection, (num_layers - 2) hidden layers,
        # and an output layer. The output layer deliberately uses the
        # default bias ("none") rather than the configured one.
        dims = [num_features] + [hidden_size] * (num_layers - 1)
        layers = [Dense(d_in, d_out, bias) for d_in, d_out in zip(dims[:-1], dims[1:])]
        layers.append(Dense(hidden_size, num_classes))
        self.fcs = nn.ModuleList(layers)
        self.act_fn = nn.ReLU()
        self.dropout = dropout

    def forward(self, x):
        # dropout -> linear -> ReLU for every layer except the last,
        # which emits raw logits.
        h = F.dropout(x, self.dropout, training=self.training)
        h = self.act_fn(self.fcs[0](h))
        for hidden_layer in self.fcs[1:-1]:
            h = F.dropout(h, self.dropout, training=self.training)
            h = self.act_fn(hidden_layer(h))
        h = F.dropout(h, self.dropout, training=self.training)
        return self.fcs[-1](h)

    def node_classification_loss(self, data):
        """Negative log-likelihood over the training nodes."""
        mask = data.train_mask
        log_probs = F.log_softmax(self.forward(data.x), dim=-1)
        return F.nll_loss(log_probs[mask], data.y[mask])

    def predict(self, data):
        """Return raw logits for every node."""
        return self.forward(data.x)
1 change: 1 addition & 0 deletions examples/gcn.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,3 +32,4 @@ def get_default_args():
model = build_model(args)
task = build_task(args, dataset=dataset, model=model)
ret = task.train()
print(ret)
1 change: 1 addition & 0 deletions match.yml
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
node_classification:
- model:
- gdc_gcn
- gbp
- gcn
- gat
- drgat
Expand Down
92 changes: 91 additions & 1 deletion tests/tasks/test_node_classification.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,11 +21,98 @@ def get_default_args():
"weight_decay": 5e-4,
"missing_rate": -1,
"task": "node_classification",
"dataset": "cora"
"dataset": "cora",
}
return build_args_from_dict(default_dict)


def test_gbp_citeseer():
    """Smoke-test the GBP model on the Citeseer node-classification task."""
    args = get_default_args()
    args.task = "node_classification"
    args.dataset = "citeseer"
    args.model = "gbp"

    dataset = build_dataset(args)

    hyperparams = {
        "cpu": True,
        "num_features": dataset.num_features,
        "num_classes": dataset.num_classes,
        "num_layers": 2,
        "hidden_size": 64,
        "dropout": 0.5,
        "alpha": 0.1,
        "rmax": 1e-5,
        "rrz": 0.0,
        "weight_decay": 0.0005,
        "patience": 100,
        "max_epoch": 500,
        "missing_rate": -1,
        "bias": "none",
    }
    for name, value in hyperparams.items():
        setattr(args, name, value)

    model = build_model(args)
    task = build_task(args, dataset=dataset, model=model)
    ret = task.train()
    assert 0 <= ret["Acc"] <= 1


def test_gbp_cora():
    """Smoke-test the GBP model on the Cora node-classification task."""
    args = get_default_args()
    args.task = "node_classification"
    args.dataset = "cora"
    args.model = "gbp"

    dataset = build_dataset(args)

    hyperparams = {
        "cpu": True,
        "num_features": dataset.num_features,
        "num_classes": dataset.num_classes,
        "num_layers": 2,
        "hidden_size": 64,
        "dropout": 0.5,
        "alpha": 0.1,
        "rmax": 1e-5,
        "rrz": 0.0,
        "weight_decay": 0.0005,
        "patience": 100,
        "max_epoch": 500,
        "missing_rate": -1,
        "bias": "none",
    }
    for name, value in hyperparams.items():
        setattr(args, name, value)

    model = build_model(args)
    task = build_task(args, dataset=dataset, model=model)
    ret = task.train()
    assert 0 <= ret["Acc"] <= 1


def test_gbp_pubmed():
    """Smoke-test the GBP model on the Pubmed node-classification task."""
    args = get_default_args()
    args.task = "node_classification"
    args.dataset = "pubmed"
    args.model = "gbp"

    dataset = build_dataset(args)

    hyperparams = {
        "cpu": True,
        "num_features": dataset.num_features,
        "num_classes": dataset.num_classes,
        "num_layers": 2,
        "hidden_size": 64,
        "dropout": 0.5,
        "alpha": 0.1,
        "rmax": 1e-5,
        "rrz": 0.0,
        "weight_decay": 0.0005,
        "patience": 100,
        "max_epoch": 500,
        "missing_rate": -1,
        "bias": "none",
    }
    for name, value in hyperparams.items():
        setattr(args, name, value)

    model = build_model(args)
    task = build_task(args, dataset=dataset, model=model)
    ret = task.train()
    assert 0 <= ret["Acc"] <= 1


def test_gdc_gcn_cora():
args = get_default_args()
args.task = "node_classification"
Expand Down Expand Up @@ -648,6 +735,9 @@ def test_dropedge_inceptiongcn_cora():


if __name__ == "__main__":
test_gbp_citeseer()
test_gbp_cora()
test_gbp_pubmed()
test_gdc_gcn_cora()
test_gcn_cora()
test_gat_cora()
Expand Down