
docs + linting, removed numpy in test dependency

rusty1s committed Nov 25, 2019
1 parent 70e6632 commit 7011d1cc772134139354fb91d56eaa60fe3c4933
@@ -8,7 +8,8 @@
 from torch_geometric.nn import GCNConv, ChebConv  # noqa

 parser = argparse.ArgumentParser()
-parser.add_argument('--use_gdc', action='store_true', help='Use GDC preprocessing.')
+parser.add_argument('--use_gdc', action='store_true',
+                    help='Use GDC preprocessing.')
 args = parser.parse_args()

 dataset = 'Cora'
@@ -17,19 +18,21 @@
 data = dataset[0]

 if args.use_gdc:
-    gdc = T.GDC(self_loop_weight=1,
-                normalization_in='sym', normalization_out='col',
+    gdc = T.GDC(self_loop_weight=1, normalization_in='sym',
+                normalization_out='col',
                 diffusion_kwargs=dict(method='ppr', alpha=0.05),
-                sparsification_kwargs=dict(method='topk', k=128, dim=0),
-                exact=True)
+                sparsification_kwargs=dict(method='topk', k=128,
+                                           dim=0), exact=True)
     data = gdc(data)


 class Net(torch.nn.Module):
     def __init__(self):
         super(Net, self).__init__()
-        self.conv1 = GCNConv(dataset.num_features, 16, cached=True, normalize=not args.use_gdc)
-        self.conv2 = GCNConv(16, dataset.num_classes, cached=True, normalize=not args.use_gdc)
+        self.conv1 = GCNConv(dataset.num_features, 16, cached=True,
+                             normalize=not args.use_gdc)
+        self.conv2 = GCNConv(16, dataset.num_classes, cached=True,
+                             normalize=not args.use_gdc)
         # self.conv1 = ChebConv(data.num_features, 16, K=2)
         # self.conv2 = ChebConv(16, data.num_features, K=2)
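
Note: the `normalize=not args.use_gdc` switch above exists because GDC already returns diffused, normalized, and sparsified edge weights, so the layer's own symmetric normalization would be redundant. A minimal, self-contained sketch of that interplay (the toy graph and layer sizes are made up for illustration, not taken from the commit):

import torch
from torch_geometric.data import Data
from torch_geometric.nn import GCNConv
from torch_geometric.transforms import GDC

# Toy graph: 4 nodes on a cycle with random 8-dimensional features.
edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 0]])
data = Data(x=torch.randn(4, 8), edge_index=edge_index, num_nodes=4)

gdc = GDC(self_loop_weight=1, normalization_in='sym', normalization_out='col',
          diffusion_kwargs=dict(method='ppr', alpha=0.05),
          sparsification_kwargs=dict(method='topk', k=2, dim=0), exact=True)
data = gdc(data)

# GDC stores the diffusion weights in data.edge_attr, so they are passed to
# the layer explicitly and the layer's own normalization stays off.
conv = GCNConv(8, 16, cached=False, normalize=False)
out = conv(data.x, data.edge_index, data.edge_attr)
print(out.shape)  # torch.Size([4, 16])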

@@ -46,9 +49,10 @@ def forward(self):

 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 model, data = Net().to(device), data.to(device)
-optimizer = torch.optim.Adam([dict(params=model.reg_params, weight_decay=5e-4),
-                              dict(params=model.non_reg_params, weight_decay=0)],
-                             lr=0.01)
+optimizer = torch.optim.Adam([
+    dict(params=model.reg_params, weight_decay=5e-4),
+    dict(params=model.non_reg_params, weight_decay=0)
+], lr=0.01)


def train():
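
Note: the reformatted optimizer above applies weight decay only to the first parameter group. `model.reg_params` and `model.non_reg_params` are not shown in this diff; presumably they are the parameters of `conv1` and `conv2` defined elsewhere in the example. A small stand-alone sketch of the same per-group weight-decay pattern, with plain Linear layers standing in for the conv layers:

import torch

lin1 = torch.nn.Linear(16, 16)   # stands in for model.reg_params
lin2 = torch.nn.Linear(16, 7)    # stands in for model.non_reg_params

# Only the first parameter group is L2-regularized; the second group
# overrides weight_decay with 0 while sharing the same learning rate.
optimizer = torch.optim.Adam([
    dict(params=lin1.parameters(), weight_decay=5e-4),
    dict(params=lin2.parameters(), weight_decay=0),
], lr=0.01)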
@@ -1,4 +1,3 @@
-import numpy as np
 import torch
 from torch_geometric.data import Data
 from torch_geometric.transforms import GDC
@@ -10,74 +9,81 @@ def test_gdc():
                                [1, 2, 0, 2, 0, 1, 3, 2, 4, 3]])

     data = Data(edge_index=edge_index, num_nodes=5)
-    gdc = GDC(self_loop_weight=1,
-              normalization_in='sym', normalization_out='sym',
+    gdc = GDC(self_loop_weight=1, normalization_in='sym',
+              normalization_out='sym',
               diffusion_kwargs=dict(method='ppr', alpha=0.15),
-              sparsification_kwargs=dict(method='threshold', avg_degree=2),
-              exact=True)
+              sparsification_kwargs=dict(method='threshold',
+                                         avg_degree=2), exact=True)
     data = gdc(data)
-    mat = to_dense_adj(data.edge_index, edge_attr=data.edge_attr).squeeze().cpu().numpy()
-    assert np.all(mat >= -1e-8)
-    assert np.allclose(mat, mat.T)
+    mat = to_dense_adj(data.edge_index, edge_attr=data.edge_attr).squeeze()
+    assert torch.all(mat >= -1e-8)
+    assert torch.allclose(mat, mat.t())

     data = Data(edge_index=edge_index, num_nodes=5)
-    gdc = GDC(self_loop_weight=1,
-              normalization_in='sym', normalization_out='sym',
+    gdc = GDC(self_loop_weight=1, normalization_in='sym',
+              normalization_out='sym',
               diffusion_kwargs=dict(method='heat', t=10),
-              sparsification_kwargs=dict(method='threshold', avg_degree=2),
-              exact=True)
+              sparsification_kwargs=dict(method='threshold',
+                                         avg_degree=2), exact=True)
     data = gdc(data)
-    mat = to_dense_adj(data.edge_index, edge_attr=data.edge_attr).squeeze().cpu().numpy()
-    assert np.all(mat >= -1e-8)
-    assert np.allclose(mat, mat.T)
+    mat = to_dense_adj(data.edge_index, edge_attr=data.edge_attr).squeeze()
+    assert torch.all(mat >= -1e-8)
+    assert torch.allclose(mat, mat.t())

     data = Data(edge_index=edge_index, num_nodes=5)
-    gdc = GDC(self_loop_weight=1,
-              normalization_in='col', normalization_out='col',
+    gdc = GDC(self_loop_weight=1, normalization_in='col',
+              normalization_out='col',
               diffusion_kwargs=dict(method='heat', t=10),
-              sparsification_kwargs=dict(method='topk', k=2, dim=0),
-              exact=True)
+              sparsification_kwargs=dict(method='topk', k=2,
+                                         dim=0), exact=True)
     data = gdc(data)
-    mat = to_dense_adj(data.edge_index, edge_attr=data.edge_attr).squeeze().cpu().numpy()
+    mat = to_dense_adj(data.edge_index, edge_attr=data.edge_attr).squeeze()
     col_sum = mat.sum(0)
-    assert np.all(mat >= -1e-8)
-    assert np.all(np.isclose(col_sum, 1) | np.isclose(col_sum, 0))
-    assert np.all((~np.isclose(mat, 0)).sum(0) == 2)
+    assert torch.all(mat >= -1e-8)
+    assert torch.all(
+        torch.isclose(col_sum, torch.tensor(1.0))
+        | torch.isclose(col_sum, torch.tensor(0.0)))
+    assert torch.all((~torch.isclose(mat, torch.tensor(0.0))).sum(0) == 2)

     data = Data(edge_index=edge_index, num_nodes=5)
-    gdc = GDC(self_loop_weight=1,
-              normalization_in='row', normalization_out='row',
+    gdc = GDC(self_loop_weight=1, normalization_in='row',
+              normalization_out='row',
               diffusion_kwargs=dict(method='heat', t=5),
-              sparsification_kwargs=dict(method='topk', k=2, dim=1),
-              exact=True)
+              sparsification_kwargs=dict(method='topk', k=2,
+                                         dim=1), exact=True)
     data = gdc(data)
-    mat = to_dense_adj(data.edge_index, edge_attr=data.edge_attr).squeeze().cpu().numpy()
+    mat = to_dense_adj(data.edge_index, edge_attr=data.edge_attr).squeeze()
     row_sum = mat.sum(1)
-    assert np.all(mat >= -1e-8)
-    assert np.all(np.isclose(row_sum, 1) | np.isclose(row_sum, 0))
-    assert np.all((~np.isclose(mat, 0)).sum(1) == 2)
+    assert torch.all(mat >= -1e-8)
+    assert torch.all(
+        torch.isclose(row_sum, torch.tensor(1.0))
+        | torch.isclose(row_sum, torch.tensor(0.0)))
+    assert torch.all((~torch.isclose(mat, torch.tensor(0.0))).sum(1) == 2)

     data = Data(edge_index=edge_index, num_nodes=5)
-    gdc = GDC(self_loop_weight=1,
-              normalization_in='row', normalization_out='row',
+    gdc = GDC(self_loop_weight=1, normalization_in='row',
+              normalization_out='row',
               diffusion_kwargs=dict(method='coeff', coeffs=[0.8, 0.3, 0.1]),
-              sparsification_kwargs=dict(method='threshold', eps=0.1),
-              exact=True)
+              sparsification_kwargs=dict(method='threshold',
+                                         eps=0.1), exact=True)
     data = gdc(data)
-    mat = to_dense_adj(data.edge_index, edge_attr=data.edge_attr).squeeze().cpu().numpy()
+    mat = to_dense_adj(data.edge_index, edge_attr=data.edge_attr).squeeze()
     row_sum = mat.sum(1)
-    assert np.all(mat >= -1e-8)
-    assert np.all(np.isclose(row_sum, 1) | np.isclose(row_sum, 0))
+    assert torch.all(mat >= -1e-8)
+    assert torch.all(
+        torch.isclose(row_sum, torch.tensor(1.0))
+        | torch.isclose(row_sum, torch.tensor(0.0)))

     data = Data(edge_index=edge_index, num_nodes=5)
-    gdc = GDC(self_loop_weight=1,
-              normalization_in='sym', normalization_out='col',
+    gdc = GDC(self_loop_weight=1, normalization_in='sym',
+              normalization_out='col',
               diffusion_kwargs=dict(method='ppr', alpha=0.15, eps=1e-4),
-              sparsification_kwargs=dict(method='threshold', avg_degree=2),
-              exact=False)
+              sparsification_kwargs=dict(method='threshold',
+                                         avg_degree=2), exact=False)
     data = gdc(data)
-    mat = to_dense_adj(data.edge_index, edge_attr=data.edge_attr).squeeze().cpu().numpy()
-    mat = to_dense_adj(data.edge_index, edge_attr=data.edge_attr).squeeze().cpu().numpy()
+    mat = to_dense_adj(data.edge_index, edge_attr=data.edge_attr).squeeze()
     col_sum = mat.sum(0)
-    assert np.all(mat >= -1e-8)
-    assert np.all(np.isclose(col_sum, 1) | np.isclose(col_sum, 0))
+    assert torch.all(mat >= -1e-8)
+    assert torch.all(
+        torch.isclose(col_sum, torch.tensor(1.0))
+        | torch.isclose(col_sum, torch.tensor(0.0)))
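
Note: the test changes above are a mechanical numpy-to-torch translation, which is what lets the numpy import be dropped. A condensed sketch of the correspondence on a toy column-stochastic matrix (the matrix itself is made up for illustration):

import torch

mat = torch.tensor([[0.5, 0.0], [0.5, 1.0]])    # toy column-stochastic matrix

assert torch.all(mat >= -1e-8)                  # was: np.all(mat >= -1e-8)

col_sum = mat.sum(0)
# was: np.all(np.isclose(col_sum, 1) | np.isclose(col_sum, 0))
assert torch.all(torch.isclose(col_sum, torch.tensor(1.0))
                 | torch.isclose(col_sum, torch.tensor(0.0)))

sym = mat + mat.t()
assert torch.allclose(sym, sym.t())             # was: np.allclose(mat, mat.T)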
@@ -98,7 +98,8 @@ def forward(self, x, edge_index, edge_weight=None):
             self.cached_num_edges = edge_index.size(1)
             if self.normalize:
                 edge_index, norm = self.norm(edge_index, x.size(self.node_dim),
-                                             edge_weight, self.improved, x.dtype)
+                                             edge_weight, self.improved,
+                                             x.dtype)
             else:
                 norm = edge_weight
             self.cached_result = edge_index, norm
@@ -108,7 +109,7 @@ def forward(self, x, edge_index, edge_weight=None):
         return self.propagate(edge_index, x=x, norm=norm)

     def message(self, x_j, norm):
-        return norm.view(-1, 1) * x_j
+        return norm.view(-1, 1) * x_j if norm is not None else x_j

     def update(self, aggr_out):
         if self.bias is not None:
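
Note: the new guard in message covers the case where normalize=False and no edge_weight is supplied: norm is then set to edge_weight (None) in the branch above, and messages are passed through unscaled instead of failing on None.view. A minimal sketch of that usage (toy shapes, not from the commit):

import torch
from torch_geometric.nn import GCNConv

edge_index = torch.tensor([[0, 1, 2], [1, 2, 0]])
x = torch.randn(3, 8)

# With normalization disabled and no edge_weight given, norm is None inside
# message(), and each node simply aggregates its neighbors' features.
conv = GCNConv(8, 4, normalize=False)
out = conv(x, edge_index)
print(out.shape)  # torch.Size([3, 4])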
