linting
rusty1s committed Jul 6, 2019
1 parent 55c2e7d commit 7198db0
Showing 2 changed files with 53 additions and 63 deletions.
57 changes: 26 additions & 31 deletions examples/colors_topk_pool.py
@@ -7,13 +7,12 @@
 from torch_geometric.datasets import SyntheticDataset
 from torch_geometric.transforms import HandleNodeAttention
 from torch_geometric.data import DataLoader
-from torch_geometric.nn import GraphConv, GINConv, TopKPooling
+from torch_geometric.nn import GINConv, TopKPooling
 from torch_geometric.nn import global_add_pool as gsum
 from torch_scatter import scatter_mean


-train_path = osp.join(osp.dirname(osp.realpath(__file__)), '..',
-                      'data', 'COLORS-3')
+train_path = osp.join(
+    osp.dirname(osp.realpath(__file__)), '..', 'data', 'COLORS-3')
 dataset = SyntheticDataset(train_path, name='COLORS-3', use_node_attr=True,
                            transform=HandleNodeAttention())

@@ -29,14 +28,13 @@ class Net(torch.nn.Module):
     def __init__(self):
         super(Net, self).__init__()

-        self.conv1 = GINConv(nn.Sequential(nn.Linear(train_dataset.
-                                                     num_features, 256),
-                                           nn.ReLU(),
-                                           nn.Linear(256, 64)))
+        self.conv1 = GINConv(
+            nn.Sequential(
+                nn.Linear(train_dataset.num_features, 256), nn.ReLU(),
+                nn.Linear(256, 64)))
         self.pool1 = TopKPooling(train_dataset.num_features, min_score=0.05)
-        self.conv2 = GINConv(nn.Sequential(nn.Linear(64, 256),
-                                           nn.ReLU(),
-                                           nn.Linear(256, 64)))
+        self.conv2 = GINConv(
+            nn.Sequential(nn.Linear(64, 256), nn.ReLU(), nn.Linear(256, 64)))

         self.lin = torch.nn.Linear(64, 1)  # regression

@@ -46,19 +44,19 @@ def forward(self, data):
         x_input = x
         x = F.relu(self.conv1(x_input, edge_index))

-        x, edge_index, _, batch, perm, score = self.pool1(x, edge_index,
-                                                          None, batch,
-                                                          attn_input=x_input)
+        x, edge_index, _, batch, perm, score = self.pool1(
+            x, edge_index, None, batch, attn_input=x_input)
         ratio = x.shape[0] / float(x_input.shape[0])

         x = F.relu(self.conv2(x, edge_index))
         x = gsum(x, batch)
         x = self.lin(x)

         # supervised node attention
-        attn_loss_batch = scatter_mean(F.kl_div(torch.log(score + 1e-14),
-                                                data.node_attention[perm],
-                                                reduction='none'), batch)
+        attn_loss_batch = scatter_mean(
+            F.kl_div(
+                torch.log(score + 1e-14), data.node_attention[perm],
+                reduction='none'), batch)

         return x, attn_loss_batch, ratio

@@ -69,10 +67,8 @@ def forward(self, data):
 # model.pool1.weight.data = torch.tensor([0., 1., 0., 0.]).view(1,4).to(device)

 print(model)
-print('model size: %d trainable parameters' %
-      np.sum([np.prod(p.size()) if p.requires_grad else 0
-              for p in model.parameters()]))
-
+print('model size: %d trainable parameters' % np.sum(
+    [np.prod(p.size()) if p.requires_grad else 0 for p in model.parameters()]))

 optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

@@ -85,7 +81,7 @@ def train(epoch):
         data = data.to(device)
         optimizer.zero_grad()
         output, attn_loss, _ = model(data)
-        loss = ((data.y - output.view_as(data.y)) ** 2 + 100*attn_loss).mean()
+        loss = ((data.y - output.view_as(data.y))**2 + 100 * attn_loss).mean()

         loss.backward()
         loss_all += data.num_graphs * loss.item()
@@ -118,17 +114,16 @@ def test(loader):

     # Test on three different subsets
     test_correct1 = test_correct[:n_test_each].sum()
-    test_correct2 = test_correct[n_test_each: 2*n_test_each].sum()
-    test_correct3 = test_correct[n_test_each*2:].sum()
-    assert len(test_correct) == n_test_each*3, len(test_correct)
+    test_correct2 = test_correct[n_test_each:2 * n_test_each].sum()
+    test_correct3 = test_correct[n_test_each * 2:].sum()
+    assert len(test_correct) == n_test_each * 3, len(test_correct)

     print('Epoch: {:03d}, Loss: {:.5f}, Train Acc: {:.3f}, Val Acc: {:.3f}, '
           'Test Acc Orig: {:.3f} ({}/{}), '
           'Test Acc Large: {:.3f} ({}/{}), '
           'Test Acc LargeC: {:.3f} ({}/{}), '
-          'Train/Val/Test Pool Ratio={:.3f}/{:.3f}/{:.3f}'.
-          format(epoch, loss, train_acc, val_acc,
-                 test_correct1 / n_test_each, test_correct1, n_test_each,
-                 test_correct2 / n_test_each, test_correct2, n_test_each,
-                 test_correct3 / n_test_each, test_correct3, n_test_each,
-                 train_ratio, val_ratio, test_ratio))
+          'Train/Val/Test Pool Ratio={:.3f}/{:.3f}/{:.3f}'.format(
+              epoch, loss, train_acc, val_acc, test_correct1 / n_test_each,
+              test_correct1, n_test_each, test_correct2 / n_test_each,
+              test_correct2, n_test_each, test_correct3 / n_test_each,
+              test_correct3, n_test_each, train_ratio, val_ratio, test_ratio))
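
For readers skimming the diff: the `attn_loss_batch` expression reformatted above computes a per-graph average of a node-level KL divergence. A minimal standalone sketch of the same computation, with made-up tensors (only `F.kl_div`, `scatter_mean`, and the `1e-14` guard come from the script):

import torch
import torch.nn.functional as F
from torch_scatter import scatter_mean

# Hypothetical batch: 5 pooled nodes spread over 2 graphs.
score = torch.tensor([0.5, 0.3, 0.2, 0.6, 0.4])   # pooling scores per node
target = torch.tensor([0.4, 0.4, 0.2, 0.5, 0.5])  # stand-in for data.node_attention[perm]
batch = torch.tensor([0, 0, 0, 1, 1])             # graph id of each node

# F.kl_div expects log-probabilities as its first argument; the
# 1e-14 offset guards against log(0), as in the script.
kl = F.kl_div(torch.log(score + 1e-14), target, reduction='none')

# Average the node-level divergences within each graph.
attn_loss_batch = scatter_mean(kl, batch)  # shape [2]: one loss per graph

In the script itself, `score` comes out of `self.pool1` and the target is `data.node_attention[perm]`, the ground-truth attention restricted to the kept nodes.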
59 changes: 27 additions & 32 deletions examples/triangles_sag_pool.py
@@ -8,15 +8,14 @@
 from torch_geometric.transforms import OneHotDegree, HandleNodeAttention
 from torch_geometric.transforms import Compose
 from torch_geometric.data import DataLoader
-from torch_geometric.nn import GraphConv, GINConv, SAGPooling
+from torch_geometric.nn import GINConv, SAGPooling
 from torch_geometric.nn import global_max_pool as gmp
 from torch_scatter import scatter_mean


 transform = Compose([HandleNodeAttention(), OneHotDegree(max_degree=14)])

-train_path = osp.join(osp.dirname(osp.realpath(__file__)),
-                      '..', 'data', 'TRIANGLES')
+train_path = osp.join(
+    osp.dirname(osp.realpath(__file__)), '..', 'data', 'TRIANGLES')
 dataset = SyntheticDataset(train_path, name='TRIANGLES', use_node_attr=True,
                            transform=transform)

@@ -32,32 +31,30 @@ class Net(torch.nn.Module):
     def __init__(self):
         super(Net, self).__init__()

-        self.conv1 = GINConv(nn.Sequential(nn.Linear(train_dataset.
-                                                     num_features, 64),
-                                           nn.ReLU(),
-                                           nn.Linear(64, 64)))
+        self.conv1 = GINConv(
+            nn.Sequential(
+                nn.Linear(train_dataset.num_features, 64), nn.ReLU(),
+                nn.Linear(64, 64)))
         self.pool1 = SAGPooling(64, min_score=0.001, gnn='GCN')
-        self.conv2 = GINConv(nn.Sequential(nn.Linear(64, 64),
-                                           nn.ReLU(),
-                                           nn.Linear(64, 64)))
+        self.conv2 = GINConv(
+            nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 64)))

         self.pool2 = SAGPooling(64, min_score=0.001, gnn='GCN')

-        self.conv3 = GINConv(nn.Sequential(nn.Linear(64, 64),
-                                           nn.ReLU(),
-                                           nn.Linear(64, 64)))
+        self.conv3 = GINConv(
+            nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 64)))

         self.lin = torch.nn.Linear(64, 1)  # regression

     def forward(self, data):
         x, edge_index, batch = data.x, data.edge_index, data.batch

         x = F.relu(self.conv1(x, edge_index))
-        x, edge_index, _, batch, perm, score = self.pool1(x, edge_index,
-                                                          None, batch)
+        x, edge_index, _, batch, perm, score = self.pool1(
+            x, edge_index, None, batch)
         x = F.relu(self.conv2(x, edge_index))
-        x, edge_index, _, batch, perm, score = self.pool2(x, edge_index,
-                                                          None, batch)
+        x, edge_index, _, batch, perm, score = self.pool2(
+            x, edge_index, None, batch)
         ratio = x.shape[0] / float(data.x.shape[0])

         x = F.relu(self.conv3(x, edge_index))
@@ -66,20 +63,19 @@ def forward(self, data):
         x = self.lin(x)

         # supervised node attention
-        attn_loss_batch = scatter_mean(F.kl_div(torch.log(score + 1e-14),
-                                                data.node_attention[perm],
-                                                reduction='none'), batch)
+        attn_loss_batch = scatter_mean(
+            F.kl_div(
+                torch.log(score + 1e-14), data.node_attention[perm],
+                reduction='none'), batch)

         return x, attn_loss_batch, ratio


 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 model = Net().to(device)
 print(model)
-print('model size: %d trainable parameters' %
-      np.sum([np.prod(p.size()) if p.requires_grad else 0
-              for p in model.parameters()]))
-
+print('model size: %d trainable parameters' % np.sum(
+    [np.prod(p.size()) if p.requires_grad else 0 for p in model.parameters()]))

 optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

@@ -93,7 +89,7 @@ def train(epoch):
         optimizer.zero_grad()
         output, attn_loss, _ = model(data)

-        loss = ((data.y - output.view_as(data.y)) ** 2 + 100*attn_loss).mean()
+        loss = ((data.y - output.view_as(data.y))**2 + 100 * attn_loss).mean()

         loss.backward()
         loss_all += data.num_graphs * loss.item()
@@ -127,13 +123,12 @@ def test(loader):
     # Test on two different subsets
     test_correct1 = test_correct[:n_test_each].sum()
     test_correct2 = test_correct[n_test_each:].sum()
-    assert len(test_correct) == n_test_each*2, len(test_correct)
+    assert len(test_correct) == n_test_each * 2, len(test_correct)

     print('Epoch: {:03d}, Loss: {:.5f}, Train Acc: {:.3f}, Val Acc: {:.3f}, '
           'Test Acc Orig: {:.3f} ({}/{}), '
           'Test Acc Large: {:.3f} ({}/{}), '
-          'Train/Val/Test Pool Ratio={:.3f}/{:.3f}/{:.3f}'.
-          format(epoch, loss, train_acc, val_acc,
-                 test_correct1 / n_test_each, test_correct1, n_test_each,
-                 test_correct2 / n_test_each, test_correct2, n_test_each,
-                 train_ratio, val_ratio, test_ratio))
+          'Train/Val/Test Pool Ratio={:.3f}/{:.3f}/{:.3f}'.format(
+              epoch, loss, train_acc, val_acc, test_correct1 / n_test_each,
+              test_correct1, n_test_each, test_correct2 / n_test_each,
+              test_correct2, n_test_each, train_ratio, val_ratio, test_ratio))
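
Both examples threshold on `min_score` (0.05 for TopKPooling above, 0.001 for SAGPooling here) instead of pooling a fixed ratio of nodes. As a rough sketch only, not the actual torch_geometric implementation (which also softmax-normalizes the scores per graph and guarantees at least one surviving node), the selection step amounts to:

import torch

def min_score_select(score, min_score):
    # Keep the indices of nodes whose score clears the threshold;
    # the pooled graph is then assembled from these rows alone.
    return (score > min_score).nonzero(as_tuple=False).view(-1)

score = torch.tensor([0.0500, 0.2000, 0.0004, 0.7500])
perm = min_score_select(score, min_score=0.05)
print(perm)  # tensor([1, 3]): 0.05 itself fails the strict '>' test

This is also why both scripts report a pool `ratio`: with a score threshold, the number of kept nodes varies with the data rather than being fixed in advance.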
