Commit

updated examples to pytorch 1.0

rusty1s committed Dec 14, 2018
1 parent 89cf500 commit 3f49306

Showing 3 changed files with 259 additions and 30 deletions.
55 changes: 25 additions & 30 deletions examples/enzymes_diff_pool.py
@@ -1,4 +1,5 @@
 import os.path as osp
+from math import ceil

 import torch
 import torch.nn.functional as F
@@ -18,12 +19,7 @@ def __call__(self, data):
 class MyTransform(object):
     def __call__(self, data):
         # Only use node attributes.
-        data.x = data.x[:, :-3]
-
-        # Add self loops.
-        arange = torch.arange(data.adj.size(-1), dtype=torch.long)
-        data.adj[arange, arange] = 1
-
+        data.x = data.x[:, -3:]
         return data


@@ -49,19 +45,18 @@ def __init__(self,
                  in_channels,
                  hidden_channels,
                  out_channels,
-                 lin=True,
-                 norm=True,
-                 norm_embed=True):
+                 normalize=False,
+                 add_loop=False,
+                 lin=True):
         super(GNN, self).__init__()

-        self.conv1 = DenseSAGEConv(in_channels, hidden_channels, norm,
-                                   norm_embed)
+        self.add_loop = add_loop
+
+        self.conv1 = DenseSAGEConv(in_channels, hidden_channels, normalize)
         self.bn1 = torch.nn.BatchNorm1d(hidden_channels)
-        self.conv2 = DenseSAGEConv(hidden_channels, hidden_channels, norm,
-                                   norm_embed)
+        self.conv2 = DenseSAGEConv(hidden_channels, hidden_channels, normalize)
         self.bn2 = torch.nn.BatchNorm1d(hidden_channels)
-        self.conv3 = DenseSAGEConv(hidden_channels, out_channels, norm,
-                                   norm_embed)
+        self.conv3 = DenseSAGEConv(hidden_channels, out_channels, normalize)
         self.bn3 = torch.nn.BatchNorm1d(out_channels)

         if lin is True:
@@ -78,13 +73,13 @@ def bn(self, i, x):
         x = x.view(batch_size, num_nodes, num_channels)
         return x

-    def forward(self, x, adj):
+    def forward(self, x, adj, mask=None):
         batch_size, num_nodes, in_channels = x.size()

         x0 = x
-        x1 = self.bn(1, F.relu(self.conv1(x0, adj)))
-        x2 = self.bn(2, F.relu(self.conv2(x1, adj)))
-        x3 = self.bn(3, F.relu(self.conv3(x2, adj)))
+        x1 = self.bn(1, F.relu(self.conv1(x0, adj, mask, self.add_loop)))
+        x2 = self.bn(2, F.relu(self.conv2(x1, adj, mask, self.add_loop)))
+        x3 = self.bn(3, F.relu(self.conv3(x2, adj, mask, self.add_loop)))

         x = torch.cat([x1, x2, x3], dim=-1)

@@ -98,26 +93,26 @@ class Net(torch.nn.Module):
     def __init__(self):
         super(Net, self).__init__()

-        self.gnn1_pool = GNN(18, 64, int(0.1 * max_nodes), norm=False)
-        self.gnn1_embed = GNN(18, 64, 64, lin=False, norm=False)
+        self.gnn1_pool = GNN(3, 64, ceil(0.1 * max_nodes), add_loop=True)
+        self.gnn1_embed = GNN(3, 64, 64, add_loop=True, lin=False)

-        self.gnn2_embed = GNN(3 * 64, 64, 64, lin=False, norm=False)
+        self.gnn2_embed = GNN(3 * 64, 64, 64, lin=False)

         self.lin1 = torch.nn.Linear(3 * 64, 64)
         self.lin2 = torch.nn.Linear(64, 6)

-    def forward(self, data):
-        x, adj = data.x, data.adj
-
-        s = self.gnn1_pool(x, adj)
-        x = self.gnn1_embed(x, adj)
-        x, adj, reg1 = dense_diff_pool(x, adj, s, data.mask)
+    def forward(self, x, adj, mask=None):
+        s = self.gnn1_pool(x, adj, mask)
+        x = self.gnn1_embed(x, adj, mask)
+
+        x, adj, reg = dense_diff_pool(x, adj, s, mask)
+
         x = self.gnn2_embed(x, adj)

         x = x.mean(dim=1)
         x = F.relu(self.lin1(x))
         x = self.lin2(x)
-        return F.log_softmax(x, dim=-1), reg1
+        return F.log_softmax(x, dim=-1), reg


 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
@@ -132,7 +127,7 @@ def train(epoch):
     for data in train_loader:
         data = data.to(device)
         optimizer.zero_grad()
-        output, reg = model(data)
+        output, reg = model(data.x, data.adj, data.mask)
         loss = F.nll_loss(output, data.y.view(-1)) + reg
         loss.backward()
         loss_all += data.y.size(0) * loss.item()
@@ -146,7 +141,7 @@ def test(loader):

     for data in loader:
         data = data.to(device)
-        pred = model(data)[0].max(dim=1)[1]
+        pred = model(data.x, data.adj, data.mask)[0].max(dim=1)[1]
         correct += pred.eq(data.y.view(-1)).sum().item()
     return correct / len(loader.dataset)

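With this update the DiffPool example no longer unpacks a Data object inside the model: the caller passes x, adj, and mask explicitly, and each DenseSAGEConv receives the mask plus an add_loop flag in place of the old norm/norm_embed arguments. A minimal sketch of the new calling convention, assuming the Net class and max_nodes value defined in the example above; the tensor shapes below are hypothetical:

import torch
import torch.nn.functional as F

batch_size, max_nodes, num_features = 8, 126, 3  # hypothetical padded batch

x = torch.randn(batch_size, max_nodes, num_features)         # dense, padded node features
adj = torch.rand(batch_size, max_nodes, max_nodes).round()   # dense adjacency matrices
mask = torch.ones(batch_size, max_nodes, dtype=torch.uint8)  # flags the real (non-padded) nodes

model = Net()
out, reg = model(x, adj, mask)  # tensors in, no Data object needed
loss = F.nll_loss(out, torch.randint(0, 6, (batch_size,))) + reg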
93 changes: 93 additions & 0 deletions examples/mnist_nn_conv.py
@@ -0,0 +1,93 @@
import os.path as osp

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.datasets import MNISTSuperpixels
import torch_geometric.transforms as T
from torch_geometric.data import DataLoader
from torch_geometric.utils import normalized_cut
from torch_geometric.nn import (NNConv, graclus, max_pool, max_pool_x,
global_mean_pool)

path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'MNIST')
train_dataset = MNISTSuperpixels(path, True, transform=T.Cartesian())
test_dataset = MNISTSuperpixels(path, False, transform=T.Cartesian())
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64)
d = train_dataset.data


def normalized_cut_2d(edge_index, pos):
row, col = edge_index
edge_attr = torch.norm(pos[row] - pos[col], p=2, dim=1)
return normalized_cut(edge_index, edge_attr, num_nodes=pos.size(0))


class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
n1 = nn.Sequential(nn.Linear(2, 25), nn.ReLU(), nn.Linear(25, 32))
self.conv1 = NNConv(d.num_features, 32, n1)

n2 = nn.Sequential(nn.Linear(2, 25), nn.ReLU(), nn.Linear(25, 2048))
self.conv2 = NNConv(32, 64, n2)

self.fc1 = torch.nn.Linear(64, 128)
self.fc2 = torch.nn.Linear(128, d.num_classes)

def forward(self, data):
data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))
weight = normalized_cut_2d(data.edge_index, data.pos)
cluster = graclus(data.edge_index, weight, data.x.size(0))
data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
weight = normalized_cut_2d(data.edge_index, data.pos)
cluster = graclus(data.edge_index, weight, data.x.size(0))
x, batch = max_pool_x(cluster, data.x, data.batch)

x = global_mean_pool(x, batch)
x = F.elu(self.fc1(x))
x = F.dropout(x, training=self.training)
return F.log_softmax(self.fc2(x), dim=1)


device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Net().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)


def train(epoch):
model.train()

if epoch == 16:
for param_group in optimizer.param_groups:
param_group['lr'] = 0.001

if epoch == 26:
for param_group in optimizer.param_groups:
param_group['lr'] = 0.0001

for data in train_loader:
data = data.to(device)
optimizer.zero_grad()
F.nll_loss(model(data), data.y).backward()
optimizer.step()


def test():
model.eval()
correct = 0

for data in test_loader:
data = data.to(device)
pred = model(data).max(1)[1]
correct += pred.eq(data.y).sum().item()
return correct / len(test_dataset)


for epoch in range(1, 31):
train(epoch)
test_acc = test()
print('Epoch: {:02d}, Test: {:.4f}'.format(epoch, test_acc))
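train() lowers the learning rate by hand at epochs 16 and 26. The same stepwise decay could be written with MultiStepLR from torch.optim.lr_scheduler, available in PyTorch 1.0; a sketch under the assumption that the manual drops are removed from train() and the scheduler is stepped once per epoch:

from torch.optim.lr_scheduler import MultiStepLR

optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
# Multiply the LR by 0.1 going into epochs 16 and 26: 0.01 -> 0.001 -> 0.0001.
scheduler = MultiStepLR(optimizer, milestones=[15, 25], gamma=0.1)

for epoch in range(1, 31):
    train(epoch)       # with the manual LR drops removed
    scheduler.step()
    print('Epoch: {:02d}, Test: {:.4f}'.format(epoch, test()))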
141 changes: 141 additions & 0 deletions examples/qm9_nn_conv.py
@@ -0,0 +1,141 @@
import os.path as osp

import torch
import torch.nn.functional as F
from torch.nn import Sequential, Linear, ReLU, GRU

import torch_geometric.transforms as T
from torch_geometric.datasets import QM9
from torch_geometric.nn import NNConv, Set2Set
from torch_geometric.data import DataLoader
from torch_geometric.utils import remove_self_loops

target = 0
dim = 73


class MyTransform(object):
def __call__(self, data):
# Pad features.
x = data.x
data.x = torch.cat([x, x.new_zeros(x.size(0), dim - x.size(1))], dim=1)

# Specify target.
data.y = data.y[:, target]
return data


class Complete(object):
def __call__(self, data):
device = data.edge_index.device

row = torch.arange(data.num_nodes, dtype=torch.long, device=device)
col = torch.arange(data.num_nodes, dtype=torch.long, device=device)

row = row.view(-1, 1).repeat(1, data.num_nodes).view(-1)
col = col.repeat(data.num_nodes)
edge_index = torch.stack([row, col], dim=0)

edge_attr = None
if data.edge_attr is not None:
idx = data.edge_index[0] * data.num_nodes + data.edge_index[1]
size = list(data.edge_attr.size())
size[0] = data.num_nodes * data.num_nodes
edge_attr = data.edge_attr.new_zeros(size)
edge_attr[idx] = data.edge_attr

edge_index, edge_attr = remove_self_loops(edge_index, edge_attr)
data.edge_attr = edge_attr
data.edge_index = edge_index

return data


path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'QM9')
transform = T.Compose([MyTransform(), Complete(), T.Distance()])
dataset = QM9(path, transform=transform).shuffle()

# Normalize targets to mean = 0 and std = 1.
mean = dataset.data.y[:, target].mean().item()
std = dataset.data.y[:, target].std().item()
dataset.data.y[:, target] = (dataset.data.y[:, target] - mean) / std

# Split datasets.
test_dataset = dataset[:10000]
val_dataset = dataset[10000:20000]
train_dataset = dataset[20000:]
test_loader = DataLoader(test_dataset, batch_size=64)
val_loader = DataLoader(val_dataset, batch_size=64)
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)


class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
nn = Sequential(Linear(5, 128), ReLU(), Linear(128, dim * dim))
self.conv = NNConv(dim, dim, nn, root_weight=False)
self.gru = GRU(dim, dim, batch_first=True)
        self.set2set = Set2Set(dim, processing_steps=3)
self.fc1 = torch.nn.Linear(2 * dim, dim)
self.fc2 = torch.nn.Linear(dim, 1)

def forward(self, data):
out = data.x
h = data.x.unsqueeze(0)

for i in range(3):
m = F.relu(self.conv(out, data.edge_index, data.edge_attr))
out, h = self.gru(m.unsqueeze(1), h)
out = out.squeeze(1)

out = self.set2set(out, data.batch)
out = F.relu(self.fc1(out))
out = self.fc2(out)
out = out.view(-1)
return out


device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Net().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode='min', factor=0.7, patience=5, min_lr=0.00001)


def train(epoch):
model.train()
loss_all = 0

for data in train_loader:
data = data.to(device)
optimizer.zero_grad()
loss = F.mse_loss(model(data), data.y)
loss.backward()
loss_all += loss.item() * data.num_graphs
optimizer.step()
return loss_all / len(train_loader.dataset)


def test(loader):
model.eval()
error = 0

for data in loader:
data = data.to(device)
error += (model(data) * std - data.y * std).abs().sum().item() # MAE
return error / len(loader.dataset)


best_val_error = None
for epoch in range(1, 301):
lr = scheduler.optimizer.param_groups[0]['lr']
loss = train(epoch)
val_error = test(val_loader)
scheduler.step(val_error)

if best_val_error is None or val_error <= best_val_error:
test_error = test(test_loader)
best_val_error = val_error

print('Epoch: {:03d}, LR: {:7f}, Loss: {:.7f}, Validation MAE: {:.7f}, '
'Test MAE: {:.7f},'.format(epoch, lr, loss, val_error, test_error))

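The Complete transform above rebuilds each molecule as a fully connected graph: it enumerates all N * N ordered node pairs, scatters any existing edge_attr into that dense layout via the index row * N + col, and strips self-loops, so that T.Distance can then attach a distance to every pair. A toy check of the indexing, assuming the Complete class defined above and a hypothetical 3-node graph with a single attributed edge:

import torch
from torch_geometric.data import Data

data = Data(x=torch.zeros(3, 1),                  # 3 nodes, dummy features
            edge_index=torch.tensor([[0], [1]]),  # one edge: 0 -> 1
            edge_attr=torch.tensor([[1.0]]))

data = Complete()(data)
print(data.edge_index)  # all 6 directed pairs among 3 nodes, self-loops removed
print(data.edge_attr)   # 1.0 on the (0, 1) pair, zeros elsewhere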