Skip to content
Permalink
Browse files

small changes to the tensorboard example

  • Loading branch information
rusty1s committed Feb 25, 2020
1 parent a72fd98 commit fdcafaff1bcbaba3665a27c9c630017f38e3ff15
Showing with 14 additions and 28 deletions.
  1. +1 −0 .gitignore
  2. +13 −28 examples/{tensorboard.py → logging.py}
@@ -3,6 +3,7 @@ data/
build/
dist/
alpha/
runs/
.cache/
.eggs/
*.egg-info/
@@ -1,17 +1,15 @@
import os.path as osp
import argparse

import torch
import torch.nn.functional as F
from torch_geometric.datasets import Planetoid
import torch_geometric.transforms as T
from torch_geometric.nn import GCNConv, ChebConv # noqa
from torch_geometric.nn import GCNConv

from torch.utils.tensorboard import SummaryWriter


dataset = 'Cora'
# Resolve the data directory relative to this script's location so the
# example works no matter what the current working directory is.
path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', dataset)
dataset = Planetoid(path, dataset, T.NormalizeFeatures())
data = dataset[0]  # Planetoid datasets contain exactly one graph.

@@ -21,14 +19,8 @@ def __init__(self):
super(Net, self).__init__()
self.conv1 = GCNConv(dataset.num_features, 16, cached=True)
self.conv2 = GCNConv(16, dataset.num_classes, cached=True)
# self.conv1 = ChebConv(data.num_features, 16, K=2)
# self.conv2 = ChebConv(16, data.num_features, K=2)

self.reg_params = self.conv1.parameters()
self.non_reg_params = self.conv2.parameters()

def forward(self, x, edge_index):
# x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr
x = F.relu(self.conv1(x, edge_index, None))
x = F.dropout(x, training=self.training)
x = self.conv2(x, edge_index, None)
@@ -37,20 +29,20 @@ def forward(self, x, edge_index):

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model, data = Net().to(device), data.to(device)
# A single parameter group with uniform L2 regularization; the earlier
# per-layer weight-decay groups were removed in favor of this simpler form.
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)


def train():
    """Run one full-batch optimization step on the training nodes.

    Returns:
        float: the NLL loss over the training split for this epoch.
    """
    model.train()
    optimizer.zero_grad()
    # One forward pass over the whole graph; the loss is evaluated only on
    # the nodes selected by ``data.train_mask``.
    logits = model(data.x, data.edge_index)
    loss = F.nll_loss(logits[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()
    return loss.item()


@torch.no_grad()
def test():
model.eval()
logits, accs = model(data.x, data.edge_index), []
@@ -60,16 +52,11 @@ def test():
accs.append(acc)
return accs

model(data.x, data.edge_index)

tb_path = 'runs/test'
writer = SummaryWriter(tb_path)
model(data.x, data.edge_index)
writer = SummaryWriter()
writer.add_graph(model, [data.x, data.edge_index])

writers_acc = []
for name in ['train_acc', 'best_val_acc', 'test_acc']:
writers_acc.append(SummaryWriter(osp.join(tb_path,name)))

best_val_acc = test_acc = 0
for epoch in range(1, 201):
train_loss = train()
@@ -80,9 +67,7 @@ def test():
log = 'Epoch: {:03d}, Train: {:.4f}, Val: {:.4f}, Test: {:.4f}'
print(log.format(epoch, train_acc, best_val_acc, test_acc))

writer.add_scalar('Train Loss', train_loss, epoch)
for i, value in zip(range(3),[train_acc, val_acc, test_acc]):
writers_acc[i].add_scalar('Accuracy', value, epoch)


# tensorboard --logdir=runs
writer.add_scalar('Loss/train', train_loss, epoch)
writer.add_scalar('Accuracy/train', train_acc, epoch)
writer.add_scalar('Accuracy/val', val_acc, epoch)
writer.add_scalar('Accuracy/test', test_acc, epoch)

0 comments on commit fdcafaf

Please sign in to comment.
You can’t perform that action at this time.