Merge pull request #18 from adynathos/master
Adjustments for Python 3
zijundeng committed Nov 16, 2017
2 parents 308aa09 + 8af5f9f commit 74af97f
Showing 19 changed files with 81 additions and 81 deletions.
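
Taken together, the diff applies three Python 3 compatibility patterns across the repository: print statements become print() calls, dict.iteritems() becomes dict.items(), and implicit relative imports become explicit ones (from . import ..., from ..utils import ...). A minimal runnable sketch of the first two patterns, using made-up values rather than anything from the repository:

mapping = {1: 'road', 2: 'car'}      # hypothetical dictionary, for illustration only
for k, v in mapping.items():         # Python 2 code used mapping.iteritems()
    print('%d -> %s' % (k, v))       # Python 2 code used the print statement form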
4 changes: 2 additions & 2 deletions datasets/__init__.py
@@ -1,2 +1,2 @@
-import cityscapes
-import voc
+from . import cityscapes
+from . import voc
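
Python 3 dropped implicit relative imports, so a bare import cityscapes inside the datasets package no longer resolves to the sibling module; the explicit from . import form is required (and also works under Python 2 with from __future__ import absolute_import). A hypothetical caller, assuming the repository root is on sys.path; the 'fine'/'train' arguments are illustrative values for the quality and mode parameters shown in the next file, not verified defaults:

from datasets import cityscapes
train_set = cityscapes.CityScapes('fine', 'train')   # (quality, mode) -- illustrative values only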
4 changes: 2 additions & 2 deletions datasets/cityscapes.py
@@ -53,7 +53,7 @@ class CityScapes(data.Dataset):
def __init__(self, quality, mode, joint_transform=None, transform=None, target_transform=None):
self.imgs = make_dataset(quality, mode)
if len(self.imgs) == 0:
-raise (RuntimeError('Found 0 images, please check the data set'))
+raise RuntimeError('Found 0 images, please check the data set')
self.quality = quality
self.mode = mode
self.joint_transform = joint_transform
@@ -72,7 +72,7 @@ def __getitem__(self, index):

mask = np.array(mask)
mask_copy = mask.copy()
-for k, v in self.id_to_trainid.iteritems():
+for k, v in self.id_to_trainid.items():
mask_copy[mask == k] = v
mask = Image.fromarray(mask_copy.astype(np.uint8))

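
Two independent fixes here. dict.iteritems() does not exist in Python 3; .items() works in both versions (a list in Python 2, a lazy view in Python 3). The raise change is purely stylistic: raise (RuntimeError(...)) is legal in both versions because the outer parentheses merely group the expression, so only redundant parentheses are dropped; the identical cleanup appears in datasets/voc.py below. A small self-contained sketch of the remapping loop, with a made-up id_to_trainid table:

import numpy as np

id_to_trainid = {7: 0, 8: 1, 255: 19}             # hypothetical label mapping
mask = np.array([[7, 8], [255, 7]], dtype=np.uint8)
mask_copy = mask.copy()
for k, v in id_to_trainid.items():                # .iteritems() would raise AttributeError on Python 3
    mask_copy[mask == k] = v                      # remap 7->0, 8->1, 255->19 element-wise
print(mask_copy)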
2 changes: 1 addition & 1 deletion datasets/voc.py
@@ -64,7 +64,7 @@ class VOC(data.Dataset):
def __init__(self, mode, joint_transform=None, transform=None, target_transform=None):
self.imgs = make_dataset(mode)
if len(self.imgs) == 0:
-raise (RuntimeError('Found 0 images, please check the data set'))
+raise RuntimeError('Found 0 images, please check the data set')
self.mode = mode
self.joint_transform = joint_transform
self.transform = transform
4 changes: 2 additions & 2 deletions eval/eval_voc.py
@@ -21,7 +21,7 @@

def main():
net = PSPNet(num_classes=voc.num_classes).cuda()
-print 'load model ' + args['snapshot']
+print('load model ' + args['snapshot'])
net.load_state_dict(torch.load(os.path.join(ckpt_path, args['exp_name'], args['snapshot'])))
net.eval()

@@ -48,7 +48,7 @@ def main():
prediction = voc.colorize_mask(prediction)
prediction.save(os.path.join(ckpt_path, args['exp_name'], 'test', img_name + '.png'))

-print '%d / %d' % (vi + 1, len(test_loader))
+print('%d / %d' % (vi + 1, len(test_loader)))


if __name__ == '__main__':
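
print is a function in Python 3, so both the string-concatenation form and the %-formatted form simply gain parentheses; the formatting expressions themselves are unchanged. A standalone sketch with placeholder values (the snapshot name and counts are invented):

snapshot = 'epoch_30_loss_0.5000.pth'    # hypothetical snapshot filename
print('load model ' + snapshot)

vi, total = 0, 10                        # hypothetical loop index and loader length
print('%d / %d' % (vi + 1, total))       # prints: 1 / 10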
2 changes: 1 addition & 1 deletion models/fcn16s.py
@@ -2,7 +2,7 @@
from torch import nn
from torchvision import models

-from utils import get_upsampling_weight
+from ..utils import get_upsampling_weight
from .config import vgg16_caffe_path


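
The same one-line change is applied to fcn32s.py, fcn8s.py, gcn.py, psp_net.py, seg_net.py and u_net.py below: the Python 2 implicit-relative from utils import ... becomes the explicit from ..utils import .... The two-dot form resolves utils relative to the parent of the models package, so it assumes the code is imported as a subpackage of a common root package rather than with models/ placed directly on sys.path. A sketch of the assumed layout (the root package name is illustrative):

# root_pkg/                    <- assumed importable package containing both subpackages
#     models/fcn16s.py         <- this file, imported as root_pkg.models.fcn16s
#     utils/__init__.py        <- provides get_upsampling_weight
from ..utils import get_upsampling_weight   # up one level from models/, then into utils/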
2 changes: 1 addition & 1 deletion models/fcn32s.py
@@ -2,7 +2,7 @@
from torch import nn
from torchvision import models

-from utils import get_upsampling_weight
+from ..utils import get_upsampling_weight
from .config import vgg16_caffe_path


2 changes: 1 addition & 1 deletion models/fcn8s.py
@@ -2,7 +2,7 @@
from torch import nn
from torchvision import models

-from utils import get_upsampling_weight
+from ..utils import get_upsampling_weight
from .config import vgg16_path, vgg16_caffe_path


2 changes: 1 addition & 1 deletion models/gcn.py
@@ -3,7 +3,7 @@
from torch import nn
from torchvision import models

-from utils import initialize_weights
+from ..utils import initialize_weights
from .config import res152_path


4 changes: 2 additions & 2 deletions models/psp_net.py
@@ -3,8 +3,8 @@
from torch import nn
from torchvision import models

-from utils import initialize_weights
-from utils.misc import Conv2dDeformable
+from ..utils import initialize_weights
+from ..utils.misc import Conv2dDeformable
from .config import res101_path


2 changes: 1 addition & 1 deletion models/seg_net.py
@@ -2,7 +2,7 @@
from torch import nn
from torchvision import models

-from utils import initialize_weights
+from ..utils import initialize_weights
from .config import vgg19_bn_path


2 changes: 1 addition & 1 deletion models/u_net.py
@@ -2,7 +2,7 @@
import torch.nn.functional as F
from torch import nn

-from utils import initialize_weights
+from ..utils import initialize_weights


class _EncoderBlock(nn.Module):
18 changes: 9 additions & 9 deletions train/cityscapes-fcn (caffe vgg)/train.py
@@ -47,7 +47,7 @@ def main():
curr_epoch = 1
args['best_record'] = {'epoch': 0, 'val_loss': 1e10, 'acc': 0, 'acc_cls': 0, 'mean_iu': 0, 'fwavacc': 0}
else:
-print 'training resumes from ' + args['snapshot']
+print('training resumes from ' + args['snapshot'])
net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'])))
split_snapshot = args['snapshot'].split('_')
curr_epoch = int(split_snapshot[1]) + 1
@@ -141,8 +141,8 @@ def train(train_loader, net, criterion, optimizer, epoch, train_args):
writer.add_scalar('train_loss', train_loss.avg, curr_iter)

if (i + 1) % train_args['print_freq'] == 0:
-print '[epoch %d], [iter %d / %d], [train loss %.5f]' % (
-epoch, i + 1, len(train_loader), train_loss.avg)
+print('[epoch %d], [iter %d / %d], [train loss %.5f]' % (
+epoch, i + 1, len(train_loader), train_loss.avg))


def validate(val_loader, net, criterion, optimizer, epoch, train_args, restore, visualize):
@@ -209,15 +209,15 @@ def validate(val_loader, net, criterion, optimizer, epoch, train_args, restore,
val_visual = vutils.make_grid(val_visual, nrow=3, padding=5)
writer.add_image(snapshot_name, val_visual)

-print '-----------------------------------------------------------------------------------------------------------'
-print '[epoch %d], [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f]' % (
-epoch, val_loss.avg, acc, acc_cls, mean_iu, fwavacc)
+print('-----------------------------------------------------------------------------------------------------------')
+print('[epoch %d], [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f]' % (
+epoch, val_loss.avg, acc, acc_cls, mean_iu, fwavacc))

-print 'best record: [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f], [epoch %d]' % (
+print('best record: [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f], [epoch %d]' % (
train_args['best_record']['val_loss'], train_args['best_record']['acc'], train_args['best_record']['acc_cls'],
-train_args['best_record']['mean_iu'], train_args['best_record']['fwavacc'], train_args['best_record']['epoch'])
+train_args['best_record']['mean_iu'], train_args['best_record']['fwavacc'], train_args['best_record']['epoch']))

-print '-----------------------------------------------------------------------------------------------------------'
+print('-----------------------------------------------------------------------------------------------------------')

writer.add_scalar('val_loss', val_loss.avg, epoch)
writer.add_scalar('acc', acc, epoch)
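
The remaining train scripts below receive the identical treatment: every Python 2 print statement becomes a print() call, including the ones whose format arguments span several lines, where the closing parenthesis of the % argument tuple is followed by a second ')' that closes the print() call itself. A compact runnable sketch with invented values:

epoch, i, n_batches, avg_loss = 3, 49, 200, 0.73125     # hypothetical training state
print('[epoch %d], [iter %d / %d], [train loss %.5f]' % (
    epoch, i + 1, n_batches, avg_loss))                 # note the doubled '))'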
18 changes: 9 additions & 9 deletions train/cityscapes-fcn/train.py
@@ -47,7 +47,7 @@ def main():
curr_epoch = 1
args['best_record'] = {'epoch': 0, 'val_loss': 1e10, 'acc': 0, 'acc_cls': 0, 'mean_iu': 0, 'fwavacc': 0}
else:
-print 'training resumes from ' + args['snapshot']
+print('training resumes from ' + args['snapshot'])
net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'])))
split_snapshot = args['snapshot'].split('_')
curr_epoch = int(split_snapshot[1]) + 1
@@ -135,8 +135,8 @@ def train(train_loader, net, criterion, optimizer, epoch, train_args):
writer.add_scalar('train_loss', train_loss.avg, curr_iter)

if (i + 1) % train_args['print_freq'] == 0:
-print '[epoch %d], [iter %d / %d], [train loss %.5f]' % (
-epoch, i + 1, len(train_loader), train_loss.avg)
+print('[epoch %d], [iter %d / %d], [train loss %.5f]' % (
+epoch, i + 1, len(train_loader), train_loss.avg))


def validate(val_loader, net, criterion, optimizer, epoch, train_args, restore, visualize):
@@ -203,15 +203,15 @@ def validate(val_loader, net, criterion, optimizer, epoch, train_args, restore,
val_visual = vutils.make_grid(val_visual, nrow=3, padding=5)
writer.add_image(snapshot_name, val_visual)

-print '-----------------------------------------------------------------------------------------------------------'
-print '[epoch %d], [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f]' % (
-epoch, val_loss.avg, acc, acc_cls, mean_iu, fwavacc)
+print('-----------------------------------------------------------------------------------------------------------')
+print('[epoch %d], [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f]' % (
+epoch, val_loss.avg, acc, acc_cls, mean_iu, fwavacc))

-print 'best record: [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f], [epoch %d]' % (
+print('best record: [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f], [epoch %d]' % (
train_args['best_record']['val_loss'], train_args['best_record']['acc'], train_args['best_record']['acc_cls'],
-train_args['best_record']['mean_iu'], train_args['best_record']['fwavacc'], train_args['best_record']['epoch'])
+train_args['best_record']['mean_iu'], train_args['best_record']['fwavacc'], train_args['best_record']['epoch']))

-print '-----------------------------------------------------------------------------------------------------------'
+print('-----------------------------------------------------------------------------------------------------------')

writer.add_scalar('val_loss', val_loss.avg, epoch)
writer.add_scalar('acc', acc, epoch)
20 changes: 10 additions & 10 deletions train/cityscapes-psp_net/train.py
@@ -48,7 +48,7 @@ def main():
curr_epoch = 1
args['best_record'] = {'epoch': 0, 'val_loss': 1e10, 'acc': 0, 'acc_cls': 0, 'mean_iu': 0, 'fwavacc': 0}
else:
-print 'training resumes from ' + args['snapshot']
+print('training resumes from ' + args['snapshot'])
net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'])))
split_snapshot = args['snapshot'].split('_')
curr_epoch = int(split_snapshot[1]) + 1
@@ -156,9 +156,9 @@ def train(train_loader, net, criterion, optimizer, curr_epoch, train_args, val_l
writer.add_scalar('lr', optimizer.param_groups[1]['lr'], curr_iter)

if (i + 1) % train_args['print_freq'] == 0:
-print '[epoch %d], [iter %d / %d], [train main loss %.5f], [train aux loss %.5f]. [lr %.10f]' % (
+print('[epoch %d], [iter %d / %d], [train main loss %.5f], [train aux loss %.5f]. [lr %.10f]' % (
curr_epoch, i + 1, len(train_loader), train_main_loss.avg, train_aux_loss.avg,
-optimizer.param_groups[1]['lr'])
+optimizer.param_groups[1]['lr']))
if curr_iter >= train_args['max_iter']:
return
validate(val_loader, net, criterion, optimizer, curr_epoch, train_args, restore, visualize)
@@ -190,7 +190,7 @@ def validate(val_loader, net, criterion, optimizer, epoch, train_args, restore,
inputs_all.append(None)
gts_all[vi, :, :] = gts.data.squeeze_(0).cpu().numpy()

-print 'validating: %d / %d' % (vi + 1, len(val_loader))
+print('validating: %d / %d' % (vi + 1, len(val_loader)))

acc, acc_cls, mean_iu, fwavacc = evaluate(predictions_all, gts_all, cityscapes.num_classes)

@@ -227,15 +227,15 @@ def validate(val_loader, net, criterion, optimizer, epoch, train_args, restore,
val_visual = vutils.make_grid(val_visual, nrow=3, padding=5)
writer.add_image(snapshot_name, val_visual)

-print '-----------------------------------------------------------------------------------------------------------'
-print '[epoch %d], [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f]' % (
-epoch, val_loss.avg, acc, acc_cls, mean_iu, fwavacc)
+print('-----------------------------------------------------------------------------------------------------------')
+print('[epoch %d], [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f]' % (
+epoch, val_loss.avg, acc, acc_cls, mean_iu, fwavacc))

-print 'best record: [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f], [epoch %d]' % (
+print('best record: [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f], [epoch %d]' % (
train_args['best_record']['val_loss'], train_args['best_record']['acc'], train_args['best_record']['acc_cls'],
-train_args['best_record']['mean_iu'], train_args['best_record']['fwavacc'], train_args['best_record']['epoch'])
+train_args['best_record']['mean_iu'], train_args['best_record']['fwavacc'], train_args['best_record']['epoch']))

-print '-----------------------------------------------------------------------------------------------------------'
+print('-----------------------------------------------------------------------------------------------------------')

writer.add_scalar('val_loss', val_loss.avg, epoch)
writer.add_scalar('acc', acc, epoch)
18 changes: 9 additions & 9 deletions train/voc-fcn (caffe vgg)/train.py
@@ -42,7 +42,7 @@ def main(train_args):
curr_epoch = 1
train_args['best_record'] = {'epoch': 0, 'val_loss': 1e10, 'acc': 0, 'acc_cls': 0, 'mean_iu': 0, 'fwavacc': 0}
else:
-print 'training resumes from ' + train_args['snapshot']
+print('training resumes from ' + train_args['snapshot'])
net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, train_args['snapshot'])))
split_snapshot = train_args['snapshot'].split('_')
curr_epoch = int(split_snapshot[1]) + 1
@@ -128,9 +128,9 @@ def train(train_loader, net, criterion, optimizer, epoch, train_args):
writer.add_scalar('train_loss', train_loss.avg, curr_iter)

if (i + 1) % train_args['print_freq'] == 0:
-print '[epoch %d], [iter %d / %d], [train loss %.5f]' % (
+print('[epoch %d], [iter %d / %d], [train loss %.5f]' % (
epoch, i + 1, len(train_loader), train_loss.avg
-)
+))


def validate(val_loader, net, criterion, optimizer, epoch, train_args, restore, visualize):
@@ -193,15 +193,15 @@ def validate(val_loader, net, criterion, optimizer, epoch, train_args, restore,
val_visual = vutils.make_grid(val_visual, nrow=3, padding=5)
writer.add_image(snapshot_name, val_visual)

-print '--------------------------------------------------------------------'
-print '[epoch %d], [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f]' % (
-epoch, val_loss.avg, acc, acc_cls, mean_iu, fwavacc)
+print('--------------------------------------------------------------------')
+print('[epoch %d], [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f]' % (
+epoch, val_loss.avg, acc, acc_cls, mean_iu, fwavacc))

-print 'best record: [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f], [epoch %d]' % (
+print('best record: [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f], [epoch %d]' % (
train_args['best_record']['val_loss'], train_args['best_record']['acc'], train_args['best_record']['acc_cls'],
-train_args['best_record']['mean_iu'], train_args['best_record']['fwavacc'], train_args['best_record']['epoch'])
+train_args['best_record']['mean_iu'], train_args['best_record']['fwavacc'], train_args['best_record']['epoch']))

-print '--------------------------------------------------------------------'
+print('--------------------------------------------------------------------')

writer.add_scalar('val_loss', val_loss.avg, epoch)
writer.add_scalar('acc', acc, epoch)
18 changes: 9 additions & 9 deletions train/voc-fcn/train.py
@@ -42,7 +42,7 @@ def main(train_args):
curr_epoch = 1
train_args['best_record'] = {'epoch': 0, 'val_loss': 1e10, 'acc': 0, 'acc_cls': 0, 'mean_iu': 0, 'fwavacc': 0}
else:
-print 'training resumes from ' + train_args['snapshot']
+print('training resumes from ' + train_args['snapshot'])
net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, train_args['snapshot'])))
split_snapshot = train_args['snapshot'].split('_')
curr_epoch = int(split_snapshot[1]) + 1
@@ -124,9 +124,9 @@ def train(train_loader, net, criterion, optimizer, epoch, train_args):
writer.add_scalar('train_loss', train_loss.avg, curr_iter)

if (i + 1) % train_args['print_freq'] == 0:
-print '[epoch %d], [iter %d / %d], [train loss %.5f]' % (
+print('[epoch %d], [iter %d / %d], [train loss %.5f]' % (
epoch, i + 1, len(train_loader), train_loss.avg
-)
+))


def validate(val_loader, net, criterion, optimizer, epoch, train_args, restore, visualize):
@@ -189,15 +189,15 @@ def validate(val_loader, net, criterion, optimizer, epoch, train_args, restore,
val_visual = vutils.make_grid(val_visual, nrow=3, padding=5)
writer.add_image(snapshot_name, val_visual)

-print '--------------------------------------------------------------------'
-print '[epoch %d], [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f]' % (
-epoch, val_loss.avg, acc, acc_cls, mean_iu, fwavacc)
+print('--------------------------------------------------------------------')
+print('[epoch %d], [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f]' % (
+epoch, val_loss.avg, acc, acc_cls, mean_iu, fwavacc))

-print 'best record: [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f], [epoch %d]' % (
+print('best record: [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f], [epoch %d]' % (
train_args['best_record']['val_loss'], train_args['best_record']['acc'], train_args['best_record']['acc_cls'],
-train_args['best_record']['mean_iu'], train_args['best_record']['fwavacc'], train_args['best_record']['epoch'])
+train_args['best_record']['mean_iu'], train_args['best_record']['fwavacc'], train_args['best_record']['epoch']))

-print '--------------------------------------------------------------------'
+print('--------------------------------------------------------------------')

writer.add_scalar('val_loss', val_loss.avg, epoch)
writer.add_scalar('acc', acc, epoch)