fix #60

Merged (11 commits) on Oct 10, 2019

Changes from all commits
113 changes: 39 additions & 74 deletions config_utils/re_train_autodeeplab.py
@@ -1,79 +1,44 @@
import argparse

def obtain_retrain_autodeeplab_args():
parser = argparse.ArgumentParser(description="PyTorch DeeplabV3Plus Training")
parser.add_argument('--backbone', type=str, default='resnet',
choices=['resnet', 'xception', 'drn', 'mobilenet'],
help='backbone name (default: resnet)')
parser.add_argument('--out-stride', type=int, default=16,
help='network output stride (default: 16)')
parser.add_argument('--dataset', type=str, default='pascal',
choices=['pascal', 'coco', 'cityscapes'],
help='dataset name (default: pascal)')
parser.add_argument('--use-sbd', action='store_true', default=True,
help='whether to use SBD dataset (default: True)')
parser.add_argument('--workers', type=int, default=4,
metavar='N', help='dataloader threads')
parser.add_argument('--base-size', type=int, default=513,
help='base image size')
parser.add_argument('--crop-size', type=int, default=513,
help='crop image size')
parser.add_argument('--sync-bn', type=bool, default=None,
help='whether to use sync bn (default: auto)')
parser.add_argument('--freeze-bn', type=bool, default=False,
help='whether to freeze bn parameters (default: False)')
parser.add_argument('--loss-type', type=str, default='ce',
choices=['ce', 'focal'],
help='loss func type (default: ce)')
# training hyper params
parser.add_argument('--epochs', type=int, default=None, metavar='N',
help='number of epochs to train (default: auto)')
parser.add_argument('--start_epoch', type=int, default=0,
metavar='N', help='start epochs (default:0)')
parser.add_argument('--batch-size', type=int, default=None,
metavar='N', help='input batch size for \
training (default: auto)')
parser.add_argument('--test-batch-size', type=int, default=None,
metavar='N', help='input batch size for \
testing (default: auto)')
parser.add_argument('--use-balanced-weights', action='store_true', default=False,
help='whether to use balanced weights (default: False)')
# optimizer params
parser.add_argument('--lr', type=float, default=None, metavar='LR',
help='learning rate (default: auto)')
parser.add_argument('--lr-scheduler', type=str, default='poly',
choices=['poly', 'step', 'cos'],
help='lr scheduler mode: (default: poly)')
parser.add_argument('--momentum', type=float, default=0.9,
metavar='M', help='momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=5e-4,
metavar='M', help='w-decay (default: 5e-4)')
parser.add_argument('--nesterov', action='store_true', default=False,
help='whether use nesterov (default: False)')
# cuda, seed and logging
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--gpu-ids', type=str, default='0',
help='use which gpu to train, must be a \
comma-separated list of integers only (default=0)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
# checking point
parser.add_argument('--resume', type=str, default=None,
help='put the path to resuming file if needed')
parser.add_argument('--checkname', type=str, default=None,
help='set the checkpoint name')
# finetuning pre-trained models
parser.add_argument('--ft', action='store_true', default=False,
help='finetuning on a different dataset')
# evaluation option
parser.add_argument('--eval-interval', type=int, default=1,
help='evaluation interval (default: 1)')
parser.add_argument('--no-val', action='store_true', default=False,
help='skip validation during training')

def obtain_retrain_autodeeplab_args():
parser = argparse.ArgumentParser(description="PyTorch Autodeeplabv3+ Training")
parser.add_argument('--train', action='store_true', default=True, help='training mode')
parser.add_argument('--exp', type=str, default='bnlr7e-3', help='name of experiment')
parser.add_argument('--gpu', type=str, default='0', help='test time gpu device id')
parser.add_argument('--backbone', type=str, default='autodeeplab', help='backbone name (e.g. autodeeplab, resnet101)')
parser.add_argument('--dataset', type=str, default='cityscapes', help='pascal or cityscapes')
parser.add_argument('--groups', type=int, default=None, help='num of groups for group normalization')
parser.add_argument('--epochs', type=int, default=4000, help='num of training epochs')
parser.add_argument('--batch_size', type=int, default=14, help='batch size')
parser.add_argument('--base_lr', type=float, default=0.05, help='base learning rate')
parser.add_argument('--warmup_start_lr', type=float, default=5e-6, help='warm up learning rate')
parser.add_argument('--lr-step', type=float, default=None)
parser.add_argument('--warmup-iters', type=int, default=1000)
parser.add_argument('--min-lr', type=float, default=None)
parser.add_argument('--last_mult', type=float, default=1.0, help='learning rate multiplier for last layers')
parser.add_argument('--scratch', action='store_true', default=False, help='train from scratch')
parser.add_argument('--freeze_bn', action='store_true', default=False, help='freeze batch normalization parameters')
parser.add_argument('--weight_std', action='store_true', default=False, help='weight standardization')
parser.add_argument('--beta', action='store_true', default=False, help='resnet101 beta')
parser.add_argument('--crop_size', type=int, default=769, help='image crop size')
parser.add_argument('--resize', type=int, default=769, help='image resize size')
parser.add_argument('--workers', type=int, default=4, help='number of data loading workers')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
parser.add_argument('--resume', type=str, default=None, help='put the path to resuming file if needed')
parser.add_argument('--filter_multiplier', type=int, default=20)
parser.add_argument('--dist', type=bool, default=False)
parser.add_argument('--autodeeplab', type=str, default='train')
parser.add_argument('--block_multiplier', type=int, default=5)
parser.add_argument('--use-ABN', default=True, type=bool, help='whether use ABN')
parser.add_argument('--affine', default=False, type=bool, help='whether use affine in BN')

parser.add_argument('--port', default=6000, type=int)
parser.add_argument('--max-iteration', default=1000000, type=int)
parser.add_argument('--net_arch', default=None, type=str)
parser.add_argument('--cell_arch', default=None, type=str)
parser.add_argument('--criterion', default='Ohem', type=str)
parser.add_argument('--initial-fm', default=None, type=int)
parser.add_argument('--mode', default='poly', type=str, help='how lr decline')
parser.add_argument('--local_rank', dest='local_rank', type=int, default=-1)
args = parser.parse_args()
return args
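A quick way to sanity-check the rewritten parser is to call it in isolation. A minimal sketch, assuming the repo root is on the Python path; the flag values below are illustrative only:

import sys

from config_utils.re_train_autodeeplab import obtain_retrain_autodeeplab_args

# obtain_retrain_autodeeplab_args() parses sys.argv internally, so set it
# explicitly before calling; these values are illustrative only.
sys.argv = ['re_train_autodeeplab.py', '--batch_size', '8', '--epochs', '100']
args = obtain_retrain_autodeeplab_args()
print(args.backbone, args.batch_size, args.epochs)  # -> autodeeplab 8 100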
1 change: 0 additions & 1 deletion config_utils/re_train_deeplab_v3plus.py
@@ -72,7 +72,6 @@ def obtain_retrain_deeplab_v3plus_args():
help='evaluation interval (default: 1)')
parser.add_argument('--no-val', action='store_true', default=False,
help='skip validation during training')

parser.add_argument('--use-ABN', default=True, type=bool, help='whether use ABN')
parser.add_argument('--affine', default=False, type=bool, help='whether use affine in BN')

3 changes: 2 additions & 1 deletion config_utils/search_args.py
@@ -97,7 +97,8 @@ def obtain_search_args():
parser.add_argument('--no-val', action='store_true', default=False,
help='skip validation during training')

parser.add_argument('--use-ABN', default=True, type=bool, help='whether use ABN')
parser.add_argument('--use-ABN', default=False, type=bool, help='whether use ABN')
parser.add_argument('--affine', default=False, type=bool, help='whether use affine in BN')
parser.add_argument('--dist', default=False, type=bool, help='whether use dist')
args = parser.parse_args()
return args
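A note on the --use-ABN, --affine and --dist flags added here and in the other config files: argparse's type=bool is a known pitfall, because bool('False') is True, so any non-empty string value enables the flag. A str2bool converter is the usual workaround; the helper below is a suggested alternative, not part of this PR:

import argparse


def str2bool(v):
    # bool('False') == True, so map the string explicitly.
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', '0'):
        return False
    raise argparse.ArgumentTypeError('boolean value expected, got %r' % v)


parser = argparse.ArgumentParser()
parser.add_argument('--use-ABN', type=str2bool, default=True, help='whether to use ABN')
print(parser.parse_args(['--use-ABN', 'False']).use_ABN)  # False, as intended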
148 changes: 92 additions & 56 deletions dataloaders/__init__.py
@@ -1,66 +1,102 @@
from dataloaders.datasets import cityscapes, kd, coco, combine_dbs, pascal, sbd
from torch.utils.data import DataLoader
import torch.utils.data.distributed


def make_data_loader(args, **kwargs):
if args.dist:
print("=> Using Distribued Sampler")
if args.dataset == 'cityscapes':
if args.autodeeplab == 'search':
train_set1, train_set2 = cityscapes.twoTrainSeg(args)
num_class = train_set1.NUM_CLASSES
sampler1 = torch.utils.data.distributed.DistributedSampler(train_set1)
sampler2 = torch.utils.data.distributed.DistributedSampler(train_set2)
train_loader1 = DataLoader(train_set1, batch_size=args.batch_size, shuffle=False, sampler=sampler1, **kwargs)
train_loader2 = DataLoader(train_set2, batch_size=args.batch_size, shuffle=False, sampler=sampler2, **kwargs)

elif args.autodeeplab == 'train':
train_set = cityscapes.CityscapesSegmentation(args, split='retrain')
num_class = train_set.NUM_CLASSES
sampler1 = torch.utils.data.distributed.DistributedSampler(train_set)
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=False, sampler=sampler1, **kwargs)

else:
raise Exception('autodeeplab param not set properly')

val_set = cityscapes.CityscapesSegmentation(args, split='val')
test_set = cityscapes.CityscapesSegmentation(args, split='test')
sampler3 = torch.utils.data.distributed.DistributedSampler(val_set)
sampler4 = torch.utils.data.distributed.DistributedSampler(test_set)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, sampler=sampler3, **kwargs)
test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, sampler=sampler4, **kwargs)

if args.autodeeplab == 'search':
return train_loader1, train_loader2, val_loader, test_loader, num_class
elif args.autodeeplab == 'train':
return train_loader, num_class
else:
raise NotImplementedError

else:
if args.dataset == 'pascal':
train_set = pascal.VOCSegmentation(args, split='train')
val_set = pascal.VOCSegmentation(args, split='val')
if args.use_sbd:
sbd_train = sbd.SBDSegmentation(args, split=['train', 'val'])
train_set = combine_dbs.CombineDBs([train_set, sbd_train], excluded=[val_set])

num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = None

return train_loader, train_loader, val_loader, test_loader, num_class

elif args.dataset == 'cityscapes':
if args.autodeeplab == 'search':
train_set1, train_set2 = cityscapes.twoTrainSeg(args)
num_class = train_set1.NUM_CLASSES
train_loader1 = DataLoader(train_set1, batch_size=args.batch_size, shuffle=True, **kwargs)
train_loader2 = DataLoader(train_set2, batch_size=args.batch_size, shuffle=True, **kwargs)
elif args.autodeeplab == 'train':
train_set = cityscapes.CityscapesSegmentation(args, split='train')
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
else:
raise Exception('autodeeplab param not set properly')
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = None

val_set = cityscapes.CityscapesSegmentation(args, split='val')
test_set = cityscapes.CityscapesSegmentation(args, split='test')
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, **kwargs)
return train_loader, train_loader, val_loader, test_loader, num_class

if args.autodeeplab == 'search':
return train_loader1, train_loader2, val_loader, test_loader, num_class
elif args.autodeeplab == 'train':
return train_loader, val_loader, test_loader, num_class


elif args.dataset == 'cityscapes':
if args.autodeeplab == 'search':
train_set1, train_set2 = cityscapes.twoTrainSeg(args)
num_class = train_set1.NUM_CLASSES
train_loader1 = DataLoader(train_set1, batch_size=args.batch_size, shuffle=True, **kwargs)
train_loader2 = DataLoader(train_set2, batch_size=args.batch_size, shuffle=True, **kwargs)
elif args.autodeeplab == 'train':
train_set = cityscapes.CityscapesSegmentation(args, split='retrain')
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
else:
raise Exception('autodeeplab param not set properly')

val_set = cityscapes.CityscapesSegmentation(args, split='val')
test_set = cityscapes.CityscapesSegmentation(args, split='test')
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, **kwargs)

if args.autodeeplab == 'search':
return train_loader1, train_loader2, val_loader, test_loader, num_class
elif args.autodeeplab == 'train':
return train_loader, num_class



elif args.dataset == 'coco':
train_set = coco.COCOSegmentation(args, split='train')
val_set = coco.COCOSegmentation(args, split='val')
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = None
return train_loader, train_loader, val_loader, test_loader, num_class

elif args.dataset == 'kd':
train_set = kd.CityscapesSegmentation(args, split='train')
val_set = kd.CityscapesSegmentation(args, split='val')
test_set = kd.CityscapesSegmentation(args, split='test')
num_class = train_set.NUM_CLASSES
train_loader1 = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
train_loader2 = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, **kwargs)

return train_loader1, train_loader2, val_loader, test_loader, num_class
else:
raise NotImplementedError
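For context, a minimal sketch of how the new args.dist branch might be driven in retraining mode. The Namespace below is a stand-in for the parsed flags (a real run needs the full flag set, e.g. crop_size and resize), and the process-group setup assumes the usual torch.distributed launcher environment variables:

import argparse

import torch.distributed as dist

from dataloaders import make_data_loader

# Stand-in for the parsed flags; the dataset classes read more fields
# (crop_size, resize, ...) than are shown here.
args = argparse.Namespace(dist=True, dataset='cityscapes',
                          autodeeplab='train', batch_size=14)
dist.init_process_group(backend='nccl', init_method='env://')
train_loader, num_class = make_data_loader(args, num_workers=4, pin_memory=True)

for epoch in range(5):
    # DistributedSampler shards the data per rank; reseed it every epoch
    # so all processes reshuffle consistently.
    train_loader.sampler.set_epoch(epoch)
    for sample in train_loader:
        pass  # forward/backward goes here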
4 changes: 1 addition & 3 deletions dataloaders/datasets/cityscapes.py
@@ -1,10 +1,8 @@
import os
import numpy as np
import scipy.misc as m
from PIL import Image
from torch.utils import data
from mypath import Path
from torchvision import transforms
from dataloaders import custom_transforms as tr


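The scipy.misc import dropped above is most likely the long-deprecated SciPy image I/O path (scipy.misc.imread was deprecated and later removed around SciPy 1.2); PIL, which the file already imports, covers the same need. A minimal sketch of the equivalent read, with illustrative paths:

import numpy as np
from PIL import Image

# Equivalent of a scipy.misc-based read (paths are illustrative).
img = np.array(Image.open('leftImg8bit/train/city/x.png').convert('RGB'),
               dtype=np.float32)
lbl = np.array(Image.open('gtFine/train/city/x_labelIds.png'), dtype=np.uint8)
print(img.shape, lbl.shape)  # e.g. (1024, 2048, 3) (1024, 2048)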
3 changes: 1 addition & 2 deletions mypath.py
@@ -6,8 +6,7 @@ def db_root_dir(dataset):
elif dataset == 'sbd':
return '/path/to/datasets/benchmark_RELEASE/' # folder that contains dataset/.
elif dataset == 'cityscapes':
# return '/data/deeplearning/cityscapes/' # folder that contains leftImg8bit/
return r'E:\BaiduNetdiskDownload\cityscapes' # folder that contains leftImg8bit/
return '/data/deeplearning/cityscapes/' # folder that contains leftImg8bit/
elif dataset == 'kd':
return '/data/deeplearning/cityscapes/'
elif dataset == 'coco':
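The revert above swaps a hard-coded Windows path back to the repo's Linux placeholder. A quick check that the root resolves as expected after checkout; the printed path is still a placeholder that has to point at a local Cityscapes copy (the folder containing leftImg8bit/), e.g. via a symlink:

from mypath import Path

# Should print the placeholder '/data/deeplearning/cityscapes/'; edit
# mypath.py or symlink that location to a real Cityscapes download.
print(Path.db_root_dir('cityscapes'))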