import argparse
import os, os.path as osp
import random
import shutil
import time
import warnings
import logging

import numpy as np
import torchvision
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets

from params import ContiguousParams


def str2bool(v):
    if v.lower() in ["yes", "true", "t", "y", "1"]:
        return True
    elif v.lower() in ["no", "false", "f", "n", "0"]:
        return False
    else:
        raise argparse.ArgumentTypeError("Unsupported value encountered.")


cudnn.benchmark = True

parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--data', metavar='DIR', default='/dataset/public/ImageNetOrigin/',
                    help='path to dataset')
parser.add_argument('-j', '--workers', default=16, type=int, metavar='N',
                    help='number of data loading workers (default: 16)')
parser.add_argument('--epochs', default=1, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=512, type=int, metavar='N',
                    help='mini-batch size (default: 512), this is the total '
                         'batch size of all GPUs on the current node when '
                         'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--lr_policy', default='step', type=str, metavar='LR_Policy',
                    help='learning rate policy', dest='lr_policy')
parser.add_argument('--lr_decay', '--learning-rate-decay', default=0.1, type=float,
                    metavar='LR_Decay', help='decay rate of initial learning rate',
                    dest='lr_decay')
parser.add_argument('--step_epoch', default=30, type=int, metavar='step_epoch',
                    help='decay learning rate every step_epoch', dest='step_epoch')
parser.add_argument('--min_crop_size', default=0.08, type=float, metavar='min_crop_size',
                    help='minimum crop size for scale jitter', dest='min_crop_size')
parser.add_argument('--color_jitter', dest='color_jitter', action='store_true',
                    help='train the model with color jitter')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float, metavar='W',
                    help='weight decay (default: 1e-4)', dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=500, type=int, metavar='N',
                    help='print frequency (default: 500)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--checkpoint', default='', type=str,
                    help='name of checkpoint')
parser.add_argument('--local_rank', default=0, type=int)
parser.add_argument('--world-size', default=1, type=int,
                    help='number of nodes for distributed training')
parser.add_argument('--rank', default=0, type=int,
                    help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://127.0.0.1:29500', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
                    help='distributed backend')
default="nccl", type=str, help="distributed backend" ) parser.add_argument('--seed', default=None, type=int, help='seed for initializing training. ') parser.add_argument('--gpu', default=None, type=int, help='GPU id to use.') parser.add_argument( "--multiprocessing-distributed", default=True, ) parser.add_argument('--save_dir', default='./time', type=str, help='GPU id to use.') parser.add_argument('--nesterov', default=False,type=str2bool, help='use pre-trained model') parser.add_argument('--name', default='hrnet_c') parser.add_argument('--code', default='') parser.add_argument('--fc', default=1000, type=int) parser.add_argument('--cont', default=False) def setup_logger(log_dir, rank=0): logging.basicConfig( format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO) logger = logging.getLogger(__name__) # don't log results for the non-master process if rank > 0: logger.setLevel('ERROR') return logger timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) log_file = osp.join(log_dir, '{}.log'.format(timestamp)) file_handler = logging.FileHandler(log_file, 'w') file_handler.setFormatter( logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')) file_handler.setLevel(logging.INFO) logger.addHandler(file_handler) return logger best_acc1 = 0 args = parser.parse_args() if not osp.exists(args.save_dir): os.makedirs(args.save_dir) def main(): global best_prec1, args args.gpu = 0 args.world_size = 1 args.distributed = True if args.distributed: rank = int(os.environ['RANK']) num_gpus = torch.cuda.device_count() torch.cuda.set_device(rank % num_gpus) args.gpu = rank % num_gpus # torch.cuda.set_device(args.gpu) torch.distributed.init_process_group(backend='nccl') # args.world_size = torch.distributed.get_world_size() # print(args.world_size) # logger = logging.getLogger('main_worker') # logger = setup_logger(args.save_dir) # added by sdh # logger.info(args) # if args.gpu is not None: # print("Use GPU: {} for training".format(args.gpu)) # # if args.distributed: # if args.dist_url == "env://" and args.rank == -1: # args.rank = int(os.environ["RANK"]) # if args.multiprocessing_distributed: # # For multiprocessing distributed training, rank needs to be the # # global rank among all the processes # args.rank = args.rank * ngpus_per_node + gpu # dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, # world_size=args.world_size, rank=args.rank) logger = setup_logger(args.save_dir, args.rank) # added by sdh print(args) model = torchvision.models.resnet50() if args.cont: parameters_c = ContiguousParams(model.parameters()) optimizer = torch.optim.SGD(parameters_c.contiguous(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=args.nesterov) else: optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=args.nesterov) if args.distributed: # For multiprocessing distributed, DistributedDataParallel constructor # should always set the single device scope, otherwise, # DistributedDataParallel will use all available devices. 
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = 64
            args.workers = 2
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            logger.info("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            logger.info("=> loaded checkpoint '{}' (epoch {})"
                        .format(args.resume, checkpoint['epoch']))
        else:
            logger.info("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val2')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    if not args.color_jitter:
        train_dataset = datasets.ImageFolder(
            traindir,
            transforms.Compose([
                # slightly less aggressive scale augmentation for ShuffleNetV2
                transforms.RandomResizedCrop(224, scale=(args.min_crop_size, 1.0)),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ]))
    else:
        train_dataset = datasets.ImageFolder(
            traindir,
            transforms.Compose([
                # slightly less aggressive scale augmentation for ShuffleNetV2
                transforms.RandomResizedCrop(224, scale=(args.min_crop_size, 1.0)),
                transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ]))

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)

    # val_loader = torch.utils.data.DataLoader(
    #     datasets.ImageFolder(valdir, transforms.Compose([
    #         transforms.Resize(256),
    #         transforms.CenterCrop(224),
    #         transforms.ToTensor(),
    #         normalize,
    #     ])),
    #     batch_size=args.batch_size, shuffle=False,
    #     num_workers=args.workers, pin_memory=True)

    # if args.evaluate:
    #     validate(val_loader, model, criterion, args, logger)
    #     return

    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        # lr = adjust_learning_rate(optimizer, epoch, args)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args, logger)

        # # evaluate on validation set
        # acc1 = validate(val_loader, model, criterion, args, logger)
        #
        # # remember best acc@1 and save checkpoint
        # is_best = acc1 > best_acc1
        # best_acc1 = max(acc1, best_acc1)
        #
        # if not args.multiprocessing_distributed or (args.multiprocessing_distributed
        #         and args.rank % ngpus_per_node == 0):
        #     save_checkpoint(
        #         {'epoch': epoch + 1,
        #          'state_dict': model.state_dict(),
        #          'best_acc1': best_acc1,
        #          'optimizer': optimizer.state_dict()},
        #         is_best,
        #         args.save_dir
        #     )


def train(train_loader, model, criterion, optimizer, epoch, args, logger):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # only time the first 100 iterations, then stop the epoch early
        if i > 100:
            return

        # measure data loading time
        data_time.update(time.time() - end)
        lr = adjust_learning_rate(optimizer, epoch, args, i, len(train_loader))

        if args.gpu is not None:
            input = input.cuda(args.gpu, non_blocking=True)
        target = target.cuda(args.gpu, non_blocking=True)

        # compute output
        if i < 100:
            torch.cuda.synchronize()
            f_s = time.time()
        output = model(input)
        if i < 100:
            torch.cuda.synchronize()
            f_e = time.time()
            logger.info('forward time: {:.3f}'.format(f_e - f_s))
        loss = criterion(output, target)

        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(acc1[0], input.size(0))
        top5.update(acc5[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        if i < 100:
            torch.cuda.synchronize()
            b_s = time.time()
        loss.backward()
        if i < 100:
            torch.cuda.synchronize()
            b_e = time.time()
            logger.info('backward time: {:.3f}'.format(b_e - b_s))
        optimizer.step()
        if i < 100:
            torch.cuda.synchronize()
            s_e = time.time()
            logger.info('step time: {:.3f}'.format(s_e - b_e))

        # measure elapsed time
        # batch_time.update(time.time() - end)
        # end = time.time()

        if i % args.print_freq == 0:
            # logger = logging.getLogger('train')
            logger.info('Epoch: [{0}][{1}/{2}]\t'
                        'lr: {3:.5f}, '
                        'time: {batch_time.val:.3f} ({batch_time.avg:.3f}), '
                        'data: {data_time.val:.3f} ({data_time.avg:.3f}), '
                        'loss: {loss.val:.4f} ({loss.avg:.4f}), '
                        'Acc@1: {top1.val:.3f} ({top1.avg:.3f}), '
                        'Acc@5: {top5.val:.3f} ({top5.avg:.3f})'.format(
                            epoch, i, len(train_loader), lr,
                            batch_time=batch_time, data_time=data_time,
                            loss=losses, top1=top1, top5=top5))


def validate(val_loader, model, criterion, args, logger):
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # logger = logging.getLogger('validate')

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        end = time.time()
        for i, (input, target) in enumerate(val_loader):
            if args.gpu is not None:
                input = input.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)

            # compute output
            output = model(input)
            loss = criterion(output, target)

            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(acc1[0], input.size(0))
            top5.update(acc5[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0:
                logger.info('Test: [{0}/{1}]\t'
                            'time: {batch_time.val:.3f} ({batch_time.avg:.3f}), '
                            'loss: {loss.val:.4f} ({loss.avg:.4f}), '
                            'Acc@1: {top1.val:.3f} ({top1.avg:.3f}), '
                            'Acc@5: {top5.val:.3f} ({top5.avg:.3f})'.format(
                                i, len(val_loader), batch_time=batch_time,
                                loss=losses, top1=top1, top5=top5))

        logger.info(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
                    .format(top1=top1, top5=top5))

    return top1.avg


def save_checkpoint(state, is_best, save_dir):
    # filename='checkpoint.pth.tar'
    save_name = osp.join(save_dir, 'checkpoint.pth.tar')
    torch.save(state, save_name)
    if is_best:
        shutil.copyfile(save_name, osp.join(save_dir, 'model_best.pth.tar'))
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def adjust_learning_rate(optimizer, epoch, args, iteration=None, iters_per_epoch=None):
    """Sets the learning rate according to args.lr_policy."""
    # lr = args.lr * (0.1 ** (epoch // 30))
    if args.lr_policy == 'step':
        lr = args.lr * (args.lr_decay ** (epoch // args.step_epoch))
    elif args.lr_policy == 'linear':
        lr = args.lr * (1 - epoch / args.epochs)
    elif args.lr_policy == 'cosine':
        total_iters = args.epochs * iters_per_epoch
        curr_iter = float(epoch * iters_per_epoch + iteration)
        lr = args.lr * 0.5 * (1 + np.cos(np.pi * curr_iter / total_iters))
    elif args.lr_policy == 'step_with_warm_up':
        warmup_iter = 1000
        curr_iter = float(epoch * iters_per_epoch + iteration)
        if curr_iter <= warmup_iter:
            lr = args.lr * curr_iter / warmup_iter
        else:
            lr = args.lr * (args.lr_decay ** (epoch // args.step_epoch))
    else:
        raise ValueError('Unknown lr_policy: {}'.format(args.lr_policy))

    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr


def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            # reshape (rather than view) handles the non-contiguous slice of `correct`
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res


if __name__ == '__main__':
    main()
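
# Launch sketch (not part of the original script; the script filename and dataset path
# below are placeholders). main() reads the global rank from the RANK environment
# variable and calls init_process_group(backend='nccl') with the default env://
# initialization, so it expects the launcher to set RANK, WORLD_SIZE, MASTER_ADDR and
# MASTER_PORT, e.g. with torchrun on a single 8-GPU node:
#
#   torchrun --nproc_per_node=8 train_resnet50.py --data /path/to/imagenet --cont True
#
# Passing --cont True builds the SGD optimizer over ContiguousParams, which (assuming
# the local `params` module is the contiguous-params utility) copies all parameters
# into one contiguous buffer so the optimizer step updates a single flat tensor
# instead of many small ones.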