Commit

Update train.py
scyonggg committed Nov 10, 2022
1 parent 3bc7083 commit de3d5b7
Showing 1 changed file with 5 additions and 3 deletions. The change replaces two hardcoded scheduler constants (warmup_lr_init=1e-4 and decay_rate=0.9) with the new --init_lr and --decay_rate command-line arguments.
train.py (8 changes: 5 additions & 3 deletions)
@@ -28,7 +28,9 @@
 parser.add_argument('--loss', type=str, default='label_smooth', choices=['label_smooth'], help='loss function')
 parser.add_argument('--weight_decay', action='store_true', help='1-D. No bias decay (regularization)')
 parser.add_argument('--optimizer', type=str, default='SGD', choices=['SGD', 'AdamW'], help='Optimizer')
-parser.add_argument('--lr', type=float, default=0.04, help='initial learning rate')
+parser.add_argument('--lr', type=float, default=0.04, help='learning rate')
+parser.add_argument('--init_lr', type=float, default=0.001, help='initial learning rate when using learning rate scheduler')
+parser.add_argument('--decay_rate', type=float, default=0.9, help='learning rate decay rate when using multi-step LR scheduler')
 parser.add_argument('--lr_scheduler', type=str, default='cosinelr', choices=['cosinelr', 'steplr'], help='learning rate scheduler')
 parser.add_argument('--epochs', type=int, default=450, help='training epoches')
 parser.add_argument('--warm_t', type=int, default=5, help='warm up phase')
@@ -121,9 +123,9 @@

 # warmup_scheduler = WarmUpLR(optimizer, iter_per_epoch * args.warm)
 if args.lr_scheduler == 'cosinelr':
-    warmup_scheduler = CosineLRScheduler(optimizer, t_initial=args.epochs, warmup_t=args.warm_t, warmup_lr_init=1e-4)
+    warmup_scheduler = CosineLRScheduler(optimizer, t_initial=args.epochs, warmup_t=args.warm_t, warmup_lr_init=args.init_lr)
 elif args.lr_scheduler == 'steplr':
-    warmup_scheduler = StepLRScheduler(optimizer, decay_t=args.decay_t, warmup_t=args.warm_t, warmup_lr_init=1e-4, decay_rate=0.9)
+    warmup_scheduler = StepLRScheduler(optimizer, decay_t=args.decay_t, warmup_t=args.warm_t, warmup_lr_init=args.init_lr, decay_rate=args.decay_rate)

 #set up training phase learning rate scheduler
 # train_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=settings.MILESTONES)
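
For readers without the surrounding file, below is a minimal sketch of how the new flags plug into the schedulers after this commit. It assumes CosineLRScheduler and StepLRScheduler come from timm (the import is not visible in the diff); the stand-in model, the optimizer setup, and the --decay_t default are illustrative placeholders rather than values from the repository.

import argparse

import torch
from timm.scheduler import CosineLRScheduler, StepLRScheduler  # assumed source of the schedulers

parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, default=0.04, help='learning rate')
parser.add_argument('--init_lr', type=float, default=0.001, help='initial learning rate when using learning rate scheduler')
parser.add_argument('--decay_rate', type=float, default=0.9, help='learning rate decay rate when using multi-step LR scheduler')
parser.add_argument('--lr_scheduler', type=str, default='cosinelr', choices=['cosinelr', 'steplr'], help='learning rate scheduler')
parser.add_argument('--epochs', type=int, default=450, help='training epochs')
parser.add_argument('--warm_t', type=int, default=5, help='warm up phase')
parser.add_argument('--decay_t', type=int, default=30, help='epochs between step decays (hypothetical default)')
args = parser.parse_args()

model = torch.nn.Linear(8, 8)  # stand-in model
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)

if args.lr_scheduler == 'cosinelr':
    # Cosine annealing over args.epochs, warming up from args.init_lr for the first args.warm_t epochs.
    warmup_scheduler = CosineLRScheduler(optimizer, t_initial=args.epochs,
                                         warmup_t=args.warm_t, warmup_lr_init=args.init_lr)
elif args.lr_scheduler == 'steplr':
    # Step decay: multiply the learning rate by args.decay_rate every args.decay_t epochs,
    # after the same warmup from args.init_lr.
    warmup_scheduler = StepLRScheduler(optimizer, decay_t=args.decay_t,
                                       warmup_t=args.warm_t, warmup_lr_init=args.init_lr,
                                       decay_rate=args.decay_rate)

for epoch in range(args.epochs):
    # ... one epoch of training here ...
    warmup_scheduler.step(epoch + 1)  # timm schedulers are stepped once per epoch

With the defaults above, warmup now starts at 0.001 instead of the previously hardcoded 1e-4, and the step-decay factor 0.9 becomes tunable from the command line.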
