Skip to content

Commit

Permalink
ITOP added
Browse files Browse the repository at this point in the history
  • Loading branch information
bing0037 committed Sep 26, 2023
1 parent 9c5da95 commit 6dab205
Show file tree
Hide file tree
Showing 4 changed files with 930 additions and 7 deletions.
34 changes: 27 additions & 7 deletions main.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,9 @@
import torchvision.datasets as datasets
import vgg

import sparselearning
from sparselearning.core import Masking, CosineDecay, LinearDecay

model_names = sorted(name for name in vgg.__dict__
if name.islower() and not name.startswith("__")
and name.startswith("vgg")
Expand Down Expand Up @@ -54,6 +57,9 @@
help='The directory used to save the trained models',
default='save_temp', type=str)

parser.add_argument('--multiplier', type=int, default=1, metavar='N',
help='extend training time by multiplier times')
sparselearning.core.add_sparse_args(parser)

best_prec1 = 0

Expand Down Expand Up @@ -130,11 +136,19 @@ def main():
validate(val_loader, model, criterion)
return

mask = None
if args.sparse:
decay = CosineDecay(args.death_rate, len(train_loader)*(args.epochs*args.multiplier))
mask = Masking(optimizer, death_rate=args.death_rate, death_mode=args.death, death_rate_decay=decay, growth_mode=args.growth,
redistribution_mode=args.redistribution, args=args)
        mask.add_module(model, sparse_init=args.sparse_init, density=args.density) # add mask (initialized as a 0 matrix) to represent the whole model. --libn


for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch)

# train for one epoch
train(train_loader, model, criterion, optimizer, epoch)
train(train_loader, model, criterion, optimizer, epoch, mask)

# evaluate on validation set
prec1 = validate(val_loader, model, criterion)
Expand All @@ -149,7 +163,7 @@ def main():
}, is_best, filename=os.path.join(args.save_dir, 'checkpoint_{}.tar'.format(epoch)))


def train(train_loader, model, criterion, optimizer, epoch):
def train(train_loader, model, criterion, optimizer, epoch, mask):
"""
Run one train epoch
"""
Expand All @@ -168,8 +182,8 @@ def train(train_loader, model, criterion, optimizer, epoch):
data_time.update(time.time() - end)

if args.cpu == False:
input = input.cuda(async=True)
target = target.cuda(async=True)
input = input.cuda()
target = target.cuda()
if args.half:
input = input.half()

Expand All @@ -180,7 +194,12 @@ def train(train_loader, model, criterion, optimizer, epoch):
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# optimizer.step()

if args.sparse:
mask.step()
else:
optimizer.step()

output = output.float()
loss = loss.float()
Expand Down Expand Up @@ -217,8 +236,8 @@ def validate(val_loader, model, criterion):
end = time.time()
for i, (input, target) in enumerate(val_loader):
if args.cpu == False:
input = input.cuda(async=True)
target = target.cuda(async=True)
input = input.cuda()
target = target.cuda()

if args.half:
input = input.half()
Expand Down Expand Up @@ -302,3 +321,4 @@ def accuracy(output, target, topk=(1,)):

if __name__ == '__main__':
main()

Empty file added sparselearning/__init__.py
Empty file.
Loading

0 comments on commit 6dab205

Please sign in to comment.