Skip to content

Commit

Permalink
Merge branch 'bench_amp' of https://github.com/vfdev-5/ignite into bench_amp
Browse files Browse the repository at this point in the history
  • Loading branch information
vfdev-5 committed Apr 12, 2020
2 parents ed0aeb9 + 81986c1 commit d327bd2
Show file tree
Hide file tree
Showing 5 changed files with 26 additions and 33 deletions.
5 changes: 1 addition & 4 deletions examples/contrib/cifar100_amp_benchmark/benchmark_fp32.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,10 +46,7 @@ def train_step(engine, batch):
timer.attach(trainer, step=Events.EPOCH_COMPLETED)
ProgressBar(persist=True).attach(trainer, output_transform=lambda out: {"batch loss": out})

metrics = {
"Accuracy": Accuracy(),
"Loss": Loss(criterion)
}
metrics = {"Accuracy": Accuracy(), "Loss": Loss(criterion)}

evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -53,10 +53,7 @@ def train_step(engine, batch):
timer.attach(trainer, step=Events.EPOCH_COMPLETED)
ProgressBar(persist=True).attach(trainer, output_transform=lambda out: {"batch loss": out})

metrics = {
"Accuracy": Accuracy(),
"Loss": Loss(criterion)
}
metrics = {"Accuracy": Accuracy(), "Loss": Loss(criterion)}

evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
import torch
from torch.nn import CrossEntropyLoss
from torch.optim import SGD

# Creates a GradScaler once at the beginning of training.
from torch.cuda.amp import GradScaler, autocast

Expand Down Expand Up @@ -62,10 +63,7 @@ def train_step(engine, batch):
timer.attach(trainer, step=Events.EPOCH_COMPLETED)
ProgressBar(persist=True).attach(trainer, output_transform=lambda out: {"batch loss": out})

metrics = {
"Accuracy": Accuracy(),
"Loss": Loss(criterion)
}
metrics = {"Accuracy": Accuracy(), "Loss": Loss(criterion)}

evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)

Expand Down
40 changes: 21 additions & 19 deletions examples/contrib/cifar100_amp_benchmark/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,33 +18,35 @@ def get_train_eval_loaders(path, batch_size=256):
Returns:
train_loader, test_loader, eval_train_loader
"""
train_transform = Compose([
Pad(4),
RandomCrop(32),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
RandomErasing()
])

test_transform = Compose([
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
train_transform = Compose(
[
Pad(4),
RandomCrop(32),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
RandomErasing(),
]
)

test_transform = Compose([ToTensor(), Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])

train_dataset = CIFAR100(root=path, train=True, transform=train_transform, download=True)
test_dataset = CIFAR100(root=path, train=False, transform=test_transform, download=False)

train_eval_indices = [random.randint(0, len(train_dataset) - 1) for i in range(len(test_dataset))]
train_eval_dataset = Subset(train_dataset, train_eval_indices)

train_loader = DataLoader(train_dataset, batch_size=batch_size, num_workers=12,
shuffle=True, drop_last=True, pin_memory=True)
train_loader = DataLoader(
train_dataset, batch_size=batch_size, num_workers=12, shuffle=True, drop_last=True, pin_memory=True
)

test_loader = DataLoader(test_dataset, batch_size=batch_size, num_workers=12,
shuffle=False, drop_last=False, pin_memory=True)
test_loader = DataLoader(
test_dataset, batch_size=batch_size, num_workers=12, shuffle=False, drop_last=False, pin_memory=True
)

eval_train_loader = DataLoader(train_eval_dataset, batch_size=batch_size, num_workers=12,
shuffle=False, drop_last=False, pin_memory=True)
eval_train_loader = DataLoader(
train_eval_dataset, batch_size=batch_size, num_workers=12, shuffle=False, drop_last=False, pin_memory=True
)

return train_loader, test_loader, eval_train_loader
3 changes: 1 addition & 2 deletions examples/notebooks/Cifar100_bench_amp.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -105,8 +105,7 @@
}
],
"source": [
"# !git clone https://github.com/pytorch/ignite.git /tmp/ignite\n",
"!rm -rf /tmp/ignite && git clone https://github.com/vfdev-5/ignite.git -b bench_amp /tmp/ignite\n",
"!git clone https://github.com/pytorch/ignite.git /tmp/ignite\n",
"scriptspath=\"/tmp/ignite/examples/contrib/cifar100_amp_benchmark/\"\n",
"setup=\"cd {} && export PYTHONPATH=$PWD:$PYTHONPATH\".format(scriptspath)"
]
Expand Down

0 comments on commit d327bd2

Please sign in to comment.