examples/contrib/cifar10/main.py (35 changes: 32 additions & 3 deletions)
@@ -12,7 +12,7 @@
 import ignite.distributed as idist
 from ignite.contrib.engines import common
 from ignite.contrib.handlers import PiecewiseLinear
-from ignite.engine import Engine, Events, create_supervised_evaluator
+from ignite.engine import Engine, Events
 from ignite.handlers import Checkpoint, DiskSaver, global_step_from_engine
 from ignite.metrics import Accuracy, Loss
 from ignite.utils import manual_seed, setup_logger
@@ -83,8 +83,8 @@ def training(local_rank, config):
 
     # We define two evaluators as they wont have exactly similar roles:
     # - `evaluator` will save the best model based on validation score
-    evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)
-    train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)
+    evaluator = create_evaluator(model, metrics=metrics, config=config)
+    train_evaluator = create_evaluator(model, metrics=metrics, config=config)
 
     def run_validation(engine):
         epoch = trainer.state.epoch
@@ -193,6 +193,8 @@ def run(
         del config["spawn_kwargs"]
 
     spawn_kwargs["nproc_per_node"] = nproc_per_node
+    if backend == "xla-tpu" and with_amp:
+        raise RuntimeError("The value of with_amp should be False if backend is xla")
 
     with idist.Parallel(backend=backend, **spawn_kwargs) as parallel:
 
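The new guard fails fast before idist.Parallel spawns any worker, since CUDA-style AMP (torch.cuda.amp.autocast) is not meant to be used with the xla-tpu backend. Read as a standalone validation step, a minimal sketch (the helper name validate_backend_amp is hypothetical, not part of this PR):

# Hypothetical helper mirroring the check added in run(); not part of this PR.
def validate_backend_amp(backend: str, with_amp: bool) -> None:
    """Fail fast on unsupported backend/AMP combinations before workers are spawned."""
    if backend == "xla-tpu" and with_amp:
        raise RuntimeError("The value of with_amp should be False if backend is xla")


if __name__ == "__main__":
    validate_backend_amp("nccl", True)       # ok: CUDA AMP on a GPU backend
    validate_backend_amp("xla-tpu", False)   # ok: TPU run without AMP
    # validate_backend_amp("xla-tpu", True)  # would raise the RuntimeError above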
@@ -349,6 +351,33 @@ def train_step(engine, batch):
     return trainer
 
 
+def create_evaluator(model, metrics, config, tag="val"):
+    with_amp = config["with_amp"]
+    device = idist.device()
+
+    @torch.no_grad()
+    def evaluate_step(engine: Engine, batch):
+        model.eval()
+        x, y = batch[0], batch[1]
+        if x.device != device:
+            x = x.to(device, non_blocking=True)
+            y = y.to(device, non_blocking=True)
+
+        with autocast(enabled=with_amp):
+            output = model(x)
+        return output, y
+
+    evaluator = Engine(evaluate_step)
+
+    for name, metric in metrics.items():
+        metric.attach(evaluator, name)
+
+    if idist.get_rank() == 0 and (not config["with_clearml"]):
+        common.ProgressBar(desc=f"Evaluation ({tag})", persist=False).attach(evaluator)
+
+    return evaluator
+
+
 def get_save_handler(config):
     if config["with_clearml"]:
         from ignite.contrib.handlers.clearml_logger import ClearMLSaver
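Taken together, a minimal sketch of how the create_evaluator function added above is typically wired into training(); the attach_validation helper, its loader arguments, and the validate_every default are illustrative assumptions, not part of this diff:

# Minimal sketch (not from this diff) of using the new create_evaluator inside main.py;
# attach_validation and its argument names are illustrative placeholders.
import torch.nn as nn

from ignite.engine import Engine, Events
from ignite.metrics import Accuracy, Loss


def attach_validation(trainer: Engine, model: nn.Module, criterion: nn.Module,
                      train_loader, test_loader, config: dict) -> None:
    # Same metrics for both engines, as in the hunk around line 83 above.
    metrics = {"Accuracy": Accuracy(), "Loss": Loss(criterion)}

    # Two evaluators with different roles: one scores the training set,
    # the other scores the test set and drives best-model checkpointing.
    evaluator = create_evaluator(model, metrics=metrics, config=config)
    train_evaluator = create_evaluator(model, metrics=metrics, config=config)

    def run_validation(engine):
        train_evaluator.run(train_loader)  # metrics on the training set
        evaluator.run(test_loader)         # metrics used to pick the best checkpoint

    event = Events.EPOCH_COMPLETED(every=config.get("validate_every", 1)) | Events.COMPLETED
    trainer.add_event_handler(event, run_validation)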