diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 000000000000..69846a72cb9f
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,12 @@
+repos:
+  - repo: https://github.com/python/black
+    rev: 19.10b0
+    hooks:
+      - id: black
+        language_version: python3.7
+
+  - repo: https://gitlab.com/pycqa/flake8
+    rev: 3.7.7
+    hooks:
+      - id: flake8
+        args: [--append-config=tox.ini]
diff --git a/.travis.yml b/.travis.yml
index f29a236724f0..365e3b7e89df 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -86,8 +86,10 @@ jobs:
     - stage: Lint check
       python: "3.7"
      before_install: # Nothing to do
-      install: pip install flake8
-      script: flake8
+      install: pip install flake8 black
+      script:
+        - flake8 .
+        - black .
       after_success: # Nothing to do

     # GitHub Pages Deployment: https://docs.travis-ci.com/user/deployment/pages/
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 0ed13c677433..9e49a9b3f8c4 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -17,6 +17,47 @@ In both cases, you will also need to code some tests to ensure the correct behav
 New code should be compatible with Python 3.X versions. Once you finish implementing a feature or bugfix and tests, please run lint checking and tests:

+#### pre-commit
+To ensure that the codebase complies with a style guide, we use [black](https://black.readthedocs.io/en/stable/) to format the code and [flake8](https://flake8.pycqa.org/en/latest/) to check it for compliance with PEP8.
+
+To automate the process, we have configured the repo with [pre-commit hooks](https://pre-commit.com/) that run black on the staged files, so that every commit complies with the style guide. This requires some setup, described below:
+
+1. Install pre-commit in your Python environment.
+2. Run `pre-commit install`, which configures a virtual environment to invoke black and flake8 on commits.
+
+```bash
+pip install pre-commit
+pre-commit install
+```
+
+3. When files are committed:
+    - If the staged files are not compliant with black, black will autoformat them. If this happens, stage the files and commit again. See the example code below.
+    - If the staged files are not compliant with flake8, errors will be raised. Fix these errors and commit the files again. See the example code below.
+
+```bash
+git add .
+git commit -m "Added awesome feature"
+# DON'T WORRY IF ERRORS ARE RAISED.
+# YOUR CODE IS NOT COMPLIANT WITH FLAKE8 OR BLACK
+# Fix any flake8 errors by following their suggestions
+# black will automatically format the files so they might look different, but you'll need to stage the files again for committing
+# After fixing any flake8 errors
+git add .
+git commit -m "Added feature"
+```
+
+#### Formatting Code without pre-commit
+If you choose not to use pre-commit, you can use an IDE extension configured to run black, or invoke black manually to format files before committing them.
+
+```bash
+pip install black
+black .
+# This should autoformat the files
+git add .
+git commit -m "....."
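+# Optionally, black can also report which files it would reformat without
+# modifying them (a check-only mode, handy for previewing changes before a commit):
+# black --check .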
+``` + + #### Run lint checking ```bash flake8 ignite/ tests/ examples/ diff --git a/docs/source/conf.py b/docs/source/conf.py index 7d14300b266f..f4190e1b4167 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -14,28 +14,29 @@ # import os import sys -sys.path.insert(0, os.path.abspath('../..')) + +sys.path.insert(0, os.path.abspath("../..")) import ignite import pytorch_sphinx_theme # -- Project information ----------------------------------------------------- -project = 'ignite' -copyright = '2019, Torch Contributors' -author = 'Torch Contributors' +project = "ignite" +copyright = "2019, Torch Contributors" +author = "Torch Contributors" # The short X.Y version try: - version = os.environ['code_version'] - if 'master' in version: - version = 'master (' + ignite.__version__ + ')' + version = os.environ["code_version"] + if "master" in version: + version = "master (" + ignite.__version__ + ")" else: - version = version.replace('v', '') + version = version.replace("v", "") except KeyError: version = ignite.__version__ # The full version, including alpha/beta/rc tags -release = 'master' +release = "master" # -- General configuration --------------------------------------------------- @@ -48,27 +49,27 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ - 'sphinx.ext.autosummary', - 'sphinx.ext.doctest', - 'sphinx.ext.intersphinx', - 'sphinx.ext.todo', - 'sphinx.ext.coverage', - 'sphinx.ext.mathjax', - 'sphinx.ext.napoleon', - 'sphinx.ext.viewcode' + "sphinx.ext.autosummary", + "sphinx.ext.doctest", + "sphinx.ext.intersphinx", + "sphinx.ext.todo", + "sphinx.ext.coverage", + "sphinx.ext.mathjax", + "sphinx.ext.napoleon", + "sphinx.ext.viewcode", ] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ".rst" # The master toctree document. -master_doc = 'index' +master_doc = "index" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -83,7 +84,7 @@ exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # -- Options for HTML output ------------------------------------------------- @@ -91,17 +92,17 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'pytorch_sphinx_theme' +html_theme = "pytorch_sphinx_theme" html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] html_theme_options = { - 'canonical_url': 'https://pytorch.org/ignite/index.html', - 'collapse_navigation': False, - 'display_version': True, - 'logo_only': True, + "canonical_url": "https://pytorch.org/ignite/index.html", + "collapse_navigation": False, + "display_version": True, + "logo_only": True, } -html_logo = '_static/img/ignite-logo-dark.svg' +html_logo = "_static/img/ignite-logo-dark.svg" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the @@ -112,13 +113,13 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
-html_static_path = ['_static', '_templates/_static'] +html_static_path = ["_static", "_templates/_static"] html_context = { - 'css_files': [ + "css_files": [ # 'https://fonts.googleapis.com/css?family=Lato', # '_static/css/pytorch_theme.css' - '_static/css/ignite_theme.css' + "_static/css/ignite_theme.css" ], } @@ -126,7 +127,7 @@ # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. -htmlhelp_basename = 'ignitedoc' +htmlhelp_basename = "ignitedoc" # -- Options for LaTeX output ------------------------------------------------ @@ -135,15 +136,12 @@ # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # # 'preamble': '', - # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -153,8 +151,7 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'ignite.tex', 'ignite Documentation', - 'Torch Contributors', 'manual'), + (master_doc, "ignite.tex", "ignite Documentation", "Torch Contributors", "manual"), ] @@ -162,10 +159,7 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'ignite', 'ignite Documentation', - [author], 1) -] +man_pages = [(master_doc, "ignite", "ignite Documentation", [author], 1)] # -- Options for Texinfo output ---------------------------------------------- @@ -174,9 +168,15 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'ignite', 'ignite Documentation', - author, 'ignite', 'One line description of project.', - 'Miscellaneous'), + ( + master_doc, + "ignite", + "ignite Documentation", + author, + "ignite", + "One line description of project.", + "Miscellaneous", + ), ] @@ -185,7 +185,7 @@ # -- Options for intersphinx extension --------------------------------------- # Example configuration for intersphinx: refer to the Python standard library. 
-intersphinx_mapping = {'https://docs.python.org/': None} +intersphinx_mapping = {"https://docs.python.org/": None} # -- Options for todo extension ---------------------------------------------- diff --git a/examples/contrib/cifar10/fastresnet.py b/examples/contrib/cifar10/fastresnet.py index 8b40f7a0b9e7..3b0d2a077aba 100644 --- a/examples/contrib/cifar10/fastresnet.py +++ b/examples/contrib/cifar10/fastresnet.py @@ -1,4 +1,3 @@ - # Network from https://github.com/davidcpage/cifar10-fast # Adapted to python < 3.6 @@ -9,8 +8,7 @@ def fastresnet(): return FastResnet() -def batch_norm(num_channels, bn_bias_init=None, bn_bias_freeze=False, - bn_weight_init=None, bn_weight_freeze=False): +def batch_norm(num_channels, bn_bias_init=None, bn_bias_freeze=False, bn_weight_init=None, bn_weight_freeze=False): m = nn.BatchNorm2d(num_channels) if bn_bias_init is not None: m.bias.data.fill_(bn_bias_init) @@ -32,10 +30,9 @@ def seq_conv_bn(in_channels, out_channels, conv_kwargs, bn_kwargs): if "bias" not in conv_kwargs: conv_kwargs["bias"] = False return nn.Sequential( - nn.Conv2d(in_channels, out_channels, - kernel_size=3, **conv_kwargs), + nn.Conv2d(in_channels, out_channels, kernel_size=3, **conv_kwargs), batch_norm(out_channels, **bn_kwargs), - nn.ReLU(inplace=True) + nn.ReLU(inplace=True), ) @@ -47,24 +44,19 @@ def conv_bn_elu(in_channels, out_channels, conv_kwargs, bn_kwargs, alpha=1.0): if "bias" not in conv_kwargs: conv_kwargs["bias"] = False return nn.Sequential( - nn.Conv2d(in_channels, out_channels, - kernel_size=3, **conv_kwargs), + nn.Conv2d(in_channels, out_channels, kernel_size=3, **conv_kwargs), batch_norm(out_channels, **bn_kwargs), - nn.ELU(alpha=alpha, inplace=True) + nn.ELU(alpha=alpha, inplace=True), ) class Flatten(nn.Module): - def forward(self, x): return x.view(x.size(0), x.size(1)) class FastResnet(nn.Module): - - def __init__(self, conv_kwargs=None, bn_kwargs=None, - conv_bn_fn=seq_conv_bn, - final_weight=0.125): + def __init__(self, conv_kwargs=None, bn_kwargs=None, conv_bn_fn=seq_conv_bn, final_weight=0.125): super(FastResnet, self).__init__() conv_kwargs = {} if conv_kwargs is None else conv_kwargs @@ -75,34 +67,22 @@ def __init__(self, conv_kwargs=None, bn_kwargs=None, self.layer1 = nn.Sequential( conv_bn_fn(64, 128, conv_kwargs, bn_kwargs), nn.MaxPool2d(kernel_size=2), - IdentityResidualBlock(128, 128, conv_kwargs, bn_kwargs, conv_bn_fn=conv_bn_fn) + IdentityResidualBlock(128, 128, conv_kwargs, bn_kwargs, conv_bn_fn=conv_bn_fn), ) - self.layer2 = nn.Sequential( - conv_bn_fn(128, 256, conv_kwargs, bn_kwargs), - nn.MaxPool2d(kernel_size=2) - ) + self.layer2 = nn.Sequential(conv_bn_fn(128, 256, conv_kwargs, bn_kwargs), nn.MaxPool2d(kernel_size=2)) self.layer3 = nn.Sequential( conv_bn_fn(256, 512, conv_kwargs, bn_kwargs), nn.MaxPool2d(kernel_size=2), - IdentityResidualBlock(512, 512, conv_kwargs, bn_kwargs, conv_bn_fn=conv_bn_fn) + IdentityResidualBlock(512, 512, conv_kwargs, bn_kwargs, conv_bn_fn=conv_bn_fn), ) - self.head = nn.Sequential( - nn.AdaptiveMaxPool2d(1), - Flatten(), - ) + self.head = nn.Sequential(nn.AdaptiveMaxPool2d(1), Flatten(),) self.final_weight = final_weight - self.features = nn.Sequential( - self.prep, - self.layer1, - self.layer2, - self.layer3, - self.head - ) + self.features = nn.Sequential(self.prep, self.layer1, self.layer2, self.layer3, self.head) self.classifier = nn.Linear(512, 10, bias=False) @@ -115,9 +95,7 @@ def forward(self, x): class IdentityResidualBlock(nn.Module): - - def __init__(self, in_channels, out_channels, conv_kwargs, bn_kwargs, 
- conv_bn_fn=seq_conv_bn): + def __init__(self, in_channels, out_channels, conv_kwargs, bn_kwargs, conv_bn_fn=seq_conv_bn): super(IdentityResidualBlock, self).__init__() self.conv1 = conv_bn_fn(in_channels, out_channels, conv_kwargs, bn_kwargs) self.conv2 = conv_bn_fn(out_channels, out_channels, conv_kwargs, bn_kwargs) diff --git a/examples/contrib/cifar10/gcp_ai_platform/parse_cluster_spec.py b/examples/contrib/cifar10/gcp_ai_platform/parse_cluster_spec.py index be93e82e4c6e..35a8625e640d 100644 --- a/examples/contrib/cifar10/gcp_ai_platform/parse_cluster_spec.py +++ b/examples/contrib/cifar10/gcp_ai_platform/parse_cluster_spec.py @@ -1,17 +1,16 @@ - import os import json assert "CLUSTER_SPEC" in os.environ -cluster_spec = json.loads(os.environ['CLUSTER_SPEC']) +cluster_spec = json.loads(os.environ["CLUSTER_SPEC"]) -master_addr_port = cluster_spec['cluster']['master'][0].split(":") +master_addr_port = cluster_spec["cluster"]["master"][0].split(":") master_addr = master_addr_port[0] master_port = master_addr_port[1] -rank = cluster_spec['task']['index'] -if cluster_spec['task']['type'] == "worker": +rank = cluster_spec["task"]["index"] +if cluster_spec["task"]["type"] == "worker": rank += 1 print("{},{},{}".format(master_addr, master_port, rank)) diff --git a/examples/contrib/cifar10/main.py b/examples/contrib/cifar10/main.py index 632088596ee1..26852beb80b3 100644 --- a/examples/contrib/cifar10/main.py +++ b/examples/contrib/cifar10/main.py @@ -26,56 +26,55 @@ def run(output_path, config): device = "cuda" - local_rank = config['local_rank'] + local_rank = config["local_rank"] distributed = backend is not None if distributed: torch.cuda.set_device(local_rank) device = "cuda" rank = dist.get_rank() if distributed else 0 - torch.manual_seed(config['seed'] + rank) + torch.manual_seed(config["seed"] + rank) # Rescale batch_size and num_workers ngpus_per_node = torch.cuda.device_count() ngpus = dist.get_world_size() if distributed else 1 - batch_size = config['batch_size'] // ngpus - num_workers = int((config['num_workers'] + ngpus_per_node - 1) / ngpus_per_node) + batch_size = config["batch_size"] // ngpus + num_workers = int((config["num_workers"] + ngpus_per_node - 1) / ngpus_per_node) train_loader, test_loader = get_train_test_loaders( - path=config['data_path'], - batch_size=batch_size, - distributed=distributed, - num_workers=num_workers + path=config["data_path"], batch_size=batch_size, distributed=distributed, num_workers=num_workers ) - model = get_model(config['model']) + model = get_model(config["model"]) model = model.to(device) if distributed: - model = torch.nn.parallel.DistributedDataParallel(model, - device_ids=[local_rank, ], - output_device=local_rank) - - optimizer = optim.SGD(model.parameters(), lr=config['learning_rate'], - momentum=config['momentum'], - weight_decay=config['weight_decay'], - nesterov=True) + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank,], output_device=local_rank) + + optimizer = optim.SGD( + model.parameters(), + lr=config["learning_rate"], + momentum=config["momentum"], + weight_decay=config["weight_decay"], + nesterov=True, + ) criterion = nn.CrossEntropyLoss().to(device) le = len(train_loader) milestones_values = [ (0, 0.0), - (le * config['num_warmup_epochs'], config['learning_rate']), - (le * config['num_epochs'], 0.0) + (le * config["num_warmup_epochs"], config["learning_rate"]), + (le * config["num_epochs"], 0.0), ] - lr_scheduler = PiecewiseLinear(optimizer, param_name="lr", - milestones_values=milestones_values) 
+ lr_scheduler = PiecewiseLinear(optimizer, param_name="lr", milestones_values=milestones_values) def _prepare_batch(batch, device, non_blocking): x, y = batch - return (convert_tensor(x, device=device, non_blocking=non_blocking), - convert_tensor(y, device=device, non_blocking=non_blocking)) + return ( + convert_tensor(x, device=device, non_blocking=non_blocking), + convert_tensor(y, device=device, non_blocking=non_blocking), + ) def process_function(engine, batch): @@ -91,32 +90,41 @@ def process_function(engine, batch): optimizer.step() return { - 'batch loss': loss.item(), + "batch loss": loss.item(), } trainer = Engine(process_function) train_sampler = train_loader.sampler if distributed else None - to_save = {'trainer': trainer, 'model': model, 'optimizer': optimizer, 'lr_scheduler': lr_scheduler} - metric_names = ['batch loss', ] - common.setup_common_training_handlers(trainer, train_sampler=train_sampler, - to_save=to_save, save_every_iters=config['checkpoint_every'], - output_path=output_path, lr_scheduler=lr_scheduler, - output_names=metric_names, with_pbar_on_iters=config['display_iters'], - log_every_iters=10) + to_save = {"trainer": trainer, "model": model, "optimizer": optimizer, "lr_scheduler": lr_scheduler} + metric_names = [ + "batch loss", + ] + common.setup_common_training_handlers( + trainer, + train_sampler=train_sampler, + to_save=to_save, + save_every_iters=config["checkpoint_every"], + output_path=output_path, + lr_scheduler=lr_scheduler, + output_names=metric_names, + with_pbar_on_iters=config["display_iters"], + log_every_iters=10, + ) if rank == 0: tb_logger = TensorboardLogger(log_dir=output_path) - tb_logger.attach(trainer, - log_handler=OutputHandler(tag="train", - metric_names=metric_names), - event_name=Events.ITERATION_COMPLETED) - tb_logger.attach(trainer, - log_handler=OptimizerParamsHandler(optimizer, param_name="lr"), - event_name=Events.ITERATION_STARTED) + tb_logger.attach( + trainer, + log_handler=OutputHandler(tag="train", metric_names=metric_names), + event_name=Events.ITERATION_COMPLETED, + ) + tb_logger.attach( + trainer, log_handler=OptimizerParamsHandler(optimizer, param_name="lr"), event_name=Events.ITERATION_STARTED + ) metrics = { "accuracy": Accuracy(device=device if distributed else None), - "loss": Loss(criterion, device=device if distributed else None) + "loss": Loss(criterion, device=device if distributed else None), } evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True) @@ -127,41 +135,49 @@ def run_validation(engine): train_evaluator.run(train_loader) evaluator.run(test_loader) - trainer.add_event_handler(Events.EPOCH_STARTED(every=config['validate_every']), run_validation) + trainer.add_event_handler(Events.EPOCH_STARTED(every=config["validate_every"]), run_validation) trainer.add_event_handler(Events.COMPLETED, run_validation) if rank == 0: - if config['display_iters']: + if config["display_iters"]: ProgressBar(persist=False, desc="Train evaluation").attach(train_evaluator) ProgressBar(persist=False, desc="Test evaluation").attach(evaluator) - tb_logger.attach(train_evaluator, - log_handler=OutputHandler(tag="train", - metric_names=list(metrics.keys()), - global_step_transform=global_step_from_engine(trainer)), - event_name=Events.COMPLETED) - - tb_logger.attach(evaluator, - log_handler=OutputHandler(tag="test", - metric_names=list(metrics.keys()), - global_step_transform=global_step_from_engine(trainer)), - event_name=Events.COMPLETED) + tb_logger.attach( + train_evaluator, + 
log_handler=OutputHandler( + tag="train", metric_names=list(metrics.keys()), global_step_transform=global_step_from_engine(trainer) + ), + event_name=Events.COMPLETED, + ) + + tb_logger.attach( + evaluator, + log_handler=OutputHandler( + tag="test", metric_names=list(metrics.keys()), global_step_transform=global_step_from_engine(trainer) + ), + event_name=Events.COMPLETED, + ) # Store the best model by validation accuracy: - common.save_best_model_by_val_score(output_path, evaluator, model=model, metric_name='accuracy', n_saved=3, - trainer=trainer, tag="test") + common.save_best_model_by_val_score( + output_path, evaluator, model=model, metric_name="accuracy", n_saved=3, trainer=trainer, tag="test" + ) - if config['log_model_grads_every'] is not None: - tb_logger.attach(trainer, - log_handler=GradsHistHandler(model, tag=model.__class__.__name__), - event_name=Events.ITERATION_COMPLETED(every=config['log_model_grads_every'])) + if config["log_model_grads_every"] is not None: + tb_logger.attach( + trainer, + log_handler=GradsHistHandler(model, tag=model.__class__.__name__), + event_name=Events.ITERATION_COMPLETED(every=config["log_model_grads_every"]), + ) - if config['crash_iteration'] is not None: - @trainer.on(Events.ITERATION_STARTED(once=config['crash_iteration'])) + if config["crash_iteration"] is not None: + + @trainer.on(Events.ITERATION_STARTED(once=config["crash_iteration"])) def _(engine): raise Exception("STOP at iteration: {}".format(engine.state.iteration)) - resume_from = config['resume_from'] + resume_from = config["resume_from"] if resume_from is not None: checkpoint_fp = Path(resume_from) assert checkpoint_fp.exists(), "Checkpoint '{}' is not found".format(checkpoint_fp.as_posix()) @@ -170,9 +186,10 @@ def _(engine): Checkpoint.load_objects(to_load=to_save, checkpoint=checkpoint) try: - trainer.run(train_loader, max_epochs=config['num_epochs']) + trainer.run(train_loader, max_epochs=config["num_epochs"]) except Exception as e: import traceback + print(traceback.format_exc()) if rank == 0: @@ -183,13 +200,16 @@ def _(engine): parser = argparse.ArgumentParser("Training a CNN on CIFAR10 dataset") - parser.add_argument('--network', type=str, default="fastresnet", help="Network to train") + parser.add_argument("--network", type=str, default="fastresnet", help="Network to train") - parser.add_argument('--params', type=str, - help='Override default configuration with parameters: ' - 'data_path=/path/to/dataset;batch_size=64;num_workers=12 ...') + parser.add_argument( + "--params", + type=str, + help="Override default configuration with parameters: " + "data_path=/path/to/dataset;batch_size=64;num_workers=12 ...", + ) - parser.add_argument('--local_rank', type=int, help='Local process rank in distributed computation') + parser.add_argument("--local_rank", type=int, help="Local process rank in distributed computation") args = parser.parse_args() network_name = args.network @@ -202,42 +222,33 @@ def _(engine): # Default configuration dictionary config = { "seed": 12, - "data_path": "/tmp/cifar10", "output_path": "/tmp/cifar10-output", - "model": network_name, - "momentum": 0.9, "weight_decay": 1e-4, "batch_size": batch_size, "num_workers": 10, - "num_epochs": num_epochs, - "learning_rate": 0.04, "num_warmup_epochs": 4, - "validate_every": 3, - # distributed settings "dist_url": "env://", "dist_backend": None, # if None distributed option is disabled, set to "nccl" to enable - # Logging: "display_iters": True, "log_model_grads_every": None, "checkpoint_every": 200, - # Crash/Resume 
training: "resume_from": None, # Path to checkpoint file .pth "crash_iteration": None, } if args.local_rank is not None: - config['local_rank'] = args.local_rank + config["local_rank"] = args.local_rank else: - config['local_rank'] = 0 + config["local_rank"] = 0 # Override config: if args.params is not None: @@ -247,13 +258,13 @@ def _(engine): value = eval(value) config[key] = value - backend = config['dist_backend'] + backend = config["dist_backend"] distributed = backend is not None if distributed: - dist.init_process_group(backend, init_method=config['dist_url']) + dist.init_process_group(backend, init_method=config["dist_url"]) # let each node print the info - if config['local_rank'] == 0: + if config["local_rank"] == 0: print("\nDistributed setting:") print("\tbackend: {}".format(dist.get_backend())) print("\tworld size: {}".format(dist.get_world_size())) @@ -262,7 +273,7 @@ def _(engine): output_path = None # let each node print the info - if config['local_rank'] == 0: + if config["local_rank"] == 0: print("Train {} on CIFAR10".format(network_name)) print("- PyTorch version: {}".format(torch.__version__)) print("- Ignite version: {}".format(ignite.__version__)) @@ -285,7 +296,7 @@ def _(engine): nnodes = dist.get_world_size() // ngpus_per_node gpu_conf = "-distributed-{}nodes-{}gpus".format(nnodes, ngpus_per_node) - output_path = Path(config['output_path']) / "{}{}".format(now, gpu_conf) + output_path = Path(config["output_path"]) / "{}{}".format(now, gpu_conf) if not output_path.exists(): output_path.mkdir(parents=True) output_path = output_path.as_posix() diff --git a/examples/contrib/cifar10/utils.py b/examples/contrib/cifar10/utils.py index a55fe6f9ff4a..19a33e57aa87 100644 --- a/examples/contrib/cifar10/utils.py +++ b/examples/contrib/cifar10/utils.py @@ -21,18 +21,17 @@ def set_seed(seed): def get_train_test_loaders(path, batch_size, num_workers, distributed=False, pin_memory=True): - train_transform = Compose([ - Pad(4), - RandomCrop(32, fill=128), - RandomHorizontalFlip(), - ToTensor(), - Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), - ]) - - test_transform = Compose([ - ToTensor(), - Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), - ]) + train_transform = Compose( + [ + Pad(4), + RandomCrop(32, fill=128), + RandomHorizontalFlip(), + ToTensor(), + Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), + ] + ) + + test_transform = Compose([ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),]) if not os.path.exists(path): os.makedirs(path) @@ -49,11 +48,18 @@ def get_train_test_loaders(path, batch_size, num_workers, distributed=False, pin train_sampler = DistributedSampler(train_ds) test_sampler = DistributedSampler(test_ds, shuffle=False) - train_labelled_loader = DataLoader(train_ds, batch_size=batch_size, sampler=train_sampler, - num_workers=num_workers, pin_memory=pin_memory, drop_last=True) - - test_loader = DataLoader(test_ds, batch_size=batch_size * 2, sampler=test_sampler, - num_workers=num_workers, pin_memory=pin_memory) + train_labelled_loader = DataLoader( + train_ds, + batch_size=batch_size, + sampler=train_sampler, + num_workers=num_workers, + pin_memory=pin_memory, + drop_last=True, + ) + + test_loader = DataLoader( + test_ds, batch_size=batch_size * 2, sampler=test_sampler, num_workers=num_workers, pin_memory=pin_memory + ) return train_labelled_loader, test_loader diff --git a/examples/contrib/mnist/mnist_with_neptune_logger.py b/examples/contrib/mnist/mnist_with_neptune_logger.py index 2f7c7d2bc078..7bb8eeafcafa 100644 
--- a/examples/contrib/mnist/mnist_with_neptune_logger.py +++ b/examples/contrib/mnist/mnist_with_neptune_logger.py @@ -59,21 +59,23 @@ def forward(self, x): def get_data_loaders(train_batch_size, val_batch_size): data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))]) - train_loader = DataLoader(MNIST(download=True, root=".", transform=data_transform, train=True), - batch_size=train_batch_size, shuffle=True) + train_loader = DataLoader( + MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True + ) - val_loader = DataLoader(MNIST(download=False, root=".", transform=data_transform, train=False), - batch_size=val_batch_size, shuffle=False) + val_loader = DataLoader( + MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False + ) return train_loader, val_loader def run(train_batch_size, val_batch_size, epochs, lr, momentum, neptune_project): train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size) model = Net() - device = 'cpu' + device = "cpu" if torch.cuda.is_available(): - device = 'cuda' + device = "cuda" optimizer = SGD(model.parameters(), lr=lr, momentum=momentum) criterion = nn.CrossEntropyLoss() @@ -81,17 +83,17 @@ def run(train_batch_size, val_batch_size, epochs, lr, momentum, neptune_project) if sys.version_info > (3,): from ignite.contrib.metrics.gpu_info import GpuInfo + try: GpuInfo().attach(trainer) except RuntimeError: - print("INFO: By default, in this example it is possible to log GPU information (used memory, utilization). " - "As there is no pynvml python package installed, GPU information won't be logged. Otherwise, please " - "install it : `pip install pynvml`") + print( + "INFO: By default, in this example it is possible to log GPU information (used memory, utilization). " + "As there is no pynvml python package installed, GPU information won't be logged. 
Otherwise, please " + "install it : `pip install pynvml`" + ) - metrics = { - 'accuracy': Accuracy(), - 'loss': Loss(criterion) - } + metrics = {"accuracy": Accuracy(), "loss": Loss(criterion)} train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device) validation_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device) @@ -101,44 +103,48 @@ def compute_metrics(engine): train_evaluator.run(train_loader) validation_evaluator.run(val_loader) - npt_logger = NeptuneLogger(api_token=None, - project_name=neptune_project, - name='ignite-mnist-example', - params={'train_batch_size': train_batch_size, - 'val_batch_size': val_batch_size, - 'epochs': epochs, - 'lr': lr, - 'momentum': momentum}) - - npt_logger.attach(trainer, - log_handler=OutputHandler(tag="training", - output_transform=lambda loss: {'batchloss': loss}, - metric_names='all'), - event_name=Events.ITERATION_COMPLETED(every=100)) - - npt_logger.attach(train_evaluator, - log_handler=OutputHandler(tag="training", - metric_names=["loss", "accuracy"], - another_engine=trainer), - event_name=Events.EPOCH_COMPLETED) - - npt_logger.attach(validation_evaluator, - log_handler=OutputHandler(tag="validation", - metric_names=["loss", "accuracy"], - another_engine=trainer), - event_name=Events.EPOCH_COMPLETED) - - npt_logger.attach(trainer, - log_handler=OptimizerParamsHandler(optimizer), - event_name=Events.ITERATION_COMPLETED(every=100)) - - npt_logger.attach(trainer, - log_handler=WeightsScalarHandler(model), - event_name=Events.ITERATION_COMPLETED(every=100)) - - npt_logger.attach(trainer, - log_handler=GradsScalarHandler(model), - event_name=Events.ITERATION_COMPLETED(every=100)) + npt_logger = NeptuneLogger( + api_token=None, + project_name=neptune_project, + name="ignite-mnist-example", + params={ + "train_batch_size": train_batch_size, + "val_batch_size": val_batch_size, + "epochs": epochs, + "lr": lr, + "momentum": momentum, + }, + ) + + npt_logger.attach( + trainer, + log_handler=OutputHandler( + tag="training", output_transform=lambda loss: {"batchloss": loss}, metric_names="all" + ), + event_name=Events.ITERATION_COMPLETED(every=100), + ) + + npt_logger.attach( + train_evaluator, + log_handler=OutputHandler(tag="training", metric_names=["loss", "accuracy"], another_engine=trainer), + event_name=Events.EPOCH_COMPLETED, + ) + + npt_logger.attach( + validation_evaluator, + log_handler=OutputHandler(tag="validation", metric_names=["loss", "accuracy"], another_engine=trainer), + event_name=Events.EPOCH_COMPLETED, + ) + + npt_logger.attach( + trainer, log_handler=OptimizerParamsHandler(optimizer), event_name=Events.ITERATION_COMPLETED(every=100) + ) + + npt_logger.attach( + trainer, log_handler=WeightsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100) + ) + + npt_logger.attach(trainer, log_handler=GradsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100)) # kick everything off trainer.run(train_loader, max_epochs=epochs) @@ -147,18 +153,14 @@ def compute_metrics(engine): if __name__ == "__main__": parser = ArgumentParser() - parser.add_argument('--batch_size', type=int, default=64, - help='input batch size for training (default: 64)') - parser.add_argument('--val_batch_size', type=int, default=1000, - help='input batch size for validation (default: 1000)') - parser.add_argument('--epochs', type=int, default=10, - help='number of epochs to train (default: 10)') - parser.add_argument('--lr', type=float, default=0.01, - help='learning rate (default: 0.01)') - 
parser.add_argument('--momentum', type=float, default=0.5, - help='SGD momentum (default: 0.5)') - parser.add_argument("--neptune_project", type=str, - help="your project in neptune.ai") + parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)") + parser.add_argument( + "--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)" + ) + parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)") + parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)") + parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)") + parser.add_argument("--neptune_project", type=str, help="your project in neptune.ai") args = parser.parse_args() diff --git a/examples/contrib/mnist/mnist_with_tensorboard_logger.py b/examples/contrib/mnist/mnist_with_tensorboard_logger.py index 76e899775a7b..92b20cb431e6 100644 --- a/examples/contrib/mnist/mnist_with_tensorboard_logger.py +++ b/examples/contrib/mnist/mnist_with_tensorboard_logger.py @@ -59,21 +59,23 @@ def forward(self, x): def get_data_loaders(train_batch_size, val_batch_size): data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))]) - train_loader = DataLoader(MNIST(download=True, root=".", transform=data_transform, train=True), - batch_size=train_batch_size, shuffle=True) + train_loader = DataLoader( + MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True + ) - val_loader = DataLoader(MNIST(download=False, root=".", transform=data_transform, train=False), - batch_size=val_batch_size, shuffle=False) + val_loader = DataLoader( + MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False + ) return train_loader, val_loader def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_dir): train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size) model = Net() - device = 'cpu' + device = "cpu" if torch.cuda.is_available(): - device = 'cuda' + device = "cuda" optimizer = SGD(model.parameters(), lr=lr, momentum=momentum) criterion = nn.CrossEntropyLoss() @@ -81,17 +83,17 @@ def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_dir): if sys.version_info > (3,): from ignite.contrib.metrics.gpu_info import GpuInfo + try: GpuInfo().attach(trainer) except RuntimeError: - print("INFO: By default, in this example it is possible to log GPU information (used memory, utilization). " - "As there is no pynvml python package installed, GPU information won't be logged. Otherwise, please " - "install it : `pip install pynvml`") + print( + "INFO: By default, in this example it is possible to log GPU information (used memory, utilization). " + "As there is no pynvml python package installed, GPU information won't be logged. 
Otherwise, please " + "install it : `pip install pynvml`" + ) - metrics = { - 'accuracy': Accuracy(), - 'loss': Loss(criterion) - } + metrics = {"accuracy": Accuracy(), "loss": Loss(criterion)} train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device) validation_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device) @@ -103,43 +105,37 @@ def compute_metrics(engine): tb_logger = TensorboardLogger(log_dir=log_dir) - tb_logger.attach(trainer, - log_handler=OutputHandler(tag="training", - output_transform=lambda loss: {'batchloss': loss}, - metric_names='all'), - event_name=Events.ITERATION_COMPLETED(every=100)) + tb_logger.attach( + trainer, + log_handler=OutputHandler( + tag="training", output_transform=lambda loss: {"batchloss": loss}, metric_names="all" + ), + event_name=Events.ITERATION_COMPLETED(every=100), + ) - tb_logger.attach(train_evaluator, - log_handler=OutputHandler(tag="training", - metric_names=["loss", "accuracy"], - another_engine=trainer), - event_name=Events.EPOCH_COMPLETED) + tb_logger.attach( + train_evaluator, + log_handler=OutputHandler(tag="training", metric_names=["loss", "accuracy"], another_engine=trainer), + event_name=Events.EPOCH_COMPLETED, + ) - tb_logger.attach(validation_evaluator, - log_handler=OutputHandler(tag="validation", - metric_names=["loss", "accuracy"], - another_engine=trainer), - event_name=Events.EPOCH_COMPLETED) + tb_logger.attach( + validation_evaluator, + log_handler=OutputHandler(tag="validation", metric_names=["loss", "accuracy"], another_engine=trainer), + event_name=Events.EPOCH_COMPLETED, + ) - tb_logger.attach(trainer, - log_handler=OptimizerParamsHandler(optimizer), - event_name=Events.ITERATION_COMPLETED(every=100)) + tb_logger.attach( + trainer, log_handler=OptimizerParamsHandler(optimizer), event_name=Events.ITERATION_COMPLETED(every=100) + ) - tb_logger.attach(trainer, - log_handler=WeightsScalarHandler(model), - event_name=Events.ITERATION_COMPLETED(every=100)) + tb_logger.attach(trainer, log_handler=WeightsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100)) - tb_logger.attach(trainer, - log_handler=WeightsHistHandler(model), - event_name=Events.EPOCH_COMPLETED(every=100)) + tb_logger.attach(trainer, log_handler=WeightsHistHandler(model), event_name=Events.EPOCH_COMPLETED(every=100)) - tb_logger.attach(trainer, - log_handler=GradsScalarHandler(model), - event_name=Events.ITERATION_COMPLETED(every=100)) + tb_logger.attach(trainer, log_handler=GradsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100)) - tb_logger.attach(trainer, - log_handler=GradsHistHandler(model), - event_name=Events.EPOCH_COMPLETED(every=100)) + tb_logger.attach(trainer, log_handler=GradsHistHandler(model), event_name=Events.EPOCH_COMPLETED(every=100)) # kick everything off trainer.run(train_loader, max_epochs=epochs) @@ -148,18 +144,16 @@ def compute_metrics(engine): if __name__ == "__main__": parser = ArgumentParser() - parser.add_argument('--batch_size', type=int, default=64, - help='input batch size for training (default: 64)') - parser.add_argument('--val_batch_size', type=int, default=1000, - help='input batch size for validation (default: 1000)') - parser.add_argument('--epochs', type=int, default=10, - help='number of epochs to train (default: 10)') - parser.add_argument('--lr', type=float, default=0.01, - help='learning rate (default: 0.01)') - parser.add_argument('--momentum', type=float, default=0.5, - help='SGD momentum (default: 0.5)') - 
parser.add_argument("--log_dir", type=str, default="tensorboard_logs", - help="log directory for Tensorboard log output") + parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)") + parser.add_argument( + "--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)" + ) + parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)") + parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)") + parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)") + parser.add_argument( + "--log_dir", type=str, default="tensorboard_logs", help="log directory for Tensorboard log output" + ) args = parser.parse_args() diff --git a/examples/contrib/mnist/mnist_with_tqdm_logger.py b/examples/contrib/mnist/mnist_with_tqdm_logger.py index 4d63d6b0ce9b..f42fe10d66d4 100644 --- a/examples/contrib/mnist/mnist_with_tqdm_logger.py +++ b/examples/contrib/mnist/mnist_with_tqdm_logger.py @@ -36,59 +36,63 @@ def forward(self, x): def get_data_loaders(train_batch_size, val_batch_size): data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))]) - train_loader = DataLoader(MNIST(download=True, root=".", transform=data_transform, train=True), - batch_size=train_batch_size, shuffle=True) + train_loader = DataLoader( + MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True + ) - val_loader = DataLoader(MNIST(download=False, root=".", transform=data_transform, train=False), - batch_size=val_batch_size, shuffle=False) + val_loader = DataLoader( + MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False + ) return train_loader, val_loader def run(train_batch_size, val_batch_size, epochs, lr, momentum, display_gpu_info): train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size) model = Net() - device = 'cpu' + device = "cpu" if torch.cuda.is_available(): - device = 'cuda' + device = "cuda" optimizer = SGD(model.parameters(), lr=lr, momentum=momentum) trainer = create_supervised_trainer(model, optimizer, F.nll_loss, device=device) - evaluator = create_supervised_evaluator(model, - metrics={'accuracy': Accuracy(), - 'nll': Loss(F.nll_loss)}, - device=device) + evaluator = create_supervised_evaluator( + model, metrics={"accuracy": Accuracy(), "nll": Loss(F.nll_loss)}, device=device + ) - RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss') + RunningAverage(output_transform=lambda x: x).attach(trainer, "loss") if display_gpu_info: from ignite.contrib.metrics import GpuInfo - GpuInfo().attach(trainer, name='gpu') + GpuInfo().attach(trainer, name="gpu") pbar = ProgressBar(persist=True) - pbar.attach(trainer, metric_names='all') + pbar.attach(trainer, metric_names="all") @trainer.on(Events.EPOCH_COMPLETED) def log_training_results(engine): evaluator.run(train_loader) metrics = evaluator.state.metrics - avg_accuracy = metrics['accuracy'] - avg_nll = metrics['nll'] + avg_accuracy = metrics["accuracy"] + avg_nll = metrics["nll"] pbar.log_message( - "Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}" - .format(engine.state.epoch, avg_accuracy, avg_nll) + "Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format( + engine.state.epoch, avg_accuracy, avg_nll + ) ) @trainer.on(Events.EPOCH_COMPLETED) def log_validation_results(engine): 
evaluator.run(val_loader) metrics = evaluator.state.metrics - avg_accuracy = metrics['accuracy'] - avg_nll = metrics['nll'] + avg_accuracy = metrics["accuracy"] + avg_nll = metrics["nll"] pbar.log_message( - "Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}" - .format(engine.state.epoch, avg_accuracy, avg_nll)) + "Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format( + engine.state.epoch, avg_accuracy, avg_nll + ) + ) pbar.n = pbar.last_print_n = 0 @@ -97,18 +101,18 @@ def log_validation_results(engine): if __name__ == "__main__": parser = ArgumentParser() - parser.add_argument('--batch_size', type=int, default=64, - help='input batch size for training (default: 64)') - parser.add_argument('--val_batch_size', type=int, default=1000, - help='input batch size for validation (default: 1000)') - parser.add_argument('--epochs', type=int, default=10, - help='number of epochs to train (default: 10)') - parser.add_argument('--lr', type=float, default=0.01, - help='learning rate (default: 0.01)') - parser.add_argument('--momentum', type=float, default=0.5, - help='SGD momentum (default: 0.5)') - parser.add_argument('--display_gpu_info', action='store_true', - help='Display gpu usage info. This needs python 3.X and pynvml package') + parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)") + parser.add_argument( + "--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)" + ) + parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)") + parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)") + parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)") + parser.add_argument( + "--display_gpu_info", + action="store_true", + help="Display gpu usage info. 
This needs python 3.X and pynvml package", + ) args = parser.parse_args() diff --git a/examples/contrib/mnist/mnist_with_visdom_logger.py b/examples/contrib/mnist/mnist_with_visdom_logger.py index 5b9da023716b..54003dd75913 100644 --- a/examples/contrib/mnist/mnist_with_visdom_logger.py +++ b/examples/contrib/mnist/mnist_with_visdom_logger.py @@ -55,30 +55,29 @@ def forward(self, x): def get_data_loaders(train_batch_size, val_batch_size): data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))]) - train_loader = DataLoader(MNIST(download=True, root=".", transform=data_transform, train=True), - batch_size=train_batch_size, shuffle=True) + train_loader = DataLoader( + MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True + ) - val_loader = DataLoader(MNIST(download=False, root=".", transform=data_transform, train=False), - batch_size=val_batch_size, shuffle=False) + val_loader = DataLoader( + MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False + ) return train_loader, val_loader def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_dir): train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size) model = Net() - device = 'cpu' + device = "cpu" if torch.cuda.is_available(): - device = 'cuda' + device = "cuda" optimizer = SGD(model.parameters(), lr=lr, momentum=momentum) criterion = nn.CrossEntropyLoss() trainer = create_supervised_trainer(model, optimizer, criterion, device=device) - metrics = { - 'accuracy': Accuracy(), - 'loss': Loss(criterion) - } + metrics = {"accuracy": Accuracy(), "loss": Loss(criterion)} train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device) validation_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device) @@ -90,33 +89,31 @@ def compute_metrics(engine): vd_logger = VisdomLogger(env="mnist_training") - vd_logger.attach(trainer, - log_handler=OutputHandler(tag="training", output_transform=lambda loss: {'batchloss': loss}), - event_name=Events.ITERATION_COMPLETED(every=100)) + vd_logger.attach( + trainer, + log_handler=OutputHandler(tag="training", output_transform=lambda loss: {"batchloss": loss}), + event_name=Events.ITERATION_COMPLETED(every=100), + ) - vd_logger.attach(train_evaluator, - log_handler=OutputHandler(tag="training", - metric_names=["loss", "accuracy"], - another_engine=trainer), - event_name=Events.EPOCH_COMPLETED) + vd_logger.attach( + train_evaluator, + log_handler=OutputHandler(tag="training", metric_names=["loss", "accuracy"], another_engine=trainer), + event_name=Events.EPOCH_COMPLETED, + ) - vd_logger.attach(validation_evaluator, - log_handler=OutputHandler(tag="validation", - metric_names=["loss", "accuracy"], - another_engine=trainer), - event_name=Events.EPOCH_COMPLETED) + vd_logger.attach( + validation_evaluator, + log_handler=OutputHandler(tag="validation", metric_names=["loss", "accuracy"], another_engine=trainer), + event_name=Events.EPOCH_COMPLETED, + ) - vd_logger.attach(trainer, - log_handler=OptimizerParamsHandler(optimizer), - event_name=Events.ITERATION_COMPLETED(every=100)) + vd_logger.attach( + trainer, log_handler=OptimizerParamsHandler(optimizer), event_name=Events.ITERATION_COMPLETED(every=100) + ) - vd_logger.attach(trainer, - log_handler=WeightsScalarHandler(model), - event_name=Events.ITERATION_COMPLETED(every=100)) + vd_logger.attach(trainer, log_handler=WeightsScalarHandler(model), 
event_name=Events.ITERATION_COMPLETED(every=100)) - vd_logger.attach(trainer, - log_handler=GradsScalarHandler(model), - event_name=Events.ITERATION_COMPLETED(every=100)) + vd_logger.attach(trainer, log_handler=GradsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100)) # kick everything off trainer.run(train_loader, max_epochs=epochs) @@ -124,18 +121,14 @@ def compute_metrics(engine): if __name__ == "__main__": parser = ArgumentParser() - parser.add_argument('--batch_size', type=int, default=64, - help='input batch size for training (default: 64)') - parser.add_argument('--val_batch_size', type=int, default=1000, - help='input batch size for validation (default: 1000)') - parser.add_argument('--epochs', type=int, default=10, - help='number of epochs to train (default: 10)') - parser.add_argument('--lr', type=float, default=0.01, - help='learning rate (default: 0.01)') - parser.add_argument('--momentum', type=float, default=0.5, - help='SGD momentum (default: 0.5)') - parser.add_argument("--log_dir", type=str, default="visdom_logs", - help="log directory for Tensorboard log output") + parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)") + parser.add_argument( + "--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)" + ) + parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)") + parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)") + parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)") + parser.add_argument("--log_dir", type=str, default="visdom_logs", help="log directory for Tensorboard log output") args = parser.parse_args() diff --git a/examples/fast_neural_style/handlers.py b/examples/fast_neural_style/handlers.py index bf5faa46a33c..33ac9c1312b7 100644 --- a/examples/fast_neural_style/handlers.py +++ b/examples/fast_neural_style/handlers.py @@ -2,7 +2,6 @@ class Progbar(object): - def __init__(self, loader, metrics): self.num_iterations = len(loader) self.output_stream = sys.stdout @@ -23,17 +22,17 @@ def __call__(self, engine): equal_to = int(percent_seen / 10) done = int(percent_seen) == 100 - bar = '[' + '=' * equal_to + '>' * (not done) + ' ' * (10 - equal_to) + ']' - message = 'Epoch {epoch} | {percent_seen:.2f}% | {bar}'.format(epoch=engine.state.epoch, - percent_seen=percent_seen, - bar=bar) + bar = "[" + "=" * equal_to + ">" * (not done) + " " * (10 - equal_to) + "]" + message = "Epoch {epoch} | {percent_seen:.2f}% | {bar}".format( + epoch=engine.state.epoch, percent_seen=percent_seen, bar=bar + ) for key, value in self.metrics.items(): - message += ' | {name}: {value:.2e}'.format(name=key, value=value) + message += " | {name}: {value:.2e}".format(name=key, value=value) - message += '\r' + message += "\r" self.output_stream.write(message) self.output_stream.flush() if done: - self.output_stream.write('\n') + self.output_stream.write("\n") diff --git a/examples/fast_neural_style/neural_style.py b/examples/fast_neural_style/neural_style.py index aa8d76cf13d0..9116e34f6719 100644 --- a/examples/fast_neural_style/neural_style.py +++ b/examples/fast_neural_style/neural_style.py @@ -38,18 +38,21 @@ def check_manual_seed(args): def check_dataset(args): - transform = transforms.Compose([ - transforms.Resize(args.image_size), - transforms.CenterCrop(args.image_size), - transforms.ToTensor(), - transforms.Lambda(lambda x: x.mul(255)) - ]) - - if 
args.dataset in {'folder', 'mscoco'}: + transform = transforms.Compose( + [ + transforms.Resize(args.image_size), + transforms.CenterCrop(args.image_size), + transforms.ToTensor(), + transforms.Lambda(lambda x: x.mul(255)), + ] + ) + + if args.dataset in {"folder", "mscoco"}: train_dataset = datasets.ImageFolder(args.dataroot, transform) - elif args.dataset == 'test': - train_dataset = datasets.FakeData(size=args.batch_size, image_size=(3, 32, 32), - num_classes=1, transform=transform) + elif args.dataset == "test": + train_dataset = datasets.FakeData( + size=args.batch_size, image_size=(3, 32, 32), num_classes=1, transform=transform + ) else: raise RuntimeError("Invalid dataset name: {}".format(args.dataset)) @@ -67,10 +70,7 @@ def train(args): mse_loss = torch.nn.MSELoss() vgg = Vgg16(requires_grad=False).to(device) - style_transform = transforms.Compose([ - transforms.ToTensor(), - transforms.Lambda(lambda x: x.mul(255)) - ]) + style_transform = transforms.Compose([transforms.ToTensor(), transforms.Lambda(lambda x: x.mul(255))]) style = utils.load_image(args.style_image, size=args.style_size) style = style_transform(style) @@ -100,7 +100,7 @@ def step(engine, batch): content_loss = args.content_weight * mse_loss(features_y.relu2_2, features_x.relu2_2) - style_loss = 0. + style_loss = 0.0 for ft_y, gm_s in zip(features_y, gram_style): gm_y = utils.gram_matrix(ft_y) style_loss += mse_loss(gm_y, gm_s[:n_batch, :, :]) @@ -110,20 +110,19 @@ def step(engine, batch): total_loss.backward() optimizer.step() - return { - 'content_loss': content_loss.item(), - 'style_loss': style_loss.item(), - 'total_loss': total_loss.item() - } + return {"content_loss": content_loss.item(), "style_loss": style_loss.item(), "total_loss": total_loss.item()} trainer = Engine(step) - checkpoint_handler = ModelCheckpoint(args.checkpoint_model_dir, 'checkpoint', - n_saved=10, require_empty=False, create_dir=True) + checkpoint_handler = ModelCheckpoint( + args.checkpoint_model_dir, "checkpoint", n_saved=10, require_empty=False, create_dir=True + ) progress_bar = Progbar(loader=train_loader, metrics=running_avgs) - trainer.add_event_handler(event_name=Events.EPOCH_COMPLETED(every=args.checkpoint_interval), - handler=checkpoint_handler, - to_save={'net': transformer}) + trainer.add_event_handler( + event_name=Events.EPOCH_COMPLETED(every=args.checkpoint_interval), + handler=checkpoint_handler, + to_save={"net": transformer}, + ) trainer.add_event_handler(event_name=Events.ITERATION_COMPLETED, handler=progress_bar) trainer.run(train_loader, max_epochs=args.epochs) @@ -131,10 +130,7 @@ def step(engine, batch): def stylize(args): device = torch.device("cuda" if args.cuda else "cpu") - content_transform = transforms.Compose([ - transforms.ToTensor(), - transforms.Lambda(lambda x: x.mul(255)) - ]) + content_transform = transforms.Compose([transforms.ToTensor(), transforms.Lambda(lambda x: x.mul(255))]) content_image = utils.load_image(args.content_image, scale=args.content_scale) content_image = content_transform(content_image) @@ -153,47 +149,59 @@ def main(): train_arg_parser = subparsers.add_parser("train", help="parser for training arguments") train_arg_parser.add_argument("--epochs", type=int, default=2, help="number of training epochs, default is 2") - train_arg_parser.add_argument("--batch_size", type=int, default=8, - help="batch size for training, default is 8") - train_arg_parser.add_argument("--dataset", type=str, required=True, choices={'test', 'folder', 'mscoco'}, - help="type of dataset to be used.") - 
train_arg_parser.add_argument("--dataroot", type=str, required=True, - help="path to training dataset, the path should point to a folder " - "containing another folder with all the training images") - train_arg_parser.add_argument("--style_image", type=str, default="test", - help="path to style-image") - train_arg_parser.add_argument("--test_image", type=str, default="test", - help="path to test-image") - train_arg_parser.add_argument("--checkpoint_model_dir", type=str, default='/tmp/checkpoints', - help="path to folder where checkpoints of trained models will be saved") - train_arg_parser.add_argument("--checkpoint_interval", type=int, default=1, - help="number of batches after which a checkpoint of trained model will be created") - train_arg_parser.add_argument("--image_size", type=int, default=256, - help="size of training images, default is 256 X 256") - train_arg_parser.add_argument("--style_size", type=int, default=None, - help="size of style-image, default is the original size of style image") - train_arg_parser.add_argument("--cuda", type=int, default=1, - help="set it to 1 for running on GPU, 0 for CPU") - train_arg_parser.add_argument("--seed", type=int, default=42, - help="random seed for training") - train_arg_parser.add_argument("--content_weight", type=float, default=1e5, - help="weight for content-loss, default is 1e5") - train_arg_parser.add_argument("--style_weight", type=float, default=1e10, - help="weight for style-loss, default is 1e10") - train_arg_parser.add_argument("--lr", type=float, default=1e-3, - help="learning rate, default is 1e-3") + train_arg_parser.add_argument("--batch_size", type=int, default=8, help="batch size for training, default is 8") + train_arg_parser.add_argument( + "--dataset", type=str, required=True, choices={"test", "folder", "mscoco"}, help="type of dataset to be used." 
+ ) + train_arg_parser.add_argument( + "--dataroot", + type=str, + required=True, + help="path to training dataset, the path should point to a folder " + "containing another folder with all the training images", + ) + train_arg_parser.add_argument("--style_image", type=str, default="test", help="path to style-image") + train_arg_parser.add_argument("--test_image", type=str, default="test", help="path to test-image") + train_arg_parser.add_argument( + "--checkpoint_model_dir", + type=str, + default="/tmp/checkpoints", + help="path to folder where checkpoints of trained models will be saved", + ) + train_arg_parser.add_argument( + "--checkpoint_interval", + type=int, + default=1, + help="number of batches after which a checkpoint of trained model will be created", + ) + train_arg_parser.add_argument( + "--image_size", type=int, default=256, help="size of training images, default is 256 X 256" + ) + train_arg_parser.add_argument( + "--style_size", type=int, default=None, help="size of style-image, default is the original size of style image" + ) + train_arg_parser.add_argument("--cuda", type=int, default=1, help="set it to 1 for running on GPU, 0 for CPU") + train_arg_parser.add_argument("--seed", type=int, default=42, help="random seed for training") + train_arg_parser.add_argument( + "--content_weight", type=float, default=1e5, help="weight for content-loss, default is 1e5" + ) + train_arg_parser.add_argument( + "--style_weight", type=float, default=1e10, help="weight for style-loss, default is 1e10" + ) + train_arg_parser.add_argument("--lr", type=float, default=1e-3, help="learning rate, default is 1e-3") eval_arg_parser = subparsers.add_parser("eval", help="parser for evaluation/stylizing arguments") - eval_arg_parser.add_argument("--content_image", type=str, required=True, - help="path to content image you want to stylize") - eval_arg_parser.add_argument("--content_scale", type=float, default=None, - help="factor for scaling down the content image") - eval_arg_parser.add_argument("--output_image", type=str, required=True, - help="path for saving the output image") - eval_arg_parser.add_argument("--model", type=str, required=True, - help="saved model to be used for stylizing the image.") - eval_arg_parser.add_argument("--cuda", type=int, required=True, - help="set it to 1 for running on GPU, 0 for CPU") + eval_arg_parser.add_argument( + "--content_image", type=str, required=True, help="path to content image you want to stylize" + ) + eval_arg_parser.add_argument( + "--content_scale", type=float, default=None, help="factor for scaling down the content image" + ) + eval_arg_parser.add_argument("--output_image", type=str, required=True, help="path for saving the output image") + eval_arg_parser.add_argument( + "--model", type=str, required=True, help="saved model to be used for stylizing the image." 
+    )
+    eval_arg_parser.add_argument("--cuda", type=int, required=True, help="set it to 1 for running on GPU, 0 for CPU")

     args = main_arg_parser.parse_args()
@@ -210,5 +218,5 @@ def main():
         stylize(args)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/examples/fast_neural_style/transformer_net.py b/examples/fast_neural_style/transformer_net.py
index 2cb06971d281..bb2d0fffdb4a 100644
--- a/examples/fast_neural_style/transformer_net.py
+++ b/examples/fast_neural_style/transformer_net.py
@@ -93,7 +93,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None
     def forward(self, x):
         x_in = x
         if self.upsample:
-            x_in = torch.nn.functional.interpolate(x_in, mode='nearest', scale_factor=self.upsample)
+            x_in = torch.nn.functional.interpolate(x_in, mode="nearest", scale_factor=self.upsample)
         out = self.reflection_pad(x_in)
         out = self.conv2d(out)
         return out
diff --git a/examples/fast_neural_style/vgg.py b/examples/fast_neural_style/vgg.py
index 17ef66325178..2e9ad1f2d135 100644
--- a/examples/fast_neural_style/vgg.py
+++ b/examples/fast_neural_style/vgg.py
@@ -33,6 +33,6 @@ def forward(self, X):
         h_relu3_3 = h
         h = self.slice4(h)
         h_relu4_3 = h
-        vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3'])
+        vgg_outputs = namedtuple("VggOutputs", ["relu1_2", "relu2_2", "relu3_3", "relu4_3"])
         out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3)
         return out
diff --git a/examples/gan/dcgan.py b/examples/gan/dcgan.py
index 82a009836c70..ce0b12097dde 100644
--- a/examples/gan/dcgan.py
+++ b/examples/gan/dcgan.py
@@ -19,17 +19,19 @@
     import torchvision.utils as vutils

 except ImportError:
-    raise ImportError("Please install torchvision to run this example, for example "
-                      "via conda by running 'conda install -c pytorch torchvision'. ")
+    raise ImportError(
+        "Please install torchvision to run this example, for example "
+        "via conda by running 'conda install -c pytorch torchvision'. "
+    )


 PRINT_FREQ = 100
-FAKE_IMG_FNAME = 'fake_sample_epoch_{:04d}.png'
-REAL_IMG_FNAME = 'real_sample_epoch_{:04d}.png'
-LOGS_FNAME = 'logs.tsv'
-PLOT_FNAME = 'plot.svg'
-SAMPLES_FNAME = 'samples.svg'
-CKPT_PREFIX = 'networks'
+FAKE_IMG_FNAME = "fake_sample_epoch_{:04d}.png"
+REAL_IMG_FNAME = "real_sample_epoch_{:04d}.png"
+LOGS_FNAME = "logs.tsv"
+PLOT_FNAME = "plot.svg"
+SAMPLES_FNAME = "samples.svg"
+CKPT_PREFIX = "networks"


 class Net(nn.Module):
@@ -42,10 +44,10 @@ def weights_init(self):
         for m in self.modules():
             classname = m.__class__.__name__

-            if 'Conv' in classname:
+            if "Conv" in classname:
                 m.weight.data.normal_(0.0, 0.02)

-            elif 'BatchNorm' in classname:
+            elif "BatchNorm" in classname:
                 m.weight.data.normal_(1.0, 0.02)
                 m.bias.data.fill_(0)

@@ -64,31 +66,25 @@ def __init__(self, z_dim, nf, nc):
         super(Generator, self).__init__()

         self.net = nn.Sequential(
-            # input is Z, going into a convolution
             nn.ConvTranspose2d(in_channels=z_dim, out_channels=nf * 8, kernel_size=4, stride=1, padding=0, bias=False),
             nn.BatchNorm2d(nf * 8),
             nn.ReLU(inplace=True),
-            # state size. (nf*8) x 4 x 4
             nn.ConvTranspose2d(in_channels=nf * 8, out_channels=nf * 4, kernel_size=4, stride=2, padding=1, bias=False),
             nn.BatchNorm2d(nf * 4),
             nn.ReLU(inplace=True),
-            # state size. (nf*4) x 8 x 8
             nn.ConvTranspose2d(in_channels=nf * 4, out_channels=nf * 2, kernel_size=4, stride=2, padding=1, bias=False),
             nn.BatchNorm2d(nf * 2),
             nn.ReLU(inplace=True),
-            # state size.
(nf*2) x 16 x 16 nn.ConvTranspose2d(in_channels=nf * 2, out_channels=nf, kernel_size=4, stride=2, padding=1, bias=False), nn.BatchNorm2d(nf), nn.ReLU(inplace=True), - # state size. (nf) x 32 x 32 nn.ConvTranspose2d(in_channels=nf, out_channels=nc, kernel_size=4, stride=2, padding=1, bias=False), nn.Tanh() - # state size. (nc) x 64 x 64 ) @@ -109,29 +105,24 @@ def __init__(self, nc, nf): super(Discriminator, self).__init__() self.net = nn.Sequential( - # input is (nc) x 64 x 64 nn.Conv2d(in_channels=nc, out_channels=nf, kernel_size=4, stride=2, padding=1, bias=False), nn.LeakyReLU(0.2, inplace=True), - # state size. (nf) x 32 x 32 nn.Conv2d(in_channels=nf, out_channels=nf * 2, kernel_size=4, stride=2, padding=1, bias=False), nn.BatchNorm2d(nf * 2), nn.LeakyReLU(0.2, inplace=True), - # state size. (nf*2) x 16 x 16 nn.Conv2d(in_channels=nf * 2, out_channels=nf * 4, kernel_size=4, stride=2, padding=1, bias=False), nn.BatchNorm2d(nf * 4), nn.LeakyReLU(0.2, inplace=True), - # state size. (nf*4) x 8 x 8 nn.Conv2d(in_channels=nf * 4, out_channels=nf * 8, kernel_size=4, stride=2, padding=1, bias=False), nn.BatchNorm2d(nf * 8), nn.LeakyReLU(0.2, inplace=True), - # state size. (nf*8) x 4 x 4 nn.Conv2d(in_channels=nf * 8, out_channels=1, kernel_size=4, stride=1, padding=0, bias=False), - nn.Sigmoid() + nn.Sigmoid(), ) self.weights_init() @@ -150,7 +141,7 @@ def check_manual_seed(seed): random.seed(seed) torch.manual_seed(seed) - print('Using manual seed: {seed}'.format(seed=seed)) + print("Using manual seed: {seed}".format(seed=seed)) def check_dataset(dataset, dataroot): @@ -169,33 +160,27 @@ def check_dataset(dataset, dataroot): to_tensor = transforms.ToTensor() normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) - if dataset in {'imagenet', 'folder', 'lfw'}: - dataset = dset.ImageFolder(root=dataroot, transform=transforms.Compose([resize, - crop, - to_tensor, - normalize])) + if dataset in {"imagenet", "folder", "lfw"}: + dataset = dset.ImageFolder(root=dataroot, transform=transforms.Compose([resize, crop, to_tensor, normalize])) nc = 3 - elif dataset == 'lsun': - dataset = dset.LSUN(root=dataroot, classes=['bedroom_train'], transform=transforms.Compose([resize, - crop, - to_tensor, - normalize])) + elif dataset == "lsun": + dataset = dset.LSUN( + root=dataroot, classes=["bedroom_train"], transform=transforms.Compose([resize, crop, to_tensor, normalize]) + ) nc = 3 - elif dataset == 'cifar10': - dataset = dset.CIFAR10(root=dataroot, download=True, transform=transforms.Compose([resize, - to_tensor, - normalize])) + elif dataset == "cifar10": + dataset = dset.CIFAR10( + root=dataroot, download=True, transform=transforms.Compose([resize, to_tensor, normalize]) + ) nc = 3 - elif dataset == 'mnist': - dataset = dset.MNIST(root=dataroot, download=True, transform=transforms.Compose([resize, - to_tensor, - normalize])) + elif dataset == "mnist": + dataset = dset.MNIST(root=dataroot, download=True, transform=transforms.Compose([resize, to_tensor, normalize])) nc = 1 - elif dataset == 'fake': + elif dataset == "fake": dataset = dset.FakeData(size=256, image_size=(3, 64, 64), transform=to_tensor) nc = 3 @@ -205,14 +190,24 @@ def check_dataset(dataset, dataroot): return dataset, nc -def main(dataset, dataroot, - z_dim, g_filters, d_filters, - batch_size, epochs, - learning_rate, beta_1, - saved_G, saved_D, - seed, - n_workers, device, - alpha, output_dir): +def main( + dataset, + dataroot, + z_dim, + g_filters, + d_filters, + batch_size, + epochs, + learning_rate, + beta_1, + saved_G, + saved_D, 
+ seed, + n_workers, + device, + alpha, + output_dir, +): # seed check_manual_seed(seed) @@ -294,13 +289,7 @@ def step(engine, batch): # gradient update optimizerG.step() - return { - 'errD': errD.item(), - 'errG': errG.item(), - 'D_x': D_x, - 'D_G_z1': D_G_z1, - 'D_G_z2': D_G_z2 - } + return {"errD": errD.item(), "errG": errG.item(), "D_x": D_x, "D_G_z1": D_G_z1, "D_G_z2": D_G_z2} # ignite objects trainer = Engine(step) @@ -308,12 +297,12 @@ def step(engine, batch): timer = Timer(average=True) # attach running average metrics - monitoring_metrics = ['errD', 'errG', 'D_x', 'D_G_z1', 'D_G_z2'] - RunningAverage(alpha=alpha, output_transform=lambda x: x['errD']).attach(trainer, 'errD') - RunningAverage(alpha=alpha, output_transform=lambda x: x['errG']).attach(trainer, 'errG') - RunningAverage(alpha=alpha, output_transform=lambda x: x['D_x']).attach(trainer, 'D_x') - RunningAverage(alpha=alpha, output_transform=lambda x: x['D_G_z1']).attach(trainer, 'D_G_z1') - RunningAverage(alpha=alpha, output_transform=lambda x: x['D_G_z2']).attach(trainer, 'D_G_z2') + monitoring_metrics = ["errD", "errG", "D_x", "D_G_z1", "D_G_z2"] + RunningAverage(alpha=alpha, output_transform=lambda x: x["errD"]).attach(trainer, "errD") + RunningAverage(alpha=alpha, output_transform=lambda x: x["errG"]).attach(trainer, "errG") + RunningAverage(alpha=alpha, output_transform=lambda x: x["D_x"]).attach(trainer, "D_x") + RunningAverage(alpha=alpha, output_transform=lambda x: x["D_G_z1"]).attach(trainer, "D_G_z1") + RunningAverage(alpha=alpha, output_transform=lambda x: x["D_G_z2"]).attach(trainer, "D_G_z2") # attach progress bar pbar = ProgressBar() @@ -322,21 +311,19 @@ def step(engine, batch): @trainer.on(Events.ITERATION_COMPLETED(every=PRINT_FREQ)) def print_logs(engine): fname = os.path.join(output_dir, LOGS_FNAME) - columns = ["iteration", ] + list(engine.state.metrics.keys()) - values = [str(engine.state.iteration), ] + \ - [str(round(value, 5)) for value in engine.state.metrics.values()] + columns = ["iteration",] + list(engine.state.metrics.keys()) + values = [str(engine.state.iteration),] + [str(round(value, 5)) for value in engine.state.metrics.values()] - with open(fname, 'a') as f: + with open(fname, "a") as f: if f.tell() == 0: - print('\t'.join(columns), file=f) - print('\t'.join(values), file=f) + print("\t".join(columns), file=f) + print("\t".join(values), file=f) - message = '[{epoch}/{max_epoch}][{i}/{max_i}]'.format(epoch=engine.state.epoch, - max_epoch=epochs, - i=(engine.state.iteration % len(loader)), - max_i=len(loader)) + message = "[{epoch}/{max_epoch}][{i}/{max_i}]".format( + epoch=engine.state.epoch, max_epoch=epochs, i=(engine.state.iteration % len(loader)), max_i=len(loader) + ) for name, value in zip(columns, values): - message += ' | {name}: {value}'.format(name=name, value=value) + message += " | {name}: {value}".format(name=name, value=value) pbar.log_message(message) @@ -355,20 +342,23 @@ def save_real_example(engine): vutils.save_image(img, path, normalize=True) # adding handlers using `trainer.add_event_handler` method API - trainer.add_event_handler(event_name=Events.EPOCH_COMPLETED, handler=checkpoint_handler, - to_save={ - 'netG': netG, - 'netD': netD - }) + trainer.add_event_handler( + event_name=Events.EPOCH_COMPLETED, handler=checkpoint_handler, to_save={"netG": netG, "netD": netD} + ) # automatically adding handlers via a special `attach` method of `Timer` handler - timer.attach(trainer, start=Events.EPOCH_STARTED, resume=Events.ITERATION_STARTED, - 
pause=Events.ITERATION_COMPLETED, step=Events.ITERATION_COMPLETED) + timer.attach( + trainer, + start=Events.EPOCH_STARTED, + resume=Events.ITERATION_STARTED, + pause=Events.ITERATION_COMPLETED, + step=Events.ITERATION_COMPLETED, + ) # adding handlers using `trainer.on` decorator API @trainer.on(Events.EPOCH_COMPLETED) def print_times(engine): - pbar.log_message('Epoch {} done. Time per batch: {:.3f}[s]'.format(engine.state.epoch, timer.value())) + pbar.log_message("Epoch {} done. Time per batch: {:.3f}[s]".format(engine.state.epoch, timer.value())) timer.reset() # adding handlers using `trainer.on` decorator API @@ -376,19 +366,20 @@ def print_times(engine): def create_plots(engine): try: import matplotlib as mpl - mpl.use('agg') + + mpl.use("agg") import numpy as np import pandas as pd import matplotlib.pyplot as plt except ImportError: - warnings.warn('Loss plots will not be generated -- pandas or matplotlib not found') + warnings.warn("Loss plots will not be generated -- pandas or matplotlib not found") else: - df = pd.read_csv(os.path.join(output_dir, LOGS_FNAME), delimiter='\t', index_col='iteration') + df = pd.read_csv(os.path.join(output_dir, LOGS_FNAME), delimiter="\t", index_col="iteration") _ = df.plot(subplots=True, figsize=(20, 20)) - _ = plt.xlabel('Iteration number') + _ = plt.xlabel("Iteration number") fig = plt.gcf() path = os.path.join(output_dir, PLOT_FNAME) @@ -399,13 +390,10 @@ def create_plots(engine): def handle_exception(engine, e): if isinstance(e, KeyboardInterrupt) and (engine.state.iteration > 1): engine.terminate() - warnings.warn('KeyboardInterrupt caught. Exiting gracefully.') + warnings.warn("KeyboardInterrupt caught. Exiting gracefully.") create_plots(engine) - checkpoint_handler(engine, { - 'netG_exception': netG, - 'netD_exception': netD - }) + checkpoint_handler(engine, {"netG_exception": netG, "netD_exception": netD}) else: raise e @@ -414,75 +402,50 @@ def handle_exception(engine, e): trainer.run(loader, epochs) -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument('--dataset', - required=True, choices={'cifar10', 'lsun', 'imagenet', 'folder', 'lfw', 'fake', 'mnist'}, - help='Type of the dataset to be used.') + parser.add_argument( + "--dataset", + required=True, + choices={"cifar10", "lsun", "imagenet", "folder", "lfw", "fake", "mnist"}, + help="Type of the dataset to be used.", + ) - parser.add_argument('--dataroot', - required=True, - help='path to dataset') + parser.add_argument("--dataroot", required=True, help="path to dataset") - parser.add_argument('--workers', - type=int, default=2, - help='number of data loading workers') + parser.add_argument("--workers", type=int, default=2, help="number of data loading workers") - parser.add_argument('--batch-size', - type=int, default=64, - help='input batch size') + parser.add_argument("--batch-size", type=int, default=64, help="input batch size") - parser.add_argument('--z-dim', - type=int, default=100, - help='size of the latent z vector') + parser.add_argument("--z-dim", type=int, default=100, help="size of the latent z vector") - parser.add_argument('--g-filters', - type=int, default=64, - help='Number of filters in the second-to-last generator deconv layer') + parser.add_argument( + "--g-filters", type=int, default=64, help="Number of filters in the second-to-last generator deconv layer" + ) - parser.add_argument('--d-filters', - type=int, default=64, - help='Number of filters in first discriminator conv layer') + 
parser.add_argument("--d-filters", type=int, default=64, help="Number of filters in first discriminator conv layer") - parser.add_argument('--epochs', - type=int, default=25, - help='number of epochs to train for') + parser.add_argument("--epochs", type=int, default=25, help="number of epochs to train for") - parser.add_argument('--lr', - type=float, default=0.0002, - help='learning rate') + parser.add_argument("--lr", type=float, default=0.0002, help="learning rate") - parser.add_argument('--beta-1', - type=float, default=0.5, - help='beta_1 for adam') + parser.add_argument("--beta-1", type=float, default=0.5, help="beta_1 for adam") - parser.add_argument('--no-cuda', - action='store_true', - help='disables cuda') + parser.add_argument("--no-cuda", action="store_true", help="disables cuda") - parser.add_argument('--saved-G', - default='', - help="path to pickled generator (to continue training)") + parser.add_argument("--saved-G", default="", help="path to pickled generator (to continue training)") - parser.add_argument('--saved-D', - default='', - help="path to pickled discriminator (to continue training)") + parser.add_argument("--saved-D", default="", help="path to pickled discriminator (to continue training)") - parser.add_argument('--output-dir', - default='.', - help='directory to output images and model checkpoints') + parser.add_argument("--output-dir", default=".", help="directory to output images and model checkpoints") - parser.add_argument('--seed', - type=int, - help='manual seed') + parser.add_argument("--seed", type=int, help="manual seed") - parser.add_argument('--alpha', - type=float, default=0.98, - help='smoothing constant for exponential moving averages') + parser.add_argument("--alpha", type=float, default=0.98, help="smoothing constant for exponential moving averages") args = parser.parse_args() - dev = 'cpu' if (not torch.cuda.is_available() or args.no_cuda) else 'cuda:0' + dev = "cpu" if (not torch.cuda.is_available() or args.no_cuda) else "cuda:0" try: os.makedirs(args.output_dir) @@ -490,11 +453,21 @@ def handle_exception(engine, e): if (not os.path.isdir(args.output_dir)) or (len(os.listdir(args.output_dir)) > 0): raise FileExistsError("Please provide a path to a non-existing or empty directory.") - main(dataset=args.dataset, dataroot=args.dataroot, - z_dim=args.z_dim, g_filters=args.g_filters, d_filters=args.d_filters, - batch_size=args.batch_size, epochs=args.epochs, - learning_rate=args.lr, beta_1=args.beta_1, - saved_D=args.saved_D, saved_G=args.saved_G, - seed=args.seed, - device=dev, n_workers=args.workers, - alpha=args.alpha, output_dir=args.output_dir) + main( + dataset=args.dataset, + dataroot=args.dataroot, + z_dim=args.z_dim, + g_filters=args.g_filters, + d_filters=args.d_filters, + batch_size=args.batch_size, + epochs=args.epochs, + learning_rate=args.lr, + beta_1=args.beta_1, + saved_D=args.saved_D, + saved_G=args.saved_G, + seed=args.seed, + device=dev, + n_workers=args.workers, + alpha=args.alpha, + output_dir=args.output_dir, + ) diff --git a/examples/mnist/mnist.py b/examples/mnist/mnist.py index fa79f835ab4c..7d3a002668b6 100644 --- a/examples/mnist/mnist.py +++ b/examples/mnist/mnist.py @@ -36,34 +36,32 @@ def forward(self, x): def get_data_loaders(train_batch_size, val_batch_size): data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))]) - train_loader = DataLoader(MNIST(download=True, root=".", transform=data_transform, train=True), - batch_size=train_batch_size, shuffle=True) + train_loader = DataLoader( + 
MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True + ) - val_loader = DataLoader(MNIST(download=False, root=".", transform=data_transform, train=False), - batch_size=val_batch_size, shuffle=False) + val_loader = DataLoader( + MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False + ) return train_loader, val_loader def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval): train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size) model = Net() - device = 'cpu' + device = "cpu" if torch.cuda.is_available(): - device = 'cuda' + device = "cuda" optimizer = SGD(model.parameters(), lr=lr, momentum=momentum) trainer = create_supervised_trainer(model, optimizer, F.nll_loss, device=device) - evaluator = create_supervised_evaluator(model, - metrics={'accuracy': Accuracy(), - 'nll': Loss(F.nll_loss)}, - device=device) + evaluator = create_supervised_evaluator( + model, metrics={"accuracy": Accuracy(), "nll": Loss(F.nll_loss)}, device=device + ) desc = "ITERATION - loss: {:.2f}" - pbar = tqdm( - initial=0, leave=False, total=len(train_loader), - desc=desc.format(0) - ) + pbar = tqdm(initial=0, leave=False, total=len(train_loader), desc=desc.format(0)) @trainer.on(Events.ITERATION_COMPLETED(every=log_interval)) def log_training_loss(engine): @@ -75,22 +73,25 @@ def log_training_results(engine): pbar.refresh() evaluator.run(train_loader) metrics = evaluator.state.metrics - avg_accuracy = metrics['accuracy'] - avg_nll = metrics['nll'] + avg_accuracy = metrics["accuracy"] + avg_nll = metrics["nll"] tqdm.write( - "Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}" - .format(engine.state.epoch, avg_accuracy, avg_nll) + "Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format( + engine.state.epoch, avg_accuracy, avg_nll + ) ) @trainer.on(Events.EPOCH_COMPLETED) def log_validation_results(engine): evaluator.run(val_loader) metrics = evaluator.state.metrics - avg_accuracy = metrics['accuracy'] - avg_nll = metrics['nll'] + avg_accuracy = metrics["accuracy"] + avg_nll = metrics["nll"] tqdm.write( - "Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}" - .format(engine.state.epoch, avg_accuracy, avg_nll)) + "Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format( + engine.state.epoch, avg_accuracy, avg_nll + ) + ) pbar.n = pbar.last_print_n = 0 @@ -100,18 +101,16 @@ def log_validation_results(engine): if __name__ == "__main__": parser = ArgumentParser() - parser.add_argument('--batch_size', type=int, default=64, - help='input batch size for training (default: 64)') - parser.add_argument('--val_batch_size', type=int, default=1000, - help='input batch size for validation (default: 1000)') - parser.add_argument('--epochs', type=int, default=10, - help='number of epochs to train (default: 10)') - parser.add_argument('--lr', type=float, default=0.01, - help='learning rate (default: 0.01)') - parser.add_argument('--momentum', type=float, default=0.5, - help='SGD momentum (default: 0.5)') - parser.add_argument('--log_interval', type=int, default=10, - help='how many batches to wait before logging training status') + parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)") + parser.add_argument( + "--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)" + ) + parser.add_argument("--epochs", 
type=int, default=10, help="number of epochs to train (default: 10)") + parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)") + parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)") + parser.add_argument( + "--log_interval", type=int, default=10, help="how many batches to wait before logging training status" + ) args = parser.parse_args() diff --git a/examples/mnist/mnist_save_resume_engine.py b/examples/mnist/mnist_save_resume_engine.py index 2a5d153283b9..d4554f9ceb5e 100644 --- a/examples/mnist/mnist_save_resume_engine.py +++ b/examples/mnist/mnist_save_resume_engine.py @@ -11,6 +11,7 @@ from torchvision.datasets import MNIST from tqdm import tqdm + try: from tensorboardX import SummaryWriter except ImportError: @@ -43,46 +44,51 @@ def forward(self, x): def get_data_loaders(train_batch_size, val_batch_size): data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))]) - train_loader = DataLoader(MNIST(download=True, root=".", transform=data_transform, train=True), - batch_size=train_batch_size, shuffle=True) + train_loader = DataLoader( + MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True + ) - val_loader = DataLoader(MNIST(download=False, root=".", transform=data_transform, train=False), - batch_size=val_batch_size, shuffle=False) + val_loader = DataLoader( + MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False + ) return train_loader, val_loader -def run(train_batch_size, val_batch_size, - epochs, lr, momentum, - log_interval, log_dir, - checkpoint_every, - resume_from, crash_iteration=1000): +def run( + train_batch_size, + val_batch_size, + epochs, + lr, + momentum, + log_interval, + log_dir, + checkpoint_every, + resume_from, + crash_iteration=1000, +): train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size) model = Net() writer = SummaryWriter(logdir=log_dir) - device = 'cpu' + device = "cpu" if torch.cuda.is_available(): - device = 'cuda' + device = "cuda" criterion = nn.NLLLoss() optimizer = SGD(model.parameters(), lr=lr, momentum=momentum) lr_scheduler = StepLR(optimizer, step_size=1, gamma=0.5) trainer = create_supervised_trainer(model, optimizer, criterion, device=device) - evaluator = create_supervised_evaluator(model, - metrics={'accuracy': Accuracy(), - 'nll': Loss(criterion)}, - device=device) + evaluator = create_supervised_evaluator( + model, metrics={"accuracy": Accuracy(), "nll": Loss(criterion)}, device=device + ) @trainer.on(Events.EPOCH_COMPLETED) def lr_step(engine): lr_scheduler.step() desc = "ITERATION - loss: {:.4f} - lr: {:.4f}" - pbar = tqdm( - initial=0, leave=False, total=len(train_loader), - desc=desc.format(0, lr) - ) + pbar = tqdm(initial=0, leave=False, total=len(train_loader), desc=desc.format(0, lr)) if log_interval is None: e = Events.ITERATION_COMPLETED @@ -92,17 +98,20 @@ def lr_step(engine): @trainer.on(e) def log_training_loss(engine): - lr = optimizer.param_groups[0]['lr'] + lr = optimizer.param_groups[0]["lr"] pbar.desc = desc.format(engine.state.output, lr) pbar.update(log_interval) writer.add_scalar("training/loss", engine.state.output, engine.state.iteration) writer.add_scalar("lr", lr, engine.state.iteration) if resume_from is None: + @trainer.on(Events.ITERATION_COMPLETED(once=crash_iteration)) def _(engine): raise Exception("STOP at {}".format(engine.state.iteration)) + else: + @trainer.on(Events.STARTED) def 
_(engine): pbar.n = engine.state.iteration @@ -112,11 +121,12 @@ def log_training_results(engine): pbar.refresh() evaluator.run(train_loader) metrics = evaluator.state.metrics - avg_accuracy = metrics['accuracy'] - avg_nll = metrics['nll'] + avg_accuracy = metrics["accuracy"] + avg_nll = metrics["nll"] tqdm.write( - "Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}" - .format(engine.state.epoch, avg_accuracy, avg_nll) + "Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format( + engine.state.epoch, avg_accuracy, avg_nll + ) ) writer.add_scalar("training/avg_loss", avg_nll, engine.state.epoch) writer.add_scalar("training/avg_accuracy", avg_accuracy, engine.state.epoch) @@ -125,18 +135,21 @@ def log_training_results(engine): def log_validation_results(engine): evaluator.run(val_loader) metrics = evaluator.state.metrics - avg_accuracy = metrics['accuracy'] - avg_nll = metrics['nll'] + avg_accuracy = metrics["accuracy"] + avg_nll = metrics["nll"] tqdm.write( - "Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}" - .format(engine.state.epoch, avg_accuracy, avg_nll)) + "Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format( + engine.state.epoch, avg_accuracy, avg_nll + ) + ) pbar.n = pbar.last_print_n = 0 writer.add_scalar("valdation/avg_loss", avg_nll, engine.state.epoch) writer.add_scalar("valdation/avg_accuracy", avg_accuracy, engine.state.epoch) objects_to_checkpoint = {"trainer": trainer, "model": model, "optimizer": optimizer, "lr_scheduler": lr_scheduler} - training_checkpoint = Checkpoint(to_save=objects_to_checkpoint, - save_handler=DiskSaver(log_dir, require_empty=False)) + training_checkpoint = Checkpoint( + to_save=objects_to_checkpoint, save_handler=DiskSaver(log_dir, require_empty=False) + ) trainer.add_event_handler(Events.ITERATION_COMPLETED(every=checkpoint_every), training_checkpoint) @@ -149,6 +162,7 @@ def log_validation_results(engine): trainer.run(train_loader, max_epochs=epochs) except Exception as e: import traceback + print(traceback.format_exc()) pbar.close() @@ -157,28 +171,36 @@ def log_validation_results(engine): if __name__ == "__main__": parser = ArgumentParser() - parser.add_argument('--batch_size', type=int, default=64, - help='input batch size for training (default: 64)') - parser.add_argument('--val_batch_size', type=int, default=1000, - help='input batch size for validation (default: 1000)') - parser.add_argument('--epochs', type=int, default=10, - help='number of epochs to train (default: 10)') - parser.add_argument('--lr', type=float, default=0.01, - help='learning rate (default: 0.01)') - parser.add_argument('--momentum', type=float, default=0.5, - help='SGD momentum (default: 0.5)') - parser.add_argument('--log_interval', type=int, default=10, - help='how many batches to wait before logging training status') - parser.add_argument("--log_dir", type=str, default="/tmp/mnist_save_resume", - help="log directory for Tensorboard log output") - parser.add_argument('--checkpoint_every', type=int, default=550, help='Checkpoint training every X iterations') - parser.add_argument('--resume_from', type=str, default=None, - help='Path to the checkpoint .pth file to resume training from') - parser.add_argument('--crash_iteration', type=int, default=3000, help='Iteration at which to raise an exception') + parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)") + parser.add_argument( + "--val_batch_size", type=int, default=1000, help="input 
batch size for validation (default: 1000)" + ) + parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)") + parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)") + parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)") + parser.add_argument( + "--log_interval", type=int, default=10, help="how many batches to wait before logging training status" + ) + parser.add_argument( + "--log_dir", type=str, default="/tmp/mnist_save_resume", help="log directory for Tensorboard log output" + ) + parser.add_argument("--checkpoint_every", type=int, default=550, help="Checkpoint training every X iterations") + parser.add_argument( + "--resume_from", type=str, default=None, help="Path to the checkpoint .pth file to resume training from" + ) + parser.add_argument("--crash_iteration", type=int, default=3000, help="Iteration at which to raise an exception") args = parser.parse_args() - run(args.batch_size, args.val_batch_size, - args.epochs, args.lr, args.momentum, - args.log_interval, args.log_dir, args.checkpoint_every, - args.resume_from, args.crash_iteration) + run( + args.batch_size, + args.val_batch_size, + args.epochs, + args.lr, + args.momentum, + args.log_interval, + args.log_dir, + args.checkpoint_every, + args.resume_from, + args.crash_iteration, + ) diff --git a/examples/mnist/mnist_with_tensorboardx.py b/examples/mnist/mnist_with_tensorboardx.py index 4306563450c8..3bc6f66ba720 100644 --- a/examples/mnist/mnist_with_tensorboardx.py +++ b/examples/mnist/mnist_with_tensorboardx.py @@ -54,11 +54,13 @@ def forward(self, x): def get_data_loaders(train_batch_size, val_batch_size): data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))]) - train_loader = DataLoader(MNIST(download=True, root=".", transform=data_transform, train=True), - batch_size=train_batch_size, shuffle=True) + train_loader = DataLoader( + MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True + ) - val_loader = DataLoader(MNIST(download=False, root=".", transform=data_transform, train=False), - batch_size=val_batch_size, shuffle=False) + val_loader = DataLoader( + MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False + ) return train_loader, val_loader @@ -77,32 +79,36 @@ def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval, lo train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size) model = Net() writer = create_summary_writer(model, train_loader, log_dir) - device = 'cpu' + device = "cpu" if torch.cuda.is_available(): - device = 'cuda' + device = "cuda" optimizer = SGD(model.parameters(), lr=lr, momentum=momentum) trainer = create_supervised_trainer(model, optimizer, F.nll_loss, device=device) - evaluator = create_supervised_evaluator(model, - metrics={'accuracy': Accuracy(), - 'nll': Loss(F.nll_loss)}, - device=device) + evaluator = create_supervised_evaluator( + model, metrics={"accuracy": Accuracy(), "nll": Loss(F.nll_loss)}, device=device + ) @trainer.on(Events.ITERATION_COMPLETED(every=log_interval)) def log_training_loss(engine): - print("Epoch[{}] Iteration[{}/{}] Loss: {:.2f}" - "".format(engine.state.epoch, engine.state.iteration, len(train_loader), engine.state.output)) + print( + "Epoch[{}] Iteration[{}/{}] Loss: {:.2f}" + "".format(engine.state.epoch, engine.state.iteration, len(train_loader), engine.state.output) + ) 
writer.add_scalar("training/loss", engine.state.output, engine.state.iteration) @trainer.on(Events.EPOCH_COMPLETED) def log_training_results(engine): evaluator.run(train_loader) metrics = evaluator.state.metrics - avg_accuracy = metrics['accuracy'] - avg_nll = metrics['nll'] - print("Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}" - .format(engine.state.epoch, avg_accuracy, avg_nll)) + avg_accuracy = metrics["accuracy"] + avg_nll = metrics["nll"] + print( + "Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format( + engine.state.epoch, avg_accuracy, avg_nll + ) + ) writer.add_scalar("training/avg_loss", avg_nll, engine.state.epoch) writer.add_scalar("training/avg_accuracy", avg_accuracy, engine.state.epoch) @@ -110,10 +116,13 @@ def log_training_results(engine): def log_validation_results(engine): evaluator.run(val_loader) metrics = evaluator.state.metrics - avg_accuracy = metrics['accuracy'] - avg_nll = metrics['nll'] - print("Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}" - .format(engine.state.epoch, avg_accuracy, avg_nll)) + avg_accuracy = metrics["accuracy"] + avg_nll = metrics["nll"] + print( + "Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format( + engine.state.epoch, avg_accuracy, avg_nll + ) + ) writer.add_scalar("valdation/avg_loss", avg_nll, engine.state.epoch) writer.add_scalar("valdation/avg_accuracy", avg_accuracy, engine.state.epoch) @@ -125,22 +134,20 @@ def log_validation_results(engine): if __name__ == "__main__": parser = ArgumentParser() - parser.add_argument('--batch_size', type=int, default=64, - help='input batch size for training (default: 64)') - parser.add_argument('--val_batch_size', type=int, default=1000, - help='input batch size for validation (default: 1000)') - parser.add_argument('--epochs', type=int, default=10, - help='number of epochs to train (default: 10)') - parser.add_argument('--lr', type=float, default=0.01, - help='learning rate (default: 0.01)') - parser.add_argument('--momentum', type=float, default=0.5, - help='SGD momentum (default: 0.5)') - parser.add_argument('--log_interval', type=int, default=10, - help='how many batches to wait before logging training status') - parser.add_argument("--log_dir", type=str, default="tensorboard_logs", - help="log directory for Tensorboard log output") + parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)") + parser.add_argument( + "--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)" + ) + parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)") + parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)") + parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)") + parser.add_argument( + "--log_interval", type=int, default=10, help="how many batches to wait before logging training status" + ) + parser.add_argument( + "--log_dir", type=str, default="tensorboard_logs", help="log directory for Tensorboard log output" + ) args = parser.parse_args() - run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum, - args.log_interval, args.log_dir) + run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum, args.log_interval, args.log_dir) diff --git a/examples/mnist/mnist_with_visdom.py b/examples/mnist/mnist_with_visdom.py index 13d5251a8acf..826281907cc2 100644 --- 
a/examples/mnist/mnist_with_visdom.py +++ b/examples/mnist/mnist_with_visdom.py @@ -8,6 +8,7 @@ from torchvision.datasets import MNIST from torchvision.transforms import Compose, ToTensor, Normalize import numpy as np + try: import visdom except ImportError: @@ -39,11 +40,13 @@ def forward(self, x): def get_data_loaders(train_batch_size, val_batch_size): data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))]) - train_loader = DataLoader(MNIST(download=True, root=".", transform=data_transform, train=True), - batch_size=train_batch_size, shuffle=True) + train_loader = DataLoader( + MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True + ) - val_loader = DataLoader(MNIST(download=False, root=".", transform=data_transform, train=False), - batch_size=val_batch_size, shuffle=False) + val_loader = DataLoader( + MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False + ) return train_loader, val_loader @@ -59,57 +62,67 @@ def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval): train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size) model = Net() - device = 'cpu' + device = "cpu" if torch.cuda.is_available(): - device = 'cuda' + device = "cuda" optimizer = SGD(model.parameters(), lr=lr, momentum=momentum) trainer = create_supervised_trainer(model, optimizer, F.nll_loss, device=device) - evaluator = create_supervised_evaluator(model, - metrics={'accuracy': Accuracy(), - 'nll': Loss(F.nll_loss)}, - device=device) + evaluator = create_supervised_evaluator( + model, metrics={"accuracy": Accuracy(), "nll": Loss(F.nll_loss)}, device=device + ) - train_loss_window = create_plot_window(vis, '#Iterations', 'Loss', 'Training Loss') - train_avg_loss_window = create_plot_window(vis, '#Iterations', 'Loss', 'Training Average Loss') - train_avg_accuracy_window = create_plot_window(vis, '#Iterations', 'Accuracy', 'Training Average Accuracy') - val_avg_loss_window = create_plot_window(vis, '#Epochs', 'Loss', 'Validation Average Loss') - val_avg_accuracy_window = create_plot_window(vis, '#Epochs', 'Accuracy', 'Validation Average Accuracy') + train_loss_window = create_plot_window(vis, "#Iterations", "Loss", "Training Loss") + train_avg_loss_window = create_plot_window(vis, "#Iterations", "Loss", "Training Average Loss") + train_avg_accuracy_window = create_plot_window(vis, "#Iterations", "Accuracy", "Training Average Accuracy") + val_avg_loss_window = create_plot_window(vis, "#Epochs", "Loss", "Validation Average Loss") + val_avg_accuracy_window = create_plot_window(vis, "#Epochs", "Accuracy", "Validation Average Accuracy") @trainer.on(Events.ITERATION_COMPLETED(every=log_interval)) def log_training_loss(engine): - print("Epoch[{}] Iteration[{}/{}] Loss: {:.2f}" - "".format(engine.state.epoch, engine.state.iteration, len(train_loader), engine.state.output)) - vis.line(X=np.array([engine.state.iteration]), - Y=np.array([engine.state.output]), - update='append', win=train_loss_window) + print( + "Epoch[{}] Iteration[{}/{}] Loss: {:.2f}" + "".format(engine.state.epoch, engine.state.iteration, len(train_loader), engine.state.output) + ) + vis.line( + X=np.array([engine.state.iteration]), + Y=np.array([engine.state.output]), + update="append", + win=train_loss_window, + ) @trainer.on(Events.EPOCH_COMPLETED) def log_training_results(engine): evaluator.run(train_loader) metrics = evaluator.state.metrics - avg_accuracy = metrics['accuracy'] - avg_nll 
= metrics['nll'] - print("Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}" - .format(engine.state.epoch, avg_accuracy, avg_nll)) - vis.line(X=np.array([engine.state.epoch]), Y=np.array([avg_accuracy]), - win=train_avg_accuracy_window, update='append') - vis.line(X=np.array([engine.state.epoch]), Y=np.array([avg_nll]), - win=train_avg_loss_window, update='append') + avg_accuracy = metrics["accuracy"] + avg_nll = metrics["nll"] + print( + "Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format( + engine.state.epoch, avg_accuracy, avg_nll + ) + ) + vis.line( + X=np.array([engine.state.epoch]), Y=np.array([avg_accuracy]), win=train_avg_accuracy_window, update="append" + ) + vis.line(X=np.array([engine.state.epoch]), Y=np.array([avg_nll]), win=train_avg_loss_window, update="append") @trainer.on(Events.EPOCH_COMPLETED) def log_validation_results(engine): evaluator.run(val_loader) metrics = evaluator.state.metrics - avg_accuracy = metrics['accuracy'] - avg_nll = metrics['nll'] - print("Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}" - .format(engine.state.epoch, avg_accuracy, avg_nll)) - vis.line(X=np.array([engine.state.epoch]), Y=np.array([avg_accuracy]), - win=val_avg_accuracy_window, update='append') - vis.line(X=np.array([engine.state.epoch]), Y=np.array([avg_nll]), - win=val_avg_loss_window, update='append') + avg_accuracy = metrics["accuracy"] + avg_nll = metrics["nll"] + print( + "Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format( + engine.state.epoch, avg_accuracy, avg_nll + ) + ) + vis.line( + X=np.array([engine.state.epoch]), Y=np.array([avg_accuracy]), win=val_avg_accuracy_window, update="append" + ) + vis.line(X=np.array([engine.state.epoch]), Y=np.array([avg_nll]), win=val_avg_loss_window, update="append") # kick everything off trainer.run(train_loader, max_epochs=epochs) @@ -117,18 +130,16 @@ def log_validation_results(engine): if __name__ == "__main__": parser = ArgumentParser() - parser.add_argument('--batch_size', type=int, default=64, - help='input batch size for training (default: 64)') - parser.add_argument('--val_batch_size', type=int, default=1000, - help='input batch size for validation (default: 1000)') - parser.add_argument('--epochs', type=int, default=10, - help='number of epochs to train (default: 10)') - parser.add_argument('--lr', type=float, default=0.01, - help='learning rate (default: 0.01)') - parser.add_argument('--momentum', type=float, default=0.5, - help='SGD momentum (default: 0.5)') - parser.add_argument('--log_interval', type=int, default=10, - help='how many batches to wait before logging training status') + parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)") + parser.add_argument( + "--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)" + ) + parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)") + parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)") + parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)") + parser.add_argument( + "--log_interval", type=int, default=10, help="how many batches to wait before logging training status" + ) parser.add_argument("--log_file", type=str, default=None, help="log file to log output to") args = parser.parse_args() diff --git a/examples/references/classification/imagenet/code/dataflow/dataloaders.py 
b/examples/references/classification/imagenet/code/dataflow/dataloaders.py index 3148155dff02..fab5e1d68993 100644 --- a/examples/references/classification/imagenet/code/dataflow/dataloaders.py +++ b/examples/references/classification/imagenet/code/dataflow/dataloaders.py @@ -15,25 +15,27 @@ def opencv_loader(path): return cv2.cvtColor(img, cv2.COLOR_BGR2RGB) -def get_train_val_loaders(root_path: str, - train_transforms: Callable, - val_transforms: Callable, - batch_size: int = 16, - num_workers: int = 8, - val_batch_size: Optional[int] = None, - pin_memory: bool = True, - random_seed: Optional[int] = None, - train_sampler: Optional[Union[Sampler, str]] = None, - val_sampler: Optional[Union[Sampler, str]] = None, - limit_train_num_samples: Optional[int] = None, - limit_val_num_samples: Optional[int] = None) -> Tuple[DataLoader, DataLoader, DataLoader]: - - train_ds = ImageNet(root_path, split='train', - transform=lambda sample: train_transforms(image=sample)['image'], - loader=opencv_loader) - val_ds = ImageNet(root_path, split='val', - transform=lambda sample: val_transforms(image=sample)['image'], - loader=opencv_loader) +def get_train_val_loaders( + root_path: str, + train_transforms: Callable, + val_transforms: Callable, + batch_size: int = 16, + num_workers: int = 8, + val_batch_size: Optional[int] = None, + pin_memory: bool = True, + random_seed: Optional[int] = None, + train_sampler: Optional[Union[Sampler, str]] = None, + val_sampler: Optional[Union[Sampler, str]] = None, + limit_train_num_samples: Optional[int] = None, + limit_val_num_samples: Optional[int] = None, +) -> Tuple[DataLoader, DataLoader, DataLoader]: + + train_ds = ImageNet( + root_path, split="train", transform=lambda sample: train_transforms(image=sample)["image"], loader=opencv_loader + ) + val_ds = ImageNet( + root_path, split="val", transform=lambda sample: val_transforms(image=sample)["image"], loader=opencv_loader + ) if limit_train_num_samples is not None: if random_seed is not None: @@ -47,33 +49,50 @@ def get_train_val_loaders(root_path: str, # random samples for evaluation on training dataset if len(val_ds) < len(train_ds): - train_eval_indices = np.random.permutation(len(train_ds))[:len(val_ds)] + train_eval_indices = np.random.permutation(len(train_ds))[: len(val_ds)] train_eval_ds = Subset(train_ds, train_eval_indices) else: train_eval_ds = train_ds if isinstance(train_sampler, str): - assert train_sampler == 'distributed' + assert train_sampler == "distributed" train_sampler = data_dist.DistributedSampler(train_ds) train_eval_sampler = None if isinstance(val_sampler, str): - assert val_sampler == 'distributed' + assert val_sampler == "distributed" val_sampler = data_dist.DistributedSampler(val_ds, shuffle=False) train_eval_sampler = data_dist.DistributedSampler(train_eval_ds, shuffle=False) - train_loader = DataLoader(train_ds, shuffle=train_sampler is None, - batch_size=batch_size, num_workers=num_workers, - sampler=train_sampler, - pin_memory=pin_memory, drop_last=True) + train_loader = DataLoader( + train_ds, + shuffle=train_sampler is None, + batch_size=batch_size, + num_workers=num_workers, + sampler=train_sampler, + pin_memory=pin_memory, + drop_last=True, + ) val_batch_size = batch_size * 4 if val_batch_size is None else val_batch_size - val_loader = DataLoader(val_ds, shuffle=False, sampler=val_sampler, - batch_size=val_batch_size, num_workers=num_workers, - pin_memory=pin_memory, drop_last=False) - - train_eval_loader = DataLoader(train_eval_ds, shuffle=False, sampler=train_eval_sampler, - 
batch_size=val_batch_size, num_workers=num_workers, - pin_memory=pin_memory, drop_last=False) + val_loader = DataLoader( + val_ds, + shuffle=False, + sampler=val_sampler, + batch_size=val_batch_size, + num_workers=num_workers, + pin_memory=pin_memory, + drop_last=False, + ) + + train_eval_loader = DataLoader( + train_eval_ds, + shuffle=False, + sampler=train_eval_sampler, + batch_size=val_batch_size, + num_workers=num_workers, + pin_memory=pin_memory, + drop_last=False, + ) return train_loader, val_loader, train_eval_loader diff --git a/examples/references/classification/imagenet/code/dataflow/vis.py b/examples/references/classification/imagenet/code/dataflow/vis.py index 2bfbb9a31673..a1ebb8315f74 100644 --- a/examples/references/classification/imagenet/code/dataflow/vis.py +++ b/examples/references/classification/imagenet/code/dataflow/vis.py @@ -14,10 +14,12 @@ def tensor_to_numpy(t: torch.Tensor) -> np.ndarray: return img.astype(np.uint8) -def make_grid(batch_img: torch.Tensor, - batch_preds: torch.Tensor, - img_denormalize_fn: Callable, - batch_gt: Optional[torch.Tensor] = None): +def make_grid( + batch_img: torch.Tensor, + batch_preds: torch.Tensor, + img_denormalize_fn: Callable, + batch_gt: Optional[torch.Tensor] = None, +): """Create a grid from batch image and mask as i+l1+gt1 | i+l2+gt2 | i+l3+gt3 | i+l4+gt4 | ... @@ -43,7 +45,7 @@ def make_grid(batch_img: torch.Tensor, h, w = batch_img.shape[2:] le = 1 - out_image = np.zeros((h * le, w * b, 3), dtype='uint8') + out_image = np.zeros((h * le, w * b, 3), dtype="uint8") for i in range(b): img = batch_img[i] @@ -60,6 +62,6 @@ def make_grid(batch_img: torch.Tensor, gt_label = gt_label.cpu().item() target += " | gt={}".format(gt_label) - out_image[0:h, i * w:(i + 1) * w, :] = render_datapoint(img, target, text_size=12) + out_image[0:h, i * w : (i + 1) * w, :] = render_datapoint(img, target, text_size=12) return out_image diff --git a/examples/references/classification/imagenet/code/scripts/common_training.py b/examples/references/classification/imagenet/code/scripts/common_training.py index 8a7256c46097..fda9a6805006 100644 --- a/examples/references/classification/imagenet/code/scripts/common_training.py +++ b/examples/references/classification/imagenet/code/scripts/common_training.py @@ -25,16 +25,18 @@ def training(config, local_rank=None, with_mlflow_logging=False, with_plx_loggin set_seed(config.seed + local_rank) torch.cuda.set_device(local_rank) - device = 'cuda' + device = "cuda" torch.backends.cudnn.benchmark = True train_loader = config.train_loader train_sampler = getattr(train_loader, "sampler", None) - assert train_sampler is not None, "Train loader of type '{}' " \ - "should have attribute 'sampler'".format(type(train_loader)) - assert hasattr(train_sampler, 'set_epoch') and callable(train_sampler.set_epoch), \ - "Train sampler should have a callable method `set_epoch`" + assert train_sampler is not None, "Train loader of type '{}' " "should have attribute 'sampler'".format( + type(train_loader) + ) + assert hasattr(train_sampler, "set_epoch") and callable( + train_sampler.set_epoch + ), "Train sampler should have a callable method `set_epoch`" train_eval_loader = config.train_eval_loader val_loader = config.val_loader @@ -69,27 +71,32 @@ def train_update_function(engine, batch): optimizer.zero_grad() return { - 'supervised batch loss': loss.item(), + "supervised batch loss": loss.item(), } trainer = Engine(train_update_function) lr_scheduler = config.lr_scheduler - to_save = {'model': model, 'optimizer': optimizer, 
'lr_scheduler': lr_scheduler, 'trainer': trainer} + to_save = {"model": model, "optimizer": optimizer, "lr_scheduler": lr_scheduler, "trainer": trainer} common.setup_common_training_handlers( - trainer, train_sampler, + trainer, + train_sampler, to_save=to_save, - save_every_iters=1000, output_path=config.output_path.as_posix(), - lr_scheduler=lr_scheduler, with_gpu_stats=True, - output_names=['supervised batch loss', ], - with_pbars=True, with_pbar_on_iters=with_mlflow_logging, - log_every_iters=1 + save_every_iters=1000, + output_path=config.output_path.as_posix(), + lr_scheduler=lr_scheduler, + with_gpu_stats=True, + output_names=["supervised batch loss",], + with_pbars=True, + with_pbar_on_iters=with_mlflow_logging, + log_every_iters=1, ) if getattr(config, "benchmark_dataflow", False): benchmark_dataflow_num_iters = getattr(config, "benchmark_dataflow_num_iters", 1000) - DataflowBenchmark(benchmark_dataflow_num_iters, prepare_batch=prepare_batch, - device=device).attach(trainer, train_loader) + DataflowBenchmark(benchmark_dataflow_num_iters, prepare_batch=prepare_batch, device=device).attach( + trainer, train_loader + ) # Setup evaluators val_metrics = { @@ -103,8 +110,12 @@ def train_update_function(engine, batch): model_output_transform = getattr(config, "model_output_transform", lambda x: x) evaluator_args = dict( - model=model, metrics=val_metrics, device=device, non_blocking=non_blocking, prepare_batch=prepare_batch, - output_transform=lambda x, y, y_pred: (model_output_transform(y_pred), y,) + model=model, + metrics=val_metrics, + device=device, + non_blocking=non_blocking, + prepare_batch=prepare_batch, + output_transform=lambda x, y, y_pred: (model_output_transform(y_pred), y,), ) train_evaluator = create_supervised_evaluator(**evaluator_args) evaluator = create_supervised_evaluator(**evaluator_args) @@ -129,39 +140,47 @@ def run_validation(_): if dist.get_rank() == 0: - tb_logger = common.setup_tb_logging(config.output_path.as_posix(), trainer, optimizer, - evaluators={"training": train_evaluator, "validation": evaluator}) + tb_logger = common.setup_tb_logging( + config.output_path.as_posix(), + trainer, + optimizer, + evaluators={"training": train_evaluator, "validation": evaluator}, + ) if with_mlflow_logging: - common.setup_mlflow_logging(trainer, optimizer, - evaluators={"training": train_evaluator, "validation": evaluator}) + common.setup_mlflow_logging( + trainer, optimizer, evaluators={"training": train_evaluator, "validation": evaluator} + ) if with_plx_logging: - common.setup_plx_logging(trainer, optimizer, - evaluators={"training": train_evaluator, "validation": evaluator}) + common.setup_plx_logging( + trainer, optimizer, evaluators={"training": train_evaluator, "validation": evaluator} + ) - common.save_best_model_by_val_score(config.output_path.as_posix(), evaluator, model, - metric_name=score_metric_name, trainer=trainer) + common.save_best_model_by_val_score( + config.output_path.as_posix(), evaluator, model, metric_name=score_metric_name, trainer=trainer + ) # Log train/val predictions: - tb_logger.attach(evaluator, - log_handler=predictions_gt_images_handler(img_denormalize_fn=config.img_denormalize, - n_images=15, - another_engine=trainer, - prefix_tag="validation"), - event_name=Events.ITERATION_COMPLETED(once=len(val_loader) // 2)) - - tb_logger.attach(train_evaluator, - log_handler=predictions_gt_images_handler(img_denormalize_fn=config.img_denormalize, - n_images=15, - another_engine=trainer, - prefix_tag="training"), - 
event_name=Events.ITERATION_COMPLETED(once=len(train_eval_loader) // 2)) + tb_logger.attach( + evaluator, + log_handler=predictions_gt_images_handler( + img_denormalize_fn=config.img_denormalize, n_images=15, another_engine=trainer, prefix_tag="validation" + ), + event_name=Events.ITERATION_COMPLETED(once=len(val_loader) // 2), + ) + + tb_logger.attach( + train_evaluator, + log_handler=predictions_gt_images_handler( + img_denormalize_fn=config.img_denormalize, n_images=15, another_engine=trainer, prefix_tag="training" + ), + event_name=Events.ITERATION_COMPLETED(once=len(train_eval_loader) // 2), + ) trainer.run(train_loader, max_epochs=config.num_epochs) class DataflowBenchmark: - def __init__(self, num_iters=100, prepare_batch=None, device="cuda"): from ignite.handlers import Timer @@ -178,16 +197,19 @@ def stop_benchmark_dataflow(engine): engine.terminate() if dist.is_available() and dist.get_rank() == 0: + @self.benchmark_dataflow.on(Events.ITERATION_COMPLETED(every=num_iters // 100)) def show_progress_benchmark_dataflow(engine): print(".", end=" ") self.timer = Timer(average=False) - self.timer.attach(self.benchmark_dataflow, - start=Events.EPOCH_STARTED, - resume=Events.ITERATION_STARTED, - pause=Events.ITERATION_COMPLETED, - step=Events.ITERATION_COMPLETED) + self.timer.attach( + self.benchmark_dataflow, + start=Events.EPOCH_STARTED, + resume=Events.ITERATION_STARTED, + pause=Events.ITERATION_COMPLETED, + step=Events.ITERATION_COMPLETED, + ) def attach(self, trainer, train_loader): diff --git a/examples/references/classification/imagenet/code/scripts/mlflow_training.py b/examples/references/classification/imagenet/code/scripts/mlflow_training.py index dd8cc59247d6..7119791e80fb 100644 --- a/examples/references/classification/imagenet/code/scripts/mlflow_training.py +++ b/examples/references/classification/imagenet/code/scripts/mlflow_training.py @@ -22,8 +22,9 @@ def run(config, logger=None, local_rank=0, **kwargs): dist.init_process_group("nccl", init_method="env://") # As we passed config with option --manual_config_load - assert hasattr(config, "setup"), "We need to manually setup the configuration, please set --manual_config_load " \ - "to py_config_runner" + assert hasattr(config, "setup"), ( + "We need to manually setup the configuration, please set --manual_config_load " "to py_config_runner" + ) config = config.setup() @@ -40,10 +41,9 @@ def run(config, logger=None, local_rank=0, **kwargs): config.output_path = Path(output_path) if dist.get_rank() == 0: - mlflow.log_params({ - "pytorch version": torch.__version__, - "ignite version": ignite.__version__, - }) + mlflow.log_params( + {"pytorch version": torch.__version__, "ignite version": ignite.__version__,} + ) mlflow.log_params(get_params(config, TRAINVAL_CONFIG)) try: diff --git a/examples/references/classification/imagenet/code/scripts/plx_training.py b/examples/references/classification/imagenet/code/scripts/plx_training.py index ccfc4b9e19a5..e38967c3a180 100644 --- a/examples/references/classification/imagenet/code/scripts/plx_training.py +++ b/examples/references/classification/imagenet/code/scripts/plx_training.py @@ -22,8 +22,9 @@ def run(config, logger=None, local_rank=0, **kwargs): dist.init_process_group("nccl", init_method="env://") # As we passed config with option --manual_config_load - assert hasattr(config, "setup"), "We need to manually setup the configuration, please set --manual_config_load " \ - "to py_config_runner" + assert hasattr(config, "setup"), ( + "We need to manually setup the configuration, 
please set --manual_config_load " "to py_config_runner" + ) config = config.setup() @@ -36,10 +37,9 @@ def run(config, logger=None, local_rank=0, **kwargs): if dist.get_rank() == 0: plx_exp = Experiment() - plx_exp.log_params(**{ - "pytorch version": torch.__version__, - "ignite version": ignite.__version__, - }) + plx_exp.log_params( + **{"pytorch version": torch.__version__, "ignite version": ignite.__version__,} + ) plx_exp.log_params(**get_params(config, TRAINVAL_CONFIG)) try: diff --git a/examples/references/classification/imagenet/code/utils/handlers.py b/examples/references/classification/imagenet/code/utils/handlers.py index ed678499fe20..525da0f193de 100644 --- a/examples/references/classification/imagenet/code/utils/handlers.py +++ b/examples/references/classification/imagenet/code/utils/handlers.py @@ -4,7 +4,6 @@ def predictions_gt_images_handler(img_denormalize_fn, n_images=None, another_engine=None, prefix_tag=None): - def wrapper(engine, logger, event_name): batch = engine.state.batch output = engine.state.output @@ -30,6 +29,6 @@ def wrapper(engine, logger, event_name): tag = "predictions_with_gt" if prefix_tag is not None: tag = "{}: {}".format(prefix_tag, tag) - logger.writer.add_image(tag=tag, img_tensor=grid_pred_gt, global_step=global_step, dataformats='HWC') + logger.writer.add_image(tag=tag, img_tensor=grid_pred_gt, global_step=global_step, dataformats="HWC") return wrapper diff --git a/examples/references/classification/imagenet/configs/train/baseline_resnet50.py b/examples/references/classification/imagenet/configs/train/baseline_resnet50.py index c968d73f723e..715e5a6e39c4 100644 --- a/examples/references/classification/imagenet/configs/train/baseline_resnet50.py +++ b/examples/references/classification/imagenet/configs/train/baseline_resnet50.py @@ -20,7 +20,7 @@ # ############################## seed = 19 -device = 'cuda' +device = "cuda" debug = False # config to measure time passed to prepare batches and report measured time before the training @@ -41,28 +41,32 @@ # Setup Dataflow # ############################## -assert 'DATASET_PATH' in os.environ -data_path = os.environ['DATASET_PATH'] +assert "DATASET_PATH" in os.environ +data_path = os.environ["DATASET_PATH"] mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] -train_transforms = A.Compose([ - A.RandomResizedCrop(train_crop_size, train_crop_size, scale=(0.08, 1.0)), - A.HorizontalFlip(), - A.CoarseDropout(max_height=32, max_width=32), - A.HueSaturationValue(), - A.Normalize(mean=mean, std=std), - ToTensor(), -]) - -val_transforms = A.Compose([ - # https://github.com/facebookresearch/FixRes/blob/b27575208a7c48a3a6e0fa9efb57baa4021d1305/imnet_resnet50_scratch/transforms.py#L76 - A.Resize(int((256 / 224) * val_crop_size), int((256 / 224) * val_crop_size)), - A.CenterCrop(val_crop_size, val_crop_size), - A.Normalize(mean=mean, std=std), - ToTensor(), -]) +train_transforms = A.Compose( + [ + A.RandomResizedCrop(train_crop_size, train_crop_size, scale=(0.08, 1.0)), + A.HorizontalFlip(), + A.CoarseDropout(max_height=32, max_width=32), + A.HueSaturationValue(), + A.Normalize(mean=mean, std=std), + ToTensor(), + ] +) + +val_transforms = A.Compose( + [ + # https://github.com/facebookresearch/FixRes/blob/b27575208a7c48a3a6e0fa9efb57baa4021d1305/imnet_resnet50_scratch/transforms.py#L76 + A.Resize(int((256 / 224) * val_crop_size), int((256 / 224) * val_crop_size)), + A.CenterCrop(val_crop_size, val_crop_size), + A.Normalize(mean=mean, std=std), + ToTensor(), + ] +) train_loader, val_loader, train_eval_loader = 
get_train_val_loaders( data_path, @@ -72,8 +76,8 @@ num_workers=num_workers, val_batch_size=batch_size, pin_memory=True, - train_sampler='distributed', - val_sampler='distributed' + train_sampler="distributed", + val_sampler="distributed", ) # Image denormalization function to plot predictions with images diff --git a/examples/references/classification/imagenet/configs/train/check_baseline_resnet50.py b/examples/references/classification/imagenet/configs/train/check_baseline_resnet50.py index df81cb557896..908ac4c8dd9a 100644 --- a/examples/references/classification/imagenet/configs/train/check_baseline_resnet50.py +++ b/examples/references/classification/imagenet/configs/train/check_baseline_resnet50.py @@ -20,7 +20,7 @@ # ############################## seed = 19 -device = 'cuda' +device = "cuda" debug = False # config to measure time passed to prepare batches and report measured time before the training @@ -41,28 +41,32 @@ # Setup Dataflow # ############################## -assert 'DATASET_PATH' in os.environ -data_path = os.environ['DATASET_PATH'] +assert "DATASET_PATH" in os.environ +data_path = os.environ["DATASET_PATH"] mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] -train_transforms = A.Compose([ - A.RandomResizedCrop(train_crop_size, train_crop_size, scale=(0.08, 1.0)), - A.HorizontalFlip(), - A.CoarseDropout(max_height=32, max_width=32), - A.HueSaturationValue(), - A.Normalize(mean=mean, std=std), - ToTensor(), -]) - -val_transforms = A.Compose([ - # https://github.com/facebookresearch/FixRes/blob/b27575208a7c48a3a6e0fa9efb57baa4021d1305/imnet_resnet50_scratch/transforms.py#L76 - A.Resize(int((256 / 224) * val_crop_size), int((256 / 224) * val_crop_size)), - A.CenterCrop(val_crop_size, val_crop_size), - A.Normalize(mean=mean, std=std), - ToTensor(), -]) +train_transforms = A.Compose( + [ + A.RandomResizedCrop(train_crop_size, train_crop_size, scale=(0.08, 1.0)), + A.HorizontalFlip(), + A.CoarseDropout(max_height=32, max_width=32), + A.HueSaturationValue(), + A.Normalize(mean=mean, std=std), + ToTensor(), + ] +) + +val_transforms = A.Compose( + [ + # https://github.com/facebookresearch/FixRes/blob/b27575208a7c48a3a6e0fa9efb57baa4021d1305/imnet_resnet50_scratch/transforms.py#L76 + A.Resize(int((256 / 224) * val_crop_size), int((256 / 224) * val_crop_size)), + A.CenterCrop(val_crop_size, val_crop_size), + A.Normalize(mean=mean, std=std), + ToTensor(), + ] +) train_loader, val_loader, train_eval_loader = get_train_val_loaders( data_path, @@ -72,8 +76,8 @@ num_workers=num_workers, val_batch_size=batch_size, pin_memory=True, - train_sampler='distributed', - val_sampler='distributed' + train_sampler="distributed", + val_sampler="distributed", ) # Image denormalization function to plot predictions with images diff --git a/examples/references/segmentation/pascal_voc2012/code/dataflow/dataloaders.py b/examples/references/segmentation/pascal_voc2012/code/dataflow/dataloaders.py index 0eed82297822..27dbbb78ba6e 100644 --- a/examples/references/segmentation/pascal_voc2012/code/dataflow/dataloaders.py +++ b/examples/references/segmentation/pascal_voc2012/code/dataflow/dataloaders.py @@ -9,19 +9,21 @@ from dataflow.datasets import get_train_dataset, get_val_dataset, TransformedDataset, get_train_noval_sbdataset -def get_train_val_loaders(root_path: str, - train_transforms: Callable, - val_transforms: Callable, - batch_size: int = 16, - num_workers: int = 8, - val_batch_size: Optional[int] = None, - pin_memory: bool = True, - random_seed: Optional[int] = None, - train_sampler: 
Optional[Union[Sampler, str]] = None, - val_sampler: Optional[Union[Sampler, str]] = None, - with_sbd: Optional[str] = None, - limit_train_num_samples: Optional[int] = None, - limit_val_num_samples: Optional[int] = None) -> Tuple[DataLoader, DataLoader, DataLoader]: +def get_train_val_loaders( + root_path: str, + train_transforms: Callable, + val_transforms: Callable, + batch_size: int = 16, + num_workers: int = 8, + val_batch_size: Optional[int] = None, + pin_memory: bool = True, + random_seed: Optional[int] = None, + train_sampler: Optional[Union[Sampler, str]] = None, + val_sampler: Optional[Union[Sampler, str]] = None, + with_sbd: Optional[str] = None, + limit_train_num_samples: Optional[int] = None, + limit_val_num_samples: Optional[int] = None, +) -> Tuple[DataLoader, DataLoader, DataLoader]: train_ds = get_train_dataset(root_path) val_ds = get_val_dataset(root_path) @@ -43,7 +45,7 @@ def get_train_val_loaders(root_path: str, # random samples for evaluation on training dataset if len(val_ds) < len(train_ds): - train_eval_indices = np.random.permutation(len(train_ds))[:len(val_ds)] + train_eval_indices = np.random.permutation(len(train_ds))[: len(val_ds)] train_eval_ds = Subset(train_ds, train_eval_indices) else: train_eval_ds = train_ds @@ -53,38 +55,57 @@ def get_train_val_loaders(root_path: str, train_eval_ds = TransformedDataset(train_eval_ds, transform_fn=val_transforms) if isinstance(train_sampler, str): - assert train_sampler == 'distributed' + assert train_sampler == "distributed" train_sampler = data_dist.DistributedSampler(train_ds) if isinstance(val_sampler, str): - assert val_sampler == 'distributed' + assert val_sampler == "distributed" val_sampler = data_dist.DistributedSampler(val_ds, shuffle=False) - train_loader = DataLoader(train_ds, shuffle=train_sampler is None, - batch_size=batch_size, num_workers=num_workers, - sampler=train_sampler, - pin_memory=pin_memory, drop_last=True) + train_loader = DataLoader( + train_ds, + shuffle=train_sampler is None, + batch_size=batch_size, + num_workers=num_workers, + sampler=train_sampler, + pin_memory=pin_memory, + drop_last=True, + ) val_batch_size = batch_size * 4 if val_batch_size is None else val_batch_size - val_loader = DataLoader(val_ds, shuffle=False, sampler=val_sampler, - batch_size=val_batch_size, num_workers=num_workers, - pin_memory=pin_memory, drop_last=False) - - train_eval_loader = DataLoader(train_eval_ds, shuffle=False, sampler=val_sampler, - batch_size=val_batch_size, num_workers=num_workers, - pin_memory=pin_memory, drop_last=False) + val_loader = DataLoader( + val_ds, + shuffle=False, + sampler=val_sampler, + batch_size=val_batch_size, + num_workers=num_workers, + pin_memory=pin_memory, + drop_last=False, + ) + + train_eval_loader = DataLoader( + train_eval_ds, + shuffle=False, + sampler=val_sampler, + batch_size=val_batch_size, + num_workers=num_workers, + pin_memory=pin_memory, + drop_last=False, + ) return train_loader, val_loader, train_eval_loader -def get_inference_dataloader(root_path: str, - mode: str, - transforms: Callable, - batch_size: int = 16, - num_workers: int = 8, - pin_memory: bool = True, - limit_num_samples: Optional[int] = None) -> DataLoader: - assert mode in ('train', 'test'), "Mode should be 'train' or 'test'" +def get_inference_dataloader( + root_path: str, + mode: str, + transforms: Callable, + batch_size: int = 16, + num_workers: int = 8, + pin_memory: bool = True, + limit_num_samples: Optional[int] = None, +) -> DataLoader: + assert mode in ("train", "test"), "Mode should be 
'train' or 'test'" get_dataset_fn = get_train_dataset if mode == "train" else get_val_dataset @@ -96,7 +117,7 @@ def get_inference_dataloader(root_path: str, dataset = TransformedDataset(dataset, transform_fn=transforms) - loader = DataLoader(dataset, shuffle=False, - batch_size=batch_size, num_workers=num_workers, - pin_memory=pin_memory, drop_last=False) + loader = DataLoader( + dataset, shuffle=False, batch_size=batch_size, num_workers=num_workers, pin_memory=pin_memory, drop_last=False + ) return loader diff --git a/examples/references/segmentation/pascal_voc2012/code/dataflow/datasets.py b/examples/references/segmentation/pascal_voc2012/code/dataflow/datasets.py index ae56f406ea09..2d8045343e89 100644 --- a/examples/references/segmentation/pascal_voc2012/code/dataflow/datasets.py +++ b/examples/references/segmentation/pascal_voc2012/code/dataflow/datasets.py @@ -13,7 +13,6 @@ class TransformedDataset(Dataset): - def __init__(self, ds: Dataset, transform_fn: Callable): assert isinstance(ds, Dataset) assert callable(transform_fn) @@ -29,7 +28,6 @@ def __getitem__(self, index): class VOCSegmentationOpencv(VOCSegmentation): - def __init__(self, *args, return_meta: bool = False, **kwargs): super(VOCSegmentationOpencv, self).__init__(*args, **kwargs) self.return_meta = return_meta @@ -41,18 +39,16 @@ def __getitem__(self, index): mask = np.asarray(Image.open(self.masks[index])) if self.return_meta: - return {"image": img, "mask": mask, - "meta": {"index": index, - "image_path": self.images[index], - "mask_path": self.masks[index] - } - } + return { + "image": img, + "mask": mask, + "meta": {"index": index, "image_path": self.images[index], "mask_path": self.masks[index]}, + } return {"image": img, "mask": mask} class SBDatasetOpencv(SBDataset): - def __init__(self, *args, return_meta: bool = False, **kwargs): super(SBDatasetOpencv, self).__init__(*args, **kwargs) assert self.mode == "segmentation", "SBDatasetOpencv should be in segmentation mode only" @@ -60,7 +56,7 @@ def __init__(self, *args, return_meta: bool = False, **kwargs): def _get_segmentation_target(self, filepath): mat = self._loadmat(filepath) - return mat['GTcls'][0]['Segmentation'][0] + return mat["GTcls"][0]["Segmentation"][0] def __getitem__(self, index): img = cv2.imread(self.images[index]) @@ -70,25 +66,24 @@ def __getitem__(self, index): mask = self._get_target(self.masks[index]) if self.return_meta: - return {"image": img, "mask": mask, - "meta": {"index": index, - "image_path": self.images[index], - "mask_path": self.masks[index] - } - } + return { + "image": img, + "mask": mask, + "meta": {"index": index, "image_path": self.images[index], "mask_path": self.masks[index]}, + } return {"image": img, "mask": mask} def get_train_dataset(root_path: str, return_meta: bool = False): - return VOCSegmentationOpencv(root=root_path, year='2012', image_set='train', download=False, - return_meta=return_meta) + return VOCSegmentationOpencv( + root=root_path, year="2012", image_set="train", download=False, return_meta=return_meta + ) def get_val_dataset(root_path: str, return_meta: bool = False): - return VOCSegmentationOpencv(root=root_path, year='2012', image_set='val', download=False, - return_meta=return_meta) + return VOCSegmentationOpencv(root=root_path, year="2012", image_set="val", download=False, return_meta=return_meta) def get_train_noval_sbdataset(root_path: str, return_meta: bool = False): - return SBDatasetOpencv(root_path, image_set='train_noval', mode='segmentation', return_meta=return_meta) + return 
SBDatasetOpencv(root_path, image_set="train_noval", mode="segmentation", return_meta=return_meta) diff --git a/examples/references/segmentation/pascal_voc2012/code/dataflow/transforms.py b/examples/references/segmentation/pascal_voc2012/code/dataflow/transforms.py index 06c05cbc99fc..2f67cb3cc731 100644 --- a/examples/references/segmentation/pascal_voc2012/code/dataflow/transforms.py +++ b/examples/references/segmentation/pascal_voc2012/code/dataflow/transforms.py @@ -4,10 +4,10 @@ def ignore_mask_boundaries(force_apply, **kwargs): - assert 'mask' in kwargs, "Input should contain 'mask'" - mask = kwargs['mask'] + assert "mask" in kwargs, "Input should contain 'mask'" + mask = kwargs["mask"] mask[mask == 255] = 0 - kwargs['mask'] = mask + kwargs["mask"] = mask return kwargs @@ -23,7 +23,7 @@ def denormalize(t, mean, std, max_pixel_value=255): def prepare_batch_fp32(batch, device, non_blocking): - x, y = batch['image'], batch['mask'] + x, y = batch["image"], batch["mask"] x = convert_tensor(x, device, non_blocking=non_blocking) y = convert_tensor(y, device, non_blocking=non_blocking).long() return x, y diff --git a/examples/references/segmentation/pascal_voc2012/code/dataflow/vis.py b/examples/references/segmentation/pascal_voc2012/code/dataflow/vis.py index aa3f4a6e9ba3..3eef72e700e5 100644 --- a/examples/references/segmentation/pascal_voc2012/code/dataflow/vis.py +++ b/examples/references/segmentation/pascal_voc2012/code/dataflow/vis.py @@ -21,9 +21,9 @@ def _getvocpallete(num_cls): pallete[j * 3 + 2] = 0 i = 0 while lab > 0: - pallete[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i)) - pallete[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i)) - pallete[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i)) + pallete[j * 3 + 0] |= ((lab >> 0) & 1) << (7 - i) + pallete[j * 3 + 1] |= ((lab >> 1) & 1) << (7 - i) + pallete[j * 3 + 2] |= ((lab >> 2) & 1) << (7 - i) i = i + 1 lab >>= 3 return pallete @@ -36,7 +36,7 @@ def render_mask(mask: Union[np.ndarray, Image.Image]) -> Image.Image: if isinstance(mask, np.ndarray): mask = Image.fromarray(mask) mask.putpalette(vocpallete) - mask = mask.convert(mode='RGB') + mask = mask.convert(mode="RGB") return mask @@ -45,10 +45,12 @@ def tensor_to_rgb(t: torch.Tensor) -> np.ndarray: return img.astype(np.uint8) -def make_grid(batch_img: torch.Tensor, - batch_mask: torch.Tensor, - img_denormalize_fn: Callable, - batch_gt_mask: Optional[torch.Tensor] = None): +def make_grid( + batch_img: torch.Tensor, + batch_mask: torch.Tensor, + img_denormalize_fn: Callable, + batch_gt_mask: Optional[torch.Tensor] = None, +): """Create a grid from batch image and mask as img1 | img2 | img3 | img4 | ... 
@@ -78,7 +80,7 @@ def make_grid(batch_img: torch.Tensor, h, w = batch_img.shape[2:] le = 3 if batch_gt_mask is None else 3 + 2 - out_image = np.zeros((h * le, w * b, 3), dtype='uint8') + out_image = np.zeros((h * le, w * b, 3), dtype="uint8") for i in range(b): img = batch_img[i] @@ -89,19 +91,15 @@ def make_grid(batch_img: torch.Tensor, mask = mask.cpu().numpy() mask = render_mask(mask) - out_image[0:h, i * w:(i + 1) * w, :] = img - out_image[1 * h:2 * h, i * w:(i + 1) * w, :] = render_datapoint(img, - mask, - blend_alpha=0.4) - out_image[2 * h:3 * h, i * w:(i + 1) * w, :] = mask + out_image[0:h, i * w : (i + 1) * w, :] = img + out_image[1 * h : 2 * h, i * w : (i + 1) * w, :] = render_datapoint(img, mask, blend_alpha=0.4) + out_image[2 * h : 3 * h, i * w : (i + 1) * w, :] = mask if batch_gt_mask is not None: gt_mask = batch_gt_mask[i] gt_mask = gt_mask.cpu().numpy() gt_mask = render_mask(gt_mask) - out_image[3 * h:4 * h, i * w:(i + 1) * w, :] = render_datapoint(img, - gt_mask, - blend_alpha=0.4) - out_image[4 * h:5 * h, i * w:(i + 1) * w, :] = gt_mask + out_image[3 * h : 4 * h, i * w : (i + 1) * w, :] = render_datapoint(img, gt_mask, blend_alpha=0.4) + out_image[4 * h : 5 * h, i * w : (i + 1) * w, :] = gt_mask return out_image diff --git a/examples/references/segmentation/pascal_voc2012/code/scripts/common_training.py b/examples/references/segmentation/pascal_voc2012/code/scripts/common_training.py index 69e89196d413..98f88e0413de 100644 --- a/examples/references/segmentation/pascal_voc2012/code/scripts/common_training.py +++ b/examples/references/segmentation/pascal_voc2012/code/scripts/common_training.py @@ -27,16 +27,18 @@ def training(config, local_rank=None, with_mlflow_logging=False, with_plx_loggin set_seed(config.seed + local_rank) torch.cuda.set_device(local_rank) - device = 'cuda' + device = "cuda" torch.backends.cudnn.benchmark = True train_loader = config.train_loader train_sampler = getattr(train_loader, "sampler", None) - assert train_sampler is not None, "Train loader of type '{}' " \ - "should have attribute 'sampler'".format(type(train_loader)) - assert hasattr(train_sampler, 'set_epoch') and callable(train_sampler.set_epoch), \ - "Train sampler should have a callable method `set_epoch`" + assert train_sampler is not None, "Train loader of type '{}' " "should have attribute 'sampler'".format( + type(train_loader) + ) + assert hasattr(train_sampler, "set_epoch") and callable( + train_sampler.set_epoch + ), "Train sampler should have a callable method `set_epoch`" train_eval_loader = config.train_eval_loader val_loader = config.val_loader @@ -64,12 +66,12 @@ def train_update_function(engine, batch): loss = criterion(y_pred, y) if isinstance(loss, Mapping): - assert 'supervised batch loss' in loss + assert "supervised batch loss" in loss loss_dict = loss output = {k: v.item() for k, v in loss_dict.items()} - loss = loss_dict['supervised batch loss'] / accumulation_steps + loss = loss_dict["supervised batch loss"] / accumulation_steps else: - output = {'supervised batch loss': loss.item()} + output = {"supervised batch loss": loss.item()} with amp.scale_loss(loss, optimizer, loss_id=0) as scaled_loss: scaled_loss.backward() @@ -80,17 +82,21 @@ def train_update_function(engine, batch): return output - output_names = getattr(config, "output_names", ['supervised batch loss', ]) + output_names = getattr(config, "output_names", ["supervised batch loss",]) trainer = Engine(train_update_function) common.setup_common_distrib_training_handlers( - trainer, train_sampler, - 
to_save={'model': model, 'optimizer': optimizer}, - save_every_iters=1000, output_path=config.output_path.as_posix(), - lr_scheduler=config.lr_scheduler, with_gpu_stats=True, + trainer, + train_sampler, + to_save={"model": model, "optimizer": optimizer}, + save_every_iters=1000, + output_path=config.output_path.as_posix(), + lr_scheduler=config.lr_scheduler, + with_gpu_stats=True, output_names=output_names, - with_pbars=True, with_pbar_on_iters=with_mlflow_logging, - log_every_iters=1 + with_pbars=True, + with_pbar_on_iters=with_mlflow_logging, + log_every_iters=1, ) # Setup evaluators @@ -108,8 +114,12 @@ def train_update_function(engine, batch): model_output_transform = getattr(config, "model_output_transform", lambda x: x) evaluator_args = dict( - model=model, metrics=val_metrics, device=device, non_blocking=non_blocking, prepare_batch=prepare_batch, - output_transform=lambda x, y, y_pred: (model_output_transform(y_pred), y,) + model=model, + metrics=val_metrics, + device=device, + non_blocking=non_blocking, + prepare_batch=prepare_batch, + output_transform=lambda x, y, y_pred: (model_output_transform(y_pred), y,), ) train_evaluator = create_supervised_evaluator(**evaluator_args) evaluator = create_supervised_evaluator(**evaluator_args) @@ -134,34 +144,46 @@ def run_validation(_): if dist.get_rank() == 0: - tb_logger = common.setup_tb_logging(config.output_path.as_posix(), trainer, optimizer, - evaluators={"training": train_evaluator, "validation": evaluator}) + tb_logger = common.setup_tb_logging( + config.output_path.as_posix(), + trainer, + optimizer, + evaluators={"training": train_evaluator, "validation": evaluator}, + ) if with_mlflow_logging: - common.setup_mlflow_logging(trainer, optimizer, - evaluators={"training": train_evaluator, "validation": evaluator}) + common.setup_mlflow_logging( + trainer, optimizer, evaluators={"training": train_evaluator, "validation": evaluator} + ) if with_plx_logging: - common.setup_plx_logging(trainer, optimizer, - evaluators={"training": train_evaluator, "validation": evaluator}) + common.setup_plx_logging( + trainer, optimizer, evaluators={"training": train_evaluator, "validation": evaluator} + ) - common.save_best_model_by_val_score(config.output_path.as_posix(), evaluator, model, - metric_name=score_metric_name, trainer=trainer) + common.save_best_model_by_val_score( + config.output_path.as_posix(), evaluator, model, metric_name=score_metric_name, trainer=trainer + ) # Log train/val predictions: - tb_logger.attach(evaluator, - log_handler=predictions_gt_images_handler(img_denormalize_fn=config.img_denormalize, - n_images=15, - another_engine=trainer, - prefix_tag="validation"), - event_name=Events.EPOCH_COMPLETED) + tb_logger.attach( + evaluator, + log_handler=predictions_gt_images_handler( + img_denormalize_fn=config.img_denormalize, n_images=15, another_engine=trainer, prefix_tag="validation" + ), + event_name=Events.EPOCH_COMPLETED, + ) log_train_predictions = getattr(config, "log_train_predictions", False) if log_train_predictions: - tb_logger.attach(train_evaluator, - log_handler=predictions_gt_images_handler(img_denormalize_fn=config.img_denormalize, - n_images=15, - another_engine=trainer, - prefix_tag="validation"), - event_name=Events.EPOCH_COMPLETED) + tb_logger.attach( + train_evaluator, + log_handler=predictions_gt_images_handler( + img_denormalize_fn=config.img_denormalize, + n_images=15, + another_engine=trainer, + prefix_tag="training", + ), + event_name=Events.EPOCH_COMPLETED, + ) trainer.run(train_loader,
max_epochs=config.num_epochs) diff --git a/examples/references/segmentation/pascal_voc2012/code/scripts/download_dataset.py b/examples/references/segmentation/pascal_voc2012/code/scripts/download_dataset.py index 288633a74b68..5d7587b53045 100644 --- a/examples/references/segmentation/pascal_voc2012/code/scripts/download_dataset.py +++ b/examples/references/segmentation/pascal_voc2012/code/scripts/download_dataset.py @@ -12,13 +12,13 @@ args = parser.parse_args() print("Download Pascal VOC 2012 - Training") - VOCSegmentation(args.output_path, image_set='train', download=True) + VOCSegmentation(args.output_path, image_set="train", download=True) print("Download Pascal VOC 2012 - Validation") - VOCSegmentation(args.output_path, image_set='val', download=True) + VOCSegmentation(args.output_path, image_set="val", download=True) print("Download SBD - Training without Pascal VOC validation part") sbd_path = os.path.join(args.output_path, "SBD") os.makedirs(sbd_path, exist_ok=True) - SBDataset(sbd_path, image_set='train_noval', mode='segmentation', download=True) + SBDataset(sbd_path, image_set="train_noval", mode="segmentation", download=True) print("Done") print("Pascal VOC 2012 is at : {}".format(os.path.join(args.output_path, "VOCdevkit"))) print("SBD is at : {}".format(sbd_path)) diff --git a/examples/references/segmentation/pascal_voc2012/code/scripts/mlflow_training.py b/examples/references/segmentation/pascal_voc2012/code/scripts/mlflow_training.py index dd8cc59247d6..7119791e80fb 100644 --- a/examples/references/segmentation/pascal_voc2012/code/scripts/mlflow_training.py +++ b/examples/references/segmentation/pascal_voc2012/code/scripts/mlflow_training.py @@ -22,8 +22,9 @@ def run(config, logger=None, local_rank=0, **kwargs): dist.init_process_group("nccl", init_method="env://") # As we passed config with option --manual_config_load - assert hasattr(config, "setup"), "We need to manually setup the configuration, please set --manual_config_load " \ - "to py_config_runner" + assert hasattr(config, "setup"), ( + "We need to manually setup the configuration, please set --manual_config_load " "to py_config_runner" + ) config = config.setup() @@ -40,10 +41,9 @@ def run(config, logger=None, local_rank=0, **kwargs): config.output_path = Path(output_path) if dist.get_rank() == 0: - mlflow.log_params({ - "pytorch version": torch.__version__, - "ignite version": ignite.__version__, - }) + mlflow.log_params( + {"pytorch version": torch.__version__, "ignite version": ignite.__version__,} + ) mlflow.log_params(get_params(config, TRAINVAL_CONFIG)) try: diff --git a/examples/references/segmentation/pascal_voc2012/code/scripts/plx_training.py b/examples/references/segmentation/pascal_voc2012/code/scripts/plx_training.py index ccfc4b9e19a5..e38967c3a180 100644 --- a/examples/references/segmentation/pascal_voc2012/code/scripts/plx_training.py +++ b/examples/references/segmentation/pascal_voc2012/code/scripts/plx_training.py @@ -22,8 +22,9 @@ def run(config, logger=None, local_rank=0, **kwargs): dist.init_process_group("nccl", init_method="env://") # As we passed config with option --manual_config_load - assert hasattr(config, "setup"), "We need to manually setup the configuration, please set --manual_config_load " \ - "to py_config_runner" + assert hasattr(config, "setup"), ( + "We need to manually setup the configuration, please set --manual_config_load " "to py_config_runner" + ) config = config.setup() @@ -36,10 +37,9 @@ def run(config, logger=None, local_rank=0, **kwargs): if dist.get_rank() == 0: 
plx_exp = Experiment() - plx_exp.log_params(**{ - "pytorch version": torch.__version__, - "ignite version": ignite.__version__, - }) + plx_exp.log_params( + **{"pytorch version": torch.__version__, "ignite version": ignite.__version__,} + ) plx_exp.log_params(**get_params(config, TRAINVAL_CONFIG)) try: diff --git a/examples/references/segmentation/pascal_voc2012/code/utils/handlers.py b/examples/references/segmentation/pascal_voc2012/code/utils/handlers.py index b2888d874151..48022e8ef9ac 100644 --- a/examples/references/segmentation/pascal_voc2012/code/utils/handlers.py +++ b/examples/references/segmentation/pascal_voc2012/code/utils/handlers.py @@ -4,12 +4,11 @@ def predictions_gt_images_handler(img_denormalize_fn, n_images=None, another_engine=None, prefix_tag=None): - def wrapper(engine, logger, event_name): batch = engine.state.batch output = engine.state.output - x = batch['image'] - y = batch['mask'] + x = batch["image"] + y = batch["mask"] y_pred = output[0] if y.shape == y_pred.shape and y.ndim == 4: @@ -31,6 +30,6 @@ def wrapper(engine, logger, event_name): tag = "predictions_with_gt" if prefix_tag is not None: tag = "{}: {}".format(prefix_tag, tag) - logger.writer.add_image(tag=tag, img_tensor=grid_pred_gt, global_step=global_step, dataformats='HWC') + logger.writer.add_image(tag=tag, img_tensor=grid_pred_gt, global_step=global_step, dataformats="HWC") return wrapper diff --git a/examples/references/segmentation/pascal_voc2012/configs/train/baseline_resnet101.py b/examples/references/segmentation/pascal_voc2012/configs/train/baseline_resnet101.py index 4cb80c79a09d..4f56e947bc34 100644 --- a/examples/references/segmentation/pascal_voc2012/configs/train/baseline_resnet101.py +++ b/examples/references/segmentation/pascal_voc2012/configs/train/baseline_resnet101.py @@ -17,14 +17,14 @@ from dataflow.transforms import ignore_mask_boundaries, prepare_batch_fp32, denormalize -assert 'DATASET_PATH' in os.environ -data_path = os.environ['DATASET_PATH'] +assert "DATASET_PATH" in os.environ +data_path = os.environ["DATASET_PATH"] debug = False seed = 12 -device = 'cuda' +device = "cuda" fp16_opt_level = "O2" @@ -48,37 +48,42 @@ std = (0.229, 0.224, 0.225) -train_transforms = A.Compose([ - A.RandomScale(scale_limit=(0.0, 1.5), interpolation=cv2.INTER_LINEAR, p=1.0), - A.PadIfNeeded(val_img_size, val_img_size, border_mode=cv2.BORDER_CONSTANT), - A.RandomCrop(train_img_size, train_img_size), - A.HorizontalFlip(), - A.Blur(blur_limit=3), - - A.Normalize(mean=mean, std=std), - ignore_mask_boundaries, - ToTensor(), -]) - -val_transforms = A.Compose([ - A.PadIfNeeded(val_img_size, val_img_size, border_mode=cv2.BORDER_CONSTANT), - A.Normalize(mean=mean, std=std), - ignore_mask_boundaries, - ToTensor(), -]) - - -train_loader, val_loader, train_eval_loader = get_train_val_loaders(root_path=data_path, - train_transforms=train_transforms, - val_transforms=val_transforms, - batch_size=batch_size, - num_workers=num_workers, - val_batch_size=val_batch_size, - train_sampler='distributed', - val_sampler='distributed', - limit_train_num_samples=100 if debug else None, - limit_val_num_samples=100 if debug else None, - random_seed=seed) +train_transforms = A.Compose( + [ + A.RandomScale(scale_limit=(0.0, 1.5), interpolation=cv2.INTER_LINEAR, p=1.0), + A.PadIfNeeded(val_img_size, val_img_size, border_mode=cv2.BORDER_CONSTANT), + A.RandomCrop(train_img_size, train_img_size), + A.HorizontalFlip(), + A.Blur(blur_limit=3), + A.Normalize(mean=mean, std=std), + ignore_mask_boundaries, + ToTensor(), + ] +) + 
+val_transforms = A.Compose( + [ + A.PadIfNeeded(val_img_size, val_img_size, border_mode=cv2.BORDER_CONSTANT), + A.Normalize(mean=mean, std=std), + ignore_mask_boundaries, + ToTensor(), + ] +) + + +train_loader, val_loader, train_eval_loader = get_train_val_loaders( + root_path=data_path, + train_transforms=train_transforms, + val_transforms=val_transforms, + batch_size=batch_size, + num_workers=num_workers, + val_batch_size=val_batch_size, + train_sampler="distributed", + val_sampler="distributed", + limit_train_num_samples=100 if debug else None, + limit_val_num_samples=100 if debug else None, + random_seed=seed, +) prepare_batch = prepare_batch_fp32 @@ -94,7 +99,7 @@ def model_output_transform(output): - return output['out'] + return output["out"] # ############################## @@ -110,9 +115,13 @@ def model_output_transform(output): momentum = 0.9 nesterov = False -optimizer = optim.SGD([{'params': model.backbone.parameters()}, - {'params': model.classifier.parameters()}], - lr=1.0, momentum=momentum, weight_decay=weight_decay, nesterov=nesterov) +optimizer = optim.SGD( + [{"params": model.backbone.parameters()}, {"params": model.classifier.parameters()}], + lr=1.0, + momentum=momentum, + weight_decay=weight_decay, + nesterov=nesterov, +) le = len(train_loader) @@ -122,8 +131,10 @@ def lambda_lr_scheduler(iteration, lr0, n, a): return lr0 * pow((1.0 - 1.0 * iteration / n), a) -lr_scheduler = lrs.LambdaLR(optimizer, - lr_lambda=[ - partial(lambda_lr_scheduler, lr0=lr, n=num_epochs * le, a=0.9), - partial(lambda_lr_scheduler, lr0=lr * 10.0, n=num_epochs * le, a=0.9) - ]) +lr_scheduler = lrs.LambdaLR( + optimizer, + lr_lambda=[ + partial(lambda_lr_scheduler, lr0=lr, n=num_epochs * le, a=0.9), + partial(lambda_lr_scheduler, lr0=lr * 10.0, n=num_epochs * le, a=0.9), + ], +) diff --git a/examples/references/segmentation/pascal_voc2012/configs/train/baseline_resnet101_sbd.py b/examples/references/segmentation/pascal_voc2012/configs/train/baseline_resnet101_sbd.py index 3f0a77e5204b..17ab20303274 100644 --- a/examples/references/segmentation/pascal_voc2012/configs/train/baseline_resnet101_sbd.py +++ b/examples/references/segmentation/pascal_voc2012/configs/train/baseline_resnet101_sbd.py @@ -17,17 +17,17 @@ from dataflow.transforms import ignore_mask_boundaries, prepare_batch_fp32, denormalize -assert 'DATASET_PATH' in os.environ -data_path = os.environ['DATASET_PATH'] +assert "DATASET_PATH" in os.environ +data_path = os.environ["DATASET_PATH"] -assert 'SBD_DATASET_PATH' in os.environ -sbd_data_path = os.environ['SBD_DATASET_PATH'] +assert "SBD_DATASET_PATH" in os.environ +sbd_data_path = os.environ["SBD_DATASET_PATH"] debug = False seed = 12 -device = 'cuda' +device = "cuda" fp16_opt_level = "O2" @@ -53,38 +53,43 @@ std = (0.229, 0.224, 0.225) -train_transforms = A.Compose([ - A.RandomScale(scale_limit=(0.0, 1.5), interpolation=cv2.INTER_LINEAR, p=1.0), - A.PadIfNeeded(val_img_size, val_img_size, border_mode=cv2.BORDER_CONSTANT), - A.RandomCrop(train_img_size, train_img_size), - A.HorizontalFlip(), - A.Blur(blur_limit=3), - - A.Normalize(mean=mean, std=std), - ignore_mask_boundaries, - ToTensor(), -]) - -val_transforms = A.Compose([ - A.PadIfNeeded(val_img_size, val_img_size, border_mode=cv2.BORDER_CONSTANT), - A.Normalize(mean=mean, std=std), - ignore_mask_boundaries, - ToTensor(), -]) - - -train_loader, val_loader, train_eval_loader = get_train_val_loaders(root_path=data_path, - train_transforms=train_transforms, - val_transforms=val_transforms, - batch_size=batch_size, - 
num_workers=num_workers, - val_batch_size=val_batch_size, - with_sbd=sbd_data_path, - train_sampler='distributed', - val_sampler='distributed', - limit_train_num_samples=100 if debug else None, - limit_val_num_samples=100 if debug else None, - random_seed=seed) +train_transforms = A.Compose( + [ + A.RandomScale(scale_limit=(0.0, 1.5), interpolation=cv2.INTER_LINEAR, p=1.0), + A.PadIfNeeded(val_img_size, val_img_size, border_mode=cv2.BORDER_CONSTANT), + A.RandomCrop(train_img_size, train_img_size), + A.HorizontalFlip(), + A.Blur(blur_limit=3), + A.Normalize(mean=mean, std=std), + ignore_mask_boundaries, + ToTensor(), + ] +) + +val_transforms = A.Compose( + [ + A.PadIfNeeded(val_img_size, val_img_size, border_mode=cv2.BORDER_CONSTANT), + A.Normalize(mean=mean, std=std), + ignore_mask_boundaries, + ToTensor(), + ] +) + + +train_loader, val_loader, train_eval_loader = get_train_val_loaders( + root_path=data_path, + train_transforms=train_transforms, + val_transforms=val_transforms, + batch_size=batch_size, + num_workers=num_workers, + val_batch_size=val_batch_size, + with_sbd=sbd_data_path, + train_sampler="distributed", + val_sampler="distributed", + limit_train_num_samples=100 if debug else None, + limit_val_num_samples=100 if debug else None, + random_seed=seed, +) prepare_batch = prepare_batch_fp32 @@ -100,7 +105,7 @@ def model_output_transform(output): - return output['out'] + return output["out"] # ############################## @@ -116,9 +121,13 @@ def model_output_transform(output): momentum = 0.9 nesterov = False -optimizer = optim.SGD([{'params': model.backbone.parameters()}, - {'params': model.classifier.parameters()}], - lr=1.0, momentum=momentum, weight_decay=weight_decay, nesterov=nesterov) +optimizer = optim.SGD( + [{"params": model.backbone.parameters()}, {"params": model.classifier.parameters()}], + lr=1.0, + momentum=momentum, + weight_decay=weight_decay, + nesterov=nesterov, +) le = len(train_loader) @@ -128,8 +137,10 @@ def lambda_lr_scheduler(iteration, lr0, n, a): return lr0 * pow((1.0 - 1.0 * iteration / n), a) -lr_scheduler = lrs.LambdaLR(optimizer, - lr_lambda=[ - partial(lambda_lr_scheduler, lr0=lr, n=num_epochs * le, a=0.9), - partial(lambda_lr_scheduler, lr0=lr * 10.0, n=num_epochs * le, a=0.9) - ]) +lr_scheduler = lrs.LambdaLR( + optimizer, + lr_lambda=[ + partial(lambda_lr_scheduler, lr0=lr, n=num_epochs * le, a=0.9), + partial(lambda_lr_scheduler, lr0=lr * 10.0, n=num_epochs * le, a=0.9), + ], +) diff --git a/examples/reinforcement_learning/actor_critic.py b/examples/reinforcement_learning/actor_critic.py index f4c36e47b136..4a7871ebb086 100644 --- a/examples/reinforcement_learning/actor_critic.py +++ b/examples/reinforcement_learning/actor_critic.py @@ -18,7 +18,7 @@ from ignite.engine import Engine, Events -SavedAction = namedtuple('SavedAction', ['log_prob', 'value']) +SavedAction = namedtuple("SavedAction", ["log_prob", "value"]) class Policy(nn.Module): @@ -112,36 +112,44 @@ def update_model(engine): @trainer.on(EPISODE_COMPLETED(every=args.log_interval)) def log_episode(engine): i_episode = engine.state.epoch - print('Episode {}\tLast length: {:5d}\tAverage length: {:.2f}'.format( - i_episode, engine.state.timestep, engine.state.running_reward)) + print( + "Episode {}\tLast length: {:5d}\tAverage length: {:.2f}".format( + i_episode, engine.state.timestep, engine.state.running_reward + ) + ) @trainer.on(EPISODE_COMPLETED) def should_finish_training(engine): running_reward = engine.state.running_reward if running_reward > env.spec.reward_threshold: - 
print("Solved! Running reward is now {} and " - "the last episode runs to {} time steps!".format(running_reward, engine.state.timestep)) + print( + "Solved! Running reward is now {} and " + "the last episode runs to {} time steps!".format(running_reward, engine.state.timestep) + ) engine.should_terminate = True trainer.run(timesteps, max_epochs=args.max_episodes) -if __name__ == '__main__': - - parser = argparse.ArgumentParser(description='Ignite actor-critic example') - parser.add_argument('--gamma', type=float, default=0.99, metavar='G', - help='discount factor (default: 0.99)') - parser.add_argument('--seed', type=int, default=543, metavar='N', - help='random seed (default: 1)') - parser.add_argument('--render', action='store_true', - help='render the environment') - parser.add_argument('--log-interval', type=int, default=10, metavar='N', - help='interval between training status logs (default: 10)') - parser.add_argument('--max-episodes', type=int, default=1000000, metavar='N', - help='Number of episodes for the training (default: 1000000)') +if __name__ == "__main__": + + parser = argparse.ArgumentParser(description="Ignite actor-critic example") + parser.add_argument("--gamma", type=float, default=0.99, metavar="G", help="discount factor (default: 0.99)") + parser.add_argument("--seed", type=int, default=543, metavar="N", help="random seed (default: 543)") + parser.add_argument("--render", action="store_true", help="render the environment") + parser.add_argument( + "--log-interval", type=int, default=10, metavar="N", help="interval between training status logs (default: 10)" + ) + parser.add_argument( + "--max-episodes", + type=int, + default=1000000, + metavar="N", + help="Number of episodes for the training (default: 1000000)", + ) args = parser.parse_args() - env = gym.make('CartPole-v0') + env = gym.make("CartPole-v0") env.seed(args.seed) torch.manual_seed(args.seed) diff --git a/examples/reinforcement_learning/reinforce.py b/examples/reinforcement_learning/reinforce.py index da2f6e7f076f..6c052003344a 100644 --- a/examples/reinforcement_learning/reinforce.py +++ b/examples/reinforcement_learning/reinforce.py @@ -102,36 +102,44 @@ def update_model(engine): @trainer.on(EPISODE_COMPLETED(every=args.log_interval)) def log_episode(engine): i_episode = engine.state.epoch - print('Episode {}\tLast length: {:5d}\tAverage length: {:.2f}'.format( - i_episode, engine.state.timestep, engine.state.running_reward)) + print( + "Episode {}\tLast length: {:5d}\tAverage length: {:.2f}".format( + i_episode, engine.state.timestep, engine.state.running_reward + ) + ) @trainer.on(EPISODE_COMPLETED) def should_finish_training(engine): running_reward = engine.state.running_reward if running_reward > env.spec.reward_threshold: - print("Solved! Running reward is now {} and " - "the last episode runs to {} time steps!".format(running_reward, engine.state.timestep)) + print( + "Solved!
Running reward is now {} and " + "the last episode runs to {} time steps!".format(running_reward, engine.state.timestep) + ) engine.should_terminate = True trainer.run(timesteps, max_epochs=args.max_episodes) -if __name__ == '__main__': - - parser = argparse.ArgumentParser(description='PyTorch REINFORCE example') - parser.add_argument('--gamma', type=float, default=0.99, metavar='G', - help='discount factor (default: 0.99)') - parser.add_argument('--seed', type=int, default=543, metavar='N', - help='random seed (default: 543)') - parser.add_argument('--render', action='store_true', - help='render the environment') - parser.add_argument('--log-interval', type=int, default=10, metavar='N', - help='interval between training status logs (default: 10)') - parser.add_argument('--max-episodes', type=int, default=1000000, metavar='N', - help='Number of episodes for the training (default: 1000000)') +if __name__ == "__main__": + + parser = argparse.ArgumentParser(description="PyTorch REINFORCE example") + parser.add_argument("--gamma", type=float, default=0.99, metavar="G", help="discount factor (default: 0.99)") + parser.add_argument("--seed", type=int, default=543, metavar="N", help="random seed (default: 543)") + parser.add_argument("--render", action="store_true", help="render the environment") + parser.add_argument( + "--log-interval", type=int, default=10, metavar="N", help="interval between training status logs (default: 10)" + ) + parser.add_argument( + "--max-episodes", + type=int, + default=1000000, + metavar="N", + help="Number of episodes for the training (default: 1000000)", + ) args = parser.parse_args() - env = gym.make('CartPole-v0') + env = gym.make("CartPole-v0") env.seed(args.seed) torch.manual_seed(args.seed) diff --git a/ignite/__init__.py b/ignite/__init__.py index 1b6bc8e88e5b..a42a07b75d7c 100644 --- a/ignite/__init__.py +++ b/ignite/__init__.py @@ -5,4 +5,4 @@ import ignite.contrib import ignite.utils -__version__ = '0.4.0' +__version__ = "0.4.0" diff --git a/ignite/contrib/engines/common.py b/ignite/contrib/engines/common.py index 511ccff70769..2e738941b0ec 100644 --- a/ignite/contrib/engines/common.py +++ b/ignite/contrib/engines/common.py @@ -19,11 +19,20 @@ import ignite.contrib.handlers.polyaxon_logger as polyaxon_logger_module -def setup_common_training_handlers(trainer, train_sampler=None, - to_save=None, save_every_iters=1000, output_path=None, - lr_scheduler=None, with_gpu_stats=False, - output_names=None, with_pbars=True, with_pbar_on_iters=True, log_every_iters=100, - device='cuda'): +def setup_common_training_handlers( + trainer, + train_sampler=None, + to_save=None, + save_every_iters=1000, + output_path=None, + lr_scheduler=None, + with_gpu_stats=False, + output_names=None, + with_pbars=True, + with_pbar_on_iters=True, + log_every_iters=100, + device="cuda", +): """Helper method to setup trainer with common handlers (it also supports distributed configuration): - :class:`~ignite.handlers.TerminateOnNan` - handler to setup learning rate scheduling @@ -52,29 +61,46 @@ def setup_common_training_handlers(trainer, train_sampler=None, epoch-wise progress bar. device (str of torch.device, optional): Optional device specification in case of distributed computation usage. 
""" - kwargs = dict(to_save=to_save, - save_every_iters=save_every_iters, output_path=output_path, - lr_scheduler=lr_scheduler, with_gpu_stats=with_gpu_stats, - output_names=output_names, with_pbars=with_pbars, - with_pbar_on_iters=with_pbar_on_iters, - log_every_iters=log_every_iters, device=device) + kwargs = dict( + to_save=to_save, + save_every_iters=save_every_iters, + output_path=output_path, + lr_scheduler=lr_scheduler, + with_gpu_stats=with_gpu_stats, + output_names=output_names, + with_pbars=with_pbars, + with_pbar_on_iters=with_pbar_on_iters, + log_every_iters=log_every_iters, + device=device, + ) if dist.is_available() and dist.is_initialized(): return _setup_common_distrib_training_handlers(trainer, train_sampler=train_sampler, **kwargs) else: if train_sampler is not None: - warnings.warn("Argument train_sampler distributed sampler used to call `set_epoch` method on epoch " - "started event, but no distributed setting detected", UserWarning) + warnings.warn( + "Argument train_sampler distributed sampler used to call `set_epoch` method on epoch " + "started event, but no distributed setting detected", + UserWarning, + ) return _setup_common_training_handlers(trainer, **kwargs) setup_common_distrib_training_handlers = setup_common_training_handlers -def _setup_common_training_handlers(trainer, - to_save=None, save_every_iters=1000, output_path=None, - lr_scheduler=None, with_gpu_stats=True, - output_names=None, with_pbars=True, with_pbar_on_iters=True, - log_every_iters=100, device='cuda'): +def _setup_common_training_handlers( + trainer, + to_save=None, + save_every_iters=1000, + output_path=None, + lr_scheduler=None, + with_gpu_stats=True, + output_names=None, + with_pbars=True, + with_pbar_on_iters=True, + log_every_iters=100, + device="cuda", +): trainer.add_event_handler(Events.ITERATION_COMPLETED, TerminateOnNan()) if lr_scheduler is not None: @@ -92,7 +118,7 @@ def _setup_common_training_handlers(trainer, trainer.add_event_handler(Events.ITERATION_COMPLETED(every=save_every_iters), checkpoint_handler, to_save) if with_gpu_stats: - GpuInfo().attach(trainer, name='gpu', event_name=Events.ITERATION_COMPLETED(every=log_every_iters)) + GpuInfo().attach(trainer, name="gpu", event_name=Events.ITERATION_COMPLETED(every=log_every_iters)) if output_names is not None: @@ -104,37 +130,55 @@ def output_transform(x, index, name): elif isinstance(x, torch.Tensor): return x else: - raise ValueError("Unhandled type of update_function's output. " - "It should either mapping or sequence, but given {}".format(type(x))) + raise ValueError( + "Unhandled type of update_function's output. 
" + "It should either mapping or sequence, but given {}".format(type(x)) + ) for i, n in enumerate(output_names): - RunningAverage(output_transform=partial(output_transform, index=i, name=n), - epoch_bound=False, device=device).attach(trainer, n) + RunningAverage( + output_transform=partial(output_transform, index=i, name=n), epoch_bound=False, device=device + ).attach(trainer, n) if with_pbars: if with_pbar_on_iters: - ProgressBar(persist=False).attach(trainer, metric_names='all', - event_name=Events.ITERATION_COMPLETED(every=log_every_iters)) - - ProgressBar(persist=True, bar_format="").attach(trainer, - event_name=Events.EPOCH_STARTED, - closing_event_name=Events.COMPLETED) - - -def _setup_common_distrib_training_handlers(trainer, train_sampler=None, - to_save=None, save_every_iters=1000, output_path=None, - lr_scheduler=None, with_gpu_stats=True, - output_names=None, with_pbars=True, with_pbar_on_iters=True, - log_every_iters=100, device='cuda'): + ProgressBar(persist=False).attach( + trainer, metric_names="all", event_name=Events.ITERATION_COMPLETED(every=log_every_iters) + ) + + ProgressBar(persist=True, bar_format="").attach( + trainer, event_name=Events.EPOCH_STARTED, closing_event_name=Events.COMPLETED + ) + + +def _setup_common_distrib_training_handlers( + trainer, + train_sampler=None, + to_save=None, + save_every_iters=1000, + output_path=None, + lr_scheduler=None, + with_gpu_stats=True, + output_names=None, + with_pbars=True, + with_pbar_on_iters=True, + log_every_iters=100, + device="cuda", +): if not (dist.is_available() and dist.is_initialized()): raise RuntimeError("Distributed setting is not initialized, please call `dist.init_process_group` before.") - _setup_common_training_handlers(trainer, to_save=None, - lr_scheduler=lr_scheduler, with_gpu_stats=with_gpu_stats, - output_names=output_names, - with_pbars=(dist.get_rank() == 0) and with_pbars, - with_pbar_on_iters=with_pbar_on_iters, - log_every_iters=log_every_iters, device=device) + _setup_common_training_handlers( + trainer, + to_save=None, + lr_scheduler=lr_scheduler, + with_gpu_stats=with_gpu_stats, + output_names=output_names, + with_pbars=(dist.get_rank() == 0) and with_pbars, + with_pbar_on_iters=with_pbar_on_iters, + log_every_iters=log_every_iters, + device=device, + ) if train_sampler is not None: if not callable(getattr(train_sampler, "set_epoch", None)): @@ -157,6 +201,7 @@ def distrib_set_epoch(engine): def empty_cuda_cache(_): torch.cuda.empty_cache() import gc + gc.collect() @@ -174,9 +219,11 @@ def setup_any_logging(logger, logger_module, trainer, optimizers, evaluators, lo if log_every_iters is None: log_every_iters = 1 - logger.attach(trainer, - log_handler=logger_module.OutputHandler(tag="training", metric_names='all'), - event_name=Events.ITERATION_COMPLETED(every=log_every_iters)) + logger.attach( + trainer, + log_handler=logger_module.OutputHandler(tag="training", metric_names="all"), + event_name=Events.ITERATION_COMPLETED(every=log_every_iters), + ) if optimizers is not None: # Log optimizer parameters @@ -184,9 +231,11 @@ def setup_any_logging(logger, logger_module, trainer, optimizers, evaluators, lo optimizers = {None: optimizers} for k, optimizer in optimizers.items(): - logger.attach(trainer, - log_handler=logger_module.OptimizerParamsHandler(optimizer, param_name="lr", tag=k), - event_name=Events.ITERATION_STARTED(every=log_every_iters)) + logger.attach( + trainer, + log_handler=logger_module.OptimizerParamsHandler(optimizer, param_name="lr", tag=k), + 
event_name=Events.ITERATION_STARTED(every=log_every_iters), + ) if evaluators is not None: # Log evaluation metrics @@ -195,9 +244,11 @@ def setup_any_logging(logger, logger_module, trainer, optimizers, evaluators, lo for k, evaluator in evaluators.items(): gst = global_step_from_engine(trainer) - logger.attach(evaluator, - log_handler=logger_module.OutputHandler(tag=k, metric_names='all', global_step_transform=gst), - event_name=Events.COMPLETED) + logger.attach( + evaluator, + log_handler=logger_module.OutputHandler(tag=k, metric_names="all", global_step_transform=gst), + event_name=Events.COMPLETED, + ) def setup_tb_logging(output_path, trainer, optimizers=None, evaluators=None, log_every_iters=100): @@ -220,9 +271,7 @@ def setup_tb_logging(output_path, trainer, optimizers=None, evaluators=None, log TensorboardLogger """ tb_logger = TensorboardLogger(log_dir=output_path) - setup_any_logging(tb_logger, tb_logger_module, - trainer, optimizers, evaluators, - log_every_iters=log_every_iters) + setup_any_logging(tb_logger, tb_logger_module, trainer, optimizers, evaluators, log_every_iters=log_every_iters) return tb_logger @@ -245,9 +294,9 @@ def setup_mlflow_logging(trainer, optimizers=None, evaluators=None, log_every_it MLflowLogger """ mlflow_logger = MLflowLogger() - setup_any_logging(mlflow_logger, mlflow_logger_module, - trainer, optimizers, evaluators, - log_every_iters=log_every_iters) + setup_any_logging( + mlflow_logger, mlflow_logger_module, trainer, optimizers, evaluators, log_every_iters=log_every_iters + ) return mlflow_logger @@ -270,9 +319,9 @@ def setup_plx_logging(trainer, optimizers=None, evaluators=None, log_every_iters PolyaxonLogger """ plx_logger = PolyaxonLogger() - setup_any_logging(plx_logger, polyaxon_logger_module, - trainer, optimizers, evaluators, - log_every_iters=log_every_iters) + setup_any_logging( + plx_logger, polyaxon_logger_module, trainer, optimizers, evaluators, log_every_iters=log_every_iters + ) return plx_logger @@ -303,13 +352,15 @@ def save_best_model_by_val_score(output_path, evaluator, model, metric_name, n_s if trainer is not None: global_step_transform = global_step_from_engine(trainer) - best_model_handler = ModelCheckpoint(dirname=output_path, - filename_prefix="best", - n_saved=n_saved, - global_step_transform=global_step_transform, - score_name="{}_{}".format(tag, metric_name.lower()), - score_function=get_default_score_fn(metric_name)) - evaluator.add_event_handler(Events.COMPLETED, best_model_handler, {'model': model, }) + best_model_handler = ModelCheckpoint( + dirname=output_path, + filename_prefix="best", + n_saved=n_saved, + global_step_transform=global_step_transform, + score_name="{}_{}".format(tag, metric_name.lower()), + score_function=get_default_score_fn(metric_name), + ) + evaluator.add_event_handler(Events.COMPLETED, best_model_handler, {"model": model,}) def add_early_stopping_by_val_score(patience, evaluator, trainer, metric_name): diff --git a/ignite/contrib/engines/tbptt.py b/ignite/contrib/engines/tbptt.py index 8e91dc4b2160..eb3f452d03a2 100644 --- a/ignite/contrib/engines/tbptt.py +++ b/ignite/contrib/engines/tbptt.py @@ -29,14 +29,7 @@ def _detach_hidden(hidden): def create_supervised_tbptt_trainer( - model, - optimizer, - loss_fn, - tbtt_step, - dim=0, - device=None, - non_blocking=False, - prepare_batch=_prepare_batch + model, optimizer, loss_fn, tbtt_step, dim=0, device=None, non_blocking=False, prepare_batch=_prepare_batch ): """Create a trainer for truncated backprop through time supervised models. 
diff --git a/ignite/contrib/handlers/__init__.py b/ignite/contrib/handlers/__init__.py index abb60e882be4..5c42be715616 100644 --- a/ignite/contrib/handlers/__init__.py +++ b/ignite/contrib/handlers/__init__.py @@ -1,5 +1,12 @@ -from ignite.contrib.handlers.param_scheduler import LinearCyclicalScheduler, CosineAnnealingScheduler, \ - ConcatScheduler, LRScheduler, create_lr_scheduler_with_warmup, PiecewiseLinear, ParamGroupScheduler +from ignite.contrib.handlers.param_scheduler import ( + LinearCyclicalScheduler, + CosineAnnealingScheduler, + ConcatScheduler, + LRScheduler, + create_lr_scheduler_with_warmup, + PiecewiseLinear, + ParamGroupScheduler, +) from ignite.contrib.handlers.custom_events import CustomPeriodicEvent diff --git a/ignite/contrib/handlers/base_logger.py b/ignite/contrib/handlers/base_logger.py index 4c26b6c243a9..54b737be220a 100644 --- a/ignite/contrib/handlers/base_logger.py +++ b/ignite/contrib/handlers/base_logger.py @@ -45,7 +45,6 @@ def close(self): class BaseHandler(metaclass=ABCMeta): - @abstractmethod def __call__(self, *args, **kwargs): pass @@ -58,8 +57,9 @@ class BaseOptimizerParamsHandler(BaseHandler): def __init__(self, optimizer, param_name="lr", tag=None): if not isinstance(optimizer, torch.optim.Optimizer): - raise TypeError("Argument optimizer should be of type torch.optim.Optimizer, " - "but given {}".format(type(optimizer))) + raise TypeError( + "Argument optimizer should be of type torch.optim.Optimizer, " "but given {}".format(type(optimizer)) + ) self.optimizer = optimizer self.param_name = param_name @@ -75,29 +75,35 @@ def __init__(self, tag, metric_names=None, output_transform=None, another_engine if metric_names is not None: if not (isinstance(metric_names, list) or (isinstance(metric_names, str) and metric_names == "all")): - raise TypeError("metric_names should be either a list or equal 'all', " - "got {} instead.".format(type(metric_names))) + raise TypeError( + "metric_names should be either a list or equal 'all', " "got {} instead.".format(type(metric_names)) + ) if output_transform is not None and not callable(output_transform): - raise TypeError("output_transform should be a function, got {} instead." - .format(type(output_transform))) + raise TypeError("output_transform should be a function, got {} instead.".format(type(output_transform))) if output_transform is None and metric_names is None: raise ValueError("Either metric_names or output_transform should be defined") if another_engine is not None: if not isinstance(another_engine, Engine): - raise TypeError("Argument another_engine should be of type Engine, " - "but given {}".format(type(another_engine))) - warnings.warn("Use of another_engine is deprecated and will be removed in 0.4.0. " - "Please use global_step_transform instead.", DeprecationWarning) + raise TypeError( + "Argument another_engine should be of type Engine, " "but given {}".format(type(another_engine)) + ) + warnings.warn( + "Use of another_engine is deprecated and will be removed in 0.4.0. " + "Please use global_step_transform instead.", + DeprecationWarning, + ) global_step_transform = global_step_from_engine(another_engine) if global_step_transform is not None and not callable(global_step_transform): - raise TypeError("global_step_transform should be a function, got {} instead." 
- .format(type(global_step_transform))) + raise TypeError( + "global_step_transform should be a function, got {} instead.".format(type(global_step_transform)) + ) if global_step_transform is None: + def global_step_transform(engine, event_name): return engine.state.get_event_attrib_value(event_name) @@ -116,8 +122,10 @@ def _setup_output_metrics(self, engine): else: for name in self.metric_names: if name not in engine.state.metrics: - warnings.warn("Provided metric name '{}' is missing " - "in engine's state metrics: {}".format(name, list(engine.state.metrics.keys()))) + warnings.warn( + "Provided metric name '{}' is missing " + "in engine's state metrics: {}".format(name, list(engine.state.metrics.keys())) + ) continue metrics[name] = engine.state.metrics[name] @@ -138,12 +146,10 @@ class BaseWeightsScalarHandler(BaseHandler): def __init__(self, model, reduction=torch.norm, tag=None): if not isinstance(model, torch.nn.Module): - raise TypeError("Argument model should be of type torch.nn.Module, " - "but given {}".format(type(model))) + raise TypeError("Argument model should be of type torch.nn.Module, " "but given {}".format(type(model))) if not callable(reduction): - raise TypeError("Argument reduction should be callable, " - "but given {}".format(type(reduction))) + raise TypeError("Argument reduction should be callable, " "but given {}".format(type(reduction))) def _is_0D_tensor(t): return isinstance(t, torch.Tensor) and t.ndimension() == 0 @@ -165,8 +171,7 @@ class BaseWeightsHistHandler(BaseHandler): def __init__(self, model, tag=None): if not isinstance(model, torch.nn.Module): - raise TypeError("Argument model should be of type torch.nn.Module, " - "but given {}".format(type(model))) + raise TypeError("Argument model should be of type torch.nn.Module, " "but given {}".format(type(model))) self.model = model self.tag = tag diff --git a/ignite/contrib/handlers/custom_events.py b/ignite/contrib/handlers/custom_events.py index daca6ae3fe0c..b4d5cb89e8d2 100644 --- a/ignite/contrib/handlers/custom_events.py +++ b/ignite/contrib/handlers/custom_events.py @@ -73,11 +73,11 @@ def __init__(self, n_iterations=None, n_epochs=None): self.custom_state_attr = "{}_{}".format(prefix, self.period) event_name = "{}_{}".format(prefix.upper(), self.period) - setattr(self, "Events", - Enum("Events", - " ".join(["{}_STARTED".format(event_name), - "{}_COMPLETED".format(event_name)]) - )) + setattr( + self, + "Events", + Enum("Events", " ".join(["{}_STARTED".format(event_name), "{}_COMPLETED".format(event_name)])), + ) # Update State.event_to_attr for e in self.Events: State.event_to_attr[e] = self.custom_state_attr @@ -102,7 +102,9 @@ def attach(self, engine): engine.register_events(*self.Events) engine.add_event_handler(Events.STARTED, self._on_started) - engine.add_event_handler(getattr(Events, "{}_STARTED".format(self.state_attr.upper())), - self._on_periodic_event_started) - engine.add_event_handler(getattr(Events, "{}_COMPLETED".format(self.state_attr.upper())), - self._on_periodic_event_completed) + engine.add_event_handler( + getattr(Events, "{}_STARTED".format(self.state_attr.upper())), self._on_periodic_event_started + ) + engine.add_event_handler( + getattr(Events, "{}_COMPLETED".format(self.state_attr.upper())), self._on_periodic_event_completed + ) diff --git a/ignite/contrib/handlers/mlflow_logger.py b/ignite/contrib/handlers/mlflow_logger.py index 4e4f7486b931..1904b12578c5 100644 --- a/ignite/contrib/handlers/mlflow_logger.py +++ b/ignite/contrib/handlers/mlflow_logger.py @@ -3,10 +3,14 @@ 
import warnings import torch -from ignite.contrib.handlers.base_logger import BaseLogger, BaseOutputHandler, BaseOptimizerParamsHandler, \ - global_step_from_engine +from ignite.contrib.handlers.base_logger import ( + BaseLogger, + BaseOutputHandler, + BaseOptimizerParamsHandler, + global_step_from_engine, +) -__all__ = ['MLflowLogger', 'OutputHandler', 'OptimizerParamsHandler', 'global_step_from_engine'] +__all__ = ["MLflowLogger", "OutputHandler", "OptimizerParamsHandler", "global_step_from_engine"] class OutputHandler(BaseOutputHandler): @@ -98,8 +102,10 @@ def __call__(self, engine, logger, event_name): global_step = self.global_step_transform(engine, event_name) if not isinstance(global_step, int): - raise TypeError("global_step must be int, got {}." - " Please check the output of global_step_transform.".format(type(global_step))) + raise TypeError( + "global_step must be int, got {}." + " Please check the output of global_step_transform.".format(type(global_step)) + ) rendered_metrics = {} for key, value in metrics.items(): @@ -111,16 +117,17 @@ def __call__(self, engine, logger, event_name): for i, v in enumerate(value): rendered_metrics["{} {} {}".format(self.tag, key, i)] = v.item() else: - warnings.warn("MLflowLogger output_handler can not log " - "metrics value type {}".format(type(value))) + warnings.warn("MLflowLogger output_handler can not log " "metrics value type {}".format(type(value))) # Additionally recheck metric names as MLflow rejects non-valid names with MLflowException from mlflow.utils.validation import _VALID_PARAM_AND_METRIC_NAMES for key in list(rendered_metrics.keys()): if not _VALID_PARAM_AND_METRIC_NAMES.match(key): - warnings.warn("MLflowLogger output_handler encountered an invalid metric name '{}' that " - "will be ignored and not logged to MLflow".format(key)) + warnings.warn( + "MLflowLogger output_handler encountered an invalid metric name '{}' that " + "will be ignored and not logged to MLflow".format(key) + ) del rendered_metrics[key] logger.log_metrics(rendered_metrics, step=global_step) @@ -160,8 +167,10 @@ def __call__(self, engine, logger, event_name): global_step = engine.state.get_event_attrib_value(event_name) tag_prefix = "{} ".format(self.tag) if self.tag else "" - params = {"{}{} group_{}".format(tag_prefix, self.param_name, i): float(param_group[self.param_name]) - for i, param_group in enumerate(self.optimizer.param_groups)} + params = { + "{}{} group_{}".format(tag_prefix, self.param_name, i): float(param_group[self.param_name]) + for i, param_group in enumerate(self.optimizer.param_groups) + } logger.log_metrics(params, step=global_step) @@ -224,8 +233,10 @@ def __init__(self, tracking_uri=None): try: import mlflow except ImportError: - raise RuntimeError("This contrib module requires mlflow to be installed. " - "Please install it with command: \n pip install mlflow") + raise RuntimeError( + "This contrib module requires mlflow to be installed. 
" + "Please install it with command: \n pip install mlflow" + ) if tracking_uri is not None: mlflow.set_tracking_uri(tracking_uri) @@ -245,4 +256,5 @@ def wrapper(*args, **kwargs): def close(self): import mlflow + mlflow.end_run() diff --git a/ignite/contrib/handlers/neptune_logger.py b/ignite/contrib/handlers/neptune_logger.py index 4bd2e19967b1..fbc9e52280ed 100644 --- a/ignite/contrib/handlers/neptune_logger.py +++ b/ignite/contrib/handlers/neptune_logger.py @@ -8,11 +8,22 @@ import ignite from ignite.engine import Events -from ignite.contrib.handlers.base_logger import BaseLogger, BaseOptimizerParamsHandler, BaseOutputHandler, \ - BaseWeightsScalarHandler, global_step_from_engine - -__all__ = ['NeptuneLogger', 'OptimizerParamsHandler', 'OutputHandler', - 'WeightsScalarHandler', 'GradsScalarHandler', 'global_step_from_engine'] +from ignite.contrib.handlers.base_logger import ( + BaseLogger, + BaseOptimizerParamsHandler, + BaseOutputHandler, + BaseWeightsScalarHandler, + global_step_from_engine, +) + +__all__ = [ + "NeptuneLogger", + "OptimizerParamsHandler", + "OutputHandler", + "WeightsScalarHandler", + "GradsScalarHandler", + "global_step_from_engine", +] class OutputHandler(BaseOutputHandler): @@ -114,19 +125,19 @@ def __call__(self, engine, logger, event_name): global_step = self.global_step_transform(engine, event_name) if not isinstance(global_step, int): - raise TypeError("global_step must be int, got {}." - " Please check the output of global_step_transform.".format(type(global_step))) + raise TypeError( + "global_step must be int, got {}." + " Please check the output of global_step_transform.".format(type(global_step)) + ) for key, value in metrics.items(): - if isinstance(value, numbers.Number) or \ - isinstance(value, torch.Tensor) and value.ndimension() == 0: + if isinstance(value, numbers.Number) or isinstance(value, torch.Tensor) and value.ndimension() == 0: logger.experiment.log_metric("{}/{}".format(self.tag, key), x=global_step, y=value) elif isinstance(value, torch.Tensor) and value.ndimension() == 1: for i, v in enumerate(value): logger.experiment.log_metric("{}/{}/{}".format(self.tag, key, i), x=global_step, y=v.item()) else: - warnings.warn("NeptuneLogger output_handler can not log " - "metrics value type {}".format(type(value))) + warnings.warn("NeptuneLogger output_handler can not log " "metrics value type {}".format(type(value))) class OptimizerParamsHandler(BaseOptimizerParamsHandler): @@ -166,8 +177,10 @@ def __call__(self, engine, logger, event_name): global_step = engine.state.get_event_attrib_value(event_name) tag_prefix = "{}/".format(self.tag) if self.tag else "" - params = {"{}{}/group_{}".format(tag_prefix, self.param_name, i): float(param_group[self.param_name]) - for i, param_group in enumerate(self.optimizer.param_groups)} + params = { + "{}{}/group_{}".format(tag_prefix, self.param_name, i): float(param_group[self.param_name]) + for i, param_group in enumerate(self.optimizer.param_groups) + } for k, v in params.items(): logger.experiment.log_metric(k, x=global_step, y=v) @@ -218,10 +231,12 @@ def __call__(self, engine, logger, event_name): if p.grad is None: continue - name = name.replace('.', '/') - logger.experiment.log_metric("{}weights_{}/{}".format(tag_prefix, self.reduction.__name__, name), - x=global_step, - y=self.reduction(p.data)) + name = name.replace(".", "/") + logger.experiment.log_metric( + "{}weights_{}/{}".format(tag_prefix, self.reduction.__name__, name), + x=global_step, + y=self.reduction(p.data), + ) class 
GradsScalarHandler(BaseWeightsScalarHandler): @@ -268,10 +283,12 @@ def __call__(self, engine, logger, event_name): if p.grad is None: continue - name = name.replace('.', '/') - logger.experiment.log_metric("{}grads_{}/{}".format(tag_prefix, self.reduction.__name__, name), - x=global_step, - y=self.reduction(p.grad)) + name = name.replace(".", "/") + logger.experiment.log_metric( + "{}grads_{}/{}".format(tag_prefix, self.reduction.__name__, name), + x=global_step, + y=self.reduction(p.grad), + ) class NeptuneLogger(BaseLogger): @@ -389,20 +406,21 @@ def __init__(self, *args, **kwargs): try: import neptune except ImportError: - raise RuntimeError("This contrib module requires neptune-client to be installed. " - "You may install neptune with command: \n pip install neptune-client \n") + raise RuntimeError( + "This contrib module requires neptune-client to be installed. " + "You may install neptune with command: \n pip install neptune-client \n" + ) if kwargs.get("offline_mode", False): self.mode = "offline" - neptune.init(project_qualified_name="dry-run/project", - backend=neptune.OfflineBackend()) + neptune.init(project_qualified_name="dry-run/project", backend=neptune.OfflineBackend()) else: self.mode = "online" - neptune.init(api_token=kwargs["api_token"], - project_qualified_name=kwargs["project_name"]) + neptune.init(api_token=kwargs["api_token"], project_qualified_name=kwargs["project_name"]) - self._experiment_kwargs = {k: v for k, v in kwargs.items() - if k not in ["api_token", "project_name", "offline_mode"]} + self._experiment_kwargs = { + k: v for k, v in kwargs.items() if k not in ["api_token", "project_name", "offline_mode"] + } self.experiment = neptune.create_experiment(**self._experiment_kwargs) diff --git a/ignite/contrib/handlers/param_scheduler.py b/ignite/contrib/handlers/param_scheduler.py index a7432f8c26a3..a942c1c66409 100644 --- a/ignite/contrib/handlers/param_scheduler.py +++ b/ignite/contrib/handlers/param_scheduler.py @@ -1,5 +1,3 @@ - - from collections import OrderedDict from copy import copy @@ -43,7 +41,7 @@ def __init__(self, optimizer, param_name, save_history=False, param_group_index= self.param_name = param_name self.save_history = save_history self.event_index = 0 - self._state_attrs = ['event_index', 'param_name', 'save_history', 'param_group_index'] + self._state_attrs = ["event_index", "param_name", "save_history", "param_group_index"] def __call__(self, engine, name=None): @@ -56,8 +54,8 @@ def __call__(self, engine, name=None): name = self.param_name if self.save_history: - if not hasattr(engine.state, 'param_history'): - setattr(engine.state, 'param_history', {}) + if not hasattr(engine.state, "param_history"): + setattr(engine.state, "param_history", {}) engine.state.param_history.setdefault(name, []) values = [pg[self.param_name] for pg in self.optimizer_param_groups] engine.state.param_history[name].append(values) @@ -68,7 +66,9 @@ def __call__(self, engine, name=None): def optimizer_param_groups(self): if self.param_group_index is None: return self.optimizer.param_groups - return [self.optimizer.param_groups[self.param_group_index], ] + return [ + self.optimizer.param_groups[self.param_group_index], + ] def state_dict(self): """Returns a dictionary containing a whole state of ParamScheduler. 
@@ -81,7 +81,7 @@ def state_dict(self): for name in self._state_attrs: if hasattr(self, name): val = getattr(self, name) - if hasattr(val, 'state_dict'): + if hasattr(val, "state_dict"): val = val.state_dict() destination[name] = copy(val) return destination @@ -97,11 +97,14 @@ def load_state_dict(self, state_dict): for name in self._state_attrs: if name not in state_dict: - raise ValueError("Required state attribute '{}' is absent in provided state_dict '{}'" - .format(name, state_dict.keys())) + raise ValueError( + "Required state attribute '{}' is absent in provided state_dict '{}'".format( + name, state_dict.keys() + ) + ) val = state_dict[name] obj = getattr(self, name) - if isinstance(val, Mapping) and hasattr(obj, 'load_state_dict'): + if isinstance(val, Mapping) and hasattr(obj, "load_state_dict"): obj.load_state_dict(val) else: setattr(self, name, val) @@ -137,7 +140,7 @@ def simulate_values(cls, num_events, **scheduler_kwargs): plt.legend() """ - keys_to_remove = ['optimizer', 'save_history'] + keys_to_remove = ["optimizer", "save_history"] for key in keys_to_remove: if key in scheduler_kwargs: del scheduler_kwargs[key] @@ -178,14 +181,16 @@ def plot_values(cls, num_events, **scheduler_kwargs): try: import matplotlib.pylab as plt except ImportError: - raise RuntimeError("This method requires matplotlib to be installed. " - "Please install it with command: \n pip install matplotlib") + raise RuntimeError( + "This method requires matplotlib to be installed. " + "Please install it with command: \n pip install matplotlib" + ) values = cls.simulate_values(num_events=num_events, **scheduler_kwargs) label = scheduler_kwargs.get("param_name", "learning rate") ax = plt.plot([e for e, _ in values], [v for _, v in values], label=label) plt.legend() - plt.grid(which='both') + plt.grid(which="both") return ax @@ -214,22 +219,21 @@ class CyclicalScheduler(ParamScheduler): usually be the number of batches in an epoch. 
""" - def __init__(self, - optimizer, - param_name, - start_value, - end_value, - cycle_size, - cycle_mult=1.0, - start_value_mult=1.0, - end_value_mult=1.0, - save_history=False, - param_group_index=None): + def __init__( + self, + optimizer, + param_name, + start_value, + end_value, + cycle_size, + cycle_mult=1.0, + start_value_mult=1.0, + end_value_mult=1.0, + save_history=False, + param_group_index=None, + ): super(CyclicalScheduler, self).__init__( - optimizer, - param_name, - save_history=save_history, - param_group_index=param_group_index + optimizer, param_name, save_history=save_history, param_group_index=param_group_index ) self.start_value = start_value self.end_value = end_value @@ -240,11 +244,19 @@ def __init__(self, self.end_value_mult = end_value_mult if self.cycle_size < 2: - raise ValueError("Argument cycle_size should be positive and larger than 1, " - "but given {}".format(cycle_size)) - - self._state_attrs += ['start_value', 'end_value', 'cycle_size', 'cycle_mult', - 'cycle', 'start_value_mult', 'end_value_mult'] + raise ValueError( + "Argument cycle_size should be positive and larger than 1, " "but given {}".format(cycle_size) + ) + + self._state_attrs += [ + "start_value", + "end_value", + "cycle_size", + "cycle_mult", + "cycle", + "start_value_mult", + "end_value_mult", + ] def __call__(self, engine, name=None): if self.event_index != 0 and self.event_index % self.cycle_size == 0: @@ -403,22 +415,26 @@ class ConcatScheduler(ParamScheduler): def __init__(self, schedulers, durations, save_history=False): if not isinstance(schedulers, Sequence) or len(schedulers) < 2: - raise ValueError("Argument schedulers should be a sequence of more than one parameter schedulers, " - "but given {}".format(schedulers)) + raise ValueError( + "Argument schedulers should be a sequence of more than one parameter schedulers, " + "but given {}".format(schedulers) + ) - if not isinstance(durations, Sequence) or \ - not all([isinstance(t, numbers.Integral) for t in durations]): - raise ValueError("Argument durations should be list/tuple of integers, " - "but given {}".format(durations)) + if not isinstance(durations, Sequence) or not all([isinstance(t, numbers.Integral) for t in durations]): + raise ValueError("Argument durations should be list/tuple of integers, " "but given {}".format(durations)) if len(schedulers) != len(durations) + 1: - raise ValueError("Incorrect number schedulers or duration values, " - "given {} and {}".format(len(schedulers), len(durations))) + raise ValueError( + "Incorrect number schedulers or duration values, " + "given {} and {}".format(len(schedulers), len(durations)) + ) for i, scheduler in enumerate(schedulers): if not isinstance(scheduler, ParamScheduler): - raise TypeError("Value at index {} of schedulers should be a parameter scheduler, " - "but given {}".format(i, type(scheduler))) + raise TypeError( + "Value at index {} of schedulers should be a parameter scheduler, " + "but given {}".format(i, type(scheduler)) + ) self.schedulers = schedulers self.durations = durations @@ -428,7 +444,7 @@ def __init__(self, schedulers, durations, save_history=False): self._current_scheduler = None self._current_duration = None self._setup_scheduler() - self._state_attrs += ['_current_duration', 'durations', '_scheduler_index'] + self._state_attrs += ["_current_duration", "durations", "_scheduler_index"] def state_dict(self): """Returns a dictionary containing a whole state of ConcatScheduler. 
@@ -439,9 +455,9 @@ def state_dict(self): """ state_dict = super(ConcatScheduler, self).state_dict() - state_dict['schedulers'] = [] + state_dict["schedulers"] = [] for s in self.schedulers: - state_dict['schedulers'].append(s.state_dict()) + state_dict["schedulers"].append(s.state_dict()) return state_dict def load_state_dict(self, state_dict): @@ -453,13 +469,18 @@ def load_state_dict(self, state_dict): if not isinstance(state_dict, Mapping): raise TypeError("Argument state_dict should be a dictionary, but given {}".format(type(state_dict))) - if 'schedulers' not in state_dict: - raise ValueError("Required state attribute '{}' is absent in provided state_dict '{}'" - .format('schedulers', state_dict.keys())) - sds = state_dict['schedulers'] + if "schedulers" not in state_dict: + raise ValueError( + "Required state attribute '{}' is absent in provided state_dict '{}'".format( + "schedulers", state_dict.keys() + ) + ) + sds = state_dict["schedulers"] if len(sds) != len(self.schedulers): - raise ValueError("Input state_dict contains {} state_dicts of concatenated schedulers, " - "but {} needed".format(len(sds), len(self.schedulers))) + raise ValueError( + "Input state_dict contains {} state_dicts of concatenated schedulers, " + "but {} needed".format(len(sds), len(self.schedulers)) + ) for s, sd in zip(self.schedulers, sds): s.load_state_dict(sd) @@ -468,8 +489,9 @@ def load_state_dict(self, state_dict): def _setup_scheduler(self): self._current_scheduler = self.schedulers[self._scheduler_index] - self._current_duration = self.durations[self._scheduler_index] \ - if self._scheduler_index < len(self.durations) else -1 + self._current_duration = ( + self.durations[self._scheduler_index] if self._scheduler_index < len(self.durations) else -1 + ) self.param_name = self._current_scheduler.param_name self.optimizer = self._current_scheduler.optimizer @@ -526,7 +548,7 @@ def simulate_values(cls, num_events, schedulers, durations, param_names=None, ** for i in range(num_events): scheduler(engine=None) values = [scheduler.optimizer_param_groups[0][param_name] for param_name in param_names] - output.append([i, ] + values) + output.append([i,] + values) return output @@ -557,20 +579,24 @@ class LRScheduler(ParamScheduler): def __init__(self, lr_scheduler, save_history=False, **kwds): if not isinstance(lr_scheduler, _LRScheduler): - raise TypeError("Argument lr_scheduler should be a subclass of torch.optim.lr_scheduler._LRScheduler, " - "but given {}".format(type(lr_scheduler))) + raise TypeError( + "Argument lr_scheduler should be a subclass of torch.optim.lr_scheduler._LRScheduler, " + "but given {}".format(type(lr_scheduler)) + ) if len(lr_scheduler.optimizer.param_groups) > 1: - raise ValueError("Optimizer passed to lr_scheduler should have a single param group, " - "but currently there are {} param groups".format(len(lr_scheduler.optimizer.param_groups))) + raise ValueError( + "Optimizer passed to lr_scheduler should have a single param group, " + "but currently there are {} param groups".format(len(lr_scheduler.optimizer.param_groups)) + ) self.lr_scheduler = lr_scheduler super(LRScheduler, self).__init__( - optimizer=self.lr_scheduler.optimizer, - param_name='lr', - save_history=save_history + optimizer=self.lr_scheduler.optimizer, param_name="lr", save_history=save_history ) - self._state_attrs += ['lr_scheduler', ] + self._state_attrs += [ + "lr_scheduler", + ] def __call__(self, engine, name=None): self.lr_scheduler.last_epoch += 1 @@ -584,8 +610,10 @@ def get_param(self): lr_list = 
self.lr_scheduler.get_lr() self.lr_scheduler._get_lr_called_within_step = False if len(lr_list) > 1: - raise ValueError("Optimizer passed to lr_scheduler should have a single param group, " - "but currently there are {} param groups".format(len(lr_list))) + raise ValueError( + "Optimizer passed to lr_scheduler should have a single param group, " + "but currently there are {} param groups".format(len(lr_list)) + ) return lr_list[0] @classmethod @@ -620,18 +648,23 @@ def _replicate_lr_scheduler(lr_scheduler): optimizer_cls = lr_scheduler.optimizer.__class__ dummy_optimizer = _get_fake_optimizer(optimizer_cls, **lr_scheduler.optimizer.defaults) for group in dummy_optimizer.param_groups: - group.setdefault('initial_lr', group['lr']) + group.setdefault("initial_lr", group["lr"]) kwargs = lr_scheduler.state_dict() - for k in [_k for _k in kwargs.keys() if "_" == _k[0]] + ['base_lrs', 'last_epoch']: + for k in [_k for _k in kwargs.keys() if "_" == _k[0]] + ["base_lrs", "last_epoch"]: del kwargs[k] copy_lr_scheduler = lr_scheduler_cls(optimizer=dummy_optimizer, **kwargs) copy_lr_scheduler.load_state_dict(lr_scheduler.state_dict()) return copy_lr_scheduler -def create_lr_scheduler_with_warmup(lr_scheduler, warmup_start_value, warmup_end_value, warmup_duration, - save_history=False, - output_simulated_values=None): +def create_lr_scheduler_with_warmup( + lr_scheduler, + warmup_start_value, + warmup_end_value, + warmup_duration, + save_history=False, + output_simulated_values=None, +): """ Helper method to create a learning rate scheduler with a linear warm-up. @@ -675,17 +708,18 @@ def create_lr_scheduler_with_warmup(lr_scheduler, warmup_start_value, warmup_end """ if not isinstance(lr_scheduler, (ParamScheduler, _LRScheduler)): - raise TypeError("Argument lr_scheduler should be a subclass of torch.optim.lr_scheduler._LRScheduler or " - "ParamScheduler, but given {}".format(type(lr_scheduler))) + raise TypeError( + "Argument lr_scheduler should be a subclass of torch.optim.lr_scheduler._LRScheduler or " + "ParamScheduler, but given {}".format(type(lr_scheduler)) + ) if not (isinstance(warmup_duration, numbers.Integral) and warmup_duration > 1): - raise ValueError("Argument warmup_duration should be at least 2 events, but given {}" - .format(warmup_duration)) + raise ValueError("Argument warmup_duration should be at least 2 events, but given {}".format(warmup_duration)) milestones_values = [(0, warmup_start_value), (warmup_duration - 1, warmup_end_value)] if isinstance(lr_scheduler, _LRScheduler): - init_lrs = [g['lr'] for g in lr_scheduler.optimizer.param_groups] + init_lrs = [g["lr"] for g in lr_scheduler.optimizer.param_groups] if len(init_lrs) < 1: raise RuntimeError("Number of parameter groups of input `lr_scheduler.optimizer` is less than one.") @@ -702,18 +736,24 @@ def create_lr_scheduler_with_warmup(lr_scheduler, warmup_start_value, warmup_end else: milestones_values.pop(-1) - warmup_scheduler = PiecewiseLinear(lr_scheduler.optimizer, param_name="lr", - milestones_values=milestones_values, - param_group_index=lr_scheduler.param_group_index) + warmup_scheduler = PiecewiseLinear( + lr_scheduler.optimizer, + param_name="lr", + milestones_values=milestones_values, + param_group_index=lr_scheduler.param_group_index, + ) schedulers = [warmup_scheduler, lr_scheduler] - durations = [milestones_values[-1][0] + 1, ] - combined_scheduler = ConcatScheduler(schedulers, durations=durations, - save_history=save_history) + durations = [ + milestones_values[-1][0] + 1, + ] + combined_scheduler = 
ConcatScheduler(schedulers, durations=durations, save_history=save_history) if output_simulated_values is not None: if not isinstance(output_simulated_values, list): - raise TypeError("Argument output_simulated_values should be a list of None, e.g. `[None] * 100`, " - "but given {}.".format(type(output_simulated_values))) + raise TypeError( + "Argument output_simulated_values should be a list of None, e.g. `[None] * 100`, " + "but given {}.".format(type(output_simulated_values)) + ) num_events = len(output_simulated_values) result = ConcatScheduler.simulate_values(num_events=num_events, schedulers=schedulers, durations=durations) for i in range(num_events): @@ -755,8 +795,10 @@ def __init__(self, optimizer, param_name, milestones_values, save_history=False, super(PiecewiseLinear, self).__init__(optimizer, param_name, save_history, param_group_index=param_group_index) if not isinstance(milestones_values, Sequence) or len(milestones_values) < 1: - raise ValueError("Argument milestones_values should be a list or tuple with at least one value, " - "but given {}".format(type(milestones_values))) + raise ValueError( + "Argument milestones_values should be a list or tuple with at least one value, " + "but given {}".format(type(milestones_values)) + ) values = [] milestones = [] @@ -766,24 +808,35 @@ def __init__(self, optimizer, param_name, milestones_values, save_history=False, if not isinstance(pair[0], numbers.Integral): raise ValueError("Value of a milestone should be integer, but given {}".format(type(pair[0]))) if len(milestones) > 0 and pair[0] < milestones[-1]: - raise ValueError("Milestones should be increasing integers, but given {} is smaller " - "than the previous milestone {}".format(pair[0], milestones[-1])) + raise ValueError( + "Milestones should be increasing integers, but given {} is smaller " + "than the previous milestone {}".format(pair[0], milestones[-1]) + ) milestones.append(pair[0]) values.append(pair[1]) self.values = values self.milestones = milestones self._index = 0 - self._state_attrs += ['values', 'milestones', '_index'] + self._state_attrs += ["values", "milestones", "_index"] def _get_start_end(self): if self.milestones[0] > self.event_index: return self.event_index - 1, self.event_index, self.values[0], self.values[0] elif self.milestones[-1] <= self.event_index: - return self.event_index, self.event_index + 1, self.values[-1], self.values[-1], + return ( + self.event_index, + self.event_index + 1, + self.values[-1], + self.values[-1], + ) elif self.milestones[self._index] <= self.event_index < self.milestones[self._index + 1]: - return (self.milestones[self._index], self.milestones[self._index + 1], - self.values[self._index], self.values[self._index + 1]) + return ( + self.milestones[self._index], + self.milestones[self._index + 1], + self.values[self._index], + self.values[self._index + 1], + ) else: self._index += 1 return self._get_start_end() @@ -822,8 +875,9 @@ class ParamGroupScheduler: """ def __init__(self, schedulers, names): - if not (isinstance(schedulers, Sequence) and all(isinstance(scheduler, ParamScheduler) - for scheduler in schedulers)): + if not ( + isinstance(schedulers, Sequence) and all(isinstance(scheduler, ParamScheduler) for scheduler in schedulers) + ): raise ValueError("Argument schedulers should be a list/tuple of parameter schedulers") if not (isinstance(names, (list, tuple)) and all(isinstance(n, str) for n in names)): @@ -847,9 +901,9 @@ def state_dict(self): a dictionary containing a whole state of ParamGroupScheduler """ 
state_dict = OrderedDict() - state_dict['schedulers'] = [] + state_dict["schedulers"] = [] for n, s in zip(self.names, self.schedulers): - state_dict['schedulers'].append((n, s.state_dict())) + state_dict["schedulers"].append((n, s.state_dict())) return state_dict def load_state_dict(self, state_dict): @@ -861,28 +915,33 @@ def load_state_dict(self, state_dict): if not isinstance(state_dict, Mapping): raise TypeError("Argument state_dict should be a dictionary, but given {}".format(type(state_dict))) - if 'schedulers' not in state_dict: - raise ValueError("Required state attribute '{}' is absent in provided state_dict '{}'" - .format('schedulers', state_dict.keys())) - sds = state_dict['schedulers'] + if "schedulers" not in state_dict: + raise ValueError( + "Required state attribute '{}' is absent in provided state_dict '{}'".format( + "schedulers", state_dict.keys() + ) + ) + sds = state_dict["schedulers"] if len(sds) != len(self.schedulers): - raise ValueError("Input state_dict contains {} state_dicts of param group schedulers, " - "but {} needed".format(len(sds), len(self.schedulers))) + raise ValueError( + "Input state_dict contains {} state_dicts of param group schedulers, " + "but {} needed".format(len(sds), len(self.schedulers)) + ) for req_n, s, (n, sd) in zip(self.names, self.schedulers, sds): if req_n != n: - raise ValueError("Name of scheduler from input state dict does not correspond to required one," - " {} vs {}".format(n, req_n)) + raise ValueError( + "Name of scheduler from input state dict does not correspond to required one," + " {} vs {}".format(n, req_n) + ) s.load_state_dict(sd) def _replicate_scheduler(scheduler, save_history=False): if isinstance(scheduler, LRScheduler): - return LRScheduler(LRScheduler._replicate_lr_scheduler(scheduler.lr_scheduler), - save_history=save_history) + return LRScheduler(LRScheduler._replicate_lr_scheduler(scheduler.lr_scheduler), save_history=save_history) elif isinstance(scheduler, ConcatScheduler): - copy_schedulers = [_replicate_scheduler(s, save_history=save_history) - for s in scheduler.schedulers] + copy_schedulers = [_replicate_scheduler(s, save_history=save_history) for s in scheduler.schedulers] return ConcatScheduler(copy_schedulers, durations=scheduler.durations, save_history=save_history) elif isinstance(scheduler, ParamScheduler): new_scheduler = copy(scheduler) @@ -897,5 +956,5 @@ def _get_fake_optimizer(optimizer_cls=None, **kwargs): t = torch.zeros([1], requires_grad=True) if optimizer_cls is None: optimizer_cls = torch.optim.SGD - kwargs['lr'] = 0.01 + kwargs["lr"] = 0.01 return optimizer_cls([t], **kwargs) diff --git a/ignite/contrib/handlers/polyaxon_logger.py b/ignite/contrib/handlers/polyaxon_logger.py index cd14a3480695..e54534fcab72 100644 --- a/ignite/contrib/handlers/polyaxon_logger.py +++ b/ignite/contrib/handlers/polyaxon_logger.py @@ -3,10 +3,14 @@ import warnings import torch -from ignite.contrib.handlers.base_logger import BaseLogger, BaseOutputHandler, BaseOptimizerParamsHandler, \ - global_step_from_engine +from ignite.contrib.handlers.base_logger import ( + BaseLogger, + BaseOutputHandler, + BaseOptimizerParamsHandler, + global_step_from_engine, +) -__all__ = ['PolyaxonLogger', 'OutputHandler', 'OptimizerParamsHandler', 'global_step_from_engine'] +__all__ = ["PolyaxonLogger", "OutputHandler", "OptimizerParamsHandler", "global_step_from_engine"] class OutputHandler(BaseOutputHandler): @@ -98,8 +102,10 @@ def __call__(self, engine, logger, event_name): global_step = self.global_step_transform(engine, 
event_name) if not isinstance(global_step, int): - raise TypeError("global_step must be int, got {}." - " Please check the output of global_step_transform.".format(type(global_step))) + raise TypeError( + "global_step must be int, got {}." + " Please check the output of global_step_transform.".format(type(global_step)) + ) rendered_metrics = {"step": global_step} for key, value in metrics.items(): @@ -111,8 +117,7 @@ def __call__(self, engine, logger, event_name): for i, v in enumerate(value): rendered_metrics["{}/{}/{}".format(self.tag, key, i)] = v.item() else: - warnings.warn("PolyaxonLogger output_handler can not log " - "metrics value type {}".format(type(value))) + warnings.warn("PolyaxonLogger output_handler can not log " "metrics value type {}".format(type(value))) logger.log_metrics(**rendered_metrics) @@ -149,9 +154,11 @@ def __call__(self, engine, logger, event_name): global_step = engine.state.get_event_attrib_value(event_name) tag_prefix = "{}/".format(self.tag) if self.tag else "" - params = {"{}{}/group_{}".format(tag_prefix, self.param_name, i): float(param_group[self.param_name]) - for i, param_group in enumerate(self.optimizer.param_groups)} - params['step'] = global_step + params = { + "{}{}/group_{}".format(tag_prefix, self.param_name, i): float(param_group[self.param_name]) + for i, param_group in enumerate(self.optimizer.param_groups) + } + params["step"] = global_step logger.log_metrics(**params) @@ -211,8 +218,10 @@ def __init__(self): try: from polyaxon_client.tracking import Experiment except ImportError: - raise RuntimeError("This contrib module requires polyaxon-client to be installed. " - "Please install it with command: \n pip install polyaxon-client") + raise RuntimeError( + "This contrib module requires polyaxon-client to be installed. " + "Please install it with command: \n pip install polyaxon-client" + ) self.experiment = Experiment() diff --git a/ignite/contrib/handlers/tensorboard_logger.py b/ignite/contrib/handlers/tensorboard_logger.py index f584dbf3d149..48970e157592 100644 --- a/ignite/contrib/handlers/tensorboard_logger.py +++ b/ignite/contrib/handlers/tensorboard_logger.py @@ -3,12 +3,25 @@ import warnings import torch -from ignite.contrib.handlers.base_logger import BaseLogger, BaseOptimizerParamsHandler, BaseOutputHandler, \ - BaseWeightsScalarHandler, BaseWeightsHistHandler, global_step_from_engine - -__all__ = ['TensorboardLogger', 'OptimizerParamsHandler', 'OutputHandler', - 'WeightsScalarHandler', 'WeightsHistHandler', 'GradsScalarHandler', - 'GradsHistHandler', 'global_step_from_engine'] +from ignite.contrib.handlers.base_logger import ( + BaseLogger, + BaseOptimizerParamsHandler, + BaseOutputHandler, + BaseWeightsScalarHandler, + BaseWeightsHistHandler, + global_step_from_engine, +) + +__all__ = [ + "TensorboardLogger", + "OptimizerParamsHandler", + "OutputHandler", + "WeightsScalarHandler", + "WeightsHistHandler", + "GradsScalarHandler", + "GradsHistHandler", + "global_step_from_engine", +] class OutputHandler(BaseOutputHandler): @@ -100,19 +113,21 @@ def __call__(self, engine, logger, event_name): global_step = self.global_step_transform(engine, event_name) if not isinstance(global_step, int): - raise TypeError("global_step must be int, got {}." - " Please check the output of global_step_transform.".format(type(global_step))) + raise TypeError( + "global_step must be int, got {}." 
+ " Please check the output of global_step_transform.".format(type(global_step)) + ) for key, value in metrics.items(): - if isinstance(value, numbers.Number) or \ - isinstance(value, torch.Tensor) and value.ndimension() == 0: + if isinstance(value, numbers.Number) or isinstance(value, torch.Tensor) and value.ndimension() == 0: logger.writer.add_scalar("{}/{}".format(self.tag, key), value, global_step) elif isinstance(value, torch.Tensor) and value.ndimension() == 1: for i, v in enumerate(value): logger.writer.add_scalar("{}/{}/{}".format(self.tag, key, i), v.item(), global_step) else: - warnings.warn("TensorboardLogger output_handler can not log " - "metrics value type {}".format(type(value))) + warnings.warn( + "TensorboardLogger output_handler can not log " "metrics value type {}".format(type(value)) + ) class OptimizerParamsHandler(BaseOptimizerParamsHandler): @@ -147,8 +162,10 @@ def __call__(self, engine, logger, event_name): global_step = engine.state.get_event_attrib_value(event_name) tag_prefix = "{}/".format(self.tag) if self.tag else "" - params = {"{}{}/group_{}".format(tag_prefix, self.param_name, i): float(param_group[self.param_name]) - for i, param_group in enumerate(self.optimizer.param_groups)} + params = { + "{}{}/group_{}".format(tag_prefix, self.param_name, i): float(param_group[self.param_name]) + for i, param_group in enumerate(self.optimizer.param_groups) + } for k, v in params.items(): logger.writer.add_scalar(k, v, global_step) @@ -194,10 +211,10 @@ def __call__(self, engine, logger, event_name): if p.grad is None: continue - name = name.replace('.', '/') - logger.writer.add_scalar("{}weights_{}/{}".format(tag_prefix, self.reduction.__name__, name), - self.reduction(p.data), - global_step) + name = name.replace(".", "/") + logger.writer.add_scalar( + "{}weights_{}/{}".format(tag_prefix, self.reduction.__name__, name), self.reduction(p.data), global_step + ) class WeightsHistHandler(BaseWeightsHistHandler): @@ -236,10 +253,12 @@ def __call__(self, engine, logger, event_name): if p.grad is None: continue - name = name.replace('.', '/') - logger.writer.add_histogram(tag="{}weights/{}".format(tag_prefix, name), - values=p.data.detach().cpu().numpy(), - global_step=global_step) + name = name.replace(".", "/") + logger.writer.add_histogram( + tag="{}weights/{}".format(tag_prefix, name), + values=p.data.detach().cpu().numpy(), + global_step=global_step, + ) class GradsScalarHandler(BaseWeightsScalarHandler): @@ -281,10 +300,10 @@ def __call__(self, engine, logger, event_name): if p.grad is None: continue - name = name.replace('.', '/') - logger.writer.add_scalar("{}grads_{}/{}".format(tag_prefix, self.reduction.__name__, name), - self.reduction(p.grad), - global_step) + name = name.replace(".", "/") + logger.writer.add_scalar( + "{}grads_{}/{}".format(tag_prefix, self.reduction.__name__, name), self.reduction(p.grad), global_step + ) class GradsHistHandler(BaseWeightsHistHandler): @@ -323,10 +342,10 @@ def __call__(self, engine, logger, event_name): if p.grad is None: continue - name = name.replace('.', '/') - logger.writer.add_histogram(tag="{}grads/{}".format(tag_prefix, name), - values=p.grad.detach().cpu().numpy(), - global_step=global_step) + name = name.replace(".", "/") + logger.writer.add_histogram( + tag="{}grads/{}".format(tag_prefix, name), values=p.grad.detach().cpu().numpy(), global_step=global_step + ) class TensorboardLogger(BaseLogger): @@ -430,9 +449,11 @@ def __init__(self, *args, **kwargs): try: from torch.utils.tensorboard import SummaryWriter except 
ImportError: - raise RuntimeError("This contrib module requires either tensorboardX or torch >= 1.2.0. " - "You may install tensorboardX with command: \n pip install tensorboardX \n" - "or upgrade PyTorch using your package manager of choice (pip or conda).") + raise RuntimeError( + "This contrib module requires either tensorboardX or torch >= 1.2.0. " + "You may install tensorboardX with command: \n pip install tensorboardX \n" + "or upgrade PyTorch using your package manager of choice (pip or conda)." + ) self.writer = SummaryWriter(*args, **kwargs) diff --git a/ignite/contrib/handlers/time_profilers.py b/ignite/contrib/handlers/time_profilers.py index 98424d43bfe0..c74f24454cd0 100644 --- a/ignite/contrib/handlers/time_profilers.py +++ b/ignite/contrib/handlers/time_profilers.py @@ -24,6 +24,7 @@ class BasicTimeProfiler(object): trainer.run(dataloader, max_epochs=3) """ + def __init__(self): self._dataflow_timer = Timer() self._processing_timer = Timer() @@ -40,7 +41,7 @@ def __init__(self): Events.ITERATION_COMPLETED, Events.GET_BATCH_STARTED, Events.GET_BATCH_COMPLETED, - Events.COMPLETED + Events.COMPLETED, ] self._fmethods = [ self._as_first_epoch_started, @@ -49,7 +50,7 @@ def __init__(self): self._as_first_iter_completed, self._as_first_get_batch_started, self._as_first_get_batch_completed, - self._as_first_completed + self._as_first_completed, ] self._lmethods = [ self._as_last_epoch_started, @@ -58,7 +59,7 @@ def __init__(self): self._as_last_iter_completed, self._as_last_get_batch_started, self._as_last_get_batch_completed, - self._as_last_completed + self._as_last_completed, ] def _reset(self, num_epochs, total_num_iters): @@ -72,7 +73,7 @@ def _reset(self, num_epochs, total_num_iters): Events.ITERATION_STARTED: torch.zeros(total_num_iters), Events.ITERATION_COMPLETED: torch.zeros(total_num_iters), Events.GET_BATCH_COMPLETED: torch.zeros(total_num_iters), - Events.GET_BATCH_STARTED: torch.zeros(total_num_iters) + Events.GET_BATCH_STARTED: torch.zeros(total_num_iters), } def _as_first_started(self, engine): @@ -86,9 +87,12 @@ def _as_first_started(self, engine): self._reset(self.max_epochs, self.total_num_iters) self.event_handlers_names = { - e: [h.__qualname__ if hasattr(h, "__qualname__") else h.__class__.__name__ - for (h, _, _) in engine._event_handlers[e]] - for e in Events if e != Events.EXCEPTION_RAISED + e: [ + h.__qualname__ if hasattr(h, "__qualname__") else h.__class__.__name__ + for (h, _, _) in engine._event_handlers[e] + ] + for e in Events + if e != Events.EXCEPTION_RAISED } # Setup all other handlers: @@ -183,22 +187,22 @@ def _as_last_completed(self, engine): def attach(self, engine): if not isinstance(engine, Engine): - raise TypeError("Argument engine should be ignite.engine.Engine, " - "but given {}".format(type(engine))) + raise TypeError("Argument engine should be ignite.engine.Engine, " "but given {}".format(type(engine))) if not engine.has_event_handler(self._as_first_started): - engine._event_handlers[Events.STARTED]\ - .insert(0, (self._as_first_started, (), {})) + engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (), {})) @staticmethod def _compute_basic_stats(data): - return OrderedDict([ - ('min/index', (torch.min(data).item(), torch.argmin(data).item())), - ('max/index', (torch.max(data).item(), torch.argmax(data).item())), - ('mean', torch.mean(data).item()), - ('std', torch.std(data).item()), - ('total', torch.sum(data).item()) - ]) + return OrderedDict( + [ + ("min/index", (torch.min(data).item(), 
torch.argmin(data).item())), + ("max/index", (torch.max(data).item(), torch.argmax(data).item())), + ("mean", torch.mean(data).item()), + ("std", torch.std(data).item()), + ("total", torch.sum(data).item()), + ] + ) def get_results(self): """ @@ -209,20 +213,29 @@ def get_results(self): results = profiler.get_results() """ - events_to_ignore = [ - Events.EXCEPTION_RAISED - ] + events_to_ignore = [Events.EXCEPTION_RAISED] total_eh_time = sum([sum(self.event_handlers_times[e]) for e in Events if e not in events_to_ignore]) - return OrderedDict([ - ("processing_stats", self._compute_basic_stats(self.processing_times)), - ("dataflow_stats", self._compute_basic_stats(self.dataflow_times)), - ("event_handlers_stats", - dict([(str(e).replace(".", "_"), self._compute_basic_stats(self.event_handlers_times[e])) - for e in Events if e not in events_to_ignore] + [("total_time", total_eh_time)]) - ), - ("event_handlers_names", {str(e).replace(".", "_") + "_names": v - for e, v in self.event_handlers_names.items()}) - ]) + return OrderedDict( + [ + ("processing_stats", self._compute_basic_stats(self.processing_times)), + ("dataflow_stats", self._compute_basic_stats(self.dataflow_times)), + ( + "event_handlers_stats", + dict( + [ + (str(e).replace(".", "_"), self._compute_basic_stats(self.event_handlers_times[e])) + for e in Events + if e not in events_to_ignore + ] + + [("total_time", total_eh_time)] + ), + ), + ( + "event_handlers_names", + {str(e).replace(".", "_") + "_names": v for e, v in self.event_handlers_names.items()}, + ), + ] + ) def write_results(self, output_path): """ @@ -250,43 +263,55 @@ def write_results(self, output_path): iters_per_epoch = self.total_num_iters // self.max_epochs - epochs = torch.arange(self.max_epochs, dtype=torch.float32)\ - .repeat_interleave(iters_per_epoch) + 1 - iterations = torch.arange(self.total_num_iters, - dtype=torch.float32) + 1 + epochs = torch.arange(self.max_epochs, dtype=torch.float32).repeat_interleave(iters_per_epoch) + 1 + iterations = torch.arange(self.total_num_iters, dtype=torch.float32) + 1 processing_stats = self.processing_times dataflow_stats = self.dataflow_times - event_started = self.event_handlers_times[Events.STARTED]\ - .repeat_interleave(self.total_num_iters) - event_completed = self.event_handlers_times[Events.COMPLETED]\ - .repeat_interleave(self.total_num_iters) - event_epoch_started = self.event_handlers_times[Events.EPOCH_STARTED]\ - .repeat_interleave(iters_per_epoch) - event_epoch_completed = self.event_handlers_times[Events.EPOCH_COMPLETED]\ - .repeat_interleave(iters_per_epoch) + event_started = self.event_handlers_times[Events.STARTED].repeat_interleave(self.total_num_iters) + event_completed = self.event_handlers_times[Events.COMPLETED].repeat_interleave(self.total_num_iters) + event_epoch_started = self.event_handlers_times[Events.EPOCH_STARTED].repeat_interleave(iters_per_epoch) + event_epoch_completed = self.event_handlers_times[Events.EPOCH_COMPLETED].repeat_interleave(iters_per_epoch) event_iter_started = self.event_handlers_times[Events.ITERATION_STARTED] event_iter_completed = self.event_handlers_times[Events.ITERATION_COMPLETED] event_batch_started = self.event_handlers_times[Events.GET_BATCH_STARTED] event_batch_completed = self.event_handlers_times[Events.GET_BATCH_COMPLETED] - results_dump = torch.stack([ - epochs, iterations, processing_stats, dataflow_stats, - event_started, event_completed, event_epoch_started, - event_epoch_completed, event_iter_started, event_iter_completed, - event_batch_started, 
event_batch_completed - ], dim=1).numpy() + results_dump = torch.stack( + [ + epochs, + iterations, + processing_stats, + dataflow_stats, + event_started, + event_completed, + event_epoch_started, + event_epoch_completed, + event_iter_started, + event_iter_completed, + event_batch_started, + event_batch_completed, + ], + dim=1, + ).numpy() results_df = pd.DataFrame( data=results_dump, columns=[ - 'epoch', 'iteration', 'processing_stats', 'dataflow_stats', - 'Event_STARTED', 'Event_COMPLETED', - 'Event_EPOCH_STARTED', 'Event_EPOCH_COMPLETED', - 'Event_ITERATION_STARTED', 'Event_ITERATION_COMPLETED', - 'Event_GET_BATCH_STARTED', 'Event_GET_BATCH_COMPLETED' - ] + "epoch", + "iteration", + "processing_stats", + "dataflow_stats", + "Event_STARTED", + "Event_COMPLETED", + "Event_EPOCH_STARTED", + "Event_EPOCH_COMPLETED", + "Event_ITERATION_STARTED", + "Event_ITERATION_COMPLETED", + "Event_GET_BATCH_STARTED", + "Event_GET_BATCH_COMPLETED", + ], ) results_df.to_csv(output_path, index=False) @@ -335,16 +360,18 @@ def print_results(results): ['BasicTimeProfiler._as_first_started', 'delay_start'] -------------------------------------------- """ + def odict_to_str(d): out = "" for k, v in d.items(): out += "\t{}: {}\n".format(k, v) return out - others = {k: odict_to_str(v) if isinstance(v, OrderedDict) else v - for k, v in results['event_handlers_stats'].items()} + others = { + k: odict_to_str(v) if isinstance(v, OrderedDict) else v for k, v in results["event_handlers_stats"].items() + } - others.update(results['event_handlers_names']) + others.update(results["event_handlers_names"]) output_message = """ -------------------------------------------- @@ -391,8 +418,10 @@ def odict_to_str(d): Handlers names: {Events_COMPLETED_names} -""".format(processing_stats=odict_to_str(results['processing_stats']), - dataflow_stats=odict_to_str(results['dataflow_stats']), - **others) +""".format( + processing_stats=odict_to_str(results["processing_stats"]), + dataflow_stats=odict_to_str(results["dataflow_stats"]), + **others, + ) print(output_message) return output_message diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py index bf7f80972003..5e7e05c86d3f 100644 --- a/ignite/contrib/handlers/tqdm_logger.py +++ b/ignite/contrib/handlers/tqdm_logger.py @@ -95,18 +95,23 @@ class ProgressBar(BaseLogger): Events.ITERATION_STARTED, Events.ITERATION_COMPLETED, Events.EPOCH_COMPLETED, - Events.COMPLETED + Events.COMPLETED, ] - def __init__(self, persist=False, - bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]', - **tqdm_kwargs): + def __init__( + self, + persist=False, + bar_format="{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]", + **tqdm_kwargs + ): try: from tqdm.autonotebook import tqdm except ImportError: - raise RuntimeError("This contrib module requires tqdm to be installed. " - "Please install it with command: \n pip install tqdm") + raise RuntimeError( + "This contrib module requires tqdm to be installed. 
" + "Please install it with command: \n pip install tqdm" + ) self.pbar_cls = tqdm self.pbar = None @@ -116,11 +121,7 @@ def __init__(self, persist=False, def _reset(self, pbar_total): self.pbar = self.pbar_cls( - total=pbar_total, - leave=self.persist, - bar_format=self.bar_format, - initial=1, - **self.tqdm_kwargs + total=pbar_total, leave=self.persist, bar_format=self.bar_format, initial=1, **self.tqdm_kwargs ) def _close(self, engine): @@ -149,9 +150,14 @@ def log_message(self, message): tqdm.write(message, file=self.tqdm_kwargs.get("file", None)) - def attach(self, engine, metric_names=None, output_transform=None, - event_name=Events.ITERATION_COMPLETED, - closing_event_name=Events.EPOCH_COMPLETED): + def attach( + self, + engine, + metric_names=None, + output_transform=None, + event_name=Events.ITERATION_COMPLETED, + closing_event_name=Events.EPOCH_COMPLETED, + ): """ Attaches the progress bar to an engine object. @@ -179,11 +185,11 @@ def attach(self, engine, metric_names=None, output_transform=None, raise ValueError("Closing event should not use any event filter") if not self._compare_lt(event_name, closing_event_name): - raise ValueError("Logging event {} should be called before closing event {}" - .format(event_name, closing_event_name)) + raise ValueError( + "Logging event {} should be called before closing event {}".format(event_name, closing_event_name) + ) - log_handler = _OutputHandler(desc, metric_names, output_transform, - closing_event_name=closing_event_name) + log_handler = _OutputHandler(desc, metric_names, output_transform, closing_event_name=closing_event_name) # if event_name is EventWithFilter, filter is passed here super(ProgressBar, self).attach(engine, log_handler, event_name) engine.add_event_handler(closing_event_name, self._close) @@ -206,13 +212,15 @@ class _OutputHandler(BaseOutputHandler): """ - def __init__(self, description, metric_names=None, output_transform=None, - closing_event_name=Events.EPOCH_COMPLETED): + def __init__( + self, description, metric_names=None, output_transform=None, closing_event_name=Events.EPOCH_COMPLETED + ): if metric_names is None and output_transform is None: # This helps to avoid 'Either metric_names or output_transform should be defined' of BaseOutputHandler metric_names = [] - super(_OutputHandler, self).__init__(description, metric_names, output_transform, - another_engine=None, global_step_transform=None) + super(_OutputHandler, self).__init__( + description, metric_names, output_transform, another_engine=None, global_step_transform=None + ) self.closing_event_name = closing_event_name @staticmethod @@ -248,8 +256,7 @@ def __call__(self, engine, logger, event_name): k = "{}_{}".format(key, i) rendered_metrics[k] = v.item() else: - warnings.warn("ProgressBar can not log " - "tensor with {} dimensions".format(value.ndimension())) + warnings.warn("ProgressBar can not log " "tensor with {} dimensions".format(value.ndimension())) else: rendered_metrics[key] = value diff --git a/ignite/contrib/handlers/visdom_logger.py b/ignite/contrib/handlers/visdom_logger.py index dc50e508c897..27144c3e21c0 100644 --- a/ignite/contrib/handlers/visdom_logger.py +++ b/ignite/contrib/handlers/visdom_logger.py @@ -4,15 +4,25 @@ import warnings import torch -from ignite.contrib.handlers.base_logger import BaseLogger, BaseOptimizerParamsHandler, BaseOutputHandler, \ - BaseWeightsScalarHandler, global_step_from_engine - -__all__ = ['VisdomLogger', 'OptimizerParamsHandler', 'OutputHandler', - 'WeightsScalarHandler', 'GradsScalarHandler', 
'global_step_from_engine'] +from ignite.contrib.handlers.base_logger import ( + BaseLogger, + BaseOptimizerParamsHandler, + BaseOutputHandler, + BaseWeightsScalarHandler, + global_step_from_engine, +) + +__all__ = [ + "VisdomLogger", + "OptimizerParamsHandler", + "OutputHandler", + "WeightsScalarHandler", + "GradsScalarHandler", + "global_step_from_engine", +] class _BaseVisDrawer: - def __init__(self, show_legend=False): self.windows = {} self.show_legend = show_legend @@ -33,30 +43,25 @@ def add_scalar(self, logger, k, v, event_name, global_step): """ if k not in self.windows: self.windows[k] = { - 'win': None, - 'opts': { - 'title': k, - 'xlabel': str(event_name), - 'ylabel': k, - 'showlegend': self.show_legend - } + "win": None, + "opts": {"title": k, "xlabel": str(event_name), "ylabel": k, "showlegend": self.show_legend}, } - update = None if self.windows[k]['win'] is None else 'append' + update = None if self.windows[k]["win"] is None else "append" kwargs = { - "X": [global_step, ], - "Y": [v, ], + "X": [global_step,], + "Y": [v,], "env": logger.vis.env, - "win": self.windows[k]['win'], + "win": self.windows[k]["win"], "update": update, - "opts": self.windows[k]['opts'], - "name": k + "opts": self.windows[k]["opts"], + "name": k, } future = logger.executor.submit(logger.vis.line, **kwargs) - if self.windows[k]['win'] is None: - self.windows[k]['win'] = future.result() + if self.windows[k]["win"] is None: + self.windows[k]["win"] = future.result() class OutputHandler(BaseOutputHandler, _BaseVisDrawer): @@ -137,8 +142,15 @@ def global_step_transform(engine, event_name): """ - def __init__(self, tag, metric_names=None, output_transform=None, another_engine=None, global_step_transform=None, - show_legend=False): + def __init__( + self, + tag, + metric_names=None, + output_transform=None, + another_engine=None, + global_step_transform=None, + show_legend=False, + ): super(OutputHandler, self).__init__(tag, metric_names, output_transform, another_engine, global_step_transform) _BaseVisDrawer.__init__(self, show_legend=show_legend) @@ -152,23 +164,23 @@ def __call__(self, engine, logger, event_name): global_step = self.global_step_transform(engine, event_name) if not isinstance(global_step, int): - raise TypeError("global_step must be int, got {}." - " Please check the output of global_step_transform.".format(type(global_step))) + raise TypeError( + "global_step must be int, got {}." 
+ " Please check the output of global_step_transform.".format(type(global_step)) + ) for key, value in metrics.items(): values = [] keys = [] - if isinstance(value, numbers.Number) or \ - isinstance(value, torch.Tensor) and value.ndimension() == 0: + if isinstance(value, numbers.Number) or isinstance(value, torch.Tensor) and value.ndimension() == 0: values.append(value) keys.append(key) elif isinstance(value, torch.Tensor) and value.ndimension() == 1: values = value keys = ["{}/{}".format(key, i) for i in range(len(value))] else: - warnings.warn("VisdomLogger output_handler can not log " - "metrics value type {}".format(type(value))) + warnings.warn("VisdomLogger output_handler can not log " "metrics value type {}".format(type(value))) for k, v in zip(keys, values): k = "{}/{}".format(self.tag, k) @@ -211,8 +223,10 @@ def __call__(self, engine, logger, event_name): global_step = engine.state.get_event_attrib_value(event_name) tag_prefix = "{}/".format(self.tag) if self.tag else "" - params = {"{}{}/group_{}".format(tag_prefix, self.param_name, i): float(param_group[self.param_name]) - for i, param_group in enumerate(self.optimizer.param_groups)} + params = { + "{}{}/group_{}".format(tag_prefix, self.param_name, i): float(param_group[self.param_name]) + for i, param_group in enumerate(self.optimizer.param_groups) + } for k, v in params.items(): self.add_scalar(logger, k, v, event_name, global_step) @@ -258,7 +272,7 @@ def __call__(self, engine, logger, event_name): global_step = engine.state.get_event_attrib_value(event_name) tag_prefix = "{}/".format(self.tag) if self.tag else "" for name, p in self.model.named_parameters(): - name = name.replace('.', '/') + name = name.replace(".", "/") k = "{}weights_{}/{}".format(tag_prefix, self.reduction.__name__, name) v = float(self.reduction(p.data)) self.add_scalar(logger, k, v, event_name, global_step) @@ -304,7 +318,7 @@ def __call__(self, engine, logger, event_name): global_step = engine.state.get_event_attrib_value(event_name) tag_prefix = "{}/".format(self.tag) if self.tag else "" for name, p in self.model.named_parameters(): - name = name.replace('.', '/') + name = name.replace(".", "/") k = "{}grads_{}/{}".format(tag_prefix, self.reduction.__name__, name) v = float(self.reduction(p.grad)) self.add_scalar(logger, k, v, event_name, global_step) @@ -415,9 +429,11 @@ def __init__(self, server=None, port=None, num_workers=1, **kwargs): try: import visdom except ImportError: - raise RuntimeError("This contrib module requires visdom package. " - "Please install it with command:\n" - "pip install git+https://github.com/facebookresearch/visdom.git") + raise RuntimeError( + "This contrib module requires visdom package. 
" + "Please install it with command:\n" + "pip install git+https://github.com/facebookresearch/visdom.git" + ) if num_workers > 0: # If visdom is installed, one of its dependencies `tornado` @@ -426,12 +442,14 @@ def __init__(self, server=None, port=None, num_workers=1, **kwargs): try: import concurrent.futures except ImportError: - raise RuntimeError("This contrib module requires concurrent.futures module" - "Please install it with command:\n" - "pip install futures") + raise RuntimeError( + "This contrib module requires concurrent.futures module" + "Please install it with command:\n" + "pip install futures" + ) if server is None: - server = os.environ.get("VISDOM_SERVER_URL", 'localhost') + server = os.environ.get("VISDOM_SERVER_URL", "localhost") if port is None: port = int(os.environ.get("VISDOM_PORT", 8097)) @@ -444,19 +462,17 @@ def __init__(self, server=None, port=None, num_workers=1, **kwargs): password = os.environ.get("VISDOM_PASSWORD", None) kwargs["password"] = password - self.vis = visdom.Visdom( - server=server, - port=port, - **kwargs - ) + self.vis = visdom.Visdom(server=server, port=port, **kwargs) if not self.vis.check_connection(): - raise RuntimeError("Failed to connect to Visdom server at {}. " - "Did you run python -m visdom.server ?".format(server)) + raise RuntimeError( + "Failed to connect to Visdom server at {}. " "Did you run python -m visdom.server ?".format(server) + ) self.executor = _DummyExecutor() if num_workers > 0: from concurrent.futures import ThreadPoolExecutor + self.executor = ThreadPoolExecutor(max_workers=num_workers) def _save(self): @@ -469,7 +485,6 @@ def close(self): class _DummyExecutor: class _DummyFuture: - def __init__(self, result): self._output = result diff --git a/ignite/contrib/metrics/gpu_info.py b/ignite/contrib/metrics/gpu_info.py index 86921aa2dc00..5e6cad9eb074 100644 --- a/ignite/contrib/metrics/gpu_info.py +++ b/ignite/contrib/metrics/gpu_info.py @@ -33,13 +33,16 @@ def __init__(self): try: import pynvml except ImportError: - raise RuntimeError("This contrib module requires pynvml to be installed. " - "Please install it with command: \n pip install pynvml") + raise RuntimeError( + "This contrib module requires pynvml to be installed. 
" + "Please install it with command: \n pip install pynvml" + ) # Let's check available devices if not torch.cuda.is_available(): raise RuntimeError("This contrib module requires available GPU") from pynvml.smi import nvidia_smi + # Let it fail if no libnvidia drivers or NMVL library found self.nvsmi = nvidia_smi.getInstance() super(GpuInfo, self).__init__() @@ -51,11 +54,11 @@ def update(self, output): pass def compute(self): - data = self.nvsmi.DeviceQuery('memory.used, memory.total, utilization.gpu') - if len(data) == 0 or ('gpu' not in data): + data = self.nvsmi.DeviceQuery("memory.used, memory.total, utilization.gpu") + if len(data) == 0 or ("gpu" not in data): warnings.warn("No GPU information available") return [] - return data['gpu'] + return data["gpu"] def completed(self, engine, name): data = self.compute() @@ -66,29 +69,32 @@ def completed(self, engine, name): for i, data_by_rank in enumerate(data): mem_name = "{}:{} mem(%)".format(name, i) - if 'fb_memory_usage' not in data_by_rank: + if "fb_memory_usage" not in data_by_rank: warnings.warn("No GPU memory usage information available in {}".format(data_by_rank)) continue - mem_report = data_by_rank['fb_memory_usage'] - if not ('used' in mem_report and 'total' in mem_report): - warnings.warn("GPU memory usage information does not provide used/total " - "memory consumption information in {}".format(mem_report)) + mem_report = data_by_rank["fb_memory_usage"] + if not ("used" in mem_report and "total" in mem_report): + warnings.warn( + "GPU memory usage information does not provide used/total " + "memory consumption information in {}".format(mem_report) + ) continue - engine.state.metrics[mem_name] = int(mem_report['used'] * 100.0 / mem_report['total']) + engine.state.metrics[mem_name] = int(mem_report["used"] * 100.0 / mem_report["total"]) for i, data_by_rank in enumerate(data): util_name = "{}:{} util(%)".format(name, i) - if 'utilization' not in data_by_rank: + if "utilization" not in data_by_rank: warnings.warn("No GPU utilization information available in {}".format(data_by_rank)) continue - util_report = data_by_rank['utilization'] - if not ('gpu_util' in util_report): - warnings.warn("GPU utilization information does not provide 'gpu_util' information in " - "{}".format(util_report)) + util_report = data_by_rank["utilization"] + if not ("gpu_util" in util_report): + warnings.warn( + "GPU utilization information does not provide 'gpu_util' information in " "{}".format(util_report) + ) continue - engine.state.metrics[util_name] = int(util_report['gpu_util']) + engine.state.metrics[util_name] = int(util_report["gpu_util"]) def attach(self, engine, name="gpu", event_name=Events.ITERATION_COMPLETED): engine.add_event_handler(event_name, self.completed, name) diff --git a/ignite/contrib/metrics/regression/_base.py b/ignite/contrib/metrics/regression/_base.py index aa178e3177a7..2c115fd0a70f 100644 --- a/ignite/contrib/metrics/regression/_base.py +++ b/ignite/contrib/metrics/regression/_base.py @@ -15,8 +15,7 @@ class _BaseRegression(Metric): def update(self, output): y_pred, y = output if y_pred.shape != y.shape: - raise ValueError("Input data shapes should be the same, but given {} and {}" - .format(y_pred.shape, y.shape)) + raise ValueError("Input data shapes should be the same, but given {} and {}".format(y_pred.shape, y.shape)) c1 = y_pred.ndimension() == 2 and y_pred.shape[1] == 1 if not (y_pred.ndimension() == 1 or c1): @@ -64,5 +63,4 @@ def _update(self, output): try: self.compute_fn(self._predictions, self._targets) except 
Exception as e: - warnings.warn("Probably, there can be a problem with `compute_fn`:\n {}".format(e), - RuntimeWarning) + warnings.warn("Probably, there can be a problem with `compute_fn`:\n {}".format(e), RuntimeWarning) diff --git a/ignite/contrib/metrics/regression/canberra_metric.py b/ignite/contrib/metrics/regression/canberra_metric.py index 6381b8c1d8f7..55ab0460f0d0 100644 --- a/ignite/contrib/metrics/regression/canberra_metric.py +++ b/ignite/contrib/metrics/regression/canberra_metric.py @@ -1,5 +1,3 @@ - - import torch from ignite.contrib.metrics.regression._base import _BaseRegression diff --git a/ignite/contrib/metrics/regression/fractional_absolute_error.py b/ignite/contrib/metrics/regression/fractional_absolute_error.py index 03e55ba19e6f..1a9a678c1319 100644 --- a/ignite/contrib/metrics/regression/fractional_absolute_error.py +++ b/ignite/contrib/metrics/regression/fractional_absolute_error.py @@ -1,5 +1,3 @@ - - import torch from ignite.exceptions import NotComputableError @@ -34,6 +32,7 @@ def _update(self, output): def compute(self): if self._num_examples == 0: - raise NotComputableError('FractionalAbsoluteError must have at least ' - 'one example before it can be computed.') + raise NotComputableError( + "FractionalAbsoluteError must have at least " "one example before it can be computed." + ) return self._sum_of_errors / self._num_examples diff --git a/ignite/contrib/metrics/regression/fractional_bias.py b/ignite/contrib/metrics/regression/fractional_bias.py index 0c61a13efd5c..9968ff21e452 100644 --- a/ignite/contrib/metrics/regression/fractional_bias.py +++ b/ignite/contrib/metrics/regression/fractional_bias.py @@ -1,4 +1,3 @@ - import torch from ignite.exceptions import NotComputableError @@ -34,5 +33,5 @@ def _update(self, output): def compute(self): if self._num_examples == 0: - raise NotComputableError('FractionalBias must have at least one example before it can be computed.') + raise NotComputableError("FractionalBias must have at least one example before it can be computed.") return self._sum_of_errors / self._num_examples diff --git a/ignite/contrib/metrics/regression/geometric_mean_absolute_error.py b/ignite/contrib/metrics/regression/geometric_mean_absolute_error.py index c1c1bf02d0b3..b2248640eb04 100644 --- a/ignite/contrib/metrics/regression/geometric_mean_absolute_error.py +++ b/ignite/contrib/metrics/regression/geometric_mean_absolute_error.py @@ -1,5 +1,3 @@ - - import torch from ignite.exceptions import NotComputableError @@ -34,6 +32,7 @@ def _update(self, output): def compute(self): if self._num_examples == 0: - raise NotComputableError('GeometricMeanAbsoluteError must have at ' - 'least one example before it can be computed.') + raise NotComputableError( + "GeometricMeanAbsoluteError must have at " "least one example before it can be computed." 
+ ) return torch.exp(self._sum_of_errors / self._num_examples).item() diff --git a/ignite/contrib/metrics/regression/geometric_mean_relative_absolute_error.py b/ignite/contrib/metrics/regression/geometric_mean_relative_absolute_error.py index 7cbceb849e42..075b8cae7e50 100644 --- a/ignite/contrib/metrics/regression/geometric_mean_relative_absolute_error.py +++ b/ignite/contrib/metrics/regression/geometric_mean_relative_absolute_error.py @@ -1,5 +1,3 @@ - - import torch from ignite.contrib.metrics.regression._base import _BaseRegression @@ -39,6 +37,7 @@ def _update(self, output): def compute(self): if self._num_examples == 0: - raise NotComputableError('GeometricMeanRelativeAbsoluteError must have at least ' - 'one example before it can be computed.') + raise NotComputableError( + "GeometricMeanRelativeAbsoluteError must have at least " "one example before it can be computed." + ) return torch.exp(torch.mean(self._sum_of_errors / self._num_examples)).item() diff --git a/ignite/contrib/metrics/regression/manhattan_distance.py b/ignite/contrib/metrics/regression/manhattan_distance.py index c69195f0a973..dad6e98bf4dc 100644 --- a/ignite/contrib/metrics/regression/manhattan_distance.py +++ b/ignite/contrib/metrics/regression/manhattan_distance.py @@ -1,5 +1,3 @@ - - import torch from ignite.contrib.metrics.regression._base import _BaseRegression diff --git a/ignite/contrib/metrics/regression/maximum_absolute_error.py b/ignite/contrib/metrics/regression/maximum_absolute_error.py index 30a7f02d6319..85d6d5228c90 100644 --- a/ignite/contrib/metrics/regression/maximum_absolute_error.py +++ b/ignite/contrib/metrics/regression/maximum_absolute_error.py @@ -32,5 +32,5 @@ def _update(self, output): def compute(self): if self._max_of_absolute_errors < 0: - raise NotComputableError('MaximumAbsoluteError must have at least one example before it can be computed.') + raise NotComputableError("MaximumAbsoluteError must have at least one example before it can be computed.") return self._max_of_absolute_errors diff --git a/ignite/contrib/metrics/regression/mean_absolute_relative_error.py b/ignite/contrib/metrics/regression/mean_absolute_relative_error.py index 8a8ba44bf8ca..affcd0ac2569 100644 --- a/ignite/contrib/metrics/regression/mean_absolute_relative_error.py +++ b/ignite/contrib/metrics/regression/mean_absolute_relative_error.py @@ -1,5 +1,3 @@ - - import torch from ignite.exceptions import NotComputableError @@ -30,13 +28,14 @@ def reset(self): def _update(self, output): y_pred, y = output if (y == 0).any(): - raise NotComputableError('The ground truth has 0.') + raise NotComputableError("The ground truth has 0.") absolute_error = torch.abs(y_pred - y.view_as(y_pred)) / torch.abs(y.view_as(y_pred)) self._sum_of_absolute_relative_errors += torch.sum(absolute_error).item() self._num_samples += y.size()[0] def compute(self): if self._num_samples == 0: - raise NotComputableError('MeanAbsoluteRelativeError must have at least' - 'one sample before it can be computed.') + raise NotComputableError( + "MeanAbsoluteRelativeError must have at least" "one sample before it can be computed." 
+ ) return self._sum_of_absolute_relative_errors / self._num_samples diff --git a/ignite/contrib/metrics/regression/mean_error.py b/ignite/contrib/metrics/regression/mean_error.py index c7d7449a8b07..cc8e2483082f 100644 --- a/ignite/contrib/metrics/regression/mean_error.py +++ b/ignite/contrib/metrics/regression/mean_error.py @@ -1,5 +1,3 @@ - - import torch from ignite.exceptions import NotComputableError @@ -29,11 +27,11 @@ def reset(self): def _update(self, output): y_pred, y = output - errors = (y.view_as(y_pred) - y_pred) + errors = y.view_as(y_pred) - y_pred self._sum_of_errors += torch.sum(errors).item() self._num_examples += y.shape[0] def compute(self): if self._num_examples == 0: - raise NotComputableError('MeanError must have at least one example before it can be computed.') + raise NotComputableError("MeanError must have at least one example before it can be computed.") return self._sum_of_errors / self._num_examples diff --git a/ignite/contrib/metrics/regression/mean_normalized_bias.py b/ignite/contrib/metrics/regression/mean_normalized_bias.py index 6bd328eceab0..ece0f10bb7ce 100644 --- a/ignite/contrib/metrics/regression/mean_normalized_bias.py +++ b/ignite/contrib/metrics/regression/mean_normalized_bias.py @@ -1,5 +1,3 @@ - - import torch from ignite.exceptions import NotComputableError @@ -31,7 +29,7 @@ def _update(self, output): y_pred, y = output if (y == 0).any(): - raise NotComputableError('The ground truth has 0.') + raise NotComputableError("The ground truth has 0.") errors = (y.view_as(y_pred) - y_pred) / y self._sum_of_errors += torch.sum(errors).item() @@ -39,5 +37,5 @@ def _update(self, output): def compute(self): if self._num_examples == 0: - raise NotComputableError('MeanNormalizedBias must have at least one example before it can be computed.') + raise NotComputableError("MeanNormalizedBias must have at least one example before it can be computed.") return self._sum_of_errors / self._num_examples diff --git a/ignite/contrib/metrics/regression/median_absolute_percentage_error.py b/ignite/contrib/metrics/regression/median_absolute_percentage_error.py index fc3d9af51a44..b629d70354d9 100644 --- a/ignite/contrib/metrics/regression/median_absolute_percentage_error.py +++ b/ignite/contrib/metrics/regression/median_absolute_percentage_error.py @@ -1,5 +1,3 @@ - - import torch from ignite.contrib.metrics.regression._base import _BaseRegressionEpoch @@ -34,5 +32,6 @@ class MedianAbsolutePercentageError(_BaseRegressionEpoch): """ def __init__(self, output_transform=lambda x: x): - super(MedianAbsolutePercentageError, self).__init__(median_absolute_percentage_error_compute_fn, - output_transform) + super(MedianAbsolutePercentageError, self).__init__( + median_absolute_percentage_error_compute_fn, output_transform + ) diff --git a/ignite/contrib/metrics/regression/median_relative_absolute_error.py b/ignite/contrib/metrics/regression/median_relative_absolute_error.py index 7251b55ee21a..18584de56bb1 100644 --- a/ignite/contrib/metrics/regression/median_relative_absolute_error.py +++ b/ignite/contrib/metrics/regression/median_relative_absolute_error.py @@ -1,5 +1,3 @@ - - import torch from ignite.contrib.metrics.regression._base import _BaseRegressionEpoch @@ -34,5 +32,4 @@ class MedianRelativeAbsoluteError(_BaseRegressionEpoch): """ def __init__(self, output_transform=lambda x: x): - super(MedianRelativeAbsoluteError, self).__init__(median_relative_absolute_error_compute_fn, - output_transform) + super(MedianRelativeAbsoluteError, 
self).__init__(median_relative_absolute_error_compute_fn, output_transform) diff --git a/ignite/contrib/metrics/regression/r2_score.py b/ignite/contrib/metrics/regression/r2_score.py index a3c1be3b5634..6724841609ac 100644 --- a/ignite/contrib/metrics/regression/r2_score.py +++ b/ignite/contrib/metrics/regression/r2_score.py @@ -1,5 +1,3 @@ - - import torch from ignite.exceptions import NotComputableError @@ -36,5 +34,5 @@ def _update(self, output): def compute(self): if self._num_examples == 0: - raise NotComputableError('R2Score must have at least one example before it can be computed.') + raise NotComputableError("R2Score must have at least one example before it can be computed.") return 1 - self._sum_of_errors / (self._y_sq_sum - (self._y_sum ** 2) / self._num_examples) diff --git a/ignite/contrib/metrics/regression/wave_hedges_distance.py b/ignite/contrib/metrics/regression/wave_hedges_distance.py index 31c70f5e4118..bf70c15273af 100644 --- a/ignite/contrib/metrics/regression/wave_hedges_distance.py +++ b/ignite/contrib/metrics/regression/wave_hedges_distance.py @@ -1,5 +1,3 @@ - - import torch from ignite.contrib.metrics.regression._base import _BaseRegression diff --git a/ignite/engine/__init__.py b/ignite/engine/__init__.py index 528445552e19..4e418bb2af80 100644 --- a/ignite/engine/__init__.py +++ b/ignite/engine/__init__.py @@ -6,29 +6,31 @@ from ignite.utils import convert_tensor from ignite.metrics import Metric -__all__ = [ - 'create_supervised_trainer', - 'create_supervised_evaluator', - 'Engine', - 'Events' -] +__all__ = ["create_supervised_trainer", "create_supervised_evaluator", "Engine", "Events"] -def _prepare_batch(batch: Sequence[torch.Tensor], device: Optional[Union[str, torch.device]] = None, - non_blocking: bool = False): +def _prepare_batch( + batch: Sequence[torch.Tensor], device: Optional[Union[str, torch.device]] = None, non_blocking: bool = False +): """Prepare batch for training: pass to a device with options. """ x, y = batch - return (convert_tensor(x, device=device, non_blocking=non_blocking), - convert_tensor(y, device=device, non_blocking=non_blocking)) - - -def create_supervised_trainer(model: torch.nn.Module, optimizer: torch.optim.Optimizer, - loss_fn: Union[Callable, torch.nn.Module], - device: Optional[Union[str, torch.device]] = None, non_blocking: bool = False, - prepare_batch: Callable = _prepare_batch, - output_transform: Callable = lambda x, y, y_pred, loss: loss.item()) -> Engine: + return ( + convert_tensor(x, device=device, non_blocking=non_blocking), + convert_tensor(y, device=device, non_blocking=non_blocking), + ) + + +def create_supervised_trainer( + model: torch.nn.Module, + optimizer: torch.optim.Optimizer, + loss_fn: Union[Callable, torch.nn.Module], + device: Optional[Union[str, torch.device]] = None, + non_blocking: bool = False, + prepare_batch: Callable = _prepare_batch, + output_transform: Callable = lambda x, y, y_pred, loss: loss.item(), +) -> Engine: """ Factory function for creating a trainer for supervised models. 
@@ -67,10 +69,14 @@ def _update(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[t return Engine(_update) -def create_supervised_evaluator(model: torch.nn.Module, metrics: Optional[Dict[str, Metric]] = None, - device: Optional[Union[str, torch.device]] = None, non_blocking: bool = False, - prepare_batch: Callable = _prepare_batch, - output_transform: Callable = lambda x, y, y_pred: (y_pred, y,)) -> Engine: +def create_supervised_evaluator( + model: torch.nn.Module, + metrics: Optional[Dict[str, Metric]] = None, + device: Optional[Union[str, torch.device]] = None, + non_blocking: bool = False, + prepare_batch: Callable = _prepare_batch, + output_transform: Callable = lambda x, y, y_pred: (y_pred, y,), +) -> Engine: """ Factory function for creating an evaluator for supervised models. diff --git a/ignite/engine/engine.py b/ignite/engine/engine.py index 9da83e84dfbc..e4911aaf0aaf 100644 --- a/ignite/engine/engine.py +++ b/ignite/engine/engine.py @@ -1,4 +1,3 @@ - import logging import time from collections import defaultdict, OrderedDict @@ -14,9 +13,7 @@ from ignite.engine.utils import ReproducibleBatchSampler, _update_dataloader, _check_signature from ignite._utils import _to_hours_mins_secs -__all__ = [ - 'Engine' -] +__all__ = ["Engine"] class Engine: @@ -140,7 +137,7 @@ def __init__(self, process_function: Callable): if self._process_function is None: raise ValueError("Engine must be given a processing function in order to run.") - _check_signature(self, process_function, 'process_function', None) + _check_signature(self, process_function, "process_function", None) def register_events(self, *event_names: Union[str, int, Any], **kwargs) -> None: """Add events that can be fired. @@ -193,10 +190,10 @@ class TBPTT_Events(CallableEvents, Enum): # engine.state contains an attribute time_iteration, which can be accessed using engine.state.time_iteration """ # for python2 compatibility: - event_to_attr = kwargs.get('event_to_attr', None) + event_to_attr = kwargs.get("event_to_attr", None) if event_to_attr is not None: if not isinstance(event_to_attr, dict): - raise ValueError('Expected event_to_attr to be dictionary. Got {}.'.format(type(event_to_attr))) + raise ValueError("Expected event_to_attr to be dictionary. 
Got {}.".format(type(event_to_attr))) for e in event_names: self._allowed_events.append(e) @@ -205,7 +202,6 @@ class TBPTT_Events(CallableEvents, Enum): @staticmethod def _handler_wrapper(handler: Callable, event_name: str, event_filter: Callable) -> Callable: - def wrapper(engine: Engine, *args, **kwargs) -> Any: event = engine.state.get_event_attrib_value(event_name) if event_filter(engine, event): @@ -261,7 +257,7 @@ def print_epoch(engine): raise ValueError("Event {} is not a valid event for this Engine.".format(event_name)) event_args = (Exception(),) if event_name == Events.EXCEPTION_RAISED else () - _check_signature(self, handler, 'handler', *(event_args + args), **kwargs) + _check_signature(self, handler, "handler", *(event_args + args), **kwargs) self._event_handlers[event_name].append((handler, args, kwargs)) self.logger.debug("added handler for event %s.", event_name) @@ -271,8 +267,9 @@ def print_epoch(engine): @staticmethod def _assert_non_callable_event(event_name: str): if isinstance(event_name, EventWithFilter): - raise TypeError("Argument event_name should not be a callable event, " - "please use event without any event filtering") + raise TypeError( + "Argument event_name should not be a callable event, " "please use event without any event filtering" + ) def has_event_handler(self, handler: Callable, event_name: Optional[str] = None): """Check if the specified event has the specified handler. @@ -314,8 +311,11 @@ def remove_event_handler(self, handler: Callable, event_name: str): if event_name not in self._event_handlers: raise ValueError("Input event name '{}' does not exist".format(event_name)) - new_event_handlers = [(h, args, kwargs) for h, args, kwargs in self._event_handlers[event_name] - if not self._compare_handlers(handler, h)] + new_event_handlers = [ + (h, args, kwargs) + for h, args, kwargs in self._event_handlers[event_name] + if not self._compare_handlers(handler, h) + ] if len(new_event_handlers) == len(self._event_handlers[event_name]): raise ValueError("Input handler '{}' is not found among registered event handlers".format(handler)) self._event_handlers[event_name] = new_event_handlers @@ -392,8 +392,10 @@ def terminate(self) -> None: def terminate_epoch(self) -> None: """Sends terminate signal to the engine, so that it terminates the current epoch after the current iteration. """ - self.logger.info("Terminate current epoch is signaled. " - "Current epoch iteration will stop after current iteration is finished.") + self.logger.info( + "Terminate current epoch is signaled. " + "Current epoch iteration will stop after current iteration is finished." + ) self.should_terminate_single_epoch = True def _run_once_on_dataset(self) -> Tuple[int, int, int]: @@ -421,11 +423,13 @@ def _run_once_on_dataset(self) -> Tuple[int, int, int]: # Should exit while loop if we can not iterate if should_exit: if not self._is_done(self.state): - warnings.warn("Data iterator can not provide data anymore but required total number of " - "iterations to run is not reached. " - "Current iteration: {} vs Total iterations to run : {}" - .format(self.state.iteration, - self.state.epoch_length * self.state.max_epochs)) + warnings.warn( + "Data iterator can not provide data anymore but required total number of " + "iterations to run is not reached. 
" + "Current iteration: {} vs Total iterations to run : {}".format( + self.state.iteration, self.state.epoch_length * self.state.max_epochs + ) + ) break # set seed on restart of data iterator @@ -509,29 +513,39 @@ def load_state_dict(self, state_dict: Mapping) -> None: for k in self._state_dict_all_req_keys: if k not in state_dict: - raise ValueError("Required state attribute '{}' is absent in provided state_dict '{}'" - .format(k, state_dict.keys())) + raise ValueError( + "Required state attribute '{}' is absent in provided state_dict '{}'".format(k, state_dict.keys()) + ) opts = [k in state_dict for k in self._state_dict_one_of_opt_keys] if (not any(opts)) or (all(opts)): raise ValueError("state_dict should contain only one of '{}' keys".format(self._state_dict_one_of_opt_keys)) - self.state = State(seed=state_dict['seed'], max_epochs=state_dict['max_epochs'], - epoch_length=state_dict['epoch_length'], metrics={}) + self.state = State( + seed=state_dict["seed"], + max_epochs=state_dict["max_epochs"], + epoch_length=state_dict["epoch_length"], + metrics={}, + ) if "iteration" in state_dict: - self.state.iteration = state_dict['iteration'] + self.state.iteration = state_dict["iteration"] self.state.epoch = self.state.iteration // self.state.epoch_length elif "epoch" in state_dict: - self.state.epoch = state_dict['epoch'] + self.state.epoch = state_dict["epoch"] self.state.iteration = self.state.epoch_length * self.state.epoch @staticmethod def _is_done(state: State) -> bool: return state.iteration == state.epoch_length * state.max_epochs - def run(self, data: Iterable, max_epochs: Optional[int] = None, epoch_length: Optional[int] = None, - seed: Optional[int] = None) -> State: + def run( + self, + data: Iterable, + max_epochs: Optional[int] = None, + epoch_length: Optional[int] = None, + seed: Optional[int] = None, + ) -> State: """Runs the `process_function` over the passed data. 
Engine has a state and the following logic is applied in this function: @@ -598,8 +612,11 @@ def switch_batch(engine): self.state.seed = seed if epoch_length is not None: self.state.epoch_length = epoch_length - self.logger.info("Engine run resuming from iteration {}, epoch {} until {} epochs" - .format(self.state.iteration, self.state.epoch, self.state.max_epochs)) + self.logger.info( + "Engine run resuming from iteration {}, epoch {} until {} epochs".format( + self.state.iteration, self.state.epoch, self.state.max_epochs + ) + ) self.state.dataloader = data return self._internal_run() @@ -622,13 +639,16 @@ def _setup_engine(self) -> None: if _dataloader_kind == torch.utils.data.dataloader._DatasetKind.Map: if (self._dataloader_len is not None) and hasattr(self.state.dataloader.sampler, "epoch"): if self._dataloader_len != self.state.epoch_length: - warnings.warn("When defined engine's epoch length is different of input dataloader length, " - "distributed sampler indices can not be setup in a reproducible manner") + warnings.warn( + "When defined engine's epoch length is different of input dataloader length, " + "distributed sampler indices can not be setup in a reproducible manner" + ) batch_sampler = self.state.dataloader.batch_sampler if not isinstance(batch_sampler, ReproducibleBatchSampler): - self.state.dataloader = _update_dataloader(self.state.dataloader, - ReproducibleBatchSampler(batch_sampler)) + self.state.dataloader = _update_dataloader( + self.state.dataloader, ReproducibleBatchSampler(batch_sampler) + ) iteration = self.state.iteration self._dataloader_iter = self._from_iteration(self.state.dataloader, iteration) @@ -671,6 +691,7 @@ def _manual_seed(seed: int, epoch: int) -> None: torch.manual_seed(seed + epoch) try: import numpy as np + np.random.seed(seed + epoch) except ImportError: pass diff --git a/ignite/engine/events.py b/ignite/engine/events.py index 65ce2fabb27c..4a39c0b937f6 100644 --- a/ignite/engine/events.py +++ b/ignite/engine/events.py @@ -1,4 +1,3 @@ - from typing import Callable, Optional, Union, Any from enum import Enum @@ -8,14 +7,10 @@ from ignite.engine.utils import _check_signature -__all__ = [ - 'Events', - 'State' -] +__all__ = ["Events", "State"] class EventWithFilter: - def __init__(self, event: Any, filter: Callable): if not callable(filter): raise TypeError("Argument filter should be callable") @@ -46,8 +41,9 @@ def call_on_test_event_every(engine): """ - def __call__(self, event_filter: Optional[Callable] = None, - every: Optional[int] = None, once: Optional[int] = None): + def __call__( + self, event_filter: Optional[Callable] = None, every: Optional[int] = None, once: Optional[int] = None + ): if not ((event_filter is not None) ^ (every is not None) ^ (once is not None)): raise ValueError("Only one of the input arguments should be specified") @@ -127,6 +123,7 @@ def call_once(engine): Argument `event` is the value of iteration or epoch, depending on which type of Events the function is passed. 
""" + EPOCH_STARTED = "epoch_started" EPOCH_COMPLETED = "epoch_completed" STARTED = "started" diff --git a/ignite/engine/utils.py b/ignite/engine/utils.py index d65c2e00836f..d65c43e4881f 100644 --- a/ignite/engine/utils.py +++ b/ignite/engine/utils.py @@ -4,14 +4,15 @@ import torch -def _update_dataloader(dataloader: torch.utils.data.DataLoader, - new_batch_sampler: torch.utils.data.sampler.BatchSampler) -> torch.utils.data.DataLoader: +def _update_dataloader( + dataloader: torch.utils.data.DataLoader, new_batch_sampler: torch.utils.data.sampler.BatchSampler +) -> torch.utils.data.DataLoader: params_keys = [k for k in dataloader.__dict__.keys() if not k.startswith("_")] - for k in ['batch_size', 'sampler', 'drop_last', 'batch_sampler', 'dataset_kind']: + for k in ["batch_size", "sampler", "drop_last", "batch_sampler", "dataset_kind"]: if k in params_keys: params_keys.remove(k) params = {k: getattr(dataloader, k) for k in params_keys} - params['batch_sampler'] = new_batch_sampler + params["batch_sampler"] = new_batch_sampler return type(dataloader)(**params) @@ -39,7 +40,7 @@ def setup_batch_indices(self) -> None: self.batch_indices.append(batch) if self.start_iteration is not None: - self.batch_indices = self.batch_indices[self.start_iteration:] + self.batch_indices = self.batch_indices[self.start_iteration :] self.start_iteration = None def __iter__(self) -> Generator: @@ -63,6 +64,8 @@ def _check_signature(engine, fn: Callable, fn_description: str, *args, **kwargs) fn_params = list(signature.parameters) exception_msg = str(exc) passed_params = [engine] + list(args) + list(kwargs) - raise ValueError("Error adding {} '{}': " - "takes parameters {} but will be called with {} " - "({}).".format(fn, fn_description, fn_params, passed_params, exception_msg)) + raise ValueError( + "Error adding {} '{}': " + "takes parameters {} but will be called with {} " + "({}).".format(fn, fn_description, fn_params, passed_params, exception_msg) + ) diff --git a/ignite/exceptions.py b/ignite/exceptions.py index a64a85431862..eef2243cf97f 100644 --- a/ignite/exceptions.py +++ b/ignite/exceptions.py @@ -1,6 +1,4 @@ -__all__ = [ - 'NotComputableError' -] +__all__ = ["NotComputableError"] class NotComputableError(RuntimeError): diff --git a/ignite/handlers/__init__.py b/ignite/handlers/__init__.py index 39ca6932b143..e07139f3a12b 100644 --- a/ignite/handlers/__init__.py +++ b/ignite/handlers/__init__.py @@ -10,13 +10,13 @@ from ignite.handlers.terminate_on_nan import TerminateOnNan __all__ = [ - 'ModelCheckpoint', - 'Checkpoint', - 'DiskSaver', - 'Timer', - 'EarlyStopping', - 'TerminateOnNan', - 'global_step_from_engine' + "ModelCheckpoint", + "Checkpoint", + "DiskSaver", + "Timer", + "EarlyStopping", + "TerminateOnNan", + "global_step_from_engine", ] diff --git a/ignite/handlers/checkpoint.py b/ignite/handlers/checkpoint.py index d98bf19824fb..2f3092c1ab65 100644 --- a/ignite/handlers/checkpoint.py +++ b/ignite/handlers/checkpoint.py @@ -12,11 +12,7 @@ from ignite.engine import Events, Engine -__all__ = [ - 'Checkpoint', - 'DiskSaver', - 'ModelCheckpoint' -] +__all__ = ["Checkpoint", "DiskSaver", "ModelCheckpoint"] class Checkpoint: @@ -130,9 +126,17 @@ def score_function(engine): Item = namedtuple("Item", ["priority", "filename"]) - def __init__(self, to_save: dict, save_handler: Callable, filename_prefix: str = "", - score_function: Optional[Callable] = None, score_name: Optional[str] = None, - n_saved: Optional[int] = 1, global_step_transform: Callable = None, archived: bool = False): + def __init__( + 
self, + to_save: dict, + save_handler: Callable, + filename_prefix: str = "", + score_function: Optional[Callable] = None, + score_name: Optional[str] = None, + n_saved: Optional[int] = 1, + global_step_transform: Callable = None, + archived: bool = False, + ): if not isinstance(to_save, collections.Mapping): raise TypeError("Argument `to_save` should be a dictionary, but given {}".format(type(to_save))) @@ -144,12 +148,12 @@ def __init__(self, to_save: dict, save_handler: Callable, filename_prefix: str = raise TypeError("Argument `save_handler` should be callable") if score_function is None and score_name is not None: - raise ValueError("If `score_name` is provided, then `score_function` " - "should be also provided.") + raise ValueError("If `score_name` is provided, then `score_function` " "should be also provided.") if global_step_transform is not None and not callable(global_step_transform): - raise TypeError("global_step_transform should be a function, got {} instead." - .format(type(global_step_transform))) + raise TypeError( + "global_step_transform should be a function, got {} instead.".format(type(global_step_transform)) + ) self._check_objects(to_save, "state_dict") self._fname_prefix = filename_prefix + "_" if len(filename_prefix) > 0 else filename_prefix @@ -189,8 +193,9 @@ def __call__(self, engine: Engine) -> None: if self._check_lt_n_saved() or self._saved[0].priority < priority: - priority_str = "{}".format(priority) if isinstance(priority, numbers.Integral) \ - else "{:.4f}".format(priority) + priority_str = ( + "{}".format(priority) if isinstance(priority, numbers.Integral) else "{:.4f}".format(priority) + ) if self._score_name is not None: if len(suffix) > 0: @@ -210,7 +215,7 @@ def __call__(self, engine: Engine) -> None: for k in checkpoint: name = k checkpoint = checkpoint[name] - filename = '{}{}_{}{}'.format(self._fname_prefix, name, suffix, self._ext) + filename = "{}{}_{}{}".format(self._fname_prefix, name, suffix, self._ext) self.save_handler(checkpoint, filename) @@ -285,10 +290,12 @@ def __init__(self, dirname: str, atomic: bool = True, create_dir: bool = True, r if require_empty: matched = [fname for fname in os.listdir(dirname) if fname.endswith(".pth") or fname.endswith(".pth.tar")] if len(matched) > 0: - raise ValueError("Files {} with extension '.pth' or '.pth.tar' are already present " - "in the directory {}. If you want to use this " - "directory anyway, pass `require_empty=False`." - "".format(matched, dirname)) + raise ValueError( + "Files {} with extension '.pth' or '.pth.tar' are already present " + "in the directory {}. If you want to use this " + "directory anyway, pass `require_empty=False`." 
+ "".format(matched, dirname) + ) def __call__(self, checkpoint: Mapping, filename: str) -> None: path = os.path.join(self.dirname, filename) @@ -378,20 +385,29 @@ class ModelCheckpoint(Checkpoint): ['/tmp/models/myprefix_mymodel_6.pth'] """ - def __init__(self, dirname: str, filename_prefix: str, - save_interval: Optional[Callable] = None, - score_function: Optional[Callable] = None, score_name: Optional[str] = None, - n_saved: int = 1, - atomic: bool = True, require_empty: bool = True, - create_dir: bool = True, - save_as_state_dict: bool = True, global_step_transform: Optional[Callable] = None, - archived: bool = False): + def __init__( + self, + dirname: str, + filename_prefix: str, + save_interval: Optional[Callable] = None, + score_function: Optional[Callable] = None, + score_name: Optional[str] = None, + n_saved: int = 1, + atomic: bool = True, + require_empty: bool = True, + create_dir: bool = True, + save_as_state_dict: bool = True, + global_step_transform: Optional[Callable] = None, + archived: bool = False, + ): if not save_as_state_dict: raise ValueError("Argument save_as_state_dict is deprecated and should be True") if save_interval is not None: - msg = "Argument save_interval is deprecated and should be None. " \ - "Please, use events filtering instead, e.g. Events.ITERATION_STARTED(every=1000)" + msg = ( + "Argument save_interval is deprecated and should be None. " + "Please, use events filtering instead, e.g. Events.ITERATION_STARTED(every=1000)" + ) if save_interval == 1: # Do not break for old version who used `save_interval=1` warnings.warn(msg) @@ -402,12 +418,12 @@ def __init__(self, dirname: str, filename_prefix: str, disk_saver = DiskSaver(dirname, atomic=atomic, create_dir=create_dir, require_empty=require_empty) if score_function is None and score_name is not None: - raise ValueError("If `score_name` is provided, then `score_function` " - "should be also provided.") + raise ValueError("If `score_name` is provided, then `score_function` " "should be also provided.") if global_step_transform is not None and not callable(global_step_transform): - raise TypeError("global_step_transform should be a function, got {} instead." 
- .format(type(global_step_transform))) + raise TypeError( + "global_step_transform should be a function, got {} instead.".format(type(global_step_transform)) + ) self._fname_prefix = filename_prefix + "_" if len(filename_prefix) > 0 else filename_prefix self.save_handler = disk_saver diff --git a/ignite/handlers/early_stopping.py b/ignite/handlers/early_stopping.py index fe93fe7d27c7..28d5002d61ad 100644 --- a/ignite/handlers/early_stopping.py +++ b/ignite/handlers/early_stopping.py @@ -3,9 +3,7 @@ from ignite.engine import Engine -__all__ = [ - 'EarlyStopping' -] +__all__ = ["EarlyStopping"] class EarlyStopping: @@ -43,8 +41,14 @@ def score_function(engine): """ - def __init__(self, patience: int, score_function: Callable, trainer: Engine, min_delta: float = 0., - cumulative_delta: bool = False): + def __init__( + self, + patience: int, + score_function: Callable, + trainer: Engine, + min_delta: float = 0.0, + cumulative_delta: bool = False, + ): if not callable(score_function): raise TypeError("Argument score_function should be a function.") @@ -52,7 +56,7 @@ def __init__(self, patience: int, score_function: Callable, trainer: Engine, min if patience < 1: raise ValueError("Argument patience should be positive integer.") - if min_delta < 0.: + if min_delta < 0.0: raise ValueError("Argument min_delta should not be a negative number.") if not isinstance(trainer, Engine): diff --git a/ignite/handlers/terminate_on_nan.py b/ignite/handlers/terminate_on_nan.py index a4178ad0a794..3ef74b32c995 100644 --- a/ignite/handlers/terminate_on_nan.py +++ b/ignite/handlers/terminate_on_nan.py @@ -7,9 +7,7 @@ from ignite.utils import apply_to_type from ignite.engine import Engine -__all__ = [ - 'TerminateOnNan' -] +__all__ = ["TerminateOnNan"] class TerminateOnNan: @@ -53,6 +51,7 @@ def raise_error(x: Union[numbers.Number, torch.Tensor]) -> None: try: apply_to_type(output, (numbers.Number, torch.Tensor), raise_error) except RuntimeError: - self.logger.warning("{}: Output '{}' contains NaN or Inf. Stop training" - .format(self.__class__.__name__, output)) + self.logger.warning( + "{}: Output '{}' contains NaN or Inf. Stop training".format(self.__class__.__name__, output) + ) engine.terminate() diff --git a/ignite/handlers/timing.py b/ignite/handlers/timing.py index 0bc5423b90fe..9ac83630a849 100644 --- a/ignite/handlers/timing.py +++ b/ignite/handlers/timing.py @@ -1,12 +1,9 @@ - from time import perf_counter from typing import Optional from ignite.engine import Events, Engine -__all__ = [ - 'Timer' -] +__all__ = ["Timer"] class Timer: @@ -83,12 +80,18 @@ def __init__(self, average: bool = False): self._average = average self._t0 = perf_counter() - self.total = 0. - self.step_count = 0. + self.total = 0.0 + self.step_count = 0.0 self.running = True - def attach(self, engine: Engine, start: str = Events.STARTED, - pause: str = Events.COMPLETED, resume: Optional[str] = None, step: Optional[str] = None): + def attach( + self, + engine: Engine, + start: str = Events.STARTED, + pause: str = Events.COMPLETED, + resume: Optional[str] = None, + step: Optional[str] = None, + ): """ Register callbacks to control the timer. Args: @@ -139,14 +142,14 @@ def value(self) -> float: total += self._elapsed() if self._average: - denominator = max(self.step_count, 1.) + denominator = max(self.step_count, 1.0) else: - denominator = 1. + denominator = 1.0 return total / denominator def step(self, *args) -> None: - self.step_count += 1. 
+ self.step_count += 1.0 def _elapsed(self) -> float: return perf_counter() - self._t0 diff --git a/ignite/metrics/__init__.py b/ignite/metrics/__init__.py index 29cf57d6b9a4..74721194dc36 100644 --- a/ignite/metrics/__init__.py +++ b/ignite/metrics/__init__.py @@ -17,26 +17,26 @@ from ignite.metrics.frequency import Frequency __all__ = [ - 'Metric', - 'Accuracy', - 'Loss', - 'MetricsLambda', - 'MeanAbsoluteError', - 'MeanPairwiseDistance', - 'MeanSquaredError', - 'ConfusionMatrix', - 'TopKCategoricalAccuracy', - 'Average', - 'DiceCoefficient', - 'EpochMetric', - 'Fbeta', - 'GeometricAverage', - 'IoU', - 'mIoU', - 'Precision', - 'Recall', - 'RootMeanSquaredError', - 'RunningAverage', - 'VariableAccumulation', - 'Frequency' + "Metric", + "Accuracy", + "Loss", + "MetricsLambda", + "MeanAbsoluteError", + "MeanPairwiseDistance", + "MeanSquaredError", + "ConfusionMatrix", + "TopKCategoricalAccuracy", + "Average", + "DiceCoefficient", + "EpochMetric", + "Fbeta", + "GeometricAverage", + "IoU", + "mIoU", + "Precision", + "Recall", + "RootMeanSquaredError", + "RunningAverage", + "VariableAccumulation", + "Frequency", ] diff --git a/ignite/metrics/accumulation.py b/ignite/metrics/accumulation.py index 4e1c9af95378..f071734e3f90 100644 --- a/ignite/metrics/accumulation.py +++ b/ignite/metrics/accumulation.py @@ -8,11 +8,7 @@ import torch -__all__ = [ - 'VariableAccumulation', - 'GeometricAverage', - 'Average' -] +__all__ = ["VariableAccumulation", "GeometricAverage", "Average"] class VariableAccumulation(Metric): @@ -43,10 +39,12 @@ class VariableAccumulation(Metric): initialized and available, device is set to `cuda`. """ + _required_output_keys = None - def __init__(self, op: Callable, output_transform: Callable = lambda x: x, - device: Optional[Union[str, torch.device]] = None): + def __init__( + self, op: Callable, output_transform: Callable = lambda x: x, device: Optional[Union[str, torch.device]] = None + ): if not callable(op): raise TypeError("Argument op should be a callable, but given {}".format(type(op))) self.accumulator = None @@ -60,8 +58,7 @@ def reset(self) -> None: self.accumulator = torch.tensor(0.0, dtype=torch.float64, device=self._device) self.num_examples = torch.tensor(0.0, dtype=torch.long, device=self._device) - def _check_output_type(self, output: Union[Any, torch.Tensor, - numbers.Number]) -> None: + def _check_output_type(self, output: Union[Any, torch.Tensor, numbers.Number]) -> None: if not (isinstance(output, numbers.Number) or isinstance(output, torch.Tensor)): raise TypeError("Output should be a number or torch.Tensor, but given {}".format(type(output))) @@ -75,12 +72,12 @@ def update(self, output: Union[Any, torch.Tensor, numbers.Number]) -> None: output = output.to(self._device) self.accumulator = self._op(self.accumulator, output) - if hasattr(output, 'shape'): + if hasattr(output, "shape"): self.num_examples += output.shape[0] if len(output.shape) > 1 else 1 else: self.num_examples += 1 - @sync_all_reduce('accumulator', 'num_examples') + @sync_all_reduce("accumulator", "num_examples") def compute(self) -> list: return [self.accumulator, self.num_examples] @@ -124,9 +121,7 @@ class Average(VariableAccumulation): """ - def __init__(self, output_transform: Callable = lambda x: x, - device: Optional[Union[str, torch.device]] = None): - + def __init__(self, output_transform: Callable = lambda x: x, device: Optional[Union[str, torch.device]] = None): def _mean_op(a, x): if isinstance(x, torch.Tensor) and x.ndim > 1: x = x.sum(dim=0) @@ -134,11 +129,12 @@ def _mean_op(a, 
x): super(Average, self).__init__(op=_mean_op, output_transform=output_transform, device=device) - @sync_all_reduce('accumulator', 'num_examples') + @sync_all_reduce("accumulator", "num_examples") def compute(self) -> Union[Any, torch.Tensor, numbers.Number]: if self.num_examples < 1: - raise NotComputableError("{} must have at least one example before" - " it can be computed.".format(self.__class__.__name__)) + raise NotComputableError( + "{} must have at least one example before" " it can be computed.".format(self.__class__.__name__) + ) return self.accumulator / self.num_examples @@ -170,11 +166,8 @@ class GeometricAverage(VariableAccumulation): """ - def __init__(self, output_transform: Callable = lambda x: x, - device: Optional[Union[str, torch.device]] = None): - - def _geom_op(a: torch.Tensor, x: Union[Any, numbers.Number, - torch.Tensor]) -> torch.Tensor: + def __init__(self, output_transform: Callable = lambda x: x, device: Optional[Union[str, torch.device]] = None): + def _geom_op(a: torch.Tensor, x: Union[Any, numbers.Number, torch.Tensor]) -> torch.Tensor: if not isinstance(x, torch.Tensor): x = torch.tensor(x) x = torch.log(x) @@ -184,10 +177,11 @@ def _geom_op(a: torch.Tensor, x: Union[Any, numbers.Number, super(GeometricAverage, self).__init__(op=_geom_op, output_transform=output_transform, device=device) - @sync_all_reduce('accumulator', 'num_examples') + @sync_all_reduce("accumulator", "num_examples") def compute(self) -> torch.Tensor: if self.num_examples < 1: - raise NotComputableError("{} must have at least one example before" - " it can be computed.".format(self.__class__.__name__)) + raise NotComputableError( + "{} must have at least one example before" " it can be computed.".format(self.__class__.__name__) + ) return torch.exp(self.accumulator / self.num_examples) diff --git a/ignite/metrics/accuracy.py b/ignite/metrics/accuracy.py index 5db9c0b745b8..699ba055e61c 100644 --- a/ignite/metrics/accuracy.py +++ b/ignite/metrics/accuracy.py @@ -6,15 +6,16 @@ import torch -__all__ = [ - 'Accuracy' -] +__all__ = ["Accuracy"] class _BaseClassification(Metric): - - def __init__(self, output_transform: Callable = lambda x: x, is_multilabel: bool = False, - device: Optional[Union[str, torch.device]] = None): + def __init__( + self, + output_transform: Callable = lambda x: x, + is_multilabel: bool = False, + device: Optional[Union[str, torch.device]] = None, + ): self._is_multilabel = is_multilabel self._type = None self._num_classes = None @@ -28,9 +29,11 @@ def _check_shape(self, output: Sequence[torch.Tensor]) -> None: y_pred, y = output if not (y.ndimension() == y_pred.ndimension() or y.ndimension() + 1 == y_pred.ndimension()): - raise ValueError("y must have shape of (batch_size, ...) and y_pred must have " - "shape of (batch_size, num_categories, ...) or (batch_size, ...), " - "but given {} vs {}.".format(y.shape, y_pred.shape)) + raise ValueError( + "y must have shape of (batch_size, ...) and y_pred must have " + "shape of (batch_size, num_categories, ...) or (batch_size, ...), " + "but given {} vs {}.".format(y.shape, y_pred.shape) + ) y_shape = y.shape y_pred_shape = y_pred.shape @@ -73,8 +76,10 @@ def _check_type(self, output: Sequence[torch.Tensor]) -> None: update_type = "binary" num_classes = 1 else: - raise RuntimeError("Invalid shapes of y (shape={}) and y_pred (shape={}), check documentation." 
- " for expected shapes of y and y_pred.".format(y.shape, y_pred.shape)) + raise RuntimeError( + "Invalid shapes of y (shape={}) and y_pred (shape={}), check documentation." + " for expected shapes of y and y_pred.".format(y.shape, y_pred.shape) + ) if self._type is None: self._type = update_type self._num_classes = num_classes @@ -82,8 +87,9 @@ def _check_type(self, output: Sequence[torch.Tensor]) -> None: if self._type != update_type: raise RuntimeError("Input data type has changed from {} to {}.".format(self._type, update_type)) if self._num_classes != num_classes: - raise ValueError("Input data number of classes has changed from {} to {}" - .format(self._num_classes, num_classes)) + raise ValueError( + "Input data number of classes has changed from {} to {}".format(self._num_classes, num_classes) + ) class Accuracy(_BaseClassification): @@ -121,14 +127,15 @@ def thresholded_output_transform(output): """ - def __init__(self, output_transform: Callable = lambda x: x, - is_multilabel: bool = False, - device: Optional[Union[str, torch.device]] = None): + def __init__( + self, + output_transform: Callable = lambda x: x, + is_multilabel: bool = False, + device: Optional[Union[str, torch.device]] = None, + ): self._num_correct = None self._num_examples = None - super(Accuracy, self).__init__(output_transform=output_transform, - is_multilabel=is_multilabel, - device=device) + super(Accuracy, self).__init__(output_transform=output_transform, is_multilabel=is_multilabel, device=device) @reinit__is_reduced def reset(self) -> None: @@ -161,5 +168,5 @@ def update(self, output: Sequence[torch.Tensor]) -> None: @sync_all_reduce("_num_examples", "_num_correct") def compute(self) -> torch.Tensor: if self._num_examples == 0: - raise NotComputableError('Accuracy must have at least one example before it can be computed.') + raise NotComputableError("Accuracy must have at least one example before it can be computed.") return self._num_correct / self._num_examples diff --git a/ignite/metrics/confusion_matrix.py b/ignite/metrics/confusion_matrix.py index 478895907683..d4d986ee51c7 100644 --- a/ignite/metrics/confusion_matrix.py +++ b/ignite/metrics/confusion_matrix.py @@ -7,15 +7,7 @@ from ignite.exceptions import NotComputableError from ignite.metrics.metric import sync_all_reduce, reinit__is_reduced -__all__ = [ - 'ConfusionMatrix', - 'mIoU', - 'IoU', - 'DiceCoefficient', - 'cmAccuracy', - 'cmPrecision', - 'cmRecall' -] +__all__ = ["ConfusionMatrix", "mIoU", "IoU", "DiceCoefficient", "cmAccuracy", "cmPrecision", "cmRecall"] class ConfusionMatrix(Metric): @@ -50,8 +42,13 @@ class ConfusionMatrix(Metric): """ - def __init__(self, num_classes: int, average: Optional[str] = None, output_transform: Callable = lambda x: x, - device: Optional[Union[str, torch.device]] = None): + def __init__( + self, + num_classes: int, + average: Optional[str] = None, + output_transform: Callable = lambda x: x, + device: Optional[Union[str, torch.device]] = None, + ): if average is not None and average not in ("samples", "recall", "precision"): raise ValueError("Argument average can None or one of ['samples', 'recall', 'precision']") @@ -63,26 +60,28 @@ def __init__(self, num_classes: int, average: Optional[str] = None, output_trans @reinit__is_reduced def reset(self) -> None: - self.confusion_matrix = torch.zeros(self.num_classes, self.num_classes, - dtype=torch.int64, - device=self._device) + self.confusion_matrix = torch.zeros(self.num_classes, self.num_classes, dtype=torch.int64, device=self._device) self._num_examples = 0 
def _check_shape(self, output: Sequence[torch.Tensor]) -> None: y_pred, y = output if y_pred.ndimension() < 2: - raise ValueError("y_pred must have shape (batch_size, num_categories, ...), " - "but given {}".format(y_pred.shape)) + raise ValueError( + "y_pred must have shape (batch_size, num_categories, ...), " "but given {}".format(y_pred.shape) + ) if y_pred.shape[1] != self.num_classes: - raise ValueError("y_pred does not have correct number of categories: {} vs {}" - .format(y_pred.shape[1], self.num_classes)) + raise ValueError( + "y_pred does not have correct number of categories: {} vs {}".format(y_pred.shape[1], self.num_classes) + ) if not (y.ndimension() + 1 == y_pred.ndimension()): - raise ValueError("y_pred must have shape (batch_size, num_categories, ...) and y must have " - "shape of (batch_size, ...), " - "but given {} vs {}.".format(y.shape, y_pred.shape)) + raise ValueError( + "y_pred must have shape (batch_size, num_categories, ...) and y must have " + "shape of (batch_size, ...), " + "but given {} vs {}.".format(y.shape, y_pred.shape) + ) y_shape = y.shape y_pred_shape = y_pred.shape @@ -112,10 +111,10 @@ def update(self, output: Sequence[torch.Tensor]) -> None: m = torch.bincount(indices, minlength=self.num_classes ** 2).reshape(self.num_classes, self.num_classes) self.confusion_matrix += m.to(self.confusion_matrix) - @sync_all_reduce('confusion_matrix', '_num_examples') + @sync_all_reduce("confusion_matrix", "_num_examples") def compute(self) -> torch.Tensor: if self._num_examples == 0: - raise NotComputableError('Confusion matrix must have at least one example before it can be computed.') + raise NotComputableError("Confusion matrix must have at least one example before it can be computed.") if self.average: self.confusion_matrix = self.confusion_matrix.float() if self.average == "samples": @@ -164,8 +163,9 @@ def IoU(cm: ConfusionMatrix, ignore_index: Optional[int] = None) -> MetricsLambd def ignore_index_fn(iou_vector): if ignore_index >= len(iou_vector): - raise ValueError("ignore_index {} is larger than the length of IoU vector {}" - .format(ignore_index, len(iou_vector))) + raise ValueError( + "ignore_index {} is larger than the length of IoU vector {}".format(ignore_index, len(iou_vector)) + ) indices = list(range(len(iou_vector))) indices.remove(ignore_index) return iou_vector[indices] @@ -275,8 +275,9 @@ def DiceCoefficient(cm: ConfusionMatrix, ignore_index: Optional[int] = None) -> def ignore_index_fn(dice_vector: torch.Tensor) -> torch.Tensor: if ignore_index >= len(dice_vector): - raise ValueError("ignore_index {} is larger than the length of Dice vector {}" - .format(ignore_index, len(dice_vector))) + raise ValueError( + "ignore_index {} is larger than the length of Dice vector {}".format(ignore_index, len(dice_vector)) + ) indices = list(range(len(dice_vector))) indices.remove(ignore_index) return dice_vector[indices] diff --git a/ignite/metrics/epoch_metric.py b/ignite/metrics/epoch_metric.py index 3c6033745cc3..1ecc173fd93f 100644 --- a/ignite/metrics/epoch_metric.py +++ b/ignite/metrics/epoch_metric.py @@ -5,9 +5,7 @@ from ignite.metrics.metric import Metric -__all__ = [ - 'EpochMetric' -] +__all__ = ["EpochMetric"] class EpochMetric(Metric): @@ -44,7 +42,7 @@ def __init__(self, compute_fn: Callable, output_transform: Callable = lambda x: if not callable(compute_fn): raise TypeError("Argument compute_fn should be callable.") - super(EpochMetric, self).__init__(output_transform=output_transform, device='cpu') + super(EpochMetric, 
self).__init__(output_transform=output_transform, device="cpu") self.compute_fn = compute_fn def reset(self) -> None: @@ -81,8 +79,7 @@ def update(self, output: Sequence[torch.Tensor]) -> None: try: self.compute_fn(self._predictions, self._targets) except Exception as e: - warnings.warn("Probably, there can be a problem with `compute_fn`:\n {}.".format(e), - EpochMetricWarning) + warnings.warn("Probably, there can be a problem with `compute_fn`:\n {}.".format(e), EpochMetricWarning) def compute(self) -> None: return self.compute_fn(self._predictions, self._targets) diff --git a/ignite/metrics/fbeta.py b/ignite/metrics/fbeta.py index 35d71f21fa43..e0b34698004d 100644 --- a/ignite/metrics/fbeta.py +++ b/ignite/metrics/fbeta.py @@ -1,17 +1,20 @@ from typing import Optional, Union, Callable -__all__ = [ - 'Fbeta' -] +__all__ = ["Fbeta"] import torch from ignite.metrics import Precision, Recall, MetricsLambda -def Fbeta(beta: float, average: bool = True, precision: Optional[Precision] = None, - recall: Optional[Recall] = None, output_transform: Optional[Callable] = None, - device: Optional[Union[str, torch.device]] = None) -> MetricsLambda: +def Fbeta( + beta: float, + average: bool = True, + precision: Optional[Precision] = None, + recall: Optional[Recall] = None, + output_transform: Optional[Callable] = None, + device: Optional[Union[str, torch.device]] = None, +) -> MetricsLambda: """Calculates F-beta score Args: @@ -41,14 +44,20 @@ def Fbeta(beta: float, average: bool = True, precision: Optional[Precision] = No raise ValueError("If recall argument is provided, output_transform should be None") if precision is None: - precision = Precision(output_transform=(lambda x: x) if output_transform is None else output_transform, - average=False, device=device) + precision = Precision( + output_transform=(lambda x: x) if output_transform is None else output_transform, + average=False, + device=device, + ) elif precision._average: raise ValueError("Input precision metric should have average=False") if recall is None: - recall = Recall(output_transform=(lambda x: x) if output_transform is None else output_transform, - average=False, device=device) + recall = Recall( + output_transform=(lambda x: x) if output_transform is None else output_transform, + average=False, + device=device, + ) elif recall._average: raise ValueError("Input recall metric should have average=False") diff --git a/ignite/metrics/loss.py b/ignite/metrics/loss.py index ca3456e96d44..5d3a3fb282b8 100644 --- a/ignite/metrics/loss.py +++ b/ignite/metrics/loss.py @@ -6,9 +6,7 @@ from ignite.metrics import Metric from ignite.metrics.metric import sync_all_reduce, reinit__is_reduced -__all__ = [ - 'Loss' -] +__all__ = ["Loss"] class Loss(Metric): @@ -35,10 +33,16 @@ class Loss(Metric): initialized and available, device is set to `cuda`. 
""" + _required_output_keys = None - def __init__(self, loss_fn: Callable, output_transform: Callable = lambda x: x, - batch_size: Callable = lambda x: len(x), device: Optional[Union[str, torch.device]] = None): + def __init__( + self, + loss_fn: Callable, + output_transform: Callable = lambda x: x, + batch_size: Callable = lambda x: len(x), + device: Optional[Union[str, torch.device]] = None, + ): super(Loss, self).__init__(output_transform, device=device) self._loss_fn = loss_fn self._batch_size = batch_size @@ -58,7 +62,7 @@ def update(self, output: Sequence[Union[torch.Tensor, dict]]) -> None: average_loss = self._loss_fn(y_pred, y, **kwargs) if len(average_loss.shape) != 0: - raise ValueError('loss_fn did not return the average loss.') + raise ValueError("loss_fn did not return the average loss.") N = self._batch_size(y) self._sum += average_loss.item() * N @@ -67,6 +71,5 @@ def update(self, output: Sequence[Union[torch.Tensor, dict]]) -> None: @sync_all_reduce("_sum", "_num_examples") def compute(self) -> None: if self._num_examples == 0: - raise NotComputableError( - 'Loss must have at least one example before it can be computed.') + raise NotComputableError("Loss must have at least one example before it can be computed.") return self._sum / self._num_examples diff --git a/ignite/metrics/mean_absolute_error.py b/ignite/metrics/mean_absolute_error.py index 454e5b36e4ab..11bf5e3d5eb6 100644 --- a/ignite/metrics/mean_absolute_error.py +++ b/ignite/metrics/mean_absolute_error.py @@ -6,9 +6,7 @@ from ignite.metrics.metric import Metric from ignite.metrics.metric import sync_all_reduce, reinit__is_reduced -__all__ = [ - 'MeanAbsoluteError' -] +__all__ = ["MeanAbsoluteError"] class MeanAbsoluteError(Metric): @@ -33,5 +31,5 @@ def update(self, output: Sequence[torch.Tensor]) -> None: @sync_all_reduce("_sum_of_absolute_errors", "_num_examples") def compute(self) -> Union[float, torch.Tensor]: if self._num_examples == 0: - raise NotComputableError('MeanAbsoluteError must have at least one example before it can be computed.') + raise NotComputableError("MeanAbsoluteError must have at least one example before it can be computed.") return self._sum_of_absolute_errors / self._num_examples diff --git a/ignite/metrics/mean_pairwise_distance.py b/ignite/metrics/mean_pairwise_distance.py index 0824e7bf96ef..e8f9e85c3c4e 100644 --- a/ignite/metrics/mean_pairwise_distance.py +++ b/ignite/metrics/mean_pairwise_distance.py @@ -7,9 +7,7 @@ from ignite.metrics.metric import Metric from ignite.metrics.metric import sync_all_reduce, reinit__is_reduced -__all__ = [ - 'MeanPairwiseDistance' -] +__all__ = ["MeanPairwiseDistance"] class MeanPairwiseDistance(Metric): @@ -19,8 +17,13 @@ class MeanPairwiseDistance(Metric): - `update` must receive output of the form `(y_pred, y)` or `{'y_pred': y_pred, 'y': y}`. 
""" - def __init__(self, p: int = 2, eps: float = 1e-6, output_transform: Callable = lambda x: x, - device: Optional[Union[str, torch.device]] = None): + def __init__( + self, + p: int = 2, + eps: float = 1e-6, + output_transform: Callable = lambda x: x, + device: Optional[Union[str, torch.device]] = None, + ): super(MeanPairwiseDistance, self).__init__(output_transform, device=device) self._p = p self._eps = eps @@ -40,5 +43,5 @@ def update(self, output: Sequence[torch.Tensor]) -> None: @sync_all_reduce("_sum_of_distances", "_num_examples") def compute(self) -> Union[float, torch.Tensor]: if self._num_examples == 0: - raise NotComputableError('MeanAbsoluteError must have at least one example before it can be computed.') + raise NotComputableError("MeanAbsoluteError must have at least one example before it can be computed.") return self._sum_of_distances / self._num_examples diff --git a/ignite/metrics/mean_squared_error.py b/ignite/metrics/mean_squared_error.py index 42e27a7c1b7c..5c60623cc2cc 100644 --- a/ignite/metrics/mean_squared_error.py +++ b/ignite/metrics/mean_squared_error.py @@ -6,9 +6,7 @@ from ignite.metrics.metric import Metric from ignite.metrics.metric import sync_all_reduce, reinit__is_reduced -__all__ = [ - 'MeanSquaredError' -] +__all__ = ["MeanSquaredError"] class MeanSquaredError(Metric): @@ -33,5 +31,5 @@ def update(self, output: Sequence[torch.Tensor]) -> None: @sync_all_reduce("_sum_of_squared_errors", "_num_examples") def compute(self) -> Union[float, torch.Tensor]: if self._num_examples == 0: - raise NotComputableError('MeanSquaredError must have at least one example before it can be computed.') + raise NotComputableError("MeanSquaredError must have at least one example before it can be computed.") return self._sum_of_squared_errors / self._num_examples diff --git a/ignite/metrics/metric.py b/ignite/metrics/metric.py index 0503ccec3b12..1ad6704993c4 100644 --- a/ignite/metrics/metric.py +++ b/ignite/metrics/metric.py @@ -11,9 +11,7 @@ from ignite.engine import Events, Engine -__all__ = [ - 'Metric' -] +__all__ = ["Metric"] class Metric(metaclass=ABCMeta): @@ -32,10 +30,10 @@ class Metric(metaclass=ABCMeta): initialized and available, device is set to `cuda`. """ + _required_output_keys = ("y_pred", "y") - def __init__(self, output_transform: Callable = lambda x: x, - device: Optional[Union[str, torch.device]] = None): + def __init__(self, output_transform: Callable = lambda x: x, device: Optional[Union[str, torch.device]] = None): self._output_transform = output_transform # Check device if distributed is initialized: @@ -43,9 +41,11 @@ def __init__(self, output_transform: Callable = lambda x: x, # check if reset and update methods are decorated. Compute may not be decorated if not (hasattr(self.reset, "_decorated") and hasattr(self.update, "_decorated")): - warnings.warn("{} class does not support distributed setting. Computed result is not collected " - "across all computing devices".format(self.__class__.__name__), - RuntimeWarning) + warnings.warn( + "{} class does not support distributed setting. 
Computed result is not collected " + "across all computing devices".format(self.__class__.__name__), + RuntimeWarning, + ) if device is None: device = "cuda" device = torch.device(device) @@ -89,10 +89,7 @@ def compute(self) -> Any: """ pass - def _sync_all_reduce( - self, - tensor: Union[torch.Tensor, numbers.Number]) -> Union[torch.Tensor, - numbers.Number]: + def _sync_all_reduce(self, tensor: Union[torch.Tensor, numbers.Number]) -> Union[torch.Tensor, numbers.Number]: if not (dist.is_available() and dist.is_initialized()): # Nothing to reduce return tensor @@ -125,12 +122,16 @@ def iteration_completed(self, engine: Engine) -> None: output = self._output_transform(engine.state.output) if isinstance(output, Mapping): if self._required_output_keys is None: - raise TypeError("Transformed engine output for {} metric should be a tuple/list, but given {}" - .format(self.__class__.__name__, type(output))) + raise TypeError( + "Transformed engine output for {} metric should be a tuple/list, but given {}".format( + self.__class__.__name__, type(output) + ) + ) if not all([k in output for k in self._required_output_keys]): - raise ValueError("When transformed engine's output is a mapping, " - "it should contain {} keys, but given {}".format(self._required_output_keys, - list(output.keys()))) + raise ValueError( + "When transformed engine's output is a mapping, " + "it should contain {} keys, but given {}".format(self._required_output_keys, list(output.keys())) + ) output = tuple(output[k] for k in self._required_output_keys) self.update(output) @@ -149,58 +150,72 @@ def attach(self, engine: Engine, name: str) -> None: def __add__(self, other): from ignite.metrics import MetricsLambda + return MetricsLambda(lambda x, y: x + y, self, other) def __radd__(self, other): from ignite.metrics import MetricsLambda + return MetricsLambda(lambda x, y: x + y, other, self) def __sub__(self, other): from ignite.metrics import MetricsLambda + return MetricsLambda(lambda x, y: x - y, self, other) def __rsub__(self, other): from ignite.metrics import MetricsLambda + return MetricsLambda(lambda x, y: x - y, other, self) def __mul__(self, other): from ignite.metrics import MetricsLambda + return MetricsLambda(lambda x, y: x * y, self, other) def __rmul__(self, other): from ignite.metrics import MetricsLambda + return MetricsLambda(lambda x, y: x * y, other, self) def __pow__(self, other): from ignite.metrics import MetricsLambda + return MetricsLambda(lambda x, y: x ** y, self, other) def __rpow__(self, other): from ignite.metrics import MetricsLambda + return MetricsLambda(lambda x, y: x ** y, other, self) def __mod__(self, other): from ignite.metrics import MetricsLambda + return MetricsLambda(lambda x, y: x % y, self, other) def __div__(self, other): from ignite.metrics import MetricsLambda + return MetricsLambda(lambda x, y: x.__div__(y), self, other) def __rdiv__(self, other): from ignite.metrics import MetricsLambda + return MetricsLambda(lambda x, y: x.__div__(y), other, self) def __truediv__(self, other): from ignite.metrics import MetricsLambda + return MetricsLambda(lambda x, y: x.__truediv__(y), self, other) def __rtruediv__(self, other): from ignite.metrics import MetricsLambda + return MetricsLambda(lambda x, y: x.__truediv__(y), other, self) def __floordiv__(self, other): from ignite.metrics import MetricsLambda + return MetricsLambda(lambda x, y: x // y, self, other) def __getattr__(self, attr: str) -> Callable: @@ -216,17 +231,18 @@ def wrapper(*args, **kwargs): def __getitem__(self, index: Any): 
from ignite.metrics import MetricsLambda + return MetricsLambda(lambda x: x[index], self) def sync_all_reduce(*attrs) -> Callable: def wrapper(func: Callable) -> Callable: - @wraps(func) def another_wrapper(self: Metric, *args, **kwargs) -> Callable: if not isinstance(self, Metric): - raise RuntimeError("Decorator sync_all_reduce should be used on " - "ignite.metric.Metric class methods only") + raise RuntimeError( + "Decorator sync_all_reduce should be used on " "ignite.metric.Metric class methods only" + ) if len(attrs) > 0 and not self._is_reduced: for attr in attrs: diff --git a/ignite/metrics/metrics_lambda.py b/ignite/metrics/metrics_lambda.py index 8061e7d8bc25..01d9fa042085 100644 --- a/ignite/metrics/metrics_lambda.py +++ b/ignite/metrics/metrics_lambda.py @@ -4,9 +4,7 @@ from ignite.metrics.metric import Metric, reinit__is_reduced from ignite.engine import Events, Engine -__all__ = [ - 'MetricsLambda' -] +__all__ = ["MetricsLambda"] class MetricsLambda(Metric): @@ -45,7 +43,7 @@ def __init__(self, f: Callable, *args, **kwargs): self.function = f self.args = args self.kwargs = kwargs - super(MetricsLambda, self).__init__(device='cpu') + super(MetricsLambda, self).__init__(device="cpu") @reinit__is_reduced def reset(self) -> None: diff --git a/ignite/metrics/precision.py b/ignite/metrics/precision.py index d4cd93359c67..4a4063c92902 100644 --- a/ignite/metrics/precision.py +++ b/ignite/metrics/precision.py @@ -8,28 +8,33 @@ from ignite.utils import to_onehot from ignite.metrics.metric import reinit__is_reduced -__all__ = [ - 'Precision' -] +__all__ = ["Precision"] class _BasePrecisionRecall(_BaseClassification): - - def __init__(self, output_transform: Callable = lambda x: x, average: bool = False, is_multilabel: bool = False, - device: Optional[Union[str, torch.device]] = None): + def __init__( + self, + output_transform: Callable = lambda x: x, + average: bool = False, + is_multilabel: bool = False, + device: Optional[Union[str, torch.device]] = None, + ): if torch.distributed.is_available() and torch.distributed.is_initialized(): if (not average) and is_multilabel: - warnings.warn("Precision/Recall metrics do not work in distributed setting when average=False " - "and is_multilabel=True. Results are not reduced across the GPUs. Computed result " - "corresponds to the local rank's (single GPU) result.", RuntimeWarning) + warnings.warn( + "Precision/Recall metrics do not work in distributed setting when average=False " + "and is_multilabel=True. Results are not reduced across the GPUs. 
Computed result " + "corresponds to the local rank's (single GPU) result.", + RuntimeWarning, + ) self._average = average self._true_positives = None self._positives = None self.eps = 1e-20 - super(_BasePrecisionRecall, self).__init__(output_transform=output_transform, - is_multilabel=is_multilabel, - device=device) + super(_BasePrecisionRecall, self).__init__( + output_transform=output_transform, is_multilabel=is_multilabel, device=device + ) @reinit__is_reduced def reset(self) -> None: @@ -40,8 +45,9 @@ def reset(self) -> None: def compute(self) -> torch.Tensor: if not (isinstance(self._positives, torch.Tensor) or self._positives > 0): - raise NotComputableError("{} must have at least one example before" - " it can be computed.".format(self.__class__.__name__)) + raise NotComputableError( + "{} must have at least one example before" " it can be computed.".format(self.__class__.__name__) + ) if not (self._type == "multilabel" and not self._average): if not self._is_reduced: @@ -115,10 +121,16 @@ def thresholded_output_transform(output): """ - def __init__(self, output_transform: Callable = lambda x: x, average: bool = False, is_multilabel: bool = False, - device: Optional[Union[str, torch.device]] = None): - super(Precision, self).__init__(output_transform=output_transform, - average=average, is_multilabel=is_multilabel, device=device) + def __init__( + self, + output_transform: Callable = lambda x: x, + average: bool = False, + is_multilabel: bool = False, + device: Optional[Union[str, torch.device]] = None, + ): + super(Precision, self).__init__( + output_transform=output_transform, average=average, is_multilabel=is_multilabel, device=device + ) @reinit__is_reduced def update(self, output: Sequence[torch.Tensor]) -> None: @@ -132,8 +144,10 @@ def update(self, output: Sequence[torch.Tensor]) -> None: elif self._type == "multiclass": num_classes = y_pred.size(1) if y.max() + 1 > num_classes: - raise ValueError("y_pred contains less classes than y. Number of predicted classes is {}" - " and element in y has invalid class = {}.".format(num_classes, y.max().item() + 1)) + raise ValueError( + "y_pred contains less classes than y. 
Number of predicted classes is {}" + " and element in y has invalid class = {}.".format(num_classes, y.max().item() + 1) + ) y = to_onehot(y.view(-1), num_classes=num_classes) indices = torch.argmax(y_pred, dim=1).view(-1) y_pred = to_onehot(indices, num_classes=num_classes) diff --git a/ignite/metrics/recall.py b/ignite/metrics/recall.py index 8a18ddc174e6..c7a6739bf0e1 100644 --- a/ignite/metrics/recall.py +++ b/ignite/metrics/recall.py @@ -6,9 +6,7 @@ from ignite.utils import to_onehot from ignite.metrics.metric import reinit__is_reduced -__all__ = [ - 'Recall' -] +__all__ = ["Recall"] class Recall(_BasePrecisionRecall): @@ -69,10 +67,16 @@ def thresholded_output_transform(output): """ - def __init__(self, output_transform: Callable = lambda x: x, average: bool = False, is_multilabel: bool = False, - device: Optional[Union[str, torch.device]] = None): - super(Recall, self).__init__(output_transform=output_transform, - average=average, is_multilabel=is_multilabel, device=device) + def __init__( + self, + output_transform: Callable = lambda x: x, + average: bool = False, + is_multilabel: bool = False, + device: Optional[Union[str, torch.device]] = None, + ): + super(Recall, self).__init__( + output_transform=output_transform, average=average, is_multilabel=is_multilabel, device=device + ) @reinit__is_reduced def update(self, output: Sequence[torch.Tensor]) -> None: @@ -86,8 +90,10 @@ def update(self, output: Sequence[torch.Tensor]) -> None: elif self._type == "multiclass": num_classes = y_pred.size(1) if y.max() + 1 > num_classes: - raise ValueError("y_pred contains less classes than y. Number of predicted classes is {}" - " and element in y has invalid class = {}.".format(num_classes, y.max().item() + 1)) + raise ValueError( + "y_pred contains less classes than y. 
Number of predicted classes is {}" + " and element in y has invalid class = {}.".format(num_classes, y.max().item() + 1) + ) y = to_onehot(y.view(-1), num_classes=num_classes) indices = torch.argmax(y_pred, dim=1).view(-1) y_pred = to_onehot(indices, num_classes=num_classes) diff --git a/ignite/metrics/root_mean_squared_error.py b/ignite/metrics/root_mean_squared_error.py index 63a4c4c5283a..da91511c86be 100644 --- a/ignite/metrics/root_mean_squared_error.py +++ b/ignite/metrics/root_mean_squared_error.py @@ -5,9 +5,7 @@ from ignite.metrics.mean_squared_error import MeanSquaredError -__all__ = [ - 'RootMeanSquaredError' -] +__all__ = ["RootMeanSquaredError"] class RootMeanSquaredError(MeanSquaredError): diff --git a/ignite/metrics/running_average.py b/ignite/metrics/running_average.py index e9c4ffbf36c6..b528918a1419 100644 --- a/ignite/metrics/running_average.py +++ b/ignite/metrics/running_average.py @@ -6,9 +6,7 @@ from ignite.metrics import Metric from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce -__all__ = [ - 'RunningAverage' -] +__all__ = ["RunningAverage"] class RunningAverage(Metric): @@ -45,10 +43,17 @@ def log_running_avg_metrics(engine): print("running avg loss:", engine.state.metrics['running_avg_loss']) """ + _required_output_keys = None - def __init__(self, src: Optional[Metric] = None, alpha: float = 0.98, output_transform: Optional[Callable] = None, - epoch_bound: bool = True, device: Optional[Union[str, torch.device]] = None): + def __init__( + self, + src: Optional[Metric] = None, + alpha: float = 0.98, + output_transform: Optional[Callable] = None, + epoch_bound: bool = True, + device: Optional[Union[str, torch.device]] = None, + ): if not (isinstance(src, Metric) or src is None): raise TypeError("Argument src should be a Metric or None.") if not (0.0 < alpha <= 1.0): @@ -64,8 +69,10 @@ def __init__(self, src: Optional[Metric] = None, alpha: float = 0.98, output_tra self.iteration_completed = self._metric_iteration_completed else: if output_transform is None: - raise ValueError("Argument output_transform should not be None if src corresponds " - "to the output of process function.") + raise ValueError( + "Argument output_transform should not be None if src corresponds " + "to the output of process function." + ) self._get_src_value = self._get_output_value self.update = self._output_update diff --git a/ignite/metrics/top_k_categorical_accuracy.py b/ignite/metrics/top_k_categorical_accuracy.py index 7d9bdb664f1f..60ed320a4205 100644 --- a/ignite/metrics/top_k_categorical_accuracy.py +++ b/ignite/metrics/top_k_categorical_accuracy.py @@ -6,9 +6,7 @@ from ignite.exceptions import NotComputableError from ignite.metrics.metric import sync_all_reduce, reinit__is_reduced -__all__ = [ - 'TopKCategoricalAccuracy' -] +__all__ = ["TopKCategoricalAccuracy"] class TopKCategoricalAccuracy(Metric): @@ -18,8 +16,9 @@ class TopKCategoricalAccuracy(Metric): - `update` must receive output of the form `(y_pred, y)` or `{'y_pred': y_pred, 'y': y}`. 
""" - def __init__(self, k=5, output_transform: Callable = lambda x: x, - device: Optional[Union[str, torch.device]] = None): + def __init__( + self, k=5, output_transform: Callable = lambda x: x, device: Optional[Union[str, torch.device]] = None + ): super(TopKCategoricalAccuracy, self).__init__(output_transform, device=device) self._k = k @@ -40,6 +39,7 @@ def update(self, output: Sequence) -> None: @sync_all_reduce("_num_correct", "_num_examples") def compute(self) -> Union[float, torch.Tensor]: if self._num_examples == 0: - raise NotComputableError("TopKCategoricalAccuracy must have at" - "least one example before it can be computed.") + raise NotComputableError( + "TopKCategoricalAccuracy must have at" "least one example before it can be computed." + ) return self._num_correct / self._num_examples diff --git a/ignite/utils.py b/ignite/utils.py index 8a90f1e35303..291294d52d4e 100644 --- a/ignite/utils.py +++ b/ignite/utils.py @@ -4,46 +4,35 @@ import torch -__all__ = [ - 'convert_tensor', - 'apply_to_tensor', - 'apply_to_type', - 'to_onehot', - 'setup_logger' -] - - -def convert_tensor(input_: Union[torch.Tensor, collections.Sequence, - collections.Mapping, str, bytes], - device: Optional[Union[str, torch.device]] = None, - non_blocking: bool = False) -> Union[torch.Tensor, - collections.Sequence, - collections.Mapping, - str, bytes]: +__all__ = ["convert_tensor", "apply_to_tensor", "apply_to_type", "to_onehot", "setup_logger"] + + +def convert_tensor( + input_: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes], + device: Optional[Union[str, torch.device]] = None, + non_blocking: bool = False, +) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]: """Move tensors to relevant device.""" def _func(tensor: torch.Tensor) -> torch.Tensor: - return tensor.to(device=device, - non_blocking=non_blocking) if device else tensor + return tensor.to(device=device, non_blocking=non_blocking) if device else tensor return apply_to_tensor(input_, _func) -def apply_to_tensor(input_: Union[torch.Tensor, collections.Sequence, - collections.Mapping, str, bytes], - func: Callable) -> Union[torch.Tensor, - collections.Sequence, - collections.Mapping, str, bytes]: +def apply_to_tensor( + input_: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes], func: Callable +) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]: """Apply a function on a tensor or mapping, or sequence of tensors. """ return apply_to_type(input_, torch.Tensor, func) -def apply_to_type(input_: Union[Any, collections.Sequence, - collections.Mapping, str, bytes], - input_type: Union[Type, Tuple[Type[Any], Any]], - func: Callable) -> Union[Any, collections.Sequence, - collections.Mapping, str, bytes]: +def apply_to_type( + input_: Union[Any, collections.Sequence, collections.Mapping, str, bytes], + input_type: Union[Type, Tuple[Type[Any], Any]], + func: Callable, +) -> Union[Any, collections.Sequence, collections.Mapping, str, bytes]: """Apply a function on a object of `input_type` or mapping, or sequence of objects of `input_type`. 
""" if isinstance(input_, input_type): @@ -52,13 +41,12 @@ def apply_to_type(input_: Union[Any, collections.Sequence, return input_ elif isinstance(input_, collections.Mapping): return type(input_)({k: apply_to_type(sample, input_type, func) for k, sample in input_.items()}) - elif isinstance(input_, tuple) and hasattr(input_, '_fields'): # namedtuple + elif isinstance(input_, tuple) and hasattr(input_, "_fields"): # namedtuple return type(input_)(*(apply_to_type(sample, input_type, func) for sample in input_)) elif isinstance(input_, collections.Sequence): return type(input_)([apply_to_type(sample, input_type, func) for sample in input_]) else: - raise TypeError(("input must contain {}, dicts or lists; found {}" - .format(input_type, type(input_)))) + raise TypeError(("input must contain {}, dicts or lists; found {}".format(input_type, type(input_)))) def to_onehot(indices: torch.Tensor, num_classes: int) -> torch.Tensor: @@ -66,16 +54,17 @@ def to_onehot(indices: torch.Tensor, num_classes: int) -> torch.Tensor: tensor of one-hot indicators of shape `(N, num_classes, ...) and of type uint8. Output's device is equal to the input's device`. """ - onehot = torch.zeros(indices.shape[0], num_classes, *indices.shape[1:], - dtype=torch.uint8, - device=indices.device) + onehot = torch.zeros(indices.shape[0], num_classes, *indices.shape[1:], dtype=torch.uint8, device=indices.device) return onehot.scatter_(1, indices.unsqueeze(1), 1) -def setup_logger(name: str, level: int = logging.INFO, - format: str = "%(asctime)s %(name)s %(levelname)s: %(message)s", - filepath: Optional[str] = None, - distributed_rank: int = 0) -> logging.Logger: +def setup_logger( + name: str, + level: int = logging.INFO, + format: str = "%(asctime)s %(name)s %(levelname)s: %(message)s", + filepath: Optional[str] = None, + distributed_rank: int = 0, +) -> logging.Logger: """Setups logger: name, level, format etc. 
Args: diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 000000000000..45ac10070503 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,23 @@ +[tool.black] +line-length = 120 +target-version = ['py35', 'py36', 'py37', 'py38'] +include = '\.pyi?$' +exclude = ''' + +( + /( + \.eggs # exclude a few common directories in the + | \.git # root of the project + | \.hg + | \.mypy_cache + | \.tox + | \.venv + | _build + | buck-out + | build + | dist + )/ + | foo.py # also separately exclude a file named foo.py in + # the root of the project +) +''' diff --git a/setup.py b/setup.py index fff377706da2..4cc7f67aee53 100644 --- a/setup.py +++ b/setup.py @@ -5,8 +5,7 @@ def read(*names, **kwargs): - with io.open(os.path.join(os.path.dirname(__file__), *names), - encoding=kwargs.get("encoding", "utf8")) as fp: + with io.open(os.path.join(os.path.dirname(__file__), *names), encoding=kwargs.get("encoding", "utf8")) as fp: return fp.read() @@ -18,26 +17,26 @@ def find_version(*file_paths): raise RuntimeError("Unable to find version string.") -readme = read('README.rst') +readme = read("README.rst") -VERSION = find_version('ignite', '__init__.py') +VERSION = find_version("ignite", "__init__.py") -requirements = ['torch', ] +requirements = [ + "torch", +] setup( # Metadata - name='pytorch-ignite', + name="pytorch-ignite", version=VERSION, - author='PyTorch Core Team', - author_email='soumith@pytorch.org', - url='https://github.com/pytorch/ignite', - description='A lightweight library to help with training neural networks in PyTorch.', + author="PyTorch Core Team", + author_email="soumith@pytorch.org", + url="https://github.com/pytorch/ignite", + description="A lightweight library to help with training neural networks in PyTorch.", long_description=readme, - license='BSD', - + license="BSD", # Package info - packages=find_packages(exclude=('tests', 'tests.*',)), - + packages=find_packages(exclude=("tests", "tests.*",)), zip_safe=True, install_requires=requirements, ) diff --git a/tests/ignite/conftest.py b/tests/ignite/conftest.py index 9d97e8c88c5f..b26be98d0e58 100644 --- a/tests/ignite/conftest.py +++ b/tests/ignite/conftest.py @@ -29,6 +29,7 @@ def local_rank(worker_id): def distributed_context_single_node_nccl(local_rank): import os + if "WORLD_SIZE" not in os.environ: os.environ["WORLD_SIZE"] = "{}".format(torch.cuda.device_count()) @@ -36,13 +37,13 @@ def distributed_context_single_node_nccl(local_rank): "backend": "nccl", "world_size": int(os.environ["WORLD_SIZE"]), "rank": local_rank, - "init_method": "tcp://localhost:2222" + "init_method": "tcp://localhost:2222", } dist.init_process_group(**dist_info) torch.cuda.device(local_rank) - yield {'local_rank': local_rank} + yield {"local_rank": local_rank} dist.barrier() @@ -53,6 +54,7 @@ def distributed_context_single_node_nccl(local_rank): def distributed_context_single_node_gloo(local_rank): import os + if "WORLD_SIZE" not in os.environ: os.environ["WORLD_SIZE"] = "1" @@ -60,7 +62,7 @@ def distributed_context_single_node_gloo(local_rank): "backend": "gloo", "world_size": int(os.environ["WORLD_SIZE"]), "rank": local_rank, - "init_method": "tcp://localhost:2222" + "init_method": "tcp://localhost:2222", } dist.init_process_group(**dist_info) @@ -75,17 +77,18 @@ def distributed_context_single_node_gloo(local_rank): @pytest.fixture() def multi_node_conf(local_rank): import os + assert "node_id" in os.environ assert "nnodes" in os.environ assert "nproc_per_node" in os.environ - node_id = int(os.environ['node_id']) - nnodes = 
int(os.environ['nnodes']) - nproc_per_node = int(os.environ['nproc_per_node']) + node_id = int(os.environ["node_id"]) + nnodes = int(os.environ["nnodes"]) + nproc_per_node = int(os.environ["nproc_per_node"]) out = { - 'world_size': nnodes * nproc_per_node, - 'rank': local_rank + node_id * nproc_per_node, - 'local_rank': local_rank + "world_size": nnodes * nproc_per_node, + "rank": local_rank + node_id * nproc_per_node, + "local_rank": local_rank, } return out @@ -101,8 +104,8 @@ def distributed_context_multi_node_gloo(multi_node_conf): dist_info = { "backend": "gloo", "init_method": "env://", - "world_size": multi_node_conf['world_size'], - "rank": multi_node_conf['rank'] + "world_size": multi_node_conf["world_size"], + "rank": multi_node_conf["rank"], } dist.init_process_group(**dist_info) @@ -125,12 +128,12 @@ def distributed_context_multi_node_nccl(multi_node_conf): dist_info = { "backend": "nccl", "init_method": "env://", - "world_size": multi_node_conf['world_size'], - "rank": multi_node_conf['rank'] + "world_size": multi_node_conf["world_size"], + "rank": multi_node_conf["rank"], } dist.init_process_group(**dist_info) - torch.cuda.device(multi_node_conf['local_rank']) + torch.cuda.device(multi_node_conf["local_rank"]) yield multi_node_conf diff --git a/tests/ignite/contrib/engines/test_common.py b/tests/ignite/contrib/engines/test_common.py index 55ec279c15e1..6503dd506c65 100644 --- a/tests/ignite/contrib/engines/test_common.py +++ b/tests/ignite/contrib/engines/test_common.py @@ -4,8 +4,12 @@ import torch.nn as nn from ignite.engine import Events, Engine -from ignite.contrib.engines.common import setup_common_training_handlers, \ - save_best_model_by_val_score, add_early_stopping_by_val_score, setup_tb_logging +from ignite.contrib.engines.common import ( + setup_common_training_handlers, + save_best_model_by_val_score, + add_early_stopping_by_val_score, + setup_tb_logging, +) from ignite.handlers import TerminateOnNan from ignite.contrib.handlers.tensorboard_logger import OutputHandler, OptimizerParamsHandler @@ -31,9 +35,7 @@ def _test_setup_common_training_handlers(dirname, device, rank=0, local_rank=0, model = DummyModel().to(device) if distributed: - model = torch.nn.parallel.DistributedDataParallel(model, - device_ids=[local_rank, ], - output_device=local_rank) + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank,], output_device=local_rank) optimizer = torch.optim.SGD(model.parameters(), lr=lr) lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma) @@ -50,13 +52,20 @@ def update_fn(engine, batch): train_sampler.set_epoch = MagicMock() trainer = Engine(update_fn) - setup_common_training_handlers(trainer, train_sampler=train_sampler, - to_save={"model": model, "optimizer": optimizer}, - save_every_iters=75, output_path=dirname, - lr_scheduler=lr_scheduler, with_gpu_stats=False, - output_names=['batch_loss', ], - with_pbars=True, with_pbar_on_iters=True, log_every_iters=50, - device=device) + setup_common_training_handlers( + trainer, + train_sampler=train_sampler, + to_save={"model": model, "optimizer": optimizer}, + save_every_iters=75, + output_path=dirname, + lr_scheduler=lr_scheduler, + with_gpu_stats=False, + output_names=["batch_loss",], + with_pbars=True, + with_pbar_on_iters=True, + log_every_iters=50, + device=device, + ) num_iters = 100 num_epochs = 10 @@ -65,45 +74,52 @@ def update_fn(engine, batch): # check handlers handlers = trainer._event_handlers[Events.ITERATION_COMPLETED] - for cls in 
[TerminateOnNan, ]: + for cls in [ + TerminateOnNan, + ]: assert any([isinstance(h[0], cls) for h in handlers]), "{}".format(handlers) - assert 'batch_loss' in trainer.state.metrics + assert "batch_loss" in trainer.state.metrics # Check saved checkpoint if rank == 0: checkpoints = list(os.listdir(dirname)) assert len(checkpoints) == 1 - for v in ["training_checkpoint", ]: + for v in [ + "training_checkpoint", + ]: assert any([v in c for c in checkpoints]) # Check LR scheduling - assert optimizer.param_groups[0]['lr'] <= lr * gamma ** (num_iters * num_epochs / step_size), \ - "{} vs {}".format(optimizer.param_groups[0]['lr'], lr * gamma ** (num_iters * num_epochs / step_size)) + assert optimizer.param_groups[0]["lr"] <= lr * gamma ** (num_iters * num_epochs / step_size), "{} vs {}".format( + optimizer.param_groups[0]["lr"], lr * gamma ** (num_iters * num_epochs / step_size) + ) def test_asserts_setup_common_training_handlers(): trainer = Engine(lambda e, b: None) - with pytest.raises(ValueError, match=r"If to_save argument is provided then output_path argument should be " - r"also defined"): + with pytest.raises( + ValueError, match=r"If to_save argument is provided then output_path argument should be " r"also defined" + ): setup_common_training_handlers(trainer, to_save={}) - with pytest.warns(UserWarning, match=r"Argument train_sampler distributed sampler used to call " - r"`set_epoch` method on epoch"): + with pytest.warns( + UserWarning, match=r"Argument train_sampler distributed sampler used to call " r"`set_epoch` method on epoch" + ): train_sampler = MagicMock() setup_common_training_handlers(trainer, train_sampler=train_sampler, with_gpu_stats=False) def test_setup_common_training_handlers(dirname, capsys): - _test_setup_common_training_handlers(dirname, device='cpu') + _test_setup_common_training_handlers(dirname, device="cpu") # Check epoch-wise pbar captured = capsys.readouterr() - out = captured.err.split('\r') + out = captured.err.split("\r") out = list(map(lambda x: x.strip(), out)) out = list(filter(None, out)) - assert u"Epoch:" in out[-1], "{}".format(out[-1]) + assert "Epoch:" in out[-1], "{}".format(out[-1]) def test_save_best_model_by_val_score(dirname, capsys): @@ -116,7 +132,9 @@ def test_save_best_model_by_val_score(dirname, capsys): @trainer.on(Events.EPOCH_COMPLETED) def validate(engine): - evaluator.run([0, ]) + evaluator.run( + [0,] + ) @evaluator.on(Events.EPOCH_COMPLETED) def set_eval_metric(engine): @@ -124,10 +142,12 @@ def set_eval_metric(engine): save_best_model_by_val_score(dirname, evaluator, model, metric_name="acc", n_saved=2, trainer=trainer) - data = [0, ] + data = [ + 0, + ] trainer.run(data, max_epochs=len(acc_scores)) - assert set(os.listdir(dirname)) == set(['best_model_8_val_acc=0.6100.pth', 'best_model_9_val_acc=0.7000.pth']) + assert set(os.listdir(dirname)) == set(["best_model_8_val_acc=0.6100.pth", "best_model_9_val_acc=0.7000.pth"]) def test_add_early_stopping_by_val_score(): @@ -138,7 +158,9 @@ def test_add_early_stopping_by_val_score(): @trainer.on(Events.EPOCH_COMPLETED) def validate(engine): - evaluator.run([0, ]) + evaluator.run( + [0,] + ) @evaluator.on(Events.EPOCH_COMPLETED) def set_eval_metric(engine): @@ -146,14 +168,15 @@ def set_eval_metric(engine): add_early_stopping_by_val_score(patience=3, evaluator=evaluator, trainer=trainer, metric_name="acc") - data = [0, ] + data = [ + 0, + ] state = trainer.run(data, max_epochs=len(acc_scores)) assert state.epoch == 7 def test_setup_tb_logging(dirname): - def _test(with_eval, with_optim): 
trainer = Engine(lambda e, b: b) evaluators = None @@ -165,32 +188,40 @@ def _test(with_eval, with_optim): @trainer.on(Events.EPOCH_COMPLETED) def validate(engine): - evaluator.run([0, ]) + evaluator.run( + [0,] + ) @evaluator.on(Events.EPOCH_COMPLETED) def set_eval_metric(engine): engine.state.metrics = {"acc": acc_scores[trainer.state.epoch - 1]} - evaluators = {'validation': evaluator} + evaluators = {"validation": evaluator} if with_optim: - t = torch.tensor([0, ]) - optimizers = {'optimizer': torch.optim.SGD([t, ], lr=0.01)} + t = torch.tensor([0,]) + optimizers = {"optimizer": torch.optim.SGD([t,], lr=0.01)} setup_tb_logging(dirname, trainer, optimizers=optimizers, evaluators=evaluators, log_every_iters=1) handlers = trainer._event_handlers[Events.ITERATION_COMPLETED] - for cls in [OutputHandler, ]: + for cls in [ + OutputHandler, + ]: assert any([isinstance(h[0], cls) for h in handlers]), "{}".format(handlers) if with_optim: handlers = trainer._event_handlers[Events.ITERATION_STARTED] - for cls in [OptimizerParamsHandler, ]: + for cls in [ + OptimizerParamsHandler, + ]: assert any([isinstance(h[0], cls) for h in handlers]), "{}".format(handlers) if with_eval: handlers = evaluator._event_handlers[Events.COMPLETED] - for cls in [OutputHandler, ]: + for cls in [ + OutputHandler, + ]: assert any([isinstance(h[0], cls) for h in handlers]), "{}".format(handlers) data = [0, 1, 2] @@ -198,7 +229,9 @@ def set_eval_metric(engine): tb_files = list(os.listdir(dirname)) assert len(tb_files) == 1 - for v in ["events", ]: + for v in [ + "events", + ]: assert any([v in c for c in tb_files]), "{}".format(tb_files) _test(with_eval=False, with_optim=False) @@ -208,7 +241,7 @@ def set_eval_metric(engine): @pytest.mark.distributed @pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU") def test_distrib_gpu(dirname, distributed_context_single_node_nccl): - local_rank = distributed_context_single_node_nccl['local_rank'] + local_rank = distributed_context_single_node_nccl["local_rank"] device = "cuda:{}".format(local_rank) _test_setup_common_training_handlers(dirname, device, rank=local_rank, local_rank=local_rank, distributed=True) test_add_early_stopping_by_val_score() @@ -217,25 +250,25 @@ def test_distrib_gpu(dirname, distributed_context_single_node_nccl): @pytest.mark.distributed def test_distrib_cpu(dirname, distributed_context_single_node_gloo): device = "cpu" - local_rank = distributed_context_single_node_gloo['local_rank'] + local_rank = distributed_context_single_node_gloo["local_rank"] _test_setup_common_training_handlers(dirname, device, rank=local_rank) test_add_early_stopping_by_val_score() @pytest.mark.multinode_distributed -@pytest.mark.skipif('MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_cpu(dirname, distributed_context_multi_node_gloo): device = "cpu" - rank = distributed_context_multi_node_gloo['rank'] + rank = distributed_context_multi_node_gloo["rank"] _test_setup_common_training_handlers(dirname, device, rank=rank) test_add_early_stopping_by_val_score() @pytest.mark.multinode_distributed -@pytest.mark.skipif('GPU_MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_gpu(dirname, distributed_context_multi_node_nccl): - local_rank = 
distributed_context_multi_node_nccl['local_rank'] - rank = distributed_context_multi_node_nccl['rank'] + local_rank = distributed_context_multi_node_nccl["local_rank"] + rank = distributed_context_multi_node_nccl["rank"] device = "cuda:{}".format(local_rank) _test_setup_common_training_handlers(dirname, device, rank=rank, local_rank=local_rank, distributed=True) test_add_early_stopping_by_val_score() diff --git a/tests/ignite/contrib/engines/test_tbptt.py b/tests/ignite/contrib/engines/test_tbptt.py index cacbc5b51160..78d57678d31b 100644 --- a/tests/ignite/contrib/engines/test_tbptt.py +++ b/tests/ignite/contrib/engines/test_tbptt.py @@ -52,22 +52,14 @@ def test_create_supervised_tbptt_trainer_callcounts(mock_detach_hidden): optimizer = mock.MagicMock() loss = mock.MagicMock() - trainer = create_supervised_tbptt_trainer( - model, optimizer, loss, tbtt_step=2 - ) + trainer = create_supervised_tbptt_trainer(model, optimizer, loss, tbtt_step=2) # Adding two mock handles to the trainer to monitor that TBPTT events are # called correctly handle_started = mock.MagicMock() - trainer.add_event_handler( - Tbptt_Events.TIME_ITERATION_STARTED, - handle_started - ) + trainer.add_event_handler(Tbptt_Events.TIME_ITERATION_STARTED, handle_started) handle_completed = mock.MagicMock() - trainer.add_event_handler( - Tbptt_Events.TIME_ITERATION_COMPLETED, - handle_completed - ) + trainer.add_event_handler(Tbptt_Events.TIME_ITERATION_COMPLETED, handle_completed) # Fake data X = torch.ones(6, 2, 1) @@ -102,13 +94,7 @@ def _test_create_supervised_tbptt_trainer(device): # Defning optimizer and trainer optimizer = optim.SGD(model.parameters(), 1) - trainer = create_supervised_tbptt_trainer( - model, - optimizer, - F.mse_loss, - tbtt_step=2, - device=device - ) + trainer = create_supervised_tbptt_trainer(model, optimizer, F.mse_loss, tbtt_step=2, device=device) # Fake data X = torch.ones(6, 2, 1) diff --git a/tests/ignite/contrib/handlers/conftest.py b/tests/ignite/contrib/handlers/conftest.py index 16968a5a1bab..75b890a0b05a 100644 --- a/tests/ignite/contrib/handlers/conftest.py +++ b/tests/ignite/contrib/handlers/conftest.py @@ -1,4 +1,3 @@ - import numpy as np import pytest @@ -11,6 +10,7 @@ def norm_mock(): def norm(x): return np.linalg.norm(x) + norm_mock = Mock(side_effect=norm, spec=norm) norm_mock.configure_mock(**{"__name__": "norm"}) norm_mock.reset_mock() diff --git a/tests/ignite/contrib/handlers/test_base_logger.py b/tests/ignite/contrib/handlers/test_base_logger.py index 5c8c99901b7f..d457f3cfacee 100644 --- a/tests/ignite/contrib/handlers/test_base_logger.py +++ b/tests/ignite/contrib/handlers/test_base_logger.py @@ -16,7 +16,6 @@ class DummyLogger(BaseLogger): class DummyOutputHandler(BaseOutputHandler): - def __call__(self, *args, **kwargs): pass @@ -46,7 +45,7 @@ def test_base_output_handler_with_another_engine(): engine.state.output = 12345 with pytest.warns(DeprecationWarning, match="Use of another_engine is deprecated"): - handler = DummyOutputHandler("tag", metric_names=['a', 'b'], output_transform=None, another_engine=engine) + handler = DummyOutputHandler("tag", metric_names=["a", "b"], output_transform=None, another_engine=engine) def test_base_output_handler_setup_output_metrics(): @@ -57,12 +56,12 @@ def test_base_output_handler_setup_output_metrics(): engine.state.output = 12345 # Only metric_names - handler = DummyOutputHandler("tag", metric_names=['a', 'b'], output_transform=None) + handler = DummyOutputHandler("tag", metric_names=["a", "b"], output_transform=None) metrics = 
handler._setup_output_metrics(engine=engine) assert metrics == true_metrics # Only metric_names with a warning - handler = DummyOutputHandler("tag", metric_names=['a', 'c'], output_transform=None) + handler = DummyOutputHandler("tag", metric_names=["a", "c"], output_transform=None) with pytest.warns(UserWarning): metrics = handler._setup_output_metrics(engine=engine) assert metrics == {"a": 0} @@ -78,7 +77,7 @@ def test_base_output_handler_setup_output_metrics(): assert metrics == {"loss": engine.state.output} # Metrics and output - handler = DummyOutputHandler("tag", metric_names=['a', 'b'], output_transform=lambda x: {"loss": x}) + handler = DummyOutputHandler("tag", metric_names=["a", "b"], output_transform=lambda x: {"loss": x}) metrics = handler._setup_output_metrics(engine=engine) assert metrics == {"a": 0, "b": 1, "loss": engine.state.output} @@ -107,9 +106,7 @@ def update_fn(engine, batch): mock_log_handler = MagicMock() - logger.attach(trainer, - log_handler=mock_log_handler, - event_name=event) + logger.attach(trainer, log_handler=mock_log_handler, event_name=event) trainer.run(data, max_epochs=n_epochs) @@ -149,9 +146,7 @@ def update_fn(engine, batch): mock_log_handler = MagicMock() - logger.attach(trainer, - log_handler=mock_log_handler, - event_name=event) + logger.attach(trainer, log_handler=mock_log_handler, event_name=event) trainer.run(data, max_epochs=n_epochs) @@ -189,7 +184,6 @@ def test_as_context_manager(): data = list(range(50)) class _DummyLogger(BaseLogger): - def __init__(self, writer): self.writer = writer @@ -215,9 +209,7 @@ def update_fn(engine, batch): trainer = Engine(update_fn) mock_log_handler = MagicMock() - logger.attach(trainer, - log_handler=mock_log_handler, - event_name=event) + logger.attach(trainer, log_handler=mock_log_handler, event_name=event) trainer.run(data, max_epochs=n_epochs) diff --git a/tests/ignite/contrib/handlers/test_custom_events.py b/tests/ignite/contrib/handlers/test_custom_events.py index b491c1b798fb..006ac5fc8dd6 100644 --- a/tests/ignite/contrib/handlers/test_custom_events.py +++ b/tests/ignite/contrib/handlers/test_custom_events.py @@ -33,7 +33,6 @@ def test_bad_input(): def test_new_events(): - def update(*args, **kwargs): pass @@ -60,9 +59,7 @@ def update(*args, **kwargs): def test_integration_iterations(): - def _test(n_iterations, max_epochs, n_iters_per_epoch): - def update(*args, **kwargs): pass @@ -77,6 +74,7 @@ def update(*args, **kwargs): n_calls_iter_completed = [0] event_started = getattr(cpe.Events, "ITERATIONS_{}_STARTED".format(n_iterations)) + @engine.on(event_started) def on_my_event_started(engine): assert (engine.state.iteration - 1) % n_iterations == 0 @@ -86,6 +84,7 @@ def on_my_event_started(engine): n_calls_iter_started[0] += 1 event_completed = getattr(cpe.Events, "ITERATIONS_{}_COMPLETED".format(n_iterations)) + @engine.on(event_completed) def on_my_event_ended(engine): assert engine.state.iteration % n_iterations == 0 @@ -108,7 +107,6 @@ def on_my_event_ended(engine): def test_integration_epochs(): - def update(*args, **kwargs): pass diff --git a/tests/ignite/contrib/handlers/test_mlflow_logger.py b/tests/ignite/contrib/handlers/test_mlflow_logger.py index 1cf6201e42ff..df646f2278c0 100644 --- a/tests/ignite/contrib/handlers/test_mlflow_logger.py +++ b/tests/ignite/contrib/handlers/test_mlflow_logger.py @@ -39,7 +39,9 @@ def test_output_handler_output_transform(): mock_logger.log_metrics = MagicMock() wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) - 
mock_logger.log_metrics.assert_called_once_with({"another_tag loss": 12345}, step=123,) + mock_logger.log_metrics.assert_called_once_with( + {"another_tag loss": 12345}, step=123, + ) def test_output_handler_metric_names(): @@ -56,13 +58,10 @@ def test_output_handler_metric_names(): assert mock_logger.log_metrics.call_count == 1 mock_logger.log_metrics.assert_called_once_with( - {"tag a": 12.23, - "tag b": 23.45, - "tag c": 10.0}, - step=5, + {"tag a": 12.23, "tag b": 23.45, "tag c": 10.0}, step=5, ) - wrapper = OutputHandler("tag", metric_names=["a", ]) + wrapper = OutputHandler("tag", metric_names=["a",]) mock_engine = MagicMock() mock_engine.state = State(metrics={"a": torch.Tensor([0.0, 1.0, 2.0, 3.0])}) @@ -74,13 +73,9 @@ def test_output_handler_metric_names(): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert mock_logger.log_metrics.call_count == 1 - mock_logger.log_metrics.assert_has_calls([ - call({"tag a 0": 0.0, - "tag a 1": 1.0, - "tag a 2": 2.0, - "tag a 3": 3.0}, - step=5), - ], any_order=True) + mock_logger.log_metrics.assert_has_calls( + [call({"tag a 0": 0.0, "tag a 1": 1.0, "tag a 2": 2.0, "tag a 3": 3.0}, step=5),], any_order=True + ) wrapper = OutputHandler("tag", metric_names=["a", "c"]) @@ -95,9 +90,7 @@ def test_output_handler_metric_names(): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert mock_logger.log_metrics.call_count == 1 - mock_logger.log_metrics.assert_has_calls([ - call({"tag a": 55.56}, step=7) - ], any_order=True) + mock_logger.log_metrics.assert_has_calls([call({"tag a": 55.56}, step=7)], any_order=True) def test_output_handler_both(): @@ -115,16 +108,13 @@ def test_output_handler_both(): assert mock_logger.log_metrics.call_count == 1 mock_logger.log_metrics.assert_called_once_with( - {"tag a": 12.23, - "tag b": 23.45, - "tag loss": 12345}, - step=5, + {"tag a": 12.23, "tag b": 23.45, "tag loss": 12345}, step=5, ) def test_output_handler_with_wrong_global_step_transform_output(): def global_step_transform(*args, **kwargs): - return 'a' + return "a" wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform) mock_logger = MagicMock(spec=MLflowLogger) @@ -163,8 +153,11 @@ def test_output_handler_with_global_step_from_engine(): mock_another_engine.state.epoch = 10 mock_another_engine.state.output = 12.345 - wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, - global_step_transform=global_step_from_engine(mock_another_engine)) + wrapper = OutputHandler( + "tag", + output_transform=lambda x: {"loss": x}, + global_step_transform=global_step_from_engine(mock_another_engine), + ) mock_logger = MagicMock(spec=MLflowLogger) mock_logger.log_metrics = MagicMock() @@ -176,16 +169,18 @@ def test_output_handler_with_global_step_from_engine(): wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) assert mock_logger.log_metrics.call_count == 1 - mock_logger.log_metrics.assert_has_calls([call({"tag loss": mock_engine.state.output}, - step=mock_another_engine.state.epoch)]) + mock_logger.log_metrics.assert_has_calls( + [call({"tag loss": mock_engine.state.output}, step=mock_another_engine.state.epoch)] + ) mock_another_engine.state.epoch = 11 mock_engine.state.output = 1.123 wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) assert mock_logger.log_metrics.call_count == 2 - mock_logger.log_metrics.assert_has_calls([call({"tag loss": mock_engine.state.output}, - step=mock_another_engine.state.epoch)]) + mock_logger.log_metrics.assert_has_calls( + [call({"tag 
loss": mock_engine.state.output}, step=mock_another_engine.state.epoch)] + ) def test_optimizer_params_handler_wrong_setup(): @@ -246,9 +241,7 @@ def dummy_handler(engine, logger, event_name): true_values.append(v) logger.log_metrics({"{}".format("test_value"): v}, step=global_step) - mlflow_logger.attach(trainer, - log_handler=dummy_handler, - event_name=Events.EPOCH_COMPLETED) + mlflow_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED) import mlflow @@ -289,11 +282,10 @@ def dummy_handler(engine, logger, event_name): true_values.append(v) logger.log_metrics({"{}".format("test_value"): v}, step=global_step) - mlflow_logger.attach(trainer, - log_handler=dummy_handler, - event_name=Events.EPOCH_COMPLETED) + mlflow_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED) import mlflow + active_run = mlflow.active_run() trainer.run(data, max_epochs=n_epochs) @@ -317,10 +309,7 @@ def test_mlflow_bad_metric_name_handling(dirname): handler = OutputHandler(tag="training", metric_names="all") engine = Engine(lambda e, b: None) - engine.state = State(metrics={ - "metric:0 in %": 123.0, - "metric 0": 1000.0, - }) + engine.state = State(metrics={"metric:0 in %": 123.0, "metric 0": 1000.0,}) with pytest.warns(UserWarning, match=r"MLflowLogger output_handler encountered an invalid metric name"): @@ -329,7 +318,7 @@ def test_mlflow_bad_metric_name_handling(dirname): for i, v in enumerate(true_values): engine.state.epoch += 1 - engine.state.metrics['metric 0'] = v + engine.state.metrics["metric 0"] = v handler(engine, mlflow_logger, event_name=Events.EPOCH_COMPLETED) from mlflow.tracking import MlflowClient @@ -337,7 +326,7 @@ def test_mlflow_bad_metric_name_handling(dirname): client = MlflowClient(tracking_uri=os.path.join(dirname, "mlruns")) stored_values = client.get_metric_history(active_run.info.run_id, "training metric 0") - for t, s in zip([1000.0, ] + true_values, stored_values): + for t, s in zip([1000.0,] + true_values, stored_values): assert t == s.value diff --git a/tests/ignite/contrib/handlers/test_neptune_logger.py b/tests/ignite/contrib/handlers/test_neptune_logger.py index 053d2ac6cdbc..2f0489cfbad8 100644 --- a/tests/ignite/contrib/handlers/test_neptune_logger.py +++ b/tests/ignite/contrib/handlers/test_neptune_logger.py @@ -84,12 +84,11 @@ def test_output_handler_metric_names(): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert mock_logger.experiment.log_metric.call_count == 2 - mock_logger.experiment.log_metric.assert_has_calls([ - call("tag/a", y=12.23, x=5), - call("tag/b", y=23.45, x=5), - ], any_order=True) + mock_logger.experiment.log_metric.assert_has_calls( + [call("tag/a", y=12.23, x=5), call("tag/b", y=23.45, x=5),], any_order=True + ) - wrapper = OutputHandler("tag", metric_names=["a", ]) + wrapper = OutputHandler("tag", metric_names=["a",]) mock_engine = MagicMock() mock_engine.state = State(metrics={"a": torch.Tensor([0.0, 1.0, 2.0, 3.0])}) @@ -101,12 +100,15 @@ def test_output_handler_metric_names(): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert mock_logger.experiment.log_metric.call_count == 4 - mock_logger.experiment.log_metric.assert_has_calls([ - call("tag/a/0", y=0.0, x=5), - call("tag/a/1", y=1.0, x=5), - call("tag/a/2", y=2.0, x=5), - call("tag/a/3", y=3.0, x=5), - ], any_order=True) + mock_logger.experiment.log_metric.assert_has_calls( + [ + call("tag/a/0", y=0.0, x=5), + call("tag/a/1", y=1.0, x=5), + call("tag/a/2", y=2.0, x=5), + call("tag/a/3", y=3.0, x=5), + ], 
+ any_order=True, + ) wrapper = OutputHandler("tag", metric_names=["a", "c"]) @@ -121,9 +123,7 @@ def test_output_handler_metric_names(): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert mock_logger.experiment.log_metric.call_count == 1 - mock_logger.experiment.log_metric.assert_has_calls([ - call("tag/a", y=55.56, x=7), - ], any_order=True) + mock_logger.experiment.log_metric.assert_has_calls([call("tag/a", y=55.56, x=7),], any_order=True) # all metrics wrapper = OutputHandler("tag", metric_names="all") @@ -137,10 +137,9 @@ def test_output_handler_metric_names(): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert mock_logger.experiment.log_metric.call_count == 2 - mock_logger.experiment.log_metric.assert_has_calls([ - call("tag/a", y=12.23, x=5), - call("tag/b", y=23.45, x=5), - ], any_order=True) + mock_logger.experiment.log_metric.assert_has_calls( + [call("tag/a", y=12.23, x=5), call("tag/b", y=23.45, x=5),], any_order=True + ) def test_output_handler_both(): @@ -156,16 +155,14 @@ def test_output_handler_both(): wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) assert mock_logger.experiment.log_metric.call_count == 3 - mock_logger.experiment.log_metric.assert_has_calls([ - call("tag/a", y=12.23, x=5), - call("tag/b", y=23.45, x=5), - call("tag/loss", y=12345, x=5) - ], any_order=True) + mock_logger.experiment.log_metric.assert_has_calls( + [call("tag/a", y=12.23, x=5), call("tag/b", y=23.45, x=5), call("tag/loss", y=12345, x=5)], any_order=True + ) def test_output_handler_with_wrong_global_step_transform_output(): def global_step_transform(*args, **kwargs): - return 'a' + return "a" wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform) mock_logger = MagicMock(spec=NeptuneLogger) @@ -186,8 +183,11 @@ def test_output_handler_with_global_step_from_engine(): mock_another_engine.state.epoch = 10 mock_another_engine.state.output = 12.345 - wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, - global_step_transform=global_step_from_engine(mock_another_engine)) + wrapper = OutputHandler( + "tag", + output_transform=lambda x: {"loss": x}, + global_step_transform=global_step_from_engine(mock_another_engine), + ) mock_logger = MagicMock(spec=NeptuneLogger) mock_logger.experiment = MagicMock() @@ -199,18 +199,18 @@ def test_output_handler_with_global_step_from_engine(): wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) assert mock_logger.experiment.log_metric.call_count == 1 - mock_logger.experiment.log_metric.assert_has_calls([call("tag/loss", - y=mock_engine.state.output, - x=mock_another_engine.state.epoch)]) + mock_logger.experiment.log_metric.assert_has_calls( + [call("tag/loss", y=mock_engine.state.output, x=mock_another_engine.state.epoch)] + ) mock_another_engine.state.epoch = 11 mock_engine.state.output = 1.123 wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) assert mock_logger.experiment.log_metric.call_count == 2 - mock_logger.experiment.log_metric.assert_has_calls([call("tag/loss", - y=mock_engine.state.output, - x=mock_another_engine.state.epoch)]) + mock_logger.experiment.log_metric.assert_has_calls( + [call("tag/loss", y=mock_engine.state.output, x=mock_another_engine.state.epoch)] + ) def test_output_handler_with_global_step_transform(): @@ -267,12 +267,15 @@ def _test(tag=None): tag_prefix = "{}/".format(tag) if tag else "" assert mock_logger.experiment.log_metric.call_count == 4 - mock_logger.experiment.log_metric.assert_has_calls([ - 
call(tag_prefix + "weights_norm/fc1/weight", y=0.0, x=5), - call(tag_prefix + "weights_norm/fc1/bias", y=0.0, x=5), - call(tag_prefix + "weights_norm/fc2/weight", y=12.0, x=5), - call(tag_prefix + "weights_norm/fc2/bias", y=math.sqrt(12.0), x=5), - ], any_order=True) + mock_logger.experiment.log_metric.assert_has_calls( + [ + call(tag_prefix + "weights_norm/fc1/weight", y=0.0, x=5), + call(tag_prefix + "weights_norm/fc1/bias", y=0.0, x=5), + call(tag_prefix + "weights_norm/fc2/weight", y=12.0, x=5), + call(tag_prefix + "weights_norm/fc2/bias", y=math.sqrt(12.0), x=5), + ], + any_order=True, + ) _test() _test(tag="tag") @@ -291,16 +294,16 @@ def test_weights_scalar_handler_frozen_layers(dummy_model_factory): wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) - mock_logger.experiment.log_metric.assert_has_calls([ - call("weights_norm/fc2/weight", y=12.0, x=5), - call("weights_norm/fc2/bias", y=math.sqrt(12.0), x=5), - ], any_order=True) + mock_logger.experiment.log_metric.assert_has_calls( + [call("weights_norm/fc2/weight", y=12.0, x=5), call("weights_norm/fc2/bias", y=math.sqrt(12.0), x=5),], + any_order=True, + ) with pytest.raises(AssertionError): - mock_logger.experiment.log_metric.assert_has_calls([ - call("weights_norm/fc1/weight", y=12.0, x=5), - call("weights_norm/fc1/bias", y=math.sqrt(12.0), x=5), - ], any_order=True) + mock_logger.experiment.log_metric.assert_has_calls( + [call("weights_norm/fc1/weight", y=12.0, x=5), call("weights_norm/fc1/bias", y=math.sqrt(12.0), x=5),], + any_order=True, + ) assert mock_logger.experiment.log_metric.call_count == 2 @@ -338,12 +341,15 @@ def _test(tag=None): tag_prefix = "{}/".format(tag) if tag else "" - mock_logger.experiment.log_metric.assert_has_calls([ - call(tag_prefix + "grads_norm/fc1/weight", y=ANY, x=5), - call(tag_prefix + "grads_norm/fc1/bias", y=ANY, x=5), - call(tag_prefix + "grads_norm/fc2/weight", y=ANY, x=5), - call(tag_prefix + "grads_norm/fc2/bias", y=ANY, x=5), - ], any_order=True) + mock_logger.experiment.log_metric.assert_has_calls( + [ + call(tag_prefix + "grads_norm/fc1/weight", y=ANY, x=5), + call(tag_prefix + "grads_norm/fc1/bias", y=ANY, x=5), + call(tag_prefix + "grads_norm/fc2/weight", y=ANY, x=5), + call(tag_prefix + "grads_norm/fc2/bias", y=ANY, x=5), + ], + any_order=True, + ) assert mock_logger.experiment.log_metric.call_count == 4 assert norm_mock.call_count == 4 @@ -365,16 +371,14 @@ def test_grads_scalar_handler_frozen_layers(dummy_model_factory, norm_mock): wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) - mock_logger.experiment.log_metric.assert_has_calls([ - call("grads_norm/fc2/weight", y=ANY, x=5), - call("grads_norm/fc2/bias", y=ANY, x=5), - ], any_order=True) + mock_logger.experiment.log_metric.assert_has_calls( + [call("grads_norm/fc2/weight", y=ANY, x=5), call("grads_norm/fc2/bias", y=ANY, x=5),], any_order=True + ) with pytest.raises(AssertionError): - mock_logger.experiment.log_metric.assert_has_calls([ - call("grads_norm/fc1/weight", y=ANY, x=5), - call("grads_norm/fc1/bias", y=ANY, x=5), - ], any_order=True) + mock_logger.experiment.log_metric.assert_has_calls( + [call("grads_norm/fc1/weight", y=ANY, x=5), call("grads_norm/fc1/bias", y=ANY, x=5),], any_order=True + ) assert mock_logger.experiment.log_metric.call_count == 2 assert norm_mock.call_count == 2 @@ -397,9 +401,7 @@ def dummy_handler(engine, logger, event_name): global_step = engine.state.get_event_attrib_value(event_name) logger.experiment.log_metric("test_value", global_step, global_step) - npt_logger.attach(trainer, - 
log_handler=dummy_handler, - event_name=Events.EPOCH_COMPLETED) + npt_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED) trainer.run(data, max_epochs=n_epochs) npt_logger.close() @@ -422,9 +424,7 @@ def dummy_handler(engine, logger, event_name): global_step = engine.state.get_event_attrib_value(event_name) logger.experiment.log_metric("test_value", global_step, global_step) - npt_logger.attach(trainer, - log_handler=dummy_handler, - event_name=Events.EPOCH_COMPLETED) + npt_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED) trainer.run(data, max_epochs=n_epochs) diff --git a/tests/ignite/contrib/handlers/test_param_scheduler.py b/tests/ignite/contrib/handlers/test_param_scheduler.py index 19fbc71fc904..5a74de524e07 100644 --- a/tests/ignite/contrib/handlers/test_param_scheduler.py +++ b/tests/ignite/contrib/handlers/test_param_scheduler.py @@ -13,22 +13,22 @@ def test_linear_scheduler(): with pytest.raises(TypeError, match=r"Argument optimizer should be torch.optim.Optimizer"): - LinearCyclicalScheduler({}, 'lr', 1, 0, cycle_size=0) + LinearCyclicalScheduler({}, "lr", 1, 0, cycle_size=0) tensor = torch.zeros([1], requires_grad=True) optimizer = torch.optim.SGD([tensor], lr=0.0) with pytest.raises(ValueError, match=r"Argument cycle_size should be positive and larger than 1"): - LinearCyclicalScheduler(optimizer, 'lr', 1, 0, cycle_size=0) + LinearCyclicalScheduler(optimizer, "lr", 1, 0, cycle_size=0) with pytest.raises(ValueError, match=r"Argument cycle_size should be positive and larger than 1"): - LinearCyclicalScheduler(optimizer, 'lr', 1, 0, cycle_size=1) + LinearCyclicalScheduler(optimizer, "lr", 1, 0, cycle_size=1) - scheduler = LinearCyclicalScheduler(optimizer, 'lr', 1, 0, 10) + scheduler = LinearCyclicalScheduler(optimizer, "lr", 1, 0, 10) state_dict = scheduler.state_dict() def save_lr(engine): - lrs.append(optimizer.param_groups[0]['lr']) + lrs.append(optimizer.param_groups[0]["lr"]) trainer = Engine(lambda engine, batch: None) trainer.add_event_handler(Events.ITERATION_STARTED, scheduler) @@ -38,18 +38,37 @@ def save_lr(engine): lrs = [] trainer.run([0] * 9, max_epochs=2) - assert lrs == list(map(pytest.approx, [ - # Cycle 1 - 1.0, 0.8, 0.6, 0.4, 0.2, - 0.0, 0.2, 0.4, 0.6, 0.8, - # Cycle 2 - 1.0, 0.8, 0.6, 0.4, 0.2, - 0.0, 0.2, 0.4, # 0.6, 0.8, - ])) + assert lrs == list( + map( + pytest.approx, + [ + # Cycle 1 + 1.0, + 0.8, + 0.6, + 0.4, + 0.2, + 0.0, + 0.2, + 0.4, + 0.6, + 0.8, + # Cycle 2 + 1.0, + 0.8, + 0.6, + 0.4, + 0.2, + 0.0, + 0.2, + 0.4, # 0.6, 0.8, + ], + ) + ) scheduler.load_state_dict(state_dict) optimizer = torch.optim.SGD([tensor], lr=0) - scheduler = LinearCyclicalScheduler(optimizer, 'lr', 1, 0, 10, cycle_mult=2) + scheduler = LinearCyclicalScheduler(optimizer, "lr", 1, 0, 10, cycle_mult=2) state_dict = scheduler.state_dict() trainer = Engine(lambda engine, batch: None) @@ -60,23 +79,52 @@ def save_lr(engine): lrs = [] trainer.run([0] * 10, max_epochs=3) - assert lrs == list(map(pytest.approx, [ - # Cycle 1 - 1.0, 0.8, 0.6, 0.4, 0.2, - 0.0, 0.2, 0.4, 0.6, 0.8, - # Cycle 2 - 1.0, 0.9, 0.8, 0.7, 0.6, - 0.5, 0.4, 0.3, 0.2, 0.1, - 0.0, 0.1, 0.2, 0.3, 0.4, - 0.5, 0.6, 0.7, 0.8, 0.9, - ])) + assert lrs == list( + map( + pytest.approx, + [ + # Cycle 1 + 1.0, + 0.8, + 0.6, + 0.4, + 0.2, + 0.0, + 0.2, + 0.4, + 0.6, + 0.8, + # Cycle 2 + 1.0, + 0.9, + 0.8, + 0.7, + 0.6, + 0.5, + 0.4, + 0.3, + 0.2, + 0.1, + 0.0, + 0.1, + 0.2, + 0.3, + 0.4, + 0.5, + 0.6, + 0.7, + 0.8, + 0.9, + ], + ) + ) 
scheduler.load_state_dict(state_dict) # With float cycle_size optimizer = torch.optim.SGD([tensor], lr=0) - scheduler = LinearCyclicalScheduler(optimizer, 'lr', - start_value=1.2, end_value=0.2, - cycle_size=10.00000012, cycle_mult=1.0) + scheduler = LinearCyclicalScheduler( + optimizer, "lr", start_value=1.2, end_value=0.2, cycle_size=10.00000012, cycle_mult=1.0 + ) state_dict = scheduler.state_dict() trainer = Engine(lambda engine, batch: None) @@ -86,14 +134,33 @@ def save_lr(engine): for _ in range(2): lrs = [] trainer.run([0] * 9, max_epochs=2) - assert lrs == list(map(pytest.approx, [ - # Cycle 1 - 1.2, 1.0, 0.8, 0.6, 0.4, - 0.2, 0.4, 0.6, 0.8, 1.0, - # Cycle 2 - 1.2, 1.0, 0.8, 0.6, 0.4, - 0.2, 0.4, 0.6, # 0.8, 1.0, - ])) + assert lrs == list( + map( + pytest.approx, + [ + # Cycle 1 + 1.2, + 1.0, + 0.8, + 0.6, + 0.4, + 0.2, + 0.4, + 0.6, + 0.8, + 1.0, + # Cycle 2 + 1.2, + 1.0, + 0.8, + 0.6, + 0.4, + 0.2, + 0.4, + 0.6, # 0.8, 1.0, + ], + ) + ) scheduler.load_state_dict(state_dict) @@ -101,16 +168,16 @@ def test_linear_scheduler_cycle_size_two(): tensor = torch.zeros([1], requires_grad=True) optimizer = torch.optim.SGD([tensor], lr=0) - scheduler = LinearCyclicalScheduler(optimizer, 'lr', 1, 0, cycle_size=2) + scheduler = LinearCyclicalScheduler(optimizer, "lr", 1, 0, cycle_size=2) data = [0] * 10 max_epochs = 2 - simulated_values = LinearCyclicalScheduler.simulate_values(num_events=len(data) * max_epochs, - param_name='lr', - start_value=1, end_value=0, cycle_size=2) + simulated_values = LinearCyclicalScheduler.simulate_values( + num_events=len(data) * max_epochs, param_name="lr", start_value=1, end_value=0, cycle_size=2 + ) def save_lr(engine): - lrs.append(optimizer.param_groups[0]['lr']) + lrs.append(optimizer.param_groups[0]["lr"]) trainer = Engine(lambda engine, batch: None) trainer.add_event_handler(Events.ITERATION_STARTED, scheduler) @@ -118,10 +185,12 @@ def save_lr(engine): lrs = [] trainer.run(data, max_epochs=max_epochs) - assert lrs == list(map(pytest.approx, [ - 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, - 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0 - ])) + assert lrs == list( + map( + pytest.approx, + [1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0], + ) + ) assert lrs == pytest.approx([v for i, v in simulated_values]) @@ -130,17 +199,17 @@ def test_cosine_annealing_scheduler(): tensor = torch.zeros([1], requires_grad=True) optimizer = torch.optim.SGD([tensor], lr=0) - scheduler = CosineAnnealingScheduler(optimizer, 'lr', 0, 1, 10) + scheduler = CosineAnnealingScheduler(optimizer, "lr", 0, 1, 10) state_dict = scheduler.state_dict() data = [0] * 9 max_epochs = 2 - simulated_values = CosineAnnealingScheduler.simulate_values(num_events=len(data) * max_epochs, - param_name='lr', - start_value=0, end_value=1, cycle_size=10) + simulated_values = CosineAnnealingScheduler.simulate_values( + num_events=len(data) * max_epochs, param_name="lr", start_value=0, end_value=1, cycle_size=10 + ) def save_lr(engine): - lrs.append(optimizer.param_groups[0]['lr']) + lrs.append(optimizer.param_groups[0]["lr"]) trainer = Engine(lambda engine, batch: None) trainer.add_event_handler(Events.ITERATION_STARTED, scheduler) @@ -150,12 +219,31 @@ def save_lr(engine): lrs = [] trainer.run(data, max_epochs=max_epochs) - assert lrs == list(map(pytest.approx, [ - 0.0, 0.02447174185242318, 0.09549150281252627, 0.20610737385376332, 0.3454915028125263, - 0.5, 0.6545084971874737, 0.7938926261462365, 0.9045084971874737, 0.9755282581475768, - 
0.0, 0.02447174185242318, 0.09549150281252627, 0.20610737385376332, 0.3454915028125263, - 0.5, 0.6545084971874737, 0.7938926261462365, # 0.9045084971874737, 0.9755282581475768 - ])) + assert lrs == list( + map( + pytest.approx, + [ + 0.0, + 0.02447174185242318, + 0.09549150281252627, + 0.20610737385376332, + 0.3454915028125263, + 0.5, + 0.6545084971874737, + 0.7938926261462365, + 0.9045084971874737, + 0.9755282581475768, + 0.0, + 0.02447174185242318, + 0.09549150281252627, + 0.20610737385376332, + 0.3454915028125263, + 0.5, + 0.6545084971874737, + 0.7938926261462365, # 0.9045084971874737, 0.9755282581475768 + ], + ) + ) scheduler.load_state_dict(state_dict) assert lrs == pytest.approx([v for i, v in simulated_values]) @@ -173,10 +261,10 @@ def test_concat_scheduler_asserts(): ConcatScheduler(schedulers=[], durations=[]) with pytest.raises(ValueError): - ConcatScheduler(schedulers=[scheduler_1, ], durations=[10, ]) + ConcatScheduler(schedulers=[scheduler_1,], durations=[10,]) with pytest.raises(TypeError): - ConcatScheduler(schedulers=[scheduler_1, 12], durations=[10, ]) + ConcatScheduler(schedulers=[scheduler_1, 12], durations=[10,]) with pytest.raises(ValueError): ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=[10, 5]) @@ -188,8 +276,9 @@ def test_concat_scheduler_asserts(): ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations="abc") with pytest.raises(ValueError): - ConcatScheduler.simulate_values(num_events=123, schedulers=[scheduler_1, scheduler_2], - durations=[15, ], param_names="abc") + ConcatScheduler.simulate_values( + num_events=123, schedulers=[scheduler_1, scheduler_2], durations=[15,], param_names="abc" + ) def test_concat_scheduler_state_dict(): @@ -197,13 +286,15 @@ def test_concat_scheduler_state_dict(): optimizer = torch.optim.SGD([tensor], lr=0) scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.0, cycle_size=10) scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", start_value=0.0, end_value=1.0, cycle_size=10) - durations = [10, ] + durations = [ + 10, + ] concat_scheduler = ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=durations, save_history=False) state_dict = concat_scheduler.state_dict() - assert state_dict['durations'] == durations - assert state_dict['_current_duration'] == durations[0] - assert state_dict['_scheduler_index'] == 0 + assert state_dict["durations"] == durations + assert state_dict["_current_duration"] == durations[0] + assert state_dict["_scheduler_index"] == 0 for _ in range(20): concat_scheduler(None, None) @@ -214,10 +305,10 @@ def test_concat_scheduler_state_dict(): assert id(concat_scheduler._current_scheduler) == id(scheduler_1) with pytest.raises(ValueError, match=r"Required state attribute 'schedulers' is absent in provided state_dict"): - concat_scheduler.load_state_dict({'a': 1}) + concat_scheduler.load_state_dict({"a": 1}) with pytest.raises(ValueError, match=r"Input state_dict contains 0 state_dicts of concatenated schedulers"): - concat_scheduler.load_state_dict({'schedulers': []}) + concat_scheduler.load_state_dict({"schedulers": []}) def test_concat_scheduler_two_schedulers(): @@ -228,22 +319,25 @@ def _test(duration_vals_as_np_int): scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.0, cycle_size=10) scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", start_value=0.0, end_value=1.0, cycle_size=10) - durations = [10, ] + durations = [ + 10, + ] if duration_vals_as_np_int: durations = [np.int64(t) for t in 
durations] - concat_scheduler = ConcatScheduler(schedulers=[scheduler_1, scheduler_2], - durations=durations, save_history=True) + concat_scheduler = ConcatScheduler( + schedulers=[scheduler_1, scheduler_2], durations=durations, save_history=True + ) state_dict = concat_scheduler.state_dict() data = [0] * 10 max_epochs = 2 - simulated_values = ConcatScheduler.simulate_values(num_events=len(data) * max_epochs, - schedulers=[scheduler_1, scheduler_2], - durations=durations) + simulated_values = ConcatScheduler.simulate_values( + num_events=len(data) * max_epochs, schedulers=[scheduler_1, scheduler_2], durations=durations + ) def save_lr(engine): - lrs.append(optimizer.param_groups[0]['lr']) + lrs.append(optimizer.param_groups[0]["lr"]) trainer = Engine(lambda engine, batch: None) trainer.add_event_handler(Events.ITERATION_STARTED, concat_scheduler) @@ -253,16 +347,37 @@ def save_lr(engine): lrs = [] trainer.run(data, max_epochs=max_epochs) - assert lrs == list(map(pytest.approx, [ - # Cycle 1 of the LinearCyclicalScheduler - 1.0, 0.8, 0.6, 0.4, 0.2, - 0.0, 0.2, 0.4, 0.6, 0.8, - # Cycle 1 of the CosineAnnealingScheduler - 0.0, 0.02447174185242318, 0.09549150281252627, 0.20610737385376332, 0.3454915028125263, - 0.5, 0.6545084971874737, 0.7938926261462365, 0.9045084971874737, 0.9755282581475768, - ])) + assert lrs == list( + map( + pytest.approx, + [ + # Cycle 1 of the LinearCyclicalScheduler + 1.0, + 0.8, + 0.6, + 0.4, + 0.2, + 0.0, + 0.2, + 0.4, + 0.6, + 0.8, + # Cycle 1 of the CosineAnnealingScheduler + 0.0, + 0.02447174185242318, + 0.09549150281252627, + 0.20610737385376332, + 0.3454915028125263, + 0.5, + 0.6545084971874737, + 0.7938926261462365, + 0.9045084971874737, + 0.9755282581475768, + ], + ) + ) - state_lrs = trainer.state.param_history['lr'] + state_lrs = trainer.state.param_history["lr"] assert len(state_lrs) == len(lrs) # Unpack singleton lists assert [group[0] for group in state_lrs] == lrs @@ -280,21 +395,22 @@ def test_concat_scheduler_two_linear(): scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=0.0, end_value=0.1, cycle_size=2) scheduler_2 = LinearCyclicalScheduler(optimizer, "lr", start_value=0.2, end_value=1.0, cycle_size=2) - durations = [5, ] - concat_scheduler = ConcatScheduler(schedulers=[scheduler_1, scheduler_2], - durations=durations, save_history=True) + durations = [ + 5, + ] + concat_scheduler = ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=durations, save_history=True) state_dict = concat_scheduler.state_dict() assert concat_scheduler.get_param() == 0.0 data = [0] * 10 max_epochs = 2 - simulated_values = ConcatScheduler.simulate_values(num_events=len(data) * max_epochs, - schedulers=[scheduler_1, scheduler_2], - durations=durations) + simulated_values = ConcatScheduler.simulate_values( + num_events=len(data) * max_epochs, schedulers=[scheduler_1, scheduler_2], durations=durations + ) def save_lr(engine): - lrs.append(optimizer.param_groups[0]['lr']) + lrs.append(optimizer.param_groups[0]["lr"]) trainer = Engine(lambda engine, batch: None) trainer.add_event_handler(Events.ITERATION_STARTED, concat_scheduler) @@ -304,15 +420,37 @@ def save_lr(engine): lrs = [] trainer.run(data, max_epochs=max_epochs) - assert lrs == list(map(pytest.approx, [ - # first LinearCyclicalScheduler - 0.0, 0.1, 0.0, 0.1, 0.0, - # second LinearCyclicalScheduler - 0.2, 1.0, 0.2, 1.0, 0.2, 1.0, 0.2, 1.0, - 0.2, 1.0, 0.2, 1.0, 0.2, 1.0, 0.2, - ])) + assert lrs == list( + map( + pytest.approx, + [ + # first LinearCyclicalScheduler + 0.0, + 0.1, + 0.0, + 
0.1, + 0.0, + # second LinearCyclicalScheduler + 0.2, + 1.0, + 0.2, + 1.0, + 0.2, + 1.0, + 0.2, + 1.0, + 0.2, + 1.0, + 0.2, + 1.0, + 0.2, + 1.0, + 0.2, + ], + ) + ) - state_lrs = trainer.state.param_history['lr'] + state_lrs = trainer.state.param_history["lr"] assert len(state_lrs) == len(lrs) # Unpack singleton lists assert [group[0] for group in state_lrs] == lrs @@ -330,18 +468,19 @@ def test_concat_scheduler_3_schedulers(): scheduler_3 = LinearCyclicalScheduler(optimizer, "lr", start_value=0.5, end_value=0.0, cycle_size=20) durations = [10, 5] - concat_scheduler = ConcatScheduler(schedulers=[scheduler_1, scheduler_2, scheduler_3], - durations=durations, save_history=True) + concat_scheduler = ConcatScheduler( + schedulers=[scheduler_1, scheduler_2, scheduler_3], durations=durations, save_history=True + ) state_dict = concat_scheduler.state_dict() data = [0] * 10 max_epochs = 2 - simulated_values = ConcatScheduler.simulate_values(num_events=len(data) * max_epochs, - schedulers=[scheduler_1, scheduler_2, scheduler_3], - durations=durations) + simulated_values = ConcatScheduler.simulate_values( + num_events=len(data) * max_epochs, schedulers=[scheduler_1, scheduler_2, scheduler_3], durations=durations + ) def save_lr(engine): - lrs.append(optimizer.param_groups[0]['lr']) + lrs.append(optimizer.param_groups[0]["lr"]) trainer = Engine(lambda engine, batch: None) trainer.add_event_handler(Events.ITERATION_STARTED, concat_scheduler) @@ -351,16 +490,38 @@ def save_lr(engine): lrs = [] trainer.run(data, max_epochs=max_epochs) - assert lrs == list(map(pytest.approx, [ - # Cycle 1 of the first LinearCyclicalScheduler - 1.0, 0.95, 0.9, 0.85, 0.8, 0.75, 0.7, 0.65, 0.6, 0.55, - # Cycle 1 of the second LinearCyclicalScheduler - 0.5, 0.49, 0.48, 0.47, 0.46, - # Cycle 1 of the third LinearCyclicalScheduler - 0.5, 0.45, 0.4, 0.35, 0.3, - ])) + assert lrs == list( + map( + pytest.approx, + [ + # Cycle 1 of the first LinearCyclicalScheduler + 1.0, + 0.95, + 0.9, + 0.85, + 0.8, + 0.75, + 0.7, + 0.65, + 0.6, + 0.55, + # Cycle 1 of the second LinearCyclicalScheduler + 0.5, + 0.49, + 0.48, + 0.47, + 0.46, + # Cycle 1 of the third LinearCyclicalScheduler + 0.5, + 0.45, + 0.4, + 0.35, + 0.3, + ], + ) + ) - state_lrs = trainer.state.param_history['lr'] + state_lrs = trainer.state.param_history["lr"] assert len(state_lrs) == len(lrs) # Unpack singleton lists assert [group[0] for group in state_lrs] == lrs @@ -373,20 +534,20 @@ def test_save_param_history(): tensor = torch.zeros([1], requires_grad=True) optimizer = torch.optim.SGD([tensor], lr=0) - scheduler = LinearCyclicalScheduler(optimizer, 'lr', 1, 0, 10, save_history=True) + scheduler = LinearCyclicalScheduler(optimizer, "lr", 1, 0, 10, save_history=True) lrs = [] def save_lr(engine): - lrs.append(optimizer.param_groups[0]['lr']) + lrs.append(optimizer.param_groups[0]["lr"]) trainer = Engine(lambda engine, batch: None) - assert not hasattr(trainer.state, 'param_history') + assert not hasattr(trainer.state, "param_history") trainer.add_event_handler(Events.ITERATION_STARTED, scheduler) trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr) trainer.run([0] * 10, max_epochs=2) - state_lrs = trainer.state.param_history['lr'] + state_lrs = trainer.state.param_history["lr"] assert len(state_lrs) == len(lrs) # Unpack singleton lists assert [group[0] for group in state_lrs] == lrs @@ -396,10 +557,7 @@ def test_lr_scheduler_asserts(): t1 = torch.zeros([1], requires_grad=True) t2 = torch.zeros([1], requires_grad=True) - optimizer = torch.optim.SGD([ - 
{"params": t1, 'lr': 0.1}, - {"params": t2, 'lr': 0.1}, - ]) + optimizer = torch.optim.SGD([{"params": t1, "lr": 0.1}, {"params": t2, "lr": 0.1},]) lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.98) with pytest.raises(ValueError): @@ -413,7 +571,6 @@ def test_lr_scheduler_asserts(): def test_lr_scheduler(): - def _test(torch_lr_scheduler_cls, **kwargs): tensor = torch.zeros([1], requires_grad=True) @@ -437,11 +594,11 @@ def dummy_update(engine, batch): @trainer.on(Events.ITERATION_STARTED) def save_lr(engine): - lrs.append(optimizer1.param_groups[0]['lr']) + lrs.append(optimizer1.param_groups[0]["lr"]) @trainer.on(Events.ITERATION_STARTED) def save_true_lr(engine): - lrs_true.append(optimizer2.param_groups[0]['lr']) + lrs_true.append(optimizer2.param_groups[0]["lr"]) @trainer.on(Events.ITERATION_COMPLETED) def torch_lr_scheduler_step(engine): @@ -455,8 +612,9 @@ def torch_lr_scheduler_step(engine): data = [0] * 10 max_epochs = 2 trainer.run(data, max_epochs=max_epochs) - assert lrs_true == pytest.approx(lrs), \ - "{}: {} ({}) vs {} ({})".format(_, lrs_true, len(lrs_true), lrs, len(lrs)) + assert lrs_true == pytest.approx(lrs), "{}: {} ({}) vs {} ({})".format( + _, lrs_true, len(lrs_true), lrs, len(lrs) + ) optimizer1.load_state_dict(opt_state_dict1) scheduler.load_state_dict(state_dict1) optimizer2.load_state_dict(opt_state_dict2) @@ -465,8 +623,9 @@ def torch_lr_scheduler_step(engine): optimizer3 = torch.optim.SGD([tensor], lr=0.01) torch_lr_scheduler3 = torch_lr_scheduler_cls(optimizer=optimizer3, **kwargs) - simulated_values = LRScheduler.simulate_values(num_events=len(data) * max_epochs, - lr_scheduler=torch_lr_scheduler3) + simulated_values = LRScheduler.simulate_values( + num_events=len(data) * max_epochs, lr_scheduler=torch_lr_scheduler3 + ) assert lrs == pytest.approx([v for i, v in simulated_values]) _test(torch.optim.lr_scheduler.StepLR, step_size=5, gamma=0.5) @@ -497,7 +656,7 @@ def test_piecewiselinear_asserts(): PiecewiseLinear(optimizer, "lr", milestones_values=[]) with pytest.raises(ValueError): - PiecewiseLinear(optimizer, "lr", milestones_values=[(0.5,), ]) + PiecewiseLinear(optimizer, "lr", milestones_values=[(0.5,),]) with pytest.raises(ValueError): PiecewiseLinear(optimizer, "lr", milestones_values=[(10, 0.5), (0.6,)]) @@ -507,25 +666,19 @@ def test_piecewiselinear_asserts(): def test_piecewiselinear(): - def _test(milestones_as_np_int): tensor = torch.zeros([1], requires_grad=True) optimizer = torch.optim.SGD([tensor], lr=0) - milestones_values = [(5, 0.5), - (15, 1.0), - (25, 0.0), - (35, 1.0), - (40, 0.5)] + milestones_values = [(5, 0.5), (15, 1.0), (25, 0.0), (35, 1.0), (40, 0.5)] if milestones_as_np_int: milestones_values = [(np.int64(t), v) for t, v in milestones_values] - scheduler = PiecewiseLinear(optimizer, 'lr', - milestones_values=milestones_values) + scheduler = PiecewiseLinear(optimizer, "lr", milestones_values=milestones_values) state_dict = scheduler.state_dict() def save_lr(engine): - lrs.append(optimizer.param_groups[0]['lr']) + lrs.append(optimizer.param_groups[0]["lr"]) trainer = Engine(lambda engine, batch: None) trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler) @@ -535,14 +688,63 @@ def save_lr(engine): lrs = [] trainer.run([0] * 25, max_epochs=2) - assert lrs == list(map(pytest.approx, [ - 0.5, 0.5, 0.5, 0.5, 0.5, - 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, - 1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, - 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, - 1.0, 0.9, 0.8, 0.7, 
0.6, - 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5 - ])) + assert lrs == list( + map( + pytest.approx, + [ + 0.5, + 0.5, + 0.5, + 0.5, + 0.5, + 0.5, + 0.55, + 0.6, + 0.65, + 0.7, + 0.75, + 0.8, + 0.85, + 0.9, + 0.95, + 1.0, + 0.9, + 0.8, + 0.7, + 0.6, + 0.5, + 0.4, + 0.3, + 0.2, + 0.1, + 0.0, + 0.1, + 0.2, + 0.3, + 0.4, + 0.5, + 0.6, + 0.7, + 0.8, + 0.9, + 1.0, + 0.9, + 0.8, + 0.7, + 0.6, + 0.5, + 0.5, + 0.5, + 0.5, + 0.5, + 0.5, + 0.5, + 0.5, + 0.5, + 0.5, + ], + ) + ) scheduler.load_state_dict(state_dict) _test(milestones_as_np_int=True) @@ -552,23 +754,24 @@ def save_lr(engine): def test_simulate_and_plot_values(): import matplotlib - matplotlib.use('Agg') + + matplotlib.use("Agg") def _test(scheduler_cls, **scheduler_kwargs): optimizer = None event = Events.ITERATION_STARTED if scheduler_cls == LRScheduler: - scheduler_kwargs['optimizer'] = scheduler_kwargs['lr_scheduler'].optimizer - optimizer = scheduler_kwargs['optimizer'] + scheduler_kwargs["optimizer"] = scheduler_kwargs["lr_scheduler"].optimizer + optimizer = scheduler_kwargs["optimizer"] event = Events.ITERATION_COMPLETED elif scheduler_cls == ConcatScheduler: - optimizer = scheduler_kwargs['optimizer'] - del scheduler_kwargs['optimizer'] + optimizer = scheduler_kwargs["optimizer"] + del scheduler_kwargs["optimizer"] else: tensor = torch.zeros([1], requires_grad=True) - scheduler_kwargs['optimizer'] = torch.optim.SGD([tensor], lr=0.1) - optimizer = scheduler_kwargs['optimizer'] + scheduler_kwargs["optimizer"] = torch.optim.SGD([tensor], lr=0.1) + optimizer = scheduler_kwargs["optimizer"] max_epochs = 2 data = [0] * 10 @@ -579,7 +782,7 @@ def _test(scheduler_cls, **scheduler_kwargs): lrs = [] def save_lr(engine): - lrs.append(optimizer.param_groups[0]['lr']) + lrs.append(optimizer.param_groups[0]["lr"]) trainer = Engine(lambda engine, batch: None) trainer.add_event_handler(event, scheduler) @@ -617,7 +820,9 @@ def save_lr(engine): # ConcatScheduler = [LinearCyclicalScheduler, CosineAnnealingScheduler] scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.0, cycle_size=20) scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", start_value=0.0, end_value=1.0, cycle_size=10) - durations = [10, ] + durations = [ + 10, + ] _test(ConcatScheduler, optimizer=optimizer, schedulers=[scheduler_1, scheduler_2], durations=durations) # ConcatScheduler = [LinearCyclicalScheduler, LRScheduler] @@ -626,14 +831,20 @@ def save_lr(engine): torch_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=1.5) scheduler_1 = LRScheduler(torch_lr_scheduler) scheduler_2 = LinearCyclicalScheduler(optimizer, "lr", start_value=0.1, end_value=0.0, cycle_size=10) - durations = [10, ] + durations = [ + 10, + ] _test(ConcatScheduler, optimizer=optimizer, schedulers=[scheduler_1, scheduler_2], durations=durations) # PiecewiseLinear tensor = torch.ones([1], requires_grad=True) optimizer = torch.optim.SGD([tensor], lr=0.001) - _test(PiecewiseLinear, optimizer=optimizer, param_name="lr", - milestones_values=[(10, 0.5), (20, 0.45), (21, 0.3), (30, 0.1), (40, 0.1)]) + _test( + PiecewiseLinear, + optimizer=optimizer, + param_name="lr", + milestones_values=[(10, 0.5), (20, 0.45), (21, 0.3), (30, 0.1), (40, 0.1)], + ) def test_create_lr_scheduler_with_warmup(): @@ -647,35 +858,45 @@ def test_create_lr_scheduler_with_warmup(): torch_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.98) with pytest.raises(ValueError, match=r"Argument warmup_duration should be at least 2 events"): - 
create_lr_scheduler_with_warmup(torch_lr_scheduler, warmup_start_value=0.0, - warmup_end_value=0.1, warmup_duration=1) + create_lr_scheduler_with_warmup( + torch_lr_scheduler, warmup_start_value=0.0, warmup_end_value=0.1, warmup_duration=1 + ) with pytest.raises(ValueError, match=r"Argument warmup_duration should be at least 2 events"): - create_lr_scheduler_with_warmup(torch_lr_scheduler, warmup_start_value=0.0, - warmup_end_value=0.1, warmup_duration="abc") + create_lr_scheduler_with_warmup( + torch_lr_scheduler, warmup_start_value=0.0, warmup_end_value=0.1, warmup_duration="abc" + ) with pytest.raises(TypeError, match=r"Argument output_simulated_values should be a list of None"): simulated_values = () - create_lr_scheduler_with_warmup(torch_lr_scheduler, warmup_start_value=0.0, warmup_end_value=0.1, - warmup_duration=10, output_simulated_values=simulated_values) + create_lr_scheduler_with_warmup( + torch_lr_scheduler, + warmup_start_value=0.0, + warmup_end_value=0.1, + warmup_duration=10, + output_simulated_values=simulated_values, + ) def _test(lr_scheduler, optimizer, warmup_start_value, warmup_end_value, warmup_duration, warmup_end_next_value): num_iterations = 10 max_epochs = 20 simulated_values = [None] * (num_iterations * max_epochs) - scheduler = create_lr_scheduler_with_warmup(lr_scheduler, - warmup_start_value=warmup_start_value, - warmup_end_value=warmup_end_value, - warmup_duration=warmup_duration, - output_simulated_values=simulated_values) + scheduler = create_lr_scheduler_with_warmup( + lr_scheduler, + warmup_start_value=warmup_start_value, + warmup_end_value=warmup_end_value, + warmup_duration=warmup_duration, + output_simulated_values=simulated_values, + ) state_dict = scheduler.state_dict() trainer = Engine(lambda engine, batch: None) trainer.add_event_handler(Events.ITERATION_STARTED, scheduler) + @trainer.on(Events.ITERATION_STARTED) def save_lr(engine): - lrs.append(optimizer.param_groups[0]['lr']) + lrs.append(optimizer.param_groups[0]["lr"]) data = [0] * num_iterations @@ -685,12 +906,13 @@ def save_lr(engine): assert lrs == pytest.approx([v for i, v in simulated_values]) - assert lrs[0] == pytest.approx(warmup_start_value), \ - "lrs={}".format(lrs[:warmup_duration + num_iterations]) - assert lrs[warmup_duration - 1] == pytest.approx(warmup_end_value), \ - "lrs={}".format(lrs[:warmup_duration + num_iterations]) - assert lrs[warmup_duration] == pytest.approx(warmup_end_next_value), \ - "lrs={}".format(lrs[:warmup_duration + num_iterations]) + assert lrs[0] == pytest.approx(warmup_start_value), "lrs={}".format(lrs[: warmup_duration + num_iterations]) + assert lrs[warmup_duration - 1] == pytest.approx(warmup_end_value), "lrs={}".format( + lrs[: warmup_duration + num_iterations] + ) + assert lrs[warmup_duration] == pytest.approx(warmup_end_next_value), "lrs={}".format( + lrs[: warmup_duration + num_iterations] + ) scheduler.load_state_dict(state_dict) t1 = torch.zeros([1], requires_grad=True) @@ -713,23 +935,27 @@ def save_lr(engine): # C) lr_scheduler start_value != warmup_end_value t1 = torch.zeros([1], requires_grad=True) optimizer = torch.optim.SGD([t1], lr=0.0) - lr_scheduler = LinearCyclicalScheduler(optimizer=optimizer, param_name='lr', - start_value=0.8, end_value=0.0, cycle_size=10) + lr_scheduler = LinearCyclicalScheduler( + optimizer=optimizer, param_name="lr", start_value=0.8, end_value=0.0, cycle_size=10 + ) _test(lr_scheduler, optimizer, 0.01, 0.05, 10, 0.8) optimizer = torch.optim.SGD([t1], lr=0.0) - lr_scheduler = 
LinearCyclicalScheduler(optimizer=optimizer, param_name='lr', - start_value=0.8, end_value=0.0, cycle_size=10) + lr_scheduler = LinearCyclicalScheduler( + optimizer=optimizer, param_name="lr", start_value=0.8, end_value=0.0, cycle_size=10 + ) _test(lr_scheduler, optimizer, 0.01, 0.05, 2, 0.8) # D) lr_scheduler start_value == warmup_end_value t1 = torch.zeros([1], requires_grad=True) optimizer = torch.optim.SGD([t1], lr=0.0) - lr_scheduler = LinearCyclicalScheduler(optimizer=optimizer, param_name='lr', - start_value=0.8, end_value=0.0, cycle_size=10) + lr_scheduler = LinearCyclicalScheduler( + optimizer=optimizer, param_name="lr", start_value=0.8, end_value=0.0, cycle_size=10 + ) _test(lr_scheduler, optimizer, 0.01, 0.8, 10, 0.8 - (0.8 / 5.0)) optimizer = torch.optim.SGD([t1], lr=0.0) - lr_scheduler = LinearCyclicalScheduler(optimizer=optimizer, param_name='lr', - start_value=0.8, end_value=0.0, cycle_size=10) + lr_scheduler = LinearCyclicalScheduler( + optimizer=optimizer, param_name="lr", start_value=0.8, end_value=0.0, cycle_size=10 + ) _test(lr_scheduler, optimizer, 0.01, 0.8, 2, 0.8 - (0.8 / 5.0)) @@ -746,17 +972,23 @@ def _test(save_history): warmup_duration = 5 * num_iterations_per_epoch cooldown_duration = 5 * num_iterations_per_epoch - scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", - start_value=lr_max_value, end_value=lr_max_value * 0.9, - cycle_size=(num_iterations - warmup_duration - cooldown_duration) * 2) + scheduler_1 = LinearCyclicalScheduler( + optimizer, + "lr", + start_value=lr_max_value, + end_value=lr_max_value * 0.9, + cycle_size=(num_iterations - warmup_duration - cooldown_duration) * 2, + ) - scheduler_2 = LinearCyclicalScheduler(optimizer, "lr", - start_value=lr_max_value, end_value=0.0, - cycle_size=cooldown_duration * 2) + scheduler_2 = LinearCyclicalScheduler( + optimizer, "lr", start_value=lr_max_value, end_value=0.0, cycle_size=cooldown_duration * 2 + ) - lr_scheduler = ConcatScheduler(schedulers=[scheduler_1, scheduler_2], - durations=[num_iterations - warmup_duration - cooldown_duration, ], - save_history=False) + lr_scheduler = ConcatScheduler( + schedulers=[scheduler_1, scheduler_2], + durations=[num_iterations - warmup_duration - cooldown_duration,], + save_history=False, + ) lr_values = [None] * num_iterations scheduler = create_lr_scheduler_with_warmup( lr_scheduler, @@ -764,7 +996,7 @@ def _test(save_history): warmup_end_value=lr_max_value, warmup_duration=warmup_duration, save_history=save_history, - output_simulated_values=lr_values + output_simulated_values=lr_values, ) state_dict = scheduler.state_dict() @@ -772,7 +1004,7 @@ def _test(save_history): @trainer.on(Events.ITERATION_COMPLETED) def save_lr(engine): - lrs.append(optimizer.param_groups[0]['lr']) + lrs.append(optimizer.param_groups[0]["lr"]) trainer.add_event_handler(Events.ITERATION_STARTED, scheduler) @@ -785,7 +1017,7 @@ def save_lr(engine): assert lrs == pytest.approx([v for i, v in lr_values]) if save_history: - param_history = trainer.state.param_history['lr'] + param_history = trainer.state.param_history["lr"] assert lrs == pytest.approx([v[0] for v in param_history]) scheduler.load_state_dict(state_dict) @@ -811,7 +1043,7 @@ def test_create_lr_scheduler_with_warmup_with_real_model(dummy_model_factory): warmup_start_value=0.0, warmup_end_value=scaled_lr, warmup_duration=warmup_duration, - output_simulated_values=output_simulated_values + output_simulated_values=output_simulated_values, ) assert output_simulated_values[0] == [0, 0.0] @@ -825,42 +1057,41 @@ def 
test_param_group_scheduler_asserts(): t1 = torch.zeros([1], requires_grad=True) t2 = torch.zeros([1], requires_grad=True) - optimizer = torch.optim.SGD([ - {"params": t1, 'lr': 0.1}, - {"params": t2, 'lr': 0.1}, - ]) + optimizer = torch.optim.SGD([{"params": t1, "lr": 0.1}, {"params": t2, "lr": 0.1},]) - lr_scheduler1 = LinearCyclicalScheduler(optimizer, "lr", param_group_index=0, - start_value=1.0, end_value=0.0, cycle_size=10) - lr_scheduler2 = LinearCyclicalScheduler(optimizer, "lr", param_group_index=1, - start_value=1.0, end_value=0.0, cycle_size=10) + lr_scheduler1 = LinearCyclicalScheduler( + optimizer, "lr", param_group_index=0, start_value=1.0, end_value=0.0, cycle_size=10 + ) + lr_scheduler2 = LinearCyclicalScheduler( + optimizer, "lr", param_group_index=1, start_value=1.0, end_value=0.0, cycle_size=10 + ) with pytest.raises(ValueError): - ParamGroupScheduler(schedulers=[0, 1, 2], names=['a', 'b', 'c']) + ParamGroupScheduler(schedulers=[0, 1, 2], names=["a", "b", "c"]) with pytest.raises(ValueError): - ParamGroupScheduler(schedulers=[lr_scheduler1, '2'], names=['a', 'b']) + ParamGroupScheduler(schedulers=[lr_scheduler1, "2"], names=["a", "b"]) with pytest.raises(ValueError): - ParamGroupScheduler(schedulers=[lr_scheduler1, lr_scheduler2], names='ab') + ParamGroupScheduler(schedulers=[lr_scheduler1, lr_scheduler2], names="ab") with pytest.raises(ValueError): - ParamGroupScheduler(schedulers=[lr_scheduler1, lr_scheduler2], names=['a', ]) + ParamGroupScheduler(schedulers=[lr_scheduler1, lr_scheduler2], names=["a",]) - scheduler = ParamGroupScheduler(schedulers=[lr_scheduler1, lr_scheduler2], names=['a', 'b']) + scheduler = ParamGroupScheduler(schedulers=[lr_scheduler1, lr_scheduler2], names=["a", "b"]) with pytest.raises(ValueError, match=r"Required state attribute 'schedulers' is absent in provided state_dict"): - scheduler.load_state_dict({'a': 1}) + scheduler.load_state_dict({"a": 1}) with pytest.raises(ValueError, match=r"Input state_dict contains 0 state_dicts of param group schedulers"): - scheduler.load_state_dict({'schedulers': []}) + scheduler.load_state_dict({"schedulers": []}) - with pytest.raises(ValueError, match=r"Name of scheduler from input state dict does not " - r"correspond to required one"): - scheduler.load_state_dict({'schedulers': [('a', lr_scheduler1.state_dict()), ('bad_name', {})]}) + with pytest.raises( + ValueError, match=r"Name of scheduler from input state dict does not " r"correspond to required one" + ): + scheduler.load_state_dict({"schedulers": [("a", lr_scheduler1.state_dict()), ("bad_name", {})]}) def test_param_group_scheduler(): - def _test(lr_schedulers, optimizer): num_iterations = 10 max_epochs = 20 @@ -872,9 +1103,7 @@ def _test(lr_schedulers, optimizer): @trainer.on(Events.ITERATION_COMPLETED) def save_lr(engine): - lrs.append( - (optimizer.param_groups[0]['lr'], optimizer.param_groups[1]['lr']) - ) + lrs.append((optimizer.param_groups[0]["lr"], optimizer.param_groups[1]["lr"])) trainer.add_event_handler(Events.ITERATION_STARTED, scheduler) @@ -888,13 +1117,12 @@ def save_lr(engine): t1 = torch.zeros([1], requires_grad=True) t2 = torch.zeros([1], requires_grad=True) - optimizer = torch.optim.SGD([ - {"params": t1, 'lr': 0.1}, - {"params": t2, 'lr': 0.1}, - ]) - - lr_scheduler1 = LinearCyclicalScheduler(optimizer, "lr", param_group_index=0, - start_value=1.0, end_value=0.0, cycle_size=10) - lr_scheduler2 = LinearCyclicalScheduler(optimizer, "lr", param_group_index=1, - start_value=1.0, end_value=0.0, cycle_size=10) + optimizer = 
torch.optim.SGD([{"params": t1, "lr": 0.1}, {"params": t2, "lr": 0.1},]) + + lr_scheduler1 = LinearCyclicalScheduler( + optimizer, "lr", param_group_index=0, start_value=1.0, end_value=0.0, cycle_size=10 + ) + lr_scheduler2 = LinearCyclicalScheduler( + optimizer, "lr", param_group_index=1, start_value=1.0, end_value=0.0, cycle_size=10 + ) _test([lr_scheduler1, lr_scheduler2], optimizer) diff --git a/tests/ignite/contrib/handlers/test_polyaxon_logger.py b/tests/ignite/contrib/handlers/test_polyaxon_logger.py index a705f0cc66d7..f167192c1882 100644 --- a/tests/ignite/contrib/handlers/test_polyaxon_logger.py +++ b/tests/ignite/contrib/handlers/test_polyaxon_logger.py @@ -8,7 +8,7 @@ from ignite.engine import Engine, Events, State from ignite.contrib.handlers.polyaxon_logger import * -os.environ['POLYAXON_NO_OP'] = "1" +os.environ["POLYAXON_NO_OP"] = "1" def test_output_handler_with_wrong_logger_type(): @@ -57,14 +57,9 @@ def test_output_handler_metric_names(): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert mock_logger.log_metrics.call_count == 1 - mock_logger.log_metrics.assert_called_once_with( - step=5, - **{"tag/a": 12.23, - "tag/b": 23.45, - "tag/c": 10.0} - ) + mock_logger.log_metrics.assert_called_once_with(step=5, **{"tag/a": 12.23, "tag/b": 23.45, "tag/c": 10.0}) - wrapper = OutputHandler("tag", metric_names=["a", ]) + wrapper = OutputHandler("tag", metric_names=["a",]) mock_engine = MagicMock() mock_engine.state = State(metrics={"a": torch.Tensor([0.0, 1.0, 2.0, 3.0])}) @@ -76,13 +71,9 @@ def test_output_handler_metric_names(): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert mock_logger.log_metrics.call_count == 1 - mock_logger.log_metrics.assert_has_calls([ - call(step=5, - **{"tag/a/0": 0.0, - "tag/a/1": 1.0, - "tag/a/2": 2.0, - "tag/a/3": 3.0}), - ], any_order=True) + mock_logger.log_metrics.assert_has_calls( + [call(step=5, **{"tag/a/0": 0.0, "tag/a/1": 1.0, "tag/a/2": 2.0, "tag/a/3": 3.0}),], any_order=True + ) wrapper = OutputHandler("tag", metric_names=["a", "c"]) @@ -97,9 +88,7 @@ def test_output_handler_metric_names(): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert mock_logger.log_metrics.call_count == 1 - mock_logger.log_metrics.assert_has_calls([ - call(step=7, **{"tag/a": 55.56}) - ], any_order=True) + mock_logger.log_metrics.assert_has_calls([call(step=7, **{"tag/a": 55.56})], any_order=True) # all metrics wrapper = OutputHandler("tag", metric_names="all") @@ -113,12 +102,7 @@ def test_output_handler_metric_names(): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert mock_logger.log_metrics.call_count == 1 - mock_logger.log_metrics.assert_called_once_with( - step=5, - **{"tag/a": 12.23, - "tag/b": 23.45, - "tag/c": 10.0} - ) + mock_logger.log_metrics.assert_called_once_with(step=5, **{"tag/a": 12.23, "tag/b": 23.45, "tag/c": 10.0}) def test_output_handler_both(): @@ -135,17 +119,12 @@ def test_output_handler_both(): wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) assert mock_logger.log_metrics.call_count == 1 - mock_logger.log_metrics.assert_called_once_with( - step=5, - **{"tag/a": 12.23, - "tag/b": 23.45, - "tag/loss": 12345} - ) + mock_logger.log_metrics.assert_called_once_with(step=5, **{"tag/a": 12.23, "tag/b": 23.45, "tag/loss": 12345}) def test_output_handler_with_wrong_global_step_transform_output(): def global_step_transform(*args, **kwargs): - return 'a' + return "a" wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform) 
mock_logger = MagicMock(spec=PolyaxonLogger) @@ -184,8 +163,11 @@ def test_output_handler_with_global_step_from_engine(): mock_another_engine.state.epoch = 10 mock_another_engine.state.output = 12.345 - wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, - global_step_transform=global_step_from_engine(mock_another_engine)) + wrapper = OutputHandler( + "tag", + output_transform=lambda x: {"loss": x}, + global_step_transform=global_step_from_engine(mock_another_engine), + ) mock_logger = MagicMock(spec=PolyaxonLogger) mock_logger.log_metrics = MagicMock() @@ -197,16 +179,18 @@ def test_output_handler_with_global_step_from_engine(): wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) assert mock_logger.log_metrics.call_count == 1 - mock_logger.log_metrics.assert_has_calls([call(step=mock_another_engine.state.epoch, - **{"tag/loss": mock_engine.state.output})]) + mock_logger.log_metrics.assert_has_calls( + [call(step=mock_another_engine.state.epoch, **{"tag/loss": mock_engine.state.output})] + ) mock_another_engine.state.epoch = 11 mock_engine.state.output = 1.123 wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) assert mock_logger.log_metrics.call_count == 2 - mock_logger.log_metrics.assert_has_calls([call(step=mock_another_engine.state.epoch, - **{"tag/loss": mock_engine.state.output})]) + mock_logger.log_metrics.assert_has_calls( + [call(step=mock_another_engine.state.epoch, **{"tag/loss": mock_engine.state.output})] + ) def test_optimizer_params_handler_wrong_setup(): @@ -263,9 +247,7 @@ def dummy_handler(engine, logger, event_name): global_step = engine.state.get_event_attrib_value(event_name) logger.log_metrics(step=global_step, **{"{}".format("test_value"): global_step}) - plx_logger.attach(trainer, - log_handler=dummy_handler, - event_name=Events.EPOCH_COMPLETED) + plx_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED) trainer.run(data, max_epochs=n_epochs) @@ -289,9 +271,7 @@ def dummy_handler(engine, logger, event_name): global_step = engine.state.get_event_attrib_value(event_name) logger.log_metrics(step=global_step, **{"{}".format("test_value"): global_step}) - plx_logger.attach(trainer, - log_handler=dummy_handler, - event_name=Events.EPOCH_COMPLETED) + plx_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED) trainer.run(data, max_epochs=n_epochs) diff --git a/tests/ignite/contrib/handlers/test_tensorboard_logger.py b/tests/ignite/contrib/handlers/test_tensorboard_logger.py index 769935b9a136..f88800f4ad62 100644 --- a/tests/ignite/contrib/handlers/test_tensorboard_logger.py +++ b/tests/ignite/contrib/handlers/test_tensorboard_logger.py @@ -92,12 +92,9 @@ def test_output_handler_metric_names(dirname): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert mock_logger.writer.add_scalar.call_count == 2 - mock_logger.writer.add_scalar.assert_has_calls([ - call("tag/a", 12.23, 5), - call("tag/b", 23.45, 5), - ], any_order=True) + mock_logger.writer.add_scalar.assert_has_calls([call("tag/a", 12.23, 5), call("tag/b", 23.45, 5),], any_order=True) - wrapper = OutputHandler("tag", metric_names=["a", ]) + wrapper = OutputHandler("tag", metric_names=["a",]) mock_engine = MagicMock() mock_engine.state = State(metrics={"a": torch.Tensor([0.0, 1.0, 2.0, 3.0])}) @@ -109,12 +106,10 @@ def test_output_handler_metric_names(dirname): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert mock_logger.writer.add_scalar.call_count == 4 - mock_logger.writer.add_scalar.assert_has_calls([ 
- call("tag/a/0", 0.0, 5), - call("tag/a/1", 1.0, 5), - call("tag/a/2", 2.0, 5), - call("tag/a/3", 3.0, 5), - ], any_order=True) + mock_logger.writer.add_scalar.assert_has_calls( + [call("tag/a/0", 0.0, 5), call("tag/a/1", 1.0, 5), call("tag/a/2", 2.0, 5), call("tag/a/3", 3.0, 5),], + any_order=True, + ) wrapper = OutputHandler("tag", metric_names=["a", "c"]) @@ -129,9 +124,7 @@ def test_output_handler_metric_names(dirname): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert mock_logger.writer.add_scalar.call_count == 1 - mock_logger.writer.add_scalar.assert_has_calls([ - call("tag/a", 55.56, 7), - ], any_order=True) + mock_logger.writer.add_scalar.assert_has_calls([call("tag/a", 55.56, 7),], any_order=True) # all metrics wrapper = OutputHandler("tag", metric_names="all") @@ -145,10 +138,7 @@ def test_output_handler_metric_names(dirname): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert mock_logger.writer.add_scalar.call_count == 2 - mock_logger.writer.add_scalar.assert_has_calls([ - call("tag/a", 12.23, 5), - call("tag/b", 23.45, 5), - ], any_order=True) + mock_logger.writer.add_scalar.assert_has_calls([call("tag/a", 12.23, 5), call("tag/b", 23.45, 5),], any_order=True) def test_output_handler_both(dirname): @@ -165,16 +155,14 @@ def test_output_handler_both(dirname): wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) assert mock_logger.writer.add_scalar.call_count == 3 - mock_logger.writer.add_scalar.assert_has_calls([ - call("tag/a", 12.23, 5), - call("tag/b", 23.45, 5), - call("tag/loss", 12345, 5) - ], any_order=True) + mock_logger.writer.add_scalar.assert_has_calls( + [call("tag/a", 12.23, 5), call("tag/b", 23.45, 5), call("tag/loss", 12345, 5)], any_order=True + ) def test_output_handler_with_wrong_global_step_transform_output(): def global_step_transform(*args, **kwargs): - return 'a' + return "a" wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform) mock_logger = MagicMock(spec=TensorboardLogger) @@ -196,8 +184,11 @@ def test_output_handler_with_global_step_from_engine(): mock_another_engine.state.epoch = 10 mock_another_engine.state.output = 12.345 - wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, - global_step_transform=global_step_from_engine(mock_another_engine)) + wrapper = OutputHandler( + "tag", + output_transform=lambda x: {"loss": x}, + global_step_transform=global_step_from_engine(mock_another_engine), + ) mock_logger = MagicMock(spec=TensorboardLogger) mock_logger.writer = MagicMock() @@ -209,18 +200,18 @@ def test_output_handler_with_global_step_from_engine(): wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) assert mock_logger.writer.add_scalar.call_count == 1 - mock_logger.writer.add_scalar.assert_has_calls([call("tag/loss", - mock_engine.state.output, - mock_another_engine.state.epoch)]) + mock_logger.writer.add_scalar.assert_has_calls( + [call("tag/loss", mock_engine.state.output, mock_another_engine.state.epoch)] + ) mock_another_engine.state.epoch = 11 mock_engine.state.output = 1.123 wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) assert mock_logger.writer.add_scalar.call_count == 2 - mock_logger.writer.add_scalar.assert_has_calls([call("tag/loss", - mock_engine.state.output, - mock_another_engine.state.epoch)]) + mock_logger.writer.add_scalar.assert_has_calls( + [call("tag/loss", mock_engine.state.output, mock_another_engine.state.epoch)] + ) def test_output_handler_with_global_step_transform(): @@ -279,12 +270,15 @@ def 
_test(tag=None): tag_prefix = "{}/".format(tag) if tag else "" assert mock_logger.writer.add_scalar.call_count == 4 - mock_logger.writer.add_scalar.assert_has_calls([ - call(tag_prefix + "weights_norm/fc1/weight", 0.0, 5), - call(tag_prefix + "weights_norm/fc1/bias", 0.0, 5), - call(tag_prefix + "weights_norm/fc2/weight", 12.0, 5), - call(tag_prefix + "weights_norm/fc2/bias", math.sqrt(12.0), 5), - ], any_order=True) + mock_logger.writer.add_scalar.assert_has_calls( + [ + call(tag_prefix + "weights_norm/fc1/weight", 0.0, 5), + call(tag_prefix + "weights_norm/fc1/bias", 0.0, 5), + call(tag_prefix + "weights_norm/fc2/weight", 12.0, 5), + call(tag_prefix + "weights_norm/fc2/bias", math.sqrt(12.0), 5), + ], + any_order=True, + ) _test() _test(tag="tag") @@ -304,16 +298,15 @@ def test_weights_scalar_handler_frozen_layers(dummy_model_factory): wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) - mock_logger.writer.add_scalar.assert_has_calls([ - call("weights_norm/fc2/weight", 12.0, 5), - call("weights_norm/fc2/bias", math.sqrt(12.0), 5), - ], any_order=True) + mock_logger.writer.add_scalar.assert_has_calls( + [call("weights_norm/fc2/weight", 12.0, 5), call("weights_norm/fc2/bias", math.sqrt(12.0), 5),], any_order=True + ) with pytest.raises(AssertionError): - mock_logger.writer.add_scalar.assert_has_calls([ - call("weights_norm/fc1/weight", 12.0, 5), - call("weights_norm/fc1/bias", math.sqrt(12.0), 5), - ], any_order=True) + mock_logger.writer.add_scalar.assert_has_calls( + [call("weights_norm/fc1/weight", 12.0, 5), call("weights_norm/fc1/bias", math.sqrt(12.0), 5),], + any_order=True, + ) assert mock_logger.writer.add_scalar.call_count == 2 @@ -350,12 +343,15 @@ def _test(tag=None): tag_prefix = "{}/".format(tag) if tag else "" assert mock_logger.writer.add_histogram.call_count == 4 - mock_logger.writer.add_histogram.assert_has_calls([ - call(tag=tag_prefix + "weights/fc1/weight", values=ANY, global_step=5), - call(tag=tag_prefix + "weights/fc1/bias", values=ANY, global_step=5), - call(tag=tag_prefix + "weights/fc2/weight", values=ANY, global_step=5), - call(tag=tag_prefix + "weights/fc2/bias", values=ANY, global_step=5), - ], any_order=True) + mock_logger.writer.add_histogram.assert_has_calls( + [ + call(tag=tag_prefix + "weights/fc1/weight", values=ANY, global_step=5), + call(tag=tag_prefix + "weights/fc1/bias", values=ANY, global_step=5), + call(tag=tag_prefix + "weights/fc2/weight", values=ANY, global_step=5), + call(tag=tag_prefix + "weights/fc2/bias", values=ANY, global_step=5), + ], + any_order=True, + ) _test() _test(tag="tag") @@ -375,16 +371,22 @@ def test_weights_hist_handler_frozen_layers(dummy_model_factory): wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) - mock_logger.writer.add_histogram.assert_has_calls([ - call(tag="weights/fc2/weight", values=ANY, global_step=5), - call(tag="weights/fc2/bias", values=ANY, global_step=5), - ], any_order=True) + mock_logger.writer.add_histogram.assert_has_calls( + [ + call(tag="weights/fc2/weight", values=ANY, global_step=5), + call(tag="weights/fc2/bias", values=ANY, global_step=5), + ], + any_order=True, + ) with pytest.raises(AssertionError): - mock_logger.writer.add_histogram.assert_has_calls([ - call(tag="weights/fc1/weight", values=ANY, global_step=5), - call(tag="weights/fc1/bias", values=ANY, global_step=5), - ], any_order=True) + mock_logger.writer.add_histogram.assert_has_calls( + [ + call(tag="weights/fc1/weight", values=ANY, global_step=5), + call(tag="weights/fc1/bias", values=ANY, global_step=5), + ], + 
any_order=True, + ) assert mock_logger.writer.add_histogram.call_count == 2 @@ -422,12 +424,15 @@ def _test(tag=None): tag_prefix = "{}/".format(tag) if tag else "" - mock_logger.writer.add_scalar.assert_has_calls([ - call(tag_prefix + "grads_norm/fc1/weight", ANY, 5), - call(tag_prefix + "grads_norm/fc1/bias", ANY, 5), - call(tag_prefix + "grads_norm/fc2/weight", ANY, 5), - call(tag_prefix + "grads_norm/fc2/bias", ANY, 5), - ], any_order=True) + mock_logger.writer.add_scalar.assert_has_calls( + [ + call(tag_prefix + "grads_norm/fc1/weight", ANY, 5), + call(tag_prefix + "grads_norm/fc1/bias", ANY, 5), + call(tag_prefix + "grads_norm/fc2/weight", ANY, 5), + call(tag_prefix + "grads_norm/fc2/bias", ANY, 5), + ], + any_order=True, + ) assert mock_logger.writer.add_scalar.call_count == 4 assert norm_mock.call_count == 4 @@ -449,16 +454,14 @@ def test_grads_scalar_handler_frozen_layers(dummy_model_factory, norm_mock): wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) - mock_logger.writer.add_scalar.assert_has_calls([ - call("grads_norm/fc2/weight", ANY, 5), - call("grads_norm/fc2/bias", ANY, 5), - ], any_order=True) + mock_logger.writer.add_scalar.assert_has_calls( + [call("grads_norm/fc2/weight", ANY, 5), call("grads_norm/fc2/bias", ANY, 5),], any_order=True + ) with pytest.raises(AssertionError): - mock_logger.writer.add_scalar.assert_has_calls([ - call("grads_norm/fc1/weight", ANY, 5), - call("grads_norm/fc1/bias", ANY, 5), - ], any_order=True) + mock_logger.writer.add_scalar.assert_has_calls( + [call("grads_norm/fc1/weight", ANY, 5), call("grads_norm/fc1/bias", ANY, 5),], any_order=True + ) assert mock_logger.writer.add_scalar.call_count == 2 assert norm_mock.call_count == 2 @@ -494,12 +497,15 @@ def _test(tag=None): tag_prefix = "{}/".format(tag) if tag else "" assert mock_logger.writer.add_histogram.call_count == 4 - mock_logger.writer.add_histogram.assert_has_calls([ - call(tag=tag_prefix + "grads/fc1/weight", values=ANY, global_step=5), - call(tag=tag_prefix + "grads/fc1/bias", values=ANY, global_step=5), - call(tag=tag_prefix + "grads/fc2/weight", values=ANY, global_step=5), - call(tag=tag_prefix + "grads/fc2/bias", values=ANY, global_step=5), - ], any_order=True) + mock_logger.writer.add_histogram.assert_has_calls( + [ + call(tag=tag_prefix + "grads/fc1/weight", values=ANY, global_step=5), + call(tag=tag_prefix + "grads/fc1/bias", values=ANY, global_step=5), + call(tag=tag_prefix + "grads/fc2/weight", values=ANY, global_step=5), + call(tag=tag_prefix + "grads/fc2/bias", values=ANY, global_step=5), + ], + any_order=True, + ) _test() _test(tag="tag") @@ -519,16 +525,22 @@ def test_grads_hist_frozen_layers(dummy_model_factory): wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) assert mock_logger.writer.add_histogram.call_count == 2 - mock_logger.writer.add_histogram.assert_has_calls([ - call(tag="grads/fc2/weight", values=ANY, global_step=5), - call(tag="grads/fc2/bias", values=ANY, global_step=5), - ], any_order=True) + mock_logger.writer.add_histogram.assert_has_calls( + [ + call(tag="grads/fc2/weight", values=ANY, global_step=5), + call(tag="grads/fc2/bias", values=ANY, global_step=5), + ], + any_order=True, + ) with pytest.raises(AssertionError): - mock_logger.writer.add_histogram.assert_has_calls([ - call(tag="grads/fc1/weight", values=ANY, global_step=5), - call(tag="grads/fc1/bias", values=ANY, global_step=5), - ], any_order=True) + mock_logger.writer.add_histogram.assert_has_calls( + [ + call(tag="grads/fc1/weight", values=ANY, global_step=5), + 
call(tag="grads/fc1/bias", values=ANY, global_step=5), + ], + any_order=True, + ) def test_integration(dirname): @@ -550,9 +562,7 @@ def dummy_handler(engine, logger, event_name): global_step = engine.state.get_event_attrib_value(event_name) logger.writer.add_scalar("test_value", global_step, global_step) - tb_logger.attach(trainer, - log_handler=dummy_handler, - event_name=Events.EPOCH_COMPLETED) + tb_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED) trainer.run(data, max_epochs=n_epochs) tb_logger.close() @@ -582,9 +592,7 @@ def dummy_handler(engine, logger, event_name): global_step = engine.state.get_event_attrib_value(event_name) logger.writer.add_scalar("test_value", global_step, global_step) - tb_logger.attach(trainer, - log_handler=dummy_handler, - event_name=Events.EPOCH_COMPLETED) + tb_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED) trainer.run(data, max_epochs=n_epochs) @@ -596,19 +604,21 @@ def dummy_handler(engine, logger, event_name): def test_no_tensorboardX_package(dirname): from torch.utils.tensorboard import SummaryWriter - with patch.dict('sys.modules', {'tensorboardX': None}): + + with patch.dict("sys.modules", {"tensorboardX": None}): tb_logger = TensorboardLogger(log_dir=dirname) assert isinstance(tb_logger.writer, SummaryWriter), type(tb_logger.writer) def test_no_torch_utils_tensorboard_package(dirname): from tensorboardX import SummaryWriter - with patch.dict('sys.modules', {'torch.utils.tensorboard': None}): + + with patch.dict("sys.modules", {"torch.utils.tensorboard": None}): tb_logger = TensorboardLogger(log_dir=dirname) assert isinstance(tb_logger.writer, SummaryWriter), type(tb_logger.writer) def test_no_tensorboardX_nor_torch_utils_tensorboard(): - with patch.dict('sys.modules', {'tensorboardX': None, 'torch.utils.tensorboard': None}): - with pytest.raises(RuntimeError, match=r'This contrib module requires either tensorboardX or torch'): + with patch.dict("sys.modules", {"tensorboardX": None, "torch.utils.tensorboard": None}): + with pytest.raises(RuntimeError, match=r"This contrib module requires either tensorboardX or torch"): TensorboardLogger(log_dir=None) diff --git a/tests/ignite/contrib/handlers/test_time_profilers.py b/tests/ignite/contrib/handlers/test_time_profilers.py index 5ffdc8269562..f07277388490 100644 --- a/tests/ignite/contrib/handlers/test_time_profilers.py +++ b/tests/ignite/contrib/handlers/test_time_profilers.py @@ -29,20 +29,15 @@ def dummy_data_loader(data): profiler = BasicTimeProfiler() dummy_trainer = Engine(_do_nothing_update_fn) profiler.attach(dummy_trainer) - dummy_trainer.run( - dummy_data_loader(dummy_data), - max_epochs=true_max_epochs, - epoch_length=true_num_iters - ) + dummy_trainer.run(dummy_data_loader(dummy_data), max_epochs=true_max_epochs, epoch_length=true_num_iters) results = profiler.get_results() - dataflow_results = results['dataflow_stats'] + dataflow_results = results["dataflow_stats"] - assert dataflow_results['min/index'][0] == approx(true_dataflow_time_per_ele, abs=1e-1) - assert dataflow_results['max/index'][0] == approx(true_dataflow_time_per_ele, abs=1e-1) - assert dataflow_results['mean'] == approx(true_dataflow_time_per_ele, abs=1e-1) - assert dataflow_results['std'] == approx(0., abs=1e-1) - assert dataflow_results['total']\ - == approx(true_num_iters * true_dataflow_time_per_ele, abs=1e-1) + assert dataflow_results["min/index"][0] == approx(true_dataflow_time_per_ele, abs=1e-1) + assert dataflow_results["max/index"][0] == 
approx(true_dataflow_time_per_ele, abs=1e-1) + assert dataflow_results["mean"] == approx(true_dataflow_time_per_ele, abs=1e-1) + assert dataflow_results["std"] == approx(0.0, abs=1e-1) + assert dataflow_results["total"] == approx(true_num_iters * true_dataflow_time_per_ele, abs=1e-1) def test_processing_timer(): @@ -58,14 +53,13 @@ def train_updater(engine, batch): profiler.attach(dummy_trainer) dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs) results = profiler.get_results() - processing_results = results['processing_stats'] + processing_results = results["processing_stats"] - assert processing_results['min/index'][0] == approx(true_processing_time, abs=1e-1) - assert processing_results['max/index'][0] == approx(true_processing_time, abs=1e-1) - assert processing_results['mean'] == approx(true_processing_time, abs=1e-1) - assert processing_results['std'] == approx(0., abs=1e-1) - assert processing_results['total']\ - == approx(true_max_epochs * true_num_iters * true_processing_time, abs=1e-1) + assert processing_results["min/index"][0] == approx(true_processing_time, abs=1e-1) + assert processing_results["max/index"][0] == approx(true_processing_time, abs=1e-1) + assert processing_results["mean"] == approx(true_processing_time, abs=1e-1) + assert processing_results["std"] == approx(0.0, abs=1e-1) + assert processing_results["total"] == approx(true_max_epochs * true_num_iters * true_processing_time, abs=1e-1) def test_event_handler_started(): @@ -83,11 +77,11 @@ def delay_start(engine): dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs) results = profiler.get_results() - event_results = results['event_handlers_stats']['Events_STARTED'] + event_results = results["event_handlers_stats"]["Events_STARTED"] - assert event_results['min/index'][0] == approx(true_event_handler_time, abs=1e-1) - assert event_results['max/index'][0] == approx(true_event_handler_time, abs=1e-1) - assert event_results['mean'] == approx(true_event_handler_time, abs=1e-1) + assert event_results["min/index"][0] == approx(true_event_handler_time, abs=1e-1) + assert event_results["max/index"][0] == approx(true_event_handler_time, abs=1e-1) + assert event_results["mean"] == approx(true_event_handler_time, abs=1e-1) def test_event_handler_completed(): @@ -105,11 +99,11 @@ def delay_complete(engine): dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs) results = profiler.get_results() - event_results = results['event_handlers_stats']['Events_COMPLETED'] + event_results = results["event_handlers_stats"]["Events_COMPLETED"] - assert event_results['min/index'][0] == approx(true_event_handler_time, abs=1e-1) - assert event_results['max/index'][0] == approx(true_event_handler_time, abs=1e-1) - assert event_results['mean'] == approx(true_event_handler_time, abs=1e-1) + assert event_results["min/index"][0] == approx(true_event_handler_time, abs=1e-1) + assert event_results["max/index"][0] == approx(true_event_handler_time, abs=1e-1) + assert event_results["mean"] == approx(true_event_handler_time, abs=1e-1) def test_event_handler_epoch_started(): @@ -127,15 +121,13 @@ def delay_epoch_start(engine): dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs) results = profiler.get_results() - event_results = results['event_handlers_stats']['Events_EPOCH_STARTED'] + event_results = results["event_handlers_stats"]["Events_EPOCH_STARTED"] - assert event_results['min/index'][0] == approx(true_event_handler_time, abs=1e-1) - assert event_results['max/index'][0] == 
approx(true_event_handler_time, abs=1e-1) - assert event_results['mean'] == approx(true_event_handler_time, abs=1e-1) - assert event_results['std'] == approx(0., abs=1e-1) - assert event_results['total'] == approx( - true_max_epochs * true_event_handler_time, abs=1e-1 - ) + assert event_results["min/index"][0] == approx(true_event_handler_time, abs=1e-1) + assert event_results["max/index"][0] == approx(true_event_handler_time, abs=1e-1) + assert event_results["mean"] == approx(true_event_handler_time, abs=1e-1) + assert event_results["std"] == approx(0.0, abs=1e-1) + assert event_results["total"] == approx(true_max_epochs * true_event_handler_time, abs=1e-1) def test_event_handler_epoch_completed(): @@ -153,15 +145,13 @@ def delay_epoch_complete(engine): dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs) results = profiler.get_results() - event_results = results['event_handlers_stats']['Events_EPOCH_COMPLETED'] + event_results = results["event_handlers_stats"]["Events_EPOCH_COMPLETED"] - assert event_results['min/index'][0] == approx(true_event_handler_time, abs=1e-1) - assert event_results['max/index'][0] == approx(true_event_handler_time, abs=1e-1) - assert event_results['mean'] == approx(true_event_handler_time, abs=1e-1) - assert event_results['std'] == approx(0., abs=1e-1) - assert event_results['total'] == approx( - true_max_epochs * true_event_handler_time, abs=1e-1 - ) + assert event_results["min/index"][0] == approx(true_event_handler_time, abs=1e-1) + assert event_results["max/index"][0] == approx(true_event_handler_time, abs=1e-1) + assert event_results["mean"] == approx(true_event_handler_time, abs=1e-1) + assert event_results["std"] == approx(0.0, abs=1e-1) + assert event_results["total"] == approx(true_max_epochs * true_event_handler_time, abs=1e-1) def test_event_handler_iteration_started(): @@ -179,15 +169,13 @@ def delay_iter_start(engine): dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs) results = profiler.get_results() - event_results = results['event_handlers_stats']['Events_ITERATION_STARTED'] + event_results = results["event_handlers_stats"]["Events_ITERATION_STARTED"] - assert event_results['min/index'][0] == approx(true_event_handler_time, abs=1e-1) - assert event_results['max/index'][0] == approx(true_event_handler_time, abs=1e-1) - assert event_results['mean'] == approx(true_event_handler_time, abs=1e-1) - assert event_results['std'] == approx(0., abs=1e-1) - assert event_results['total'] == approx( - true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1 - ) + assert event_results["min/index"][0] == approx(true_event_handler_time, abs=1e-1) + assert event_results["max/index"][0] == approx(true_event_handler_time, abs=1e-1) + assert event_results["mean"] == approx(true_event_handler_time, abs=1e-1) + assert event_results["std"] == approx(0.0, abs=1e-1) + assert event_results["total"] == approx(true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1) def test_event_handler_iteration_completed(): @@ -205,15 +193,13 @@ def delay_iter_complete(engine): dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs) results = profiler.get_results() - event_results = results['event_handlers_stats']['Events_ITERATION_COMPLETED'] + event_results = results["event_handlers_stats"]["Events_ITERATION_COMPLETED"] - assert event_results['min/index'][0] == approx(true_event_handler_time, abs=1e-1) - assert event_results['max/index'][0] == approx(true_event_handler_time, abs=1e-1) - assert event_results['mean'] 
== approx(true_event_handler_time, abs=1e-1) - assert event_results['std'] == approx(0., abs=1e-1) - assert event_results['total'] == approx( - true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1 - ) + assert event_results["min/index"][0] == approx(true_event_handler_time, abs=1e-1) + assert event_results["max/index"][0] == approx(true_event_handler_time, abs=1e-1) + assert event_results["mean"] == approx(true_event_handler_time, abs=1e-1) + assert event_results["std"] == approx(0.0, abs=1e-1) + assert event_results["total"] == approx(true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1) def test_event_handler_get_batch_started(): @@ -231,15 +217,13 @@ def delay_get_batch_started(engine): dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs) results = profiler.get_results() - event_results = results['event_handlers_stats']['Events_GET_BATCH_STARTED'] + event_results = results["event_handlers_stats"]["Events_GET_BATCH_STARTED"] - assert event_results['min/index'][0] == approx(true_event_handler_time, abs=1e-1) - assert event_results['max/index'][0] == approx(true_event_handler_time, abs=1e-1) - assert event_results['mean'] == approx(true_event_handler_time, abs=1e-1) - assert event_results['std'] == approx(0., abs=1e-1) - assert event_results['total'] == approx( - true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1 - ) + assert event_results["min/index"][0] == approx(true_event_handler_time, abs=1e-1) + assert event_results["max/index"][0] == approx(true_event_handler_time, abs=1e-1) + assert event_results["mean"] == approx(true_event_handler_time, abs=1e-1) + assert event_results["std"] == approx(0.0, abs=1e-1) + assert event_results["total"] == approx(true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1) def test_event_handler_get_batch_completed(): @@ -257,15 +241,13 @@ def delay_get_batch_completed(engine): dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs) results = profiler.get_results() - event_results = results['event_handlers_stats']['Events_GET_BATCH_COMPLETED'] + event_results = results["event_handlers_stats"]["Events_GET_BATCH_COMPLETED"] - assert event_results['min/index'][0] == approx(true_event_handler_time, abs=1e-1) - assert event_results['max/index'][0] == approx(true_event_handler_time, abs=1e-1) - assert event_results['mean'] == approx(true_event_handler_time, abs=1e-1) - assert event_results['std'] == approx(0., abs=1e-1) - assert event_results['total'] == approx( - true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1 - ) + assert event_results["min/index"][0] == approx(true_event_handler_time, abs=1e-1) + assert event_results["max/index"][0] == approx(true_event_handler_time, abs=1e-1) + assert event_results["mean"] == approx(true_event_handler_time, abs=1e-1) + assert event_results["std"] == approx(0.0, abs=1e-1) + assert event_results["total"] == approx(true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1) def test_event_handler_total_time(): @@ -311,17 +293,16 @@ def delay_get_batch_completed(engine): dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs) results = profiler.get_results() - event_results = results['event_handlers_stats'] + event_results = results["event_handlers_stats"] - assert event_results['total_time'].item() == approx( - true_event_handler_time * 8, abs=1e-1) + assert event_results["total_time"].item() == approx(true_event_handler_time * 8, abs=1e-1) def test_write_results(): true_event_handler_time = 
0.125 true_max_epochs = 3 true_num_iters = 2 - test_folder = './test_log_folder' + test_folder = "./test_log_folder" if os.path.exists(test_folder): shutil.rmtree(test_folder) @@ -364,12 +345,12 @@ def delay_get_batch_completed(engine): time.sleep(true_event_handler_time) dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs) - profiler.write_results(test_folder + '/test_log.csv') + profiler.write_results(test_folder + "/test_log.csv") - assert os.path.isfile(test_folder + '/test_log.csv') + assert os.path.isfile(test_folder + "/test_log.csv") file_length = 0 - with open(test_folder + '/test_log.csv') as f: + with open(test_folder + "/test_log.csv") as f: for l in f: file_length += 1 diff --git a/tests/ignite/contrib/handlers/test_tqdm_logger.py b/tests/ignite/contrib/handlers/test_tqdm_logger.py index 09aa942ed416..a8b4eb072c10 100644 --- a/tests/ignite/contrib/handlers/test_tqdm_logger.py +++ b/tests/ignite/contrib/handlers/test_tqdm_logger.py @@ -13,7 +13,7 @@ def update_fn(engine, batch): a = 1 - engine.state.metrics['a'] = a + engine.state.metrics["a"] = a return a @@ -24,15 +24,15 @@ def test_pbar(capsys): engine = Engine(update_fn) pbar = ProgressBar() - pbar.attach(engine, ['a']) + pbar.attach(engine, ["a"]) engine.run(loader, max_epochs=n_epochs) captured = capsys.readouterr() - err = captured.err.split('\r') + err = captured.err.split("\r") err = list(map(lambda x: x.strip(), err)) err = list(filter(None, err)) - expected = u'Epoch [2/2]: [1/2] 50%|█████ , a=1 [00:00<00:00]' + expected = "Epoch [2/2]: [1/2] 50%|█████ , a=1 [00:00<00:00]" assert err[-1] == expected @@ -45,7 +45,7 @@ def test_pbar_file(tmp_path): file = open(str(file_path), "w+") pbar = ProgressBar(file=file) - pbar.attach(engine, ['a']) + pbar.attach(engine, ["a"]) engine.run(loader, max_epochs=n_epochs) file.close() # Force a flush of the buffer. file.flush() does not work. 
@@ -53,7 +53,7 @@ def test_pbar_file(tmp_path):
     file = open(str(file_path), "r")
     lines = file.readlines()
-    expected = u"Epoch [2/2]: [1/2] 50%|█████ , a=1 [00:00<00:00]\n"
+    expected = "Epoch [2/2]: [1/2] 50%|█████ , a=1 [00:00<00:00]\n"
     assert lines[-2] == expected
@@ -63,10 +63,10 @@ def test_pbar_log_message(capsys):
     pbar.log_message("test")
     captured = capsys.readouterr()
-    out = captured.out.split('\r')
+    out = captured.out.split("\r")
     out = list(map(lambda x: x.strip(), out))
     out = list(filter(None, out))
-    expected = u'test'
+    expected = "test"
     assert out[-1] == expected
@@ -82,7 +82,7 @@ def test_pbar_log_message_file(tmp_path):
     file = open(str(file_path), "r")
     lines = file.readlines()
-    expected = u"test\n"
+    expected = "test\n"
     assert lines[0] == expected
@@ -91,11 +91,12 @@ def test_attach_fail_with_string():
     pbar = ProgressBar()
     with pytest.raises(TypeError):
-        pbar.attach(engine, 'a')
+        pbar.attach(engine, "a")

 def test_pbar_batch_indeces(capsys):
     engine = Engine(lambda e, b: time.sleep(0.1))
+
     @engine.on(Events.ITERATION_STARTED)
     def print_iter(_):
         print("iteration: ", engine.state.iteration)
@@ -104,10 +105,10 @@ def print_iter(_):
     engine.run(list(range(4)), max_epochs=1)
     captured = capsys.readouterr()
-    err = captured.err.split('\r')
+    err = captured.err.split("\r")
     err = list(map(lambda x: x.strip(), err))
     err = list(filter(None, err))
-    printed_batch_indeces = set(map(lambda x: int(x.split('/')[0][-1]), err))
+    printed_batch_indeces = set(map(lambda x: int(x.split("/")[0][-1]), err))
     expected_batch_indeces = list(range(1, 5))
     assert sorted(list(printed_batch_indeces)) == expected_batch_indeces
@@ -127,16 +128,16 @@ def step(engine, batch):
     RunningAverage(alpha=0.5, output_transform=lambda x: x).attach(trainer, "batchloss")
     pbar = ProgressBar()
-    pbar.attach(trainer, metric_names=['batchloss', ])
+    pbar.attach(trainer, metric_names=["batchloss",])
     trainer.run(data=data, max_epochs=1)
     captured = capsys.readouterr()
-    err = captured.err.split('\r')
+    err = captured.err.split("\r")
     err = list(map(lambda x: x.strip(), err))
     err = list(filter(None, err))
     actual = err[-1]
-    expected = u'Epoch: [1/2] 50%|█████ , batchloss=0.5 [00:00<00:00]'
+    expected = "Epoch: [1/2] 50%|█████ , batchloss=0.5 [00:00<00:00]"
     assert actual == expected
@@ -163,11 +164,11 @@ def step(engine, batch):
     trainer.run(data=data, max_epochs=1)
     captured = capsys.readouterr()
-    err = captured.err.split('\r')
+    err = captured.err.split("\r")
     err = list(map(lambda x: x.strip(), err))
     err = list(filter(None, err))
     actual = err[-1]
-    expected = u'Epoch: [1/2] 50%|█████ , another batchloss=1.5, batchloss=0.5 [00:00<00:00]'
+    expected = "Epoch: [1/2] 50%|█████ , another batchloss=1.5, batchloss=0.5 [00:00<00:00]"
     assert actual == expected
@@ -183,11 +184,11 @@ def test_pbar_no_metric_names(capsys):
     engine.run(loader, max_epochs=n_epochs)
     captured = capsys.readouterr()
-    err = captured.err.split('\r')
+    err = captured.err.split("\r")
     err = list(map(lambda x: x.strip(), err))
     err = list(filter(None, err))
     actual = err[-1]
-    expected = u'Epoch [2/2]: [1/2] 50%|█████ [00:00<00:00]'
+    expected = "Epoch [2/2]: [1/2] 50%|█████ [00:00<00:00]"
     assert actual == expected
@@ -197,15 +198,15 @@ def test_pbar_with_output(capsys):
     engine = Engine(update_fn)
     pbar = ProgressBar()
-    pbar.attach(engine, output_transform=lambda x: {'a': x})
+    pbar.attach(engine, output_transform=lambda x: {"a": x})
    engine.run(loader, max_epochs=n_epochs)
     captured = capsys.readouterr()
-    err = captured.err.split('\r')
+    err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err)) err = list(filter(None, err)) - expected = u'Epoch [2/2]: [1/2] 50%|█████ , a=1 [00:00<00:00]' + expected = "Epoch [2/2]: [1/2] 50%|█████ , a=1 [00:00<00:00]" assert err[-1] == expected @@ -228,10 +229,10 @@ def test_pbar_with_scalar_output(capsys): engine.run(loader, max_epochs=n_epochs) captured = capsys.readouterr() - err = captured.err.split('\r') + err = captured.err.split("\r") err = list(map(lambda x: x.strip(), err)) err = list(filter(None, err)) - expected = u'Epoch [2/2]: [1/2] 50%|█████ , output=1 [00:00<00:00]' + expected = "Epoch [2/2]: [1/2] 50%|█████ , output=1 [00:00<00:00]" assert err[-1] == expected @@ -246,10 +247,10 @@ def test_pbar_with_str_output(capsys): engine.run(loader, max_epochs=n_epochs) captured = capsys.readouterr() - err = captured.err.split('\r') + err = captured.err.split("\r") err = list(map(lambda x: x.strip(), err)) err = list(filter(None, err)) - expected = u'Epoch [2/2]: [1/2] 50%|█████ , output=red [00:00<00:00]' + expected = "Epoch [2/2]: [1/2] 50%|█████ , output=red [00:00<00:00]" assert err[-1] == expected @@ -263,10 +264,10 @@ def test_pbar_with_tqdm_kwargs(capsys): engine.run(loader, max_epochs=n_epochs) captured = capsys.readouterr() - err = captured.err.split('\r') + err = captured.err.split("\r") err = list(map(lambda x: x.strip(), err)) err = list(filter(None, err)) - expected = u'My description: [10/10]: [4/5] 80%|████████ , output=1 [00:00<00:00]' + expected = "My description: [10/10]: [4/5] 80%|████████ , output=1 [00:00<00:00]" assert err[-1] == expected @@ -279,15 +280,14 @@ def test_pbar_for_validation(capsys): engine.run(loader, max_epochs=1) captured = capsys.readouterr() - err = captured.err.split('\r') + err = captured.err.split("\r") err = list(map(lambda x: x.strip(), err)) err = list(filter(None, err)) - expected = u'Validation: [4/5] 80%|████████ [00:00<00:00]' + expected = "Validation: [4/5] 80%|████████ [00:00<00:00]" assert err[-1] == expected def test_pbar_output_tensor(capsys): - def _test(out_tensor, out_msg): loader = [1, 2, 3, 4, 5] @@ -301,10 +301,10 @@ def update_fn(engine, batch): engine.run(loader, max_epochs=1) captured = capsys.readouterr() - err = captured.err.split('\r') + err = captured.err.split("\r") err = list(map(lambda x: x.strip(), err)) err = list(filter(None, err)) - expected = u'Output tensor: [4/5] 80%|████████ , {} [00:00<00:00]'.format(out_msg) + expected = "Output tensor: [4/5] 80%|████████ , {} [00:00<00:00]".format(out_msg) assert err[-1] == expected _test(out_tensor=torch.tensor([5, 0]), out_msg="output_0=5, output_1=0") @@ -337,11 +337,11 @@ def test_pbar_on_epochs(capsys): engine.run(loader, max_epochs=n_epochs) captured = capsys.readouterr() - err = captured.err.split('\r') + err = captured.err.split("\r") err = list(map(lambda x: x.strip(), err)) err = list(filter(None, err)) actual = err[-1] - expected = u'Epoch: [9/10] 90%|█████████ [00:00<00:00]' + expected = "Epoch: [9/10] 90%|█████████ [00:00<00:00]" assert actual == expected @@ -418,11 +418,11 @@ def test_pbar_on_callable_events(capsys): engine.run(loader, max_epochs=n_epochs) captured = capsys.readouterr() - err = captured.err.split('\r') + err = captured.err.split("\r") err = list(map(lambda x: x.strip(), err)) err = list(filter(None, err)) actual = err[-1] - expected = u'Epoch: [90/100] 90%|█████████ [00:00<00:00]' + expected = "Epoch: [90/100] 90%|█████████ [00:00<00:00]" assert actual == expected @@ -434,9 +434,9 @@ def test_tqdm_logger_epoch_length(capsys): engine.run(loader, 
epoch_length=50) captured = capsys.readouterr() - err = captured.err.split('\r') + err = captured.err.split("\r") err = list(map(lambda x: x.strip(), err)) err = list(filter(None, err)) actual = err[-1] - expected = u'Epoch: [50/50] 100%|██████████ [00:00<00:00]' + expected = "Epoch: [50/50] 100%|██████████ [00:00<00:00]" assert actual == expected diff --git a/tests/ignite/contrib/handlers/test_visdom_logger.py b/tests/ignite/contrib/handlers/test_visdom_logger.py index 08fd6092cdb8..457f1d3b6b1f 100644 --- a/tests/ignite/contrib/handlers/test_visdom_logger.py +++ b/tests/ignite/contrib/handlers/test_visdom_logger.py @@ -15,6 +15,7 @@ def visdom_server(): import subprocess from visdom.server import download_scripts + download_scripts() hostname = "localhost" @@ -54,13 +55,16 @@ def test_optimizer_params(): # mock_logger.vis.line.assert_called_once_with("lr/group_0", 0.01, 123) assert len(wrapper.windows) == 1 and "lr/group_0" in wrapper.windows - assert wrapper.windows["lr/group_0"]['win'] is not None + assert wrapper.windows["lr/group_0"]["win"] is not None mock_logger.vis.line.assert_called_once_with( - X=[123, ], Y=[0.01, ], env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows['lr/group_0']['opts'], - name="lr/group_0" + X=[123,], + Y=[0.01,], + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows["lr/group_0"]["opts"], + name="lr/group_0", ) wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr", tag="generator") @@ -71,13 +75,16 @@ def test_optimizer_params(): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert len(wrapper.windows) == 1 and "generator/lr/group_0" in wrapper.windows - assert wrapper.windows["generator/lr/group_0"]['win'] is not None + assert wrapper.windows["generator/lr/group_0"]["win"] is not None mock_logger.vis.line.assert_called_once_with( - X=[123, ], Y=[0.01, ], env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows['generator/lr/group_0']['opts'], - name="generator/lr/group_0" + X=[123,], + Y=[0.01,], + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows["generator/lr/group_0"]["opts"], + name="generator/lr/group_0", ) @@ -106,13 +113,16 @@ def test_output_handler_output_transform(dirname): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert len(wrapper.windows) == 1 and "tag/output" in wrapper.windows - assert wrapper.windows["tag/output"]['win'] is not None + assert wrapper.windows["tag/output"]["win"] is not None mock_logger.vis.line.assert_called_once_with( - X=[123, ], Y=[12345, ], env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows['tag/output']['opts'], - name="tag/output" + X=[123,], + Y=[12345,], + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows["tag/output"]["opts"], + name="tag/output", ) wrapper = OutputHandler("another_tag", output_transform=lambda x: {"loss": x}) @@ -123,13 +133,16 @@ def test_output_handler_output_transform(dirname): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert len(wrapper.windows) == 1 and "another_tag/loss" in wrapper.windows - assert wrapper.windows["another_tag/loss"]['win'] is not None + assert wrapper.windows["another_tag/loss"]["win"] is not None mock_logger.vis.line.assert_called_once_with( - X=[123, ], Y=[12345, ], env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows['another_tag/loss']['opts'], - name="another_tag/loss" + X=[123,], + Y=[12345,], + env=mock_logger.vis.env, + win=None, + update=None, + 
opts=wrapper.windows["another_tag/loss"]["opts"], + name="another_tag/loss", ) @@ -146,22 +159,36 @@ def test_output_handler_metric_names(dirname): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) - assert len(wrapper.windows) == 2 and \ - "tag/a" in wrapper.windows and "tag/b" in wrapper.windows - assert wrapper.windows["tag/a"]['win'] is not None - assert wrapper.windows["tag/b"]['win'] is not None + assert len(wrapper.windows) == 2 and "tag/a" in wrapper.windows and "tag/b" in wrapper.windows + assert wrapper.windows["tag/a"]["win"] is not None + assert wrapper.windows["tag/b"]["win"] is not None assert mock_logger.vis.line.call_count == 2 - mock_logger.vis.line.assert_has_calls([ - call(X=[5, ], Y=[12.23, ], env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows['tag/a']['opts'], name="tag/a"), - call(X=[5, ], Y=[23.45, ], env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows['tag/b']['opts'], name="tag/b"), - ], any_order=True) + mock_logger.vis.line.assert_has_calls( + [ + call( + X=[5,], + Y=[12.23,], + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows["tag/a"]["opts"], + name="tag/a", + ), + call( + X=[5,], + Y=[23.45,], + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows["tag/b"]["opts"], + name="tag/b", + ), + ], + any_order=True, + ) - wrapper = OutputHandler("tag", metric_names=["a", ]) + wrapper = OutputHandler("tag", metric_names=["a",]) mock_engine = MagicMock() mock_engine.state = State(metrics={"a": torch.Tensor([0.0, 1.0, 2.0, 3.0])}) @@ -173,28 +200,54 @@ def test_output_handler_metric_names(dirname): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) - assert len(wrapper.windows) == 4 and \ - all(["tag/a/{}".format(i) in wrapper.windows for i in range(4)]) - assert wrapper.windows["tag/a/0"]['win'] is not None - assert wrapper.windows["tag/a/1"]['win'] is not None - assert wrapper.windows["tag/a/2"]['win'] is not None - assert wrapper.windows["tag/a/3"]['win'] is not None + assert len(wrapper.windows) == 4 and all(["tag/a/{}".format(i) in wrapper.windows for i in range(4)]) + assert wrapper.windows["tag/a/0"]["win"] is not None + assert wrapper.windows["tag/a/1"]["win"] is not None + assert wrapper.windows["tag/a/2"]["win"] is not None + assert wrapper.windows["tag/a/3"]["win"] is not None assert mock_logger.vis.line.call_count == 4 - mock_logger.vis.line.assert_has_calls([ - call(X=[5, ], Y=[0.0, ], env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows['tag/a/0']['opts'], name="tag/a/0"), - call(X=[5, ], Y=[1.0, ], env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows['tag/a/1']['opts'], name="tag/a/1"), - call(X=[5, ], Y=[2.0, ], env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows['tag/a/2']['opts'], name="tag/a/2"), - call(X=[5, ], Y=[3.0, ], env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows['tag/a/3']['opts'], name="tag/a/3"), - ], any_order=True) + mock_logger.vis.line.assert_has_calls( + [ + call( + X=[5,], + Y=[0.0,], + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows["tag/a/0"]["opts"], + name="tag/a/0", + ), + call( + X=[5,], + Y=[1.0,], + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows["tag/a/1"]["opts"], + name="tag/a/1", + ), + call( + X=[5,], + Y=[2.0,], + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows["tag/a/2"]["opts"], + name="tag/a/2", + ), + call( + X=[5,], + Y=[3.0,], + 
env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows["tag/a/3"]["opts"], + name="tag/a/3", + ), + ], + any_order=True, + ) wrapper = OutputHandler("tag", metric_names=["a", "c"]) @@ -210,14 +263,23 @@ def test_output_handler_metric_names(dirname): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert len(wrapper.windows) == 1 and "tag/a" in wrapper.windows - assert wrapper.windows["tag/a"]['win'] is not None + assert wrapper.windows["tag/a"]["win"] is not None assert mock_logger.vis.line.call_count == 1 - mock_logger.vis.line.assert_has_calls([ - call(X=[7, ], Y=[55.56, ], env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows['tag/a']['opts'], name="tag/a"), - ], any_order=True) + mock_logger.vis.line.assert_has_calls( + [ + call( + X=[7,], + Y=[55.56,], + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows["tag/a"]["opts"], + name="tag/a", + ), + ], + any_order=True, + ) # all metrics wrapper = OutputHandler("tag", metric_names="all") @@ -231,20 +293,34 @@ def test_output_handler_metric_names(dirname): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) - assert len(wrapper.windows) == 2 and \ - "tag/a" in wrapper.windows and "tag/b" in wrapper.windows - assert wrapper.windows["tag/a"]['win'] is not None - assert wrapper.windows["tag/b"]['win'] is not None + assert len(wrapper.windows) == 2 and "tag/a" in wrapper.windows and "tag/b" in wrapper.windows + assert wrapper.windows["tag/a"]["win"] is not None + assert wrapper.windows["tag/b"]["win"] is not None assert mock_logger.vis.line.call_count == 2 - mock_logger.vis.line.assert_has_calls([ - call(X=[5, ], Y=[12.23, ], env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows['tag/a']['opts'], name="tag/a"), - call(X=[5, ], Y=[23.45, ], env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows['tag/b']['opts'], name="tag/b"), - ], any_order=True) + mock_logger.vis.line.assert_has_calls( + [ + call( + X=[5,], + Y=[12.23,], + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows["tag/a"]["opts"], + name="tag/a", + ), + call( + X=[5,], + Y=[23.45,], + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows["tag/b"]["opts"], + name="tag/b", + ), + ], + any_order=True, + ) def test_output_handler_both(dirname): @@ -262,50 +338,100 @@ def test_output_handler_both(dirname): wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) assert mock_logger.vis.line.call_count == 3 - assert len(wrapper.windows) == 3 and \ - "tag/a" in wrapper.windows and "tag/b" in wrapper.windows and "tag/loss" in wrapper.windows - assert wrapper.windows["tag/a"]['win'] is not None - assert wrapper.windows["tag/b"]['win'] is not None - assert wrapper.windows["tag/loss"]['win'] is not None - - mock_logger.vis.line.assert_has_calls([ - call(X=[5, ], Y=[12.23, ], env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows['tag/a']['opts'], name="tag/a"), - call(X=[5, ], Y=[23.45, ], env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows['tag/b']['opts'], name="tag/b"), - call(X=[5, ], Y=[12345, ], env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows['tag/loss']['opts'], name="tag/loss"), - ], any_order=True) + assert ( + len(wrapper.windows) == 3 + and "tag/a" in wrapper.windows + and "tag/b" in wrapper.windows + and "tag/loss" in wrapper.windows + ) + assert wrapper.windows["tag/a"]["win"] is not None + assert wrapper.windows["tag/b"]["win"] is not None + assert 
wrapper.windows["tag/loss"]["win"] is not None + + mock_logger.vis.line.assert_has_calls( + [ + call( + X=[5,], + Y=[12.23,], + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows["tag/a"]["opts"], + name="tag/a", + ), + call( + X=[5,], + Y=[23.45,], + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows["tag/b"]["opts"], + name="tag/b", + ), + call( + X=[5,], + Y=[12345,], + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows["tag/loss"]["opts"], + name="tag/loss", + ), + ], + any_order=True, + ) mock_engine.state.epoch = 6 wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) assert mock_logger.vis.line.call_count == 6 - assert len(wrapper.windows) == 3 and \ - "tag/a" in wrapper.windows and "tag/b" in wrapper.windows and "tag/loss" in wrapper.windows - assert wrapper.windows["tag/a"]['win'] is not None - assert wrapper.windows["tag/b"]['win'] is not None - assert wrapper.windows["tag/loss"]['win'] is not None - - mock_logger.vis.line.assert_has_calls([ - call(X=[6, ], Y=[12.23, ], env=mock_logger.vis.env, - win=wrapper.windows["tag/a"]['win'], update='append', - opts=wrapper.windows['tag/a']['opts'], name="tag/a"), - call(X=[6, ], Y=[23.45, ], env=mock_logger.vis.env, - win=wrapper.windows["tag/b"]['win'], update='append', - opts=wrapper.windows['tag/b']['opts'], name="tag/b"), - call(X=[6, ], Y=[12345, ], env=mock_logger.vis.env, - win=wrapper.windows["tag/loss"]['win'], update='append', - opts=wrapper.windows['tag/loss']['opts'], name="tag/loss"), - ], any_order=True) + assert ( + len(wrapper.windows) == 3 + and "tag/a" in wrapper.windows + and "tag/b" in wrapper.windows + and "tag/loss" in wrapper.windows + ) + assert wrapper.windows["tag/a"]["win"] is not None + assert wrapper.windows["tag/b"]["win"] is not None + assert wrapper.windows["tag/loss"]["win"] is not None + + mock_logger.vis.line.assert_has_calls( + [ + call( + X=[6,], + Y=[12.23,], + env=mock_logger.vis.env, + win=wrapper.windows["tag/a"]["win"], + update="append", + opts=wrapper.windows["tag/a"]["opts"], + name="tag/a", + ), + call( + X=[6,], + Y=[23.45,], + env=mock_logger.vis.env, + win=wrapper.windows["tag/b"]["win"], + update="append", + opts=wrapper.windows["tag/b"]["opts"], + name="tag/b", + ), + call( + X=[6,], + Y=[12345,], + env=mock_logger.vis.env, + win=wrapper.windows["tag/loss"]["win"], + update="append", + opts=wrapper.windows["tag/loss"]["opts"], + name="tag/loss", + ), + ], + any_order=True, + ) def test_output_handler_with_wrong_global_step_transform_output(): def global_step_transform(*args, **kwargs): - return 'a' + return "a" wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform) mock_logger = MagicMock(spec=VisdomLogger) @@ -338,12 +464,21 @@ def global_step_transform(*args, **kwargs): wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) assert mock_logger.vis.line.call_count == 1 assert len(wrapper.windows) == 1 and "tag/loss" in wrapper.windows - assert wrapper.windows["tag/loss"]['win'] is not None - - mock_logger.vis.line.assert_has_calls([ - call(X=[10, ], Y=[12345, ], env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows['tag/loss']['opts'], name="tag/loss")]) + assert wrapper.windows["tag/loss"]["win"] is not None + + mock_logger.vis.line.assert_has_calls( + [ + call( + X=[10,], + Y=[12345,], + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows["tag/loss"]["opts"], + name="tag/loss", + ) + ] + ) def 
test_output_handler_with_global_step_from_engine(): @@ -353,8 +488,11 @@ def test_output_handler_with_global_step_from_engine(): mock_another_engine.state.epoch = 10 mock_another_engine.state.output = 12.345 - wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, - global_step_transform=global_step_from_engine(mock_another_engine)) + wrapper = OutputHandler( + "tag", + output_transform=lambda x: {"loss": x}, + global_step_transform=global_step_from_engine(mock_another_engine), + ) mock_logger = MagicMock(spec=VisdomLogger) mock_logger.vis = MagicMock() @@ -368,13 +506,20 @@ def test_output_handler_with_global_step_from_engine(): wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) assert mock_logger.vis.line.call_count == 1 assert len(wrapper.windows) == 1 and "tag/loss" in wrapper.windows - assert wrapper.windows["tag/loss"]['win'] is not None - mock_logger.vis.line.assert_has_calls([call(X=[mock_another_engine.state.epoch, ], - Y=[mock_engine.state.output, ], - env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows['tag/loss']['opts'], - name="tag/loss")]) + assert wrapper.windows["tag/loss"]["win"] is not None + mock_logger.vis.line.assert_has_calls( + [ + call( + X=[mock_another_engine.state.epoch,], + Y=[mock_engine.state.output,], + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows["tag/loss"]["opts"], + name="tag/loss", + ) + ] + ) mock_another_engine.state.epoch = 11 mock_engine.state.output = 1.123 @@ -382,13 +527,20 @@ def test_output_handler_with_global_step_from_engine(): wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) assert mock_logger.vis.line.call_count == 2 assert len(wrapper.windows) == 1 and "tag/loss" in wrapper.windows - assert wrapper.windows["tag/loss"]['win'] is not None - mock_logger.vis.line.assert_has_calls([call(X=[mock_another_engine.state.epoch, ], - Y=[mock_engine.state.output, ], - env=mock_logger.vis.env, - win=wrapper.windows["tag/loss"]['win'], update='append', - opts=wrapper.windows['tag/loss']['opts'], - name="tag/loss")]) + assert wrapper.windows["tag/loss"]["win"] is not None + mock_logger.vis.line.assert_has_calls( + [ + call( + X=[mock_another_engine.state.epoch,], + Y=[mock_engine.state.output,], + env=mock_logger.vis.env, + win=wrapper.windows["tag/loss"]["win"], + update="append", + opts=wrapper.windows["tag/loss"]["opts"], + name="tag/loss", + ) + ] + ) def test_weights_scalar_handler_wrong_setup(): @@ -411,9 +563,7 @@ def test_weights_scalar_handler_wrong_setup(): def test_weights_scalar_handler(): - class DummyModel(torch.nn.Module): - def __init__(self): super(DummyModel, self).__init__() self.fc1 = torch.nn.Linear(10, 10) @@ -441,34 +591,54 @@ def _test(tag=None): tag_prefix = "{}/".format(tag) if tag else "" assert mock_logger.vis.line.call_count == 4 - mock_logger.vis.line.assert_has_calls([ - call(X=[5, ], Y=[0.0, ], env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows[tag_prefix + "weights_norm/fc1/weight"]['opts'], - name=tag_prefix + "weights_norm/fc1/weight"), - call(X=[5, ], Y=[0.0, ], env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows[tag_prefix + "weights_norm/fc1/bias"]['opts'], - name=tag_prefix + "weights_norm/fc1/bias"), - - call(X=[5, ], Y=[12.0, ], env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows[tag_prefix + "weights_norm/fc2/weight"]['opts'], - name=tag_prefix + "weights_norm/fc2/weight"), - call(X=[5, ], Y=ANY, env=mock_logger.vis.env, - win=None, update=None, - 
opts=wrapper.windows[tag_prefix + "weights_norm/fc2/bias"]['opts'], - name=tag_prefix + "weights_norm/fc2/bias"), - ], any_order=True) + mock_logger.vis.line.assert_has_calls( + [ + call( + X=[5,], + Y=[0.0,], + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows[tag_prefix + "weights_norm/fc1/weight"]["opts"], + name=tag_prefix + "weights_norm/fc1/weight", + ), + call( + X=[5,], + Y=[0.0,], + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows[tag_prefix + "weights_norm/fc1/bias"]["opts"], + name=tag_prefix + "weights_norm/fc1/bias", + ), + call( + X=[5,], + Y=[12.0,], + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows[tag_prefix + "weights_norm/fc2/weight"]["opts"], + name=tag_prefix + "weights_norm/fc2/weight", + ), + call( + X=[5,], + Y=ANY, + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows[tag_prefix + "weights_norm/fc2/bias"]["opts"], + name=tag_prefix + "weights_norm/fc2/bias", + ), + ], + any_order=True, + ) _test() _test(tag="tag") def test_weights_scalar_handler_custom_reduction(): - class DummyModel(torch.nn.Module): - def __init__(self): super(DummyModel, self).__init__() self.fc1 = torch.nn.Linear(10, 10) @@ -495,22 +665,47 @@ def norm(x): wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) assert mock_logger.vis.line.call_count == 4 - mock_logger.vis.line.assert_has_calls([ - call(X=[5, ], Y=[12.34, ], env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows["weights_norm/fc1/weight"]['opts'], name="weights_norm/fc1/weight"), - call(X=[5, ], Y=[12.34, ], env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows["weights_norm/fc1/bias"]['opts'], name="weights_norm/fc1/bias"), - - call(X=[5, ], Y=[12.34, ], env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows["weights_norm/fc2/weight"]['opts'], name="weights_norm/fc2/weight"), - call(X=[5, ], Y=[12.34, ], env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows["weights_norm/fc2/bias"]['opts'], name="weights_norm/fc2/bias"), - - ], any_order=True) + mock_logger.vis.line.assert_has_calls( + [ + call( + X=[5,], + Y=[12.34,], + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows["weights_norm/fc1/weight"]["opts"], + name="weights_norm/fc1/weight", + ), + call( + X=[5,], + Y=[12.34,], + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows["weights_norm/fc1/bias"]["opts"], + name="weights_norm/fc1/bias", + ), + call( + X=[5,], + Y=[12.34,], + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows["weights_norm/fc2/weight"]["opts"], + name="weights_norm/fc2/weight", + ), + call( + X=[5,], + Y=[12.34,], + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows["weights_norm/fc2/bias"]["opts"], + name="weights_norm/fc2/bias", + ), + ], + any_order=True, + ) def test_grads_scalar_handler_wrong_setup(): @@ -530,9 +725,7 @@ def test_grads_scalar_handler_wrong_setup(): def test_grads_scalar_handler(): - class DummyModel(torch.nn.Module): - def __init__(self): super(DummyModel, self).__init__() self.fc1 = torch.nn.Linear(10, 10) @@ -563,25 +756,47 @@ def _test(tag=None): tag_prefix = "{}/".format(tag) if tag else "" assert mock_logger.vis.line.call_count == 4 - mock_logger.vis.line.assert_has_calls([ - call(X=[5, ], Y=ANY, env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows[tag_prefix + "grads_norm/fc1/weight"]['opts'], - name=tag_prefix + "grads_norm/fc1/weight"), - 
call(X=[5, ], Y=ANY, env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows[tag_prefix + "grads_norm/fc1/bias"]['opts'], - name=tag_prefix + "grads_norm/fc1/bias"), - - call(X=[5, ], Y=ANY, env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows[tag_prefix + "grads_norm/fc2/weight"]['opts'], - name=tag_prefix + "grads_norm/fc2/weight"), - call(X=[5, ], Y=ANY, env=mock_logger.vis.env, - win=None, update=None, - opts=wrapper.windows[tag_prefix + "grads_norm/fc2/bias"]['opts'], - name=tag_prefix + "grads_norm/fc2/bias"), - ], any_order=True) + mock_logger.vis.line.assert_has_calls( + [ + call( + X=[5,], + Y=ANY, + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows[tag_prefix + "grads_norm/fc1/weight"]["opts"], + name=tag_prefix + "grads_norm/fc1/weight", + ), + call( + X=[5,], + Y=ANY, + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows[tag_prefix + "grads_norm/fc1/bias"]["opts"], + name=tag_prefix + "grads_norm/fc1/bias", + ), + call( + X=[5,], + Y=ANY, + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows[tag_prefix + "grads_norm/fc2/weight"]["opts"], + name=tag_prefix + "grads_norm/fc2/weight", + ), + call( + X=[5,], + Y=ANY, + env=mock_logger.vis.env, + win=None, + update=None, + opts=wrapper.windows[tag_prefix + "grads_norm/fc2/bias"]["opts"], + name=tag_prefix + "grads_norm/fc2/bias", + ), + ], + any_order=True, + ) _test() _test(tag="tag") @@ -602,14 +817,16 @@ def test_logger_init_hostname_port(visdom_server): def test_logger_init_env_vars(visdom_server): # As env vars import os - os.environ['VISDOM_SERVER_URL'] = visdom_server[0] - os.environ['VISDOM_PORT'] = str(visdom_server[1]) + + os.environ["VISDOM_SERVER_URL"] = visdom_server[0] + os.environ["VISDOM_PORT"] = str(visdom_server[1]) vd_logger = VisdomLogger(server=visdom_server[0], port=visdom_server[1], num_workers=0) assert "main" in vd_logger.vis.get_env_list() def _parse_content(content): import json + return json.loads(content) @@ -629,22 +846,20 @@ def update_fn(engine, batch): return next(losses_iter) trainer = Engine(update_fn) - output_handler = OutputHandler(tag="training", output_transform=lambda x: {'loss': x}) - vd_logger.attach(trainer, - log_handler=output_handler, - event_name=Events.ITERATION_COMPLETED) + output_handler = OutputHandler(tag="training", output_transform=lambda x: {"loss": x}) + vd_logger.attach(trainer, log_handler=output_handler, event_name=Events.ITERATION_COMPLETED) trainer.run(data, max_epochs=n_epochs) assert len(output_handler.windows) == 1 assert "training/loss" in output_handler.windows - win_name = output_handler.windows['training/loss']['win'] + win_name = output_handler.windows["training/loss"]["win"] data = vd_logger.vis.get_window_data(win=win_name) data = _parse_content(data) assert "content" in data and "data" in data["content"] data = data["content"]["data"][0] assert "x" in data and "y" in data - x_vals, y_vals = data['x'], data['y'] + x_vals, y_vals = data["x"], data["y"] assert all([int(x) == x_true for x, x_true in zip(x_vals, list(range(1, n_epochs * len(data) + 1)))]) assert all([y == y_true for y, y_true in zip(y_vals, losses)]) @@ -665,22 +880,20 @@ def update_fn(engine, batch): return next(losses_iter) trainer = Engine(update_fn) - output_handler = OutputHandler(tag="training", output_transform=lambda x: {'loss': x}) - vd_logger.attach(trainer, - log_handler=output_handler, - event_name=Events.ITERATION_COMPLETED) + output_handler = OutputHandler(tag="training", 
output_transform=lambda x: {"loss": x}) + vd_logger.attach(trainer, log_handler=output_handler, event_name=Events.ITERATION_COMPLETED) trainer.run(data, max_epochs=n_epochs) assert len(output_handler.windows) == 1 assert "training/loss" in output_handler.windows - win_name = output_handler.windows['training/loss']['win'] + win_name = output_handler.windows["training/loss"]["win"] data = vd_logger.vis.get_window_data(win=win_name) data = _parse_content(data) assert "content" in data and "data" in data["content"] data = data["content"]["data"][0] assert "x" in data and "y" in data - x_vals, y_vals = data['x'], data['y'] + x_vals, y_vals = data["x"], data["y"] assert all([int(x) == x_true for x, x_true in zip(x_vals, list(range(1, n_epochs * len(data) + 1)))]) assert all([y == y_true for y, y_true in zip(y_vals, losses)]) @@ -704,22 +917,20 @@ def update_fn(engine, batch): vd_logger.vis.close() trainer = Engine(update_fn) - output_handler = OutputHandler(tag="training", output_transform=lambda x: {'loss': x}) - vd_logger.attach(trainer, - log_handler=output_handler, - event_name=Events.ITERATION_COMPLETED) + output_handler = OutputHandler(tag="training", output_transform=lambda x: {"loss": x}) + vd_logger.attach(trainer, log_handler=output_handler, event_name=Events.ITERATION_COMPLETED) trainer.run(data, max_epochs=n_epochs) assert len(output_handler.windows) == 1 assert "training/loss" in output_handler.windows - win_name = output_handler.windows['training/loss']['win'] + win_name = output_handler.windows["training/loss"]["win"] data = vd_logger.vis.get_window_data(win=win_name) data = _parse_content(data) assert "content" in data and "data" in data["content"] data = data["content"]["data"][0] assert "x" in data and "y" in data - x_vals, y_vals = data['x'], data['y'] + x_vals, y_vals = data["x"], data["y"] assert all([int(x) == x_true for x, x_true in zip(x_vals, list(range(1, n_epochs * len(data) + 1)))]) assert all([y == y_true for y, y_true in zip(y_vals, losses)]) @@ -727,13 +938,14 @@ def update_fn(engine, batch): @pytest.fixture def no_site_packages(): import sys - plx_module = sys.modules['visdom'] - del sys.modules['visdom'] + + plx_module = sys.modules["visdom"] + del sys.modules["visdom"] prev_path = list(sys.path) sys.path = [p for p in sys.path if "site-packages" not in p] yield "no_site_packages" sys.path = prev_path - sys.modules['visdom'] = plx_module + sys.modules["visdom"] = plx_module def test_no_visdom(no_site_packages): diff --git a/tests/ignite/contrib/metrics/regression/test__base.py b/tests/ignite/contrib/metrics/regression/test__base.py index 5a65fbcaf890..c47646d79e47 100644 --- a/tests/ignite/contrib/metrics/regression/test__base.py +++ b/tests/ignite/contrib/metrics/regression/test__base.py @@ -5,7 +5,6 @@ def test_base_regression_shapes(): - class L1(_BaseRegression): def reset(self): self._sum_of_errors = 0.0 @@ -21,40 +20,31 @@ def compute(self): m = L1() with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4, 1))) + m.update((torch.rand(4, 1, 2), torch.rand(4, 1))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1), - torch.rand(4, 1, 2))) + m.update((torch.rand(4, 1), torch.rand(4, 1, 2))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4,))) + m.update((torch.rand(4, 1, 2), torch.rand(4,))) with pytest.raises(ValueError): - m.update((torch.rand(4,), - torch.rand(4, 1, 2))) + m.update((torch.rand(4,), torch.rand(4, 1, 2))) with pytest.raises(ValueError): - m.update((torch.rand(4, 3), - 
torch.rand(4, 1))) + m.update((torch.rand(4, 3), torch.rand(4, 1))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1), - torch.rand(4, 3))) + m.update((torch.rand(4, 1), torch.rand(4, 3))) with pytest.raises(ValueError): - m.update((torch.rand(4, 7), - torch.rand(4,))) + m.update((torch.rand(4, 7), torch.rand(4,))) with pytest.raises(ValueError): - m.update((torch.rand(4,), - torch.rand(4, 7))) + m.update((torch.rand(4,), torch.rand(4, 7))) def test_base_regression_epoch_shapes(): - def compute_fn(y_pred, y): return 0.0 @@ -65,36 +55,28 @@ def __init__(self, output_transform=lambda x: x): m = ZeroEpoch() with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4, 1))) + m.update((torch.rand(4, 1, 2), torch.rand(4, 1))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1), - torch.rand(4, 1, 2))) + m.update((torch.rand(4, 1), torch.rand(4, 1, 2))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4,))) + m.update((torch.rand(4, 1, 2), torch.rand(4,))) with pytest.raises(ValueError): - m.update((torch.rand(4,), - torch.rand(4, 1, 2))) + m.update((torch.rand(4,), torch.rand(4, 1, 2))) with pytest.raises(ValueError): - m.update((torch.rand(4, 3), - torch.rand(4, 1))) + m.update((torch.rand(4, 3), torch.rand(4, 1))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1), - torch.rand(4, 3))) + m.update((torch.rand(4, 1), torch.rand(4, 3))) with pytest.raises(ValueError): - m.update((torch.rand(4, 7), - torch.rand(4,))) + m.update((torch.rand(4, 7), torch.rand(4,))) with pytest.raises(ValueError): - m.update((torch.rand(4,), - torch.rand(4, 7))) + m.update((torch.rand(4,), torch.rand(4, 7))) def test_base_regression_compute_fn(): diff --git a/tests/ignite/contrib/metrics/regression/test_canberra_metric.py b/tests/ignite/contrib/metrics/regression/test_canberra_metric.py index c62378518b10..4ad552016a67 100644 --- a/tests/ignite/contrib/metrics/regression/test_canberra_metric.py +++ b/tests/ignite/contrib/metrics/regression/test_canberra_metric.py @@ -9,20 +9,16 @@ def test_wrong_input_shapes(): m = CanberraMetric() with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4, 1))) + m.update((torch.rand(4, 1, 2), torch.rand(4, 1))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1), - torch.rand(4, 1, 2))) + m.update((torch.rand(4, 1), torch.rand(4, 1, 2))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4,))) + m.update((torch.rand(4, 1, 2), torch.rand(4,))) with pytest.raises(ValueError): - m.update((torch.rand(4,), - torch.rand(4, 1, 2))) + m.update((torch.rand(4,), torch.rand(4, 1, 2))) def test_compute(): diff --git a/tests/ignite/contrib/metrics/regression/test_fractional_absolute_error.py b/tests/ignite/contrib/metrics/regression/test_fractional_absolute_error.py index 49deeb8ee057..70f41ecb033d 100644 --- a/tests/ignite/contrib/metrics/regression/test_fractional_absolute_error.py +++ b/tests/ignite/contrib/metrics/regression/test_fractional_absolute_error.py @@ -15,20 +15,16 @@ def test_wrong_input_shapes(): m = FractionalAbsoluteError() with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4, 1))) + m.update((torch.rand(4, 1, 2), torch.rand(4, 1))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1), - torch.rand(4, 1, 2))) + m.update((torch.rand(4, 1), torch.rand(4, 1, 2))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4,))) + m.update((torch.rand(4, 1, 2), torch.rand(4,))) with 
pytest.raises(ValueError): - m.update((torch.rand(4,), - torch.rand(4, 1, 2))) + m.update((torch.rand(4,), torch.rand(4, 1, 2))) def test_compute(): diff --git a/tests/ignite/contrib/metrics/regression/test_fractional_bias.py b/tests/ignite/contrib/metrics/regression/test_fractional_bias.py index c193af79a73c..30e940d7e7b8 100644 --- a/tests/ignite/contrib/metrics/regression/test_fractional_bias.py +++ b/tests/ignite/contrib/metrics/regression/test_fractional_bias.py @@ -15,20 +15,16 @@ def test_wrong_input_shapes(): m = FractionalBias() with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4, 1))) + m.update((torch.rand(4, 1, 2), torch.rand(4, 1))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1), - torch.rand(4, 1, 2))) + m.update((torch.rand(4, 1), torch.rand(4, 1, 2))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4,))) + m.update((torch.rand(4, 1, 2), torch.rand(4,))) with pytest.raises(ValueError): - m.update((torch.rand(4,), - torch.rand(4, 1, 2))) + m.update((torch.rand(4,), torch.rand(4, 1, 2))) def test_fractional_bias(): diff --git a/tests/ignite/contrib/metrics/regression/test_geometric_mean_absolute_error.py b/tests/ignite/contrib/metrics/regression/test_geometric_mean_absolute_error.py index 0d612eb663c7..b75a5bc323d4 100644 --- a/tests/ignite/contrib/metrics/regression/test_geometric_mean_absolute_error.py +++ b/tests/ignite/contrib/metrics/regression/test_geometric_mean_absolute_error.py @@ -15,20 +15,16 @@ def test_wrong_input_shapes(): m = GeometricMeanAbsoluteError() with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4, 1))) + m.update((torch.rand(4, 1, 2), torch.rand(4, 1))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1), - torch.rand(4, 1, 2))) + m.update((torch.rand(4, 1), torch.rand(4, 1, 2))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4,))) + m.update((torch.rand(4, 1, 2), torch.rand(4,))) with pytest.raises(ValueError): - m.update((torch.rand(4,), - torch.rand(4, 1, 2))) + m.update((torch.rand(4,), torch.rand(4, 1, 2))) def test_compute(): diff --git a/tests/ignite/contrib/metrics/regression/test_geometric_mean_relative_absolute_error.py b/tests/ignite/contrib/metrics/regression/test_geometric_mean_relative_absolute_error.py index 032cfd64327d..546fc68f3d70 100644 --- a/tests/ignite/contrib/metrics/regression/test_geometric_mean_relative_absolute_error.py +++ b/tests/ignite/contrib/metrics/regression/test_geometric_mean_relative_absolute_error.py @@ -9,20 +9,16 @@ def test_wrong_input_shapes(): m = GeometricMeanRelativeAbsoluteError() with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4, 1))) + m.update((torch.rand(4, 1, 2), torch.rand(4, 1))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1), - torch.rand(4, 1, 2))) + m.update((torch.rand(4, 1), torch.rand(4, 1, 2))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4,))) + m.update((torch.rand(4, 1, 2), torch.rand(4,))) with pytest.raises(ValueError): - m.update((torch.rand(4,), - torch.rand(4, 1, 2))) + m.update((torch.rand(4,), torch.rand(4, 1, 2))) def test_geometric_mean_relative_absolute_error(): @@ -63,15 +59,15 @@ def test_geometric_mean_relative_absolute_error_2(): batch_size = size // n_iters for i in range(n_iters + 1): idx = i * batch_size - np_y_i = np_y[idx: idx + batch_size] - np_y_pred_i = np_y_pred[idx: idx + batch_size] + np_y_i = np_y[idx : idx + batch_size] + np_y_pred_i = np_y_pred[idx : 
idx + batch_size] np_y_sum += np_y_i.sum() num_examples += np_y_i.shape[0] np_mean = np_y_sum / num_examples np_gmrae += np.log(np.abs(np_y_i - np_y_pred_i) / np.abs(np_y_i - np_mean)).sum() - m.update((y_pred[idx: idx + batch_size], y[idx: idx + batch_size])) + m.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) assert np.exp(np_gmrae / num_examples) == pytest.approx(m.compute()) @@ -93,8 +89,8 @@ def test_integration_geometric_mean_relative_absolute_error_with_output_transfor batch_size = size // n_iters for i in range(n_iters + 1): idx = i * batch_size - np_y_i = np_y[idx: idx + batch_size] - np_y_pred_i = np_y_pred[idx: idx + batch_size] + np_y_i = np_y[idx : idx + batch_size] + np_y_pred_i = np_y_pred[idx : idx + batch_size] np_y_sum += np_y_i.sum() num_examples += np_y_i.shape[0] @@ -104,16 +100,16 @@ def test_integration_geometric_mean_relative_absolute_error_with_output_transfor def update_fn(engine, batch): idx = (engine.state.iteration - 1) * batch_size - y_true_batch = np_y[idx:idx + batch_size] - y_pred_batch = np_y_pred[idx:idx + batch_size] + y_true_batch = np_y[idx : idx + batch_size] + y_pred_batch = np_y_pred[idx : idx + batch_size] return idx, torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch) engine = Engine(update_fn) m = GeometricMeanRelativeAbsoluteError(output_transform=lambda x: (x[1], x[2])) - m.attach(engine, 'geometric_mean_relative_absolute_error') + m.attach(engine, "geometric_mean_relative_absolute_error") data = list(range(size // batch_size)) - gmrae = engine.run(data, max_epochs=1).metrics['geometric_mean_relative_absolute_error'] + gmrae = engine.run(data, max_epochs=1).metrics["geometric_mean_relative_absolute_error"] assert np.exp(np_gmrae / num_examples) == pytest.approx(m.compute()) diff --git a/tests/ignite/contrib/metrics/regression/test_manhattan_distance.py b/tests/ignite/contrib/metrics/regression/test_manhattan_distance.py index cdd26841ba7f..4cd1c24458ca 100644 --- a/tests/ignite/contrib/metrics/regression/test_manhattan_distance.py +++ b/tests/ignite/contrib/metrics/regression/test_manhattan_distance.py @@ -9,20 +9,16 @@ def test_wrong_input_shapes(): m = ManhattanDistance() with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4, 1))) + m.update((torch.rand(4, 1, 2), torch.rand(4, 1))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1), - torch.rand(4, 1, 2))) + m.update((torch.rand(4, 1), torch.rand(4, 1, 2))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4,))) + m.update((torch.rand(4, 1, 2), torch.rand(4,))) with pytest.raises(ValueError): - m.update((torch.rand(4,), - torch.rand(4, 1, 2))) + m.update((torch.rand(4,), torch.rand(4, 1, 2))) def test_mahattan_distance(): diff --git a/tests/ignite/contrib/metrics/regression/test_maximum_absolute_error.py b/tests/ignite/contrib/metrics/regression/test_maximum_absolute_error.py index 36645bd5d1fe..ef5d3aafe2a8 100644 --- a/tests/ignite/contrib/metrics/regression/test_maximum_absolute_error.py +++ b/tests/ignite/contrib/metrics/regression/test_maximum_absolute_error.py @@ -15,20 +15,16 @@ def test_wrong_input_shapes(): m = MaximumAbsoluteError() with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4, 1))) + m.update((torch.rand(4, 1, 2), torch.rand(4, 1))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1), - torch.rand(4, 1, 2))) + m.update((torch.rand(4, 1), torch.rand(4, 1, 2))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4,))) + 
m.update((torch.rand(4, 1, 2), torch.rand(4,))) with pytest.raises(ValueError): - m.update((torch.rand(4,), - torch.rand(4, 1, 2))) + m.update((torch.rand(4,), torch.rand(4, 1, 2))) def test_maximum_absolute_error(): diff --git a/tests/ignite/contrib/metrics/regression/test_mean_absolute_relative_error.py b/tests/ignite/contrib/metrics/regression/test_mean_absolute_relative_error.py index 865faf989767..a2e622c995d1 100644 --- a/tests/ignite/contrib/metrics/regression/test_mean_absolute_relative_error.py +++ b/tests/ignite/contrib/metrics/regression/test_mean_absolute_relative_error.py @@ -9,20 +9,16 @@ def test_wrong_input_shapes(): m = MeanAbsoluteRelativeError() with raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4, 1))) + m.update((torch.rand(4, 1, 2), torch.rand(4, 1))) with raises(ValueError): - m.update((torch.rand(4, 1), - torch.rand(4, 1, 2))) + m.update((torch.rand(4, 1), torch.rand(4, 1, 2))) with raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4,))) + m.update((torch.rand(4, 1, 2), torch.rand(4,))) with raises(ValueError): - m.update((torch.rand(4,), - torch.rand(4, 1, 2))) + m.update((torch.rand(4,), torch.rand(4, 1, 2))) def test_mean_absolute_relative_error(): diff --git a/tests/ignite/contrib/metrics/regression/test_mean_error.py b/tests/ignite/contrib/metrics/regression/test_mean_error.py index 027eae2bcc95..7e92be9753c2 100644 --- a/tests/ignite/contrib/metrics/regression/test_mean_error.py +++ b/tests/ignite/contrib/metrics/regression/test_mean_error.py @@ -16,20 +16,16 @@ def test_wrong_input_shapes(): m = MeanError() with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4, 1))) + m.update((torch.rand(4, 1, 2), torch.rand(4, 1))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1), - torch.rand(4, 1, 2))) + m.update((torch.rand(4, 1), torch.rand(4, 1, 2))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4,))) + m.update((torch.rand(4, 1, 2), torch.rand(4,))) with pytest.raises(ValueError): - m.update((torch.rand(4,), - torch.rand(4, 1, 2))) + m.update((torch.rand(4,), torch.rand(4, 1, 2))) def test_mean_error(): diff --git a/tests/ignite/contrib/metrics/regression/test_mean_normalized_bias.py b/tests/ignite/contrib/metrics/regression/test_mean_normalized_bias.py index 5a84cafdd41a..143e8abefb94 100644 --- a/tests/ignite/contrib/metrics/regression/test_mean_normalized_bias.py +++ b/tests/ignite/contrib/metrics/regression/test_mean_normalized_bias.py @@ -26,20 +26,16 @@ def test_wrong_input_shapes(): m = MeanNormalizedBias() with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4, 1))) + m.update((torch.rand(4, 1, 2), torch.rand(4, 1))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1), - torch.rand(4, 1, 2))) + m.update((torch.rand(4, 1), torch.rand(4, 1, 2))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4,))) + m.update((torch.rand(4, 1, 2), torch.rand(4,))) with pytest.raises(ValueError): - m.update((torch.rand(4,), - torch.rand(4, 1, 2))) + m.update((torch.rand(4,), torch.rand(4, 1, 2))) def test_mean_error(): diff --git a/tests/ignite/contrib/metrics/regression/test_median_absolute_error.py b/tests/ignite/contrib/metrics/regression/test_median_absolute_error.py index 1712adf3e31c..53acde1e86c0 100644 --- a/tests/ignite/contrib/metrics/regression/test_median_absolute_error.py +++ b/tests/ignite/contrib/metrics/regression/test_median_absolute_error.py @@ -9,20 +9,16 @@ def test_wrong_input_shapes(): m = 
MedianAbsoluteError() with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4, 1))) + m.update((torch.rand(4, 1, 2), torch.rand(4, 1))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1), - torch.rand(4, 1, 2))) + m.update((torch.rand(4, 1), torch.rand(4, 1, 2))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4,))) + m.update((torch.rand(4, 1, 2), torch.rand(4,))) with pytest.raises(ValueError): - m.update((torch.rand(4,), - torch.rand(4, 1, 2))) + m.update((torch.rand(4,), torch.rand(4, 1, 2))) def test_median_absolute_error(): @@ -65,7 +61,7 @@ def test_median_absolute_error_2(): n_iters = size // batch_size + 1 for i in range(n_iters): idx = i * batch_size - m.update((y_pred[idx: idx + batch_size], y[idx: idx + batch_size])) + m.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) assert np_median_absolute_error == pytest.approx(m.compute()) @@ -83,16 +79,16 @@ def test_integration_median_absolute_error_with_output_transform(): def update_fn(engine, batch): idx = (engine.state.iteration - 1) * batch_size - y_true_batch = np_y[idx:idx + batch_size] - y_pred_batch = np_y_pred[idx:idx + batch_size] + y_true_batch = np_y[idx : idx + batch_size] + y_pred_batch = np_y_pred[idx : idx + batch_size] return idx, torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch) engine = Engine(update_fn) m = MedianAbsoluteError(output_transform=lambda x: (x[1], x[2])) - m.attach(engine, 'median_absolute_error') + m.attach(engine, "median_absolute_error") data = list(range(size // batch_size)) - median_absolute_error = engine.run(data, max_epochs=1).metrics['median_absolute_error'] + median_absolute_error = engine.run(data, max_epochs=1).metrics["median_absolute_error"] assert np_median_absolute_error == pytest.approx(median_absolute_error) diff --git a/tests/ignite/contrib/metrics/regression/test_median_absolute_percentage_error.py b/tests/ignite/contrib/metrics/regression/test_median_absolute_percentage_error.py index ab40726db919..34069556820b 100644 --- a/tests/ignite/contrib/metrics/regression/test_median_absolute_percentage_error.py +++ b/tests/ignite/contrib/metrics/regression/test_median_absolute_percentage_error.py @@ -9,20 +9,16 @@ def test_wrong_input_shapes(): m = MedianAbsolutePercentageError() with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4, 1))) + m.update((torch.rand(4, 1, 2), torch.rand(4, 1))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1), - torch.rand(4, 1, 2))) + m.update((torch.rand(4, 1), torch.rand(4, 1, 2))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4,))) + m.update((torch.rand(4, 1, 2), torch.rand(4,))) with pytest.raises(ValueError): - m.update((torch.rand(4,), - torch.rand(4, 1, 2))) + m.update((torch.rand(4,), torch.rand(4, 1, 2))) def test_median_absolute_percentage_error(): @@ -65,7 +61,7 @@ def test_median_absolute_percentage_error_2(): n_iters = size // batch_size + 1 for i in range(n_iters): idx = i * batch_size - m.update((y_pred[idx: idx + batch_size], y[idx: idx + batch_size])) + m.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) assert np_median_absolute_percentage_error == pytest.approx(m.compute()) @@ -83,16 +79,16 @@ def test_integration_median_absolute_percentage_error_with_output_transform(): def update_fn(engine, batch): idx = (engine.state.iteration - 1) * batch_size - y_true_batch = np_y[idx:idx + batch_size] - y_pred_batch = np_y_pred[idx:idx + batch_size] + y_true_batch = 
np_y[idx : idx + batch_size] + y_pred_batch = np_y_pred[idx : idx + batch_size] return idx, torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch) engine = Engine(update_fn) m = MedianAbsolutePercentageError(output_transform=lambda x: (x[1], x[2])) - m.attach(engine, 'median_absolute_percentage_error') + m.attach(engine, "median_absolute_percentage_error") data = list(range(size // batch_size)) - median_absolute_percentage_error = engine.run(data, max_epochs=1).metrics['median_absolute_percentage_error'] + median_absolute_percentage_error = engine.run(data, max_epochs=1).metrics["median_absolute_percentage_error"] assert np_median_absolute_percentage_error == pytest.approx(median_absolute_percentage_error) diff --git a/tests/ignite/contrib/metrics/regression/test_median_relative_absolute_error.py b/tests/ignite/contrib/metrics/regression/test_median_relative_absolute_error.py index 05399934930d..c445ed44f3fe 100644 --- a/tests/ignite/contrib/metrics/regression/test_median_relative_absolute_error.py +++ b/tests/ignite/contrib/metrics/regression/test_median_relative_absolute_error.py @@ -9,20 +9,16 @@ def test_wrong_input_shapes(): m = MedianRelativeAbsoluteError() with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4, 1))) + m.update((torch.rand(4, 1, 2), torch.rand(4, 1))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1), - torch.rand(4, 1, 2))) + m.update((torch.rand(4, 1), torch.rand(4, 1, 2))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4,))) + m.update((torch.rand(4, 1, 2), torch.rand(4,))) with pytest.raises(ValueError): - m.update((torch.rand(4,), - torch.rand(4, 1, 2))) + m.update((torch.rand(4,), torch.rand(4, 1, 2))) def test_median_relative_absolute_error(): @@ -65,7 +61,7 @@ def test_median_relative_absolute_error_2(): n_iters = size // batch_size + 1 for i in range(n_iters + 1): idx = i * batch_size - m.update((y_pred[idx: idx + batch_size], y[idx: idx + batch_size])) + m.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) assert np_median_absolute_relative_error == pytest.approx(m.compute()) @@ -83,16 +79,16 @@ def test_integration_median_relative_absolute_error_with_output_transform(): def update_fn(engine, batch): idx = (engine.state.iteration - 1) * batch_size - y_true_batch = np_y[idx:idx + batch_size] - y_pred_batch = np_y_pred[idx:idx + batch_size] + y_true_batch = np_y[idx : idx + batch_size] + y_pred_batch = np_y_pred[idx : idx + batch_size] return idx, torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch) engine = Engine(update_fn) m = MedianRelativeAbsoluteError(output_transform=lambda x: (x[1], x[2])) - m.attach(engine, 'median_absolute_relative_error') + m.attach(engine, "median_absolute_relative_error") data = list(range(size // batch_size)) - median_absolute_relative_error = engine.run(data, max_epochs=1).metrics['median_absolute_relative_error'] + median_absolute_relative_error = engine.run(data, max_epochs=1).metrics["median_absolute_relative_error"] assert np_median_absolute_relative_error == pytest.approx(median_absolute_relative_error) diff --git a/tests/ignite/contrib/metrics/regression/test_r2_score.py b/tests/ignite/contrib/metrics/regression/test_r2_score.py index b72015496176..3fb4e411bee5 100644 --- a/tests/ignite/contrib/metrics/regression/test_r2_score.py +++ b/tests/ignite/contrib/metrics/regression/test_r2_score.py @@ -10,20 +10,16 @@ def test_wrong_input_shapes(): m = R2Score() with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - 
torch.rand(4, 1))) + m.update((torch.rand(4, 1, 2), torch.rand(4, 1))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1), - torch.rand(4, 1, 2))) + m.update((torch.rand(4, 1), torch.rand(4, 1, 2))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4,))) + m.update((torch.rand(4, 1, 2), torch.rand(4,))) with pytest.raises(ValueError): - m.update((torch.rand(4,), - torch.rand(4, 1, 2))) + m.update((torch.rand(4,), torch.rand(4, 1, 2))) def test_r2_score(): @@ -59,7 +55,7 @@ def test_r2_score_2(): n_iters = size // batch_size + 1 for i in range(n_iters): idx = i * batch_size - m.update((y_pred[idx: idx + batch_size], y[idx: idx + batch_size])) + m.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) assert r2_score(np_y, np_y_pred) == pytest.approx(m.compute()) @@ -76,16 +72,16 @@ def test_integration_r2_score_with_output_transform(): def update_fn(engine, batch): idx = (engine.state.iteration - 1) * batch_size - y_true_batch = np_y[idx:idx + batch_size] - y_pred_batch = np_y_pred[idx:idx + batch_size] + y_true_batch = np_y[idx : idx + batch_size] + y_pred_batch = np_y_pred[idx : idx + batch_size] return idx, torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch) engine = Engine(update_fn) m = R2Score(output_transform=lambda x: (x[1], x[2])) - m.attach(engine, 'r2_score') + m.attach(engine, "r2_score") data = list(range(size // batch_size)) - r_squared = engine.run(data, max_epochs=1).metrics['r2_score'] + r_squared = engine.run(data, max_epochs=1).metrics["r2_score"] assert r2_score(np_y, np_y_pred) == pytest.approx(r_squared) diff --git a/tests/ignite/contrib/metrics/regression/test_wave_hedges_distance.py b/tests/ignite/contrib/metrics/regression/test_wave_hedges_distance.py index 92b4c71e9d0b..bd8d1d558f73 100644 --- a/tests/ignite/contrib/metrics/regression/test_wave_hedges_distance.py +++ b/tests/ignite/contrib/metrics/regression/test_wave_hedges_distance.py @@ -9,20 +9,16 @@ def test_wrong_input_shapes(): m = WaveHedgesDistance() with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4, 1))) + m.update((torch.rand(4, 1, 2), torch.rand(4, 1))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1), - torch.rand(4, 1, 2))) + m.update((torch.rand(4, 1), torch.rand(4, 1, 2))) with pytest.raises(ValueError): - m.update((torch.rand(4, 1, 2), - torch.rand(4,))) + m.update((torch.rand(4, 1, 2), torch.rand(4,))) with pytest.raises(ValueError): - m.update((torch.rand(4,), - torch.rand(4, 1, 2))) + m.update((torch.rand(4,), torch.rand(4, 1, 2))) def test_compute(): diff --git a/tests/ignite/contrib/metrics/test_average_precision.py b/tests/ignite/contrib/metrics/test_average_precision.py index 0e8034be35bd..c32357eab351 100644 --- a/tests/ignite/contrib/metrics/test_average_precision.py +++ b/tests/ignite/contrib/metrics/test_average_precision.py @@ -31,7 +31,7 @@ def test_ap_score_2(): size = 100 np_y_pred = np.random.rand(size, 1) np_y = np.zeros((size,), dtype=np.long) - np_y[size // 2:] = 1 + np_y[size // 2 :] = 1 np.random.shuffle(np_y) np_ap = average_precision_score(np_y, np_y_pred) @@ -44,7 +44,7 @@ def test_ap_score_2(): batch_size = size // n_iters for i in range(n_iters): idx = i * batch_size - ap_metric.update((y_pred[idx: idx + batch_size], y[idx: idx + batch_size])) + ap_metric.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) ap = ap_metric.compute() @@ -57,7 +57,7 @@ def test_integration_ap_score_with_output_transform(): size = 100 np_y_pred = np.random.rand(size, 1) np_y = 
np.zeros((size,), dtype=np.long) - np_y[size // 2:] = 1 + np_y[size // 2 :] = 1 np.random.shuffle(np_y) np_ap = average_precision_score(np_y, np_y_pred) @@ -66,17 +66,17 @@ def test_integration_ap_score_with_output_transform(): def update_fn(engine, batch): idx = (engine.state.iteration - 1) * batch_size - y_true_batch = np_y[idx:idx + batch_size] - y_pred_batch = np_y_pred[idx:idx + batch_size] + y_true_batch = np_y[idx : idx + batch_size] + y_pred_batch = np_y_pred[idx : idx + batch_size] return idx, torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch) engine = Engine(update_fn) ap_metric = AveragePrecision(output_transform=lambda x: (x[1], x[2])) - ap_metric.attach(engine, 'ap') + ap_metric.attach(engine, "ap") data = list(range(size // batch_size)) - ap = engine.run(data, max_epochs=1).metrics['ap'] + ap = engine.run(data, max_epochs=1).metrics["ap"] assert ap == np_ap @@ -88,7 +88,7 @@ def test_integration_ap_score_with_activated_output_transform(): np_y_pred = np.random.rand(size, 1) np_y_pred_softmax = torch.softmax(torch.from_numpy(np_y_pred), dim=1).numpy() np_y = np.zeros((size,), dtype=np.long) - np_y[size // 2:] = 1 + np_y[size // 2 :] = 1 np.random.shuffle(np_y) np_ap = average_precision_score(np_y, np_y_pred_softmax) @@ -97,16 +97,16 @@ def test_integration_ap_score_with_activated_output_transform(): def update_fn(engine, batch): idx = (engine.state.iteration - 1) * batch_size - y_true_batch = np_y[idx:idx + batch_size] - y_pred_batch = np_y_pred[idx:idx + batch_size] + y_true_batch = np_y[idx : idx + batch_size] + y_pred_batch = np_y_pred[idx : idx + batch_size] return idx, torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch) engine = Engine(update_fn) ap_metric = AveragePrecision(output_transform=lambda x: (torch.softmax(x[1], dim=1), x[2])) - ap_metric.attach(engine, 'ap') + ap_metric.attach(engine, "ap") data = list(range(size // batch_size)) - ap = engine.run(data, max_epochs=1).metrics['ap'] + ap = engine.run(data, max_epochs=1).metrics["ap"] assert ap == np_ap diff --git a/tests/ignite/contrib/metrics/test_gpu_info.py b/tests/ignite/contrib/metrics/test_gpu_info.py index f1f4e80b2b03..05ef7a7fc785 100644 --- a/tests/ignite/contrib/metrics/test_gpu_info.py +++ b/tests/ignite/contrib/metrics/test_gpu_info.py @@ -8,25 +8,25 @@ import pytest from unittest.mock import Mock, patch -python_below_36 = (sys.version[0] == '3' and int(sys.version[2]) < 6) or int(sys.version[0]) < 2 +python_below_36 = (sys.version[0] == "3" and int(sys.version[2]) < 6) or int(sys.version[0]) < 2 @pytest.fixture def no_site_packages(): import pynvml import sys - assert 'pynvml' in sys.modules - pynvml_module = sys.modules['pynvml'] - del sys.modules['pynvml'] + + assert "pynvml" in sys.modules + pynvml_module = sys.modules["pynvml"] + del sys.modules["pynvml"] prev_path = list(sys.path) sys.path = [p for p in sys.path if "site-packages" not in p] yield "no_site_packages" sys.path = prev_path - sys.modules['pynvml'] = pynvml_module + sys.modules["pynvml"] = pynvml_module -@pytest.mark.skipif(python_below_36, - reason="No pynvml for python < 3.6") +@pytest.mark.skipif(python_below_36, reason="No pynvml for python < 3.6") def test_no_pynvml_package(no_site_packages): with pytest.raises(RuntimeError, match="This contrib module requires pynvml to be installed."): @@ -40,7 +40,7 @@ def test_no_gpu(): GpuInfo() -def _test_gpu_info(device='cpu'): +def _test_gpu_info(device="cpu"): gpu_info = GpuInfo() # increase code cov @@ -51,60 +51,52 @@ def _test_gpu_info(device='cpu'): data = 
gpu_info.compute() assert len(data) > 0 assert "fb_memory_usage" in data[0] - mem_report = data[0]['fb_memory_usage'] - assert 'used' in mem_report and 'total' in mem_report - assert mem_report['total'] > 0.0 - assert mem_report['used'] > t.shape[0] * t.shape[1] * t.shape[2] * t.shape[3] / 1024.0 / 1024.0 + mem_report = data[0]["fb_memory_usage"] + assert "used" in mem_report and "total" in mem_report + assert mem_report["total"] > 0.0 + assert mem_report["used"] > t.shape[0] * t.shape[1] * t.shape[2] * t.shape[3] / 1024.0 / 1024.0 assert "utilization" in data[0] - util_report = data[0]['utilization'] - assert 'gpu_util' in util_report + util_report = data[0]["utilization"] + assert "gpu_util" in util_report # with Engine engine = Engine(lambda engine, batch: 0.0) engine.state = State(metrics={}) - gpu_info.completed(engine, name='gpu') + gpu_info.completed(engine, name="gpu") - assert 'gpu:0 mem(%)' in engine.state.metrics - assert 'gpu:0 util(%)' in engine.state.metrics + assert "gpu:0 mem(%)" in engine.state.metrics + assert "gpu:0 util(%)" in engine.state.metrics - assert isinstance(engine.state.metrics['gpu:0 mem(%)'], int) - assert int(mem_report['used'] * 100.0 / mem_report['total']) == engine.state.metrics['gpu:0 mem(%)'] + assert isinstance(engine.state.metrics["gpu:0 mem(%)"], int) + assert int(mem_report["used"] * 100.0 / mem_report["total"]) == engine.state.metrics["gpu:0 mem(%)"] - assert isinstance(engine.state.metrics['gpu:0 util(%)'], int) - assert int(util_report['gpu_util']) == engine.state.metrics['gpu:0 util(%)'] + assert isinstance(engine.state.metrics["gpu:0 util(%)"], int) + assert int(util_report["gpu_util"]) == engine.state.metrics["gpu:0 util(%)"] -@pytest.mark.skipif(python_below_36 or not (torch.cuda.is_available()), - reason="No pynvml for python < 3.6 and no GPU") +@pytest.mark.skipif(python_below_36 or not (torch.cuda.is_available()), reason="No pynvml for python < 3.6 and no GPU") def test_gpu_info(): - _test_gpu_info(device='cuda') + _test_gpu_info(device="cuda") @pytest.fixture def mock_pynvml_module(): - with patch.dict('sys.modules', { - 'pynvml': Mock(name='pynvml'), - 'pynvml.smi': Mock(name='pynvml.smi'), - 'pynvml.smi.nvidia_smi': Mock(name='pynvml.smi.nvidia_smi'), - }): + with patch.dict( + "sys.modules", + { + "pynvml": Mock(name="pynvml"), + "pynvml.smi": Mock(name="pynvml.smi"), + "pynvml.smi.nvidia_smi": Mock(name="pynvml.smi.nvidia_smi"), + }, + ): import pynvml from pynvml.smi import nvidia_smi def query(*args, **kwargs): - return { - "gpu": [{ - "fb_memory_usage": { - "used": 100.0, - "total": 11000.0 - }, - "utilization": { - "gpu_util": 50.0 - } - }] - } + return {"gpu": [{"fb_memory_usage": {"used": 100.0, "total": 11000.0}, "utilization": {"gpu_util": 50.0}}]} def getInstance(): nvsmi = Mock() @@ -118,7 +110,7 @@ def getInstance(): @pytest.fixture def mock_gpu_is_available(): - with patch('ignite.contrib.metrics.gpu_info.torch.cuda') as mock_cuda: + with patch("ignite.contrib.metrics.gpu_info.torch.cuda") as mock_cuda: mock_cuda.is_available.return_value = True yield mock_cuda @@ -151,15 +143,15 @@ def getInstance(): engine.state = State(metrics={}) with pytest.warns(UserWarning, match=warn_msg): - gpu_info.completed(engine, name='gpu info') + gpu_info.completed(engine, name="gpu info") # No GPU info _test_with_custom_query(resp={}, warn_msg=r"No GPU information available", check_compute=True) # No GPU memory info - _test_with_custom_query(resp={"gpu": [{"utilization": {}}, ]}, - warn_msg=r"No GPU memory usage information available") + 
_test_with_custom_query(resp={"gpu": [{"utilization": {}},]}, warn_msg=r"No GPU memory usage information available") # No GPU utilization info - _test_with_custom_query(resp={"gpu": [{"fb_memory_usage": {}}, ]}, - warn_msg=r"No GPU utilization information available") + _test_with_custom_query( + resp={"gpu": [{"fb_memory_usage": {}},]}, warn_msg=r"No GPU utilization information available" + ) diff --git a/tests/ignite/contrib/metrics/test_roc_auc.py b/tests/ignite/contrib/metrics/test_roc_auc.py index ba42a67910ab..4fff4ce6521c 100644 --- a/tests/ignite/contrib/metrics/test_roc_auc.py +++ b/tests/ignite/contrib/metrics/test_roc_auc.py @@ -12,7 +12,7 @@ def test_roc_auc_score(): size = 100 np_y_pred = np.random.rand(size, 1) np_y = np.zeros((size,), dtype=np.long) - np_y[size // 2:] = 1 + np_y[size // 2 :] = 1 np_roc_auc = roc_auc_score(np_y, np_y_pred) roc_auc_metric = ROC_AUC() @@ -32,7 +32,7 @@ def test_roc_auc_score_2(): size = 100 np_y_pred = np.random.rand(size, 1) np_y = np.zeros((size,), dtype=np.long) - np_y[size // 2:] = 1 + np_y[size // 2 :] = 1 np.random.shuffle(np_y) np_roc_auc = roc_auc_score(np_y, np_y_pred) @@ -45,7 +45,7 @@ def test_roc_auc_score_2(): batch_size = size // n_iters for i in range(n_iters): idx = i * batch_size - roc_auc_metric.update((y_pred[idx: idx + batch_size], y[idx: idx + batch_size])) + roc_auc_metric.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) roc_auc = roc_auc_metric.compute() @@ -58,7 +58,7 @@ def test_integration_roc_auc_score_with_output_transform(): size = 100 np_y_pred = np.random.rand(size, 1) np_y = np.zeros((size,), dtype=np.long) - np_y[size // 2:] = 1 + np_y[size // 2 :] = 1 np.random.shuffle(np_y) np_roc_auc = roc_auc_score(np_y, np_y_pred) @@ -67,17 +67,17 @@ def test_integration_roc_auc_score_with_output_transform(): def update_fn(engine, batch): idx = (engine.state.iteration - 1) * batch_size - y_true_batch = np_y[idx:idx + batch_size] - y_pred_batch = np_y_pred[idx:idx + batch_size] + y_true_batch = np_y[idx : idx + batch_size] + y_pred_batch = np_y_pred[idx : idx + batch_size] return idx, torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch) engine = Engine(update_fn) roc_auc_metric = ROC_AUC(output_transform=lambda x: (x[1], x[2])) - roc_auc_metric.attach(engine, 'roc_auc') + roc_auc_metric.attach(engine, "roc_auc") data = list(range(size // batch_size)) - roc_auc = engine.run(data, max_epochs=1).metrics['roc_auc'] + roc_auc = engine.run(data, max_epochs=1).metrics["roc_auc"] assert roc_auc == np_roc_auc @@ -89,7 +89,7 @@ def test_integration_roc_auc_score_with_activated_output_transform(): np_y_pred = np.random.rand(size, 1) np_y_pred_sigmoid = torch.sigmoid(torch.from_numpy(np_y_pred)).numpy() np_y = np.zeros((size,), dtype=np.long) - np_y[size // 2:] = 1 + np_y[size // 2 :] = 1 np.random.shuffle(np_y) np_roc_auc = roc_auc_score(np_y, np_y_pred_sigmoid) @@ -98,16 +98,16 @@ def test_integration_roc_auc_score_with_activated_output_transform(): def update_fn(engine, batch): idx = (engine.state.iteration - 1) * batch_size - y_true_batch = np_y[idx:idx + batch_size] - y_pred_batch = np_y_pred[idx:idx + batch_size] + y_true_batch = np_y[idx : idx + batch_size] + y_pred_batch = np_y_pred[idx : idx + batch_size] return idx, torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch) engine = Engine(update_fn) roc_auc_metric = ROC_AUC(output_transform=lambda x: (torch.sigmoid(x[1]), x[2])) - roc_auc_metric.attach(engine, 'roc_auc') + roc_auc_metric.attach(engine, "roc_auc") data = list(range(size // 
batch_size)) - roc_auc = engine.run(data, max_epochs=1).metrics['roc_auc'] + roc_auc = engine.run(data, max_epochs=1).metrics["roc_auc"] assert roc_auc == np_roc_auc diff --git a/tests/ignite/engine/conftest.py b/tests/ignite/engine/conftest.py index 2b245580bf72..e9ae7ae63766 100644 --- a/tests/ignite/engine/conftest.py +++ b/tests/ignite/engine/conftest.py @@ -21,7 +21,6 @@ def __call__(self, engine): @pytest.fixture() def counter_factory(): - def create(name, start_value=1): if name == "epoch": return EpochCounter(start_value) diff --git a/tests/ignite/engine/test_create_supervised.py b/tests/ignite/engine/test_create_supervised.py index 2bdef84e147d..d4044af4318c 100644 --- a/tests/ignite/engine/test_create_supervised.py +++ b/tests/ignite/engine/test_create_supervised.py @@ -36,7 +36,7 @@ def test_create_supervised_trainer_with_cpu(): model.weight.data.zero_() model.bias.data.zero_() optimizer = SGD(model.parameters(), 0.1) - trainer = create_supervised_trainer(model, optimizer, mse_loss, device='cpu') + trainer = create_supervised_trainer(model, optimizer, mse_loss, device="cpu") x = torch.tensor([[1.0], [2.0]]) y = torch.tensor([[3.0], [5.0]]) @@ -62,7 +62,7 @@ def test_create_supervised_trainer_traced_with_cpu(): optimizer = SGD(traced_model.parameters(), 0.1) - trainer = create_supervised_trainer(traced_model, optimizer, mse_loss, device='cpu') + trainer = create_supervised_trainer(traced_model, optimizer, mse_loss, device="cpu") x = torch.tensor([[1.0], [2.0]]) y = torch.tensor([[3.0], [5.0]]) @@ -84,7 +84,7 @@ def test_create_supervised_trainer_on_cuda(): model.weight.data.zero_() model.bias.data.zero_() optimizer = SGD(model.parameters(), 0.1) - trainer = create_supervised_trainer(model, optimizer, mse_loss, device='cuda') + trainer = create_supervised_trainer(model, optimizer, mse_loss, device="cuda") x = torch.tensor([[1.0], [2.0]]) y = torch.tensor([[3.0], [5.0]]) @@ -128,7 +128,7 @@ def test_create_supervised_on_cpu(): model.weight.data.zero_() model.bias.data.zero_() - evaluator = create_supervised_evaluator(model, device='cpu') + evaluator = create_supervised_evaluator(model, device="cpu") x = torch.tensor([[1.0], [2.0]]) y = torch.tensor([[3.0], [5.0]]) @@ -154,7 +154,7 @@ def test_create_supervised_evaluator_traced_on_cpu(): example_input = torch.randn(1, 1) traced_model = torch.jit.trace(model, example_input) - evaluator = create_supervised_evaluator(traced_model, device='cpu') + evaluator = create_supervised_evaluator(traced_model, device="cpu") x = torch.tensor([[1.0], [2.0]]) y = torch.tensor([[3.0], [5.0]]) @@ -178,7 +178,7 @@ def test_create_supervised_on_cuda(): model.weight.data.zero_() model.bias.data.zero_() - evaluator = create_supervised_evaluator(model, device='cuda') + evaluator = create_supervised_evaluator(model, device="cuda") x = torch.tensor([[1.0], [2.0]]) y = torch.tensor([[3.0], [5.0]]) @@ -201,11 +201,11 @@ def test_create_supervised_with_metrics(): model.weight.data.zero_() model.bias.data.zero_() - evaluator = create_supervised_evaluator(model, metrics={'mse': MeanSquaredError()}) + evaluator = create_supervised_evaluator(model, metrics={"mse": MeanSquaredError()}) x = torch.tensor([[1.0], [2.0]]) y = torch.tensor([[3.0], [4.0]]) data = [(x, y)] state = evaluator.run(data) - assert state.metrics['mse'] == 12.5 + assert state.metrics["mse"] == 12.5 diff --git a/tests/ignite/engine/test_custom_events.py b/tests/ignite/engine/test_custom_events.py index 7b11dec89429..303223501930 100644 --- a/tests/ignite/engine/test_custom_events.py +++ 
b/tests/ignite/engine/test_custom_events.py @@ -39,11 +39,10 @@ def process_func(engine, batch): def test_custom_events_with_event_to_attr(): - class CustomEvents(Enum): TEST_EVENT = "test_event" - custom_event_to_attr = {CustomEvents.TEST_EVENT: 'test_event'} + custom_event_to_attr = {CustomEvents.TEST_EVENT: "test_event"} # Dummy engine engine = Engine(lambda engine, batch: 0) @@ -53,7 +52,7 @@ class CustomEvents(Enum): handle = MagicMock() engine.add_event_handler(CustomEvents.TEST_EVENT, handle) engine.run(range(1)) - assert hasattr(engine.state, 'test_event') + assert hasattr(engine.state, "test_event") assert engine.state.test_event == 0 # Advanced engine @@ -70,7 +69,7 @@ def handle(engine): engine.run(range(25)) assert engine.state.test_event == 25 - custom_event_to_attr = 'a' + custom_event_to_attr = "a" engine = Engine(lambda engine, batch: 0) with pytest.raises(ValueError): engine.register_events(*CustomEvents, event_to_attr=custom_event_to_attr) @@ -197,9 +196,15 @@ def _test(event_name, event_attr, every, true_num_calls): engine = Engine(lambda e, b: b) - counter = [0, ] - counter_every = [0, ] - num_calls = [0, ] + counter = [ + 0, + ] + counter_every = [ + 0, + ] + num_calls = [ + 0, + ] @engine.on(event_name(every=every)) def assert_every(engine): @@ -228,14 +233,17 @@ def test_every_event_filter_with_engine(): def test_once_event_filter_with_engine(): - def _test(event_name, event_attr): engine = Engine(lambda e, b: b) once = 2 - counter = [0, ] - num_calls = [0, ] + counter = [ + 0, + ] + num_calls = [ + 0, + ] @engine.on(event_name(once=once)) def assert_once(engine): @@ -271,7 +279,9 @@ def _test(event_name, event_attr, true_num_calls): engine = Engine(lambda e, b: b) - num_calls = [0, ] + num_calls = [ + 0, + ] @engine.on(event_name(event_filter=custom_event_filter)) def assert_on_special_event(engine): @@ -300,7 +310,9 @@ def custom_event_filter(engine, event): # Check bad behaviour engine = Engine(lambda e, b: b) - counter = [0, ] + counter = [ + 0, + ] # Modify events Events.ITERATION_STARTED(event_filter=custom_event_filter) @@ -317,7 +329,6 @@ def assert_all_iters(engine): def test_custom_callable_events(): - class CustomEvents(Enum): TEST_EVENT = "test_event" @@ -331,13 +342,10 @@ class CustomEvents2(CallableEvents, Enum): def test_custom_callable_events_with_engine(): - class CustomEvents(CallableEvents, Enum): TEST_EVENT = "test_event" - event_to_attr = { - CustomEvents.TEST_EVENT: "test_event" - } + event_to_attr = {CustomEvents.TEST_EVENT: "test_event"} special_events = [1, 2, 5, 7, 17, 20] @@ -347,7 +355,6 @@ def custom_event_filter(engine, event): return False def _test(event_name, event_attr, true_num_calls): - def update_fn(engine, batch): engine.state.test_event = engine.state.iteration engine.fire_event(CustomEvents.TEST_EVENT) @@ -355,7 +362,9 @@ def update_fn(engine, batch): engine = Engine(update_fn) engine.register_events(*CustomEvents, event_to_attr=event_to_attr) - num_calls = [0, ] + num_calls = [ + 0, + ] @engine.on(event_name(event_filter=custom_event_filter)) def assert_on_special_event(engine): @@ -371,17 +380,20 @@ def assert_on_special_event(engine): def _test_every_event_filter_with_engine_with_dataloader(device): - def _test(num_workers): max_epochs = 3 batch_size = 4 num_iters = 21 data = torch.randint(0, 1000, size=(num_iters * batch_size,)) - dataloader = torch.utils.data.DataLoader(data, batch_size=batch_size, - num_workers=num_workers, - pin_memory="cuda" in device, - drop_last=True, shuffle=True) + dataloader = 
torch.utils.data.DataLoader( + data, + batch_size=batch_size, + num_workers=num_workers, + pin_memory="cuda" in device, + drop_last=True, + shuffle=True, + ) seen_batchs = [] def update_fn(engine, batch): @@ -398,6 +410,7 @@ def foo(engine): engine = None import gc + gc.collect() assert len(gc.garbage) == 0 @@ -418,6 +431,6 @@ def test_distrib_cpu(distributed_context_single_node_gloo): @pytest.mark.distributed @pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU") def test_distrib_gpu(distributed_context_single_node_nccl): - device = "cuda:{}".format(distributed_context_single_node_nccl['local_rank']) + device = "cuda:{}".format(distributed_context_single_node_nccl["local_rank"]) _test_every_event_filter_with_engine(device) _test_every_event_filter_with_engine_with_dataloader(device) diff --git a/tests/ignite/engine/test_engine.py b/tests/ignite/engine/test_engine.py index d7267d4235dd..3dfd15a52432 100644 --- a/tests/ignite/engine/test_engine.py +++ b/tests/ignite/engine/test_engine.py @@ -1,5 +1,3 @@ - - import os import pytest from unittest.mock import call, MagicMock, Mock @@ -35,7 +33,7 @@ def test_current_epoch_counter_increases_every_epoch(counter_factory): engine = Engine(MagicMock(return_value=1)) max_epochs = 5 - counter = counter_factory('epoch') + counter = counter_factory("epoch") engine.add_event_handler(Events.EPOCH_STARTED, counter) state = engine.run([1, 2], max_epochs=max_epochs) @@ -50,7 +48,7 @@ def test_current_iteration_counter_increases_every_iteration(counter_factory): engine = Engine(MagicMock(return_value=1)) max_epochs = 5 - counter = counter_factory('iter') + counter = counter_factory("iter") engine.add_event_handler(Events.ITERATION_STARTED, counter) state = engine.run(batches, max_epochs=max_epochs) @@ -123,7 +121,7 @@ def start_of_iteration_handler(engine): engine.add_event_handler(Events.ITERATION_STARTED, start_of_iteration_handler) state = engine.run(data=[None] * num_iterations_per_epoch, max_epochs=3) # completes the iteration but doesn't increment counter (this happens just before a new iteration starts) - assert (state.iteration == iteration_to_stop) + assert state.iteration == iteration_to_stop assert state.epoch == np.ceil(iteration_to_stop / num_iterations_per_epoch) # it starts from 0 @@ -140,8 +138,7 @@ def start_of_iteration_handler(engine): engine.add_event_handler(Events.ITERATION_STARTED, start_of_iteration_handler) state = engine.run(data=[None] * num_iterations_per_epoch, max_epochs=max_epochs) # completes the iteration but doesn't increment counter (this happens just before a new iteration starts) - assert state.iteration == num_iterations_per_epoch * (max_epochs - 1) + \ - iteration_to_stop % num_iterations_per_epoch + assert state.iteration == num_iterations_per_epoch * (max_epochs - 1) + iteration_to_stop % num_iterations_per_epoch def _create_mock_data_loader(epochs, batches_per_epoch): @@ -169,8 +166,8 @@ def test_iteration_events_are_fired(): iteration_complete = Mock() engine.add_event_handler(Events.ITERATION_COMPLETED, iteration_complete) - mock_manager.attach_mock(iteration_started, 'iteration_started') - mock_manager.attach_mock(iteration_complete, 'iteration_complete') + mock_manager.attach_mock(iteration_started, "iteration_started") + mock_manager.attach_mock(iteration_complete, "iteration_complete") engine.run(data, max_epochs=max_epochs) @@ -214,7 +211,6 @@ def _(_engine): def test_reset_should_terminate(): - def update_fn(engine, batch): pass @@ -233,7 +229,6 @@ def terminate_on_iteration_10(engine): 
def test_batch_values(): - def _test(data): # This test check the content passed to update function counter = [0] @@ -246,7 +241,7 @@ def update_fn(engine, batch): engine = Engine(update_fn) engine.run(data, max_epochs=10) - data = torch.randint(0, 1000, size=(256, )) + data = torch.randint(0, 1000, size=(256,)) _test(data) @@ -271,8 +266,8 @@ def test_alter_batch(): small_shape = (1, 2, 2) large_shape = (1, 3, 3) - small_loader = torch.randint(0, 256, size=(30, ) + small_shape) - large_loader = torch.randint(0, 256, size=(20, ) + large_shape) + small_loader = torch.randint(0, 256, size=(30,) + small_shape) + large_loader = torch.randint(0, 256, size=(20,) + large_shape) switch_iteration = 50 @@ -371,12 +366,17 @@ def test_state_get_event_attrib_value(): def _test_run_check_triggered_events(): - def _test(data, max_epochs, epoch_length): engine = Engine(lambda e, b: 1) - events = [Events.STARTED, Events.EPOCH_STARTED, Events.ITERATION_STARTED, - Events.ITERATION_COMPLETED, Events.EPOCH_COMPLETED, Events.COMPLETED] + events = [ + Events.STARTED, + Events.EPOCH_STARTED, + Events.ITERATION_STARTED, + Events.ITERATION_COMPLETED, + Events.EPOCH_COMPLETED, + Events.COMPLETED, + ] handlers = {e: MagicMock() for e in events} @@ -395,8 +395,9 @@ def _test(data, max_epochs, epoch_length): } for n, handler in handlers.items(): - assert handler.call_count == expected_num_calls[n], \ - "{}: {} vs {}".format(n, handler.call_count, expected_num_calls[n]) + assert handler.call_count == expected_num_calls[n], "{}: {} vs {}".format( + n, handler.call_count, expected_num_calls[n] + ) _test(list(range(100)), max_epochs=5, epoch_length=100) _test(list(range(100)), max_epochs=5, epoch_length=50) @@ -409,12 +410,17 @@ def test_run_check_triggered_events(): def _test_run_check_triggered_events_on_iterator(): - def _test(data, max_epochs, epoch_length): engine = Engine(lambda e, b: 1) - events = [Events.STARTED, Events.EPOCH_STARTED, Events.ITERATION_STARTED, - Events.ITERATION_COMPLETED, Events.EPOCH_COMPLETED, Events.COMPLETED] + events = [ + Events.STARTED, + Events.EPOCH_STARTED, + Events.ITERATION_STARTED, + Events.ITERATION_COMPLETED, + Events.EPOCH_COMPLETED, + Events.COMPLETED, + ] handlers = {e: MagicMock() for e in events} @@ -433,8 +439,9 @@ def _test(data, max_epochs, epoch_length): } for n, handler in handlers.items(): - assert handler.call_count == expected_num_calls[n], \ - "{}: {} vs {}".format(n, handler.call_count, expected_num_calls[n]) + assert handler.call_count == expected_num_calls[n], "{}: {} vs {}".format( + n, handler.call_count, expected_num_calls[n] + ) def infinite_data_iterator(): while True: @@ -485,21 +492,20 @@ def test_distrib_cpu(distributed_context_single_node_gloo): @pytest.mark.multinode_distributed -@pytest.mark.skipif('MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_cpu(distributed_context_multi_node_gloo): _test_run_check_triggered_events_on_iterator() _test_run_check_triggered_events() @pytest.mark.multinode_distributed -@pytest.mark.skipif('GPU_MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_gpu(distributed_context_multi_node_nccl): _test_run_check_triggered_events_on_iterator() _test_run_check_triggered_events() def 
test_engine_with_iterable_dataloader(): - class MyIterableDataset(torch.utils.data.IterableDataset): def __init__(self, start, end): super(MyIterableDataset).__init__() diff --git a/tests/ignite/engine/test_event_handlers.py b/tests/ignite/engine/test_event_handlers.py index d1d1013a4958..bdcf9a216058 100644 --- a/tests/ignite/engine/test_event_handlers.py +++ b/tests/ignite/engine/test_event_handlers.py @@ -74,12 +74,14 @@ def __init__(self, count=0): def handle_iteration_started(engine, counter): counter.count += 1 + engine.add_event_handler(Events.STARTED, handle_iteration_started, started_counter) completed_counter = Counter() def handle_iteration_completed(engine, counter): counter.count += 1 + engine.add_event_handler(Events.COMPLETED, handle_iteration_completed, completed_counter) engine.run(15) @@ -202,7 +204,7 @@ def test_has_event_handler(): def test_remove_event_handler(): engine = DummyEngine() - with pytest.raises(ValueError, match=r'Input event name'): + with pytest.raises(ValueError, match=r"Input event name"): engine.remove_event_handler(lambda x: x, "an event") def on_started(engine): @@ -210,7 +212,7 @@ def on_started(engine): engine.add_event_handler(Events.STARTED, on_started) - with pytest.raises(ValueError, match=r'Input handler'): + with pytest.raises(ValueError, match=r"Input handler"): engine.remove_event_handler(lambda x: x, Events.STARTED) h1 = MagicMock(spec_set=True) @@ -233,7 +235,7 @@ def on_started(engine): def test_args_and_kwargs_are_passed_to_event(): engine = DummyEngine() - kwargs = {'a': 'a', 'b': 'b'} + kwargs = {"a": "a", "b": "b"} args = (1, 2, 3) handlers = [] for event in [Events.STARTED, Events.COMPLETED]: @@ -255,6 +257,7 @@ def test_args_and_kwargs_are_passed_to_event(): def test_on_decorator_raises_with_invalid_event(): engine = DummyEngine() with pytest.raises(ValueError): + @engine.on("incorrect") def f(engine): pass @@ -287,7 +290,7 @@ def handle_iteration_completed(engine, completed_counter): def test_returns_state(): engine = Engine(MagicMock(return_value=1)) - state = engine.run([0, ]) + state = engine.run([0,]) assert isinstance(state, State) diff --git a/tests/ignite/engine/test_resume_run.py b/tests/ignite/engine/test_resume_run.py index 1c85bff6a513..de65b1a90e70 100644 --- a/tests/ignite/engine/test_resume_run.py +++ b/tests/ignite/engine/test_resume_run.py @@ -17,12 +17,11 @@ def test_state_dict(): def _test(state): engine.state = state sd = engine.state_dict() - assert isinstance(sd, Mapping) and \ - len(sd) == len(engine._state_dict_all_req_keys) + 1 - assert sd['seed'] == engine.state.seed - assert sd['iteration'] == engine.state.iteration - assert sd['epoch_length'] == engine.state.epoch_length - assert sd['max_epochs'] == engine.state.max_epochs + assert isinstance(sd, Mapping) and len(sd) == len(engine._state_dict_all_req_keys) + 1 + assert sd["seed"] == engine.state.seed + assert sd["iteration"] == engine.state.iteration + assert sd["epoch_length"] == engine.state.epoch_length + assert sd["max_epochs"] == engine.state.max_epochs _test(State(seed=0, iteration=500, epoch_length=1000, max_epochs=100)) _test(State(seed=0, epoch=5, epoch_length=1000, max_epochs=100)) @@ -34,10 +33,10 @@ def test_state_dict_integration(): engine.run(data, max_epochs=10, seed=17) sd = engine.state_dict() assert isinstance(sd, Mapping) and len(sd) == 4 - assert sd['seed'] == engine.state.seed - assert sd['iteration'] == engine.state.iteration == 10 * 100 - assert sd['epoch_length'] == engine.state.epoch_length == 100 - assert sd['max_epochs'] == 
engine.state.max_epochs == 10 + assert sd["seed"] == engine.state.seed + assert sd["iteration"] == engine.state.iteration == 10 * 100 + assert sd["epoch_length"] == engine.state.epoch_length == 100 + assert sd["max_epochs"] == engine.state.max_epochs == 10 def test_load_state_dict_asserts(): @@ -53,8 +52,7 @@ def test_load_state_dict_asserts(): engine.load_state_dict({"seed": 0, "max_epochs": 100, "epoch_length": 120}) with pytest.raises(ValueError, match=r"state_dict should contain only one of"): - engine.load_state_dict({"seed": 0, "max_epochs": 100, "epoch_length": 120, - "iteration": 12, "epoch": 123}) + engine.load_state_dict({"seed": 0, "max_epochs": 100, "epoch_length": 120, "iteration": 12, "epoch": 123}) def test_load_state_dict(): @@ -62,13 +60,13 @@ def test_load_state_dict(): def _test(sd): engine.load_state_dict(sd) - assert sd['seed'] == engine.state.seed - if 'iteration' in sd: - assert sd['iteration'] == engine.state.iteration - elif 'epoch' in sd: - assert sd['epoch'] == engine.state.epoch - assert sd['epoch_length'] == engine.state.epoch_length - assert sd['max_epochs'] == engine.state.max_epochs + assert sd["seed"] == engine.state.seed + if "iteration" in sd: + assert sd["iteration"] == engine.state.iteration + elif "epoch" in sd: + assert sd["epoch"] == engine.state.epoch + assert sd["epoch_length"] == engine.state.epoch_length + assert sd["max_epochs"] == engine.state.max_epochs _test({"seed": 0, "max_epochs": 100, "epoch_length": 120, "iteration": 123}) _test({"seed": 0, "max_epochs": 100, "epoch_length": 120, "epoch": 5}) @@ -80,14 +78,13 @@ def test_load_state_dict_integration(counter_factory): state_dict = {"seed": 0, "max_epochs": 100, "epoch_length": 120, "epoch": 5} engine.load_state_dict(state_dict) - engine.add_event_handler(Events.ITERATION_COMPLETED, counter_factory('iter', 5 * 120 + 1)) - engine.add_event_handler(Events.EPOCH_COMPLETED, counter_factory('epoch', 6)) + engine.add_event_handler(Events.ITERATION_COMPLETED, counter_factory("iter", 5 * 120 + 1)) + engine.add_event_handler(Events.EPOCH_COMPLETED, counter_factory("epoch", 6)) data = list(range(120)) engine.run(data) class BatchChecker: - def __init__(self, data, init_counter=0): self.counter = init_counter self.data = data @@ -100,14 +97,14 @@ def check(self, batch): def test_epoch_length(): - def _test(data, max_epochs, num_iters): batch_checker = BatchChecker(data) def update_fn(engine, batch): - assert batch_checker.check(batch), \ - "{}: {} vs {}".format(batch_checker.counter, batch_checker.true_batch, batch) + assert batch_checker.check(batch), "{}: {} vs {}".format( + batch_checker.counter, batch_checker.true_batch, batch + ) engine = Engine(update_fn) engine.run(data, max_epochs=max_epochs, epoch_length=num_iters) @@ -121,8 +118,9 @@ def _test_as_iter(data, max_epochs, num_iters): batch_checker = BatchChecker(data) def update_fn(engine, batch): - assert batch_checker.check(batch), \ - "{}: {} vs {}".format(batch_checker.counter, batch_checker.true_batch, batch) + assert batch_checker.check(batch), "{}: {} vs {}".format( + batch_checker.counter, batch_checker.true_batch, batch + ) engine = Engine(update_fn) engine.run(iter(data), max_epochs=max_epochs, epoch_length=num_iters) @@ -144,7 +142,6 @@ def update_fn(engine, batch): def test_strict_resume_from_iter(): - def _test(epoch_length=None): max_epochs = 5 @@ -157,10 +154,9 @@ def _test(epoch_length=None): batch_checker = BatchChecker(data, init_counter=resume_iteration) def update_fn(engine, batch): - assert batch_checker.check(batch), \ - 
"{} | {}: {} vs {}".format( - resume_iteration, - batch_checker.counter, batch_checker.true_batch, batch) + assert batch_checker.check(batch), "{} | {}: {} vs {}".format( + resume_iteration, batch_checker.counter, batch_checker.true_batch, batch + ) engine = Engine(update_fn) @@ -172,7 +168,7 @@ def check_iteration(engine): "iteration": resume_iteration, "max_epochs": max_epochs, "epoch_length": epoch_length, - "seed": 0 + "seed": 0, } engine.load_state_dict(resume_state_dict) engine.run(data) @@ -185,7 +181,6 @@ def check_iteration(engine): def test_strict_resume_from_epoch(): - def _test(epoch_length=None): max_epochs = 10 num_iters = 21 @@ -197,16 +192,12 @@ def _test(epoch_length=None): batch_checker = BatchChecker(data, init_counter=resume_epoch * epoch_length) def update_fn(engine, batch): - assert batch_checker.check(batch), \ - "{} | {}: {} vs {}".format( - resume_epoch, - batch_checker.counter, batch_checker.true_batch, batch) + assert batch_checker.check(batch), "{} | {}: {} vs {}".format( + resume_epoch, batch_checker.counter, batch_checker.true_batch, batch + ) engine = Engine(update_fn) - resume_state_dict = dict(epoch=resume_epoch, - max_epochs=max_epochs, - epoch_length=epoch_length, - seed=0) + resume_state_dict = dict(epoch=resume_epoch, max_epochs=max_epochs, epoch_length=epoch_length, seed=0) engine.load_state_dict(resume_state_dict) engine.run(data) assert engine.state.epoch == max_epochs @@ -226,7 +217,7 @@ def _setup_sampler(sampler_type, num_iters, batch_size): w = torch.ones(num_iters * batch_size, dtype=torch.float) for i in range(num_iters): - w[batch_size * i:batch_size * (i + 1)] += i * 1.0 + w[batch_size * i : batch_size * (i + 1)] += i * 1.0 return WeightedRandomSampler(w, num_samples=num_iters * batch_size, replacement=True) if sampler_type == "distributed": @@ -244,7 +235,6 @@ def _setup_sampler(sampler_type, num_iters, batch_size): def test__update_dataloader(): - def _test(sampler_type=None): num_epochs = 3 batch_size = 4 @@ -253,11 +243,15 @@ def _test(sampler_type=None): num_workers = 4 sampler = _setup_sampler(sampler_type, num_iters, batch_size) - dataloader = torch.utils.data.DataLoader(data, batch_size=batch_size, - num_workers=num_workers, - pin_memory=False, - sampler=sampler, - drop_last=True, shuffle=sampler is None) + dataloader = torch.utils.data.DataLoader( + data, + batch_size=batch_size, + num_workers=num_workers, + pin_memory=False, + sampler=sampler, + drop_last=True, + shuffle=sampler is None, + ) torch.manual_seed(12) seen_batches = [] @@ -270,11 +264,15 @@ def _test(sampler_type=None): seen_batches.append(t) sampler = _setup_sampler(sampler_type, num_iters, batch_size) - dataloader = torch.utils.data.DataLoader(data, batch_size=batch_size, - num_workers=num_workers, - pin_memory=False, - sampler=sampler, - drop_last=True, shuffle=sampler is None) + dataloader = torch.utils.data.DataLoader( + data, + batch_size=batch_size, + num_workers=num_workers, + pin_memory=False, + sampler=sampler, + drop_last=True, + shuffle=sampler is None, + ) batch_sampler = dataloader.batch_sampler new_dataloader = _update_dataloader(dataloader, ReproducibleBatchSampler(batch_sampler)) @@ -304,18 +302,22 @@ def _test(epoch_length=None): max_epochs = 5 batch_size = 4 num_iters = 21 - data = torch.randint(0, 1000, size=(num_iters * batch_size, )) + data = torch.randint(0, 1000, size=(num_iters * batch_size,)) if epoch_length is None: epoch_length = num_iters for num_workers in [0, 4]: sampler = _setup_sampler(sampler_type, num_iters, batch_size) - 
orig_dataloader = torch.utils.data.DataLoader(data, batch_size=batch_size, - num_workers=num_workers, - pin_memory="cuda" in device, - sampler=sampler, - drop_last=True, shuffle=sampler is None) + orig_dataloader = torch.utils.data.DataLoader( + data, + batch_size=batch_size, + num_workers=num_workers, + pin_memory="cuda" in device, + sampler=sampler, + drop_last=True, + shuffle=sampler is None, + ) seen_batchs = [] @@ -326,6 +328,7 @@ def update_fn(engine, batch): engine = Engine(update_fn) if sampler_type == "distributed": + @engine.on(Events.EPOCH_STARTED) def _(engine): sampler.set_epoch(engine.state.epoch - 1) @@ -336,30 +339,31 @@ def _(engine): batch_checker = BatchChecker(seen_batchs, init_counter=resume_epoch * epoch_length) sampler = _setup_sampler(sampler_type, num_iters, batch_size) - resume_dataloader = torch.utils.data.DataLoader(data, batch_size=batch_size, - num_workers=num_workers, - pin_memory="cuda" in device, - sampler=sampler, - drop_last=True, shuffle=sampler is None) + resume_dataloader = torch.utils.data.DataLoader( + data, + batch_size=batch_size, + num_workers=num_workers, + pin_memory="cuda" in device, + sampler=sampler, + drop_last=True, + shuffle=sampler is None, + ) def update_fn(engine, batch): batch_to_device = batch.to(device) - assert batch_checker.check(batch), \ - "{} {} | {}: {} vs {}".format( - num_workers, resume_epoch, - batch_checker.counter, batch_checker.true_batch, batch) + assert batch_checker.check(batch), "{} {} | {}: {} vs {}".format( + num_workers, resume_epoch, batch_checker.counter, batch_checker.true_batch, batch + ) engine = Engine(update_fn) if sampler_type == "distributed": + @engine.on(Events.EPOCH_STARTED) def _(engine): sampler.set_epoch(engine.state.epoch - 1) - resume_state_dict = dict(epoch=resume_epoch, - max_epochs=max_epochs, - epoch_length=epoch_length, - seed=12) + resume_state_dict = dict(epoch=resume_epoch, max_epochs=max_epochs, epoch_length=epoch_length, seed=12) engine.load_state_dict(resume_state_dict) engine.run(resume_dataloader) assert engine.state.epoch == max_epochs @@ -373,12 +377,11 @@ def _(engine): def test_resume_random_dataloader_from_epoch(): _test_resume_random_dataloader_from_epoch("cpu") - _test_resume_random_dataloader_from_epoch("cpu", sampler_type='weighted') - _test_resume_random_dataloader_from_epoch("cpu", sampler_type='distributed') + _test_resume_random_dataloader_from_epoch("cpu", sampler_type="weighted") + _test_resume_random_dataloader_from_epoch("cpu", sampler_type="distributed") class AugmentedData: - def __init__(self, data): self.data = data @@ -407,11 +410,15 @@ def _test(epoch_length=None): for num_workers in [0, 4]: sampler = _setup_sampler(sampler_type, num_iters, batch_size) - orig_dataloader = torch.utils.data.DataLoader(data, batch_size=batch_size, - num_workers=num_workers, - pin_memory="cuda" in device, - sampler=sampler, - drop_last=True, shuffle=sampler is None) + orig_dataloader = torch.utils.data.DataLoader( + data, + batch_size=batch_size, + num_workers=num_workers, + pin_memory="cuda" in device, + sampler=sampler, + drop_last=True, + shuffle=sampler is None, + ) seen_batchs = [] def update_fn(engine, batch): @@ -421,6 +428,7 @@ def update_fn(engine, batch): engine = Engine(update_fn) if sampler_type == "distributed": + @engine.on(Events.EPOCH_STARTED) def _(engine): sampler.set_epoch(engine.state.epoch) @@ -431,37 +439,39 @@ def _(engine): batch_checker = BatchChecker(seen_batchs, init_counter=resume_iteration) sampler = _setup_sampler(sampler_type, num_iters, batch_size) - 
resume_dataloader = torch.utils.data.DataLoader(data, batch_size=batch_size, - num_workers=num_workers, - pin_memory="cuda" in device, - sampler=sampler, - drop_last=True, shuffle=sampler is None) + resume_dataloader = torch.utils.data.DataLoader( + data, + batch_size=batch_size, + num_workers=num_workers, + pin_memory="cuda" in device, + sampler=sampler, + drop_last=True, + shuffle=sampler is None, + ) def update_fn(engine, batch): batch_to_device = batch.to(device) - assert batch_checker.check(batch), \ - "{} {} | {}: {} vs {}".format( - num_workers, resume_iteration, - batch_checker.counter, batch_checker.true_batch, batch) + assert batch_checker.check(batch), "{} {} | {}: {} vs {}".format( + num_workers, resume_iteration, batch_checker.counter, batch_checker.true_batch, batch + ) engine = Engine(update_fn) if sampler_type == "distributed": + @engine.on(Events.EPOCH_STARTED) def _(engine): sampler.set_epoch(engine.state.epoch) - resume_state_dict = dict(iteration=resume_iteration, - max_epochs=max_epochs, - epoch_length=epoch_length, - seed=12) + resume_state_dict = dict( + iteration=resume_iteration, max_epochs=max_epochs, epoch_length=epoch_length, seed=12 + ) engine.load_state_dict(resume_state_dict) engine.run(resume_dataloader) assert engine.state.epoch == max_epochs - assert engine.state.iteration == epoch_length * max_epochs, \ - "{}, {} | {} vs {}".format(num_workers, resume_iteration, - engine.state.iteration, - epoch_length * max_epochs) + assert engine.state.iteration == epoch_length * max_epochs, "{}, {} | {} vs {}".format( + num_workers, resume_iteration, engine.state.iteration, epoch_length * max_epochs + ) _test() if sampler_type != "distributed": @@ -469,8 +479,9 @@ def _(engine): _test(11) else: with pytest.raises(AssertionError): - with pytest.warns(UserWarning, match=r"When defined engine's epoch length is different of " - r"input dataloader length"): + with pytest.warns( + UserWarning, match=r"When defined engine's epoch length is different of " r"input dataloader length" + ): _test(40) @@ -514,7 +525,6 @@ def test_reproducible_batch_sampler(): def _test_resume_random_data_iterator_from_epoch(device): - def _test(epoch_length=None): max_epochs = 5 batch_size = 4 @@ -542,14 +552,12 @@ def update_fn(engine, batch): batch_checker = BatchChecker(seen_batchs, init_counter=resume_epoch * epoch_length) def update_fn(engine, batch): - assert batch_checker.check(batch), \ - "{} | {}: {} vs {}".format(resume_epoch, batch_checker.counter, batch_checker.true_batch, batch) + assert batch_checker.check(batch), "{} | {}: {} vs {}".format( + resume_epoch, batch_checker.counter, batch_checker.true_batch, batch + ) engine = Engine(update_fn) - resume_state_dict = dict(epoch=resume_epoch, - max_epochs=max_epochs, - epoch_length=epoch_length, - seed=12) + resume_state_dict = dict(epoch=resume_epoch, max_epochs=max_epochs, epoch_length=epoch_length, seed=12) engine.load_state_dict(resume_state_dict) engine.run(infinite_data_iterator()) assert engine.state.epoch == max_epochs @@ -565,7 +573,6 @@ def test_resume_random_data_iterator_from_epoch(): def _test_resume_random_data_iterator_from_iter(device): - def _test(epoch_length=None): max_epochs = 3 batch_size = 4 @@ -593,19 +600,20 @@ def update_fn(engine, batch): batch_checker = BatchChecker(seen_batchs, init_counter=resume_iteration) def update_fn(engine, batch): - assert batch_checker.check(batch), \ - "{} | {}: {} vs {}".format(resume_iteration, batch_checker.counter, batch_checker.true_batch, batch) + assert 
batch_checker.check(batch), "{} | {}: {} vs {}".format( + resume_iteration, batch_checker.counter, batch_checker.true_batch, batch + ) engine = Engine(update_fn) - resume_state_dict = dict(iteration=resume_iteration, - max_epochs=max_epochs, - epoch_length=epoch_length, - seed=12) + resume_state_dict = dict( + iteration=resume_iteration, max_epochs=max_epochs, epoch_length=epoch_length, seed=12 + ) engine.load_state_dict(resume_state_dict) engine.run(infinite_data_iterator()) assert engine.state.epoch == max_epochs - assert engine.state.iteration == epoch_length * max_epochs, \ - "{} | {} vs {}".format(resume_iteration, engine.state.iteration, epoch_length * max_epochs) + assert engine.state.iteration == epoch_length * max_epochs, "{} | {} vs {}".format( + resume_iteration, engine.state.iteration, epoch_length * max_epochs + ) _test() _test(50) @@ -619,7 +627,7 @@ def test_resume_random_data_iterator_from_iter(): @pytest.mark.distributed @pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU") def test_distrib_gpu(distributed_context_single_node_nccl): - device = "cuda:{}".format(distributed_context_single_node_nccl['local_rank']) + device = "cuda:{}".format(distributed_context_single_node_nccl["local_rank"]) _test_resume_random_data_iterator_from_iter(device) _test_resume_random_data_iterator_from_epoch(device) _test_resume_random_dataloader_from_iter(device) @@ -636,7 +644,7 @@ def test_distrib_cpu(distributed_context_single_node_gloo): @pytest.mark.multinode_distributed -@pytest.mark.skipif('MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_cpu(distributed_context_multi_node_gloo): device = "cpu" _test_resume_random_data_iterator_from_iter(device) @@ -646,9 +654,9 @@ def test_multinode_distrib_cpu(distributed_context_multi_node_gloo): @pytest.mark.multinode_distributed -@pytest.mark.skipif('GPU_MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_gpu(distributed_context_multi_node_nccl): - device = "cuda:{}".format(distributed_context_multi_node_nccl['local_rank']) + device = "cuda:{}".format(distributed_context_multi_node_nccl["local_rank"]) _test_resume_random_data_iterator_from_iter(device) _test_resume_random_data_iterator_from_epoch(device) _test_resume_random_dataloader_from_iter(device) diff --git a/tests/ignite/handlers/test_checkpoint.py b/tests/ignite/handlers/test_checkpoint.py index 66ae0cbd4594..420854ad48c5 100644 --- a/tests/ignite/handlers/test_checkpoint.py +++ b/tests/ignite/handlers/test_checkpoint.py @@ -10,7 +10,7 @@ import pytest from unittest.mock import MagicMock -_PREFIX = 'PREFIX' +_PREFIX = "PREFIX" class DummyModel(nn.Module): @@ -25,22 +25,25 @@ def forward(self, x): def test_checkpoint_wrong_input(): with pytest.raises(TypeError, match=r"Argument `to_save` should be a dictionary"): - Checkpoint(12, lambda x: x, "prefix", ) + Checkpoint( + 12, lambda x: x, "prefix", + ) with pytest.raises(TypeError, match=r"Argument `to_save` should be a dictionary"): - Checkpoint([12, ], lambda x: x, "prefix") + Checkpoint([12,], lambda x: x, "prefix") with pytest.raises(ValueError, match=r"No objects to checkpoint."): Checkpoint({}, lambda x: x, "prefix") model = DummyModel() - to_save = {'model': model} + to_save = {"model": 
model} with pytest.raises(TypeError, match=r"Argument `save_handler` should be callable"): Checkpoint(to_save, 12, "prefix") - with pytest.raises(ValueError, - match=r"If `score_name` is provided, then `score_function` should be also provided."): + with pytest.raises( + ValueError, match=r"If `score_name` is provided, then `score_function` should be also provided." + ): Checkpoint(to_save, lambda x: x, score_name="acc") with pytest.raises(TypeError, match=r"global_step_transform should be a function."): @@ -49,7 +52,7 @@ def test_checkpoint_wrong_input(): def test_checkpoint_score_function_wrong_output(): model = DummyModel() - to_save = {'model': model} + to_save = {"model": model} checkpointer = Checkpoint(to_save, lambda x: x, score_function=lambda e: {"1": 1}, score_name="acc") trainer = Engine(lambda e, b: None) @@ -59,7 +62,6 @@ def test_checkpoint_score_function_wrong_output(): def test_checkpoint_default(): - def _test(to_save, obj, name): save_handler = MagicMock() save_handler.remove = MagicMock() @@ -85,23 +87,26 @@ def _test(to_save, obj, name): assert checkpointer.last_checkpoint == "{}_1234.pth".format(name) model = DummyModel() - to_save = {'model': model} - _test(to_save, model.state_dict(), 'model') + to_save = {"model": model} + _test(to_save, model.state_dict(), "model") model = DummyModel() optimizer = torch.optim.SGD(model.parameters(), lr=0.1) - to_save = {'model': model, 'optimizer': optimizer} - _test(to_save, {'model': model.state_dict(), 'optimizer': optimizer.state_dict()}, 'checkpoint') + to_save = {"model": model, "optimizer": optimizer} + _test(to_save, {"model": model.state_dict(), "optimizer": optimizer.state_dict()}, "checkpoint") def test_checkpoint_with_global_step_transform(): - def _test(filename_prefix, to_save, obj, name): save_handler = MagicMock() save_handler.remove = MagicMock() - checkpointer = Checkpoint(to_save, save_handler=save_handler, filename_prefix=filename_prefix, - global_step_transform=lambda e, _: e.state.epoch) + checkpointer = Checkpoint( + to_save, + save_handler=save_handler, + filename_prefix=filename_prefix, + global_step_transform=lambda e, _: e.state.epoch, + ) trainer = Engine(lambda e, b: None) trainer.state = State(epoch=1, iteration=1) @@ -125,17 +130,16 @@ def _test(filename_prefix, to_save, obj, name): for prefix in ["", "dummytask"]: model = DummyModel() - to_save = {'model': model} - _test(prefix, to_save, model.state_dict(), 'model') + to_save = {"model": model} + _test(prefix, to_save, model.state_dict(), "model") model = DummyModel() optimizer = torch.optim.SGD(model.parameters(), lr=0.1) - to_save = {'model': model, 'optimizer': optimizer} - _test(prefix, to_save, {'model': model.state_dict(), 'optimizer': optimizer.state_dict()}, 'checkpoint') + to_save = {"model": model, "optimizer": optimizer} + _test(prefix, to_save, {"model": model.state_dict(), "optimizer": optimizer.state_dict()}, "checkpoint") def test_checkpoint_with_score_function(): - def _test(to_save, obj, name): save_handler = MagicMock() save_handler.remove = MagicMock() @@ -162,24 +166,23 @@ def _test(to_save, obj, name): assert checkpointer.last_checkpoint == "{}_0.7800.pth".format(name) model = DummyModel() - to_save = {'model': model} - _test(to_save, model.state_dict(), 'model') + to_save = {"model": model} + _test(to_save, model.state_dict(), "model") model = DummyModel() optimizer = torch.optim.SGD(model.parameters(), lr=0.1) - to_save = {'model': model, 'optimizer': optimizer} - _test(to_save, {'model': model.state_dict(), 'optimizer': 
optimizer.state_dict()}, 'checkpoint') + to_save = {"model": model, "optimizer": optimizer} + _test(to_save, {"model": model.state_dict(), "optimizer": optimizer.state_dict()}, "checkpoint") def test_checkpoint_with_score_name_and_function(): - def _test(to_save, obj, name): save_handler = MagicMock() save_handler.remove = MagicMock() - checkpointer = Checkpoint(to_save, save_handler=save_handler, - score_name="loss", - score_function=lambda e: e.state.score) + checkpointer = Checkpoint( + to_save, save_handler=save_handler, score_name="loss", score_function=lambda e: e.state.score + ) trainer = Engine(lambda e, b: None) trainer.state = State(epoch=1, iteration=1, score=-0.77) @@ -201,24 +204,23 @@ def _test(to_save, obj, name): assert checkpointer.last_checkpoint == "{}_loss=-0.7600.pth".format(name) model = DummyModel() - to_save = {'model': model} - _test(to_save, model.state_dict(), 'model') + to_save = {"model": model} + _test(to_save, model.state_dict(), "model") model = DummyModel() optimizer = torch.optim.SGD(model.parameters(), lr=0.1) - to_save = {'model': model, 'optimizer': optimizer} - _test(to_save, {'model': model.state_dict(), 'optimizer': optimizer.state_dict()}, 'checkpoint') + to_save = {"model": model, "optimizer": optimizer} + _test(to_save, {"model": model.state_dict(), "optimizer": optimizer.state_dict()}, "checkpoint") def test_checkpoint_with_int_score(): - def _test(to_save, obj, name, score_name=None): save_handler = MagicMock() save_handler.remove = MagicMock() - checkpointer = Checkpoint(to_save, save_handler=save_handler, - score_name=score_name, - score_function=lambda e: e.state.epoch) + checkpointer = Checkpoint( + to_save, save_handler=save_handler, score_name=score_name, score_function=lambda e: e.state.epoch + ) if score_name is None: score_name = "" @@ -244,19 +246,18 @@ def _test(to_save, obj, name, score_name=None): assert checkpointer.last_checkpoint == "{}_{}12.pth".format(name, score_name) model = DummyModel() - to_save = {'model': model} - _test(to_save, model.state_dict(), 'model') - _test(to_save, model.state_dict(), 'model', "epoch") + to_save = {"model": model} + _test(to_save, model.state_dict(), "model") + _test(to_save, model.state_dict(), "model", "epoch") model = DummyModel() optimizer = torch.optim.SGD(model.parameters(), lr=0.1) - to_save = {'model': model, 'optimizer': optimizer} - _test(to_save, {'model': model.state_dict(), 'optimizer': optimizer.state_dict()}, 'checkpoint') - _test(to_save, {'model': model.state_dict(), 'optimizer': optimizer.state_dict()}, 'checkpoint', "epoch") + to_save = {"model": model, "optimizer": optimizer} + _test(to_save, {"model": model.state_dict(), "optimizer": optimizer.state_dict()}, "checkpoint") + _test(to_save, {"model": model.state_dict(), "optimizer": optimizer.state_dict()}, "checkpoint", "epoch") def test_checkpoint_with_score_function_and_trainer_epoch(): - def _test(to_save, obj, name): save_handler = MagicMock() save_handler.remove = MagicMock() @@ -265,19 +266,21 @@ def _test(to_save, obj, name): evaluator = Engine(lambda e, b: None) trainer.state = State(epoch=11, iteration=1) - checkpointer = Checkpoint(to_save, save_handler=save_handler, - global_step_transform=lambda _1, _2: trainer.state.epoch, - score_function=lambda e: e.state.metrics['val_acc']) + checkpointer = Checkpoint( + to_save, + save_handler=save_handler, + global_step_transform=lambda _1, _2: trainer.state.epoch, + score_function=lambda e: e.state.metrics["val_acc"], + ) - evaluator.state = State(epoch=1, iteration=1000, - 
metrics={'val_acc': 0.77}) + evaluator.state = State(epoch=1, iteration=1000, metrics={"val_acc": 0.77}) checkpointer(evaluator) assert save_handler.call_count == 1 save_handler.assert_called_with(obj, "{}_11_0.7700.pth".format(name)) trainer.state.epoch = 12 - evaluator.state.metrics['val_acc'] = 0.78 + evaluator.state.metrics["val_acc"] = 0.78 checkpointer(evaluator) assert save_handler.call_count == 2 @@ -287,12 +290,11 @@ def _test(to_save, obj, name): assert checkpointer.last_checkpoint == "{}_12_0.7800.pth".format(name) model = DummyModel() - to_save = {'model': model} - _test(to_save, model.state_dict(), 'model') + to_save = {"model": model} + _test(to_save, model.state_dict(), "model") def test_checkpoint_with_score_name_and_function_and_trainer_epoch(): - def _test(to_save, obj, name): save_handler = MagicMock() save_handler.remove = MagicMock() @@ -301,13 +303,15 @@ def _test(to_save, obj, name): evaluator = Engine(lambda e, b: None) trainer.state = State(epoch=11, iteration=1) - checkpointer = Checkpoint(to_save, save_handler=save_handler, - global_step_transform=lambda _1, _2: trainer.state.epoch, - score_name="val_acc", - score_function=lambda e: e.state.metrics['val_acc']) + checkpointer = Checkpoint( + to_save, + save_handler=save_handler, + global_step_transform=lambda _1, _2: trainer.state.epoch, + score_name="val_acc", + score_function=lambda e: e.state.metrics["val_acc"], + ) - evaluator.state = State(epoch=1, iteration=1000, - metrics={'val_acc': 0.77}) + evaluator.state = State(epoch=1, iteration=1000, metrics={"val_acc": 0.77}) checkpointer(evaluator) assert save_handler.call_count == 1 @@ -315,7 +319,7 @@ def _test(to_save, obj, name): save_handler.assert_called_with(obj, "{}_11_val_acc=0.7700.pth".format(name)) trainer.state.epoch = 12 - evaluator.state.metrics['val_acc'] = 0.78 + evaluator.state.metrics["val_acc"] = 0.78 checkpointer(evaluator) assert save_handler.call_count == 2 @@ -325,14 +329,14 @@ def _test(to_save, obj, name): assert checkpointer.last_checkpoint == "{}_12_val_acc=0.7800.pth".format(name) model = DummyModel() - to_save = {'model': model} - _test(to_save, model.state_dict(), 'model') + to_save = {"model": model} + _test(to_save, model.state_dict(), "model") def test_checkpoint_last_checkpoint(): save_handler = MagicMock() save_handler.remove = MagicMock() - to_save = {'model': DummyModel()} + to_save = {"model": DummyModel()} checkpointer = Checkpoint(to_save, save_handler=save_handler, n_saved=None) @@ -343,16 +347,21 @@ def test_checkpoint_last_checkpoint(): checkpointer(trainer) assert save_handler.call_count == 10 - assert checkpointer.last_checkpoint == "{}_9.pth".format('model') + assert checkpointer.last_checkpoint == "{}_9.pth".format("model") def test_checkpoint_last_checkpoint_on_score(): save_handler = MagicMock() save_handler.remove = MagicMock() - to_save = {'model': DummyModel()} + to_save = {"model": DummyModel()} - checkpointer = Checkpoint(to_save, save_handler=save_handler, n_saved=None, - score_name="val_acc", score_function=lambda e: e.state.metrics['val_acc']) + checkpointer = Checkpoint( + to_save, + save_handler=save_handler, + n_saved=None, + score_name="val_acc", + score_function=lambda e: e.state.metrics["val_acc"], + ) trainer = Engine(lambda e, b: None) @@ -363,17 +372,17 @@ def test_checkpoint_last_checkpoint_on_score(): checkpointer(trainer) assert save_handler.call_count == 10 - assert checkpointer.last_checkpoint == "{}_val_acc=0.9000.pth".format('model') + assert checkpointer.last_checkpoint == 
"{}_val_acc=0.9000.pth".format("model") def test_model_checkpoint_args_validation(dirname): - existing = os.path.join(dirname, 'existing_dir') - nonempty = os.path.join(dirname, 'nonempty') + existing = os.path.join(dirname, "existing_dir") + nonempty = os.path.join(dirname, "nonempty") os.makedirs(existing) os.makedirs(nonempty) - with open(os.path.join(nonempty, '{}_name_0.pth'.format(_PREFIX)), 'w'): + with open(os.path.join(nonempty, "{}_name_0.pth".format(_PREFIX)), "w"): pass with pytest.raises(ValueError, match=r"with extension '.pth' or '.pth.tar' are already present "): @@ -383,13 +392,13 @@ def test_model_checkpoint_args_validation(dirname): ModelCheckpoint(existing, _PREFIX, save_interval=42) with pytest.raises(ValueError, match=r"Directory path '\S+' is not found"): - ModelCheckpoint(os.path.join(dirname, 'non_existing_dir'), _PREFIX, create_dir=False) + ModelCheckpoint(os.path.join(dirname, "non_existing_dir"), _PREFIX, create_dir=False) with pytest.raises(ValueError, match=r"Argument save_as_state_dict is deprecated and should be True"): ModelCheckpoint(existing, _PREFIX, create_dir=False, save_as_state_dict=False) with pytest.raises(ValueError, match=r"If `score_name` is provided, then `score_function` "): - ModelCheckpoint(existing, _PREFIX, create_dir=False, score_name='test') + ModelCheckpoint(existing, _PREFIX, create_dir=False, score_name="test") with pytest.raises(TypeError, match=r"global_step_transform should be a function"): ModelCheckpoint(existing, _PREFIX, create_dir=False, global_step_transform=1234) @@ -406,7 +415,7 @@ def test_model_checkpoint_simple_recovery(dirname): engine.state = State(epoch=0, iteration=1) model = DummyModel() - to_save = {'model': model} + to_save = {"model": model} h(engine, to_save) fname = h.last_checkpoint @@ -418,10 +427,9 @@ def test_model_checkpoint_simple_recovery(dirname): def test_model_checkpoint_simple_recovery_from_existing_non_empty(dirname): - def _test(ext, require_empty, archived): - previous_fname = os.path.join(dirname, '{}_{}_{}{}'.format(_PREFIX, 'obj', 1, ext)) - with open(previous_fname, 'w') as f: + previous_fname = os.path.join(dirname, "{}_{}_{}{}".format(_PREFIX, "obj", 1, ext)) + with open(previous_fname, "w") as f: f.write("test") h = ModelCheckpoint(dirname, _PREFIX, create_dir=True, require_empty=require_empty, archived=archived) @@ -429,13 +437,13 @@ def _test(ext, require_empty, archived): engine.state = State(epoch=0, iteration=1) model = DummyModel() - to_save = {'model': model} + to_save = {"model": model} h(engine, to_save) fname = h.last_checkpoint ext = ".pth.tar" if archived else ".pth" assert isinstance(fname, str) - assert os.path.join(dirname, '{}_{}_{}{}'.format(_PREFIX, 'model', 1, ext)) == fname + assert os.path.join(dirname, "{}_{}_{}{}".format(_PREFIX, "model", 1, ext)) == fname assert os.path.exists(fname) assert os.path.exists(previous_fname) loaded_objects = torch.load(fname) @@ -450,8 +458,8 @@ def _test(ext, require_empty, archived): def test_disk_saver_atomic(dirname): model = DummyModel() - to_save_serializable = {'model': model} - to_save_non_serializable = {'model': lambda x: x} + to_save_serializable = {"model": model} + to_save_non_serializable = {"model": lambda x: x} def _test_existance(atomic, _to_save, expected): @@ -484,14 +492,14 @@ def test_last_k(dirname): engine.state = State(epoch=0, iteration=0) model = DummyModel() - to_save = {'model': model} + to_save = {"model": model} h(engine, to_save) for i in range(1, 9): engine.state.iteration = i h(engine, to_save) - expected 
= ['{}_{}_{}.pth'.format(_PREFIX, 'model', i) for i in [7, 8]] + expected = ["{}_{}_{}.pth".format(_PREFIX, "model", i) for i in [7, 8]] assert sorted(os.listdir(dirname)) == expected, "{} vs {}".format(sorted(os.listdir(dirname)), expected) @@ -503,7 +511,7 @@ def test_disabled_n_saved(dirname): engine.state = State(epoch=0, iteration=0) model = DummyModel() - to_save = {'model': model} + to_save = {"model": model} num_iters = 100 for i in range(num_iters): @@ -513,12 +521,12 @@ def test_disabled_n_saved(dirname): saved_files = sorted(os.listdir(dirname)) assert len(saved_files) == num_iters, "{}".format(saved_files) - expected = sorted(['{}_{}_{}.pth'.format(_PREFIX, 'model', i) for i in range(num_iters)]) + expected = sorted(["{}_{}_{}.pth".format(_PREFIX, "model", i) for i in range(num_iters)]) assert saved_files == expected, "{} vs {}".format(saved_files, expected) def test_best_k(dirname): - scores = iter([1.2, -2., 3.1, -4.0]) + scores = iter([1.2, -2.0, 3.1, -4.0]) def score_function(_): return next(scores) @@ -529,11 +537,11 @@ def score_function(_): engine.state = State(epoch=0, iteration=0) model = DummyModel() - to_save = {'model': model} + to_save = {"model": model} for _ in range(4): h(engine, to_save) - expected = ['{}_{}_{:.4f}.pth'.format(_PREFIX, 'model', i) for i in [1.2, 3.1]] + expected = ["{}_{}_{:.4f}.pth".format(_PREFIX, "model", i) for i in [1.2, 3.1]] assert sorted(os.listdir(dirname)) == expected @@ -545,44 +553,43 @@ def test_best_k_with_suffix(dirname): def score_function(engine): return next(scores_iter) - h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2, - score_function=score_function, score_name="val_loss") + h = ModelCheckpoint( + dirname, _PREFIX, create_dir=False, n_saved=2, score_function=score_function, score_name="val_loss" + ) engine = Engine(lambda e, b: None) engine.state = State(epoch=0, iteration=0) model = DummyModel() - to_save = {'model': model} + to_save = {"model": model} for _ in range(4): engine.state.epoch += 1 h(engine, to_save) - expected = ['{}_{}_val_loss={:.4}.pth'.format(_PREFIX, 'model', scores[e - 1]) for e in [1, 3]] + expected = ["{}_{}_val_loss={:.4}.pth".format(_PREFIX, "model", scores[e - 1]) for e in [1, 3]] assert sorted(os.listdir(dirname)) == expected def test_with_engine(dirname): - def update_fn(_1, _2): pass - name = 'model' + name = "model" engine = Engine(update_fn) handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2) model = DummyModel() - to_save = {'model': model} + to_save = {"model": model} engine.add_event_handler(Events.EPOCH_COMPLETED, handler, to_save) engine.run([0], max_epochs=4) - expected = ['{}_{}_{}.pth'.format(_PREFIX, name, i) for i in [3, 4]] + expected = ["{}_{}_{}.pth".format(_PREFIX, name, i) for i in [3, 4]] assert sorted(os.listdir(dirname)) == expected def test_with_state_dict(dirname): - def update_fn(_1, _2): pass @@ -590,7 +597,7 @@ def update_fn(_1, _2): handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1) model = DummyModel() - to_save = {'model': model} + to_save = {"model": model} engine.add_event_handler(Events.EPOCH_COMPLETED, handler, to_save) engine.run([0], max_epochs=4) @@ -618,10 +625,10 @@ def test_valid_state_dict_save(dirname): engine = Engine(lambda e, b: None) engine.state = State(epoch=0, iteration=0) - to_save = {'name': 42} + to_save = {"name": 42} with pytest.raises(TypeError, match=r"should have `state_dict` method"): h(engine, to_save) - to_save = {'name': model} + to_save = {"name": model} try: h(engine, 
to_save) except ValueError: @@ -645,13 +652,9 @@ def update_fn(engine, batch): engine = Engine(update_fn) handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1) - engine.add_event_handler(Events.EPOCH_COMPLETED, - handler, - { - "model": model, - "optimizer": optim, - "lr_scheduler": lr_scheduler, - }) + engine.add_event_handler( + Events.EPOCH_COMPLETED, handler, {"model": model, "optimizer": optim, "lr_scheduler": lr_scheduler,} + ) engine.run([0], max_epochs=4) saved_objects = sorted(os.listdir(dirname)) @@ -661,9 +664,9 @@ def update_fn(engine, batch): loaded_obj = torch.load(saved_checkpoint) for f in ["model", "optimizer", "lr_scheduler"]: assert f in loaded_obj - loaded_model_state_dict = loaded_obj['model'] - loaded_optimizer_state_dict = loaded_obj['optimizer'] - loaded_lr_scheduler_state_dict = loaded_obj['lr_scheduler'] + loaded_model_state_dict = loaded_obj["model"] + loaded_optimizer_state_dict = loaded_obj["optimizer"] + loaded_lr_scheduler_state_dict = loaded_obj["lr_scheduler"] assert isinstance(loaded_model_state_dict, dict) assert isinstance(loaded_optimizer_state_dict, dict) @@ -700,22 +703,21 @@ def test_checkpoint_load_objects(): Checkpoint.load_objects({"a": None}, {"a": None}) model = DummyModel() - to_load = {'model': model, 'another_model': model} + to_load = {"model": model, "another_model": model} with pytest.raises(ValueError, match=r"from `to_load` is not found in the checkpoint"): Checkpoint.load_objects(to_load, {}) model = DummyModel() - to_load = {'model': model} + to_load = {"model": model} model2 = DummyModel() - chkpt = {'model': model2.state_dict()} + chkpt = {"model": model2.state_dict()} Checkpoint.load_objects(to_load, chkpt) assert model.state_dict() == model2.state_dict() def test_checkpoint_load_objects_from_saved_file(dirname): - def _get_single_obj_to_save(): model = DummyModel() to_save = { @@ -758,7 +760,7 @@ def _get_multiple_objs_to_save(): assert os.path.join(dirname, _PREFIX) in fname assert os.path.exists(fname) loaded_objects = torch.load(fname) - to_load = {'model': to_save['model']} + to_load = {"model": to_save["model"]} Checkpoint.load_objects(to_load, loaded_objects) os.remove(fname) @@ -780,8 +782,8 @@ def test_disksaver_wrong_input(dirname): DiskSaver("/tmp/non-existing-folder", create_dir=False) def _test(ext): - previous_fname = os.path.join(dirname, '{}_{}_{}{}'.format(_PREFIX, 'obj', 1, ext)) - with open(previous_fname, 'w') as f: + previous_fname = os.path.join(dirname, "{}_{}_{}{}".format(_PREFIX, "obj", 1, ext)) + with open(previous_fname, "w") as f: f.write("test") with pytest.raises(ValueError, match=r"with extension '.pth' or '.pth.tar' are already present"): diff --git a/tests/ignite/handlers/test_early_stopping.py b/tests/ignite/handlers/test_early_stopping.py index d23187e6b208..a626fca165cf 100644 --- a/tests/ignite/handlers/test_early_stopping.py +++ b/tests/ignite/handlers/test_early_stopping.py @@ -77,8 +77,9 @@ def test_early_stopping_on_last_event_delta(): trainer = Engine(do_nothing_update_fn) - h = EarlyStopping(patience=2, min_delta=0.4, cumulative_delta=False, - score_function=lambda _: next(scores), trainer=trainer) + h = EarlyStopping( + patience=2, min_delta=0.4, cumulative_delta=False, score_function=lambda _: next(scores), trainer=trainer + ) assert not trainer.should_terminate h(None) # counter == 0 @@ -95,8 +96,9 @@ def test_early_stopping_on_cumulative_delta(): trainer = Engine(do_nothing_update_fn) - h = EarlyStopping(patience=2, min_delta=0.4, cumulative_delta=True, - 
score_function=lambda _: next(scores), trainer=trainer) + h = EarlyStopping( + patience=2, min_delta=0.4, cumulative_delta=True, score_function=lambda _: next(scores), trainer=trainer + ) assert not trainer.should_terminate h(None) # counter == 0 @@ -108,7 +110,6 @@ def test_early_stopping_on_cumulative_delta(): def test_simple_early_stopping_on_plateau(): - def score_function(engine): return 42 @@ -142,7 +143,6 @@ def score_function(engine): def test_with_engine_early_stopping(): - class Counter(object): def __init__(self, count=0): self.count = count @@ -170,7 +170,6 @@ def evaluation(engine): def test_with_engine_early_stopping_on_plateau(): - class Counter(object): def __init__(self, count=0): self.count = count @@ -196,7 +195,6 @@ def evaluation(engine): def test_with_engine_no_early_stopping(): - class Counter(object): def __init__(self, count=0): self.count = count @@ -263,6 +261,7 @@ def _test_distrib_integration_engine_early_stopping(device): import torch.distributed as dist from ignite.metrics import Accuracy + rank = dist.get_rank() ws = dist.get_world_size() torch.manual_seed(12) @@ -270,21 +269,17 @@ def _test_distrib_integration_engine_early_stopping(device): n_epochs = 10 n_iters = 20 - y_preds = [ - torch.randint(0, 2, size=(n_iters, ws)).to(device) - ] + [ - torch.ones(n_iters, ws).to(device) - ] + [ - torch.randint(0, 2, size=(n_iters, ws)).to(device) for _ in range(n_epochs - 2) - ] - - y_true = [ - torch.randint(0, 2, size=(n_iters, ws)).to(device) - ] + [ - torch.ones(n_iters, ws).to(device) - ] + [ - torch.randint(0, 2, size=(n_iters, ws)).to(device) for _ in range(n_epochs - 2) - ] + y_preds = ( + [torch.randint(0, 2, size=(n_iters, ws)).to(device)] + + [torch.ones(n_iters, ws).to(device)] + + [torch.randint(0, 2, size=(n_iters, ws)).to(device) for _ in range(n_epochs - 2)] + ) + + y_true = ( + [torch.randint(0, 2, size=(n_iters, ws)).to(device)] + + [torch.ones(n_iters, ws).to(device)] + + [torch.randint(0, 2, size=(n_iters, ws)).to(device) for _ in range(n_epochs - 2)] + ) def update(engine, _): e = trainer.state.epoch - 1 @@ -296,7 +291,7 @@ def update(engine, _): acc.attach(evaluator, "acc") def score_function(engine): - return engine.state.metrics['acc'] + return engine.state.metrics["acc"] trainer = Engine(lambda e, b: None) early_stopping = EarlyStopping(patience=3, score_function=score_function, trainer=trainer) @@ -327,7 +322,7 @@ def test_distrib_cpu(local_rank, distributed_context_single_node_gloo): @pytest.mark.multinode_distributed -@pytest.mark.skipif('MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_cpu(distributed_context_multi_node_gloo): device = "cpu" _test_distrib_with_engine_early_stopping(device) @@ -335,8 +330,8 @@ def test_multinode_distrib_cpu(distributed_context_multi_node_gloo): @pytest.mark.multinode_distributed -@pytest.mark.skipif('GPU_MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_gpu(distributed_context_multi_node_nccl): - device = "cuda:{}".format(distributed_context_multi_node_nccl['local_rank']) + device = "cuda:{}".format(distributed_context_multi_node_nccl["local_rank"]) _test_distrib_with_engine_early_stopping(device) _test_distrib_integration_engine_early_stopping(device) diff --git 
a/tests/ignite/handlers/test_terminate_on_nan.py b/tests/ignite/handlers/test_terminate_on_nan.py index b122b4507990..410eaefbc9ed 100644 --- a/tests/ignite/handlers/test_terminate_on_nan.py +++ b/tests/ignite/handlers/test_terminate_on_nan.py @@ -1,4 +1,3 @@ - import numpy as np import torch @@ -55,17 +54,17 @@ def update_fn(engine, batch): assert trainer.should_terminate trainer.should_terminate = False - trainer.state.output = (float('nan'), 10.0) + trainer.state.output = (float("nan"), 10.0) h(trainer) assert trainer.should_terminate trainer.should_terminate = False - trainer.state.output = float('inf') + trainer.state.output = float("inf") h(trainer) assert trainer.should_terminate trainer.should_terminate = False - trainer.state.output = [float('nan'), 10.0] + trainer.state.output = [float("nan"), 10.0] h(trainer) assert trainer.should_terminate trainer.should_terminate = False @@ -75,9 +74,7 @@ def test_with_terminate_on_nan(): torch.manual_seed(12) - data = [1.0, 0.8, - (torch.rand(4, 4), torch.rand(4, 4)), - torch.rand(5), torch.asin(torch.randn(4, 4)), 0.0, 1.0] + data = [1.0, 0.8, (torch.rand(4, 4), torch.rand(4, 4)), torch.rand(5), torch.asin(torch.randn(4, 4)), 0.0, 1.0] def update_fn(engine, batch): return batch @@ -94,9 +91,16 @@ def test_with_terminate_on_inf(): torch.manual_seed(12) - data = [1.0, 0.8, torch.rand(4, 4), - (1.0 / torch.randint(0, 2, size=(4,)).type(torch.float), torch.tensor(1.234)), - torch.rand(5), torch.asin(torch.randn(4, 4)), 0.0, 1.0] + data = [ + 1.0, + 0.8, + torch.rand(4, 4), + (1.0 / torch.randint(0, 2, size=(4,)).type(torch.float), torch.tensor(1.234)), + torch.rand(5), + torch.asin(torch.randn(4, 4)), + 0.0, + 1.0, + ] def update_fn(engine, batch): return batch diff --git a/tests/ignite/handlers/test_timing.py b/tests/ignite/handlers/test_timing.py index 1044054dd262..8070f6e99d67 100644 --- a/tests/ignite/handlers/test_timing.py +++ b/tests/ignite/handlers/test_timing.py @@ -22,13 +22,10 @@ def _test_func(engine, batch): t_train = Timer() t_total.attach(trainer) - t_batch.attach(trainer, - pause=Events.ITERATION_COMPLETED, - resume=Events.ITERATION_STARTED, - step=Events.ITERATION_COMPLETED) - t_train.attach(trainer, - pause=Events.EPOCH_COMPLETED, - resume=Events.EPOCH_STARTED) + t_batch.attach( + trainer, pause=Events.ITERATION_COMPLETED, resume=Events.ITERATION_STARTED, step=Events.ITERATION_COMPLETED + ) + t_train.attach(trainer, pause=Events.EPOCH_COMPLETED, resume=Events.EPOCH_STARTED) @trainer.on(Events.EPOCH_COMPLETED) def run_validation(trainer): diff --git a/tests/ignite/metrics/test_accumulation.py b/tests/ignite/metrics/test_accumulation.py index 587f7b7aa201..2a101a7099d8 100644 --- a/tests/ignite/metrics/test_accumulation.py +++ b/tests/ignite/metrics/test_accumulation.py @@ -65,7 +65,7 @@ def test_average(): v.compute() mean_var = Average() - y_true = torch.rand(100) + torch.randint(0, 10, size=(100, )).float() + y_true = torch.rand(100) + torch.randint(0, 10, size=(100,)).float() for y in y_true: mean_var.update(y.item()) @@ -128,7 +128,6 @@ def test_geom_average(): def test_integration(): - def _test(metric_cls, true_result_fn): size = 100 @@ -140,11 +139,12 @@ def update_fn(engine, batch): engine = Engine(update_fn) custom_var_mean = metric_cls(output_transform=lambda output: output[1]) - custom_var_mean.attach(engine, 'agg_custom_var') + custom_var_mean.attach(engine, "agg_custom_var") state = engine.run([0] * size) - np.testing.assert_almost_equal(state.metrics['agg_custom_var'].numpy(), true_result_fn(custom_variable), - 
decimal=5) + np.testing.assert_almost_equal( + state.metrics["agg_custom_var"].numpy(), true_result_fn(custom_variable), decimal=5 + ) size = 100 custom_variable = 10.0 + 5.0 * torch.rand(size) @@ -155,10 +155,10 @@ def update_fn(engine, batch): engine = Engine(update_fn) custom_var_mean = metric_cls(output_transform=lambda output: output[1]) - custom_var_mean.attach(engine, 'agg_custom_var') + custom_var_mean.attach(engine, "agg_custom_var") state = engine.run([0] * size) - assert state.metrics['agg_custom_var'] == pytest.approx(true_result_fn(custom_variable)) + assert state.metrics["agg_custom_var"] == pytest.approx(true_result_fn(custom_variable)) def _mean(y_true): return y_true.mean(dim=0).numpy() @@ -172,7 +172,7 @@ def test_compute_mean_std(): b = 12 c = 3 w = h = 64 - true_data = np.arange(0, n * b * h * w * c, dtype='float64').reshape(n * b, c, h, w) - (n * b * c * w * h * 0.75) + true_data = np.arange(0, n * b * h * w * c, dtype="float64").reshape(n * b, c, h, w) - (n * b * c * w * h * 0.75) mean = true_data.transpose((0, 2, 3, 1)).reshape(-1, c).mean(axis=0) std = true_data.transpose((0, 2, 3, 1)).reshape(-1, c).std(axis=0) @@ -186,15 +186,15 @@ def compute_mean_std(engine, batch): return {"mean": _mean, "mean^2": _mean2} compute_engine = Engine(compute_mean_std) - img_mean = Average(output_transform=lambda output: output['mean']) - img_mean2 = Average(output_transform=lambda output: output['mean^2']) - img_mean.attach(compute_engine, 'mean') - img_mean2.attach(compute_engine, 'mean2') + img_mean = Average(output_transform=lambda output: output["mean"]) + img_mean2 = Average(output_transform=lambda output: output["mean^2"]) + img_mean.attach(compute_engine, "mean") + img_mean2.attach(compute_engine, "mean2") state = compute_engine.run(train_loader) - state.metrics['std'] = torch.sqrt(state.metrics['mean2'] - state.metrics['mean'] ** 2) + state.metrics["std"] = torch.sqrt(state.metrics["mean2"] - state.metrics["mean"] ** 2) - np.testing.assert_almost_equal(state.metrics['mean'].numpy(), mean, decimal=7) - np.testing.assert_almost_equal(state.metrics['std'].numpy(), std, decimal=5) + np.testing.assert_almost_equal(state.metrics["mean"].numpy(), mean, decimal=7) + np.testing.assert_almost_equal(state.metrics["std"].numpy(), std, decimal=5) def _test_distrib_variable_accumulation(device): @@ -225,14 +225,10 @@ def _test_distrib_variable_accumulation(device): dist.all_reduce(y_true) a, n = mean_var.compute() assert n == len(y_true) * dist.get_world_size() - np.testing.assert_almost_equal(a.cpu().numpy(), - y_true.sum(dim=0).cpu().numpy(), - decimal=5) + np.testing.assert_almost_equal(a.cpu().numpy(), y_true.sum(dim=0).cpu().numpy(), decimal=5) a, n = mean_var.compute() assert n == len(y_true) * dist.get_world_size() - np.testing.assert_almost_equal(a.cpu().numpy(), - y_true.sum(dim=0).cpu().numpy(), - decimal=5) + np.testing.assert_almost_equal(a.cpu().numpy(), y_true.sum(dim=0).cpu().numpy(), decimal=5) def _test_distrib_average(device): @@ -244,7 +240,7 @@ def _test_distrib_average(device): v.compute() mean_var = Average(device=device) - y_true = torch.rand(100, dtype=torch.float64) + torch.randint(0, 10, size=(100, )).double() + y_true = torch.rand(100, dtype=torch.float64) + torch.randint(0, 10, size=(100,)).double() y_true = y_true.to(device) for y in y_true: @@ -265,9 +261,7 @@ def _test_distrib_average(device): m = mean_var.compute() dist.all_reduce(y_true) - np.testing.assert_almost_equal(m.cpu().numpy(), - y_true.mean(dim=0).cpu().numpy() / dist.get_world_size(), - 
decimal=5) + np.testing.assert_almost_equal(m.cpu().numpy(), y_true.mean(dim=0).cpu().numpy() / dist.get_world_size(), decimal=5) def _test_distrib_geom_average(device): @@ -300,9 +294,9 @@ def _test_distrib_geom_average(device): m = mean_var.compute() log_y_true = torch.log(y_true) dist.all_reduce(log_y_true) - np.testing.assert_almost_equal(m.cpu().numpy(), - torch.exp(log_y_true.mean(dim=0) / dist.get_world_size()).cpu().numpy(), - decimal=5) + np.testing.assert_almost_equal( + m.cpu().numpy(), torch.exp(log_y_true.mean(dim=0) / dist.get_world_size()).cpu().numpy(), decimal=5 + ) def _test_distrib_integration(device): @@ -320,14 +314,13 @@ def update_fn(engine, batch): engine = Engine(update_fn) - custom_var_mean = metric_cls(output_transform=lambda output: output[1], - device=device) - custom_var_mean.attach(engine, 'agg_custom_var') + custom_var_mean = metric_cls(output_transform=lambda output: output[1], device=device) + custom_var_mean.attach(engine, "agg_custom_var") state = engine.run([0] * size) - np.testing.assert_almost_equal(state.metrics['agg_custom_var'].cpu().numpy(), - true_result_fn(custom_variable), - decimal=5) + np.testing.assert_almost_equal( + state.metrics["agg_custom_var"].cpu().numpy(), true_result_fn(custom_variable), decimal=5 + ) size = 100 custom_variable = 10.0 + 5.0 * torch.rand(size, dtype=torch.float64) @@ -338,12 +331,11 @@ def update_fn(engine, batch): engine = Engine(update_fn) - custom_var_mean = metric_cls(output_transform=lambda output: output[1], - device=device) - custom_var_mean.attach(engine, 'agg_custom_var') + custom_var_mean = metric_cls(output_transform=lambda output: output[1], device=device) + custom_var_mean.attach(engine, "agg_custom_var") state = engine.run([0] * size) - assert state.metrics['agg_custom_var'] == pytest.approx(true_result_fn(custom_variable)) + assert state.metrics["agg_custom_var"] == pytest.approx(true_result_fn(custom_variable)) def _mean(y_true): dist.all_reduce(y_true) @@ -363,7 +355,7 @@ def _geom_mean(y_true): @pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU") def test_distrib_gpu(distributed_context_single_node_nccl): - device = "cuda:{}".format(distributed_context_single_node_nccl['local_rank']) + device = "cuda:{}".format(distributed_context_single_node_nccl["local_rank"]) _test_distrib_variable_accumulation(device) _test_distrib_average(device) _test_distrib_geom_average(device) @@ -381,7 +373,7 @@ def test_distrib_cpu(distributed_context_single_node_gloo): @pytest.mark.multinode_distributed -@pytest.mark.skipif('MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_cpu(distributed_context_multi_node_gloo): device = "cpu" _test_distrib_variable_accumulation(device) @@ -391,9 +383,9 @@ def test_multinode_distrib_cpu(distributed_context_multi_node_gloo): @pytest.mark.multinode_distributed -@pytest.mark.skipif('GPU_MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_gpu(distributed_context_multi_node_nccl): - device = "cuda:{}".format(distributed_context_multi_node_nccl['local_rank']) + device = "cuda:{}".format(distributed_context_multi_node_nccl["local_rank"]) _test_distrib_variable_accumulation(device) _test_distrib_average(device) 
_test_distrib_geom_average(device) diff --git a/tests/ignite/metrics/test_accuracy.py b/tests/ignite/metrics/test_accuracy.py index 244797f0758e..5622ce7cf12a 100644 --- a/tests/ignite/metrics/test_accuracy.py +++ b/tests/ignite/metrics/test_accuracy.py @@ -19,16 +19,13 @@ def test__check_shape(): acc = Accuracy() with pytest.raises(ValueError): - acc._check_shape((torch.randint(0, 2, size=(10, 1, 5, 12)).long(), - torch.randint(0, 2, size=(10, 5, 6)).long())) + acc._check_shape((torch.randint(0, 2, size=(10, 1, 5, 12)).long(), torch.randint(0, 2, size=(10, 5, 6)).long())) with pytest.raises(ValueError): - acc._check_shape((torch.randint(0, 2, size=(10, 1, 6)).long(), - torch.randint(0, 2, size=(10, 5, 6)).long())) + acc._check_shape((torch.randint(0, 2, size=(10, 1, 6)).long(), torch.randint(0, 2, size=(10, 5, 6)).long())) with pytest.raises(ValueError): - acc._check_shape((torch.randint(0, 2, size=(10, 1)).long(), - torch.randint(0, 2, size=(10, 5)).long())) + acc._check_shape((torch.randint(0, 2, size=(10, 1)).long(), torch.randint(0, 2, size=(10, 5)).long())) def test_binary_wrong_inputs(): @@ -36,28 +33,23 @@ def test_binary_wrong_inputs(): with pytest.raises(ValueError): # y has not only 0 or 1 values - acc.update((torch.randint(0, 2, size=(10,)).long(), - torch.arange(0, 10).long())) + acc.update((torch.randint(0, 2, size=(10,)).long(), torch.arange(0, 10).long())) with pytest.raises(ValueError): # y_pred values are not thresholded to 0, 1 values - acc.update((torch.rand(10,), - torch.randint(0, 2, size=(10,)).long())) + acc.update((torch.rand(10,), torch.randint(0, 2, size=(10,)).long())) with pytest.raises(ValueError): # incompatible shapes - acc.update((torch.randint(0, 2, size=(10,)).long(), - torch.randint(0, 2, size=(10, 5)).long())) + acc.update((torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10, 5)).long())) with pytest.raises(ValueError): # incompatible shapes - acc.update((torch.randint(0, 2, size=(10, 5, 6)).long(), - torch.randint(0, 2, size=(10,)).long())) + acc.update((torch.randint(0, 2, size=(10, 5, 6)).long(), torch.randint(0, 2, size=(10,)).long())) with pytest.raises(ValueError): # incompatible shapes - acc.update((torch.randint(0, 2, size=(10,)).long(), - torch.randint(0, 2, size=(10, 5, 6)).long())) + acc.update((torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10, 5, 6)).long())) def test_binary_input_N(): @@ -70,7 +62,7 @@ def _test(): acc.update((y_pred, y)) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert acc._type == 'binary' + assert acc._type == "binary" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -84,11 +76,11 @@ def _test(): for i in range(n_iters): idx = i * batch_size - acc.update((y_pred[idx: idx + batch_size], y[idx: idx + batch_size])) + acc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert acc._type == 'binary' + assert acc._type == "binary" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -107,7 +99,7 @@ def _test(): acc.update((y_pred, y)) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert acc._type == 'binary' + assert acc._type == "binary" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -117,7 +109,7 @@ def _test(): acc.update((y_pred, y)) np_y = y.numpy().ravel() np_y_pred = 
y_pred.numpy().ravel() - assert acc._type == 'binary' + assert acc._type == "binary" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -131,11 +123,11 @@ def _test(): for i in range(n_iters): idx = i * batch_size - acc.update((y_pred[idx: idx + batch_size], y[idx: idx + batch_size])) + acc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert acc._type == 'binary' + assert acc._type == "binary" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -154,7 +146,7 @@ def _test(): acc.update((y_pred, y)) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert acc._type == 'binary' + assert acc._type == "binary" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -164,7 +156,7 @@ def _test(): acc.update((y_pred, y)) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert acc._type == 'binary' + assert acc._type == "binary" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -178,11 +170,11 @@ def _test(): for i in range(n_iters): idx = i * batch_size - acc.update((y_pred[idx: idx + batch_size], y[idx: idx + batch_size])) + acc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert acc._type == 'binary' + assert acc._type == "binary" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -201,7 +193,7 @@ def _test(): acc.update((y_pred, y)) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert acc._type == 'binary' + assert acc._type == "binary" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -211,7 +203,7 @@ def _test(): acc.update((y_pred, y)) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert acc._type == 'binary' + assert acc._type == "binary" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -225,11 +217,11 @@ def _test(): for i in range(n_iters): idx = i * batch_size - acc.update((y_pred[idx: idx + batch_size], y[idx: idx + batch_size])) + acc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert acc._type == 'binary' + assert acc._type == "binary" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -243,18 +235,15 @@ def test_multiclass_wrong_inputs(): with pytest.raises(ValueError): # incompatible shapes - acc.update((torch.rand(10, 5, 4), - torch.randint(0, 2, size=(10,)).long())) + acc.update((torch.rand(10, 5, 4), torch.randint(0, 2, size=(10,)).long())) with pytest.raises(ValueError): # incompatible shapes - acc.update((torch.rand(10, 5, 6), - torch.randint(0, 5, size=(10, 5)).long())) + acc.update((torch.rand(10, 5, 6), torch.randint(0, 5, size=(10, 5)).long())) with pytest.raises(ValueError): # incompatible shapes - acc.update((torch.rand(10), - torch.randint(0, 5, size=(10, 5, 6)).long())) + acc.update((torch.rand(10), torch.randint(0, 5, size=(10, 5, 6)).long())) def test_multiclass_input_N(): @@ -267,7 +256,7 @@ def _test(): acc.update((y_pred, y)) np_y_pred = 
y_pred.numpy().argmax(axis=1).ravel() np_y = y.numpy().ravel() - assert acc._type == 'multiclass' + assert acc._type == "multiclass" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -277,7 +266,7 @@ def _test(): acc.update((y_pred, y)) np_y_pred = y_pred.numpy().argmax(axis=1).ravel() np_y = y.numpy().ravel() - assert acc._type == 'multiclass' + assert acc._type == "multiclass" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -287,7 +276,7 @@ def _test(): acc.update((y_pred, y)) np_y_pred = y_pred.numpy().argmax(axis=1).ravel() np_y = y.numpy().ravel() - assert acc._type == 'multiclass' + assert acc._type == "multiclass" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -297,7 +286,7 @@ def _test(): acc.update((y_pred, y)) np_y_pred = y_pred.numpy().argmax(axis=1).ravel() np_y = y.numpy().ravel() - assert acc._type == 'multiclass' + assert acc._type == "multiclass" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -308,7 +297,7 @@ def _test(): acc.update((y_pred, y)) np_y_pred = y_pred.numpy().argmax(axis=1).ravel() np_y = y.numpy().ravel() - assert acc._type == 'multiclass' + assert acc._type == "multiclass" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -322,11 +311,11 @@ def _test(): for i in range(n_iters): idx = i * batch_size - acc.update((y_pred[idx: idx + batch_size], y[idx: idx + batch_size])) + acc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().argmax(axis=1).ravel() - assert acc._type == 'multiclass' + assert acc._type == "multiclass" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -345,7 +334,7 @@ def _test(): acc.update((y_pred, y)) np_y_pred = y_pred.numpy().argmax(axis=1).ravel() np_y = y.numpy().ravel() - assert acc._type == 'multiclass' + assert acc._type == "multiclass" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -355,7 +344,7 @@ def _test(): acc.update((y_pred, y)) np_y_pred = y_pred.numpy().argmax(axis=1).ravel() np_y = y.numpy().ravel() - assert acc._type == 'multiclass' + assert acc._type == "multiclass" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -369,11 +358,11 @@ def _test(): for i in range(n_iters): idx = i * batch_size - acc.update((y_pred[idx: idx + batch_size], y[idx: idx + batch_size])) + acc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().argmax(axis=1).ravel() - assert acc._type == 'multiclass' + assert acc._type == "multiclass" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -392,7 +381,7 @@ def _test(): acc.update((y_pred, y)) np_y_pred = y_pred.numpy().argmax(axis=1).ravel() np_y = y.numpy().ravel() - assert acc._type == 'multiclass' + assert acc._type == "multiclass" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -402,7 +391,7 @@ def _test(): acc.update((y_pred, y)) np_y_pred = y_pred.numpy().argmax(axis=1).ravel() np_y = y.numpy().ravel() - assert acc._type == 
'multiclass' + assert acc._type == "multiclass" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -416,11 +405,11 @@ def _test(): for i in range(n_iters): idx = i * batch_size - acc.update((y_pred[idx: idx + batch_size], y[idx: idx + batch_size])) + acc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().argmax(axis=1).ravel() - assert acc._type == 'multiclass' + assert acc._type == "multiclass" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -463,7 +452,7 @@ def _test(): acc.update((y_pred, y)) np_y_pred = y_pred.numpy() np_y = y.numpy() - assert acc._type == 'multilabel' + assert acc._type == "multilabel" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -473,7 +462,7 @@ def _test(): acc.update((y_pred, y)) np_y_pred = y_pred.numpy() np_y = y.numpy() - assert acc._type == 'multilabel' + assert acc._type == "multilabel" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -487,11 +476,11 @@ def _test(): for i in range(n_iters): idx = i * batch_size - acc.update((y_pred[idx: idx + batch_size], y[idx: idx + batch_size])) + acc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) np_y = y.numpy() np_y_pred = y_pred.numpy() - assert acc._type == 'multilabel' + assert acc._type == "multilabel" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -511,7 +500,7 @@ def _test(): acc.update((y_pred, y)) np_y_pred = to_numpy_multilabel(y_pred) # (N, C, L, ...) -> (N * L * ..., C) np_y = to_numpy_multilabel(y) # (N, C, L, ...) -> (N * L ..., C) - assert acc._type == 'multilabel' + assert acc._type == "multilabel" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -521,7 +510,7 @@ def _test(): acc.update((y_pred, y)) np_y_pred = to_numpy_multilabel(y_pred) # (N, C, L, ...) -> (N * L * ..., C) np_y = to_numpy_multilabel(y) # (N, C, L, ...) -> (N * L ..., C) - assert acc._type == 'multilabel' + assert acc._type == "multilabel" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -535,11 +524,11 @@ def _test(): for i in range(n_iters): idx = i * batch_size - acc.update((y_pred[idx: idx + batch_size], y[idx: idx + batch_size])) + acc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) np_y_pred = to_numpy_multilabel(y_pred) # (N, C, L, ...) -> (N * L * ..., C) np_y = to_numpy_multilabel(y) # (N, C, L, ...) -> (N * L ..., C) - assert acc._type == 'multilabel' + assert acc._type == "multilabel" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -559,7 +548,7 @@ def _test(): acc.update((y_pred, y)) np_y_pred = to_numpy_multilabel(y_pred) # (N, C, H, W, ...) -> (N * H * W ..., C) np_y = to_numpy_multilabel(y) # (N, C, H, W, ...) -> (N * H * W ..., C) - assert acc._type == 'multilabel' + assert acc._type == "multilabel" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -569,7 +558,7 @@ def _test(): acc.update((y_pred, y)) np_y_pred = to_numpy_multilabel(y_pred) # (N, C, H, W, ...) -> (N * H * W ..., C) np_y = to_numpy_multilabel(y) # (N, C, H, W, ...) 
-> (N * H * W ..., C) - assert acc._type == 'multilabel' + assert acc._type == "multilabel" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -583,11 +572,11 @@ def _test(): for i in range(n_iters): idx = i * batch_size - acc.update((y_pred[idx: idx + batch_size], y[idx: idx + batch_size])) + acc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) np_y_pred = to_numpy_multilabel(y_pred) # (N, C, L, ...) -> (N * L * ..., C) np_y = to_numpy_multilabel(y) # (N, C, L, ...) -> (N * L ..., C) - assert acc._type == 'multilabel' + assert acc._type == "multilabel" assert isinstance(acc.compute(), float) assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute()) @@ -616,6 +605,7 @@ def _test_distrib_multilabel_input_NHW(device): # Multilabel input data of shape (N, C, H, W, ...) and (N, C, H, W, ...) import torch.distributed as dist + rank = dist.get_rank() def _gather(y): @@ -638,7 +628,7 @@ def _test(): np_y_pred = to_numpy_multilabel(y_pred.cpu()) # (N, C, H, W, ...) -> (N * H * W ..., C) np_y = to_numpy_multilabel(y.cpu()) # (N, C, H, W, ...) -> (N * H * W ..., C) - assert acc._type == 'multilabel' + assert acc._type == "multilabel" n = acc._num_examples res = acc.compute() assert n * dist.get_world_size() == acc._num_examples @@ -658,7 +648,7 @@ def _test(): np_y_pred = to_numpy_multilabel(y_pred.cpu()) # (N, C, H, W, ...) -> (N * H * W ..., C) np_y = to_numpy_multilabel(y.cpu()) # (N, C, H, W, ...) -> (N * H * W ..., C) - assert acc._type == 'multilabel' + assert acc._type == "multilabel" n = acc._num_examples res = acc.compute() assert n * dist.get_world_size() == acc._num_examples @@ -681,7 +671,7 @@ def _test(): for i in range(n_iters): idx = i * batch_size - acc.update((y_pred[idx: idx + batch_size], y[idx: idx + batch_size])) + acc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) # gather y_pred, y y_pred = _gather(y_pred) @@ -690,7 +680,7 @@ def _test(): np_y_pred = to_numpy_multilabel(y_pred.cpu()) # (N, C, L, ...) -> (N * L * ..., C) np_y = to_numpy_multilabel(y.cpu()) # (N, C, L, ...) -> (N * L ..., C) - assert acc._type == 'multilabel' + assert acc._type == "multilabel" n = acc._num_examples res = acc.compute() assert n * dist.get_world_size() == acc._num_examples @@ -716,12 +706,14 @@ def _test(n_epochs): n_classes = 10 offset = n_iters * s - y_true = torch.randint(0, n_classes, size=(offset * dist.get_world_size(), )).to(device) + y_true = torch.randint(0, n_classes, size=(offset * dist.get_world_size(),)).to(device) y_preds = torch.rand(offset * dist.get_world_size(), n_classes).to(device) def update(engine, i): - return y_preds[i * s + rank * offset:(i + 1) * s + rank * offset, :], \ - y_true[i * s + rank * offset:(i + 1) * s + rank * offset] + return ( + y_preds[i * s + rank * offset : (i + 1) * s + rank * offset, :], + y_true[i * s + rank * offset : (i + 1) * s + rank * offset], + ) engine = Engine(update) @@ -732,7 +724,7 @@ def update(engine, i): engine.run(data=data, max_epochs=n_epochs) assert "acc" in engine.state.metrics - res = engine.state.metrics['acc'] + res = engine.state.metrics["acc"] if isinstance(res, torch.Tensor): res = res.cpu().numpy() @@ -763,8 +755,10 @@ def _test(n_epochs): y_preds = torch.randint(0, 2, size=(offset * dist.get_world_size(), n_classes, 8, 10)).to(device) def update(engine, i): - return y_preds[i * s + rank * offset:(i + 1) * s + rank * offset, ...], \ - y_true[i * s + rank * offset:(i + 1) * s + rank * offset, ...] 
+ return ( + y_preds[i * s + rank * offset : (i + 1) * s + rank * offset, ...], + y_true[i * s + rank * offset : (i + 1) * s + rank * offset, ...], + ) engine = Engine(update) @@ -775,12 +769,11 @@ def update(engine, i): engine.run(data=data, max_epochs=n_epochs) assert "acc" in engine.state.metrics - res = engine.state.metrics['acc'] + res = engine.state.metrics["acc"] if isinstance(res, torch.Tensor): res = res.cpu().numpy() - true_res = accuracy_score(to_numpy_multilabel(y_true), - to_numpy_multilabel(y_preds)) + true_res = accuracy_score(to_numpy_multilabel(y_true), to_numpy_multilabel(y_preds)) assert pytest.approx(res) == true_res @@ -792,7 +785,7 @@ def update(engine, i): @pytest.mark.distributed @pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU") def test_distrib_gpu(distributed_context_single_node_nccl): - device = "cuda:{}".format(distributed_context_single_node_nccl['local_rank']) + device = "cuda:{}".format(distributed_context_single_node_nccl["local_rank"]) _test_distrib_multilabel_input_NHW(device) _test_distrib_itegration_multiclass(device) _test_distrib_itegration_multilabel(device) @@ -808,7 +801,7 @@ def test_distrib_cpu(distributed_context_single_node_gloo): @pytest.mark.multinode_distributed -@pytest.mark.skipif('MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_cpu(distributed_context_multi_node_gloo): device = "cpu" _test_distrib_multilabel_input_NHW(device) @@ -817,9 +810,9 @@ def test_multinode_distrib_cpu(distributed_context_multi_node_gloo): @pytest.mark.multinode_distributed -@pytest.mark.skipif('GPU_MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_gpu(distributed_context_multi_node_nccl): - device = "cuda:{}".format(distributed_context_multi_node_nccl['local_rank']) + device = "cuda:{}".format(distributed_context_multi_node_nccl["local_rank"]) _test_distrib_multilabel_input_NHW(device) _test_distrib_itegration_multiclass(device) _test_distrib_itegration_multilabel(device) diff --git a/tests/ignite/metrics/test_confusion_matrix.py b/tests/ignite/metrics/test_confusion_matrix.py index 21e5d8c1d564..e01167f8a946 100644 --- a/tests/ignite/metrics/test_confusion_matrix.py +++ b/tests/ignite/metrics/test_confusion_matrix.py @@ -23,21 +23,18 @@ def test_multiclass_wrong_inputs(): cm = ConfusionMatrix(10) with pytest.raises(ValueError, match=r"y_pred must have shape \(batch_size, num_categories, ...\)"): - cm.update((torch.rand(10), - torch.randint(0, 2, size=(10,)).long())) + cm.update((torch.rand(10), torch.randint(0, 2, size=(10,)).long())) with pytest.raises(ValueError, match=r"y_pred does not have correct number of categories:"): - cm.update((torch.rand(10, 5, 4), - torch.randint(0, 2, size=(10,)).long())) + cm.update((torch.rand(10, 5, 4), torch.randint(0, 2, size=(10,)).long())) - with pytest.raises(ValueError, match=r"y_pred must have shape \(batch_size, num_categories, ...\) " - r"and y must have "): - cm.update((torch.rand(4, 10, 12, 12), - torch.randint(0, 10, size=(10, )).long())) + with pytest.raises( + ValueError, match=r"y_pred must have shape \(batch_size, num_categories, ...\) " r"and y must have " + ): + cm.update((torch.rand(4, 10, 12, 12), torch.randint(0, 10, size=(10,)).long())) with 
pytest.raises(ValueError, match=r"y and y_pred must have compatible shapes."): - cm.update((torch.rand(4, 10, 12, 14), - torch.randint(0, 10, size=(4, 5, 6)).long())) + cm.update((torch.rand(4, 10, 12, 14), torch.randint(0, 10, size=(4, 5, 6)).long())) with pytest.raises(ValueError, match=r"Argument average can None or one of"): ConfusionMatrix(num_classes=10, average="abc") @@ -58,7 +55,7 @@ def _test_N(): num_classes = 10 cm = ConfusionMatrix(num_classes=num_classes) y_pred = torch.rand(4, num_classes) - y = torch.randint(0, num_classes, size=(4, )).long() + y = torch.randint(0, num_classes, size=(4,)).long() cm.update((y_pred, y)) np_y_pred = y_pred.numpy().argmax(axis=1).ravel() np_y = y.numpy().ravel() @@ -86,7 +83,7 @@ def _test_N(): for i in range(n_iters): idx = i * batch_size - cm.update((y_pred[idx: idx + batch_size], y[idx: idx + batch_size])) + cm.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().argmax(axis=1).ravel() @@ -131,7 +128,7 @@ def _test_NL(): for i in range(n_iters): idx = i * batch_size - cm.update((y_pred[idx: idx + batch_size], y[idx: idx + batch_size])) + cm.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().argmax(axis=1).ravel() @@ -175,7 +172,7 @@ def _test_NHW(): for i in range(n_iters): idx = i * batch_size - cm.update((y_pred[idx: idx + batch_size], y[idx: idx + batch_size])) + cm.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().argmax(axis=1).ravel() @@ -216,7 +213,7 @@ def compute_th_y_true_y_logits(y_true, y_pred): th_y_true = torch.from_numpy(y_true).unsqueeze(0) # Create logits torch.tensor: num_classes = max(np.max(y_true), np.max(y_pred)) + 1 - y_probas = np.ones((num_classes, ) + y_true.shape) * -10 + y_probas = np.ones((num_classes,) + y_true.shape) * -10 for i in range(num_classes): y_probas[i, (y_pred == i)] = 720 th_y_logits = torch.from_numpy(y_probas).unsqueeze(0) @@ -327,7 +324,7 @@ def test_iou(): output = (th_y_logits, th_y_true) cm.update(output) res = iou_metric.compute().numpy() - true_res_ = true_res[:ignore_index] + true_res[ignore_index + 1:] + true_res_ = true_res[:ignore_index] + true_res[ignore_index + 1 :] assert np.all(res == true_res_), "{}: {} vs {}".format(ignore_index, res, true_res_) @@ -364,7 +361,7 @@ def test_miou(): output = (th_y_logits, th_y_true) cm.update(output) res = iou_metric.compute().numpy() - true_res_ = np.mean(true_res[:ignore_index] + true_res[ignore_index + 1:]) + true_res_ = np.mean(true_res[:ignore_index] + true_res[ignore_index + 1 :]) assert res == true_res_, "{}: {} vs {}".format(ignore_index, res, true_res_) @@ -392,7 +389,7 @@ def test_cm_precision(): y_true, y_pred = np.random.randint(0, 10, size=(1000,)), np.random.randint(0, 10, size=(1000,)) th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred) - true_pr = precision_score(y_true.reshape(-1), y_pred.reshape(-1), average='macro') + true_pr = precision_score(y_true.reshape(-1), y_pred.reshape(-1), average="macro") cm = ConfusionMatrix(num_classes=10) pr_metric = cmPrecision(cm, average=True) @@ -423,7 +420,7 @@ def test_cm_recall(): y_true, y_pred = np.random.randint(0, 10, size=(1000,)), np.random.randint(0, 10, size=(1000,)) th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred) - true_re = recall_score(y_true.reshape(-1), y_pred.reshape(-1), average='macro') + true_re = recall_score(y_true.reshape(-1), 
y_pred.reshape(-1), average="macro") cm = ConfusionMatrix(num_classes=10) re_metric = cmRecall(cm, average=True) @@ -456,19 +453,19 @@ def test_cm_with_average(): np_y_pred = y_pred.numpy().argmax(axis=1).ravel() np_y = y.numpy().ravel() - cm = ConfusionMatrix(num_classes=num_classes, average='samples') + cm = ConfusionMatrix(num_classes=num_classes, average="samples") cm.update((y_pred, y)) true_res = confusion_matrix(np_y, np_y_pred, labels=list(range(num_classes))) * 1.0 / len(np_y) res = cm.compute().numpy() np.testing.assert_almost_equal(true_res, res) - cm = ConfusionMatrix(num_classes=num_classes, average='recall') + cm = ConfusionMatrix(num_classes=num_classes, average="recall") cm.update((y_pred, y)) true_re = recall_score(np_y, np_y_pred, average=None, labels=list(range(num_classes))) res = cm.compute().numpy().diagonal() np.testing.assert_almost_equal(true_re, res) - cm = ConfusionMatrix(num_classes=num_classes, average='precision') + cm = ConfusionMatrix(num_classes=num_classes, average="precision") cm.update((y_pred, y)) true_pr = precision_score(np_y, np_y_pred, average=None, labels=list(range(num_classes))) res = cm.compute().numpy().diagonal() @@ -526,7 +523,7 @@ def test_dice_coefficient(): output = (th_y_logits, th_y_true) cm.update(output) res = dice_metric.compute().numpy() - true_res_ = true_res[:ignore_index] + true_res[ignore_index + 1:] + true_res_ = true_res[:ignore_index] + true_res[ignore_index + 1 :] assert np.all(res == true_res_), "{}: {} vs {}".format(ignore_index, res, true_res_) @@ -619,14 +616,14 @@ def test_distrib_cpu(distributed_context_single_node_gloo): @pytest.mark.multinode_distributed -@pytest.mark.skipif('MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_cpu(distributed_context_multi_node_gloo): device = "cpu" _test_distrib_multiclass_images(device) @pytest.mark.multinode_distributed -@pytest.mark.skipif('GPU_MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_gpu(distributed_context_multi_node_nccl): - device = "cuda:{}".format(distributed_context_multi_node_nccl['local_rank']) + device = "cuda:{}".format(distributed_context_multi_node_nccl["local_rank"]) _test_distrib_multiclass_images(device) diff --git a/tests/ignite/metrics/test_epoch_metric.py b/tests/ignite/metrics/test_epoch_metric.py index 428629d9c8bb..da0275155632 100644 --- a/tests/ignite/metrics/test_epoch_metric.py +++ b/tests/ignite/metrics/test_epoch_metric.py @@ -44,7 +44,7 @@ def compute_fn(y_preds, y_targets): output2 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long)) em.update(output2) - assert em._predictions.device.type == 'cpu' and em._targets.device.type == 'cpu' + assert em._predictions.device.type == "cpu" and em._targets.device.type == "cpu" assert torch.equal(em._predictions[:4, :], output1[0]) assert torch.equal(em._predictions[4:, :], output2[0]) assert torch.equal(em._targets[:4, :], output1[1]) @@ -58,7 +58,7 @@ def compute_fn(y_preds, y_targets): output2 = (torch.rand(4, 1), torch.randint(0, 2, size=(4, 1), dtype=torch.long)) em.update(output2) - assert em._predictions.device.type == 'cpu' and em._targets.device.type == 'cpu' + assert em._predictions.device.type == "cpu" and em._targets.device.type == "cpu" assert 
torch.equal(em._predictions[:4], output1[0][:, 0]) assert torch.equal(em._predictions[4:], output2[0][:, 0]) assert torch.equal(em._targets[:4], output1[1][:, 0]) @@ -67,7 +67,6 @@ def compute_fn(y_preds, y_targets): def test_mse_epoch_metric(): - def compute_fn(y_preds, y_targets): return torch.mean(((y_preds - y_targets.type_as(y_preds)) ** 2)).item() @@ -103,7 +102,6 @@ def compute_fn(y_preds, y_targets): def test_bad_compute_fn(): - def compute_fn(y_preds, y_targets): # Following will raise the error: # The size of tensor a (3) must match the size of tensor b (4) @@ -119,7 +117,6 @@ def compute_fn(y_preds, y_targets): def _test_warning(): - def compute_fn(y_preds, y_targets): return 0.0 @@ -171,13 +168,13 @@ def _gather(y): @pytest.mark.multinode_distributed -@pytest.mark.skipif('MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_cpu(distributed_context_multi_node_gloo): _test_warning() @pytest.mark.multinode_distributed -@pytest.mark.skipif('GPU_MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_gpu(distributed_context_multi_node_nccl): _test_warning() diff --git a/tests/ignite/metrics/test_fbeta.py b/tests/ignite/metrics/test_fbeta.py index 1d3c9f9f14d4..71a75ab9cce3 100644 --- a/tests/ignite/metrics/test_fbeta.py +++ b/tests/ignite/metrics/test_fbeta.py @@ -36,7 +36,6 @@ def test_wrong_inputs(): def test_integration(): - def _test(p, r, average, output_transform): np.random.seed(1) @@ -60,10 +59,7 @@ def update_fn(engine, batch): y_true_batch = next(y_true_batch_values) y_pred_batch = next(y_pred_batch_values) if output_transform is not None: - return { - 'y_pred': torch.from_numpy(y_pred_batch), - 'y': torch.from_numpy(y_true_batch) - } + return {"y_pred": torch.from_numpy(y_pred_batch), "y": torch.from_numpy(y_true_batch)} return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch) evaluator = Engine(update_fn) @@ -74,18 +70,17 @@ def update_fn(engine, batch): data = list(range(n_iters)) state = evaluator.run(data, max_epochs=1) - f2_true = fbeta_score(y_true, np.argmax(y_pred, axis=-1), - average='macro' if average else None, beta=2.0) - if isinstance(state.metrics['f2'], torch.Tensor): - np.testing.assert_allclose(f2_true, state.metrics['f2'].numpy()) + f2_true = fbeta_score(y_true, np.argmax(y_pred, axis=-1), average="macro" if average else None, beta=2.0) + if isinstance(state.metrics["f2"], torch.Tensor): + np.testing.assert_allclose(f2_true, state.metrics["f2"].numpy()) else: - assert f2_true == pytest.approx(state.metrics['f2']), "{} vs {}".format(f2_true, state.metrics['f2']) + assert f2_true == pytest.approx(state.metrics["f2"]), "{} vs {}".format(f2_true, state.metrics["f2"]) _test(None, None, False, output_transform=None) _test(None, None, True, output_transform=None) def output_transform(output): - return output['y_pred'], output['y'] + return output["y_pred"], output["y"] _test(None, None, False, output_transform=output_transform) _test(None, None, True, output_transform=output_transform) @@ -107,12 +102,14 @@ def _test(p, r, average, n_epochs): n_classes = 7 offset = n_iters * s - y_true = torch.randint(0, n_classes, size=(offset * dist.get_world_size(), )).to(device) + y_true = torch.randint(0, n_classes, size=(offset * 
dist.get_world_size(),)).to(device) y_preds = torch.rand(offset * dist.get_world_size(), n_classes).to(device) def update(engine, i): - return y_preds[i * s + rank * offset:(i + 1) * s + rank * offset, :], \ - y_true[i * s + rank * offset:(i + 1) * s + rank * offset] + return ( + y_preds[i * s + rank * offset : (i + 1) * s + rank * offset, :], + y_true[i * s + rank * offset : (i + 1) * s + rank * offset], + ) engine = Engine(update) @@ -123,12 +120,16 @@ def update(engine, i): engine.run(data=data, max_epochs=n_epochs) assert "f2.5" in engine.state.metrics - res = engine.state.metrics['f2.5'] + res = engine.state.metrics["f2.5"] if isinstance(res, torch.Tensor): res = res.cpu().numpy() - true_res = fbeta_score(y_true.cpu().numpy(), torch.argmax(y_preds, dim=1).cpu().numpy(), beta=2.5, - average='macro' if average else None) + true_res = fbeta_score( + y_true.cpu().numpy(), + torch.argmax(y_preds, dim=1).cpu().numpy(), + beta=2.5, + average="macro" if average else None, + ) assert pytest.approx(res) == true_res @@ -154,14 +155,14 @@ def test_distrib_cpu(distributed_context_single_node_gloo): @pytest.mark.multinode_distributed -@pytest.mark.skipif('MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_cpu(distributed_context_multi_node_gloo): device = "cpu" _test_distrib_itegration(device) @pytest.mark.multinode_distributed -@pytest.mark.skipif('GPU_MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_gpu(distributed_context_multi_node_nccl): - device = "cuda:{}".format(distributed_context_multi_node_nccl['local_rank']) + device = "cuda:{}".format(distributed_context_multi_node_nccl["local_rank"]) _test_distrib_itegration(device) diff --git a/tests/ignite/metrics/test_frequency.py b/tests/ignite/metrics/test_frequency.py index aff78b82942b..055bcd82895b 100644 --- a/tests/ignite/metrics/test_frequency.py +++ b/tests/ignite/metrics/test_frequency.py @@ -35,13 +35,14 @@ def update_fn(engine, batch): engine = Engine(update_fn) wps_metric = Frequency(output_transform=lambda x: x["ntokens"], device=device) - wps_metric.attach(engine, 'wps') + wps_metric.attach(engine, "wps") @engine.on(Events.ITERATION_COMPLETED) def assert_wps(e): - wps = e.state.metrics['wps'] - assert estimated_wps * 0.85 < wps < estimated_wps, \ - "{}: {} < {} < {}".format(e.state.iteration, estimated_wps * 0.85, wps, estimated_wps) + wps = e.state.metrics["wps"] + assert estimated_wps * 0.85 < wps < estimated_wps, "{}: {} < {} < {}".format( + e.state.iteration, estimated_wps * 0.85, wps, estimated_wps + ) data = [[i] * batch_size for i in range(0, total_tokens, batch_size)] engine.run(data, max_epochs=1) diff --git a/tests/ignite/metrics/test_loss.py b/tests/ignite/metrics/test_loss.py index c513dbe3c5b2..4daad2d8aec2 100644 --- a/tests/ignite/metrics/test_loss.py +++ b/tests/ignite/metrics/test_loss.py @@ -46,7 +46,7 @@ def test_compute_on_criterion(): def test_non_averaging_loss(): - loss = Loss(nn.NLLLoss(reduction='none')) + loss = Loss(nn.NLLLoss(reduction="none")) y_pred = torch.tensor([[0.1, 0.4, 0.5], [0.1, 0.7, 0.2]]).log() y = torch.tensor([2, 2]).long() @@ -130,14 +130,14 @@ def test_distrib_cpu(distributed_context_single_node_gloo): @pytest.mark.multinode_distributed 
-@pytest.mark.skipif('MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_cpu(distributed_context_multi_node_gloo): device = "cpu" _test_distrib_compute_on_criterion(device) @pytest.mark.multinode_distributed -@pytest.mark.skipif('GPU_MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_gpu(distributed_context_multi_node_nccl): - device = "cuda:{}".format(distributed_context_multi_node_nccl['local_rank']) + device = "cuda:{}".format(distributed_context_multi_node_nccl["local_rank"]) _test_distrib_compute_on_criterion(device) diff --git a/tests/ignite/metrics/test_mean_absolute_error.py b/tests/ignite/metrics/test_mean_absolute_error.py index e0c5fb684c48..37012fe8f270 100644 --- a/tests/ignite/metrics/test_mean_absolute_error.py +++ b/tests/ignite/metrics/test_mean_absolute_error.py @@ -45,8 +45,10 @@ def _test_distrib_itegration(device): y_preds = torch.ones(offset * dist.get_world_size(), dtype=torch.float).to(device) def update(engine, i): - return y_preds[i * s + offset * rank:(i + 1) * s + offset * rank], \ - y_true[i * s + offset * rank:(i + 1) * s + offset * rank] + return ( + y_preds[i * s + offset * rank : (i + 1) * s + offset * rank], + y_true[i * s + offset * rank : (i + 1) * s + offset * rank], + ) engine = Engine(update) @@ -57,7 +59,7 @@ def update(engine, i): engine.run(data=data, max_epochs=1) assert "mae" in engine.state.metrics - res = engine.state.metrics['mae'] + res = engine.state.metrics["mae"] true_res = np.mean(np.abs((y_true - y_preds).cpu().numpy())) @@ -78,14 +80,14 @@ def test_distrib_cpu(distributed_context_single_node_gloo): @pytest.mark.multinode_distributed -@pytest.mark.skipif('MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_cpu(distributed_context_multi_node_gloo): device = "cpu" _test_distrib_itegration(device) @pytest.mark.multinode_distributed -@pytest.mark.skipif('GPU_MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_gpu(distributed_context_multi_node_nccl): - device = "cuda:{}".format(distributed_context_multi_node_nccl['local_rank']) + device = "cuda:{}".format(distributed_context_multi_node_nccl["local_rank"]) _test_distrib_itegration(device) diff --git a/tests/ignite/metrics/test_mean_pairwise_distance.py b/tests/ignite/metrics/test_mean_pairwise_distance.py index 9c0b6e4341e9..7b0dd4b4a99b 100644 --- a/tests/ignite/metrics/test_mean_pairwise_distance.py +++ b/tests/ignite/metrics/test_mean_pairwise_distance.py @@ -48,8 +48,10 @@ def _test_distrib_itegration(device): y_preds = torch.rand(offset * dist.get_world_size(), 10).to(device) def update(engine, i): - return y_preds[i * s + offset * rank:(i + 1) * s + offset * rank, ...], \ - y_true[i * s + offset * rank:(i + 1) * s + offset * rank, ...] 
+ return ( + y_preds[i * s + offset * rank : (i + 1) * s + offset * rank, ...], + y_true[i * s + offset * rank : (i + 1) * s + offset * rank, ...], + ) engine = Engine(update) @@ -60,14 +62,16 @@ def update(engine, i): engine.run(data=data, max_epochs=1) assert "mpwd" in engine.state.metrics - res = engine.state.metrics['mpwd'] + res = engine.state.metrics["mpwd"] true_res = [] for i in range(n_iters * dist.get_world_size()): true_res.append( - torch.pairwise_distance(y_true[i * s:(i + 1) * s, ...], - y_preds[i * s:(i + 1) * s, ...], - p=m._p, eps=m._eps).cpu().numpy() + torch.pairwise_distance( + y_true[i * s : (i + 1) * s, ...], y_preds[i * s : (i + 1) * s, ...], p=m._p, eps=m._eps + ) + .cpu() + .numpy() ) true_res = np.array(true_res).ravel() true_res = true_res.mean() @@ -89,14 +93,14 @@ def test_distrib_cpu(distributed_context_single_node_gloo): @pytest.mark.multinode_distributed -@pytest.mark.skipif('MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_cpu(distributed_context_multi_node_gloo): device = "cpu" _test_distrib_itegration(device) @pytest.mark.multinode_distributed -@pytest.mark.skipif('GPU_MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_gpu(distributed_context_multi_node_nccl): - device = "cuda:{}".format(distributed_context_multi_node_nccl['local_rank']) + device = "cuda:{}".format(distributed_context_multi_node_nccl["local_rank"]) _test_distrib_itegration(device) diff --git a/tests/ignite/metrics/test_mean_squared_error.py b/tests/ignite/metrics/test_mean_squared_error.py index b02d8788b0ae..f8c690753386 100644 --- a/tests/ignite/metrics/test_mean_squared_error.py +++ b/tests/ignite/metrics/test_mean_squared_error.py @@ -45,8 +45,10 @@ def _test_distrib_itegration(device): y_preds = torch.ones(offset * dist.get_world_size(), dtype=torch.float).to(device) def update(engine, i): - return y_preds[i * s + offset * rank:(i + 1) * s + offset * rank], \ - y_true[i * s + offset * rank:(i + 1) * s + offset * rank] + return ( + y_preds[i * s + offset * rank : (i + 1) * s + offset * rank], + y_true[i * s + offset * rank : (i + 1) * s + offset * rank], + ) engine = Engine(update) @@ -57,7 +59,7 @@ def update(engine, i): engine.run(data=data, max_epochs=1) assert "mse" in engine.state.metrics - res = engine.state.metrics['mse'] + res = engine.state.metrics["mse"] true_res = np.mean(np.power((y_true - y_preds).cpu().numpy(), 2.0)) @@ -79,14 +81,14 @@ def test_distrib_cpu(distributed_context_single_node_gloo): @pytest.mark.multinode_distributed -@pytest.mark.skipif('MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_cpu(distributed_context_multi_node_gloo): device = "cpu" _test_distrib_itegration(device) @pytest.mark.multinode_distributed -@pytest.mark.skipif('GPU_MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_gpu(distributed_context_multi_node_nccl): - device = "cuda:{}".format(distributed_context_multi_node_nccl['local_rank']) 
+ device = "cuda:{}".format(distributed_context_multi_node_nccl["local_rank"]) _test_distrib_itegration(device) diff --git a/tests/ignite/metrics/test_metric.py b/tests/ignite/metrics/test_metric.py index 55c09dafb6c0..92ed89755b90 100644 --- a/tests/ignite/metrics/test_metric.py +++ b/tests/ignite/metrics/test_metric.py @@ -16,7 +16,6 @@ class DummyMetric1(Metric): - def __init__(self, true_output, output_transform=lambda x: x): super(DummyMetric1, self).__init__(output_transform=output_transform) self.true_output = true_output @@ -47,26 +46,26 @@ def test_transform(): def transform(output): pred_dict, target_dict = output - return pred_dict['y'], target_dict['y'] + return pred_dict["y"], target_dict["y"] metric = DummyMetric1(true_output=(y_pred, y), output_transform=transform) - state = State(output=({'y': y_pred}, {'y': y})) + state = State(output=({"y": y_pred}, {"y": y})) engine = MagicMock(state=state) metric.iteration_completed(engine) def test_output_as_mapping_wrong_keys(): metric = DummyMetric1(true_output=(0, 1)) - state = State(output=({'y1': 0, 'y2': 1})) + state = State(output=({"y1": 0, "y2": 1})) engine = MagicMock(state=state) - with pytest.raises(ValueError, match=r"When transformed engine's output is a mapping, " - r"it should contain \('y_pred', 'y'\) keys"): + with pytest.raises( + ValueError, match=r"When transformed engine's output is a mapping, " r"it should contain \('y_pred', 'y'\) keys" + ): metric.iteration_completed(engine) def test_output_as_mapping_keys_is_none(): - class DummyMetric(Metric): _required_output_keys = None @@ -81,7 +80,7 @@ def update(self, output): metric = DummyMetric() assert metric._required_output_keys is None - state = State(output=({'y1': 0, 'y2': 1})) + state = State(output=({"y1": 0, "y2": 1})) engine = MagicMock(state=state) with pytest.raises(TypeError, match=r"Transformed engine output for DummyMetric metric should be a tuple/list"): @@ -93,7 +92,7 @@ def test_output_as_mapping(): y = torch.zeros(2) metric = DummyMetric1(true_output=(y_pred, y)) - state = State(output=({'y_pred': y_pred, 'y': y})) + state = State(output=({"y_pred": y_pred, "y": y})) engine = MagicMock(state=state) metric.iteration_completed(engine) @@ -123,7 +122,6 @@ def update(self, output): def test_arithmetics(): class ListGatherMetric(Metric): - def __init__(self, index): self.index = index super(ListGatherMetric, self).__init__() @@ -356,9 +354,9 @@ def update_fn(engine, batch): recall_true = recall_score(y_true, np.argmax(y_pred, axis=-1), average=None) f1_true = f1_score(y_true, np.argmax(y_pred, axis=-1), average=None) - precision = state.metrics['precision'].numpy() - recall = state.metrics['recall'].numpy() - f1 = state.metrics['f1'].numpy() + precision = state.metrics["precision"].numpy() + recall = state.metrics["recall"].numpy() + f1 = state.metrics["f1"].numpy() assert precision_true == approx(precision), "{} vs {}".format(precision_true, precision) assert recall_true == approx(recall), "{} vs {}".format(recall_true, recall) @@ -371,7 +369,6 @@ def test_abstract_class(): def test_pytorch_operators(): - def _test(composed_metric, metric_name, compute_true_value_fn): metrics = { @@ -397,7 +394,7 @@ def data(y_pred, y): d = data(y_pred, y) state = validator.run(d, max_epochs=1, epoch_length=y_pred.shape[0]) - assert set(state.metrics.keys()) == set([metric_name, ]) + assert set(state.metrics.keys()) == set([metric_name,]) np_y_pred = np.argmax(y_pred.numpy(), axis=-1).ravel() np_y = y.numpy().ravel() assert state.metrics[metric_name] == 
approx(compute_true_value_fn(np_y_pred, np_y)) @@ -429,7 +426,7 @@ def compute_sum_precision_recall(y_pred, y): f1 = (precision * recall * 2 / (precision + recall + 1e-20)).mean() def compute_f1(y_pred, y): - f1 = f1_score(y, y_pred, average='macro') + f1 = f1_score(y, y_pred, average="macro") return f1 _test(f1, "f1", compute_true_value_fn=compute_f1) @@ -444,8 +441,7 @@ def update_fn(engine, batch): y_pred, y = batch return y_pred, y - metrics = {'metric': ignite_metric[index], - 'metric_wo_index': ignite_metric} + metrics = {"metric": ignite_metric[index], "metric_wo_index": ignite_metric} validator = Engine(update_fn) @@ -459,41 +455,40 @@ def data(y_pred, y): d = data(y_pred, y) state = validator.run(d, max_epochs=1, epoch_length=y_pred.shape[0]) - sklearn_output = sklearn_metic(y.view(-1).numpy(), - y_pred.view(-1, num_classes).argmax(dim=1).numpy(), - **sklearn_args) + sklearn_output = sklearn_metic( + y.view(-1).numpy(), y_pred.view(-1, num_classes).argmax(dim=1).numpy(), **sklearn_args + ) - assert (state.metrics['metric_wo_index'][index] == state.metrics['metric']).all() - assert (np.allclose(state.metrics['metric'].numpy(), sklearn_output)) + assert (state.metrics["metric_wo_index"][index] == state.metrics["metric"]).all() + assert np.allclose(state.metrics["metric"].numpy(), sklearn_output) num_classes = 5 labels = list(range(0, num_classes, 2)) - _test(Precision(), precision_score, {'labels': labels, 'average': None}, index=labels) + _test(Precision(), precision_score, {"labels": labels, "average": None}, index=labels) labels = list(range(num_classes - 1, 0, -2)) - _test(Precision(), precision_score, {'labels': labels, 'average': None}, index=labels) + _test(Precision(), precision_score, {"labels": labels, "average": None}, index=labels) labels = [1] - _test(Precision(), precision_score, {'labels': labels, 'average': None}, index=labels) + _test(Precision(), precision_score, {"labels": labels, "average": None}, index=labels) labels = list(range(0, num_classes, 2)) - _test(Recall(), recall_score, {'labels': labels, 'average': None}, index=labels) + _test(Recall(), recall_score, {"labels": labels, "average": None}, index=labels) labels = list(range(num_classes - 1, 0, -2)) - _test(Recall(), recall_score, {'labels': labels, 'average': None}, index=labels) + _test(Recall(), recall_score, {"labels": labels, "average": None}, index=labels) labels = [1] - _test(Recall(), recall_score, {'labels': labels, 'average': None}, index=labels) + _test(Recall(), recall_score, {"labels": labels, "average": None}, index=labels) # np.ix_ is used to allow for a 2D slice of a matrix. This is required to get accurate result from # ConfusionMatrix. ConfusionMatrix must be sliced the same row-wise and column-wise. 
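# --- Illustrative aside (not part of the patch): what the np.ix_ slicing mentioned above does ---
# np.ix_ builds an open mesh, so indexing with it selects the sub-matrix formed by the chosen rows AND
# columns; that is why ConfusionMatrix must be sliced the same way row-wise and column-wise:
#   cm = np.arange(16).reshape(4, 4)
#   cm[np.ix_([0, 2], [0, 2])]   # -> [[ 0,  2], [ 8, 10]]  (rows 0, 2 crossed with columns 0, 2)
#   cm[[0, 2], [0, 2]]           # -> [ 0, 10]              (plain fancy indexing picks only the two diagonal entries)
# --- end aside ---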
labels = list(range(0, num_classes, 2)) - _test(ConfusionMatrix(num_classes), confusion_matrix, {'labels': labels}, index=np.ix_(labels, labels)) + _test(ConfusionMatrix(num_classes), confusion_matrix, {"labels": labels}, index=np.ix_(labels, labels)) labels = list(range(num_classes - 1, 0, -2)) - _test(ConfusionMatrix(num_classes), confusion_matrix, {'labels': labels}, index=np.ix_(labels, labels)) + _test(ConfusionMatrix(num_classes), confusion_matrix, {"labels": labels}, index=np.ix_(labels, labels)) labels = [1] - _test(ConfusionMatrix(num_classes), confusion_matrix, {'labels': labels}, index=np.ix_(labels, labels)) + _test(ConfusionMatrix(num_classes), confusion_matrix, {"labels": labels}, index=np.ix_(labels, labels)) class DummyMetric2(Metric): - @reinit__is_reduced def reset(self): pass @@ -514,6 +509,7 @@ def test__sync_all_reduce(): def _test_distrib__sync_all_reduce(device): import torch.distributed as dist + assert dist.is_available() and dist.is_initialized() m = DummyMetric2(device=device) @@ -536,11 +532,10 @@ def _test_distrib_sync_all_reduce_decorator(device): from ignite.metrics.metric import sync_all_reduce, reinit__is_reduced class DummyMetric(Metric): - @reinit__is_reduced def reset(self): self.a = torch.tensor([0.0, 1.0, 2.0, 3.0], device=self._device, requires_grad=False) - self.a_nocomp = self.a.clone().to('cpu') + self.a_nocomp = self.a.clone().to("cpu") self.b = torch.tensor(1.0, dtype=torch.float64, device=self._device, requires_grad=False) self.b_nocomp = self.b.clone().to("cpu") self.c = 0.0 @@ -587,7 +582,7 @@ def test_distrib_cpu(distributed_context_single_node_gloo): @pytest.mark.multinode_distributed -@pytest.mark.skipif('MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_cpu(distributed_context_multi_node_gloo): device = "cpu" _test_distrib__sync_all_reduce(device) @@ -595,8 +590,8 @@ def test_multinode_distrib_cpu(distributed_context_multi_node_gloo): @pytest.mark.multinode_distributed -@pytest.mark.skipif('GPU_MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_gpu(distributed_context_multi_node_nccl): - device = "cuda:{}".format(distributed_context_multi_node_nccl['local_rank']) + device = "cuda:{}".format(distributed_context_multi_node_nccl["local_rank"]) _test_distrib__sync_all_reduce(device) _test_distrib_sync_all_reduce_decorator(device) diff --git a/tests/ignite/metrics/test_metrics_lambda.py b/tests/ignite/metrics/test_metrics_lambda.py index bc36e85349ae..76490a557382 100644 --- a/tests/ignite/metrics/test_metrics_lambda.py +++ b/tests/ignite/metrics/test_metrics_lambda.py @@ -13,7 +13,6 @@ class ListGatherMetric(Metric): - def __init__(self, index): super(ListGatherMetric, self).__init__() self.index = index @@ -43,15 +42,15 @@ def plus(this, other): m0_plus_m1 = MetricsLambda(plus, m0, other=m1) m2_plus_2 = MetricsLambda(plus, m2, 2) - m0_plus_m1.attach(engine, 'm0_plus_m1') - m2_plus_2.attach(engine, 'm2_plus_2') + m0_plus_m1.attach(engine, "m0_plus_m1") + m2_plus_2.attach(engine, "m2_plus_2") engine.run([[1, 10, 100]]) - assert engine.state.metrics['m0_plus_m1'] == 11 - assert engine.state.metrics['m2_plus_2'] == 102 + assert engine.state.metrics["m0_plus_m1"] == 11 + assert engine.state.metrics["m2_plus_2"] == 102 
engine.run([[2, 20, 200]]) - assert engine.state.metrics['m0_plus_m1'] == 22 - assert engine.state.metrics['m2_plus_2'] == 202 + assert engine.state.metrics["m0_plus_m1"] == 22 + assert engine.state.metrics["m2_plus_2"] == 202 def test_metrics_lambda_reset(): @@ -125,14 +124,14 @@ def Fbeta(r, p, beta): precision_true = precision_score(y_true, np.argmax(y_pred, axis=-1), average=None) recall_true = recall_score(y_true, np.argmax(y_pred, axis=-1), average=None) - f1_true = f1_score(y_true, np.argmax(y_pred, axis=-1), average='macro') + f1_true = f1_score(y_true, np.argmax(y_pred, axis=-1), average="macro") - precision = state.metrics['precision'].numpy() - recall = state.metrics['recall'].numpy() + precision = state.metrics["precision"].numpy() + recall = state.metrics["recall"].numpy() assert precision_true == approx(precision), "{} vs {}".format(precision_true, precision) assert recall_true == approx(recall), "{} vs {}".format(recall_true, recall) - assert f1_true == approx(state.metrics['f1']), "{} vs {}".format(f1_true, state.metrics['f1']) + assert f1_true == approx(state.metrics["f1"]), "{} vs {}".format(f1_true, state.metrics["f1"]) def test_integration_ingredients_not_attached(): @@ -172,8 +171,8 @@ def Fbeta(r, p, beta): data = list(range(n_iters)) state = evaluator.run(data, max_epochs=1) - f1_true = f1_score(y_true, np.argmax(y_pred, axis=-1), average='macro') - assert f1_true == approx(state.metrics['f1']), "{} vs {}".format(f1_true, state.metrics['f1']) + f1_true = f1_score(y_true, np.argmax(y_pred, axis=-1), average="macro") + assert f1_true == approx(state.metrics["f1"]), "{} vs {}".format(f1_true, state.metrics["f1"]) def test_state_metrics(): @@ -235,7 +234,6 @@ def data(y_pred, y): def test_recursive_attachment(): - def _test(composed_metric, metric_name, compute_true_value_fn): metrics = { @@ -261,7 +259,7 @@ def data(y_pred, y): d = data(y_pred, y) state = validator.run(d, max_epochs=1, epoch_length=y_pred.shape[0]) - assert set(state.metrics.keys()) == set([metric_name, ]) + assert set(state.metrics.keys()) == set([metric_name,]) np_y_pred = y_pred.numpy().ravel() np_y = y.numpy().ravel() assert state.metrics[metric_name] == approx(compute_true_value_fn(np_y_pred, np_y)) @@ -303,6 +301,7 @@ def compute_true_somemetric(y_pred, y): def _test_distrib_integration(device): import torch.distributed as dist + rank = dist.get_rank() np.random.seed(12) @@ -345,11 +344,11 @@ def Fbeta(r, p, beta): data = list(range(n_iters)) state = evaluator.run(data, max_epochs=1) - assert 'f1' in state.metrics - assert 'ff1' in state.metrics - f1_true = f1_score(y_true.ravel(), np.argmax(y_pred.reshape(-1, n_classes), axis=-1), average='macro') - assert f1_true == approx(state.metrics['f1']) - assert 1.0 + f1_true == approx(state.metrics['ff1']) + assert "f1" in state.metrics + assert "ff1" in state.metrics + f1_true = f1_score(y_true.ravel(), np.argmax(y_pred.reshape(-1, n_classes), axis=-1), average="macro") + assert f1_true == approx(state.metrics["f1"]) + assert 1.0 + f1_true == approx(state.metrics["ff1"]) for _ in range(5): _test() @@ -371,14 +370,14 @@ def test_distrib_cpu(local_rank, distributed_context_single_node_gloo): @pytest.mark.multinode_distributed -@pytest.mark.skipif('MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_cpu(distributed_context_multi_node_gloo): device = "cpu" _test_distrib_integration(device) 
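# --- Illustrative aside (not part of the patch): the composition pattern exercised in test_metrics_lambda.py ---
# Arithmetic on Metric objects yields MetricsLambda instances, e.g. (sketch, assuming the usual ignite imports):
#   precision, recall = Precision(average=False), Recall(average=False)
#   F1 = (precision * recall * 2 / (precision + recall + 1e-20)).mean()
#   F1.attach(engine, "f1")
# Attaching the composed metric also attaches its ingredient metrics, which is what
# test_integration_ingredients_not_attached above relies on.
# --- end aside ---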
@pytest.mark.multinode_distributed -@pytest.mark.skipif('GPU_MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_gpu(distributed_context_multi_node_nccl): - device = "cuda:{}".format(distributed_context_multi_node_nccl['local_rank']) + device = "cuda:{}".format(distributed_context_multi_node_nccl["local_rank"]) _test_distrib_integration(device) diff --git a/tests/ignite/metrics/test_precision.py b/tests/ignite/metrics/test_precision.py index 94489b47abf1..f18c834856f5 100644 --- a/tests/ignite/metrics/test_precision.py +++ b/tests/ignite/metrics/test_precision.py @@ -29,28 +29,23 @@ def test_binary_wrong_inputs(): with pytest.raises(ValueError): # y has not only 0 or 1 values - pr.update((torch.randint(0, 2, size=(10,)).long(), - torch.arange(0, 10).long())) + pr.update((torch.randint(0, 2, size=(10,)).long(), torch.arange(0, 10).long())) with pytest.raises(ValueError): # y_pred values are not thresholded to 0, 1 values - pr.update((torch.rand(10,), - torch.randint(0, 2, size=(10,)).long())) + pr.update((torch.rand(10,), torch.randint(0, 2, size=(10,)).long())) with pytest.raises(ValueError): # incompatible shapes - pr.update((torch.randint(0, 2, size=(10,)).long(), - torch.randint(0, 2, size=(10, 5)).long())) + pr.update((torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10, 5)).long())) with pytest.raises(ValueError): # incompatible shapes - pr.update((torch.randint(0, 2, size=(10, 5, 6)).long(), - torch.randint(0, 2, size=(10,)).long())) + pr.update((torch.randint(0, 2, size=(10, 5, 6)).long(), torch.randint(0, 2, size=(10,)).long())) with pytest.raises(ValueError): # incompatible shapes - pr.update((torch.randint(0, 2, size=(10,)).long(), - torch.randint(0, 2, size=(10, 5, 6)).long())) + pr.update((torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10, 5, 6)).long())) def test_binary_input_N(): @@ -63,10 +58,10 @@ def _test(average): pr.update((y_pred, y)) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert pr._type == 'binary' + assert pr._type == "binary" assert isinstance(pr.compute(), float if average else torch.Tensor) pr_compute = pr.compute() if average else pr.compute().numpy() - assert precision_score(np_y, np_y_pred, average='binary') == pytest.approx(pr_compute) + assert precision_score(np_y, np_y_pred, average="binary") == pytest.approx(pr_compute) pr.reset() y_pred = torch.randint(0, 2, size=(10,)) @@ -74,10 +69,10 @@ def _test(average): pr.update((y_pred, y)) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert pr._type == 'binary' + assert pr._type == "binary" assert isinstance(pr.compute(), float if average else torch.Tensor) pr_compute = pr.compute() if average else pr.compute().numpy() - assert precision_score(np_y, np_y_pred, average='binary') == pytest.approx(pr_compute) + assert precision_score(np_y, np_y_pred, average="binary") == pytest.approx(pr_compute) pr.reset() y_pred = torch.Tensor([0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.51]) @@ -86,10 +81,10 @@ def _test(average): pr.update((y_pred, y)) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert pr._type == 'binary' + assert pr._type == "binary" assert isinstance(pr.compute(), float if average else torch.Tensor) pr_compute = pr.compute() if average else pr.compute().numpy() - assert precision_score(np_y, np_y_pred, average='binary') == pytest.approx(pr_compute) + 
assert precision_score(np_y, np_y_pred, average="binary") == pytest.approx(pr_compute) # Batched Updates pr.reset() @@ -101,14 +96,14 @@ def _test(average): for i in range(n_iters): idx = i * batch_size - pr.update((y_pred[idx:idx + batch_size], y[idx:idx + batch_size])) + pr.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert pr._type == 'binary' + assert pr._type == "binary" assert isinstance(pr.compute(), float if average else torch.Tensor) pr_compute = pr.compute() if average else pr.compute().numpy() - assert precision_score(np_y, np_y_pred, average='binary') == pytest.approx(pr_compute) + assert precision_score(np_y, np_y_pred, average="binary") == pytest.approx(pr_compute) for _ in range(5): _test(average=True) @@ -126,10 +121,10 @@ def _test(average): pr.update((y_pred, y)) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert pr._type == 'binary' + assert pr._type == "binary" assert isinstance(pr.compute(), float if average else torch.Tensor) pr_compute = pr.compute() if average else pr.compute().numpy() - assert precision_score(np_y, np_y_pred, average='binary') == pytest.approx(pr_compute) + assert precision_score(np_y, np_y_pred, average="binary") == pytest.approx(pr_compute) pr.reset() y_pred = torch.randint(0, 2, size=(10, 1, 5)) @@ -137,10 +132,10 @@ def _test(average): pr.update((y_pred, y)) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert pr._type == 'binary' + assert pr._type == "binary" assert isinstance(pr.compute(), float if average else torch.Tensor) pr_compute = pr.compute() if average else pr.compute().numpy() - assert precision_score(np_y, np_y_pred, average='binary') == pytest.approx(pr_compute) + assert precision_score(np_y, np_y_pred, average="binary") == pytest.approx(pr_compute) # Batched Updates pr.reset() @@ -152,14 +147,14 @@ def _test(average): for i in range(n_iters): idx = i * batch_size - pr.update((y_pred[idx:idx + batch_size], y[idx:idx + batch_size])) + pr.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert pr._type == 'binary' + assert pr._type == "binary" assert isinstance(pr.compute(), float if average else torch.Tensor) pr_compute = pr.compute() if average else pr.compute().numpy() - assert precision_score(np_y, np_y_pred, average='binary') == pytest.approx(pr_compute) + assert precision_score(np_y, np_y_pred, average="binary") == pytest.approx(pr_compute) for _ in range(5): _test(average=True) @@ -177,10 +172,10 @@ def _test(average): pr.update((y_pred, y)) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert pr._type == 'binary' + assert pr._type == "binary" assert isinstance(pr.compute(), float if average else torch.Tensor) pr_compute = pr.compute() if average else pr.compute().numpy() - assert precision_score(np_y, np_y_pred, average='binary') == pytest.approx(pr_compute) + assert precision_score(np_y, np_y_pred, average="binary") == pytest.approx(pr_compute) pr.reset() y_pred = torch.randint(0, 2, size=(10, 1, 12, 10)) @@ -188,10 +183,10 @@ def _test(average): pr.update((y_pred, y)) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert pr._type == 'binary' + assert pr._type == "binary" assert isinstance(pr.compute(), float if average else torch.Tensor) pr_compute = pr.compute() if average else pr.compute().numpy() - assert precision_score(np_y, np_y_pred, average='binary') == pytest.approx(pr_compute) + assert 
precision_score(np_y, np_y_pred, average="binary") == pytest.approx(pr_compute) # Batched Updates pr.reset() @@ -203,14 +198,14 @@ def _test(average): for i in range(n_iters): idx = i * batch_size - pr.update((y_pred[idx:idx + batch_size], y[idx:idx + batch_size])) + pr.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert pr._type == 'binary' + assert pr._type == "binary" assert isinstance(pr.compute(), float if average else torch.Tensor) pr_compute = pr.compute() if average else pr.compute().numpy() - assert precision_score(np_y, np_y_pred, average='binary') == pytest.approx(pr_compute) + assert precision_score(np_y, np_y_pred, average="binary") == pytest.approx(pr_compute) for _ in range(5): _test(average=True) @@ -268,10 +263,10 @@ def _test(average): num_classes = y_pred.shape[1] np_y_pred = y_pred.argmax(dim=1).numpy().ravel() np_y = y.numpy().ravel() - assert pr._type == 'multiclass' + assert pr._type == "multiclass" assert isinstance(pr.compute(), float if average else torch.Tensor) pr_compute = pr.compute() if average else pr.compute().numpy() - sk_average_parameter = 'macro' if average else None + sk_average_parameter = "macro" if average else None with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) sk_compute = precision_score(np_y, np_y_pred, labels=range(0, num_classes), average=sk_average_parameter) @@ -284,10 +279,10 @@ def _test(average): num_classes = y_pred.shape[1] np_y_pred = y_pred.argmax(dim=1).numpy().ravel() np_y = y.numpy().ravel() - assert pr._type == 'multiclass' + assert pr._type == "multiclass" assert isinstance(pr.compute(), float if average else torch.Tensor) pr_compute = pr.compute() if average else pr.compute().numpy() - sk_average_parameter = 'macro' if average else None + sk_average_parameter = "macro" if average else None with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) sk_compute = precision_score(np_y, np_y_pred, labels=range(0, num_classes), average=sk_average_parameter) @@ -301,10 +296,10 @@ def _test(average): num_classes = y_pred.shape[1] np_y_pred = y_pred.argmax(dim=1).numpy().ravel() np_y = y.numpy().ravel() - assert pr._type == 'multiclass' + assert pr._type == "multiclass" assert isinstance(pr.compute(), float if average else torch.Tensor) pr_compute = pr.compute() if average else pr.compute().numpy() - sk_average_parameter = 'macro' if average else None + sk_average_parameter = "macro" if average else None with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) sk_compute = precision_score(np_y, np_y_pred, labels=range(0, num_classes), average=sk_average_parameter) @@ -320,15 +315,15 @@ def _test(average): for i in range(n_iters): idx = i * batch_size - pr.update((y_pred[idx:idx + batch_size], y[idx:idx + batch_size])) + pr.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) num_classes = y_pred.shape[1] np_y = y.numpy().ravel() np_y_pred = y_pred.argmax(dim=1).numpy().ravel() - assert pr._type == 'multiclass' + assert pr._type == "multiclass" assert isinstance(pr.compute(), float if average else torch.Tensor) pr_compute = pr.compute() if average else pr.compute().numpy() - sk_average_parameter = 'macro' if average else None + sk_average_parameter = "macro" if average else None with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) sk_compute = precision_score(np_y, 
np_y_pred, labels=range(0, num_classes), average=sk_average_parameter) @@ -351,10 +346,10 @@ def _test(average): num_classes = y_pred.shape[1] np_y_pred = y_pred.argmax(dim=1).numpy().ravel() np_y = y.numpy().ravel() - assert pr._type == 'multiclass' + assert pr._type == "multiclass" assert isinstance(pr.compute(), float if average else torch.Tensor) pr_compute = pr.compute() if average else pr.compute().numpy() - sk_average_parameter = 'macro' if average else None + sk_average_parameter = "macro" if average else None with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) sk_compute = precision_score(np_y, np_y_pred, labels=range(0, num_classes), average=sk_average_parameter) @@ -367,10 +362,10 @@ def _test(average): num_classes = y_pred.shape[1] np_y_pred = y_pred.argmax(dim=1).numpy().ravel() np_y = y.numpy().ravel() - assert pr._type == 'multiclass' + assert pr._type == "multiclass" assert isinstance(pr.compute(), float if average else torch.Tensor) pr_compute = pr.compute() if average else pr.compute().numpy() - sk_average_parameter = 'macro' if average else None + sk_average_parameter = "macro" if average else None with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) sk_compute = precision_score(np_y, np_y_pred, labels=range(0, num_classes), average=sk_average_parameter) @@ -386,15 +381,15 @@ def _test(average): for i in range(n_iters): idx = i * batch_size - pr.update((y_pred[idx:idx + batch_size], y[idx:idx + batch_size])) + pr.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) num_classes = y_pred.shape[1] np_y = y.numpy().ravel() np_y_pred = y_pred.argmax(dim=1).numpy().ravel() - assert pr._type == 'multiclass' + assert pr._type == "multiclass" assert isinstance(pr.compute(), float if average else torch.Tensor) pr_compute = pr.compute() if average else pr.compute().numpy() - sk_average_parameter = 'macro' if average else None + sk_average_parameter = "macro" if average else None with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) sk_compute = precision_score(np_y, np_y_pred, labels=range(0, num_classes), average=sk_average_parameter) @@ -417,10 +412,10 @@ def _test(average): num_classes = y_pred.shape[1] np_y_pred = y_pred.argmax(dim=1).numpy().ravel() np_y = y.numpy().ravel() - assert pr._type == 'multiclass' + assert pr._type == "multiclass" assert isinstance(pr.compute(), float if average else torch.Tensor) pr_compute = pr.compute() if average else pr.compute().numpy() - sk_average_parameter = 'macro' if average else None + sk_average_parameter = "macro" if average else None with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) sk_compute = precision_score(np_y, np_y_pred, labels=range(0, num_classes), average=sk_average_parameter) @@ -433,10 +428,10 @@ def _test(average): num_classes = y_pred.shape[1] np_y_pred = y_pred.argmax(dim=1).numpy().ravel() np_y = y.numpy().ravel() - assert pr._type == 'multiclass' + assert pr._type == "multiclass" assert isinstance(pr.compute(), float if average else torch.Tensor) pr_compute = pr.compute() if average else pr.compute().numpy() - sk_average_parameter = 'macro' if average else None + sk_average_parameter = "macro" if average else None with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) sk_compute = precision_score(np_y, np_y_pred, labels=range(0, num_classes), average=sk_average_parameter) @@ -452,15 
+447,15 @@ def _test(average): for i in range(n_iters): idx = i * batch_size - pr.update((y_pred[idx:idx + batch_size], y[idx:idx + batch_size])) + pr.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) num_classes = y_pred.shape[1] np_y = y.numpy().ravel() np_y_pred = y_pred.argmax(dim=1).numpy().ravel() - assert pr._type == 'multiclass' + assert pr._type == "multiclass" assert isinstance(pr.compute(), float if average else torch.Tensor) pr_compute = pr.compute() if average else pr.compute().numpy() - sk_average_parameter = 'macro' if average else None + sk_average_parameter = "macro" if average else None with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) sk_compute = precision_score(np_y, np_y_pred, labels=range(0, num_classes), average=sk_average_parameter) @@ -501,7 +496,6 @@ def to_numpy_multilabel(y): def test_multilabel_input_NC(): - def _test(average): pr = Precision(average=average, is_multilabel=True) @@ -510,11 +504,11 @@ def _test(average): pr.update((y_pred, y)) np_y_pred = y_pred.numpy() np_y = y.numpy() - assert pr._type == 'multilabel' + assert pr._type == "multilabel" pr_compute = pr.compute() if average else pr.compute().mean().item() with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) - assert precision_score(np_y, np_y_pred, average='samples') == pytest.approx(pr_compute) + assert precision_score(np_y, np_y_pred, average="samples") == pytest.approx(pr_compute) pr.reset() y_pred = torch.randint(0, 2, size=(10, 4)) @@ -522,11 +516,11 @@ def _test(average): pr.update((y_pred, y)) np_y_pred = y_pred.numpy() np_y = y.numpy() - assert pr._type == 'multilabel' + assert pr._type == "multilabel" pr_compute = pr.compute() if average else pr.compute().mean().item() with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) - assert precision_score(np_y, np_y_pred, average='samples') == pytest.approx(pr_compute) + assert precision_score(np_y, np_y_pred, average="samples") == pytest.approx(pr_compute) # Batched Updates pr.reset() @@ -538,15 +532,15 @@ def _test(average): for i in range(n_iters): idx = i * batch_size - pr.update((y_pred[idx:idx + batch_size], y[idx:idx + batch_size])) + pr.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) np_y = y.numpy() np_y_pred = y_pred.numpy() - assert pr._type == 'multilabel' + assert pr._type == "multilabel" pr_compute = pr.compute() if average else pr.compute().mean().item() with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) - assert precision_score(np_y, np_y_pred, average='samples') == pytest.approx(pr_compute) + assert precision_score(np_y, np_y_pred, average="samples") == pytest.approx(pr_compute) for _ in range(5): _test(average=True) @@ -562,7 +556,6 @@ def _test(average): def test_multilabel_input_NCL(): - def _test(average): pr = Precision(average=average, is_multilabel=True) @@ -571,11 +564,11 @@ def _test(average): pr.update((y_pred, y)) np_y_pred = to_numpy_multilabel(y_pred) np_y = to_numpy_multilabel(y) - assert pr._type == 'multilabel' + assert pr._type == "multilabel" pr_compute = pr.compute() if average else pr.compute().mean().item() with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) - assert precision_score(np_y, np_y_pred, average='samples') == pytest.approx(pr_compute) + assert precision_score(np_y, np_y_pred, average="samples") == pytest.approx(pr_compute) pr.reset() 
y_pred = torch.randint(0, 2, size=(15, 4, 10)) @@ -583,11 +576,11 @@ def _test(average): pr.update((y_pred, y)) np_y_pred = to_numpy_multilabel(y_pred) np_y = to_numpy_multilabel(y) - assert pr._type == 'multilabel' + assert pr._type == "multilabel" pr_compute = pr.compute() if average else pr.compute().mean().item() with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) - assert precision_score(np_y, np_y_pred, average='samples') == pytest.approx(pr_compute) + assert precision_score(np_y, np_y_pred, average="samples") == pytest.approx(pr_compute) # Batched Updates pr.reset() @@ -599,15 +592,15 @@ def _test(average): for i in range(n_iters): idx = i * batch_size - pr.update((y_pred[idx:idx + batch_size], y[idx:idx + batch_size])) + pr.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) np_y = to_numpy_multilabel(y) np_y_pred = to_numpy_multilabel(y_pred) - assert pr._type == 'multilabel' + assert pr._type == "multilabel" pr_compute = pr.compute() if average else pr.compute().mean().item() with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) - assert precision_score(np_y, np_y_pred, average='samples') == pytest.approx(pr_compute) + assert precision_score(np_y, np_y_pred, average="samples") == pytest.approx(pr_compute) for _ in range(5): _test(average=True) @@ -623,7 +616,6 @@ def _test(average): def test_multilabel_input_NCHW(): - def _test(average): pr = Precision(average=average, is_multilabel=True) @@ -632,11 +624,11 @@ def _test(average): pr.update((y_pred, y)) np_y_pred = to_numpy_multilabel(y_pred) np_y = to_numpy_multilabel(y) - assert pr._type == 'multilabel' + assert pr._type == "multilabel" pr_compute = pr.compute() if average else pr.compute().mean().item() with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) - assert precision_score(np_y, np_y_pred, average='samples') == pytest.approx(pr_compute) + assert precision_score(np_y, np_y_pred, average="samples") == pytest.approx(pr_compute) pr.reset() y_pred = torch.randint(0, 2, size=(10, 4, 20, 23)) @@ -644,11 +636,11 @@ def _test(average): pr.update((y_pred, y)) np_y_pred = to_numpy_multilabel(y_pred) np_y = to_numpy_multilabel(y) - assert pr._type == 'multilabel' + assert pr._type == "multilabel" pr_compute = pr.compute() if average else pr.compute().mean().item() with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) - assert precision_score(np_y, np_y_pred, average='samples') == pytest.approx(pr_compute) + assert precision_score(np_y, np_y_pred, average="samples") == pytest.approx(pr_compute) # Batched Updates pr.reset() @@ -660,15 +652,15 @@ def _test(average): for i in range(n_iters): idx = i * batch_size - pr.update((y_pred[idx:idx + batch_size], y[idx:idx + batch_size])) + pr.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) np_y = to_numpy_multilabel(y) np_y_pred = to_numpy_multilabel(y_pred) - assert pr._type == 'multilabel' + assert pr._type == "multilabel" pr_compute = pr.compute() if average else pr.compute().mean().item() with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) - assert precision_score(np_y, np_y_pred, average='samples') == pytest.approx(pr_compute) + assert precision_score(np_y, np_y_pred, average="samples") == pytest.approx(pr_compute) for _ in range(5): _test(average=True) @@ -712,7 +704,6 @@ def _test(average): def test_incorrect_y_classes(): - def 
_test(average): pr = Precision(average=average) @@ -739,12 +730,14 @@ def _test(average, n_epochs): n_classes = 7 offset = n_iters * s - y_true = torch.randint(0, n_classes, size=(offset * dist.get_world_size(), )).to(device) + y_true = torch.randint(0, n_classes, size=(offset * dist.get_world_size(),)).to(device) y_preds = torch.rand(offset * dist.get_world_size(), n_classes).to(device) def update(engine, i): - return y_preds[i * s + rank * offset:(i + 1) * s + rank * offset, :], \ - y_true[i * s + rank * offset:(i + 1) * s + rank * offset] + return ( + y_preds[i * s + rank * offset : (i + 1) * s + rank * offset, :], + y_true[i * s + rank * offset : (i + 1) * s + rank * offset], + ) engine = Engine(update) @@ -755,12 +748,13 @@ def update(engine, i): engine.run(data=data, max_epochs=n_epochs) assert "pr" in engine.state.metrics - res = engine.state.metrics['pr'] + res = engine.state.metrics["pr"] if isinstance(res, torch.Tensor): res = res.cpu().numpy() - true_res = precision_score(y_true.cpu().numpy(), torch.argmax(y_preds, dim=1).cpu().numpy(), - average='macro' if average else None) + true_res = precision_score( + y_true.cpu().numpy(), torch.argmax(y_preds, dim=1).cpu().numpy(), average="macro" if average else None + ) assert pytest.approx(res) == true_res @@ -789,8 +783,10 @@ def _test(average, n_epochs): y_preds = torch.randint(0, 2, size=(offset * dist.get_world_size(), n_classes, 6, 8)).to(device) def update(engine, i): - return y_preds[i * s + rank * offset:(i + 1) * s + rank * offset, ...], \ - y_true[i * s + rank * offset:(i + 1) * s + rank * offset, ...] + return ( + y_preds[i * s + rank * offset : (i + 1) * s + rank * offset, ...], + y_true[i * s + rank * offset : (i + 1) * s + rank * offset, ...], + ) engine = Engine(update) @@ -801,7 +797,7 @@ def update(engine, i): engine.run(data=data, max_epochs=n_epochs) assert "pr" in engine.state.metrics - res = engine.state.metrics['pr'] + res = engine.state.metrics["pr"] res2 = pr.compute() if isinstance(res, torch.Tensor): res = res.cpu().numpy() @@ -812,9 +808,9 @@ def update(engine, i): with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) - true_res = precision_score(to_numpy_multilabel(y_true), - to_numpy_multilabel(y_preds), - average='samples' if average else None) + true_res = precision_score( + to_numpy_multilabel(y_true), to_numpy_multilabel(y_preds), average="samples" if average else None + ) assert pytest.approx(res) == true_res @@ -822,8 +818,11 @@ def update(engine, i): _test(average=True, n_epochs=1) _test(average=True, n_epochs=2) - with pytest.warns(RuntimeWarning, match="Precision/Recall metrics do not work in distributed setting when " - "average=False and is_multilabel=True"): + with pytest.warns( + RuntimeWarning, + match="Precision/Recall metrics do not work in distributed setting when " + "average=False and is_multilabel=True", + ): pr = Precision(average=False, is_multilabel=True, device=device) y_pred = torch.randint(0, 2, size=(4, 3, 6, 8)) @@ -851,7 +850,7 @@ def test_distrib_cpu(local_rank, distributed_context_single_node_gloo): @pytest.mark.multinode_distributed -@pytest.mark.skipif('MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_cpu(distributed_context_multi_node_gloo): device = "cpu" _test_distrib_itegration_multiclass(device) @@ -859,8 +858,8 @@ def 
test_multinode_distrib_cpu(distributed_context_multi_node_gloo): @pytest.mark.multinode_distributed -@pytest.mark.skipif('GPU_MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_gpu(distributed_context_multi_node_nccl): - device = "cuda:{}".format(distributed_context_multi_node_nccl['local_rank']) + device = "cuda:{}".format(distributed_context_multi_node_nccl["local_rank"]) _test_distrib_itegration_multiclass(device) _test_distrib_itegration_multilabel(device) diff --git a/tests/ignite/metrics/test_recall.py b/tests/ignite/metrics/test_recall.py index 3940d97970aa..d43459d6c9d7 100644 --- a/tests/ignite/metrics/test_recall.py +++ b/tests/ignite/metrics/test_recall.py @@ -29,28 +29,23 @@ def test_binary_wrong_inputs(): with pytest.raises(ValueError): # y has not only 0 or 1 values - re.update((torch.randint(0, 2, size=(10,)), - torch.arange(0, 10).long())) + re.update((torch.randint(0, 2, size=(10,)), torch.arange(0, 10).long())) with pytest.raises(ValueError): # y_pred values are not thresholded to 0, 1 values - re.update((torch.rand(10, 1), - torch.randint(0, 2, size=(10,)).long())) + re.update((torch.rand(10, 1), torch.randint(0, 2, size=(10,)).long())) with pytest.raises(ValueError): # incompatible shapes - re.update((torch.randint(0, 2, size=(10,)), - torch.randint(0, 2, size=(10, 5)).long())) + re.update((torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10, 5)).long())) with pytest.raises(ValueError): # incompatible shapes - re.update((torch.randint(0, 2, size=(10, 5, 6)), - torch.randint(0, 2, size=(10,)).long())) + re.update((torch.randint(0, 2, size=(10, 5, 6)), torch.randint(0, 2, size=(10,)).long())) with pytest.raises(ValueError): # incompatible shapes - re.update((torch.randint(0, 2, size=(10,)), - torch.randint(0, 2, size=(10, 5, 6)).long())) + re.update((torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10, 5, 6)).long())) def test_binary_input_N(): @@ -63,10 +58,10 @@ def _test(average): re.update((y_pred, y)) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert re._type == 'binary' + assert re._type == "binary" assert isinstance(re.compute(), float if average else torch.Tensor) re_compute = re.compute() if average else re.compute().numpy() - assert recall_score(np_y, np_y_pred, average='binary') == pytest.approx(re_compute) + assert recall_score(np_y, np_y_pred, average="binary") == pytest.approx(re_compute) re.reset() y_pred = torch.randint(0, 2, size=(10,)) @@ -74,10 +69,10 @@ def _test(average): re.update((y_pred, y)) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert re._type == 'binary' + assert re._type == "binary" assert isinstance(re.compute(), float if average else torch.Tensor) re_compute = re.compute() if average else re.compute().numpy() - assert recall_score(np_y, np_y_pred, average='binary') == pytest.approx(re_compute) + assert recall_score(np_y, np_y_pred, average="binary") == pytest.approx(re_compute) re.reset() y_pred = torch.Tensor([0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.51]) @@ -86,10 +81,10 @@ def _test(average): re.update((y_pred, y)) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert re._type == 'binary' + assert re._type == "binary" assert isinstance(re.compute(), float if average else torch.Tensor) re_compute = re.compute() if average else re.compute().numpy() - assert recall_score(np_y, np_y_pred, 
average='binary') == pytest.approx(re_compute) + assert recall_score(np_y, np_y_pred, average="binary") == pytest.approx(re_compute) # Batched Updates re.reset() @@ -101,14 +96,14 @@ def _test(average): for i in range(n_iters): idx = i * batch_size - re.update((y_pred[idx: idx + batch_size], y[idx: idx + batch_size])) + re.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert re._type == 'binary' + assert re._type == "binary" assert isinstance(re.compute(), float if average else torch.Tensor) re_compute = re.compute() if average else re.compute().numpy() - assert recall_score(np_y, np_y_pred, average='binary') == pytest.approx(re_compute) + assert recall_score(np_y, np_y_pred, average="binary") == pytest.approx(re_compute) for _ in range(5): _test(average=True) @@ -126,10 +121,10 @@ def _test(average): re.update((y_pred, y)) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert re._type == 'binary' + assert re._type == "binary" assert isinstance(re.compute(), float if average else torch.Tensor) re_compute = re.compute() if average else re.compute().numpy() - assert recall_score(np_y, np_y_pred, average='binary') == pytest.approx(re_compute) + assert recall_score(np_y, np_y_pred, average="binary") == pytest.approx(re_compute) re.reset() y_pred = torch.randint(0, 2, size=(10, 1, 5)) @@ -137,10 +132,10 @@ def _test(average): re.update((y_pred, y)) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert re._type == 'binary' + assert re._type == "binary" assert isinstance(re.compute(), float if average else torch.Tensor) re_compute = re.compute() if average else re.compute().numpy() - assert recall_score(np_y, np_y_pred, average='binary') == pytest.approx(re_compute) + assert recall_score(np_y, np_y_pred, average="binary") == pytest.approx(re_compute) # Batched Updates re.reset() @@ -152,14 +147,14 @@ def _test(average): for i in range(n_iters): idx = i * batch_size - re.update((y_pred[idx:idx + batch_size], y[idx:idx + batch_size])) + re.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert re._type == 'binary' + assert re._type == "binary" assert isinstance(re.compute(), float if average else torch.Tensor) re_compute = re.compute() if average else re.compute().numpy() - assert recall_score(np_y, np_y_pred, average='binary') == pytest.approx(re_compute) + assert recall_score(np_y, np_y_pred, average="binary") == pytest.approx(re_compute) for _ in range(5): _test(average=True) @@ -177,10 +172,10 @@ def _test(average): re.update((y_pred, y)) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert re._type == 'binary' + assert re._type == "binary" assert isinstance(re.compute(), float if average else torch.Tensor) re_compute = re.compute() if average else re.compute().numpy() - assert recall_score(np_y, np_y_pred, average='binary') == pytest.approx(re_compute) + assert recall_score(np_y, np_y_pred, average="binary") == pytest.approx(re_compute) re.reset() y_pred = torch.randint(0, 2, size=(10, 1, 12, 10)) @@ -188,10 +183,10 @@ def _test(average): re.update((y_pred, y)) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert re._type == 'binary' + assert re._type == "binary" assert isinstance(re.compute(), float if average else torch.Tensor) re_compute = re.compute() if average else re.compute().numpy() - assert recall_score(np_y, np_y_pred, average='binary') == 
pytest.approx(re_compute) + assert recall_score(np_y, np_y_pred, average="binary") == pytest.approx(re_compute) # Batched Updates re.reset() @@ -203,14 +198,14 @@ def _test(average): for i in range(n_iters): idx = i * batch_size - re.update((y_pred[idx:idx + batch_size], y[idx:idx + batch_size])) + re.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) np_y = y.numpy().ravel() np_y_pred = y_pred.numpy().ravel() - assert re._type == 'binary' + assert re._type == "binary" assert isinstance(re.compute(), float if average else torch.Tensor) re_compute = re.compute() if average else re.compute().numpy() - assert recall_score(np_y, np_y_pred, average='binary') == pytest.approx(re_compute) + assert recall_score(np_y, np_y_pred, average="binary") == pytest.approx(re_compute) for _ in range(5): _test(average=True) @@ -268,10 +263,10 @@ def _test(average): num_classes = y_pred.shape[1] np_y_pred = y_pred.argmax(dim=1).numpy().ravel() np_y = y.numpy().ravel() - assert re._type == 'multiclass' + assert re._type == "multiclass" assert isinstance(re.compute(), float if average else torch.Tensor) re_compute = re.compute() if average else re.compute().numpy() - sk_average_parameter = 'macro' if average else None + sk_average_parameter = "macro" if average else None with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) sk_compute = recall_score(np_y, np_y_pred, labels=range(0, num_classes), average=sk_average_parameter) @@ -284,10 +279,10 @@ def _test(average): num_classes = y_pred.shape[1] np_y_pred = y_pred.argmax(dim=1).numpy().ravel() np_y = y.numpy().ravel() - assert re._type == 'multiclass' + assert re._type == "multiclass" assert isinstance(re.compute(), float if average else torch.Tensor) re_compute = re.compute() if average else re.compute().numpy() - sk_average_parameter = 'macro' if average else None + sk_average_parameter = "macro" if average else None with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) sk_compute = recall_score(np_y, np_y_pred, labels=range(0, num_classes), average=sk_average_parameter) @@ -301,10 +296,10 @@ def _test(average): num_classes = y_pred.shape[1] np_y_pred = y_pred.argmax(dim=1).numpy().ravel() np_y = y.numpy().ravel() - assert re._type == 'multiclass' + assert re._type == "multiclass" assert isinstance(re.compute(), float if average else torch.Tensor) re_compute = re.compute() if average else re.compute().numpy() - sk_average_parameter = 'macro' if average else None + sk_average_parameter = "macro" if average else None with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) sk_compute = recall_score(np_y, np_y_pred, labels=range(0, num_classes), average=sk_average_parameter) @@ -320,15 +315,15 @@ def _test(average): for i in range(n_iters): idx = i * batch_size - re.update((y_pred[idx: idx + batch_size], y[idx: idx + batch_size])) + re.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) num_classes = y_pred.shape[1] np_y = y.numpy().ravel() np_y_pred = y_pred.argmax(dim=1).numpy().ravel() - assert re._type == 'multiclass' + assert re._type == "multiclass" assert isinstance(re.compute(), float if average else torch.Tensor) re_compute = re.compute() if average else re.compute().numpy() - sk_average_parameter = 'macro' if average else None + sk_average_parameter = "macro" if average else None with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) sk_compute = 
recall_score(np_y, np_y_pred, labels=range(0, num_classes), average=sk_average_parameter) @@ -351,10 +346,10 @@ def _test(average): num_classes = y_pred.shape[1] np_y_pred = y_pred.argmax(dim=1).numpy().ravel() np_y = y.numpy().ravel() - assert re._type == 'multiclass' + assert re._type == "multiclass" assert isinstance(re.compute(), float if average else torch.Tensor) re_compute = re.compute() if average else re.compute().numpy() - sk_average_parameter = 'macro' if average else None + sk_average_parameter = "macro" if average else None with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) assert recall_score(np_y, np_y_pred, average=sk_average_parameter) == pytest.approx(re_compute) @@ -366,10 +361,10 @@ def _test(average): num_classes = y_pred.shape[1] np_y_pred = y_pred.argmax(dim=1).numpy().ravel() np_y = y.numpy().ravel() - assert re._type == 'multiclass' + assert re._type == "multiclass" assert isinstance(re.compute(), float if average else torch.Tensor) re_compute = re.compute() if average else re.compute().numpy() - sk_average_parameter = 'macro' if average else None + sk_average_parameter = "macro" if average else None with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) sk_compute = recall_score(np_y, np_y_pred, labels=range(0, num_classes), average=sk_average_parameter) @@ -385,15 +380,15 @@ def _test(average): for i in range(n_iters): idx = i * batch_size - re.update((y_pred[idx:idx + batch_size], y[idx:idx + batch_size])) + re.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) num_classes = y_pred.shape[1] np_y = y.numpy().ravel() np_y_pred = y_pred.argmax(dim=1).numpy().ravel() - assert re._type == 'multiclass' + assert re._type == "multiclass" assert isinstance(re.compute(), float if average else torch.Tensor) re_compute = re.compute() if average else re.compute().numpy() - sk_average_parameter = 'macro' if average else None + sk_average_parameter = "macro" if average else None with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) sk_compute = recall_score(np_y, np_y_pred, labels=range(0, num_classes), average=sk_average_parameter) @@ -416,10 +411,10 @@ def _test(average): num_classes = y_pred.shape[1] np_y_pred = y_pred.argmax(dim=1).numpy().ravel() np_y = y.numpy().ravel() - assert re._type == 'multiclass' + assert re._type == "multiclass" assert isinstance(re.compute(), float if average else torch.Tensor) re_compute = re.compute() if average else re.compute().numpy() - sk_average_parameter = 'macro' if average else None + sk_average_parameter = "macro" if average else None with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) sk_compute = recall_score(np_y, np_y_pred, labels=range(0, num_classes), average=sk_average_parameter) @@ -432,10 +427,10 @@ def _test(average): num_classes = y_pred.shape[1] np_y_pred = y_pred.argmax(dim=1).numpy().ravel() np_y = y.numpy().ravel() - assert re._type == 'multiclass' + assert re._type == "multiclass" assert isinstance(re.compute(), float if average else torch.Tensor) re_compute = re.compute() if average else re.compute().numpy() - sk_average_parameter = 'macro' if average else None + sk_average_parameter = "macro" if average else None with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) sk_compute = recall_score(np_y, np_y_pred, labels=range(0, num_classes), average=sk_average_parameter) @@ -451,15 +446,15 
@@ def _test(average): for i in range(n_iters): idx = i * batch_size - re.update((y_pred[idx:idx + batch_size], y[idx:idx + batch_size])) + re.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) num_classes = y_pred.shape[1] np_y = y.numpy().ravel() np_y_pred = y_pred.argmax(dim=1).numpy().ravel() - assert re._type == 'multiclass' + assert re._type == "multiclass" assert isinstance(re.compute(), float if average else torch.Tensor) re_compute = re.compute() if average else re.compute().numpy() - sk_average_parameter = 'macro' if average else None + sk_average_parameter = "macro" if average else None with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) sk_compute = recall_score(np_y, np_y_pred, labels=range(0, num_classes), average=sk_average_parameter) @@ -500,7 +495,6 @@ def to_numpy_multilabel(y): def test_multilabel_input_NC(): - def _test(average): re = Recall(average=average, is_multilabel=True) @@ -509,11 +503,11 @@ def _test(average): re.update((y_pred, y)) np_y_pred = to_numpy_multilabel(y_pred) np_y = to_numpy_multilabel(y) - assert re._type == 'multilabel' + assert re._type == "multilabel" re_compute = re.compute() if average else re.compute().mean().item() with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) - assert recall_score(np_y, np_y_pred, average='samples') == pytest.approx(re_compute) + assert recall_score(np_y, np_y_pred, average="samples") == pytest.approx(re_compute) re.reset() y_pred = torch.randint(0, 2, size=(10, 4)) @@ -521,11 +515,11 @@ def _test(average): re.update((y_pred, y)) np_y_pred = y_pred.numpy() np_y = y.numpy() - assert re._type == 'multilabel' + assert re._type == "multilabel" re_compute = re.compute() if average else re.compute().mean().item() with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) - assert recall_score(np_y, np_y_pred, average='samples') == pytest.approx(re_compute) + assert recall_score(np_y, np_y_pred, average="samples") == pytest.approx(re_compute) # Batched Updates re.reset() @@ -537,15 +531,15 @@ def _test(average): for i in range(n_iters): idx = i * batch_size - re.update((y_pred[idx: idx + batch_size], y[idx: idx + batch_size])) + re.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) np_y = y.numpy() np_y_pred = y_pred.numpy() - assert re._type == 'multilabel' + assert re._type == "multilabel" re_compute = re.compute() if average else re.compute().mean().item() with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) - assert recall_score(np_y, np_y_pred, average='samples') == pytest.approx(re_compute) + assert recall_score(np_y, np_y_pred, average="samples") == pytest.approx(re_compute) for _ in range(5): _test(average=True) @@ -561,7 +555,6 @@ def _test(average): def test_multilabel_input_NCL(): - def _test(average): re = Recall(average=average, is_multilabel=True) @@ -570,11 +563,11 @@ def _test(average): re.update((y_pred, y)) np_y_pred = to_numpy_multilabel(y_pred) np_y = to_numpy_multilabel(y) - assert re._type == 'multilabel' + assert re._type == "multilabel" re_compute = re.compute() if average else re.compute().mean().item() with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) - assert recall_score(np_y, np_y_pred, average='samples') == pytest.approx(re_compute) + assert recall_score(np_y, np_y_pred, average="samples") == pytest.approx(re_compute) re.reset() y_pred = 
torch.randint(0, 2, size=(15, 4, 10)) @@ -582,11 +575,11 @@ def _test(average): re.update((y_pred, y)) np_y_pred = to_numpy_multilabel(y_pred) np_y = to_numpy_multilabel(y) - assert re._type == 'multilabel' + assert re._type == "multilabel" re_compute = re.compute() if average else re.compute().mean().item() with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) - assert recall_score(np_y, np_y_pred, average='samples') == pytest.approx(re_compute) + assert recall_score(np_y, np_y_pred, average="samples") == pytest.approx(re_compute) # Batched Updates re.reset() @@ -598,15 +591,15 @@ def _test(average): for i in range(n_iters): idx = i * batch_size - re.update((y_pred[idx:idx + batch_size], y[idx:idx + batch_size])) + re.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) np_y = to_numpy_multilabel(y) np_y_pred = to_numpy_multilabel(y_pred) - assert re._type == 'multilabel' + assert re._type == "multilabel" re_compute = re.compute() if average else re.compute().mean().item() with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) - assert recall_score(np_y, np_y_pred, average='samples') == pytest.approx(re_compute) + assert recall_score(np_y, np_y_pred, average="samples") == pytest.approx(re_compute) for _ in range(5): _test(average=True) @@ -622,7 +615,6 @@ def _test(average): def test_multilabel_input_NCHW(): - def _test(average): re = Recall(average=average, is_multilabel=True) @@ -631,11 +623,11 @@ def _test(average): re.update((y_pred, y)) np_y_pred = to_numpy_multilabel(y_pred) np_y = to_numpy_multilabel(y) - assert re._type == 'multilabel' + assert re._type == "multilabel" re_compute = re.compute() if average else re.compute().mean().item() with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) - assert recall_score(np_y, np_y_pred, average='samples') == pytest.approx(re_compute) + assert recall_score(np_y, np_y_pred, average="samples") == pytest.approx(re_compute) re.reset() y_pred = torch.randint(0, 2, size=(10, 4, 20, 23)) @@ -643,11 +635,11 @@ def _test(average): re.update((y_pred, y)) np_y_pred = to_numpy_multilabel(y_pred) np_y = to_numpy_multilabel(y) - assert re._type == 'multilabel' + assert re._type == "multilabel" re_compute = re.compute() if average else re.compute().mean().item() with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) - assert recall_score(np_y, np_y_pred, average='samples') == pytest.approx(re_compute) + assert recall_score(np_y, np_y_pred, average="samples") == pytest.approx(re_compute) # Batched Updates re.reset() @@ -659,15 +651,15 @@ def _test(average): for i in range(n_iters): idx = i * batch_size - re.update((y_pred[idx:idx + batch_size], y[idx:idx + batch_size])) + re.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size])) np_y = to_numpy_multilabel(y) np_y_pred = to_numpy_multilabel(y_pred) - assert re._type == 'multilabel' + assert re._type == "multilabel" re_compute = re.compute() if average else re.compute().mean().item() with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) - assert recall_score(np_y, np_y_pred, average='samples') == pytest.approx(re_compute) + assert recall_score(np_y, np_y_pred, average="samples") == pytest.approx(re_compute) for _ in range(5): _test(average=True) @@ -711,7 +703,6 @@ def _test(average): def test_incorrect_y_classes(): - def _test(average): re = Recall(average=average) @@ 
-738,12 +729,14 @@ def _test(average, n_epochs): n_classes = 7 offset = n_iters * s - y_true = torch.randint(0, n_classes, size=(offset * dist.get_world_size(), )).to(device) + y_true = torch.randint(0, n_classes, size=(offset * dist.get_world_size(),)).to(device) y_preds = torch.rand(offset * dist.get_world_size(), n_classes).to(device) def update(engine, i): - return y_preds[i * s + rank * offset:(i + 1) * s + rank * offset, :], \ - y_true[i * s + rank * offset:(i + 1) * s + rank * offset] + return ( + y_preds[i * s + rank * offset : (i + 1) * s + rank * offset, :], + y_true[i * s + rank * offset : (i + 1) * s + rank * offset], + ) engine = Engine(update) @@ -754,12 +747,13 @@ def update(engine, i): engine.run(data=data, max_epochs=n_epochs) assert "re" in engine.state.metrics - res = engine.state.metrics['re'] + res = engine.state.metrics["re"] if isinstance(res, torch.Tensor): res = res.cpu().numpy() - true_res = recall_score(y_true.cpu().numpy(), torch.argmax(y_preds, dim=1).cpu().numpy(), - average='macro' if average else None) + true_res = recall_score( + y_true.cpu().numpy(), torch.argmax(y_preds, dim=1).cpu().numpy(), average="macro" if average else None + ) assert pytest.approx(res) == true_res @@ -788,8 +782,10 @@ def _test(average, n_epochs): y_preds = torch.randint(0, 2, size=(offset * dist.get_world_size(), n_classes, 6, 8)).to(device) def update(engine, i): - return y_preds[i * s + rank * offset:(i + 1) * s + rank * offset, ...], \ - y_true[i * s + rank * offset:(i + 1) * s + rank * offset, ...] + return ( + y_preds[i * s + rank * offset : (i + 1) * s + rank * offset, ...], + y_true[i * s + rank * offset : (i + 1) * s + rank * offset, ...], + ) engine = Engine(update) @@ -800,7 +796,7 @@ def update(engine, i): engine.run(data=data, max_epochs=n_epochs) assert "re" in engine.state.metrics - res = engine.state.metrics['re'] + res = engine.state.metrics["re"] res2 = re.compute() if isinstance(res, torch.Tensor): res = res.cpu().numpy() @@ -811,9 +807,9 @@ def update(engine, i): with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) - true_res = recall_score(to_numpy_multilabel(y_true), - to_numpy_multilabel(y_preds), - average='samples' if average else None) + true_res = recall_score( + to_numpy_multilabel(y_true), to_numpy_multilabel(y_preds), average="samples" if average else None + ) assert pytest.approx(res) == true_res @@ -821,8 +817,11 @@ def update(engine, i): _test(average=True, n_epochs=1) _test(average=True, n_epochs=2) - with pytest.warns(RuntimeWarning, match="Precision/Recall metrics do not work in distributed setting when " - "average=False and is_multilabel=True"): + with pytest.warns( + RuntimeWarning, + match="Precision/Recall metrics do not work in distributed setting when " + "average=False and is_multilabel=True", + ): re = Recall(average=False, is_multilabel=True, device=device) y_pred = torch.randint(0, 2, size=(4, 3, 6, 8)) @@ -850,7 +849,7 @@ def test_distrib_cpu(distributed_context_single_node_gloo): @pytest.mark.multinode_distributed -@pytest.mark.skipif('MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_cpu(distributed_context_multi_node_gloo): device = "cpu" _test_distrib_itegration_multiclass(device) @@ -858,8 +857,8 @@ def test_multinode_distrib_cpu(distributed_context_multi_node_gloo): @pytest.mark.multinode_distributed 
-@pytest.mark.skipif('GPU_MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_gpu(distributed_context_multi_node_nccl): - device = "cuda:{}".format(distributed_context_multi_node_nccl['local_rank']) + device = "cuda:{}".format(distributed_context_multi_node_nccl["local_rank"]) _test_distrib_itegration_multiclass(device) _test_distrib_itegration_multilabel(device) diff --git a/tests/ignite/metrics/test_root_mean_squared_error.py b/tests/ignite/metrics/test_root_mean_squared_error.py index b8fcad045ec6..04e87ea86bf3 100644 --- a/tests/ignite/metrics/test_root_mean_squared_error.py +++ b/tests/ignite/metrics/test_root_mean_squared_error.py @@ -45,7 +45,7 @@ def _test_distrib_itegration(device): y_preds = (rank + 1) * torch.ones(offset, dtype=torch.float).to(device) def update(engine, i): - return y_preds[i * s:(i + 1) * s], y_true[i * s + offset * rank:(i + 1) * s + offset * rank] + return y_preds[i * s : (i + 1) * s], y_true[i * s + offset * rank : (i + 1) * s + offset * rank] engine = Engine(update) @@ -56,7 +56,7 @@ def update(engine, i): engine.run(data=data, max_epochs=1) assert "rmse" in engine.state.metrics - res = engine.state.metrics['rmse'] + res = engine.state.metrics["rmse"] y_preds_full = [] for i in range(dist.get_world_size()): @@ -84,14 +84,14 @@ def test_distrib_cpu(local_rank, distributed_context_single_node_gloo): @pytest.mark.multinode_distributed -@pytest.mark.skipif('MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_cpu(distributed_context_multi_node_gloo): device = "cpu" _test_distrib_itegration(device) @pytest.mark.multinode_distributed -@pytest.mark.skipif('GPU_MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_gpu(distributed_context_multi_node_nccl): - device = "cuda:{}".format(distributed_context_multi_node_nccl['local_rank']) + device = "cuda:{}".format(distributed_context_multi_node_nccl["local_rank"]) _test_distrib_itegration(device) diff --git a/tests/ignite/metrics/test_running_average.py b/tests/ignite/metrics/test_running_average.py index d72b736ee4f2..b8d04f1919c8 100644 --- a/tests/ignite/metrics/test_running_average.py +++ b/tests/ignite/metrics/test_running_average.py @@ -24,7 +24,7 @@ def test_wrong_input_args(): RunningAverage() with pytest.raises(ValueError, match=r"Argument device should be None if src is a Metric"): - RunningAverage(Accuracy(), device='cpu') + RunningAverage(Accuracy(), device="cpu") def test_integration(): @@ -46,12 +46,14 @@ def update_fn(engine, batch): alpha = 0.98 acc_metric = RunningAverage(Accuracy(output_transform=lambda x: [x[1], x[2]]), alpha=alpha) - acc_metric.attach(trainer, 'running_avg_accuracy') + acc_metric.attach(trainer, "running_avg_accuracy") avg_output = RunningAverage(output_transform=lambda x: x[0], alpha=alpha) - avg_output.attach(trainer, 'running_avg_output') + avg_output.attach(trainer, "running_avg_output") - running_avg_acc = [None, ] + running_avg_acc = [ + None, + ] @trainer.on(Events.ITERATION_COMPLETED) def manual_running_avg_acc(engine): @@ -76,21 +78,26 @@ def running_avg_output_update(engine): if 
engine.state.running_avg_output is None: engine.state.running_avg_output = engine.state.output[0] else: - engine.state.running_avg_output = engine.state.running_avg_output * alpha + \ - (1.0 - alpha) * engine.state.output[0] + engine.state.running_avg_output = ( + engine.state.running_avg_output * alpha + (1.0 - alpha) * engine.state.output[0] + ) @trainer.on(Events.ITERATION_COMPLETED) def assert_equal_running_avg_acc_values(engine): - assert engine.state.running_avg_acc == engine.state.metrics['running_avg_accuracy'], \ - "{} vs {}".format(engine.state.running_avg_acc, engine.state.metrics['running_avg_accuracy']) + assert engine.state.running_avg_acc == engine.state.metrics["running_avg_accuracy"], "{} vs {}".format( + engine.state.running_avg_acc, engine.state.metrics["running_avg_accuracy"] + ) @trainer.on(Events.ITERATION_COMPLETED) def assert_equal_running_avg_output_values(engine): - assert engine.state.running_avg_output == engine.state.metrics['running_avg_output'], \ - "{} vs {}".format(engine.state.running_avg_output, engine.state.metrics['running_avg_output']) + assert engine.state.running_avg_output == engine.state.metrics["running_avg_output"], "{} vs {}".format( + engine.state.running_avg_output, engine.state.metrics["running_avg_output"] + ) np.random.seed(10) - running_avg_acc = [None, ] + running_avg_acc = [ + None, + ] n_iters = 10 batch_size = 10 n_classes = 10 @@ -100,7 +107,9 @@ def assert_equal_running_avg_output_values(engine): y_pred_batch_values = iter(np.random.rand(n_iters, batch_size, n_classes)) trainer.run(data, max_epochs=1) - running_avg_acc = [None, ] + running_avg_acc = [ + None, + ] n_iters = 10 batch_size = 10 n_classes = 10 @@ -132,10 +141,10 @@ def update_fn(engine, batch): alpha = 0.98 acc_metric = RunningAverage(Accuracy(output_transform=lambda x: [x[1], x[2]]), alpha=alpha, epoch_bound=False) - acc_metric.attach(trainer, 'running_avg_accuracy') + acc_metric.attach(trainer, "running_avg_accuracy") avg_output = RunningAverage(output_transform=lambda x: x[0], alpha=alpha, epoch_bound=False) - avg_output.attach(trainer, 'running_avg_output') + avg_output.attach(trainer, "running_avg_output") running_avg_acc = [None] @@ -162,44 +171,47 @@ def running_avg_output_update(engine): if engine.state.running_avg_output is None: engine.state.running_avg_output = engine.state.output[0] else: - engine.state.running_avg_output = engine.state.running_avg_output * alpha + \ - (1.0 - alpha) * engine.state.output[0] + engine.state.running_avg_output = ( + engine.state.running_avg_output * alpha + (1.0 - alpha) * engine.state.output[0] + ) @trainer.on(Events.ITERATION_COMPLETED) def assert_equal_running_avg_acc_values(engine): - assert engine.state.running_avg_acc == engine.state.metrics['running_avg_accuracy'], \ - "{} vs {}".format(engine.state.running_avg_acc, engine.state.metrics['running_avg_accuracy']) + assert engine.state.running_avg_acc == engine.state.metrics["running_avg_accuracy"], "{} vs {}".format( + engine.state.running_avg_acc, engine.state.metrics["running_avg_accuracy"] + ) @trainer.on(Events.ITERATION_COMPLETED) def assert_equal_running_avg_output_values(engine): - assert engine.state.running_avg_output == engine.state.metrics['running_avg_output'], \ - "{} vs {}".format(engine.state.running_avg_output, engine.state.metrics['running_avg_output']) + assert engine.state.running_avg_output == engine.state.metrics["running_avg_output"], "{} vs {}".format( + engine.state.running_avg_output, engine.state.metrics["running_avg_output"] + ) trainer.run(data, 
max_epochs=3) def test_multiple_attach(): n_iters = 100 - errD_values = iter(np.random.rand(n_iters, )) - errG_values = iter(np.random.rand(n_iters, )) - D_x_values = iter(np.random.rand(n_iters, )) - D_G_z1 = iter(np.random.rand(n_iters, )) - D_G_z2 = iter(np.random.rand(n_iters, )) + errD_values = iter(np.random.rand(n_iters,)) + errG_values = iter(np.random.rand(n_iters,)) + D_x_values = iter(np.random.rand(n_iters,)) + D_G_z1 = iter(np.random.rand(n_iters,)) + D_G_z2 = iter(np.random.rand(n_iters,)) def update_fn(engine, batch): return { - 'errD': next(errD_values), - 'errG': next(errG_values), - 'D_x': next(D_x_values), - 'D_G_z1': next(D_G_z1), - 'D_G_z2': next(D_G_z2), + "errD": next(errD_values), + "errG": next(errG_values), + "D_x": next(D_x_values), + "D_G_z1": next(D_G_z1), + "D_G_z2": next(D_G_z2), } trainer = Engine(update_fn) alpha = 0.98 # attach running average - monitoring_metrics = ['errD', 'errG', 'D_x', 'D_G_z1', 'D_G_z2'] + monitoring_metrics = ["errD", "errG", "D_x", "D_G_z1", "D_G_z2"] for metric in monitoring_metrics: foo = partial(lambda x, metric: x[metric], metric=metric) RunningAverage(alpha=alpha, output_transform=foo).attach(trainer, metric) @@ -220,6 +232,7 @@ def check_values(engine): def _test_distrib_on_output(device): import torch.distributed as dist + rank = dist.get_rank() n_iters = 10 n_epochs = 3 @@ -229,7 +242,7 @@ def _test_distrib_on_output(device): data = list(range(n_iters)) k = n_epochs * batch_size * n_iters all_loss_values = torch.arange(0, k * dist.get_world_size(), dtype=torch.float64).to(device) - loss_values = iter(all_loss_values[k * rank:k * (rank + 1)]) + loss_values = iter(all_loss_values[k * rank : k * (rank + 1)]) def update_fn(engine, batch): loss_value = next(loss_values) @@ -239,7 +252,7 @@ def update_fn(engine, batch): alpha = 0.98 avg_output = RunningAverage(output_transform=lambda x: x, alpha=alpha, epoch_bound=False, device=device) - avg_output.attach(trainer, 'running_avg_output') + avg_output.attach(trainer, "running_avg_output") @trainer.on(Events.STARTED) def running_avg_output_init(engine): @@ -252,21 +265,22 @@ def running_avg_output_update(engine): if engine.state.running_avg_output is None: engine.state.running_avg_output = o else: - engine.state.running_avg_output = engine.state.running_avg_output * alpha + \ - (1.0 - alpha) * o + engine.state.running_avg_output = engine.state.running_avg_output * alpha + (1.0 - alpha) * o @trainer.on(Events.ITERATION_COMPLETED) def assert_equal_running_avg_output_values(engine): - assert engine.state.running_avg_output == pytest.approx(engine.state.metrics['running_avg_output']), \ - "{}: {} vs {}".format(engine.state.iteration, - engine.state.running_avg_output, - engine.state.metrics['running_avg_output']) + assert engine.state.running_avg_output == pytest.approx( + engine.state.metrics["running_avg_output"] + ), "{}: {} vs {}".format( + engine.state.iteration, engine.state.running_avg_output, engine.state.metrics["running_avg_output"] + ) trainer.run(data, max_epochs=3) def _test_distrib_on_metric(device): import torch.distributed as dist + rank = dist.get_rank() n_iters = 10 n_epochs = 3 @@ -275,8 +289,9 @@ def _test_distrib_on_metric(device): data = list(range(n_iters)) np.random.seed(12) - all_y_true_batch_values = np.random.randint(0, n_classes, - size=(dist.get_world_size(), n_epochs * n_iters, batch_size)) + all_y_true_batch_values = np.random.randint( + 0, n_classes, size=(dist.get_world_size(), n_epochs * n_iters, batch_size) + ) all_y_pred_batch_values = 
np.random.rand(dist.get_world_size(), n_epochs * n_iters, batch_size, n_classes) y_true_batch_values = iter(all_y_true_batch_values[rank, ...]) @@ -290,11 +305,14 @@ def update_fn(engine, batch): trainer = Engine(update_fn) alpha = 0.98 - acc_metric = RunningAverage(Accuracy(output_transform=lambda x: [x[0], x[1]], device=device), - alpha=alpha, epoch_bound=False) - acc_metric.attach(trainer, 'running_avg_accuracy') + acc_metric = RunningAverage( + Accuracy(output_transform=lambda x: [x[0], x[1]], device=device), alpha=alpha, epoch_bound=False + ) + acc_metric.attach(trainer, "running_avg_accuracy") - running_avg_acc = [None, ] + running_avg_acc = [ + None, + ] true_acc_metric = Accuracy(device=device) @trainer.on(Events.ITERATION_COMPLETED) @@ -303,8 +321,10 @@ def manual_running_avg_acc(engine): true_acc_metric.reset() for j in range(dist.get_world_size()): - output = (torch.from_numpy(all_y_pred_batch_values[j, i, :, :]), - torch.from_numpy(all_y_true_batch_values[j, i, :])) + output = ( + torch.from_numpy(all_y_pred_batch_values[j, i, :, :]), + torch.from_numpy(all_y_true_batch_values[j, i, :]), + ) true_acc_metric.update(output) batch_acc = true_acc_metric._num_correct * 1.0 / true_acc_metric._num_examples @@ -317,8 +337,9 @@ def manual_running_avg_acc(engine): @trainer.on(Events.ITERATION_COMPLETED) def assert_equal_running_avg_acc_values(engine): - assert engine.state.running_avg_acc == engine.state.metrics['running_avg_accuracy'], \ - "{} vs {}".format(engine.state.running_avg_acc, engine.state.metrics['running_avg_accuracy']) + assert engine.state.running_avg_acc == engine.state.metrics["running_avg_accuracy"], "{} vs {}".format( + engine.state.running_avg_acc, engine.state.metrics["running_avg_accuracy"] + ) trainer.run(data, max_epochs=3) @@ -341,7 +362,7 @@ def test_distrib_cpu(distributed_context_single_node_gloo): @pytest.mark.multinode_distributed -@pytest.mark.skipif('MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_cpu(distributed_context_multi_node_gloo): device = "cpu" _test_distrib_on_output(device) @@ -349,8 +370,8 @@ def test_multinode_distrib_cpu(distributed_context_multi_node_gloo): @pytest.mark.multinode_distributed -@pytest.mark.skipif('GPU_MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_gpu(distributed_context_multi_node_nccl): - device = "cuda:{}".format(distributed_context_multi_node_nccl['local_rank']) + device = "cuda:{}".format(distributed_context_multi_node_nccl["local_rank"]) _test_distrib_on_output(device) _test_distrib_on_metric(device) diff --git a/tests/ignite/metrics/test_top_k_categorical_accuracy.py b/tests/ignite/metrics/test_top_k_categorical_accuracy.py index 368df9491a35..3dae259fd68e 100644 --- a/tests/ignite/metrics/test_top_k_categorical_accuracy.py +++ b/tests/ignite/metrics/test_top_k_categorical_accuracy.py @@ -33,6 +33,7 @@ def test_compute(): def top_k_accuracy(y_true, y_pred, k=5, normalize=True): import numpy as np + # Taken from # https://github.com/scikit-learn/scikit-learn/blob/4685cb5c50629aba4429f6701585f82fc3eee5f7/ # sklearn/metrics/classification.py#L187 @@ -44,7 +45,7 @@ def top_k_accuracy(y_true, y_pred, k=5, normalize=True): counter = 0.0 argsorted = np.argsort(y_pred, axis=1) for i in 
range(num_obs): - if y_true[i] in argsorted[i, idx + 1:]: + if y_true[i] in argsorted[i, idx + 1 :]: counter += 1.0 if normalize: return counter * 1.0 / num_obs @@ -55,6 +56,7 @@ def top_k_accuracy(y_true, y_pred, k=5, normalize=True): def _test_distrib_itegration(device): import torch.distributed as dist from ignite.engine import Engine + rank = dist.get_rank() torch.manual_seed(12) @@ -64,14 +66,16 @@ def _test(n_epochs): n_classes = 10 offset = n_iters * s - y_true = torch.randint(0, n_classes, size=(offset * dist.get_world_size(), )).to(device) + y_true = torch.randint(0, n_classes, size=(offset * dist.get_world_size(),)).to(device) y_preds = torch.rand(offset * dist.get_world_size(), n_classes).to(device) print("{}: y_true={} | y_preds={}".format(rank, y_true[:5], y_preds[:5, :2])) def update(engine, i): - return y_preds[i * s + rank * offset:(i + 1) * s + rank * offset, :], \ - y_true[i * s + rank * offset:(i + 1) * s + rank * offset] + return ( + y_preds[i * s + rank * offset : (i + 1) * s + rank * offset, :], + y_true[i * s + rank * offset : (i + 1) * s + rank * offset], + ) engine = Engine(update) @@ -83,12 +87,11 @@ def update(engine, i): engine.run(data=data, max_epochs=n_epochs) assert "acc" in engine.state.metrics - res = engine.state.metrics['acc'] + res = engine.state.metrics["acc"] if isinstance(res, torch.Tensor): res = res.cpu().numpy() - true_res = top_k_accuracy(y_true.cpu().numpy(), - y_preds.cpu().numpy(), k=k) + true_res = top_k_accuracy(y_true.cpu().numpy(), y_preds.cpu().numpy(), k=k) assert pytest.approx(res) == true_res @@ -111,14 +114,14 @@ def test_distrib_cpu(local_rank, distributed_context_single_node_gloo): @pytest.mark.multinode_distributed -@pytest.mark.skipif('MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_cpu(distributed_context_multi_node_gloo): device = "cpu" _test_distrib_itegration(device) @pytest.mark.multinode_distributed -@pytest.mark.skipif('GPU_MULTINODE_DISTRIB' not in os.environ, reason="Skip if not multi-node distributed") +@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_gpu(distributed_context_multi_node_nccl): - device = "cuda:{}".format(distributed_context_multi_node_nccl['local_rank']) + device = "cuda:{}".format(distributed_context_multi_node_nccl["local_rank"]) _test_distrib_itegration(device) diff --git a/tests/ignite/test_utils.py b/tests/ignite/test_utils.py index 3ef2b45df2ef..f23d123221f0 100644 --- a/tests/ignite/test_utils.py +++ b/tests/ignite/test_utils.py @@ -13,11 +13,11 @@ def test_convert_tensor(): assert torch.is_tensor(tensor) x = torch.tensor([0.0]) - tensor = convert_tensor(x, device='cpu', non_blocking=True) + tensor = convert_tensor(x, device="cpu", non_blocking=True) assert torch.is_tensor(tensor) x = torch.tensor([0.0]) - tensor = convert_tensor(x, device='cpu', non_blocking=False) + tensor = convert_tensor(x, device="cpu", non_blocking=False) assert torch.is_tensor(tensor) x = [torch.tensor([0.0]), torch.tensor([0.0])] @@ -32,20 +32,20 @@ def test_convert_tensor(): assert torch.is_tensor(tuple_[0]) assert torch.is_tensor(tuple_[1]) - Point = namedtuple("Point", ['x', 'y']) + Point = namedtuple("Point", ["x", "y"]) x = Point(torch.tensor([0.0]), torch.tensor([0.0])) tuple_ = convert_tensor(x) assert isinstance(tuple_, Point) assert torch.is_tensor(tuple_[0]) assert 
torch.is_tensor(tuple_[1]) - x = {'a': torch.tensor([0.0]), 'b': torch.tensor([0.0])} + x = {"a": torch.tensor([0.0]), "b": torch.tensor([0.0])} dict_ = convert_tensor(x) assert isinstance(dict_, dict) - assert torch.is_tensor(dict_['a']) - assert torch.is_tensor(dict_['b']) + assert torch.is_tensor(dict_["a"]) + assert torch.is_tensor(dict_["b"]) - assert convert_tensor('a') == 'a' + assert convert_tensor("a") == "a" with pytest.raises(TypeError): convert_tensor(12345) @@ -105,7 +105,7 @@ def _(_): trainer.run([0, 1, 2, 3, 4, 5], max_epochs=5) captured = capsys.readouterr() - err = captured.err.split('\n') + err = captured.err.split("\n") with open(fp, "r") as h: data = h.readlines() diff --git a/tox.ini b/tox.ini index 5b6fdec6f4f4..2dfe6badab4f 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,6 @@ [flake8] max-line-length = 120 -ignore = E305,E402,E721,E722,E741,F401,F403,F405,F821,F841,F999 +ignore = E203,E231,E305,E402,E721,E722,E741,F401,F403,F405,F821,F841,F999,W503 [pytest] markers =