From 55dfe72faef44fd240e3c027f2e12210ba8333a5 Mon Sep 17 00:00:00 2001 From: steven Date: Tue, 15 Dec 2020 17:51:56 -0700 Subject: [PATCH 01/29] breaking up pull request --- README.md | 2 +- assets/tldr/teaser.ipynb | 4 ++-- assets/tldr/teaser.py | 2 +- docs/source/concepts.rst | 14 +++++------ examples/contrib/cifar10/main.py | 24 +++++++++---------- .../cifar100_amp_benchmark/benchmark_fp32.py | 4 ++-- .../benchmark_nvidia_apex.py | 4 ++-- .../contrib/mnist/mnist_with_tqdm_logger.py | 8 ++----- 8 files changed, 28 insertions(+), 34 deletions(-) diff --git a/README.md b/README.md index 5b7eba008438..c0eef3b25419 100644 --- a/README.md +++ b/README.md @@ -125,7 +125,7 @@ mydata = [1, 2, 3, 4] logger = ... def on_training_ended(data): - print("Training is ended. mydata={}".format(data)) + print(f"Training is ended. mydata={data}") # User can use variables from another scope logger.info("Training is ended") diff --git a/assets/tldr/teaser.ipynb b/assets/tldr/teaser.ipynb index 1b09453c1a6f..f90c20e3544e 100644 --- a/assets/tldr/teaser.ipynb +++ b/assets/tldr/teaser.ipynb @@ -99,7 +99,7 @@ " if name in models.__dict__:\n", " fn = models.__dict__[name]\n", " else:\n", - " raise RuntimeError(\"Unknown model name {}\".format(name))\n", + " raise RuntimeError(f\"Unknown model name {name}\")\n", "\n", " return fn(num_classes=10)\n", "\n", @@ -485,4 +485,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/assets/tldr/teaser.py b/assets/tldr/teaser.py index b4d4e1e4dfa8..3a6dcd3fa583 100644 --- a/assets/tldr/teaser.py +++ b/assets/tldr/teaser.py @@ -41,7 +41,7 @@ def get_model(name): if name in models.__dict__: fn = models.__dict__[name] else: - raise RuntimeError("Unknown model name {}".format(name)) + raise RuntimeError(f"Unknown model name {name}") return fn(num_classes=10) diff --git a/docs/source/concepts.rst b/docs/source/concepts.rst index ab027a7abc34..04c9c79d0b10 100644 --- a/docs/source/concepts.rst +++ b/docs/source/concepts.rst @@ -165,7 +165,7 @@ Attaching an event handler is simple using method :meth:`~ignite.engine.engine.E mydata = [1, 2, 3, 4] def on_training_ended(data): - print("Training is ended. mydata={}".format(data)) + print(f"Training is ended. mydata={data}") trainer.add_event_handler(Events.COMPLETED, on_training_ended, mydata) @@ -181,8 +181,7 @@ reference returned by :meth:`~ignite.engine.engine.Engine.add_event_handler`. Th evaluator = create_supervised_evaluator(model, metrics={"acc": Accuracy()}) def log_metrics(engine, title): - print("Epoch: {} - {} accuracy: {:.2f}" - .format(trainer.state.epoch, title, engine.state.metrics["acc"])) + print(f"Epoch: {trainer.state.epoch} - {title} accuracy: {engine.state.metrics["acc"]:.2f}") @trainer.on(Events.EPOCH_COMPLETED) def evaluate(trainer): @@ -210,8 +209,7 @@ event filtering function: @trainer.on(Events.ITERATION_COMPLETED(every=50)) def log_training_loss_every_50_iterations(): - print("{} / {} : {} - loss: {:.2f}" - .format(trainer.state.epoch, trainer.state.max_epochs, trainer.state.iteration, trainer.state.output)) + print(f"{trainer.state.epoch} / {trainer.state.max_epochs} : {trainer.state.iteration} - loss: {trainer.state.output:.2f}") @trainer.on(Events.EPOCH_STARTED(once=25)) def do_something_once_on_25_epoch(): @@ -339,7 +337,7 @@ every iteration. 
iteration = engine.state.iteration epoch = engine.state.epoch loss = engine.state.output - print("Epoch: {}, Iteration: {}, Loss: {}".format(epoch, iteration, loss)) + print(f"Epoch: {epoch}, Iteration: {iteration}, Loss: {loss}") trainer.add_event_handler(Events.ITERATION_COMPLETED, on_iteration_completed) @@ -366,7 +364,7 @@ In the code below, `engine.state.output` will be a list of loss, y_pred, y for t def print_loss(engine): epoch = engine.state.epoch loss = engine.state.output[0] - print ('Epoch {epoch}: train_loss = {loss}'.format(epoch=epoch, loss=loss)) + print (f'Epoch {epoch}: train_loss = {loss}') accuracy = Accuracy(output_transform=lambda x: [x[1], x[2]]) accuracy.attach(trainer, 'acc') @@ -394,7 +392,7 @@ batch, this is how the user can use `output_transform` to get y_pred and y from def print_loss(engine): epoch = engine.state.epoch loss = engine.state.output['loss'] - print ('Epoch {epoch}: train_loss = {loss}'.format(epoch=epoch, loss=loss)) + print (f'Epoch {epoch}: train_loss = {loss}') accuracy = Accuracy(output_transform=lambda x: [x['y_pred'], x['y']]) accuracy.attach(trainer, 'acc') diff --git a/examples/contrib/cifar10/main.py b/examples/contrib/cifar10/main.py index 79055c9c0f17..49964fa3951b 100644 --- a/examples/contrib/cifar10/main.py +++ b/examples/contrib/cifar10/main.py @@ -35,14 +35,14 @@ def training(local_rank, config): if config["stop_iteration"] is None: now = datetime.now().strftime("%Y%m%d-%H%M%S") else: - now = "stop-on-{}".format(config["stop_iteration"]) + now = f"stop-on-{config['stop_iteration']}" - folder_name = "{}_backend-{}-{}_{}".format(config["model"], idist.backend(), idist.get_world_size(), now) + folder_name = f"{config['model']}_backend-{idist.backend()}-{idist.get_world_size()}_{now}" output_path = Path(output_path) / folder_name if not output_path.exists(): output_path.mkdir(parents=True) config["output_path"] = output_path.as_posix() - logger.info("Output path: {}".format(config["output_path"])) + logger.info(f"Output path: {config['output_path']}") if "cuda" in device.type: config["cuda device name"] = torch.cuda.get_device_name(local_rank) @@ -117,7 +117,7 @@ def run_validation(engine): @trainer.on(Events.ITERATION_STARTED(once=config["stop_iteration"])) def _(): - logger.info("Stop training on {} iteration".format(trainer.state.iteration)) + logger.info(f"Stop training on {trainer.state.iteration} iteration") trainer.terminate() try: @@ -251,20 +251,20 @@ def log_metrics(logger, epoch, elapsed, tag, metrics): def log_basic_info(logger, config): - logger.info("Train {} on CIFAR10".format(config["model"])) - logger.info("- PyTorch version: {}".format(torch.__version__)) - logger.info("- Ignite version: {}".format(ignite.__version__)) + logger.info(f"Train {config['model']} on CIFAR10") + logger.info(f"- PyTorch version: {torch.__version__}") + logger.info(f"- Ignite version: {ignite.__version__}") logger.info("\n") logger.info("Configuration:") for key, value in config.items(): - logger.info("\t{}: {}".format(key, value)) + logger.info(f"\t{key}: {value}") logger.info("\n") if idist.get_world_size() > 1: logger.info("\nDistributed setting:") - logger.info("\tbackend: {}".format(idist.backend())) - logger.info("\tworld size: {}".format(idist.get_world_size())) + logger.info(f"\tbackend: {idist.backend()}") + logger.info(f"\tworld size: {idist.get_world_size()}") logger.info("\n") @@ -334,8 +334,8 @@ def train_step(engine, batch): resume_from = config["resume_from"] if resume_from is not None: checkpoint_fp = Path(resume_from) - 
assert checkpoint_fp.exists(), "Checkpoint '{}' is not found".format(checkpoint_fp.as_posix()) - logger.info("Resume from a checkpoint: {}".format(checkpoint_fp.as_posix())) + assert checkpoint_fp.exists(), f"Checkpoint '{checkpoint_fp.as_posix()}' is not found" + logger.info(f"Resume from a checkpoint: {checkpoint_fp.as_posix()}") checkpoint = torch.load(checkpoint_fp.as_posix(), map_location="cpu") Checkpoint.load_objects(to_load=to_save, checkpoint=checkpoint) diff --git a/examples/contrib/cifar100_amp_benchmark/benchmark_fp32.py b/examples/contrib/cifar100_amp_benchmark/benchmark_fp32.py index 568775d01a64..bcd53619a910 100644 --- a/examples/contrib/cifar100_amp_benchmark/benchmark_fp32.py +++ b/examples/contrib/cifar100_amp_benchmark/benchmark_fp32.py @@ -52,11 +52,11 @@ def train_step(engine, batch): def log_metrics(engine, title): for name in metrics: - print("\t{} {}: {:.2f}".format(title, name, engine.state.metrics[name])) + print(f"\t{title} {name}: {engine.state.metrics[name]:.2f}") @trainer.on(Events.COMPLETED) def run_validation(_): - print("- Mean elapsed time for 1 epoch: {}".format(timer.value())) + print(f"- Mean elapsed time for 1 epoch: {timer.value()}") print("- Metrics:") with evaluator.add_event_handler(Events.COMPLETED, log_metrics, "Train"): evaluator.run(eval_train_loader) diff --git a/examples/contrib/cifar100_amp_benchmark/benchmark_nvidia_apex.py b/examples/contrib/cifar100_amp_benchmark/benchmark_nvidia_apex.py index 73eb0ab47495..a16f0ffc7665 100644 --- a/examples/contrib/cifar100_amp_benchmark/benchmark_nvidia_apex.py +++ b/examples/contrib/cifar100_amp_benchmark/benchmark_nvidia_apex.py @@ -58,11 +58,11 @@ def train_step(engine, batch): def log_metrics(engine, title): for name in metrics: - print("\t{} {}: {:.2f}".format(title, name, engine.state.metrics[name])) + print(f"\t{title} {name}: {engine.state.metrics[name]:.2f}") @trainer.on(Events.COMPLETED) def run_validation(_): - print("- Mean elapsed time for 1 epoch: {}".format(timer.value())) + print(f"- Mean elapsed time for 1 epoch: {timer.value()}") print("- Metrics:") with evaluator.add_event_handler(Events.COMPLETED, log_metrics, "Train"): evaluator.run(eval_train_loader) diff --git a/examples/contrib/mnist/mnist_with_tqdm_logger.py b/examples/contrib/mnist/mnist_with_tqdm_logger.py index 73351b9e60bf..30cb5dae314b 100644 --- a/examples/contrib/mnist/mnist_with_tqdm_logger.py +++ b/examples/contrib/mnist/mnist_with_tqdm_logger.py @@ -77,9 +77,7 @@ def log_training_results(engine): avg_accuracy = metrics["accuracy"] avg_nll = metrics["nll"] pbar.log_message( - "Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format( - engine.state.epoch, avg_accuracy, avg_nll - ) + f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}" ) @trainer.on(Events.EPOCH_COMPLETED) @@ -89,9 +87,7 @@ def log_validation_results(engine): avg_accuracy = metrics["accuracy"] avg_nll = metrics["nll"] pbar.log_message( - "Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format( - engine.state.epoch, avg_accuracy, avg_nll - ) + f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}" ) pbar.n = pbar.last_print_n = 0 From df914eb84a7f03e9a915ef66b5622e8efd997752 Mon Sep 17 00:00:00 2001 From: steven Date: Tue, 15 Dec 2020 17:52:40 -0700 Subject: [PATCH 02/29] breaking up pull request --- docs/source/metrics.rst | 2 +- examples/contrib/cifar10/utils.py | 2 +- 
.../cifar100_amp_benchmark/benchmark_torch_cuda_amp.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/source/metrics.rst b/docs/source/metrics.rst index 85656bea8af2..7e2c91f776c8 100644 --- a/docs/source/metrics.rst +++ b/docs/source/metrics.rst @@ -66,7 +66,7 @@ use the ``output_transform`` argument to transform it: .. code-block:: python - device = "cuda:{}".format(local_rank) + device = f"cuda:{local_rank}" model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank, ], output_device=local_rank) diff --git a/examples/contrib/cifar10/utils.py b/examples/contrib/cifar10/utils.py index bdef65f23ab0..4f65d682bb50 100644 --- a/examples/contrib/cifar10/utils.py +++ b/examples/contrib/cifar10/utils.py @@ -34,6 +34,6 @@ def get_model(name): if name in models.__dict__: fn = models.__dict__[name] else: - raise RuntimeError("Unknown model name {}".format(name)) + raise RuntimeError(f"Unknown model name {name}") return fn(num_classes=10) diff --git a/examples/contrib/cifar100_amp_benchmark/benchmark_torch_cuda_amp.py b/examples/contrib/cifar100_amp_benchmark/benchmark_torch_cuda_amp.py index 787640578a54..b8a5933f3470 100644 --- a/examples/contrib/cifar100_amp_benchmark/benchmark_torch_cuda_amp.py +++ b/examples/contrib/cifar100_amp_benchmark/benchmark_torch_cuda_amp.py @@ -69,11 +69,11 @@ def train_step(engine, batch): def log_metrics(engine, title): for name in metrics: - print("\t{} {}: {:.2f}".format(title, name, engine.state.metrics[name])) + print(f"\t{title} {name}: {engine.state.metrics[name]:.2f}") @trainer.on(Events.COMPLETED) def run_validation(_): - print("- Mean elapsed time for 1 epoch: {}".format(timer.value())) + print(f"- Mean elapsed time for 1 epoch: {timer.value()}") print("- Metrics:") with evaluator.add_event_handler(Events.COMPLETED, log_metrics, "Train"): evaluator.run(eval_train_loader) From 7c2c68bba4d0e0c28b97352947384ed5ad929dd2 Mon Sep 17 00:00:00 2001 From: vfdev Date: Wed, 16 Dec 2020 08:00:12 +0100 Subject: [PATCH 03/29] Update docs/source/concepts.rst --- docs/source/concepts.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/concepts.rst b/docs/source/concepts.rst index 04c9c79d0b10..21a1ffb31103 100644 --- a/docs/source/concepts.rst +++ b/docs/source/concepts.rst @@ -181,7 +181,7 @@ reference returned by :meth:`~ignite.engine.engine.Engine.add_event_handler`. 
Th evaluator = create_supervised_evaluator(model, metrics={"acc": Accuracy()}) def log_metrics(engine, title): - print(f"Epoch: {trainer.state.epoch} - {title} accuracy: {engine.state.metrics["acc"]:.2f}") + print(f"Epoch: {trainer.state.epoch} - {title} accuracy: {engine.state.metrics['acc']:.2f}") @trainer.on(Events.EPOCH_COMPLETED) def evaluate(trainer): From 6e9d82ccebac40c1514bd371c2346286e6e4f0d8 Mon Sep 17 00:00:00 2001 From: steven Date: Wed, 16 Dec 2020 10:17:48 -0700 Subject: [PATCH 04/29] breaking up commits --- docs/source/faq.rst | 16 ++++++++-------- docs/source/quickstart.rst | 18 +++++++----------- examples/fast_neural_style/neural_style.py | 2 +- examples/gan/dcgan.py | 4 ++-- examples/mnist/mnist.py | 10 +++------- examples/mnist/mnist_save_resume_engine.py | 12 ++++-------- .../mnist/mnist_with_tensorboard_on_tpu.py | 8 ++------ examples/mnist/mnist_with_visdom.py | 12 ++++-------- .../imagenet/code/dataflow/dataloaders.py | 2 +- .../imagenet/code/dataflow/transforms.py | 2 +- .../imagenet/code/dataflow/vis.py | 10 +++++----- .../imagenet/code/scripts/training.py | 16 ++++++++-------- .../imagenet/code/utils/handlers.py | 2 +- .../pascal_voc2012/code/dataflow/datasets.py | 4 ++-- .../pascal_voc2012/code/dataflow/transforms.py | 2 +- .../code/scripts/download_dataset.py | 4 ++-- .../pascal_voc2012/code/scripts/training.py | 12 ++++++------ .../pascal_voc2012/code/utils/handlers.py | 2 +- .../reinforcement_learning/actor_critic.py | 8 +++----- examples/reinforcement_learning/reinforce.py | 8 +++----- 20 files changed, 65 insertions(+), 89 deletions(-) diff --git a/docs/source/faq.rst b/docs/source/faq.rst index 4e4804b3a106..b2dc9b8c26af 100644 --- a/docs/source/faq.rst +++ b/docs/source/faq.rst @@ -143,7 +143,7 @@ Let's use an infinite data iterator as training dataflow # ... s = trainer.state print( - "{}/{} : {} - {:.3f}".format(s.epoch, s.max_epochs, s.iteration, batch.norm()) + f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch.norm():.3f}" ) trainer = Engine(train_step) @@ -189,7 +189,7 @@ In this case, there will be only a single epoch defined. # ... s = trainer.state print( - "{}/{} : {} - {:.3f}".format(s.epoch, s.max_epochs, s.iteration, batch.norm()) + f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch.norm():.3f}" ) trainer = Engine(train_step) @@ -246,7 +246,7 @@ In the code, we do not specify `epoch_length` which will be automatically determ # ... s = trainer.state print( - "{}/{} : {} - {:.3f}".format(s.epoch, s.max_epochs, s.iteration, batch) + f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch:.3f}" ) trainer = Engine(train_step) @@ -276,7 +276,7 @@ In case of validation, the code is simply # ... s = evaluator.state print( - "{}/{} : {} - {:.3f}".format(s.epoch, s.max_epochs, s.iteration, batch) + f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch:.3f}" ) evaluator = Engine(val_step) @@ -310,7 +310,7 @@ but here we will do this explicitly on iteration: # ... s = trainer.state print( - "{}/{} : {} - {:.3f}".format(s.epoch, s.max_epochs, s.iteration, batch) + f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch:.3f}" ) trainer = Engine(train_step) @@ -342,7 +342,7 @@ In case of validation, the code is simply # ... 
s = evaluator.state print( - "{}/{} : {} - {:.3f}".format(s.epoch, s.max_epochs, s.iteration, batch) + f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch:.3f}" ) evaluator = Engine(val_step) @@ -375,11 +375,11 @@ Simpliest way to fetch time of single epoch and complete training is to use @trainer.on(Events.EPOCH_COMPLETED) def log_epoch_time(): - print("{}: {}".format(trainer.state.epoch, trainer.state.times["EPOCH_COMPLETED"])) + print(f"{trainer.state.epoch}: {trainer.state.times["EPOCH_COMPLETED"]}") @trainer.on(Events.COMPLETED) def log_total_time(): - print("Total: {}".format(trainer.state.times["COMPLETED"])) + print(f"Total: {trainer.state.times["COMPLETED"]}") For details, see :class:`~ignite.engine.events.State`. diff --git a/docs/source/quickstart.rst b/docs/source/quickstart.rst index f9dffc6089f6..e30f9f65fb72 100644 --- a/docs/source/quickstart.rst +++ b/docs/source/quickstart.rst @@ -28,21 +28,19 @@ Code @trainer.on(Events.ITERATION_COMPLETED(every=log_interval)) def log_training_loss(trainer): - print("Epoch[{}] Loss: {:.2f}".format(trainer.state.epoch, trainer.state.output)) + print(f"Epoch[{trainer.state.epoch}] Loss: {trainer.state.output:.2f}") @trainer.on(Events.EPOCH_COMPLETED) def log_training_results(trainer): evaluator.run(train_loader) metrics = evaluator.state.metrics - print("Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}" - .format(trainer.state.epoch, metrics["accuracy"], metrics["nll"])) + print(f"Training Results - Epoch: {trainer.state.epoch} Avg accuracy: {metrics["accuracy"]:.2f} Avg loss: {metrics["nll"]:.2f}") @trainer.on(Events.EPOCH_COMPLETED) def log_validation_results(trainer): evaluator.run(val_loader) metrics = evaluator.state.metrics - print("Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}" - .format(trainer.state.epoch, metrics["accuracy"], metrics["nll"])) + print("Validation Results - Epoch: {trainer.state.epoch} Avg accuracy: {metrics["accuracy"]:.2f} Avg loss: {metrics["nll"]:.2f}") trainer.run(train_loader, max_epochs=100) @@ -128,14 +126,14 @@ logging purposes we add a function to be executed at the end of every ``log_inte @trainer.on(Events.ITERATION_COMPLETED(every=log_interval)) def log_training_loss(engine): - print("Epoch[{}] Loss: {:.2f}".format(engine.state.epoch, engine.state.output)) + print(f"Epoch[{engine.state.epoch}] Loss: {engine.state.output:.2f}") or equivalently without the decorator .. code-block:: python def log_training_loss(engine): - print("Epoch[{}] Loss: {:.2f}".format(engine.state.epoch, engine.state.output)) + print(f"Epoch[{engine.state.epoch}] Loss: {engine.state.output:.2f}") trainer.add_event_handler(Events.ITERATION_COMPLETED, log_training_loss) @@ -149,15 +147,13 @@ complete event: def log_training_results(trainer): evaluator.run(train_loader) metrics = evaluator.state.metrics - print("Training Results - Epoch[{}] Avg accuracy: {:.2f} Avg loss: {:.2f}" - .format(trainer.state.epoch, metrics['accuracy'], metrics['nll'])) + print(f"Training Results - Epoch[{trainer.state.epoch}] Avg accuracy: {metrics['accuracy']:.2f} Avg loss: {metrics['nll']:.2f}") @trainer.on(Events.EPOCH_COMPLETED) def log_validation_results(trainer): evaluator.run(val_loader) metrics = evaluator.state.metrics - print("Validation Results - Epoch[{}] Avg accuracy: {:.2f} Avg loss: {:.2f}" - .format(trainer.state.epoch, metrics['accuracy'], metrics['nll'])) + print(f"Validation Results - Epoch[{trainer.state.epoch}] Avg accuracy: {metrics['accuracy']:.2f} Avg loss: {metrics['nll']:.2f}") .. 
Note :: diff --git a/examples/fast_neural_style/neural_style.py b/examples/fast_neural_style/neural_style.py index 9116e34f6719..4f3a0bb1cefa 100644 --- a/examples/fast_neural_style/neural_style.py +++ b/examples/fast_neural_style/neural_style.py @@ -54,7 +54,7 @@ def check_dataset(args): size=args.batch_size, image_size=(3, 32, 32), num_classes=1, transform=transform ) else: - raise RuntimeError("Invalid dataset name: {}".format(args.dataset)) + raise RuntimeError(f"Invalid dataset name: {args.dataset}") train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0) diff --git a/examples/gan/dcgan.py b/examples/gan/dcgan.py index ce0b12097dde..515c325a9e42 100644 --- a/examples/gan/dcgan.py +++ b/examples/gan/dcgan.py @@ -185,7 +185,7 @@ def check_dataset(dataset, dataroot): nc = 3 else: - raise RuntimeError("Invalid dataset name: {}".format(dataset)) + raise RuntimeError(f"Invalid dataset name: {dataset}") return dataset, nc @@ -358,7 +358,7 @@ def save_real_example(engine): # adding handlers using `trainer.on` decorator API @trainer.on(Events.EPOCH_COMPLETED) def print_times(engine): - pbar.log_message("Epoch {} done. Time per batch: {:.3f}[s]".format(engine.state.epoch, timer.value())) + pbar.log_message(f"Epoch {engine.state.epoch} done. Time per batch: {timer.value():.3f}[s]") timer.reset() # adding handlers using `trainer.on` decorator API diff --git a/examples/mnist/mnist.py b/examples/mnist/mnist.py index c6a1c66b3565..ad2a8fdbe899 100644 --- a/examples/mnist/mnist.py +++ b/examples/mnist/mnist.py @@ -81,9 +81,7 @@ def log_training_results(engine): avg_accuracy = metrics["accuracy"] avg_nll = metrics["nll"] tqdm.write( - "Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format( - engine.state.epoch, avg_accuracy, avg_nll - ) + f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}" ) @trainer.on(Events.EPOCH_COMPLETED) @@ -93,9 +91,7 @@ def log_validation_results(engine): avg_accuracy = metrics["accuracy"] avg_nll = metrics["nll"] tqdm.write( - "Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format( - engine.state.epoch, avg_accuracy, avg_nll - ) + f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}" ) pbar.n = pbar.last_print_n = 0 @@ -103,7 +99,7 @@ def log_validation_results(engine): @trainer.on(Events.EPOCH_COMPLETED | Events.COMPLETED) def log_time(engine): tqdm.write( - "{} took {} seconds".format(trainer.last_event_name.name, trainer.state.times[trainer.last_event_name.name]) + f"{trainer.last_event_name.name} took { trainer.state.times[trainer.last_event_name.name]} seconds" ) trainer.run(train_loader, max_epochs=epochs) diff --git a/examples/mnist/mnist_save_resume_engine.py b/examples/mnist/mnist_save_resume_engine.py index 917217850925..7b6b08e7cfb4 100644 --- a/examples/mnist/mnist_save_resume_engine.py +++ b/examples/mnist/mnist_save_resume_engine.py @@ -198,7 +198,7 @@ def log_training_loss(engine): @trainer.on(Events.ITERATION_COMPLETED(once=crash_iteration)) def _(engine): - raise Exception("STOP at {}".format(engine.state.iteration)) + raise Exception(f"STOP at {engine.state.iteration}") if resume_from is not None: @@ -214,9 +214,7 @@ def log_training_results(engine): avg_accuracy = metrics["accuracy"] avg_nll = metrics["nll"] tqdm.write( - "Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format( - engine.state.epoch, avg_accuracy, avg_nll - ) + f"Training Results 
- Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}" ) writer.add_scalar("training/avg_loss", avg_nll, engine.state.epoch) writer.add_scalar("training/avg_accuracy", avg_accuracy, engine.state.epoch) @@ -229,9 +227,7 @@ def log_validation_results(engine): avg_accuracy = metrics["accuracy"] avg_nll = metrics["nll"] tqdm.write( - "Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format( - engine.state.epoch, avg_accuracy, avg_nll - ) + f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}" ) pbar.n = pbar.last_print_n = 0 writer.add_scalar("valdation/avg_loss", avg_nll, engine.state.epoch) @@ -264,7 +260,7 @@ def log_event_filter(e, event): trainer.add_event_handler(Events.ITERATION_COMPLETED(event_filter=log_event_filter), h, model=model, fp=fp) if resume_from is not None: - tqdm.write("Resume from the checkpoint: {}".format(resume_from)) + tqdm.write(f"Resume from the checkpoint: {resume_from}") checkpoint = torch.load(resume_from) Checkpoint.load_objects(to_load=objects_to_checkpoint, checkpoint=checkpoint) diff --git a/examples/mnist/mnist_with_tensorboard_on_tpu.py b/examples/mnist/mnist_with_tensorboard_on_tpu.py index 84441ee12386..77d72d4d9ff6 100644 --- a/examples/mnist/mnist_with_tensorboard_on_tpu.py +++ b/examples/mnist/mnist_with_tensorboard_on_tpu.py @@ -116,9 +116,7 @@ def log_training_results(engine): avg_accuracy = metrics["accuracy"] avg_nll = metrics["nll"] print( - "Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format( - engine.state.epoch, avg_accuracy, avg_nll - ) + f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}" ) writer.add_scalar("training/avg_loss", avg_nll, engine.state.epoch) writer.add_scalar("training/avg_accuracy", avg_accuracy, engine.state.epoch) @@ -130,9 +128,7 @@ def log_validation_results(engine): avg_accuracy = metrics["accuracy"] avg_nll = metrics["nll"] print( - "Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format( - engine.state.epoch, avg_accuracy, avg_nll - ) + f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}" ) writer.add_scalar("valdation/avg_loss", avg_nll, engine.state.epoch) writer.add_scalar("valdation/avg_accuracy", avg_accuracy, engine.state.epoch) diff --git a/examples/mnist/mnist_with_visdom.py b/examples/mnist/mnist_with_visdom.py index e3b88a6e04fe..ebe5aedd8ddb 100644 --- a/examples/mnist/mnist_with_visdom.py +++ b/examples/mnist/mnist_with_visdom.py @@ -83,8 +83,8 @@ def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval): @trainer.on(Events.ITERATION_COMPLETED(every=log_interval)) def log_training_loss(engine): print( - "Epoch[{}] Iteration[{}/{}] Loss: {:.2f}" - "".format(engine.state.epoch, engine.state.iteration, len(train_loader), engine.state.output) + f"Epoch[{engine.state.epoch}] Iteration[{engine.state.iteration}/{len(train_loader)}] Loss: {engine.state.output:.2f}" + "" ) vis.line( X=np.array([engine.state.iteration]), @@ -100,9 +100,7 @@ def log_training_results(engine): avg_accuracy = metrics["accuracy"] avg_nll = metrics["nll"] print( - "Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format( - engine.state.epoch, avg_accuracy, avg_nll - ) + f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}" ) vis.line( X=np.array([engine.state.epoch]), 
Y=np.array([avg_accuracy]), win=train_avg_accuracy_window, update="append" @@ -116,9 +114,7 @@ def log_validation_results(engine): avg_accuracy = metrics["accuracy"] avg_nll = metrics["nll"] print( - "Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format( - engine.state.epoch, avg_accuracy, avg_nll - ) + f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}" ) vis.line( X=np.array([engine.state.epoch]), Y=np.array([avg_accuracy]), win=val_avg_accuracy_window, update="append" diff --git a/examples/references/classification/imagenet/code/dataflow/dataloaders.py b/examples/references/classification/imagenet/code/dataflow/dataloaders.py index e29b1eb70f2e..3c1d32c62176 100644 --- a/examples/references/classification/imagenet/code/dataflow/dataloaders.py +++ b/examples/references/classification/imagenet/code/dataflow/dataloaders.py @@ -12,7 +12,7 @@ def opencv_loader(path): img = cv2.imread(path) - assert img is not None, "Image at '{}' has a problem".format(path) + assert img is not None, f"Image at '{path}' has a problem" return cv2.cvtColor(img, cv2.COLOR_BGR2RGB) diff --git a/examples/references/classification/imagenet/code/dataflow/transforms.py b/examples/references/classification/imagenet/code/dataflow/transforms.py index 4e64563aeebd..86b106db5f45 100644 --- a/examples/references/classification/imagenet/code/dataflow/transforms.py +++ b/examples/references/classification/imagenet/code/dataflow/transforms.py @@ -4,7 +4,7 @@ def denormalize(t, mean, std, max_pixel_value=255): - assert isinstance(t, torch.Tensor), "{}".format(type(t)) + assert isinstance(t, torch.Tensor), f"{type(t)}" assert t.ndim == 3 d = t.device mean = torch.tensor(mean, device=d).unsqueeze(-1).unsqueeze(-1) diff --git a/examples/references/classification/imagenet/code/dataflow/vis.py b/examples/references/classification/imagenet/code/dataflow/vis.py index a1ebb8315f74..f79f4657a0a0 100644 --- a/examples/references/classification/imagenet/code/dataflow/vis.py +++ b/examples/references/classification/imagenet/code/dataflow/vis.py @@ -33,13 +33,13 @@ def make_grid( batch_gt (torch.Tensor, optional): batch of ground truth masks. 
""" assert isinstance(batch_img, torch.Tensor) and isinstance(batch_preds, torch.Tensor) - assert len(batch_img) == len(batch_preds), "{} vs {}".format(len(batch_img), len(batch_preds)) - assert batch_preds.ndim == 1, "{}".format(batch_preds.ndim) + assert len(batch_img) == len(batch_preds), f"{len(batch_img)} vs {len(batch_preds)}" + assert batch_preds.ndim == 1, f"{batch_preds.ndim}" if batch_gt is not None: assert isinstance(batch_gt, torch.Tensor) assert len(batch_preds) == len(batch_gt) - assert batch_gt.ndim == 1, "{}".format(batch_gt.ndim) + assert batch_gt.ndim == 1, f"{batch_gt.ndim}" b = batch_img.shape[0] h, w = batch_img.shape[2:] @@ -55,12 +55,12 @@ def make_grid( img = tensor_to_numpy(img) pred_label = y_preds.cpu().item() - target = "p={}".format(pred_label) + target = f"p={pred_label}" if batch_gt is not None: gt_label = batch_gt[i] gt_label = gt_label.cpu().item() - target += " | gt={}".format(gt_label) + target += f" | gt={gt_label}" out_image[0:h, i * w : (i + 1) * w, :] = render_datapoint(img, target, text_size=12) diff --git a/examples/references/classification/imagenet/code/scripts/training.py b/examples/references/classification/imagenet/code/scripts/training.py index 9ff0f8d04092..75fe20a9d022 100644 --- a/examples/references/classification/imagenet/code/scripts/training.py +++ b/examples/references/classification/imagenet/code/scripts/training.py @@ -121,15 +121,15 @@ def log_metrics(logger, epoch, elapsed, tag, metrics): def log_basic_info(logger, config): - msg = "\n- PyTorch version: {}".format(torch.__version__) - msg += "\n- Ignite version: {}".format(ignite.__version__) + msg = f"\n- PyTorch version: {torch.__version__}" + msg += f"\n- Ignite version: {ignite.__version__}" logger.info(msg) if idist.get_world_size() > 1: msg = "\nDistributed setting:" - msg += "\tbackend: {}".format(idist.backend()) - msg += "\trank: {}".format(idist.get_rank()) - msg += "\tworld size: {}".format(idist.get_world_size()) + msg += f"\tbackend: {idist.backend()}" + msg += f"\trank: {idist.get_rank()}" + msg += f"\tworld size: {idist.get_world_size()}" logger.info(msg) @@ -323,11 +323,11 @@ def run_benchmark(_): if idist.get_rank() == 0: print(" ") - print(" Total time ({} iterations) : {:.5f} seconds".format(self.num_iters, t)) - print(" time per iteration : {} seconds".format(t / self.num_iters)) + print(f" Total time ({self.num_iters} iterations) : {t:.5f} seconds") + print(f" time per iteration : {t / self.num_iters} seconds") if isinstance(train_loader, DataLoader): num_images = train_loader.batch_size * self.num_iters - print(" number of images / s : {}".format(num_images / t)) + print(f" number of images / s : {num_images / t}") print("-" * 50) diff --git a/examples/references/classification/imagenet/code/utils/handlers.py b/examples/references/classification/imagenet/code/utils/handlers.py index 525da0f193de..0911839dc3c6 100644 --- a/examples/references/classification/imagenet/code/utils/handlers.py +++ b/examples/references/classification/imagenet/code/utils/handlers.py @@ -28,7 +28,7 @@ def wrapper(engine, logger, event_name): tag = "predictions_with_gt" if prefix_tag is not None: - tag = "{}: {}".format(prefix_tag, tag) + tag = f"{prefix_tag}: {tag}" logger.writer.add_image(tag=tag, img_tensor=grid_pred_gt, global_step=global_step, dataformats="HWC") return wrapper diff --git a/examples/references/segmentation/pascal_voc2012/code/dataflow/datasets.py b/examples/references/segmentation/pascal_voc2012/code/dataflow/datasets.py index 1c28405217ca..aee84aba553e 100644 --- 
a/examples/references/segmentation/pascal_voc2012/code/dataflow/datasets.py +++ b/examples/references/segmentation/pascal_voc2012/code/dataflow/datasets.py @@ -59,7 +59,7 @@ def __init__(self, *args, return_meta: bool = False, **kwargs): def __getitem__(self, index): img = cv2.imread(self.images[index]) - assert img is not None, "Image at '{}' has a problem".format(self.images[index]) + assert img is not None, f"Image at '{self.images[index]}' has a problem" img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) mask = np.asarray(Image.open(self.masks[index])) @@ -85,7 +85,7 @@ def _get_segmentation_target(self, filepath): def __getitem__(self, index): img = cv2.imread(self.images[index]) - assert img is not None, "Image at '{}' has a problem".format(self.images[index]) + assert img is not None, f"Image at '{self.images[index]}' has a problem" img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) mask = self._get_target(self.masks[index]) diff --git a/examples/references/segmentation/pascal_voc2012/code/dataflow/transforms.py b/examples/references/segmentation/pascal_voc2012/code/dataflow/transforms.py index 2f67cb3cc731..651dd6d86ed8 100644 --- a/examples/references/segmentation/pascal_voc2012/code/dataflow/transforms.py +++ b/examples/references/segmentation/pascal_voc2012/code/dataflow/transforms.py @@ -12,7 +12,7 @@ def ignore_mask_boundaries(force_apply, **kwargs): def denormalize(t, mean, std, max_pixel_value=255): - assert isinstance(t, torch.Tensor), "{}".format(type(t)) + assert isinstance(t, torch.Tensor), f"{type(t)}" assert t.ndim == 3 d = t.device mean = torch.tensor(mean, device=d).unsqueeze(-1).unsqueeze(-1) diff --git a/examples/references/segmentation/pascal_voc2012/code/scripts/download_dataset.py b/examples/references/segmentation/pascal_voc2012/code/scripts/download_dataset.py index 5d7587b53045..253d69ec1668 100644 --- a/examples/references/segmentation/pascal_voc2012/code/scripts/download_dataset.py +++ b/examples/references/segmentation/pascal_voc2012/code/scripts/download_dataset.py @@ -20,5 +20,5 @@ os.makedirs(sbd_path, exist_ok=True) SBDataset(sbd_path, image_set="train_noval", mode="segmentation", download=True) print("Done") - print("Pascal VOC 2012 is at : {}".format(os.path.join(args.output_path, "VOCdevkit"))) - print("SBD is at : {}".format(sbd_path)) + print(f"Pascal VOC 2012 is at : {os.path.join(args.output_path, 'VOCdevkit')}") + print(f"SBD is at : {sbd_path}") diff --git a/examples/references/segmentation/pascal_voc2012/code/scripts/training.py b/examples/references/segmentation/pascal_voc2012/code/scripts/training.py index 0074115da17e..1db56826cc78 100644 --- a/examples/references/segmentation/pascal_voc2012/code/scripts/training.py +++ b/examples/references/segmentation/pascal_voc2012/code/scripts/training.py @@ -146,17 +146,17 @@ def log_metrics(logger, epoch, elapsed, tag, metrics): def log_basic_info(logger, config): - msg = "\n- PyTorch version: {}".format(torch.__version__) - msg += "\n- Ignite version: {}".format(ignite.__version__) - msg += "\n- Cuda device name: {}".format(torch.cuda.get_device_name(idist.get_local_rank())) + msg = f"\n- PyTorch version: {torch.__version__}" + msg += f"\n- Ignite version: {ignite.__version__}" + msg += f"\n- Cuda device name: {torch.cuda.get_device_name(idist.get_local_rank())}" logger.info(msg) if idist.get_world_size() > 1: msg = "\nDistributed setting:" - msg += "\tbackend: {}".format(idist.backend()) - msg += "\trank: {}".format(idist.get_rank()) - msg += "\tworld size: {}".format(idist.get_world_size()) + msg += f"\tbackend: 
{idist.backend()}" + msg += f"\trank: {idist.get_rank()}" + msg += f"\tworld size: {idist.get_world_size()}" logger.info(msg) diff --git a/examples/references/segmentation/pascal_voc2012/code/utils/handlers.py b/examples/references/segmentation/pascal_voc2012/code/utils/handlers.py index 48022e8ef9ac..df71156ebb58 100644 --- a/examples/references/segmentation/pascal_voc2012/code/utils/handlers.py +++ b/examples/references/segmentation/pascal_voc2012/code/utils/handlers.py @@ -29,7 +29,7 @@ def wrapper(engine, logger, event_name): tag = "predictions_with_gt" if prefix_tag is not None: - tag = "{}: {}".format(prefix_tag, tag) + tag = f"{prefix_tag}: {tag}" logger.writer.add_image(tag=tag, img_tensor=grid_pred_gt, global_step=global_step, dataformats="HWC") return wrapper diff --git a/examples/reinforcement_learning/actor_critic.py b/examples/reinforcement_learning/actor_critic.py index 4a7871ebb086..cf642a0fd7d0 100644 --- a/examples/reinforcement_learning/actor_critic.py +++ b/examples/reinforcement_learning/actor_critic.py @@ -113,9 +113,7 @@ def update_model(engine): def log_episode(engine): i_episode = engine.state.epoch print( - "Episode {}\tLast length: {:5d}\tAverage length: {:.2f}".format( - i_episode, engine.state.timestep, engine.state.running_reward - ) + f"Episode {i_episode}\tLast length: {engine.state.timestep:5d}\tAverage length: {engine.state.running_reward:.2f}" ) @trainer.on(EPISODE_COMPLETED) @@ -123,8 +121,8 @@ def should_finish_training(engine): running_reward = engine.state.running_reward if running_reward > env.spec.reward_threshold: print( - "Solved! Running reward is now {} and " - "the last episode runs to {} time steps!".format(running_reward, engine.state.timestep) + f"Solved! Running reward is now {running_reward} and " + f"the last episode runs to {engine.state.timestep} time steps!" ) engine.should_terminate = True diff --git a/examples/reinforcement_learning/reinforce.py b/examples/reinforcement_learning/reinforce.py index 6c052003344a..9b25e3d171d9 100644 --- a/examples/reinforcement_learning/reinforce.py +++ b/examples/reinforcement_learning/reinforce.py @@ -103,9 +103,7 @@ def update_model(engine): def log_episode(engine): i_episode = engine.state.epoch print( - "Episode {}\tLast length: {:5d}\tAverage length: {:.2f}".format( - i_episode, engine.state.timestep, engine.state.running_reward - ) + f"Episode {i_episode}\tLast length: {engine.state.timestep:5d}\tAverage length: {engine.state.running_reward:.2f}" ) @trainer.on(EPISODE_COMPLETED) @@ -113,8 +111,8 @@ def should_finish_training(engine): running_reward = engine.state.running_reward if running_reward > env.spec.reward_threshold: print( - "Solved! Running reward is now {} and " - "the last episode runs to {} time steps!".format(running_reward, engine.state.timestep) + f"Solved! Running reward is now {running_reward} and " + f"the last episode runs to {engine.state.timestep} time steps!" 
) engine.should_terminate = True From 63c36fdc5bdb071ec01bfe79c9f9d8a83922d7a8 Mon Sep 17 00:00:00 2001 From: vfdev Date: Wed, 16 Dec 2020 18:57:42 +0100 Subject: [PATCH 05/29] Apply suggestions from code review --- docs/source/faq.rst | 4 ++-- docs/source/quickstart.rst | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/source/faq.rst b/docs/source/faq.rst index b2dc9b8c26af..3ac486900446 100644 --- a/docs/source/faq.rst +++ b/docs/source/faq.rst @@ -375,11 +375,11 @@ Simpliest way to fetch time of single epoch and complete training is to use @trainer.on(Events.EPOCH_COMPLETED) def log_epoch_time(): - print(f"{trainer.state.epoch}: {trainer.state.times["EPOCH_COMPLETED"]}") + print(f"{trainer.state.epoch}: {trainer.state.times['EPOCH_COMPLETED']}") @trainer.on(Events.COMPLETED) def log_total_time(): - print(f"Total: {trainer.state.times["COMPLETED"]}") + print(f"Total: {trainer.state.times['COMPLETED']}") For details, see :class:`~ignite.engine.events.State`. diff --git a/docs/source/quickstart.rst b/docs/source/quickstart.rst index e30f9f65fb72..e3023b2c2a99 100644 --- a/docs/source/quickstart.rst +++ b/docs/source/quickstart.rst @@ -34,13 +34,13 @@ Code def log_training_results(trainer): evaluator.run(train_loader) metrics = evaluator.state.metrics - print(f"Training Results - Epoch: {trainer.state.epoch} Avg accuracy: {metrics["accuracy"]:.2f} Avg loss: {metrics["nll"]:.2f}") + print(f"Training Results - Epoch: {trainer.state.epoch} Avg accuracy: {metrics['accuracy']:.2f} Avg loss: {metrics['nll']:.2f}") @trainer.on(Events.EPOCH_COMPLETED) def log_validation_results(trainer): evaluator.run(val_loader) metrics = evaluator.state.metrics - print("Validation Results - Epoch: {trainer.state.epoch} Avg accuracy: {metrics["accuracy"]:.2f} Avg loss: {metrics["nll"]:.2f}") + print("Validation Results - Epoch: {trainer.state.epoch} Avg accuracy: {metrics['accuracy']:.2f} Avg loss: {metrics['nll']:.2f}") trainer.run(train_loader, max_epochs=100) From 3e10083ec9e73cd93a3812cefee6574c5fcd46fb Mon Sep 17 00:00:00 2001 From: steven Date: Wed, 16 Dec 2020 18:33:19 -0700 Subject: [PATCH 06/29] ran black, copied corrections from github, added 'f' to one line --- docs/source/faq.rst | 4 ++-- docs/source/quickstart.rst | 4 ++-- examples/mnist/mnist.py | 4 +--- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/docs/source/faq.rst b/docs/source/faq.rst index b2dc9b8c26af..3ac486900446 100644 --- a/docs/source/faq.rst +++ b/docs/source/faq.rst @@ -375,11 +375,11 @@ Simpliest way to fetch time of single epoch and complete training is to use @trainer.on(Events.EPOCH_COMPLETED) def log_epoch_time(): - print(f"{trainer.state.epoch}: {trainer.state.times["EPOCH_COMPLETED"]}") + print(f"{trainer.state.epoch}: {trainer.state.times['EPOCH_COMPLETED']}") @trainer.on(Events.COMPLETED) def log_total_time(): - print(f"Total: {trainer.state.times["COMPLETED"]}") + print(f"Total: {trainer.state.times['COMPLETED']}") For details, see :class:`~ignite.engine.events.State`. 
diff --git a/docs/source/quickstart.rst b/docs/source/quickstart.rst index e30f9f65fb72..e7e453246c96 100644 --- a/docs/source/quickstart.rst +++ b/docs/source/quickstart.rst @@ -34,13 +34,13 @@ Code def log_training_results(trainer): evaluator.run(train_loader) metrics = evaluator.state.metrics - print(f"Training Results - Epoch: {trainer.state.epoch} Avg accuracy: {metrics["accuracy"]:.2f} Avg loss: {metrics["nll"]:.2f}") + print(f"Training Results - Epoch: {trainer.state.epoch} Avg accuracy: {metrics['accuracy']:.2f} Avg loss: {metrics['nll']:.2f}") @trainer.on(Events.EPOCH_COMPLETED) def log_validation_results(trainer): evaluator.run(val_loader) metrics = evaluator.state.metrics - print("Validation Results - Epoch: {trainer.state.epoch} Avg accuracy: {metrics["accuracy"]:.2f} Avg loss: {metrics["nll"]:.2f}") + print(f"Validation Results - Epoch: {trainer.state.epoch} Avg accuracy: {metrics['accuracy']:.2f} Avg loss: {metrics['nll']:.2f}") trainer.run(train_loader, max_epochs=100) diff --git a/examples/mnist/mnist.py b/examples/mnist/mnist.py index ad2a8fdbe899..5b7bc9edcdd8 100644 --- a/examples/mnist/mnist.py +++ b/examples/mnist/mnist.py @@ -98,9 +98,7 @@ def log_validation_results(engine): @trainer.on(Events.EPOCH_COMPLETED | Events.COMPLETED) def log_time(engine): - tqdm.write( - f"{trainer.last_event_name.name} took { trainer.state.times[trainer.last_event_name.name]} seconds" - ) + tqdm.write(f"{trainer.last_event_name.name} took { trainer.state.times[trainer.last_event_name.name]} seconds") trainer.run(train_loader, max_epochs=epochs) pbar.close() From b0e8288bbf05ba0a491f8571e035e494be6f251c Mon Sep 17 00:00:00 2001 From: steven Date: Thu, 17 Dec 2020 12:58:20 -0700 Subject: [PATCH 07/29] metrics output f-strings --- .../classification/imagenet/code/scripts/training.py | 7 ++----- .../segmentation/pascal_voc2012/code/scripts/training.py | 7 ++----- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/examples/references/classification/imagenet/code/scripts/training.py b/examples/references/classification/imagenet/code/scripts/training.py index 75fe20a9d022..629d69fe0ff8 100644 --- a/examples/references/classification/imagenet/code/scripts/training.py +++ b/examples/references/classification/imagenet/code/scripts/training.py @@ -112,11 +112,8 @@ def create_evaluators(model, metrics, config): def log_metrics(logger, epoch, elapsed, tag, metrics): - logger.info( - "\nEpoch {} - Evaluation time (seconds): {} - {} metrics:\n {}".format( - epoch, elapsed, tag, "\n".join(["\t{}: {}".format(k, v) for k, v in metrics.items()]) - ) - ) + metrics_output = "\n".join([f"\t{k}: {v}" for k, v in metrics.items()]) + logger.info(f"\nEpoch {epoch} - Evaluation time (seconds): {elapsed} - {tag} metrics:\n {metrics_output}") def log_basic_info(logger, config): diff --git a/examples/references/segmentation/pascal_voc2012/code/scripts/training.py b/examples/references/segmentation/pascal_voc2012/code/scripts/training.py index 1db56826cc78..b1e1c5bc0a69 100644 --- a/examples/references/segmentation/pascal_voc2012/code/scripts/training.py +++ b/examples/references/segmentation/pascal_voc2012/code/scripts/training.py @@ -137,11 +137,8 @@ def create_evaluators(model, metrics, config): def log_metrics(logger, epoch, elapsed, tag, metrics): - logger.info( - "\nEpoch {} - Evaluation time (seconds): {} - {} metrics:\n {}".format( - epoch, int(elapsed), tag, "\n".join(["\t{}: {}".format(k, v) for k, v in metrics.items()]) - ) - ) + metrics_output = "\n".join([f"\t{k}: {v}" for k, v in 
metrics.items()]) + logger.info(f"\nEpoch {epoch} - Evaluation time (seconds): {int(elapsed)} - {tag} metrics:\n {metrics_output}") def log_basic_info(logger, config): From 4a6c7d799feff4de82c3dc48948bfbbbafa05211 Mon Sep 17 00:00:00 2001 From: steven Date: Thu, 17 Dec 2020 14:08:48 -0700 Subject: [PATCH 08/29] breaking up large pull. needs to do more in trains_logger.py --- examples/mnist/mnist_with_tensorboard.py | 12 +- ignite/base/mixins.py | 6 +- ignite/contrib/engines/common.py | 4 +- ignite/contrib/handlers/base_logger.py | 24 ++-- ignite/contrib/handlers/lr_finder.py | 16 +-- ignite/contrib/handlers/mlflow_logger.py | 20 ++-- ignite/contrib/handlers/neptune_logger.py | 26 ++--- ignite/contrib/handlers/param_scheduler.py | 103 +++++++----------- ignite/contrib/handlers/polyaxon_logger.py | 16 +-- ignite/contrib/handlers/tensorboard_logger.py | 32 +++--- ignite/contrib/handlers/time_profilers.py | 10 +- ignite/contrib/handlers/tqdm_logger.py | 14 +-- ignite/contrib/handlers/trains_logger.py | 42 ++++--- ignite/contrib/handlers/visdom_logger.py | 26 ++--- ignite/contrib/handlers/wandb_logger.py | 10 +- ignite/utils.py | 2 +- 16 files changed, 163 insertions(+), 200 deletions(-) diff --git a/examples/mnist/mnist_with_tensorboard.py b/examples/mnist/mnist_with_tensorboard.py index 05e33ba1df85..45289d94b5c6 100644 --- a/examples/mnist/mnist_with_tensorboard.py +++ b/examples/mnist/mnist_with_tensorboard.py @@ -92,8 +92,8 @@ def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval, lo @trainer.on(Events.ITERATION_COMPLETED(every=log_interval)) def log_training_loss(engine): print( - "Epoch[{}] Iteration[{}/{}] Loss: {:.2f}" - "".format(engine.state.epoch, engine.state.iteration, len(train_loader), engine.state.output) + f"Epoch[{engine.state.epoch}] Iteration[{engine.state.iteration}/{len(train_loader)}] Loss: {engine.state.output:.2f}" + "" ) writer.add_scalar("training/loss", engine.state.output, engine.state.iteration) @@ -104,9 +104,7 @@ def log_training_results(engine): avg_accuracy = metrics["accuracy"] avg_nll = metrics["nll"] print( - "Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format( - engine.state.epoch, avg_accuracy, avg_nll - ) + f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}" ) writer.add_scalar("training/avg_loss", avg_nll, engine.state.epoch) writer.add_scalar("training/avg_accuracy", avg_accuracy, engine.state.epoch) @@ -118,9 +116,7 @@ def log_validation_results(engine): avg_accuracy = metrics["accuracy"] avg_nll = metrics["nll"] print( - "Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format( - engine.state.epoch, avg_accuracy, avg_nll - ) + f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}" ) writer.add_scalar("valdation/avg_loss", avg_nll, engine.state.epoch) writer.add_scalar("valdation/avg_accuracy", avg_accuracy, engine.state.epoch) diff --git a/ignite/base/mixins.py b/ignite/base/mixins.py index 93b3ac0dd424..1e187243d99b 100644 --- a/ignite/base/mixins.py +++ b/ignite/base/mixins.py @@ -12,13 +12,13 @@ def state_dict(self) -> OrderedDict: def load_state_dict(self, state_dict: Mapping) -> None: if not isinstance(state_dict, Mapping): - raise TypeError("Argument state_dict should be a dictionary, but given {}".format(type(state_dict))) + raise TypeError(f"Argument state_dict should be a dictionary, but given {type(state_dict)}") for k in self._state_dict_all_req_keys: if k not in 
state_dict: raise ValueError( - "Required state attribute '{}' is absent in provided state_dict '{}'".format(k, state_dict.keys()) + f"Required state attribute '{k}' is absent in provided state_dict '{state_dict.keys()}'" ) opts = [k in state_dict for k in self._state_dict_one_of_opt_keys] if len(opts) > 0 and ((not any(opts)) or (all(opts))): - raise ValueError("state_dict should contain only one of '{}' keys".format(self._state_dict_one_of_opt_keys)) + raise ValueError(f"state_dict should contain only one of '{self._state_dict_one_of_opt_keys}' keys") diff --git a/ignite/contrib/engines/common.py b/ignite/contrib/engines/common.py index 78228537c636..a0304f50ca12 100644 --- a/ignite/contrib/engines/common.py +++ b/ignite/contrib/engines/common.py @@ -203,7 +203,7 @@ def output_transform(x: Any, index: int, name: str) -> Any: else: raise TypeError( "Unhandled type of update_function's output. " - "It should either mapping or sequence, but given {}".format(type(x)) + f"It should either mapping or sequence, but given {type(x)}" ) for i, n in enumerate(output_names): @@ -602,7 +602,7 @@ def gen_save_best_models_by_val_score( filename_prefix="best", n_saved=n_saved, global_step_transform=global_step_transform, - score_name="{}_{}".format(tag, metric_name.lower()), + score_name=f"{tag}_{metric_name.lower()}", score_function=get_default_score_fn(metric_name), **kwargs, ) diff --git a/ignite/contrib/handlers/base_logger.py b/ignite/contrib/handlers/base_logger.py index b6101b23e902..3d2c3c64dab4 100644 --- a/ignite/contrib/handlers/base_logger.py +++ b/ignite/contrib/handlers/base_logger.py @@ -29,7 +29,7 @@ def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[s ): raise TypeError( "Argument optimizer should be torch.optim.Optimizer or has attribute 'param_groups' as list/tuple, " - "but given {}".format(type(optimizer)) + f"but given {type(optimizer)}" ) self.optimizer = optimizer @@ -53,19 +53,17 @@ def __init__( if metric_names is not None: if not (isinstance(metric_names, list) or (isinstance(metric_names, str) and metric_names == "all")): raise TypeError( - "metric_names should be either a list or equal 'all', " "got {} instead.".format(type(metric_names)) + "metric_names should be either a list or equal 'all', " f"got {type(metric_names)} instead." 
) if output_transform is not None and not callable(output_transform): - raise TypeError("output_transform should be a function, got {} instead.".format(type(output_transform))) + raise TypeError(f"output_transform should be a function, got {type(output_transform)} instead.") if output_transform is None and metric_names is None: raise ValueError("Either metric_names or output_transform should be defined") if global_step_transform is not None and not callable(global_step_transform): - raise TypeError( - "global_step_transform should be a function, got {} instead.".format(type(global_step_transform)) - ) + raise TypeError(f"global_step_transform should be a function, got {type(global_step_transform)} instead.") if global_step_transform is None: @@ -88,8 +86,8 @@ def _setup_output_metrics(self, engine: Engine) -> Dict[str, Any]: for name in self.metric_names: if name not in engine.state.metrics: warnings.warn( - "Provided metric name '{}' is missing " - "in engine's state metrics: {}".format(name, list(engine.state.metrics.keys())) + f"Provided metric name '{name}' is missing " + f"in engine's state metrics: {list(engine.state.metrics.keys())}" ) continue metrics[name] = engine.state.metrics[name] @@ -111,10 +109,10 @@ class BaseWeightsScalarHandler(BaseHandler): def __init__(self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None) -> None: if not isinstance(model, torch.nn.Module): - raise TypeError("Argument model should be of type torch.nn.Module, " "but given {}".format(type(model))) + raise TypeError("Argument model should be of type torch.nn.Module, " f"but given {type(model)}") if not callable(reduction): - raise TypeError("Argument reduction should be callable, " "but given {}".format(type(reduction))) + raise TypeError("Argument reduction should be callable, " f"but given {type(reduction)}") def _is_0D_tensor(t: torch.Tensor) -> bool: return isinstance(t, torch.Tensor) and t.ndimension() == 0 @@ -122,7 +120,7 @@ def _is_0D_tensor(t: torch.Tensor) -> bool: # Test reduction function on a tensor o = reduction(torch.ones(4, 2)) if not (isinstance(o, numbers.Number) or _is_0D_tensor(o)): - raise TypeError("Output of the reduction function should be a scalar, but got {}".format(type(o))) + raise TypeError(f"Output of the reduction function should be a scalar, but got {type(o)}") self.model = model self.reduction = reduction @@ -136,7 +134,7 @@ class BaseWeightsHistHandler(BaseHandler): def __init__(self, model: nn.Module, tag: Optional[str] = None): if not isinstance(model, torch.nn.Module): - raise TypeError("Argument model should be of type torch.nn.Module, " "but given {}".format(type(model))) + raise TypeError("Argument model should be of type torch.nn.Module, " f"but given {type(model)}") self.model = model self.tag = tag @@ -166,7 +164,7 @@ def attach( name = event_name if name not in State.event_to_attr: - raise RuntimeError("Unknown event name '{}'".format(name)) + raise RuntimeError(f"Unknown event name '{name}'") return engine.add_event_handler(event_name, log_handler, self, name) diff --git a/ignite/contrib/handlers/lr_finder.py b/ignite/contrib/handlers/lr_finder.py index 2ad702171b01..4817cfef175c 100644 --- a/ignite/contrib/handlers/lr_finder.py +++ b/ignite/contrib/handlers/lr_finder.py @@ -101,8 +101,8 @@ def _run( max_iter = trainer.state.epoch_length * trainer.state.max_epochs # type: ignore[operator] if num_iter > max_iter: warnings.warn( - "Desired num_iter {} is unreachable with the current run setup of {} iteration " - "({} 
epochs)".format(num_iter, max_iter, trainer.state.max_epochs), + f"Desired num_iter {num_iter} is unreachable with the current run setup of {max_iter} iteration " + f"({trainer.state.max_epochs} epochs)", UserWarning, ) @@ -115,7 +115,7 @@ def _run( Events.ITERATION_COMPLETED, self._log_lr_and_loss, output_transform, smooth_f, diverge_th ) - self.logger.debug("Running LR finder for {} iterations".format(num_iter)) + self.logger.debug(f"Running LR finder for {num_iter} iterations") # Initialize the proper learning rate policy if step_mode.lower() == "exp": self._lr_schedule = LRScheduler(_ExponentialLR(optimizer, end_lr, num_iter)) @@ -296,7 +296,7 @@ def attach( trainer_with_lr_finder: trainer used for finding the lr """ if not isinstance(to_save, Mapping): - raise TypeError("Argument to_save should be a mapping, but given {}".format(type(to_save))) + raise TypeError(f"Argument to_save should be a mapping, but given {type(to_save)}") Checkpoint._check_objects(to_save, "state_dict") Checkpoint._check_objects(to_save, "load_state_dict") @@ -306,7 +306,7 @@ def attach( if not isinstance(to_save["optimizer"], torch.optim.Optimizer): raise TypeError( - "Object to_save['optimizer'] should be torch optimizer, but given {}".format(type(to_save["optimizer"])) + f"Object to_save['optimizer'] should be torch optimizer, but given {type(to_save['optimizer'])}" ) if smooth_f < 0 or smooth_f >= 1: @@ -314,12 +314,12 @@ def attach( if diverge_th < 1: raise ValueError("diverge_th should be larger than 1") if step_mode not in ["exp", "linear"]: - raise ValueError("step_mode should be 'exp' or 'linear', but given {}".format(step_mode)) + raise ValueError(f"step_mode should be 'exp' or 'linear', but given {step_mode}") if num_iter is not None: if not isinstance(num_iter, int): - raise TypeError("if provided, num_iter should be an integer, but give {}".format(num_iter)) + raise TypeError(f"if provided, num_iter should be an integer, but give {num_iter}") if num_iter <= 0: - raise ValueError("if provided, num_iter should be positive, but give {}".format(num_iter)) + raise ValueError(f"if provided, num_iter should be positive, but give {num_iter}") # store to_save with tempfile.TemporaryDirectory() as tmpdirname: diff --git a/ignite/contrib/handlers/mlflow_logger.py b/ignite/contrib/handlers/mlflow_logger.py index d55b9698363f..d01af2d4621e 100644 --- a/ignite/contrib/handlers/mlflow_logger.py +++ b/ignite/contrib/handlers/mlflow_logger.py @@ -226,21 +226,21 @@ def __call__(self, engine: Engine, logger: MLflowLogger, event_name: Union[str, if not isinstance(global_step, int): raise TypeError( - "global_step must be int, got {}." - " Please check the output of global_step_transform.".format(type(global_step)) + f"global_step must be int, got {type(global_step)}." + " Please check the output of global_step_transform." 
) rendered_metrics = {} # type: Dict[str, float] for key, value in metrics.items(): if isinstance(value, numbers.Number): - rendered_metrics["{} {}".format(self.tag, key)] = value # type: ignore[assignment] + rendered_metrics[f"{self.tag} {key}"] = value # type: ignore[assignment] elif isinstance(value, torch.Tensor) and value.ndimension() == 0: - rendered_metrics["{} {}".format(self.tag, key)] = value.item() + rendered_metrics[f"{self.tag} {key}"] = value.item() elif isinstance(value, torch.Tensor) and value.ndimension() == 1: for i, v in enumerate(value): - rendered_metrics["{} {} {}".format(self.tag, key, i)] = v.item() + rendered_metrics[f"{self.tag} {key} {i}"] = v.item() else: - warnings.warn("MLflowLogger output_handler can not log " "metrics value type {}".format(type(value))) + warnings.warn("MLflowLogger output_handler can not log " f"metrics value type {type(value)}") # Additionally recheck metric names as MLflow rejects non-valid names with MLflowException from mlflow.utils.validation import _VALID_PARAM_AND_METRIC_NAMES @@ -248,8 +248,8 @@ def __call__(self, engine: Engine, logger: MLflowLogger, event_name: Union[str, for key in list(rendered_metrics.keys()): if not _VALID_PARAM_AND_METRIC_NAMES.match(key): warnings.warn( - "MLflowLogger output_handler encountered an invalid metric name '{}' that " - "will be ignored and not logged to MLflow".format(key) + f"MLflowLogger output_handler encountered an invalid metric name '{key}' that " + "will be ignored and not logged to MLflow" ) del rendered_metrics[key] @@ -298,9 +298,9 @@ def __call__(self, engine: Engine, logger: MLflowLogger, event_name: Union[str, raise TypeError("Handler OptimizerParamsHandler works only with MLflowLogger") global_step = engine.state.get_event_attrib_value(event_name) - tag_prefix = "{} ".format(self.tag) if self.tag else "" + tag_prefix = f"{self.tag} " if self.tag else "" params = { - "{}{} group_{}".format(tag_prefix, self.param_name, i): float(param_group[self.param_name]) + f"{tag_prefix}{self.param_name} group_{i}": float(param_group[self.param_name]) for i, param_group in enumerate(self.optimizer.param_groups) } diff --git a/ignite/contrib/handlers/neptune_logger.py b/ignite/contrib/handlers/neptune_logger.py index 98216a26206c..60a07f917bd7 100644 --- a/ignite/contrib/handlers/neptune_logger.py +++ b/ignite/contrib/handlers/neptune_logger.py @@ -337,18 +337,18 @@ def __call__(self, engine: Engine, logger: NeptuneLogger, event_name: Union[str, if not isinstance(global_step, int): raise TypeError( - "global_step must be int, got {}." - " Please check the output of global_step_transform.".format(type(global_step)) + f"global_step must be int, got {type(global_step)}." + " Please check the output of global_step_transform." 
) for key, value in metrics.items(): if isinstance(value, numbers.Number) or isinstance(value, torch.Tensor) and value.ndimension() == 0: - logger.log_metric("{}/{}".format(self.tag, key), x=global_step, y=value) + logger.log_metric(f"{self.tag}/{key}", x=global_step, y=value) elif isinstance(value, torch.Tensor) and value.ndimension() == 1: for i, v in enumerate(value): - logger.log_metric("{}/{}/{}".format(self.tag, key, i), x=global_step, y=v.item()) + logger.log_metric(f"{self.tag}/{key}/{i}", x=global_step, y=v.item()) else: - warnings.warn("NeptuneLogger output_handler can not log metrics value type {}".format(type(value))) + warnings.warn(f"NeptuneLogger output_handler can not log metrics value type {type(value)}") class OptimizerParamsHandler(BaseOptimizerParamsHandler): @@ -399,9 +399,9 @@ def __call__(self, engine: Engine, logger: NeptuneLogger, event_name: Union[str, raise TypeError("Handler OptimizerParamsHandler works only with NeptuneLogger") global_step = engine.state.get_event_attrib_value(event_name) - tag_prefix = "{}/".format(self.tag) if self.tag else "" + tag_prefix = f"{self.tag}/" if self.tag else "" params = { - "{}{}/group_{}".format(tag_prefix, self.param_name, i): float(param_group[self.param_name]) + f"{tag_prefix}{self.param_name}/group_{i}": float(param_group[self.param_name]) for i, param_group in enumerate(self.optimizer.param_groups) } @@ -454,16 +454,14 @@ def __call__(self, engine: Engine, logger: NeptuneLogger, event_name: Union[str, raise TypeError("Handler WeightsScalarHandler works only with NeptuneLogger") global_step = engine.state.get_event_attrib_value(event_name) - tag_prefix = "{}/".format(self.tag) if self.tag else "" + tag_prefix = f"{self.tag}/" if self.tag else "" for name, p in self.model.named_parameters(): if p.grad is None: continue name = name.replace(".", "/") logger.log_metric( - "{}weights_{}/{}".format(tag_prefix, self.reduction.__name__, name), - x=global_step, - y=self.reduction(p.data), + f"{tag_prefix}weights_{self.reduction.__name__}/{name}", x=global_step, y=self.reduction(p.data), ) @@ -511,16 +509,14 @@ def __call__(self, engine: Engine, logger: NeptuneLogger, event_name: Union[str, raise TypeError("Handler GradsScalarHandler works only with NeptuneLogger") global_step = engine.state.get_event_attrib_value(event_name) - tag_prefix = "{}/".format(self.tag) if self.tag else "" + tag_prefix = f"{self.tag}/" if self.tag else "" for name, p in self.model.named_parameters(): if p.grad is None: continue name = name.replace(".", "/") logger.log_metric( - "{}grads_{}/{}".format(tag_prefix, self.reduction.__name__, name), - x=global_step, - y=self.reduction(p.grad), + f"{tag_prefix}grads_{self.reduction.__name__}/{name}", x=global_step, y=self.reduction(p.grad), ) diff --git a/ignite/contrib/handlers/param_scheduler.py b/ignite/contrib/handlers/param_scheduler.py index 5cb28dc352f4..83966aeb80ef 100644 --- a/ignite/contrib/handlers/param_scheduler.py +++ b/ignite/contrib/handlers/param_scheduler.py @@ -48,7 +48,7 @@ def __init__( ): raise TypeError( "Argument optimizer should be torch.optim.Optimizer or has attribute 'param_groups' as list/tuple, " - "but given {}".format(type(optimizer)) + f"but given {type(optimizer)}" ) self.optimizer = optimizer @@ -65,9 +65,7 @@ def __call__(self, engine: Optional[Engine], name: Optional[str] = None) -> None if isinstance(value, list): if len(value) != len(self.optimizer_param_groups): raise ValueError( - "size of value is different than optimizer_param_groups {} != {}".format( - len(value), 
len(self.optimizer_param_groups) - ) + f"size of value is different than optimizer_param_groups {len(value)} != {len(self.optimizer_param_groups)}" ) for i, param_group in enumerate(self.optimizer_param_groups): @@ -124,14 +122,12 @@ def load_state_dict(self, state_dict: Mapping) -> None: state_dict (dict): a dict containing parameters. """ if not isinstance(state_dict, Mapping): - raise TypeError("Argument state_dict should be a dictionary, but given {}".format(type(state_dict))) + raise TypeError(f"Argument state_dict should be a dictionary, but given {type(state_dict)}") for name in self._state_attrs: if name not in state_dict: raise ValueError( - "Required state attribute '{}' is absent in provided state_dict '{}'".format( - name, state_dict.keys() - ) + f"Required state attribute '{name}' is absent in provided state_dict '{state_dict.keys()}'" ) val = state_dict[name] obj = getattr(self, name) @@ -279,9 +275,7 @@ def __init__( self.end_value_mult = end_value_mult if self.cycle_size < 2: - raise ValueError( - "Argument cycle_size should be positive and larger than 1, but given {}".format(cycle_size) - ) + raise ValueError(f"Argument cycle_size should be positive and larger than 1, but given {cycle_size}") self._state_attrs += [ "start_value", @@ -452,31 +446,28 @@ class ConcatScheduler(ParamScheduler): def __init__(self, schedulers: List[ParamScheduler], durations: List[int], save_history: bool = False) -> None: if not isinstance(schedulers, Sequence): - raise TypeError("Argument schedulers should be a sequence, but given {}".format(schedulers)) + raise TypeError(f"Argument schedulers should be a sequence, but given {schedulers}") if len(schedulers) < 2: raise ValueError( - "Argument schedulers should be of more than one parameter schedulers, " - "but given {}".format(schedulers) + "Argument schedulers should be of more than one parameter schedulers, " f"but given {schedulers}" ) if not isinstance(durations, (list, tuple)): - raise TypeError("Argument durations should be list/tuple, but given {}".format(durations)) + raise TypeError(f"Argument durations should be list/tuple, but given {durations}") if not all([isinstance(t, numbers.Integral) for t in durations]): - raise ValueError("Argument durations should be list/tuple of integers, but given {}".format(durations)) + raise ValueError(f"Argument durations should be list/tuple of integers, but given {durations}") if len(schedulers) != len(durations) + 1: raise ValueError( - "Incorrect number schedulers or duration values, " - "given {} and {}".format(len(schedulers), len(durations)) + "Incorrect number schedulers or duration values, " f"given {len(schedulers)} and {len(durations)}" ) for i, scheduler in enumerate(schedulers): if not isinstance(scheduler, ParamScheduler) and not isinstance(scheduler, ParamGroupScheduler): raise TypeError( - "Value at index {} of schedulers should be a parameter scheduler, " - "but given {}".format(i, type(scheduler)) + f"Value at index {i} of schedulers should be a parameter scheduler, " f"but given {type(scheduler)}" ) self.schedulers = schedulers @@ -531,19 +522,17 @@ def load_state_dict(self, state_dict: Mapping) -> None: state_dict (dict): a dict containing parameters. 
""" if not isinstance(state_dict, Mapping): - raise TypeError("Argument state_dict should be a dictionary, but given {}".format(type(state_dict))) + raise TypeError(f"Argument state_dict should be a dictionary, but given {type(state_dict)}") if "schedulers" not in state_dict: raise ValueError( - "Required state attribute '{}' is absent in provided state_dict '{}'".format( - "schedulers", state_dict.keys() - ) + f"Required state attribute '{'schedulers'}' is absent in provided state_dict '{state_dict.keys()}'" ) sds = state_dict["schedulers"] if len(sds) != len(self.schedulers): raise ValueError( - "Input state_dict contains {} state_dicts of concatenated schedulers, " - "but {} needed".format(len(sds), len(self.schedulers)) + f"Input state_dict contains {len(sds)} state_dicts of concatenated schedulers, " + f"but {len(self.schedulers)} needed" ) for s, sd in zip(self.schedulers, sds): @@ -606,11 +595,9 @@ def simulate_values( # type: ignore[override] """ if param_names is not None: if not isinstance(param_names, (list, tuple)): - raise TypeError("Argument param_names should be list or tuple, but given {}".format(type(param_names))) + raise TypeError(f"Argument param_names should be list or tuple, but given {type(param_names)}") if not all(isinstance(item, str) for item in param_names): - raise ValueError( - "Argument param_names should be list or tuple of strings, but given {}".format(param_names) - ) + raise ValueError(f"Argument param_names should be list or tuple of strings, but given {param_names}") tmp_param_optimizers = [s.optimizer for s in schedulers] tmp_list_param_optimizers = [s if isinstance(s, list) else [s] for s in tmp_param_optimizers] @@ -627,7 +614,7 @@ def simulate_values( # type: ignore[override] # not perturb original scheduler. 
with tempfile.TemporaryDirectory() as tmpdirname: cache_filepath = Path(tmpdirname) / "ignite_lr_scheduler_cache.pt" - objs = {"lr_scheduler_{}".format(i): s.state_dict() for i, s in enumerate(schedulers)} + objs = {f"lr_scheduler_{i}": s.state_dict() for i, s in enumerate(schedulers)} # all schedulers should be related to the same optimizer objs["optimizer"] = optimizer.state_dict() @@ -653,7 +640,7 @@ def simulate_values( # type: ignore[override] objs = torch.load(cache_filepath.as_posix()) for i, s in enumerate(schedulers): - s.load_state_dict(objs["lr_scheduler_{}".format(i)]) + s.load_state_dict(objs[f"lr_scheduler_{i}"]) optimizer.load_state_dict(objs["optimizer"]) return output @@ -688,7 +675,7 @@ def __init__(self, lr_scheduler: _LRScheduler, save_history: bool = False) -> No if not isinstance(lr_scheduler, _LRScheduler): raise TypeError( "Argument lr_scheduler should be a subclass of torch.optim.lr_scheduler._LRScheduler, " - "but given {}".format(type(lr_scheduler)) + f"but given {type(lr_scheduler)}" ) self.lr_scheduler = lr_scheduler @@ -733,7 +720,7 @@ def simulate_values( # type: ignore[override] if not isinstance(lr_scheduler, _LRScheduler): raise TypeError( "Argument lr_scheduler should be a subclass of torch.optim.lr_scheduler._LRScheduler, " - "but given {}".format(type(lr_scheduler)) + f"but given {type(lr_scheduler)}" ) # This scheduler uses `torch.optim.lr_scheduler._LRScheduler` which @@ -815,14 +802,14 @@ def create_lr_scheduler_with_warmup( if not isinstance(lr_scheduler, (ParamScheduler, _LRScheduler)): raise TypeError( "Argument lr_scheduler should be a subclass of torch.optim.lr_scheduler._LRScheduler or " - "ParamScheduler, but given {}".format(type(lr_scheduler)) + f"ParamScheduler, but given {type(lr_scheduler)}" ) if not isinstance(warmup_duration, numbers.Integral): - raise TypeError("Argument warmup_duration should be integer, but given {}".format(warmup_duration)) + raise TypeError(f"Argument warmup_duration should be integer, but given {warmup_duration}") if not (warmup_duration > 1): - raise ValueError("Argument warmup_duration should be at least 2 events, but given {}".format(warmup_duration)) + raise ValueError(f"Argument warmup_duration should be at least 2 events, but given {warmup_duration}") warmup_schedulers = [] # type: List[ParamScheduler] @@ -874,7 +861,7 @@ def create_lr_scheduler_with_warmup( if not isinstance(output_simulated_values, list): raise TypeError( "Argument output_simulated_values should be a list of None, e.g. `[None] * 100`, " - "but given {}.".format(type(output_simulated_values)) + f"but given {type(output_simulated_values)}." 
) num_events = len(output_simulated_values) result = ConcatScheduler.simulate_values(num_events=num_events, schedulers=schedulers, durations=durations) @@ -926,12 +913,11 @@ def __init__( if not isinstance(milestones_values, Sequence): raise TypeError( - "Argument milestones_values should be a list or tuple, but given {}".format(type(milestones_values)) + f"Argument milestones_values should be a list or tuple, but given {type(milestones_values)}" ) if len(milestones_values) < 1: raise ValueError( - "Argument milestones_values should be with at least one value, " - "but given {}".format(milestones_values) + "Argument milestones_values should be with at least one value, " f"but given {milestones_values}" ) values = [] # type: List[float] @@ -940,11 +926,11 @@ def __init__( if not isinstance(pair, tuple) or len(pair) != 2: raise ValueError("Argument milestones_values should be a list of pairs (milestone, param_value)") if not isinstance(pair[0], numbers.Integral): - raise TypeError("Value of a milestone should be integer, but given {}".format(type(pair[0]))) + raise TypeError(f"Value of a milestone should be integer, but given {type(pair[0])}") if len(milestones) > 0 and pair[0] < milestones[-1]: raise ValueError( - "Milestones should be increasing integers, but given {} is smaller " - "than the previous milestone {}".format(pair[0], milestones[-1]) + f"Milestones should be increasing integers, but given {pair[0]} is smaller " + f"than the previous milestone {milestones[-1]}" ) milestones.append(pair[0]) values.append(pair[1]) @@ -1005,26 +991,24 @@ class ParamGroupScheduler: def __init__(self, schedulers: List[ParamScheduler], names: Optional[List[str]] = None, save_history: bool = False): if not isinstance(schedulers, Sequence): - raise TypeError("Argument schedulers should be a list/tuple, but given {}".format(schedulers)) + raise TypeError(f"Argument schedulers should be a list/tuple, but given {schedulers}") if not all(isinstance(scheduler, ParamScheduler) for scheduler in schedulers): raise ValueError( - "Argument schedulers should be a list/tuple of parameter schedulers, but given {}".format(schedulers) + f"Argument schedulers should be a list/tuple of parameter schedulers, but given {schedulers}" ) if names is None: names = [s.param_name for s in schedulers] if not isinstance(names, (list, tuple)): - raise TypeError("Argument names should be a list/tuple, but given {}".format(names)) + raise TypeError(f"Argument names should be a list/tuple, but given {names}") if not all(isinstance(n, str) for n in names): - raise ValueError( - "Argument names should be a list/tuple of parameter scheduler's names, but given {}".format(names) - ) + raise ValueError(f"Argument names should be a list/tuple of parameter scheduler's names, but given {names}") if len(names) != len(schedulers): - raise ValueError("{} should be equal {}".format(len(schedulers), len(names))) + raise ValueError(f"{len(schedulers)} should be equal {len(names)}") self.schedulers = schedulers self.names = names @@ -1073,26 +1057,23 @@ def load_state_dict(self, state_dict: Mapping) -> None: state_dict (dict): a dict containing parameters. 
""" if not isinstance(state_dict, Mapping): - raise TypeError("Argument state_dict should be a dictionary, but given {}".format(type(state_dict))) + raise TypeError(f"Argument state_dict should be a dictionary, but given {type(state_dict)}") if "schedulers" not in state_dict: raise ValueError( - "Required state attribute '{}' is absent in provided state_dict '{}'".format( - "schedulers", state_dict.keys() - ) + f"Required state attribute '{'schedulers'}' is absent in provided state_dict '{state_dict.keys()}'" ) sds = state_dict["schedulers"] if len(sds) != len(self.schedulers): raise ValueError( - "Input state_dict contains {} state_dicts of param group schedulers, " - "but {} needed".format(len(sds), len(self.schedulers)) + f"Input state_dict contains {len(sds)} state_dicts of param group schedulers, " + f"but {len(self.schedulers)} needed" ) for req_n, s, (n, sd) in zip(self.names, self.schedulers, sds): if req_n != n: raise ValueError( - "Name of scheduler from input state dict does not correspond to required one," - " {} vs {}".format(n, req_n) + "Name of scheduler from input state dict does not correspond to required one," f" {n} vs {req_n}" ) s.load_state_dict(sd) @@ -1114,7 +1095,7 @@ def simulate_values(cls, num_events: int, schedulers: List[_LRScheduler], **kwar # not perturb original scheduler. with tempfile.TemporaryDirectory() as tmpdirname: cache_filepath = Path(tmpdirname) / "ignite_lr_scheduler_cache.pt" - objs = {"lr_scheduler_{}".format(i): s.state_dict() for i, s in enumerate(schedulers)} + objs = {f"lr_scheduler_{i}": s.state_dict() for i, s in enumerate(schedulers)} # all schedulers should be related to the same optimizer objs["optimizer"] = schedulers[0].optimizer.state_dict() # type: ignore[attr-defined] @@ -1129,7 +1110,7 @@ def simulate_values(cls, num_events: int, schedulers: List[_LRScheduler], **kwar objs = torch.load(cache_filepath.as_posix()) for i, s in enumerate(schedulers): - s.load_state_dict(objs["lr_scheduler_{}".format(i)]) + s.load_state_dict(objs[f"lr_scheduler_{i}"]) s.optimizer.load_state_dict(objs["optimizer"]) # type: ignore[attr-defined] return values diff --git a/ignite/contrib/handlers/polyaxon_logger.py b/ignite/contrib/handlers/polyaxon_logger.py index 70a2416aa569..0b39aeb58451 100644 --- a/ignite/contrib/handlers/polyaxon_logger.py +++ b/ignite/contrib/handlers/polyaxon_logger.py @@ -218,21 +218,21 @@ def __call__(self, engine: Engine, logger: PolyaxonLogger, event_name: Union[str if not isinstance(global_step, int): raise TypeError( - "global_step must be int, got {}." - " Please check the output of global_step_transform.".format(type(global_step)) + f"global_step must be int, got {type(global_step)}." + " Please check the output of global_step_transform." 
) rendered_metrics = {"step": global_step} # type: Dict[str, Union[float, numbers.Number]] for key, value in metrics.items(): if isinstance(value, numbers.Number): - rendered_metrics["{}/{}".format(self.tag, key)] = value + rendered_metrics[f"{self.tag}/{key}"] = value elif isinstance(value, torch.Tensor) and value.ndimension() == 0: - rendered_metrics["{}/{}".format(self.tag, key)] = value.item() + rendered_metrics[f"{self.tag}/{key}"] = value.item() elif isinstance(value, torch.Tensor) and value.ndimension() == 1: for i, v in enumerate(value): - rendered_metrics["{}/{}/{}".format(self.tag, key, i)] = v.item() + rendered_metrics[f"{self.tag}/{key}/{i}"] = v.item() else: - warnings.warn("PolyaxonLogger output_handler can not log " "metrics value type {}".format(type(value))) + warnings.warn("PolyaxonLogger output_handler can not log " f"metrics value type {type(value)}") logger.log_metrics(**rendered_metrics) @@ -277,9 +277,9 @@ def __call__(self, engine: Engine, logger: PolyaxonLogger, event_name: Union[str raise RuntimeError("Handler OptimizerParamsHandler works only with PolyaxonLogger") global_step = engine.state.get_event_attrib_value(event_name) - tag_prefix = "{}/".format(self.tag) if self.tag else "" + tag_prefix = f"{self.tag}/" if self.tag else "" params = { - "{}{}/group_{}".format(tag_prefix, self.param_name, i): float(param_group[self.param_name]) + f"{tag_prefix}{self.param_name}/group_{i}": float(param_group[self.param_name]) for i, param_group in enumerate(self.optimizer.param_groups) } params["step"] = global_step diff --git a/ignite/contrib/handlers/tensorboard_logger.py b/ignite/contrib/handlers/tensorboard_logger.py index 9acc2f1bda31..bf0cf35cd4c6 100644 --- a/ignite/contrib/handlers/tensorboard_logger.py +++ b/ignite/contrib/handlers/tensorboard_logger.py @@ -279,18 +279,18 @@ def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[ global_step = self.global_step_transform(engine, event_name) # type: ignore[misc] if not isinstance(global_step, int): raise TypeError( - "global_step must be int, got {}." - " Please check the output of global_step_transform.".format(type(global_step)) + f"global_step must be int, got {type(global_step)}." + " Please check the output of global_step_transform." 
) for key, value in metrics.items(): if isinstance(value, numbers.Number) or isinstance(value, torch.Tensor) and value.ndimension() == 0: - logger.writer.add_scalar("{}/{}".format(self.tag, key), value, global_step) + logger.writer.add_scalar(f"{self.tag}/{key}", value, global_step) elif isinstance(value, torch.Tensor) and value.ndimension() == 1: for i, v in enumerate(value): - logger.writer.add_scalar("{}/{}/{}".format(self.tag, key, i), v.item(), global_step) + logger.writer.add_scalar(f"{self.tag}/{key}/{i}", v.item(), global_step) else: - warnings.warn("TensorboardLogger output_handler can not log metrics value type {}".format(type(value))) + warnings.warn(f"TensorboardLogger output_handler can not log metrics value type {type(value)}") class OptimizerParamsHandler(BaseOptimizerParamsHandler): @@ -333,9 +333,9 @@ def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[ raise RuntimeError("Handler OptimizerParamsHandler works only with TensorboardLogger") global_step = engine.state.get_event_attrib_value(event_name) - tag_prefix = "{}/".format(self.tag) if self.tag else "" + tag_prefix = f"{self.tag}/" if self.tag else "" params = { - "{}{}/group_{}".format(tag_prefix, self.param_name, i): float(param_group[self.param_name]) + f"{tag_prefix}{self.param_name}/group_{i}": float(param_group[self.param_name]) for i, param_group in enumerate(self.optimizer.param_groups) } @@ -380,14 +380,14 @@ def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[ raise RuntimeError("Handler 'WeightsScalarHandler' works only with TensorboardLogger") global_step = engine.state.get_event_attrib_value(event_name) - tag_prefix = "{}/".format(self.tag) if self.tag else "" + tag_prefix = f"{self.tag}/" if self.tag else "" for name, p in self.model.named_parameters(): if p.grad is None: continue name = name.replace(".", "/") logger.writer.add_scalar( - "{}weights_{}/{}".format(tag_prefix, self.reduction.__name__, name), self.reduction(p.data), global_step + f"{tag_prefix}weights_{self.reduction.__name__}/{name}", self.reduction(p.data), global_step ) @@ -424,16 +424,14 @@ def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[ raise RuntimeError("Handler 'WeightsHistHandler' works only with TensorboardLogger") global_step = engine.state.get_event_attrib_value(event_name) - tag_prefix = "{}/".format(self.tag) if self.tag else "" + tag_prefix = f"{self.tag}/" if self.tag else "" for name, p in self.model.named_parameters(): if p.grad is None: continue name = name.replace(".", "/") logger.writer.add_histogram( - tag="{}weights/{}".format(tag_prefix, name), - values=p.data.detach().cpu().numpy(), - global_step=global_step, + tag=f"{tag_prefix}weights/{name}", values=p.data.detach().cpu().numpy(), global_step=global_step, ) @@ -473,14 +471,14 @@ def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[ raise RuntimeError("Handler 'GradsScalarHandler' works only with TensorboardLogger") global_step = engine.state.get_event_attrib_value(event_name) - tag_prefix = "{}/".format(self.tag) if self.tag else "" + tag_prefix = f"{self.tag}/" if self.tag else "" for name, p in self.model.named_parameters(): if p.grad is None: continue name = name.replace(".", "/") logger.writer.add_scalar( - "{}grads_{}/{}".format(tag_prefix, self.reduction.__name__, name), self.reduction(p.grad), global_step + f"{tag_prefix}grads_{self.reduction.__name__}/{name}", self.reduction(p.grad), global_step ) @@ -517,12 +515,12 @@ def __call__(self, 
engine: Engine, logger: TensorboardLogger, event_name: Union[ raise RuntimeError("Handler 'GradsHistHandler' works only with TensorboardLogger") global_step = engine.state.get_event_attrib_value(event_name) - tag_prefix = "{}/".format(self.tag) if self.tag else "" + tag_prefix = f"{self.tag}/" if self.tag else "" for name, p in self.model.named_parameters(): if p.grad is None: continue name = name.replace(".", "/") logger.writer.add_histogram( - tag="{}grads/{}".format(tag_prefix, name), values=p.grad.detach().cpu().numpy(), global_step=global_step + tag=f"{tag_prefix}grads/{name}", values=p.grad.detach().cpu().numpy(), global_step=global_step ) diff --git a/ignite/contrib/handlers/time_profilers.py b/ignite/contrib/handlers/time_profilers.py index 95e03f55a967..c15ec4949d49 100644 --- a/ignite/contrib/handlers/time_profilers.py +++ b/ignite/contrib/handlers/time_profilers.py @@ -210,7 +210,7 @@ def _as_last_completed(self, engine: Engine) -> None: def attach(self, engine: Engine) -> None: if not isinstance(engine, Engine): - raise TypeError("Argument engine should be ignite.engine.Engine, " "but given {}".format(type(engine))) + raise TypeError("Argument engine should be ignite.engine.Engine, " f"but given {type(engine)}") if not engine.has_event_handler(self._as_first_started): engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {})) @@ -393,8 +393,8 @@ def to_str(v: Union[str, tuple]) -> str: if isinstance(v, str): return v elif isinstance(v, tuple): - return "{:.5f}/{}".format(v[0], v[1]) - return "{:.5f}".format(v) + return f"{v[0]:.5f}/{v[1]}" + return f"{v:.5f}" def odict_to_str(d: Mapping) -> str: out = " | ".join([to_str(v) for v in d.values()]) @@ -567,7 +567,7 @@ def _as_first_started(self, engine: Engine) -> None: def attach(self, engine: Engine) -> None: if not isinstance(engine, Engine): - raise TypeError("Argument engine should be ignite.engine.Engine, but given {}".format(type(engine))) + raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}") if not engine.has_event_handler(self._as_first_started): engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {})) @@ -655,7 +655,7 @@ def write_results(self, output_path: str) -> None: headers = ["processing_stats", "dataflow_stats"] for e in self.event_handlers_times: for h in self.event_handlers_times[e]: - headers.append("{} ({})".format(h, getattr(e, "name", str(e)))) + headers.append(f"{h} ({getattr(e, 'name', str(e))})") cols.append(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32)) # Determine maximum length max_len = max([x.numel() for x in cols]) diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py index b29f46ff8631..30df129e47ac 100644 --- a/ignite/contrib/handlers/tqdm_logger.py +++ b/ignite/contrib/handlers/tqdm_logger.py @@ -6,7 +6,7 @@ from ignite.contrib.handlers.base_logger import BaseLogger, BaseOutputHandler from ignite.engine import Engine, Events -from ignite.engine.events import CallableEventWithFilter, RemovableEventHandle +from ignite.engine.events import CallableEventWithFilter class ProgressBar(BaseLogger): @@ -185,16 +185,14 @@ def attach( # type: ignore[override] desc = self.tqdm_kwargs.get("desc", None) if event_name not in engine._allowed_events: - raise ValueError("Logging event {} is not in allowed events for this engine".format(event_name.name)) + raise ValueError(f"Logging event {event_name.name} is not in allowed events for this engine") if 
isinstance(closing_event_name, CallableEventWithFilter): if closing_event_name.filter != CallableEventWithFilter.default_event_filter: raise ValueError("Closing Event should not be a filtered event") if not self._compare_lt(event_name, closing_event_name): - raise ValueError( - "Logging event {} should be called before closing event {}".format(event_name, closing_event_name) - ) + raise ValueError(f"Logging event {event_name} should be called before closing event {closing_event_name}") log_handler = _OutputHandler(desc, metric_names, output_transform, closing_event_name=closing_event_name) @@ -266,7 +264,7 @@ def __call__(self, engine: Engine, logger: ProgressBar, event_name: Union[str, E max_num_of_closing_events = self.get_max_number_events(self.closing_event_name, engine) if max_num_of_closing_events and max_num_of_closing_events > 1: global_step = engine.state.get_event_attrib_value(self.closing_event_name) - desc += " [{}/{}]".format(global_step, max_num_of_closing_events) + desc += f" [{global_step}/{max_num_of_closing_events}]" logger.pbar.set_description(desc) # type: ignore[attr-defined] metrics = self._setup_output_metrics(engine) @@ -278,10 +276,10 @@ def __call__(self, engine: Engine, logger: ProgressBar, event_name: Union[str, E rendered_metrics[key] = value.item() elif value.ndimension() == 1: for i, v in enumerate(value): - k = "{}_{}".format(key, i) + k = f"{key}_{i}" rendered_metrics[k] = v.item() else: - warnings.warn("ProgressBar can not log " "tensor with {} dimensions".format(value.ndimension())) + warnings.warn("ProgressBar can not log " f"tensor with {value.ndimension()} dimensions") else: rendered_metrics[key] = value diff --git a/ignite/contrib/handlers/trains_logger.py b/ignite/contrib/handlers/trains_logger.py index 7c00da41c87d..a8007f9e6da0 100644 --- a/ignite/contrib/handlers/trains_logger.py +++ b/ignite/contrib/handlers/trains_logger.py @@ -307,8 +307,8 @@ def __call__(self, engine: Engine, logger: TrainsLogger, event_name: Union[str, if not isinstance(global_step, int): raise TypeError( - "global_step must be int, got {}." - " Please check the output of global_step_transform.".format(type(global_step)) + f"global_step must be int, got {type(global_step)}." + " Please check the output of global_step_transform." 
) for key, value in metrics.items(): @@ -317,10 +317,10 @@ def __call__(self, engine: Engine, logger: TrainsLogger, event_name: Union[str, elif isinstance(value, torch.Tensor) and value.ndimension() == 1: for i, v in enumerate(value): logger.trains_logger.report_scalar( - title="{}/{}".format(self.tag, key), series=str(i), iteration=global_step, value=v.item() + title=f"{self.tag}/{key}", series=str(i), iteration=global_step, value=v.item() ) else: - warnings.warn("TrainsLogger output_handler can not log metrics value type {}".format(type(value))) + warnings.warn(f"TrainsLogger output_handler can not log metrics value type {type(value)}") class OptimizerParamsHandler(BaseOptimizerParamsHandler): @@ -367,14 +367,14 @@ def __call__(self, engine: Engine, logger: TrainsLogger, event_name: Union[str, raise RuntimeError("Handler OptimizerParamsHandler works only with TrainsLogger") global_step = engine.state.get_event_attrib_value(event_name) - tag_prefix = "{}/".format(self.tag) if self.tag else "" + tag_prefix = f"{self.tag}/" if self.tag else "" params = { str(i): float(param_group[self.param_name]) for i, param_group in enumerate(self.optimizer.param_groups) } for k, v in params.items(): logger.trains_logger.report_scalar( - title="{}{}".format(tag_prefix, self.param_name), series=k, value=v, iteration=global_step + title=f"{tag_prefix}{self.param_name}", series=k, value=v, iteration=global_step ) @@ -419,14 +419,14 @@ def __call__(self, engine: Engine, logger: TrainsLogger, event_name: Union[str, raise RuntimeError("Handler WeightsScalarHandler works only with TrainsLogger") global_step = engine.state.get_event_attrib_value(event_name) - tag_prefix = "{}/".format(self.tag) if self.tag else "" + tag_prefix = f"{self.tag}/" if self.tag else "" for name, p in self.model.named_parameters(): if p.grad is None: continue title_name, _, series_name = name.partition(".") logger.trains_logger.report_scalar( - title="{}weights_{}/{}".format(tag_prefix, self.reduction.__name__, title_name), + title=f"{tag_prefix}weights_{self.reduction.__name__}/{title_name}", series=series_name, value=self.reduction(p.data), iteration=global_step, @@ -470,7 +470,7 @@ def __call__(self, engine: Engine, logger: TrainsLogger, event_name: Union[str, raise RuntimeError("Handler 'WeightsHistHandler' works only with TrainsLogger") global_step = engine.state.get_event_attrib_value(event_name) - tag_prefix = "{}/".format(self.tag) if self.tag else "" + tag_prefix = f"{self.tag}/" if self.tag else "" for name, p in self.model.named_parameters(): if p.grad is None: continue @@ -478,7 +478,7 @@ def __call__(self, engine: Engine, logger: TrainsLogger, event_name: Union[str, title_name, _, series_name = name.partition(".") logger.grad_helper.add_histogram( - title="{}weights_{}".format(tag_prefix, title_name), + title=f"{tag_prefix}weights_{title_name}", series=series_name, step=global_step, hist_data=p.grad.detach().cpu().numpy(), @@ -525,14 +525,14 @@ def __call__(self, engine: Engine, logger: TrainsLogger, event_name: Union[str, raise RuntimeError("Handler GradsScalarHandler works only with TrainsLogger") global_step = engine.state.get_event_attrib_value(event_name) - tag_prefix = "{}/".format(self.tag) if self.tag else "" + tag_prefix = f"{self.tag}/" if self.tag else "" for name, p in self.model.named_parameters(): if p.grad is None: continue title_name, _, series_name = name.partition(".") logger.trains_logger.report_scalar( - title="{}grads_{}/{}".format(tag_prefix, self.reduction.__name__, title_name), + 
title=f"{tag_prefix}grads_{self.reduction.__name__}/{title_name}", series=series_name, value=self.reduction(p.data), iteration=global_step, @@ -576,7 +576,7 @@ def __call__(self, engine: Engine, logger: TrainsLogger, event_name: Union[str, raise RuntimeError("Handler 'GradsHistHandler' works only with TrainsLogger") global_step = engine.state.get_event_attrib_value(event_name) - tag_prefix = "{}/".format(self.tag) if self.tag else "" + tag_prefix = f"{self.tag}/" if self.tag else "" for name, p in self.model.named_parameters(): if p.grad is None: continue @@ -584,7 +584,7 @@ def __call__(self, engine: Engine, logger: TrainsLogger, event_name: Union[str, title_name, _, series_name = name.partition(".") logger.grad_helper.add_histogram( - title="{}grads_{}".format(tag_prefix, title_name), + title=f"{tag_prefix}grads_{title_name}", series=series_name, step=global_step, hist_data=p.grad.detach().cpu().numpy(), @@ -646,13 +646,11 @@ def __init__( if not dirname: dirname = "" if idist.get_rank() == 0: - dirname = tempfile.mkdtemp( - prefix="ignite_checkpoints_{}".format(datetime.now().strftime("%Y_%m_%d_%H_%M_%S_")) - ) + dirname = tempfile.mkdtemp(prefix=f"ignite_checkpoints_{datetime.now().strftime('%Y_%m_%d_%H_%M_%S_')}") if idist.get_world_size() > 1: dirname = idist.all_gather(dirname)[0] # type: ignore[index, assignment] - warnings.warn("TrainsSaver created a temporary checkpoints directory: {}".format(dirname)) + warnings.warn(f"TrainsSaver created a temporary checkpoints directory: {dirname}") idist.barrier() # Let's set non-atomic tmp dir saving behaviour @@ -714,15 +712,15 @@ def pre_callback(self, action: str, model_info: Any) -> Any: self._slots.append(model_info.upload_filename) slot = len(self._slots) - 1 - model_info.upload_filename = "{}_{}{}".format(self._basename, slot, os.path.splitext(self._filename)[1]) - model_info.local_model_id = "{}:{}".format(self._checkpoint_key, model_info.upload_filename) + model_info.upload_filename = f"{self._basename}_{slot}{os.path.splitext(self._filename)[1]}" + model_info.local_model_id = f"{self._checkpoint_key}:{model_info.upload_filename}" return model_info def post_callback(self, action: str, model_info: Any) -> Any: if action != self._callback_type.save: # type: ignore[attr-defined] return model_info - model_info.model.name = "{}: {}".format(model_info.task.name, self._filename) + model_info.model.name = f"{model_info.task.name}: {self._filename}" prefix = "Checkpoint Metadata: " metadata = "{}{}".format( prefix, ", ".join("{}={}".format(k, v) for k, v in self._metadata.items()) if self._metadata else "none" @@ -789,7 +787,7 @@ def get_local_copy(self, filename: str) -> Optional[str]: artifact = self._task.artifacts.get(filename) if artifact: return artifact.get_local_copy() - self._task.get_logger().report_text("Can not find artifact {}".format(filename)) + self._task.get_logger().report_text(f"Can not find artifact {filename}") return None diff --git a/ignite/contrib/handlers/visdom_logger.py b/ignite/contrib/handlers/visdom_logger.py index 7b737c6bea85..eabd11b4d22e 100644 --- a/ignite/contrib/handlers/visdom_logger.py +++ b/ignite/contrib/handlers/visdom_logger.py @@ -186,9 +186,7 @@ def __init__( self.vis = visdom.Visdom(server=server, port=port, raise_exceptions=raise_exceptions, **kwargs) if not self.vis.offline and not self.vis.check_connection(): # type: ignore[attr-defined] - raise RuntimeError( - "Failed to connect to Visdom server at {}. 
Did you run python -m visdom.server ?".format(server) - ) + raise RuntimeError(f"Failed to connect to Visdom server at {server}. Did you run python -m visdom.server ?") self.executor = _DummyExecutor() # type: Union[_DummyExecutor, "ThreadPoolExecutor"] if num_workers > 0: @@ -363,8 +361,8 @@ def __call__(self, engine: Engine, logger: VisdomLogger, event_name: Union[str, if not isinstance(global_step, int): raise TypeError( - "global_step must be int, got {}." - " Please check the output of global_step_transform.".format(type(global_step)) + f"global_step must be int, got {type(global_step)}." + " Please check the output of global_step_transform." ) for key, value in metrics.items(): @@ -376,12 +374,12 @@ def __call__(self, engine: Engine, logger: VisdomLogger, event_name: Union[str, keys.append(key) elif isinstance(value, torch.Tensor) and value.ndimension() == 1: values = value # type: ignore[assignment] - keys = ["{}/{}".format(key, i) for i in range(len(value))] + keys = [f"{key}/{i}" for i in range(len(value))] else: - warnings.warn("VisdomLogger output_handler can not log " "metrics value type {}".format(type(value))) + warnings.warn("VisdomLogger output_handler can not log " f"metrics value type {type(value)}") for k, v in zip(keys, values): - k = "{}/{}".format(self.tag, k) + k = f"{self.tag}/{k}" self.add_scalar(logger, k, v, event_name, global_step) logger._save() @@ -431,9 +429,9 @@ def __call__(self, engine: Engine, logger: VisdomLogger, event_name: Union[str, raise RuntimeError("Handler OptimizerParamsHandler works only with VisdomLogger") global_step = engine.state.get_event_attrib_value(event_name) - tag_prefix = "{}/".format(self.tag) if self.tag else "" + tag_prefix = f"{self.tag}/" if self.tag else "" params = { - "{}{}/group_{}".format(tag_prefix, self.param_name, i): float(param_group[self.param_name]) + f"{tag_prefix}{self.param_name}/group_{i}": float(param_group[self.param_name]) for i, param_group in enumerate(self.optimizer.param_groups) } @@ -483,10 +481,10 @@ def __call__(self, engine: Engine, logger: VisdomLogger, event_name: Union[str, raise RuntimeError("Handler 'WeightsScalarHandler' works only with VisdomLogger") global_step = engine.state.get_event_attrib_value(event_name) - tag_prefix = "{}/".format(self.tag) if self.tag else "" + tag_prefix = f"{self.tag}/" if self.tag else "" for name, p in self.model.named_parameters(): name = name.replace(".", "/") - k = "{}weights_{}/{}".format(tag_prefix, self.reduction.__name__, name) + k = f"{tag_prefix}weights_{self.reduction.__name__}/{name}" v = float(self.reduction(p.data)) self.add_scalar(logger, k, v, event_name, global_step) @@ -533,10 +531,10 @@ def __call__(self, engine: Engine, logger: VisdomLogger, event_name: Union[str, raise RuntimeError("Handler 'GradsScalarHandler' works only with VisdomLogger") global_step = engine.state.get_event_attrib_value(event_name) - tag_prefix = "{}/".format(self.tag) if self.tag else "" + tag_prefix = f"{self.tag}/" if self.tag else "" for name, p in self.model.named_parameters(): name = name.replace(".", "/") - k = "{}grads_{}/{}".format(tag_prefix, self.reduction.__name__, name) + k = f"{tag_prefix}grads_{self.reduction.__name__}/{name}" v = float(self.reduction(p.grad)) self.add_scalar(logger, k, v, event_name, global_step) diff --git a/ignite/contrib/handlers/wandb_logger.py b/ignite/contrib/handlers/wandb_logger.py index a101f4d1f56c..ff7dbddcbcbd 100644 --- a/ignite/contrib/handlers/wandb_logger.py +++ b/ignite/contrib/handlers/wandb_logger.py @@ -258,13 +258,13 @@ 
def __init__( def __call__(self, engine: Engine, logger: WandBLogger, event_name: Union[str, Events]) -> None: if not isinstance(logger, WandBLogger): - raise RuntimeError("Handler '{}' works only with WandBLogger.".format(self.__class__.__name__)) + raise RuntimeError(f"Handler '{self.__class__.__name__}' works only with WandBLogger.") global_step = self.global_step_transform(engine, event_name) # type: ignore[misc] if not isinstance(global_step, int): raise TypeError( - "global_step must be int, got {}." - " Please check the output of global_step_transform.".format(type(global_step)) + f"global_step must be int, got {type(global_step)}." + " Please check the output of global_step_transform." ) metrics = self._setup_output_metrics(engine) @@ -327,9 +327,9 @@ def __call__(self, engine: Engine, logger: WandBLogger, event_name: Union[str, E raise RuntimeError("Handler OptimizerParamsHandler works only with WandBLogger") global_step = engine.state.get_event_attrib_value(event_name) - tag_prefix = "{}/".format(self.tag) if self.tag else "" + tag_prefix = f"{self.tag}/" if self.tag else "" params = { - "{}{}/group_{}".format(tag_prefix, self.param_name, i): float(param_group[self.param_name]) + f"{tag_prefix}{self.param_name}/group_{i}": float(param_group[self.param_name]) for i, param_group in enumerate(self.optimizer.param_groups) } logger.log(params, step=global_step, sync=self.sync) diff --git a/ignite/utils.py b/ignite/utils.py index 1b805d588bdf..e3637e63c569 100644 --- a/ignite/utils.py +++ b/ignite/utils.py @@ -48,7 +48,7 @@ def apply_to_type( return cast(Callable, type(input_))(*(apply_to_type(sample, input_type, func) for sample in input_)) if isinstance(input_, collections.Sequence): return cast(Callable, type(input_))([apply_to_type(sample, input_type, func) for sample in input_]) - raise TypeError(("input must contain {}, dicts or lists; found {}".format(input_type, type(input_)))) + raise TypeError((f"input must contain {input_type}, dicts or lists; found {type(input_)}")) def to_onehot(indices: torch.Tensor, num_classes: int) -> torch.Tensor: From 32875121cd6f3775b45b23ba55b0e459cbf4543e Mon Sep 17 00:00:00 2001 From: steven Date: Thu, 17 Dec 2020 14:31:04 -0700 Subject: [PATCH 09/29] metadata items f-string --- ignite/contrib/handlers/trains_logger.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/ignite/contrib/handlers/trains_logger.py b/ignite/contrib/handlers/trains_logger.py index a8007f9e6da0..dae323ce2bae 100644 --- a/ignite/contrib/handlers/trains_logger.py +++ b/ignite/contrib/handlers/trains_logger.py @@ -722,9 +722,8 @@ def post_callback(self, action: str, model_info: Any) -> Any: model_info.model.name = f"{model_info.task.name}: {self._filename}" prefix = "Checkpoint Metadata: " - metadata = "{}{}".format( - prefix, ", ".join("{}={}".format(k, v) for k, v in self._metadata.items()) if self._metadata else "none" - ) + metadata_items = ", ".join(f"{k}={v}" for k, v in self._metadata.items()) if self._metadata else "none" + metadata = f"{prefix}{metadata_items}" comment = "\n".join( metadata if line.startswith(prefix) else line for line in (model_info.model.comment or "").split("\n") ) From 963467f37c71774d51dfaf010bfbc1bd2cb6b8c5 Mon Sep 17 00:00:00 2001 From: steven Date: Thu, 17 Dec 2020 18:50:37 -0700 Subject: [PATCH 10/29] retrigger checks From b810b2d9f5956f601dcef06801b074884c6d4b75 Mon Sep 17 00:00:00 2001 From: steven Date: Thu, 17 Dec 2020 18:51:30 -0700 Subject: [PATCH 11/29] retrigger checks From 
8f8bdb871e220fa73d4704aa811070db16eff3ee Mon Sep 17 00:00:00 2001 From: steven Date: Thu, 17 Dec 2020 18:52:30 -0700 Subject: [PATCH 12/29] retrigger checks From a5227c785fabd57941783eb99b7f0fa5aa3dc8cb Mon Sep 17 00:00:00 2001 From: steven Date: Thu, 17 Dec 2020 19:01:01 -0700 Subject: [PATCH 13/29] RemovableEventHandle --- ignite/contrib/handlers/tqdm_logger.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py index 30df129e47ac..3e10c1fa5b05 100644 --- a/ignite/contrib/handlers/tqdm_logger.py +++ b/ignite/contrib/handlers/tqdm_logger.py @@ -6,7 +6,7 @@ from ignite.contrib.handlers.base_logger import BaseLogger, BaseOutputHandler from ignite.engine import Engine, Events -from ignite.engine.events import CallableEventWithFilter +from ignite.engine.events import CallableEventWithFilter, RemovableEventHandle class ProgressBar(BaseLogger): From 003956eb27157886fbeaafb793671c85b0205b2b Mon Sep 17 00:00:00 2001 From: steven Date: Fri, 18 Dec 2020 00:18:48 -0700 Subject: [PATCH 14/29] f-strings for rows --- ignite/contrib/handlers/time_profilers.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ignite/contrib/handlers/time_profilers.py b/ignite/contrib/handlers/time_profilers.py index c15ec4949d49..375f986928f6 100644 --- a/ignite/contrib/handlers/time_profilers.py +++ b/ignite/contrib/handlers/time_profilers.py @@ -751,8 +751,8 @@ def append(s: str) -> None: for row in results[:-3]: # format min/idx and max/idx - row[3] = "{}/{}".format(*row[3]) # type: ignore[misc] - row[4] = "{}/{}".format(*row[4]) # type: ignore[misc] + row[3] = f"{row[3][0]}/{row[3][1]}" # type: ignore[misc] + row[4] = f"{row[4][0]}/{row[4][1]}" # type: ignore[misc] append(row_format.format(*row)) @@ -763,8 +763,8 @@ def append(s: str) -> None: summary_format = "{} took total {}s [min/index: {}, max/index: {}, mean: {}s, std: {}s]" for row in results[-2:]: - row[3] = "{}s/{}".format(*row[3]) # type: ignore[misc] - row[4] = "{}s/{}".format(*row[4]) # type: ignore[misc] + row[3] = f"{row[3][0]}s/{row[3][1]}" # type: ignore[misc] + row[4] = f"{row[4][0]}s/{row[4][1]}" # type: ignore[misc] del row[1] append(summary_format.format(*row)) print("".join(result)) From bbbaa1088cd23849fda7b3491452bea7bd8f624e Mon Sep 17 00:00:00 2001 From: steven Date: Fri, 18 Dec 2020 00:42:50 -0700 Subject: [PATCH 15/29] Revert "f-strings for rows" This reverts commit 003956eb27157886fbeaafb793671c85b0205b2b. 
--- ignite/contrib/handlers/time_profilers.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ignite/contrib/handlers/time_profilers.py b/ignite/contrib/handlers/time_profilers.py index 375f986928f6..c15ec4949d49 100644 --- a/ignite/contrib/handlers/time_profilers.py +++ b/ignite/contrib/handlers/time_profilers.py @@ -751,8 +751,8 @@ def append(s: str) -> None: for row in results[:-3]: # format min/idx and max/idx - row[3] = f"{row[3][0]}/{row[3][1]}" # type: ignore[misc] - row[4] = f"{row[4][0]}/{row[4][1]}" # type: ignore[misc] + row[3] = "{}/{}".format(*row[3]) # type: ignore[misc] + row[4] = "{}/{}".format(*row[4]) # type: ignore[misc] append(row_format.format(*row)) @@ -763,8 +763,8 @@ def append(s: str) -> None: summary_format = "{} took total {}s [min/index: {}, max/index: {}, mean: {}s, std: {}s]" for row in results[-2:]: - row[3] = f"{row[3][0]}s/{row[3][1]}" # type: ignore[misc] - row[4] = f"{row[4][0]}s/{row[4][1]}" # type: ignore[misc] + row[3] = "{}s/{}".format(*row[3]) # type: ignore[misc] + row[4] = "{}s/{}".format(*row[4]) # type: ignore[misc] del row[1] append(summary_format.format(*row)) print("".join(result)) From 66f8576ac55b8ace19b6426b9d6ad88a100aae5c Mon Sep 17 00:00:00 2001 From: steven Date: Fri, 18 Dec 2020 09:32:07 -0700 Subject: [PATCH 16/29] combining strings --- ignite/contrib/handlers/base_logger.py | 8 ++++---- ignite/contrib/handlers/mlflow_logger.py | 2 +- ignite/contrib/handlers/param_scheduler.py | 8 ++++---- ignite/contrib/handlers/polyaxon_logger.py | 2 +- ignite/contrib/handlers/time_profilers.py | 2 +- ignite/contrib/handlers/tqdm_logger.py | 2 +- ignite/contrib/handlers/visdom_logger.py | 2 +- 7 files changed, 13 insertions(+), 13 deletions(-) diff --git a/ignite/contrib/handlers/base_logger.py b/ignite/contrib/handlers/base_logger.py index 3d2c3c64dab4..44707d668d79 100644 --- a/ignite/contrib/handlers/base_logger.py +++ b/ignite/contrib/handlers/base_logger.py @@ -53,7 +53,7 @@ def __init__( if metric_names is not None: if not (isinstance(metric_names, list) or (isinstance(metric_names, str) and metric_names == "all")): raise TypeError( - "metric_names should be either a list or equal 'all', " f"got {type(metric_names)} instead." + f"metric_names should be either a list or equal 'all', got {type(metric_names)} instead." 
) if output_transform is not None and not callable(output_transform): @@ -109,10 +109,10 @@ class BaseWeightsScalarHandler(BaseHandler): def __init__(self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None) -> None: if not isinstance(model, torch.nn.Module): - raise TypeError("Argument model should be of type torch.nn.Module, " f"but given {type(model)}") + raise TypeError(f"Argument model should be of type torch.nn.Module, but given {type(model)}") if not callable(reduction): - raise TypeError("Argument reduction should be callable, " f"but given {type(reduction)}") + raise TypeError(f"Argument reduction should be callable, but given {type(reduction)}") def _is_0D_tensor(t: torch.Tensor) -> bool: return isinstance(t, torch.Tensor) and t.ndimension() == 0 @@ -134,7 +134,7 @@ class BaseWeightsHistHandler(BaseHandler): def __init__(self, model: nn.Module, tag: Optional[str] = None): if not isinstance(model, torch.nn.Module): - raise TypeError("Argument model should be of type torch.nn.Module, " f"but given {type(model)}") + raise TypeError(f"Argument model should be of type torch.nn.Module, but given {type(model)}") self.model = model self.tag = tag diff --git a/ignite/contrib/handlers/mlflow_logger.py b/ignite/contrib/handlers/mlflow_logger.py index d01af2d4621e..d8a7819d8e6a 100644 --- a/ignite/contrib/handlers/mlflow_logger.py +++ b/ignite/contrib/handlers/mlflow_logger.py @@ -240,7 +240,7 @@ def __call__(self, engine: Engine, logger: MLflowLogger, event_name: Union[str, for i, v in enumerate(value): rendered_metrics[f"{self.tag} {key} {i}"] = v.item() else: - warnings.warn("MLflowLogger output_handler can not log " f"metrics value type {type(value)}") + warnings.warn(f"MLflowLogger output_handler can not log metrics value type {type(value)}") # Additionally recheck metric names as MLflow rejects non-valid names with MLflowException from mlflow.utils.validation import _VALID_PARAM_AND_METRIC_NAMES diff --git a/ignite/contrib/handlers/param_scheduler.py b/ignite/contrib/handlers/param_scheduler.py index 83966aeb80ef..81e41fa9cbc2 100644 --- a/ignite/contrib/handlers/param_scheduler.py +++ b/ignite/contrib/handlers/param_scheduler.py @@ -450,7 +450,7 @@ def __init__(self, schedulers: List[ParamScheduler], durations: List[int], save_ if len(schedulers) < 2: raise ValueError( - "Argument schedulers should be of more than one parameter schedulers, " f"but given {schedulers}" + f"Argument schedulers should be of more than one parameter schedulers, but given {schedulers}" ) if not isinstance(durations, (list, tuple)): @@ -467,7 +467,7 @@ def __init__(self, schedulers: List[ParamScheduler], durations: List[int], save_ for i, scheduler in enumerate(schedulers): if not isinstance(scheduler, ParamScheduler) and not isinstance(scheduler, ParamGroupScheduler): raise TypeError( - f"Value at index {i} of schedulers should be a parameter scheduler, " f"but given {type(scheduler)}" + f"Value at index {i} of schedulers should be a parameter scheduler, but given {type(scheduler)}" ) self.schedulers = schedulers @@ -917,7 +917,7 @@ def __init__( ) if len(milestones_values) < 1: raise ValueError( - "Argument milestones_values should be with at least one value, " f"but given {milestones_values}" + f"Argument milestones_values should be with at least one value, but given {milestones_values}" ) values = [] # type: List[float] @@ -1073,7 +1073,7 @@ def load_state_dict(self, state_dict: Mapping) -> None: for req_n, s, (n, sd) in zip(self.names, self.schedulers, sds): if req_n != n: 
raise ValueError( - "Name of scheduler from input state dict does not correspond to required one," f" {n} vs {req_n}" + f"Name of scheduler from input state dict does not correspond to required one, {n} vs {req_n}" ) s.load_state_dict(sd) diff --git a/ignite/contrib/handlers/polyaxon_logger.py b/ignite/contrib/handlers/polyaxon_logger.py index 0b39aeb58451..62d3b4629c85 100644 --- a/ignite/contrib/handlers/polyaxon_logger.py +++ b/ignite/contrib/handlers/polyaxon_logger.py @@ -232,7 +232,7 @@ def __call__(self, engine: Engine, logger: PolyaxonLogger, event_name: Union[str for i, v in enumerate(value): rendered_metrics[f"{self.tag}/{key}/{i}"] = v.item() else: - warnings.warn("PolyaxonLogger output_handler can not log " f"metrics value type {type(value)}") + warnings.warn(f"PolyaxonLogger output_handler can not log metrics value type {type(value)}") logger.log_metrics(**rendered_metrics) diff --git a/ignite/contrib/handlers/time_profilers.py b/ignite/contrib/handlers/time_profilers.py index c15ec4949d49..c081fcefb803 100644 --- a/ignite/contrib/handlers/time_profilers.py +++ b/ignite/contrib/handlers/time_profilers.py @@ -210,7 +210,7 @@ def _as_last_completed(self, engine: Engine) -> None: def attach(self, engine: Engine) -> None: if not isinstance(engine, Engine): - raise TypeError("Argument engine should be ignite.engine.Engine, " f"but given {type(engine)}") + raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}") if not engine.has_event_handler(self._as_first_started): engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {})) diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py index 3e10c1fa5b05..f4d78fd7322e 100644 --- a/ignite/contrib/handlers/tqdm_logger.py +++ b/ignite/contrib/handlers/tqdm_logger.py @@ -279,7 +279,7 @@ def __call__(self, engine: Engine, logger: ProgressBar, event_name: Union[str, E k = f"{key}_{i}" rendered_metrics[k] = v.item() else: - warnings.warn("ProgressBar can not log " f"tensor with {value.ndimension()} dimensions") + warnings.warn(f"ProgressBar can not log tensor with {value.ndimension()} dimensions") else: rendered_metrics[key] = value diff --git a/ignite/contrib/handlers/visdom_logger.py b/ignite/contrib/handlers/visdom_logger.py index eabd11b4d22e..d03b45e74cd2 100644 --- a/ignite/contrib/handlers/visdom_logger.py +++ b/ignite/contrib/handlers/visdom_logger.py @@ -376,7 +376,7 @@ def __call__(self, engine: Engine, logger: VisdomLogger, event_name: Union[str, values = value # type: ignore[assignment] keys = [f"{key}/{i}" for i in range(len(value))] else: - warnings.warn("VisdomLogger output_handler can not log " f"metrics value type {type(value)}") + warnings.warn(f"VisdomLogger output_handler can not log metrics value type {type(value)}") for k, v in zip(keys, values): k = f"{self.tag}/{k}" From 6da60da2f14715c37d5469c06f2e0c70d3398f33 Mon Sep 17 00:00:00 2001 From: steven Date: Fri, 18 Dec 2020 22:50:28 -0700 Subject: [PATCH 17/29] schedulers --- ignite/contrib/handlers/param_scheduler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ignite/contrib/handlers/param_scheduler.py b/ignite/contrib/handlers/param_scheduler.py index 81e41fa9cbc2..11e2b97f8e44 100644 --- a/ignite/contrib/handlers/param_scheduler.py +++ b/ignite/contrib/handlers/param_scheduler.py @@ -526,7 +526,7 @@ def load_state_dict(self, state_dict: Mapping) -> None: if "schedulers" not in state_dict: raise ValueError( - f"Required state attribute 
'{'schedulers'}' is absent in provided state_dict '{state_dict.keys()}'" + f"Required state attribute 'schedulers' is absent in provided state_dict '{state_dict.keys()}'" ) sds = state_dict["schedulers"] if len(sds) != len(self.schedulers): From d295f8707f0333da4cecec0cc04c2bcf46017ab6 Mon Sep 17 00:00:00 2001 From: steven Date: Sat, 19 Dec 2020 13:16:42 -0700 Subject: [PATCH 18/29] breaking up large pull --- examples/contrib/cifar10/main.py | 7 ++--- ignite/contrib/metrics/gpu_info.py | 14 ++++------ ignite/contrib/metrics/regression/_base.py | 10 +++---- ignite/distributed/auto.py | 8 +++--- ignite/distributed/comp_models/base.py | 8 +++--- ignite/distributed/comp_models/horovod.py | 6 ++-- ignite/distributed/comp_models/native.py | 20 ++++++-------- ignite/distributed/comp_models/xla.py | 2 +- ignite/distributed/launcher.py | 32 +++++++++------------- ignite/distributed/utils.py | 28 +++++++++---------- 10 files changed, 60 insertions(+), 75 deletions(-) diff --git a/examples/contrib/cifar10/main.py b/examples/contrib/cifar10/main.py index 49964fa3951b..922cbb0b6c22 100644 --- a/examples/contrib/cifar10/main.py +++ b/examples/contrib/cifar10/main.py @@ -243,11 +243,8 @@ def initialize(config): def log_metrics(logger, epoch, elapsed, tag, metrics): - logger.info( - "\nEpoch {} - elapsed: {} - {} metrics:\n {}".format( - epoch, elapsed, tag, "\n".join(["\t{}: {}".format(k, v) for k, v in metrics.items()]) - ) - ) + metrics_output = "\n".join([f"\t{k}: {v}" for k, v in metrics.items()]) + logger.info(f"\nEpoch {epoch} - elapsed: {elapsed} - {tag} metrics:\n {metrics_output}") def log_basic_info(logger, config): diff --git a/ignite/contrib/metrics/gpu_info.py b/ignite/contrib/metrics/gpu_info.py index 7ab49447f395..8cdad53f7164 100644 --- a/ignite/contrib/metrics/gpu_info.py +++ b/ignite/contrib/metrics/gpu_info.py @@ -74,31 +74,29 @@ def completed(self, engine: Engine, name: str) -> None: return for i, data_by_rank in enumerate(data): - mem_name = "{}:{} mem(%)".format(name, i) + mem_name = f"{name}:{i} mem(%)" if "fb_memory_usage" not in data_by_rank: - warnings.warn("No GPU memory usage information available in {}".format(data_by_rank)) + warnings.warn(f"No GPU memory usage information available in {data_by_rank}") continue mem_report = data_by_rank["fb_memory_usage"] if not ("used" in mem_report and "total" in mem_report): warnings.warn( "GPU memory usage information does not provide used/total " - "memory consumption information in {}".format(mem_report) + f"memory consumption information in {mem_report}" ) continue engine.state.metrics[mem_name] = int(mem_report["used"] * 100.0 / mem_report["total"]) for i, data_by_rank in enumerate(data): - util_name = "{}:{} util(%)".format(name, i) + util_name = f"{name}:{i} util(%)" if "utilization" not in data_by_rank: - warnings.warn("No GPU utilization information available in {}".format(data_by_rank)) + warnings.warn(f"No GPU utilization information available in {data_by_rank}") continue util_report = data_by_rank["utilization"] if not ("gpu_util" in util_report): - warnings.warn( - "GPU utilization information does not provide 'gpu_util' information in {}".format(util_report) - ) + warnings.warn(f"GPU utilization information does not provide 'gpu_util' information in {util_report}") continue try: engine.state.metrics[util_name] = int(util_report["gpu_util"]) diff --git a/ignite/contrib/metrics/regression/_base.py b/ignite/contrib/metrics/regression/_base.py index b25e574c05be..540526a1e6b3 100644 --- 
a/ignite/contrib/metrics/regression/_base.py +++ b/ignite/contrib/metrics/regression/_base.py @@ -10,24 +10,24 @@ def _check_output_shapes(output: Tuple[torch.Tensor, torch.Tensor]) -> None: y_pred, y = output if y_pred.shape != y.shape: - raise ValueError("Input data shapes should be the same, but given {} and {}".format(y_pred.shape, y.shape)) + raise ValueError(f"Input data shapes should be the same, but given {y_pred.shape} and {y.shape}") c1 = y_pred.ndimension() == 2 and y_pred.shape[1] == 1 if not (y_pred.ndimension() == 1 or c1): - raise ValueError("Input y_pred should have shape (N,) or (N, 1), but given {}".format(y_pred.shape)) + raise ValueError(f"Input y_pred should have shape (N,) or (N, 1), but given {y_pred.shape}") c2 = y.ndimension() == 2 and y.shape[1] == 1 if not (y.ndimension() == 1 or c2): - raise ValueError("Input y should have shape (N,) or (N, 1), but given {}".format(y.shape)) + raise ValueError(f"Input y should have shape (N,) or (N, 1), but given {y.shape}") def _check_output_types(output: Tuple[torch.Tensor, torch.Tensor]) -> None: y_pred, y = output if y_pred.dtype not in (torch.float16, torch.float32, torch.float64): - raise TypeError("Input y_pred dtype should be float 16, 32 or 64, but given {}".format(y_pred.dtype)) + raise TypeError(f"Input y_pred dtype should be float 16, 32 or 64, but given {y_pred.dtype}") if y.dtype not in (torch.float16, torch.float32, torch.float64): - raise TypeError("Input y dtype should be float 16, 32 or 64, but given {}".format(y.dtype)) + raise TypeError(f"Input y dtype should be float 16, 32 or 64, but given {y.dtype}") class _BaseRegression(Metric): diff --git a/ignite/distributed/auto.py b/ignite/distributed/auto.py index 2169f83226a7..5d7ef1f47992 100644 --- a/ignite/distributed/auto.py +++ b/ignite/distributed/auto.py @@ -103,7 +103,7 @@ def auto_dataloader(dataset: Dataset, **kwargs: Any) -> Union[DataLoader, "_MpDe else: kwargs["pin_memory"] = kwargs.get("pin_memory", "cuda" in idist.device().type) - logger.info("Use data loader kwargs for dataset '{}': \n\t{}".format(repr(dataset)[:20].strip(), kwargs)) + logger.info(f"Use data loader kwargs for dataset '{repr(dataset)[:20].strip()}': \n\t{kwargs}") dataloader = DataLoader(dataset, **kwargs) if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and world_size > 1: @@ -185,7 +185,7 @@ def auto_model(model: nn.Module, sync_bn: bool = False) -> nn.Module: model = nn.SyncBatchNorm.convert_sync_batchnorm(model) lrank = idist.get_local_rank() - logger.info("Apply torch DistributedDataParallel on model, device id: {}".format(lrank)) + logger.info(f"Apply torch DistributedDataParallel on model, device id: {lrank}") model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[lrank,]) elif idist.has_native_dist_support and bnd == idist_native.GLOO: if sync_bn: @@ -272,7 +272,7 @@ class DistributedProxySampler(DistributedSampler): def __init__(self, sampler: Sampler, num_replicas: Optional[int] = None, rank: Optional[int] = None) -> None: if not isinstance(sampler, Sampler): - raise TypeError("Argument sampler should be instance of torch Sampler, but given: {}".format(type(sampler))) + raise TypeError(f"Argument sampler should be instance of torch Sampler, but given: {type(sampler)}") if not hasattr(sampler, "__len__"): raise TypeError("Argument sampler should have length") @@ -296,7 +296,7 @@ def __iter__(self) -> Iterator: # subsample indices = indices[self.rank : self.total_size : self.num_replicas] if len(indices) != self.num_samples: - raise 
RuntimeError("{} vs {}".format(len(indices), self.num_samples)) + raise RuntimeError(f"{len(indices)} vs {self.num_samples}") return iter(indices) diff --git a/ignite/distributed/comp_models/base.py b/ignite/distributed/comp_models/base.py index 70549f9f1a9f..94d7f54c43a7 100644 --- a/ignite/distributed/comp_models/base.py +++ b/ignite/distributed/comp_models/base.py @@ -91,7 +91,7 @@ def _encode_str(x: str, device: torch.device) -> torch.Tensor: # use fix padded size size = 1024 if len(x) > size: - warnings.warn("Input string size {} is larger than {} and thus will be truncated".format(len(x), size)) + warnings.warn(f"Input string size {len(x)} is larger than {size} and thus will be truncated") x = x[:size] name = torch.tensor(bytearray(x, "utf-8")).to(device) @@ -159,19 +159,19 @@ def _collective_op( def all_reduce(self, tensor: Union[torch.Tensor, float], op: str = "sum") -> Union[torch.Tensor, float]: if not isinstance(tensor, (torch.Tensor, Number)): - raise TypeError("Unhandled input type {}".format(type(tensor))) + raise TypeError(f"Unhandled input type {type(tensor)}") return cast(Union[torch.Tensor, float], self._collective_op(tensor, self._do_all_reduce, op)) def all_gather(self, tensor: Union[torch.Tensor, float, str]) -> Union[torch.Tensor, float, List[float], List[str]]: if not isinstance(tensor, (torch.Tensor, Number, str)): - raise TypeError("Unhandled input type {}".format(type(tensor))) + raise TypeError(f"Unhandled input type {type(tensor)}") return self._collective_op(tensor, self._do_all_gather) def broadcast(self, tensor: Union[torch.Tensor, float, str], src: int = 0) -> Union[torch.Tensor, float, str]: if not isinstance(tensor, (torch.Tensor, Number, str)): - raise TypeError("Unhandled input type {}".format(type(tensor))) + raise TypeError(f"Unhandled input type {type(tensor)}") rank = self.get_rank() device = self.device() diff --git a/ignite/distributed/comp_models/horovod.py b/ignite/distributed/comp_models/horovod.py index 5f36fd3d08f3..329cac26d736 100644 --- a/ignite/distributed/comp_models/horovod.py +++ b/ignite/distributed/comp_models/horovod.py @@ -51,7 +51,7 @@ def create_from_context() -> Optional["_HorovodDistModel"]: @staticmethod def create_from_backend(backend: str, **kwargs: Any) -> "_HorovodDistModel": if backend not in _HorovodDistModel.available_backends: - raise ValueError("Backend should be one of '{}'".format(_HorovodDistModel.available_backends)) + raise ValueError(f"Backend should be one of '{_HorovodDistModel.available_backends}'") rank = _HorovodDistModel._get_hvd_rank() if has_hvd_support and rank > -1: @@ -103,7 +103,7 @@ def device(self) -> torch.device: "Current device index is less than current local rank. " "Please, make sure to call torch.cuda.set_device(local_rank)." 
) - return torch.device("cuda:{}".format(index)) + return torch.device(f"cuda:{index}") return torch.device("cpu") def backend(self) -> str: @@ -160,7 +160,7 @@ def spawn( # type: ignore[override] def _do_all_reduce(self, tensor: torch.Tensor, op: str = "SUM") -> torch.Tensor: if op not in self._reduce_op_map: - raise ValueError("Unsupported reduction operation: '{}'".format(op)) + raise ValueError(f"Unsupported reduction operation: '{op}'") op = self._reduce_op_map[op] return hvd.allreduce(tensor, op=op) diff --git a/ignite/distributed/comp_models/native.py b/ignite/distributed/comp_models/native.py index 4fcde6626684..64a81ee7e742 100644 --- a/ignite/distributed/comp_models/native.py +++ b/ignite/distributed/comp_models/native.py @@ -37,9 +37,7 @@ class _NativeDistModel(ComputationModel): """ name = "native-dist" - available_backends = tuple( - name for name in [NCCL, GLOO, MPI] if getattr(dist, "is_{}_available".format(name))() - ) + available_backends = tuple(name for name in [NCCL, GLOO, MPI] if getattr(dist, f"is_{name}_available")()) @staticmethod def create_from_context() -> Optional["_NativeDistModel"]: @@ -101,7 +99,7 @@ def _compute_nproc_per_node(self) -> int: device = torch.device("cpu") if self.backend() == dist.Backend.NCCL: # we manually set cuda device to local rank in order to avoid a hang on all_reduce - device = torch.device("cuda:{}".format(local_rank)) + device = torch.device(f"cuda:{local_rank}") tensor = torch.tensor([self.get_local_rank() + 1]).to(device) dist.all_reduce(tensor, op=dist.ReduceOp.MAX) return int(tensor.item()) @@ -112,7 +110,7 @@ def _get_all_hostnames(self) -> List[Tuple[str, ...]]: device = "cpu" if self.backend() == dist.Backend.NCCL: index = torch.cuda.current_device() - device = "cuda:{}".format(index) + device = f"cuda:{index}" hostname = socket.gethostname() name = torch.tensor(bytearray(hostname, "utf-8")).to(device) padded_t_name = torch.zeros(256, device=device, dtype=torch.long) @@ -140,9 +138,7 @@ def _compute_local_rank_via_hostname(self) -> int: if local_rank < 0 or self._node < 0: raise ValueError( "Failed to correctly estimate local rank. " - "Debugging info: local rank: {}, node rank: {}, hostnames: {}".format( - local_rank, self._node, hostnames - ) + f"Debugging info: local rank: {local_rank}, node rank: {self._node}, hostnames: {hostnames}" ) return local_rank @@ -179,7 +175,7 @@ def setup_env_vars(self) -> None: all_env_vars_defined = [k in os.environ for k in necessary_env_vars] if any(all_env_vars_defined) and not all(all_env_vars_defined): raise RuntimeError( - "PyTorch distributed configuration should define env variables '{}'".format(necessary_env_vars) + f"PyTorch distributed configuration should define env variables '{necessary_env_vars}'" ) os.environ["RANK"] = os.environ.get("RANK", "0") @@ -191,7 +187,7 @@ def setup_env_vars(self) -> None: def _setup_env_in_slurm(self) -> None: for k in ["SLURM_PROCID", "SLURM_LOCALID", "SLURM_NTASKS", "SLURM_JOB_NODELIST"]: if k not in os.environ: - raise RuntimeError("SLURM distributed configuration is missing '{}' in env variables".format(k)) + raise RuntimeError(f"SLURM distributed configuration is missing '{k}' in env variables") os.environ["RANK"] = os.environ["SLURM_PROCID"] os.environ["LOCAL_RANK"] = os.environ["SLURM_LOCALID"] @@ -230,7 +226,7 @@ def device(self) -> torch.device: "Current device index is less than current local rank. " "Please, make sure to call torch.cuda.set_device(local_rank)." 
) - return torch.device("cuda:{}".format(index)) + return torch.device(f"cuda:{index}") return torch.device("cpu") def backend(self) -> str: @@ -330,7 +326,7 @@ def spawn( # type: ignore[override] def _do_all_reduce(self, tensor: torch.Tensor, op: str = "SUM") -> torch.Tensor: if op not in self._reduce_op_map: - raise ValueError("Unsupported reduction operation: '{}'".format(op)) + raise ValueError(f"Unsupported reduction operation: '{op}'") reduce_op = self._reduce_op_map[op] dist.all_reduce(tensor, reduce_op) return tensor diff --git a/ignite/distributed/comp_models/xla.py b/ignite/distributed/comp_models/xla.py index fa981367f51f..325517e3e906 100644 --- a/ignite/distributed/comp_models/xla.py +++ b/ignite/distributed/comp_models/xla.py @@ -137,7 +137,7 @@ def spawn( # type: ignore[override] def _do_all_reduce(self, tensor: torch.Tensor, op: str = "SUM") -> torch.Tensor: if op not in self._reduce_op_map: - raise ValueError("Unsupported reduction operation: '{}'".format(op)) + raise ValueError(f"Unsupported reduction operation: '{op}'") op = self._reduce_op_map[op] xm.all_reduce(op, [tensor,]) return tensor diff --git a/ignite/distributed/launcher.py b/ignite/distributed/launcher.py index de2b14c5a14b..f51e25ae6c9a 100644 --- a/ignite/distributed/launcher.py +++ b/ignite/distributed/launcher.py @@ -185,17 +185,13 @@ def __init__( ) -> None: if backend is not None: if backend not in idist.available_backends(): - raise ValueError( - "Unknown backend '{}'. Available backends: {}".format(backend, idist.available_backends()) - ) + raise ValueError(f"Unknown backend '{backend}'. Available backends: {idist.available_backends()}") else: arg_names = ["nproc_per_node", "nnodes", "node_rank", "master_addr", "master_port"] arg_values = [nproc_per_node, nnodes, node_rank, master_addr, master_port] for name, value in zip(arg_names, arg_values): if value is not None: - raise ValueError( - "If backend is None, argument '{}' should be also None, but given {}".format(name, value) - ) + raise ValueError(f"If backend is None, argument '{name}' should be also None, but given {value}") self.backend = backend self._spawn_params = None @@ -209,9 +205,9 @@ def __init__( ) if self._spawn_params is not None: - self.logger.info("Initialized distributed launcher with backend: '{}'".format(self.backend)) - msg = "\n\t".join(["{}: {}".format(k, v) for k, v in self._spawn_params.items() if v is not None]) - self.logger.info("- Parameters to spawn processes: \n\t{}".format(msg)) + self.logger.info(f"Initialized distributed launcher with backend: '{self.backend}'") + msg = "\n\t".join([f"{k}: {v}" for k, v in self._spawn_params.items() if v is not None]) + self.logger.info(f"- Parameters to spawn processes: \n\t{msg}") @staticmethod def _setup_spawn_params( @@ -223,23 +219,21 @@ def _setup_spawn_params( **spawn_kwargs: Any ) -> Dict: if nproc_per_node < 1: - raise ValueError("Argument nproc_per_node should positive, but given {}".format(nproc_per_node)) + raise ValueError(f"Argument nproc_per_node should positive, but given {nproc_per_node}") if nnodes is None: nnodes = 1 if nnodes < 1: - raise ValueError("Argument nnodes should positive, but given {}".format(nnodes)) + raise ValueError(f"Argument nnodes should positive, but given {nnodes}") if node_rank is None: if nnodes > 1: raise ValueError("If number of nodes larger than one, arguments node_rank should be given") node_rank = 0 if node_rank >= nnodes or node_rank < 0: - raise ValueError( - "Argument node_rank should be between 0 and {}, but given {}".format(nnodes - 
1, node_rank) - ) + raise ValueError(f"Argument node_rank should be between 0 and {nnodes - 1}, but given {node_rank}") if nnodes > 1 and (master_addr is None or master_port is None): raise ValueError( "If number of nodes larger than one, arguments master_addr and master_port " - "should be specified, but given master_addr={} and master_port={}".format(master_addr, master_port) + f"should be specified, but given master_addr={master_addr} and master_port={master_port}" ) params = { "nproc_per_node": nproc_per_node, @@ -274,10 +268,10 @@ def training(local_rank, config, **kwargs): """ if self._spawn_params is not None and self.backend is not None: - self.logger.info("Spawn function '{}' in {} processes".format(func, self._spawn_params["nproc_per_node"])) + self.logger.info(f"Spawn function '{func}' in {self._spawn_params['nproc_per_node']} processes") idist.spawn(self.backend, func, args=args, kwargs_dict=kwargs, **self._spawn_params) else: - self.logger.info("- Run '{}' in {} processes".format(func, idist.get_world_size())) + self.logger.info(f"- Run '{func}' in {idist.get_world_size()} processes") local_rank = idist.get_local_rank() func(local_rank, *args, **kwargs) @@ -287,11 +281,11 @@ def __enter__(self) -> "Parallel": if (self.backend is not None) and self._spawn_params is None: idist.initialize(self.backend) self.logger = setup_logger(__name__ + "." + self.__class__.__name__) - self.logger.info("Initialized processing group with backend: '{}'".format(self.backend)) + self.logger.info(f"Initialized processing group with backend: '{self.backend}'") return self def __exit__(self, *args: Any, **kwargs: Any) -> None: if (self.backend is not None) and self._spawn_params is None: - self.logger.info("Finalized processing group with backend: '{}'".format(self.backend)) + self.logger.info(f"Finalized processing group with backend: '{self.backend}'") idist.finalize() diff --git a/ignite/distributed/utils.py b/ignite/distributed/utils.py index a7cf8dc4ee05..b15ef1351922 100644 --- a/ignite/distributed/utils.py +++ b/ignite/distributed/utils.py @@ -217,7 +217,7 @@ def train_fn(local_rank, a, b, c, d=12): assert dist.get_world_size() == 4 device = idist.device() - assert device == torch.device("cuda:{}".format(local_rank)) + assert device == torch.device(f"cuda:{local_rank}") idist.spawn("nccl", train_fn, args=(a, b, c), kwargs_dict={"d": 23}, nproc_per_node=4) @@ -243,7 +243,7 @@ def train_fn(local_rank, nnodes, nproc_per_node): assert dist.get_world_size() == nnodes * nproc_per_node device = idist.device() - assert device == torch.device("cuda:{}".format(local_rank)) + assert device == torch.device(f"cuda:{local_rank}") idist.spawn( "nccl", @@ -442,7 +442,7 @@ def _set_model(model: Any, temporary: bool = False) -> None: def _assert_backend(backend: str) -> None: backends = available_backends() if backend not in backends: - raise ValueError("Backend should be one of '{}'".format(backends)) + raise ValueError(f"Backend should be one of '{backends}'") def initialize(backend: str, **kwargs: Any) -> None: @@ -466,7 +466,7 @@ def train_fn(local_rank, a, b, c): assert dist.get_world_size() == 4 device = idist.device() - assert device == torch.device("cuda:{}".format(local_rank)) + assert device == torch.device(f"cuda:{local_rank}") idist.initialize("nccl") @@ -514,16 +514,16 @@ def show_config() -> None: # setup parallel logger logger = setup_logger(__name__) - logger.info("distributed configuration: {}".format(model_name())) - logger.info("backend: {}".format(backend())) - logger.info("device: 
{}".format(device().type)) - logger.info("hostname: {}".format(hostname())) - logger.info("world size: {}".format(get_world_size())) - logger.info("rank: {}".format(get_rank())) - logger.info("local rank: {}".format(get_local_rank())) - logger.info("num processes per_node: {}".format(get_nproc_per_node())) - logger.info("num nodes: {}".format(get_nnodes())) - logger.info("node rank: {}".format(get_node_rank())) + logger.info(f"distributed configuration: {model_name()}") + logger.info(f"backend: {backend()}") + logger.info(f"device: {device().type}") + logger.info(f"hostname: {hostname()}") + logger.info(f"world size: {get_world_size()}") + logger.info(f"rank: {get_rank()}") + logger.info(f"local rank: {get_local_rank()}") + logger.info(f"num processes per_node: {get_nproc_per_node()}") + logger.info(f"num nodes: {get_nnodes()}") + logger.info(f"node rank: {get_node_rank()}") def one_rank_only(rank: int = 0, with_barrier: bool = False) -> Callable: From d17b1c8419a6d51107de473dd31ee43f9cc2cb5c Mon Sep 17 00:00:00 2001 From: steven Date: Sun, 20 Dec 2020 14:18:49 -0700 Subject: [PATCH 19/29] breaking up large pull --- ignite/engine/engine.py | 43 +++++++++++------------------ ignite/engine/events.py | 8 +++--- ignite/engine/utils.py | 6 ++-- ignite/handlers/checkpoint.py | 28 +++++++++---------- ignite/handlers/terminate_on_nan.py | 2 +- ignite/metrics/accumulation.py | 8 +++--- ignite/metrics/accuracy.py | 10 +++---- ignite/metrics/confusion_matrix.py | 18 ++++++------ ignite/metrics/epoch_metric.py | 6 ++-- ignite/metrics/fbeta.py | 2 +- ignite/metrics/metric.py | 16 +++++------ ignite/metrics/precision.py | 6 ++-- ignite/metrics/recall.py | 4 +-- ignite/metrics/ssim.py | 12 ++++---- 14 files changed, 77 insertions(+), 92 deletions(-) diff --git a/ignite/engine/engine.py b/ignite/engine/engine.py index 4b343465a7b0..d41411f5e2e9 100644 --- a/ignite/engine/engine.py +++ b/ignite/engine/engine.py @@ -55,7 +55,7 @@ def log_training(engine): e = engine.state.epoch n = engine.state.max_epochs i = engine.state.iteration - print("Epoch {}/{} : {} - batch loss: {}, lr: {}".format(e, n, i, batch_loss, lr)) + print(f"Epoch {e}/{n} : {i} - batch loss: {batch_loss}, lr: {lr}") trainer.run(data_loader, max_epochs=5) @@ -218,12 +218,12 @@ class TBPTT_Events(EventEnum): # engine.state contains an attribute time_iteration, which can be accessed using engine.state.time_iteration """ if not (event_to_attr is None or isinstance(event_to_attr, dict)): - raise ValueError("Expected event_to_attr to be dictionary. Got {}.".format(type(event_to_attr))) + raise ValueError(f"Expected event_to_attr to be dictionary. 
Got {type(event_to_attr)}.") for index, e in enumerate(event_names): if not isinstance(e, (str, EventEnum)): raise TypeError( - "Value at {} of event_names should be a str or EventEnum, but given {}".format(index, e) + f"Value at {index} of event_names should be a str or EventEnum, but given {e}" ) self._allowed_events.append(e) if event_to_attr and e in event_to_attr: @@ -271,7 +271,7 @@ def add_event_handler(self, event_name: Any, handler: Callable, *args: Any, **kw engine = Engine(process_function) def print_epoch(engine): - print("Epoch: {}".format(engine.state.epoch)) + print(f"Epoch: {engine.state.epoch}") engine.add_event_handler(Events.EPOCH_COMPLETED, print_epoch) @@ -301,7 +301,7 @@ def execute_something(): if event_name not in self._allowed_events: self.logger.error("attempt to add event handler to an invalid event %s.", event_name) - raise ValueError("Event {} is not a valid event for this Engine.".format(event_name)) + raise ValueError(f"Event {event_name} is not a valid event for this Engine.") event_args = (Exception(),) if event_name == Events.EXCEPTION_RAISED else () try: @@ -359,7 +359,7 @@ def remove_event_handler(self, handler: Callable, event_name: Any) -> None: """ if event_name not in self._event_handlers: - raise ValueError("Input event name '{}' does not exist".format(event_name)) + raise ValueError(f"Input event name '{event_name}' does not exist") new_event_handlers = [ (h, args, kwargs) @@ -367,7 +367,7 @@ def remove_event_handler(self, handler: Callable, event_name: Any) -> None: if not self._compare_handlers(handler, h) ] if len(new_event_handlers) == len(self._event_handlers[event_name]): - raise ValueError("Input handler '{}' is not found among registered event handlers".format(handler)) + raise ValueError(f"Input handler '{handler}' is not found among registered event handlers") self._event_handlers[event_name] = new_event_handlers def on(self, event_name: Any, *args: Any, **kwargs: Any) -> Callable: @@ -387,7 +387,7 @@ def on(self, event_name: Any, *args: Any, **kwargs: Any) -> Callable: @engine.on(Events.EPOCH_COMPLETED) def print_epoch(): - print("Epoch: {}".format(engine.state.epoch)) + print(f"Epoch: {engine.state.epoch}") @engine.on(Events.EPOCH_COMPLETED | Events.COMPLETED) def execute_something(): @@ -533,9 +533,7 @@ def load_state_dict(self, state_dict: Mapping) -> None: for k in self._state_dict_user_keys: if k not in state_dict: raise ValueError( - "Required user state attribute '{}' is absent in provided state_dict '{}'".format( - k, state_dict.keys() - ) + f"Required user state attribute '{k}' is absent in provided state_dict '{state_dict.keys()}'" ) self.state.max_epochs = state_dict["max_epochs"] self.state.epoch_length = state_dict["epoch_length"] @@ -552,7 +550,7 @@ def load_state_dict(self, state_dict: Mapping) -> None: if self.state.epoch_length is None: raise ValueError( "If epoch is provided in the state dict, epoch_length should not be None. " - "Input state_dict: {}".format(state_dict) + f"Input state_dict: {state_dict}" ) self.state.iteration = self.state.epoch_length * self.state.epoch @@ -669,18 +667,14 @@ def switch_batch(engine): if max_epochs < self.state.epoch: raise ValueError( "Argument max_epochs should be larger than the start epoch " - "defined in the state: {} vs {}. Please, set engine.state.max_epochs = None " - "before calling engine.run() in order to restart the training from the beginning.".format( - max_epochs, self.state.epoch - ) + f"defined in the state: {max_epochs} vs {self.state.epoch}. 
Please, set engine.state.max_epochs = None " + "before calling engine.run() in order to restart the training from the beginning." ) self.state.max_epochs = max_epochs if epoch_length is not None: if epoch_length != self.state.epoch_length: raise ValueError( - "Argument epoch_length should be same as in the state, given {} vs {}".format( - epoch_length, self.state.epoch_length - ) + f"Argument epoch_length should be same as in the state, given {epoch_length} vs {self.state.epoch_length}" ) if self.state.max_epochs is None or self._is_done(self.state): @@ -707,12 +701,10 @@ def switch_batch(engine): self.state.max_epochs = max_epochs self.state.max_iters = max_iters self.state.epoch_length = epoch_length - self.logger.info("Engine run starting with max_epochs={}.".format(max_epochs)) + self.logger.info(f"Engine run starting with max_epochs={max_epochs}.") else: self.logger.info( - "Engine run resuming from iteration {}, epoch {} until {} epochs".format( - self.state.iteration, self.state.epoch, self.state.max_epochs - ) + f"Engine run resuming from iteration {self.state.iteration}, epoch {self.state.epoch} until {self.state.max_epochs} epochs" ) self.state.dataloader = data @@ -836,10 +828,7 @@ def _run_once_on_dataset(self) -> float: warnings.warn( "Data iterator can not provide data anymore but required total number of " "iterations to run is not reached. " - "Current iteration: {} vs Total iterations to run : {}".format( - self.state.iteration, - self.state.epoch_length * self.state.max_epochs, # type: ignore[operator] - ) + f"Current iteration: {self.state.iteration} vs Total iterations to run : {self.state.epoch_length * self.state.max_epochs}" # type: ignore[operator] ) break diff --git a/ignite/engine/events.py b/ignite/engine/events.py index e05f477b2a82..4f07f5765082 100644 --- a/ignite/engine/events.py +++ b/ignite/engine/events.py @@ -315,7 +315,7 @@ def __init__(self) -> None: def _append(self, event: Union[Events, CallableEventWithFilter]) -> None: if not isinstance(event, (Events, CallableEventWithFilter)): - raise TypeError("Argument event should be Events or CallableEventWithFilter, got: {}".format(type(event))) + raise TypeError(f"Argument event should be Events or CallableEventWithFilter, got: {type(event)}") self._events.append(event) def __getitem__(self, item: int) -> Union[Events, CallableEventWithFilter]: @@ -392,7 +392,7 @@ def _update_attrs(self) -> None: def get_event_attrib_value(self, event_name: Union[str, Events, CallableEventWithFilter]) -> int: if event_name not in State.event_to_attr: - raise RuntimeError("Unknown event name '{}'".format(event_name)) + raise RuntimeError(f"Unknown event name '{event_name}'") return getattr(self, State.event_to_attr[event_name]) def __repr__(self) -> str: @@ -400,7 +400,7 @@ def __repr__(self) -> str: for attr, value in self.__dict__.items(): if not isinstance(value, (numbers.Number, str)): value = type(value) - s += "\t{}: {}\n".format(attr, value) + s += f"\t{attr}: {value}\n" return s @@ -424,7 +424,7 @@ class RemovableEventHandle: engine = Engine() def print_epoch(engine): - print("Epoch: {}".format(engine.state.epoch)) + print(f"Epoch: {engine.state.epoch}") with engine.add_event_handler(Events.EPOCH_COMPLETED, print_epoch): # print_epoch handler registered for a single run diff --git a/ignite/engine/utils.py b/ignite/engine/utils.py index 9c4c5b8d9846..d2c2d10d1ea5 100644 --- a/ignite/engine/utils.py +++ b/ignite/engine/utils.py @@ -15,7 +15,7 @@ def _check_signature(fn: Callable, fn_description: str, *args: Any, 
**kwargs: An exception_msg = str(exc) passed_params = list(args) + list(kwargs) raise ValueError( - "Error adding {} '{}': " - "takes parameters {} but will be called with {}" - "({}).".format(fn, fn_description, fn_params, passed_params, exception_msg) + f"Error adding {fn} '{fn_description}': " + f"takes parameters {fn_params} but will be called with {passed_params}" + f"({exception_msg})." ) diff --git a/ignite/handlers/checkpoint.py b/ignite/handlers/checkpoint.py index f3c419ee9dfb..71edf54499be 100644 --- a/ignite/handlers/checkpoint.py +++ b/ignite/handlers/checkpoint.py @@ -251,7 +251,7 @@ def __init__( if to_save is not None: # for compatibility with ModelCheckpoint if not isinstance(to_save, collections.Mapping): - raise TypeError("Argument `to_save` should be a dictionary, but given {}".format(type(to_save))) + raise TypeError(f"Argument `to_save` should be a dictionary, but given {type(to_save)}") if len(to_save) < 1: raise ValueError("No objects to checkpoint.") @@ -261,11 +261,11 @@ def __init__( if include_self: if not isinstance(to_save, collections.MutableMapping): raise TypeError( - "If `include_self` is True, then `to_save` must be mutable, but given {}.".format(type(to_save)) + f"If `include_self` is True, then `to_save` must be mutable, but given {type(to_save)}." ) if "checkpointer" in to_save: - raise ValueError("Cannot have key 'checkpointer' if `include_self` is True: {}".format(to_save)) + raise ValueError(f"Cannot have key 'checkpointer' if `include_self` is True: {to_save}") if not (callable(save_handler) or isinstance(save_handler, BaseSaveHandler)): raise TypeError("Argument `save_handler` should be callable or inherit from BaseSaveHandler") @@ -275,7 +275,7 @@ def __init__( if global_step_transform is not None and not callable(global_step_transform): raise TypeError( - "global_step_transform should be a function, got {} instead.".format(type(global_step_transform)) + f"global_step_transform should be a function, got {type(global_step_transform)} instead." 
) self.to_save = to_save @@ -319,7 +319,7 @@ def __call__(self, engine: Engine) -> None: if self._check_lt_n_saved() or self._saved[0].priority < priority: priority_str = ( - "{}".format(priority) if isinstance(priority, numbers.Integral) else "{:.4f}".format(priority) + f"{priority}" if isinstance(priority, numbers.Integral) else f"{priority:.4f}" ) checkpoint = self._setup_checkpoint() @@ -351,7 +351,7 @@ def __call__(self, engine: Engine) -> None: filename = filename_pattern.format(**filename_dict) metadata = { - "basename": "{}{}{}".format(self.filename_prefix, "_" * int(len(self.filename_prefix) > 0), name), + "basename": f"{self.filename_prefix}{'_' * int(len(self.filename_prefix) > 0)}{name}", "score_name": self.score_name, "priority": priority, } @@ -443,7 +443,7 @@ def setup_filename_pattern( def _check_objects(objs: Mapping, attr: str) -> None: for k, obj in objs.items(): if not hasattr(obj, attr): - raise TypeError("Object {} should have `{}` method".format(type(obj), attr)) + raise TypeError(f"Object {type(obj)} should have `{attr}` method") @staticmethod def load_objects(to_load: Mapping, checkpoint: Mapping, **kwargs: Any) -> None: @@ -488,7 +488,7 @@ def load_objects(to_load: Mapping, checkpoint: Mapping, **kwargs: Any) -> None: """ Checkpoint._check_objects(to_load, "load_state_dict") if not isinstance(checkpoint, collections.Mapping): - raise TypeError("Argument checkpoint should be a dictionary, but given {}".format(type(checkpoint))) + raise TypeError(f"Argument checkpoint should be a dictionary, but given {type(checkpoint)}") if len(kwargs) > 1 or any(k for k in kwargs.keys() if k not in ["strict"]): warnings.warn("kwargs contains keys other than strict and these will be ignored") @@ -506,7 +506,7 @@ def load_objects(to_load: Mapping, checkpoint: Mapping, **kwargs: Any) -> None: # multiple objects to load for k, obj in to_load.items(): if k not in checkpoint: - raise ValueError("Object labeled by '{}' from `to_load` is not found in the checkpoint".format(k)) + raise ValueError(f"Object labeled by '{k}' from `to_load` is not found in the checkpoint") if isinstance(obj, (nn.DataParallel, nn.parallel.DistributedDataParallel)): obj = obj.module if isinstance(obj, torch.nn.Module): @@ -552,16 +552,16 @@ def _check_and_setup(dirname: str, create_dir: bool, require_empty: bool) -> Non os.makedirs(dirname) # Ensure that dirname exists if not os.path.exists(dirname): - raise ValueError("Directory path '{}' is not found".format(dirname)) + raise ValueError(f"Directory path '{dirname}' is not found") if require_empty: matched = [fname for fname in os.listdir(dirname) if fname.endswith(".pt")] if len(matched) > 0: raise ValueError( - "Files {} with extension '.pt' are already present " - "in the directory {}. If you want to use this " + f"Files {matched} with extension '.pt' are already present " + f"in the directory {dirname}. If you want to use this " "directory anyway, pass `require_empty=False`." - "".format(matched, dirname) + "" ) def __call__(self, checkpoint: Mapping, filename: str, metadata: Optional[Mapping] = None) -> None: @@ -709,7 +709,7 @@ def last_checkpoint(self) -> Union[str, None]: if not isinstance(self.save_handler, DiskSaver): raise RuntimeError( - "Unable to save checkpoint, save_handler should be DiskSaver, got {}.".format(type(self.save_handler)) + f"Unable to save checkpoint, save_handler should be DiskSaver, got {type(self.save_handler)}." 
) return os.path.join(self.save_handler.dirname, self._saved[-1].filename) diff --git a/ignite/handlers/terminate_on_nan.py b/ignite/handlers/terminate_on_nan.py index d016d62657b9..61aa568ef978 100644 --- a/ignite/handlers/terminate_on_nan.py +++ b/ignite/handlers/terminate_on_nan.py @@ -52,6 +52,6 @@ def raise_error(x: Union[float, torch.Tensor]) -> None: apply_to_type(output, (numbers.Number, torch.Tensor), raise_error) except RuntimeError: self.logger.warning( - "{}: Output '{}' contains NaN or Inf. Stop training".format(self.__class__.__name__, output) + f"{self.__class__.__name__}: Output '{output}' contains NaN or Inf. Stop training" ) engine.terminate() diff --git a/ignite/metrics/accumulation.py b/ignite/metrics/accumulation.py index b6c780ea272d..c26d1758d79f 100644 --- a/ignite/metrics/accumulation.py +++ b/ignite/metrics/accumulation.py @@ -46,7 +46,7 @@ def __init__( device: Union[str, torch.device] = torch.device("cpu"), ): if not callable(op): - raise TypeError("Argument op should be a callable, but given {}".format(type(op))) + raise TypeError(f"Argument op should be a callable, but given {type(op)}") self._op = op @@ -59,7 +59,7 @@ def reset(self) -> None: def _check_output_type(self, output: Union[float, torch.Tensor]) -> None: if not (isinstance(output, numbers.Number) or isinstance(output, torch.Tensor)): - raise TypeError("Output should be a number or torch.Tensor, but given {}".format(type(output))) + raise TypeError(f"Output should be a number or torch.Tensor, but given {type(output)}") @reinit__is_reduced def update(self, output: Union[float, torch.Tensor]) -> None: @@ -135,7 +135,7 @@ def _mean_op(a: Union[float, torch.Tensor], x: Union[float, torch.Tensor]) -> Un def compute(self) -> Union[float, torch.Tensor]: if self.num_examples < 1: raise NotComputableError( - "{} must have at least one example before it can be computed.".format(self.__class__.__name__) + f"{self.__class__.__name__} must have at least one example before it can be computed." ) return self.accumulator / self.num_examples @@ -186,7 +186,7 @@ def _geom_op(a: torch.Tensor, x: Union[float, torch.Tensor]) -> torch.Tensor: def compute(self) -> Union[float, torch.Tensor]: if self.num_examples < 1: raise NotComputableError( - "{} must have at least one example before it can be computed.".format(self.__class__.__name__) + f"{self.__class__.__name__} must have at least one example before it can be computed." ) tensor = torch.exp(self.accumulator / self.num_examples) diff --git a/ignite/metrics/accuracy.py b/ignite/metrics/accuracy.py index 47e357e62eda..6ece8ace18fa 100644 --- a/ignite/metrics/accuracy.py +++ b/ignite/metrics/accuracy.py @@ -31,7 +31,7 @@ def _check_shape(self, output: Sequence[torch.Tensor]) -> None: raise ValueError( "y must have shape of (batch_size, ...) and y_pred must have " "shape of (batch_size, num_categories, ...) or (batch_size, ...), " - "but given {} vs {}.".format(y.shape, y_pred.shape) + f"but given {y.shape} vs {y_pred.shape}." ) y_shape = y.shape @@ -78,18 +78,18 @@ def _check_type(self, output: Sequence[torch.Tensor]) -> None: num_classes = 1 else: raise RuntimeError( - "Invalid shapes of y (shape={}) and y_pred (shape={}), check documentation." - " for expected shapes of y and y_pred.".format(y.shape, y_pred.shape) + f"Invalid shapes of y (shape={y.shape}) and y_pred (shape={y_pred.shape}), check documentation" + " for expected shapes of y and y_pred."
) if self._type is None: self._type = update_type self._num_classes = num_classes else: if self._type != update_type: - raise RuntimeError("Input data type has changed from {} to {}.".format(self._type, update_type)) + raise RuntimeError(f"Input data type has changed from {self._type} to {update_type}.") if self._num_classes != num_classes: raise ValueError( - "Input data number of classes has changed from {} to {}".format(self._num_classes, num_classes) + f"Input data number of classes has changed from {self._num_classes} to {num_classes}" ) diff --git a/ignite/metrics/confusion_matrix.py b/ignite/metrics/confusion_matrix.py index c45fc6445681..171afae11206 100644 --- a/ignite/metrics/confusion_matrix.py +++ b/ignite/metrics/confusion_matrix.py @@ -66,19 +66,19 @@ def _check_shape(self, output: Sequence[torch.Tensor]) -> None: if y_pred.ndimension() < 2: raise ValueError( - "y_pred must have shape (batch_size, num_categories, ...), but given {}".format(y_pred.shape) + f"y_pred must have shape (batch_size, num_categories, ...), but given {y_pred.shape}" ) if y_pred.shape[1] != self.num_classes: raise ValueError( - "y_pred does not have correct number of categories: {} vs {}".format(y_pred.shape[1], self.num_classes) + f"y_pred does not have correct number of categories: {y_pred.shape[1]} vs {self.num_classes}" ) if not (y.ndimension() + 1 == y_pred.ndimension()): raise ValueError( "y_pred must have shape (batch_size, num_categories, ...) and y must have " "shape of (batch_size, ...), " - "but given {} vs {}.".format(y.shape, y_pred.shape) + f"but given {y.shape} vs {y_pred.shape}." ) y_shape = y.shape @@ -155,14 +155,14 @@ def IoU(cm: ConfusionMatrix, ignore_index: Optional[int] = None) -> MetricsLambd """ if not isinstance(cm, ConfusionMatrix): - raise TypeError("Argument cm should be instance of ConfusionMatrix, but given {}".format(type(cm))) + raise TypeError(f"Argument cm should be instance of ConfusionMatrix, but given {type(cm)}") if not (cm.average in (None, "samples")): raise ValueError("ConfusionMatrix should have average attribute either None or 'samples'") if ignore_index is not None: if not (isinstance(ignore_index, numbers.Integral) and 0 <= ignore_index < cm.num_classes): - raise ValueError("ignore_index should be non-negative integer, but given {}".format(ignore_index)) + raise ValueError(f"ignore_index should be non-negative integer, but given {ignore_index}") # Increase floating point precision and pass to CPU cm = cm.type(torch.DoubleTensor) @@ -173,7 +173,7 @@ def IoU(cm: ConfusionMatrix, ignore_index: Optional[int] = None) -> MetricsLambd def ignore_index_fn(iou_vector: torch.Tensor) -> torch.Tensor: if ignore_idx >= len(iou_vector): raise ValueError( - "ignore_index {} is larger than the length of IoU vector {}".format(ignore_idx, len(iou_vector)) + f"ignore_index {ignore_idx} is larger than the length of IoU vector {len(iou_vector)}" ) indices = list(range(len(iou_vector))) indices.remove(ignore_idx) @@ -274,11 +274,11 @@ def DiceCoefficient(cm: ConfusionMatrix, ignore_index: Optional[int] = None) -> """ if not isinstance(cm, ConfusionMatrix): - raise TypeError("Argument cm should be instance of ConfusionMatrix, but given {}".format(type(cm))) + raise TypeError(f"Argument cm should be instance of ConfusionMatrix, but given {type(cm)}") if ignore_index is not None: if not (isinstance(ignore_index, numbers.Integral) and 0 <= ignore_index < cm.num_classes): - raise ValueError("ignore_index should be non-negative integer, but given {}".format(ignore_index)) + raise 
ValueError(f"ignore_index should be non-negative integer, but given {ignore_index}") # Increase floating point precision and pass to CPU cm = cm.type(torch.DoubleTensor) @@ -290,7 +290,7 @@ def DiceCoefficient(cm: ConfusionMatrix, ignore_index: Optional[int] = None) -> def ignore_index_fn(dice_vector: torch.Tensor) -> torch.Tensor: if ignore_idx >= len(dice_vector): raise ValueError( - "ignore_index {} is larger than the length of Dice vector {}".format(ignore_idx, len(dice_vector)) + f"ignore_index {ignore_idx} is larger than the length of Dice vector {len(dice_vector)}" ) indices = list(range(len(dice_vector))) indices.remove(ignore_idx) diff --git a/ignite/metrics/epoch_metric.py b/ignite/metrics/epoch_metric.py index f4f05d88e393..616bff631d39 100644 --- a/ignite/metrics/epoch_metric.py +++ b/ignite/metrics/epoch_metric.py @@ -89,13 +89,13 @@ def _check_type(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None: if dtype_preds != y_pred.dtype: raise ValueError( "Incoherent types between input y_pred and stored predictions: " - "{} vs {}".format(dtype_preds, y_pred.dtype) + f"{dtype_preds} vs {y_pred.dtype}" ) dtype_targets = self._targets[-1].dtype if dtype_targets != y.dtype: raise ValueError( - "Incoherent types between input y and stored targets: {} vs {}".format(dtype_targets, y.dtype) + f"Incoherent types between input y and stored targets: {dtype_targets} vs {y.dtype}" ) @reinit__is_reduced @@ -121,7 +121,7 @@ def update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None: try: self.compute_fn(self._predictions[0], self._targets[0]) except Exception as e: - warnings.warn("Probably, there can be a problem with `compute_fn`:\n {}.".format(e), EpochMetricWarning) + warnings.warn(f"Probably, there can be a problem with `compute_fn`:\n {e}.", EpochMetricWarning) def compute(self) -> float: if len(self._predictions) < 1 or len(self._targets) < 1: diff --git a/ignite/metrics/fbeta.py b/ignite/metrics/fbeta.py index 769c0febfc10..7083808eeb69 100644 --- a/ignite/metrics/fbeta.py +++ b/ignite/metrics/fbeta.py @@ -36,7 +36,7 @@ def Fbeta( MetricsLambda, F-beta metric """ if not (beta > 0): - raise ValueError("Beta should be a positive integer, but given {}".format(beta)) + raise ValueError(f"Beta should be a positive integer, but given {beta}") if precision is not None and output_transform is not None: raise ValueError("If precision argument is provided, output_transform should be None") diff --git a/ignite/metrics/metric.py b/ignite/metrics/metric.py index 000769f2779b..6f25920c649e 100644 --- a/ignite/metrics/metric.py +++ b/ignite/metrics/metric.py @@ -209,8 +209,8 @@ def __init__( # check if reset and update methods are decorated. Compute may not be decorated if not (hasattr(self.reset, "_decorated") and hasattr(self.update, "_decorated")): warnings.warn( - "{} class does not support distributed setting. Computed result is not collected " - "across all computing devices".format(self.__class__.__name__), + f"{self.__class__.__name__} class does not support distributed setting. 
Computed result is not collected " + "across all computing devices", RuntimeWarning, ) @@ -282,14 +282,12 @@ def iteration_completed(self, engine: Engine) -> None: if isinstance(output, Mapping): if self.required_output_keys is None: raise TypeError( - "Transformed engine output for {} metric should be a tuple/list, but given {}".format( - self.__class__.__name__, type(output) - ) + f"Transformed engine output for {self.__class__.__name__} metric should be a tuple/list, but given {type(output)}" ) if not all([k in output for k in self.required_output_keys]): raise ValueError( "When transformed engine's output is a mapping, " - "it should contain {} keys, but given {}".format(self.required_output_keys, list(output.keys())) + f"it should contain {self.required_output_keys} keys, but given {list(output.keys())}" ) output = tuple(output[k] for k in self.required_output_keys) self.update(output) @@ -306,7 +304,7 @@ def completed(self, engine: Engine, name: str) -> None: if isinstance(result, Mapping): if name in result.keys(): raise ValueError( - "Argument name '{}' is conflicting with mapping keys: {}".format(name, list(result.keys())) + f"Argument name '{name}' is conflicting with mapping keys: {list(result.keys())}" ) for key, value in result.items(): @@ -326,10 +324,10 @@ def _check_usage(self, usage: Union[str, MetricUsage]) -> MetricUsage: usage = BatchWise() else: raise ValueError( - "usage should be 'EpochWise.usage_name' or 'BatchWise.usage_name', get {}".format(usage) + f"usage should be 'EpochWise.usage_name' or 'BatchWise.usage_name', get {usage}" ) if not isinstance(usage, MetricUsage): - raise TypeError("Unhandled usage type {}".format(type(usage))) + raise TypeError(f"Unhandled usage type {type(usage)}") return usage def attach(self, engine: Engine, name: str, usage: Union[str, MetricUsage] = EpochWise()) -> None: diff --git a/ignite/metrics/precision.py b/ignite/metrics/precision.py index 1f390d5c4bfe..2955dfd73b11 100644 --- a/ignite/metrics/precision.py +++ b/ignite/metrics/precision.py @@ -51,7 +51,7 @@ def compute(self) -> Union[torch.Tensor, float]: is_scalar = not isinstance(self._positives, torch.Tensor) or self._positives.ndim == 0 if is_scalar and self._positives == 0: raise NotComputableError( - "{} must have at least one example before it can be computed.".format(self.__class__.__name__) + f"{self.__class__.__name__} must have at least one example before it can be computed." ) if not (self._type == "multilabel" and not self._average): @@ -149,8 +149,8 @@ def update(self, output: Sequence[torch.Tensor]) -> None: num_classes = y_pred.size(1) if y.max() + 1 > num_classes: raise ValueError( - "y_pred contains less classes than y. Number of predicted classes is {}" - " and element in y has invalid class = {}.".format(num_classes, y.max().item() + 1) + f"y_pred contains less classes than y. Number of predicted classes is {num_classes}" + f" and element in y has invalid class = {y.max().item() + 1}." ) y = to_onehot(y.view(-1), num_classes=num_classes) indices = torch.argmax(y_pred, dim=1).view(-1) diff --git a/ignite/metrics/recall.py b/ignite/metrics/recall.py index 19a087febb64..7d7dc29e477b 100644 --- a/ignite/metrics/recall.py +++ b/ignite/metrics/recall.py @@ -90,8 +90,8 @@ def update(self, output: Sequence[torch.Tensor]) -> None: num_classes = y_pred.size(1) if y.max() + 1 > num_classes: raise ValueError( - "y_pred contains less classes than y. 
Number of predicted classes is {}" - " and element in y has invalid class = {}.".format(num_classes, y.max().item() + 1) + f"y_pred contains less classes than y. Number of predicted classes is {num_classes}" + f" and element in y has invalid class = {y.max().item() + 1}." ) y = to_onehot(y.view(-1), num_classes=num_classes) indices = torch.argmax(y_pred, dim=1).view(-1) diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py index 69a9e861f394..ed00c2cafe13 100644 --- a/ignite/metrics/ssim.py +++ b/ignite/metrics/ssim.py @@ -70,10 +70,10 @@ def __init__( raise ValueError("Argument sigma should be either float or a sequence of float.") if any(x % 2 == 0 or x <= 0 for x in self.kernel_size): - raise ValueError("Expected kernel_size to have odd positive number. Got {}.".format(kernel_size)) + raise ValueError(f"Expected kernel_size to have odd positive number. Got {kernel_size}.") if any(y <= 0 for y in self.sigma): - raise ValueError("Expected sigma to have positive number. Got {}.".format(sigma)) + raise ValueError(f"Expected sigma to have positive number. Got {sigma}.") super(SSIM, self).__init__(output_transform=output_transform, device=device) self.gaussian = gaussian @@ -124,19 +124,17 @@ def update(self, output: Sequence[torch.Tensor]) -> None: if y_pred.dtype != y.dtype: raise TypeError( - "Expected y_pred and y to have the same data type. Got y_pred: {} and y: {}.".format( - y_pred.dtype, y.dtype - ) + f"Expected y_pred and y to have the same data type. Got y_pred: {y_pred.dtype} and y: {y.dtype}." ) if y_pred.shape != y.shape: raise ValueError( - "Expected y_pred and y to have the same shape. Got y_pred: {} and y: {}.".format(y_pred.shape, y.shape) + f"Expected y_pred and y to have the same shape. Got y_pred: {y_pred.shape} and y: {y.shape}." ) if len(y_pred.shape) != 4 or len(y.shape) != 4: raise ValueError( - "Expected y_pred and y to have BxCxHxW shape. Got y_pred: {} and y: {}.".format(y_pred.shape, y.shape) + f"Expected y_pred and y to have BxCxHxW shape. Got y_pred: {y_pred.shape} and y: {y.shape}." ) channel = y_pred.size(1) From 9c3b7b81d5044e3420cf4bf5dfd79d1ad3a4f69c Mon Sep 17 00:00:00 2001 From: uribgp Date: Sun, 20 Dec 2020 21:20:27 +0000 Subject: [PATCH 20/29] autopep8 fix --- ignite/engine/engine.py | 7 +++---- ignite/handlers/checkpoint.py | 8 ++------ ignite/handlers/terminate_on_nan.py | 4 +--- ignite/metrics/accuracy.py | 4 +--- ignite/metrics/confusion_matrix.py | 8 ++------ ignite/metrics/epoch_metric.py | 7 ++----- ignite/metrics/metric.py | 8 ++------ 7 files changed, 13 insertions(+), 33 deletions(-) diff --git a/ignite/engine/engine.py b/ignite/engine/engine.py index d41411f5e2e9..1708fc82d7a0 100644 --- a/ignite/engine/engine.py +++ b/ignite/engine/engine.py @@ -222,9 +222,7 @@ class TBPTT_Events(EventEnum): for index, e in enumerate(event_names): if not isinstance(e, (str, EventEnum)): - raise TypeError( - f"Value at {index} of event_names should be a str or EventEnum, but given {e}" - ) + raise TypeError(f"Value at {index} of event_names should be a str or EventEnum, but given {e}") self._allowed_events.append(e) if event_to_attr and e in event_to_attr: State.event_to_attr[e] = event_to_attr[e] @@ -828,7 +826,8 @@ def _run_once_on_dataset(self) -> float: warnings.warn( "Data iterator can not provide data anymore but required total number of " "iterations to run is not reached. 
" - f"Current iteration: {self.state.iteration} vs Total iterations to run : {self.state.epoch_length * self.state.max_epochs}" # type: ignore[operator] + # type: ignore[operator] + f"Current iteration: {self.state.iteration} vs Total iterations to run : {self.state.epoch_length * self.state.max_epochs}" ) break diff --git a/ignite/handlers/checkpoint.py b/ignite/handlers/checkpoint.py index 71edf54499be..994f89b0033c 100644 --- a/ignite/handlers/checkpoint.py +++ b/ignite/handlers/checkpoint.py @@ -274,9 +274,7 @@ def __init__( raise ValueError("If `score_name` is provided, then `score_function` " "should be also provided.") if global_step_transform is not None and not callable(global_step_transform): - raise TypeError( - f"global_step_transform should be a function, got {type(global_step_transform)} instead." - ) + raise TypeError(f"global_step_transform should be a function, got {type(global_step_transform)} instead.") self.to_save = to_save self.filename_prefix = filename_prefix @@ -318,9 +316,7 @@ def __call__(self, engine: Engine) -> None: if self._check_lt_n_saved() or self._saved[0].priority < priority: - priority_str = ( - f"{priority}" if isinstance(priority, numbers.Integral) else f"{priority:.4f}" - ) + priority_str = f"{priority}" if isinstance(priority, numbers.Integral) else f"{priority:.4f}" checkpoint = self._setup_checkpoint() diff --git a/ignite/handlers/terminate_on_nan.py b/ignite/handlers/terminate_on_nan.py index 61aa568ef978..9b54fbee2a2d 100644 --- a/ignite/handlers/terminate_on_nan.py +++ b/ignite/handlers/terminate_on_nan.py @@ -51,7 +51,5 @@ def raise_error(x: Union[float, torch.Tensor]) -> None: try: apply_to_type(output, (numbers.Number, torch.Tensor), raise_error) except RuntimeError: - self.logger.warning( - f"{self.__class__.__name__}: Output '{output}' contains NaN or Inf. Stop training" - ) + self.logger.warning(f"{self.__class__.__name__}: Output '{output}' contains NaN or Inf. 
Stop training") engine.terminate() diff --git a/ignite/metrics/accuracy.py b/ignite/metrics/accuracy.py index 6ece8ace18fa..25f8ad4a6f1b 100644 --- a/ignite/metrics/accuracy.py +++ b/ignite/metrics/accuracy.py @@ -88,9 +88,7 @@ def _check_type(self, output: Sequence[torch.Tensor]) -> None: if self._type != update_type: raise RuntimeError(f"Input data type has changed from {self._type} to {update_type}.") if self._num_classes != num_classes: - raise ValueError( - f"Input data number of classes has changed from {self._num_classes} to {num_classes}" - ) + raise ValueError(f"Input data number of classes has changed from {self._num_classes} to {num_classes}") class Accuracy(_BaseClassification): diff --git a/ignite/metrics/confusion_matrix.py b/ignite/metrics/confusion_matrix.py index 171afae11206..8591b77a6b89 100644 --- a/ignite/metrics/confusion_matrix.py +++ b/ignite/metrics/confusion_matrix.py @@ -65,9 +65,7 @@ def _check_shape(self, output: Sequence[torch.Tensor]) -> None: y_pred, y = output[0].detach(), output[1].detach() if y_pred.ndimension() < 2: - raise ValueError( - f"y_pred must have shape (batch_size, num_categories, ...), but given {y_pred.shape}" - ) + raise ValueError(f"y_pred must have shape (batch_size, num_categories, ...), but given {y_pred.shape}") if y_pred.shape[1] != self.num_classes: raise ValueError( @@ -172,9 +170,7 @@ def IoU(cm: ConfusionMatrix, ignore_index: Optional[int] = None) -> MetricsLambd def ignore_index_fn(iou_vector: torch.Tensor) -> torch.Tensor: if ignore_idx >= len(iou_vector): - raise ValueError( - f"ignore_index {ignore_idx} is larger than the length of IoU vector {len(iou_vector)}" - ) + raise ValueError(f"ignore_index {ignore_idx} is larger than the length of IoU vector {len(iou_vector)}") indices = list(range(len(iou_vector))) indices.remove(ignore_idx) return iou_vector[indices] diff --git a/ignite/metrics/epoch_metric.py b/ignite/metrics/epoch_metric.py index 616bff631d39..cb4a48710e41 100644 --- a/ignite/metrics/epoch_metric.py +++ b/ignite/metrics/epoch_metric.py @@ -88,15 +88,12 @@ def _check_type(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None: dtype_preds = self._predictions[-1].dtype if dtype_preds != y_pred.dtype: raise ValueError( - "Incoherent types between input y_pred and stored predictions: " - f"{dtype_preds} vs {y_pred.dtype}" + "Incoherent types between input y_pred and stored predictions: " f"{dtype_preds} vs {y_pred.dtype}" ) dtype_targets = self._targets[-1].dtype if dtype_targets != y.dtype: - raise ValueError( - f"Incoherent types between input y and stored targets: {dtype_targets} vs {y.dtype}" - ) + raise ValueError(f"Incoherent types between input y and stored targets: {dtype_targets} vs {y.dtype}") @reinit__is_reduced def update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None: diff --git a/ignite/metrics/metric.py b/ignite/metrics/metric.py index 6f25920c649e..59dad2a7b39e 100644 --- a/ignite/metrics/metric.py +++ b/ignite/metrics/metric.py @@ -303,9 +303,7 @@ def completed(self, engine: Engine, name: str) -> None: result = self.compute() if isinstance(result, Mapping): if name in result.keys(): - raise ValueError( - f"Argument name '{name}' is conflicting with mapping keys: {list(result.keys())}" - ) + raise ValueError(f"Argument name '{name}' is conflicting with mapping keys: {list(result.keys())}") for key, value in result.items(): engine.state.metrics[key] = value @@ -323,9 +321,7 @@ def _check_usage(self, usage: Union[str, MetricUsage]) -> MetricUsage: elif usage == BatchWise.usage_name: usage = 
BatchWise() else: - raise ValueError( - f"usage should be 'EpochWise.usage_name' or 'BatchWise.usage_name', get {usage}" - ) + raise ValueError(f"usage should be 'EpochWise.usage_name' or 'BatchWise.usage_name', get {usage}") if not isinstance(usage, MetricUsage): raise TypeError(f"Unhandled usage type {type(usage)}") return usage From 317a1b453c25fd9aeae6f340da0864aeb624901c Mon Sep 17 00:00:00 2001 From: steven Date: Sun, 20 Dec 2020 16:31:22 -0700 Subject: [PATCH 21/29] annotation --- ignite/metrics/accuracy.py | 4 +--- ignite/metrics/confusion_matrix.py | 8 ++------ ignite/metrics/epoch_metric.py | 7 ++----- ignite/metrics/metric.py | 8 ++------ 4 files changed, 7 insertions(+), 20 deletions(-) diff --git a/ignite/metrics/accuracy.py b/ignite/metrics/accuracy.py index 6ece8ace18fa..25f8ad4a6f1b 100644 --- a/ignite/metrics/accuracy.py +++ b/ignite/metrics/accuracy.py @@ -88,9 +88,7 @@ def _check_type(self, output: Sequence[torch.Tensor]) -> None: if self._type != update_type: raise RuntimeError(f"Input data type has changed from {self._type} to {update_type}.") if self._num_classes != num_classes: - raise ValueError( - f"Input data number of classes has changed from {self._num_classes} to {num_classes}" - ) + raise ValueError(f"Input data number of classes has changed from {self._num_classes} to {num_classes}") class Accuracy(_BaseClassification): diff --git a/ignite/metrics/confusion_matrix.py b/ignite/metrics/confusion_matrix.py index 171afae11206..8591b77a6b89 100644 --- a/ignite/metrics/confusion_matrix.py +++ b/ignite/metrics/confusion_matrix.py @@ -65,9 +65,7 @@ def _check_shape(self, output: Sequence[torch.Tensor]) -> None: y_pred, y = output[0].detach(), output[1].detach() if y_pred.ndimension() < 2: - raise ValueError( - f"y_pred must have shape (batch_size, num_categories, ...), but given {y_pred.shape}" - ) + raise ValueError(f"y_pred must have shape (batch_size, num_categories, ...), but given {y_pred.shape}") if y_pred.shape[1] != self.num_classes: raise ValueError( @@ -172,9 +170,7 @@ def IoU(cm: ConfusionMatrix, ignore_index: Optional[int] = None) -> MetricsLambd def ignore_index_fn(iou_vector: torch.Tensor) -> torch.Tensor: if ignore_idx >= len(iou_vector): - raise ValueError( - f"ignore_index {ignore_idx} is larger than the length of IoU vector {len(iou_vector)}" - ) + raise ValueError(f"ignore_index {ignore_idx} is larger than the length of IoU vector {len(iou_vector)}") indices = list(range(len(iou_vector))) indices.remove(ignore_idx) return iou_vector[indices] diff --git a/ignite/metrics/epoch_metric.py b/ignite/metrics/epoch_metric.py index 616bff631d39..cb4a48710e41 100644 --- a/ignite/metrics/epoch_metric.py +++ b/ignite/metrics/epoch_metric.py @@ -88,15 +88,12 @@ def _check_type(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None: dtype_preds = self._predictions[-1].dtype if dtype_preds != y_pred.dtype: raise ValueError( - "Incoherent types between input y_pred and stored predictions: " - f"{dtype_preds} vs {y_pred.dtype}" + "Incoherent types between input y_pred and stored predictions: " f"{dtype_preds} vs {y_pred.dtype}" ) dtype_targets = self._targets[-1].dtype if dtype_targets != y.dtype: - raise ValueError( - f"Incoherent types between input y and stored targets: {dtype_targets} vs {y.dtype}" - ) + raise ValueError(f"Incoherent types between input y and stored targets: {dtype_targets} vs {y.dtype}") @reinit__is_reduced def update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None: diff --git a/ignite/metrics/metric.py b/ignite/metrics/metric.py 
index 6f25920c649e..59dad2a7b39e 100644 --- a/ignite/metrics/metric.py +++ b/ignite/metrics/metric.py @@ -303,9 +303,7 @@ def completed(self, engine: Engine, name: str) -> None: result = self.compute() if isinstance(result, Mapping): if name in result.keys(): - raise ValueError( - f"Argument name '{name}' is conflicting with mapping keys: {list(result.keys())}" - ) + raise ValueError(f"Argument name '{name}' is conflicting with mapping keys: {list(result.keys())}") for key, value in result.items(): engine.state.metrics[key] = value @@ -323,9 +321,7 @@ def _check_usage(self, usage: Union[str, MetricUsage]) -> MetricUsage: elif usage == BatchWise.usage_name: usage = BatchWise() else: - raise ValueError( - f"usage should be 'EpochWise.usage_name' or 'BatchWise.usage_name', get {usage}" - ) + raise ValueError(f"usage should be 'EpochWise.usage_name' or 'BatchWise.usage_name', get {usage}") if not isinstance(usage, MetricUsage): raise TypeError(f"Unhandled usage type {type(usage)}") return usage From 83415de1e493f304656ba81b76402e494aa8ced5 Mon Sep 17 00:00:00 2001 From: steven Date: Sun, 20 Dec 2020 16:31:59 -0700 Subject: [PATCH 22/29] annotation --- ignite/engine/engine.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ignite/engine/engine.py b/ignite/engine/engine.py index d41411f5e2e9..ae7b234c9953 100644 --- a/ignite/engine/engine.py +++ b/ignite/engine/engine.py @@ -828,7 +828,7 @@ def _run_once_on_dataset(self) -> float: warnings.warn( "Data iterator can not provide data anymore but required total number of " "iterations to run is not reached. " - f"Current iteration: {self.state.iteration} vs Total iterations to run : {self.state.epoch_length * self.state.max_epochs}" # type: ignore[operator] + f"Current iteration: {self.state.iteration} vs Total iterations to run : {self.state.epoch_length * self.state.max_epochs}" # type: ignore[operator] ) break From 578ac33e5f5288f8d9027310889ae8425fce38d8 Mon Sep 17 00:00:00 2001 From: uribgp Date: Sun, 20 Dec 2020 23:35:46 +0000 Subject: [PATCH 23/29] autopep8 fix --- ignite/engine/engine.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ignite/engine/engine.py b/ignite/engine/engine.py index 9952bb674529..1708fc82d7a0 100644 --- a/ignite/engine/engine.py +++ b/ignite/engine/engine.py @@ -826,7 +826,8 @@ def _run_once_on_dataset(self) -> float: warnings.warn( "Data iterator can not provide data anymore but required total number of " "iterations to run is not reached. " - f"Current iteration: {self.state.iteration} vs Total iterations to run : {self.state.epoch_length * self.state.max_epochs}" # type: ignore[operator] + # type: ignore[operator] + f"Current iteration: {self.state.iteration} vs Total iterations to run : {self.state.epoch_length * self.state.max_epochs}" ) break From e215dcd7f9acec30d2f483888ab3ec8bac5b99e0 Mon Sep 17 00:00:00 2001 From: steven Date: Sun, 20 Dec 2020 17:26:07 -0700 Subject: [PATCH 24/29] type ignore --- ignite/engine/engine.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ignite/engine/engine.py b/ignite/engine/engine.py index 9952bb674529..f8d7a056c4d5 100644 --- a/ignite/engine/engine.py +++ b/ignite/engine/engine.py @@ -826,7 +826,7 @@ def _run_once_on_dataset(self) -> float: warnings.warn( "Data iterator can not provide data anymore but required total number of " "iterations to run is not reached. 
" - f"Current iteration: {self.state.iteration} vs Total iterations to run : {self.state.epoch_length * self.state.max_epochs}" # type: ignore[operator] + f"Current iteration: {self.state.iteration} vs Total iterations to run : {self.state.epoch_length * self.state.max_epochs}" # type: ignore[operator] ) break From fd14a5995896fb37d39c26cd26522de961cae63e Mon Sep 17 00:00:00 2001 From: uribgp Date: Mon, 21 Dec 2020 03:54:14 +0000 Subject: [PATCH 25/29] autopep8 fix --- ignite/engine/engine.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ignite/engine/engine.py b/ignite/engine/engine.py index 9952bb674529..1708fc82d7a0 100644 --- a/ignite/engine/engine.py +++ b/ignite/engine/engine.py @@ -826,7 +826,8 @@ def _run_once_on_dataset(self) -> float: warnings.warn( "Data iterator can not provide data anymore but required total number of " "iterations to run is not reached. " - f"Current iteration: {self.state.iteration} vs Total iterations to run : {self.state.epoch_length * self.state.max_epochs}" # type: ignore[operator] + # type: ignore[operator] + f"Current iteration: {self.state.iteration} vs Total iterations to run : {self.state.epoch_length * self.state.max_epochs}" ) break From a9bcb5274fcc184d99072a74c34fdc56d05966f0 Mon Sep 17 00:00:00 2001 From: steven Date: Mon, 21 Dec 2020 15:17:52 -0700 Subject: [PATCH 26/29] autopep8 work around --- ignite/engine/engine.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ignite/engine/engine.py b/ignite/engine/engine.py index 9952bb674529..bb57d3c1da8b 100644 --- a/ignite/engine/engine.py +++ b/ignite/engine/engine.py @@ -823,10 +823,10 @@ def _run_once_on_dataset(self) -> float: # Should exit while loop if we can not iterate if should_exit: if not self._is_done(self.state): + msg = f"Current iteration: {self.state.iteration} vs Total iterations to run : {self.state.epoch_length * self.state.max_epochs}" # type: ignore[operator] warnings.warn( "Data iterator can not provide data anymore but required total number of " - "iterations to run is not reached. " - f"Current iteration: {self.state.iteration} vs Total iterations to run : {self.state.epoch_length * self.state.max_epochs}" # type: ignore[operator] + f"iterations to run is not reached. {msg}" ) break From 0dd131a5d2f30847077c17eda2ebda21ec4316d1 Mon Sep 17 00:00:00 2001 From: uribgp Date: Mon, 21 Dec 2020 22:22:16 +0000 Subject: [PATCH 27/29] autopep8 fix --- ignite/engine/engine.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ignite/engine/engine.py b/ignite/engine/engine.py index bb57d3c1da8b..d3412970d1ce 100644 --- a/ignite/engine/engine.py +++ b/ignite/engine/engine.py @@ -823,7 +823,8 @@ def _run_once_on_dataset(self) -> float: # Should exit while loop if we can not iterate if should_exit: if not self._is_done(self.state): - msg = f"Current iteration: {self.state.iteration} vs Total iterations to run : {self.state.epoch_length * self.state.max_epochs}" # type: ignore[operator] + # type: ignore[operator] + msg = f"Current iteration: {self.state.iteration} vs Total iterations to run : {self.state.epoch_length * self.state.max_epochs}" warnings.warn( "Data iterator can not provide data anymore but required total number of " f"iterations to run is not reached. 
{msg}" From f33f8fbd1de85f9fceff357a81852b64af2fca05 Mon Sep 17 00:00:00 2001 From: steven Date: Tue, 22 Dec 2020 09:32:01 -0700 Subject: [PATCH 28/29] bypass autopep --- ignite/engine/engine.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/ignite/engine/engine.py b/ignite/engine/engine.py index d3412970d1ce..2aa0ec047086 100644 --- a/ignite/engine/engine.py +++ b/ignite/engine/engine.py @@ -823,8 +823,10 @@ def _run_once_on_dataset(self) -> float: # Should exit while loop if we can not iterate if should_exit: if not self._is_done(self.state): - # type: ignore[operator] - msg = f"Current iteration: {self.state.iteration} vs Total iterations to run : {self.state.epoch_length * self.state.max_epochs}" + total_iters = self.state.epoch_length * self.state.max_epochs # type: ignore[operator] + msg = ( + f"Current iteration: {self.state.iteration} vs Total iterations to run : {total_iters}" + ) warnings.warn( "Data iterator can not provide data anymore but required total number of " f"iterations to run is not reached. {msg}" From b5539e0cc7b5102c3bf3204129223d8624b61ca8 Mon Sep 17 00:00:00 2001 From: vfdev Date: Tue, 22 Dec 2020 19:03:08 +0100 Subject: [PATCH 29/29] Update ignite/metrics/epoch_metric.py --- ignite/metrics/epoch_metric.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ignite/metrics/epoch_metric.py b/ignite/metrics/epoch_metric.py index cb4a48710e41..cc8f3ec4a7c0 100644 --- a/ignite/metrics/epoch_metric.py +++ b/ignite/metrics/epoch_metric.py @@ -88,7 +88,7 @@ def _check_type(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None: dtype_preds = self._predictions[-1].dtype if dtype_preds != y_pred.dtype: raise ValueError( - "Incoherent types between input y_pred and stored predictions: " f"{dtype_preds} vs {y_pred.dtype}" + f"Incoherent types between input y_pred and stored predictions: {dtype_preds} vs {y_pred.dtype}" ) dtype_targets = self._targets[-1].dtype