From 41a07d61d3e0f91606297dd6a368db2b0128886a Mon Sep 17 00:00:00 2001 From: chenglu Date: Mon, 20 Dec 2021 14:48:55 +0800 Subject: [PATCH 1/7] fix: remove trailing commas that would mislead black formatting --- ignite/distributed/comp_models/base.py | 7 +- ignite/distributed/comp_models/horovod.py | 6 +- ignite/distributed/comp_models/native.py | 8 +- ignite/distributed/comp_models/xla.py | 11 +- ignite/distributed/launcher.py | 4 +- ignite/distributed/utils.py | 18 +-- ignite/engine/__init__.py | 4 +- ignite/engine/engine.py | 6 +- ignite/handlers/checkpoint.py | 2 +- ignite/handlers/lr_finder.py | 2 +- ignite/handlers/param_scheduler.py | 10 +- ignite/handlers/state_param_scheduler.py | 2 +- ignite/handlers/time_profilers.py | 8 +- ignite/handlers/timing.py | 4 +- ignite/metrics/classification_report.py | 8 +- ignite/metrics/metric.py | 4 +- ignite/metrics/nlp/bleu.py | 10 +- ignite/metrics/nlp/rouge.py | 2 +- tests/ignite/conftest.py | 2 +- tests/ignite/contrib/engines/test_common.py | 8 +- .../contrib/handlers/test_mlflow_logger.py | 22 +-- .../contrib/handlers/test_neptune_logger.py | 24 ++- .../contrib/handlers/test_polyaxon_logger.py | 11 +- .../handlers/test_tensorboard_logger.py | 26 ++-- .../contrib/handlers/test_tqdm_logger.py | 6 +- .../contrib/handlers/test_visdom_logger.py | 146 +++++++++--------- .../regression/test_canberra_metric.py | 2 +- .../test_fractional_absolute_error.py | 2 +- .../regression/test_fractional_bias.py | 2 +- .../test_geometric_mean_absolute_error.py | 2 +- ..._geometric_mean_relative_absolute_error.py | 6 +- .../regression/test_manhattan_distance.py | 2 +- .../regression/test_maximum_absolute_error.py | 2 +- .../test_mean_absolute_relative_error.py | 2 +- .../metrics/regression/test_mean_error.py | 2 +- .../regression/test_mean_normalized_bias.py | 2 +- .../regression/test_median_absolute_error.py | 8 +- .../test_median_absolute_percentage_error.py | 8 +- .../test_median_relative_absolute_error.py | 26 +++- .../metrics/regression/test_r2_score.py | 6 +- .../regression/test_wave_hedges_distance.py | 2 +- .../contrib/metrics/test_average_precision.py | 2 +- .../contrib/metrics/test_cohen_kappa.py | 8 +- tests/ignite/contrib/metrics/test_gpu_info.py | 4 +- tests/ignite/contrib/metrics/test_roc_auc.py | 2 +- .../distributed/comp_models/test_base.py | 4 +- .../distributed/comp_models/test_xla.py | 10 +- tests/ignite/distributed/utils/__init__.py | 6 +- tests/ignite/engine/test_create_supervised.py | 8 +- tests/ignite/engine/test_deterministic.py | 16 +- tests/ignite/engine/test_engine.py | 8 +- tests/ignite/engine/test_event_handlers.py | 2 +- tests/ignite/handlers/test_checkpoint.py | 18 +-- tests/ignite/handlers/test_param_scheduler.py | 8 +- .../handlers/test_state_param_scheduler.py | 40 ++--- tests/ignite/metrics/gan/test_fid.py | 4 +- tests/ignite/metrics/nlp/test_rouge.py | 2 +- tests/ignite/metrics/nlp/test_utils.py | 4 +- tests/ignite/metrics/test_accumulation.py | 2 +- tests/ignite/metrics/test_accuracy.py | 2 +- tests/ignite/metrics/test_loss.py | 2 +- tests/ignite/metrics/test_metric.py | 4 +- tests/ignite/metrics/test_metrics_lambda.py | 2 +- tests/ignite/metrics/test_precision.py | 2 +- tests/ignite/metrics/test_recall.py | 2 +- .../metrics/test_root_mean_squared_error.py | 4 +- tests/ignite/metrics/test_running_average.py | 10 +- tests/ignite/test_utils.py | 11 +- 68 files changed, 274 insertions(+), 348 deletions(-) diff --git a/ignite/distributed/comp_models/base.py b/ignite/distributed/comp_models/base.py index 2b083ecfbb6c..6b58d7069df4
100644 --- a/ignite/distributed/comp_models/base.py +++ b/ignite/distributed/comp_models/base.py @@ -95,7 +95,7 @@ def _encode_str(x: str, device: torch.device, size: int) -> torch.Tensor: return padded_x.unsqueeze(0) def _get_max_length(self, x: str, device: torch.device) -> int: - size = torch.tensor([len(x),], device=device) + size = torch.tensor([len(x)], device=device) size = self._do_all_reduce(size, "MAX") return cast(int, size.item()) @@ -245,7 +245,7 @@ def broadcast( if rank != src: tensor = torch.empty(1, device=device, dtype=torch.float) else: - tensor = torch.tensor([tensor,], device=device, dtype=torch.float) + tensor = torch.tensor([tensor], device=device, dtype=torch.float) elif isinstance(tensor, str): tensor_to_str = True max_length = self._get_max_length(tensor, device) @@ -281,8 +281,7 @@ def barrier(self) -> None: class _SerialModel(ComputationModel): - """Private class defines non-distributed computation model for code compatibility with other distributed models. - """ + """Private class defines non-distributed computation model for code compatibility with other distributed models.""" name = "serial" available_backends = () diff --git a/ignite/distributed/comp_models/horovod.py b/ignite/distributed/comp_models/horovod.py index 0e313ed81ad1..dcfc227d1437 100644 --- a/ignite/distributed/comp_models/horovod.py +++ b/ignite/distributed/comp_models/horovod.py @@ -25,8 +25,7 @@ HOROVOD = "horovod" class _HorovodDistModel(ComputationModel): - """Private class for `Horovod `_ distributed computation model. - """ + """Private class for `Horovod `_ distributed computation model.""" name = "horovod-dist" @@ -60,8 +59,7 @@ def create_from_backend(backend: str = HOROVOD, **kwargs: Any) -> "_HorovodDistM return _HorovodDistModel(backend, **kwargs) def __init__(self, backend: Optional[str] = None, **kwargs: Any) -> None: - """This is a private method. Please, use `create_from_backend` or `create_from_context` - """ + """This is a private method. Please, use `create_from_backend` or `create_from_context`""" super(_HorovodDistModel, self).__init__() if backend is not None: self._create_from_backend(backend, **kwargs) diff --git a/ignite/distributed/comp_models/native.py b/ignite/distributed/comp_models/native.py index 0b568497a5f3..b1fd40587cb5 100644 --- a/ignite/distributed/comp_models/native.py +++ b/ignite/distributed/comp_models/native.py @@ -82,8 +82,7 @@ def __init__( rank: Optional[int] = None, **kwargs: Any, ) -> None: - """This is a private method. Please, use `create_from_backend` or `create_from_context` - """ + """This is a private method. 
Please, use `create_from_backend` or `create_from_context`""" super(_NativeDistModel, self).__init__() self._env_backup = None # type: Optional[Dict[str, str]] self._local_rank = None # type: Optional[int] @@ -177,7 +176,7 @@ def _compute_node_and_local_ranks(rank: int, hostnames: List[Tuple[str, ...]]) - from collections import Counter c = Counter(hostnames) # type: Counter - sizes = torch.tensor([0,] + list(c.values())) + sizes = torch.tensor([0] + list(c.values())) cumsum_sizes = torch.cumsum(sizes, dim=0) node_rank = (rank // cumsum_sizes[1:]).clamp(0, 1).sum().item() local_rank = rank - cumsum_sizes[node_rank].item() @@ -503,8 +502,7 @@ def _expand_hostlist(nodelist: str) -> List[str]: return result_hostlist def _setup_ddp_vars_from_slurm_env(environ: Dict[str, str]) -> Dict[str, Union[str, int]]: - """Method to setup DDP env vars required by PyTorch from SLURM env - """ + """Method to setup DDP env vars required by PyTorch from SLURM env""" # 1) Tools like enroot can have hooks to translate slurm env vars to RANK, LOCAL_RANK, WORLD_SIZE etc # See https://github.com/NVIDIA/enroot/blob/v3.1.0/conf/hooks/extra/50-slurm-pytorch.sh # 2) User can use torch.distributed.launch tool to schedule on N local GPUs using 1 node, 1 task by SLURM diff --git a/ignite/distributed/comp_models/xla.py b/ignite/distributed/comp_models/xla.py index 58feca3d131f..ebf55240f4f3 100644 --- a/ignite/distributed/comp_models/xla.py +++ b/ignite/distributed/comp_models/xla.py @@ -45,8 +45,7 @@ def create_from_backend(backend: str = XLA_TPU, **kwargs: Any) -> "_XlaDistModel return _XlaDistModel(backend=backend, **kwargs) def __init__(self, backend: Optional[str] = None, **kwargs: Any): - """This is a private method. Please, use `create_from_backend` or `create_from_context` - """ + """This is a private method. 
Please, use `create_from_backend` or `create_from_context`""" super(_XlaDistModel, self).__init__() if backend is not None: self._create_from_backend(backend, **kwargs) @@ -65,7 +64,7 @@ def _init_from_context(self) -> None: def _compute_nproc_per_node(self) -> int: tensor = torch.tensor([self.get_local_rank() + 1.0], dtype=torch.float).to(self.device()) - xm.all_reduce("max", [tensor,]) + xm.all_reduce("max", [tensor]) return int(tensor.item()) def get_local_rank(self) -> int: @@ -142,7 +141,7 @@ def _do_all_reduce(self, tensor: torch.Tensor, op: str = "SUM") -> torch.Tensor: if op not in self._reduce_op_map: raise ValueError(f"Unsupported reduction operation: '{op}'") op = self._reduce_op_map[op] - xm.all_reduce(op, [tensor,]) + xm.all_reduce(op, [tensor]) return tensor def _do_all_gather(self, tensor: torch.Tensor) -> torch.Tensor: @@ -150,14 +149,14 @@ def _do_all_gather(self, tensor: torch.Tensor) -> torch.Tensor: group_size = self.get_world_size() output = torch.zeros((group_size,) + tensor.shape, dtype=tensor.dtype, device=tensor.device) output[self.get_rank() % group_size] = tensor - xm.all_reduce("sum", [output,]) + xm.all_reduce("sum", [output]) return output.reshape(-1, *output.shape[2:]) def _do_broadcast(self, tensor: torch.Tensor, src: int) -> torch.Tensor: # from https://github.com/jysohn23/xla/blob/model-parallel-colab/Gather_Scatter_Broadcast_PyTorch_XLA.ipynb if src != self.get_rank(): tensor.fill_(0.0) - xm.all_reduce("sum", [tensor,]) + xm.all_reduce("sum", [tensor]) return tensor def barrier(self) -> None: diff --git a/ignite/distributed/launcher.py b/ignite/distributed/launcher.py index 41dbe3b90e81..86b811f7b432 100644 --- a/ignite/distributed/launcher.py +++ b/ignite/distributed/launcher.py @@ -327,9 +327,7 @@ def __enter__(self) -> "Parallel": f"Initialized distributed launcher with backend: '{self.backend}'" ) msg = "\n\t".join([f"{k}: {v}" for k, v in self._spawn_params.items() if v is not None]) - self._logger.info( # type: ignore[attr-defined] - f"- Parameters to spawn processes: \n\t{msg}" - ) + self._logger.info(f"- Parameters to spawn processes: \n\t{msg}") # type: ignore[attr-defined] return self diff --git a/ignite/distributed/utils.py b/ignite/distributed/utils.py index 9505756ba6c2..e26d81d5ca4e 100644 --- a/ignite/distributed/utils.py +++ b/ignite/distributed/utils.py @@ -133,8 +133,7 @@ def model_name() -> str: def get_world_size() -> int: - """Returns world size of current distributed configuration. Returns 1 if no distributed configuration. - """ + """Returns world size of current distributed configuration. Returns 1 if no distributed configuration.""" if _need_to_sync and isinstance(_model, _SerialModel): sync(temporary=True) @@ -142,8 +141,7 @@ def get_world_size() -> int: def get_rank() -> int: - """Returns process rank within current distributed configuration. Returns 0 if no distributed configuration. - """ + """Returns process rank within current distributed configuration. Returns 0 if no distributed configuration.""" if _need_to_sync and isinstance(_model, _SerialModel): sync(temporary=True) @@ -151,8 +149,7 @@ def get_rank() -> int: def get_local_rank() -> int: - """Returns local process rank within current distributed configuration. Returns 0 if no distributed configuration. - """ + """Returns local process rank within current distributed configuration. 
Returns 0 if no distributed configuration.""" if _need_to_sync and isinstance(_model, _SerialModel): sync(temporary=True) @@ -190,8 +187,7 @@ def get_node_rank() -> int: def hostname() -> str: - """Returns host name for current process within current distributed configuration. - """ + """Returns host name for current process within current distributed configuration.""" return socket.gethostname() @@ -422,8 +418,7 @@ def broadcast( def barrier() -> None: - """Helper method to synchronize all processes. - """ + """Helper method to synchronize all processes.""" if _need_to_sync and isinstance(_model, _SerialModel): sync(temporary=True) @@ -543,8 +538,7 @@ def finalize() -> None: def show_config() -> None: - """Helper method to display distributed configuration via ``logging``. - """ + """Helper method to display distributed configuration via ``logging``.""" # setup parallel logger logger = setup_logger(__name__) diff --git a/ignite/engine/__init__.py b/ignite/engine/__init__.py index 89e0a8c7ed48..c3c1a80c5621 100644 --- a/ignite/engine/__init__.py +++ b/ignite/engine/__init__.py @@ -33,9 +33,7 @@ def _prepare_batch( batch: Sequence[torch.Tensor], device: Optional[Union[str, torch.device]] = None, non_blocking: bool = False ) -> Tuple[Union[torch.Tensor, Sequence, Mapping, str, bytes], ...]: - """Prepare batch for training: pass to a device with options. - - """ + """Prepare batch for training: pass to a device with options.""" x, y = batch return ( convert_tensor(x, device=device, non_blocking=non_blocking), diff --git a/ignite/engine/engine.py b/ignite/engine/engine.py index 31a1d2be1d60..7b7a5c924aef 100644 --- a/ignite/engine/engine.py +++ b/ignite/engine/engine.py @@ -446,14 +446,12 @@ def fire_event(self, event_name: Any) -> None: return self._fire_event(event_name) def terminate(self) -> None: - """Sends terminate signal to the engine, so that it terminates completely the run after the current iteration. - """ + """Sends terminate signal to the engine, so that it terminates completely the run after the current iteration.""" self.logger.info("Terminate signaled. Engine will stop after current iteration is finished.") self.should_terminate = True def terminate_epoch(self) -> None: - """Sends terminate signal to the engine, so that it terminates the current epoch after the current iteration. - """ + """Sends terminate signal to the engine, so that it terminates the current epoch after the current iteration.""" self.logger.info( "Terminate current epoch is signaled. " "Current epoch iteration will stop after current iteration is finished." diff --git a/ignite/handlers/checkpoint.py b/ignite/handlers/checkpoint.py index 2f7e6843149f..9a366c4f6ec5 100644 --- a/ignite/handlers/checkpoint.py +++ b/ignite/handlers/checkpoint.py @@ -451,7 +451,7 @@ def _setup_checkpoint(self) -> Dict[str, Dict[Any, Any]]: @staticmethod def setup_filename_pattern( - with_prefix: bool = True, with_score: bool = True, with_score_name: bool = True, with_global_step: bool = True, + with_prefix: bool = True, with_score: bool = True, with_score_name: bool = True, with_global_step: bool = True ) -> str: """Helper method to get the default filename pattern for a checkpoint. 
diff --git a/ignite/handlers/lr_finder.py b/ignite/handlers/lr_finder.py index 665e73911597..cdde715faa03 100644 --- a/ignite/handlers/lr_finder.py +++ b/ignite/handlers/lr_finder.py @@ -288,7 +288,7 @@ def plot( ] for lr in sug_lr: ax.scatter( - lr, corresponding_loss, color="red" if len(sug_lr) == 1 else None, s=75, marker="o", zorder=3, + lr, corresponding_loss, color="red" if len(sug_lr) == 1 else None, s=75, marker="o", zorder=3 ) # handle skip_end=0 properly diff --git a/ignite/handlers/param_scheduler.py b/ignite/handlers/param_scheduler.py index 615fe9bc40df..be0d50795262 100644 --- a/ignite/handlers/param_scheduler.py +++ b/ignite/handlers/param_scheduler.py @@ -28,9 +28,7 @@ class BaseParamScheduler(metaclass=ABCMeta): """ - def __init__( - self, param_name: str, save_history: bool = False, - ): + def __init__(self, param_name: str, save_history: bool = False): self.param_name = param_name self.event_index = 0 self._save_history = save_history @@ -497,8 +495,7 @@ class CosineAnnealingScheduler(CyclicalScheduler): """ def get_param(self) -> float: - """Method to get current optimizer's parameter value - """ + """Method to get current optimizer's parameter value""" cycle_progress = self.event_index / self.cycle_size return self.start_value + ((self.end_value - self.start_value) / 2) * (1 - math.cos(math.pi * cycle_progress)) @@ -782,8 +779,7 @@ def __call__(self, engine: Optional[Engine], name: Optional[str] = None) -> None super(LRScheduler, self).__call__(engine, name) def get_param(self) -> Union[float, List[float]]: - """Method to get current optimizer's parameter value - """ + """Method to get current optimizer's parameter value""" # Emulate context manager for pytorch>=1.4 self.lr_scheduler._get_lr_called_within_step = True # type: ignore[attr-defined] lr_list = cast(List[float], self.lr_scheduler.get_lr()) diff --git a/ignite/handlers/state_param_scheduler.py b/ignite/handlers/state_param_scheduler.py index e01f62319236..a98cd30380da 100644 --- a/ignite/handlers/state_param_scheduler.py +++ b/ignite/handlers/state_param_scheduler.py @@ -302,7 +302,7 @@ class ExpStateScheduler(StateParamScheduler): """ def __init__( - self, initial_value: float, gamma: float, param_name: str, save_history: bool = False, create_new: bool = False, + self, initial_value: float, gamma: float, param_name: str, save_history: bool = False, create_new: bool = False ): super(ExpStateScheduler, self).__init__(param_name, save_history, create_new) self.initial_value = initial_value diff --git a/ignite/handlers/time_profilers.py b/ignite/handlers/time_profilers.py index 0d9e46d65e56..be0d8c6e8840 100644 --- a/ignite/handlers/time_profilers.py +++ b/ignite/handlers/time_profilers.py @@ -261,7 +261,7 @@ def get_results(self) -> Dict[str, Dict[str, Any]]: [ ("processing_stats", self._compute_basic_stats(self.processing_times)), ("dataflow_stats", self._compute_basic_stats(self.dataflow_times)), - ("event_handlers_stats", event_handlers_stats,), + ("event_handlers_stats", event_handlers_stats), ( "event_handlers_names", {str(e.name).replace(".", "_") + "_names": v for e, v in self.event_handlers_names.items()}, @@ -601,7 +601,7 @@ def get_results(self) -> List[List[Union[str, float]]]: for h in self.event_handlers_times[e] ] ) - total_eh_time = round(float(total_eh_time), 5,) + total_eh_time = round(float(total_eh_time), 5) def compute_basic_stats( times: Union[Sequence, torch.Tensor] @@ -681,9 +681,9 @@ def write_results(self, output_path: str) -> None: # pad all tensors to have same length cols = 
[torch.nn.functional.pad(x, pad=(0, max_len - x.numel()), mode="constant", value=0) for x in cols] - results_dump = torch.stack(cols, dim=1,).numpy() + results_dump = torch.stack(cols, dim=1).numpy() - results_df = pd.DataFrame(data=results_dump, columns=headers,) + results_df = pd.DataFrame(data=results_dump, columns=headers) results_df.to_csv(output_path, index=False) @staticmethod diff --git a/ignite/handlers/timing.py b/ignite/handlers/timing.py index 5f105ffdb587..16c65dc05705 100644 --- a/ignite/handlers/timing.py +++ b/ignite/handlers/timing.py @@ -7,7 +7,7 @@ class Timer: - """ Timer object can be used to measure (average) time between events. + """Timer object can be used to measure (average) time between events. Args: average: if True, then when ``.value()`` method is called, the returned value @@ -96,7 +96,7 @@ def attach( resume: Optional[Events] = None, step: Optional[Events] = None, ) -> "Timer": - """ Register callbacks to control the timer. + """Register callbacks to control the timer. Args: engine: Engine that this timer will be attached to. diff --git a/ignite/metrics/classification_report.py b/ignite/metrics/classification_report.py index b2f04d797fb2..cd0265432da8 100644 --- a/ignite/metrics/classification_report.py +++ b/ignite/metrics/classification_report.py @@ -106,15 +106,15 @@ def ClassificationReport( """ # setup all the underlying metrics - precision = Precision(average=False, is_multilabel=is_multilabel, output_transform=output_transform, device=device,) - recall = Recall(average=False, is_multilabel=is_multilabel, output_transform=output_transform, device=device,) + precision = Precision(average=False, is_multilabel=is_multilabel, output_transform=output_transform, device=device) + recall = Recall(average=False, is_multilabel=is_multilabel, output_transform=output_transform, device=device) fbeta = Fbeta(beta, average=False, precision=precision, recall=recall) averaged_precision = precision.mean() averaged_recall = recall.mean() averaged_fbeta = fbeta.mean() def _wrapper( - recall_metric: Metric, precision_metric: Metric, f: Metric, a_recall: Metric, a_precision: Metric, a_f: Metric, + recall_metric: Metric, precision_metric: Metric, f: Metric, a_recall: Metric, a_precision: Metric, a_f: Metric ) -> Union[Collection[str], Dict]: p_tensor, r_tensor, f_tensor = precision_metric, recall_metric, f if p_tensor.shape != r_tensor.shape: @@ -141,4 +141,4 @@ def _wrapper( def _get_label_for_class(idx: int) -> str: return labels[idx] if labels else str(idx) - return MetricsLambda(_wrapper, recall, precision, fbeta, averaged_recall, averaged_precision, averaged_fbeta,) + return MetricsLambda(_wrapper, recall, precision, fbeta, averaged_recall, averaged_precision, averaged_fbeta) diff --git a/ignite/metrics/metric.py b/ignite/metrics/metric.py index 82059146c592..f48df9714d98 100644 --- a/ignite/metrics/metric.py +++ b/ignite/metrics/metric.py @@ -204,7 +204,7 @@ def compute(self): _required_output_keys = required_output_keys def __init__( - self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu"), + self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu") ): self._output_transform = output_transform @@ -616,4 +616,4 @@ def _is_list_of_tensors_or_numbers(x: Sequence[Union[torch.Tensor, float]]) -> b def _to_batched_tensor(x: Union[torch.Tensor, float], device: Optional[torch.device] = None) -> torch.Tensor: if isinstance(x, torch.Tensor): return x.unsqueeze(dim=0) - return 
torch.tensor([x,], device=device) + return torch.tensor([x], device=device) diff --git a/ignite/metrics/nlp/bleu.py b/ignite/metrics/nlp/bleu.py index ab8dfb5cc37f..6f662046b8a9 100644 --- a/ignite/metrics/nlp/bleu.py +++ b/ignite/metrics/nlp/bleu.py @@ -181,7 +181,7 @@ def _n_gram_counter( return hyp_lengths, ref_lengths def _brevity_penalty_smoothing( - self, p_numerators: torch.Tensor, p_denominators: torch.Tensor, hyp_length_sum: int, ref_length_sum: int, + self, p_numerators: torch.Tensor, p_denominators: torch.Tensor, hyp_length_sum: int, ref_length_sum: int ) -> float: # Returns 0 if there's no matching n-grams @@ -208,18 +208,16 @@ def _brevity_penalty_smoothing( gm = bp * math.exp(math.fsum(s)) return gm - def _sentence_bleu(self, references: Sequence[Sequence[Any]], candidates: Sequence[Any],) -> float: + def _sentence_bleu(self, references: Sequence[Sequence[Any]], candidates: Sequence[Any]) -> float: return self._corpus_bleu([references], [candidates]) - def _corpus_bleu( - self, references: Sequence[Sequence[Sequence[Any]]], candidates: Sequence[Sequence[Any]], - ) -> float: + def _corpus_bleu(self, references: Sequence[Sequence[Sequence[Any]]], candidates: Sequence[Sequence[Any]]) -> float: p_numerators: torch.Tensor = torch.zeros(self.ngrams_order + 1) p_denominators: torch.Tensor = torch.zeros(self.ngrams_order + 1) hyp_length_sum, ref_length_sum = self._n_gram_counter( - references=references, candidates=candidates, p_numerators=p_numerators, p_denominators=p_denominators, + references=references, candidates=candidates, p_numerators=p_numerators, p_denominators=p_denominators ) bleu_score = self._brevity_penalty_smoothing( p_numerators=p_numerators, diff --git a/ignite/metrics/nlp/rouge.py b/ignite/metrics/nlp/rouge.py index 2784e51c9a5c..232663843160 100644 --- a/ignite/metrics/nlp/rouge.py +++ b/ignite/metrics/nlp/rouge.py @@ -152,7 +152,7 @@ def reset(self) -> None: def update(self, output: Tuple[Sequence[Sequence[Any]], Sequence[Sequence[Sequence[Any]]]]) -> None: candidates, references = output for _candidate, _reference in zip(candidates, references): - multiref_scores = [self._compute_score(candidate=_candidate, reference=_ref,) for _ref in _reference] + multiref_scores = [self._compute_score(candidate=_candidate, reference=_ref) for _ref in _reference] score = self._mutliref_reducer(multiref_scores) precision = score.precision() recall = score.recall() diff --git a/tests/ignite/conftest.py b/tests/ignite/conftest.py index 5758bb892622..3f1e27cc3551 100644 --- a/tests/ignite/conftest.py +++ b/tests/ignite/conftest.py @@ -51,7 +51,7 @@ def func(): @pytest.fixture() def local_rank(worker_id): - """ use a different account in each xdist worker """ + """use a different account in each xdist worker""" if "gw" in worker_id: lrank = int(worker_id.replace("gw", "")) diff --git a/tests/ignite/contrib/engines/test_common.py b/tests/ignite/contrib/engines/test_common.py index aed310d66bd6..5ad323a862fa 100644 --- a/tests/ignite/contrib/engines/test_common.py +++ b/tests/ignite/contrib/engines/test_common.py @@ -57,7 +57,7 @@ def _test_setup_common_training_handlers( model = DummyModel().to(device) if distributed and "cuda" in torch.device(device).type: - model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank,], output_device=local_rank) + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank) optimizer = torch.optim.SGD(model.parameters(), lr=lr) if lr_scheduler is None: @@ -98,7 +98,7 @@ def 
update_fn(engine, batch): save_handler=save_handler, lr_scheduler=lr_scheduler, with_gpu_stats=False, - output_names=["batch_loss",], + output_names=["batch_loss"], with_pbars=True, with_pbar_on_iters=True, log_every_iters=50, @@ -358,8 +358,8 @@ def set_eval_metric(engine): evaluators = evaluators["validation"] if with_optim: - t = torch.tensor([0,]) - optimizers = {"optimizer": torch.optim.SGD([t,], lr=0.01)} + t = torch.tensor([0]) + optimizers = {"optimizer": torch.optim.SGD([t], lr=0.01)} if as_class: optimizers = optimizers["optimizer"] diff --git a/tests/ignite/contrib/handlers/test_mlflow_logger.py b/tests/ignite/contrib/handlers/test_mlflow_logger.py index d907b9a873c8..949f1661b628 100644 --- a/tests/ignite/contrib/handlers/test_mlflow_logger.py +++ b/tests/ignite/contrib/handlers/test_mlflow_logger.py @@ -44,9 +44,7 @@ def test_output_handler_output_transform(): mock_logger.log_metrics = MagicMock() wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) - mock_logger.log_metrics.assert_called_once_with( - {"another_tag loss": 12345}, step=123, - ) + mock_logger.log_metrics.assert_called_once_with({"another_tag loss": 12345}, step=123) def test_output_handler_metric_names(): @@ -62,11 +60,9 @@ def test_output_handler_metric_names(): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert mock_logger.log_metrics.call_count == 1 - mock_logger.log_metrics.assert_called_once_with( - {"tag a": 12.23, "tag b": 23.45, "tag c": 10.0}, step=5, - ) + mock_logger.log_metrics.assert_called_once_with({"tag a": 12.23, "tag b": 23.45, "tag c": 10.0}, step=5) - wrapper = OutputHandler("tag", metric_names=["a",]) + wrapper = OutputHandler("tag", metric_names=["a"]) mock_engine = MagicMock() mock_engine.state = State(metrics={"a": torch.Tensor([0.0, 1.0, 2.0, 3.0])}) @@ -79,7 +75,7 @@ def test_output_handler_metric_names(): assert mock_logger.log_metrics.call_count == 1 mock_logger.log_metrics.assert_has_calls( - [call({"tag a 0": 0.0, "tag a 1": 1.0, "tag a 2": 2.0, "tag a 3": 3.0}, step=5),], any_order=True + [call({"tag a 0": 0.0, "tag a 1": 1.0, "tag a 2": 2.0, "tag a 3": 3.0}, step=5)], any_order=True ) wrapper = OutputHandler("tag", metric_names=["a", "c"]) @@ -112,9 +108,7 @@ def test_output_handler_both(): wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) assert mock_logger.log_metrics.call_count == 1 - mock_logger.log_metrics.assert_called_once_with( - {"tag a": 12.23, "tag b": 23.45, "tag loss": 12345}, step=5, - ) + mock_logger.log_metrics.assert_called_once_with({"tag a": 12.23, "tag b": 23.45, "tag loss": 12345}, step=5) def test_output_handler_with_wrong_global_step_transform_output(): @@ -203,7 +197,7 @@ def test_output_handler_state_attrs(): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) mock_logger.log_metrics.assert_called_once_with( - {"tag alpha": 3.899, "tag beta": torch.tensor(12.21).item(), "tag gamma 0": 21.0, "tag gamma 1": 6.0,}, step=5, + {"tag alpha": 3.899, "tag beta": torch.tensor(12.21).item(), "tag gamma 0": 21.0, "tag gamma 1": 6.0}, step=5 ) @@ -336,7 +330,7 @@ def test_mlflow_bad_metric_name_handling(dirname): handler = OutputHandler(tag="training", metric_names="all") engine = Engine(lambda e, b: None) - engine.state = State(metrics={"metric:0 in %": 123.0, "metric 0": 1000.0,}) + engine.state = State(metrics={"metric:0 in %": 123.0, "metric 0": 1000.0}) with pytest.warns(UserWarning, match=r"MLflowLogger output_handler encountered an invalid metric name"): @@ -353,7 +347,7 @@ def test_mlflow_bad_metric_name_handling(dirname): 
client = MlflowClient(tracking_uri=os.path.join(dirname, "mlruns")) stored_values = client.get_metric_history(active_run.info.run_id, "training metric 0") - for t, s in zip([1000.0,] + true_values, stored_values): + for t, s in zip([1000.0] + true_values, stored_values): assert t == s.value diff --git a/tests/ignite/contrib/handlers/test_neptune_logger.py b/tests/ignite/contrib/handlers/test_neptune_logger.py index 11186422b485..8224459be73b 100644 --- a/tests/ignite/contrib/handlers/test_neptune_logger.py +++ b/tests/ignite/contrib/handlers/test_neptune_logger.py @@ -94,11 +94,9 @@ def test_output_handler_metric_names(): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert mock_logger.log_metric.call_count == 2 - mock_logger.log_metric.assert_has_calls( - [call("tag/a", y=12.23, x=5), call("tag/b", y=23.45, x=5)], any_order=True, - ) + mock_logger.log_metric.assert_has_calls([call("tag/a", y=12.23, x=5), call("tag/b", y=23.45, x=5)], any_order=True) - wrapper = OutputHandler("tag", metric_names=["a",],) + wrapper = OutputHandler("tag", metric_names=["a"]) mock_engine = MagicMock() mock_logger.log_metric = MagicMock() @@ -134,9 +132,7 @@ def test_output_handler_metric_names(): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert mock_logger.log_metric.call_count == 1 - mock_logger.log_metric.assert_has_calls( - [call("tag/a", y=55.56, x=7),], any_order=True, - ) + mock_logger.log_metric.assert_has_calls([call("tag/a", y=55.56, x=7)], any_order=True) # all metrics wrapper = OutputHandler("tag", metric_names="all") @@ -149,9 +145,7 @@ def test_output_handler_metric_names(): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert mock_logger.log_metric.call_count == 2 - mock_logger.log_metric.assert_has_calls( - [call("tag/a", y=12.23, x=5), call("tag/b", y=23.45, x=5),], any_order=True, - ) + mock_logger.log_metric.assert_has_calls([call("tag/a", y=12.23, x=5), call("tag/b", y=23.45, x=5)], any_order=True) # log a torch tensor (ndimension = 0) wrapper = OutputHandler("tag", metric_names="all") @@ -165,7 +159,7 @@ def test_output_handler_metric_names(): assert mock_logger.log_metric.call_count == 2 mock_logger.log_metric.assert_has_calls( - [call("tag/a", y=torch.tensor(12.23).item(), x=5), call("tag/b", y=torch.tensor(23.45).item(), x=5),], + [call("tag/a", y=torch.tensor(12.23).item(), x=5), call("tag/b", y=torch.tensor(23.45).item(), x=5)], any_order=True, ) @@ -342,13 +336,13 @@ def test_weights_scalar_handler_frozen_layers(dummy_model_factory): wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) mock_logger.log_metric.assert_has_calls( - [call("weights_norm/fc2/weight", y=12.0, x=5), call("weights_norm/fc2/bias", y=math.sqrt(12.0), x=5),], + [call("weights_norm/fc2/weight", y=12.0, x=5), call("weights_norm/fc2/bias", y=math.sqrt(12.0), x=5)], any_order=True, ) with pytest.raises(AssertionError): mock_logger.log_metric.assert_has_calls( - [call("weights_norm/fc1/weight", y=12.0, x=5), call("weights_norm/fc1/bias", y=math.sqrt(12.0), x=5),], + [call("weights_norm/fc1/weight", y=12.0, x=5), call("weights_norm/fc1/bias", y=math.sqrt(12.0), x=5)], any_order=True, ) @@ -417,12 +411,12 @@ def test_grads_scalar_handler_frozen_layers(dummy_model_factory, norm_mock): wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) mock_logger.log_metric.assert_has_calls( - [call("grads_norm/fc2/weight", y=ANY, x=5), call("grads_norm/fc2/bias", y=ANY, x=5),], any_order=True, + [call("grads_norm/fc2/weight", y=ANY, x=5), call("grads_norm/fc2/bias", y=ANY, x=5)], 
any_order=True ) with pytest.raises(AssertionError): mock_logger.log_metric.assert_has_calls( - [call("grads_norm/fc1/weight", y=ANY, x=5), call("grads_norm/fc1/bias", y=ANY, x=5),], any_order=True, + [call("grads_norm/fc1/weight", y=ANY, x=5), call("grads_norm/fc1/bias", y=ANY, x=5)], any_order=True ) assert mock_logger.log_metric.call_count == 2 assert norm_mock.call_count == 2 diff --git a/tests/ignite/contrib/handlers/test_polyaxon_logger.py b/tests/ignite/contrib/handlers/test_polyaxon_logger.py index 660ebbfb1f2a..a97c7709784d 100644 --- a/tests/ignite/contrib/handlers/test_polyaxon_logger.py +++ b/tests/ignite/contrib/handlers/test_polyaxon_logger.py @@ -63,7 +63,7 @@ def test_output_handler_metric_names(): assert mock_logger.log_metrics.call_count == 1 mock_logger.log_metrics.assert_called_once_with(step=5, **{"tag/a": 12.23, "tag/b": 23.45, "tag/c": 10.0}) - wrapper = OutputHandler("tag", metric_names=["a",]) + wrapper = OutputHandler("tag", metric_names=["a"]) mock_engine = MagicMock() mock_engine.state = State(metrics={"a": torch.Tensor([0.0, 1.0, 2.0, 3.0])}) @@ -76,7 +76,7 @@ def test_output_handler_metric_names(): assert mock_logger.log_metrics.call_count == 1 mock_logger.log_metrics.assert_has_calls( - [call(step=5, **{"tag/a/0": 0.0, "tag/a/1": 1.0, "tag/a/2": 2.0, "tag/a/3": 3.0}),], any_order=True + [call(step=5, **{"tag/a/0": 0.0, "tag/a/1": 1.0, "tag/a/2": 2.0, "tag/a/3": 3.0})], any_order=True ) wrapper = OutputHandler("tag", metric_names=["a", "c"]) @@ -212,8 +212,7 @@ def test_output_handler_state_attrs(): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) mock_logger.log_metrics.assert_called_once_with( - **{"tag/alpha": 3.899, "tag/beta": torch.tensor(12.21).item(), "tag/gamma/0": 21.0, "tag/gamma/1": 6.0,}, - step=5, + **{"tag/alpha": 3.899, "tag/beta": torch.tensor(12.21).item(), "tag/gamma/0": 21.0, "tag/gamma/1": 6.0}, step=5 ) @@ -322,7 +321,5 @@ def no_site_packages(): def test_no_polyaxon_client(no_site_packages): - with pytest.raises( - RuntimeError, match=r"This contrib module requires polyaxon", - ): + with pytest.raises(RuntimeError, match=r"This contrib module requires polyaxon"): PolyaxonLogger() diff --git a/tests/ignite/contrib/handlers/test_tensorboard_logger.py b/tests/ignite/contrib/handlers/test_tensorboard_logger.py index 2b06cce9e169..eaba3237480b 100644 --- a/tests/ignite/contrib/handlers/test_tensorboard_logger.py +++ b/tests/ignite/contrib/handlers/test_tensorboard_logger.py @@ -99,11 +99,9 @@ def test_output_handler_metric_names(): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert mock_logger.writer.add_scalar.call_count == 2 - mock_logger.writer.add_scalar.assert_has_calls( - [call("tag/a", 12.23, 5), call("tag/b", 23.45, 5),], any_order=True, - ) + mock_logger.writer.add_scalar.assert_has_calls([call("tag/a", 12.23, 5), call("tag/b", 23.45, 5)], any_order=True) - wrapper = OutputHandler("tag", metric_names=["a",],) + wrapper = OutputHandler("tag", metric_names=["a"]) mock_engine = MagicMock() mock_engine.state = State(metrics={"a": torch.Tensor([0.0, 1.0, 2.0, 3.0])}) @@ -116,7 +114,7 @@ def test_output_handler_metric_names(): assert mock_logger.writer.add_scalar.call_count == 4 mock_logger.writer.add_scalar.assert_has_calls( - [call("tag/a/0", 0.0, 5), call("tag/a/1", 1.0, 5), call("tag/a/2", 2.0, 5), call("tag/a/3", 3.0, 5),], + [call("tag/a/0", 0.0, 5), call("tag/a/1", 1.0, 5), call("tag/a/2", 2.0, 5), call("tag/a/3", 3.0, 5)], any_order=True, ) @@ -133,9 +131,7 @@ def test_output_handler_metric_names(): 
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert mock_logger.writer.add_scalar.call_count == 1 - mock_logger.writer.add_scalar.assert_has_calls( - [call("tag/a", 55.56, 7),], any_order=True, - ) + mock_logger.writer.add_scalar.assert_has_calls([call("tag/a", 55.56, 7)], any_order=True) # all metrics wrapper = OutputHandler("tag", metric_names="all") @@ -149,9 +145,7 @@ def test_output_handler_metric_names(): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert mock_logger.writer.add_scalar.call_count == 2 - mock_logger.writer.add_scalar.assert_has_calls( - [call("tag/a", 12.23, 5), call("tag/b", 23.45, 5),], any_order=True, - ) + mock_logger.writer.add_scalar.assert_has_calls([call("tag/a", 12.23, 5), call("tag/b", 23.45, 5)], any_order=True) # log a torch tensor (ndimension = 0) wrapper = OutputHandler("tag", metric_names="all") @@ -166,7 +160,7 @@ def test_output_handler_metric_names(): assert mock_logger.writer.add_scalar.call_count == 2 mock_logger.writer.add_scalar.assert_has_calls( - [call("tag/a", torch.tensor(12.23).item(), 5), call("tag/b", torch.tensor(23.45).item(), 5),], any_order=True, + [call("tag/a", torch.tensor(12.23).item(), 5), call("tag/b", torch.tensor(23.45).item(), 5)], any_order=True ) @@ -328,12 +322,12 @@ def test_weights_scalar_handler_frozen_layers(dummy_model_factory): wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) mock_logger.writer.add_scalar.assert_has_calls( - [call("weights_norm/fc2/weight", 12.0, 5), call("weights_norm/fc2/bias", math.sqrt(12.0), 5),], any_order=True, + [call("weights_norm/fc2/weight", 12.0, 5), call("weights_norm/fc2/bias", math.sqrt(12.0), 5)], any_order=True ) with pytest.raises(AssertionError): mock_logger.writer.add_scalar.assert_has_calls( - [call("weights_norm/fc1/weight", 12.0, 5), call("weights_norm/fc1/bias", math.sqrt(12.0), 5),], + [call("weights_norm/fc1/weight", 12.0, 5), call("weights_norm/fc1/bias", math.sqrt(12.0), 5)], any_order=True, ) @@ -484,12 +478,12 @@ def test_grads_scalar_handler_frozen_layers(dummy_model_factory, norm_mock): wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) mock_logger.writer.add_scalar.assert_has_calls( - [call("grads_norm/fc2/weight", ANY, 5), call("grads_norm/fc2/bias", ANY, 5),], any_order=True, + [call("grads_norm/fc2/weight", ANY, 5), call("grads_norm/fc2/bias", ANY, 5)], any_order=True ) with pytest.raises(AssertionError): mock_logger.writer.add_scalar.assert_has_calls( - [call("grads_norm/fc1/weight", ANY, 5), call("grads_norm/fc1/bias", ANY, 5),], any_order=True, + [call("grads_norm/fc1/weight", ANY, 5), call("grads_norm/fc1/bias", ANY, 5)], any_order=True ) assert mock_logger.writer.add_scalar.call_count == 2 assert norm_mock.call_count == 2 diff --git a/tests/ignite/contrib/handlers/test_tqdm_logger.py b/tests/ignite/contrib/handlers/test_tqdm_logger.py index 41bcc40d6b58..89d4447428c5 100644 --- a/tests/ignite/contrib/handlers/test_tqdm_logger.py +++ b/tests/ignite/contrib/handlers/test_tqdm_logger.py @@ -157,9 +157,7 @@ def step(engine, batch): RunningAverage(alpha=0.5, output_transform=lambda x: x).attach(trainer, "batchloss") pbar = ProgressBar() - pbar.attach( - trainer, metric_names=["batchloss",], - ) + pbar.attach(trainer, metric_names=["batchloss"]) trainer.run(data=data, max_epochs=1) @@ -227,7 +225,7 @@ def step(engine, batch): RunningAverage(alpha=0.5, output_transform=lambda x: x).attach(trainer, "batchloss") pbar = ProgressBar() - pbar.attach(trainer, metric_names=["batchloss",], state_attributes=["alpha", "beta", "gamma"]) 
+ pbar.attach(trainer, metric_names=["batchloss"], state_attributes=["alpha", "beta", "gamma"]) trainer.run(data=data, max_epochs=1) diff --git a/tests/ignite/contrib/handlers/test_visdom_logger.py b/tests/ignite/contrib/handlers/test_visdom_logger.py index 465ea5eb2a59..0f93d83ec26c 100644 --- a/tests/ignite/contrib/handlers/test_visdom_logger.py +++ b/tests/ignite/contrib/handlers/test_visdom_logger.py @@ -48,8 +48,8 @@ def test_optimizer_params(): assert wrapper.windows["lr/group_0"]["win"] is not None mock_logger.vis.line.assert_called_once_with( - X=[123,], - Y=[0.01,], + X=[123], + Y=[0.01], env=mock_logger.vis.env, win=None, update=None, @@ -68,8 +68,8 @@ def test_optimizer_params(): assert wrapper.windows["generator/lr/group_0"]["win"] is not None mock_logger.vis.line.assert_called_once_with( - X=[123,], - Y=[0.01,], + X=[123], + Y=[0.01], env=mock_logger.vis.env, win=None, update=None, @@ -106,8 +106,8 @@ def test_output_handler_output_transform(dirname): assert wrapper.windows["tag/output"]["win"] is not None mock_logger.vis.line.assert_called_once_with( - X=[123,], - Y=[12345,], + X=[123], + Y=[12345], env=mock_logger.vis.env, win=None, update=None, @@ -126,8 +126,8 @@ def test_output_handler_output_transform(dirname): assert wrapper.windows["another_tag/loss"]["win"] is not None mock_logger.vis.line.assert_called_once_with( - X=[123,], - Y=[12345,], + X=[123], + Y=[12345], env=mock_logger.vis.env, win=None, update=None, @@ -157,17 +157,17 @@ def test_output_handler_metric_names(dirname): mock_logger.vis.line.assert_has_calls( [ call( - X=[5,], - Y=[12.23,], + X=[5], + Y=[12.23], env=mock_logger.vis.env, win=None, update=None, opts=wrapper.windows["tag/a"]["opts"], name="tag/a", ), call( - X=[5,], - Y=[23.45,], + X=[5], + Y=[23.45], env=mock_logger.vis.env, win=None, update=None, opts=wrapper.windows["tag/b"]["opts"], name="tag/b", ), ], any_order=True, ) - wrapper = OutputHandler("tag", metric_names=["a",]) + wrapper = OutputHandler("tag", metric_names=["a"]) mock_engine = MagicMock() mock_engine.state = State(metrics={"a": torch.Tensor([0.0, 1.0, 2.0, 3.0])}) @@ -200,8 +200,8 @@ def test_output_handler_metric_names(dirname): mock_logger.vis.line.assert_has_calls( [ call( - X=[5,], - Y=[0.0,], + X=[5], + Y=[0.0], env=mock_logger.vis.env, win=None, update=None, name="tag/a/0", ), call( - X=[5,], - Y=[1.0,], + X=[5], + Y=[1.0], env=mock_logger.vis.env, win=None, update=None, name="tag/a/1", ), call( - X=[5,], - Y=[2.0,], + X=[5], + Y=[2.0], env=mock_logger.vis.env, win=None, update=None, name="tag/a/2", ), call( - X=[5,], - Y=[3.0,], + X=[5], + Y=[3.0], env=mock_logger.vis.env, win=None, update=None, @@ -259,8 +259,8 @@ def test_output_handler_metric_names(dirname): mock_logger.vis.line.assert_has_calls( [ call( - X=[7,], - Y=[55.56,], + X=[7], + Y=[55.56], env=mock_logger.vis.env, win=None, update=None, @@ -291,8 +291,8 @@ def test_output_handler_metric_names(dirname): mock_logger.vis.line.assert_has_calls( [ call( - X=[5,], - Y=[12.23,], + X=[5], + Y=[12.23], env=mock_logger.vis.env, win=None, update=None, name="tag/a", ), call( - X=[5,], - Y=[23.45,], + X=[5], + Y=[23.45], env=mock_logger.vis.env, win=None, update=None, @@ -341,8 +341,8 @@ def test_output_handler_both(dirname):
mock_logger.vis.line.assert_has_calls( [ call( - X=[5,], - Y=[12.23,], + X=[5], + Y=[12.23], env=mock_logger.vis.env, win=None, update=None, @@ -350,8 +350,8 @@ def test_output_handler_both(dirname): name="tag/a", ), call( - X=[5,], - Y=[23.45,], + X=[5], + Y=[23.45], env=mock_logger.vis.env, win=None, update=None, @@ -359,8 +359,8 @@ def test_output_handler_both(dirname): name="tag/b", ), call( - X=[5,], - Y=[12345,], + X=[5], + Y=[12345], env=mock_logger.vis.env, win=None, update=None, @@ -388,8 +388,8 @@ def test_output_handler_both(dirname): mock_logger.vis.line.assert_has_calls( [ call( - X=[6,], - Y=[12.23,], + X=[6], + Y=[12.23], env=mock_logger.vis.env, win=wrapper.windows["tag/a"]["win"], update="append", @@ -397,8 +397,8 @@ def test_output_handler_both(dirname): name="tag/a", ), call( - X=[6,], - Y=[23.45,], + X=[6], + Y=[23.45], env=mock_logger.vis.env, win=wrapper.windows["tag/b"]["win"], update="append", @@ -406,8 +406,8 @@ def test_output_handler_both(dirname): name="tag/b", ), call( - X=[6,], - Y=[12345,], + X=[6], + Y=[12345], env=mock_logger.vis.env, win=wrapper.windows["tag/loss"]["win"], update="append", @@ -450,8 +450,8 @@ def test_output_handler_state_attrs(): mock_logger.vis.line.assert_has_calls( [ call( - X=[5,], - Y=[3.899,], + X=[5], + Y=[3.899], env=mock_logger.vis.env, win=None, update=None, @@ -459,8 +459,8 @@ def test_output_handler_state_attrs(): name="tag/alpha", ), call( - X=[5,], - Y=[12.0,], + X=[5], + Y=[12.0], env=mock_logger.vis.env, win=None, update=None, @@ -468,8 +468,8 @@ def test_output_handler_state_attrs(): name="tag/beta", ), call( - X=[5,], - Y=[21.0,], + X=[5], + Y=[21.0], env=mock_logger.vis.env, win=None, update=None, @@ -477,8 +477,8 @@ def test_output_handler_state_attrs(): name="tag/gamma/0", ), call( - X=[5,], - Y=[6.0,], + X=[5], + Y=[6.0], env=mock_logger.vis.env, win=None, update=None, @@ -530,8 +530,8 @@ def global_step_transform(*args, **kwargs): mock_logger.vis.line.assert_has_calls( [ call( - X=[10,], - Y=[12345,], + X=[10], + Y=[12345], env=mock_logger.vis.env, win=None, update=None, @@ -571,8 +571,8 @@ def test_output_handler_with_global_step_from_engine(): mock_logger.vis.line.assert_has_calls( [ call( - X=[mock_another_engine.state.epoch,], - Y=[mock_engine.state.output,], + X=[mock_another_engine.state.epoch], + Y=[mock_engine.state.output], env=mock_logger.vis.env, win=None, update=None, @@ -592,8 +592,8 @@ def test_output_handler_with_global_step_from_engine(): mock_logger.vis.line.assert_has_calls( [ call( - X=[mock_another_engine.state.epoch,], - Y=[mock_engine.state.output,], + X=[mock_another_engine.state.epoch], + Y=[mock_engine.state.output], env=mock_logger.vis.env, win=wrapper.windows["tag/loss"]["win"], update="append", @@ -655,8 +655,8 @@ def _test(tag=None): mock_logger.vis.line.assert_has_calls( [ call( - X=[5,], - Y=[0.0,], + X=[5], + Y=[0.0], env=mock_logger.vis.env, win=None, update=None, @@ -664,8 +664,8 @@ def _test(tag=None): name=tag_prefix + "weights_norm/fc1/weight", ), call( - X=[5,], - Y=[0.0,], + X=[5], + Y=[0.0], env=mock_logger.vis.env, win=None, update=None, @@ -673,8 +673,8 @@ def _test(tag=None): name=tag_prefix + "weights_norm/fc1/bias", ), call( - X=[5,], - Y=[12.0,], + X=[5], + Y=[12.0], env=mock_logger.vis.env, win=None, update=None, @@ -682,7 +682,7 @@ def _test(tag=None): name=tag_prefix + "weights_norm/fc2/weight", ), call( - X=[5,], + X=[5], Y=ANY, env=mock_logger.vis.env, win=None, @@ -729,8 +729,8 @@ def norm(x): mock_logger.vis.line.assert_has_calls( [ call( - X=[5,], - Y=[12.34,], + 
X=[5], + Y=[12.34], env=mock_logger.vis.env, win=None, update=None, @@ -738,8 +738,8 @@ def norm(x): name="weights_norm/fc1/weight", ), call( - X=[5,], - Y=[12.34,], + X=[5], + Y=[12.34], env=mock_logger.vis.env, win=None, update=None, @@ -747,8 +747,8 @@ def norm(x): name="weights_norm/fc1/bias", ), call( - X=[5,], - Y=[12.34,], + X=[5], + Y=[12.34], env=mock_logger.vis.env, win=None, update=None, @@ -756,8 +756,8 @@ def norm(x): name="weights_norm/fc2/weight", ), call( - X=[5,], - Y=[12.34,], + X=[5], + Y=[12.34], env=mock_logger.vis.env, win=None, update=None, @@ -820,7 +820,7 @@ def _test(tag=None): mock_logger.vis.line.assert_has_calls( [ call( - X=[5,], + X=[5], Y=ANY, env=mock_logger.vis.env, win=None, @@ -829,7 +829,7 @@ def _test(tag=None): name=tag_prefix + "grads_norm/fc1/weight", ), call( - X=[5,], + X=[5], Y=ANY, env=mock_logger.vis.env, win=None, @@ -838,7 +838,7 @@ def _test(tag=None): name=tag_prefix + "grads_norm/fc1/bias", ), call( - X=[5,], + X=[5], Y=ANY, env=mock_logger.vis.env, win=None, @@ -847,7 +847,7 @@ def _test(tag=None): name=tag_prefix + "grads_norm/fc2/weight", ), call( - X=[5,], + X=[5], Y=ANY, env=mock_logger.vis.env, win=None, diff --git a/tests/ignite/contrib/metrics/regression/test_canberra_metric.py b/tests/ignite/contrib/metrics/regression/test_canberra_metric.py index b30a9c402e68..2b220727e580 100644 --- a/tests/ignite/contrib/metrics/regression/test_canberra_metric.py +++ b/tests/ignite/contrib/metrics/regression/test_canberra_metric.py @@ -17,7 +17,7 @@ def test_wrong_input_shapes(): m.update((torch.rand(4), torch.rand(4, 1))) with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"): - m.update((torch.rand(4, 1), torch.rand(4,))) + m.update((torch.rand(4, 1), torch.rand(4))) def test_compute(): diff --git a/tests/ignite/contrib/metrics/regression/test_fractional_absolute_error.py b/tests/ignite/contrib/metrics/regression/test_fractional_absolute_error.py index dc45bd6d0a98..59d72e4faf5f 100644 --- a/tests/ignite/contrib/metrics/regression/test_fractional_absolute_error.py +++ b/tests/ignite/contrib/metrics/regression/test_fractional_absolute_error.py @@ -25,7 +25,7 @@ def test_wrong_input_shapes(): m.update((torch.rand(4), torch.rand(4, 1))) with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"): - m.update((torch.rand(4, 1), torch.rand(4,))) + m.update((torch.rand(4, 1), torch.rand(4))) def test_compute(): diff --git a/tests/ignite/contrib/metrics/regression/test_fractional_bias.py b/tests/ignite/contrib/metrics/regression/test_fractional_bias.py index 7079b2baa438..45cd238abe3d 100644 --- a/tests/ignite/contrib/metrics/regression/test_fractional_bias.py +++ b/tests/ignite/contrib/metrics/regression/test_fractional_bias.py @@ -25,7 +25,7 @@ def test_wrong_input_shapes(): m.update((torch.rand(4), torch.rand(4, 1))) with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"): - m.update((torch.rand(4, 1), torch.rand(4,))) + m.update((torch.rand(4, 1), torch.rand(4))) def test_fractional_bias(): diff --git a/tests/ignite/contrib/metrics/regression/test_geometric_mean_absolute_error.py b/tests/ignite/contrib/metrics/regression/test_geometric_mean_absolute_error.py index eae47c7fa71a..50b7f1db77d3 100644 --- a/tests/ignite/contrib/metrics/regression/test_geometric_mean_absolute_error.py +++ b/tests/ignite/contrib/metrics/regression/test_geometric_mean_absolute_error.py @@ -25,7 +25,7 @@ def test_wrong_input_shapes(): m.update((torch.rand(4), torch.rand(4, 
1))) with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"): - m.update((torch.rand(4, 1), torch.rand(4,))) + m.update((torch.rand(4, 1), torch.rand(4))) def test_compute(): diff --git a/tests/ignite/contrib/metrics/regression/test_geometric_mean_relative_absolute_error.py b/tests/ignite/contrib/metrics/regression/test_geometric_mean_relative_absolute_error.py index b1d6a8776f07..5009d6bc8f05 100644 --- a/tests/ignite/contrib/metrics/regression/test_geometric_mean_relative_absolute_error.py +++ b/tests/ignite/contrib/metrics/regression/test_geometric_mean_relative_absolute_error.py @@ -26,13 +26,13 @@ def test_wrong_input_shapes(): m.update((torch.rand(4), torch.rand(4, 1))) with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"): - m.update((torch.rand(4, 1), torch.rand(4,))) + m.update((torch.rand(4, 1), torch.rand(4))) def test_compute(): size = 51 - np_y_pred = np.random.rand(size,) - np_y = np.random.rand(size,) + np_y_pred = np.random.rand(size) + np_y = np.random.rand(size) np_gmrae = np.exp(np.log(np.abs(np_y - np_y_pred) / np.abs(np_y - np_y.mean())).mean()) m = GeometricMeanRelativeAbsoluteError() diff --git a/tests/ignite/contrib/metrics/regression/test_manhattan_distance.py b/tests/ignite/contrib/metrics/regression/test_manhattan_distance.py index 2ed6726db4a6..5e794d52de86 100644 --- a/tests/ignite/contrib/metrics/regression/test_manhattan_distance.py +++ b/tests/ignite/contrib/metrics/regression/test_manhattan_distance.py @@ -17,7 +17,7 @@ def test_wrong_input_shapes(): m.update((torch.rand(4), torch.rand(4, 1))) with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"): - m.update((torch.rand(4, 1), torch.rand(4,))) + m.update((torch.rand(4, 1), torch.rand(4))) def test_mahattan_distance(): diff --git a/tests/ignite/contrib/metrics/regression/test_maximum_absolute_error.py b/tests/ignite/contrib/metrics/regression/test_maximum_absolute_error.py index 828a9dcc1ee4..cf1436ae0251 100644 --- a/tests/ignite/contrib/metrics/regression/test_maximum_absolute_error.py +++ b/tests/ignite/contrib/metrics/regression/test_maximum_absolute_error.py @@ -25,7 +25,7 @@ def test_wrong_input_shapes(): m.update((torch.rand(4), torch.rand(4, 1))) with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"): - m.update((torch.rand(4, 1), torch.rand(4,))) + m.update((torch.rand(4, 1), torch.rand(4))) def test_maximum_absolute_error(): diff --git a/tests/ignite/contrib/metrics/regression/test_mean_absolute_relative_error.py b/tests/ignite/contrib/metrics/regression/test_mean_absolute_relative_error.py index 90e8baceb497..0b9754acf614 100644 --- a/tests/ignite/contrib/metrics/regression/test_mean_absolute_relative_error.py +++ b/tests/ignite/contrib/metrics/regression/test_mean_absolute_relative_error.py @@ -18,7 +18,7 @@ def test_wrong_input_shapes(): m.update((torch.rand(4), torch.rand(4, 1))) with raises(ValueError, match=r"Input data shapes should be the same, but given"): - m.update((torch.rand(4, 1), torch.rand(4,))) + m.update((torch.rand(4, 1), torch.rand(4))) def test_mean_absolute_relative_error(): diff --git a/tests/ignite/contrib/metrics/regression/test_mean_error.py b/tests/ignite/contrib/metrics/regression/test_mean_error.py index f66c82e1236b..b7659f810dab 100644 --- a/tests/ignite/contrib/metrics/regression/test_mean_error.py +++ b/tests/ignite/contrib/metrics/regression/test_mean_error.py @@ -23,7 +23,7 @@ def test_wrong_input_shapes(): 
m.update((torch.rand(4), torch.rand(4, 1))) with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"): - m.update((torch.rand(4, 1), torch.rand(4,))) + m.update((torch.rand(4, 1), torch.rand(4))) def test_mean_error(): diff --git a/tests/ignite/contrib/metrics/regression/test_mean_normalized_bias.py b/tests/ignite/contrib/metrics/regression/test_mean_normalized_bias.py index b9036287ff1c..480d6552d5c4 100644 --- a/tests/ignite/contrib/metrics/regression/test_mean_normalized_bias.py +++ b/tests/ignite/contrib/metrics/regression/test_mean_normalized_bias.py @@ -35,7 +35,7 @@ def test_wrong_input_shapes(): m.update((torch.rand(4), torch.rand(4, 1))) with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"): - m.update((torch.rand(4, 1), torch.rand(4,))) + m.update((torch.rand(4, 1), torch.rand(4))) def test_mean_error(): diff --git a/tests/ignite/contrib/metrics/regression/test_median_absolute_error.py b/tests/ignite/contrib/metrics/regression/test_median_absolute_error.py index ebe063293626..2fd3e5fe3597 100644 --- a/tests/ignite/contrib/metrics/regression/test_median_absolute_error.py +++ b/tests/ignite/contrib/metrics/regression/test_median_absolute_error.py @@ -28,10 +28,10 @@ def test_wrong_input_shapes(): m.update((torch.rand(4, 1), torch.rand(4, 1, 2))) with pytest.raises(ValueError, match=r"Predictions should be of shape"): - m.update((torch.rand(4, 1, 2), torch.rand(4,),)) + m.update((torch.rand(4, 1, 2), torch.rand(4))) with pytest.raises(ValueError, match=r"Targets should be of shape"): - m.update((torch.rand(4,), torch.rand(4, 1, 2),)) + m.update((torch.rand(4), torch.rand(4, 1, 2))) def test_median_absolute_error(): @@ -42,8 +42,8 @@ def test_median_absolute_error(): # Size of dataset will be odd for these tests size = 51 - np_y_pred = np.random.rand(size,) - np_y = np.random.rand(size,) + np_y_pred = np.random.rand(size) + np_y = np.random.rand(size) np_median_absolute_error = np.median(np.abs(np_y - np_y_pred)) m = MedianAbsoluteError() diff --git a/tests/ignite/contrib/metrics/regression/test_median_absolute_percentage_error.py b/tests/ignite/contrib/metrics/regression/test_median_absolute_percentage_error.py index a463b6406e05..af28afdc1770 100644 --- a/tests/ignite/contrib/metrics/regression/test_median_absolute_percentage_error.py +++ b/tests/ignite/contrib/metrics/regression/test_median_absolute_percentage_error.py @@ -28,10 +28,10 @@ def test_wrong_input_shapes(): m.update((torch.rand(4, 1), torch.rand(4, 1, 2))) with pytest.raises(ValueError, match=r"Predictions should be of shape"): - m.update((torch.rand(4, 1, 2), torch.rand(4,),)) + m.update((torch.rand(4, 1, 2), torch.rand(4))) with pytest.raises(ValueError, match=r"Targets should be of shape"): - m.update((torch.rand(4,), torch.rand(4, 1, 2),)) + m.update((torch.rand(4), torch.rand(4, 1, 2))) def test_median_absolute_percentage_error(): @@ -42,8 +42,8 @@ def test_median_absolute_percentage_error(): # Size of dataset will be odd for these tests size = 51 - np_y_pred = np.random.rand(size,) - np_y = np.random.rand(size,) + np_y_pred = np.random.rand(size) + np_y = np.random.rand(size) np_median_absolute_percentage_error = 100.0 * np.median(np.abs(np_y - np_y_pred) / np.abs(np_y)) m = MedianAbsolutePercentageError() diff --git a/tests/ignite/contrib/metrics/regression/test_median_relative_absolute_error.py b/tests/ignite/contrib/metrics/regression/test_median_relative_absolute_error.py index 06c5ab2eea53..acd4df744880 100644 --- 
a/tests/ignite/contrib/metrics/regression/test_median_relative_absolute_error.py +++ b/tests/ignite/contrib/metrics/regression/test_median_relative_absolute_error.py @@ -28,10 +28,24 @@ def test_wrong_input_shapes(): m.update((torch.rand(4, 1), torch.rand(4, 1, 2))) with pytest.raises(ValueError, match=r"Predictions should be of shape"): - m.update((torch.rand(4, 1, 2), torch.rand(4,),)) + m.update( + ( + torch.rand(4, 1, 2), + torch.rand( + 4, + ), + ) + ) with pytest.raises(ValueError, match=r"Targets should be of shape"): - m.update((torch.rand(4,), torch.rand(4, 1, 2),)) + m.update( + ( + torch.rand( + 4, + ), + torch.rand(4, 1, 2), + ) + ) def test_median_relative_absolute_error(): @@ -42,8 +56,12 @@ def test_median_relative_absolute_error(): # Size of dataset will be odd for these tests size = 51 - np_y_pred = np.random.rand(size,) - np_y = np.random.rand(size,) + np_y_pred = np.random.rand( + size, + ) + np_y = np.random.rand( + size, + ) np_median_absolute_relative_error = np.median(np.abs(np_y - np_y_pred) / np.abs(np_y - np_y.mean())) m = MedianRelativeAbsoluteError() diff --git a/tests/ignite/contrib/metrics/regression/test_r2_score.py b/tests/ignite/contrib/metrics/regression/test_r2_score.py index 4a87089f3304..89fe468c22d9 100644 --- a/tests/ignite/contrib/metrics/regression/test_r2_score.py +++ b/tests/ignite/contrib/metrics/regression/test_r2_score.py @@ -24,14 +24,14 @@ def test_wrong_input_shapes(): m.update((torch.rand(4), torch.rand(4, 1))) with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"): - m.update((torch.rand(4, 1), torch.rand(4,))) + m.update((torch.rand(4, 1), torch.rand(4))) def test_r2_score(): size = 51 - np_y_pred = np.random.rand(size,) - np_y = np.random.rand(size,) + np_y_pred = np.random.rand(size) + np_y = np.random.rand(size) m = R2Score() y_pred = torch.from_numpy(np_y_pred) diff --git a/tests/ignite/contrib/metrics/regression/test_wave_hedges_distance.py b/tests/ignite/contrib/metrics/regression/test_wave_hedges_distance.py index da8ba88f7f20..1c1af7fb38d6 100644 --- a/tests/ignite/contrib/metrics/regression/test_wave_hedges_distance.py +++ b/tests/ignite/contrib/metrics/regression/test_wave_hedges_distance.py @@ -16,7 +16,7 @@ def test_wrong_input_shapes(): m.update((torch.rand(4), torch.rand(4, 1))) with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"): - m.update((torch.rand(4, 1), torch.rand(4,))) + m.update((torch.rand(4, 1), torch.rand(4))) def test_compute(): diff --git a/tests/ignite/contrib/metrics/test_average_precision.py b/tests/ignite/contrib/metrics/test_average_precision.py index 7b7f55aaca0e..1627e04c40e4 100644 --- a/tests/ignite/contrib/metrics/test_average_precision.py +++ b/tests/ignite/contrib/metrics/test_average_precision.py @@ -240,7 +240,7 @@ def _test(y_preds, y_true, n_epochs, metric_device, update_fn): def get_tests(is_N): if is_N: y_true = torch.randint(0, n_classes, size=(offset * idist.get_world_size(),)).to(device) - y_preds = torch.rand(offset * idist.get_world_size(),).to(device) + y_preds = torch.rand(offset * idist.get_world_size()).to(device) def update_fn(engine, i): return ( diff --git a/tests/ignite/contrib/metrics/test_cohen_kappa.py b/tests/ignite/contrib/metrics/test_cohen_kappa.py index 32f9bbf9a1f3..c2c004f8eddb 100644 --- a/tests/ignite/contrib/metrics/test_cohen_kappa.py +++ b/tests/ignite/contrib/metrics/test_cohen_kappa.py @@ -297,12 +297,8 @@ def test_distrib_hvd(gloo_hvd_executor): device = torch.device("cpu" if not 
torch.cuda.is_available() else "cuda") nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count() - gloo_hvd_executor( - _test_distrib_binary_input, (device,), np=nproc, do_init=True, - ) - gloo_hvd_executor( - _test_distrib_integration_binary_input, (device,), np=nproc, do_init=True, - ) + gloo_hvd_executor(_test_distrib_binary_input, (device,), np=nproc, do_init=True) + gloo_hvd_executor(_test_distrib_integration_binary_input, (device,), np=nproc, do_init=True) @pytest.mark.multinode_distributed diff --git a/tests/ignite/contrib/metrics/test_gpu_info.py b/tests/ignite/contrib/metrics/test_gpu_info.py index 97ee72b83905..a63823ad9fef 100644 --- a/tests/ignite/contrib/metrics/test_gpu_info.py +++ b/tests/ignite/contrib/metrics/test_gpu_info.py @@ -139,9 +139,9 @@ def getInstance(): _test_with_custom_query(resp={}, warn_msg=r"No GPU information available", check_compute=True) # No GPU memory info - _test_with_custom_query(resp={"gpu": [{"utilization": {}},]}, warn_msg=r"No GPU memory usage information available") + _test_with_custom_query(resp={"gpu": [{"utilization": {}}]}, warn_msg=r"No GPU memory usage information available") # No GPU utilization info _test_with_custom_query( - resp={"gpu": [{"fb_memory_usage": {}},]}, warn_msg=r"No GPU utilization information available" + resp={"gpu": [{"fb_memory_usage": {}}]}, warn_msg=r"No GPU utilization information available" ) diff --git a/tests/ignite/contrib/metrics/test_roc_auc.py b/tests/ignite/contrib/metrics/test_roc_auc.py index aa34089cdbc2..9daf384939b1 100644 --- a/tests/ignite/contrib/metrics/test_roc_auc.py +++ b/tests/ignite/contrib/metrics/test_roc_auc.py @@ -253,7 +253,7 @@ def _test(y_preds, y_true, n_epochs, metric_device, update_fn): def get_tests(is_N): if is_N: y_true = torch.randint(0, n_classes, size=(offset * idist.get_world_size(),)).to(device) - y_preds = torch.rand(offset * idist.get_world_size(),).to(device) + y_preds = torch.rand(offset * idist.get_world_size()).to(device) def update_fn(engine, i): return ( diff --git a/tests/ignite/distributed/comp_models/test_base.py b/tests/ignite/distributed/comp_models/test_base.py index 6a7ca20d35dd..34bb1f68f9a7 100644 --- a/tests/ignite/distributed/comp_models/test_base.py +++ b/tests/ignite/distributed/comp_models/test_base.py @@ -50,10 +50,10 @@ def test__encode_input_data(): assert encoded_msg == [-1] * 512 encoded_msg = ComputationModel._encode_input_data(12.0, is_src=True) - assert encoded_msg == [1,] + [-1] * 511 + assert encoded_msg == [1] + [-1] * 511 encoded_msg = ComputationModel._encode_input_data("abc", is_src=True) - assert encoded_msg == [2,] + [-1] * 511 + assert encoded_msg == [2] + [-1] * 511 t = torch.rand(2, 512, 32, 32, 64) encoded_msg = ComputationModel._encode_input_data(t, is_src=True) diff --git a/tests/ignite/distributed/comp_models/test_xla.py b/tests/ignite/distributed/comp_models/test_xla.py index 798b50c04e7c..7fa92c0088b4 100644 --- a/tests/ignite/distributed/comp_models/test_xla.py +++ b/tests/ignite/distributed/comp_models/test_xla.py @@ -42,9 +42,7 @@ def _test_xla_spawn_fn(local_rank, world_size, device): @pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package") def test__xla_dist_model_spawn_one_proc(): try: - _XlaDistModel.spawn( - _test_xla_spawn_fn, args=(1, "xla"), kwargs_dict={}, nproc_per_node=1, - ) + _XlaDistModel.spawn(_test_xla_spawn_fn, args=(1, "xla"), kwargs_dict={}, nproc_per_node=1) except SystemExit: pass @@ -55,9 +53,7 @@ def test__xla_dist_model_spawn_one_proc(): def 
test__xla_dist_model_spawn_n_procs(): n = int(os.environ["NUM_TPU_WORKERS"]) try: - _XlaDistModel.spawn( - _test_xla_spawn_fn, args=(n, "xla"), kwargs_dict={}, nproc_per_node=n, - ) + _XlaDistModel.spawn(_test_xla_spawn_fn, args=(n, "xla"), kwargs_dict={}, nproc_per_node=n) except SystemExit: pass @@ -187,7 +183,7 @@ def training_step(engine, _): # THIS CAN BE A CAUSE OF CRASH if DEVICE is OTHER THAN device tensor = torch.tensor([fold + 1.0], dtype=torch.float).to(comp_model.device()) - xm.all_reduce("max", [tensor,]) + xm.all_reduce("max", [tensor]) time.sleep(0.01 * fold) diff --git a/tests/ignite/distributed/utils/__init__.py b/tests/ignite/distributed/utils/__init__.py index 91f15958431b..10c7eef7b69a 100644 --- a/tests/ignite/distributed/utils/__init__.py +++ b/tests/ignite/distributed/utils/__init__.py @@ -122,7 +122,7 @@ def _test_distrib_all_reduce(device): def _test_distrib_all_gather(device): res = torch.tensor(idist.all_gather(10), device=device) - true_res = torch.tensor([10,] * idist.get_world_size(), device=device) + true_res = torch.tensor([10] * idist.get_world_size(), device=device) assert (res == true_res).all() t = torch.tensor(idist.get_rank(), device=device) @@ -134,7 +134,7 @@ def _test_distrib_all_gather(device): if idist.get_rank() == 0: x = "abc" res = idist.all_gather(x) - true_res = ["abc",] + ["test-test"] * (idist.get_world_size() - 1) + true_res = ["abc"] + ["test-test"] * (idist.get_world_size() - 1) assert res == true_res base_x = "tests/ignite/distributed/utils/test_native.py" * 2000 @@ -143,7 +143,7 @@ def _test_distrib_all_gather(device): x = "abc" res = idist.all_gather(x) - true_res = ["abc",] + [base_x] * (idist.get_world_size() - 1) + true_res = ["abc"] + [base_x] * (idist.get_world_size() - 1) assert res == true_res t = torch.arange(100, device=device).reshape(4, 25) * (idist.get_rank() + 1) diff --git a/tests/ignite/engine/test_create_supervised.py b/tests/ignite/engine/test_create_supervised.py index 9d5d65c51914..611b6a6c8d56 100644 --- a/tests/ignite/engine/test_create_supervised.py +++ b/tests/ignite/engine/test_create_supervised.py @@ -426,10 +426,10 @@ def test_create_supervised_trainer_on_cuda_amp(): model_device=model_device, trainer_device=trainer_device, amp_mode="amp" ) _test_create_supervised_trainer( - gradient_accumulation_steps=1, model_device=model_device, trainer_device=trainer_device, amp_mode="amp", + gradient_accumulation_steps=1, model_device=model_device, trainer_device=trainer_device, amp_mode="amp" ) _test_create_supervised_trainer( - gradient_accumulation_steps=3, model_device=model_device, trainer_device=trainer_device, amp_mode="amp", + gradient_accumulation_steps=3, model_device=model_device, trainer_device=trainer_device, amp_mode="amp" ) _test_create_mocked_supervised_trainer(model_device=model_device, trainer_device=trainer_device, amp_mode="amp") @@ -487,10 +487,10 @@ def test_create_supervised_trainer_on_cuda_apex(): model_device=model_device, trainer_device=trainer_device, amp_mode="apex" ) _test_create_supervised_trainer( - gradient_accumulation_steps=1, model_device=model_device, trainer_device=trainer_device, amp_mode="apex", + gradient_accumulation_steps=1, model_device=model_device, trainer_device=trainer_device, amp_mode="apex" ) _test_create_supervised_trainer( - gradient_accumulation_steps=3, model_device=model_device, trainer_device=trainer_device, amp_mode="apex", + gradient_accumulation_steps=3, model_device=model_device, trainer_device=trainer_device, amp_mode="apex" ) 
_test_create_mocked_supervised_trainer(model_device=model_device, trainer_device=trainer_device, amp_mode="apex") diff --git a/tests/ignite/engine/test_deterministic.py b/tests/ignite/engine/test_deterministic.py index 9cb257b26258..ac8014c15b65 100644 --- a/tests/ignite/engine/test_deterministic.py +++ b/tests/ignite/engine/test_deterministic.py @@ -283,9 +283,7 @@ def _(engine): sampler.set_epoch(engine.state.epoch - 1) torch.manual_seed(87) - engine.run( - orig_dataloader, max_epochs=max_epochs, epoch_length=epoch_length, - ) + engine.run(orig_dataloader, max_epochs=max_epochs, epoch_length=epoch_length) batch_checker = BatchChecker(seen_batchs, init_counter=resume_epoch * epoch_length) @@ -390,9 +388,7 @@ def _(engine): sampler.set_epoch(engine.state.epoch) torch.manual_seed(12) - engine.run( - orig_dataloader, max_epochs=max_epochs, epoch_length=epoch_length, - ) + engine.run(orig_dataloader, max_epochs=max_epochs, epoch_length=epoch_length) batch_checker = BatchChecker(seen_batchs, init_counter=resume_iteration) @@ -471,9 +467,7 @@ def update_fn(_, batch): engine = DeterministicEngine(update_fn) torch.manual_seed(121) - engine.run( - infinite_data_iterator(), max_epochs=max_epochs, epoch_length=epoch_length, - ) + engine.run(infinite_data_iterator(), max_epochs=max_epochs, epoch_length=epoch_length) batch_checker = BatchChecker(seen_batchs, init_counter=resume_epoch * epoch_length) @@ -527,9 +521,7 @@ def update_fn(_, batch): engine = DeterministicEngine(update_fn) torch.manual_seed(24) - engine.run( - infinite_data_iterator(), max_epochs=max_epochs, epoch_length=epoch_length, - ) + engine.run(infinite_data_iterator(), max_epochs=max_epochs, epoch_length=epoch_length) batch_checker = BatchChecker(seen_batchs, init_counter=resume_iteration) diff --git a/tests/ignite/engine/test_engine.py b/tests/ignite/engine/test_engine.py index ebbdf74c5c5e..bacdc3bba7c9 100644 --- a/tests/ignite/engine/test_engine.py +++ b/tests/ignite/engine/test_engine.py @@ -580,9 +580,7 @@ def train_fn(_, batch): torch.manual_seed(1) epoch_length = 6 trainer = Engine(train_fn) - trainer.run( - random_train_data_generator(size), max_epochs=4, epoch_length=epoch_length, - ) + trainer.run(random_train_data_generator(size), max_epochs=4, epoch_length=epoch_length) def val_fn(_1, _2): pass @@ -601,9 +599,7 @@ def run_evaluation(_): evaluator.run(random_val_data_generator(size), epoch_length=4) torch.manual_seed(1) - trainer.run( - random_train_data_generator(size), max_epochs=4, epoch_length=epoch_length, - ) + trainer.run(random_train_data_generator(size), max_epochs=4, epoch_length=epoch_length) for i in range(epoch_length): assert train_batches[epoch_length + i] != train_batches[2 * epoch_length + i] diff --git a/tests/ignite/engine/test_event_handlers.py b/tests/ignite/engine/test_event_handlers.py index f3e0f209c79a..be47e15decd2 100644 --- a/tests/ignite/engine/test_event_handlers.py +++ b/tests/ignite/engine/test_event_handlers.py @@ -426,7 +426,7 @@ def handle_iteration_completed(engine, completed_counter): def test_returns_state(): engine = Engine(MagicMock(return_value=1)) - state = engine.run([0,]) + state = engine.run([0]) assert isinstance(state, State) diff --git a/tests/ignite/handlers/test_checkpoint.py b/tests/ignite/handlers/test_checkpoint.py index 7897da355f0b..04cae4186348 100644 --- a/tests/ignite/handlers/test_checkpoint.py +++ b/tests/ignite/handlers/test_checkpoint.py @@ -526,7 +526,7 @@ def save_handler(c, f): to_save = {"model": DummyModel()} - checkpointer = Checkpoint(to_save, 
save_handler=save_handler,) + checkpointer = Checkpoint(to_save, save_handler=save_handler) trainer = Engine(lambda e, b: None) @@ -797,7 +797,7 @@ def update_fn(_1, _2): model = DummyModel() to_save = {"model": model} engine.add_event_handler(Events.EPOCH_COMPLETED, handler, to_save) - engine.run([0, 1,], max_epochs=4) + engine.run([0, 1], max_epochs=4) expected = sorted([f"{_PREFIX}_{name}_{i}.pt" for i in [3 * 2, 4 * 2]]) @@ -1203,9 +1203,7 @@ def _test_checkpoint_with_ddp(device): torch.manual_seed(0) model = DummyModel().to(device) - device_ids = ( - None if "cpu" in device.type else [device,] - ) + device_ids = None if "cpu" in device.type else [device] ddp_model = nn.parallel.DistributedDataParallel(model, device_ids=device_ids) to_save = {"model": ddp_model} @@ -1223,9 +1221,7 @@ def _test_checkpoint_with_ddp(device): def _test_checkpoint_load_objects_ddp(device): model = DummyModel().to(device) - device_ids = ( - None if "cpu" in device.type else [device,] - ) + device_ids = None if "cpu" in device.type else [device] ddp_model = nn.parallel.DistributedDataParallel(model, device_ids=device_ids) opt = torch.optim.SGD(ddp_model.parameters(), lr=0.01) @@ -1598,7 +1594,7 @@ def test_checkpoint_reset(): assert save_handler.call_count == 3 assert checkpointer.last_checkpoint == "model_124.pt" assert len(checkpointer._saved) == 1 - assert sorted([item.filename for item in checkpointer._saved]) == sorted(["model_124.pt",]) + assert sorted([item.filename for item in checkpointer._saved]) == sorted(["model_124.pt"]) def test_checkpoint_reset_with_engine(dirname): @@ -1609,7 +1605,7 @@ def test_checkpoint_reset_with_engine(dirname): model = DummyModel() to_save = {"model": model} engine.add_event_handler(Events.EPOCH_COMPLETED, handler, to_save) - engine.run([0, 1,], max_epochs=10) + engine.run([0, 1], max_epochs=10) expected = sorted([f"{_PREFIX}_{name}_{i}.pt" for i in [9 * 2, 10 * 2]]) assert sorted(os.listdir(dirname)) == expected @@ -1617,7 +1613,7 @@ def test_checkpoint_reset_with_engine(dirname): handler.reset() engine.state.max_epochs = None - engine.run([0, 1,], max_epochs=2) + engine.run([0, 1], max_epochs=2) expected += [f"{_PREFIX}_{name}_{i}.pt" for i in [1 * 2, 2 * 2]] assert sorted(os.listdir(dirname)) == sorted(expected) diff --git a/tests/ignite/handlers/test_param_scheduler.py b/tests/ignite/handlers/test_param_scheduler.py index e27c266d2c75..4b3fb2446412 100644 --- a/tests/ignite/handlers/test_param_scheduler.py +++ b/tests/ignite/handlers/test_param_scheduler.py @@ -337,15 +337,15 @@ def test_concat_scheduler_asserts(): scheduler_3 = CosineAnnealingScheduler(optimizer_2, "lr", start_value=0.0, end_value=1.0, cycle_size=10) with pytest.raises(ValueError, match=r"schedulers should be related to same optimizer"): - ConcatScheduler([scheduler_1, scheduler_3], durations=[30,]) + ConcatScheduler([scheduler_1, scheduler_3], durations=[30]) scheduler_4 = CosineAnnealingScheduler(optimizer, "lr2", start_value=0.0, end_value=1.0, cycle_size=10) with pytest.raises(ValueError, match=r"schedulers should be related to same param_name"): - ConcatScheduler([scheduler_1, scheduler_4], durations=[30,]) + ConcatScheduler([scheduler_1, scheduler_4], durations=[30]) with pytest.raises(ValueError, match=r"schedulers should be related to same optimizer"): - ConcatScheduler.simulate_values(3, [scheduler_1, scheduler_3], durations=[30,]) + ConcatScheduler.simulate_values(3, [scheduler_1, scheduler_3], durations=[30]) def test_concat_scheduler_state_dict(): @@ -1300,5 +1300,5 @@ def 
save_lr(engine): trainer.run([0] * 15, max_epochs=1) assert lrs == list( - map(pytest.approx, [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95,],) + map(pytest.approx, [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]) ) diff --git a/tests/ignite/handlers/test_state_param_scheduler.py b/tests/ignite/handlers/test_state_param_scheduler.py index 2e5e8a3f7fe6..97d04555f3d3 100644 --- a/tests/ignite/handlers/test_state_param_scheduler.py +++ b/tests/ignite/handlers/test_state_param_scheduler.py @@ -61,9 +61,7 @@ def __call__(self, event_index): ) -@pytest.mark.parametrize( - "max_epochs, milestones_values, save_history, expected_param_history", [config1, config2], -) +@pytest.mark.parametrize("max_epochs, milestones_values, save_history, expected_param_history", [config1, config2]) def test_pwlinear_scheduler_linear_increase_history( max_epochs, milestones_values, save_history, expected_param_history ): @@ -87,9 +85,7 @@ def test_pwlinear_scheduler_linear_increase_history( pw_linear_step_parameter_scheduler.load_state_dict(state_dict) -@pytest.mark.parametrize( - "max_epochs, milestones_values", [(3, [(3, 12), (5, 10)]), (5, [(10, 12), (20, 10)]),], -) +@pytest.mark.parametrize("max_epochs, milestones_values", [(3, [(3, 12), (5, 10)]), (5, [(10, 12), (20, 10)])]) def test_pwlinear_scheduler_step_constant(max_epochs, milestones_values): # Testing step_constant engine = Engine(lambda e, b: None) @@ -106,7 +102,7 @@ def test_pwlinear_scheduler_step_constant(max_epochs, milestones_values): @pytest.mark.parametrize( "max_epochs, milestones_values, expected_val", - [(2, [(0, 0), (3, 10)], 6.666666666666667), (10, [(0, 0), (20, 10)], 5.0),], + [(2, [(0, 0), (3, 10)], 6.666666666666667), (10, [(0, 0), (20, 10)], 5.0)], ) def test_pwlinear_scheduler_linear_increase(max_epochs, milestones_values, expected_val): # Testing linear increase @@ -122,12 +118,8 @@ def test_pwlinear_scheduler_linear_increase(max_epochs, milestones_values, expec linear_state_parameter_scheduler.load_state_dict(state_dict) -@pytest.mark.parametrize( - "max_epochs, milestones_values,", [(3, [(0, 0), (3, 10)]), (40, [(0, 0), (20, 10)])], -) -def test_pwlinear_scheduler_max_value( - max_epochs, milestones_values, -): +@pytest.mark.parametrize("max_epochs, milestones_values,", [(3, [(0, 0), (3, 10)]), (40, [(0, 0), (20, 10)])]) +def test_pwlinear_scheduler_max_value(max_epochs, milestones_values): # Testing max_value engine = Engine(lambda e, b: None) linear_state_parameter_scheduler = PiecewiseLinearStateScheduler( @@ -162,9 +154,7 @@ def test_piecewiselinear_asserts(): PiecewiseLinearStateScheduler(param_name="linear_scheduled_param", milestones_values=[(0.5, 1)]) -@pytest.mark.parametrize( - "max_epochs, initial_value, gamma", [(3, 10, 0.99), (40, 5, 0.98)], -) +@pytest.mark.parametrize("max_epochs, initial_value, gamma", [(3, 10, 0.99), (40, 5, 0.98)]) def test_exponential_scheduler(max_epochs, initial_value, gamma): engine = Engine(lambda e, b: None) exp_state_parameter_scheduler = ExpStateScheduler( @@ -178,12 +168,8 @@ def test_exponential_scheduler(max_epochs, initial_value, gamma): exp_state_parameter_scheduler.load_state_dict(state_dict) -@pytest.mark.parametrize( - "max_epochs, initial_value, gamma, step_size", [(3, 10, 0.99, 5), (40, 5, 0.98, 22)], -) -def test_step_scheduler( - max_epochs, initial_value, gamma, step_size, -): +@pytest.mark.parametrize("max_epochs, initial_value, gamma, step_size", [(3, 10, 0.99, 5), (40, 5, 0.98, 22)]) +def 
test_step_scheduler(max_epochs, initial_value, gamma, step_size): engine = Engine(lambda e, b: None) step_state_parameter_scheduler = StepStateScheduler( param_name="step_scheduled_param", @@ -206,11 +192,9 @@ def test_step_scheduler( @pytest.mark.parametrize( - "max_epochs, initial_value, gamma, milestones", [(3, 10, 0.99, [3, 6]), (40, 5, 0.98, [3, 6, 9, 10, 11])], + "max_epochs, initial_value, gamma, milestones", [(3, 10, 0.99, [3, 6]), (40, 5, 0.98, [3, 6, 9, 10, 11])] ) -def test_multistep_scheduler( - max_epochs, initial_value, gamma, milestones, -): +def test_multistep_scheduler(max_epochs, initial_value, gamma, milestones): engine = Engine(lambda e, b: None) multi_step_state_parameter_scheduler = MultiStepStateScheduler( param_name="multistep_scheduled_param", @@ -368,7 +352,7 @@ def test_multiple_scheduler_with_save_history(): if "save_history" in config: del config["save_history"] _scheduler = scheduler(**config, save_history=True) - _scheduler.attach(engine_multiple_schedulers,) + _scheduler.attach(engine_multiple_schedulers) engine_multiple_schedulers.run([0] * 8, max_epochs=2) @@ -507,7 +491,7 @@ def test_param_scheduler_with_ema_handler(): ema_handler.attach(trainer, name=param_name, event=Events.ITERATION_COMPLETED) ema_decay_scheduler = PiecewiseLinearStateScheduler( - param_name=param_name, milestones_values=[(0, 0.0), (10, 0.999),], save_history=True + param_name=param_name, milestones_values=[(0, 0.0), (10, 0.999)], save_history=True ) ema_decay_scheduler.attach(trainer, Events.ITERATION_COMPLETED) trainer.run(data, max_epochs=20) diff --git a/tests/ignite/metrics/gan/test_fid.py b/tests/ignite/metrics/gan/test_fid.py index fdf2ae2cea2d..6244a636f070 100644 --- a/tests/ignite/metrics/gan/test_fid.py +++ b/tests/ignite/metrics/gan/test_fid.py @@ -121,9 +121,7 @@ def test_wrong_inputs(): err_str = ( "Number of Training Features and Testing Features should be equal (torch.Size([9, 2]) != torch.Size([5, 2]))" ) - with pytest.raises( - ValueError, match=re.escape(err_str), - ): + with pytest.raises(ValueError, match=re.escape(err_str)): FID(num_features=2, feature_extractor=torch.nn.Identity()).update((torch.rand(9, 2), torch.rand(5, 2))) with pytest.raises(TypeError, match=r"Argument feature_extractor must be of type torch.nn.Module"): diff --git a/tests/ignite/metrics/nlp/test_rouge.py b/tests/ignite/metrics/nlp/test_rouge.py index ee69708e93b0..2f1e24fcb892 100644 --- a/tests/ignite/metrics/nlp/test_rouge.py +++ b/tests/ignite/metrics/nlp/test_rouge.py @@ -85,7 +85,7 @@ def test_rouge_n_alpha(ngram, candidate, reference, expected): @pytest.mark.parametrize( - "candidates, references", [corpus.sample_1, corpus.sample_2, corpus.sample_3, corpus.sample_4, corpus.sample_5,], + "candidates, references", [corpus.sample_1, corpus.sample_2, corpus.sample_3, corpus.sample_4, corpus.sample_5] ) def test_rouge_metrics(candidates, references): for multiref in ["average", "best"]: diff --git a/tests/ignite/metrics/nlp/test_utils.py b/tests/ignite/metrics/nlp/test_utils.py index 8cf267a68bdc..5c6dbb446934 100644 --- a/tests/ignite/metrics/nlp/test_utils.py +++ b/tests/ignite/metrics/nlp/test_utils.py @@ -8,7 +8,7 @@ [ ([], 1, [], []), ([0, 1, 2], 1, [(0,), (1,), (2,)], [1, 1, 1]), - ([0, 1, 2], 2, [(0, 1,), (1, 2,),], [1, 1],), + ([0, 1, 2], 2, [(0, 1), (1, 2)], [1, 1]), ([0, 1, 2], 3, [(0, 1, 2)], [1]), ([0, 0, 0], 1, [(0,)], [3]), ([0, 0, 0], 2, [(0, 0)], [2]), @@ -23,7 +23,7 @@ def test_ngrams(sequence, n, expected_keys, expected_values): @pytest.mark.parametrize( "seq_a, seq_b, 
expected", - [([], [], 0), ([0, 1, 2], [0, 1, 2], 3), ([0, 1, 2], [0, 3, 2], 2), ("academy", "abracadabra", 4),], + [([], [], 0), ([0, 1, 2], [0, 1, 2], 3), ([0, 1, 2], [0, 3, 2], 2), ("academy", "abracadabra", 4)], ) def test_lcs(seq_a, seq_b, expected): assert lcs(seq_a, seq_b) == expected diff --git a/tests/ignite/metrics/test_accumulation.py b/tests/ignite/metrics/test_accumulation.py index 5f721aa16759..6777ba9f1caa 100644 --- a/tests/ignite/metrics/test_accumulation.py +++ b/tests/ignite/metrics/test_accumulation.py @@ -355,7 +355,7 @@ def update_fn(engine, batch): assert len(true_val) == shape[-1] np.testing.assert_almost_equal( - state.metrics["agg_custom_var"].cpu().numpy(), true_val, decimal=int(np.log10(1.0 / tol)), + state.metrics["agg_custom_var"].cpu().numpy(), true_val, decimal=int(np.log10(1.0 / tol)) ) size = 100 diff --git a/tests/ignite/metrics/test_accuracy.py b/tests/ignite/metrics/test_accuracy.py index ba38afb8f699..da6d2a7511fc 100644 --- a/tests/ignite/metrics/test_accuracy.py +++ b/tests/ignite/metrics/test_accuracy.py @@ -47,7 +47,7 @@ def test_binary_wrong_inputs(): with pytest.raises(ValueError, match=r"For binary cases, y_pred must be comprised of 0's and 1's"): # y_pred values are not thresholded to 0, 1 values - acc.update((torch.rand(10,), torch.randint(0, 2, size=(10,)).long(),)) + acc.update((torch.rand(10), torch.randint(0, 2, size=(10,)).long())) with pytest.raises(ValueError, match=r"y must have shape of "): # incompatible shapes diff --git a/tests/ignite/metrics/test_loss.py b/tests/ignite/metrics/test_loss.py index d425c017ca3f..a3ae8ce1053a 100644 --- a/tests/ignite/metrics/test_loss.py +++ b/tests/ignite/metrics/test_loss.py @@ -123,7 +123,7 @@ def test_gradient_based_loss(): def loss_fn(y_pred, x): gradients = torch.autograd.grad( - outputs=y_pred, inputs=x, grad_outputs=torch.ones_like(y_pred), create_graph=True, + outputs=y_pred, inputs=x, grad_outputs=torch.ones_like(y_pred), create_graph=True )[0] gradients = gradients.flatten(start_dim=1) diff --git a/tests/ignite/metrics/test_metric.py b/tests/ignite/metrics/test_metric.py index abf7193321be..6be8e111684d 100644 --- a/tests/ignite/metrics/test_metric.py +++ b/tests/ignite/metrics/test_metric.py @@ -437,7 +437,7 @@ def data(y_pred, y): d = data(y_pred, y) state = validator.run(d, max_epochs=1, epoch_length=y_pred.shape[0]) - assert set(state.metrics.keys()) == set([metric_name,]) + assert set(state.metrics.keys()) == set([metric_name]) np_y_pred = np.argmax(y_pred.numpy(), axis=-1).ravel() np_y = y.numpy().ravel() assert state.metrics[metric_name] == approx(compute_true_value_fn(np_y_pred, np_y)) @@ -961,7 +961,7 @@ def compute(self): evaluator.run(data) -@pytest.mark.parametrize("shapes", [[(10,), ()], [(5, 32, 32), (5, 32, 32)],]) +@pytest.mark.parametrize("shapes", [[(10,), ()], [(5, 32, 32), (5, 32, 32)]]) def test_list_of_tensors_and_numbers(shapes): def check_fn(output): assert len(output) == 2 diff --git a/tests/ignite/metrics/test_metrics_lambda.py b/tests/ignite/metrics/test_metrics_lambda.py index 84d9bcce428c..72fce39885c6 100644 --- a/tests/ignite/metrics/test_metrics_lambda.py +++ b/tests/ignite/metrics/test_metrics_lambda.py @@ -395,7 +395,7 @@ def data(y_pred, y): d = data(y_pred, y) state = validator.run(d, max_epochs=1, epoch_length=y_pred.shape[0]) - assert set(state.metrics.keys()) == set([metric_name,]) + assert set(state.metrics.keys()) == set([metric_name]) np_y_pred = y_pred.numpy().ravel() np_y = y.numpy().ravel() assert state.metrics[metric_name] == 
approx(compute_true_value_fn(np_y_pred, np_y)) diff --git a/tests/ignite/metrics/test_precision.py b/tests/ignite/metrics/test_precision.py index 74003a0724b0..5f07d5413526 100644 --- a/tests/ignite/metrics/test_precision.py +++ b/tests/ignite/metrics/test_precision.py @@ -38,7 +38,7 @@ def test_binary_wrong_inputs(): with pytest.raises(ValueError, match=r"For binary cases, y_pred must be comprised of 0's and 1's"): # y_pred values are not thresholded to 0, 1 values - pr.update((torch.rand(10,), torch.randint(0, 2, size=(10,)).long(),)) + pr.update((torch.rand(10), torch.randint(0, 2, size=(10,)).long())) assert pr._updated is False with pytest.raises(ValueError, match=r"y must have shape of"): diff --git a/tests/ignite/metrics/test_recall.py b/tests/ignite/metrics/test_recall.py index 4d973dee16cf..12c47e9f27e2 100644 --- a/tests/ignite/metrics/test_recall.py +++ b/tests/ignite/metrics/test_recall.py @@ -357,7 +357,7 @@ def _test(average): re.update((y_pred, y)) assert re._updated is True - y_pred = torch.zeros(4,) + y_pred = torch.zeros(4) y = torch.ones(4).long() with pytest.raises(RuntimeError): diff --git a/tests/ignite/metrics/test_root_mean_squared_error.py b/tests/ignite/metrics/test_root_mean_squared_error.py index 7c0ccee4d60b..a92e5908deb4 100644 --- a/tests/ignite/metrics/test_root_mean_squared_error.py +++ b/tests/ignite/metrics/test_root_mean_squared_error.py @@ -43,10 +43,10 @@ def _test(y_pred, y, batch_size): def get_test_cases(): test_cases = [ - (torch.empty(10,).uniform_(0, 10), torch.empty(10,).uniform_(0, 10), 1,), + (torch.empty(10).uniform_(0, 10), torch.empty(10).uniform_(0, 10), 1), (torch.empty(10, 1).uniform_(-10, 10), torch.empty(10, 1).uniform_(-10, 10), 1), # updated batches - (torch.empty(50,).uniform_(0, 10), torch.empty(50).uniform_(0, 10), 16,), + (torch.empty(50).uniform_(0, 10), torch.empty(50).uniform_(0, 10), 16), (torch.empty(50, 1).uniform_(-10, 10), torch.empty(50, 1).uniform_(-10, 10), 16), ] diff --git a/tests/ignite/metrics/test_running_average.py b/tests/ignite/metrics/test_running_average.py index 857e929c8509..9d034c1c781b 100644 --- a/tests/ignite/metrics/test_running_average.py +++ b/tests/ignite/metrics/test_running_average.py @@ -192,11 +192,11 @@ def assert_equal_running_avg_output_values(engine): def test_multiple_attach(): n_iters = 100 - errD_values = iter(np.random.rand(n_iters,)) - errG_values = iter(np.random.rand(n_iters,)) - D_x_values = iter(np.random.rand(n_iters,)) - D_G_z1 = iter(np.random.rand(n_iters,)) - D_G_z2 = iter(np.random.rand(n_iters,)) + errD_values = iter(np.random.rand(n_iters)) + errG_values = iter(np.random.rand(n_iters)) + D_x_values = iter(np.random.rand(n_iters)) + D_G_z1 = iter(np.random.rand(n_iters)) + D_G_z2 = iter(np.random.rand(n_iters)) def update_fn(engine, batch): return { diff --git a/tests/ignite/test_utils.py b/tests/ignite/test_utils.py index 542f06e28cc5..dd9bfc9706ac 100644 --- a/tests/ignite/test_utils.py +++ b/tests/ignite/test_utils.py @@ -191,8 +191,7 @@ def func_no_docs(): # Test on function with docs, @deprecated without reasons @deprecated("0.4.2", "0.6.0") def func_no_reasons(): - """Docs are cool - """ + """Docs are cool""" return 24 assert func_no_reasons.__doc__ == "**Deprecated function**.\n\n Docs are cool\n .. 
deprecated:: 0.4.2" @@ -200,8 +199,7 @@ def func_no_reasons(): # Test on function with docs, @deprecated with reasons @deprecated("0.4.2", "0.6.0", reasons=("r1", "r2")) def func_no_warnings(): - """Docs are very cool - """ + """Docs are very cool""" return 24 assert ( @@ -212,8 +210,7 @@ def func_no_warnings(): # Tests that the function emits DeprecationWarning @deprecated("0.4.2", "0.6.0", reasons=("r1", "r2")) def func_check_warning(): - """Docs are very ... - """ + """Docs are very ...""" return 24 with pytest.deprecated_call(): @@ -252,7 +249,7 @@ def test_hash_checkpoint(tmp_path): model = squeezenet1_0() torch.hub.download_url_to_file( - "https://download.pytorch.org/models/squeezenet1_0-b66bff10.pth", f"{tmp_path}/squeezenet1_0.pt", + "https://download.pytorch.org/models/squeezenet1_0-b66bff10.pth", f"{tmp_path}/squeezenet1_0.pt" ) hash_checkpoint_path, sha_hash = hash_checkpoint(f"{tmp_path}/squeezenet1_0.pt", str(tmp_path)) model.load_state_dict(torch.load(str(hash_checkpoint_path), "cpu"), True) From 06ab582c5aa969bbf34fc6ff5493a57d12294b4d Mon Sep 17 00:00:00 2001 From: louis-she Date: Mon, 20 Dec 2021 06:50:11 +0000 Subject: [PATCH 2/7] autopep8 fix --- ignite/utils.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ignite/utils.py b/ignite/utils.py index 5bc387e81b55..261a1febb607 100644 --- a/ignite/utils.py +++ b/ignite/utils.py @@ -285,7 +285,10 @@ def wrapper(*args: Any, **kwargs: Dict[str, Any]) -> Callable: return decorator -def hash_checkpoint(checkpoint_path: Union[str, Path], output_dir: Union[str, Path],) -> Tuple[Path, str]: +def hash_checkpoint( + checkpoint_path: Union[str, Path], + output_dir: Union[str, Path], +) -> Tuple[Path, str]: """ Hash the checkpoint file in the format of ``-.`` to be used with ``check_hash`` of :func:`torch.hub.load_state_dict_from_url`. From 473b166505e1bb4ee4849bc930f22bf07a2944d8 Mon Sep 17 00:00:00 2001 From: chenglu Date: Mon, 20 Dec 2021 21:24:14 +0800 Subject: [PATCH 3/7] fix: left out tailing comma --- ignite/distributed/utils.py | 3 ++- ignite/engine/engine.py | 6 +++-- ignite/utils.py | 5 +--- .../test_median_relative_absolute_error.py | 26 +++---------------- 4 files changed, 11 insertions(+), 29 deletions(-) diff --git a/ignite/distributed/utils.py b/ignite/distributed/utils.py index e26d81d5ca4e..04f8c86b6ef7 100644 --- a/ignite/distributed/utils.py +++ b/ignite/distributed/utils.py @@ -149,7 +149,8 @@ def get_rank() -> int: def get_local_rank() -> int: - """Returns local process rank within current distributed configuration. Returns 0 if no distributed configuration.""" + """Returns local process rank within current distributed configuration. + Returns 0 if no distributed configuration.""" if _need_to_sync and isinstance(_model, _SerialModel): sync(temporary=True) diff --git a/ignite/engine/engine.py b/ignite/engine/engine.py index 7b7a5c924aef..68ada8c1c343 100644 --- a/ignite/engine/engine.py +++ b/ignite/engine/engine.py @@ -446,12 +446,14 @@ def fire_event(self, event_name: Any) -> None: return self._fire_event(event_name) def terminate(self) -> None: - """Sends terminate signal to the engine, so that it terminates completely the run after the current iteration.""" + """Sends terminate signal to the engine, so that it terminates completely the run after + the current iteration.""" self.logger.info("Terminate signaled. 
Engine will stop after current iteration is finished.") self.should_terminate = True def terminate_epoch(self) -> None: - """Sends terminate signal to the engine, so that it terminates the current epoch after the current iteration.""" + """Sends terminate signal to the engine, so that it terminates the current epoch + after the current iteration.""" self.logger.info( "Terminate current epoch is signaled. " "Current epoch iteration will stop after current iteration is finished." diff --git a/ignite/utils.py b/ignite/utils.py index 261a1febb607..23995bde0d1b 100644 --- a/ignite/utils.py +++ b/ignite/utils.py @@ -285,10 +285,7 @@ def wrapper(*args: Any, **kwargs: Dict[str, Any]) -> Callable: return decorator -def hash_checkpoint( - checkpoint_path: Union[str, Path], - output_dir: Union[str, Path], -) -> Tuple[Path, str]: +def hash_checkpoint(checkpoint_path: Union[str, Path], output_dir: Union[str, Path]) -> Tuple[Path, str]: """ Hash the checkpoint file in the format of ``-.`` to be used with ``check_hash`` of :func:`torch.hub.load_state_dict_from_url`. diff --git a/tests/ignite/contrib/metrics/regression/test_median_relative_absolute_error.py b/tests/ignite/contrib/metrics/regression/test_median_relative_absolute_error.py index acd4df744880..de43a173704d 100644 --- a/tests/ignite/contrib/metrics/regression/test_median_relative_absolute_error.py +++ b/tests/ignite/contrib/metrics/regression/test_median_relative_absolute_error.py @@ -28,24 +28,10 @@ def test_wrong_input_shapes(): m.update((torch.rand(4, 1), torch.rand(4, 1, 2))) with pytest.raises(ValueError, match=r"Predictions should be of shape"): - m.update( - ( - torch.rand(4, 1, 2), - torch.rand( - 4, - ), - ) - ) + m.update((torch.rand(4, 1, 2), torch.rand(4))) with pytest.raises(ValueError, match=r"Targets should be of shape"): - m.update( - ( - torch.rand( - 4, - ), - torch.rand(4, 1, 2), - ) - ) + m.update((torch.rand(4), torch.rand(4, 1, 2))) def test_median_relative_absolute_error(): @@ -56,12 +42,8 @@ def test_median_relative_absolute_error(): # Size of dataset will be odd for these tests size = 51 - np_y_pred = np.random.rand( - size, - ) - np_y = np.random.rand( - size, - ) + np_y_pred = np.random.rand(size) + np_y = np.random.rand(size) np_median_absolute_relative_error = np.median(np.abs(np_y - np_y_pred) / np.abs(np_y - np_y.mean())) m = MedianRelativeAbsoluteError() From 94b836421f2cdfca8d0e10ed3d61e092f9b1fd94 Mon Sep 17 00:00:00 2001 From: chenglu Date: Mon, 20 Dec 2021 21:30:32 +0800 Subject: [PATCH 4/7] chg: change black version --- .pre-commit-config.yaml | 2 +- CONTRIBUTING.md | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index eeef4fb23814..bc889ea2189a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -15,7 +15,7 @@ repos: exclude_types: ["python", "jupyter", "shell", "gitignore"] - repo: https://github.com/python/black - rev: 19.10b0 + rev: 21.12b0 hooks: - id: black language_version: python3.8 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 54b23fcf202d..ae05a721cf00 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -109,11 +109,11 @@ If you modify the code, you will most probably also need to code some tests to e - naming convention for files `test_*.py`, e.g. `test_precision.py` - naming of testing functions `def test_*`, e.g. `def test_precision_on_random_data()` - - if test function should run on GPU, please **make sure to add `cuda`** in the test name, e.g. `def test_something_on_cuda()`. 
+ - if test function should run on GPU, please **make sure to add `cuda`** in the test name, e.g. `def test_something_on_cuda()`. Additionally, we may want to decorate it with `@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")`. For more examples, please see https://github.com/pytorch/ignite/blob/master/tests/ignite/engine/test_create_supervised.py - - if test function checks distributed configuration, we have to mark the test as `@pytest.mark.distributed` and additional - conditions depending on the intended checks. For example, please see + - if test function checks distributed configuration, we have to mark the test as `@pytest.mark.distributed` and additional + conditions depending on the intended checks. For example, please see https://github.com/pytorch/ignite/blob/master/tests/ignite/metrics/test_accuracy.py @@ -131,7 +131,7 @@ format and check codebase for compliance with PEP8. If you choose not to use pre-commit, you can take advantage of IDE extensions configured to black format or invoke black manually to format files and commit them. -To install `flake8`, `black==19.10b0`, `isort==5.7.0` and `mypy`, please run +To install `flake8`, `black==21.12b0`, `isort==5.7.0` and `mypy`, please run ```bash bash ./tests/run_code_style.sh install ``` From ad08c204914c72f4c9098c0c654e943753c66214 Mon Sep 17 00:00:00 2001 From: chenglu Date: Mon, 20 Dec 2021 23:01:00 +0800 Subject: [PATCH 5/7] fix: test_deprecated assert failed with additional newline --- tests/ignite/test_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ignite/test_utils.py b/tests/ignite/test_utils.py index dd9bfc9706ac..08d7a57b1959 100644 --- a/tests/ignite/test_utils.py +++ b/tests/ignite/test_utils.py @@ -194,7 +194,7 @@ def func_no_reasons(): """Docs are cool""" return 24 - assert func_no_reasons.__doc__ == "**Deprecated function**.\n\n Docs are cool\n .. deprecated:: 0.4.2" + assert func_no_reasons.__doc__ == "**Deprecated function**.\n\n Docs are cool .. deprecated:: 0.4.2" # Test on function with docs, @deprecated with reasons @deprecated("0.4.2", "0.6.0", reasons=("r1", "r2")) From 1569daf6bf5194b1182675ecb4217530319986cc Mon Sep 17 00:00:00 2001 From: chenglu Date: Mon, 20 Dec 2021 23:02:39 +0800 Subject: [PATCH 6/7] fix: typo --- tests/ignite/contrib/handlers/test_visdom_logger.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ignite/contrib/handlers/test_visdom_logger.py b/tests/ignite/contrib/handlers/test_visdom_logger.py index 0f93d83ec26c..3faca55c47ec 100644 --- a/tests/ignite/contrib/handlers/test_visdom_logger.py +++ b/tests/ignite/contrib/handlers/test_visdom_logger.py @@ -163,7 +163,7 @@ def test_output_handler_metric_names(dirname): win=None, update=None, opts=wrapper.windows["tag/a"]["opts"], - name="tag/a,", + name="tag/a", ), call( X=[5], From 8a19a6b1e958dbf76d064af34c986b2a305a01a3 Mon Sep 17 00:00:00 2001 From: chenglu Date: Mon, 20 Dec 2021 23:33:17 +0800 Subject: [PATCH 7/7] fix: fix func_no_reasons test assert failed --- tests/ignite/test_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/ignite/test_utils.py b/tests/ignite/test_utils.py index 08d7a57b1959..81e30de97c20 100644 --- a/tests/ignite/test_utils.py +++ b/tests/ignite/test_utils.py @@ -194,7 +194,7 @@ def func_no_reasons(): """Docs are cool""" return 24 - assert func_no_reasons.__doc__ == "**Deprecated function**.\n\n Docs are cool .. 
deprecated:: 0.4.2" + assert func_no_reasons.__doc__ == "**Deprecated function**.\n\n Docs are cool.. deprecated:: 0.4.2" # Test on function with docs, @deprecated with reasons @deprecated("0.4.2", "0.6.0", reasons=("r1", "r2")) @@ -204,7 +204,7 @@ def func_no_warnings(): assert ( func_no_warnings.__doc__ - == "**Deprecated function**.\n\n Docs are very cool\n .. deprecated:: 0.4.2\n\n\t\n\t- r1\n\t- r2" + == "**Deprecated function**.\n\n Docs are very cool.. deprecated:: 0.4.2\n\n\t\n\t- r1\n\t- r2" ) # Tests that the function emits DeprecationWarning