diff --git a/.github/workflows/code-formatting.yml b/.github/workflows/code-formatting.yml index 54bdbe23e0608..598b7a5df9aa8 100644 --- a/.github/workflows/code-formatting.yml +++ b/.github/workflows/code-formatting.yml @@ -25,7 +25,7 @@ jobs: python-types: name: Python static type checking with Pyright - runs-on: ubuntu-18.04 + runs-on: ubuntu-20.04 # Timeout: https://stackoverflow.com/a/59076067/4521646 timeout-minutes: 15 @@ -71,3 +71,24 @@ jobs: - name: Run type checking run: | $(npm bin)/pyright --project .pyrightconfig.json + + python-pep8: + name: Python formatting PEP8 + runs-on: ubuntu-20.04 + + # Timeout: https://stackoverflow.com/a/59076067/4521646 + timeout-minutes: 10 + steps: + - name: Checkout + uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + with: + python-version: 3.7 + + - name: Install dependencies + run: | + pip install flake8 + + - name: Run checking + run: | + flake8 . diff --git a/pytorch_lightning/accelerators/ddp2_accelerator.py b/pytorch_lightning/accelerators/ddp2_accelerator.py index e51ddc2eba48c..f4d3eb40232b4 100644 --- a/pytorch_lightning/accelerators/ddp2_accelerator.py +++ b/pytorch_lightning/accelerators/ddp2_accelerator.py @@ -16,7 +16,9 @@ import torch import torch.distributed as torch_distrib + from pytorch_lightning.utilities.exceptions import MisconfigurationException +from pytorch_lightning.core.lightning import LightningModule from pytorch_lightning.core.step_result import Result from pytorch_lightning.distributed.dist import LightningDistributed from pytorch_lightning import _logger as log @@ -191,14 +193,14 @@ def ddp_train(self, process_idx, mp_queue, model): return results def configure_ddp( - self, model: "LightningModule", device_ids: List[int] + self, model: LightningModule, device_ids: List[int] ) -> DistributedDataParallel: model = LightningDistributedDataParallel( model, device_ids=device_ids, find_unused_parameters=True ) return model - def configure_sync_batchnorm(self, model: "LightningModule") -> "LightningModule": + def configure_sync_batchnorm(self, model: LightningModule) -> LightningModule: """ Add global batchnorm for a model spread across multiple GPUs and nodes. 
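Note: the recurring signature change in this patch (quoted `"LightningModule"` replaced by the imported `LightningModule`) is what lets the Pyright job in the updated workflow, and flake8's undefined-name check, actually resolve the type. A minimal sketch of the resulting pattern, using a hypothetical `SomeAccelerator` class (the method body is the one shown in the hunk above):

```python
from typing import List

from torch.nn.parallel import DistributedDataParallel

from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel


class SomeAccelerator:
    # With the class imported, Pyright resolves the annotation directly; a quoted
    # "LightningModule" with no matching import is an unresolved forward reference
    # that pyflakes/flake8 typically reports as F821 (undefined name).
    def configure_ddp(self, model: LightningModule, device_ids: List[int]) -> DistributedDataParallel:
        return LightningDistributedDataParallel(model, device_ids=device_ids, find_unused_parameters=True)
```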
diff --git a/pytorch_lightning/accelerators/ddp_accelerator.py b/pytorch_lightning/accelerators/ddp_accelerator.py index 9c8ed93c3f57a..9439c7656ac9e 100644 --- a/pytorch_lightning/accelerators/ddp_accelerator.py +++ b/pytorch_lightning/accelerators/ddp_accelerator.py @@ -18,21 +18,21 @@ import sys from os.path import abspath from time import sleep -from typing import Optional -import numpy as np +from typing import Optional, List +import numpy as np from pytorch_lightning import _logger as log -from pytorch_lightning.utilities.distributed import find_free_network_port from pytorch_lightning.accelerators.accelerator import Accelerator -from pytorch_lightning.utilities.distributed import rank_zero_only -from pytorch_lightning.utilities import AMPType -from pytorch_lightning.utilities.seed import seed_everything +from pytorch_lightning.core.lightning import LightningModule from pytorch_lightning.distributed.dist import LightningDistributed -from pytorch_lightning.utilities.exceptions import MisconfigurationException from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel +from pytorch_lightning.utilities import AMPType +from pytorch_lightning.utilities.distributed import find_free_network_port +from pytorch_lightning.utilities.distributed import rank_zero_only +from pytorch_lightning.utilities.exceptions import MisconfigurationException +from pytorch_lightning.utilities.seed import seed_everything from torch.nn.parallel import DistributedDataParallel -from typing import List try: @@ -284,14 +284,14 @@ def ddp_train(self, process_idx, model): return results def configure_ddp( - self, model: "LightningModule", device_ids: List[int] + self, model: LightningModule, device_ids: List[int] ) -> DistributedDataParallel: model = LightningDistributedDataParallel( model, device_ids=device_ids, find_unused_parameters=True ) return model - def configure_sync_batchnorm(self, model: "LightningModule") -> "LightningModule": + def configure_sync_batchnorm(self, model: LightningModule) -> LightningModule: """ Add global batchnorm for a model spread across multiple GPUs and nodes. 
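Note: the reshuffled imports in this file largely follow the usual PEP 8 / isort grouping: standard library first, third-party packages second, first-party `pytorch_lightning` modules last, each group alphabetized. A generic sketch of that layout (illustrative names, not the exact lines of this file):

```python
# 1) standard library
import os
from typing import List, Optional

# 2) third-party packages
import numpy as np
from torch.nn.parallel import DistributedDataParallel

# 3) first-party (this project)
from pytorch_lightning import _logger as log
from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning.core.lightning import LightningModule
```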
diff --git a/pytorch_lightning/accelerators/ddp_cpu_slurm_accelerator.py b/pytorch_lightning/accelerators/ddp_cpu_slurm_accelerator.py index 6e1a42c708476..f194c2c1e6d72 100644 --- a/pytorch_lightning/accelerators/ddp_cpu_slurm_accelerator.py +++ b/pytorch_lightning/accelerators/ddp_cpu_slurm_accelerator.py @@ -12,19 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License import os +from typing import List, Optional + import torch import torch.distributed as torch_distrib import torch.distributed as dist +from torch.nn.parallel import DistributedDataParallel -from pytorch_lightning.accelerators.accelerator import Accelerator from pytorch_lightning import _logger as log +from pytorch_lightning.accelerators.accelerator import Accelerator +from pytorch_lightning.core.lightning import LightningModule from pytorch_lightning.utilities import AMPType from pytorch_lightning.utilities.distributed import rank_zero_only -from pytorch_lightning.utilities.seed import seed_everything from pytorch_lightning.distributed.dist import LightningDistributed from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel -from torch.nn.parallel import DistributedDataParallel -from typing import List, Optional try: @@ -177,14 +178,14 @@ def ddp_train(self, process_idx, model): return results def configure_ddp( - self, model: "LightningModule", device_ids: List[int] + self, model: LightningModule, device_ids: List[int] ) -> DistributedDataParallel: model = LightningDistributedDataParallel( model, device_ids=device_ids, find_unused_parameters=True ) return model - def configure_sync_batchnorm(self, model: "LightningModule") -> "LightningModule": + def configure_sync_batchnorm(self, model: LightningModule) -> LightningModule: """ Add global batchnorm for a model spread across multiple GPUs and nodes. 
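Note: these hunks only touch the signature of `configure_sync_batchnorm`; its body lies outside the context lines. For orientation, a typical implementation of such a hook (an assumption, not code taken from this PR) converts the BatchNorm layers with PyTorch's built-in helper:

```python
import torch
from pytorch_lightning.core.lightning import LightningModule


def configure_sync_batchnorm(model: LightningModule) -> LightningModule:
    # Recursively replace BatchNorm*D layers with SyncBatchNorm so running
    # statistics are reduced across every process in the default process group.
    return torch.nn.SyncBatchNorm.convert_sync_batchnorm(model, process_group=None)
```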
diff --git a/pytorch_lightning/accelerators/ddp_cpu_spawn_accelerator.py b/pytorch_lightning/accelerators/ddp_cpu_spawn_accelerator.py index 8241d96695d1c..8f897b01a8cd1 100644 --- a/pytorch_lightning/accelerators/ddp_cpu_spawn_accelerator.py +++ b/pytorch_lightning/accelerators/ddp_cpu_spawn_accelerator.py @@ -12,23 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License import os -import re +from typing import List, Optional import torch import torch.distributed as torch_distrib import torch.distributed as dist import torch.multiprocessing as mp +from torch.nn.parallel import DistributedDataParallel from pytorch_lightning import _logger as log from pytorch_lightning.accelerators.accelerator import Accelerator +from pytorch_lightning.core.lightning import LightningModule +from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel from pytorch_lightning.utilities import AMPType -from pytorch_lightning.utilities.cloud_io import atomic_save, load as pl_load from pytorch_lightning.utilities.distributed import rank_zero_only, rank_zero_warn from pytorch_lightning.utilities.distributed import find_free_network_port from pytorch_lightning.distributed.dist import LightningDistributed -from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel -from torch.nn.parallel import DistributedDataParallel -from typing import List, Optional try: from hydra.core.hydra_config import HydraConfig @@ -210,14 +209,14 @@ def transfer_distrib_spawn_state_on_fit_end(self, model, mp_queue, results): mp_queue.put(results) def configure_ddp( - self, model: "LightningModule", device_ids: List[int] + self, model: LightningModule, device_ids: List[int] ) -> DistributedDataParallel: model = LightningDistributedDataParallel( model, device_ids=device_ids, find_unused_parameters=True ) return model - def configure_sync_batchnorm(self, model: "LightningModule") -> "LightningModule": + def configure_sync_batchnorm(self, model: LightningModule) -> LightningModule: """ Add global batchnorm for a model spread across multiple GPUs and nodes. 
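Note: several of the deletions above (`import re`, the `atomic_save` / `pl_load` import, and similar removals later in this patch) are the straightforward fix for flake8's F401 warning: a name is imported but never referenced. A tiny standalone illustration:

```python
import os
import re   # if nothing below uses `re`, flake8 reports: F401 're' imported but unused

print(os.getcwd())  # `os` is referenced, so its import stays
```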
diff --git a/pytorch_lightning/accelerators/ddp_cpu_torchelastic_accelerator.py b/pytorch_lightning/accelerators/ddp_cpu_torchelastic_accelerator.py index 91735c2bcefdc..300512e3fc2f9 100644 --- a/pytorch_lightning/accelerators/ddp_cpu_torchelastic_accelerator.py +++ b/pytorch_lightning/accelerators/ddp_cpu_torchelastic_accelerator.py @@ -12,19 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License import os +from typing import List, Optional + import torch import torch.distributed as torch_distrib import torch.distributed as dist +from torch.nn.parallel import DistributedDataParallel -from pytorch_lightning.accelerators.accelerator import Accelerator from pytorch_lightning import _logger as log -from pytorch_lightning.utilities import AMPType -from pytorch_lightning.utilities.distributed import rank_zero_only -from pytorch_lightning.utilities.seed import seed_everything +from pytorch_lightning.accelerators.accelerator import Accelerator +from pytorch_lightning.core.lightning import LightningModule from pytorch_lightning.distributed.dist import LightningDistributed from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel -from torch.nn.parallel import DistributedDataParallel -from typing import List, Optional +from pytorch_lightning.utilities import AMPType +from pytorch_lightning.utilities.distributed import rank_zero_only try: from hydra.utils import to_absolute_path, get_original_cwd @@ -176,14 +177,14 @@ def ddp_train(self, process_idx, model): return results def configure_ddp( - self, model: "LightningModule", device_ids: List[int] + self, model: LightningModule, device_ids: List[int] ) -> DistributedDataParallel: model = LightningDistributedDataParallel( model, device_ids=device_ids, find_unused_parameters=True ) return model - def configure_sync_batchnorm(self, model: "LightningModule") -> "LightningModule": + def configure_sync_batchnorm(self, model: LightningModule) -> LightningModule: """ Add global batchnorm for a model spread across multiple GPUs and nodes. 
diff --git a/pytorch_lightning/accelerators/ddp_slurm_accelerator.py b/pytorch_lightning/accelerators/ddp_slurm_accelerator.py index b556f0858d448..bc7920c36457a 100644 --- a/pytorch_lightning/accelerators/ddp_slurm_accelerator.py +++ b/pytorch_lightning/accelerators/ddp_slurm_accelerator.py @@ -12,19 +12,21 @@ # See the License for the specific language governing permissions and # limitations under the License import os +from typing import List + import torch import torch.distributed as torch_distrib import torch.distributed as dist +from torch.nn.parallel import DistributedDataParallel -from pytorch_lightning.accelerators.accelerator import Accelerator from pytorch_lightning import _logger as log +from pytorch_lightning.accelerators.accelerator import Accelerator +from pytorch_lightning.core.lightning import LightningModule +from pytorch_lightning.distributed.dist import LightningDistributed +from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel from pytorch_lightning.utilities import AMPType from pytorch_lightning.utilities.distributed import rank_zero_only from pytorch_lightning.utilities.seed import seed_everything -from pytorch_lightning.distributed.dist import LightningDistributed -from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel -from torch.nn.parallel import DistributedDataParallel -from typing import List try: from hydra.utils import to_absolute_path, get_original_cwd @@ -182,14 +184,14 @@ def ddp_train(self, process_idx, model): return results def configure_ddp( - self, model: "LightningModule", device_ids: List[int] + self, model: LightningModule, device_ids: List[int] ) -> DistributedDataParallel: model = LightningDistributedDataParallel( model, device_ids=device_ids, find_unused_parameters=True ) return model - def configure_sync_batchnorm(self, model: "LightningModule") -> "LightningModule": + def configure_sync_batchnorm(self, model: LightningModule) -> LightningModule: """ Add global batchnorm for a model spread across multiple GPUs and nodes. 
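Note: `configure_ddp` (unchanged here apart from its annotations) wraps the module in `LightningDistributedDataParallel`, a subclass of `torch.nn.parallel.DistributedDataParallel`, with `find_unused_parameters=True`. A single-process, CPU-only sketch with the gloo backend shows what that flag does (a hypothetical standalone example, not code from this PR):

```python
import os

import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel

# Minimal single-process process group so DistributedDataParallel can be constructed.
os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29501")
dist.init_process_group("gloo", rank=0, world_size=1)

model = torch.nn.Linear(4, 2)
# find_unused_parameters=True keeps the backward-pass gradient reduction from
# hanging when some parameters receive no gradient in a step (e.g. submodules
# that a LightningModule only exercises on certain batches).
ddp_model = DistributedDataParallel(model, find_unused_parameters=True)
print(ddp_model(torch.randn(2, 4)).shape)

dist.destroy_process_group()
```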
diff --git a/pytorch_lightning/accelerators/ddp_spawn_accelerator.py b/pytorch_lightning/accelerators/ddp_spawn_accelerator.py index fba819096ad10..272fcda932a8e 100644 --- a/pytorch_lightning/accelerators/ddp_spawn_accelerator.py +++ b/pytorch_lightning/accelerators/ddp_spawn_accelerator.py @@ -13,23 +13,23 @@ # limitations under the License import os import re +from typing import List, Optional import torch import torch.multiprocessing as mp import torch.distributed as torch_distrib import torch.distributed as dist +from torch.nn.parallel import DistributedDataParallel from pytorch_lightning import _logger as log from pytorch_lightning.accelerators.accelerator import Accelerator +from pytorch_lightning.core.lightning import LightningModule +from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel from pytorch_lightning.utilities import AMPType from pytorch_lightning.utilities.cloud_io import atomic_save, load as pl_load -from pytorch_lightning.utilities.distributed import rank_zero_only, rank_zero_warn +from pytorch_lightning.utilities.distributed import rank_zero_only, rank_zero_warn, find_free_network_port from pytorch_lightning.utilities.seed import seed_everything from pytorch_lightning.distributed.dist import LightningDistributed -from pytorch_lightning.utilities.distributed import find_free_network_port -from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel -from torch.nn.parallel import DistributedDataParallel -from typing import List, Optional try: from hydra.core.hydra_config import HydraConfig @@ -237,14 +237,14 @@ def transfer_distrib_spawn_state_on_fit_end(self, model, mp_queue, results): mp_queue.put(last_path) def configure_ddp( - self, model: "LightningModule", device_ids: List[int] + self, model: LightningModule, device_ids: List[int] ) -> DistributedDataParallel: model = LightningDistributedDataParallel( model, device_ids=device_ids, find_unused_parameters=True ) return model - def configure_sync_batchnorm(self, model: "LightningModule") -> "LightningModule": + def configure_sync_batchnorm(self, model: LightningModule) -> LightningModule: """ Add global batchnorm for a model spread across multiple GPUs and nodes. 
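Note: `find_free_network_port` is now pulled in through the consolidated `utilities.distributed` import above. Its implementation is not part of this diff; the usual pattern for such a helper (shown here as an assumption) is to bind to port 0 and let the OS choose:

```python
import socket


def find_free_network_port() -> int:
    # Binding to port 0 asks the OS for any unused TCP port; handy for picking a
    # MASTER_PORT before spawning DDP worker processes.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(("", 0))
    port = s.getsockname()[1]
    s.close()
    return port


print(find_free_network_port())
```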
diff --git a/pytorch_lightning/accelerators/ddp_torchelastic_accelerator.py b/pytorch_lightning/accelerators/ddp_torchelastic_accelerator.py index 282a1d6d35582..df7693f521ddd 100644 --- a/pytorch_lightning/accelerators/ddp_torchelastic_accelerator.py +++ b/pytorch_lightning/accelerators/ddp_torchelastic_accelerator.py @@ -12,19 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License import os +from typing import List, Optional + import torch import torch.distributed as torch_distrib import torch.distributed as dist +from torch.nn.parallel import DistributedDataParallel -from pytorch_lightning.accelerators.accelerator import Accelerator from pytorch_lightning import _logger as log -from pytorch_lightning.utilities import AMPType -from pytorch_lightning.utilities.distributed import rank_zero_only -from pytorch_lightning.utilities.seed import seed_everything +from pytorch_lightning.accelerators.accelerator import Accelerator +from pytorch_lightning.core.lightning import LightningModule from pytorch_lightning.distributed.dist import LightningDistributed from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel -from torch.nn.parallel import DistributedDataParallel -from typing import List, Optional +from pytorch_lightning.utilities import AMPType +from pytorch_lightning.utilities.distributed import rank_zero_only try: @@ -179,14 +180,14 @@ def ddp_train(self, process_idx, model): return results def configure_ddp( - self, model: "LightningModule", device_ids: List[int] + self, model: LightningModule, device_ids: List[int] ) -> DistributedDataParallel: model = LightningDistributedDataParallel( model, device_ids=device_ids, find_unused_parameters=True ) return model - def configure_sync_batchnorm(self, model: "LightningModule") -> "LightningModule": + def configure_sync_batchnorm(self, model: LightningModule) -> LightningModule: """ Add global batchnorm for a model spread across multiple GPUs and nodes. diff --git a/pytorch_lightning/accelerators/horovod_accelerator.py b/pytorch_lightning/accelerators/horovod_accelerator.py index ea5faad0e0cb8..3267e1d7f14b4 100644 --- a/pytorch_lightning/accelerators/horovod_accelerator.py +++ b/pytorch_lightning/accelerators/horovod_accelerator.py @@ -152,7 +152,7 @@ def test_step(self, args): return output def backward(self, closure_loss, optimizer, opt_idx, *args, **kwargs): - super().backward(closure_loss, optimizer, opt_idx, *args, **kwargs) + super().backward(closure_loss, optimizer, opt_idx, *args, **kwargs) optimizer.synchronize() def on_train_epoch_end(self, outputs): diff --git a/pytorch_lightning/callbacks/early_stopping.py b/pytorch_lightning/callbacks/early_stopping.py index 9566542f2ece4..864d3d836ec43 100644 --- a/pytorch_lightning/callbacks/early_stopping.py +++ b/pytorch_lightning/callbacks/early_stopping.py @@ -35,8 +35,6 @@ torch_inf = torch.tensor(np.Inf) - - class EarlyStopping(Callback): r""" Monitor a validation metric and stop training when it stops improving. diff --git a/pytorch_lightning/core/__init__.py b/pytorch_lightning/core/__init__.py index 51ec4acd5652b..bcab67d821e09 100644 --- a/pytorch_lightning/core/__init__.py +++ b/pytorch_lightning/core/__init__.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ from pytorch_lightning.core.datamodule import LightningDataModule from pytorch_lightning.core.lightning import LightningModule diff --git a/pytorch_lightning/core/lightning.py b/pytorch_lightning/core/lightning.py index e7b8cf6f0cdee..d5523ef92f80a 100644 --- a/pytorch_lightning/core/lightning.py +++ b/pytorch_lightning/core/lightning.py @@ -233,7 +233,7 @@ def log( m = f'on_step=True cannot be used on {self._current_fx_name} method' raise MisconfigurationException(m) - if 'epoch_end' in self._current_fx_name and on_epoch == False: + if 'epoch_end' in self._current_fx_name and on_epoch is False: m = f'on_epoch cannot be False when called from the {self._current_fx_name} method' raise MisconfigurationException(m) diff --git a/pytorch_lightning/plugins/apex.py b/pytorch_lightning/plugins/apex.py index ec2e4c30a6291..59f724ec3d666 100644 --- a/pytorch_lightning/plugins/apex.py +++ b/pytorch_lightning/plugins/apex.py @@ -12,7 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Tuple + from torch.optim.optimizer import Optimizer + +from pytorch_lightning.core.lightning import LightningModule from pytorch_lightning.utilities.distributed import rank_zero_warn from pytorch_lightning.utilities import AMPType @@ -65,10 +68,10 @@ def backward(self, closure_loss, optimizer, opt_idx, *args, **kwargs): def configure_apex( self, amp: object, - model: "LightningModule", + model: LightningModule, optimizers: List[Optimizer], amp_level: str, - ) -> Tuple["LightningModule", List[Optimizer]]: + ) -> Tuple[LightningModule, List[Optimizer]]: r""" Override to init AMP your own way. Must return a model and list of optimizers. diff --git a/pytorch_lightning/trainer/connectors/checkpoint_connector.py b/pytorch_lightning/trainer/connectors/checkpoint_connector.py index 420cb6d3f5bf3..6a4e5820e05d9 100644 --- a/pytorch_lightning/trainer/connectors/checkpoint_connector.py +++ b/pytorch_lightning/trainer/connectors/checkpoint_connector.py @@ -24,15 +24,11 @@ import pytorch_lightning from pytorch_lightning import _logger as log -from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.core.lightning import LightningModule -from pytorch_lightning.loggers import LightningLoggerBase -from pytorch_lightning.overrides.data_parallel import LightningDataParallel, LightningDistributedDataParallel from pytorch_lightning.utilities import AMPType, rank_zero_warn from pytorch_lightning.utilities.cloud_io import atomic_save, get_filesystem from pytorch_lightning.utilities.cloud_io import load as pl_load from pytorch_lightning.utilities.upgrade_checkpoint import KEYS_MAPPING as DEPRECATED_CHECKPOINT_KEYS -from pytorch_lightning.accelerators.accelerator import Accelerator from pytorch_lightning.utilities.exceptions import MisconfigurationException try: diff --git a/pytorch_lightning/trainer/model_hooks.py b/pytorch_lightning/trainer/model_hooks.py index cbf71748aa77a..12225dd955ecb 100644 --- a/pytorch_lightning/trainer/model_hooks.py +++ b/pytorch_lightning/trainer/model_hooks.py @@ -15,7 +15,6 @@ import inspect from abc import ABC, abstractmethod -from pytorch_lightning.core.datamodule import LightningDataModule from pytorch_lightning.core.lightning import LightningModule diff --git a/pytorch_lightning/trainer/properties.py b/pytorch_lightning/trainer/properties.py index 56b6c6e67094f..afb2f4cb5eb91 100644 --- a/pytorch_lightning/trainer/properties.py +++ b/pytorch_lightning/trainer/properties.py @@ -11,20 
+11,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from pytorch_lightning.utilities.cloud_io import get_filesystem -from pytorch_lightning.trainer.connectors.logger_connector import LoggerConnector -from pytorch_lightning.trainer.states import TrainerState -from typing import List, Optional, Union, Type, TypeVar -from pytorch_lightning.utilities import argparse_utils -from argparse import ArgumentParser, Namespace -from abc import ABC import inspect import os -from pytorch_lightning.utilities.model_utils import is_overridden -from pytorch_lightning.core.lightning import LightningModule +from abc import ABC +from argparse import ArgumentParser, Namespace +from typing import List, Optional, Union, Type, TypeVar + from pytorch_lightning.callbacks import ProgressBarBase -from pytorch_lightning.trainer.connectors.model_connector import ModelConnector +from pytorch_lightning.core.lightning import LightningModule from pytorch_lightning.trainer.connectors.checkpoint_connector import CheckpointConnector +from pytorch_lightning.trainer.connectors.logger_connector import LoggerConnector +from pytorch_lightning.trainer.connectors.model_connector import ModelConnector +from pytorch_lightning.trainer.states import TrainerState +from pytorch_lightning.utilities import argparse_utils +from pytorch_lightning.utilities.cloud_io import get_filesystem +from pytorch_lightning.utilities.model_utils import is_overridden class TrainerProperties(ABC): diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 96954733c15b9..68eafe45ba392 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -19,10 +19,9 @@ import torch from torch.utils.data import DataLoader -from pytorch_lightning.callbacks import Callback, EarlyStopping, ModelCheckpoint +from pytorch_lightning.callbacks import Callback, ModelCheckpoint from pytorch_lightning.core.datamodule import LightningDataModule from pytorch_lightning.core.lightning import LightningModule -from pytorch_lightning.core.memory import ModelSummary from pytorch_lightning.core.step_result import EvalResult from pytorch_lightning.loggers import LightningLoggerBase from pytorch_lightning.profiler import BaseProfiler diff --git a/pytorch_lightning/trainer/training_tricks.py b/pytorch_lightning/trainer/training_tricks.py index 3dbab4a78e6f8..471bfe6d47e3b 100644 --- a/pytorch_lightning/trainer/training_tricks.py +++ b/pytorch_lightning/trainer/training_tricks.py @@ -18,7 +18,6 @@ from torch import Tensor from pytorch_lightning import _logger as log -from pytorch_lightning.callbacks import GradientAccumulationScheduler from pytorch_lightning.core.lightning import LightningModule try: diff --git a/pytorch_lightning/tuner/batch_size_scaling.py b/pytorch_lightning/tuner/batch_size_scaling.py index e7c031fce8e3b..87783fbde5d1f 100644 --- a/pytorch_lightning/tuner/batch_size_scaling.py +++ b/pytorch_lightning/tuner/batch_size_scaling.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License import os +from typing import Optional, Tuple + from pytorch_lightning.core.lightning import LightningModule from pytorch_lightning.utilities.data import has_len from pytorch_lightning.utilities.parsing import lightning_hasattr, lightning_getattr, lightning_setattr @@ -20,7 +22,6 @@ from pytorch_lightning.utilities.memory 
import is_oom_error, garbage_collection_cuda from pytorch_lightning.loggers.base import DummyLogger from pytorch_lightning import _logger as log -from typing import Optional, Tuple def scale_batch_size(trainer, diff --git a/pytorch_lightning/tuner/tuning.py b/pytorch_lightning/tuner/tuning.py index 2e7fcf087bd13..9929249804309 100644 --- a/pytorch_lightning/tuner/tuning.py +++ b/pytorch_lightning/tuner/tuning.py @@ -11,13 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +from typing import Optional, List, Union + +from torch.utils.data import DataLoader + from pytorch_lightning.tuner.batch_size_scaling import scale_batch_size from pytorch_lightning.tuner.auto_gpu_select import pick_multiple_gpus from pytorch_lightning.tuner.lr_finder import _run_lr_finder_internally, lr_find from pytorch_lightning.core.lightning import LightningModule from pytorch_lightning.core.datamodule import LightningDataModule -from typing import Optional, List, Union -from torch.utils.data import DataLoader class Tuner: diff --git a/pytorch_lightning/utilities/model_utils.py b/pytorch_lightning/utilities/model_utils.py index cd2e454c2f96c..876f546312902 100644 --- a/pytorch_lightning/utilities/model_utils.py +++ b/pytorch_lightning/utilities/model_utils.py @@ -11,9 +11,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +from typing import Union + from pytorch_lightning.core.lightning import LightningModule from pytorch_lightning.core.datamodule import LightningDataModule -from typing import Union def is_overridden(method_name: str, model: Union[LightningModule, LightningDataModule]) -> bool: diff --git a/tests/backends/test_accelerator_connector.py b/tests/backends/test_accelerator_connector.py index d2166a2e5b149..cbc96b0793062 100644 --- a/tests/backends/test_accelerator_connector.py +++ b/tests/backends/test_accelerator_connector.py @@ -310,9 +310,7 @@ def on_fit_start(self, trainer, pl_module): @mock.patch('torch.cuda.device_count', return_value=0) def test_custom_accelerator(tmpdir): class Accel(Accelerator): - def init_ddp_connection( - self, global_rank: int, world_size: int, is_slurm_managing_tasks: bool = True - ) -> None: + def init_ddp_connection(self, global_rank: int, world_size: int, is_slurm_managing_tasks: bool = True) -> None: pass class CB(Callback): diff --git a/tests/base/models.py b/tests/base/models.py index 2be5a4690eb05..a7c1196198017 100644 --- a/tests/base/models.py +++ b/tests/base/models.py @@ -19,9 +19,8 @@ import torch.nn.functional as F from torch.utils.data import DataLoader -from tests.base.datasets import TrialMNIST, AverageDataset, MNIST - from pytorch_lightning.core.lightning import LightningModule +from tests.base.datasets import TrialMNIST, AverageDataset, MNIST class Generator(nn.Module): diff --git a/tests/loggers/test_mlflow.py b/tests/loggers/test_mlflow.py index 69534132647f0..db2c353dc4e2c 100644 --- a/tests/loggers/test_mlflow.py +++ b/tests/loggers/test_mlflow.py @@ -121,7 +121,7 @@ def test_mlflow_log_dir(client, mlflow, tmpdir): def test_mlflow_logger_dirs_creation(tmpdir): """ Test that the logger creates the folders and files in the right place. 
""" if not importlib.util.find_spec('mlflow'): - pytest.xfail(f"test for explicit file creation requires mlflow dependency to be installed.") + pytest.xfail("test for explicit file creation requires mlflow dependency to be installed.") assert not os.listdir(tmpdir) logger = MLFlowLogger('test', save_dir=tmpdir) diff --git a/tests/metrics/classification/test_accuracy.py b/tests/metrics/classification/test_accuracy.py index 082f1bb3e40a9..a7bdf16a69c25 100644 --- a/tests/metrics/classification/test_accuracy.py +++ b/tests/metrics/classification/test_accuracy.py @@ -93,9 +93,9 @@ def test_accuracy_invalid_shape(): (_multiclass_prob_inputs.preds, _multiclass_prob_inputs.target, _sk_accuracy_multiclass_prob), (_multiclass_inputs.preds, _multiclass_inputs.target, _sk_accuracy_multiclass), ( - _multidim_multiclass_prob_inputs.preds, - _multidim_multiclass_prob_inputs.target, - _sk_accuracy_multidim_multiclass_prob, + _multidim_multiclass_prob_inputs.preds, + _multidim_multiclass_prob_inputs.target, + _sk_accuracy_multidim_multiclass_prob, ), (_multidim_multiclass_inputs.preds, _multidim_multiclass_inputs.target, _sk_accuracy_multidim_multiclass), ], diff --git a/tests/metrics/classification/test_f_beta.py b/tests/metrics/classification/test_f_beta.py index dbd0862349e2f..c7eda551ed1a5 100644 --- a/tests/metrics/classification/test_f_beta.py +++ b/tests/metrics/classification/test_f_beta.py @@ -90,18 +90,18 @@ def _sk_fbeta_multidim_multiclass(preds, target, average='micro', beta=1.0): (_multiclass_prob_inputs.preds, _multiclass_prob_inputs.target, _sk_fbeta_multiclass_prob, NUM_CLASSES, False), (_multiclass_inputs.preds, _multiclass_inputs.target, _sk_fbeta_multiclass, NUM_CLASSES, False), ( - _multidim_multiclass_prob_inputs.preds, - _multidim_multiclass_prob_inputs.target, - _sk_fbeta_multidim_multiclass_prob, - NUM_CLASSES, - False, + _multidim_multiclass_prob_inputs.preds, + _multidim_multiclass_prob_inputs.target, + _sk_fbeta_multidim_multiclass_prob, + NUM_CLASSES, + False, ), ( - _multidim_multiclass_inputs.preds, - _multidim_multiclass_inputs.target, - _sk_fbeta_multidim_multiclass, - NUM_CLASSES, - False, + _multidim_multiclass_inputs.preds, + _multidim_multiclass_inputs.target, + _sk_fbeta_multidim_multiclass, + NUM_CLASSES, + False, ), ], ) diff --git a/tests/metrics/classification/test_precision_recall.py b/tests/metrics/classification/test_precision_recall.py index c2a096c4ab1a6..cf027ad279ead 100644 --- a/tests/metrics/classification/test_precision_recall.py +++ b/tests/metrics/classification/test_precision_recall.py @@ -90,18 +90,18 @@ def _sk_prec_recall_multidim_multiclass(preds, target, sk_fn=precision_score, av (_multiclass_prob_inputs.preds, _multiclass_prob_inputs.target, _sk_prec_recall_multiclass_prob, NUM_CLASSES, False), (_multiclass_inputs.preds, _multiclass_inputs.target, _sk_prec_recall_multiclass, NUM_CLASSES, False), ( - _multidim_multiclass_prob_inputs.preds, - _multidim_multiclass_prob_inputs.target, - _sk_prec_recall_multidim_multiclass_prob, - NUM_CLASSES, - False, + _multidim_multiclass_prob_inputs.preds, + _multidim_multiclass_prob_inputs.target, + _sk_prec_recall_multidim_multiclass_prob, + NUM_CLASSES, + False, ), ( - _multidim_multiclass_inputs.preds, - _multidim_multiclass_inputs.target, - _sk_prec_recall_multidim_multiclass, - NUM_CLASSES, - False, + _multidim_multiclass_inputs.preds, + _multidim_multiclass_inputs.target, + _sk_prec_recall_multidim_multiclass, + NUM_CLASSES, + False, ), ], ) diff --git a/tests/metrics/test_metric.py 
b/tests/metrics/test_metric.py index ccb9b4ad0ad09..3c85a4c126a27 100644 --- a/tests/metrics/test_metric.py +++ b/tests/metrics/test_metric.py @@ -72,7 +72,6 @@ def test_add_state_persistent(): assert "b" not in a.state_dict() - def test_reset(): class A(Dummy): pass diff --git a/tests/models/test_hparams.py b/tests/models/test_hparams.py index 7c147e3dd10a5..b7d0be01e9622 100644 --- a/tests/models/test_hparams.py +++ b/tests/models/test_hparams.py @@ -552,7 +552,7 @@ def test_args(tmpdir): trainer.fit(model) raw_checkpoint_path = _raw_checkpoint_path(trainer) - with pytest.raises(TypeError, match="__init__\(\) got an unexpected keyword argument 'test'"): + with pytest.raises(TypeError, match=r"__init__\(\) got an unexpected keyword argument 'test'"): SubClassVarArgs.load_from_checkpoint(raw_checkpoint_path) diff --git a/tests/trainer/logging/test_train_loop_logging_1_0.py b/tests/trainer/logging/test_train_loop_logging_1_0.py index 80b7e7e4b8f1d..64ca89842cc94 100644 --- a/tests/trainer/logging/test_train_loop_logging_1_0.py +++ b/tests/trainer/logging/test_train_loop_logging_1_0.py @@ -448,8 +448,8 @@ def __getitem__(self, index): x = { 'post_text': ['bird is fast', 'big cat'], 'dense_0': [ - torch.tensor([-0.1000, 0.2000], dtype=torch.float64), - torch.tensor([1, 1], dtype=torch.uint8) + torch.tensor([-0.1000, 0.2000], dtype=torch.float64), + torch.tensor([1, 1], dtype=torch.uint8), ], 'post_id': ['115', '116'], 'label': [torch.tensor([0, 1]), torch.tensor([1, 1], dtype=torch.uint8)] diff --git a/tests/trainer/test_optimizers.py b/tests/trainer/test_optimizers.py index 4a6dd99e9c8aa..d0af97c5b341c 100644 --- a/tests/trainer/test_optimizers.py +++ b/tests/trainer/test_optimizers.py @@ -147,6 +147,7 @@ def test_reduce_lr_on_plateau_scheduling_missing_monitor(tmpdir): def test_reduce_lr_on_plateau_scheduling(tmpdir): hparams = EvalModelTemplate.get_default_hparams() + class TestModel(EvalModelTemplate): def configure_optimizers(self):
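Note: the `tests/models/test_hparams.py` change above swaps a plain string for a raw string in `pytest.raises(..., match=...)`. The `match` argument is treated as a regular expression, and `\(` inside a non-raw string literal is an invalid escape sequence (flake8 W605, a `DeprecationWarning` at runtime). A small standalone check of the same pattern:

```python
import re

# Raw string: the backslashes reach the regex engine untouched.
pattern = r"__init__\(\) got an unexpected keyword argument 'test'"
assert re.search(pattern, "__init__() got an unexpected keyword argument 'test'")
```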