CI: add flake8 (#4239)
Borda committed Oct 19, 2020
1 parent 7c4f80a commit f37444f
Showing 32 changed files with 143 additions and 117 deletions.
23 changes: 22 additions & 1 deletion .github/workflows/code-formatting.yml
@@ -25,7 +25,7 @@ jobs:

python-types:
name: Python static type checking with Pyright
runs-on: ubuntu-18.04
runs-on: ubuntu-20.04

# Timeout: https://stackoverflow.com/a/59076067/4521646
timeout-minutes: 15
@@ -71,3 +71,24 @@ jobs:
- name: Run type checking
run: |
$(npm bin)/pyright --project .pyrightconfig.json
python-pep8:
name: Python formatting PEP8
runs-on: ubuntu-20.04

# Timeout: https://stackoverflow.com/a/59076067/4521646
timeout-minutes: 10
steps:
- name: Checkout
uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: 3.7

- name: Install dependencies
run: |
pip install flake8
- name: Run checking
run: |
flake8 .
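
The new python-pep8 job simply installs flake8 and lints the whole repository. As a rough local equivalent — a sketch, assuming flake8 is installed in the active environment (`pip install flake8`) and the working directory is the repository root; flake8 picks up any project settings from setup.cfg, tox.ini, or .flake8 on its own:

import subprocess
import sys


def run_flake8(path: str = ".") -> int:
    """Run flake8 over `path` and return its exit code (0 means no violations)."""
    completed = subprocess.run([sys.executable, "-m", "flake8", path])
    return completed.returncode


if __name__ == "__main__":
    sys.exit(run_flake8())

In CI the same thing is done by the two `run` steps above; the job fails whenever flake8 exits non-zero.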
6 changes: 4 additions & 2 deletions pytorch_lightning/accelerators/ddp2_accelerator.py
@@ -16,7 +16,9 @@

import torch
import torch.distributed as torch_distrib

from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.distributed.dist import LightningDistributed
from pytorch_lightning import _logger as log
@@ -191,14 +193,14 @@ def ddp_train(self, process_idx, mp_queue, model):
return results

def configure_ddp(
self, model: "LightningModule", device_ids: List[int]
self, model: LightningModule, device_ids: List[int]
) -> DistributedDataParallel:
model = LightningDistributedDataParallel(
model, device_ids=device_ids, find_unused_parameters=True
)
return model

def configure_sync_batchnorm(self, model: "LightningModule") -> "LightningModule":
def configure_sync_batchnorm(self, model: LightningModule) -> LightningModule:
"""
Add global batchnorm for a model spread across multiple GPUs and nodes.
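
Most of the Python hunks in this commit make the same two changes seen here: module-level imports are added (e.g. LightningModule) and the quoted type annotations are replaced by plain class names. The quoted form is a string forward reference, only needed when the referenced name is not yet in scope. An illustrative sketch, not code from the repository:

from typing import List


class LightningModule:  # stand-in for pytorch_lightning.core.lightning.LightningModule
    pass


def configure_quoted(model: "LightningModule", device_ids: List[int]) -> "LightningModule":
    # String annotations are legal and resolved lazily, but linters and type
    # checkers have to special-case them.
    return model


def configure_direct(model: LightningModule, device_ids: List[int]) -> LightningModule:
    # With LightningModule imported (or defined) at module level, the plain
    # name works and reads like any other annotation.
    return model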
20 changes: 10 additions & 10 deletions pytorch_lightning/accelerators/ddp_accelerator.py
@@ -18,21 +18,21 @@
import sys
from os.path import abspath
from time import sleep
from typing import Optional
import numpy as np
from typing import Optional, List

import numpy as np

from pytorch_lightning import _logger as log
from pytorch_lightning.utilities.distributed import find_free_network_port
from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities import AMPType
from pytorch_lightning.utilities.seed import seed_everything
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.distributed.dist import LightningDistributed
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel
from pytorch_lightning.utilities import AMPType
from pytorch_lightning.utilities.distributed import find_free_network_port
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.seed import seed_everything
from torch.nn.parallel import DistributedDataParallel
from typing import List


try:
@@ -284,14 +284,14 @@ def ddp_train(self, process_idx, model):
return results

def configure_ddp(
self, model: "LightningModule", device_ids: List[int]
self, model: LightningModule, device_ids: List[int]
) -> DistributedDataParallel:
model = LightningDistributedDataParallel(
model, device_ids=device_ids, find_unused_parameters=True
)
return model

def configure_sync_batchnorm(self, model: "LightningModule") -> "LightningModule":
def configure_sync_batchnorm(self, model: LightningModule) -> LightningModule:
"""
Add global batchnorm for a model spread across multiple GPUs and nodes.
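
Besides the annotation change, this file's import block is regrouped: `from typing import Optional` and the stray `from typing import List` at the bottom are merged into one line, and the pytorch_lightning imports are sorted alphabetically. flake8 itself does not enforce import order (that would take a plugin such as flake8-import-order, or isort), so the reordering presumably just follows the PEP 8 convention of standard library, third-party, then first-party groups. An illustrative layout, not the file's exact final contents:

# Imports shown only to illustrate grouping and ordering.
import os                                   # 1) standard library, alphabetical
import sys
from typing import List, Optional

import numpy as np                          # 2) third-party packages
import torch
from torch.nn.parallel import DistributedDataParallel

from pytorch_lightning import _logger as log               # 3) first-party code
from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning.utilities import AMPType

A real module would of course also have to use these imports, otherwise flake8 reports them as F401 (imported but unused).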
13 changes: 7 additions & 6 deletions pytorch_lightning/accelerators/ddp_cpu_slurm_accelerator.py
@@ -12,19 +12,20 @@
# See the License for the specific language governing permissions and
# limitations under the License
import os
from typing import List, Optional

import torch
import torch.distributed as torch_distrib
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel

from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning import _logger as log
from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.utilities import AMPType
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities.seed import seed_everything
from pytorch_lightning.distributed.dist import LightningDistributed
from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel
from torch.nn.parallel import DistributedDataParallel
from typing import List, Optional


try:
@@ -177,14 +178,14 @@ def ddp_train(self, process_idx, model):
return results

def configure_ddp(
self, model: "LightningModule", device_ids: List[int]
self, model: LightningModule, device_ids: List[int]
) -> DistributedDataParallel:
model = LightningDistributedDataParallel(
model, device_ids=device_ids, find_unused_parameters=True
)
return model

def configure_sync_batchnorm(self, model: "LightningModule") -> "LightningModule":
def configure_sync_batchnorm(self, model: LightningModule) -> LightningModule:
"""
Add global batchnorm for a model spread across multiple GPUs and nodes.
13 changes: 6 additions & 7 deletions pytorch_lightning/accelerators/ddp_cpu_spawn_accelerator.py
@@ -12,23 +12,22 @@
# See the License for the specific language governing permissions and
# limitations under the License
import os
import re
from typing import List, Optional

import torch
import torch.distributed as torch_distrib
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel

from pytorch_lightning import _logger as log
from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel
from pytorch_lightning.utilities import AMPType
from pytorch_lightning.utilities.cloud_io import atomic_save, load as pl_load
from pytorch_lightning.utilities.distributed import rank_zero_only, rank_zero_warn
from pytorch_lightning.utilities.distributed import find_free_network_port
from pytorch_lightning.distributed.dist import LightningDistributed
from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel
from torch.nn.parallel import DistributedDataParallel
from typing import List, Optional

try:
from hydra.core.hydra_config import HydraConfig
@@ -210,14 +209,14 @@ def transfer_distrib_spawn_state_on_fit_end(self, model, mp_queue, results):
mp_queue.put(results)

def configure_ddp(
self, model: "LightningModule", device_ids: List[int]
self, model: LightningModule, device_ids: List[int]
) -> DistributedDataParallel:
model = LightningDistributedDataParallel(
model, device_ids=device_ids, find_unused_parameters=True
)
return model

def configure_sync_batchnorm(self, model: "LightningModule") -> "LightningModule":
def configure_sync_batchnorm(self, model: LightningModule) -> LightningModule:
"""
Add global batchnorm for a model spread across multiple GPUs and nodes.
@@ -12,19 +12,20 @@
# See the License for the specific language governing permissions and
# limitations under the License
import os
from typing import List, Optional

import torch
import torch.distributed as torch_distrib
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel

from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning import _logger as log
from pytorch_lightning.utilities import AMPType
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities.seed import seed_everything
from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.distributed.dist import LightningDistributed
from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel
from torch.nn.parallel import DistributedDataParallel
from typing import List, Optional
from pytorch_lightning.utilities import AMPType
from pytorch_lightning.utilities.distributed import rank_zero_only

try:
from hydra.utils import to_absolute_path, get_original_cwd
@@ -176,14 +177,14 @@ def ddp_train(self, process_idx, model):
return results

def configure_ddp(
self, model: "LightningModule", device_ids: List[int]
self, model: LightningModule, device_ids: List[int]
) -> DistributedDataParallel:
model = LightningDistributedDataParallel(
model, device_ids=device_ids, find_unused_parameters=True
)
return model

def configure_sync_batchnorm(self, model: "LightningModule") -> "LightningModule":
def configure_sync_batchnorm(self, model: LightningModule) -> LightningModule:
"""
Add global batchnorm for a model spread across multiple GPUs and nodes.
16 changes: 9 additions & 7 deletions pytorch_lightning/accelerators/ddp_slurm_accelerator.py
@@ -12,19 +12,21 @@
# See the License for the specific language governing permissions and
# limitations under the License
import os
from typing import List

import torch
import torch.distributed as torch_distrib
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel

from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning import _logger as log
from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.distributed.dist import LightningDistributed
from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel
from pytorch_lightning.utilities import AMPType
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities.seed import seed_everything
from pytorch_lightning.distributed.dist import LightningDistributed
from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel
from torch.nn.parallel import DistributedDataParallel
from typing import List

try:
from hydra.utils import to_absolute_path, get_original_cwd
@@ -182,14 +184,14 @@ def ddp_train(self, process_idx, model):
return results

def configure_ddp(
self, model: "LightningModule", device_ids: List[int]
self, model: LightningModule, device_ids: List[int]
) -> DistributedDataParallel:
model = LightningDistributedDataParallel(
model, device_ids=device_ids, find_unused_parameters=True
)
return model

def configure_sync_batchnorm(self, model: "LightningModule") -> "LightningModule":
def configure_sync_batchnorm(self, model: LightningModule) -> LightningModule:
"""
Add global batchnorm for a model spread across multiple GPUs and nodes.
14 changes: 7 additions & 7 deletions pytorch_lightning/accelerators/ddp_spawn_accelerator.py
@@ -13,23 +13,23 @@
# limitations under the License
import os
import re
from typing import List, Optional

import torch
import torch.multiprocessing as mp
import torch.distributed as torch_distrib
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel

from pytorch_lightning import _logger as log
from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel
from pytorch_lightning.utilities import AMPType
from pytorch_lightning.utilities.cloud_io import atomic_save, load as pl_load
from pytorch_lightning.utilities.distributed import rank_zero_only, rank_zero_warn
from pytorch_lightning.utilities.distributed import rank_zero_only, rank_zero_warn, find_free_network_port
from pytorch_lightning.utilities.seed import seed_everything
from pytorch_lightning.distributed.dist import LightningDistributed
from pytorch_lightning.utilities.distributed import find_free_network_port
from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel
from torch.nn.parallel import DistributedDataParallel
from typing import List, Optional

try:
from hydra.core.hydra_config import HydraConfig
@@ -237,14 +237,14 @@ def transfer_distrib_spawn_state_on_fit_end(self, model, mp_queue, results):
mp_queue.put(last_path)

def configure_ddp(
self, model: "LightningModule", device_ids: List[int]
self, model: LightningModule, device_ids: List[int]
) -> DistributedDataParallel:
model = LightningDistributedDataParallel(
model, device_ids=device_ids, find_unused_parameters=True
)
return model

def configure_sync_batchnorm(self, model: "LightningModule") -> "LightningModule":
def configure_sync_batchnorm(self, model: LightningModule) -> LightningModule:
"""
Add global batchnorm for a model spread across multiple GPUs and nodes.
17 changes: 9 additions & 8 deletions pytorch_lightning/accelerators/ddp_torchelastic_accelerator.py
@@ -12,19 +12,20 @@
# See the License for the specific language governing permissions and
# limitations under the License
import os
from typing import List, Optional

import torch
import torch.distributed as torch_distrib
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel

from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning import _logger as log
from pytorch_lightning.utilities import AMPType
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities.seed import seed_everything
from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.distributed.dist import LightningDistributed
from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel
from torch.nn.parallel import DistributedDataParallel
from typing import List, Optional
from pytorch_lightning.utilities import AMPType
from pytorch_lightning.utilities.distributed import rank_zero_only


try:
@@ -179,14 +180,14 @@ def ddp_train(self, process_idx, model):
return results

def configure_ddp(
self, model: "LightningModule", device_ids: List[int]
self, model: LightningModule, device_ids: List[int]
) -> DistributedDataParallel:
model = LightningDistributedDataParallel(
model, device_ids=device_ids, find_unused_parameters=True
)
return model

def configure_sync_batchnorm(self, model: "LightningModule") -> "LightningModule":
def configure_sync_batchnorm(self, model: LightningModule) -> LightningModule:
"""
Add global batchnorm for a model spread across multiple GPUs and nodes.
2 changes: 1 addition & 1 deletion pytorch_lightning/accelerators/horovod_accelerator.py
@@ -152,7 +152,7 @@ def test_step(self, args):
return output

def backward(self, closure_loss, optimizer, opt_idx, *args, **kwargs):
super().backward(closure_loss, optimizer, opt_idx, *args, **kwargs)
super().backward(closure_loss, optimizer, opt_idx, *args, **kwargs)
optimizer.synchronize()

def on_train_epoch_end(self, outputs):
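
The two lines in this hunk look identical because the change appears to be whitespace-only; the rendered page drops leading indentation, which is exactly what the edit seems to adjust. pycodestyle (run by flake8) reports such issues as E111 ("indentation is not a multiple of four") or E117 ("over-indented"). A generic sketch of the rule, not the repository's code:

class BaseAccelerator:
    def backward(self, closure_loss):
        return closure_loss


class HorovodLikeAccelerator(BaseAccelerator):
    def backward(self, closure_loss):
        # Exactly one extra 4-space level inside the method body; indenting
        # this call any deeper is what triggers E117 ("over-indented").
        return super().backward(closure_loss)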
2 changes: 0 additions & 2 deletions pytorch_lightning/callbacks/early_stopping.py
@@ -35,8 +35,6 @@
torch_inf = torch.tensor(np.Inf)




class EarlyStopping(Callback):
r"""
Monitor a validation metric and stop training when it stops improving.
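
This hunk only removes surplus blank lines before the EarlyStopping class. pycodestyle expects exactly two blank lines before a top-level class or function definition; more than two are reported as E303 ("too many blank lines"). A small sketch mirroring the context above, assuming numpy and torch are available:

import numpy as np
import torch

torch_inf = torch.tensor(np.inf)


class EarlyStopping:
    # Exactly two blank lines above this class satisfies pycodestyle;
    # a third blank line would be flagged as E303.
    r"""Stop training when a monitored validation metric stops improving."""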
1 change: 1 addition & 0 deletions pytorch_lightning/core/__init__.py
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pytorch_lightning.core.datamodule import LightningDataModule
from pytorch_lightning.core.lightning import LightningModule

