precommit: ruff-format (#19434)

* precommit: ruff-format

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* manual update

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* manual update

* order

* mypy

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* mypy

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Borda and pre-commit-ci[bot] committed Feb 15, 2024
1 parent 6cb5813 commit 99fe656
Showing 185 changed files with 883 additions and 983 deletions.
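Most of those 185 files change in the same mechanical way: with `--preview` enabled, ruff-format "hugs" the brackets when a call's sole argument is a list, dict, or comprehension, instead of giving the collection its own indentation level. A minimal before/after sketch of that style (the `torchvision` import mirrors the examples below; the variable name is illustrative only):

from torchvision import transforms

# Before: the sole list argument is fully exploded onto its own level.
transform = transforms.Compose(
    [
        transforms.Resize(224),
        transforms.ToTensor(),
    ]
)

# After ruff-format --preview: the call's parentheses hug the list brackets.
transform = transforms.Compose([
    transforms.Resize(224),
    transforms.ToTensor(),
])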
4 changes: 3 additions & 1 deletion .pre-commit-config.yaml
@@ -84,10 +84,12 @@ repos:
           - flake8-return
 
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: "v0.1.15"
+    rev: "v0.2.0"
     hooks:
       - id: ruff
         args: ["--fix", "--preview"]
+      - id: ruff-format
+        args: ["--preview"]
 
   - repo: https://github.com/executablebooks/mdformat
     rev: 0.7.17
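With this hook added, pre-commit runs both the linter and the formatter on every commit. As a rough local equivalent (a sketch only, assuming Ruff v0.2.0 is on the PATH and mirroring the hook `args` above), the two hooks boil down to:

import subprocess

# Lint with autofixes, then reformat, both with preview rules enabled.
subprocess.run(["ruff", "check", "--fix", "--preview", "."], check=True)
subprocess.run(["ruff", "format", "--preview", "."], check=True)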
6 changes: 3 additions & 3 deletions examples/app/dag/app.py
@@ -65,9 +65,9 @@ def __init__(self, models_paths: list):
         )
 
         # Step 3: Create the work to train the models_paths in parallel.
-        self.dict = Dict(
-            **{model_path.split(".")[-1]: ModelWork(model_path, parallel=True) for model_path in models_paths}
-        )
+        self.dict = Dict(**{
+            model_path.split(".")[-1]: ModelWork(model_path, parallel=True) for model_path in models_paths
+        })
 
         # Step 4: Some element to track components progress.
         self.has_completed = False
12 changes: 5 additions & 7 deletions examples/app/server/app.py
@@ -20,13 +20,11 @@ def setup(self):
     def predict(self, request):
         image = base64.b64decode(request.image.encode("utf-8"))
         image = Image.open(io.BytesIO(image))
-        transforms = torchvision.transforms.Compose(
-            [
-                torchvision.transforms.Resize(224),
-                torchvision.transforms.ToTensor(),
-                torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
-            ]
-        )
+        transforms = torchvision.transforms.Compose([
+            torchvision.transforms.Resize(224),
+            torchvision.transforms.ToTensor(),
+            torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
+        ])
         image = transforms(image)
         image = image.to(self._device)
         prediction = self._model(image.unsqueeze(0))
12 changes: 5 additions & 7 deletions examples/app/server_with_auto_scaler/app.py
@@ -34,13 +34,11 @@ def setup(self):
         self._model = torchvision.models.resnet18(pretrained=True).to(self._device)
 
     def predict(self, requests: BatchRequestModel):
-        transforms = torchvision.transforms.Compose(
-            [
-                torchvision.transforms.Resize(224),
-                torchvision.transforms.ToTensor(),
-                torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
-            ]
-        )
+        transforms = torchvision.transforms.Compose([
+            torchvision.transforms.Resize(224),
+            torchvision.transforms.ToTensor(),
+            torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
+        ])
         images = []
         for request in requests.inputs:
             image = app.components.serve.types.image.Image.deserialize(request.image)
17 changes: 8 additions & 9 deletions examples/fabric/dcgan/train_fabric.py
@@ -4,6 +4,7 @@
 Code adapted from the official PyTorch DCGAN tutorial:
 https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html
 """
+
 import os
 import time
 from pathlib import Path
@@ -55,14 +56,12 @@ def main():
         root=dataroot,
         split="all",
         download=True,
-        transform=transforms.Compose(
-            [
-                transforms.Resize(image_size),
-                transforms.CenterCrop(image_size),
-                transforms.ToTensor(),
-                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
-            ]
-        ),
+        transform=transforms.Compose([
+            transforms.Resize(image_size),
+            transforms.CenterCrop(image_size),
+            transforms.ToTensor(),
+            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
+        ]),
     )
 
     # Create the dataloader
@@ -227,7 +226,7 @@ def __init__(self):
             nn.ReLU(True),
             # state size. (ngf) x 32 x 32
             nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
-            nn.Tanh()
+            nn.Tanh(),
             # state size. (nc) x 64 x 64
         )
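Two smaller preview-style changes recur in this file and across the diff: a blank line is inserted between a module docstring and the first import, and a trailing comma is added after the last argument of a multiline call (`nn.Tanh()` becomes `nn.Tanh(),`), which keeps the one-argument-per-line layout stable. A tiny sketch of both, as a hypothetical module for illustration:

"""Module docstring.

ruff-format --preview enforces a blank line before the first import below.
"""

import torch.nn as nn

# The trailing comma after nn.Tanh() pins the multiline layout in place.
head = nn.Sequential(
    nn.ReLU(True),
    nn.Tanh(),
)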
17 changes: 8 additions & 9 deletions examples/fabric/dcgan/train_torch.py
@@ -4,6 +4,7 @@
 Code adapted from the official PyTorch DCGAN tutorial:
 https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html
 """
+
 import os
 import random
 import time
@@ -55,14 +56,12 @@ def main():
         root=dataroot,
         split="all",
         download=True,
-        transform=transforms.Compose(
-            [
-                transforms.Resize(image_size),
-                transforms.CenterCrop(image_size),
-                transforms.ToTensor(),
-                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
-            ]
-        ),
+        transform=transforms.Compose([
+            transforms.Resize(image_size),
+            transforms.CenterCrop(image_size),
+            transforms.ToTensor(),
+            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
+        ]),
     )
 
     # Create the dataloader
@@ -236,7 +235,7 @@ def __init__(self):
             nn.ReLU(True),
             # state size. (ngf) x 32 x 32
             nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
-            nn.Tanh()
+            nn.Tanh(),
             # state size. (nc) x 64 x 64
         )
1 change: 1 addition & 0 deletions examples/fabric/meta_learning/train_fabric.py
@@ -14,6 +14,7 @@
 Run it with:
     lightning run model train_fabric.py --accelerator=cuda --devices=2 --strategy=ddp
 """
+
 import cherry
 import learn2learn as l2l
 import torch
1 change: 1 addition & 0 deletions examples/fabric/meta_learning/train_torch.py
@@ -15,6 +15,7 @@
 Run it with:
     torchrun --nproc_per_node=2 --standalone train_torch.py
 """
+
 import os
 import random
 
12 changes: 4 additions & 8 deletions examples/fabric/reinforcement_learning/train_fabric.py
@@ -84,14 +84,10 @@ def main(args: argparse.Namespace):
     )
 
     # Environment setup
-    envs = gym.vector.SyncVectorEnv(
-        [
-            make_env(
-                args.env_id, args.seed + rank * args.num_envs + i, rank, args.capture_video, logger.log_dir, "train"
-            )
-            for i in range(args.num_envs)
-        ]
-    )
+    envs = gym.vector.SyncVectorEnv([
+        make_env(args.env_id, args.seed + rank * args.num_envs + i, rank, args.capture_video, logger.log_dir, "train")
+        for i in range(args.num_envs)
+    ])
     assert isinstance(envs.single_action_space, gym.spaces.Discrete), "only discrete action space is supported"
 
     # Define the agent and the optimizer and setup them with Fabric
@@ -59,9 +59,9 @@ def player(args, world_collective: TorchCollective, player_trainer_collective: T
     )
 
     # Environment setup
-    envs = gym.vector.SyncVectorEnv(
-        [make_env(args.env_id, args.seed + i, 0, args.capture_video, log_dir, "train") for i in range(args.num_envs)]
-    )
+    envs = gym.vector.SyncVectorEnv([
+        make_env(args.env_id, args.seed + i, 0, args.capture_video, log_dir, "train") for i in range(args.num_envs)
+    ])
     assert isinstance(envs.single_action_space, gym.spaces.Discrete), "only discrete action space is supported"
 
     # Define the agent
24 changes: 11 additions & 13 deletions examples/fabric/reinforcement_learning/train_torch.py
@@ -142,19 +142,17 @@ def main(args: argparse.Namespace):
     )
 
     # Environment setup
-    envs = gym.vector.SyncVectorEnv(
-        [
-            make_env(
-                args.env_id,
-                args.seed + global_rank * args.num_envs + i,
-                global_rank,
-                args.capture_video,
-                logger.log_dir if global_rank == 0 else None,
-                "train",
-            )
-            for i in range(args.num_envs)
-        ]
-    )
+    envs = gym.vector.SyncVectorEnv([
+        make_env(
+            args.env_id,
+            args.seed + global_rank * args.num_envs + i,
+            global_rank,
+            args.capture_video,
+            logger.log_dir if global_rank == 0 else None,
+            "train",
+        )
+        for i in range(args.num_envs)
+    ])
     assert isinstance(envs.single_action_space, gym.spaces.Discrete), "only discrete action space is supported"
 
     # Define the agent and the optimizer and setup them with DistributedDataParallel
1 change: 1 addition & 0 deletions examples/pytorch/basics/autoencoder.py
@@ -16,6 +16,7 @@
 To run: python autoencoder.py --trainer.max_epochs=50
 """
+
 from os import path
 from typing import Optional, Tuple
 
1 change: 1 addition & 0 deletions examples/pytorch/basics/backbone_image_classifier.py
@@ -16,6 +16,7 @@
 To run: python backbone_image_classifier.py --trainer.max_epochs=50
 """
+
 from os import path
 from typing import Optional
 
26 changes: 11 additions & 15 deletions examples/pytorch/domain_templates/computer_vision_fine_tuning.py
@@ -119,14 +119,12 @@ def normalize_transform(self):
 
     @property
     def train_transform(self):
-        return transforms.Compose(
-            [
-                transforms.Resize((224, 224)),
-                transforms.RandomHorizontalFlip(),
-                transforms.ToTensor(),
-                self.normalize_transform,
-            ]
-        )
+        return transforms.Compose([
+            transforms.Resize((224, 224)),
+            transforms.RandomHorizontalFlip(),
+            transforms.ToTensor(),
+            self.normalize_transform,
+        ])
 
     @property
     def valid_transform(self):
@@ -269,13 +267,11 @@ def add_arguments_to_parser(self, parser):
         parser.link_arguments("data.batch_size", "model.batch_size")
         parser.link_arguments("finetuning.milestones", "model.milestones")
         parser.link_arguments("finetuning.train_bn", "model.train_bn")
-        parser.set_defaults(
-            {
-                "trainer.max_epochs": 15,
-                "trainer.enable_model_summary": False,
-                "trainer.num_sanity_val_steps": 0,
-            }
-        )
+        parser.set_defaults({
+            "trainer.max_epochs": 15,
+            "trainer.enable_model_summary": False,
+            "trainer.num_sanity_val_steps": 0,
+        })
 
 
 def cli_main():
@@ -18,6 +18,7 @@
 tensorboard --logdir default
 """
+
 from argparse import ArgumentParser, Namespace
 
 import numpy as np
15 changes: 7 additions & 8 deletions examples/pytorch/domain_templates/imagenet.py
@@ -30,6 +30,7 @@
 python imagenet.py fit --help
 """
+
 import os
 from typing import Optional
 
@@ -139,14 +140,12 @@ def setup(self, stage: str):
         train_dir = os.path.join(self.data_path, "train")
         self.train_dataset = datasets.ImageFolder(
             train_dir,
-            transforms.Compose(
-                [
-                    transforms.RandomResizedCrop(224),
-                    transforms.RandomHorizontalFlip(),
-                    transforms.ToTensor(),
-                    normalize,
-                ]
-            ),
+            transforms.Compose([
+                transforms.RandomResizedCrop(224),
+                transforms.RandomHorizontalFlip(),
+                transforms.ToTensor(),
+                normalize,
+            ]),
         )
         # all stages will use the eval dataset
         normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
1 change: 1 addition & 0 deletions examples/pytorch/domain_templates/reinforce_learn_ppo.py
@@ -28,6 +28,7 @@
 [3] https://github.com/sid-sundrani/ppo_lightning
 """
+
 import argparse
 from typing import Callable, Iterator, List, Tuple
 
12 changes: 4 additions & 8 deletions examples/pytorch/domain_templates/semantic_segmentation.py
@@ -333,14 +333,10 @@ def __init__(
         self.net = UNet(
             num_classes=19, num_layers=self.num_layers, features_start=self.features_start, bilinear=self.bilinear
         )
-        self.transform = transforms.Compose(
-            [
-                transforms.ToTensor(),
-                transforms.Normalize(
-                    mean=[0.35675976, 0.37380189, 0.3764753], std=[0.32064945, 0.32098866, 0.32325324]
-                ),
-            ]
-        )
+        self.transform = transforms.Compose([
+            transforms.ToTensor(),
+            transforms.Normalize(mean=[0.35675976, 0.37380189, 0.3764753], std=[0.32064945, 0.32098866, 0.32325324]),
+        ])
         self.trainset = KITTI(self.data_path, split="train", transform=self.transform)
         self.validset = KITTI(self.data_path, split="valid", transform=self.transform)
 
15 changes: 9 additions & 6 deletions pyproject.toml
@@ -49,20 +49,20 @@ ignore-words-list = "te, compiletime"
 [tool.ruff]
 line-length = 120
 # Enable Pyflakes `E` and `F` codes by default.
-select = [
+lint.select = [
     "E", "W", # see: https://pypi.org/project/pycodestyle
     "F", # see: https://pypi.org/project/pyflakes
     "S", # see: https://pypi.org/project/flake8-bandit
     "RUF018", # see: https://docs.astral.sh/ruff/rules/assignment-in-assert
 ]
-extend-select = [
+lint.extend-select = [
     "I", # see: isort
     "C4", # see: https://pypi.org/project/flake8-comprehensions
     "SIM", # see: https://pypi.org/project/flake8-simplify
     "RET", # see: https://pypi.org/project/flake8-return
     "PT", # see: https://pypi.org/project/flake8-pytest-style
 ]
-ignore = [
+lint.ignore = [
     "E731", # Do not assign a lambda expression, use a def
     "S108",
     "E203", # conflicts with black
@@ -73,9 +73,9 @@ exclude = [
     "docs",
     "_notebooks"
 ]
-ignore-init-module-imports = true
+lint.ignore-init-module-imports = true
 
-[tool.ruff.per-file-ignores]
+[tool.ruff.lint.per-file-ignores]
 ".actions/*" = ["S101", "S310"]
 "setup.py" = ["S101"]
 "examples/**" = [
@@ -102,6 +102,9 @@ ignore-init-module-imports = true
     "RET504", # todo:Unnecessary variable assignment before `return` statement
     "RET503",
 ]
+"src/lightning/data/**" = [
+    "S310", # todo: Audit URL open for permitted schemes. Allowing use of `file:` or custom schemes is often unexpected.
+]
 "tests/**" = [
     "S101", # Use of `assert` detected
     "S105", "S106", # todo: Possible hardcoded password: ...
@@ -123,7 +126,7 @@ ignore-init-module-imports = true
     "PT019", # todo: Fixture `_` without value is injected as parameter, use `@pytest.mark.usefixtures` instead
 ]
 
-[tool.ruff.mccabe]
+[tool.ruff.lint.mccabe]
 # Unlike Flake8, default to a complexity level of 10.
 max-complexity = 10
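The `select` to `lint.select` renames above track Ruff v0.2.0, which moved linter configuration under the `lint.` namespace (`[tool.ruff.lint]`, `[tool.ruff.lint.per-file-ignores]`, `[tool.ruff.lint.mccabe]`) and deprecated the old top-level keys; formatter options, when needed, live in a separate `[tool.ruff.format]` table.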
