Skip to content

Commit

Permalink
lint: replace pyupgrade with Ruff's UP rule (#19638)
Browse files Browse the repository at this point in the history
* ruff
* configure
* update

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
  • Loading branch information
Borda and pre-commit-ci[bot] committed Mar 15, 2024
1 parent 22dc9c6 commit fe53597
Show file tree
Hide file tree
Showing 10 changed files with 38 additions and 53 deletions.
16 changes: 6 additions & 10 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -50,13 +50,6 @@ repos:
)$
- id: detect-private-key

- repo: https://github.com/asottile/pyupgrade
rev: v3.15.0
hooks:
- id: pyupgrade
args: ["--py38-plus"]
name: Upgrade code

- repo: https://github.com/codespell-project/codespell
rev: v2.2.6
hooks:
Expand All @@ -77,12 +70,15 @@ repos:
- id: sphinx-lint

- repo: https://github.com/astral-sh/ruff-pre-commit
rev: "v0.2.0"
rev: v0.3.2
hooks:
# try to fix what is possible
- id: ruff
args: ["--fix"]
# perform formatting updates
- id: ruff-format
args: ["--preview"]
# validate if all is fine with preview mode
- id: ruff
args: ["--fix", "--preview"]

- repo: https://github.com/executablebooks/mdformat
rev: 0.7.17
Expand Down
9 changes: 2 additions & 7 deletions examples/fabric/image_classifier/train_fabric.py
Original file line number Diff line number Diff line change
Expand Up @@ -118,13 +118,8 @@ def run(hparams):
optimizer.step()
if (batch_idx == 0) or ((batch_idx + 1) % hparams.log_interval == 0):
print(
"Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch,
batch_idx * len(data),
len(train_loader.dataset),
100.0 * batch_idx / len(train_loader),
loss.item(),
)
f"Train Epoch: {epoch} [{batch_idx * len(data)}/{len(train_loader.dataset)}"
f" ({100.0 * batch_idx / len(train_loader):.0f}%)]\tLoss: {loss.item():.6f}"
)
if hparams.dry_run:
break
Expand Down
14 changes: 4 additions & 10 deletions examples/fabric/image_classifier/train_torch.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,13 +91,8 @@ def run(hparams):
optimizer.step()
if (batch_idx == 0) or ((batch_idx + 1) % hparams.log_interval == 0):
print(
"Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch,
batch_idx * len(data),
len(train_loader.dataset),
100.0 * batch_idx / len(train_loader),
loss.item(),
)
f"Train Epoch: {epoch} [{batch_idx * len(data)}/{len(train_loader.dataset)}"
f" ({100.0 * batch_idx / len(train_loader):.0f}%)]\tLoss: {loss.item():.6f}"
)
if hparams.dry_run:
break
Expand All @@ -120,9 +115,8 @@ def run(hparams):
test_loss /= len(test_loader.dataset)

print(
"\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format(
test_loss, correct, len(test_loader.dataset), 100.0 * correct / len(test_loader.dataset)
)
f"\nTest set: Average loss: {test_loss:.4f}, Accuracy: {correct}/{len(test_loader.dataset)}"
f" ({100.0 * correct / len(test_loader.dataset):.0f}%)\n"
)

if hparams.dry_run:
Expand Down
9 changes: 2 additions & 7 deletions examples/fabric/kfold_cv/train_fabric.py
Original file line number Diff line number Diff line change
Expand Up @@ -67,13 +67,8 @@ def train_dataloader(model, data_loader, optimizer, fabric, epoch, hparams, fold
optimizer.step()
if (batch_idx == 0) or ((batch_idx + 1) % hparams.log_interval == 0):
print(
"Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch,
batch_idx * len(data),
len(data_loader.dataset),
100.0 * batch_idx / len(data_loader),
loss.item(),
)
f"Train Epoch: {epoch} [{batch_idx * len(data)}/{len(data_loader.dataset)}"
f" ({100.0 * batch_idx / len(data_loader):.0f}%)]\tLoss: {loss.item():.6f}"
)

if hparams.dry_run:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -104,10 +104,9 @@ def player(args, world_collective: TorchCollective, player_trainer_collective: T
if not args.share_data:
if single_global_step < world_collective.world_size - 1:
raise RuntimeError(
"The number of trainers ({}) is greater than the available collected data ({}). ".format(
world_collective.world_size - 1, single_global_step
)
+ "Consider to lower the number of trainers at least to the size of available collected data"
f"The number of trainers ({world_collective.world_size - 1})"
f" is greater than the available collected data ({single_global_step})."
f" Consider to lower the number of trainers at least to the size of available collected data"
)
chunks_sizes = [
len(chunk)
Expand Down
28 changes: 17 additions & 11 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -48,33 +48,39 @@ ignore-words-list = "te, compiletime"

[tool.ruff]
line-length = 120
# Enable Pyflakes `E` and `F` codes by default.
lint.select = [
target-version = "py38"
# Exclude a variety of commonly ignored directories.
exclude = [
".git",
"docs",
"_notebooks"
]

[tool.ruff.format]
preview = true

[tool.ruff.lint]
select = [
"E", "W", # see: https://pypi.org/project/pycodestyle
"F", # see: https://pypi.org/project/pyflakes
"S", # see: https://pypi.org/project/flake8-bandit
"RUF018", # see: https://docs.astral.sh/ruff/rules/assignment-in-assert
"UP", # see: https://docs.astral.sh/ruff/rules/#pyupgrade-up
]
lint.extend-select = [
extend-select = [
"I", # see: isort
"C4", # see: https://pypi.org/project/flake8-comprehensions
"SIM", # see: https://pypi.org/project/flake8-simplify
"RET", # see: https://pypi.org/project/flake8-return
"PT", # see: https://pypi.org/project/flake8-pytest-style
"RUF100", # see: https://docs.astral.sh/ruff/rules/unused-noqa/
]
lint.ignore = [
ignore = [
"E731", # Do not assign a lambda expression, use a def
"S108",
"E203", # conflicts with black
]
# Exclude a variety of commonly ignored directories.
exclude = [
".git",
"docs",
"_notebooks"
]
lint.ignore-init-module-imports = true
ignore-init-module-imports = true

[tool.ruff.lint.per-file-ignores]
".actions/*" = ["S101", "S310"]
Expand Down
2 changes: 1 addition & 1 deletion src/lightning/app/utilities/layout.py
Original file line number Diff line number Diff line change
Expand Up @@ -165,7 +165,7 @@ def _collect_content_layout(
m = f"""
A dictionary returned by `{flow.__class__.__name__}.configure_layout()` contains an unsupported entry.
{{'content': {repr(entry['content'])}}}
{{'content': {repr(entry["content"])}}}
Set the `content` key to a child flow or a URL, for example:
Expand Down
2 changes: 1 addition & 1 deletion src/lightning/pytorch/demos/transformer.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ def forward(self, inputs: Tensor, target: Tensor, mask: Optional[Tensor] = None)
# we assume target is already shifted w.r.t. inputs
if mask is None:
mask = torch.tril(torch.ones(t, t, device=inputs.device)) == 1
mask = mask.float().masked_fill(mask == 0, float("-inf")).masked_fill(mask == 1, float(0.0))
mask = mask.float().masked_fill(mask == 0, float("-inf")).masked_fill(mask == 1, 0.0)

src = self.pos_encoder(self.embedding(inputs) * math.sqrt(self.ninp))
target = self.pos_encoder(self.embedding(target) * math.sqrt(self.ninp))
Expand Down
2 changes: 1 addition & 1 deletion tests/tests_app/core/test_lightning_app.py
Original file line number Diff line number Diff line change
Expand Up @@ -1116,7 +1116,7 @@ def __init__(self, flow):
def test_cloud_compute_binding():
cloud_compute.ENABLE_MULTIPLE_WORKS_IN_NON_DEFAULT_CONTAINER = True

assert cloud_compute._CLOUD_COMPUTE_STORE == {}
assert {} == cloud_compute._CLOUD_COMPUTE_STORE
flow = FlowCC()
assert len(cloud_compute._CLOUD_COMPUTE_STORE) == 2
assert cloud_compute._CLOUD_COMPUTE_STORE["default"].component_names == ["root.work_c"]
Expand Down
2 changes: 1 addition & 1 deletion tests/tests_app/utilities/test_log_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ def test_known_error(self):
def test_unknown_error(self):
websocket = mock.Mock()
with self.assertLogs("lightning.app.utilities.log_helpers") as captured:
_error_callback(websocket, IOError())
_error_callback(websocket, OSError())
# check that there is only one log message
assert len(captured.records) == 1
# and it contains the error message expected
Expand Down

0 comments on commit fe53597

Please sign in to comment.