docformatter: config with black (#18064)
* docformatter: config with black

* additional_dependencies: [tomli]

* 119

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* fix

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>

(cherry picked from commit efa7b2f)
Borda authored and lexierule committed Aug 14, 2023
1 parent cbf53ca commit 03f57f9
Showing 345 changed files with 1,203 additions and 424 deletions.
6 changes: 6 additions & 0 deletions .actions/assistant.py
@@ -81,6 +81,7 @@ def adjust(self, unfreeze: str) -> str:
'arrow>=1.2.0'
>>> _RequirementWithComment("arrow").adjust("major")
'arrow'
"""
out = str(self)
if self.strict:
@@ -110,6 +111,7 @@ def _parse_requirements(strs: Union[str, Iterable[str]]) -> Iterator[_Requiremen
>>> txt = '\\n'.join(txt)
>>> [r.adjust('none') for r in _parse_requirements(txt)]
['this', 'example', 'foo # strict', 'thing']
"""
lines = yield_lines(strs)
pip_argument = None
@@ -144,6 +146,7 @@ def load_requirements(path_dir: str, file_name: str = "base.txt", unfreeze: str
>>> path_req = os.path.join(_PROJECT_ROOT, "requirements")
>>> load_requirements(path_req, "docs.txt", unfreeze="major") # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
['sphinx<...]
"""
assert unfreeze in {"none", "major", "all"}
path = Path(path_dir) / file_name
@@ -157,6 +160,7 @@ def load_readme_description(path_dir: str, homepage: str, version: str) -> str:
>>> load_readme_description(_PROJECT_ROOT, "", "") # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
'...PyTorch Lightning is just organized PyTorch...'
"""
path_readme = os.path.join(path_dir, "README.md")
with open(path_readme, encoding="utf-8") as fo:
@@ -236,6 +240,7 @@ def _load_aggregate_requirements(req_dir: str = "requirements", freeze_requireme
"""Load all base requirements from all particular packages and prune duplicates.
>>> _load_aggregate_requirements(os.path.join(_PROJECT_ROOT, "requirements"))
"""
requires = [
load_requirements(d, unfreeze="none" if freeze_requirements else "major")
@@ -292,6 +297,7 @@ def _replace_imports(lines: List[str], mapping: List[Tuple[str, str]], lightning
'http://pytorch_lightning.ai', \
'from lightning_fabric import __version__', \
'@lightning.ai']
"""
out = lines[:]
for source_import, target_import in mapping:
3 changes: 2 additions & 1 deletion .pre-commit-config.yaml
@@ -60,7 +60,8 @@ repos:
rev: v1.7.3
hooks:
- id: docformatter
args: [--in-place, --wrap-summaries=115, --wrap-descriptions=120]
additional_dependencies: [tomli]
args: ["--in-place"]

- repo: https://github.com/asottile/yesqa
rev: v1.5.0
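The .pre-commit-config.yaml hunk above drops the explicit wrap arguments from the docformatter hook and adds tomli as an additional dependency, which lets docformatter read its configuration from pyproject.toml instead of the command line. Together with the commit-message notes ("config with black", "119"), that points to a [tool.docformatter] table roughly like the sketch below; the exact keys and values are an assumption for illustration and are not part of the hunks rendered on this page.

```toml
# Hypothetical [tool.docformatter] section of pyproject.toml (assumed, not shown above).
# tomli is needed so docformatter can parse this file on Python < 3.11,
# which is why it appears under additional_dependencies in the hook.
[tool.docformatter]
recursive = true
# black mode: use black-compatible docstring formatting defaults
black = true
# assumed wrap widths, based on the previous hook args and the "119" commit note
wrap-summaries = 119
wrap-descriptions = 120
```

With such a config in place, running pre-commit run docformatter --all-files would produce the kind of mechanical one-line docstring adjustments repeated across the 345 changed files listed here.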
2 changes: 2 additions & 0 deletions docs/source-app/examples/file_server/app.py
@@ -23,6 +23,7 @@ def __init__(
drive: The drive can share data inside your application.
base_dir: The local directory where the data will be stored.
chunk_size: The quantity of bytes to download/upload at once.
"""
super().__init__(
cloud_build_config=L.BuildConfig(["flask", "flask-cors"]),
@@ -238,4 +239,5 @@ def test_file_server_in_cloud():
# 2. By calling logs = get_logs_fn(),
# you get all the logs currently on the admin page.
"""
1 change: 1 addition & 0 deletions docs/source-app/examples/github_repo_runner/app.py
@@ -36,6 +36,7 @@ def __init__(
script_args: The arguments to be provided to the script.
requirements: The Python requirements to run the script.
cloud_compute: The object to select the cloud instance.
"""
super().__init__(
script_path=script_path,
@@ -10,6 +10,7 @@ def __init__(self, num_users: int = 100):
Arguments:
num_users: Number of users emulated by Locust
"""
# Note: Using the default port 8089 of Locust.
super().__init__(
2 changes: 2 additions & 0 deletions docs/source-app/examples/model_server_app/model_server.py
@@ -18,6 +18,7 @@ class MLServer(LightningWork):
Example: "mlserver_sklearn.SKLearnModel".
Learn more here: $ML_SERVER_URL/tree/master/runtimes
workers: Number of server workers.
"""

def __init__(
@@ -51,6 +52,7 @@ def run(self, model_path: Path):
Arguments:
model_path: The path to the trained model.
"""
# 1: Use the host and port at runtime so it works in the cloud.
# $ML_SERVER_URL/blob/master/mlserver/settings.py#L50
1 change: 1 addition & 0 deletions examples/app/hpo/utils.py
@@ -15,6 +15,7 @@ def download_data(url: str, path: str = "data/", verbose: bool = False) -> None:
Usage:
download_file('http://web4host.net/5MB.zip')
"""
if url == "NEED_TO_BE_CREATED":
raise NotImplementedError
1 change: 1 addition & 0 deletions examples/app/layout/app.py
@@ -5,6 +5,7 @@
lightning run app examples/layout/demo.py
This starts one server for each flow that returns a UI. Access the UI at the link printed in the terminal.
"""

import os
13 changes: 11 additions & 2 deletions examples/fabric/build_your_own_trainer/trainer.py
@@ -137,6 +137,7 @@ def fit(
If not specified, no validation will run.
ckpt_path: Path to previous checkpoints to resume training from.
If specified, will always look for the latest checkpoint within the given directory.
"""
self.fabric.launch()

@@ -207,6 +208,7 @@ def train_loop(
If greater than the number of batches in the ``train_loader``, this has no effect.
scheduler_cfg: The learning rate scheduler configuration.
Have a look at :meth:`lightning.pytorch.LightningModule.configure_optimizers` for supported values.
"""
self.fabric.call("on_train_epoch_start")
iterable = self.progbar_wrapper(
@@ -269,6 +271,7 @@ def val_loop(
val_loader: The dataloader yielding the validation batches.
limit_batches: Limits the batches during this validation epoch.
If greater than the number of batches in the ``val_loader``, this has no effect.
"""
# no validation if val_loader wasn't passed
if val_loader is None:
@@ -313,13 +316,14 @@ def val_loop(
torch.set_grad_enabled(True)

def training_step(self, model: L.LightningModule, batch: Any, batch_idx: int) -> torch.Tensor:
"""A single training step, running forward and backward. The optimizer step is called separately, as this
is given as a closure to the optimizer step.
"""A single training step, running forward and backward. The optimizer step is called separately, as this is
given as a closure to the optimizer step.
Args:
model: the lightning module to train
batch: the batch to run the forward on
batch_idx: index of the current batch w.r.t the current epoch
"""
outputs: Union[torch.Tensor, Mapping[str, Any]] = model.training_step(batch, batch_idx=batch_idx)

@@ -349,6 +353,7 @@ def step_scheduler(
Have a look at :meth:`lightning.pytorch.LightningModule.configure_optimizers` for supported values.
level: whether we are trying to step on epoch- or step-level
current_value: Holds the current_epoch if ``level==epoch``, else holds the ``global_step``
"""

# no scheduler
@@ -397,6 +402,7 @@ def progbar_wrapper(self, iterable: Iterable, total: int, **kwargs: Any):
Args:
iterable: the iterable to wrap with tqdm
total: the total length of the iterable, necessary in case the number of batches was limited.
"""
if self.fabric.is_global_zero:
return tqdm(iterable, total=total, **kwargs)
@@ -408,6 +414,7 @@ def load(self, state: Optional[Mapping], path: str) -> None:
Args:
state: a mapping containing model, optimizer and lr scheduler
path: the path to load the checkpoint from
"""
if state is None:
state = {}
@@ -460,6 +467,7 @@ def _parse_optimizers_schedulers(
Args:
configure_optim_output: The output of ``configure_optimizers``.
For supported values, please refer to :meth:`lightning.pytorch.LightningModule.configure_optimizers`.
"""
_lr_sched_defaults = {"interval": "epoch", "frequency": 1, "monitor": "val_loss"}

@@ -513,6 +521,7 @@ def _format_iterable(
prog_bar: a progressbar (on global rank zero) or an iterable (every other rank).
candidates: the values to add as postfix strings to the progressbar.
prefix: the prefix to add to each of these values.
"""
if isinstance(prog_bar, tqdm) and candidates is not None:
postfix_str = ""
1 change: 1 addition & 0 deletions examples/fabric/image_classifier/train_fabric.py
@@ -25,6 +25,7 @@
Accelerate your training loop by setting the ``--accelerator``, ``--strategy``, ``--devices`` options directly from
the command line. See ``lightning run model --help`` or learn more from the documentation:
https://lightning.ai/docs/fabric.
"""

import argparse
1 change: 1 addition & 0 deletions examples/pytorch/basics/autoencoder.py
@@ -14,6 +14,7 @@
"""MNIST autoencoder example.
To run: python autoencoder.py --trainer.max_epochs=50
"""
from os import path
from typing import Optional, Tuple
1 change: 1 addition & 0 deletions examples/pytorch/basics/backbone_image_classifier.py
@@ -14,6 +14,7 @@
"""MNIST backbone image classifier example.
To run: python backbone_image_classifier.py --trainer.max_epochs=50
"""
from os import path
from typing import Optional
1 change: 1 addition & 0 deletions examples/pytorch/basics/profiler_example.py
@@ -20,6 +20,7 @@
* With PyTorch Tensorboard Profiler (Instructions are here: https://github.com/pytorch/kineto/tree/master/tb_plugin)
1. pip install tensorboard torch-tb-profiler
2. tensorboard --logdir={FOLDER}
"""

from os import path
10 changes: 7 additions & 3 deletions examples/pytorch/domain_templates/computer_vision_fine_tuning.py
@@ -12,9 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Computer vision example on Transfer Learning. This computer vision example illustrates how one could fine-tune a
pre-trained network (by default, a ResNet50 is used) using pytorch-lightning. For the sake of this example, the
'cats and dogs dataset' (~60MB, see `DATA_URL` below) and the proposed network (denoted by `TransferLearningModel`,
see below) is trained for 15 epochs.
pre-trained network (by default, a ResNet50 is used) using pytorch-lightning. For the sake of this example, the 'cats
and dogs dataset' (~60MB, see `DATA_URL` below) and the proposed network (denoted by `TransferLearningModel`, see
below) is trained for 15 epochs.
The training consists of three stages.
@@ -37,6 +37,7 @@
To run:
python computer_vision_fine_tuning.py fit
"""

import logging
@@ -97,6 +98,7 @@ def __init__(self, dl_path: Union[str, Path] = "data", num_workers: int = 0, bat
dl_path: root directory where to download the data
num_workers: number of CPU workers
batch_size: number of samples in a batch
"""
super().__init__()

@@ -174,6 +176,7 @@ def __init__(
milestones: List of two epochs milestones
lr: Initial learning rate
lr_scheduler_gamma: Factor by which the learning rate is reduced at each milestone
"""
super().__init__()
self.backbone = backbone
@@ -209,6 +212,7 @@ def forward(self, x):
"""Forward pass.
Returns logits.
"""
# 1. Feature extraction:
x = self.feature_extractor(x)
@@ -16,6 +16,7 @@
After a few epochs, launch TensorBoard to see the images being generated at every batch:
tensorboard --logdir default
"""
from argparse import ArgumentParser, Namespace

1 change: 1 addition & 0 deletions examples/pytorch/domain_templates/imagenet.py
@@ -28,6 +28,7 @@
python imagenet.py --help
python imagenet.py fit --help
"""
import os
from typing import Optional