diff --git a/.github/workflows/ci_code-format.yml b/.github/workflows/ci_code-format.yml
index c38c504e7..9bbb317c0 100644
--- a/.github/workflows/ci_code-format.yml
+++ b/.github/workflows/ci_code-format.yml
@@ -12,6 +12,7 @@ on: # Trigger the workflow on push or pull request, but only for the main branc
 jobs:
   pre-commit-check:
     runs-on: ubuntu-latest
+    timeout-minutes: 10
     steps:
       - uses: actions/checkout@v2
       - uses: actions/setup-python@v2
diff --git a/.github/workflows/ci_docs.yml b/.github/workflows/ci_docs.yml
index 8be81f412..ac5b27c7f 100644
--- a/.github/workflows/ci_docs.yml
+++ b/.github/workflows/ci_docs.yml
@@ -2,6 +2,9 @@ name: validate Docs

 on: # Trigger the workflow on push or pull request
   pull_request: {}
+  schedule:
+    # At the end of every day
+    - cron: "0 0 * * *"

 concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref }}
@@ -13,6 +16,7 @@ jobs:
     env:
       PUB_BRANCH: publication
       PATH_DATASETS: ${{ github.workspace }}/.datasets
+    timeout-minutes: 20
     steps:
       - name: Checkout 🛎️
         uses: actions/checkout@v2
diff --git a/.github/workflows/ci_schema.yml b/.github/workflows/ci_schema.yml
index 87a8d89f0..29ac3e433 100644
--- a/.github/workflows/ci_schema.yml
+++ b/.github/workflows/ci_schema.yml
@@ -7,6 +7,7 @@ on: # Trigger the workflow on push or pull request, but only for the master bran
 jobs:
   validate-schema:
     runs-on: ubuntu-20.04
+    timeout-minutes: 5
     steps:
       - name: Checkout
         uses: actions/checkout@v2
diff --git a/lightning_examples/mnist-hello-world/hello-world.py b/lightning_examples/mnist-hello-world/hello-world.py
index 03e234816..0428fdaad 100644
--- a/lightning_examples/mnist-hello-world/hello-world.py
+++ b/lightning_examples/mnist-hello-world/hello-world.py
@@ -89,7 +89,7 @@ def configure_optimizers(self):
 # - If you don't mind loading all your datasets at once, you can set up a condition to allow for both 'fit' related setup and 'test' related setup to run whenever `None` is passed to `stage` (or ignore it altogether and exclude any conditionals).
 # - **Note this runs across all GPUs and it *is* safe to make state assignments here**
 #
-# 3. [x_dataloader()](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.core.hooks.html) ♻️
+# 3. [x_dataloader()](https://pytorch-lightning.readthedocs.io/en/stable/api_references.html#core-api) ♻️
 # - `train_dataloader()`, `val_dataloader()`, and `test_dataloader()` all return PyTorch `DataLoader` instances that are created by wrapping their respective datasets that we prepared in `setup()`
diff --git a/lightning_examples/mnist-tpu-training/.meta.yml b/lightning_examples/mnist-tpu-training/.meta.yml
index b7cb2e9e6..bba3e2bfa 100644
--- a/lightning_examples/mnist-tpu-training/.meta.yml
+++ b/lightning_examples/mnist-tpu-training/.meta.yml
@@ -8,7 +8,7 @@ tags:
   - Image
 description: In this notebook, we'll train a model on TPUs. Updating one Trainer flag is all you
   need for that. The most up to documentation related to TPU training can be found
-  [here](https://pytorch-lightning.readthedocs.io/en/stable/advanced/tpu.html).
+  [here](https://pytorch-lightning.readthedocs.io/en/stable/accelerators/tpu.html).
 requirements:
   - torchvision
 accelerator: