From 5c42c67bd5698e46295610baf846dd156dd78d85 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 3 Oct 2023 10:06:21 +0000
Subject: [PATCH 1/5] [pre-commit.ci] pre-commit suggestions
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

updates:
- [github.com/asottile/pyupgrade: v3.8.0 → v3.14.0](https://github.com/asottile/pyupgrade/compare/v3.8.0...v3.14.0)
- [github.com/codespell-project/codespell: v2.2.5 → v2.2.6](https://github.com/codespell-project/codespell/compare/v2.2.5...v2.2.6)
- [github.com/PyCQA/docformatter: v1.7.3 → v1.7.5](https://github.com/PyCQA/docformatter/compare/v1.7.3...v1.7.5)
- [github.com/psf/black: 23.3.0 → 23.9.1](https://github.com/psf/black/compare/23.3.0...23.9.1)
- [github.com/executablebooks/mdformat: 0.7.16 → 0.7.17](https://github.com/executablebooks/mdformat/compare/0.7.16...0.7.17)
- [github.com/astral-sh/ruff-pre-commit: v0.0.276 → v0.0.292](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.276...v0.0.292)
---
 .pre-commit-config.yaml | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 3d3da9722..7f1f2cc78 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -23,21 +23,21 @@ repos:
       - id: detect-private-key

   - repo: https://github.com/asottile/pyupgrade
-    rev: v3.8.0
+    rev: v3.14.0
     hooks:
       - id: pyupgrade
         args: ["--py38-plus"]
         name: Upgrade code

   - repo: https://github.com/codespell-project/codespell
-    rev: v2.2.5
+    rev: v2.2.6
     hooks:
       - id: codespell
         additional_dependencies: [tomli]
         #args: ["--write-changes"]

   - repo: https://github.com/PyCQA/docformatter
-    rev: v1.7.3
+    rev: v1.7.5
     hooks:
       - id: docformatter
         args:
@@ -53,7 +53,7 @@ repos:
         args: ["--print-width=120"]

   - repo: https://github.com/psf/black
-    rev: 23.3.0
+    rev: 23.9.1
     hooks:
       - id: black
         name: Format code
@@ -64,7 +64,7 @@ repos:
       - id: yesqa

   - repo: https://github.com/executablebooks/mdformat
-    rev: 0.7.16
+    rev: 0.7.17
     hooks:
       - id: mdformat
         additional_dependencies:
@@ -73,7 +73,7 @@ repos:
           - mdformat_frontmatter

   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.0.276
+    rev: v0.0.292
     hooks:
       - id: ruff
         args: ["--fix"]

From aa3aa2ba93342e1d3b3b8e3f588b48b24fd2f27e Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 3 Oct 2023 10:07:56 +0000
Subject: [PATCH 2/5] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 .../finetuning-scheduler/finetuning-scheduler.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/lightning_examples/finetuning-scheduler/finetuning-scheduler.py b/lightning_examples/finetuning-scheduler/finetuning-scheduler.py
index db66028bf..445415e81 100644
--- a/lightning_examples/finetuning-scheduler/finetuning-scheduler.py
+++ b/lightning_examples/finetuning-scheduler/finetuning-scheduler.py
@@ -235,8 +235,7 @@ def __init__(
         tokenizers_parallelism: bool = True,
         **dataloader_kwargs: Any,
     ):
-        r"""Initialize the ``LightningDataModule`` designed for both the RTE or BoolQ SuperGLUE Hugging Face
-        datasets.
+        r"""Initialize the ``LightningDataModule`` designed for both the RTE or BoolQ SuperGLUE Hugging Face datasets.

         Args:
             model_name_or_path (str):

From 3d6f3b95487b3a55058e627b9dd4fcef37a55caa Mon Sep 17 00:00:00 2001
From: Jirka Borovec <6035284+Borda@users.noreply.github.com>
Date: Tue, 3 Oct 2023 13:40:09 +0200
Subject: [PATCH 3/5] typos

---
 .pre-commit-config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 7f1f2cc78..998a85fd8 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -34,7 +34,7 @@ repos:
     hooks:
       - id: codespell
         additional_dependencies: [tomli]
-        #args: ["--write-changes"]
+        args: ["--write-changes"]

   - repo: https://github.com/PyCQA/docformatter
     rev: v1.7.5

From 0eafd5158d7a03a25d0b221d3d50021d9085fb1a Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 3 Oct 2023 11:40:39 +0000
Subject: [PATCH 4/5] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 .actions/assistant.py | 2 +-
 course_UvA-DL/06-graph-neural-networks/GNN_overview.py | 2 +-
 .../Deep_Energy_Models.py | 2 +-
 .../Autoregressive_Image_Modeling.py | 2 +-
 flash_tutorials/text_classification/text_classification.py | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/.actions/assistant.py b/.actions/assistant.py
index 912c13882..8ebab08a6 100644
--- a/.actions/assistant.py
+++ b/.actions/assistant.py
@@ -188,7 +188,7 @@ def _load_meta(folder: str, strict: bool = False) -> Optional[dict]:

         Args:
             folder: path to the folder with python script, meta and artefacts
-            strict: raise error if meta is missing required feilds
+            strict: raise error if meta is missing required fields
         """
         fpath = AssistantCLI._find_meta(folder)
         assert fpath, f"Missing meta file in folder: {folder}"
diff --git a/course_UvA-DL/06-graph-neural-networks/GNN_overview.py b/course_UvA-DL/06-graph-neural-networks/GNN_overview.py
index 1e693573e..aac757908 100644
--- a/course_UvA-DL/06-graph-neural-networks/GNN_overview.py
+++ b/course_UvA-DL/06-graph-neural-networks/GNN_overview.py
@@ -843,7 +843,7 @@ def print_results(result_dict):
 # In this case, we will use the average pooling.
 # Hence, we need to know which nodes should be included in which average pool.
 # Using this pooling, we can already create our graph network below.
-# Specifically, we re-use our class `GNNModel` from before,
+# Specifically, we reuse our class `GNNModel` from before,
 # and simply add an average pool and single linear layer for the graph prediction task.
diff --git a/course_UvA-DL/07-deep-energy-based-generative-models/Deep_Energy_Models.py b/course_UvA-DL/07-deep-energy-based-generative-models/Deep_Energy_Models.py
index c688ac5e2..05ca2f9a4 100644
--- a/course_UvA-DL/07-deep-energy-based-generative-models/Deep_Energy_Models.py
+++ b/course_UvA-DL/07-deep-energy-based-generative-models/Deep_Energy_Models.py
@@ -315,7 +315,7 @@ def forward(self, x):
 # inside the MCMC sampling to obtain reasonable samples.
 # However, there is a training trick that significantly reduces the sampling cost: using a sampling buffer.
 # The idea is that we store the samples of the last couple of batches in a buffer,
-# and re-use those as the starting point of the MCMC algorithm for the next batches.
+# and reuse those as the starting point of the MCMC algorithm for the next batches.
 # This reduces the sampling cost because the model requires a significantly
 # lower number of steps to converge to reasonable samples.
 # However, to not solely rely on previous samples and allow novel samples as well,
diff --git a/course_UvA-DL/10-autoregressive-image-modeling/Autoregressive_Image_Modeling.py b/course_UvA-DL/10-autoregressive-image-modeling/Autoregressive_Image_Modeling.py
index c36660ade..6faf119b0 100644
--- a/course_UvA-DL/10-autoregressive-image-modeling/Autoregressive_Image_Modeling.py
+++ b/course_UvA-DL/10-autoregressive-image-modeling/Autoregressive_Image_Modeling.py
@@ -905,7 +905,7 @@ def autocomplete_image(img):
 # potentially undesirable behavior. For instance, the value 242 has a
 # 1000x lower likelihood than 243 although they are extremely close and
 # can often not be distinguished. This shows that the model might have not
-# generlized well over pixel values. The better solution to this problem
+# generalized well over pixel values. The better solution to this problem
 # is to use discrete logitics mixtures instead of a softmax distribution.
 # A discrete logistic distribution can be imagined as discretized, binned
 # Gaussians. Using a mixture of discrete logistics instead of a softmax
diff --git a/flash_tutorials/text_classification/text_classification.py b/flash_tutorials/text_classification/text_classification.py
index f83bac5e0..7299576bd 100644
--- a/flash_tutorials/text_classification/text_classification.py
+++ b/flash_tutorials/text_classification/text_classification.py
@@ -5,7 +5,7 @@
 #
 # Finetuning consists of four steps:
 #
-# - 1. Train a source neural network model on a source dataset. For text classication, it is traditionally a transformer model such as BERT [Bidirectional Encoder Representations from Transformers](https://arxiv.org/abs/1810.04805) trained on wikipedia.
+# - 1. Train a source neural network model on a source dataset. For text classification, it is traditionally a transformer model such as BERT [Bidirectional Encoder Representations from Transformers](https://arxiv.org/abs/1810.04805) trained on wikipedia.
 #   As those model are costly to train, [Transformers](https://github.com/huggingface/transformers) or [FairSeq](https://github.com/pytorch/fairseq) libraries provides popular pre-trained model architectures for NLP. In this notebook, we will be using [tiny-bert](https://huggingface.co/prajjwal1/bert-tiny).
 #
 # - 2. Create a new neural network the target model. Its architecture replicates all model designs and their parameters on the source model, expect the latest layer which is removed. This model without its latest layers is traditionally called a backbone

From 4043af462a3aa9fb9599de916fec4983b7015b93 Mon Sep 17 00:00:00 2001
From: Jirka Borovec <6035284+Borda@users.noreply.github.com>
Date: Tue, 3 Oct 2023 13:41:45 +0200
Subject: [PATCH 5/5] info

---
 .pre-commit-config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 998a85fd8..7f1f2cc78 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -34,7 +34,7 @@ repos:
     hooks:
       - id: codespell
         additional_dependencies: [tomli]
-        args: ["--write-changes"]
+        #args: ["--write-changes"]

   - repo: https://github.com/PyCQA/docformatter
     rev: v1.7.5