From 7fcd7cc406462e4081900c64d9a8af5f4e828f11 Mon Sep 17 00:00:00 2001 From: Jirka Date: Mon, 24 Apr 2023 18:28:04 +0200 Subject: [PATCH 01/93] chlog: new section --- src/lightning/app/CHANGELOG.md | 12 ++++++++++++ src/lightning/fabric/CHANGELOG.md | 12 ++++++++++++ src/lightning/pytorch/CHANGELOG.md | 12 ++++++++++++ 3 files changed, 36 insertions(+) diff --git a/src/lightning/app/CHANGELOG.md b/src/lightning/app/CHANGELOG.md index 0f5f5b65b8ba4..ac547a4e24c50 100644 --- a/src/lightning/app/CHANGELOG.md +++ b/src/lightning/app/CHANGELOG.md @@ -5,6 +5,18 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). +## [UnReleased] - 2023-04-DD + +### Changed + +- + + +### Fixed + +- + + ## [2.0.2] - 2023-04-24 ### Fixed diff --git a/src/lightning/fabric/CHANGELOG.md b/src/lightning/fabric/CHANGELOG.md index 87c90d812c3ca..eea2c2fa98314 100644 --- a/src/lightning/fabric/CHANGELOG.md +++ b/src/lightning/fabric/CHANGELOG.md @@ -5,6 +5,18 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). +## [UnReleased] - 2023-04-DD + +### Changed + +- + + +### Fixed + +- + + ## [2.0.2] - 2023-04-24 ### Changed diff --git a/src/lightning/pytorch/CHANGELOG.md b/src/lightning/pytorch/CHANGELOG.md index 93aa7e58ec485..a8c7a10ae7c16 100644 --- a/src/lightning/pytorch/CHANGELOG.md +++ b/src/lightning/pytorch/CHANGELOG.md @@ -5,6 +5,18 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). +## [UnReleased] - 2023-04-DD + +### Changed + +- + + +### Fixed + +- + + ## [2.0.2] - 2023-04-24 ### Fixed From 2c981a307dc034bfeafe85b572d4ec1d62fd28a4 Mon Sep 17 00:00:00 2001 From: Jirka Borovec <6035284+Borda@users.noreply.github.com> Date: Mon, 24 Apr 2023 15:18:32 +0200 Subject: [PATCH 02/93] set codecov as informational (#17453) (cherry picked from commit e38e2cccd43bf7c13739dab9175a1b0c903eed00) --- .codecov.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.codecov.yml b/.codecov.yml index 8544950a6acd9..301eba5ba4de5 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -36,6 +36,7 @@ coverage: # https://codecov.readme.io/v1.0/docs/commit-status project: default: + informational: true target: 99% # specify the target coverage for each commit status threshold: 30% # allow this little decrease on project # https://github.com/codecov/support/wiki/Filtering-Branches @@ -44,6 +45,7 @@ coverage: # https://github.com/codecov/support/wiki/Patch-Status patch: default: + informational: true target: 50% # specify the target "X%" coverage to hit threshold: 5% # allow this much decrease on patch changes: false From d5d3efb65e5dd0a40f172e02b88768c949c5194f Mon Sep 17 00:00:00 2001 From: Jirka Borovec <6035284+borda@users.noreply.github.com> Date: Mon, 24 Apr 2023 16:07:22 +0200 Subject: [PATCH 03/93] docs: fix past versions location (#17432) (cherry picked from commit a5c43d3b2b80f5fc769d7ed0ea511c0bd6733c6b) --- docs/source-pytorch/past_versions.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source-pytorch/past_versions.rst b/docs/source-pytorch/past_versions.rst index 31e3f50903822..c1b80d949ec38 100644 --- a/docs/source-pytorch/past_versions.rst +++ b/docs/source-pytorch/past_versions.rst @@ -1,9 +1,9 @@ Past PyTorch Lightning versions =============================== -PyTorch Lightning evolved over time. 
Here's the complete history of versions with links to their respective docs. +PyTorch Lightning :doc:`evolved over time `. Here's the history of versions with links to their respective docs. -TO help you with keeping up to spead, check :doc:`Migration guide <./upgrade/migration_guide>`. +To help you with keeping up to spead, check :doc:`Migration guide <./upgrade/migration_guide>`. .. list-table:: Past versions :widths: 5 50 30 15 From 3871cc15615255103b6414a65981728d0a6a8d88 Mon Sep 17 00:00:00 2001 From: Ethan Harris Date: Mon, 24 Apr 2023 17:14:36 +0100 Subject: [PATCH 04/93] [App] Fix resolution of latest version in CLI (#17351) Co-authored-by: Jirka Borovec <6035284+Borda@users.noreply.github.com> Co-authored-by: Jirka (cherry picked from commit 57ad46258b318243a83f463d5e764e6ebe623880) --- .github/workflows/ci-tests-app.yml | 2 +- src/lightning/app/utilities/cli_helpers.py | 21 ++----- tests/tests_app/utilities/test_cli_helpers.py | 60 +++++++++++++++---- 3 files changed, 56 insertions(+), 27 deletions(-) diff --git a/.github/workflows/ci-tests-app.yml b/.github/workflows/ci-tests-app.yml index b4c7f6d5c0945..e7d94d9434122 100644 --- a/.github/workflows/ci-tests-app.yml +++ b/.github/workflows/ci-tests-app.yml @@ -47,7 +47,7 @@ jobs: - {os: "ubuntu-20.04", pkg-name: "app", python-version: "3.9", requires: "latest"} - {os: "windows-2022", pkg-name: "app", python-version: "3.8", requires: "latest"} # Timeout: https://stackoverflow.com/a/59076067/4521646 - timeout-minutes: 40 + timeout-minutes: 45 env: PACKAGE_NAME: ${{ matrix.pkg-name }} FREEZE_REQUIREMENTS: ${{ ! (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/heads/release/')) }} diff --git a/src/lightning/app/utilities/cli_helpers.py b/src/lightning/app/utilities/cli_helpers.py index 99a76d3999811..5397ed82d6215 100644 --- a/src/lightning/app/utilities/cli_helpers.py +++ b/src/lightning/app/utilities/cli_helpers.py @@ -249,14 +249,6 @@ def _arrow_time_callback( raise click.ClickException(f"cannot parse time {value}") -def _is_valid_release(release): - version, release = release - version = packaging.version.parse(version) - if any(r["yanked"] for r in release) or version.is_devrelease or version.is_prerelease: - return False - return True - - @functools.lru_cache(maxsize=1) def _get_newer_version() -> Optional[str]: """Check PyPI for newer versions of ``lightning``, returning the newest version if different from the current @@ -265,16 +257,15 @@ def _get_newer_version() -> Optional[str]: return None try: response = requests.get(f"https://pypi.org/pypi/{__package_name__}/json") - releases = response.json()["releases"] + response_json = response.json() + releases = response_json["releases"] if __version__ not in releases: # Always return None if not installed from PyPI (e.g. 
dev versions) return None - releases = dict(filter(_is_valid_release, releases.items())) - sorted_releases = sorted( - releases.items(), key=lambda release: release[1][0]["upload_time_iso_8601"], reverse=True - ) - latest_version = sorted_releases[0][0] - return None if __version__ == latest_version else latest_version + latest_version = response_json["info"]["version"] + parsed_version = packaging.version.parse(latest_version) + is_invalid = response_json["info"]["yanked"] or parsed_version.is_devrelease or parsed_version.is_prerelease + return None if __version__ == latest_version or is_invalid else latest_version except Exception: # Return None if any exception occurs return None diff --git a/tests/tests_app/utilities/test_cli_helpers.py b/tests/tests_app/utilities/test_cli_helpers.py index 248b46acd368e..e82a6e1acf668 100644 --- a/tests/tests_app/utilities/test_cli_helpers.py +++ b/tests/tests_app/utilities/test_cli_helpers.py @@ -73,46 +73,84 @@ def test_arrow_time_callback(): @pytest.mark.parametrize( - "releases, current_version, newer_version", + "response, current_version, newer_version", [ ( { - "1.0.0": [{"upload_time_iso_8601": "2022-09-10", "yanked": False}], - "2.0.0": [{"upload_time_iso_8601": "2022-11-01", "yanked": False}], + "info": { + "version": "2.0.0", + "yanked": False, + }, + "releases": { + "1.0.0": {}, + "2.0.0": {}, + }, }, "1.0.0", "2.0.0", ), ( { - "1.0.0": [{"upload_time_iso_8601": "2022-09-10", "yanked": False}], - "2.0.0": [{"upload_time_iso_8601": "2022-11-01", "yanked": True}], + "info": { + "version": "2.0.0", + "yanked": True, + }, + "releases": { + "1.0.0": {}, + "2.0.0": {}, + }, }, "1.0.0", None, ), ( { - "1.0.0": [{"upload_time_iso_8601": "2022-09-10", "yanked": False}], - "2.0.0rc0": [{"upload_time_iso_8601": "2022-11-01", "yanked": False}], + "info": { + "version": "1.0.0", + "yanked": False, + }, + "releases": { + "1.0.0": {}, + }, }, "1.0.0", None, ), ( { - "2.0.0": [{"upload_time_iso_8601": "2022-11-01", "yanked": False}], + "info": { + "version": "2.0.0rc0", + "yanked": False, + }, + "releases": { + "1.0.0": {}, + "2.0.0": {}, + }, + }, + "1.0.0", + None, + ), + ( + { + "info": { + "version": "2.0.0", + "yanked": False, + }, + "releases": { + "1.0.0": {}, + "2.0.0": {}, + }, }, "1.0.0dev", None, ), - ({"1.0.0": "this wil trigger an error"}, "1.0.0", None), + ({"this wil trigger an error": True}, "1.0.0", None), ({}, "1.0.0rc0", None), ], ) @patch("lightning.app.utilities.cli_helpers.requests") -def test_get_newer_version(mock_requests, releases, current_version, newer_version): - mock_requests.get().json.return_value = {"releases": releases} +def test_get_newer_version(mock_requests, response, current_version, newer_version): + mock_requests.get().json.return_value = response lightning.app.utilities.cli_helpers.__version__ = current_version From adb81ccf557f8d120f795c92f88e7ef0a9217f27 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Apr 2023 18:28:14 +0000 Subject: [PATCH 05/93] Update uvicorn requirement from <0.19.1 to <0.21.2 in /requirements (#17314) Updates the requirements on [uvicorn](https://github.com/encode/uvicorn) to permit the latest version. - [Release notes](https://github.com/encode/uvicorn/releases) - [Changelog](https://github.com/encode/uvicorn/blob/master/CHANGELOG.md) - [Commits](https://github.com/encode/uvicorn/compare/0.0.1...0.21.1) --- updated-dependencies: - dependency-name: uvicorn dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> (cherry picked from commit 09ae070d2d648d3b2c59183cd6ec2236d21d6a98) --- requirements/app/base.txt | 2 +- requirements/pytorch/test.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements/app/base.txt b/requirements/app/base.txt index 8680223db0684..5625988a8344e 100644 --- a/requirements/app/base.txt +++ b/requirements/app/base.txt @@ -23,6 +23,6 @@ PyYAML <=6.0 requests <2.28.3 rich >=12.3.0, <=13.0.1 urllib3 <=1.26.13 -uvicorn <=0.17.6 +uvicorn <=0.21.1 websocket-client <1.5.2 websockets <=10.4 diff --git a/requirements/pytorch/test.txt b/requirements/pytorch/test.txt index 9a572f5774a98..86f3f81914e84 100644 --- a/requirements/pytorch/test.txt +++ b/requirements/pytorch/test.txt @@ -12,7 +12,7 @@ onnxruntime<1.14.0 psutil<5.9.5 # for `DeviceStatsMonitor` pandas>1.0, <1.5.4 # needed in benchmarks fastapi<0.87.0 # for `ServableModuleValidator` -uvicorn<0.19.1 # for `ServableModuleValidator` +uvicorn<0.21.2 # for `ServableModuleValidator` tensorboard >=2.9.1, <2.12.0 # for `TensorBoardLogger` protobuf <=3.20.1 # strict # an extra is updating protobuf, this pin prevents TensorBoard failure From b07419bcd067abc1b2849b49ceb013a1658cf926 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Apr 2023 18:51:51 +0000 Subject: [PATCH 06/93] Update inquirer requirement from <=3.1.2,>=2.10.0 to >=2.10.0,<=3.1.3 in /requirements (#17258) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> (cherry picked from commit cae1888ae5431c89e7497d0abdf6044d7987e85f) --- requirements/app/base.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/app/base.txt b/requirements/app/base.txt index 5625988a8344e..b07c5898b9d78 100644 --- a/requirements/app/base.txt +++ b/requirements/app/base.txt @@ -9,7 +9,7 @@ traitlets >=5.3.0, <5.9.0 arrow >=1.2.0, <1.2.4 lightning-utilities >=0.7.0, <0.9.0 beautifulsoup4 >=4.8.0, <4.11.2 -inquirer >=2.10.0, <=3.1.2 +inquirer >=2.10.0, <=3.1.3 psutil <5.9.5 click <=8.1.3 From ca2b71921ac2f2f8b922a9b6544698f882f46dfe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Apr 2023 19:34:42 +0000 Subject: [PATCH 07/93] Update aiohttp requirement from <=3.8.3,>=3.8.0 to >=3.8.0,<=3.8.4 in /requirements (#17315) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> (cherry picked from commit 05bcc8c2a5e5126c4ca1f0670c3db025ff0a09be) --- requirements/app/components.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/app/components.txt b/requirements/app/components.txt index c3ecefd2287f6..b821f7ad4f8be 100644 --- a/requirements/app/components.txt +++ b/requirements/app/components.txt @@ -1,5 +1,5 @@ # deps required by components in the lightning app repository (src/lightning/app/components) lightning_api_access >=0.0.3 # serve -aiohttp >=3.8.0, <=3.8.3 # auto_scaler +aiohttp >=3.8.0, <=3.8.4 # auto_scaler lightning-fabric >=1.9.0 # multinode pytorch-lightning >=1.9.0 # multinode From 986633150575844d338510e4401b8b49e06f4949 Mon Sep 17 00:00:00 2001 From: Jirka Borovec <6035284+Borda@users.noreply.github.com> Date: Mon, 24 Apr 2023 21:49:14 +0200 Subject: [PATCH 08/93] typing: fix App's core API - api (#16950) 
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> (cherry picked from commit 68716cc80a64d2d0f4e25413732e55aa22a124b7) --- pyproject.toml | 1 - src/lightning/app/core/api.py | 83 ++++++++++++----------- tests/tests_app/storage/test_path.py | 4 +- tests/tests_app/utilities/test_proxies.py | 2 +- 4 files changed, 46 insertions(+), 44 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 6d6c2086320b4..a000dc9122a77 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -137,7 +137,6 @@ module = [ "lightning.app.components.serve.types.image", "lightning.app.components.serve.types.type", "lightning.app.components.training", - "lightning.app.core.api", "lightning.app.frontend.panel.app_state_comm", "lightning.app.frontend.panel.app_state_watcher", "lightning.app.frontend.panel.panel_frontend", diff --git a/src/lightning/app/core/api.py b/src/lightning/app/core/api.py index 0829b185e61fe..e4207525c0106 100644 --- a/src/lightning/app/core/api.py +++ b/src/lightning/app/core/api.py @@ -16,6 +16,7 @@ import json import os import queue +import socket import sys import traceback from copy import deepcopy @@ -47,7 +48,9 @@ FRONTEND_DIR, get_cloud_queue_type, ) +from lightning.app.core.flow import LightningFlow from lightning.app.core.queues import QueuingSystem +from lightning.app.core.work import LightningWork from lightning.app.storage import Drive from lightning.app.utilities.app_helpers import InMemoryStateStore, Logger, StateStore from lightning.app.utilities.app_status import AppStatus @@ -60,7 +63,7 @@ from starsessions import SessionMiddleware else: - class SessionMiddleware: + class SessionMiddleware: # type: ignore[no-redef] pass @@ -71,9 +74,9 @@ class SessionMiddleware: frontend_static_dir = os.path.join(FRONTEND_DIR, "static") -api_app_delta_queue: Queue = None +api_app_delta_queue: Optional[Queue] = None -template = {"ui": {}, "app": {}} +template: dict = {"ui": {}, "app": {}} templates = Jinja2Templates(directory=FRONTEND_DIR) # TODO: try to avoid using global var for state store @@ -98,8 +101,8 @@ class SessionMiddleware: class UIRefresher(Thread): def __init__( self, - api_publish_state_queue, - api_response_queue, + api_publish_state_queue: Queue, + api_response_queue: Queue, refresh_interval: float = 0.1, ) -> None: super().__init__(daemon=True) @@ -108,7 +111,7 @@ def __init__( self._exit_event = Event() self.refresh_interval = refresh_interval - def run(self): + def run(self) -> None: # TODO: Create multiple threads to handle the background logic # TODO: Investigate the use of `parallel=True` try: @@ -116,11 +119,11 @@ def run(self): self.run_once() # Note: Sleep to reduce queue calls. 
sleep(self.refresh_interval) - except Exception as e: - logger.error(traceback.print_exc()) - raise e + except Exception as ex: + traceback.print_exc() + raise ex - def run_once(self): + def run_once(self) -> None: try: global app_status state, app_status = self.api_publish_state_queue.get(timeout=0) @@ -196,9 +199,9 @@ class StateUpdate(BaseModel): @fastapi_service.get("/api/v1/state", response_class=JSONResponse) async def get_state( response: Response, - x_lightning_type: Optional[str] = Header(None), - x_lightning_session_uuid: Optional[str] = Header(None), - x_lightning_session_id: Optional[str] = Header(None), + x_lightning_type: Optional[str] = Header(None), # type: ignore[assignment] + x_lightning_session_uuid: Optional[str] = Header(None), # type: ignore[assignment] + x_lightning_session_id: Optional[str] = Header(None), # type: ignore[assignment] ) -> Mapping: if x_lightning_session_uuid is None: raise Exception("Missing X-Lightning-Session-UUID header") @@ -216,7 +219,7 @@ async def get_state( return state -def _get_component_by_name(component_name: str, state): +def _get_component_by_name(component_name: str, state: dict) -> Union[LightningFlow, LightningWork]: child = state for child_name in component_name.split(".")[1:]: try: @@ -246,8 +249,8 @@ async def get_layout() -> Mapping: @fastapi_service.get("/api/v1/spec", response_class=JSONResponse) async def get_spec( response: Response, - x_lightning_session_uuid: Optional[str] = Header(None), - x_lightning_session_id: Optional[str] = Header(None), + x_lightning_session_uuid: Optional[str] = Header(None), # type: ignore[assignment] + x_lightning_session_id: Optional[str] = Header(None), # type: ignore[assignment] ) -> Union[List, Dict]: if x_lightning_session_uuid is None: raise Exception("Missing X-Lightning-Session-UUID header") @@ -266,9 +269,9 @@ async def get_spec( async def post_delta( request: Request, response: Response, - x_lightning_type: Optional[str] = Header(None), - x_lightning_session_uuid: Optional[str] = Header(None), - x_lightning_session_id: Optional[str] = Header(None), + x_lightning_type: Optional[str] = Header(None), # type: ignore[assignment] + x_lightning_session_uuid: Optional[str] = Header(None), # type: ignore[assignment] + x_lightning_session_id: Optional[str] = Header(None), # type: ignore[assignment] ) -> Optional[Dict]: """This endpoint is used to make an update to the app state using delta diff, mainly used by streamlit to update the state.""" @@ -283,6 +286,7 @@ async def post_delta( return {"status": "failure", "reason": "This endpoint is disabled."} body: Dict = await request.json() + assert api_app_delta_queue is not None api_app_delta_queue.put(_DeltaRequest(delta=Delta(body["delta"]))) @@ -290,9 +294,9 @@ async def post_delta( async def post_state( request: Request, response: Response, - x_lightning_type: Optional[str] = Header(None), - x_lightning_session_uuid: Optional[str] = Header(None), - x_lightning_session_id: Optional[str] = Header(None), + x_lightning_type: Optional[str] = Header(None), # type: ignore[assignment] + x_lightning_session_uuid: Optional[str] = Header(None), # type: ignore[assignment] + x_lightning_session_id: Optional[str] = Header(None), # type: ignore[assignment] ) -> Optional[Dict]: if x_lightning_session_uuid is None: raise Exception("Missing X-Lightning-Session-UUID header") @@ -320,11 +324,12 @@ async def post_state( state = body["state"] last_state = global_app_state_store.get_served_state(x_lightning_session_uuid) deep_diff = DeepDiff(last_state, state, 
verbose_level=2) + assert api_app_delta_queue is not None api_app_delta_queue.put(_DeltaRequest(delta=Delta(deep_diff))) @fastapi_service.put("/api/v1/upload_file/{filename}") -async def upload_file(response: Response, filename: str, uploaded_file: UploadFile = File(...)): +async def upload_file(response: Response, filename: str, uploaded_file: UploadFile = File(...)) -> Union[str, dict]: if not ENABLE_UPLOAD_ENDPOINT: response.status_code = status.HTTP_405_METHOD_NOT_ALLOWED return {"status": "failure", "reason": "This endpoint is disabled."} @@ -346,7 +351,7 @@ async def upload_file(response: Response, filename: str, uploaded_file: UploadFi f.write(content) done = content == b"" - with _context(ComponentContext.WORK): + with _context(str(ComponentContext.WORK)): drive.put(filename) return f"Successfully uploaded '{filename}' to the Drive" @@ -370,7 +375,7 @@ async def get_annotations() -> Union[List, Dict]: @fastapi_service.get("/healthz", status_code=200) -async def healthz(response: Response): +async def healthz(response: Response) -> dict: """Health check endpoint used in the cloud FastAPI servers to check the status periodically.""" # check the queue status only if running in cloud if is_running_in_cloud(): @@ -392,7 +397,7 @@ async def healthz(response: Response): # Creates session websocket connection to notify client about any state changes # The websocket instance needs to be stored based on session id so it is accessible in the api layer @fastapi_service.websocket("/api/v1/ws") -async def websocket_endpoint(websocket: WebSocket): +async def websocket_endpoint(websocket: WebSocket) -> None: await websocket.accept() if not ENABLE_STATE_WEBSOCKET: await websocket.close() @@ -410,7 +415,7 @@ async def websocket_endpoint(websocket: WebSocket): await websocket.close() -async def api_catch_all(request: Request, full_path: str): +async def api_catch_all(request: Request, full_path: str) -> None: raise HTTPException(status_code=404, detail="Not found") @@ -418,22 +423,22 @@ async def api_catch_all(request: Request, full_path: str): fastapi_service.mount("/static", StaticFiles(directory=frontend_static_dir, check_dir=False), name="static") -async def frontend_route(request: Request, full_path: str): +async def frontend_route(request: Request, full_path: str): # type: ignore[no-untyped-def] if "pytest" in sys.modules: return "" return templates.TemplateResponse("index.html", {"request": request}) -def register_global_routes(): +def register_global_routes() -> None: # Catch-all for nonexistent API routes (since we define a catch-all for client-side routing) fastapi_service.get("/api{full_path:path}", response_class=JSONResponse)(api_catch_all) fastapi_service.get("/{full_path:path}", response_class=HTMLResponse)(frontend_route) class LightningUvicornServer(uvicorn.Server): - has_started_queue = None + has_started_queue: Optional[Queue] = None - def run(self, sockets=None): + def run(self, sockets: Optional[List[socket.socket]] = None) -> None: self.config.setup_event_loop() loop = asyncio.get_event_loop() asyncio.ensure_future(self.serve(sockets=sockets)) @@ -441,25 +446,25 @@ def run(self, sockets=None): asyncio.ensure_future(self.check_is_started(self.has_started_queue)) loop.run_forever() - async def check_is_started(self, queue): + async def check_is_started(self, queue: Queue) -> None: while not self.started: await asyncio.sleep(0.1) queue.put("SERVER_HAS_STARTED") def start_server( - api_publish_state_queue, - api_delta_queue, - api_response_queue, + api_publish_state_queue: Queue, + 
api_delta_queue: Queue, + api_response_queue: Queue, has_started_queue: Optional[Queue] = None, - host="127.0.0.1", - port=8000, + host: str = "127.0.0.1", + port: int = 8000, root_path: str = "", uvicorn_run: bool = True, spec: Optional[List] = None, apis: Optional[List[_HttpMethod]] = None, app_state_store: Optional[StateStore] = None, -): +) -> UIRefresher: global api_app_delta_queue global global_app_state_store global app_spec @@ -469,7 +474,7 @@ def start_server( api_app_delta_queue = api_delta_queue if app_state_store is not None: - global_app_state_store = app_state_store + global_app_state_store = app_state_store # type: ignore[assignment] global_app_state_store.add(TEST_SESSION_UUID) diff --git a/tests/tests_app/storage/test_path.py b/tests/tests_app/storage/test_path.py index 78e3c495d0b90..d7c765b407dc9 100644 --- a/tests/tests_app/storage/test_path.py +++ b/tests/tests_app/storage/test_path.py @@ -2,7 +2,6 @@ import os import pathlib import pickle -import sys from re import escape from time import sleep from unittest import mock, TestCase @@ -400,8 +399,7 @@ def run(self): # FIXME(alecmerdler): This test is failing... -@pytest.mark.skipif(sys.platform in ("linux", "win32"), reason="hanging...") -@pytest.mark.xfail(sys.platform == "darwin", strict=False, reason="Timeout >300.0s") # fixme +@pytest.mark.skip(reason="hanging...") def test_multiprocess_path_in_work_and_flow_dynamic(tmpdir): root = DynamicSourceToDestFlow(tmpdir) app = LightningApp(root) diff --git a/tests/tests_app/utilities/test_proxies.py b/tests/tests_app/utilities/test_proxies.py index 633ffb7d036fd..1f0222573f67f 100644 --- a/tests/tests_app/utilities/test_proxies.py +++ b/tests/tests_app/utilities/test_proxies.py @@ -71,7 +71,7 @@ def proxy_setattr(): (True, True), (True, False), (False, True), - pytest.param(False, False, marks=pytest.mark.xfail(sys.platform == "linux", strict=False, reason="failing...")), + pytest.param(False, False, marks=pytest.mark.xfail(strict=False, reason="failing...")), # fixme ], ) @mock.patch("lightning.app.utilities.proxies._Copier", MagicMock()) From f6f9eb320c3ae5d11f4509abce7e31db458a64fd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Apr 2023 19:59:52 +0000 Subject: [PATCH 09/93] Update traitlets requirement from <5.9.0,>=5.3.0 to >=5.3.0,<5.10.0 in /requirements (#17398) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> (cherry picked from commit d8234c5003d2241bfc853fd86b0aa5dfcaa787fe) --- requirements/app/base.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/app/base.txt b/requirements/app/base.txt index b07c5898b9d78..1a2c0f018f159 100644 --- a/requirements/app/base.txt +++ b/requirements/app/base.txt @@ -5,7 +5,7 @@ deepdiff >=5.7.0, <6.2.4 starsessions >=1.2.1, <2.0 # strict fsspec >=2022.5.0, <=2022.7.1 croniter >=1.3.0, <1.4.0 # strict; TODO: for now until we find something more robust. 
-traitlets >=5.3.0, <5.9.0 +traitlets >=5.3.0, <5.10.0 arrow >=1.2.0, <1.2.4 lightning-utilities >=0.7.0, <0.9.0 beautifulsoup4 >=4.8.0, <4.11.2 From 048663abbe65e4f1a24a5dab388c865643a42726 Mon Sep 17 00:00:00 2001 From: Jirka Borovec <6035284+borda@users.noreply.github.com> Date: Mon, 24 Apr 2023 22:56:04 +0200 Subject: [PATCH 10/93] app/tests: skip instead of fail (#17461) (cherry picked from commit 6705bfcc472c41996d00cf9ca78c9b543c08a458) --- .github/workflows/ci-tests-app.yml | 2 +- requirements/app/test.txt | 17 +++++++++-------- requirements/fabric/test.txt | 2 +- requirements/pytorch/test.txt | 2 +- tests/tests_app/core/test_lightning_app.py | 1 + tests/tests_app/core/test_lightning_flow.py | 4 +--- tests/tests_app/core/test_lightning_work.py | 4 +--- tests/tests_app/runners/test_multiprocess.py | 6 ++---- 8 files changed, 17 insertions(+), 21 deletions(-) diff --git a/.github/workflows/ci-tests-app.yml b/.github/workflows/ci-tests-app.yml index e7d94d9434122..2d77eba0b027e 100644 --- a/.github/workflows/ci-tests-app.yml +++ b/.github/workflows/ci-tests-app.yml @@ -47,7 +47,7 @@ jobs: - {os: "ubuntu-20.04", pkg-name: "app", python-version: "3.9", requires: "latest"} - {os: "windows-2022", pkg-name: "app", python-version: "3.8", requires: "latest"} # Timeout: https://stackoverflow.com/a/59076067/4521646 - timeout-minutes: 45 + timeout-minutes: 30 env: PACKAGE_NAME: ${{ matrix.pkg-name }} FREEZE_REQUIREMENTS: ${{ ! (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/heads/release/')) }} diff --git a/requirements/app/test.txt b/requirements/app/test.txt index ec67718cdbc41..081047decda85 100644 --- a/requirements/app/test.txt +++ b/requirements/app/test.txt @@ -1,13 +1,14 @@ -coverage==6.5.0 -pytest==7.2.0 -pytest-timeout==2.1.0 -pytest-cov==4.0.0 +coverage ==6.5.0 +pytest ==7.2.2 +pytest-timeout ==2.1.0 +pytest-cov ==4.0.0 pytest-doctestplus >=0.9.0 -pytest-asyncio==0.20.3 -playwright==1.30.0 +pytest-asyncio ==0.20.3 +pytest-rerunfailures <=11.1.2 +playwright ==1.30.0 httpx -trio<0.22.0 # strict https://github.com/python-trio/trio/pull/2213 +trio <0.22.0 # strict https://github.com/python-trio/trio/pull/2213 pympler psutil -setuptools<67.7.0 +setuptools <67.7.0 requests-mock diff --git a/requirements/fabric/test.txt b/requirements/fabric/test.txt index f08de78fe9d11..8259ea2f6e2a6 100644 --- a/requirements/fabric/test.txt +++ b/requirements/fabric/test.txt @@ -1,5 +1,5 @@ coverage==6.5.0 -pytest==7.2.0 +pytest==7.2.2 pytest-cov==4.0.0 pytest-rerunfailures==10.3 click==8.1.3 diff --git a/requirements/pytorch/test.txt b/requirements/pytorch/test.txt index 86f3f81914e84..d19928cfd35c7 100644 --- a/requirements/pytorch/test.txt +++ b/requirements/pytorch/test.txt @@ -1,5 +1,5 @@ coverage==6.5.0 -pytest==7.2.0 +pytest==7.2.2 pytest-cov==4.0.0 pytest-forked==1.4.0 pytest-rerunfailures==10.3 diff --git a/tests/tests_app/core/test_lightning_app.py b/tests/tests_app/core/test_lightning_app.py index 205a532119df1..54fc38ab0f657 100644 --- a/tests/tests_app/core/test_lightning_app.py +++ b/tests/tests_app/core/test_lightning_app.py @@ -446,6 +446,7 @@ def run(self): pytest.param(0, 10.0, marks=pytest.mark.xfail(strict=False, reason="failing...")), # fixme ], ) +@pytest.mark.flaky(reruns=5) def test_lightning_app_aggregation_speed(default_timeout, queue_type_cls: BaseQueue, sleep_time, expect): """This test validates the `_collect_deltas_from_ui_and_work_queues` can aggregate multiple delta together in a diff --git a/tests/tests_app/core/test_lightning_flow.py 
b/tests/tests_app/core/test_lightning_flow.py index 8b05bc03f3724..9cc773d2a0b4f 100644 --- a/tests/tests_app/core/test_lightning_flow.py +++ b/tests/tests_app/core/test_lightning_flow.py @@ -1,6 +1,5 @@ import os import pickle -import sys from collections import Counter from copy import deepcopy from dataclasses import dataclass @@ -610,8 +609,7 @@ def __init__(self): assert flow.path == flow.lit_path -@pytest.mark.skipif(sys.platform == "win32", reason="Timeout") # fixme -@pytest.mark.xfail(strict=False, reason="No idea why... need to be fixed") # fixme +@pytest.mark.skip(reason="Timeout") # fixme def test_flow_state_change_with_path(): """Test that type changes to a Path attribute are properly reflected within the state.""" diff --git a/tests/tests_app/core/test_lightning_work.py b/tests/tests_app/core/test_lightning_work.py index 6cbe8018c033f..b5a7d7a96e0c8 100644 --- a/tests/tests_app/core/test_lightning_work.py +++ b/tests/tests_app/core/test_lightning_work.py @@ -1,4 +1,3 @@ -import sys from queue import Empty from re import escape from unittest.mock import MagicMock, Mock @@ -254,8 +253,7 @@ def run(self): assert work.path == work.lit_path -@pytest.mark.skipif(sys.platform == "win32", reason="Timeout") -@pytest.mark.xfail(strict=False, reason="No idea why... need to be fixed") # fixme +@pytest.mark.skip(reason="Timeout") # fixme def test_work_state_change_with_path(): """Test that type changes to a Path attribute are properly reflected within the state.""" diff --git a/tests/tests_app/runners/test_multiprocess.py b/tests/tests_app/runners/test_multiprocess.py index a2cbb714e0495..36abb8f1ba450 100644 --- a/tests/tests_app/runners/test_multiprocess.py +++ b/tests/tests_app/runners/test_multiprocess.py @@ -48,8 +48,7 @@ def run(self): self.stop() -@pytest.mark.skipif(sys.platform in ("linux", "win32"), reason="hanging with timeout") # fixme -@pytest.mark.xfail(sys.platform == "darwin", strict=False, reason="failing need to be fixed") # fixme +@pytest.mark.skip(reason="hanging with timeout") # fixme @pytest.mark.parametrize( "cloudspace_host, port, expected_host, expected_target", [ @@ -104,8 +103,7 @@ def run(self): self.stop() -@pytest.mark.skipif(sys.platform == "win32", reason="hanging with timeout") # fixme -@pytest.mark.xfail(sys.platform in ("linux", "darwin"), strict=False, reason="missing output... 
need to be fixed") +@pytest.mark.skip(reason="hanging with timeout") # fixme def test_multiprocess_runtime_sets_context(): """Test that the runtime sets the global variable COMPONENT_CONTEXT in Flow and Work.""" MultiProcessRuntime(LightningApp(ContextFlow())).dispatch() From 0fd5fae230fcb3b6d44e3b6aab21d53c3eb37541 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Tue, 25 Apr 2023 01:31:47 +0200 Subject: [PATCH 11/93] Install project specific dependencies (#17376) Co-authored-by: Jirka Borovec <6035284+Borda@users.noreply.github.com> (cherry picked from commit 039891fb4930de39cc159ad278eb840433ab5053) --- .azure/gpu-tests-fabric.yml | 6 ++++-- .azure/gpu-tests-pytorch.yml | 6 ++++-- .github/workflows/ci-examples-app.yml | 3 ++- .github/workflows/ci-tests-app.yml | 3 ++- .github/workflows/ci-tests-fabric.yml | 3 ++- .github/workflows/ci-tests-pytorch.yml | 4 ++-- .github/workflows/code-checks.yml | 3 +-- requirements/fabric/examples.txt | 3 +-- requirements/fabric/strategies.txt | 2 +- requirements/pytorch/examples.txt | 3 +-- requirements/pytorch/strategies.txt | 3 +-- requirements/typing.txt | 2 ++ src/lightning/__setup__.py | 17 ++++++++++++----- src/lightning_app/__setup__.py | 4 ++-- src/lightning_fabric/__setup__.py | 4 ++-- src/pytorch_lightning/__setup__.py | 4 ++-- 16 files changed, 41 insertions(+), 29 deletions(-) diff --git a/.azure/gpu-tests-fabric.yml b/.azure/gpu-tests-fabric.yml index 64088cc255f9c..2c6699c3a9b31 100644 --- a/.azure/gpu-tests-fabric.yml +++ b/.azure/gpu-tests-fabric.yml @@ -71,7 +71,7 @@ jobs: cuda_ver=$(python -c "import torch ; print(''.join(map(str, torch.version.cuda.split('.')[:2])))") echo "##vso[task.setvariable variable=CUDA_VERSION_MM]$cuda_ver" echo "##vso[task.setvariable variable=TORCH_URL]https://download.pytorch.org/whl/cu${cuda_ver}/torch_stable.html" - scope=$( python -c 'n = "$(PACKAGE_NAME)" ; print(dict(fabric="lightning_fabric").get(n, n))' ) + scope=$(python -c 'n = "$(PACKAGE_NAME)" ; print(dict(fabric="lightning_fabric").get(n, n))') echo "##vso[task.setvariable variable=COVERAGE_SOURCE]$scope" displayName: 'set env. vars' @@ -97,7 +97,9 @@ jobs: done displayName: 'Adjust dependencies' - - bash: pip install -e .[dev,strategies,examples] -U --find-links ${TORCH_URL} + - bash: | + extra=$(python -c "print({'lightning': 'fabric-'}.get('$(PACKAGE_NAME)', ''))") + pip install -e ".[${extra}dev]" pytest-timeout -U --find-links ${TORCH_URL} displayName: 'Install package & dependencies' - bash: | diff --git a/.azure/gpu-tests-pytorch.yml b/.azure/gpu-tests-pytorch.yml index 6d17e19e6dda5..66ef3db4072c5 100644 --- a/.azure/gpu-tests-pytorch.yml +++ b/.azure/gpu-tests-pytorch.yml @@ -82,7 +82,7 @@ jobs: cuda_ver=$(python -c "import torch ; print(''.join(map(str, torch.version.cuda.split('.')[:2])))") echo "##vso[task.setvariable variable=CUDA_VERSION_MM]$cuda_ver" echo "##vso[task.setvariable variable=TORCH_URL]https://download.pytorch.org/whl/cu${cuda_ver}/torch_stable.html" - scope=$( python -c 'n = "$(PACKAGE_NAME)" ; print(dict(pytorch="pytorch_lightning").get(n, n))' ) + scope=$(python -c 'n = "$(PACKAGE_NAME)" ; print(dict(pytorch="pytorch_lightning").get(n, n))') echo "##vso[task.setvariable variable=COVERAGE_SOURCE]$scope" displayName: 'set env. 
vars' @@ -108,7 +108,9 @@ jobs: done displayName: 'Adjust dependencies' - - bash: pip install -e .[extra,test,examples] -U --find-links ${TORCH_URL} + - bash: | + extra=$(python -c "print({'lightning': 'pytorch-'}.get('$(PACKAGE_NAME)', ''))") + pip install -e ".[${extra}extra,${extra}test,${extra}examples]" pytest-timeout -U --find-links ${TORCH_URL} displayName: 'Install package & dependencies' - bash: | diff --git a/.github/workflows/ci-examples-app.yml b/.github/workflows/ci-examples-app.yml index 76c930b093357..f8941add768c9 100644 --- a/.github/workflows/ci-examples-app.yml +++ b/.github/workflows/ci-examples-app.yml @@ -74,8 +74,9 @@ jobs: - name: Install Lightning package & dependencies run: | + extra=$(python -c "print({'lightning': 'app-'}.get('${{ matrix.pkg-name }}', ''))") # do not use `-e` because it will make both packages available since it adds `src` to `sys.path` automatically - pip install .[dev] -U -f ${TORCH_URL} -f ${PYPI_CACHE_DIR} --prefer-binary + pip install ".[${extra}dev]" -U -f ${TORCH_URL} -f ${PYPI_CACHE_DIR} --prefer-binary pip list - name: Dump handy wheels if: github.event_name == 'push' && github.ref == 'refs/heads/master' diff --git a/.github/workflows/ci-tests-app.yml b/.github/workflows/ci-tests-app.yml index 2d77eba0b027e..4342044474c0e 100644 --- a/.github/workflows/ci-tests-app.yml +++ b/.github/workflows/ci-tests-app.yml @@ -81,7 +81,8 @@ jobs: - name: Install package & dependencies run: | python -m pip install -q pip -U - pip install -e .[dev] -U -f ${TORCH_URL} -f ${PYPI_CACHE_DIR} --prefer-binary + extra=$(python -c "print({'lightning': 'app-'}.get('${{ matrix.pkg-name }}', ''))") + pip install -e ".[${extra}dev]" -U -f ${TORCH_URL} -f ${PYPI_CACHE_DIR} --prefer-binary pip list - name: Dump handy wheels if: github.event_name == 'push' && github.ref == 'refs/heads/master' diff --git a/.github/workflows/ci-tests-fabric.yml b/.github/workflows/ci-tests-fabric.yml index f03c7ca8b4e65..8db2de1f8afc6 100644 --- a/.github/workflows/ci-tests-fabric.yml +++ b/.github/workflows/ci-tests-fabric.yml @@ -109,7 +109,8 @@ jobs: - name: Install package & dependencies run: | python -m pip install -q pip -U - pip install -e .[test] "pytest-timeout" -U -f ${TORCH_URL} ${TORCH_PREINSTALL} -f ${PYPI_CACHE_DIR} --prefer-binary + extra=$(python -c "print({'lightning': 'fabric-'}.get('${{ matrix.pkg-name }}', ''))") + pip install -e ".[${extra}test]" "pytest-timeout" -U -f ${TORCH_URL} ${TORCH_PREINSTALL} -f ${PYPI_CACHE_DIR} --prefer-binary pip install -r requirements/fabric/strategies.txt -f ${PYPI_CACHE_DIR} --prefer-binary pip list - name: Dump handy wheels diff --git a/.github/workflows/ci-tests-pytorch.yml b/.github/workflows/ci-tests-pytorch.yml index f999cdc9dfe01..34de1b14f649b 100644 --- a/.github/workflows/ci-tests-pytorch.yml +++ b/.github/workflows/ci-tests-pytorch.yml @@ -116,8 +116,8 @@ jobs: - name: Install package & dependencies run: | python -m pip install -q pip -U - pip install .[extra,test] -U \ - "pytest-timeout" \ + extra=$(python -c "print({'lightning': 'pytorch-'}.get('${{ matrix.pkg-name }}', ''))") + pip install ".[${extra}extra,${extra}test]" -U "pytest-timeout" \ -f ${TORCH_URL} ${TORCH_PREINSTALL} -f ${PYPI_CACHE_DIR} --prefer-binary pip list - name: Dump handy wheels diff --git a/.github/workflows/code-checks.yml b/.github/workflows/code-checks.yml index 3cc066fb380d9..2c3abccda6b7b 100644 --- a/.github/workflows/code-checks.yml +++ b/.github/workflows/code-checks.yml @@ -44,8 +44,7 @@ jobs: env: FREEZE_REQUIREMENTS: 1 run: | - # todo: 
adjust requirements for both code-bases - pip install -e '.[extra,ui,cloud]' -r requirements/typing.txt + pip install -e '.[all]' -r requirements/typing.txt pip list - name: Check typing diff --git a/requirements/fabric/examples.txt b/requirements/fabric/examples.txt index e78b3af71785e..1d72c99e2e2b5 100644 --- a/requirements/fabric/examples.txt +++ b/requirements/fabric/examples.txt @@ -1,6 +1,5 @@ # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package # in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment - -torchvision >=0.10.0, <=0.14.1 +torchvision >=0.12.0, <=0.15.1 torchmetrics >=0.10.0, <0.12.0 lightning-utilities >=0.8.0, <0.9.0 diff --git a/requirements/fabric/strategies.txt b/requirements/fabric/strategies.txt index b1183042d4cbf..f5f689f7cb2cb 100644 --- a/requirements/fabric/strategies.txt +++ b/requirements/fabric/strategies.txt @@ -1,3 +1,3 @@ # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package # in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment -deepspeed >=0.6.0, <=0.8.0; platform_system != "Windows" +deepspeed >=0.6.3, !=0.7.0, <=0.8.0; platform_system != "Windows" diff --git a/requirements/pytorch/examples.txt b/requirements/pytorch/examples.txt index 1c011bc13bac8..f4bf7feca9e98 100644 --- a/requirements/pytorch/examples.txt +++ b/requirements/pytorch/examples.txt @@ -1,7 +1,6 @@ # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package # in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment - -torchvision >=0.11.1, <=0.14.1 +torchvision >=0.12.0, <=0.15.1 gym[classic_control] >=0.17.0, <0.26.3 ipython[all] <8.7.1 torchmetrics >=0.10.0, <0.12.0 diff --git a/requirements/pytorch/strategies.txt b/requirements/pytorch/strategies.txt index dca9c68e37f5c..5dbc533d48505 100644 --- a/requirements/pytorch/strategies.txt +++ b/requirements/pytorch/strategies.txt @@ -1,4 +1,3 @@ # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package # in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment - -deepspeed >=0.6.0, <0.8.0 # TODO: Include 0.8.x after https://github.com/microsoft/DeepSpeed/commit/b587c7e85470329ac25df7c7c2521ff9b2833db7 gets released +deepspeed >=0.6.3, !=0.7.0, <0.8.0; platform_system != "Windows" # TODO: Include 0.8.x after https://github.com/microsoft/DeepSpeed/commit/b587c7e85470329ac25df7c7c2521ff9b2833db7 gets released diff --git a/requirements/typing.txt b/requirements/typing.txt index deeea613ac3de..5ecabf432b776 100644 --- a/requirements/typing.txt +++ b/requirements/typing.txt @@ -6,12 +6,14 @@ types-PyYAML types-bleach types-cachetools types-croniter +types-paramiko types-protobuf types-python-dateutil types-redis types-requests types-setuptools types-six +types-tabulate types-toml types-tzlocal types-ujson diff --git a/src/lightning/__setup__.py b/src/lightning/__setup__.py index bac2a0ede5870..64ba78bbafe0a 100644 --- a/src/lightning/__setup__.py +++ b/src/lightning/__setup__.py @@ -29,21 +29,28 @@ def _load_py_module(name: str, location: str) -> ModuleType: def _prepare_extras() -> Dict[str, Any]: # 
https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras # Define package extras. These are only installed if you specify them. - # From remote, use like `pip install pytorch-lightning[dev, docs]` + # From remote, use like `pip install "lightning[dev, docs]"` # From local copy of repo, use like `pip install ".[dev, docs]"` req_files = [Path(p) for p in glob.glob(os.path.join(_PATH_REQUIREMENTS, "*", "*.txt"))] common_args = {"unfreeze": "none" if _FREEZE_REQUIREMENTS else "major"} + # per-project extras extras = { f"{p.parent.name}-{p.stem}": _ASSISTANT.load_requirements(file_name=p.name, path_dir=p.parent, **common_args) for p in req_files - if p.name not in ("docs.txt", "devel.txt", "base.txt") and not p.parts[-2].startswith("_") + if p.name not in ("docs.txt", "devel.txt", "base.txt") and not p.parent.name.startswith("_") } + # project specific extras groups + extras["fabric-all"] = extras["fabric-strategies"] + extras["fabric-examples"] + extras["fabric-dev"] = extras["fabric-all"] + extras["fabric-test"] + extras["pytorch-all"] = extras["pytorch-extra"] + extras["pytorch-strategies"] + extras["pytorch-examples"] + extras["pytorch-dev"] = extras["pytorch-all"] + extras["pytorch-test"] + extras["app-extra"] = extras["app-cloud"] + extras["app-ui"] + extras["app-components"] + extras["app-all"] = extras["app-extra"] + extras["app-dev"] = extras["app-all"] + extras["app-test"] + # merge per-project extras of the same category, e.g. `app-test` + `fabric-test` for extra in list(extras): name = "-".join(extra.split("-")[1:]) extras[name] = extras.get(name, []) + extras[extra] - extras["extra"] += extras["cloud"] + extras["ui"] + extras["components"] - extras["all"] = extras["extra"] - extras["dev"] = extras["all"] + extras["test"] # + extras['docs'] extras = {name: sorted(set(reqs)) for name, reqs in extras.items()} print("The extras are: ", extras) return extras diff --git a/src/lightning_app/__setup__.py b/src/lightning_app/__setup__.py index 6732eaecc7406..4fd6b36c2a819 100644 --- a/src/lightning_app/__setup__.py +++ b/src/lightning_app/__setup__.py @@ -32,8 +32,8 @@ def _prepare_extras() -> Dict[str, Any]: assistant = _load_assistant() # https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras # Define package extras. These are only installed if you specify them. - # From remote, use like `pip install pytorch-lightning[dev, docs]` - # From local copy of repo, use like `pip install ".[dev, docs]"` + # From remote, use like `pip install "pytorch-lightning[dev, docs]"` + # From local copy of repo, use like `PACKAGE_NAME=app pip install ".[dev, docs]"` req_files = [Path(p) for p in glob.glob(os.path.join(_PATH_REQUIREMENTS, "*.txt"))] common_args = {"path_dir": _PATH_REQUIREMENTS, "unfreeze": "none" if _FREEZE_REQUIREMENTS else "major"} extras = { diff --git a/src/lightning_fabric/__setup__.py b/src/lightning_fabric/__setup__.py index 5bcca9779c448..055e75ca0c199 100644 --- a/src/lightning_fabric/__setup__.py +++ b/src/lightning_fabric/__setup__.py @@ -33,8 +33,8 @@ def _prepare_extras() -> Dict[str, Any]: assistant = _load_assistant() # https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras # Define package extras. These are only installed if you specify them. 
- # From remote, use like `pip install pytorch-lightning[dev, docs]` - # From local copy of repo, use like `pip install ".[dev, docs]"` + # From remote, use like `pip install "lightning-fabric[dev, docs]"` + # From local copy of repo, use like `PACKAGE_NAME=fabric pip install ".[dev, docs]"` common_args = {"path_dir": _PATH_REQUIREMENTS, "unfreeze": "none" if _FREEZE_REQUIREMENTS else "all"} req_files = [Path(p) for p in glob.glob(os.path.join(_PATH_REQUIREMENTS, "*.txt"))] extras = { diff --git a/src/pytorch_lightning/__setup__.py b/src/pytorch_lightning/__setup__.py index 251adad8b75d2..8835e3ac65c51 100644 --- a/src/pytorch_lightning/__setup__.py +++ b/src/pytorch_lightning/__setup__.py @@ -33,8 +33,8 @@ def _prepare_extras() -> Dict[str, Any]: assistant = _load_assistant() # https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras # Define package extras. These are only installed if you specify them. - # From remote, use like `pip install pytorch-lightning[dev, docs]` - # From local copy of repo, use like `pip install ".[dev, docs]"` + # From remote, use like `pip install "pytorch-lightning[dev, docs]"` + # From local copy of repo, use like `PACKAGE_NAME=pytorch pip install ".[dev, docs]"` common_args = {"path_dir": _PATH_REQUIREMENTS, "unfreeze": "none" if _FREEZE_REQUIREMENTS else "all"} req_files = [Path(p) for p in glob.glob(os.path.join(_PATH_REQUIREMENTS, "*.txt"))] extras = { From abfc286f03be09593fab4bfdac91437482072f0c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Tue, 25 Apr 2023 03:09:57 +0200 Subject: [PATCH 12/93] Simplify strategy installation in CI (#17347) (cherry picked from commit 9627121da7ae9d3168fcc4f763eed99edabb7acc) --- .azure/gpu-benchmark.yml | 2 -- .azure/gpu-tests-fabric.yml | 2 -- .azure/gpu-tests-pytorch.yml | 29 ++++--------------- dockers/base-cuda/Dockerfile | 26 ++++------------- .../pytorch/check-avail-strategies.py | 2 -- 5 files changed, 12 insertions(+), 49 deletions(-) delete mode 100644 requirements/pytorch/check-avail-strategies.py diff --git a/.azure/gpu-benchmark.yml b/.azure/gpu-benchmark.yml index 34942c8928f53..f9580fb595e59 100644 --- a/.azure/gpu-benchmark.yml +++ b/.azure/gpu-benchmark.yml @@ -59,7 +59,6 @@ jobs: - bash: | echo $CUDA_VISIBLE_DEVICES echo $TORCH_URL - lspci | egrep 'VGA|3D' whereis nvidia nvidia-smi which python && which pip @@ -76,7 +75,6 @@ jobs: - bash: | set -e - pip list python requirements/collect_env_details.py python -c "import torch ; mgpu = torch.cuda.device_count() ; assert mgpu == 2, f'GPU: {mgpu}'" displayName: 'Env details' diff --git a/.azure/gpu-tests-fabric.yml b/.azure/gpu-tests-fabric.yml index 2c6699c3a9b31..c4efcf3383520 100644 --- a/.azure/gpu-tests-fabric.yml +++ b/.azure/gpu-tests-fabric.yml @@ -81,7 +81,6 @@ jobs: echo $CUDA_VERSION_MM echo $TORCH_URL echo $COVERAGE_SOURCE - lspci | egrep 'VGA|3D' whereis nvidia nvidia-smi which python && which pip @@ -104,7 +103,6 @@ jobs: - bash: | set -e - pip list python requirements/collect_env_details.py python -c "import torch ; mgpu = torch.cuda.device_count() ; assert mgpu == 2, f'GPU: {mgpu}'" displayName: 'Env details' diff --git a/.azure/gpu-tests-pytorch.yml b/.azure/gpu-tests-pytorch.yml index 66ef3db4072c5..5df5dad4b4179 100644 --- a/.azure/gpu-tests-pytorch.yml +++ b/.azure/gpu-tests-pytorch.yml @@ -51,10 +51,6 @@ jobs: cancelTimeoutInMinutes: "2" strategy: matrix: - 'PyTorch & strategies': # this uses torch 1.12 as not all strategies support 1.13 yet - image: 
"pytorchlightning/pytorch_lightning:base-cuda-py3.9-torch1.12-cuda11.6.1" - scope: "strategies" - PACKAGE_NAME: "pytorch" 'PyTorch | latest': image: "pytorchlightning/pytorch_lightning:base-cuda-py3.10-torch2.0-cuda11.7.1" scope: "" @@ -92,7 +88,6 @@ jobs: echo $CUDA_VERSION_MM echo $TORCH_URL echo $COVERAGE_SOURCE - lspci | egrep 'VGA|3D' whereis nvidia nvidia-smi which python && which pip @@ -106,31 +101,20 @@ jobs: for fpath in `ls requirements/**/*.txt`; do \ python ./requirements/pytorch/adjust-versions.py $fpath ${PYTORCH_VERSION}; \ done + # prune packages with installation issues + pip install -q -r .actions/requirements.txt + python .actions/assistant.py requirements_prune_pkgs \ + --packages="[lightning-colossalai,lightning-bagua]" \ + --req_files="[requirements/_integrations/strategies.txt]" displayName: 'Adjust dependencies' - bash: | extra=$(python -c "print({'lightning': 'pytorch-'}.get('$(PACKAGE_NAME)', ''))") - pip install -e ".[${extra}extra,${extra}test,${extra}examples]" pytest-timeout -U --find-links ${TORCH_URL} + pip install -e ".[${extra}dev]" -r requirements/_integrations/strategies.txt pytest-timeout -U --find-links ${TORCH_URL} displayName: 'Install package & dependencies' - - bash: | - pip uninstall -y -r requirements/pytorch/strategies.txt \ - -r requirements/_integrations/strategies.txt - condition: ne(variables['scope'], 'strategies') - displayName: 'Uninstall strategies' - - bash: | set -e - pip install -r requirements/pytorch/strategies.txt \ - -r requirements/_integrations/strategies.txt \ - --find-links ${TORCH_URL} - python requirements/pytorch/check-avail-strategies.py - condition: eq(variables['scope'], 'strategies') - displayName: 'Install strategies' - - - bash: | - set -e - pip list python requirements/collect_env_details.py python -c "import torch ; mgpu = torch.cuda.device_count() ; assert mgpu == 2, f'GPU: {mgpu}'" python requirements/pytorch/check-avail-extras.py @@ -145,7 +129,6 @@ jobs: displayName: 'Testing: PyTorch doctests' - bash: | - pip install -q -r .actions/requirements.txt python .actions/assistant.py copy_replace_imports --source_dir="./tests/tests_pytorch" \ --source_import="lightning.fabric,lightning.pytorch" \ --target_import="lightning_fabric,pytorch_lightning" diff --git a/dockers/base-cuda/Dockerfile b/dockers/base-cuda/Dockerfile index 9a12bb8c55eea..6c83762fad9ca 100644 --- a/dockers/base-cuda/Dockerfile +++ b/dockers/base-cuda/Dockerfile @@ -76,8 +76,8 @@ RUN \ rm -rf /root/.cache && \ rm -rf /var/lib/apt/lists/* -COPY ./requirements/pytorch/ ./requirements/pytorch/ -COPY ./.actions/assistant.py assistant.py +COPY requirements/pytorch/ requirements/pytorch/ +COPY requirements/_integrations/ requirements/_integrations/ ENV PYTHONPATH="/usr/lib/python${PYTHON_VERSION}/site-packages" @@ -85,31 +85,17 @@ RUN \ wget https://bootstrap.pypa.io/get-pip.py --progress=bar:force:noscroll --no-check-certificate && \ python${PYTHON_VERSION} get-pip.py && \ rm get-pip.py && \ - pip install -q fire && \ # Disable cache \ - export CUDA_VERSION_MM=$(python -c "print(''.join('$CUDA_VERSION'.split('.')[:2]))") && \ pip config set global.cache-dir false && \ # set particular PyTorch version \ for fpath in `ls requirements/**/*.txt`; do \ python ./requirements/pytorch/adjust-versions.py $fpath ${PYTORCH_VERSION}; \ done && \ - - rm assistant.py && \ - # Install base requirements \ - CUDA_VERSION_MM=${CUDA_VERSION%.*} && \ - pip install -r requirements/pytorch/base.txt \ - --no-cache-dir --find-links 
"https://download.pytorch.org/whl/cu${CUDA_VERSION_MM//'.'/''}/torch_stable.html" - - -RUN \ - # install rest of strategies CUDA_VERSION_MM=${CUDA_VERSION%.*} && \ - cat requirements/pytorch/strategies.txt && \ - pip install -r requirements/pytorch/devel.txt -r requirements/pytorch/strategies.txt \ - --no-cache-dir --find-links "https://download.pytorch.org/whl/cu${CUDA_VERSION_MM//'.'/''}/torch_stable.html" - -COPY requirements/pytorch/check-avail-extras.py check-avail-extras.py -COPY requirements/pytorch/check-avail-strategies.py check-avail-strategies.py + pip install \ + -r requirements/pytorch/devel.txt \ + -r requirements/pytorch/strategies.txt \ + --find-links "https://download.pytorch.org/whl/cu${CUDA_VERSION_MM//'.'/''}/torch_stable.html" RUN \ # Show what we have diff --git a/requirements/pytorch/check-avail-strategies.py b/requirements/pytorch/check-avail-strategies.py deleted file mode 100644 index af7fee95ccd08..0000000000000 --- a/requirements/pytorch/check-avail-strategies.py +++ /dev/null @@ -1,2 +0,0 @@ -if __name__ == "__main__": - import deepspeed # noqa: F401 From 88be8cb35c19488d6ae23d2b8f8854930c8dbc27 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 25 Apr 2023 17:26:49 +0200 Subject: [PATCH 13/93] Update deepspeed requirement support window (#16813) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Carlos Mocholí Co-authored-by: Jirka (cherry picked from commit b792c90ea7148d61af192fde6c338ebbd355702f) --- requirements/fabric/strategies.txt | 2 +- requirements/pytorch/strategies.txt | 2 +- src/lightning/fabric/strategies/deepspeed.py | 13 +------------ src/lightning/fabric/wrappers.py | 15 +-------------- tests/tests_fabric/test_wrappers.py | 4 ---- .../strategies/test_deepspeed_strategy.py | 4 +++- 6 files changed, 7 insertions(+), 33 deletions(-) diff --git a/requirements/fabric/strategies.txt b/requirements/fabric/strategies.txt index f5f689f7cb2cb..25444361719b0 100644 --- a/requirements/fabric/strategies.txt +++ b/requirements/fabric/strategies.txt @@ -1,3 +1,3 @@ # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package # in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment -deepspeed >=0.6.3, !=0.7.0, <=0.8.0; platform_system != "Windows" +deepspeed >=0.8.2, <=0.9.1; platform_system != "Windows" diff --git a/requirements/pytorch/strategies.txt b/requirements/pytorch/strategies.txt index 5dbc533d48505..25444361719b0 100644 --- a/requirements/pytorch/strategies.txt +++ b/requirements/pytorch/strategies.txt @@ -1,3 +1,3 @@ # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package # in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment -deepspeed >=0.6.3, !=0.7.0, <0.8.0; platform_system != "Windows" # TODO: Include 0.8.x after https://github.com/microsoft/DeepSpeed/commit/b587c7e85470329ac25df7c7c2521ff9b2833db7 gets released +deepspeed >=0.8.2, <=0.9.1; platform_system != "Windows" diff --git a/src/lightning/fabric/strategies/deepspeed.py b/src/lightning/fabric/strategies/deepspeed.py index ed040f106901c..34a0c68e775f3 100644 --- a/src/lightning/fabric/strategies/deepspeed.py +++ b/src/lightning/fabric/strategies/deepspeed.py @@ -32,22 +32,11 @@ from lightning.fabric.strategies.ddp 
import DDPStrategy from lightning.fabric.strategies.strategy import _Sharded from lightning.fabric.utilities.distributed import log -from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_0 from lightning.fabric.utilities.rank_zero import rank_zero_info, rank_zero_only, rank_zero_warn from lightning.fabric.utilities.seed import reset_seed from lightning.fabric.utilities.types import _PATH -_DEEPSPEED_AVAILABLE = ( - # DeepSpeed fails under 0.8.2 with torch 2.0: https://github.com/microsoft/DeepSpeed/pull/2863 - RequirementCache("deepspeed>=0.8.2") - or ( - not _TORCH_GREATER_EQUAL_2_0 - and RequirementCache("deepspeed") - # check packaging because of https://github.com/microsoft/DeepSpeed/pull/2771 - # remove the packaging check when min version is >=0.8.1 - and RequirementCache("packaging>=20.0") - ) -) +_DEEPSPEED_AVAILABLE = RequirementCache("deepspeed") if TYPE_CHECKING and _DEEPSPEED_AVAILABLE: import deepspeed diff --git a/src/lightning/fabric/wrappers.py b/src/lightning/fabric/wrappers.py index 74889328b68fc..96ee7aedea205 100644 --- a/src/lightning/fabric/wrappers.py +++ b/src/lightning/fabric/wrappers.py @@ -49,9 +49,7 @@ def __init__(self, optimizer: Optimizer, strategy: Strategy) -> None: """ # `__del__` is skipped in case the optimizer has implemented custom destructor logic which we would # not want to call on destruction of the `_FabricOptimizer - self.__dict__ = { - k: v for k, v in optimizer.__dict__.items() if k not in ("state_dict", "step", "zero_grad", "__del__") - } + self.__dict__ = {k: v for k, v in optimizer.__dict__.items() if k not in ("state_dict", "step", "__del__")} self.__class__ = type("Fabric" + optimizer.__class__.__name__, (self.__class__, optimizer.__class__), {}) self._optimizer = optimizer self._strategy = strategy @@ -75,10 +73,6 @@ def step(self, closure: Optional[Callable] = None) -> Any: **kwargs, ) - def zero_grad(self, **kwargs: Any) -> None: - kwargs = _process_optimizer_zero_grad_kwargs(self.optimizer, kwargs) - self.optimizer.zero_grad(**kwargs) - class _FabricModule(_DeviceDtypeModuleMixin): def __init__( @@ -220,13 +214,6 @@ def __iter__(self) -> Union[Iterator[Any], Generator[Any, None, None]]: yield move_data_to_device(item, self._device) -def _process_optimizer_zero_grad_kwargs(optimizer: Optimizer, kwargs: Dict[str, Any]) -> Dict[str, Any]: - if "set_to_none" in kwargs and "set_grads_to_None" in inspect.signature(optimizer.zero_grad).parameters: - # Some optimizers out there, for example DeepSpeedZeroOptimizer, use a different name than PyTorch - kwargs["set_grads_to_None"] = kwargs.pop("set_to_none") - return kwargs - - def _unwrap_objects(collection: Any) -> Any: def _unwrap( obj: Union[_FabricModule, _FabricOptimizer, _FabricDataLoader] diff --git a/tests/tests_fabric/test_wrappers.py b/tests/tests_fabric/test_wrappers.py index c5e0a69d6957a..2d9554251eaf1 100644 --- a/tests/tests_fabric/test_wrappers.py +++ b/tests/tests_fabric/test_wrappers.py @@ -358,10 +358,6 @@ def zero_grad(self, set_grads_to_None=False): fabric_optimizer = _FabricOptimizer(optimizer=optimizer, strategy=Mock()) fabric_optimizer.zero_grad() custom_zero_grad.assert_called_with(set_grads_to_None=False) - fabric_optimizer.zero_grad(set_to_none=False) - custom_zero_grad.assert_called_with(set_grads_to_None=False) - fabric_optimizer.zero_grad(set_to_none=True) - custom_zero_grad.assert_called_with(set_grads_to_None=True) def test_is_wrapped(): diff --git a/tests/tests_pytorch/strategies/test_deepspeed_strategy.py 
b/tests/tests_pytorch/strategies/test_deepspeed_strategy.py index 152319efba8be..f4ba5952cd812 100644 --- a/tests/tests_pytorch/strategies/test_deepspeed_strategy.py +++ b/tests/tests_pytorch/strategies/test_deepspeed_strategy.py @@ -847,13 +847,15 @@ def on_train_batch_start(self, trainer, pl_module: LightningModule, batch: Any, model = ModelParallelClassificationModel() dm = ClassifDataModule() verification_callback = VerificationCallback() + strategy = DeepSpeedStrategy(stage=2, offload_optimizer=offload_optimizer) + strategy.config["zero_force_ds_cpu_optimizer"] = False trainer = Trainer( default_root_dir=tmpdir, # TODO: this test fails with max_epochs >1 as there are leftover batches per epoch. # there's divergence in how Lightning handles the last batch of the epoch with how DeepSpeed does it. # we step the optimizers on the last batch but DeepSpeed keeps the accumulation for the next epoch max_epochs=1, - strategy=DeepSpeedStrategy(stage=2, offload_optimizer=offload_optimizer), + strategy=strategy, accelerator="gpu", devices=2, limit_train_batches=5, From da3a102f3357b45ee892f7effd1a27c443dd2a62 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Tue, 25 Apr 2023 17:46:28 +0200 Subject: [PATCH 14/93] Fallback to module available check for mlflow (#17467) (cherry picked from commit b0af0eede27531a3d7d9940b8e6ed205b03e6218) --- src/lightning/pytorch/loggers/mlflow.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/src/lightning/pytorch/loggers/mlflow.py b/src/lightning/pytorch/loggers/mlflow.py index 99d620958f107..337d92bf85674 100644 --- a/src/lightning/pytorch/loggers/mlflow.py +++ b/src/lightning/pytorch/loggers/mlflow.py @@ -25,7 +25,7 @@ from typing import Any, Dict, List, Literal, Mapping, Optional, Union import yaml -from lightning_utilities.core.imports import RequirementCache +from lightning_utilities.core.imports import module_available, RequirementCache from torch import Tensor from lightning.fabric.utilities.logger import _add_prefix, _convert_params, _flatten_dict @@ -36,9 +36,8 @@ log = logging.getLogger(__name__) LOCAL_FILE_URI_PREFIX = "file:" -_MLFLOW_FULL_AVAILABLE = RequirementCache("mlflow>=1.0.0") -_MLFLOW_SKINNY_AVAILABLE = RequirementCache("mlflow-skinny>=1.0.0") -_MLFLOW_AVAILABLE = _MLFLOW_FULL_AVAILABLE or _MLFLOW_SKINNY_AVAILABLE +_MLFLOW_AVAILABLE = RequirementCache("mlflow>=1.0.0") or module_available("mlflow") + if _MLFLOW_AVAILABLE: from mlflow.entities import Metric, Param from mlflow.tracking import context, MlflowClient @@ -149,9 +148,7 @@ def __init__( run_id: Optional[str] = None, ): if not _MLFLOW_AVAILABLE: - raise ModuleNotFoundError( - f"{_MLFLOW_FULL_AVAILABLE!s}. 
You can also try {_MLFLOW_SKINNY_AVAILABLE.requirement!r}" - ) + raise ModuleNotFoundError(str(_MLFLOW_AVAILABLE)) super().__init__() if not tracking_uri: tracking_uri = f"{LOCAL_FILE_URI_PREFIX}{save_dir}" From 276d4b4e27739d324d0b0d1f53b635ea40352bfc Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 26 Apr 2023 21:37:41 +0200 Subject: [PATCH 15/93] [pre-commit.ci] pre-commit suggestions (#17271) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit suggestions updates: - [github.com/PyCQA/docformatter: v1.4 → v1.6.0](https://github.com/PyCQA/docformatter/compare/v1.4...v1.6.0) - [github.com/psf/black: 22.12.0 → 23.3.0](https://github.com/psf/black/compare/22.12.0...23.3.0) - [github.com/charliermarsh/ruff-pre-commit: v0.0.237 → v0.0.260](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.237...v0.0.260) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update * apply * fixing * docs/lines --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Jirka Borovec <6035284+Borda@users.noreply.github.com> Co-authored-by: Jirka (cherry picked from commit 91cb4b9b87bdd59b0322dfad094e66a06ece4ff0) --- .pre-commit-config.yaml | 6 +-- .../examples/dag/dag_from_scratch.rst | 2 +- examples/app/boring/app_dynamic.py | 1 - examples/app/boring/scripts/serve.py | 1 - examples/app/dag/app.py | 1 - examples/fabric/build_your_own_trainer/run.py | 1 - .../fabric/build_your_own_trainer/trainer.py | 12 ++---- .../fabric/image_classifier/train_fabric.py | 2 - .../fabric/image_classifier/train_torch.py | 1 - examples/fabric/kfold_cv/train_fabric.py | 1 - .../computer_vision_fine_tuning.py | 2 - .../domain_templates/reinforce_learn_Qnet.py | 1 - .../domain_templates/reinforce_learn_ppo.py | 1 - src/lightning/app/cli/cmd_clusters.py | 1 - src/lightning/app/cli/cmd_install.py | 2 - src/lightning/app/cli/commands/cd.py | 2 - src/lightning/app/cli/commands/cp.py | 4 -- src/lightning/app/cli/commands/logs.py | 14 +++---- src/lightning/app/cli/commands/ls.py | 12 +----- src/lightning/app/cli/commands/pwd.py | 2 - src/lightning/app/cli/commands/rm.py | 2 - src/lightning/app/cli/connect/app.py | 2 - src/lightning/app/cli/connect/data.py | 2 - src/lightning/app/cli/lightning_cli.py | 23 ++++------- .../app/components/multi_node/base.py | 1 - .../components/multi_node/pytorch_spawn.py | 1 - src/lightning/app/core/app.py | 2 - src/lightning/app/core/flow.py | 3 -- src/lightning/app/core/queues.py | 1 - .../app/frontend/just_py/just_py_base.py | 2 +- .../app/frontend/panel/app_state_comm.py | 1 - .../app/frontend/panel/app_state_watcher.py | 1 - .../app/frontend/panel/panel_frontend.py | 1 - .../frontend/panel/panel_serve_render_fn.py | 1 - src/lightning/app/frontend/streamlit_base.py | 1 - src/lightning/app/frontend/utils.py | 1 - src/lightning/app/runners/cloud.py | 6 +-- src/lightning/app/storage/filesystem.py | 1 - src/lightning/app/storage/orchestrator.py | 1 - 
src/lightning/app/testing/testing.py | 2 - src/lightning/app/utilities/app_commands.py | 15 +++---- src/lightning/app/utilities/app_helpers.py | 1 - src/lightning/app/utilities/app_logs.py | 1 - src/lightning/app/utilities/cli_helpers.py | 2 - src/lightning/app/utilities/introspection.py | 38 ++++++++---------- src/lightning/app/utilities/load_app.py | 1 - .../app/utilities/packaging/tarfile.py | 1 - src/lightning/app/utilities/port.py | 1 - src/lightning/app/utilities/proxies.py | 1 - src/lightning/app/utilities/safe_pickle.py | 1 - src/lightning/app/utilities/tree.py | 1 - src/lightning/fabric/connector.py | 1 - src/lightning/fabric/fabric.py | 1 - src/lightning/fabric/loggers/csv_logs.py | 6 +-- src/lightning/fabric/loggers/tensorboard.py | 3 +- .../fabric/plugins/environments/slurm.py | 5 ++- src/lightning/fabric/plugins/precision/tpu.py | 1 - .../fabric/strategies/launchers/base.py | 3 +- .../fabric/strategies/launchers/launcher.py | 3 +- .../fabric/strategies/launchers/xla.py | 4 +- src/lightning/fabric/utilities/apply_func.py | 1 - src/lightning/fabric/utilities/rank_zero.py | 1 - src/lightning/pytorch/callbacks/callback.py | 14 ++----- src/lightning/pytorch/callbacks/checkpoint.py | 8 ++-- .../pytorch/callbacks/early_stopping.py | 8 +--- src/lightning/pytorch/callbacks/finetuning.py | 10 +---- .../gradient_accumulation_scheduler.py | 3 +- .../pytorch/callbacks/lambda_function.py | 8 +--- src/lightning/pytorch/callbacks/lr_monitor.py | 4 +- .../pytorch/callbacks/model_checkpoint.py | 1 - .../pytorch/callbacks/model_summary.py | 3 +- .../callbacks/progress/progress_bar.py | 18 ++++----- .../callbacks/progress/rich_progress.py | 1 - src/lightning/pytorch/callbacks/pruning.py | 8 +--- .../pytorch/callbacks/rich_model_summary.py | 6 +-- .../callbacks/stochastic_weight_avg.py | 10 +---- src/lightning/pytorch/callbacks/timer.py | 5 +-- src/lightning/pytorch/core/datamodule.py | 3 +- src/lightning/pytorch/core/hooks.py | 11 ++---- .../pytorch/core/mixins/hparams_mixin.py | 1 - src/lightning/pytorch/core/module.py | 39 +++++++------------ src/lightning/pytorch/core/saving.py | 1 - src/lightning/pytorch/loggers/comet.py | 5 +-- src/lightning/pytorch/loggers/csv_logs.py | 6 +-- src/lightning/pytorch/loggers/mlflow.py | 1 - src/lightning/pytorch/loggers/neptune.py | 6 +-- src/lightning/pytorch/loggers/tensorboard.py | 3 +- src/lightning/pytorch/loggers/utilities.py | 1 - src/lightning/pytorch/loggers/wandb.py | 4 +- .../pytorch/loops/optimization/automatic.py | 1 - .../pytorch/loops/optimization/closure.py | 4 +- .../pytorch/overrides/distributed.py | 4 +- src/lightning/pytorch/profilers/pytorch.py | 1 - src/lightning/pytorch/profilers/simple.py | 1 - src/lightning/pytorch/profilers/xla.py | 1 - .../pytorch/serve/servable_module.py | 4 +- src/lightning/pytorch/strategies/deepspeed.py | 1 - src/lightning/pytorch/strategies/fsdp.py | 1 - .../pytorch/strategies/hpu_parallel.py | 2 - .../strategies/launchers/subprocess_script.py | 3 +- .../pytorch/strategies/launchers/xla.py | 4 +- .../pytorch/strategies/single_hpu.py | 1 - src/lightning/pytorch/trainer/call.py | 5 +-- .../trainer/configuration_validator.py | 4 +- .../connectors/checkpoint_connector.py | 3 +- src/lightning/pytorch/trainer/trainer.py | 25 ++++-------- src/lightning/pytorch/utilities/parsing.py | 1 - .../integrations_app/flagship/test_flashy.py | 2 - .../local/test_core_features_app.py | 1 - tests/integrations_app/public/test_app_dag.py | 1 - .../public/test_commands_and_api.py | 1 - tests/integrations_app/public/test_drive.py | 1 - 
tests/integrations_app/public/test_gradio.py | 2 - tests/integrations_app/public/test_layout.py | 1 - tests/integrations_app/public/test_payload.py | 1 - .../public/test_pickle_or_not.py | 1 - .../public/test_quick_start.py | 1 - tests/integrations_app/public/test_scripts.py | 1 - tests/tests_app/cli/test_cd.py | 1 - tests/tests_app/cli/test_cloud_cli.py | 2 - tests/tests_app/cli/test_cmd_init.py | 1 - tests/tests_app/cli/test_cmd_install.py | 5 --- .../cli/test_cmd_show_cluster_logs.py | 1 - tests/tests_app/cli/test_cmd_show_logs.py | 1 - tests/tests_app/cli/test_connect_data.py | 2 - tests/tests_app/cli/test_rm.py | 1 - tests/tests_app/cli/test_run_app.py | 2 - .../components/database/test_client_server.py | 3 -- .../components/multi_node/test_fabric.py | 1 - .../components/multi_node/test_trainer.py | 1 - .../serve/test_model_inference_api.py | 2 - tests/tests_app/core/test_lightning_api.py | 2 - tests/tests_app/core/test_lightning_app.py | 7 +--- tests/tests_app/core/test_lightning_flow.py | 5 +-- tests/tests_app/core/test_lightning_work.py | 3 -- tests/tests_app/core/test_queues.py | 1 - .../frontend/just_py/test_just_py.py | 1 - tests/tests_app/frontend/panel/app_panel.py | 1 - tests/tests_app/plugin/test_plugin.py | 1 - tests/tests_app/runners/test_cloud.py | 2 +- tests/tests_app/runners/test_runtime.py | 1 - tests/tests_app/source_code/test_local.py | 1 - tests/tests_app/structures/test_structures.py | 3 -- tests/tests_app/utilities/test_app_helpers.py | 1 - tests/tests_app/utilities/test_git.py | 1 - .../tests_app/utilities/test_introspection.py | 1 - tests/tests_app/utilities/test_network.py | 1 - tests/tests_app/utilities/test_proxies.py | 2 - tests/tests_app/utilities/test_state.py | 2 - tests/tests_fabric/accelerators/test_cuda.py | 2 - tests/tests_fabric/loggers/test_csv.py | 1 - .../tests_fabric/loggers/test_tensorboard.py | 1 - .../plugins/environments/test_mpi.py | 1 - .../plugins/precision/test_amp_integration.py | 1 - .../tests_fabric/strategies/test_deepspeed.py | 2 - .../strategies/test_deepspeed_integration.py | 2 - .../tests_fabric/strategies/test_registry.py | 1 - tests/tests_fabric/test_fabric.py | 3 -- tests/tests_fabric/test_wrappers.py | 2 - tests/tests_fabric/utilities/test_logger.py | 4 -- tests/tests_pytorch/accelerators/test_hpu.py | 5 --- tests/tests_pytorch/accelerators/test_ipu.py | 2 - tests/tests_pytorch/accelerators/test_tpu.py | 2 - .../benchmarks/test_basic_parity.py | 1 - .../progress/test_tqdm_progress_bar.py | 2 - .../tests_pytorch/callbacks/test_callbacks.py | 1 - .../callbacks/test_device_stats_monitor.py | 4 -- .../callbacks/test_early_stopping.py | 2 - .../callbacks/test_finetuning_callback.py | 5 --- .../callbacks/test_model_summary.py | 1 - .../test_checkpoint_callback_frequency.py | 1 - .../checkpointing/test_model_checkpoint.py | 1 - .../checkpointing/test_trainer_checkpoint.py | 1 - .../core/test_lightning_module.py | 3 -- .../core/test_metric_result_integration.py | 2 - tests/tests_pytorch/loggers/test_all.py | 1 - tests/tests_pytorch/loggers/test_comet.py | 5 --- tests/tests_pytorch/loggers/test_csv.py | 3 -- tests/tests_pytorch/loggers/test_logger.py | 1 - tests/tests_pytorch/loggers/test_mlflow.py | 4 -- tests/tests_pytorch/loggers/test_neptune.py | 2 + .../tests_pytorch/loggers/test_tensorboard.py | 4 -- tests/tests_pytorch/loggers/test_wandb.py | 2 - tests/tests_pytorch/loops/test_all.py | 1 - tests/tests_pytorch/loops/test_fetchers.py | 2 - .../tests_pytorch/loops/test_flow_warnings.py | 1 - .../models/test_fabric_integration.py | 
2 - tests/tests_pytorch/models/test_hooks.py | 1 - tests/tests_pytorch/models/test_hparams.py | 3 -- .../tests_pytorch/models/test_torchscript.py | 1 - tests/tests_pytorch/models/test_tpu.py | 1 - .../overrides/test_distributed.py | 1 - .../tests_pytorch/profilers/test_profiler.py | 7 ---- .../strategies/test_ddp_spawn_strategy.py | 1 - .../strategies/test_deepspeed_strategy.py | 6 +-- tests/tests_pytorch/strategies/test_fsdp.py | 2 - .../tests_pytorch/strategies/test_registry.py | 2 - tests/tests_pytorch/test_cli.py | 4 -- .../connectors/test_accelerator_connector.py | 1 - .../connectors/test_checkpoint_connector.py | 1 - .../trainer/connectors/test_data_connector.py | 2 - .../trainer/flags/test_fast_dev_run.py | 1 - .../trainer/flags/test_overfit_batches.py | 1 - .../logging_/test_eval_loop_logging.py | 3 -- .../trainer/logging_/test_logger_connector.py | 1 - .../logging_/test_train_loop_logging.py | 2 - .../optimization/test_manual_optimization.py | 11 ------ .../trainer/optimization/test_optimizers.py | 1 - .../trainer/properties/test_get_model.py | 3 -- .../tests_pytorch/trainer/test_dataloaders.py | 10 ----- tests/tests_pytorch/trainer/test_trainer.py | 8 ---- tests/tests_pytorch/tuner/test_lr_finder.py | 3 -- .../utilities/migration/test_utils.py | 2 - .../utilities/test_all_gather_grad.py | 2 - .../utilities/test_parameter_tying.py | 1 - 215 files changed, 143 insertions(+), 562 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index bde8e42de2f7f..1fe9773a1942b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -57,7 +57,7 @@ repos: name: Upgrade code - repo: https://github.com/PyCQA/docformatter - rev: v1.4 + rev: v1.6.3 hooks: - id: docformatter args: [--in-place, --wrap-summaries=115, --wrap-descriptions=120] @@ -76,7 +76,7 @@ repos: exclude: docs/source-app - repo: https://github.com/psf/black - rev: 22.12.0 + rev: 23.3.0 hooks: - id: black name: Format code @@ -106,7 +106,7 @@ repos: )$ - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: 'v0.0.261' + rev: 'v0.0.262' hooks: - id: ruff args: ["--fix"] diff --git a/docs/source-app/examples/dag/dag_from_scratch.rst b/docs/source-app/examples/dag/dag_from_scratch.rst index e8eef7d1e0128..ac843ab25dce9 100644 --- a/docs/source-app/examples/dag/dag_from_scratch.rst +++ b/docs/source-app/examples/dag/dag_from_scratch.rst @@ -50,4 +50,4 @@ Step 2: Define the scheduling ***************************** .. 
literalinclude:: ../../../../examples/app/dag/app.py - :lines: 103-132 + :lines: 103-131 diff --git a/examples/app/boring/app_dynamic.py b/examples/app/boring/app_dynamic.py index dc2d87345fcb8..ea66cc2ddad6d 100644 --- a/examples/app/boring/app_dynamic.py +++ b/examples/app/boring/app_dynamic.py @@ -51,7 +51,6 @@ def run(self): self.dict["src_w"].run() if self.dict["src_w"].has_succeeded: - # create dynamically the dst_w at runtime if "dst_w" not in self.dict: self.dict["dst_w"] = DestinationFileAndServeWork( diff --git a/examples/app/boring/scripts/serve.py b/examples/app/boring/scripts/serve.py index 17c431ca378ac..e554976865496 100644 --- a/examples/app/boring/scripts/serve.py +++ b/examples/app/boring/scripts/serve.py @@ -7,7 +7,6 @@ from fastapi.responses import HTMLResponse if __name__ == "__main__": - parser = argparse.ArgumentParser("Server Parser") parser.add_argument("--filepath", type=str, help="Where to find the `filepath`") parser.add_argument("--host", type=str, default="0.0.0.0", help="Server host`") diff --git a/examples/app/dag/app.py b/examples/app/dag/app.py index 578c47c0b31fc..f344242823afe 100644 --- a/examples/app/dag/app.py +++ b/examples/app/dag/app.py @@ -109,7 +109,6 @@ def __init__(self, dag_cls, **dag_kwargs): def run(self): """Example of scheduling an infinite number of DAG runs continuously.""" - # Step 1: Every minute, create and launch a new DAG. if self.schedule("* * * * *"): print("Launching a new DAG") diff --git a/examples/fabric/build_your_own_trainer/run.py b/examples/fabric/build_your_own_trainer/run.py index a5d638b87a191..6902374f75575 100644 --- a/examples/fabric/build_your_own_trainer/run.py +++ b/examples/fabric/build_your_own_trainer/run.py @@ -41,7 +41,6 @@ def training_step(self, batch, batch_idx: int): return {"loss": loss, "accuracy": accuracy_train} def configure_optimizers(self): - optim = torch.optim.Adam(self.parameters(), lr=1e-4) return optim, { "scheduler": torch.optim.lr_scheduler.ReduceLROnPlateau(optim, mode="max", verbose=True), diff --git a/examples/fabric/build_your_own_trainer/trainer.py b/examples/fabric/build_your_own_trainer/trainer.py index d30eacfaf5b8f..a819174d77117 100644 --- a/examples/fabric/build_your_own_trainer/trainer.py +++ b/examples/fabric/build_your_own_trainer/trainer.py @@ -290,7 +290,6 @@ def val_loop( iterable = self.progbar_wrapper(val_loader, total=min(len(val_loader), limit_batches), desc="Validation") for batch_idx, batch in enumerate(iterable): - # end epoch if stopping training completely or max batches for this epoch reached if self.should_stop or batch_idx >= limit_batches: self.fabric.call("on_validation_epoch_end") @@ -478,9 +477,7 @@ def _parse_optimizers_schedulers( # list or tuple elif isinstance(configure_optim_output, (list, tuple)): - if all( - [isinstance(_opt_cand, L.fabric.utilities.types.Optimizable) for _opt_cand in configure_optim_output] - ): + if all(isinstance(_opt_cand, L.fabric.utilities.types.Optimizable) for _opt_cand in configure_optim_output): # single optimizer in list if len(configure_optim_output) == 1: return configure_optim_output[0][0], None @@ -488,12 +485,9 @@ def _parse_optimizers_schedulers( raise NotImplementedError("BYOT only supports a single optimizer") elif all( - [ - isinstance(_lr_cand, (L.fabric.utilities.types.LRScheduler, Mapping)) - for _lr_cand in configure_optim_output - ] + isinstance(_lr_cand, (L.fabric.utilities.types.LRScheduler, Mapping)) + for _lr_cand in configure_optim_output ): - # single scheduler in list if len(configure_optim_output) == 1: 
return None, self._parse_optimizers_schedulers(configure_optim_output[0])[1] diff --git a/examples/fabric/image_classifier/train_fabric.py b/examples/fabric/image_classifier/train_fabric.py index 5d8d6c0b23b16..38c775c9aabf3 100644 --- a/examples/fabric/image_classifier/train_fabric.py +++ b/examples/fabric/image_classifier/train_fabric.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Here are 4 easy steps to use Fabric in your PyTorch code. 1. Create the Lightning Fabric object at the beginning of your script. @@ -109,7 +108,6 @@ def run(hparams): # EPOCH LOOP for epoch in range(1, hparams.epochs + 1): - # TRAINING LOOP model.train() for batch_idx, (data, target) in enumerate(train_loader): diff --git a/examples/fabric/image_classifier/train_torch.py b/examples/fabric/image_classifier/train_torch.py index 55ea6beb463fa..814c28a01829c 100644 --- a/examples/fabric/image_classifier/train_torch.py +++ b/examples/fabric/image_classifier/train_torch.py @@ -75,7 +75,6 @@ def run(hparams): # EPOCH LOOP for epoch in range(1, hparams.epochs + 1): - # TRAINING LOOP model.train() for batch_idx, (data, target) in enumerate(train_loader): diff --git a/examples/fabric/kfold_cv/train_fabric.py b/examples/fabric/kfold_cv/train_fabric.py index 0ac295d937c1b..babe33518a8b5 100644 --- a/examples/fabric/kfold_cv/train_fabric.py +++ b/examples/fabric/kfold_cv/train_fabric.py @@ -141,7 +141,6 @@ def run(hparams): # loop over epochs for epoch in range(1, hparams.epochs + 1): - # loop over folds epoch_acc = 0 for fold, (train_ids, val_ids) in enumerate(kfold.split(dataset)): diff --git a/examples/pytorch/domain_templates/computer_vision_fine_tuning.py b/examples/pytorch/domain_templates/computer_vision_fine_tuning.py index a4c4bb3703afe..c7e492e9aa4a9 100644 --- a/examples/pytorch/domain_templates/computer_vision_fine_tuning.py +++ b/examples/pytorch/domain_templates/computer_vision_fine_tuning.py @@ -192,7 +192,6 @@ def __init__( def __build_model(self): """Define model layers & loss.""" - # 1. Load pre-trained network: backbone = get_torchvision_model(self.backbone, weights="DEFAULT") @@ -211,7 +210,6 @@ def forward(self, x): Returns logits. """ - # 1. 
Feature extraction: x = self.feature_extractor(x) x = x.squeeze(-1).squeeze(-1) diff --git a/examples/pytorch/domain_templates/reinforce_learn_Qnet.py b/examples/pytorch/domain_templates/reinforce_learn_Qnet.py index ab8234c0100ed..e1a3d1eef6cb0 100644 --- a/examples/pytorch/domain_templates/reinforce_learn_Qnet.py +++ b/examples/pytorch/domain_templates/reinforce_learn_Qnet.py @@ -195,7 +195,6 @@ def play_step(self, net: nn.Module, epsilon: float = 0.0, device: str = "cpu") - Returns: reward, done """ - action = self.get_action(net, epsilon, device) # do step in the environment diff --git a/examples/pytorch/domain_templates/reinforce_learn_ppo.py b/examples/pytorch/domain_templates/reinforce_learn_ppo.py index 5fe4a3ab9fd02..633b0ae0ddf30 100644 --- a/examples/pytorch/domain_templates/reinforce_learn_ppo.py +++ b/examples/pytorch/domain_templates/reinforce_learn_ppo.py @@ -285,7 +285,6 @@ def generate_trajectory_samples(self) -> Tuple[List[torch.Tensor], List[torch.Te Yield: Tuple of Lists containing tensors for states, actions, log probs, qvals and advantage """ - for step in range(self.steps_per_epoch): self.state = self.state.to(device=self.device) diff --git a/src/lightning/app/cli/cmd_clusters.py b/src/lightning/app/cli/cmd_clusters.py index 21abccedbfa11..c735b98ff6810 100644 --- a/src/lightning/app/cli/cmd_clusters.py +++ b/src/lightning/app/cli/cmd_clusters.py @@ -366,7 +366,6 @@ def _cluster_status_long(cluster: V1GetClusterResponse, desired_state: V1Cluster cluster: The cluster object elapsed: Seconds since we've started polling """ - cluster_id = cluster.id current_state = cluster.status.phase current_reason = cluster.status.reason diff --git a/src/lightning/app/cli/cmd_install.py b/src/lightning/app/cli/cmd_install.py index 0d68d736a7bc7..00b306d80c72a 100644 --- a/src/lightning/app/cli/cmd_install.py +++ b/src/lightning/app/cli/cmd_install.py @@ -109,7 +109,6 @@ def _install_component_command(name: str, yes: bool, version: str, overwrite: bo def gallery_apps_and_components( name: str, yes_arg: bool, version_arg: str, cwd: Optional[str] = None, overwrite: bool = False ) -> Optional[str]: - try: org, app_or_component = name.split("/") except Exception: @@ -439,7 +438,6 @@ def _resolve_entry(name, version_arg) -> Tuple[Optional[Dict], Optional[str]]: entry = _resolve_resource(registry_url, name=name, version_arg=version_arg, resource_type="app", raise_error=False) if not entry: - registry_url = _resolve_component_registry() # load the component resource diff --git a/src/lightning/app/cli/commands/cd.py b/src/lightning/app/cli/commands/cd.py index 80f1974755952..c6cc1ff029045 100644 --- a/src/lightning/app/cli/commands/cd.py +++ b/src/lightning/app/cli/commands/cd.py @@ -34,9 +34,7 @@ @click.argument("path", nargs=-1) def cd(path: Optional[Union[Tuple[str], str]], verify: bool = True) -> None: """Change the current directory within the Lightning Cloud filesystem.""" - with Live(Spinner("point", text=Text("pending...", style="white")), transient=True) as live: - root = "/" if isinstance(path, Tuple) and len(path) > 0: diff --git a/src/lightning/app/cli/commands/cp.py b/src/lightning/app/cli/commands/cp.py index b89dc7aaafd42..ec0288c9ab7d8 100644 --- a/src/lightning/app/cli/commands/cp.py +++ b/src/lightning/app/cli/commands/cp.py @@ -54,13 +54,11 @@ @click.option("--zip", required=False, is_flag=True, default=False) def cp(src_path: str, dst_path: str, r: bool = False, recursive: bool = False, zip: bool = False) -> None: """Copy files between your local filesystem and the 
Lightning Cloud filesystem.""" - if sys.platform == "win32": print("`cp` isn't supported on windows. Open an issue on Github.") sys.exit(0) with Live(Spinner("point", text=Text("pending...", style="white")), transient=True) as live: - pwd = _pwd() client = LightningClient(retry=False) @@ -308,11 +306,9 @@ def _get_project_id_and_resource(pwd: str) -> Tuple[str, Union[Externalv1Lightni lit_ressources = [lit_resource for lit_resource in lit_cloud_spaces if lit_resource.name == resource_name] if len(lit_ressources) == 0: - lit_ressources = [lit_resource for lit_resource in lit_apps if lit_resource.name == resource_name] if len(lit_ressources) == 0: - print(f"ERROR: There isn't any Lightning Ressource matching the name {resource_name}.") sys.exit(0) diff --git a/src/lightning/app/cli/commands/logs.py b/src/lightning/app/cli/commands/logs.py index 9e6e7b91f98bd..eba1746cbdd2d 100644 --- a/src/lightning/app/cli/commands/logs.py +++ b/src/lightning/app/cli/commands/logs.py @@ -35,19 +35,17 @@ def logs(app_name: str, components: List[str], follow: bool) -> None: Example uses: - Print all application logs: + Print all application logs: - $ lightning show logs my-application + $ lightning show logs my-application + Print logs only from the flow (no work): - Print logs only from the flow (no work): + $ lightning show logs my-application flow - $ lightning show logs my-application flow + Print logs only from selected works: - - Print logs only from selected works: - - $ lightning show logs my-application root.work_a root.work_b + $ lightning show logs my-application root.work_a root.work_b """ _show_logs(app_name, components, follow) diff --git a/src/lightning/app/cli/commands/ls.py b/src/lightning/app/cli/commands/ls.py index 9ad477d2aece9..5427435bd9a66 100644 --- a/src/lightning/app/cli/commands/ls.py +++ b/src/lightning/app/cli/commands/ls.py @@ -40,7 +40,6 @@ @click.argument("path", required=False) def ls(path: Optional[str] = None, print: bool = True, use_live: bool = True) -> List[str]: """List the contents of a folder in the Lightning Cloud Filesystem.""" - from lightning.app.cli.commands.cd import _CD_FILE if sys.platform == "win32": @@ -53,7 +52,6 @@ def ls(path: Optional[str] = None, print: bool = True, use_live: bool = True) -> ) with context: - if not os.path.exists(_LIGHTNING_CONNECTION_FOLDER): os.makedirs(_LIGHTNING_CONNECTION_FOLDER) @@ -108,7 +106,6 @@ def ls(path: Optional[str] = None, print: bool = True, use_live: bool = True) -> lit_ressources = [lit_resource for lit_resource in lit_cloud_spaces if lit_resource.name == splits[1]] if len(lit_ressources) == 0: - lit_ressources = [lit_resource for lit_resource in lit_apps if lit_resource.name == splits[1]] if len(lit_ressources) == 0: @@ -128,7 +125,6 @@ def ls(path: Optional[str] = None, print: bool = True, use_live: bool = True) -> prefix = _get_prefix(prefix, lit_resource) for artifact in _collect_artifacts(client=client, project_id=project_id, prefix=prefix): - if str(artifact.filename).startswith("/"): artifact.filename = artifact.filename[1:] @@ -191,11 +187,8 @@ def _print_names_with_colors(names: List[str], colors: List[str], padding: int = for row_index in sorted(columns): row = "" - for (name, color) in columns[row_index]: - if use_spacing: - spacing = padding - else: - spacing = max_L - len(name) + for name, color in columns[row_index]: + spacing = padding if use_spacing else max_L - len(name) spaces = " " * spacing row += _add_colors(name, color) + spaces rich.print(row) @@ -228,7 +221,6 @@ def _collect_artifacts( 
include_download_url=include_download_url, ) else: - if page_token in tokens: return diff --git a/src/lightning/app/cli/commands/pwd.py b/src/lightning/app/cli/commands/pwd.py index 05737eb52d7c2..7768309e4e6bb 100644 --- a/src/lightning/app/cli/commands/pwd.py +++ b/src/lightning/app/cli/commands/pwd.py @@ -27,13 +27,11 @@ def pwd() -> str: """Print your current working directory in the Lightning Cloud filesystem.""" - if sys.platform == "win32": print("`pwd` isn't supported on windows. Open an issue on Github.") sys.exit(0) with Live(Spinner("point", text=Text("pending...", style="white")), transient=True): - root = _pwd() print(root) diff --git a/src/lightning/app/cli/commands/rm.py b/src/lightning/app/cli/commands/rm.py index 9127515713215..f114a27d7b0f1 100644 --- a/src/lightning/app/cli/commands/rm.py +++ b/src/lightning/app/cli/commands/rm.py @@ -32,7 +32,6 @@ @click.option("--recursive", required=False, hidden=True) def rm(rm_path: str, r: bool = False, recursive: bool = False) -> None: """Delete files on the Lightning Cloud filesystem.""" - root = _pwd() if rm_path in (".", ".."): @@ -70,7 +69,6 @@ def rm(rm_path: str, r: bool = False, recursive: bool = False) -> None: lit_ressources = [lit_resource for lit_resource in lit_cloud_spaces if lit_resource.name == splits[1]] if len(lit_ressources) == 0: - lit_ressources = [lit_resource for lit_resource in lit_apps if lit_resource.name == splits[1]] if len(lit_ressources) == 0: diff --git a/src/lightning/app/cli/connect/app.py b/src/lightning/app/cli/connect/app.py index 4047c1a739153..68f2a6909ab3c 100644 --- a/src/lightning/app/cli/connect/app.py +++ b/src/lightning/app/cli/connect/app.py @@ -83,7 +83,6 @@ def connect_app(app_name_or_id: str): connect_app(app_name_or_id) elif app_name_or_id.startswith("localhost"): - with Progress() as progress_bar: connecting = progress_bar.add_task("[magenta]Setting things up for you...", total=1.0) @@ -140,7 +139,6 @@ def connect_app(app_name_or_id: str): ).wait() elif matched_connection_path: - matched_connected_file = os.path.join(matched_connection_path, "connect.txt") matched_commands = os.path.join(matched_connection_path, "commands") if os.path.isdir(matched_commands): diff --git a/src/lightning/app/cli/connect/data.py b/src/lightning/app/cli/connect/data.py index 0a35b788f173f..9781f9c9e4935 100644 --- a/src/lightning/app/cli/connect/data.py +++ b/src/lightning/app/cli/connect/data.py @@ -62,7 +62,6 @@ def connect_data( _error_and_exit("Data connection isn't supported on windows. 
Open an issue on Github.") with Live(Spinner("point", text=Text("pending...", style="white")), transient=True) as live: - live.stop() client = LightningClient(retry=False) @@ -71,7 +70,6 @@ def connect_data( project_id = None for project in projects.memberships: - if project.name == project_name: project_id = project.project_id break diff --git a/src/lightning/app/cli/lightning_cli.py b/src/lightning/app/cli/lightning_cli.py index ad1d42088df46..cf67ce9434806 100644 --- a/src/lightning/app/cli/lightning_cli.py +++ b/src/lightning/app/cli/lightning_cli.py @@ -190,26 +190,22 @@ def cluster_logs(cluster_id: str, to_time: arrow.Arrow, from_time: arrow.Arrow, Example uses: - Print cluster logs: + Print cluster logs: - $ lightning show cluster logs my-cluster + $ lightning show cluster logs my-cluster + Print cluster logs and wait for new logs: - Print cluster logs and wait for new logs: + $ lightning show cluster logs my-cluster --follow - $ lightning show cluster logs my-cluster --follow + Print cluster logs, from 48 hours ago to now: + $ lightning show cluster logs my-cluster --from "48 hours ago" - Print cluster logs, from 48 hours ago to now: + Print cluster logs, 10 most recent lines: - $ lightning show cluster logs my-cluster --from "48 hours ago" - - - Print cluster logs, 10 most recent lines: - - $ lightning show cluster logs my-cluster --limit 10 + $ lightning show cluster logs my-cluster --limit 10 """ - client = LightningClient(retry=False) cluster_manager = AWSClusterManager() existing_cluster_list = cluster_manager.get_clusters() @@ -286,7 +282,6 @@ def _run_app( run_app_comment_commands: bool, enable_basic_auth: str, ) -> None: - if not os.path.exists(file): original_file = file file = cmd_install.gallery_apps_and_components(file, True, "latest", overwrite=True) # type: ignore[assignment] # noqa E501 @@ -444,7 +439,6 @@ def run_app( @click.option("--name", help="The name to use for the CloudSpace", default="", type=str) def open(path: str, cluster_id: str, name: str) -> None: """Open files or folders from your machine in a Lightning CloudSpace.""" - if not os.path.exists(path): click.echo(f"The provided path `{path}` doesn't exist.") sys.exit(1) @@ -476,7 +470,6 @@ def open(path: str, cluster_id: str, name: str) -> None: ) def ssh(app_name: Optional[str] = None, component_name: Optional[str] = None) -> None: """SSH into a Lightning App.""" - app_manager = _AppManager() apps = app_manager.list_apps(phase_in=[V1LightningappInstanceState.RUNNING]) if len(apps) == 0: diff --git a/src/lightning/app/components/multi_node/base.py b/src/lightning/app/components/multi_node/base.py index 34dc0c3dd35a9..a300918452d48 100644 --- a/src/lightning/app/components/multi_node/base.py +++ b/src/lightning/app/components/multi_node/base.py @@ -95,7 +95,6 @@ def run(self) -> None: # 2. Loop over all node machines for node_rank in range(len(self.ws)): - # 3. Run the user code in a distributed way ! self.ws[node_rank].run( main_address=self.ws[0].internal_ip, diff --git a/src/lightning/app/components/multi_node/pytorch_spawn.py b/src/lightning/app/components/multi_node/pytorch_spawn.py index 2e7659b824430..399597819f069 100644 --- a/src/lightning/app/components/multi_node/pytorch_spawn.py +++ b/src/lightning/app/components/multi_node/pytorch_spawn.py @@ -81,7 +81,6 @@ def run( node_rank: int, nprocs: int, ): - import torch # 1. 
Setting distributed environment diff --git a/src/lightning/app/core/app.py b/src/lightning/app/core/app.py index 3bb0288bea9d0..f5f302536fbaf 100644 --- a/src/lightning/app/core/app.py +++ b/src/lightning/app/core/app.py @@ -341,7 +341,6 @@ def _collect_deltas_from_ui_and_work_queues(self) -> List[Union[Delta, _APIReque t0 = time() while (time() - t0) < self.state_accumulate_wait: - # TODO: Fetch all available deltas at once to reduce queue calls. delta: Optional[ Union[_DeltaRequest, _APIRequest, _CommandRequest, ComponentDelta] @@ -531,7 +530,6 @@ def _run(self) -> bool: return True def _update_layout(self) -> None: - if self.backend: self.backend.resolve_url(self, base_url=None) diff --git a/src/lightning/app/core/flow.py b/src/lightning/app/core/flow.py index 8a5cbf3a57c88..190429ca61139 100644 --- a/src/lightning/app/core/flow.py +++ b/src/lightning/app/core/flow.py @@ -36,7 +36,6 @@ class LightningFlow: - _INTERNAL_STATE_VARS = { # Internal protected variables that are still part of the state (even though they are prefixed with "_") "_paths", @@ -110,7 +109,6 @@ def __init__(self) -> None: >>> assert flow.counter == 1 >>> assert flow.state["vars"]["counter"] == 1 """ - self._state: set = set() self._name: str = "" self._flows: set = set() @@ -154,7 +152,6 @@ def __setattr__(self, name: str, value: Any) -> None: value = Path(value) if self._is_state_attribute(name): - if hasattr(self, name): if name in self._flows and value != getattr(self, name): raise AttributeError(f"Cannot set attributes as the flow can't be changed once defined: {name}") diff --git a/src/lightning/app/core/queues.py b/src/lightning/app/core/queues.py index 239a7ca1969af..f0b1e897e65c9 100644 --- a/src/lightning/app/core/queues.py +++ b/src/lightning/app/core/queues.py @@ -287,7 +287,6 @@ def get(self, timeout: Optional[float] = None) -> Any: Read timeout in seconds, in case of input timeout is 0, the `self.default_timeout` is used. A timeout of None can be used to block indefinitely. """ - if timeout is None: # this means it's blocking in redis timeout = 0 diff --git a/src/lightning/app/frontend/just_py/just_py_base.py b/src/lightning/app/frontend/just_py/just_py_base.py index c317ee89c0812..ce6d009ef7905 100644 --- a/src/lightning/app/frontend/just_py/just_py_base.py +++ b/src/lightning/app/frontend/just_py/just_py_base.py @@ -42,9 +42,9 @@ def _get_render_fn_from_environment() -> Callable: def _main() -> None: + """Run the render_fn with the current flow_state.""" import justpy as jp - """Run the render_fn with the current flow_state.""" # Fetch the information of which flow attaches to this justpy instance flow_name = os.environ["LIGHTNING_FLOW_NAME"] diff --git a/src/lightning/app/frontend/panel/app_state_comm.py b/src/lightning/app/frontend/panel/app_state_comm.py index 2067bd4d19fbf..eb1f0187862d4 100644 --- a/src/lightning/app/frontend/panel/app_state_comm.py +++ b/src/lightning/app/frontend/panel/app_state_comm.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- """The watch_app_state function enables us to trigger a callback function when ever the app state changes.""" # Todo: Refactor with Streamlit # Note: It would be nice one day to just watch changes within the Flow scope instead of whole app diff --git a/src/lightning/app/frontend/panel/app_state_watcher.py b/src/lightning/app/frontend/panel/app_state_watcher.py index 86b84eec6e2d6..528a19accede8 100644 --- a/src/lightning/app/frontend/panel/app_state_watcher.py +++ b/src/lightning/app/frontend/panel/app_state_watcher.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """The ``AppStateWatcher`` enables a Frontend to: - subscribe to App state changes diff --git a/src/lightning/app/frontend/panel/panel_frontend.py b/src/lightning/app/frontend/panel/panel_frontend.py index 86ad7e4523fa5..f4a5c68f57054 100644 --- a/src/lightning/app/frontend/panel/panel_frontend.py +++ b/src/lightning/app/frontend/panel/panel_frontend.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """The PanelFrontend wraps your Panel code in your LightningFlow.""" from __future__ import annotations diff --git a/src/lightning/app/frontend/panel/panel_serve_render_fn.py b/src/lightning/app/frontend/panel/panel_serve_render_fn.py index ee42a4921a445..df6a83d713d59 100644 --- a/src/lightning/app/frontend/panel/panel_serve_render_fn.py +++ b/src/lightning/app/frontend/panel/panel_serve_render_fn.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """This file gets run by Python to launch a Panel Server with Lightning. We will call the ``render_fn`` that the user provided to the PanelFrontend. diff --git a/src/lightning/app/frontend/streamlit_base.py b/src/lightning/app/frontend/streamlit_base.py index ca12b12e60452..189bbba82b768 100644 --- a/src/lightning/app/frontend/streamlit_base.py +++ b/src/lightning/app/frontend/streamlit_base.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """This file gets run by streamlit, which we launch within Lightning. From here, we will call the render function that the user provided in ``configure_layout``. diff --git a/src/lightning/app/frontend/utils.py b/src/lightning/app/frontend/utils.py index 1d2152e4fe3e8..e194443d7ce84 100644 --- a/src/lightning/app/frontend/utils.py +++ b/src/lightning/app/frontend/utils.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- """Utility functions for lightning Frontends.""" from __future__ import annotations diff --git a/src/lightning/app/runners/cloud.py b/src/lightning/app/runners/cloud.py index 12ba2f9f0a7c0..1783fee3d6927 100644 --- a/src/lightning/app/runners/cloud.py +++ b/src/lightning/app/runners/cloud.py @@ -103,7 +103,6 @@ def _to_clean_dict(swagger_object, map_attributes): """Returns the swagger object properties as a dict with correct object names.""" - if hasattr(swagger_object, "to_dict"): attribute_map = swagger_object.attribute_map result = {} @@ -395,7 +394,6 @@ def dispatch( @classmethod def load_app_from_file(cls, filepath: str, env_vars: Dict[str, str] = {}) -> "LightningApp": """Load a LightningApp from a file, mocking the imports.""" - # Pretend we are running in the cloud when loading the app locally os.environ["LAI_RUNNING_IN_CLOUD"] = "1" @@ -538,7 +536,7 @@ def _resolve_cloudspace_name( name_exists = True while name_exists: random_name = cloudspace_name + "-" + "".join(random.sample(string.ascii_letters, 4)) - name_exists = any([app.name == random_name for app in existing_cloudspaces]) + name_exists = any(app.name == random_name for app in existing_cloudspaces) cloudspace_name = random_name return cloudspace_name @@ -553,7 +551,7 @@ def _resolve_run_name( name_exists = True while name_exists: random_name = name + "-" + "".join(random.sample(string.ascii_letters, 4)) - name_exists = any([app.name == random_name for app in existing_instances]) + name_exists = any(app.name == random_name for app in existing_instances) name = random_name return name diff --git a/src/lightning/app/storage/filesystem.py b/src/lightning/app/storage/filesystem.py index e52c1ee100b30..141a29e8a1bc5 100644 --- a/src/lightning/app/storage/filesystem.py +++ b/src/lightning/app/storage/filesystem.py @@ -29,7 +29,6 @@ def _get_files(fs, src: Path, dst: Path, overwrite: bool = True): class FileSystem: - """This filesystem enables to easily move files from and to the shared storage.""" def __init__(self) -> None: diff --git a/src/lightning/app/storage/orchestrator.py b/src/lightning/app/storage/orchestrator.py index 9546a40ba4841..e2274487fbf0c 100644 --- a/src/lightning/app/storage/orchestrator.py +++ b/src/lightning/app/storage/orchestrator.py @@ -176,7 +176,6 @@ def run_once(self, work_name: str) -> None: # It is possible to have multiple destination targeting # the same source concurrently. if work_name in self.waiting_for_response.values(): - # check if the current work has responses for file transfers to other works. copy_response_queue = self.copy_response_queues[work_name] try: diff --git a/src/lightning/app/testing/testing.py b/src/lightning/app/testing/testing.py index 85c8109490ac2..317263c95feb5 100644 --- a/src/lightning/app/testing/testing.py +++ b/src/lightning/app/testing/testing.py @@ -498,7 +498,6 @@ def delete_cloud_lightning_apps(): PR_NUMBER and TEST_APP_NAME are environment variables. 
""" - client = LightningClient() try: @@ -524,7 +523,6 @@ def delete_cloud_lightning_apps(): print("deleting apps that were created more than 1 hour ago.") for lit_app in list_apps.lightningapps: - if lit_app.created_at < datetime.datetime.now(lit_app.created_at.tzinfo) - datetime.timedelta(hours=1): _delete_lightning_app(client, project_id=project_id, app_id=lit_app.id, app_name=lit_app.name) _delete_cloud_space( diff --git a/src/lightning/app/utilities/app_commands.py b/src/lightning/app/utilities/app_commands.py index c7b28cfec7f8f..1c4844689bb5a 100644 --- a/src/lightning/app/utilities/app_commands.py +++ b/src/lightning/app/utilities/app_commands.py @@ -106,21 +106,16 @@ def _execute_app_commands(cl: CommandLines) -> None: def run_app_commands(file: str) -> None: """Extract all lines at the top of the file which contain commands & execute them. - Commands to execute are comment lines whose first non-whitespace character - begins with the "bang" symbol (`!`). After the first non comment line we - stop parsing the rest of the file. Running environment is preserved in the + Commands to execute are comment lines whose first non-whitespace character begins with the "bang" symbol (`!`). + After the first non comment line we stop parsing the rest of the file. Running environment is preserved in the subprocess shell. For example: - # some file <--- not a command - # !echo "hello world" <--- a command - # ! pip install foo <--- a command - # foo! bar <--- not a command - import lightning <--- not a command, end parsing. + # some file <--- not a command # !echo "hello world" <--- a command # ! pip install foo <--- a command # + foo! bar <--- not a command import lightning <--- not a command, end parsing. - where `echo "hello world" && pip install foo` would be executed in the current - running environment. + where `echo "hello world" && pip install foo` would be executed in the current running environment. 
""" cl = _extract_commands_from_file(file_name=file) if len(cl.commands) == 0: diff --git a/src/lightning/app/utilities/app_helpers.py b/src/lightning/app/utilities/app_helpers.py index 7e9b3b5f200dd..449fddaa9418c 100644 --- a/src/lightning/app/utilities/app_helpers.py +++ b/src/lightning/app/utilities/app_helpers.py @@ -323,7 +323,6 @@ def _delta_to_app_state_delta(root: "LightningFlow", component: "Component", del new_prefix = "root" for p, c in _walk_to_component(root, component): - if isinstance(c, lightning.app.core.LightningWork): new_prefix += "['works']" diff --git a/src/lightning/app/utilities/app_logs.py b/src/lightning/app/utilities/app_logs.py index bc21c3d456b47..807a4492729ce 100644 --- a/src/lightning/app/utilities/app_logs.py +++ b/src/lightning/app/utilities/app_logs.py @@ -79,7 +79,6 @@ def _app_logs_reader( follow: bool, on_error_callback: Optional[Callable] = None, ) -> Iterator[_LogEvent]: - read_queue = queue.PriorityQueue() # We will use a socket per component diff --git a/src/lightning/app/utilities/cli_helpers.py b/src/lightning/app/utilities/cli_helpers.py index 5397ed82d6215..d334dedc5e360 100644 --- a/src/lightning/app/utilities/cli_helpers.py +++ b/src/lightning/app/utilities/cli_helpers.py @@ -47,7 +47,6 @@ def _format_input_env_variables(env_list: tuple) -> Dict[str, str]: key: env variable name value: env variable value """ - env_vars_dict = {} for env_str in env_list: var_parts = env_str.split("=") @@ -198,7 +197,6 @@ def _maybe_find_matching_cloud_app(self): def _collect_open_api_json(self): """This function is used to retrieve the current url associated with an id.""" - if _is_url(self.app_id_or_name_or_url): self.url = self.app_id_or_name_or_url assert self.url diff --git a/src/lightning/app/utilities/introspection.py b/src/lightning/app/utilities/introspection.py index e36aae8e5c73c..394c5da593a19 100644 --- a/src/lightning/app/utilities/introspection.py +++ b/src/lightning/app/utilities/introspection.py @@ -22,11 +22,9 @@ class LightningVisitor(ast.NodeVisitor): - """ - Base class for visitor that finds class definitions based on - class inheritance. - Derived classes are expected to define class_name and implement - the analyze_class_def method. + """Base class for visitor that finds class definitions based on class inheritance. Derived classes are expected + to define class_name and implement the analyze_class_def method. + Attributes ---------- class_name: str @@ -55,8 +53,8 @@ def visit_ClassDef(self, node: ast.ClassDef) -> None: class LightningModuleVisitor(LightningVisitor): - """ - Finds Lightning modules based on class inheritance. + """Finds Lightning modules based on class inheritance. + Attributes ---------- class_name: Optional[str] @@ -126,8 +124,8 @@ class LightningModuleVisitor(LightningVisitor): class LightningDataModuleVisitor(LightningVisitor): - """ - Finds Lightning data modules based on class inheritance. + """Finds Lightning data modules based on class inheritance. + Attributes ---------- class_name: Optional[str] @@ -149,8 +147,8 @@ class LightningDataModuleVisitor(LightningVisitor): class LightningLoggerVisitor(LightningVisitor): - """ - Finds Lightning loggers based on class inheritance. + """Finds Lightning loggers based on class inheritance. + Attributes ---------- class_name: Optional[str] @@ -165,8 +163,8 @@ class LightningLoggerVisitor(LightningVisitor): class LightningCallbackVisitor(LightningVisitor): - """ - Finds Lightning callbacks based on class inheritance. + """Finds Lightning callbacks based on class inheritance. 
+ Attributes ---------- class_name: Optional[str] @@ -217,8 +215,8 @@ class LightningCallbackVisitor(LightningVisitor): class LightningStrategyVisitor(LightningVisitor): - """ - Finds Lightning callbacks based on class inheritance. + """Finds Lightning callbacks based on class inheritance. + Attributes ---------- class_name: Optional[str] @@ -271,8 +269,8 @@ class LightningProfilerVisitor(LightningVisitor): class Scanner: - """ - Finds relevant Lightning objects in files in the file system. + """Finds relevant Lightning objects in files in the file system. + Attributes ---------- visitor_classes: List[Type] @@ -324,7 +322,6 @@ def has_class(self, cls) -> bool: continue for node in ast.walk(module): - if isinstance(node, ast.ImportFrom): for import_from_cls in node.names: classes.append(import_from_cls.name) @@ -337,9 +334,8 @@ def has_class(self, cls) -> bool: return cls.__name__ in classes def scan(self) -> List[Dict[str, str]]: - """ - Finds Lightning modules in files, returning importable - objects. + """Finds Lightning modules in files, returning importable objects. + Returns ------- List[Dict[str, Any]] diff --git a/src/lightning/app/utilities/load_app.py b/src/lightning/app/utilities/load_app.py index 5df1adfe36694..52a2363d5199b 100644 --- a/src/lightning/app/utilities/load_app.py +++ b/src/lightning/app/utilities/load_app.py @@ -135,7 +135,6 @@ def load_app_from_file( def _new_module(name): """Create a new module with the given name.""" - return types.ModuleType(name) diff --git a/src/lightning/app/utilities/packaging/tarfile.py b/src/lightning/app/utilities/packaging/tarfile.py index 9316478bc93c3..734993c05beff 100644 --- a/src/lightning/app/utilities/packaging/tarfile.py +++ b/src/lightning/app/utilities/packaging/tarfile.py @@ -19,7 +19,6 @@ def clean_tarfile(file_path: str, mode: str) -> None: """This utility removes all files extracted from a tarfile.""" - if not os.path.exists(file_path): return None diff --git a/src/lightning/app/utilities/port.py b/src/lightning/app/utilities/port.py index ca5ad7d91f1c9..11d1daa222183 100644 --- a/src/lightning/app/utilities/port.py +++ b/src/lightning/app/utilities/port.py @@ -125,7 +125,6 @@ def enable_port() -> V1NetworkConfig: def disable_port(port: int, ignore_disabled: bool = True) -> None: """Make a request to the cloud controlplane to close a port of the flow.""" - app_id = os.getenv("LIGHTNING_CLOUD_APP_ID", None) project_id = os.getenv("LIGHTNING_CLOUD_PROJECT_ID", None) diff --git a/src/lightning/app/utilities/proxies.py b/src/lightning/app/utilities/proxies.py index 704751dc681ad..a624e9a8af6b3 100644 --- a/src/lightning/app/utilities/proxies.py +++ b/src/lightning/app/utilities/proxies.py @@ -79,7 +79,6 @@ def unwrap(fn): def _send_data_to_caller_queue( proxy, work: "LightningWork", caller_queue: "BaseQueue", data: Dict, call_hash: str ) -> Dict: - proxy.has_sent = True if work._calls[CacheCallsKeys.LATEST_CALL_HASH] is None: diff --git a/src/lightning/app/utilities/safe_pickle.py b/src/lightning/app/utilities/safe_pickle.py index 52f97c9bf44b6..614ac8dd3f24d 100644 --- a/src/lightning/app/utilities/safe_pickle.py +++ b/src/lightning/app/utilities/safe_pickle.py @@ -56,7 +56,6 @@ def get_picklable_work(work: LightningWork) -> LightningWork: └── bar └── app.py """ - # If the work object not taken from the app ref, there is a thread lock reference # somewhere thats preventing it from being pickled. Investigate it later. We # shouldn't be fetching the work object from the app ref. 
TODO @sherin diff --git a/src/lightning/app/utilities/tree.py b/src/lightning/app/utilities/tree.py index 751e47a0b6dea..5dafaee6bf60b 100644 --- a/src/lightning/app/utilities/tree.py +++ b/src/lightning/app/utilities/tree.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Utilities for traversing the tree of components in an app.""" from typing import Type, TYPE_CHECKING diff --git a/src/lightning/fabric/connector.py b/src/lightning/fabric/connector.py index f9cdb56617554..69d98ef174ec4 100644 --- a/src/lightning/fabric/connector.py +++ b/src/lightning/fabric/connector.py @@ -107,7 +107,6 @@ def __init__( precision: _PRECISION_INPUT = "32-true", plugins: Optional[Union[_PLUGIN_INPUT, List[_PLUGIN_INPUT]]] = None, ) -> None: - # These arguments can be set through environment variables set by the CLI accelerator = self._argument_from_env("accelerator", accelerator, default="auto") strategy = self._argument_from_env("strategy", strategy, default="auto") diff --git a/src/lightning/fabric/fabric.py b/src/lightning/fabric/fabric.py index 659829633dacc..3fccc572e640b 100644 --- a/src/lightning/fabric/fabric.py +++ b/src/lightning/fabric/fabric.py @@ -542,7 +542,6 @@ def no_backward_sync(self, module: _FabricModule, enabled: bool = True) -> Gener enabled: Whether the context manager is enabled or not. ``True`` means skip the sync, ``False`` means do not skip. """ - if not isinstance(module, _FabricModule): raise TypeError( "You need to set up the model first before you can call `self.no_backward_sync()`:" diff --git a/src/lightning/fabric/loggers/csv_logs.py b/src/lightning/fabric/loggers/csv_logs.py index ad9fd89b1f88e..13e72f20aa154 100644 --- a/src/lightning/fabric/loggers/csv_logs.py +++ b/src/lightning/fabric/loggers/csv_logs.py @@ -30,8 +30,7 @@ class CSVLogger(Logger): - r""" - Log to the local file system in CSV format. + r"""Log to the local file system in CSV format. Logs are saved to ``os.path.join(root_dir, name, version)``. @@ -170,8 +169,7 @@ def _get_next_version(self) -> int: class _ExperimentWriter: - r""" - Experiment writer for CSVLogger. + r"""Experiment writer for CSVLogger. Args: log_dir: Directory for the experiment logs diff --git a/src/lightning/fabric/loggers/tensorboard.py b/src/lightning/fabric/loggers/tensorboard.py index a7790e5b700b4..f6ba229cdbe6f 100644 --- a/src/lightning/fabric/loggers/tensorboard.py +++ b/src/lightning/fabric/loggers/tensorboard.py @@ -42,8 +42,7 @@ class TensorBoardLogger(Logger): - r""" - Log to local file system in `TensorBoard `_ format. + r"""Log to local file system in `TensorBoard `_ format. Implemented using :class:`~tensorboardX.SummaryWriter`. Logs are saved to ``os.path.join(root_dir, name, version)``. This is the recommended logger in Lightning Fabric. diff --git a/src/lightning/fabric/plugins/environments/slurm.py b/src/lightning/fabric/plugins/environments/slurm.py index 5bead211b913d..086c7a0012c50 100644 --- a/src/lightning/fabric/plugins/environments/slurm.py +++ b/src/lightning/fabric/plugins/environments/slurm.py @@ -173,8 +173,9 @@ def _validate_srun_variables() -> None: """Checks for conflicting or incorrectly set variables set through `srun` and raises a useful error message. - Right now, we only check for the most common user errors. See `the srun docs - `_ for a complete list of supported srun variables. + Right now, we only check for the most common user errors. 
See + `the srun docs `_ + for a complete list of supported srun variables. """ ntasks = int(os.environ.get("SLURM_NTASKS", "1")) if ntasks > 1 and "SLURM_NTASKS_PER_NODE" not in os.environ: diff --git a/src/lightning/fabric/plugins/precision/tpu.py b/src/lightning/fabric/plugins/precision/tpu.py index 364e475088ded..1d599f2f82298 100644 --- a/src/lightning/fabric/plugins/precision/tpu.py +++ b/src/lightning/fabric/plugins/precision/tpu.py @@ -25,7 +25,6 @@ def optimizer_step( optimizer: Optimizable, **kwargs: Any, ) -> Any: - import torch_xla.core.xla_model as xm return xm.optimizer_step(optimizer, optimizer_args=kwargs) diff --git a/src/lightning/fabric/strategies/launchers/base.py b/src/lightning/fabric/strategies/launchers/base.py index aefc8577a3e2a..f261f81124d5f 100644 --- a/src/lightning/fabric/strategies/launchers/base.py +++ b/src/lightning/fabric/strategies/launchers/base.py @@ -16,8 +16,7 @@ class _Launcher(ABC): - r""" - Abstract base class for all Launchers. + r"""Abstract base class for all Launchers. Launchers are responsible for the creation and instrumentation of new processes so that the :class:`~lightning.fabric.strategies.strategy.Strategy` can set up communication between all them. diff --git a/src/lightning/fabric/strategies/launchers/launcher.py b/src/lightning/fabric/strategies/launchers/launcher.py index aefc8577a3e2a..f261f81124d5f 100644 --- a/src/lightning/fabric/strategies/launchers/launcher.py +++ b/src/lightning/fabric/strategies/launchers/launcher.py @@ -16,8 +16,7 @@ class _Launcher(ABC): - r""" - Abstract base class for all Launchers. + r"""Abstract base class for all Launchers. Launchers are responsible for the creation and instrumentation of new processes so that the :class:`~lightning.fabric.strategies.strategy.Strategy` can set up communication between all them. diff --git a/src/lightning/fabric/strategies/launchers/xla.py b/src/lightning/fabric/strategies/launchers/xla.py index 6911c4014ccd8..8ae7067750b9e 100644 --- a/src/lightning/fabric/strategies/launchers/xla.py +++ b/src/lightning/fabric/strategies/launchers/xla.py @@ -27,8 +27,8 @@ class _XLALauncher(_Launcher): - r"""Launches processes that run a given function in parallel on XLA supported hardware, and joins them all at the - end. + r"""Launches processes that run a given function in parallel on XLA supported hardware, and joins them all at + the end. The main process in which this launcher is invoked creates N so-called worker processes (using the `torch_xla` :func:`xmp.spawn`) that run the given function. diff --git a/src/lightning/fabric/utilities/apply_func.py b/src/lightning/fabric/utilities/apply_func.py index dad42a764b0c5..1feedef96e18f 100644 --- a/src/lightning/fabric/utilities/apply_func.py +++ b/src/lightning/fabric/utilities/apply_func.py @@ -82,7 +82,6 @@ def move_data_to_device(batch: Any, device: _DEVICE) -> Any: - :meth:`torch.Tensor.to` - :class:`torch.device` """ - if isinstance(device, str): device = torch.device(device) diff --git a/src/lightning/fabric/utilities/rank_zero.py b/src/lightning/fabric/utilities/rank_zero.py index 018691ebedcb5..4f5dc8a957529 100644 --- a/src/lightning/fabric/utilities/rank_zero.py +++ b/src/lightning/fabric/utilities/rank_zero.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- """Utilities that can be used for calling functions on a particular rank.""" import logging import os diff --git a/src/lightning/pytorch/callbacks/callback.py b/src/lightning/pytorch/callbacks/callback.py index bef5327ca2fb9..5a7ac0cca0f9b 100644 --- a/src/lightning/pytorch/callbacks/callback.py +++ b/src/lightning/pytorch/callbacks/callback.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -r""" -Base class used to build new callbacks. - -""" +r"""Base class used to build new callbacks.""" from typing import Any, Dict, Optional, Type @@ -26,8 +23,7 @@ class Callback: - r""" - Abstract base class used to build new callbacks. + r"""Abstract base class used to build new callbacks. Subclass this class and override any of the relevant hooks """ @@ -247,8 +243,7 @@ def load_state_dict(self, state_dict: Dict[str, Any]) -> None: def on_save_checkpoint( self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", checkpoint: Dict[str, Any] ) -> None: - r""" - Called when saving a checkpoint to give you a chance to store anything else you might want to save. + r"""Called when saving a checkpoint to give you a chance to store anything else you might want to save. Args: trainer: the current :class:`~lightning.pytorch.trainer.Trainer` instance. @@ -259,8 +254,7 @@ def on_save_checkpoint( def on_load_checkpoint( self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", checkpoint: Dict[str, Any] ) -> None: - r""" - Called when loading a model checkpoint, use to reload state. + r"""Called when loading a model checkpoint, use to reload state. Args: trainer: the current :class:`~lightning.pytorch.trainer.Trainer` instance. diff --git a/src/lightning/pytorch/callbacks/checkpoint.py b/src/lightning/pytorch/callbacks/checkpoint.py index 2dd5917601442..301761049be74 100644 --- a/src/lightning/pytorch/callbacks/checkpoint.py +++ b/src/lightning/pytorch/callbacks/checkpoint.py @@ -2,8 +2,8 @@ class Checkpoint(Callback): - r""" - This is the base class for model checkpointing. Expert users may want to subclass it in case of writing - custom :class:`~lightning.pytorch.callbacksCheckpoint` callback, so that - the trainer recognizes the custom class as a checkpointing callback. + r"""This is the base class for model checkpointing. + + Expert users may want to subclass it in case of writing custom :class:`~lightning.pytorch.callbacksCheckpoint` + callback, so that the trainer recognizes the custom class as a checkpointing callback. """ diff --git a/src/lightning/pytorch/callbacks/early_stopping.py b/src/lightning/pytorch/callbacks/early_stopping.py index 5cbb005416cf7..478ca63b493d3 100644 --- a/src/lightning/pytorch/callbacks/early_stopping.py +++ b/src/lightning/pytorch/callbacks/early_stopping.py @@ -11,12 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -r""" -Early Stopping -^^^^^^^^^^^^^^ +r"""Early Stopping ^^^^^^^^^^^^^^ Monitor a metric and stop training when it stops improving. - """ import logging from typing import Any, Callable, Dict, Optional, Tuple @@ -35,8 +32,7 @@ class EarlyStopping(Callback): - r""" - Monitor a metric and stop training when it stops improving. + r"""Monitor a metric and stop training when it stops improving. Args: monitor: quantity to be monitored. 
diff --git a/src/lightning/pytorch/callbacks/finetuning.py b/src/lightning/pytorch/callbacks/finetuning.py index 4a8b4cda228d3..ddc4cdaceea0c 100644 --- a/src/lightning/pytorch/callbacks/finetuning.py +++ b/src/lightning/pytorch/callbacks/finetuning.py @@ -11,11 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -r""" -Finetuning Callback -^^^^^^^^^^^^^^^^^^^^ -Freeze and unfreeze models for finetuning purposes -""" +r"""Finetuning Callback ^^^^^^^^^^^^^^^^^^^^ Freeze and unfreeze models for finetuning purposes.""" import logging from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Union @@ -37,8 +33,7 @@ def multiplicative(epoch: int) -> float: class BaseFinetuning(Callback): - r""" - This class implements the base logic for writing your own Finetuning Callback. + r"""This class implements the base logic for writing your own Finetuning Callback. .. warning:: This is an :ref:`experimental ` feature. @@ -335,7 +330,6 @@ class BackboneFinetuning(BaseFinetuning): >>> multiplicative = lambda epoch: 1.5 >>> backbone_finetuning = BackboneFinetuning(200, multiplicative) >>> trainer = Trainer(callbacks=[backbone_finetuning]) - """ def __init__( diff --git a/src/lightning/pytorch/callbacks/gradient_accumulation_scheduler.py b/src/lightning/pytorch/callbacks/gradient_accumulation_scheduler.py index 7c5a83f373f68..eedcfbb3cf9c7 100644 --- a/src/lightning/pytorch/callbacks/gradient_accumulation_scheduler.py +++ b/src/lightning/pytorch/callbacks/gradient_accumulation_scheduler.py @@ -31,8 +31,7 @@ class GradientAccumulationScheduler(Callback): - r""" - Change gradient accumulation factor according to scheduling. + r"""Change gradient accumulation factor according to scheduling. Args: scheduling: scheduling in format {epoch: accumulation_factor} diff --git a/src/lightning/pytorch/callbacks/lambda_function.py b/src/lightning/pytorch/callbacks/lambda_function.py index 64e29337b359a..e062656313eab 100644 --- a/src/lightning/pytorch/callbacks/lambda_function.py +++ b/src/lightning/pytorch/callbacks/lambda_function.py @@ -11,12 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -r""" -Lambda Callback -^^^^^^^^^^^^^^^ +r"""Lambda Callback ^^^^^^^^^^^^^^^ Create a simple callback on the fly using lambda functions. - """ from typing import Callable, Optional @@ -25,8 +22,7 @@ class LambdaCallback(Callback): - r""" - Create a simple callback on the fly using lambda functions. + r"""Create a simple callback on the fly using lambda functions. Args: **kwargs: hooks supported by :class:`~lightning.pytorch.callbacks.callback.Callback` diff --git a/src/lightning/pytorch/callbacks/lr_monitor.py b/src/lightning/pytorch/callbacks/lr_monitor.py index 567385b079469..fef524444de1a 100644 --- a/src/lightning/pytorch/callbacks/lr_monitor.py +++ b/src/lightning/pytorch/callbacks/lr_monitor.py @@ -33,8 +33,7 @@ class LearningRateMonitor(Callback): - r""" - Automatically monitor and logs learning rate for learning rate schedulers during training. + r"""Automatically monitor and logs learning rate for learning rate schedulers during training. 
Args: logging_interval: set to ``'epoch'`` or ``'step'`` to log ``lr`` of all optimizers @@ -84,7 +83,6 @@ def configure_optimizer(self): ) lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, ...) return [optimizer], [lr_scheduler] - """ def __init__(self, logging_interval: Optional[str] = None, log_momentum: bool = False) -> None: diff --git a/src/lightning/pytorch/callbacks/model_checkpoint.py b/src/lightning/pytorch/callbacks/model_checkpoint.py index bc4fe28827d12..a0eb75375e3b3 100644 --- a/src/lightning/pytorch/callbacks/model_checkpoint.py +++ b/src/lightning/pytorch/callbacks/model_checkpoint.py @@ -456,7 +456,6 @@ def __init_triggers( every_n_epochs: Optional[int], train_time_interval: Optional[timedelta], ) -> None: - # Default to running once after each validation epoch if neither # every_n_train_steps nor every_n_epochs is set if every_n_train_steps is None and every_n_epochs is None and train_time_interval is None: diff --git a/src/lightning/pytorch/callbacks/model_summary.py b/src/lightning/pytorch/callbacks/model_summary.py index 6d050ff9c47e2..5e565ac4a2697 100644 --- a/src/lightning/pytorch/callbacks/model_summary.py +++ b/src/lightning/pytorch/callbacks/model_summary.py @@ -35,8 +35,7 @@ class ModelSummary(Callback): - r""" - Generates a summary of all layers in a :class:`~lightning.pytorch.core.module.LightningModule`. + r"""Generates a summary of all layers in a :class:`~lightning.pytorch.core.module.LightningModule`. Args: max_depth: The maximum depth of layer nesting that the summary will include. A value of 0 turns the diff --git a/src/lightning/pytorch/callbacks/progress/progress_bar.py b/src/lightning/pytorch/callbacks/progress/progress_bar.py index 5998d3845ee55..e4bd102a744b9 100644 --- a/src/lightning/pytorch/callbacks/progress/progress_bar.py +++ b/src/lightning/pytorch/callbacks/progress/progress_bar.py @@ -19,10 +19,9 @@ class ProgressBar(Callback): - r""" - The base class for progress bars in Lightning. It is a :class:`~lightning.pytorch.callbacks.Callback` - that keeps track of the batch progress in the :class:`~lightning.pytorch.trainer.trainer.Trainer`. - You should implement your highly custom progress bars with this as the base class. + r"""The base class for progress bars in Lightning. It is a :class:`~lightning.pytorch.callbacks.Callback` that + keeps track of the batch progress in the :class:`~lightning.pytorch.trainer.trainer.Trainer`. You should + implement your highly custom progress bars with this as the base class. Example:: @@ -43,7 +42,6 @@ def on_train_batch_end(self, trainer, pl_module, outputs, batch_idx): bar = LitProgressBar() trainer = Trainer(callbacks=[bar]) - """ def __init__(self) -> None: @@ -160,9 +158,8 @@ def setup(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", stage: s def get_metrics( self, trainer: "pl.Trainer", pl_module: "pl.LightningModule" ) -> Dict[str, Union[int, str, float, Dict[str, float]]]: - r""" - Combines progress bar metrics collected from the trainer with standard metrics from get_standard_metrics. - Implement this to override the items displayed in the progress bar. + r"""Combines progress bar metrics collected from the trainer with standard metrics from + get_standard_metrics. Implement this to override the items displayed in the progress bar. 
Here is an example of how to override the defaults: @@ -191,9 +188,8 @@ def get_metrics(self, trainer, model): def get_standard_metrics(trainer: "pl.Trainer") -> Dict[str, Union[int, str]]: - r""" - Returns the standard metrics displayed in the progress bar. - Currently, it only includes the version of the experiment when using a logger. + r"""Returns the standard metrics displayed in the progress bar. Currently, it only includes the version of the + experiment when using a logger. .. code-block:: diff --git a/src/lightning/pytorch/callbacks/progress/rich_progress.py b/src/lightning/pytorch/callbacks/progress/rich_progress.py index 03c6658ee35cd..26e53c121573a 100644 --- a/src/lightning/pytorch/callbacks/progress/rich_progress.py +++ b/src/lightning/pytorch/callbacks/progress/rich_progress.py @@ -101,7 +101,6 @@ def add_custom_task(self, task: CustomInfiniteTask, start: bool = True) -> TaskI return new_task_index class CustomTimeColumn(ProgressColumn): - # Only refresh twice a second to prevent jitter max_refresh = 0.5 diff --git a/src/lightning/pytorch/callbacks/pruning.py b/src/lightning/pytorch/callbacks/pruning.py index 6c23671317d88..74cacf68e5327 100644 --- a/src/lightning/pytorch/callbacks/pruning.py +++ b/src/lightning/pytorch/callbacks/pruning.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -r""" -ModelPruning -^^^^^^^^^^^^ -""" +r"""ModelPruning ^^^^^^^^^^^^""" import inspect import logging from copy import deepcopy @@ -279,8 +276,7 @@ def _copy_param(new: nn.Module, old: nn.Module, name: str) -> None: dst.data = src.data.to(dst.device) def apply_lottery_ticket_hypothesis(self) -> None: - r""" - Lottery ticket hypothesis algorithm (see page 2 of the paper): + r"""Lottery ticket hypothesis algorithm (see page 2 of the paper): 1. Randomly initialize a neural network :math:`f(x; \theta_0)` (where :math:`\theta_0 \sim \mathcal{D}_\theta`). 2. Train the network for :math:`j` iterations, arriving at parameters :math:`\theta_j`. diff --git a/src/lightning/pytorch/callbacks/rich_model_summary.py b/src/lightning/pytorch/callbacks/rich_model_summary.py index 8165822c98236..6da6318b39f01 100644 --- a/src/lightning/pytorch/callbacks/rich_model_summary.py +++ b/src/lightning/pytorch/callbacks/rich_model_summary.py @@ -23,9 +23,8 @@ class RichModelSummary(ModelSummary): - r""" - Generates a summary of all layers in a :class:`~lightning.pytorch.core.module.LightningModule` - with `rich text formatting `_. + r"""Generates a summary of all layers in a :class:`~lightning.pytorch.core.module.LightningModule` with `rich + text formatting `_. Install it with pip: @@ -72,7 +71,6 @@ def summarize( trainable_parameters: int, model_size: float, ) -> None: - console = get_console() table = Table(header_style="bold magenta") diff --git a/src/lightning/pytorch/callbacks/stochastic_weight_avg.py b/src/lightning/pytorch/callbacks/stochastic_weight_avg.py index 11d78dc49ef26..e8dbf997899d7 100644 --- a/src/lightning/pytorch/callbacks/stochastic_weight_avg.py +++ b/src/lightning/pytorch/callbacks/stochastic_weight_avg.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-r""" -Stochastic Weight Averaging Callback -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -""" +r"""Stochastic Weight Averaging Callback ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^""" from copy import deepcopy from typing import Any, Callable, cast, Dict, List, Optional, Union @@ -44,9 +41,7 @@ def __init__( avg_fn: Optional[_AVG_FN] = None, device: Optional[Union[torch.device, str]] = torch.device("cpu"), ): - r""" - - Implements the Stochastic Weight Averaging (SWA) Callback to average a model. + r"""Implements the Stochastic Weight Averaging (SWA) Callback to average a model. Stochastic Weight Averaging was proposed in ``Averaging Weights Leads to Wider Optima and Better Generalization`` by Pavel Izmailov, Dmitrii @@ -94,7 +89,6 @@ def __init__( device: if provided, the averaged model will be stored on the ``device``. When None is provided, it will infer the `device` from ``pl_module``. (default: ``"cpu"``) - """ err_msg = "swa_epoch_start should be a >0 integer or a float between 0 and 1." diff --git a/src/lightning/pytorch/callbacks/timer.py b/src/lightning/pytorch/callbacks/timer.py index 68833997e5ecb..5f33b5afd7369 100644 --- a/src/lightning/pytorch/callbacks/timer.py +++ b/src/lightning/pytorch/callbacks/timer.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -r""" -Timer -^^^^^ -""" +r"""Timer ^^^^^""" import logging import time from datetime import timedelta diff --git a/src/lightning/pytorch/core/datamodule.py b/src/lightning/pytorch/core/datamodule.py index e7c5f031d8b3e..3f09ba1b1eb1a 100644 --- a/src/lightning/pytorch/core/datamodule.py +++ b/src/lightning/pytorch/core/datamodule.py @@ -76,8 +76,7 @@ def from_datasets( num_workers: int = 0, **datamodule_kwargs: Any, ) -> "LightningDataModule": - r""" - Create an instance from torch.utils.data.Dataset. + r"""Create an instance from torch.utils.data.Dataset. Args: train_dataset: Optional dataset to be used for train_dataloader() diff --git a/src/lightning/pytorch/core/hooks.py b/src/lightning/pytorch/core/hooks.py index 4e8be7d0558ff..df11b48f9b232 100644 --- a/src/lightning/pytorch/core/hooks.py +++ b/src/lightning/pytorch/core/hooks.py @@ -625,9 +625,8 @@ class CheckpointHooks: """Hooks to be used with Checkpointing.""" def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None: - r""" - Called by Lightning to restore your model. - If you saved something with :meth:`on_save_checkpoint` this is your chance to restore this. + r"""Called by Lightning to restore your model. If you saved something with :meth:`on_save_checkpoint` this + is your chance to restore this. Args: checkpoint: Loaded checkpoint @@ -644,9 +643,8 @@ def on_load_checkpoint(self, checkpoint): """ def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None: - r""" - Called by Lightning when saving a checkpoint to give you a chance to store anything - else you might want to save. + r"""Called by Lightning when saving a checkpoint to give you a chance to store anything else you might want + to save. Args: checkpoint: The full checkpoint dictionary before it gets dumped to a file. @@ -662,5 +660,4 @@ def on_save_checkpoint(self, checkpoint): Lightning saves all aspects of training (epoch, global step, etc...) including amp scaling. There is no need for you to store anything about training. 
- """ diff --git a/src/lightning/pytorch/core/mixins/hparams_mixin.py b/src/lightning/pytorch/core/mixins/hparams_mixin.py index edc252ce618aa..d30caeda6b59c 100644 --- a/src/lightning/pytorch/core/mixins/hparams_mixin.py +++ b/src/lightning/pytorch/core/mixins/hparams_mixin.py @@ -24,7 +24,6 @@ class HyperparametersMixin: - __jit_unused_properties__: List[str] = ["hparams", "hparams_initial"] def __init__(self) -> None: diff --git a/src/lightning/pytorch/core/module.py b/src/lightning/pytorch/core/module.py index feef9eb87028b..3408d4f77b8a3 100644 --- a/src/lightning/pytorch/core/module.py +++ b/src/lightning/pytorch/core/module.py @@ -325,8 +325,7 @@ def _apply_batch_transfer_handler( return batch def print(self, *args: Any, **kwargs: Any) -> None: - r""" - Prints only from process 0. Use this in any distributed mode to log only once. + r"""Prints only from process 0. Use this in any distributed mode to log only once. Args: *args: The thing to print. The same as for Python's built-in print function. @@ -336,7 +335,6 @@ def print(self, *args: Any, **kwargs: Any) -> None: def forward(self, x): self.print(x, 'in forward') - """ if self.trainer.is_global_zero: progress_bar = self.trainer.progress_bar_callback @@ -643,8 +641,7 @@ def all_gather( return apply_to_collection(data, Tensor, all_gather, group=group, sync_grads=sync_grads) def forward(self, *args: Any, **kwargs: Any) -> Any: - r""" - Same as :meth:`torch.nn.Module.forward`. + r"""Same as :meth:`torch.nn.Module.forward`. Args: *args: Whatever you decide to pass into the forward method. @@ -656,9 +653,8 @@ def forward(self, *args: Any, **kwargs: Any) -> Any: return super().forward(*args, **kwargs) def training_step(self, *args: Any, **kwargs: Any) -> STEP_OUTPUT: # type: ignore[return-value] - r""" - Here you compute and return the training loss and some additional metrics for e.g. - the progress bar or logger. + r"""Here you compute and return the training loss and some additional metrics for e.g. the progress bar or + logger. Args: batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]): @@ -711,9 +707,8 @@ def training_step(self, batch, batch_idx): rank_zero_warn("`training_step` must be implemented to be used with the Lightning Trainer") def validation_step(self, *args: Any, **kwargs: Any) -> Optional[STEP_OUTPUT]: - r""" - Operates on a single batch of data from the validation set. - In this step you'd might generate examples or calculate anything of interest like accuracy. + r"""Operates on a single batch of data from the validation set. In this step you'd might generate examples + or calculate anything of interest like accuracy. Args: batch: The output of your :class:`~torch.utils.data.DataLoader`. @@ -779,10 +774,8 @@ def validation_step(self, batch, batch_idx, dataloader_idx=0): """ def test_step(self, *args: Any, **kwargs: Any) -> Optional[STEP_OUTPUT]: - r""" - Operates on a single batch of data from the test set. - In this step you'd normally generate examples or calculate anything of interest - such as accuracy. + r"""Operates on a single batch of data from the test set. In this step you'd normally generate examples or + calculate anything of interest such as accuracy. Args: batch: The output of your :class:`~torch.utils.data.DataLoader`. @@ -907,10 +900,9 @@ def configure_callbacks(self): return [] def configure_optimizers(self) -> Any: - r""" - Choose what optimizers and learning-rate schedulers to use in your optimization. - Normally you'd need one. 
But in the case of GANs or similar you might have multiple. - Optimization with multiple optimizers only works in the manual optimization mode. + r"""Choose what optimizers and learning-rate schedulers to use in your optimization. Normally you'd need + one. But in the case of GANs or similar you might have multiple. Optimization with multiple optimizers only + works in the manual optimization mode. Return: Any of these 6 options. @@ -1230,9 +1222,8 @@ def optimizer_step( optimizer: Union[Optimizer, LightningOptimizer], optimizer_closure: Optional[Callable[[], Any]] = None, ) -> None: - r""" - Override this method to adjust the default way the :class:`~lightning.pytorch.trainer.trainer.Trainer` calls - the optimizer. + r"""Override this method to adjust the default way the :class:`~lightning.pytorch.trainer.trainer.Trainer` + calls the optimizer. By default, Lightning calls ``step()`` and ``zero_grad()`` as shown in the example. This method (and ``zero_grad()``) won't be called during the accumulation phase when @@ -1287,14 +1278,12 @@ def optimizer_zero_grad(self, epoch, batch_idx, optimizer): optimizer.zero_grad() def freeze(self) -> None: - r""" - Freeze all params for inference. + r"""Freeze all params for inference. Example:: model = MyLightningModule(...) model.freeze() - """ for param in self.parameters(): param.requires_grad = False diff --git a/src/lightning/pytorch/core/saving.py b/src/lightning/pytorch/core/saving.py index b30d84b33a435..2cbdd165b9064 100644 --- a/src/lightning/pytorch/core/saving.py +++ b/src/lightning/pytorch/core/saving.py @@ -112,7 +112,6 @@ def _load_state( cls_kwargs_loaded = {} # pass in the values we saved automatically if cls.CHECKPOINT_HYPER_PARAMS_KEY in checkpoint: - if issubclass(cls, pl.LightningModule): # TODO: make this a migration: # 1. (backward compatibility) Try to restore model hparams from checkpoint using old/past keys diff --git a/src/lightning/pytorch/loggers/comet.py b/src/lightning/pytorch/loggers/comet.py index ddd996586d93b..5c237ddfbcf31 100644 --- a/src/lightning/pytorch/loggers/comet.py +++ b/src/lightning/pytorch/loggers/comet.py @@ -52,9 +52,8 @@ class CometLogger(Logger): - r""" - Track your parameters, metrics, source code and more using - `Comet `_. + r"""Track your parameters, metrics, source code and more using `Comet + `_. Install it with pip: diff --git a/src/lightning/pytorch/loggers/csv_logs.py b/src/lightning/pytorch/loggers/csv_logs.py index 5de0e27247faf..a12a491868b64 100644 --- a/src/lightning/pytorch/loggers/csv_logs.py +++ b/src/lightning/pytorch/loggers/csv_logs.py @@ -36,8 +36,7 @@ class ExperimentWriter(_FabricExperimentWriter): - r""" - Experiment writer for CSVLogger. + r"""Experiment writer for CSVLogger. Currently, supports to log hyperparameters and metrics in YAML and CSV format, respectively. @@ -64,8 +63,7 @@ def save(self) -> None: class CSVLogger(Logger, FabricCSVLogger): - r""" - Log to local file system in yaml and CSV format. + r"""Log to local file system in yaml and CSV format. Logs are saved to ``os.path.join(save_dir, name, version)``. 
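A short sketch of attaching the CSVLogger touched above to a Trainer, assuming the default directory layout its docstring describes (``save_dir/name/version``)::

    from lightning.pytorch import Trainer
    from lightning.pytorch.loggers import CSVLogger

    # Metrics are written to e.g. "logs/my_exp/version_0/metrics.csv",
    # hyperparameters to "hparams.yaml" in the same folder.
    logger = CSVLogger(save_dir="logs", name="my_exp")
    trainer = Trainer(logger=logger)
    # trainer.fit(model)  # `model` is a hypothetical LightningModule
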
diff --git a/src/lightning/pytorch/loggers/mlflow.py b/src/lightning/pytorch/loggers/mlflow.py index 337d92bf85674..f240a3023bd32 100644 --- a/src/lightning/pytorch/loggers/mlflow.py +++ b/src/lightning/pytorch/loggers/mlflow.py @@ -181,7 +181,6 @@ def experiment(self) -> MlflowClient: self.logger.experiment.some_mlflow_function() """ - if self._initialized: return self._mlflow_client diff --git a/src/lightning/pytorch/loggers/neptune.py b/src/lightning/pytorch/loggers/neptune.py index 64b76e0d57531..4d2a1c156b747 100644 --- a/src/lightning/pytorch/loggers/neptune.py +++ b/src/lightning/pytorch/loggers/neptune.py @@ -48,8 +48,7 @@ class NeptuneLogger(Logger): - r""" - Log using `Neptune `_. + r"""Log using `Neptune `_. Install it with pip: @@ -364,8 +363,7 @@ def run(self) -> Run: @rank_zero_only def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None: # skipcq: PYL-W0221 - r""" - Log hyper-parameters to the run. + r"""Log hyper-parameters to the run. Hyperparams will be logged under the "/hyperparams" namespace. diff --git a/src/lightning/pytorch/loggers/tensorboard.py b/src/lightning/pytorch/loggers/tensorboard.py index bcced33e5b9c3..1b23f1826f28d 100644 --- a/src/lightning/pytorch/loggers/tensorboard.py +++ b/src/lightning/pytorch/loggers/tensorboard.py @@ -45,8 +45,7 @@ class TensorBoardLogger(Logger, FabricTensorBoardLogger): - r""" - Log to local file system in `TensorBoard `_ format. + r"""Log to local file system in `TensorBoard `_ format. Implemented using :class:`~tensorboardX.SummaryWriter`. Logs are saved to ``os.path.join(save_dir, name, version)``. This is the default logger in Lightning, it comes diff --git a/src/lightning/pytorch/loggers/utilities.py b/src/lightning/pytorch/loggers/utilities.py index fcb793867886a..a9e4afc8d87a0 100644 --- a/src/lightning/pytorch/loggers/utilities.py +++ b/src/lightning/pytorch/loggers/utilities.py @@ -37,7 +37,6 @@ def _scan_checkpoints(checkpoint_callback: Checkpoint, logged_model_time: dict) checkpoint_callback: Checkpoint callback reference. logged_model_time: dictionary containing the logged model times. """ - # get checkpoints to be saved with associated score checkpoints = {} if hasattr(checkpoint_callback, "last_model_path") and hasattr(checkpoint_callback, "current_score"): diff --git a/src/lightning/pytorch/loggers/wandb.py b/src/lightning/pytorch/loggers/wandb.py index cd289b4de0631..305266946561e 100644 --- a/src/lightning/pytorch/loggers/wandb.py +++ b/src/lightning/pytorch/loggers/wandb.py @@ -46,8 +46,7 @@ class WandbLogger(Logger): - r""" - Log using `Weights and Biases `_. + r"""Log using `Weights and Biases `_. **Installation and set-up** @@ -281,7 +280,6 @@ def any_lightning_module_function_or_hook(self): If required WandB package is not installed on the device. MisconfigurationException: If both ``log_model`` and ``offline`` is set to ``True``. - """ LOGGER_JOIN_CHAR = "-" diff --git a/src/lightning/pytorch/loops/optimization/automatic.py b/src/lightning/pytorch/loops/optimization/automatic.py index d2b5fc923b662..d45cb1f12c3dc 100644 --- a/src/lightning/pytorch/loops/optimization/automatic.py +++ b/src/lightning/pytorch/loops/optimization/automatic.py @@ -206,7 +206,6 @@ def _make_zero_grad_fn(self, batch_idx: int, optimizer: Optimizer) -> Optional[C Returns ``None`` in the case backward needs to be skipped. 
""" - if self._skip_backward: return None diff --git a/src/lightning/pytorch/loops/optimization/closure.py b/src/lightning/pytorch/loops/optimization/closure.py index 0949b222d114e..ec85a96e54042 100644 --- a/src/lightning/pytorch/loops/optimization/closure.py +++ b/src/lightning/pytorch/loops/optimization/closure.py @@ -30,8 +30,8 @@ class AbstractClosure(ABC, Generic[T]): """Abstract base class for optimizer closures in Lightning. Formally, a closure is binding variables from an external scope to a function that does a computation on these - variables without taking them explicitly as input. This has the benefit that a closure can be passed to an - object which later can call it like a function but without requiring to pass in any arguments. + variables without taking them explicitly as input. This has the benefit that a closure can be passed to an object + which later can call it like a function but without requiring to pass in any arguments. This class provides a simple abstraction making the instance of this class callable like a function while capturing the closure result and caching it. diff --git a/src/lightning/pytorch/overrides/distributed.py b/src/lightning/pytorch/overrides/distributed.py index 1531e80e22517..f34314c9f9305 100644 --- a/src/lightning/pytorch/overrides/distributed.py +++ b/src/lightning/pytorch/overrides/distributed.py @@ -194,8 +194,8 @@ class UnrepeatedDistributedSampler(DistributedSampler): one of the processes runs one fewer batch. During prediction, buffers are only synced on the first batch, so this is safe to use as long as each process runs at least one batch. We verify this in an assert. - Taken from https://github.com/jpuigcerver/PyLaia/blob/v1.0.0/laia/data/unpadded_distributed_sampler.py - and https://github.com/pytorch/pytorch/issues/25162#issuecomment-634146002 + Taken from https://github.com/jpuigcerver/PyLaia/blob/v1.0.0/laia/data/unpadded_distributed_sampler.py and + https://github.com/pytorch/pytorch/issues/25162#issuecomment-634146002 """ def __init__(self, *args: Any, **kwargs: Any) -> None: diff --git a/src/lightning/pytorch/profilers/pytorch.py b/src/lightning/pytorch/profilers/pytorch.py index b0281caee1105..0fec4ca83ce69 100644 --- a/src/lightning/pytorch/profilers/pytorch.py +++ b/src/lightning/pytorch/profilers/pytorch.py @@ -216,7 +216,6 @@ def __call__(self, num_step: int) -> "ProfilerAction": class PyTorchProfiler(Profiler): - STEP_FUNCTIONS = {"training_step", "validation_step", "test_step", "predict_step"} AVAILABLE_SORT_KEYS = { "cpu_time", diff --git a/src/lightning/pytorch/profilers/simple.py b/src/lightning/pytorch/profilers/simple.py index 50b37624bb944..7facafe9cb140 100644 --- a/src/lightning/pytorch/profilers/simple.py +++ b/src/lightning/pytorch/profilers/simple.py @@ -100,7 +100,6 @@ def summary(self) -> str: output_string += f"Profiler Report{sep}" if self.extended: - if len(self.recorded_durations) > 0: max_key = max(len(k) for k in self.recorded_durations.keys()) diff --git a/src/lightning/pytorch/profilers/xla.py b/src/lightning/pytorch/profilers/xla.py index 6d04dccf08a6d..3f710cfdbd318 100644 --- a/src/lightning/pytorch/profilers/xla.py +++ b/src/lightning/pytorch/profilers/xla.py @@ -21,7 +21,6 @@ class XLAProfiler(Profiler): - STEP_FUNCTIONS = {"validation_step", "test_step", "predict_step"} RECORD_FUNCTIONS = { "training_step", diff --git a/src/lightning/pytorch/serve/servable_module.py b/src/lightning/pytorch/serve/servable_module.py index 3aa15a6c1f80b..33efa9956a16f 100644 --- 
a/src/lightning/pytorch/serve/servable_module.py +++ b/src/lightning/pytorch/serve/servable_module.py @@ -6,7 +6,6 @@ class ServableModule(ABC, torch.nn.Module): - """The ServableModule provides a simple API to make your model servable. .. warning:: This is an :ref:`experimental ` feature. @@ -72,8 +71,7 @@ def configure_serialization(self) -> Tuple[Dict[str, Callable], Dict[str, Callab @abstractmethod def serve_step(self, *args: Tensor, **kwargs: Tensor) -> Dict[str, Tensor]: - r""" - Returns the predictions of your model as a dictionary. + r"""Returns the predictions of your model as a dictionary. .. code-block:: python diff --git a/src/lightning/pytorch/strategies/deepspeed.py b/src/lightning/pytorch/strategies/deepspeed.py index 7a1be0ecb500a..19862c615d416 100644 --- a/src/lightning/pytorch/strategies/deepspeed.py +++ b/src/lightning/pytorch/strategies/deepspeed.py @@ -825,7 +825,6 @@ def _restore_zero_state(self, ckpt: Mapping[str, Any]) -> None: assert self.lightning_module is not None def load(module: torch.nn.Module, prefix: str = "") -> None: - missing_keys: List[str] = [] unexpected_keys: List[str] = [] error_msgs: List[str] = [] diff --git a/src/lightning/pytorch/strategies/fsdp.py b/src/lightning/pytorch/strategies/fsdp.py index a86c4f16499ad..d99df72f76194 100644 --- a/src/lightning/pytorch/strategies/fsdp.py +++ b/src/lightning/pytorch/strategies/fsdp.py @@ -89,7 +89,6 @@ class FSDPStrategy(ParallelStrategy): Enabling this can free up a significant amount of memory at the cost of speed since activations in these layers need to be recomputed during backpropagation. \**kwargs: See available parameters in :class:`torch.distributed.fsdp.FullyShardedDataParallel`. - """ strategy_name = "fsdp" diff --git a/src/lightning/pytorch/strategies/hpu_parallel.py b/src/lightning/pytorch/strategies/hpu_parallel.py index 9c700e91c0928..1b54cea9e66b9 100644 --- a/src/lightning/pytorch/strategies/hpu_parallel.py +++ b/src/lightning/pytorch/strategies/hpu_parallel.py @@ -58,7 +58,6 @@ def __init__( process_group_backend: Optional[str] = "hccl", **kwargs: Any, ) -> None: - if not _HPU_AVAILABLE: raise MisconfigurationException("`HPUParallelStrategy` requires HPU devices to run") @@ -90,7 +89,6 @@ def checkpoint_io(self, io: Optional[CheckpointIO]) -> None: self._checkpoint_io = io def setup_environment(self) -> None: - os.environ["ID"] = str(self.local_rank) if self._process_group_backend == "hccl": # this env is used in overrides to check the backend initiated diff --git a/src/lightning/pytorch/strategies/launchers/subprocess_script.py b/src/lightning/pytorch/strategies/launchers/subprocess_script.py index 815c36e0fbefa..a46f09f93022a 100644 --- a/src/lightning/pytorch/strategies/launchers/subprocess_script.py +++ b/src/lightning/pytorch/strategies/launchers/subprocess_script.py @@ -29,8 +29,7 @@ class _SubprocessScriptLauncher(_Launcher): - r""" - A process launcher that invokes the current script as many times as desired in a single node. + r"""A process launcher that invokes the current script as many times as desired in a single node. This launcher needs to be invoked on each node. 
In its default behavior, the main process in each node then spawns N-1 child processes via :func:`subprocess.Popen`, diff --git a/src/lightning/pytorch/strategies/launchers/xla.py b/src/lightning/pytorch/strategies/launchers/xla.py index 823e5671c3b3e..a762b5ac0506c 100644 --- a/src/lightning/pytorch/strategies/launchers/xla.py +++ b/src/lightning/pytorch/strategies/launchers/xla.py @@ -31,8 +31,8 @@ class _XLALauncher(_MultiProcessingLauncher): - r"""Launches processes that run a given function in parallel on XLA supported hardware, and joins them all at the - end. + r"""Launches processes that run a given function in parallel on XLA supported hardware, and joins them all at + the end. The main process in which this launcher is invoked creates N so-called worker processes (using the `torch_xla` :func:`xmp.spawn`) that run the given function. diff --git a/src/lightning/pytorch/strategies/single_hpu.py b/src/lightning/pytorch/strategies/single_hpu.py index 72bee3e057b43..d93deac8f5b1d 100644 --- a/src/lightning/pytorch/strategies/single_hpu.py +++ b/src/lightning/pytorch/strategies/single_hpu.py @@ -46,7 +46,6 @@ def __init__( checkpoint_io: Optional[CheckpointIO] = None, precision_plugin: Optional[PrecisionPlugin] = None, ): - if not _HPU_AVAILABLE: raise MisconfigurationException("`SingleHPUStrategy` requires HPU devices to run") diff --git a/src/lightning/pytorch/trainer/call.py b/src/lightning/pytorch/trainer/call.py index c009fc7b59f37..a13066e9fbef0 100644 --- a/src/lightning/pytorch/trainer/call.py +++ b/src/lightning/pytorch/trainer/call.py @@ -28,9 +28,8 @@ def _call_and_handle_interrupt(trainer: "pl.Trainer", trainer_fn: Callable, *args: Any, **kwargs: Any) -> Any: - r""" - Error handling, intended to be used only for main trainer function entry points (fit, validate, test, predict) - as all errors should funnel through them + r"""Error handling, intended to be used only for main trainer function entry points (fit, validate, test, + predict) as all errors should funnel through them. Args: trainer_fn: one of (fit, validate, test, predict) diff --git a/src/lightning/pytorch/trainer/configuration_validator.py b/src/lightning/pytorch/trainer/configuration_validator.py index 060c57aecff48..ff3aa4273692b 100644 --- a/src/lightning/pytorch/trainer/configuration_validator.py +++ b/src/lightning/pytorch/trainer/configuration_validator.py @@ -23,12 +23,10 @@ def _verify_loop_configurations(trainer: "pl.Trainer") -> None: - r""" - Checks that the model is configured correctly before the run is started. + r"""Checks that the model is configured correctly before the run is started. Args: trainer: Lightning Trainer. Its `lightning_module` (the model) to check the configuration. - """ model = trainer.lightning_module diff --git a/src/lightning/pytorch/trainer/connectors/checkpoint_connector.py b/src/lightning/pytorch/trainer/connectors/checkpoint_connector.py index 5ff60bdbe55ea..97cbba9f97470 100644 --- a/src/lightning/pytorch/trainer/connectors/checkpoint_connector.py +++ b/src/lightning/pytorch/trainer/connectors/checkpoint_connector.py @@ -401,6 +401,7 @@ def _restore_modules_and_callbacks(self, checkpoint_path: Optional[_PATH] = None def dump_checkpoint(self, weights_only: bool = False) -> dict: """Creating a model checkpoint dictionary object from various component states. 
+ Args: weights_only: saving model weights only Return: @@ -520,7 +521,6 @@ def __max_ckpt_version_in_folder(dir_path: _PATH, name_key: str = "ckpt_") -> Op Returns: None if no-corresponding-file else maximum suffix number """ - # check directory existence fs, uri = url_to_fs(str(dir_path)) if not fs.exists(dir_path): @@ -544,7 +544,6 @@ def __max_ckpt_version_in_folder(dir_path: _PATH, name_key: str = "ckpt_") -> Op @staticmethod def __get_max_ckpt_path_from_folder(folder_path: _PATH) -> str: """Get path of maximum-epoch checkpoint in the folder.""" - max_suffix = _CheckpointConnector.__max_ckpt_version_in_folder(folder_path) ckpt_number = max_suffix if max_suffix is not None else 0 return f"{folder_path}/hpc_ckpt_{ckpt_number}.ckpt" diff --git a/src/lightning/pytorch/trainer/trainer.py b/src/lightning/pytorch/trainer/trainer.py index abd1f563f019f..37750ec2ac3f0 100644 --- a/src/lightning/pytorch/trainer/trainer.py +++ b/src/lightning/pytorch/trainer/trainer.py @@ -18,7 +18,6 @@ # WE FAVOR READABILITY OVER ENGINEERING-CONSTRUCTS BY DESIGN # DO NOT REMOVE THIS NOTICE # - WILLIAM FALCON - """Trainer to automate the training.""" import logging import math @@ -130,8 +129,7 @@ def __init__( reload_dataloaders_every_n_epochs: int = 0, default_root_dir: Optional[_PATH] = None, ) -> None: - r""" - Customize every aspect of training via flags. + r"""Customize every aspect of training via flags. Args: accelerator: Supports passing different accelerator types ("cpu", "gpu", "tpu", "ipu", "hpu", "mps", "auto") @@ -494,8 +492,7 @@ def fit( datamodule: Optional[LightningDataModule] = None, ckpt_path: Optional[str] = None, ) -> None: - r""" - Runs the full optimization routine. + r"""Runs the full optimization routine. Args: model: Model to fit. @@ -570,8 +567,7 @@ def validate( verbose: bool = True, datamodule: Optional[LightningDataModule] = None, ) -> _EVALUATE_OUTPUT: - r""" - Perform one evaluation epoch over the validation set. + r"""Perform one evaluation epoch over the validation set. Args: model: The model to validate. @@ -666,9 +662,8 @@ def test( verbose: bool = True, datamodule: Optional[LightningDataModule] = None, ) -> _EVALUATE_OUTPUT: - r""" - Perform one evaluation epoch over the test set. - It's separated from fit to make sure you never run on your test set until you want to. + r"""Perform one evaluation epoch over the test set. It's separated from fit to make sure you never run on + your test set until you want to. Args: model: The model to test. @@ -763,10 +758,8 @@ def predict( return_predictions: Optional[bool] = None, ckpt_path: Optional[str] = None, ) -> Optional[_PREDICT_OUTPUT]: - r""" - Run inference on your data. - This will call the model forward function to compute predictions. Useful to perform distributed - and batched predictions. Logging is disabled in the predict hooks. + r"""Run inference on your data. This will call the model forward function to compute predictions. Useful to + perform distributed and batched predictions. Logging is disabled in the predict hooks. Args: model: The model to predict with. @@ -1245,14 +1238,12 @@ def ckpt_path(self, ckpt_path: Optional[_PATH]) -> None: def save_checkpoint( self, filepath: _PATH, weights_only: bool = False, storage_options: Optional[Any] = None ) -> None: - r""" - Runs routine to create a checkpoint. + r"""Runs routine to create a checkpoint. Args: filepath: Path where checkpoint is saved. weights_only: If ``True``, will only save the model weights. 
storage_options: parameter for how to save to storage, passed to ``CheckpointIO`` plugin - """ if self.model is None: raise AttributeError( diff --git a/src/lightning/pytorch/utilities/parsing.py b/src/lightning/pytorch/utilities/parsing.py index 1280af63ec368..a17d580593899 100644 --- a/src/lightning/pytorch/utilities/parsing.py +++ b/src/lightning/pytorch/utilities/parsing.py @@ -28,7 +28,6 @@ def is_picklable(obj: object) -> bool: """Tests if an object can be pickled.""" - try: pickle.dumps(obj) return True diff --git a/tests/integrations_app/flagship/test_flashy.py b/tests/integrations_app/flagship/test_flashy.py index c4b09c70261be..fbaad8efec0b3 100644 --- a/tests/integrations_app/flagship/test_flashy.py +++ b/tests/integrations_app/flagship/test_flashy.py @@ -20,7 +20,6 @@ def validate_app_functionalities(app_page: "Page") -> None: app_page: The UI page of the app to be validated. """ - while True: try: app_page.reload() @@ -74,5 +73,4 @@ def validate_app_functionalities(app_page: "Page") -> None: @pytest.mark.cloud def test_app_cloud() -> None: with run_app_in_cloud(_PATH_INTEGRATIONS_DIR) as (admin_page, view_page, fetch_logs, _): - validate_app_functionalities(view_page) diff --git a/tests/integrations_app/local/test_core_features_app.py b/tests/integrations_app/local/test_core_features_app.py index cfb9487b9dd0f..c112ef30ac425 100644 --- a/tests/integrations_app/local/test_core_features_app.py +++ b/tests/integrations_app/local/test_core_features_app.py @@ -7,7 +7,6 @@ def test_core_features_app_example(): - runner = CliRunner() result = runner.invoke( run_app, diff --git a/tests/integrations_app/public/test_app_dag.py b/tests/integrations_app/public/test_app_dag.py index 6a2808397f365..0bdbdcae47f18 100644 --- a/tests/integrations_app/public/test_app_dag.py +++ b/tests/integrations_app/public/test_app_dag.py @@ -10,7 +10,6 @@ @pytest.mark.cloud def test_app_dag_example_cloud() -> None: with run_app_in_cloud(os.path.join(_PATH_EXAMPLES, "dag")) as (_, _, fetch_logs, _): - launch_log, finish_log = False, False while not (launch_log and finish_log): for log in fetch_logs(["flow"]): diff --git a/tests/integrations_app/public/test_commands_and_api.py b/tests/integrations_app/public/test_commands_and_api.py index 82b59a9cbf508..a8af0c0088bbb 100644 --- a/tests/integrations_app/public/test_commands_and_api.py +++ b/tests/integrations_app/public/test_commands_and_api.py @@ -26,7 +26,6 @@ def test_commands_and_api_example_cloud() -> None: cmd_5 = "lightning logout" process = Popen(" && ".join([cmd_1, cmd_2, cmd_3, cmd_4, cmd_5]), shell=True) process.wait() - "/".join(view_page.url.split("/")[:-2]) # Validate the logs. 
diff --git a/tests/integrations_app/public/test_drive.py b/tests/integrations_app/public/test_drive.py index b885318c0289b..b17508bbfce96 100644 --- a/tests/integrations_app/public/test_drive.py +++ b/tests/integrations_app/public/test_drive.py @@ -15,7 +15,6 @@ def test_drive_example_cloud() -> None: fetch_logs, _, ): - has_logs = False while not has_logs: for log in fetch_logs(["flow"]): diff --git a/tests/integrations_app/public/test_gradio.py b/tests/integrations_app/public/test_gradio.py index 1ed4b66e90ad3..df88e342c4f18 100644 --- a/tests/integrations_app/public/test_gradio.py +++ b/tests/integrations_app/public/test_gradio.py @@ -6,11 +6,9 @@ @mock.patch.dict(os.environ, {"LIGHTING_TESTING": "1"}) @mock.patch("lightning.app.components.serve.gradio_server.gradio") def test_serve_gradio(gradio_mock): - from lightning.app.components.serve.gradio_server import ServeGradio class MyGradioServe(ServeGradio): - inputs = gradio_mock.inputs.Image(type="pil") outputs = gradio_mock.outputs.Image(type="pil") examples = [["./examples/app/components/serve/gradio/beyonce.png"]] diff --git a/tests/integrations_app/public/test_layout.py b/tests/integrations_app/public/test_layout.py index af8dd2358762f..c839187c98bef 100644 --- a/tests/integrations_app/public/test_layout.py +++ b/tests/integrations_app/public/test_layout.py @@ -7,7 +7,6 @@ def test_layout_example(): - runner = CliRunner() result = runner.invoke( run_app, diff --git a/tests/integrations_app/public/test_payload.py b/tests/integrations_app/public/test_payload.py index 65b6a24c1dc35..0ff968b4f8247 100644 --- a/tests/integrations_app/public/test_payload.py +++ b/tests/integrations_app/public/test_payload.py @@ -10,7 +10,6 @@ @pytest.mark.cloud def test_payload_example_cloud() -> None: with run_app_in_cloud(os.path.join(_PATH_EXAMPLES, "payload")) as (_, _, fetch_logs, _): - has_logs = False while not has_logs: for log in fetch_logs(["flow"]): diff --git a/tests/integrations_app/public/test_pickle_or_not.py b/tests/integrations_app/public/test_pickle_or_not.py index bdfaa4903e6ab..942f50df9db21 100644 --- a/tests/integrations_app/public/test_pickle_or_not.py +++ b/tests/integrations_app/public/test_pickle_or_not.py @@ -10,7 +10,6 @@ # TODO: Investigate why it doesn't work @pytest.mark.skip(reason="test has been ignored for a while and seems not to be working :(") def test_pickle_or_not_example(): - runner = CliRunner() result = runner.invoke( run_app, diff --git a/tests/integrations_app/public/test_quick_start.py b/tests/integrations_app/public/test_quick_start.py index 7a3cad03077f1..379494e775d80 100644 --- a/tests/integrations_app/public/test_quick_start.py +++ b/tests/integrations_app/public/test_quick_start.py @@ -29,7 +29,6 @@ def run_once(self): @_RunIf(pl=True, skip_windows=True, skip_linux=True) def test_quick_start_example(caplog, monkeypatch): """This test ensures the Quick Start example properly train and serve PyTorch Lightning.""" - monkeypatch.setattr("logging.getLogger", mock.MagicMock(return_value=logging.getLogger())) with caplog.at_level(logging.INFO): diff --git a/tests/integrations_app/public/test_scripts.py b/tests/integrations_app/public/test_scripts.py index ff6f9f62b476d..ec5cf10ec3a82 100644 --- a/tests/integrations_app/public/test_scripts.py +++ b/tests/integrations_app/public/test_scripts.py @@ -23,7 +23,6 @@ def test_scripts(file): @pytest.mark.skip(reason="causing some issues with CI, not sure if the test is actually needed") @_RunIf(pl=True) def test_components_app_example(): - runner = CliRunner() result = 
runner.invoke( run_app, diff --git a/tests/tests_app/cli/test_cd.py b/tests/tests_app/cli/test_cd.py index 0d80b65ffff2c..ef3a9a56d4096 100644 --- a/tests/tests_app/cli/test_cd.py +++ b/tests/tests_app/cli/test_cd.py @@ -12,7 +12,6 @@ @pytest.mark.skipif(sys.platform == "win32", reason="not supported on windows yet") def test_cd(monkeypatch): """This test validates cd behaves as expected.""" - ls = mock.MagicMock() monkeypatch.setattr(cd, "ls", ls) diff --git a/tests/tests_app/cli/test_cloud_cli.py b/tests/tests_app/cli/test_cloud_cli.py index e3ee012ed052e..4dceed026d39e 100644 --- a/tests/tests_app/cli/test_cloud_cli.py +++ b/tests/tests_app/cli/test_cloud_cli.py @@ -121,7 +121,6 @@ def cloud_space_service_create_lightning_run_instance(self, project_id, cloudspa @mock.patch("lightning.app.runners.runtime_type.CloudRuntime", CloudRuntimePatch) @pytest.mark.parametrize("create_response", [RuntimeErrorResponse(), RuntimeErrorResponse2()]) def test_start_app(create_response, monkeypatch): - monkeypatch.setattr(cloud, "V1LightningappInstanceState", MagicMock()) monkeypatch.setattr(cloud, "CloudspaceIdRunsBody", MagicMock()) monkeypatch.setattr(cloud, "V1Flowserver", MagicMock()) @@ -205,7 +204,6 @@ def cloud_space_service_list_cloud_spaces(self, *args, **kwargs): ], ) def test_start_app_exception(message, monkeypatch, caplog): - monkeypatch.setattr(cloud, "V1LightningappInstanceState", MagicMock()) monkeypatch.setattr(cloud, "CloudspaceIdRunsBody", MagicMock()) monkeypatch.setattr(cloud, "V1Flowserver", MagicMock()) diff --git a/tests/tests_app/cli/test_cmd_init.py b/tests/tests_app/cli/test_cmd_init.py index 0ec1999c613cd..0a4bcdd7b4fcd 100644 --- a/tests/tests_app/cli/test_cmd_init.py +++ b/tests/tests_app/cli/test_cmd_init.py @@ -9,7 +9,6 @@ def test_validate_init_name(): - # test that a good name works (mix chars) value = cmd_init._capture_valid_app_component_name("abc1-cde") assert value == "abc1-cde" diff --git a/tests/tests_app/cli/test_cmd_install.py b/tests/tests_app/cli/test_cmd_install.py index b383132c59573..bc8998722e5b0 100644 --- a/tests/tests_app/cli/test_cmd_install.py +++ b/tests/tests_app/cli/test_cmd_install.py @@ -63,7 +63,6 @@ def test_valid_unpublished_app_name(): @pytest.mark.skip(reason="need to figure out how to authorize git clone from the private repo") def test_app_install(tmpdir, monkeypatch): """Tests unpublished app install.""" - monkeypatch.chdir(tmpdir) real_app = "https://github.com/Lightning-AI/install-app" @@ -195,7 +194,6 @@ def test_version_arg_component(tmpdir, monkeypatch): @mock.patch("lightning.app.cli.cmd_install.subprocess", mock.MagicMock()) @mock.patch("lightning.app.cli.cmd_install.os.chdir", mock.MagicMock()) def test_version_arg_app(tmpdir): - # Version does not exist app_name = "lightning/invideo" version_arg = "NOT-EXIST" @@ -215,7 +213,6 @@ def test_version_arg_app(tmpdir): @mock.patch("lightning.app.cli.cmd_install.os.chdir", mock.MagicMock()) @mock.patch("lightning.app.cli.cmd_install._show_install_app_prompt") def test_install_resolve_latest_version(mock_show_install_app_prompt, tmpdir): - app_name = "lightning/invideo" runner = CliRunner() with mock.patch("lightning.app.cli.cmd_install.requests.get") as get_api_mock: @@ -246,7 +243,6 @@ def test_install_resolve_latest_version(mock_show_install_app_prompt, tmpdir): def test_proper_url_parsing(): - name = "lightning/invideo" # make sure org/app-name name is correct @@ -273,7 +269,6 @@ def test_proper_url_parsing(): @_RunIf(skip_windows=True) def test_install_app_shows_error(tmpdir): - 
app_folder_dir = Path(tmpdir / "some_random_directory").absolute() app_folder_dir.mkdir() diff --git a/tests/tests_app/cli/test_cmd_show_cluster_logs.py b/tests/tests_app/cli/test_cmd_show_cluster_logs.py index 471f2befbd386..7af278065b53d 100644 --- a/tests/tests_app/cli/test_cmd_show_cluster_logs.py +++ b/tests/tests_app/cli/test_cmd_show_cluster_logs.py @@ -13,7 +13,6 @@ @mock.patch("lightning.app.cli.lightning_cli.AWSClusterManager.get_clusters") def test_show_logs_errors(get_clusters): """Test that the CLI prints the errors for the show logs command.""" - runner = CliRunner() # Run without arguments diff --git a/tests/tests_app/cli/test_cmd_show_logs.py b/tests/tests_app/cli/test_cmd_show_logs.py index d6d8e522f3763..16ff57eb9bcaa 100644 --- a/tests/tests_app/cli/test_cmd_show_logs.py +++ b/tests/tests_app/cli/test_cmd_show_logs.py @@ -9,7 +9,6 @@ @mock.patch("lightning.app.cli.commands.logs._get_project") def test_show_logs_errors(_, client): """Test that the CLI prints the errors for the show logs command.""" - runner = CliRunner() # Response prep diff --git a/tests/tests_app/cli/test_connect_data.py b/tests/tests_app/cli/test_connect_data.py index 3243ca28c45c3..80159d5375b76 100644 --- a/tests/tests_app/cli/test_connect_data.py +++ b/tests/tests_app/cli/test_connect_data.py @@ -8,7 +8,6 @@ @pytest.mark.skipif(sys.platform == "win32", reason="lightning connect data isn't supported on windows") def test_connect_data_no_project(monkeypatch): - from lightning_cloud.openapi import V1ListMembershipsResponse, V1Membership client = MagicMock() @@ -29,7 +28,6 @@ def test_connect_data_no_project(monkeypatch): @pytest.mark.skipif(sys.platform == "win32", reason="lightning connect data isn't supported on windows") def test_connect_data(monkeypatch): - from lightning_cloud.openapi import Create, V1AwsDataConnection, V1ListMembershipsResponse, V1Membership client = MagicMock() diff --git a/tests/tests_app/cli/test_rm.py b/tests/tests_app/cli/test_rm.py index 6d9dcfa90b66a..2f5dae6487e54 100644 --- a/tests/tests_app/cli/test_rm.py +++ b/tests/tests_app/cli/test_rm.py @@ -19,7 +19,6 @@ @pytest.mark.skipif(sys.platform == "win32", reason="not supported on windows yet") def test_rm(monkeypatch): """This test validates rm behaves as expected.""" - if os.path.exists(cd._CD_FILE): os.remove(cd._CD_FILE) diff --git a/tests/tests_app/cli/test_run_app.py b/tests/tests_app/cli/test_run_app.py index 37ea9b6233b01..fde21597eca28 100644 --- a/tests/tests_app/cli/test_run_app.py +++ b/tests/tests_app/cli/test_run_app.py @@ -19,7 +19,6 @@ @pytest.mark.parametrize("open_ui", (True, False)) def test_lightning_run_app(lauch_mock: mock.MagicMock, open_ui, caplog, monkeypatch): """This test validates the command is runned properly and the LightningApp method is being executed.""" - monkeypatch.setattr("lightning.app._logger", logging.getLogger()) original_method = LightningApp._run @@ -50,7 +49,6 @@ def _lightning_app_run_and_logging(self, *args, **kwargs): os.environ["PYTEST_CURRENT_TEST"] = pytest_env # capture logs. 
if open_ui: - # Get the designated port port = constants.APP_SERVER_PORT diff --git a/tests/tests_app/components/database/test_client_server.py b/tests/tests_app/components/database/test_client_server.py index b8c938da9dc4b..bfcc435e8069c 100644 --- a/tests/tests_app/components/database/test_client_server.py +++ b/tests/tests_app/components/database/test_client_server.py @@ -48,7 +48,6 @@ def run(self, client: DatabaseClient): @pytest.mark.skipif(not _is_sqlmodel_available(), reason="sqlmodel is required for this test.") def test_client_server(): - database_path = Path("database.db").resolve() if database_path.exists(): os.remove(database_path) @@ -122,7 +121,6 @@ def run(self): @pytest.mark.skipif(sys.platform == "win32", reason="currently not supported for windows.") @pytest.mark.skipif(not _is_sqlmodel_available(), reason="sqlmodel is required for this test.") def test_work_database_restart(): - id = str(uuid4()).split("-")[0] class Flow(LightningFlow): @@ -166,7 +164,6 @@ def run(self): @pytest.mark.skipif(sys.platform == "win32", reason="currently not supported for windows.") @pytest.mark.skipif(not _is_sqlmodel_available(), reason="sqlmodel is required for this test.") def test_work_database_periodic_store(): - id = str(uuid4()).split("-")[0] class Flow(LightningFlow): diff --git a/tests/tests_app/components/multi_node/test_fabric.py b/tests/tests_app/components/multi_node/test_fabric.py index 756d59faa3c97..7e6b7bae8cd5c 100644 --- a/tests/tests_app/components/multi_node/test_fabric.py +++ b/tests/tests_app/components/multi_node/test_fabric.py @@ -73,7 +73,6 @@ def test_fabric_run_executor_mps_forced_cpu(accelerator_given, accelerator_expec ) @pytest.mark.skipif(not module_available("lightning"), reason="Lightning is required for this test") def test_trainer_run_executor_arguments_choices(args_given: dict, args_expected: dict): - # ddp with mps devices not available (tested separately, just patching here for cross-os testing of other args) if lf.accelerators.MPSAccelerator.is_available(): args_expected["accelerator"] = "cpu" diff --git a/tests/tests_app/components/multi_node/test_trainer.py b/tests/tests_app/components/multi_node/test_trainer.py index 55282f82f7c9d..249d7868652bb 100644 --- a/tests/tests_app/components/multi_node/test_trainer.py +++ b/tests/tests_app/components/multi_node/test_trainer.py @@ -71,7 +71,6 @@ def test_trainer_run_executor_arguments_choices( args_given: dict, args_expected: dict, ): - if pl.accelerators.MPSAccelerator.is_available(): args_expected.pop("accelerator", None) # Cross platform tests -> MPS is tested separately diff --git a/tests/tests_app/components/serve/test_model_inference_api.py b/tests/tests_app/components/serve/test_model_inference_api.py index 0b0353a234d76..be96e4a6b1da4 100644 --- a/tests/tests_app/components/serve/test_model_inference_api.py +++ b/tests/tests_app/components/serve/test_model_inference_api.py @@ -38,7 +38,6 @@ def target_fn(port, workers): # target machine actively refused it @_RunIf(skip_windows=True) def test_model_inference_api(workers): - port = find_free_network_port() process = mp.Process(target=target_fn, args=(port, workers)) process.start() @@ -70,7 +69,6 @@ def predict(self, x): def test_model_inference_api_mock(monkeypatch): - monkeypatch.setattr(serve, "uvicorn", MagicMock()) comp = EmptyServer() comp.run() diff --git a/tests/tests_app/core/test_lightning_api.py b/tests/tests_app/core/test_lightning_api.py index dd6b7e0851cc4..9754ef916a673 100644 --- a/tests/tests_app/core/test_lightning_api.py +++ 
b/tests/tests_app/core/test_lightning_api.py @@ -192,7 +192,6 @@ def test_update_publish_state_and_maybe_refresh_ui(): - receives the state from the `publish_state_queue` and populates the app_state_store - receives a notification to refresh the UI and makes a GET Request (streamlit). """ - app = AppStageTestingApp(FlowA(), log_level="debug") publish_state_queue = _MockQueue("publish_state_queue") api_response_queue = _MockQueue("api_response_queue") @@ -241,7 +240,6 @@ def get(self, timeout: int = 0): headers = headers_for({"type": x_lightning_type}) async with AsyncClient(app=fastapi_service, base_url="http://test") as client: - with pytest.raises(Exception, match="X-Lightning-Session-UUID"): await client.get("/api/v1/spec") diff --git a/tests/tests_app/core/test_lightning_app.py b/tests/tests_app/core/test_lightning_app.py index 54fc38ab0f657..526ab7006d66d 100644 --- a/tests/tests_app/core/test_lightning_app.py +++ b/tests/tests_app/core/test_lightning_app.py @@ -36,7 +36,6 @@ def test_lightning_app_requires_root_run_method(): """Test that a useful exception is raised if the root flow does not override the run method.""" - with pytest.raises( TypeError, match=escape("The root flow passed to `LightningApp` does not override the `run()` method") ): @@ -448,7 +447,6 @@ def run(self): ) @pytest.mark.flaky(reruns=5) def test_lightning_app_aggregation_speed(default_timeout, queue_type_cls: BaseQueue, sleep_time, expect): - """This test validates the `_collect_deltas_from_ui_and_work_queues` can aggregate multiple delta together in a time window.""" @@ -510,7 +508,6 @@ def run(self): def test_maybe_apply_changes_from_flow(): """This test validates the app `_updated` is set to True only if the state was changed in the flow.""" - app = LightningApp(SimpleFlow2()) app.delta_queue = MultiProcessQueue("a", 0) assert app._has_updated @@ -607,7 +604,7 @@ def run(self): expected = 1 if self.use_same_args else next_c - if not all([w.num_successes == (expected if w.cache_calls else next_c) for w in self.works()]): + if not all(w.num_successes == (expected if w.cache_calls else next_c) for w in self.works()): return self.c += 1 @@ -1169,7 +1166,6 @@ def run(self): def test_lightning_flow_properties(): """Validates setting properties to the LightningFlow properly calls property.fset.""" - flow = FlowValue() assert flow._value is None flow.run() @@ -1182,6 +1178,5 @@ def run(self): def test_lightning_work_stopped(): - app = LightningApp(SimpleWork2()) MultiProcessRuntime(app, start_server=False).dispatch() diff --git a/tests/tests_app/core/test_lightning_flow.py b/tests/tests_app/core/test_lightning_flow.py index 9cc773d2a0b4f..52ef5bfe80f94 100644 --- a/tests/tests_app/core/test_lightning_flow.py +++ b/tests/tests_app/core/test_lightning_flow.py @@ -261,6 +261,7 @@ def test_attribute_state_change(attribute, update_fn, expected, tmpdir): def test_inplace_attribute_state_change(tmpdir): """Test that in-place modifications on containers get captured as a state change.""" + # inplace modification of a nested dict def transform(x): x["b"]["c"] += 1 @@ -564,7 +565,6 @@ def run(self): def test_lightning_flow_counter(tmpdir): - app = LightningApp(FlowCounter()) app.checkpointing = True MultiProcessRuntime(app, start_server=False).dispatch() @@ -659,7 +659,6 @@ def run(self): def test_scheduling_api(): - app = LightningApp(FlowSchedule()) MultiProcessRuntime(app, start_server=False).dispatch() @@ -845,7 +844,6 @@ def run(self): def test_lightning_flow_flows_and_works(): - flow = FlowCollection() app = 
LightningApp(flow) @@ -904,7 +902,6 @@ def __init__(self): @pytest.mark.parametrize("flow", [FlowReady, RootFlowReady]) def test_flow_ready(flow): """This test validates that the app status queue is populated correctly.""" - mock_queue = _MockQueue("api_publish_state_queue") def run_patch(method): diff --git a/tests/tests_app/core/test_lightning_work.py b/tests/tests_app/core/test_lightning_work.py index b5a7d7a96e0c8..bc2b09bd5fcdf 100644 --- a/tests/tests_app/core/test_lightning_work.py +++ b/tests/tests_app/core/test_lightning_work.py @@ -19,7 +19,6 @@ def test_lightning_work_run_method_required(): """Test that a helpful exception is raised when the user did not implement the `LightningWork.run()` method.""" - with pytest.raises(TypeError, match=escape("The work `LightningWork` is missing the `run()` method")): LightningWork() @@ -306,7 +305,6 @@ def run(self, *args, **kwargs): def test_work_cloud_build_config_provided(): - assert isinstance(LightningWork.cloud_build_config, property) assert LightningWork.cloud_build_config.fset is not None @@ -323,7 +321,6 @@ def run(self, *args, **kwargs): def test_work_local_build_config_provided(): - assert isinstance(LightningWork.local_build_config, property) assert LightningWork.local_build_config.fset is not None diff --git a/tests/tests_app/core/test_queues.py b/tests/tests_app/core/test_queues.py index a237071169fc7..55c0523daf64b 100644 --- a/tests/tests_app/core/test_queues.py +++ b/tests/tests_app/core/test_queues.py @@ -107,7 +107,6 @@ def test_redis_queue_read_timeout(redis_mock): [(QueuingSystem.MULTIPROCESS, multiprocessing)], ) def test_process_queue_read_timeout(queue_type, queue_process_mock, monkeypatch): - context = mock.MagicMock() queue_mocked = mock.MagicMock() context.Queue = queue_mocked diff --git a/tests/tests_app/frontend/just_py/test_just_py.py b/tests/tests_app/frontend/just_py/test_just_py.py index 04996c1287dc9..f273e64d5f30a 100644 --- a/tests/tests_app/frontend/just_py/test_just_py.py +++ b/tests/tests_app/frontend/just_py/test_just_py.py @@ -15,7 +15,6 @@ def render_fn(get_state: Callable) -> Callable: def test_justpy_frontend(monkeypatch): - justpy = MagicMock() popen = MagicMock() monkeypatch.setitem(sys.modules, "justpy", justpy) diff --git a/tests/tests_app/frontend/panel/app_panel.py b/tests/tests_app/frontend/panel/app_panel.py index 6b54261e37e7d..3d8c056cc4492 100644 --- a/tests/tests_app/frontend/panel/app_panel.py +++ b/tests/tests_app/frontend/panel/app_panel.py @@ -1,5 +1,4 @@ if __name__ == "__main__": - import panel as pn pn.pane.Markdown("# Panel App").servable() diff --git a/tests/tests_app/plugin/test_plugin.py b/tests/tests_app/plugin/test_plugin.py index 35347a0bbfc11..25ef9085156d1 100644 --- a/tests/tests_app/plugin/test_plugin.py +++ b/tests/tests_app/plugin/test_plugin.py @@ -17,7 +17,6 @@ @mock.patch("lightning.app.plugin.plugin.uvicorn") def mock_plugin_server(mock_uvicorn) -> TestClient: """This fixture returns a `TestClient` for the plugin server.""" - test_client = {} def create_test_client(app, **_): diff --git a/tests/tests_app/runners/test_cloud.py b/tests/tests_app/runners/test_cloud.py index 221440e34244a..d18cd27a1c000 100644 --- a/tests/tests_app/runners/test_cloud.py +++ b/tests/tests_app/runners/test_cloud.py @@ -1914,7 +1914,7 @@ def test_print_specs(tmpdir, caplog, monkeypatch, print_format, expected): expected = re.escape(str(expected).replace("'", '"').replace(" ", "")).replace('"\\*"', "(.*)") expected = "INFO(.*)works: " + expected - assert any([re.fullmatch(expected, line) 
for line in lines]) + assert any(re.fullmatch(expected, line) for line in lines) finally: cloud.LIGHTNING_CLOUD_PRINT_SPECS = None diff --git a/tests/tests_app/runners/test_runtime.py b/tests/tests_app/runners/test_runtime.py index 5422a24256351..895b50a4afffd 100644 --- a/tests/tests_app/runners/test_runtime.py +++ b/tests/tests_app/runners/test_runtime.py @@ -21,7 +21,6 @@ @mock.patch("lightning.app.runners.backends.cloud.LightningClient", mock.MagicMock()) def test_dispatch(runtime_type, monkeypatch): """This test ensures the runtime dispatch method gets called when using dispatch.""" - monkeypatch.setattr(cloud, "CloudBackend", mock.MagicMock()) with pytest.raises(FileNotFoundError, match="doesnt_exists.py"): diff --git a/tests/tests_app/source_code/test_local.py b/tests/tests_app/source_code/test_local.py index 7d9df15d5daa2..c638bc32fd88a 100644 --- a/tests/tests_app/source_code/test_local.py +++ b/tests/tests_app/source_code/test_local.py @@ -273,7 +273,6 @@ def test_repository_lightningignore_supports_different_patterns(tmp_path): def test_repository_lightningignore_unpackage(tmp_path, monkeypatch): """.lightningignore behaves similarly to the gitignore standard.""" - lorem_ipsum = "Lorem ipsum dolor sit amet, consectetur adipiscing elit." cache_path = tmp_path / "cache" diff --git a/tests/tests_app/structures/test_structures.py b/tests/tests_app/structures/test_structures.py index f124b7dd5f611..eddffd70908c6 100644 --- a/tests/tests_app/structures/test_structures.py +++ b/tests/tests_app/structures/test_structures.py @@ -449,7 +449,6 @@ def run(self): def test_dict_with_queues(): - app = LightningApp(FlowDict()) MultiProcessRuntime(app, start_server=False).dispatch() @@ -470,7 +469,6 @@ def run(self): def test_list_with_queues(): - app = LightningApp(FlowList()) MultiProcessRuntime(app, start_server=False).dispatch() @@ -552,7 +550,6 @@ def run(self): def test_flow_without_structures(): - flow = FlowWiStructures() assert isinstance(flow.ws, List) assert isinstance(flow.ws1, Dict) diff --git a/tests/tests_app/utilities/test_app_helpers.py b/tests/tests_app/utilities/test_app_helpers.py index 9efbe6f1bda1c..0f8155dacfc6a 100644 --- a/tests/tests_app/utilities/test_app_helpers.py +++ b/tests/tests_app/utilities/test_app_helpers.py @@ -52,7 +52,6 @@ def test_is_overridden(): def test_simple_app_store(): - store = InMemoryStateStore() user_id = "1234" store.add(user_id) diff --git a/tests/tests_app/utilities/test_git.py b/tests/tests_app/utilities/test_git.py index 3aeb9d5956e98..236ebc9730476 100644 --- a/tests/tests_app/utilities/test_git.py +++ b/tests/tests_app/utilities/test_git.py @@ -14,7 +14,6 @@ @pytest.mark.skipif(sys.platform == "win32", reason="Don't run on windows") def test_execute_git_command(): - res = execute_git_command(["pull"]) assert res diff --git a/tests/tests_app/utilities/test_introspection.py b/tests/tests_app/utilities/test_introspection.py index c3e4c824acb60..b3371e2348565 100644 --- a/tests/tests_app/utilities/test_introspection.py +++ b/tests/tests_app/utilities/test_introspection.py @@ -15,7 +15,6 @@ def test_introspection(): """This test validates the scanner can find some class within the provided files.""" - scanner = Scanner(str(os.path.join(_PROJECT_ROOT, "tests/tests_app/core/scripts/example_1.py"))) assert scanner.has_class(Rational) assert not scanner.has_class(LightningApp) diff --git a/tests/tests_app/utilities/test_network.py b/tests/tests_app/utilities/test_network.py index f8cc25304f0ae..8c25e8305f2f2 100644 --- 
a/tests/tests_app/utilities/test_network.py +++ b/tests/tests_app/utilities/test_network.py @@ -46,7 +46,6 @@ def test_find_free_network_port_cloudspace(_, patch_constants): def test_lightning_client_retry_enabled(): - client = LightningClient() # default: retry=True assert hasattr(client.auth_service_get_user_with_http_info, "__wrapped__") diff --git a/tests/tests_app/utilities/test_proxies.py b/tests/tests_app/utilities/test_proxies.py index 1f0222573f67f..83b2827768cc5 100644 --- a/tests/tests_app/utilities/test_proxies.py +++ b/tests/tests_app/utilities/test_proxies.py @@ -247,7 +247,6 @@ def run(self): class WorkRunnerPatch(WorkRunner): - counter = 0 def __call__(self): @@ -645,7 +644,6 @@ def run(self): def test_state_observer(): - app = LightningApp(FlowState()) MultiProcessRuntime(app, start_server=False).dispatch() diff --git a/tests/tests_app/utilities/test_state.py b/tests/tests_app/utilities/test_state.py index d4bd6883271d4..4cb9f20d3fc6b 100644 --- a/tests/tests_app/utilities/test_state.py +++ b/tests/tests_app/utilities/test_state.py @@ -15,7 +15,6 @@ @mock.patch("lightning.app.utilities.state._configure_session", return_value=requests) def test_app_state_not_connected(_): - """Test an error message when a disconnected AppState tries to access attributes.""" state = AppState(port=8000) with pytest.raises(AttributeError, match="Failed to connect and fetch the app state"): @@ -250,7 +249,6 @@ def json(self): def test_get_send_request(monkeypatch): - app = LightningApp(Flow()) monkeypatch.setattr(lightning.app.utilities.state, "_configure_session", mock.MagicMock()) diff --git a/tests/tests_fabric/accelerators/test_cuda.py b/tests/tests_fabric/accelerators/test_cuda.py index d4cc69f0beb51..f623125c4f4d3 100644 --- a/tests/tests_fabric/accelerators/test_cuda.py +++ b/tests/tests_fabric/accelerators/test_cuda.py @@ -91,7 +91,6 @@ def test_force_nvml_based_cuda_check(): @mock.patch("torch.cuda.get_device_capability", return_value=(10, 1)) @mock.patch("torch.cuda.get_device_name", return_value="Z100") def test_tf32_message(_, __, caplog, monkeypatch): - # for some reason, caplog doesn't work with our rank_zero_info utilities monkeypatch.setattr(lightning.fabric.accelerators.cuda, "rank_zero_info", logging.info) @@ -125,7 +124,6 @@ def test_tf32_message(_, __, caplog, monkeypatch): def test_find_usable_cuda_devices_error_handling(): """Test error handling for edge cases when using `find_usable_cuda_devices`.""" - # Asking for GPUs if no GPUs visible with mock.patch("lightning.fabric.accelerators.cuda.num_cuda_devices", return_value=0), pytest.raises( ValueError, match="You requested to find 2 devices but there are no visible CUDA" diff --git a/tests/tests_fabric/loggers/test_csv.py b/tests/tests_fabric/loggers/test_csv.py index f16fa7f19aec4..2cdd29ec34920 100644 --- a/tests/tests_fabric/loggers/test_csv.py +++ b/tests/tests_fabric/loggers/test_csv.py @@ -42,7 +42,6 @@ def test_file_logger_manual_versioning(tmpdir): def test_file_logger_named_version(tmpdir): """Verify that manual versioning works for string versions, e.g. 
'2020-02-05-162402'.""" - exp_name = "exp" tmpdir.mkdir(exp_name) expected_version = "2020-02-05-162402" diff --git a/tests/tests_fabric/loggers/test_tensorboard.py b/tests/tests_fabric/loggers/test_tensorboard.py index c84810814cb09..05889452ceeb2 100644 --- a/tests/tests_fabric/loggers/test_tensorboard.py +++ b/tests/tests_fabric/loggers/test_tensorboard.py @@ -219,7 +219,6 @@ def test_tensorboard_with_symlink(log, tmpdir): def test_tensorboard_missing_folder_warning(tmpdir, caplog): """Verify that the logger throws a warning for invalid directory.""" - name = "fake_dir" logger = TensorBoardLogger(root_dir=tmpdir, name=name) diff --git a/tests/tests_fabric/plugins/environments/test_mpi.py b/tests/tests_fabric/plugins/environments/test_mpi.py index f8132c8ece103..87a4499667150 100644 --- a/tests/tests_fabric/plugins/environments/test_mpi.py +++ b/tests/tests_fabric/plugins/environments/test_mpi.py @@ -58,7 +58,6 @@ def test_detect(monkeypatch): @mock.patch.dict(os.environ, {}, clear=True) def test_default_attributes(monkeypatch): """Test the default attributes when no environment variables are set.""" - # pretend mpi4py is available monkeypatch.setattr(lightning.fabric.plugins.environments.mpi, "_MPI4PY_AVAILABLE", True) mpi4py_mock = MagicMock() diff --git a/tests/tests_fabric/plugins/precision/test_amp_integration.py b/tests/tests_fabric/plugins/precision/test_amp_integration.py index 69318eac79c19..70025428fd328 100644 --- a/tests/tests_fabric/plugins/precision/test_amp_integration.py +++ b/tests/tests_fabric/plugins/precision/test_amp_integration.py @@ -39,7 +39,6 @@ def forward(self, x): class MixedPrecisionBoringFabric(BoringFabric): - expected_dtype: torch.dtype def get_model(self): diff --git a/tests/tests_fabric/strategies/test_deepspeed.py b/tests/tests_fabric/strategies/test_deepspeed.py index 22bfcc011ab13..8e1225bf799b9 100644 --- a/tests/tests_fabric/strategies/test_deepspeed.py +++ b/tests/tests_fabric/strategies/test_deepspeed.py @@ -53,7 +53,6 @@ def test_deepspeed_only_compatible_with_cuda(): @RunIf(deepspeed=True) def test_deepspeed_with_invalid_config_path(): """Test to ensure if we pass an invalid config path we throw an exception.""" - with pytest.raises( FileNotFoundError, match="You passed in a path to a DeepSpeed config but the path does not exist" ): @@ -99,7 +98,6 @@ def test_deepspeed_custom_activation_checkpointing_params(tmpdir): @RunIf(deepspeed=True) def test_deepspeed_config_zero_offload(deepspeed_zero_config): """Test the various ways optimizer-offloading can be configured.""" - # default config strategy = DeepSpeedStrategy(config=deepspeed_zero_config) assert "offload_optimizer" not in strategy.config["zero_optimization"] diff --git a/tests/tests_fabric/strategies/test_deepspeed_integration.py b/tests/tests_fabric/strategies/test_deepspeed_integration.py index b3183aa979f0e..3e451dbad0c23 100644 --- a/tests/tests_fabric/strategies/test_deepspeed_integration.py +++ b/tests/tests_fabric/strategies/test_deepspeed_integration.py @@ -136,7 +136,6 @@ def run(self): @RunIf(min_cuda_gpus=1, standalone=True, deepspeed=True) def test_deepspeed_configure_optimizers(): """Test that the deepspeed strategy with default initialization wraps the optimizer correctly.""" - from deepspeed.runtime.zero.stage_1_and_2 import DeepSpeedZeroOptimizer class RunFabric(Fabric): @@ -220,7 +219,6 @@ def run(self): class ModelParallelClassification(BoringFabric): - num_blocks = 5 def get_model(self): diff --git a/tests/tests_fabric/strategies/test_registry.py 
b/tests/tests_fabric/strategies/test_registry.py index 07aee5ea91f0f..53b7025c14ee8 100644 --- a/tests/tests_fabric/strategies/test_registry.py +++ b/tests/tests_fabric/strategies/test_registry.py @@ -18,7 +18,6 @@ def test_strategy_registry_with_new_strategy(): class TestStrategy: - strategy_name = "test_strategy" def __init__(self, param1, param2): diff --git a/tests/tests_fabric/test_fabric.py b/tests/tests_fabric/test_fabric.py index 525365177c1d6..e1776f109701e 100644 --- a/tests/tests_fabric/test_fabric.py +++ b/tests/tests_fabric/test_fabric.py @@ -56,7 +56,6 @@ def test_run_input_output(): """Test that the dynamically patched run() method receives the input arguments and returns the result.""" class RunFabric(Fabric): - run_args = () run_kwargs = {} @@ -776,7 +775,6 @@ def test_loggers_input(): def test_log(): """Test that `fabric.log` sends the metrics to each logger.""" - logger0 = Mock() logger1 = Mock() fabric = Fabric(loggers=[logger0, logger1]) @@ -792,7 +790,6 @@ def test_log(): def test_log_dict(): """Test that `fabric.log_dict` sends the metrics dict to each logger.""" - logger0 = Mock() logger1 = Mock() fabric = Fabric(loggers=[logger0, logger1]) diff --git a/tests/tests_fabric/test_wrappers.py b/tests/tests_fabric/test_wrappers.py index 2d9554251eaf1..0996627e57a15 100644 --- a/tests/tests_fabric/test_wrappers.py +++ b/tests/tests_fabric/test_wrappers.py @@ -189,7 +189,6 @@ def check_autocast(forward_input): @pytest.mark.parametrize("dtype", [torch.float32, torch.float16]) def test_fabric_module_device_dtype_propagation(device_str, dtype): """Test that the FabricModule propagates device and dtype properties to its submodules (e.g. torchmetrics).""" - device = torch.device(device_str) class DeviceModule(_DeviceDtypeModuleMixin): @@ -335,7 +334,6 @@ def test_fabric_optimizer_steps(): def test_fabric_optimizer_zero_grad_kwargs(): """Test that Fabric can adapt the `.zero_grad()` arguments to the underlying optimizer.""" - # Test PyTorch's standard `.zero_grad()` signature with mock.patch("torch.optim.SGD.zero_grad") as zero_grad_mock: optimizer = torch.optim.SGD(torch.nn.Linear(1, 1).parameters(), 0.1) diff --git a/tests/tests_fabric/utilities/test_logger.py b/tests/tests_fabric/utilities/test_logger.py index 1dc81a35b6d62..c5286d0dbe29d 100644 --- a/tests/tests_fabric/utilities/test_logger.py +++ b/tests/tests_fabric/utilities/test_logger.py @@ -28,7 +28,6 @@ def test_convert_params(): """Test conversion of params to a dict.""" - # Test normal dict, make sure it is unchanged params = {"string": "string", "int": 1, "float": 0.1, "bool": True, "none": None} expected = params.copy() @@ -45,7 +44,6 @@ def test_convert_params(): def test_flatten_dict(): """Validate flatten_dict can handle nested dictionaries and argparse Namespace.""" - # Test basic dict flattening with custom delimiter params = {"a": {"b": "c"}} params = _flatten_dict(params, "--") @@ -106,7 +104,6 @@ def wrapper_something(): def test_sanitize_params(): """Verify sanitize params converts various types to loggable strings.""" - params = { "float": 0.3, "int": 1, @@ -137,7 +134,6 @@ def test_sanitize_params(): def test_add_prefix(): """Verify add_prefix modifies the dict keys correctly.""" - metrics = {"metric1": 1, "metric2": 2} metrics = _add_prefix(metrics, "prefix", "-") diff --git a/tests/tests_pytorch/accelerators/test_hpu.py b/tests/tests_pytorch/accelerators/test_hpu.py index b8ba801e7ede4..652e229b2d357 100644 --- a/tests/tests_pytorch/accelerators/test_hpu.py +++ 
b/tests/tests_pytorch/accelerators/test_hpu.py @@ -162,7 +162,6 @@ def on_predict_batch_end(self, trainer, pl_module, outputs, *_) -> None: @RunIf(hpu=True) def test_accelerator_hpu(): - trainer = Trainer(accelerator="hpu", devices=1) assert isinstance(trainer.accelerator, HPUAccelerator) assert trainer.num_devices == 1 @@ -178,7 +177,6 @@ def test_accelerator_hpu(): @RunIf(hpu=True) def test_accelerator_hpu_with_single_device(): - trainer = Trainer(accelerator="hpu", devices=1) assert isinstance(trainer.strategy, SingleHPUStrategy) @@ -187,7 +185,6 @@ def test_accelerator_hpu_with_single_device(): @RunIf(hpu=True) def test_accelerator_hpu_with_multiple_devices(): - trainer = Trainer(accelerator="hpu", devices=8) assert isinstance(trainer.strategy, HPUParallelStrategy) @@ -196,7 +193,6 @@ def test_accelerator_hpu_with_multiple_devices(): @RunIf(hpu=True) def test_accelerator_auto_with_devices_hpu(): - trainer = Trainer(accelerator="auto", devices=8) assert isinstance(trainer.strategy, HPUParallelStrategy) @@ -305,7 +301,6 @@ def training_step(self, batch, batch_idx): @RunIf(hpu=True) def test_hpu_device_stats_monitor(): - hpu_stats = HPUAccelerator().get_device_stats("hpu") fields = [ "Limit", diff --git a/tests/tests_pytorch/accelerators/test_ipu.py b/tests/tests_pytorch/accelerators/test_ipu.py index 7cb500a3e5bba..03d48c1898c7b 100644 --- a/tests/tests_pytorch/accelerators/test_ipu.py +++ b/tests/tests_pytorch/accelerators/test_ipu.py @@ -508,7 +508,6 @@ def test_replication_factor(tmpdir): @RunIf(ipu=True) def test_default_opts(tmpdir): """Ensure default opts are set correctly in the IPUStrategy.""" - model = IPUModel() trainer = Trainer(default_root_dir=tmpdir, accelerator="ipu", devices=1, fast_dev_run=True) @@ -544,7 +543,6 @@ def configure_optimizers(self): @RunIf(ipu=True) def test_precision_plugin(): """Ensure precision plugin value is set correctly.""" - plugin = IPUPrecisionPlugin(precision="16-mixed") assert plugin.precision == "16-mixed" diff --git a/tests/tests_pytorch/accelerators/test_tpu.py b/tests/tests_pytorch/accelerators/test_tpu.py index b1d8ef2a0d3b2..964f62597b2ad 100644 --- a/tests/tests_pytorch/accelerators/test_tpu.py +++ b/tests/tests_pytorch/accelerators/test_tpu.py @@ -103,7 +103,6 @@ def test_accelerator_tpu(_, accelerator, devices, tpu_available): @mock.patch.dict(os.environ, os.environ.copy(), clear=True) def test_manual_optimization_tpus(tmpdir): class ManualOptimizationModel(BoringModel): - count = 0 called = collections.defaultdict(int) @@ -192,7 +191,6 @@ def test_strategy_choice_tpu_strategy(): @RunIf(tpu=True) @mock.patch.dict(os.environ, os.environ.copy(), clear=True) def test_auto_parameters_tying_tpus(tmpdir): - model = WeightSharingModule() shared_params = find_shared_parameters(model) diff --git a/tests/tests_pytorch/benchmarks/test_basic_parity.py b/tests/tests_pytorch/benchmarks/test_basic_parity.py index ee45332ce15dd..226522d6449f0 100644 --- a/tests/tests_pytorch/benchmarks/test_basic_parity.py +++ b/tests/tests_pytorch/benchmarks/test_basic_parity.py @@ -131,7 +131,6 @@ def vanilla_loop(cls_model, idx, device_type: str = "cuda", num_epochs=10): epoch_losses = [] # as the first run is skipped, no need to run it long for epoch in range(num_epochs if idx > 0 else 1): - # run through full training set for j, batch in enumerate(dl): batch = [x.to(device) for x in batch] diff --git a/tests/tests_pytorch/callbacks/progress/test_tqdm_progress_bar.py b/tests/tests_pytorch/callbacks/progress/test_tqdm_progress_bar.py index 
e10ab03630ac2..9e56dcc7ff381 100644 --- a/tests/tests_pytorch/callbacks/progress/test_tqdm_progress_bar.py +++ b/tests/tests_pytorch/callbacks/progress/test_tqdm_progress_bar.py @@ -234,11 +234,9 @@ def test_tqdm_progress_bar_fast_dev_run(tmpdir): @pytest.mark.parametrize("refresh_rate", [0, 1, 50]) def test_tqdm_progress_bar_progress_refresh(tmpdir, refresh_rate: int): """Test that the three progress bars get correctly updated when using different refresh rates.""" - model = BoringModel() class CurrentProgressBar(TQDMProgressBar): - train_batches_seen = 0 val_batches_seen = 0 test_batches_seen = 0 diff --git a/tests/tests_pytorch/callbacks/test_callbacks.py b/tests/tests_pytorch/callbacks/test_callbacks.py index c5160b82d7302..4f09358451a62 100644 --- a/tests/tests_pytorch/callbacks/test_callbacks.py +++ b/tests/tests_pytorch/callbacks/test_callbacks.py @@ -25,7 +25,6 @@ def test_callbacks_configured_in_model(tmpdir): """Test the callback system with callbacks added through the model hook.""" - model_callback_mock = Mock(spec=Callback, model=Callback()) trainer_callback_mock = Mock(spec=Callback, model=Callback()) diff --git a/tests/tests_pytorch/callbacks/test_device_stats_monitor.py b/tests/tests_pytorch/callbacks/test_device_stats_monitor.py index 796feef04970e..6861d5248f1b4 100644 --- a/tests/tests_pytorch/callbacks/test_device_stats_monitor.py +++ b/tests/tests_pytorch/callbacks/test_device_stats_monitor.py @@ -131,7 +131,6 @@ def log_metrics(self, metrics, step=None) -> None: def test_device_stats_monitor_no_logger(tmpdir): """Test DeviceStatsMonitor with no logger in Trainer.""" - model = BoringModel() device_stats = DeviceStatsMonitor() @@ -171,7 +170,6 @@ def test_device_stats_monitor_warning_when_psutil_not_available(monkeypatch, tmp def test_device_stats_monitor_logs_for_different_stages(tmpdir): """Test that metrics are logged for all stages that is training, testing and validation.""" - model = BoringModel() device_stats = DeviceStatsMonitor() @@ -194,7 +192,6 @@ def test_device_stats_monitor_logs_for_different_stages(tmpdir): trainer.fit(model) with open(f"{tmpdir}/lightning_logs/version_0/metrics.csv") as csvfile: - content = csv.reader(csvfile, delimiter=",") it = iter(content).__next__() @@ -212,7 +209,6 @@ def test_device_stats_monitor_logs_for_different_stages(tmpdir): trainer.test(model) with open(f"{tmpdir}/lightning_logs/version_0/metrics.csv") as csvfile: - content = csv.reader(csvfile, delimiter=",") it = iter(content).__next__() diff --git a/tests/tests_pytorch/callbacks/test_early_stopping.py b/tests/tests_pytorch/callbacks/test_early_stopping.py index ebd3b40f88c04..888674860e1de 100644 --- a/tests/tests_pytorch/callbacks/test_early_stopping.py +++ b/tests/tests_pytorch/callbacks/test_early_stopping.py @@ -202,7 +202,6 @@ def test_pickling(): @RunIf(sklearn=True) def test_early_stopping_no_val_step(tmpdir): """Test that early stopping callback falls back to training metrics when no validation defined.""" - model = ClassificationModel() dm = ClassifDataModule() model.validation_step = None @@ -247,7 +246,6 @@ def on_validation_epoch_end(self): @pytest.mark.parametrize("stop_value", [torch.tensor(np.inf), torch.tensor(np.nan)]) def test_early_stopping_on_non_finite_monitor(tmpdir, stop_value): - losses = [4, 3, stop_value, 2, 1] expected_stop_epoch = 2 diff --git a/tests/tests_pytorch/callbacks/test_finetuning_callback.py b/tests/tests_pytorch/callbacks/test_finetuning_callback.py index c50c3439dd534..ec33505b10def 100644 --- 
a/tests/tests_pytorch/callbacks/test_finetuning_callback.py +++ b/tests/tests_pytorch/callbacks/test_finetuning_callback.py @@ -41,7 +41,6 @@ def on_train_epoch_start(self, trainer, pl_module): def test_finetuning_callback(tmpdir): """Test finetuning callbacks works as expected.""" - seed_everything(42) class FinetuningBoringModel(BoringModel): @@ -76,7 +75,6 @@ def train_dataloader(self): class TestBackboneFinetuningWarningCallback(BackboneFinetuning): def finetune_function(self, pl_module, epoch: int, optimizer): """Called when the epoch begins.""" - if epoch == 0: self.unfreeze_and_add_param_group( pl_module.backbone, optimizer, 0.1, train_bn=self.train_bn, initial_denom_lr=self.initial_denom_lr @@ -85,7 +83,6 @@ def finetune_function(self, pl_module, epoch: int, optimizer): def test_finetuning_callback_warning(tmpdir): """Test finetuning callbacks works as expected.""" - seed_everything(42) class FinetuningBoringModel(BoringModel): @@ -124,7 +121,6 @@ def configure_optimizers(self): def test_freeze_unfreeze_function(tmpdir): """Test freeze properly sets requires_grad on the modules.""" - seed_everything(42) class FreezeModel(LightningModule): @@ -163,7 +159,6 @@ def __init__(self): def test_unfreeze_and_add_param_group_function(tmpdir): """Test unfreeze_and_add_param_group properly unfreeze parameters and add to the correct param_group.""" - seed_everything(42) class FreezeModel(LightningModule): diff --git a/tests/tests_pytorch/callbacks/test_model_summary.py b/tests/tests_pytorch/callbacks/test_model_summary.py index c8a82d790481e..5d6ccc43bb8b5 100644 --- a/tests/tests_pytorch/callbacks/test_model_summary.py +++ b/tests/tests_pytorch/callbacks/test_model_summary.py @@ -19,7 +19,6 @@ def test_model_summary_callback_present_trainer(): - trainer = Trainer() assert any(isinstance(cb, ModelSummary) for cb in trainer.callbacks) diff --git a/tests/tests_pytorch/checkpointing/test_checkpoint_callback_frequency.py b/tests/tests_pytorch/checkpointing/test_checkpoint_callback_frequency.py index 26059f9d4c531..e9369df6b6cdf 100644 --- a/tests/tests_pytorch/checkpointing/test_checkpoint_callback_frequency.py +++ b/tests/tests_pytorch/checkpointing/test_checkpoint_callback_frequency.py @@ -35,7 +35,6 @@ def test_disabled_checkpointing(tmpdir): ["epochs", "val_check_interval", "expected"], [(1, 1.0, 1), (2, 1.0, 2), (1, 0.25, 4), (2, 0.3, 6)] ) def test_default_checkpoint_freq(save_mock, tmpdir, epochs: int, val_check_interval: float, expected: int): - model = BoringModel() trainer = Trainer( default_root_dir=tmpdir, diff --git a/tests/tests_pytorch/checkpointing/test_model_checkpoint.py b/tests/tests_pytorch/checkpointing/test_model_checkpoint.py index 7c6a3905d9280..2a2f9d550d415 100644 --- a/tests/tests_pytorch/checkpointing/test_model_checkpoint.py +++ b/tests/tests_pytorch/checkpointing/test_model_checkpoint.py @@ -618,7 +618,6 @@ def test_model_checkpoint_every_n_epochs(tmpdir, every_n_epochs): def test_ckpt_every_n_train_steps(tmpdir): """Tests that the checkpoints are saved every n training steps.""" - model = LogInTwoMethods() every_n_train_steps = 16 max_epochs = 2 diff --git a/tests/tests_pytorch/checkpointing/test_trainer_checkpoint.py b/tests/tests_pytorch/checkpointing/test_trainer_checkpoint.py index 9bad928c44340..7e35b29e44c56 100644 --- a/tests/tests_pytorch/checkpointing/test_trainer_checkpoint.py +++ b/tests/tests_pytorch/checkpointing/test_trainer_checkpoint.py @@ -27,7 +27,6 @@ def test_finetuning_with_ckpt_path(tmpdir): """This test validates that generated ModelCheckpoint 
is pointing to the right best_model_path during test.""" - checkpoint_callback = ModelCheckpoint(monitor="val_loss", dirpath=tmpdir, filename="{epoch:02d}", save_top_k=-1) class ExtendedBoringModel(BoringModel): diff --git a/tests/tests_pytorch/core/test_lightning_module.py b/tests/tests_pytorch/core/test_lightning_module.py index 7011369c5a7bc..1a0be112743a6 100644 --- a/tests/tests_pytorch/core/test_lightning_module.py +++ b/tests/tests_pytorch/core/test_lightning_module.py @@ -291,7 +291,6 @@ def configure_optimizers(self): ], ) def test_device_placement(tmpdir, accelerator, device): - model = BoringModel() trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, accelerator=accelerator, devices=1) trainer.fit(model) @@ -348,7 +347,6 @@ def test_lightning_module_configure_gradient_clipping(tmpdir): """Test custom gradient clipping inside `configure_gradient_clipping` hook.""" class TestModel(BoringModel): - has_validated_gradients = False custom_gradient_clip_val = 1e-2 @@ -564,7 +562,6 @@ def test_fabric_log_dict(): @pytest.mark.parametrize("algo", ["value", "norm"]) def test_grad_clipping_lm_fabric(algo): - from lightning.pytorch.utilities import GradClipAlgorithmType class DummyLM(LightningModule): diff --git a/tests/tests_pytorch/core/test_metric_result_integration.py b/tests/tests_pytorch/core/test_metric_result_integration.py index a74da96a8dce4..d15dddb25a3c1 100644 --- a/tests/tests_pytorch/core/test_metric_result_integration.py +++ b/tests/tests_pytorch/core/test_metric_result_integration.py @@ -221,11 +221,9 @@ def lightning_log(fx, *args, **kwargs): current_fx_name = fx for epoch in range(2): - cumulative_sum = 0 for i in range(3): - a = metric_a(i) b = metric_b(i) c = metric_c(i) diff --git a/tests/tests_pytorch/loggers/test_all.py b/tests/tests_pytorch/loggers/test_all.py index 380058e752387..9ea18f533db00 100644 --- a/tests/tests_pytorch/loggers/test_all.py +++ b/tests/tests_pytorch/loggers/test_all.py @@ -321,7 +321,6 @@ def test_logger_with_prefix_all(tmpdir, monkeypatch): def test_logger_default_name(tmpdir, monkeypatch): """Test that the default logger name is lightning_logs.""" - # CSV logger = CSVLogger(save_dir=tmpdir) assert logger.name == "lightning_logs" diff --git a/tests/tests_pytorch/loggers/test_comet.py b/tests/tests_pytorch/loggers/test_comet.py index 3758cab13fffc..f85ecd5ebb006 100644 --- a/tests/tests_pytorch/loggers/test_comet.py +++ b/tests/tests_pytorch/loggers/test_comet.py @@ -84,7 +84,6 @@ def test_comet_logger_no_api_key_given(comet): @patch("lightning.pytorch.loggers.comet.comet_ml") def test_comet_logger_experiment_name(comet): """Test that Comet Logger experiment name works correctly.""" - api_key = "key" experiment_name = "My Name" @@ -101,7 +100,6 @@ def test_comet_logger_experiment_name(comet): @patch("lightning.pytorch.loggers.comet.comet_ml") def test_comet_logger_manual_experiment_key(comet): """Test that Comet Logger respects manually set COMET_EXPERIMENT_KEY.""" - api_key = "key" experiment_key = "96346da91469407a85641afe5766b554" @@ -163,7 +161,6 @@ def test_comet_logger_dirs_creation(comet, comet_experiment, tmpdir, monkeypatch @patch("lightning.pytorch.loggers.comet.comet_ml") def test_comet_name_default(comet): """Test that CometLogger.name don't create an Experiment and returns a default value.""" - api_key = "key" with patch("lightning.pytorch.loggers.comet.CometExperiment"): @@ -176,7 +173,6 @@ def test_comet_name_default(comet): @patch("lightning.pytorch.loggers.comet.comet_ml") def test_comet_name_project_name(comet): 
"""Test that CometLogger.name does not create an Experiment and returns project name if passed.""" - api_key = "key" project_name = "My Project Name" @@ -190,7 +186,6 @@ def test_comet_name_project_name(comet): @patch("lightning.pytorch.loggers.comet.comet_ml") def test_comet_version_without_experiment(comet): """Test that CometLogger.version does not create an Experiment.""" - api_key = "key" experiment_name = "My Name" comet.generate_guid.return_value = "1234" diff --git a/tests/tests_pytorch/loggers/test_csv.py b/tests/tests_pytorch/loggers/test_csv.py index 16ca21a7c0916..406d6f525ecf7 100644 --- a/tests/tests_pytorch/loggers/test_csv.py +++ b/tests/tests_pytorch/loggers/test_csv.py @@ -29,7 +29,6 @@ def test_file_logger_automatic_versioning(tmpdir): """Verify that automatic versioning works.""" - root_dir = tmpdir.mkdir("exp") root_dir.mkdir("version_0") root_dir.mkdir("version_1") @@ -41,7 +40,6 @@ def test_file_logger_automatic_versioning(tmpdir): def test_file_logger_manual_versioning(tmpdir): """Verify that manual versioning works.""" - root_dir = tmpdir.mkdir("exp") root_dir.mkdir("version_0") root_dir.mkdir("version_1") @@ -54,7 +52,6 @@ def test_file_logger_manual_versioning(tmpdir): def test_file_logger_named_version(tmpdir): """Verify that manual versioning works for string versions, e.g. '2020-02-05-162402'.""" - exp_name = "exp" tmpdir.mkdir(exp_name) expected_version = "2020-02-05-162402" diff --git a/tests/tests_pytorch/loggers/test_logger.py b/tests/tests_pytorch/loggers/test_logger.py index aed8fb8e1cfaa..2aa6187bc7e4f 100644 --- a/tests/tests_pytorch/loggers/test_logger.py +++ b/tests/tests_pytorch/loggers/test_logger.py @@ -118,7 +118,6 @@ def training_step(self, batch, batch_idx): def test_multiple_loggers_pickle(tmpdir): """Verify that pickling trainer with multiple loggers works.""" - logger1 = CustomLogger() logger2 = CustomLogger() diff --git a/tests/tests_pytorch/loggers/test_mlflow.py b/tests/tests_pytorch/loggers/test_mlflow.py index 959609f747557..d968805b5183b 100644 --- a/tests/tests_pytorch/loggers/test_mlflow.py +++ b/tests/tests_pytorch/loggers/test_mlflow.py @@ -37,7 +37,6 @@ def mock_mlflow_run_creation(logger, experiment_name=None, experiment_id=None, r @mock.patch("lightning.pytorch.loggers.mlflow.MlflowClient") def test_mlflow_logger_exists(client, _, tmpdir): """Test launching three independent loggers with either same or different experiment name.""" - run1 = MagicMock() run1.info.run_id = "run-id-1" run1.info.experiment_id = "exp-id-1" @@ -91,7 +90,6 @@ def test_mlflow_logger_exists(client, _, tmpdir): @mock.patch("lightning.pytorch.loggers.mlflow.MlflowClient") def test_mlflow_run_name_setting(client, _, tmpdir): """Test that the run_name argument makes the MLFLOW_RUN_NAME tag.""" - tags = resolve_tags({MLFLOW_RUN_NAME: "run-name-1"}) # run_name is appended to tags @@ -118,7 +116,6 @@ def test_mlflow_run_name_setting(client, _, tmpdir): @mock.patch("lightning.pytorch.loggers.mlflow.MlflowClient") def test_mlflow_run_id_setting(client, _, tmpdir): """Test that the run_id argument uses the provided run_id.""" - run = MagicMock() run.info.run_id = "run-id" run.info.experiment_id = "experiment-id" @@ -139,7 +136,6 @@ def test_mlflow_run_id_setting(client, _, tmpdir): @mock.patch("lightning.pytorch.loggers.mlflow.MlflowClient") def test_mlflow_log_dir(client, _, tmpdir): """Test that the trainer saves checkpoints in the logger's save dir.""" - # simulate experiment creation with mlflow client mock run = MagicMock() run.info.run_id = "run-id" diff 
--git a/tests/tests_pytorch/loggers/test_neptune.py b/tests/tests_pytorch/loggers/test_neptune.py index e4e741eaa0a81..6d24d547c36e8 100644 --- a/tests/tests_pytorch/loggers/test_neptune.py +++ b/tests/tests_pytorch/loggers/test_neptune.py @@ -81,6 +81,7 @@ def exists(self, value): @pytest.fixture def tmpdir_unittest_fixture(request, tmpdir): """Proxy for pytest `tmpdir` fixture between pytest and unittest. + Resources: * https://docs.pytest.org/en/6.2.x/tmpdir.html#the-tmpdir-fixture * https://towardsdatascience.com/mixing-pytest-fixture-and-unittest-testcase-for-selenium-test-9162218e8c8e @@ -206,6 +207,7 @@ def test_neptune_leave_open_experiment_after_fit(self, neptune): @pytest.mark.usefixtures("tmpdir_unittest_fixture") def test_neptune_log_metrics_on_trained_model(self, neptune): """Verify that trained models do log data.""" + # given class LoggingModel(BoringModel): def on_validation_epoch_end(self): diff --git a/tests/tests_pytorch/loggers/test_tensorboard.py b/tests/tests_pytorch/loggers/test_tensorboard.py index 2c511772852a7..b33445d99e2d6 100644 --- a/tests/tests_pytorch/loggers/test_tensorboard.py +++ b/tests/tests_pytorch/loggers/test_tensorboard.py @@ -62,7 +62,6 @@ def __init__(self, b1=0.5, b2=0.999): def test_tensorboard_automatic_versioning(tmpdir): """Verify that automatic versioning works.""" - root_dir = tmpdir / "tb_versioning" root_dir.mkdir() (root_dir / "version_0").mkdir() @@ -74,7 +73,6 @@ def test_tensorboard_automatic_versioning(tmpdir): def test_tensorboard_manual_versioning(tmpdir): """Verify that manual versioning works.""" - root_dir = tmpdir / "tb_versioning" root_dir.mkdir() (root_dir / "version_0").mkdir() @@ -88,7 +86,6 @@ def test_tensorboard_manual_versioning(tmpdir): def test_tensorboard_named_version(tmpdir): """Verify that manual versioning works for string versions, e.g. 
'2020-02-05-162402'.""" - name = "tb_versioning" (tmpdir / name).mkdir() expected_version = "2020-02-05-162402" @@ -333,7 +330,6 @@ def test_tensorboard_with_symlink(log, tmpdir): def test_tensorboard_missing_folder_warning(tmpdir, caplog): """Verify that the logger throws a warning for invalid directory.""" - name = "fake_dir" logger = TensorBoardLogger(save_dir=tmpdir, name=name) diff --git a/tests/tests_pytorch/loggers/test_wandb.py b/tests/tests_pytorch/loggers/test_wandb.py index a89ad0332c714..08b0532ed84fc 100644 --- a/tests/tests_pytorch/loggers/test_wandb.py +++ b/tests/tests_pytorch/loggers/test_wandb.py @@ -388,7 +388,6 @@ def test_wandb_log_model_with_score(wandb, monkeypatch, tmpdir): @mock.patch("lightning.pytorch.loggers.wandb.wandb") def test_wandb_log_media(wandb, tmpdir): """Test that the logger creates the folders and files in the right place.""" - wandb.run = None # test log_text with columns and data @@ -465,7 +464,6 @@ def test_wandb_logger_offline_log_model(wandb, tmpdir): @mock.patch("lightning.pytorch.loggers.wandb.wandb") def test_wandb_logger_download_artifact(wandb, tmpdir): """Test that download_artifact works.""" - wandb.run = wandb.init() logger = WandbLogger() logger.download_artifact("test_artifact", str(tmpdir), "model", True) diff --git a/tests/tests_pytorch/loops/test_all.py b/tests/tests_pytorch/loops/test_all.py index 1bfb8286e8c49..c8f1bfa41fb1f 100644 --- a/tests/tests_pytorch/loops/test_all.py +++ b/tests/tests_pytorch/loops/test_all.py @@ -89,7 +89,6 @@ def on_predict_batch_end(self, outputs, batch, *_): ) def test_callback_batch_on_device(tmpdir, accelerator): """Test that the batch object sent to the on_*_batch_start/end hooks is on the right device.""" - batch_callback = BatchHookObserverCallback() model = BatchHookObserverModel() diff --git a/tests/tests_pytorch/loops/test_fetchers.py b/tests/tests_pytorch/loops/test_fetchers.py index bd9e2e8d6e841..1f653f70b6734 100644 --- a/tests/tests_pytorch/loops/test_fetchers.py +++ b/tests/tests_pytorch/loops/test_fetchers.py @@ -369,7 +369,6 @@ def on_train_batch_end(self, *_): def test_transfer_hooks_with_unpacking(tmpdir): - """This test asserts the `transfer_batch` hooks are called only once per batch.""" class RandomDictDataset(RandomDataset): @@ -377,7 +376,6 @@ def __getitem__(self, index): return {"x": self.data[index], "y_true": torch.ones((2,)), "other": torch.ones((1,))} class BoringDataModule(LightningDataModule): - count_called_on_before_batch_transfer = 0 count_called_transfer_batch_to_device = 0 count_called_on_after_batch_transfer = 0 diff --git a/tests/tests_pytorch/loops/test_flow_warnings.py b/tests/tests_pytorch/loops/test_flow_warnings.py index b1c56f52de08e..ee7f4e261f30b 100644 --- a/tests/tests_pytorch/loops/test_flow_warnings.py +++ b/tests/tests_pytorch/loops/test_flow_warnings.py @@ -25,7 +25,6 @@ def training_step(self, batch, batch_idx): def test_no_depre_without_epoch_end(tmpdir): """Tests that only training_step can be used.""" - model = TestModel() trainer = Trainer( diff --git a/tests/tests_pytorch/models/test_fabric_integration.py b/tests/tests_pytorch/models/test_fabric_integration.py index 8b1506febe7df..957f5c8f95465 100644 --- a/tests/tests_pytorch/models/test_fabric_integration.py +++ b/tests/tests_pytorch/models/test_fabric_integration.py @@ -22,7 +22,6 @@ def test_fabric_boring_lightning_module_automatic(): """Test that basic LightningModules written for 'automatic optimization' work with Fabric.""" - fabric = Fabric(accelerator="cpu", devices=1) module = 
BoringModel() @@ -44,7 +43,6 @@ def test_fabric_boring_lightning_module_automatic(): def test_fabric_boring_lightning_module_manual(): """Test that basic LightningModules written for 'manual optimization' work with Fabric.""" - fabric = Fabric(accelerator="cpu", devices=1) module = ManualOptimBoringModel() diff --git a/tests/tests_pytorch/models/test_hooks.py b/tests/tests_pytorch/models/test_hooks.py index d00b27f91e84e..1297eb4a62d6c 100644 --- a/tests/tests_pytorch/models/test_hooks.py +++ b/tests/tests_pytorch/models/test_hooks.py @@ -794,7 +794,6 @@ def predict_dataloader(self): def test_trainer_datamodule_hook_system(tmpdir): """Test the LightningDataModule hook system.""" - model = BoringModel() batches = 2 trainer = Trainer( diff --git a/tests/tests_pytorch/models/test_hparams.py b/tests/tests_pytorch/models/test_hparams.py index 4ab32c1fa95a9..18101d49568bb 100644 --- a/tests/tests_pytorch/models/test_hparams.py +++ b/tests/tests_pytorch/models/test_hparams.py @@ -482,7 +482,6 @@ def __init__(self, arg1): class OtherArgsModel(BoringModel): def __init__(self, arg1, arg2): - super().__init__() self.save_hyperparameters(arg1, arg2) @@ -590,7 +589,6 @@ def __init__(self): @pytest.mark.parametrize("cls", [BoringModel, NoArgsSubClassBoringModel]) def test_model_nohparams_train_test(tmpdir, cls): """Test models that do not take any argument in init.""" - model = cls() trainer = Trainer(max_epochs=1, default_root_dir=tmpdir) @@ -813,7 +811,6 @@ def test_hparams_name_from_container(tmpdir): @dataclass class DataClassModel(BoringModel): - mandatory: int optional: str = "optional" ignore_me: bool = False diff --git a/tests/tests_pytorch/models/test_torchscript.py b/tests/tests_pytorch/models/test_torchscript.py index c4488502699ec..d6de490b9d3fa 100644 --- a/tests/tests_pytorch/models/test_torchscript.py +++ b/tests/tests_pytorch/models/test_torchscript.py @@ -135,7 +135,6 @@ def test_torchscript_save_load(tmpdir, modelclass): @pytest.mark.parametrize("modelclass", [BoringModel, ParityModuleRNN, BasicGAN]) def test_torchscript_save_load_custom_filesystem(tmpdir, modelclass): """Test that scripted LightningModule is correctly saved and can be loaded with custom filesystems.""" - _DUMMY_PRFEIX = "dummy" _PREFIX_SEPARATOR = "://" diff --git a/tests/tests_pytorch/models/test_tpu.py b/tests/tests_pytorch/models/test_tpu.py index ea5672f0b7c06..94c86a18f259f 100644 --- a/tests/tests_pytorch/models/test_tpu.py +++ b/tests/tests_pytorch/models/test_tpu.py @@ -260,7 +260,6 @@ def test_accelerator_set_when_using_tpu(devices): @mock.patch.dict(os.environ, os.environ.copy(), clear=True) def test_if_test_works_with_checkpoint_false(tmpdir): """Ensure that model trains properly when `enable_checkpointing` is set to False.""" - # Train a model on TPU model = BoringModel() trainer = Trainer( diff --git a/tests/tests_pytorch/overrides/test_distributed.py b/tests/tests_pytorch/overrides/test_distributed.py index 1189ca9c23b48..c221238e4fbe8 100644 --- a/tests/tests_pytorch/overrides/test_distributed.py +++ b/tests/tests_pytorch/overrides/test_distributed.py @@ -52,7 +52,6 @@ def test_params_synced_during_nonfit(): @pytest.mark.parametrize("shuffle", [False, True]) def test_unrepeated_distributed_sampler(shuffle): """Test each rank will receive a different number of elements.""" - seed_everything(42) world_size = 4 samplers = [] diff --git a/tests/tests_pytorch/profilers/test_profiler.py b/tests/tests_pytorch/profilers/test_profiler.py index c1c5a574236e8..5ec89e91f3679 100644 --- 
a/tests/tests_pytorch/profilers/test_profiler.py +++ b/tests/tests_pytorch/profilers/test_profiler.py @@ -56,7 +56,6 @@ def simple_profiler(): @pytest.mark.parametrize(["action", "expected"], [("a", [3, 1]), ("b", [2]), ("c", [1])]) def test_simple_profiler_durations(simple_profiler, action: str, expected: list): """Ensure the reported durations are reasonably accurate.""" - for duration in expected: with simple_profiler.profile(action): time.sleep(duration) @@ -78,7 +77,6 @@ def test_simple_profiler_overhead(simple_profiler, n_iter=5): def test_simple_profiler_value_errors(simple_profiler): """Ensure errors are raised where expected.""" - action = "test" with pytest.raises(ValueError): simple_profiler.stop(action) @@ -266,7 +264,6 @@ def advanced_profiler(tmpdir): @pytest.mark.flaky(reruns=3) @pytest.mark.parametrize(["action", "expected"], [("a", [3, 1]), ("b", [2]), ("c", [1])]) def test_advanced_profiler_durations(advanced_profiler, action: str, expected: list): - for duration in expected: with advanced_profiler.profile(action): time.sleep(duration) @@ -305,7 +302,6 @@ def test_advanced_profiler_describe(tmpdir, advanced_profiler): def test_advanced_profiler_value_errors(advanced_profiler): """Ensure errors are raised where expected.""" - action = "test" with pytest.raises(ValueError): advanced_profiler.stop(action) @@ -435,7 +431,6 @@ def test_pytorch_profiler_trainer(fn, step_name, boring_model_cls, tmpdir): def test_pytorch_profiler_nested(tmpdir): """Ensure that the profiler handles nested context.""" - pytorch_profiler = PyTorchProfiler(use_cuda=False, dirpath=tmpdir, filename="profiler", schedule=None) with pytorch_profiler.profile("a"): @@ -493,7 +488,6 @@ def test_pytorch_profiler_nested_emit_nvtx(): def test_register_record_function(tmpdir): - use_cuda = torch.cuda.is_available() pytorch_profiler = PyTorchProfiler( export_to_chrome=False, @@ -609,7 +603,6 @@ def test_pytorch_profiler_raises_warning_for_limited_steps(tmpdir, trainer_confi def test_profile_callbacks(tmpdir): """Checks if profiling callbacks works correctly, specifically when there are two of the same callback type.""" - pytorch_profiler = PyTorchProfiler(dirpath=tmpdir, filename="profiler") model = BoringModel() trainer = Trainer( diff --git a/tests/tests_pytorch/strategies/test_ddp_spawn_strategy.py b/tests/tests_pytorch/strategies/test_ddp_spawn_strategy.py index 0292faf9f223b..db3d2f4813c45 100644 --- a/tests/tests_pytorch/strategies/test_ddp_spawn_strategy.py +++ b/tests/tests_pytorch/strategies/test_ddp_spawn_strategy.py @@ -73,7 +73,6 @@ def _configure_launcher(self): @RunIf(skip_windows=True) def test_ddp_spawn_add_get_queue(tmpdir): """Tests get_extra_results/update_main_process_results with DDPSpawnStrategy.""" - ddp_spawn_strategy = TestDDPSpawnStrategy() trainer = Trainer( default_root_dir=tmpdir, fast_dev_run=True, accelerator="cpu", devices=2, strategy=ddp_spawn_strategy diff --git a/tests/tests_pytorch/strategies/test_deepspeed_strategy.py b/tests/tests_pytorch/strategies/test_deepspeed_strategy.py index f4ba5952cd812..caab8fe351c5e 100644 --- a/tests/tests_pytorch/strategies/test_deepspeed_strategy.py +++ b/tests/tests_pytorch/strategies/test_deepspeed_strategy.py @@ -151,7 +151,6 @@ def test_deepspeed_precision_choice(cuda_count_1, tmpdir): @RunIf(deepspeed=True) def test_deepspeed_with_invalid_config_path(): """Test to ensure if we pass an invalid config path we throw an exception.""" - with pytest.raises( MisconfigurationException, match="You passed in a path to a DeepSpeed config but the path 
does not exist" ): @@ -426,7 +425,6 @@ def test_deepspeed_custom_activation_checkpointing_params_forwarded(tmpdir): @RunIf(min_cuda_gpus=1, deepspeed=True) def test_deepspeed_assert_config_zero_offload_disabled(tmpdir, deepspeed_zero_config): """Ensure if we use a config and turn off offload_optimizer, that this is set to False within the config.""" - deepspeed_zero_config["zero_optimization"]["offload_optimizer"] = False class TestCallback(Callback): @@ -908,7 +906,7 @@ def forward(self, x): return self.layer(x) def on_train_epoch_start(self) -> None: - assert all([x.dtype == torch.float16 for x in self.parameters()]) + assert all(x.dtype == torch.float16 for x in self.parameters()) model = TestModel() trainer = Trainer( @@ -935,7 +933,7 @@ def __init__(self): self.rnn = torch.nn.GRU(32, 32) def on_train_epoch_start(self) -> None: - assert all([x.dtype == torch.float16 for x in self.parameters()]) + assert all(x.dtype == torch.float16 for x in self.parameters()) model = TestModel() trainer = Trainer( diff --git a/tests/tests_pytorch/strategies/test_fsdp.py b/tests/tests_pytorch/strategies/test_fsdp.py index 07afb3732f542..c082d35b35794 100644 --- a/tests/tests_pytorch/strategies/test_fsdp.py +++ b/tests/tests_pytorch/strategies/test_fsdp.py @@ -184,7 +184,6 @@ def test_fsdp_custom_mixed_precision(tmpdir): @RunIf(min_cuda_gpus=2, skip_windows=True, standalone=True, min_torch="1.12") def test_fsdp_strategy_sync_batchnorm(tmpdir): """Test to ensure that sync_batchnorm works when using FSDP and GPU, and all stages can be run.""" - model = TestFSDPModel() trainer = Trainer( default_root_dir=tmpdir, @@ -272,7 +271,6 @@ def custom_auto_wrap_policy( ) def test_fsdp_checkpoint_multi_gpus(tmpdir, model, strategy, strategy_cfg): """Test to ensure that checkpoint is saved correctly when using multiple GPUs, and all stages can be run.""" - ck = ModelCheckpoint(save_last=True) strategy_cfg = strategy_cfg or {} diff --git a/tests/tests_pytorch/strategies/test_registry.py b/tests/tests_pytorch/strategies/test_registry.py index 8dd8524452bfb..35eec54e2e413 100644 --- a/tests/tests_pytorch/strategies/test_registry.py +++ b/tests/tests_pytorch/strategies/test_registry.py @@ -33,7 +33,6 @@ ], ) def test_strategy_registry_with_deepspeed_strategies(strategy_name, init_params): - assert strategy_name in StrategyRegistry assert StrategyRegistry[strategy_name]["init_params"] == init_params assert StrategyRegistry[strategy_name]["strategy"] == DeepSpeedStrategy @@ -42,7 +41,6 @@ def test_strategy_registry_with_deepspeed_strategies(strategy_name, init_params) @RunIf(deepspeed=True) @pytest.mark.parametrize("strategy", ["deepspeed", "deepspeed_stage_2_offload", "deepspeed_stage_3"]) def test_deepspeed_strategy_registry_with_trainer(tmpdir, strategy): - trainer = Trainer(default_root_dir=tmpdir, strategy=strategy, precision="16-mixed") assert isinstance(trainer.strategy, DeepSpeedStrategy) diff --git a/tests/tests_pytorch/test_cli.py b/tests/tests_pytorch/test_cli.py index 316c65910e5ef..7d42a0c6987a6 100644 --- a/tests/tests_pytorch/test_cli.py +++ b/tests/tests_pytorch/test_cli.py @@ -121,7 +121,6 @@ def _trainer_builder( @pytest.mark.parametrize(["trainer_class", "model_class"], [(Trainer, Model), (_trainer_builder, _model_builder)]) def test_lightning_cli(trainer_class, model_class, monkeypatch): """Test that LightningCLI correctly instantiates model, trainer and calls fit.""" - expected_model = {"model_param": 7} expected_trainer = {"limit_train_batches": 100} @@ -151,7 +150,6 @@ def on_train_start(callback, 
trainer, _): def test_lightning_cli_args_callbacks(cleandir): - callbacks = [ { "class_path": "lightning.pytorch.callbacks.LearningRateMonitor", @@ -278,7 +276,6 @@ def test_lightning_env_parse(cleandir): def test_lightning_cli_save_config_cases(cleandir): - config_path = "config.yaml" cli_args = ["fit", "--trainer.logger=false", "--trainer.fast_dev_run=1"] @@ -947,7 +944,6 @@ def test_lightning_cli_datamodule_short_arguments(): @pytest.mark.parametrize("use_class_path_callbacks", [False, True]) def test_callbacks_append(use_class_path_callbacks): - """This test validates registries are used when simplified command line are being used.""" cli_args = [ "--optimizer", diff --git a/tests/tests_pytorch/trainer/connectors/test_accelerator_connector.py b/tests/tests_pytorch/trainer/connectors/test_accelerator_connector.py index 3ae42add64f77..e435ebe037518 100644 --- a/tests/tests_pytorch/trainer/connectors/test_accelerator_connector.py +++ b/tests/tests_pytorch/trainer/connectors/test_accelerator_connector.py @@ -377,7 +377,6 @@ def test_device_type_when_strategy_instance_gpu_passed(cuda_count_2, mps_count_0 @pytest.mark.parametrize("precision", [1, 12, "invalid"]) def test_validate_precision_type(precision): - with pytest.raises(ValueError, match=f"Precision {repr(precision)} is invalid"): Trainer(precision=precision) diff --git a/tests/tests_pytorch/trainer/connectors/test_checkpoint_connector.py b/tests/tests_pytorch/trainer/connectors/test_checkpoint_connector.py index 0277eca292fca..905190396a68f 100644 --- a/tests/tests_pytorch/trainer/connectors/test_checkpoint_connector.py +++ b/tests/tests_pytorch/trainer/connectors/test_checkpoint_connector.py @@ -107,7 +107,6 @@ def test_hpc_max_ckpt_version(tmpdir): def test_ckpt_for_fsspec(): """Test that the _CheckpointConnector is able to write to fsspec file systems.""" - model = BoringModel() # hardcoding dir since `tmpdir` can be windows path trainer = Trainer( diff --git a/tests/tests_pytorch/trainer/connectors/test_data_connector.py b/tests/tests_pytorch/trainer/connectors/test_data_connector.py index 03ad099ec451f..13ce8d1204680 100644 --- a/tests/tests_pytorch/trainer/connectors/test_data_connector.py +++ b/tests/tests_pytorch/trainer/connectors/test_data_connector.py @@ -314,7 +314,6 @@ def predict_step(self, batch, batch_idx, dataloader_idx=0): def test_loader_detaching(): """Checks that the loader has been reset after the entrypoint.""" - loader = DataLoader(RandomDataset(32, 10), batch_size=1) model = LoaderTestModel() @@ -535,7 +534,6 @@ def test_invalid_hook_passed_in_datahook_selector(): @pytest.mark.parametrize("devices, warn_context", [(1, no_warning_call), (2, pytest.warns)]) def test_eval_distributed_sampler_warning(devices, warn_context): """Test that a warning is raised when `DistributedSampler` is used with evaluation.""" - model = BoringModel() trainer = Trainer(strategy="ddp", devices=devices, accelerator="cpu") trainer.strategy.connect(model) diff --git a/tests/tests_pytorch/trainer/flags/test_fast_dev_run.py b/tests/tests_pytorch/trainer/flags/test_fast_dev_run.py index a7d2d24af097b..705f5cd5bb339 100644 --- a/tests/tests_pytorch/trainer/flags/test_fast_dev_run.py +++ b/tests/tests_pytorch/trainer/flags/test_fast_dev_run.py @@ -14,7 +14,6 @@ def test_skip_on_fast_dev_run_tuner(tmpdir): """Test that tuner algorithms are skipped if fast dev run is enabled.""" - model = BoringModel() model.lr = 0.1 # avoid no-lr-found exception model.batch_size = 8 diff --git a/tests/tests_pytorch/trainer/flags/test_overfit_batches.py 
b/tests/tests_pytorch/trainer/flags/test_overfit_batches.py index efbb37a9cb150..83d03125ea4be 100644 --- a/tests/tests_pytorch/trainer/flags/test_overfit_batches.py +++ b/tests/tests_pytorch/trainer/flags/test_overfit_batches.py @@ -30,7 +30,6 @@ @pytest.mark.parametrize("overfit_batches", [1, 2, 0.1, 0.25, 1.0]) def test_overfit_basic(tmpdir, overfit_batches): """Tests that only training_step can be used when overfitting.""" - model = BoringModel() model.validation_step = None total_train_samples = len(BoringModel().train_dataloader()) diff --git a/tests/tests_pytorch/trainer/logging_/test_eval_loop_logging.py b/tests/tests_pytorch/trainer/logging_/test_eval_loop_logging.py index 4a2f6a31bd6ad..848c224cfa9ee 100644 --- a/tests/tests_pytorch/trainer/logging_/test_eval_loop_logging.py +++ b/tests/tests_pytorch/trainer/logging_/test_eval_loop_logging.py @@ -273,7 +273,6 @@ def test_log_works_in_val_callback(tmpdir): """Tests that log can be called within callback.""" class TestCallback(callbacks.Callback): - count = 0 choices = [False, True] @@ -367,7 +366,6 @@ def test_log_works_in_test_callback(tmpdir): """Tests that log can be called within callback.""" class TestCallback(callbacks.Callback): - # helpers count = 0 choices = [False, True] @@ -499,7 +497,6 @@ def test_validation_step_log_with_tensorboard(mock_log_metrics, tmpdir): """This tests make sure we properly log_metrics to loggers.""" class ExtendedModel(BoringModel): - val_losses = [] def __init__(self, some_val=7): diff --git a/tests/tests_pytorch/trainer/logging_/test_logger_connector.py b/tests/tests_pytorch/trainer/logging_/test_logger_connector.py index 9af1a944078f9..2c889fd4cb5ca 100644 --- a/tests/tests_pytorch/trainer/logging_/test_logger_connector.py +++ b/tests/tests_pytorch/trainer/logging_/test_logger_connector.py @@ -448,7 +448,6 @@ def __init__(self): self.layer = torch.nn.Linear(32, 10) def training_step(self, batch): - self.metrics(torch.rand(10, 10).softmax(-1), torch.randint(0, 10, (10,))) self.metrics._is_currently_logging = True self.log_dict(self.metrics, on_step=True, on_epoch=True) diff --git a/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py b/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py index 1f6c23123334c..1159fc5410e36 100644 --- a/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py +++ b/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py @@ -212,7 +212,6 @@ def test_log_works_in_train_callback(tmpdir): """Tests that log can be called within callback.""" class TestCallback(callbacks.Callback): - count = 0 choices = [False, True] @@ -352,7 +351,6 @@ def validation_step(self, batch, batch_idx): ], ) def test_logging_sync_dist_true(tmpdir, devices, accelerator): - """Tests to ensure that the sync_dist flag works (should just return the original value)""" fake_result = 1 model = LoggingSyncDistModel(fake_result) diff --git a/tests/tests_pytorch/trainer/optimization/test_manual_optimization.py b/tests/tests_pytorch/trainer/optimization/test_manual_optimization.py index 7b5b5b6395a91..5fa2d1bcdf223 100644 --- a/tests/tests_pytorch/trainer/optimization/test_manual_optimization.py +++ b/tests/tests_pytorch/trainer/optimization/test_manual_optimization.py @@ -153,7 +153,6 @@ def test_multiple_optimizers_manual_amp(tmpdir, accelerator): class ManualOptimizationExtendedModel(BoringModel): - count = 0 called = collections.defaultdict(int) detach = False @@ -178,7 +177,6 @@ def training_step(self, batch, batch_idx): loss *= 0.1 if self.should_update: - 
self.manual_backward(loss) opt.step() opt.zero_grad() @@ -238,7 +236,6 @@ def test_manual_optimization_and_accumulated_gradient(tmpdir): seed_everything(234) class ExtendedModel(BoringModel): - count = 1 called = collections.defaultdict(int) detach = False @@ -271,7 +268,6 @@ def training_step(self, batch, batch_idx): loss *= 0.1 if self.should_update: - self.manual_backward(loss) if self.should_have_updated: opt.step() @@ -394,7 +390,6 @@ def test_step_with_optimizer_closure(tmpdir): """Tests that `step` works with optimizer_closure.""" class TestModel(BoringModel): - _losses = [] def __init__(self): @@ -508,7 +503,6 @@ def on_train_start(self) -> None: mock_adam_step.reset_mock() def training_step(self, batch, batch_idx): - # emulate gans training opt_gen, opt_dis = self.optimizers() @@ -587,7 +581,6 @@ def manual_sync_grad(self) -> bool: return True def training_step(self, batch, batch_idx): - # emulate gans training opt_gen, opt_dis = self.optimizers() @@ -658,7 +651,6 @@ def on_train_end(self): def train_manual_optimization(tmpdir, strategy, model_cls=TesManualOptimizationDDPModel): - seed_everything(42) model = model_cls() @@ -687,20 +679,17 @@ def train_manual_optimization(tmpdir, strategy, model_cls=TesManualOptimizationD @RunIf(min_cuda_gpus=2, standalone=True) def test_step_with_optimizer_closure_with_different_frequencies_ddp(tmpdir): """Tests that `step` works with optimizer_closure and different accumulated_gradient frequency.""" - train_manual_optimization(tmpdir, "ddp") @RunIf(min_cuda_gpus=2) def test_step_with_optimizer_closure_with_different_frequencies_ddp_spawn(tmpdir): """Tests that `step` works with optimizer_closure and different accumulated_gradient frequency.""" - train_manual_optimization(tmpdir, "ddp_spawn") class TestManualOptimizationDDPModelToggleModel(TesManualOptimizationDDPModel): def training_step(self, batch, batch_idx): - # emulate gans training opt_gen, opt_dis = self.optimizers() diff --git a/tests/tests_pytorch/trainer/optimization/test_optimizers.py b/tests/tests_pytorch/trainer/optimization/test_optimizers.py index 7d887380fa5d9..d824117eeb014 100644 --- a/tests/tests_pytorch/trainer/optimization/test_optimizers.py +++ b/tests/tests_pytorch/trainer/optimization/test_optimizers.py @@ -33,7 +33,6 @@ def test_optimizer_with_scheduling(tmpdir): """Verify that learning rate scheduling is working.""" - model = BoringModel() trainer = Trainer( default_root_dir=tmpdir, max_epochs=1, limit_val_batches=0.1, limit_train_batches=0.2, val_check_interval=0.5 diff --git a/tests/tests_pytorch/trainer/properties/test_get_model.py b/tests/tests_pytorch/trainer/properties/test_get_model.py index 5390ac6f256f2..bf9c4e651513a 100644 --- a/tests/tests_pytorch/trainer/properties/test_get_model.py +++ b/tests/tests_pytorch/trainer/properties/test_get_model.py @@ -29,7 +29,6 @@ def on_fit_end(self): def test_get_model(tmpdir): """Tests that `trainer.lightning_module` extracts the model correctly.""" - model = TrainerGetModel() limit_train_batches = 2 @@ -42,7 +41,6 @@ def test_get_model(tmpdir): @RunIf(skip_windows=True) def test_get_model_ddp_cpu(tmpdir): """Tests that `trainer.lightning_module` extracts the model correctly when using ddp on cpu.""" - model = TrainerGetModel() limit_train_batches = 2 @@ -67,7 +65,6 @@ def test_get_model_ddp_cpu(tmpdir): ) def test_get_model_gpu(tmpdir, accelerator): """Tests that `trainer.lightning_module` extracts the model correctly when using GPU.""" - model = TrainerGetModel() limit_train_batches = 2 diff --git 
a/tests/tests_pytorch/trainer/test_dataloaders.py b/tests/tests_pytorch/trainer/test_dataloaders.py index 42cbea5d26d7e..9b39c3b0beae0 100644 --- a/tests/tests_pytorch/trainer/test_dataloaders.py +++ b/tests/tests_pytorch/trainer/test_dataloaders.py @@ -149,7 +149,6 @@ def test_multiple_eval_dataloader(tmpdir, ckpt_path): def test_train_dataloader_passed_to_fit(tmpdir): """Verify that train dataloader can be passed to fit.""" - # only train passed to fit model = BoringModel() train_loader = model.train_dataloader() @@ -166,7 +165,6 @@ def test_train_dataloader_passed_to_fit(tmpdir): @pytest.mark.parametrize("n", (1, 2)) def test_dataloaders_passed_to_fn(tmpdir, ckpt_path, n): """Verify that dataloaders can be passed.""" - train_dataloaders = DataLoader(RandomDataset(32, 64)) if n == 1: model = BoringModel() @@ -240,7 +238,6 @@ def on_test_epoch_start(self, *_): def test_inf_dataloaders_with_limit_percent_batches(tmpdir): """Verify inf train, val & test dataloaders (e.g. IterableDataset) passed with batch limit in percent.""" - epoch_cb = Counter() trainer = Trainer( default_root_dir=tmpdir, @@ -288,7 +285,6 @@ def test_inf_dataloaders_with_limit_percent_batches(tmpdir): ) def test_dataloaders_with_limit_train_batches(tmpdir, dataset, limit_train_batches): """Verify inf train, val & test dataloaders (e.g. IterableDataset) passed with batch limit as number.""" - epoch_cb = Counter() max_epochs = 2 trainer = Trainer( @@ -321,7 +317,6 @@ def test_dataloaders_with_limit_train_batches(tmpdir, dataset, limit_train_batch ) def test_dataloaders_with_limit_val_batches(tmpdir, dataset): """Verify inf train, val & test dataloaders (e.g. IterableDataset) passed with batch limit as number.""" - epoch_cb = Counter() callbacks = [epoch_cb] enable_checkpointing = False @@ -360,7 +355,6 @@ def test_dataloaders_with_limit_val_batches(tmpdir, dataset): ) def test_datasets_dataloaders_with_limit_num_batches(tmpdir, dataset): """Verify inf train, val & test dataloaders (e.g. 
IterableDataset) passed with batch limit as number.""" - epoch_cb = Counter() max_epochs = 2 limit_batches = 10 @@ -424,7 +418,6 @@ def test_dataloaders_with_limit_percent_batches(tmpdir, limit_train_batches, lim @pytest.mark.parametrize(["limit_train_batches", "limit_val_batches", "limit_test_batches"], [(1, 2, 3), (1, 2, 1e50)]) def test_dataloaders_with_limit_num_batches(tmpdir, limit_train_batches, limit_val_batches, limit_test_batches): """Verify num_batches for train, val & test dataloaders passed with batch limit as number.""" - model = MultiEvalDataLoaderModel() # train, multiple val and multiple test passed with percent_check @@ -502,7 +495,6 @@ def test_dataloaders_with_fast_dev_run(tmpdir, fast_dev_run): @pytest.mark.parametrize("ckpt_path", [None, "best", "specific"]) def test_mixing_of_dataloader_options(tmpdir, ckpt_path): """Verify that dataloaders can be passed to fit.""" - model = BoringModel() eval_dataloader = DataLoader(RandomDataset(32, 64)) trainer_options = { @@ -550,7 +542,6 @@ def test_warning_on_zero_len_dataloader(): @patch("lightning.pytorch.trainer.connectors.data_connector.multiprocessing.cpu_count", return_value=4) def test_warning_with_few_workers(_, tmpdir, ckpt_path, stage): """Test that error is raised if dataloader with only a few workers is used.""" - model = BoringModel() train_dl = model.train_dataloader() @@ -1090,7 +1081,6 @@ def on_validation_epoch_end(self): @pytest.mark.parametrize("n", ["test", -1]) def test_dataloaders_load_every_n_epochs_exception(tmpdir, n): - with pytest.raises(MisconfigurationException, match="should be an int >"): Trainer(default_root_dir=tmpdir, reload_dataloaders_every_n_epochs=n) diff --git a/tests/tests_pytorch/trainer/test_trainer.py b/tests/tests_pytorch/trainer/test_trainer.py index a99e4fd018d37..e4aae715571e3 100644 --- a/tests/tests_pytorch/trainer/test_trainer.py +++ b/tests/tests_pytorch/trainer/test_trainer.py @@ -882,7 +882,6 @@ def test_disabled_training(tmpdir): """Verify that `limit_train_batches=0` disables the training loop unless `fast_dev_run=True`.""" class CurrentModel(BoringModel): - training_step_invoked = False def training_step(self, *args, **kwargs): @@ -937,7 +936,6 @@ def test_disabled_validation(tmpdir): """Verify that `limit_val_batches=0` disables the validation loop unless `fast_dev_run=True`.""" class CurrentModel(BoringModel): - validation_step_invoked = False def validation_step(self, *args, **kwargs): @@ -976,7 +974,6 @@ def validation_step(self, *args, **kwargs): def test_on_exception_hook(tmpdir): """Test the on_exception callback hook and the trainer interrupted flag.""" - model = BoringModel() class InterruptCallback(Callback): @@ -1262,7 +1259,6 @@ def predict_dataloader(self): class CustomPredictionWriter(BasePredictionWriter): - write_on_batch_end_called = False write_on_epoch_end_called = False @@ -1407,7 +1403,6 @@ def test_trainer_predict_ddp_spawn(tmpdir, accelerator): @pytest.mark.parametrize("dataset_cls", [RandomDataset, RandomIterableDatasetWithLen, RandomIterableDataset]) def test_index_batch_sampler_wrapper_with_iterable_dataset(dataset_cls, tmpdir): - ds = dataset_cls(32, 8) loader = DataLoader(ds) is_iterable_dataset = isinstance(ds, IterableDataset) @@ -1491,7 +1486,6 @@ def configure_optimizers(self): ], ) def test_setup_hook_move_to_device_correctly(tmpdir, accelerator): - """Verify that if a user defines a layer in the setup hook function, this is moved to the correct device.""" class TestModel(BoringModel): @@ -1598,7 +1592,6 @@ def backward(self, *args, 
**kwargs): def test_check_val_every_n_epoch_exception(tmpdir): - with pytest.raises(MisconfigurationException, match="should be an integer."): Trainer(default_root_dir=tmpdir, max_epochs=1, check_val_every_n_epoch=1.2) @@ -1673,7 +1666,6 @@ def state_dict(self) -> dict: def test_on_load_checkpoint_missing_callbacks(tmpdir): """Test a warning appears when callbacks in the checkpoint don't match callbacks provided when resuming.""" - model = BoringModel() chk = ModelCheckpoint(dirpath=tmpdir, save_last=True) diff --git a/tests/tests_pytorch/tuner/test_lr_finder.py b/tests/tests_pytorch/tuner/test_lr_finder.py index 253cc35e6d3f2..e1670b9f29c7d 100644 --- a/tests/tests_pytorch/tuner/test_lr_finder.py +++ b/tests/tests_pytorch/tuner/test_lr_finder.py @@ -57,7 +57,6 @@ def configure_optimizers(self): def test_model_reset_correctly(tmpdir): """Check that model weights are correctly reset after _lr_find()""" - model = BoringModel() model.lr = 0.1 @@ -80,7 +79,6 @@ def test_model_reset_correctly(tmpdir): def test_trainer_reset_correctly(tmpdir): """Check that all trainer parameters are reset correctly after lr_find()""" - model = BoringModel() model.lr = 0.1 @@ -425,7 +423,6 @@ def __init__(self): def test_lr_finder_callback_restarting(tmpdir): """Test that `LearningRateFinder` does not set restarting=True when loading checkpoint.""" - num_lr_steps = 100 class MyBoringModel(BoringModel): diff --git a/tests/tests_pytorch/utilities/migration/test_utils.py b/tests/tests_pytorch/utilities/migration/test_utils.py index e6db26ed58866..9461a348e14c5 100644 --- a/tests/tests_pytorch/utilities/migration/test_utils.py +++ b/tests/tests_pytorch/utilities/migration/test_utils.py @@ -44,7 +44,6 @@ def test_patch_legacy_gpus_arg_default(): def test_migrate_checkpoint(monkeypatch): """Test that the correct migration function gets executed given the current version of the checkpoint.""" - # A checkpoint that is older than any migration point in the index old_checkpoint = {"pytorch-lightning_version": "0.0.0", "content": 123} new_checkpoint, call_order = _run_simple_migration(monkeypatch, old_checkpoint) @@ -108,7 +107,6 @@ def test_migrate_checkpoint_too_new(): def test_migrate_checkpoint_for_pl(caplog): """Test that the automatic migration in Lightning informs the user about how to make the upgrade permanent.""" - # simulate a very recent checkpoint, no migrations needed loaded_checkpoint = {"pytorch-lightning_version": pl.__version__, "global_step": 2, "epoch": 0} new_checkpoint = _pl_migrate_checkpoint(loaded_checkpoint, "path/to/ckpt") diff --git a/tests/tests_pytorch/utilities/test_all_gather_grad.py b/tests/tests_pytorch/utilities/test_all_gather_grad.py index 80f765290a99f..6080816478843 100644 --- a/tests/tests_pytorch/utilities/test_all_gather_grad.py +++ b/tests/tests_pytorch/utilities/test_all_gather_grad.py @@ -52,7 +52,6 @@ def test_all_gather_ddp_spawn(): @RunIf(min_cuda_gpus=2, skip_windows=True, standalone=True) def test_all_gather_collection(tmpdir): class TestModel(BoringModel): - on_train_epoch_end_called = False def on_train_epoch_end(self): @@ -108,7 +107,6 @@ def on_train_epoch_end(self): @RunIf(min_cuda_gpus=2, skip_windows=True, standalone=True) def test_all_gather_sync_grads(tmpdir): class TestModel(BoringModel): - training_step_called = False def training_step(self, batch, batch_idx): diff --git a/tests/tests_pytorch/utilities/test_parameter_tying.py b/tests/tests_pytorch/utilities/test_parameter_tying.py index de40198c573d9..5910feec1961d 100644 --- 
a/tests/tests_pytorch/utilities/test_parameter_tying.py +++ b/tests/tests_pytorch/utilities/test_parameter_tying.py @@ -39,7 +39,6 @@ def forward(self, x): [(BoringModel, []), (ParameterSharingModule, [["layer_1.weight", "layer_3.weight"]])], ) def test_find_shared_parameters(model, expected_shared_params): - assert expected_shared_params == find_shared_parameters(model()) From 5ca605673426953ac18587e065fb5739d3d03232 Mon Sep 17 00:00:00 2001 From: Jirka Borovec <6035284+Borda@users.noreply.github.com> Date: Wed, 26 Apr 2023 22:27:53 +0200 Subject: [PATCH 16/93] simple examples instead of skips (#17482) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> (cherry picked from commit cc3800aac3e91d0a741047fd26584b1de0cfedc4) --- .../app/utilities/packaging/build_config.py | 8 +- .../pytorch/callbacks/device_stats_monitor.py | 11 +- src/lightning/pytorch/core/module.py | 36 +++---- .../pytorch/overrides/distributed.py | 100 +++++++++--------- src/lightning/pytorch/utilities/deepspeed.py | 17 ++- 5 files changed, 84 insertions(+), 88 deletions(-) diff --git a/src/lightning/app/utilities/packaging/build_config.py b/src/lightning/app/utilities/packaging/build_config.py index a16b84a6109cd..f67f7a81583e7 100644 --- a/src/lightning/app/utilities/packaging/build_config.py +++ b/src/lightning/app/utilities/packaging/build_config.py @@ -31,13 +31,7 @@ def load_requirements( path_dir: str, file_name: str = "base.txt", comment_char: str = "#", unfreeze: bool = True ) -> List[str]: - """Load requirements from a file. - - >>> from lightning.app import _PROJECT_ROOT - >>> path_req = os.path.join(_PROJECT_ROOT, "requirements") - >>> load_requirements(path_req, "docs.txt") # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE +SKIP - ['sphinx>=4.0', ...] - """ + """Load requirements from a file.""" path = os.path.join(path_dir, file_name) if not os.path.isfile(path): return [] diff --git a/src/lightning/pytorch/callbacks/device_stats_monitor.py b/src/lightning/pytorch/callbacks/device_stats_monitor.py index 1c317967ffdca..4442e73dbbc6e 100644 --- a/src/lightning/pytorch/callbacks/device_stats_monitor.py +++ b/src/lightning/pytorch/callbacks/device_stats_monitor.py @@ -44,11 +44,12 @@ class DeviceStatsMonitor(Callback): ModuleNotFoundError: If ``psutil`` is not installed and CPU stats are monitored. - Example: - >>> from lightning.pytorch import Trainer - >>> from lightning.pytorch.callbacks import DeviceStatsMonitor - >>> device_stats = DeviceStatsMonitor() # doctest: +SKIP - >>> trainer = Trainer(callbacks=[device_stats]) # doctest: +SKIP + Example:: + + from lightning import Trainer + from lightning.pytorch.callbacks import DeviceStatsMonitor + device_stats = DeviceStatsMonitor() + trainer = Trainer(callbacks=[device_stats]) """ def __init__(self, cpu_stats: Optional[bool] = None) -> None: diff --git a/src/lightning/pytorch/core/module.py b/src/lightning/pytorch/core/module.py index 3408d4f77b8a3..f66c0d0e2c000 100644 --- a/src/lightning/pytorch/core/module.py +++ b/src/lightning/pytorch/core/module.py @@ -1319,7 +1319,8 @@ def to_onnx(self, file_path: Union[str, Path], input_sample: Optional[Any] = Non input_sample: An input for tracing. Default: None (Use self.example_input_array) **kwargs: Will be passed to torch.onnx.export function. - Example: + Example:: + class SimpleModel(LightningModule): def __init__(self): super().__init__() @@ -1383,23 +1384,22 @@ def to_torchscript( to use this feature without limitations. 
See also the :mod:`torch.jit` documentation for supported features. - Example: - >>> class SimpleModel(LightningModule): - ... def __init__(self): - ... super().__init__() - ... self.l1 = torch.nn.Linear(in_features=64, out_features=4) - ... - ... def forward(self, x): - ... return torch.relu(self.l1(x.view(x.size(0), -1))) - ... - >>> import os - >>> model = SimpleModel() - >>> model.to_torchscript(file_path="model.pt") # doctest: +SKIP - >>> os.path.isfile("model.pt") # doctest: +SKIP - >>> torch.jit.save(model.to_torchscript(file_path="model_trace.pt", method='trace', # doctest: +SKIP - ... example_inputs=torch.randn(1, 64))) # doctest: +SKIP - >>> os.path.isfile("model_trace.pt") # doctest: +SKIP - True + Example:: + + class SimpleModel(LightningModule): + def __init__(self): + super().__init__() + self.l1 = torch.nn.Linear(in_features=64, out_features=4) + + def forward(self, x): + return torch.relu(self.l1(x.view(x.size(0), -1))) + + model = SimpleModel() + model.to_torchscript(file_path="model.pt") + + torch.jit.save(model.to_torchscript( + file_path="model_trace.pt", method='trace', example_inputs=torch.randn(1, 64)) + ) Return: This LightningModule as a torchscript, regardless of whether `file_path` is diff --git a/src/lightning/pytorch/overrides/distributed.py b/src/lightning/pytorch/overrides/distributed.py index f34314c9f9305..1480163dc57c0 100644 --- a/src/lightning/pytorch/overrides/distributed.py +++ b/src/lightning/pytorch/overrides/distributed.py @@ -90,54 +90,58 @@ def _register_ddp_comm_hook( as FP16 compression as wrapper, which could be combined with ddp_comm_hook - Examples: - - >>> from torch.distributed.algorithms.ddp_comm_hooks import ( # doctest: +SKIP - ... default_hooks as default, - ... powerSGD_hook as powerSGD, - ... post_localSGD_hook as post_localSGD, - ... ) - >>> # fp16_compress_hook for compress gradients - >>> ddp_model = ... - >>> _register_ddp_comm_hook( # doctest: +SKIP - ... model=ddp_model, - ... ddp_comm_hook=default.fp16_compress_hook, - ... ) - >>> # powerSGD_hook - >>> ddp_model = ... - >>> _register_ddp_comm_hook( # doctest: +SKIP - ... model=ddp_model, - ... ddp_comm_state=powerSGD.PowerSGDState( - ... process_group=None, - ... matrix_approximation_rank=1, - ... start_powerSGD_iter=5000, - ... ), - ... ddp_comm_hook=powerSGD.powerSGD_hook, - ... ) - >>> # post_localSGD_hook - >>> subgroup, _ = torch.distributed.new_subgroups() # doctest: +SKIP - >>> ddp_model = ... - >>> _register_ddp_comm_hook( # doctest: +SKIP - ... model=ddp_model, - ... state=post_localSGD.PostLocalSGDState( - ... process_group=None, - ... subgroup=subgroup, - ... start_localSGD_iter=1_000, - ... ), - ... ddp_comm_hook=post_localSGD.post_localSGD_hook, - ... ) - >>> # fp16_compress_wrapper combined with other communication hook - >>> ddp_model = ... - >>> _register_ddp_comm_hook( # doctest: +SKIP - ... model=ddp_model, - ... ddp_comm_state=powerSGD.PowerSGDState( - ... process_group=None, - ... matrix_approximation_rank=1, - ... start_powerSGD_iter=5000, - ... ), - ... ddp_comm_hook=powerSGD.powerSGD_hook, - ... ddp_comm_wrapper=default.fp16_compress_wrapper, - ... ) + Examples:: + + from torch.distributed.algorithms.ddp_comm_hooks import ( + default_hooks as default, + powerSGD_hook as powerSGD, + post_localSGD_hook as post_localSGD, + ) + + # fp16_compress_hook for compress gradients + ddp_model = ... + _register_ddp_comm_hook( + model=ddp_model, + ddp_comm_hook=default.fp16_compress_hook, + ) + + # powerSGD_hook + ddp_model = ... 
+ _register_ddp_comm_hook( + model=ddp_model, + ddp_comm_state=powerSGD.PowerSGDState( + process_group=None, + matrix_approximation_rank=1, + start_powerSGD_iter=5000, + ), + ddp_comm_hook=powerSGD.powerSGD_hook, + ) + + # post_localSGD_hook + subgroup, _ = torch.distributed.new_subgroups() + ddp_model = ... + _register_ddp_comm_hook( + model=ddp_model, + state=post_localSGD.PostLocalSGDState( + process_group=None, + subgroup=subgroup, + start_localSGD_iter=1_000, + ), + ddp_comm_hook=post_localSGD.post_localSGD_hook, + ) + + # fp16_compress_wrapper combined with other communication hook + ddp_model = ... + _register_ddp_comm_hook( + model=ddp_model, + ddp_comm_state=powerSGD.PowerSGDState( + process_group=None, + matrix_approximation_rank=1, + start_powerSGD_iter=5000, + ), + ddp_comm_hook=powerSGD.powerSGD_hook, + ddp_comm_wrapper=default.fp16_compress_wrapper, + ) """ if ddp_comm_hook is None: return diff --git a/src/lightning/pytorch/utilities/deepspeed.py b/src/lightning/pytorch/utilities/deepspeed.py index 1fac2b3160db0..797deb60e00ac 100644 --- a/src/lightning/pytorch/utilities/deepspeed.py +++ b/src/lightning/pytorch/utilities/deepspeed.py @@ -66,16 +66,13 @@ def convert_zero_checkpoint_to_fp32_state_dict( tag: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` - Examples: - - >>> from lightning.pytorch.utilities.deepspeed import ( - ... convert_zero_checkpoint_to_fp32_state_dict - ... ) - >>> # Lightning deepspeed has saved a directory instead of a file - >>> save_path = "lightning_logs/version_0/checkpoints/epoch=0-step=0.ckpt/" # doctest: +SKIP - >>> output_path = "lightning_model.pt" # doctest: +SKIP - >>> convert_zero_checkpoint_to_fp32_state_dict(save_path, output_path) # doctest: +SKIP - Saving fp32 state dict to lightning_model.pt + Examples:: + + # Lightning deepspeed has saved a directory instead of a file + convert_zero_checkpoint_to_fp32_state_dict( + "lightning_logs/version_0/checkpoints/epoch=0-step=0.ckpt/", + "lightning_model.pt" + ) """ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) From 4cadf3255d2e46343813744a4026147952ab3a7d Mon Sep 17 00:00:00 2001 From: Ethan Harris Date: Thu, 27 Apr 2023 12:21:04 +0200 Subject: [PATCH 17/93] [App] Add missing python-multipart dependency (#17244) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Jirka Borovec <6035284+Borda@users.noreply.github.com> Co-authored-by: Jirka (cherry picked from commit 8d310b89ead275a80dbacf299dae4b9524240aaa) --- requirements/app/base.txt | 1 + src/lightning/app/cli/lightning_cli.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/requirements/app/base.txt b/requirements/app/base.txt index 1a2c0f018f159..8b97dc9b2f20e 100644 --- a/requirements/app/base.txt +++ b/requirements/app/base.txt @@ -12,6 +12,7 @@ beautifulsoup4 >=4.8.0, <4.11.2 inquirer >=2.10.0, <=3.1.3 psutil <5.9.5 click <=8.1.3 +python-multipart>=0.0.5, <=0.0.6 fastapi >=0.69.0, <0.89.0 # strict; TODO: broken serializations starlette # https://fastapi.tiangolo.com/deployment/versions/#about-starlette diff --git a/src/lightning/app/cli/lightning_cli.py b/src/lightning/app/cli/lightning_cli.py index cf67ce9434806..38211b7b0ab7f 100644 --- a/src/lightning/app/cli/lightning_cli.py +++ b/src/lightning/app/cli/lightning_cli.py @@ -530,7 +530,7 @@ def ssh(app_name: Optional[str] = None, component_name: 
Optional[str] = None) -> raise click.ClickException( "Unable to find the ssh binary. You must install ssh first to use this functionality." ) - os.execv(ssh_path, ["-tt", f"{component_id}@{ssh_endpoint}"]) + os.execv(ssh_path, ["-tt", f"{component_id}@{ssh_endpoint}"]) # noqa: S606 @_main.group() From 993085849849016c12fbe27ad91bb76f8fceec41 Mon Sep 17 00:00:00 2001 From: asmith26 Date: Thu, 27 Apr 2023 12:54:52 +0100 Subject: [PATCH 18/93] Fix conda badge in README (#17345) (cherry picked from commit 8c18439207c30eedbd7aa3737fdb88b4f0cf8290) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index db35544b5ccbc..45af4b8d08f6f 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ ______________________________________________________________________ [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/pytorch-lightning)](https://pypi.org/project/pytorch-lightning/) [![PyPI Status](https://badge.fury.io/py/pytorch-lightning.svg)](https://badge.fury.io/py/pytorch-lightning) [![PyPI Status](https://pepy.tech/badge/pytorch-lightning)](https://pepy.tech/project/pytorch-lightning) -[![Conda](https://img.shields.io/conda/v/conda-forge/pytorch-lightning?label=conda&color=success)](https://anaconda.org/conda-forge/pytorch-lightning) +[![Conda](https://img.shields.io/conda/v/conda-forge/lightning?label=conda&color=success)](https://anaconda.org/conda-forge/lightning) [![DockerHub](https://img.shields.io/docker/pulls/pytorchlightning/pytorch_lightning.svg)](https://hub.docker.com/r/pytorchlightning/pytorch_lightning) [![codecov](https://codecov.io/gh/Lightning-AI/lightning/branch/master/graph/badge.svg?token=SmzX8mnKlA)](https://codecov.io/gh/Lightning-AI/lightning) From ff88ebc5d680d7499fc7f81ca320a193eaee87c8 Mon Sep 17 00:00:00 2001 From: "Adam J. 
Stewart" Date: Thu, 27 Apr 2023 07:24:43 -0500 Subject: [PATCH 19/93] Lightning: make type hints public (#17100) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add missing MANIFESTs * move * one more * Ignore version.info properly * move * manifest --------- Co-authored-by: Carlos Mocholí Co-authored-by: Jirka Borovec <6035284+Borda@users.noreply.github.com> Co-authored-by: Jirka Co-authored-by: Adrian Wälchli (cherry picked from commit 3d7360a8d4b83f5901f423055ea794d9f42ab3af) --- .gitignore | 8 ++++---- setup.py | 6 +++++- src/lightning/app/cli/lightning_cli.py | 2 +- src/lightning/py.typed | 0 src/lightning_app/MANIFEST.in | 1 + src/lightning_app/py.typed | 0 src/lightning_fabric/MANIFEST.in | 1 + src/lightning_fabric/py.typed | 0 8 files changed, 12 insertions(+), 6 deletions(-) create mode 100644 src/lightning/py.typed create mode 100644 src/lightning_app/py.typed create mode 100644 src/lightning_fabric/py.typed diff --git a/.gitignore b/.gitignore index 375d77d5ef4f8..17c18b06ee99a 100644 --- a/.gitignore +++ b/.gitignore @@ -53,9 +53,9 @@ wheels/ .installed.cfg *.egg src/*/version.info -src/lightning_app/ -src/lightning_fabric/ -src/pytorch_lightning/ +src/lightning_app/* +src/lightning_fabric/* +src/pytorch_lightning/* !src/*/__about__.py !src/*/__main__.py !src/*/__setup__.py @@ -63,7 +63,7 @@ src/pytorch_lightning/ !src/*/MANIFEST.in !src/*/py.typed !src/*/README.md -!src/*/*.info +!src/*/shell-folder_code-lives-lightning.info # PyInstaller # Usually these files are written by a python script from a template diff --git a/setup.py b/setup.py index 5b4e9b5caaaeb..e2dfc48d08088 100755 --- a/setup.py +++ b/setup.py @@ -87,7 +87,11 @@ def _set_manifest_path(manifest_dir: str, aggregate: bool = False, mapping: Mapp if aggregate: # aggregate all MANIFEST.in contents into a single temporary file manifest_path = _named_temporary_file(manifest_dir) - lines = ["include src/lightning/version.info\n", "include requirements/base.txt\n"] + lines = [ + "include src/lightning/version.info\n", + "include src/lightning/py.typed\n", + "include requirements/base.txt\n", + ] # load manifest and aggregated all manifests for pkg in mapping.values(): pkg_manifest = os.path.join(_PATH_SRC, pkg, "MANIFEST.in") diff --git a/src/lightning/app/cli/lightning_cli.py b/src/lightning/app/cli/lightning_cli.py index 38211b7b0ab7f..cf67ce9434806 100644 --- a/src/lightning/app/cli/lightning_cli.py +++ b/src/lightning/app/cli/lightning_cli.py @@ -530,7 +530,7 @@ def ssh(app_name: Optional[str] = None, component_name: Optional[str] = None) -> raise click.ClickException( "Unable to find the ssh binary. You must install ssh first to use this functionality." 
) - os.execv(ssh_path, ["-tt", f"{component_id}@{ssh_endpoint}"]) # noqa: S606 + os.execv(ssh_path, ["-tt", f"{component_id}@{ssh_endpoint}"]) @_main.group() diff --git a/src/lightning/py.typed b/src/lightning/py.typed new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/src/lightning_app/MANIFEST.in b/src/lightning_app/MANIFEST.in index 45aea6f8bdbef..a8e251508baf5 100644 --- a/src/lightning_app/MANIFEST.in +++ b/src/lightning_app/MANIFEST.in @@ -8,3 +8,4 @@ recursive-include src/lightning_app/cli/*-template * # TODO: remove this once lightning-ui package is ready as a dependency recursive-include src/lightning_app/ui * include src/lightning_app/components/serve/catimage.png +include src/lightning_app/py.typed # marker file for PEP 561 diff --git a/src/lightning_app/py.typed b/src/lightning_app/py.typed new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/src/lightning_fabric/MANIFEST.in b/src/lightning_fabric/MANIFEST.in index 4cff43c8fdb22..3c33b95ae9b20 100644 --- a/src/lightning_fabric/MANIFEST.in +++ b/src/lightning_fabric/MANIFEST.in @@ -4,3 +4,4 @@ include src/lightning_fabric/CHANGELOG.md include src/lightning_fabric/README.md recursive-include requirements/fabric *.txt include .actions/assistant.py +include src/lightning_fabric/py.typed # marker file for PEP 561 diff --git a/src/lightning_fabric/py.typed b/src/lightning_fabric/py.typed new file mode 100644 index 0000000000000..e69de29bb2d1d From 1d875e61fcb5912b6e9bb333b6d9e298349a5bc1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Thu, 27 Apr 2023 16:27:26 +0200 Subject: [PATCH 20/93] Fix registry typing annotation (#17489) (cherry picked from commit 4d3517c007bf095dad6509d130599ce28d1764b9) --- src/lightning/fabric/accelerators/accelerator.py | 6 ++++-- src/lightning/fabric/accelerators/cpu.py | 5 +++-- src/lightning/fabric/accelerators/cuda.py | 5 +++-- src/lightning/fabric/accelerators/mps.py | 5 +++-- src/lightning/fabric/accelerators/registry.py | 3 ++- src/lightning/fabric/strategies/ddp.py | 3 ++- src/lightning/fabric/strategies/deepspeed.py | 3 ++- src/lightning/fabric/strategies/dp.py | 3 ++- src/lightning/fabric/strategies/fsdp.py | 3 ++- src/lightning/fabric/strategies/registry.py | 5 +++-- src/lightning/fabric/strategies/strategy.py | 3 ++- src/lightning/fabric/strategies/xla.py | 4 ++-- src/lightning/pytorch/accelerators/cpu.py | 5 +++-- src/lightning/pytorch/accelerators/cuda.py | 3 ++- src/lightning/pytorch/accelerators/ipu.py | 3 ++- src/lightning/pytorch/accelerators/mps.py | 3 ++- src/lightning/pytorch/strategies/ddp.py | 3 ++- src/lightning/pytorch/strategies/deepspeed.py | 3 ++- src/lightning/pytorch/strategies/fsdp.py | 3 ++- src/lightning/pytorch/strategies/ipu.py | 3 ++- src/lightning/pytorch/strategies/single_device.py | 5 +++-- src/lightning/pytorch/strategies/strategy.py | 3 ++- src/lightning/pytorch/strategies/xla.py | 3 ++- .../pytorch/trainer/connectors/accelerator_connector.py | 5 ++--- src/lightning/pytorch/trainer/trainer.py | 3 +-- 25 files changed, 57 insertions(+), 36 deletions(-) diff --git a/src/lightning/fabric/accelerators/accelerator.py b/src/lightning/fabric/accelerators/accelerator.py index 79a3662484e68..f843f05f21ccf 100644 --- a/src/lightning/fabric/accelerators/accelerator.py +++ b/src/lightning/fabric/accelerators/accelerator.py @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
from abc import ABC, abstractmethod -from typing import Any, Dict +from typing import Any import torch +from lightning.fabric.accelerators.registry import _AcceleratorRegistry + class Accelerator(ABC): """The Accelerator base class. @@ -54,5 +56,5 @@ def is_available() -> bool: """Detect if the hardware is available.""" @classmethod - def register_accelerators(cls, accelerator_registry: Dict) -> None: + def register_accelerators(cls, accelerator_registry: _AcceleratorRegistry) -> None: pass diff --git a/src/lightning/fabric/accelerators/cpu.py b/src/lightning/fabric/accelerators/cpu.py index 37cd2a2f079b6..e3562a1e249d6 100644 --- a/src/lightning/fabric/accelerators/cpu.py +++ b/src/lightning/fabric/accelerators/cpu.py @@ -11,11 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict, List, Union +from typing import List, Union import torch from lightning.fabric.accelerators.accelerator import Accelerator +from lightning.fabric.accelerators.registry import _AcceleratorRegistry class CPUAccelerator(Accelerator): @@ -56,7 +57,7 @@ def is_available() -> bool: return True @classmethod - def register_accelerators(cls, accelerator_registry: Dict) -> None: + def register_accelerators(cls, accelerator_registry: _AcceleratorRegistry) -> None: accelerator_registry.register( "cpu", cls, diff --git a/src/lightning/fabric/accelerators/cuda.py b/src/lightning/fabric/accelerators/cuda.py index 4977b0e1a7aa5..9f58fc7192bbd 100644 --- a/src/lightning/fabric/accelerators/cuda.py +++ b/src/lightning/fabric/accelerators/cuda.py @@ -15,12 +15,13 @@ import warnings from contextlib import contextmanager from functools import lru_cache -from typing import cast, Dict, Generator, List, Optional, Union +from typing import cast, Generator, List, Optional, Union import torch from lightning_utilities.core.rank_zero import rank_zero_info from lightning.fabric.accelerators.accelerator import Accelerator +from lightning.fabric.accelerators.registry import _AcceleratorRegistry from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_1_12, _TORCH_GREATER_EQUAL_2_0 @@ -63,7 +64,7 @@ def is_available() -> bool: return num_cuda_devices() > 0 @classmethod - def register_accelerators(cls, accelerator_registry: Dict) -> None: + def register_accelerators(cls, accelerator_registry: _AcceleratorRegistry) -> None: accelerator_registry.register( "cuda", cls, diff --git a/src/lightning/fabric/accelerators/mps.py b/src/lightning/fabric/accelerators/mps.py index 742aef3670eb8..2eaadd0fb7b19 100644 --- a/src/lightning/fabric/accelerators/mps.py +++ b/src/lightning/fabric/accelerators/mps.py @@ -13,11 +13,12 @@ # limitations under the License. 
import platform from functools import lru_cache -from typing import Dict, List, Optional, Union +from typing import List, Optional, Union import torch from lightning.fabric.accelerators.accelerator import Accelerator +from lightning.fabric.accelerators.registry import _AcceleratorRegistry from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_1_12 @@ -69,7 +70,7 @@ def is_available() -> bool: ) @classmethod - def register_accelerators(cls, accelerator_registry: Dict) -> None: + def register_accelerators(cls, accelerator_registry: _AcceleratorRegistry) -> None: accelerator_registry.register( "mps", cls, diff --git a/src/lightning/fabric/accelerators/registry.py b/src/lightning/fabric/accelerators/registry.py index 4fd62ab15708a..f8d79dc1b602d 100644 --- a/src/lightning/fabric/accelerators/registry.py +++ b/src/lightning/fabric/accelerators/registry.py @@ -15,7 +15,6 @@ from inspect import getmembers, isclass from typing import Any, Callable, Dict, List, Optional -from lightning.fabric.accelerators.accelerator import Accelerator from lightning.fabric.utilities.exceptions import MisconfigurationException from lightning.fabric.utilities.registry import _is_register_method_overridden @@ -114,6 +113,8 @@ def __str__(self) -> str: def call_register_accelerators(registry: _AcceleratorRegistry, base_module: str) -> None: module = importlib.import_module(base_module) + from lightning.fabric.accelerators.accelerator import Accelerator + for _, mod in getmembers(module, isclass): if issubclass(mod, Accelerator) and _is_register_method_overridden(mod, Accelerator, "register_accelerators"): mod.register_accelerators(registry) diff --git a/src/lightning/fabric/strategies/ddp.py b/src/lightning/fabric/strategies/ddp.py index b91d4aef9c970..801ee6eefcd84 100644 --- a/src/lightning/fabric/strategies/ddp.py +++ b/src/lightning/fabric/strategies/ddp.py @@ -29,6 +29,7 @@ from lightning.fabric.strategies.launchers.multiprocessing import _MultiProcessingLauncher from lightning.fabric.strategies.launchers.subprocess_script import _SubprocessScriptLauncher from lightning.fabric.strategies.parallel import ParallelStrategy +from lightning.fabric.strategies.registry import _StrategyRegistry from lightning.fabric.strategies.strategy import _BackwardSyncControl, TBroadcast from lightning.fabric.utilities.distributed import ( _distributed_available, @@ -160,7 +161,7 @@ def get_module_state_dict(self, module: Module) -> Dict[str, Union[Any, Tensor]] return super().get_module_state_dict(module) @classmethod - def register_strategies(cls, strategy_registry: Dict) -> None: + def register_strategies(cls, strategy_registry: _StrategyRegistry) -> None: entries = ( ("ddp", "popen"), ("ddp_spawn", "spawn"), diff --git a/src/lightning/fabric/strategies/deepspeed.py b/src/lightning/fabric/strategies/deepspeed.py index 34a0c68e775f3..7c3dc0ac6a230 100644 --- a/src/lightning/fabric/strategies/deepspeed.py +++ b/src/lightning/fabric/strategies/deepspeed.py @@ -30,6 +30,7 @@ from lightning.fabric.plugins.environments.cluster_environment import ClusterEnvironment from lightning.fabric.plugins.precision import Precision from lightning.fabric.strategies.ddp import DDPStrategy +from lightning.fabric.strategies.registry import _StrategyRegistry from lightning.fabric.strategies.strategy import _Sharded from lightning.fabric.utilities.distributed import log from lightning.fabric.utilities.rank_zero import rank_zero_info, rank_zero_only, rank_zero_warn @@ -507,7 +508,7 @@ def clip_gradients_value( ) @classmethod - def 
register_strategies(cls, strategy_registry: Dict) -> None: + def register_strategies(cls, strategy_registry: _StrategyRegistry) -> None: strategy_registry.register("deepspeed", cls, description="Default DeepSpeed Strategy") strategy_registry.register("deepspeed_stage_1", cls, description="DeepSpeed with ZeRO Stage 1 enabled", stage=1) strategy_registry.register("deepspeed_stage_2", cls, description="DeepSpeed with ZeRO Stage 2 enabled", stage=2) diff --git a/src/lightning/fabric/strategies/dp.py b/src/lightning/fabric/strategies/dp.py index 62401372ed510..47fdc24fe5018 100644 --- a/src/lightning/fabric/strategies/dp.py +++ b/src/lightning/fabric/strategies/dp.py @@ -21,6 +21,7 @@ from lightning.fabric.plugins.io.checkpoint_io import CheckpointIO from lightning.fabric.plugins.precision import Precision from lightning.fabric.strategies.parallel import ParallelStrategy +from lightning.fabric.strategies.registry import _StrategyRegistry from lightning.fabric.strategies.strategy import TBroadcast, TReduce from lightning.fabric.utilities.apply_func import apply_to_collection from lightning.fabric.utilities.distributed import ReduceOp @@ -89,5 +90,5 @@ def get_module_state_dict(self, module: Module) -> Dict[str, Union[Any, Tensor]] return super().get_module_state_dict(module) @classmethod - def register_strategies(cls, strategy_registry: Dict) -> None: + def register_strategies(cls, strategy_registry: _StrategyRegistry) -> None: strategy_registry.register("dp", cls, description=cls.__class__.__name__) diff --git a/src/lightning/fabric/strategies/fsdp.py b/src/lightning/fabric/strategies/fsdp.py index 84e0768752284..5dba3be620178 100644 --- a/src/lightning/fabric/strategies/fsdp.py +++ b/src/lightning/fabric/strategies/fsdp.py @@ -27,6 +27,7 @@ from lightning.fabric.plugins.precision.fsdp import FSDPPrecision from lightning.fabric.strategies.launchers.subprocess_script import _SubprocessScriptLauncher from lightning.fabric.strategies.parallel import ParallelStrategy +from lightning.fabric.strategies.registry import _StrategyRegistry from lightning.fabric.strategies.strategy import _BackwardSyncControl, _Sharded, TBroadcast from lightning.fabric.utilities.distributed import ( _distributed_available, @@ -274,7 +275,7 @@ def clip_gradients_value( # type: ignore[override] ) @classmethod - def register_strategies(cls, strategy_registry: Dict) -> None: + def register_strategies(cls, strategy_registry: _StrategyRegistry) -> None: if not _TORCH_GREATER_EQUAL_1_12 or not torch.distributed.is_available(): return diff --git a/src/lightning/fabric/strategies/registry.py b/src/lightning/fabric/strategies/registry.py index 9b53841d1ba93..92c0417062d10 100644 --- a/src/lightning/fabric/strategies/registry.py +++ b/src/lightning/fabric/strategies/registry.py @@ -15,7 +15,6 @@ from inspect import getmembers, isclass from typing import Any, Callable, Dict, List, Optional -from lightning.fabric.strategies.strategy import Strategy from lightning.fabric.utilities.registry import _is_register_method_overridden @@ -82,7 +81,7 @@ def do_register(strategy: Callable) -> Callable: return do_register - def get(self, name: str, default: Optional[Strategy] = None) -> Strategy: # type: ignore[override] + def get(self, name: str, default: Optional[Any] = None) -> Any: """Calls the registered strategy with the required parameters and returns the strategy object. 
Args: @@ -113,6 +112,8 @@ def __str__(self) -> str: def _call_register_strategies(registry: _StrategyRegistry, base_module: str) -> None: module = importlib.import_module(base_module) + from lightning.fabric.strategies.strategy import Strategy + for _, mod in getmembers(module, isclass): if issubclass(mod, Strategy) and _is_register_method_overridden(mod, Strategy, "register_strategies"): mod.register_strategies(registry) diff --git a/src/lightning/fabric/strategies/strategy.py b/src/lightning/fabric/strategies/strategy.py index afffaf21d0a42..db90f95bc34cd 100644 --- a/src/lightning/fabric/strategies/strategy.py +++ b/src/lightning/fabric/strategies/strategy.py @@ -27,6 +27,7 @@ from lightning.fabric.plugins.io.torch_io import TorchCheckpointIO from lightning.fabric.plugins.precision import Precision from lightning.fabric.strategies.launchers.launcher import _Launcher +from lightning.fabric.strategies.registry import _StrategyRegistry from lightning.fabric.utilities.apply_func import move_data_to_device from lightning.fabric.utilities.types import _PATH, _Stateful, Optimizable, ReduceOp @@ -327,7 +328,7 @@ def clip_gradients_value(self, module: torch.nn.Module, optimizer: Optimizer, cl return torch.nn.utils.clip_grad_value_(parameters, clip_value=clip_val) @classmethod - def register_strategies(cls, strategy_registry: Dict[str, Any]) -> None: + def register_strategies(cls, strategy_registry: _StrategyRegistry) -> None: pass def _err_msg_joint_setup_required(self) -> str: diff --git a/src/lightning/fabric/strategies/xla.py b/src/lightning/fabric/strategies/xla.py index 6f40ef4ada50c..adf66ce1ebb58 100644 --- a/src/lightning/fabric/strategies/xla.py +++ b/src/lightning/fabric/strategies/xla.py @@ -27,7 +27,7 @@ from lightning.fabric.plugins.io.checkpoint_io import CheckpointIO from lightning.fabric.plugins.io.xla import XLACheckpointIO from lightning.fabric.plugins.precision import Precision -from lightning.fabric.strategies import ParallelStrategy +from lightning.fabric.strategies import _StrategyRegistry, ParallelStrategy from lightning.fabric.strategies.launchers.xla import _XLALauncher from lightning.fabric.strategies.strategy import TBroadcast from lightning.fabric.utilities.data import has_len @@ -203,7 +203,7 @@ def remove_checkpoint(self, filepath: _PATH) -> None: self.checkpoint_io.remove_checkpoint(filepath) @classmethod - def register_strategies(cls, strategy_registry: Dict) -> None: + def register_strategies(cls, strategy_registry: _StrategyRegistry) -> None: strategy_registry.register("xla", cls, description=cls.__class__.__name__) def _set_world_ranks(self) -> None: diff --git a/src/lightning/pytorch/accelerators/cpu.py b/src/lightning/pytorch/accelerators/cpu.py index 51313f0f14529..6c1ce5186be6c 100644 --- a/src/lightning/pytorch/accelerators/cpu.py +++ b/src/lightning/pytorch/accelerators/cpu.py @@ -16,6 +16,7 @@ import torch from lightning_utilities.core.imports import RequirementCache +from lightning.fabric.accelerators import _AcceleratorRegistry from lightning.fabric.accelerators.cpu import _parse_cpu_cores from lightning.fabric.utilities.types import _DEVICE from lightning.pytorch.accelerators.accelerator import Accelerator @@ -64,11 +65,11 @@ def is_available() -> bool: return True @classmethod - def register_accelerators(cls, accelerator_registry: Dict) -> None: + def register_accelerators(cls, accelerator_registry: _AcceleratorRegistry) -> None: accelerator_registry.register( "cpu", cls, - description=f"{cls.__class__.__name__}", + 
description=cls.__class__.__name__, ) diff --git a/src/lightning/pytorch/accelerators/cuda.py b/src/lightning/pytorch/accelerators/cuda.py index 027296b92a57c..9b2d4031bc8ae 100644 --- a/src/lightning/pytorch/accelerators/cuda.py +++ b/src/lightning/pytorch/accelerators/cuda.py @@ -20,6 +20,7 @@ import torch import lightning.pytorch as pl +from lightning.fabric.accelerators import _AcceleratorRegistry from lightning.fabric.accelerators.cuda import _check_cuda_matmul_precision, _clear_cuda_memory, num_cuda_devices from lightning.fabric.utilities.device_parser import _parse_gpu_ids from lightning.fabric.utilities.types import _DEVICE @@ -94,7 +95,7 @@ def is_available() -> bool: return num_cuda_devices() > 0 @classmethod - def register_accelerators(cls, accelerator_registry: Dict) -> None: + def register_accelerators(cls, accelerator_registry: _AcceleratorRegistry) -> None: accelerator_registry.register( "cuda", cls, diff --git a/src/lightning/pytorch/accelerators/ipu.py b/src/lightning/pytorch/accelerators/ipu.py index 91223d007453f..7d31f8e2c32dc 100644 --- a/src/lightning/pytorch/accelerators/ipu.py +++ b/src/lightning/pytorch/accelerators/ipu.py @@ -16,6 +16,7 @@ import torch from lightning_utilities.core.imports import package_available +from lightning.fabric.accelerators import _AcceleratorRegistry from lightning.fabric.utilities.types import _DEVICE from lightning.pytorch.accelerators.accelerator import Accelerator @@ -68,7 +69,7 @@ def is_available() -> bool: return _IPU_AVAILABLE @classmethod - def register_accelerators(cls, accelerator_registry: Dict) -> None: + def register_accelerators(cls, accelerator_registry: _AcceleratorRegistry) -> None: accelerator_registry.register( "ipu", cls, diff --git a/src/lightning/pytorch/accelerators/mps.py b/src/lightning/pytorch/accelerators/mps.py index 88f071e46c912..91655d7d40d22 100644 --- a/src/lightning/pytorch/accelerators/mps.py +++ b/src/lightning/pytorch/accelerators/mps.py @@ -15,6 +15,7 @@ import torch +from lightning.fabric.accelerators import _AcceleratorRegistry from lightning.fabric.accelerators.mps import MPSAccelerator as _MPSAccelerator from lightning.fabric.utilities.device_parser import _parse_gpu_ids from lightning.fabric.utilities.types import _DEVICE @@ -70,7 +71,7 @@ def is_available() -> bool: return _MPSAccelerator.is_available() @classmethod - def register_accelerators(cls, accelerator_registry: Dict) -> None: + def register_accelerators(cls, accelerator_registry: _AcceleratorRegistry) -> None: accelerator_registry.register( "mps", cls, diff --git a/src/lightning/pytorch/strategies/ddp.py b/src/lightning/pytorch/strategies/ddp.py index 76aaffbb06697..7614ab21b8ad2 100644 --- a/src/lightning/pytorch/strategies/ddp.py +++ b/src/lightning/pytorch/strategies/ddp.py @@ -25,6 +25,7 @@ import lightning.pytorch as pl from lightning.fabric.plugins import CheckpointIO, ClusterEnvironment from lightning.fabric.plugins.collectives.torch_collective import default_pg_timeout +from lightning.fabric.strategies import _StrategyRegistry from lightning.fabric.utilities.distributed import ( _distributed_available, _get_default_process_group_backend_for_device, @@ -359,7 +360,7 @@ def post_training_step(self) -> None: self.model.require_backward_grad_sync = True # type: ignore[assignment] @classmethod - def register_strategies(cls, strategy_registry: Dict) -> None: + def register_strategies(cls, strategy_registry: _StrategyRegistry) -> None: entries = ( ("ddp", "popen"), ("ddp_spawn", "spawn"), diff --git 
a/src/lightning/pytorch/strategies/deepspeed.py b/src/lightning/pytorch/strategies/deepspeed.py index 19862c615d416..8d1eb2b278d08 100644 --- a/src/lightning/pytorch/strategies/deepspeed.py +++ b/src/lightning/pytorch/strategies/deepspeed.py @@ -29,6 +29,7 @@ import lightning.pytorch as pl from lightning.fabric.plugins import ClusterEnvironment +from lightning.fabric.strategies import _StrategyRegistry from lightning.fabric.strategies.deepspeed import _DEEPSPEED_AVAILABLE from lightning.fabric.utilities.optimizer import _optimizers_to_device from lightning.fabric.utilities.seed import reset_seed @@ -863,7 +864,7 @@ def load_optimizer_state_dict(self, checkpoint: Mapping[str, Any]) -> None: pass @classmethod - def register_strategies(cls, strategy_registry: Dict) -> None: + def register_strategies(cls, strategy_registry: _StrategyRegistry) -> None: strategy_registry.register("deepspeed", cls, description="Default DeepSpeed Strategy") strategy_registry.register("deepspeed_stage_1", cls, description="DeepSpeed with ZeRO Stage 1 enabled", stage=1) strategy_registry.register("deepspeed_stage_2", cls, description="DeepSpeed with ZeRO Stage 2 enabled", stage=2) diff --git a/src/lightning/pytorch/strategies/fsdp.py b/src/lightning/pytorch/strategies/fsdp.py index d99df72f76194..978342e1e029a 100644 --- a/src/lightning/pytorch/strategies/fsdp.py +++ b/src/lightning/pytorch/strategies/fsdp.py @@ -21,6 +21,7 @@ import lightning.pytorch as pl from lightning.fabric.plugins import CheckpointIO, ClusterEnvironment +from lightning.fabric.strategies import _StrategyRegistry from lightning.fabric.strategies.fsdp import ( _init_cpu_offload, _optimizer_has_flat_params, @@ -362,7 +363,7 @@ def get_registered_strategies(cls) -> List[str]: return cls._registered_strategies @classmethod - def register_strategies(cls, strategy_registry: Dict) -> None: + def register_strategies(cls, strategy_registry: _StrategyRegistry) -> None: if not _fsdp_available: return strategy_registry.register( diff --git a/src/lightning/pytorch/strategies/ipu.py b/src/lightning/pytorch/strategies/ipu.py index 255de4f88270f..e82c626172082 100644 --- a/src/lightning/pytorch/strategies/ipu.py +++ b/src/lightning/pytorch/strategies/ipu.py @@ -22,6 +22,7 @@ import lightning.pytorch as pl from lightning.fabric.plugins import CheckpointIO, ClusterEnvironment +from lightning.fabric.strategies import _StrategyRegistry from lightning.fabric.utilities.cloud_io import get_filesystem from lightning.pytorch.accelerators.ipu import _IPU_AVAILABLE, _POPTORCH_AVAILABLE from lightning.pytorch.overrides.base import _LightningModuleWrapperBase @@ -371,7 +372,7 @@ def broadcast(self, obj: TBroadcast, src: int = 0) -> TBroadcast: return obj @classmethod - def register_strategies(cls, strategy_registry: Dict) -> None: + def register_strategies(cls, strategy_registry: _StrategyRegistry) -> None: strategy_registry.register( cls.strategy_name, cls, diff --git a/src/lightning/pytorch/strategies/single_device.py b/src/lightning/pytorch/strategies/single_device.py index 9f875929ffd66..ee1a7abff440e 100644 --- a/src/lightning/pytorch/strategies/single_device.py +++ b/src/lightning/pytorch/strategies/single_device.py @@ -20,6 +20,7 @@ import lightning.pytorch as pl from lightning.fabric.plugins import CheckpointIO +from lightning.fabric.strategies import _StrategyRegistry from lightning.fabric.utilities.types import _DEVICE from lightning.pytorch.plugins.precision import PrecisionPlugin from lightning.pytorch.strategies.strategy import Strategy, TBroadcast @@ -84,9 
+85,9 @@ def broadcast(self, obj: TBroadcast, src: int = 0) -> TBroadcast: return obj @classmethod - def register_strategies(cls, strategy_registry: dict) -> None: + def register_strategies(cls, strategy_registry: _StrategyRegistry) -> None: strategy_registry.register( cls.strategy_name, cls, - description=f"{cls.__class__.__name__}", + description=cls.__class__.__name__, ) diff --git a/src/lightning/pytorch/strategies/strategy.py b/src/lightning/pytorch/strategies/strategy.py index a6f536c45bbba..068af0bd51256 100644 --- a/src/lightning/pytorch/strategies/strategy.py +++ b/src/lightning/pytorch/strategies/strategy.py @@ -23,6 +23,7 @@ import lightning.pytorch as pl from lightning.fabric.plugins import CheckpointIO +from lightning.fabric.strategies import _StrategyRegistry from lightning.fabric.utilities import move_data_to_device from lightning.fabric.utilities.distributed import ReduceOp from lightning.fabric.utilities.optimizer import _optimizer_to_device, _optimizers_to_device @@ -479,7 +480,7 @@ def teardown(self) -> None: self.checkpoint_io.teardown() @classmethod - def register_strategies(cls, strategy_registry: Dict[str, Any]) -> None: + def register_strategies(cls, strategy_registry: _StrategyRegistry) -> None: pass def on_train_start(self) -> None: diff --git a/src/lightning/pytorch/strategies/xla.py b/src/lightning/pytorch/strategies/xla.py index 4db32cbefc948..92a3a432fc3d9 100644 --- a/src/lightning/pytorch/strategies/xla.py +++ b/src/lightning/pytorch/strategies/xla.py @@ -23,6 +23,7 @@ from lightning.fabric.accelerators.tpu import _XLA_AVAILABLE from lightning.fabric.plugins import CheckpointIO, XLACheckpointIO from lightning.fabric.plugins.environments import XLAEnvironment +from lightning.fabric.strategies import _StrategyRegistry from lightning.fabric.utilities.data import has_len from lightning.fabric.utilities.optimizer import _optimizers_to_device from lightning.fabric.utilities.types import _PATH, ReduceOp @@ -282,7 +283,7 @@ def teardown(self) -> None: os.environ.pop("PT_XLA_DEBUG", None) @classmethod - def register_strategies(cls, strategy_registry: Dict) -> None: + def register_strategies(cls, strategy_registry: _StrategyRegistry) -> None: strategy_registry.register("xla_debug", cls, description="XLA strategy with `debug` as True", debug=True) strategy_registry.register( cls.strategy_name, diff --git a/src/lightning/pytorch/trainer/connectors/accelerator_connector.py b/src/lightning/pytorch/trainer/connectors/accelerator_connector.py index a6431350a0292..7c4b3ab476ae8 100644 --- a/src/lightning/pytorch/trainer/connectors/accelerator_connector.py +++ b/src/lightning/pytorch/trainer/connectors/accelerator_connector.py @@ -476,8 +476,7 @@ def _init_strategy(self) -> None: if isinstance(self._strategy_flag, str): self.strategy = StrategyRegistry.get(self._strategy_flag) else: - # TODO(fabric): remove ignore after merging Fabric and PL strategies - self.strategy = self._strategy_flag # type: ignore[assignment] + self.strategy = self._strategy_flag def _check_and_init_precision(self) -> PrecisionPlugin: self._validate_precision_choice() @@ -506,7 +505,7 @@ def _check_and_init_precision(self) -> PrecisionPlugin: return ColossalAIPrecisionPlugin(self._precision_flag) if isinstance(self.strategy, DeepSpeedStrategy): - return DeepSpeedPrecisionPlugin(self._precision_flag) + return DeepSpeedPrecisionPlugin(self._precision_flag) # type: ignore[arg-type] if self._precision_flag == "32-true": return PrecisionPlugin() diff --git a/src/lightning/pytorch/trainer/trainer.py 
b/src/lightning/pytorch/trainer/trainer.py index 37750ec2ac3f0..f904e8f513817 100644 --- a/src/lightning/pytorch/trainer/trainer.py +++ b/src/lightning/pytorch/trainer/trainer.py @@ -1027,8 +1027,7 @@ def accelerator(self) -> Accelerator: @property def strategy(self) -> Strategy: - # TODO(fabric): remove ignore after merging Fabric and PL strategies - return self._accelerator_connector.strategy # type: ignore[return-value] + return self._accelerator_connector.strategy @property def precision_plugin(self) -> PrecisionPlugin: From 0ad5603d8f9a93b884b19be038892b503eeff207 Mon Sep 17 00:00:00 2001 From: Dingu Sagar Date: Thu, 27 Apr 2023 21:53:25 +0530 Subject: [PATCH 21/93] Adding doc strings for exceptions raised in trainer.py (#16684) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Dingu Sagar Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Jirka Borovec <6035284+Borda@users.noreply.github.com> Co-authored-by: Adrian Wälchli Co-authored-by: Jirka (cherry picked from commit 56583fb668aefc02450e460c9ec1b97320393772) --- src/lightning/pytorch/trainer/trainer.py | 58 +++++++++++++++++++++++- 1 file changed, 57 insertions(+), 1 deletion(-) diff --git a/src/lightning/pytorch/trainer/trainer.py b/src/lightning/pytorch/trainer/trainer.py index f904e8f513817..1aad537e84a00 100644 --- a/src/lightning/pytorch/trainer/trainer.py +++ b/src/lightning/pytorch/trainer/trainer.py @@ -278,7 +278,6 @@ def __init__( :paramref:`~lightning.pytorch.trainer.trainer.Trainer.profiler`, :meth:`~lightning.pytorch.core.module.LightningModule.log`, :meth:`~lightning.pytorch.core.module.LightningModule.log_dict`. - plugins: Plugins allow modification of core behavior like ddp and amp, and enable custom lightning plugins. Default: ``None``. @@ -291,6 +290,14 @@ def __init__( default_root_dir: Default path for logs and weights when no logger/ckpt_callback passed. Default: ``os.getcwd()``. Can be remote file paths such as `s3://mybucket/path` or 'hdfs://path/' + + Raises: + TypeError: + If ``gradient_clip_val`` is not an int or float. + + MisconfigurationException: + If ``gradient_clip_algorithm`` is invalid. + If ``track_grad_norm`` is not a positive number or inf. """ super().__init__() log.debug(f"{self.__class__.__name__}: Initializing trainer with parameters: {locals()}") @@ -507,9 +514,16 @@ def fit( keywords ``"last"`` and ``"hpc"``. If there is no checkpoint file at the path, an exception is raised. If resuming from mid-epoch checkpoint, training will start from the beginning of the next epoch. + datamodule: An instance of :class:`~lightning.pytorch.core.datamodule.LightningDataModule`. datamodule: A :class:`~lightning.pytorch.core.datamodule.LightningDataModule` that defines the `:class:`~lightning.pytorch.core.hooks.DataHooks.train_dataloader` hook. + Raises: + TypeError: + If ``model`` is not :class:`~lightning.pytorch.core.module.LightningModule` for torch version less than + 2.0.0 and if ``model`` is not :class:`~lightning.pytorch.core.module.LightningModule` or + :class:`torch._dynamo.OptimizedModule` for torch versions greater than or equal to 2.0.0 . + For more information about multiple dataloaders, see this :ref:`section `. """ model = _maybe_unwrap_optimized(model) @@ -592,6 +606,17 @@ def validate( List of dictionaries with metrics logged during the validation phase, e.g., in model- or callback hooks like :meth:`~lightning.pytorch.LightningModule.validation_step` etc. 
The length of the list corresponds to the number of validation dataloaders used. + + Raises: + TypeError: + If no ``model`` is passed and there was no ``LightningModule`` passed in the previous run. + If ``model`` passed is not `LightningModule` or `torch._dynamo.OptimizedModule`. + + MisconfigurationException: + If both ``dataloaders`` and ``datamodule`` are passed. Pass only one of these. + + RuntimeError: + If a compiled ``model`` is passed and the strategy is not supported. """ if model is None: # do we still have a reference from a previous call? @@ -688,6 +713,17 @@ def test( List of dictionaries with metrics logged during the test phase, e.g., in model- or callback hooks like :meth:`~lightning.pytorch.LightningModule.test_step` etc. The length of the list corresponds to the number of test dataloaders used. + + Raises: + TypeError: + If no ``model`` is passed and there was no ``LightningModule`` passed in the previous run. + If ``model`` passed is not `LightningModule` or `torch._dynamo.OptimizedModule`. + + MisconfigurationException: + If both ``dataloaders`` and ``datamodule`` are passed. Pass only one of these. + + RuntimeError: + If a compiled ``model`` is passed and the strategy is not supported. """ if model is None: # do we still have a reference from a previous call? @@ -784,6 +820,17 @@ def predict( Returns: Returns a list of dictionaries, one for each provided dataloader containing their respective predictions. + Raises: + TypeError: + If no ``model`` is passed and there was no ``LightningModule`` passed in the previous run. + If ``model`` passed is not `LightningModule` or `torch._dynamo.OptimizedModule`. + + MisconfigurationException: + If both ``dataloaders`` and ``datamodule`` are passed. Pass only one of these. + + RuntimeError: + If a compiled ``model`` is passed and the strategy is not supported. + See :ref:`Lightning inference section` for more. """ if model is None: @@ -1243,6 +1290,10 @@ def save_checkpoint( filepath: Path where checkpoint is saved. weights_only: If ``True``, will only save the model weights. storage_options: parameter for how to save to storage, passed to ``CheckpointIO`` plugin + + Raises: + AttributeError: + If the model is not attached to the Trainer before calling this method. """ if self.model is None: raise AttributeError( @@ -1531,6 +1582,11 @@ def configure_optimizers(self): stepping_batches = self.trainer.estimated_stepping_batches scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=1e-3, total_steps=stepping_batches) return [optimizer], [scheduler] + + Raises: + MisconfigurationException: + If estimated stepping batches cannot be computed due to different `accumulate_grad_batches` + at different epochs. 
""" # infinite training if self.max_epochs == -1: From 634ca4d620173b8248aa15d54b5b786c01e038d0 Mon Sep 17 00:00:00 2001 From: AleksanderWWW Date: Thu, 27 Apr 2023 18:52:44 +0200 Subject: [PATCH 22/93] upload checkpoint files to neptune from stream (#17430) (cherry picked from commit ef9df059893b2377df547222977eb5a9e36e4eae) --- src/lightning/pytorch/CHANGELOG.md | 3 ++- src/lightning/pytorch/loggers/neptune.py | 9 +++++--- tests/tests_pytorch/loggers/test_all.py | 1 + tests/tests_pytorch/loggers/test_neptune.py | 25 +++++++++++---------- 4 files changed, 22 insertions(+), 16 deletions(-) diff --git a/src/lightning/pytorch/CHANGELOG.md b/src/lightning/pytorch/CHANGELOG.md index a8c7a10ae7c16..c7d286e1ed032 100644 --- a/src/lightning/pytorch/CHANGELOG.md +++ b/src/lightning/pytorch/CHANGELOG.md @@ -14,7 +14,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Fixed -- +- Fixed a potential bug with uploading model checkpoints to Neptune.ai by uploading files from stream ([#17430](https://github.com/Lightning-AI/lightning/pull/17430)) + ## [2.0.2] - 2023-04-24 diff --git a/src/lightning/pytorch/loggers/neptune.py b/src/lightning/pytorch/loggers/neptune.py index 4d2a1c156b747..c452daac22d52 100644 --- a/src/lightning/pytorch/loggers/neptune.py +++ b/src/lightning/pytorch/loggers/neptune.py @@ -38,9 +38,10 @@ if _NEPTUNE_AVAILABLE: from neptune import new as neptune from neptune.new.run import Run + from neptune.new.types import File else: # needed for test mocks, and function signatures - neptune, Run = None, None + neptune, Run, File = None, None, None log = logging.getLogger(__name__) @@ -466,7 +467,8 @@ def after_save_checkpoint(self, checkpoint_callback: Checkpoint) -> None: if hasattr(checkpoint_callback, "last_model_path") and checkpoint_callback.last_model_path: model_last_name = self._get_full_model_name(checkpoint_callback.last_model_path, checkpoint_callback) file_names.add(model_last_name) - self.run[f"{checkpoints_namespace}/{model_last_name}"].upload(checkpoint_callback.last_model_path) + with open(checkpoint_callback.last_model_path, "rb") as fp: + self.run[f"{checkpoints_namespace}/{model_last_name}"] = File.from_stream(fp) # save best k models if hasattr(checkpoint_callback, "best_k_models"): @@ -481,7 +483,8 @@ def after_save_checkpoint(self, checkpoint_callback: Checkpoint) -> None: model_name = self._get_full_model_name(checkpoint_callback.best_model_path, checkpoint_callback) file_names.add(model_name) - self.run[f"{checkpoints_namespace}/{model_name}"].upload(checkpoint_callback.best_model_path) + with open(checkpoint_callback.best_model_path, "rb") as fp: + self.run[f"{checkpoints_namespace}/{model_name}"] = File.from_stream(fp) # remove old models logged to experiment if they are not part of best k models at this point if self.run.exists(checkpoints_namespace): diff --git a/tests/tests_pytorch/loggers/test_all.py b/tests/tests_pytorch/loggers/test_all.py index 9ea18f533db00..537c0c8c0c3a4 100644 --- a/tests/tests_pytorch/loggers/test_all.py +++ b/tests/tests_pytorch/loggers/test_all.py @@ -46,6 +46,7 @@ mock.patch("lightning.pytorch.loggers.mlflow.Metric"), mock.patch("lightning.pytorch.loggers.neptune.neptune", new_callable=create_neptune_mock), mock.patch("lightning.pytorch.loggers.neptune._NEPTUNE_AVAILABLE", return_value=True), + mock.patch("lightning.pytorch.loggers.neptune.File", new=mock.Mock()), mock.patch("lightning.pytorch.loggers.wandb.wandb"), mock.patch("lightning.pytorch.loggers.wandb.Run", new=mock.Mock), ) diff --git 
a/tests/tests_pytorch/loggers/test_neptune.py b/tests/tests_pytorch/loggers/test_neptune.py index 6d24d547c36e8..94f54a6df3ff1 100644 --- a/tests/tests_pytorch/loggers/test_neptune.py +++ b/tests/tests_pytorch/loggers/test_neptune.py @@ -190,6 +190,7 @@ def _fit_and_test(self, logger, model): assert trainer.log_dir == os.path.join(os.getcwd(), ".neptune") @pytest.mark.usefixtures("tmpdir_unittest_fixture") + @patch("lightning.pytorch.loggers.neptune.File", new=mock.Mock()) def test_neptune_leave_open_experiment_after_fit(self, neptune): """Verify that neptune experiment was NOT closed after training.""" # given @@ -205,6 +206,7 @@ def test_neptune_leave_open_experiment_after_fit(self, neptune): assert run_instance_mock.stop.call_count == 0 @pytest.mark.usefixtures("tmpdir_unittest_fixture") + @patch("lightning.pytorch.loggers.neptune.File", new=mock.Mock()) def test_neptune_log_metrics_on_trained_model(self, neptune): """Verify that trained models do log data.""" @@ -293,6 +295,7 @@ def test_log_model_summary(self, neptune): self.assertEqual(run_instance_mock.__getitem__.call_count, 0) run_instance_mock.__setitem__.assert_called_once_with(model_summary_key, file_from_content_mock) + @patch("builtins.open", mock.mock_open(read_data="test")) def test_after_save_checkpoint(self, neptune): test_variants = [ ({}, "training/model"), @@ -317,26 +320,24 @@ def test_after_save_checkpoint(self, neptune): best_model_score=None, ) - # when: save checkpoint - logger.after_save_checkpoint(cb_mock) + with patch("lightning.pytorch.loggers.neptune.File", side_effect=mock.Mock()) as mock_file: + # when: save checkpoint + logger.after_save_checkpoint(cb_mock) # then: - self.assertEqual(run_instance_mock.__setitem__.call_count, 1) - self.assertEqual(run_instance_mock.__getitem__.call_count, 4) - self.assertEqual(run_attr_mock.upload.call_count, 4) - run_instance_mock.__setitem__.assert_called_once_with( - f"{model_key_prefix}/best_model_path", os.path.join(models_root_dir, "best_model") - ) - run_instance_mock.__getitem__.assert_any_call(f"{model_key_prefix}/checkpoints/last") + self.assertEqual(run_instance_mock.__setitem__.call_count, 3) + self.assertEqual(run_instance_mock.__getitem__.call_count, 2) + self.assertEqual(run_attr_mock.upload.call_count, 2) + + self.assertEqual(mock_file.from_stream.call_count, 2) + run_instance_mock.__getitem__.assert_any_call(f"{model_key_prefix}/checkpoints/model1") run_instance_mock.__getitem__.assert_any_call(f"{model_key_prefix}/checkpoints/model2/with/slashes") - run_instance_mock.__getitem__.assert_any_call(f"{model_key_prefix}/checkpoints/best_model") + run_attr_mock.upload.assert_has_calls( [ - call(os.path.join(models_root_dir, "last")), call(os.path.join(models_root_dir, "model1")), call(os.path.join(models_root_dir, "model2/with/slashes")), - call(os.path.join(models_root_dir, "best_model")), ] ) From 942619e3130177a1766937c438f5d981baa37460 Mon Sep 17 00:00:00 2001 From: Jirka B Date: Sat, 20 May 2023 13:32:23 -0400 Subject: [PATCH 23/93] ci: LIGHTING_TESTING --- .github/workflows/ci-examples-app.yml | 1 + .github/workflows/ci-tests-app.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/.github/workflows/ci-examples-app.yml b/.github/workflows/ci-examples-app.yml index f8941add768c9..d16c5d3750f0a 100644 --- a/.github/workflows/ci-examples-app.yml +++ b/.github/workflows/ci-examples-app.yml @@ -52,6 +52,7 @@ jobs: FREEZE_REQUIREMENTS: 1 TORCH_URL: "https://download.pytorch.org/whl/cpu/torch_stable.html" PYPI_CACHE_DIR: "_pip-wheels" + LIGHTING_TESTING: "1" 
steps: - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} diff --git a/.github/workflows/ci-tests-app.yml b/.github/workflows/ci-tests-app.yml index 4342044474c0e..57b127dacc264 100644 --- a/.github/workflows/ci-tests-app.yml +++ b/.github/workflows/ci-tests-app.yml @@ -52,6 +52,7 @@ jobs: PACKAGE_NAME: ${{ matrix.pkg-name }} FREEZE_REQUIREMENTS: ${{ ! (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/heads/release/')) }} PYPI_CACHE_DIR: "_pip-wheels" + LIGHTING_TESTING: "1" steps: - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} From e640a93ee1f4bd3a9055ff6b5b1b7cba51ec8b98 Mon Sep 17 00:00:00 2001 From: Jirka Date: Thu, 1 Jun 2023 16:28:39 +0200 Subject: [PATCH 24/93] tests: xfail for `quote_from_bytes` --- tests/tests_app/runners/test_cloud.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/tests/tests_app/runners/test_cloud.py b/tests/tests_app/runners/test_cloud.py index d18cd27a1c000..682fcc2d40c44 100644 --- a/tests/tests_app/runners/test_cloud.py +++ b/tests/tests_app/runners/test_cloud.py @@ -177,6 +177,7 @@ def test_run_on_deleted_cluster(self, cloud_backend): ("litng-ai-03", None), ], ) + @pytest.mark.xfail(TypeError, reason="quote_from_bytes() expected bytes", strict=True) # FixMe def test_new_instance_on_different_cluster(self, tmpdir, cloud_backend, project_id, old_cluster, new_cluster): entrypoint = Path(tmpdir) / "entrypoint.py" entrypoint.touch() @@ -250,6 +251,7 @@ def test_new_instance_on_different_cluster(self, tmpdir, cloud_backend, project_ assert args[1]["body"].name.startswith(app_name) assert args[1]["body"].cluster_id == new_cluster + @pytest.mark.xfail(TypeError, reason="quote_from_bytes() expected bytes", strict=True) # FixMe def test_running_deleted_app(self, tmpdir, cloud_backend, project_id): """Deleted apps show up in list apps but not in list instances. 
@@ -314,6 +316,7 @@ def test_running_deleted_app(self, tmpdir, cloud_backend, project_id): assert args[1]["body"].name != app_name assert args[1]["body"].name.startswith(app_name) + @pytest.mark.xfail(TypeError, reason="quote_from_bytes() expected bytes", strict=True) # FixMe @pytest.mark.parametrize("flow_cloud_compute", [None, CloudCompute(name="t2.medium")]) @mock.patch("lightning.app.runners.backends.cloud.LightningClient", mock.MagicMock()) def test_run_with_default_flow_compute_config(self, tmpdir, monkeypatch, flow_cloud_compute): @@ -359,6 +362,7 @@ def test_run_with_default_flow_compute_config(self, tmpdir, monkeypatch, flow_cl project_id="test-project-id", cloudspace_id=mock.ANY, body=body ) + @pytest.mark.xfail(TypeError, reason="quote_from_bytes() expected bytes", strict=True) # FixMe @mock.patch("lightning.app.runners.backends.cloud.LightningClient", mock.MagicMock()) def test_run_on_byoc_cluster(self, tmpdir, monkeypatch): entrypoint = Path(tmpdir) / "entrypoint.py" @@ -408,6 +412,7 @@ def test_run_on_byoc_cluster(self, tmpdir, monkeypatch): body=ProjectIdProjectclustersbindingsBody(cluster_id="test1234"), ) + @pytest.mark.xfail(TypeError, reason="quote_from_bytes() expected bytes", strict=True) # FixMe @mock.patch("lightning.app.runners.backends.cloud.LightningClient", mock.MagicMock()) def test_requirements_file(self, tmpdir, monkeypatch): entrypoint = Path(tmpdir) / "entrypoint.py" @@ -466,6 +471,7 @@ def test_requirements_file(self, tmpdir, monkeypatch): project_id="test-project-id", cloudspace_id=mock.ANY, body=body ) + @pytest.mark.xfail(TypeError, reason="quote_from_bytes() expected bytes", strict=True) # FixMe @mock.patch("lightning.app.runners.backends.cloud.LightningClient", mock.MagicMock()) def test_basic_auth_enabled(self, tmpdir, monkeypatch): entrypoint = Path(tmpdir) / "entrypoint.py" @@ -526,6 +532,7 @@ def test_basic_auth_enabled(self, tmpdir, monkeypatch): ), ) + @pytest.mark.xfail(TypeError, reason="quote_from_bytes() expected bytes", strict=True) # FixMe @mock.patch("lightning.app.runners.backends.cloud.LightningClient", mock.MagicMock()) def test_no_cache(self, tmpdir, monkeypatch): entrypoint = Path(tmpdir) / "entrypoint.py" @@ -575,6 +582,7 @@ def test_no_cache(self, tmpdir, monkeypatch): body = kwargs["body"] assert body.dependency_cache_key is None + @pytest.mark.xfail(TypeError, reason="quote_from_bytes() expected bytes", strict=True) # FixMe @mock.patch("lightning.app.runners.backends.cloud.LightningClient", mock.MagicMock()) @pytest.mark.parametrize( "lightningapps,start_with_flow", @@ -700,6 +708,7 @@ def test_call_with_work_app(self, lightningapps, start_with_flow, monkeypatch, t project_id="test-project-id", cloudspace_id=mock.ANY, id=mock.ANY, body=mock.ANY ) + @pytest.mark.xfail(TypeError, reason="quote_from_bytes() expected bytes", strict=True) # FixMe @mock.patch("lightning.app.runners.backends.cloud.LightningClient", mock.MagicMock()) @pytest.mark.parametrize("lightningapps", [[], [MagicMock()]]) def test_call_with_queue_server_type_specified(self, tmpdir, lightningapps, monkeypatch): @@ -755,6 +764,7 @@ def test_call_with_queue_server_type_specified(self, tmpdir, lightningapps, monk project_id="test-project-id", cloudspace_id=mock.ANY, id=mock.ANY, body=body ) + @pytest.mark.xfail(TypeError, reason="quote_from_bytes() expected bytes", strict=True) # FixMe @mock.patch("lightning.app.runners.backends.cloud.LightningClient", mock.MagicMock()) @pytest.mark.parametrize("lightningapps", [[], [MagicMock()]]) def 
test_call_with_work_app_and_attached_drives(self, lightningapps, monkeypatch, tmpdir): @@ -900,6 +910,7 @@ def test_call_with_work_app_and_attached_drives(self, lightningapps, monkeypatch project_id="test-project-id", cloudspace_id=mock.ANY, id=mock.ANY, body=mock.ANY ) + @pytest.mark.xfail(TypeError, reason="quote_from_bytes() expected bytes", strict=True) # FixMe @mock.patch("lightning.app.runners.backends.cloud.LightningClient", mock.MagicMock()) @mock.patch("lightning.app.core.constants.ENABLE_APP_COMMENT_COMMAND_EXECUTION", True) @pytest.mark.parametrize("lightningapps", [[], [MagicMock()]]) @@ -1029,6 +1040,7 @@ def test_call_with_work_app_and_app_comment_command_execution_set(self, lightnin ), ) + @pytest.mark.xfail(TypeError, reason="quote_from_bytes() expected bytes", strict=True) # FixMe @mock.patch("lightning.app.runners.backends.cloud.LightningClient", mock.MagicMock()) @pytest.mark.parametrize("lightningapps", [[], [MagicMock()]]) def test_call_with_work_app_and_multiple_attached_drives(self, lightningapps, monkeypatch, tmpdir): @@ -1245,6 +1257,7 @@ def test_call_with_work_app_and_multiple_attached_drives(self, lightningapps, mo project_id="test-project-id", cloudspace_id=mock.ANY, id=mock.ANY, body=mock.ANY ) + @pytest.mark.xfail(TypeError, reason="quote_from_bytes() expected bytes", strict=True) # FixMe @mock.patch("lightning.app.runners.backends.cloud.LightningClient", mock.MagicMock()) @pytest.mark.parametrize("lightningapps", [[], [MagicMock()]]) def test_call_with_work_app_and_attached_mount_and_drive(self, lightningapps, monkeypatch, tmpdir): @@ -1947,6 +1960,7 @@ def run(self): CloudRuntime(app=app)._validate_work_build_specs_and_compute() +@pytest.mark.xfail(TypeError, reason="quote_from_bytes() expected bytes", strict=True) # FixMe def test_programmatic_lightningignore(monkeypatch, caplog, tmpdir): path = Path(tmpdir) entrypoint = path / "entrypoint.py" @@ -2018,6 +2032,7 @@ def run(self): flow.run() +@pytest.mark.xfail(TypeError, reason="quote_from_bytes() expected bytes", strict=True) # FixMe def test_default_lightningignore(monkeypatch, caplog, tmpdir): path = Path(tmpdir) entrypoint = path / "entrypoint.py" From 06e52cf1d44ce19797ec31519262ac6ab9774aad Mon Sep 17 00:00:00 2001 From: Jirka Borovec <6035284+borda@users.noreply.github.com> Date: Mon, 29 May 2023 21:34:04 +0200 Subject: [PATCH 25/93] replace local adjustment script with external (#17582) (cherry picked from commit 51b0e81105c4c267f8ef86a2e862bf3e410990fb) --- .azure/gpu-tests-fabric.yml | 4 +- .azure/gpu-tests-pytorch.yml | 4 +- .github/workflows/ci-tests-fabric.yml | 4 +- .github/workflows/ci-tests-pytorch.yml | 4 +- .github/workflows/tpu-tests.yml | 1 - dockers/base-cuda/Dockerfile | 4 +- dockers/base-ipu/Dockerfile | 4 +- dockers/base-xla/Dockerfile | 4 +- dockers/base-xla/tpu_workflow_fabric.jsonnet | 4 +- dockers/base-xla/tpu_workflow_pytorch.jsonnet | 4 +- requirements/pytorch/adjust-versions.py | 63 ------------------- 11 files changed, 27 insertions(+), 73 deletions(-) delete mode 100644 requirements/pytorch/adjust-versions.py diff --git a/.azure/gpu-tests-fabric.yml b/.azure/gpu-tests-fabric.yml index c4efcf3383520..59e4a2ad94565 100644 --- a/.azure/gpu-tests-fabric.yml +++ b/.azure/gpu-tests-fabric.yml @@ -91,8 +91,10 @@ jobs: - bash: | PYTORCH_VERSION=$(python -c "import torch; print(torch.__version__.split('+')[0])") + pip install -q wget packaging + python -m wget https://raw.githubusercontent.com/Lightning-AI/utilities/main/scripts/adjust-torch-versions.py for fpath in `ls 
requirements/**/*.txt`; do \ - python ./requirements/pytorch/adjust-versions.py $fpath ${PYTORCH_VERSION}; \ + python ./adjust-torch-versions.py $fpath ${PYTORCH_VERSION}; \ done displayName: 'Adjust dependencies' diff --git a/.azure/gpu-tests-pytorch.yml b/.azure/gpu-tests-pytorch.yml index 5df5dad4b4179..f5d44acefc00d 100644 --- a/.azure/gpu-tests-pytorch.yml +++ b/.azure/gpu-tests-pytorch.yml @@ -98,8 +98,10 @@ jobs: - bash: | PYTORCH_VERSION=$(python -c "import torch; print(torch.__version__.split('+')[0])") + pip install -q wget packaging + python -m wget https://raw.githubusercontent.com/Lightning-AI/utilities/main/scripts/adjust-torch-versions.py for fpath in `ls requirements/**/*.txt`; do \ - python ./requirements/pytorch/adjust-versions.py $fpath ${PYTORCH_VERSION}; \ + python ./adjust-torch-versions.py $fpath ${PYTORCH_VERSION}; \ done # prune packages with installation issues pip install -q -r .actions/requirements.txt diff --git a/.github/workflows/ci-tests-fabric.yml b/.github/workflows/ci-tests-fabric.yml index 8db2de1f8afc6..0b8c562f6acef 100644 --- a/.github/workflows/ci-tests-fabric.yml +++ b/.github/workflows/ci-tests-fabric.yml @@ -84,8 +84,10 @@ jobs: - name: Adjust PyTorch versions in requirements files if: ${{ matrix.requires != 'oldest' && matrix.release != 'pre' }} run: | + pip install -q wget packaging + python -m wget https://raw.githubusercontent.com/Lightning-AI/utilities/main/scripts/adjust-torch-versions.py for fpath in `ls requirements/**/*.txt`; do \ - python ./requirements/pytorch/adjust-versions.py $fpath ${{ matrix.pytorch-version }}; \ + python ./adjust-torch-versions.py $fpath ${{ matrix.pytorch-version }}; \ done cat requirements/fabric/base.txt diff --git a/.github/workflows/ci-tests-pytorch.yml b/.github/workflows/ci-tests-pytorch.yml index 34de1b14f649b..089de252f96e5 100644 --- a/.github/workflows/ci-tests-pytorch.yml +++ b/.github/workflows/ci-tests-pytorch.yml @@ -91,8 +91,10 @@ jobs: - name: Adjust PyTorch versions in requirements files if: ${{ matrix.requires != 'oldest' && matrix.release != 'pre' }} run: | + pip install -q wget packaging + python -m wget https://raw.githubusercontent.com/Lightning-AI/utilities/main/scripts/adjust-torch-versions.py for fpath in `ls requirements/**/*.txt`; do \ - python ./requirements/pytorch/adjust-versions.py $fpath ${{ matrix.pytorch-version }}; \ + python ./adjust-torch-versions.py $fpath ${{ matrix.pytorch-version }}; \ done cat requirements/pytorch/base.txt diff --git a/.github/workflows/tpu-tests.yml b/.github/workflows/tpu-tests.yml index 85912db076ee6..3bdfbcb8d7541 100644 --- a/.github/workflows/tpu-tests.yml +++ b/.github/workflows/tpu-tests.yml @@ -142,7 +142,6 @@ jobs: - name: Upload coverage to Codecov uses: codecov/codecov-action@v3 - # see: https://github.com/actions/toolkit/issues/399 continue-on-error: true with: token: ${{ secrets.CODECOV_TOKEN }} diff --git a/dockers/base-cuda/Dockerfile b/dockers/base-cuda/Dockerfile index 6c83762fad9ca..072728b954a76 100644 --- a/dockers/base-cuda/Dockerfile +++ b/dockers/base-cuda/Dockerfile @@ -88,8 +88,10 @@ RUN \ # Disable cache \ pip config set global.cache-dir false && \ # set particular PyTorch version \ + pip install -q wget packaging && \ + python -m wget https://raw.githubusercontent.com/Lightning-AI/utilities/main/scripts/adjust-torch-versions.py && \ for fpath in `ls requirements/**/*.txt`; do \ - python ./requirements/pytorch/adjust-versions.py $fpath ${PYTORCH_VERSION}; \ + python ./adjust-torch-versions.py $fpath ${PYTORCH_VERSION}; \ done && 
\ CUDA_VERSION_MM=${CUDA_VERSION%.*} && \ pip install \ diff --git a/dockers/base-ipu/Dockerfile b/dockers/base-ipu/Dockerfile index 1d26798360b66..2132b7bacdc83 100644 --- a/dockers/base-ipu/Dockerfile +++ b/dockers/base-ipu/Dockerfile @@ -85,8 +85,10 @@ RUN \ python --version && \ cd lightning && \ # set particular PyTorch version \ + pip install -q wget packaging && \ + python -m wget https://raw.githubusercontent.com/Lightning-AI/utilities/main/scripts/adjust-torch-versions.py && \ for fpath in `ls requirements/**/*.txt`; do \ - python requirements/pytorch/adjust-versions.py $fpath ${PYTORCH_VERSION}; \ + python adjust-torch-versions.py $fpath ${PYTORCH_VERSION}; \ done && \ # install PL dependencies pip install --requirement requirements/pytorch/devel.txt && \ diff --git a/dockers/base-xla/Dockerfile b/dockers/base-xla/Dockerfile index fd035a68b52e9..081a9dd60121f 100644 --- a/dockers/base-xla/Dockerfile +++ b/dockers/base-xla/Dockerfile @@ -89,8 +89,10 @@ RUN \ # https://github.com/pytorch/xla/issues/1666 pip install mkl==2021.4.0 && \ # set particular PyTorch version \ + pip install -q wget packaging && \ + python -m wget https://raw.githubusercontent.com/Lightning-AI/utilities/main/scripts/adjust-torch-versions.py && \ for fpath in `ls requirements/**/*.txt`; do \ - python requirements/pytorch/adjust-versions.py $fpath ${XLA_VERSION}; \ + python adjust-torch-versions.py $fpath ${XLA_VERSION}; \ done && \ # install PL dependencies pip install --requirement requirements/pytorch/devel.txt && \ diff --git a/dockers/base-xla/tpu_workflow_fabric.jsonnet b/dockers/base-xla/tpu_workflow_fabric.jsonnet index e087329916085..9bc3988b42a33 100644 --- a/dockers/base-xla/tpu_workflow_fabric.jsonnet +++ b/dockers/base-xla/tpu_workflow_fabric.jsonnet @@ -39,8 +39,10 @@ local tputests = base.BaseTest { echo "--- Install packages ---" # set particular PyTorch version + pip install -q wget packaging && \ + python -m wget https://raw.githubusercontent.com/Lightning-AI/utilities/main/scripts/adjust-torch-versions.py && \ for fpath in `ls requirements/**/*.txt`; do - python requirements/pytorch/adjust-versions.py $fpath {PYTORCH_VERSION}; + python adjust-torch-versions.py $fpath {PYTORCH_VERSION}; done PACKAGE_NAME=fabric pip install .[dev] pip list diff --git a/dockers/base-xla/tpu_workflow_pytorch.jsonnet b/dockers/base-xla/tpu_workflow_pytorch.jsonnet index cfc4d14cf9e11..4f033f5b30497 100644 --- a/dockers/base-xla/tpu_workflow_pytorch.jsonnet +++ b/dockers/base-xla/tpu_workflow_pytorch.jsonnet @@ -39,8 +39,10 @@ local tputests = base.BaseTest { echo "--- Install packages ---" # set particular PyTorch version + pip install -q wget packaging && \ + python -m wget https://raw.githubusercontent.com/Lightning-AI/utilities/main/scripts/adjust-torch-versions.py && \ for fpath in `ls requirements/**/*.txt`; do - python requirements/pytorch/adjust-versions.py $fpath {PYTORCH_VERSION}; + python adjust-torch-versions.py $fpath {PYTORCH_VERSION}; done PACKAGE_NAME=pytorch pip install .[extra,test] pip list diff --git a/requirements/pytorch/adjust-versions.py b/requirements/pytorch/adjust-versions.py deleted file mode 100644 index d518d511b5c12..0000000000000 --- a/requirements/pytorch/adjust-versions.py +++ /dev/null @@ -1,63 +0,0 @@ -import os -import re -import sys -from typing import Dict, Optional - -# IMPORTANT: this list needs to be sorted in reverse -VERSIONS = [ - {"torch": "2.1.0", "torchvision": "0.16.0"}, # nightly - {"torch": "2.0.0", "torchvision": "0.15.1"}, # stable - {"torch": "1.13.1", 
"torchvision": "0.14.1"}, - {"torch": "1.13.0", "torchvision": "0.14.0"}, - {"torch": "1.12.1", "torchvision": "0.13.1"}, - {"torch": "1.12.0", "torchvision": "0.13.0"}, - {"torch": "1.11.0", "torchvision": "0.12.0"}, -] - - -def find_latest(ver: str) -> Dict[str, str]: - # drop all except semantic version - ver = re.search(r"([\.\d]+)", ver).groups()[0] - # in case there remaining dot at the end - e.g "1.9.0.dev20210504" - ver = ver[:-1] if ver[-1] == "." else ver - print(f"finding ecosystem versions for: {ver}") - - # find first match - for option in VERSIONS: - if option["torch"].startswith(ver): - return option - - raise ValueError(f"Missing {ver} in {VERSIONS}") - - -def replace(req: str, torch_version: Optional[str] = None) -> str: - if not torch_version: - import torch - - torch_version = torch.__version__ - assert torch_version, f"invalid torch: {torch_version}" - - # remove comments and strip whitespace - req = re.sub(rf"\s*#.*{os.linesep}", os.linesep, req).strip() - - latest = find_latest(torch_version) - for lib, version in latest.items(): - replace = f"{lib}=={version}" if version else "" - req = re.sub(rf"\b{lib}(?!\w).*", replace, req) - - return req - - -if __name__ == "__main__": - if len(sys.argv) == 3: - requirements_path, torch_version = sys.argv[1:] - else: - requirements_path, torch_version = sys.argv[1], None - print(f"requirements_path='{requirements_path}' with torch_version='{torch_version}'") - - with open(requirements_path) as fp: - requirements = fp.read() - requirements = replace(requirements, torch_version) - print(requirements) # on purpose - to debug - with open(requirements_path, "w") as fp: - fp.write(requirements) From 25585eae2d88176f707d4b7f574cefa40699036c Mon Sep 17 00:00:00 2001 From: Jirka Date: Thu, 1 Jun 2023 17:11:15 +0200 Subject: [PATCH 26/93] ci: skip flagship tests on PR --- .github/workflows/ci-flagship-apps.yml | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci-flagship-apps.yml b/.github/workflows/ci-flagship-apps.yml index a21853ca47ddb..34230a53748ce 100644 --- a/.github/workflows/ci-flagship-apps.yml +++ b/.github/workflows/ci-flagship-apps.yml @@ -4,13 +4,14 @@ name: Test App - flagships on: push: branches: ["release/*"] - pull_request: - branches: [master, "release/*"] - types: [opened, reopened, ready_for_review, synchronize] - paths: - - ".github/workflows/ci-flagship-apps.yml" - - "github/actions/prep-apps/action.yml" - - "tests/integrations_app/flagship/**" + # todo: skip this as we are not validation them ATM + #pull_request: + # branches: [master, "release/*"] + # types: [opened, reopened, ready_for_review, synchronize] + # paths: + # - ".github/workflows/ci-flagship-apps.yml" + # - "github/actions/prep-apps/action.yml" + # - "tests/integrations_app/flagship/**" schedule: # on Sundays - cron: "0 0 * * 0" From 782a2f39e36ccdc22b247abe6b44e90df1a6a841 Mon Sep 17 00:00:00 2001 From: Jirka Date: Thu, 1 Jun 2023 18:14:44 +0200 Subject: [PATCH 27/93] docker: NGC prune git --- dockers/nvidia/Dockerfile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/dockers/nvidia/Dockerfile b/dockers/nvidia/Dockerfile index cb76595f3eac7..7cf0f9297e006 100644 --- a/dockers/nvidia/Dockerfile +++ b/dockers/nvidia/Dockerfile @@ -38,6 +38,8 @@ RUN \ fi && \ # save the examples \ ls -lh lightning/ && \ + rm -rf lightning/.git && \ + rm -rf lightning/_notebooks/.git && \ mv lightning/_notebooks/.notebooks/ notebooks && \ cp -r lightning/*examples . 
&& \ From 50ac69c085b403dc89017e5159665f5d5c0a59f4 Mon Sep 17 00:00:00 2001 From: Jirka Date: Thu, 1 Jun 2023 18:43:52 +0200 Subject: [PATCH 28/93] req: freeze altair<5.0 for streamlit --- requirements/app/ui.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements/app/ui.txt b/requirements/app/ui.txt index 5805a20c8d7e5..11dd4058a49bd 100644 --- a/requirements/app/ui.txt +++ b/requirements/app/ui.txt @@ -1,2 +1,3 @@ streamlit >=1.13.0, <1.16.1 +altair <5.0.0 # strict - ModuleNotFoundError in streamlit: No module named 'altair.vegalite.v4' panel >=0.12.7, <=0.13.1 From 84e19e03bc8c079e0fb2265d126432d38ea01945 Mon Sep 17 00:00:00 2001 From: Jirka Date: Thu, 1 Jun 2023 19:00:43 +0200 Subject: [PATCH 29/93] tests: allow xfail `test_fsdp_train_save_load` --- .azure/hpu-tests.yml | 2 +- tests/tests_fabric/strategies/test_fsdp_integration.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.azure/hpu-tests.yml b/.azure/hpu-tests.yml index 7b0b72313ef46..9493ff24436e3 100644 --- a/.azure/hpu-tests.yml +++ b/.azure/hpu-tests.yml @@ -41,7 +41,7 @@ pr: jobs: - job: testing # how long to run the job before automatically cancelling - timeoutInMinutes: "10" + timeoutInMinutes: "20" # how much time to give 'run always even if cancelled tasks' before stopping them cancelTimeoutInMinutes: "2" pool: intel-hpus diff --git a/tests/tests_fabric/strategies/test_fsdp_integration.py b/tests/tests_fabric/strategies/test_fsdp_integration.py index 08c6e19ed6687..fd580efcec582 100644 --- a/tests/tests_fabric/strategies/test_fsdp_integration.py +++ b/tests/tests_fabric/strategies/test_fsdp_integration.py @@ -101,6 +101,7 @@ def _custom_auto_wrap_policy( @RunIf(min_cuda_gpus=2, skip_windows=True, standalone=True, min_torch="1.13") +@pytest.mark.xfail(reason="experimental feature", strict=False) @pytest.mark.parametrize("precision", ("16-mixed", pytest.param("bf16-mixed", marks=RunIf(bf16_cuda=True)))) @pytest.mark.parametrize("manual_wrapping", [True, False]) def test_fsdp_train_save_load(manual_wrapping, precision): From deabea51be9262b7a13bef4435a58d5fee51fda7 Mon Sep 17 00:00:00 2001 From: Jirka Date: Thu, 1 Jun 2023 19:17:50 +0200 Subject: [PATCH 30/93] tests: flaky `test_collectives_distributed` --- tests/tests_fabric/plugins/collectives/test_torch_collective.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/tests_fabric/plugins/collectives/test_torch_collective.py b/tests/tests_fabric/plugins/collectives/test_torch_collective.py index ca9f5858d26e7..040248718fddc 100644 --- a/tests/tests_fabric/plugins/collectives/test_torch_collective.py +++ b/tests/tests_fabric/plugins/collectives/test_torch_collective.py @@ -231,6 +231,7 @@ def _test_distributed_collectives_fn(strategy, collective): @skip_distributed_unavailable @pytest.mark.parametrize("n", (1, 2)) @RunIf(skip_windows=True) +@pytest.mark.flaky(reruns=3) @mock.patch.dict(os.environ, os.environ.copy(), clear=True) # sets CUDA_MODULE_LOADING in torch==1.13 def test_collectives_distributed(n): collective_launch(_test_distributed_collectives_fn, [torch.device("cpu")] * n) From 55f7650c84fb7f1c58731f0bb3ad7dc6b90fc7cd Mon Sep 17 00:00:00 2001 From: Jirka Borovec <6035284+borda@users.noreply.github.com> Date: Mon, 24 Apr 2023 23:57:08 +0200 Subject: [PATCH 31/93] add & apply flake8-simplify (#17386) (cherry picked from commit df9714178194c2cc34948eb56974582860a44ec9) --- .actions/assistant.py | 9 ++-- .pre-commit-config.yaml | 2 + examples/app/boring/scripts/serve.py | 3 +- .../reinforcement_learning/src/utils.py | 9 ++-- 
.../reinforcement_learning/train_torch.py | 2 +- pyproject.toml | 1 + src/lightning/app/cli/cmd_clusters.py | 9 ++-- src/lightning/app/cli/cmd_init.py | 7 +-- src/lightning/app/cli/commands/cd.py | 5 +- src/lightning/app/cli/commands/cp.py | 10 +--- src/lightning/app/cli/connect/app.py | 2 +- src/lightning/app/cli/connect/maverick.py | 9 ++-- src/lightning/app/cli/lightning_cli.py | 5 +- .../app/cli/pl-app-template/core/callbacks.py | 5 +- .../app/components/serve/auto_scaler.py | 7 ++- src/lightning/app/core/app.py | 9 ++-- src/lightning/app/core/work.py | 7 ++- src/lightning/app/runners/cloud.py | 4 +- src/lightning/app/source_code/tar.py | 3 +- src/lightning/app/storage/drive.py | 2 +- src/lightning/app/utilities/app_helpers.py | 2 +- src/lightning/app/utilities/app_logs.py | 5 +- src/lightning/app/utilities/cloud.py | 5 +- src/lightning/app/utilities/layout.py | 5 +- src/lightning/app/utilities/load_app.py | 19 +++----- .../app/utilities/packaging/cloud_compute.py | 2 +- src/lightning/app/utilities/proxies.py | 27 ++++------- src/lightning/app/utilities/safe_pickle.py | 5 +- src/lightning/app/utilities/state.py | 5 +- src/lightning/app/utilities/tracer.py | 5 +- src/lightning/fabric/cli.py | 5 +- src/lightning/fabric/connector.py | 33 +++++++------ src/lightning/fabric/utilities/data.py | 5 +- src/lightning/fabric/utilities/logger.py | 2 +- src/lightning/pytorch/callbacks/finetuning.py | 2 +- src/lightning/pytorch/callbacks/lr_monitor.py | 9 +--- src/lightning/pytorch/callbacks/pruning.py | 2 +- src/lightning/pytorch/core/module.py | 6 +-- src/lightning/pytorch/core/optimizer.py | 11 ++--- src/lightning/pytorch/core/saving.py | 11 ++--- src/lightning/pytorch/loggers/neptune.py | 7 ++- src/lightning/pytorch/loggers/utilities.py | 10 ++-- src/lightning/pytorch/loggers/wandb.py | 2 +- src/lightning/pytorch/loops/utilities.py | 6 +-- src/lightning/pytorch/profilers/simple.py | 2 +- src/lightning/pytorch/strategies/ddp.py | 6 +-- .../connectors/accelerator_connector.py | 46 ++++++++++--------- .../trainer/connectors/callback_connector.py | 9 ++-- src/lightning/pytorch/trainer/trainer.py | 6 +-- src/lightning/pytorch/tuner/lr_finder.py | 13 ++++-- src/lightning/pytorch/utilities/deepspeed.py | 2 +- .../pytorch/utilities/migration/utils.py | 6 +-- src/lightning/pytorch/utilities/parsing.py | 2 +- src/lightning/store/save.py | 24 +++++----- src/lightning/store/utils.py | 5 +- tests/tests_app/cli/test_cd.py | 28 +++++------ tests/tests_app/cli/test_cloud_cli.py | 12 ++--- tests/tests_app/cli/test_cmd_init.py | 17 ++----- tests/tests_app/cli/test_cmd_react_ui_init.py | 6 ++- tests/tests_app/cli/test_cp.py | 14 +++--- tests/tests_app/cli/test_ls.py | 6 +-- tests/tests_app/cli/test_rm.py | 2 +- tests/tests_app/conftest.py | 2 +- tests/tests_app/core/test_lightning_api.py | 8 ++-- tests/tests_app/core/test_lightning_app.py | 14 +++--- tests/tests_app/core/test_lightning_flow.py | 5 +- tests/tests_app/core/test_lightning_work.py | 5 +- tests/tests_app/runners/test_cloud.py | 9 ++-- tests/tests_app/storage/test_copier.py | 7 +-- tests/tests_app/storage/test_path.py | 17 ++++--- tests/tests_app/structures/test_structures.py | 5 +- .../utilities/packaging/test_cloud_compute.py | 2 +- tests/tests_app/utilities/test_port.py | 6 +-- tests/tests_app/utilities/test_proxies.py | 27 ++++------- .../plugins/environments/test_slurm.py | 7 +-- tests/tests_fabric/strategies/test_ddp.py | 10 ++-- tests/tests_fabric/strategies/test_dp.py | 5 +- tests/tests_fabric/strategies/test_fsdp.py | 5 +- 
.../strategies/test_single_device.py | 5 +- tests/tests_fabric/test_connector.py | 35 ++++++++------ tests/tests_fabric/test_fabric.py | 12 ++--- tests/tests_fabric/utilities/test_data.py | 5 +- tests/tests_pytorch/accelerators/test_cpu.py | 2 +- tests/tests_pytorch/accelerators/test_gpu.py | 4 +- tests/tests_pytorch/accelerators/test_hpu.py | 2 +- tests/tests_pytorch/accelerators/test_mps.py | 2 +- .../progress/test_tqdm_progress_bar.py | 16 +++---- .../callbacks/test_early_stopping.py | 2 +- .../checkpointing/test_model_checkpoint.py | 3 +- tests/tests_pytorch/conftest.py | 2 +- .../graveyard/test_legacy_import_unpickler.py | 14 +++--- tests/tests_pytorch/helpers/utils.py | 5 +- tests/tests_pytorch/loggers/test_comet.py | 17 +++---- .../tests_pytorch/loggers/test_tensorboard.py | 9 ++-- .../loops/test_training_loop_flow_scalar.py | 5 +- tests/tests_pytorch/models/test_hooks.py | 4 +- tests/tests_pytorch/models/test_restore.py | 4 +- .../tests_pytorch/profilers/test_profiler.py | 5 +- .../strategies/test_deepspeed_strategy.py | 10 +--- tests/tests_pytorch/test_cli.py | 2 +- .../trainer/logging_/test_logger_connector.py | 5 +- .../optimization/test_manual_optimization.py | 9 ++-- tests/tests_pytorch/trainer/test_trainer.py | 29 ++++++------ tests/tests_pytorch/tuner/test_lr_finder.py | 14 ++---- .../tuner/test_scale_batch_size.py | 2 +- tests/tests_pytorch/utilities/test_grads.py | 2 +- .../utilities/test_model_summary.py | 2 +- .../utilities/test_upgrade_checkpoint.py | 20 ++++---- 108 files changed, 390 insertions(+), 499 deletions(-) diff --git a/.actions/assistant.py b/.actions/assistant.py index d791793af9490..fd2ad537e1db6 100644 --- a/.actions/assistant.py +++ b/.actions/assistant.py @@ -158,7 +158,8 @@ def load_readme_description(path_dir: str, homepage: str, version: str) -> str: '...PyTorch Lightning is just organized PyTorch...' """ path_readme = os.path.join(path_dir, "README.md") - text = open(path_readme, encoding="utf-8").read() + with open(path_readme, encoding="utf-8") as fo: + text = fo.read() # drop images from readme text = text.replace( @@ -394,8 +395,10 @@ def _prune_packages(req_file: str, packages: Sequence[str]) -> None: @staticmethod def _replace_min(fname: str) -> None: - req = open(fname, encoding="utf-8").read().replace(">=", "==") - open(fname, "w", encoding="utf-8").write(req) + with open(fname, encoding="utf-8") as fo: + req = fo.read().replace(">=", "==") + with open(fname, "w", encoding="utf-8") as fw: + fw.write(req) @staticmethod def replace_oldest_ver(requirement_fnames: Sequence[str] = REQUIREMENT_FILES_ALL) -> None: diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1fe9773a1942b..f99c51228a00a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -67,6 +67,8 @@ repos: hooks: - id: yesqa name: Unused noqa + additional_dependencies: + - flake8-simplify - repo: https://github.com/PyCQA/isort rev: 5.12.0 diff --git a/examples/app/boring/scripts/serve.py b/examples/app/boring/scripts/serve.py index e554976865496..dedd6013985ca 100644 --- a/examples/app/boring/scripts/serve.py +++ b/examples/app/boring/scripts/serve.py @@ -18,7 +18,8 @@ if not os.path.exists(str(hparams.filepath)): content = ["The file wasn't transferred"] else: - content = open(hparams.filepath).readlines() # read the file received from SourceWork. + with open(hparams.filepath) as fo: + content = fo.readlines() # read the file received from SourceWork. 
@fastapi_service.get("/file") async def get_file_content(request: Request, response_class=HTMLResponse): diff --git a/examples/fabric/reinforcement_learning/src/utils.py b/examples/fabric/reinforcement_learning/src/utils.py index 0718d3f6665bb..d5e6ca548b105 100644 --- a/examples/fabric/reinforcement_learning/src/utils.py +++ b/examples/fabric/reinforcement_learning/src/utils.py @@ -118,11 +118,10 @@ def make_env(env_id: str, seed: int, idx: int, capture_video: bool, run_name: Op def thunk(): env = gym.make(env_id, render_mode="rgb_array") env = gym.wrappers.RecordEpisodeStatistics(env) - if capture_video: - if idx == 0 and run_name is not None: - env = gym.wrappers.RecordVideo( - env, os.path.join(run_name, prefix + "_videos" if prefix else "videos"), disable_logger=True - ) + if capture_video and idx == 0 and run_name is not None: + env = gym.wrappers.RecordVideo( + env, os.path.join(run_name, prefix + "_videos" if prefix else "videos"), disable_logger=True + ) env.action_space.seed(seed) env.observation_space.seed(seed) return env diff --git a/examples/fabric/reinforcement_learning/train_torch.py b/examples/fabric/reinforcement_learning/train_torch.py index 944d217afdf36..40c8d7e62cb69 100644 --- a/examples/fabric/reinforcement_learning/train_torch.py +++ b/examples/fabric/reinforcement_learning/train_torch.py @@ -349,7 +349,7 @@ def main(args: argparse.Namespace): distributed.all_gather_object(gathered_data, local_data) processed_gathered_data = gathered_data[0] for i in range(1, len(gathered_data)): - for k in processed_gathered_data.keys(): + for k in processed_gathered_data: processed_gathered_data[k] = torch.cat( (processed_gathered_data[k].to(device), gathered_data[i][k].to(device)), dim=0 ) diff --git a/pyproject.toml b/pyproject.toml index a000dc9122a77..5a1e03e10950e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -58,6 +58,7 @@ select = [ ] extend-select = [ "C4", # see: https://pypi.org/project/flake8-comprehensions + "SIM", # see: https://pypi.org/project/flake8-simplify ] ignore = [ "E731", # Do not assign a lambda expression, use a def diff --git a/src/lightning/app/cli/cmd_clusters.py b/src/lightning/app/cli/cmd_clusters.py index c735b98ff6810..0f943487ceb15 100644 --- a/src/lightning/app/cli/cmd_clusters.py +++ b/src/lightning/app/cli/cmd_clusters.py @@ -373,10 +373,9 @@ def _cluster_status_long(cluster: V1GetClusterResponse, desired_state: V1Cluster duration = _format_elapsed_seconds(elapsed) - if current_state == V1ClusterState.FAILED: - if not _is_retryable_error(current_reason): - return dedent( - f"""\ + if current_state == V1ClusterState.FAILED and not _is_retryable_error(current_reason): + return dedent( + f"""\ The requested cluster operation for cluster {cluster_id} has errors: {current_reason} @@ -391,7 +390,7 @@ def _cluster_status_long(cluster: V1GetClusterResponse, desired_state: V1Cluster Contact support@lightning.ai for additional help """ - ) + ) if desired_state == current_state == V1ClusterState.RUNNING: return dedent( diff --git a/src/lightning/app/cli/cmd_init.py b/src/lightning/app/cli/cmd_init.py index 547f96b2350e1..6a415af3e80cd 100644 --- a/src/lightning/app/cli/cmd_init.py +++ b/src/lightning/app/cli/cmd_init.py @@ -63,9 +63,10 @@ def _make_resource(resource_dir: str, resource_name: str) -> Tuple[str, str]: if bad_file.split("/")[-1] in trouble_names: continue # find the words and replace - content = open(bad_file).read().replace("placeholdername", name_for_files) - with open(bad_file, "w") as file: - file.write(content) + with 
open(bad_file) as fo: + content = fo.read().replace("placeholdername", name_for_files) + with open(bad_file, "w") as fw: + fw.write(content) # rename files for file_name in files: diff --git a/src/lightning/app/cli/commands/cd.py b/src/lightning/app/cli/commands/cd.py index c6cc1ff029045..1d8718848a3c3 100644 --- a/src/lightning/app/cli/commands/cd.py +++ b/src/lightning/app/cli/commands/cd.py @@ -113,8 +113,5 @@ def cd(path: Optional[Union[Tuple[str], str]], verify: bool = True) -> None: def _apply_double_dots(root: str, path: str) -> str: splits = [split for split in path.split("/") if split != ""] for split in splits: - if split == "..": - root = "/" + os.path.join(*root.split("/")[:-1]) - else: - root = os.path.join(root, split) + root = "/" + os.path.join(*root.split("/")[:-1]) if split == ".." else os.path.join(root, split) return root diff --git a/src/lightning/app/cli/commands/cp.py b/src/lightning/app/cli/commands/cp.py index ec0288c9ab7d8..bac2a55098ffa 100644 --- a/src/lightning/app/cli/commands/cp.py +++ b/src/lightning/app/cli/commands/cp.py @@ -119,10 +119,7 @@ def _upload_files(live, client: LightningClient, local_src: str, remote_dst: str for upload_path in upload_paths: for cluster in clusters.clusters: filename = str(upload_path).replace(str(os.getcwd()), "")[1:] - if lit_resource: - filename = _get_prefix(os.path.join(remote_dst, filename), lit_resource) - else: - filename = "/" + filename + filename = _get_prefix(os.path.join(remote_dst, filename), lit_resource) if lit_resource else "/" + filename response = client.lightningapp_instance_service_upload_project_artifact( project_id=project_id, @@ -273,10 +270,7 @@ def _sanitize_path(path: str, pwd: str) -> Tuple[str, bool]: is_remote = _is_remote(path) if is_remote: path = _remove_remote(path) - if path == ".": - path = pwd - else: - path = os.path.join(pwd, path) + path = pwd if path == "." else os.path.join(pwd, path) return path, is_remote diff --git a/src/lightning/app/cli/connect/app.py b/src/lightning/app/cli/connect/app.py index 68f2a6909ab3c..68c09ba7e58b6 100644 --- a/src/lightning/app/cli/connect/app.py +++ b/src/lightning/app/cli/connect/app.py @@ -380,7 +380,7 @@ def _scan_lightning_connections(app_name_or_id): if not curr_app_name: continue - if app_name_or_id == curr_app_name or app_name_or_id == curr_app_id: + if app_name_or_id in (curr_app_name, curr_app_id): return connection_path return None diff --git a/src/lightning/app/cli/connect/maverick.py b/src/lightning/app/cli/connect/maverick.py index f5a289edce689..c5ea46b60641e 100644 --- a/src/lightning/app/cli/connect/maverick.py +++ b/src/lightning/app/cli/connect/maverick.py @@ -112,11 +112,10 @@ def connect_maverick(name: str, project_name: str = "") -> None: # run network creation in the background out = subprocess.run(CMD_CREATE_NETWORK, shell=True, capture_output=True) error = out.stderr - if error: - if "already exists" not in str(error): - live.stop() - rich.print(f"[red]Failed[/red]: network creation failed with error: {str(error)}") - return + if error and "already exists" not in str(error): + live.stop() + rich.print(f"[red]Failed[/red]: network creation failed with error: {str(error)}") + return # if code server is already running, ignore. # If not, but container exists, remove it and run. Otherwise, run. 
diff --git a/src/lightning/app/cli/lightning_cli.py b/src/lightning/app/cli/lightning_cli.py index cf67ce9434806..2370f2dbc8054 100644 --- a/src/lightning/app/cli/lightning_cli.py +++ b/src/lightning/app/cli/lightning_cli.py @@ -307,9 +307,8 @@ def _run_app( "Secrets can only be used for apps running in cloud. " "Using the option --secret in local execution is not supported." ) - if ENABLE_APP_COMMENT_COMMAND_EXECUTION or run_app_comment_commands: - if file is not None: - run_app_commands(str(file)) + if (ENABLE_APP_COMMENT_COMMAND_EXECUTION or run_app_comment_commands) and file is not None: + run_app_commands(str(file)) env_vars = _format_input_env_variables(env) os.environ.update(env_vars) diff --git a/src/lightning/app/cli/pl-app-template/core/callbacks.py b/src/lightning/app/cli/pl-app-template/core/callbacks.py index a8cc99eff3736..610101950835f 100644 --- a/src/lightning/app/cli/pl-app-template/core/callbacks.py +++ b/src/lightning/app/cli/pl-app-template/core/callbacks.py @@ -158,10 +158,7 @@ def _train_batch_idx(self, trainer: "pl.Trainer") -> int: return trainer.fit_loop.epoch_loop.batch_progress.current.processed def _val_batch_idx(self, trainer: "pl.Trainer") -> int: - if trainer.state.fn == "fit": - loop = trainer.fit_loop.epoch_loop.val_loop - else: - loop = trainer.validate_loop + loop = trainer.fit_loop.epoch_loop.val_loop if trainer.state.fn == "fit" else trainer.validate_loop current_batch_idx = loop.epoch_loop.batch_progress.current.processed return current_batch_idx diff --git a/src/lightning/app/components/serve/auto_scaler.py b/src/lightning/app/components/serve/auto_scaler.py index b05b4c31e0865..a15a87da93897 100644 --- a/src/lightning/app/components/serve/auto_scaler.py +++ b/src/lightning/app/components/serve/auto_scaler.py @@ -72,9 +72,8 @@ def _maybe_raise_granular_exception(exception: Exception) -> None: if isinstance(exception, asyncio.TimeoutError): raise HTTPException(408, "Request timed out") from exception - if isinstance(exception, Exception): - if exception.args[0] == "Server disconnected": - raise HTTPException(500, "Worker Server disconnected") from exception + if isinstance(exception, Exception) and exception.args[0] == "Server disconnected": + raise HTTPException(500, "Worker Server disconnected") from exception logging.exception(exception) raise HTTPException(500, exception.args[0]) from exception @@ -312,7 +311,7 @@ def run(self): @fastapi_app.middleware("http") async def current_request_counter(request: Request, call_next): - if not request.scope["path"] == self.endpoint: + if request.scope["path"] != self.endpoint: return await call_next(request) fastapi_app.global_request_count += 1 fastapi_app.num_current_requests += 1 diff --git a/src/lightning/app/core/app.py b/src/lightning/app/core/app.py index f5f302536fbaf..6d24e31eca6b3 100644 --- a/src/lightning/app/core/app.py +++ b/src/lightning/app/core/app.py @@ -617,7 +617,8 @@ def load_state_dict_from_checkpoint_dir( elif len(available_checkpoints) > 1: raise Exception(f"Found 2 checkpoints `{available_checkpoints}`with the same version.") checkpoint_path = os.path.join(checkpoints_dir, available_checkpoints[0]) - state = pickle.load(open(checkpoint_path, "rb")) + with open(checkpoint_path, "rb") as fo: + state = pickle.load(fo) self.load_state_dict(state) def _dump_checkpoint(self) -> Optional[str]: @@ -632,11 +633,7 @@ def _dump_checkpoint(self) -> Optional[str]: int(f.split("_")[1]) for f in os.listdir(checkpoints_dir) if f.startswith("v_") and f.endswith(".json") ) - if checkpoint_versions: - 
previous_version = checkpoint_versions[-1] - else: - # initialization - previous_version = -1 + previous_version = checkpoint_versions[-1] if checkpoint_versions else -1 checkpoint_path = os.path.join(checkpoints_dir, f"v_{previous_version + 1}_{time()}.json") diff --git a/src/lightning/app/core/work.py b/src/lightning/app/core/work.py index 8e0a70abe9a0e..6c1747f5cb695 100644 --- a/src/lightning/app/core/work.py +++ b/src/lightning/app/core/work.py @@ -383,7 +383,7 @@ def num_timeouts(self) -> int: def num_successes(self) -> int: """Returns the number of successful runs.""" # FIXME: Resolve this within single process runtime. - run_keys = [key for key in self._calls.keys() if key.startswith("run:")] + run_keys = [key for key in self._calls if key.startswith("run:")] if not run_keys: return 0 @@ -491,10 +491,9 @@ def __getattribute__(self, name: str) -> Any: if isinstance(attr, ProxyWorkRun): return attr - if callable(attr) and getattr(attr, "__name__", "") == "run": + if callable(attr) and getattr(attr, "__name__", "") == "run" and getattr(self, "_cache_calls", False): # disable while building the class. - if getattr(self, "_cache_calls", False): - return self._wrap_run_for_caching(attr) + return self._wrap_run_for_caching(attr) return attr def __getattr__(self, item: str) -> Any: diff --git a/src/lightning/app/runners/cloud.py b/src/lightning/app/runners/cloud.py index 1783fee3d6927..f1dcd2eca4d5b 100644 --- a/src/lightning/app/runners/cloud.py +++ b/src/lightning/app/runners/cloud.py @@ -106,7 +106,7 @@ def _to_clean_dict(swagger_object, map_attributes): if hasattr(swagger_object, "to_dict"): attribute_map = swagger_object.attribute_map result = {} - for key in attribute_map.keys(): + for key in attribute_map: value = getattr(swagger_object, key) value = _to_clean_dict(value, map_attributes) if value is not None and value != {}: @@ -668,7 +668,7 @@ def _get_flow_servers(self) -> List[V1Flowserver]: """Collect a spec for each flow that contains a frontend so that the backend knows for which flows it needs to start servers.""" flow_servers: List[V1Flowserver] = [] - for flow_name in self.app.frontends.keys(): + for flow_name in self.app.frontends: flow_server = V1Flowserver(name=flow_name) flow_servers.append(flow_server) return flow_servers diff --git a/src/lightning/app/source_code/tar.py b/src/lightning/app/source_code/tar.py index 2679eaaced924..7ca93c798b8b6 100644 --- a/src/lightning/app/source_code/tar.py +++ b/src/lightning/app/source_code/tar.py @@ -157,7 +157,8 @@ def _tar_path_python(source_path: str, target_file: str, compression: bool = Fal tar.add(str(source_path), arcname=".") elif os.path.isfile(source_path): file_info = tarfile.TarInfo(os.path.basename(str(source_path))) - tar.addfile(file_info, open(source_path)) + with open(source_path) as fo: + tar.addfile(file_info, fo) def _tar_path_subprocess(source_path: str, target_file: str, compression: bool = False) -> None: diff --git a/src/lightning/app/storage/drive.py b/src/lightning/app/storage/drive.py index 88a27bd25c838..314551b863b27 100644 --- a/src/lightning/app/storage/drive.py +++ b/src/lightning/app/storage/drive.py @@ -330,7 +330,7 @@ def __str__(self) -> str: def _maybe_create_drive(component_name: str, state: Dict) -> Union[Dict, Drive]: - if Drive.__IDENTIFIER__ == state.get("type", None): + if state.get("type", None) == Drive.__IDENTIFIER__: drive = Drive.from_dict(state) drive.component_name = component_name return drive diff --git a/src/lightning/app/utilities/app_helpers.py 
b/src/lightning/app/utilities/app_helpers.py index 449fddaa9418c..b5710ba818857 100644 --- a/src/lightning/app/utilities/app_helpers.py +++ b/src/lightning/app/utilities/app_helpers.py @@ -318,7 +318,7 @@ def _set_child_name(component: "Component", child: "Component", new_name: str) - def _delta_to_app_state_delta(root: "LightningFlow", component: "Component", delta: Delta) -> Delta: delta_dict = delta.to_dict() for changed in delta_dict.values(): - for delta_key in changed.copy().keys(): + for delta_key in changed.copy(): val = changed[delta_key] new_prefix = "root" diff --git a/src/lightning/app/utilities/app_logs.py b/src/lightning/app/utilities/app_logs.py index 807a4492729ce..6f018600d0a37 100644 --- a/src/lightning/app/utilities/app_logs.py +++ b/src/lightning/app/utilities/app_logs.py @@ -116,9 +116,8 @@ def _app_logs_reader( start_timestamps[log_event.component_name] = log_event.timestamp timestamp = start_timestamps.get(log_event.component_name, None) - if timestamp and log_event.timestamp >= timestamp: - if "launcher" not in log_event.message: - yield log_event + if timestamp and log_event.timestamp >= timestamp and "launcher" not in log_event.message: + yield log_event except queue.Empty: # Empty is raised by queue.get if timeout is reached. Follow = False case. diff --git a/src/lightning/app/utilities/cloud.py b/src/lightning/app/utilities/cloud.py index f5145fe10c480..9fb0853fa3e2d 100644 --- a/src/lightning/app/utilities/cloud.py +++ b/src/lightning/app/utilities/cloud.py @@ -41,9 +41,8 @@ def _get_project(client: LightningClient, project_id: Optional[str] = None, verb if len(projects.memberships) == 0: raise ValueError("No valid projects found. Please reach out to lightning.ai team to create a project") - if len(projects.memberships) > 1: - if verbose: - print(f"Defaulting to the project: {projects.memberships[0].name}") + if len(projects.memberships) > 1 and verbose: + print(f"Defaulting to the project: {projects.memberships[0].name}") return projects.memberships[0] diff --git a/src/lightning/app/utilities/layout.py b/src/lightning/app/utilities/layout.py index 28822c33a86ed..d9d2175834754 100644 --- a/src/lightning/app/utilities/layout.py +++ b/src/lightning/app/utilities/layout.py @@ -185,10 +185,7 @@ def _collect_work_layout(work: "lightning.app.LightningWork") -> Union[None, str Raises: TypeError: If the value returned by ``configure_layout`` is not of a supported format. """ - if is_overridden("configure_layout", work): - work_layout = work.configure_layout() - else: - work_layout = work.url + work_layout = work.configure_layout() if is_overridden("configure_layout", work) else work.url if work_layout is None: return None diff --git a/src/lightning/app/utilities/load_app.py b/src/lightning/app/utilities/load_app.py index 52a2363d5199b..008cdc05ce427 100644 --- a/src/lightning/app/utilities/load_app.py +++ b/src/lightning/app/utilities/load_app.py @@ -71,13 +71,12 @@ def _load_objects_from_file( code = _create_code(filepath) with _create_fake_main_module(filepath) as module: try: - with _add_to_env(env_vars): - with _patch_sys_argv(): - if mock_imports: - with _mock_missing_imports(): - exec(code, module.__dict__) - else: + with _add_to_env(env_vars), _patch_sys_argv(): + if mock_imports: + with _mock_missing_imports(): exec(code, module.__dict__) + else: + exec(code, module.__dict__) except Exception as e: if raise_exception: raise e @@ -150,8 +149,7 @@ def open_python_file(filename): # Open file respecting PEP263 encoding. 
If no encoding header is # found, opens as utf-8. return tokenize.open(filename) - else: - return open(filename, encoding="utf-8") + return open(filename, encoding="utf-8") # noqa: SIM115 def _create_code(script_path: str): @@ -260,10 +258,7 @@ def _patch_sys_argv(): matches = [ argv_slice.index(opt) for opt in options if opt in argv_slice and argv_slice.index(opt) >= first_index ] - if not matches: - last_index = len(argv_slice) - else: - last_index = min(matches) + last_index = len(argv_slice) if not matches else min(matches) # 6: last_index is either the fully command or the latest match from the CLI options. new_argv = [argv_slice[0]] + argv_slice[first_index:last_index] diff --git a/src/lightning/app/utilities/packaging/cloud_compute.py b/src/lightning/app/utilities/packaging/cloud_compute.py index 75c7cb93d6e00..9423d626f86c9 100644 --- a/src/lightning/app/utilities/packaging/cloud_compute.py +++ b/src/lightning/app/utilities/packaging/cloud_compute.py @@ -171,7 +171,7 @@ def _verify_mount_root_dirs_are_unique(mounts: Union[None, Mount, List[Mount], T def _maybe_create_cloud_compute(state: Dict) -> Union[CloudCompute, Dict]: - if state and __CLOUD_COMPUTE_IDENTIFIER__ == state.get("type", None): + if state and state.get("type", None) == __CLOUD_COMPUTE_IDENTIFIER__: cloud_compute = CloudCompute.from_dict(state) return cloud_compute return state diff --git a/src/lightning/app/utilities/proxies.py b/src/lightning/app/utilities/proxies.py index a624e9a8af6b3..2b061f895bbe2 100644 --- a/src/lightning/app/utilities/proxies.py +++ b/src/lightning/app/utilities/proxies.py @@ -134,22 +134,16 @@ def __call__(self, *args: Any, **kwargs: Any): data = {"args": args, "kwargs": kwargs, "call_hash": call_hash} - # The if/else conditions are left un-compressed to simplify readability - # for the readers. - if self.work.cache_calls: - if not entered or stopped_on_sigterm: - _send_data_to_caller_queue(self, self.work, self.caller_queue, data, call_hash) - else: - if returned: - return + # The if/else conditions are left un-compressed to simplify readability for the readers. + if not entered or stopped_on_sigterm: + _send_data_to_caller_queue(self, self.work, self.caller_queue, data, call_hash) else: - if not entered or stopped_on_sigterm: + if self.work.cache_calls and returned: + return + elif returned or stopped_on_sigterm: + # the previous task has completed and we can re-queue the next one. + # overriding the return value for next loop iteration. _send_data_to_caller_queue(self, self.work, self.caller_queue, data, call_hash) - else: - if returned or stopped_on_sigterm: - # the previous task has completed and we can re-queue the next one. - # overriding the return value for next loop iteration. - _send_data_to_caller_queue(self, self.work, self.caller_queue, data, call_hash) if not self.work.parallel: raise CacheMissException("Task never called before. Triggered now") @@ -572,9 +566,8 @@ def run_once(self): return # 13. Destroy the state observer. - if self.run_executor_cls.enable_start_observer: - if self.state_observer.started: - self.state_observer.join(0) + if self.run_executor_cls.enable_start_observer and self.state_observer.started: + self.state_observer.join(0) self.state_observer = None # 14. 
Copy all artifacts to the shared storage so other Works can access them while this Work gets scaled down diff --git a/src/lightning/app/utilities/safe_pickle.py b/src/lightning/app/utilities/safe_pickle.py index 614ac8dd3f24d..ddd77ddcc6509 100644 --- a/src/lightning/app/utilities/safe_pickle.py +++ b/src/lightning/app/utilities/safe_pickle.py @@ -89,9 +89,8 @@ def get_picklable_work(work: LightningWork) -> LightningWork: fake_module.__dict__["__name__"] = expected_module_name sys.modules[expected_module_name] = fake_module for k, v in fake_module.__dict__.items(): - if not k.startswith("__") and hasattr(v, "__module__"): - if "_main__" in v.__module__: - v.__module__ = expected_module_name + if not k.startswith("__") and hasattr(v, "__module__") and "_main__" in v.__module__: + v.__module__ = expected_module_name return copied_work diff --git a/src/lightning/app/utilities/state.py b/src/lightning/app/utilities/state.py index 4fbe58600bc51..aedacfe827395 100644 --- a/src/lightning/app/utilities/state.py +++ b/src/lightning/app/utilities/state.py @@ -123,10 +123,7 @@ def _url(self) -> str: return f"{self._host}:{self._port}" if self._use_localhost else self._host def _attach_plugin(self, plugin: Optional[BaseStatePlugin]) -> None: - if plugin is not None: - plugin = plugin - else: - plugin = AppStatePlugin() + plugin = plugin if plugin is not None else AppStatePlugin() self._plugin = plugin @staticmethod diff --git a/src/lightning/app/utilities/tracer.py b/src/lightning/app/utilities/tracer.py index 7aac8b1ca4b93..c5a0e56b01264 100644 --- a/src/lightning/app/utilities/tracer.py +++ b/src/lightning/app/utilities/tracer.py @@ -75,10 +75,7 @@ def fn_with_tracing(self, *args: Any, **kwargs: Any): trace_entry["call"] = {"start": time.time_ns()} - if not is_class_method: - ret = fn(self, *args, **kwargs) - else: - ret = fn(*args, **kwargs) + ret = fn(self, *args, **kwargs) if not is_class_method else fn(*args, **kwargs) trace_entry["call"]["end"] = time.time_ns() diff --git a/src/lightning/fabric/cli.py b/src/lightning/fabric/cli.py index d63579fdc711b..abcb8f195abcb 100644 --- a/src/lightning/fabric/cli.py +++ b/src/lightning/fabric/cli.py @@ -162,10 +162,7 @@ def _torchrun_launch(args: Namespace, script_args: List[str]) -> None: """This will invoke `torchrun` programmatically to launch the given script in new processes.""" import torch.distributed.run as torchrun - if args.strategy == "dp": - num_processes = 1 - else: - num_processes = _get_num_processes(args.accelerator, args.devices) + num_processes = 1 if args.strategy == "dp" else _get_num_processes(args.accelerator, args.devices) torchrun_args = [ f"--nproc_per_node={num_processes}", diff --git a/src/lightning/fabric/connector.py b/src/lightning/fabric/connector.py index 69d98ef174ec4..065ce2bb07a0a 100644 --- a/src/lightning/fabric/connector.py +++ b/src/lightning/fabric/connector.py @@ -276,23 +276,22 @@ def _check_config_and_set_final_flags( else: self._cluster_environment_flag = getattr(self._strategy_flag, "cluster_environment") - if hasattr(self._strategy_flag, "parallel_devices"): - if self._strategy_flag.parallel_devices: - if self._strategy_flag.parallel_devices[0].type == "cpu": - if self._accelerator_flag and self._accelerator_flag not in ("auto", "cpu"): - raise ValueError( - f"CPU parallel_devices set through {self._strategy_flag.__class__.__name__} class," - f" but accelerator set to {self._accelerator_flag}, please choose one device type" - ) - self._accelerator_flag = "cpu" - if self._strategy_flag.parallel_devices[0].type 
== "cuda": - if self._accelerator_flag and self._accelerator_flag not in ("auto", "cuda", "gpu"): - raise ValueError( - f"GPU parallel_devices set through {self._strategy_flag.__class__.__name__} class," - f" but accelerator set to {self._accelerator_flag}, please choose one device type" - ) - self._accelerator_flag = "cuda" - self._parallel_devices = self._strategy_flag.parallel_devices + if hasattr(self._strategy_flag, "parallel_devices") and self._strategy_flag.parallel_devices: + if self._strategy_flag.parallel_devices[0].type == "cpu": + if self._accelerator_flag and self._accelerator_flag not in ("auto", "cpu"): + raise ValueError( + f"CPU parallel_devices set through {self._strategy_flag.__class__.__name__} class," + f" but accelerator set to {self._accelerator_flag}, please choose one device type" + ) + self._accelerator_flag = "cpu" + if self._strategy_flag.parallel_devices[0].type == "cuda": + if self._accelerator_flag and self._accelerator_flag not in ("auto", "cuda", "gpu"): + raise ValueError( + f"GPU parallel_devices set through {self._strategy_flag.__class__.__name__} class," + f" but accelerator set to {self._accelerator_flag}, please choose one device type" + ) + self._accelerator_flag = "cuda" + self._parallel_devices = self._strategy_flag.parallel_devices def _check_device_config_and_set_final_flags(self, devices: Union[List[int], str, int], num_nodes: int) -> None: self._num_nodes_flag = int(num_nodes) if num_nodes is not None else 1 diff --git a/src/lightning/fabric/utilities/data.py b/src/lightning/fabric/utilities/data.py index d49486ddbaca3..14b033f3ecb8b 100644 --- a/src/lightning/fabric/utilities/data.py +++ b/src/lightning/fabric/utilities/data.py @@ -36,10 +36,7 @@ class _WrapAttrTag(LightningEnum): def __call__(self, *args: Any) -> None: fn: Union[Callable[[object, str], None], Callable[[object, str, Any], None]] - if self == self.SET: - fn = setattr - else: - fn = delattr + fn = setattr if self == self.SET else delattr return fn(*args) diff --git a/src/lightning/fabric/utilities/logger.py b/src/lightning/fabric/utilities/logger.py index be2f2e2087146..f9d4b586f0dfb 100644 --- a/src/lightning/fabric/utilities/logger.py +++ b/src/lightning/fabric/utilities/logger.py @@ -115,7 +115,7 @@ def _sanitize_params(params: Dict[str, Any]) -> Dict[str, Any]: 'namespace': 'Namespace(foo=3)', 'string': 'abc'} """ - for k in params.keys(): + for k in params: # convert relevant np scalars to python types first (instead of str) if isinstance(params[k], (np.bool_, np.integer, np.floating)): params[k] = params[k].item() diff --git a/src/lightning/pytorch/callbacks/finetuning.py b/src/lightning/pytorch/callbacks/finetuning.py index ddc4cdaceea0c..e4a68024ea5f4 100644 --- a/src/lightning/pytorch/callbacks/finetuning.py +++ b/src/lightning/pytorch/callbacks/finetuning.py @@ -87,7 +87,7 @@ def state_dict(self) -> Dict[str, Any]: def load_state_dict(self, state_dict: Dict[str, Any]) -> None: self._restarting = True - if "internal_optimizer_metadata" in state_dict: + if "internal_optimizer_metadata" in state_dict: # noqa: SIM401 self._internal_optimizer_metadata = state_dict["internal_optimizer_metadata"] else: # compatibility to load from old checkpoints before PR #11887 diff --git a/src/lightning/pytorch/callbacks/lr_monitor.py b/src/lightning/pytorch/callbacks/lr_monitor.py index fef524444de1a..3cc76d55061b6 100644 --- a/src/lightning/pytorch/callbacks/lr_monitor.py +++ b/src/lightning/pytorch/callbacks/lr_monitor.py @@ -270,10 +270,7 @@ def _find_names_from_schedulers( 
seen_optimizer_types: DefaultDict[Type[Optimizer], int] = defaultdict(int) for config in lr_scheduler_configs: sch = config.scheduler - if config.name is not None: - name = config.name - else: - name = "lr-" + sch.optimizer.__class__.__name__ + name = config.name if config.name is not None else "lr-" + sch.optimizer.__class__.__name__ updated_names = self._check_duplicates_and_update_name( sch.optimizer, name, seen_optimizers, seen_optimizer_types, config @@ -316,9 +313,7 @@ def _check_duplicates_and_update_name( ) -> List[str]: seen_optimizers.append(optimizer) optimizer_cls = type(optimizer) - if lr_scheduler_config is not None and lr_scheduler_config.name is None: - seen_optimizer_types[optimizer_cls] += 1 - elif lr_scheduler_config is None: + if lr_scheduler_config is None or lr_scheduler_config.name is None: seen_optimizer_types[optimizer_cls] += 1 # Multiple param groups for the same optimizer diff --git a/src/lightning/pytorch/callbacks/pruning.py b/src/lightning/pytorch/callbacks/pruning.py index 74cacf68e5327..5cffe6171707e 100644 --- a/src/lightning/pytorch/callbacks/pruning.py +++ b/src/lightning/pytorch/callbacks/pruning.py @@ -406,7 +406,7 @@ def _make_pruning_permanent_on_state_dict(self, pl_module: LightningModule) -> D state_dict = pl_module.state_dict() # find the mask and the original weights. - map_pruned_params = {k.replace("_mask", "") for k in state_dict.keys() if k.endswith("_mask")} + map_pruned_params = {k.replace("_mask", "") for k in state_dict if k.endswith("_mask")} for tensor_name in map_pruned_params: orig = state_dict.pop(tensor_name + "_orig") mask = state_dict.pop(tensor_name + "_mask") diff --git a/src/lightning/pytorch/core/module.py b/src/lightning/pytorch/core/module.py index f66c0d0e2c000..965dba2d41f47 100644 --- a/src/lightning/pytorch/core/module.py +++ b/src/lightning/pytorch/core/module.py @@ -205,9 +205,9 @@ def trainer(self, trainer: Optional["pl.Trainer"]) -> None: for v in self.children(): if isinstance(v, LightningModule): v.trainer = trainer # type: ignore[assignment] - if not _TORCH_GREATER_EQUAL_2_0: # https://github.com/pytorch/pytorch/issues/95857 - if trainer is not None and not isinstance(trainer, weakref.ProxyTypes): - trainer = weakref.proxy(trainer) + # https://github.com/pytorch/pytorch/issues/95857 + if not _TORCH_GREATER_EQUAL_2_0 and trainer is not None and not isinstance(trainer, weakref.ProxyTypes): + trainer = weakref.proxy(trainer) self._trainer = trainer @property diff --git a/src/lightning/pytorch/core/optimizer.py b/src/lightning/pytorch/core/optimizer.py index 65fba2a29aa1e..2c66045aa8ff5 100644 --- a/src/lightning/pytorch/core/optimizer.py +++ b/src/lightning/pytorch/core/optimizer.py @@ -56,12 +56,9 @@ def optimizer(self) -> Optimizer: def _to_lightning_optimizer( cls, optimizer: Union[Optimizer, "LightningOptimizer"], strategy: "pl.strategies.Strategy" ) -> "LightningOptimizer": - if isinstance(optimizer, LightningOptimizer): - # the user could return a `LightningOptimizer` from `configure_optimizers`, see test: - # tests/core/test_lightning_optimizer.py::test_lightning_optimizer[False] - lightning_optimizer = optimizer - else: - lightning_optimizer = cls(optimizer) + # the user could return a `LightningOptimizer` from `configure_optimizers`, see test: + # tests/core/test_lightning_optimizer.py::test_lightning_optimizer[False] + lightning_optimizer = optimizer if isinstance(optimizer, LightningOptimizer) else cls(optimizer) lightning_optimizer._strategy = proxy(strategy) return lightning_optimizer @@ -303,7 +300,7 @@ 
def _configure_schedulers_manual_opt(schedulers: list) -> List[LRSchedulerConfig # interval is not in this list even though the user needs to manually call the scheduler because # the `LearningRateMonitor` callback needs to check its value to know when to log the learning rate invalid_keys = {"reduce_on_plateau", "monitor", "strict"} - keys_to_warn = [k for k in scheduler.keys() if k in invalid_keys] + keys_to_warn = [k for k in scheduler if k in invalid_keys] if keys_to_warn: rank_zero_warn( diff --git a/src/lightning/pytorch/core/saving.py b/src/lightning/pytorch/core/saving.py index 2cbdd165b9064..5aa4abe613b89 100644 --- a/src/lightning/pytorch/core/saving.py +++ b/src/lightning/pytorch/core/saving.py @@ -271,12 +271,11 @@ def load_hparams_from_yaml(config_yaml: _PATH, use_omegaconf: bool = True) -> Di with fs.open(config_yaml, "r") as fp: hparams = yaml.full_load(fp) - if _OMEGACONF_AVAILABLE: - if use_omegaconf: - try: - return OmegaConf.create(hparams) - except (UnsupportedValueType, ValidationError): - pass + if _OMEGACONF_AVAILABLE and use_omegaconf: + try: + return OmegaConf.create(hparams) + except (UnsupportedValueType, ValidationError): + pass return hparams diff --git a/src/lightning/pytorch/loggers/neptune.py b/src/lightning/pytorch/loggers/neptune.py index c452daac22d52..d24935c0eeb09 100644 --- a/src/lightning/pytorch/loggers/neptune.py +++ b/src/lightning/pytorch/loggers/neptune.py @@ -19,6 +19,7 @@ "NeptuneLogger", ] +import contextlib import logging import os from argparse import Namespace @@ -263,10 +264,8 @@ def _retrieve_run_data(self) -> None: def _neptune_init_args(self) -> Dict: args: Dict = {} # Backward compatibility in case of previous version retrieval - try: + with contextlib.suppress(AttributeError): args = self._neptune_run_kwargs - except AttributeError: - pass if self._project_name is not None: args["project"] = self._project_name @@ -472,7 +471,7 @@ def after_save_checkpoint(self, checkpoint_callback: Checkpoint) -> None: # save best k models if hasattr(checkpoint_callback, "best_k_models"): - for key in checkpoint_callback.best_k_models.keys(): + for key in checkpoint_callback.best_k_models: model_name = self._get_full_model_name(key, checkpoint_callback) file_names.add(model_name) self.run[f"{checkpoints_namespace}/{model_name}"].upload(key) diff --git a/src/lightning/pytorch/loggers/utilities.py b/src/lightning/pytorch/loggers/utilities.py index a9e4afc8d87a0..1781776beda56 100644 --- a/src/lightning/pytorch/loggers/utilities.py +++ b/src/lightning/pytorch/loggers/utilities.py @@ -70,11 +70,11 @@ def _log_hyperparams(trainer: "pl.Trainer") -> None: inconsistent_keys = [] for key in lightning_hparams.keys() & datamodule_hparams.keys(): lm_val, dm_val = lightning_hparams[key], datamodule_hparams[key] - if type(lm_val) != type(dm_val): - inconsistent_keys.append(key) - elif isinstance(lm_val, Tensor) and id(lm_val) != id(dm_val): - inconsistent_keys.append(key) - elif lm_val != dm_val: + if ( + type(lm_val) != type(dm_val) + or (isinstance(lm_val, Tensor) and id(lm_val) != id(dm_val)) + or lm_val != dm_val + ): inconsistent_keys.append(key) if inconsistent_keys: raise RuntimeError( diff --git a/src/lightning/pytorch/loggers/wandb.py b/src/lightning/pytorch/loggers/wandb.py index 305266946561e..b303ddc5957e0 100644 --- a/src/lightning/pytorch/loggers/wandb.py +++ b/src/lightning/pytorch/loggers/wandb.py @@ -477,7 +477,7 @@ def log_image(self, key: str, images: List[Any], step: Optional[int] = None, **k for k, v in kwargs.items(): if len(v) != n: raise 
ValueError(f"Expected {n} items but only found {len(v)} for {k}") - kwarg_list = [{k: kwargs[k][i] for k in kwargs.keys()} for i in range(n)] + kwarg_list = [{k: kwargs[k][i] for k in kwargs} for i in range(n)] metrics = {key: [wandb.Image(img, **kwarg) for img, kwarg in zip(images, kwarg_list)]} self.log_metrics(metrics, step) diff --git a/src/lightning/pytorch/loops/utilities.py b/src/lightning/pytorch/loops/utilities.py index 7c9fd1a190fe5..d063c965e9025 100644 --- a/src/lightning/pytorch/loops/utilities.py +++ b/src/lightning/pytorch/loops/utilities.py @@ -156,14 +156,14 @@ def _decorator(self: _Loop, *args: Any, **kwargs: Any) -> Any: if not hasattr(self, "inference_mode"): raise TypeError(f"`{type(self).__name__}.inference_mode` needs to be defined") context_manager: Type[ContextManager] - if dist.is_available() and dist.is_initialized() and dist.get_backend() == "gloo": + if dist.is_available() and dist.is_initialized() and dist.get_backend() == "gloo": # noqa: SIM114 # gloo backend does not work properly. # https://github.com/Lightning-AI/lightning/pull/12715/files#r854569110 # TODO: explore why and possibly open an issue in PyTorch repository context_manager = torch.no_grad - elif isinstance(self.trainer.accelerator, TPUAccelerator): + elif isinstance(self.trainer.accelerator, TPUAccelerator): # noqa: SIM114 context_manager = torch.no_grad - elif _TORCH_GREATER_EQUAL_1_13 and isinstance(self.trainer.strategy, FSDPStrategy): + elif _TORCH_GREATER_EQUAL_1_13 and isinstance(self.trainer.strategy, FSDPStrategy): # noqa: SIM114 # https://github.com/pytorch/pytorch/issues/95957 context_manager = torch.no_grad elif _TORCH_EQUAL_2_0 and self.trainer.lightning_module._compiler_ctx is not None: diff --git a/src/lightning/pytorch/profilers/simple.py b/src/lightning/pytorch/profilers/simple.py index 7facafe9cb140..870d2b71abc34 100644 --- a/src/lightning/pytorch/profilers/simple.py +++ b/src/lightning/pytorch/profilers/simple.py @@ -101,7 +101,7 @@ def summary(self) -> str: if self.extended: if len(self.recorded_durations) > 0: - max_key = max(len(k) for k in self.recorded_durations.keys()) + max_key = max(len(k) for k in self.recorded_durations) def log_row_extended(action: str, mean: str, num_calls: str, total: str, per: str) -> str: row = f"{sep}| {action:<{max_key}s}\t| {mean:<15}\t|" diff --git a/src/lightning/pytorch/strategies/ddp.py b/src/lightning/pytorch/strategies/ddp.py index 7614ab21b8ad2..12b33f20f7c91 100644 --- a/src/lightning/pytorch/strategies/ddp.py +++ b/src/lightning/pytorch/strategies/ddp.py @@ -227,11 +227,7 @@ def _enable_model_averaging(self) -> None: optimizer = optimizer._optimizer is_distributed_optimizer = isinstance(optimizer, DistributedOptimizer) if not _IS_WINDOWS else False - if ( - is_distributed_optimizer - or isinstance(optimizer, ZeroRedundancyOptimizer) - or isinstance(optimizer, PostLocalSGDOptimizer) - ): + if isinstance(optimizer, (ZeroRedundancyOptimizer, PostLocalSGDOptimizer)) or is_distributed_optimizer: raise ValueError( f"Currently model averaging cannot work with a distributed optimizer of type " f"{optimizer.__class__.__name__}." 
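Note: the hunks above and below repeat a small set of mechanical SIM-style simplifications (see the "noqa: SIM…" markers in several hunks): merging chained isinstance checks into a single call with a tuple of types, iterating dictionaries directly instead of over .keys(), replacing try/except/pass with contextlib.suppress, and collapsing if/else assignments into conditional expressions. The sketch below is a minimal standalone illustration, not part of the patch; all names in it are made up for the example, and it only demonstrates that the rewritten forms behave the same as the originals.

import contextlib

# isinstance accepts a tuple of types, so chained checks collapse into one call.
value = 3
assert isinstance(value, (int, float)) == (isinstance(value, int) or isinstance(value, float))

# Iterating a dict yields its keys, so ".keys()" is redundant in comprehensions and loops.
calls = {"run:1": "done", "latest_call_hash": "abc"}
assert [k for k in calls if k.startswith("run:")] == [k for k in calls.keys() if k.startswith("run:")]

# try/except/pass over a specific exception is equivalent to contextlib.suppress.
def drop_key(mapping: dict, key: str) -> None:
    with contextlib.suppress(KeyError):  # same effect as try: del mapping[key] except KeyError: pass
        del mapping[key]

drop_key(calls, "missing")  # no exception is raised

# if/else assignment versus the equivalent conditional expression.
versions = []
previous_version = versions[-1] if versions else -1
assert previous_version == -1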
diff --git a/src/lightning/pytorch/trainer/connectors/accelerator_connector.py b/src/lightning/pytorch/trainer/connectors/accelerator_connector.py index 7c4b3ab476ae8..9cabdc62f8b48 100644 --- a/src/lightning/pytorch/trainer/connectors/accelerator_connector.py +++ b/src/lightning/pytorch/trainer/connectors/accelerator_connector.py @@ -305,23 +305,22 @@ def _check_config_and_set_final_flags( else: self._cluster_environment_flag = getattr(self._strategy_flag, "cluster_environment") - if hasattr(self._strategy_flag, "parallel_devices"): - if self._strategy_flag.parallel_devices: - if self._strategy_flag.parallel_devices[0].type == "cpu": - if self._accelerator_flag and self._accelerator_flag not in ("auto", "cpu"): - raise MisconfigurationException( - f"CPU parallel_devices set through {self._strategy_flag.__class__.__name__} class," - f" but accelerator set to {self._accelerator_flag}, please choose one device type" - ) - self._accelerator_flag = "cpu" - if self._strategy_flag.parallel_devices[0].type == "cuda": - if self._accelerator_flag and self._accelerator_flag not in ("auto", "cuda", "gpu"): - raise MisconfigurationException( - f"GPU parallel_devices set through {self._strategy_flag.__class__.__name__} class," - f" but accelerator set to {self._accelerator_flag}, please choose one device type" - ) - self._accelerator_flag = "cuda" - self._parallel_devices = self._strategy_flag.parallel_devices + if hasattr(self._strategy_flag, "parallel_devices") and self._strategy_flag.parallel_devices: + if self._strategy_flag.parallel_devices[0].type == "cpu": + if self._accelerator_flag and self._accelerator_flag not in ("auto", "cpu"): + raise MisconfigurationException( + f"CPU parallel_devices set through {self._strategy_flag.__class__.__name__} class," + f" but accelerator set to {self._accelerator_flag}, please choose one device type" + ) + self._accelerator_flag = "cpu" + if self._strategy_flag.parallel_devices[0].type == "cuda": + if self._accelerator_flag and self._accelerator_flag not in ("auto", "cuda", "gpu"): + raise MisconfigurationException( + f"GPU parallel_devices set through {self._strategy_flag.__class__.__name__} class," + f" but accelerator set to {self._accelerator_flag}, please choose one device type" + ) + self._accelerator_flag = "cuda" + self._parallel_devices = self._strategy_flag.parallel_devices def _check_device_config_and_set_final_flags( self, @@ -547,11 +546,14 @@ def _validate_precision_choice(self) -> None: f"The `TPUAccelerator` can only be used with a `TPUPrecisionPlugin`," f" found: {self._precision_plugin_flag}." ) - if isinstance(self.accelerator, HPUAccelerator): - if self._precision_flag not in ("16-mixed", "bf16-mixed", "32-true"): - raise MisconfigurationException( - f"`Trainer(accelerator='hpu', precision={self._precision_flag!r})` is not supported." - ) + if isinstance(self.accelerator, HPUAccelerator) and self._precision_flag not in ( + "16-mixed", + "bf16-mixed", + "32-true", + ): + raise MisconfigurationException( + f"`Trainer(accelerator='hpu', precision={self._precision_flag!r})` is not supported." 
+ ) def _lazy_init_strategy(self) -> None: """Lazily set missing attributes on the previously instantiated strategy.""" diff --git a/src/lightning/pytorch/trainer/connectors/callback_connector.py b/src/lightning/pytorch/trainer/connectors/callback_connector.py index 1055353542bbf..8195a5c4a3b52 100644 --- a/src/lightning/pytorch/trainer/connectors/callback_connector.py +++ b/src/lightning/pytorch/trainer/connectors/callback_connector.py @@ -226,10 +226,11 @@ def _configure_external_callbacks() -> List[Callback]: if _PYTHON_GREATER_EQUAL_3_8_0: from importlib.metadata import entry_points - if _PYTHON_GREATER_EQUAL_3_10_0: - factories = entry_points(group=group) - else: - factories = entry_points().get(group, {}) # type: ignore[arg-type] + factories = ( + entry_points(group=group) + if _PYTHON_GREATER_EQUAL_3_10_0 + else entry_points().get(group, {}) # type: ignore[arg-type] + ) else: from pkg_resources import iter_entry_points diff --git a/src/lightning/pytorch/trainer/trainer.py b/src/lightning/pytorch/trainer/trainer.py index 1aad537e84a00..7d44f5bfedeec 100644 --- a/src/lightning/pytorch/trainer/trainer.py +++ b/src/lightning/pytorch/trainer/trainer.py @@ -1424,9 +1424,9 @@ def train_dataloader(self) -> Optional[TRAIN_DATALOADERS]: @property def val_dataloaders(self) -> Optional[EVAL_DATALOADERS]: """The validation dataloader(s) used during ``trainer.fit()`` or ``trainer.validate()``.""" - if (combined_loader := self.fit_loop.epoch_loop.val_loop._combined_loader) is not None: - return combined_loader.iterables - elif (combined_loader := self.validate_loop._combined_loader) is not None: + if (combined_loader := self.fit_loop.epoch_loop.val_loop._combined_loader) is not None or ( + combined_loader := self.validate_loop._combined_loader + ) is not None: return combined_loader.iterables @property diff --git a/src/lightning/pytorch/tuner/lr_finder.py b/src/lightning/pytorch/tuner/lr_finder.py index 3c3ee387b9ec8..dcbafa0b0da7e 100644 --- a/src/lightning/pytorch/tuner/lr_finder.py +++ b/src/lightning/pytorch/tuner/lr_finder.py @@ -403,11 +403,14 @@ def on_train_batch_end( smoothed_loss = self.avg_loss / (1 - self.beta ** (current_step + 1)) # Check if we diverging - if self.early_stop_threshold is not None: - if current_step > 1 and smoothed_loss > self.early_stop_threshold * self.best_loss: - trainer.should_stop = True # stop signal - if self.progress_bar: - self.progress_bar.close() + if ( + self.early_stop_threshold is not None + and current_step > 1 + and smoothed_loss > self.early_stop_threshold * self.best_loss + ): + trainer.should_stop = True # stop signal + if self.progress_bar: + self.progress_bar.close() trainer.should_stop = trainer.strategy.broadcast(trainer.should_stop) diff --git a/src/lightning/pytorch/utilities/deepspeed.py b/src/lightning/pytorch/utilities/deepspeed.py index 797deb60e00ac..8ef78825c9a4a 100644 --- a/src/lightning/pytorch/utilities/deepspeed.py +++ b/src/lightning/pytorch/utilities/deepspeed.py @@ -97,7 +97,7 @@ def convert_zero_checkpoint_to_fp32_state_dict( client_state = {key: value for key, value in client_state.items() if key not in deepspeed_states} # State dict keys will include reference to wrapper _LightningModuleWrapperBase # Delete `module` prefix before saving. 
- state_dict = {k.partition("module.")[2]: state_dict[k] for k in state_dict.keys()} + state_dict = {k.partition("module.")[2]: state_dict[k] for k in state_dict} client_state["state_dict"] = state_dict print(f"Saving fp32 state dict to {output_file}") diff --git a/src/lightning/pytorch/utilities/migration/utils.py b/src/lightning/pytorch/utilities/migration/utils.py index ee09bcd5d7d18..61517a221bc93 100644 --- a/src/lightning/pytorch/utilities/migration/utils.py +++ b/src/lightning/pytorch/utilities/migration/utils.py @@ -119,10 +119,8 @@ def _pl_migrate_checkpoint(checkpoint: _CHECKPOINT, checkpoint_path: Optional[_P # include the full upgrade command, including the path to the loaded file in the error message, # so user can copy-paste and run if they want - if not _IS_WINDOWS: # side-step bug: ValueError: path is on mount 'C:', start on mount 'D:' - path_hint = os.path.relpath(checkpoint_path, os.getcwd()) - else: - path_hint = os.path.abspath(checkpoint_path) + # side-step bug: ValueError: path is on mount 'C:', start on mount 'D:' + path_hint = os.path.relpath(checkpoint_path, os.getcwd()) if not _IS_WINDOWS else os.path.abspath(checkpoint_path) _log.info( f"Lightning automatically upgraded your loaded checkpoint from v{old_version} to v{new_version}." " To apply the upgrade to your files permanently, run" diff --git a/src/lightning/pytorch/utilities/parsing.py b/src/lightning/pytorch/utilities/parsing.py index a17d580593899..214fa7ff526c5 100644 --- a/src/lightning/pytorch/utilities/parsing.py +++ b/src/lightning/pytorch/utilities/parsing.py @@ -93,7 +93,7 @@ def _get_init_args(frame: types.FrameType) -> Tuple[Optional[Any], Dict[str, Any filtered_vars = [n for n in (self_var, args_var, kwargs_var) if n] exclude_argnames = (*filtered_vars, "__class__", "frame", "frame_args") # only collect variables that appear in the signature - local_args = {k: local_vars[k] for k in init_parameters.keys()} + local_args = {k: local_vars[k] for k in init_parameters} # kwargs_var might be None => raised an error by mypy if kwargs_var: local_args.update(local_args.get(kwargs_var, {})) diff --git a/src/lightning/store/save.py b/src/lightning/store/save.py index 637332ec124c2..22955560dc21f 100644 --- a/src/lightning/store/save.py +++ b/src/lightning/store/save.py @@ -213,24 +213,24 @@ def _make_tar(tmpdir, archive_output_path): def upload_from_file(src, dst): file_size = os.path.getsize(src) - with open(src, "rb") as fd: - with tqdm( - desc="Uploading", - total=file_size, - unit="B", - unit_scale=True, - unit_divisor=1024, - ) as t: - reader_wrapper = CallbackIOWrapper(t.update, fd, "read") - response = requests.put(dst, data=reader_wrapper) - response.raise_for_status() + with open(src, "rb") as fd, tqdm( + desc="Uploading", + total=file_size, + unit="B", + unit_scale=True, + unit_divisor=1024, + ) as t: + reader_wrapper = CallbackIOWrapper(t.update, fd, "read") + response = requests.put(dst, data=reader_wrapper) + response.raise_for_status() archive_path = f"{tmpdir}/data.tar.gz" _make_tar(tmpdir, archive_path) if progress_bar: upload_from_file(archive_path, url) else: - requests.put(url, data=open(archive_path, "rb")) + with open(archive_path, "rb") as fo: + requests.put(url, data=fo) def _download_tarfile(download_url: str, output_dir: str, progress_bar: bool) -> None: diff --git a/src/lightning/store/utils.py b/src/lightning/store/utils.py index 0aa7f6abd44f0..585550cfda76c 100644 --- a/src/lightning/store/utils.py +++ b/src/lightning/store/utils.py @@ -30,10 +30,7 @@ def _check_version(version: 
str) -> bool: allowed_chars = "0123456789." if version == "latest": return True - for version_char in version: - if version_char not in allowed_chars: - return False - return True + return all(version_char in allowed_chars for version_char in version) def _split_name(name: str, version: str, l_stage: stage) -> Tuple[str, str, str]: diff --git a/tests/tests_app/cli/test_cd.py b/tests/tests_app/cli/test_cd.py index ef3a9a56d4096..e105daaa81e06 100644 --- a/tests/tests_app/cli/test_cd.py +++ b/tests/tests_app/cli/test_cd.py @@ -15,11 +15,11 @@ def test_cd(monkeypatch): ls = mock.MagicMock() monkeypatch.setattr(cd, "ls", ls) - assert "/" == cd.cd("/") - assert "/" == pwd() + assert cd.cd("/") == "/" + assert pwd() == "/" ls.ls.return_value = ["hero"] - assert "/hero" == cd.cd("hero") - assert "/hero" == pwd() + assert cd.cd("hero") == "/hero" + assert pwd() == "/hero" ls.ls.return_value = ["something_else"] assert f"/hero{os.sep}something_else" == cd.cd("something_else") assert f"/hero{os.sep}something_else" == pwd() @@ -29,30 +29,30 @@ def test_cd(monkeypatch): assert f"/hero{os.sep}something_else" == cd.cd(f"..{os.sep}..") ls.ls.return_value = ["something_else"] assert f"/hero{os.sep}something_else" == pwd() - assert "/hero" == cd.cd("..") - assert "/hero" == pwd() - assert "/" == cd.cd("/") - assert "/" == pwd() + assert cd.cd("..") == "/hero" + assert pwd() == "/hero" + assert cd.cd("/") == "/" + assert pwd() == "/" ls.ls.return_value = ["a"] - assert "/a" == cd.cd("../a") - assert "/a" == pwd() + assert cd.cd("../a") == "/a" + assert pwd() == "/a" ls.ls.return_value = ["thomas"] assert f"/a{os.sep}thomas{os.sep}hello" == cd.cd(f"thomas{os.sep}hello") assert f"/a{os.sep}thomas{os.sep}hello" == pwd() ls.ls.return_value = ["thomas"] assert f"/thomas{os.sep}hello" == cd.cd(f"/thomas{os.sep}hello") assert f"/thomas{os.sep}hello" == pwd() - assert "/" == cd.cd("/") + assert cd.cd("/") == "/" ls.ls.return_value = ["name with spaces"] - assert "/name with spaces" == cd.cd("name with spaces") + assert cd.cd("name with spaces") == "/name with spaces" ls.ls.return_value = ["name with spaces 2"] - assert "/name with spaces/name with spaces 2" == cd.cd("name with spaces 2") + assert cd.cd("name with spaces 2") == "/name with spaces/name with spaces 2" os.remove(cd._CD_FILE) mock_exit = mock.MagicMock() monkeypatch.setattr(cd, "_error_and_exit", mock_exit) - assert "/" == cd.cd("/") + assert cd.cd("/") == "/" ls.ls.return_value = ["project_a"] cd.cd("project_b") assert mock_exit._mock_call_args.args[0] == "no such file or directory: /project_b" diff --git a/tests/tests_app/cli/test_cloud_cli.py b/tests/tests_app/cli/test_cloud_cli.py index 4dceed026d39e..adbc623bd69d3 100644 --- a/tests/tests_app/cli/test_cloud_cli.py +++ b/tests/tests_app/cli/test_cloud_cli.py @@ -146,9 +146,6 @@ def run(): elif isinstance(create_response, RuntimeErrorResponse2): with pytest.raises(RuntimeError, match="The source upload url is empty."): run() - elif isinstance(create_response, RuntimeErrorResponse2): - with pytest.raises(RuntimeError, match="The source upload url is empty."): - run() else: run() mocks_calls = cloud.LocalSourceCodeDir._mock_mock_calls @@ -215,8 +212,9 @@ def test_start_app_exception(message, monkeypatch, caplog): runner = CliRunner() fake_grid_rest_client = partial(FakeLightningClientException, message=message) - with caplog.at_level(logging.ERROR): - with mock.patch("lightning.app.runners.backends.cloud.LightningClient", fake_grid_rest_client): - result = runner.invoke(run_app, [_FILE_PATH, "--cloud", 
"--open-ui=False"], catch_exceptions=False) - assert result.exit_code == 1 + with caplog.at_level(logging.ERROR), mock.patch( + "lightning.app.runners.backends.cloud.LightningClient", fake_grid_rest_client + ): + result = runner.invoke(run_app, [_FILE_PATH, "--cloud", "--open-ui=False"], catch_exceptions=False) + assert result.exit_code == 1 assert caplog.messages == [message] diff --git a/tests/tests_app/cli/test_cmd_init.py b/tests/tests_app/cli/test_cmd_init.py index 0a4bcdd7b4fcd..07ac93fbb1096 100644 --- a/tests/tests_app/cli/test_cmd_init.py +++ b/tests/tests_app/cli/test_cmd_init.py @@ -1,3 +1,4 @@ +import contextlib import os import re import shutil @@ -31,10 +32,8 @@ def test_make_app_template(): # remove the template if there template_dir = os.path.join(os.getcwd(), template_name) - try: + with contextlib.suppress(Exception): shutil.rmtree(template_dir) - except Exception as e: # noqa - pass # create template subprocess.check_output(f"lightning init app {template_name}", shell=True) @@ -55,10 +54,8 @@ def test_make_app_template(): # TODO: verify output # clean up the template dir - try: + with contextlib.suppress(Exception): shutil.rmtree(template_dir) - except Exception as e: # noqa - pass @pytest.mark.skip(reason="need component fast_dev_run to work via CLI") @@ -68,10 +65,8 @@ def test_make_component_template(): # remove the template if there template_dir = os.path.join(os.getcwd(), template_name) - try: + with contextlib.suppress(Exception): shutil.rmtree(template_dir) - except Exception as e: # noqa - pass # create template subprocess.check_output(f"lightning init component {template_name}", shell=True) @@ -92,7 +87,5 @@ def test_make_component_template(): # TODO: verify output # clean up the template dir - try: + with contextlib.suppress(Exception): shutil.rmtree(template_dir) - except Exception as e: # noqa - pass diff --git a/tests/tests_app/cli/test_cmd_react_ui_init.py b/tests/tests_app/cli/test_cmd_react_ui_init.py index 232c582177f95..65a016cf85494 100644 --- a/tests/tests_app/cli/test_cmd_react_ui_init.py +++ b/tests/tests_app/cli/test_cmd_react_ui_init.py @@ -38,13 +38,15 @@ def test_copy_and_setup_react_ui(tmpdir): assert len(files) == 3, "should only be 3 objects: readme.md, example_app.py and ui dir" # make sure index.html has the vite app placeholder - index_content = open(dest_dir + "/ui/dist/index.html").read() + with open(dest_dir + "/ui/dist/index.html") as fo: + index_content = fo.read() assert "Vite App" in index_content # read the compiled js file js_file = [x for x in os.listdir(os.path.join(dest_dir, "ui", "dist", "assets")) if ".js" in x] js_file = os.path.join(dest_dir, f"ui/dist/assets/{js_file[0]}") - index_content = open(js_file).read() + with open(js_file) as fo: + index_content = fo.read() # if this is in the compiled file, the compilation worked and the app will work assert "Total number of prints in your terminal:" in index_content, "react app was not compiled properly" diff --git a/tests/tests_app/cli/test_cp.py b/tests/tests_app/cli/test_cp.py index dea75b1c83191..c9670dcbc00e7 100644 --- a/tests/tests_app/cli/test_cp.py +++ b/tests/tests_app/cli/test_cp.py @@ -60,11 +60,11 @@ def test_cp_local_to_remote(tmpdir, monkeypatch): monkeypatch.setattr(cp, "LightningClient", MagicMock(return_value=client)) - assert "/" == cd("/", verify=False) + assert cd("/", verify=False) == "/" cp.cp(str(tmpdir), "r:.") assert error_and_exit._mock_call_args_list[0].args[0] == "Uploading files at the project level isn't allowed yet." 
- assert "/project-0/app-name-0" == cd("/project-0/app-name-0", verify=False) + assert cd("/project-0/app-name-0", verify=False) == "/project-0/app-name-0" with open(f"{tmpdir}/a.txt", "w") as f: f.write("hello world !") @@ -129,11 +129,11 @@ def test_cp_cloud_to_local(tmpdir, monkeypatch): monkeypatch.setattr(cp, "LightningClient", MagicMock(return_value=client)) - assert "/" == cd("/", verify=False) + assert cd("/", verify=False) == "/" cp.cp(str(tmpdir), "r:.") assert error_and_exit._mock_call_args_list[0].args[0] == "Uploading files at the project level isn't allowed yet." - assert "/project-0/app-name-0" == cd("/project-0/app-name-0", verify=False) + assert cd("/project-0/app-name-0", verify=False) == "/project-0/app-name-0" download_file = MagicMock() monkeypatch.setattr(cp, "_download_file", download_file) @@ -164,7 +164,7 @@ def test_sanitize_path(): @pytest.mark.skipif(sys.platform == "win32", reason="not supported on windows yet") def test_cp_zip_arg_order(monkeypatch): - assert "/" == cd("/", verify=False) + assert cd("/", verify=False) == "/" error_and_exit = MagicMock() monkeypatch.setattr(cp, "_error_and_exit", error_and_exit) @@ -186,7 +186,7 @@ def test_cp_zip_src_path_too_short(monkeypatch): @pytest.mark.skipif(sys.platform == "win32", reason="not supported on windows yet") def test_cp_zip_remote_to_local_cloudspace_artifact(monkeypatch): - assert "/" == cd("/", verify=False) + assert cd("/", verify=False) == "/" token_getter = MagicMock() token_getter._get_api_token.return_value = "my-token" @@ -233,7 +233,7 @@ def test_cp_zip_remote_to_local_cloudspace_artifact(monkeypatch): @pytest.mark.skipif(sys.platform == "win32", reason="not supported on windows yet") def test_cp_zip_remote_to_local_app_artifact(monkeypatch): - assert "/" == cd("/", verify=False) + assert cd("/", verify=False) == "/" token_getter = MagicMock() token_getter._get_api_token.return_value = "my-token" diff --git a/tests/tests_app/cli/test_ls.py b/tests/tests_app/cli/test_ls.py index 3bc8650a74130..bf62ae893202b 100644 --- a/tests/tests_app/cli/test_ls.py +++ b/tests/tests_app/cli/test_ls.py @@ -90,7 +90,7 @@ def fn(*args, prefix, **kwargs): monkeypatch.setattr(ls, "LightningClient", MagicMock(return_value=client)) assert ls.ls() == ["project-0", "project-1", "project 2"] - assert "/project-0" == cd.cd("project-0", verify=False) + assert cd.cd("project-0", verify=False) == "/project-0" assert ls.ls() == ["app name 2", "app-name-0", "app-name-1"] assert f"/project-0{os.sep}app-name-1" == cd.cd("app-name-1", verify=False) @@ -102,9 +102,9 @@ def fn(*args, prefix, **kwargs): assert f"/project-0{os.sep}app-name-1{os.sep}folder_2{os.sep}folder_3" == cd.cd("folder_3", verify=False) assert ls.ls() == ["file_3.txt"] - assert "/project 2" == cd.cd("/project 2", verify=False) + assert cd.cd("/project 2", verify=False) == "/project 2" assert ls.ls() == ["app name 2", "app-name-0", "app-name-1"] - assert "/project 2/app name 2" == cd.cd("app name 2", verify=False) + assert cd.cd("app name 2", verify=False) == "/project 2/app name 2" assert ls.ls() == ["file_1.txt", "folder_1", "folder_2"] os.remove(cd._CD_FILE) diff --git a/tests/tests_app/cli/test_rm.py b/tests/tests_app/cli/test_rm.py index 2f5dae6487e54..daaf419591a98 100644 --- a/tests/tests_app/cli/test_rm.py +++ b/tests/tests_app/cli/test_rm.py @@ -93,7 +93,7 @@ def fn(*args, prefix, **kwargs): monkeypatch.setattr(ls, "LightningClient", MagicMock(return_value=client)) assert ls.ls() == ["project-0", "project-1", "project 2"] - assert "/project-0" == 
cd.cd("project-0", verify=False) + assert cd.cd("project-0", verify=False) == "/project-0" assert f"/project-0{os.sep}app-name-1" == cd.cd("app-name-1", verify=False) diff --git a/tests/tests_app/conftest.py b/tests/tests_app/conftest.py index f3319e7b98d66..6865f91bc5ff9 100644 --- a/tests/tests_app/conftest.py +++ b/tests/tests_app/conftest.py @@ -108,7 +108,7 @@ def caplog(caplog): for name in logging.root.manager.loggerDict if name.startswith("lightning.app") } - for name in propagation_dict.keys(): + for name in propagation_dict: logging.getLogger(name).propagate = True yield caplog diff --git a/tests/tests_app/core/test_lightning_api.py b/tests/tests_app/core/test_lightning_api.py index 9754ef916a673..d8ce61398b60f 100644 --- a/tests/tests_app/core/test_lightning_api.py +++ b/tests/tests_app/core/test_lightning_api.py @@ -1,4 +1,5 @@ import asyncio +import contextlib import logging import multiprocessing as mp import os @@ -522,7 +523,8 @@ def test_configure_api(): time_left -= 0.1 # Test Upload File - files = {"uploaded_file": open(__file__, "rb")} + with open(__file__, "rb") as fo: + files = {"uploaded_file": fo} response = requests.put(f"http://localhost:{APP_SERVER_PORT}/api/v1/upload_file/test", files=files) assert response.json() == "Successfully uploaded 'test' to the Drive" @@ -548,10 +550,8 @@ def test_configure_api(): assert response.status_code == 200 # Stop the Application - try: + with contextlib.suppress(Exception): response = requests.post(url, json=InputRequestModel(index=0, name="hello").dict()) - except Exception: - pass # Teardown time_left = 5 diff --git a/tests/tests_app/core/test_lightning_app.py b/tests/tests_app/core/test_lightning_app.py index 526ab7006d66d..428c6fb9578fe 100644 --- a/tests/tests_app/core/test_lightning_app.py +++ b/tests/tests_app/core/test_lightning_app.py @@ -81,9 +81,7 @@ def __init__(self, cache_calls: bool = True): def run(self): self.counter = self.counter + 1 - if self.cache_calls: - self.has_finished = True - elif self.counter >= 3: + if self.cache_calls or self.counter >= 3: self.has_finished = True @@ -1108,7 +1106,7 @@ def __init__(self, flow): def test_cloud_compute_binding(): cloud_compute.ENABLE_MULTIPLE_WORKS_IN_NON_DEFAULT_CONTAINER = True - assert cloud_compute._CLOUD_COMPUTE_STORE == {} + assert {} == cloud_compute._CLOUD_COMPUTE_STORE flow = FlowCC() assert len(cloud_compute._CLOUD_COMPUTE_STORE) == 2 assert cloud_compute._CLOUD_COMPUTE_STORE["default"].component_names == ["root.work_c"] @@ -1122,10 +1120,10 @@ def test_cloud_compute_binding(): assert cloud_compute._CLOUD_COMPUTE_STORE["default"].component_names == ["root.w.w.work_c"] assert cloud_compute._CLOUD_COMPUTE_STORE["a"].component_names == ["root.w.w.work_a", "root.w.w.work_b"] - assert "__cloud_compute__" == flow.state["vars"]["cloud_compute"]["type"] - assert "__cloud_compute__" == flow.work_a.state["vars"]["_cloud_compute"]["type"] - assert "__cloud_compute__" == flow.work_b.state["vars"]["_cloud_compute"]["type"] - assert "__cloud_compute__" == flow.work_c.state["vars"]["_cloud_compute"]["type"] + assert flow.state["vars"]["cloud_compute"]["type"] == "__cloud_compute__" + assert flow.work_a.state["vars"]["_cloud_compute"]["type"] == "__cloud_compute__" + assert flow.work_b.state["vars"]["_cloud_compute"]["type"] == "__cloud_compute__" + assert flow.work_c.state["vars"]["_cloud_compute"]["type"] == "__cloud_compute__" work_a_id = flow.work_a.state["vars"]["_cloud_compute"]["_internal_id"] work_b_id = flow.work_b.state["vars"]["_cloud_compute"]["_internal_id"] 
work_c_id = flow.work_c.state["vars"]["_cloud_compute"]["_internal_id"] diff --git a/tests/tests_app/core/test_lightning_flow.py b/tests/tests_app/core/test_lightning_flow.py index 52ef5bfe80f94..62a787ea3c508 100644 --- a/tests/tests_app/core/test_lightning_flow.py +++ b/tests/tests_app/core/test_lightning_flow.py @@ -960,6 +960,5 @@ def run(self): def test_deprecation_warning_exit(): - with pytest.raises(ExitAppException): - with pytest.warns(DeprecationWarning, match="*Use LightningFlow.stop instead"): - RootFlowReady()._exit() + with pytest.raises(ExitAppException), pytest.warns(DeprecationWarning, match="*Use LightningFlow.stop instead"): + RootFlowReady()._exit() diff --git a/tests/tests_app/core/test_lightning_work.py b/tests/tests_app/core/test_lightning_work.py index bc2b09bd5fcdf..14d87fbabf23d 100644 --- a/tests/tests_app/core/test_lightning_work.py +++ b/tests/tests_app/core/test_lightning_work.py @@ -1,3 +1,4 @@ +import contextlib from queue import Empty from re import escape from unittest.mock import MagicMock, Mock @@ -196,10 +197,8 @@ def run(self): copy_request_queue, copy_response_queue, ) - try: + with contextlib.suppress(Exception, Empty): work_runner() - except (Exception, Empty): - pass res = delta_queue._queue[0].delta.to_dict()["iterable_item_added"] L = len(delta_queue._queue) - 1 diff --git a/tests/tests_app/runners/test_cloud.py b/tests/tests_app/runners/test_cloud.py index 682fcc2d40c44..f6262bc535a6d 100644 --- a/tests/tests_app/runners/test_cloud.py +++ b/tests/tests_app/runners/test_cloud.py @@ -1,3 +1,4 @@ +import contextlib import logging import os import pathlib @@ -1916,12 +1917,8 @@ def test_print_specs(tmpdir, caplog, monkeypatch, print_format, expected): cloud.LIGHTNING_CLOUD_PRINT_SPECS = print_format try: - with caplog.at_level(logging.INFO): - try: - cloud_runtime.dispatch() - except SystemExit: - # Expected behaviour - pass + with caplog.at_level(logging.INFO), contextlib.suppress(SystemExit): + cloud_runtime.dispatch() lines = caplog.text.split("\n") diff --git a/tests/tests_app/storage/test_copier.py b/tests/tests_app/storage/test_copier.py index c7767519fdb9e..fd4e274b91f62 100644 --- a/tests/tests_app/storage/test_copier.py +++ b/tests/tests_app/storage/test_copier.py @@ -143,6 +143,7 @@ def test_copy_files_with_exception(tmpdir): pathlib.Path(src, "file.txt").touch() dst = pathlib.Path(tmpdir, "dest") - with mock.patch("lightning.app.storage.copier._filesystem", fs_mock): - with pytest.raises(ValueError, match="error from thread"): - _copy_files(src, dst) + with mock.patch("lightning.app.storage.copier._filesystem", fs_mock), pytest.raises( + ValueError, match="error from thread" + ): + _copy_files(src, dst) diff --git a/tests/tests_app/storage/test_path.py b/tests/tests_app/storage/test_path.py index d7c765b407dc9..6a452efce8163 100644 --- a/tests/tests_app/storage/test_path.py +++ b/tests/tests_app/storage/test_path.py @@ -329,7 +329,8 @@ def run(self): else: assert root.path_component.path_one._origin is None assert root.path_component.path_one._consumer is None - assert open(root.path_component.path_two).readlines() == ["Hello"] + with open(root.path_component.path_two) as fo: + assert fo.readlines() == ["Hello"] class SourceWork(LightningWork): @@ -557,9 +558,10 @@ def test_path_get_overwrite(tmpdir): def test_path_get_error_in_flow_context(): - with pytest.raises(RuntimeError, match=escape("`Path.get()` can only be called from within the `run()`")): - with _context("flow"): - Path().get() + with pytest.raises(RuntimeError, 
match=escape("`Path.get()` can only be called from within the `run()`")), _context( + "flow" + ): + Path().get() def test_path_response_with_exception(tmpdir): @@ -582,9 +584,10 @@ def test_path_response_with_exception(tmpdir): ) ) - with pytest.raises(RuntimeError, match="An exception was raised while trying to transfer the contents at"): - with _context("work"): - path.get() + with pytest.raises( + RuntimeError, match="An exception was raised while trying to transfer the contents at" + ), _context("work"): + path.get() def test_path_response_not_matching_reqeuest(tmpdir): diff --git a/tests/tests_app/structures/test_structures.py b/tests/tests_app/structures/test_structures.py index eddffd70908c6..f921607a50734 100644 --- a/tests/tests_app/structures/test_structures.py +++ b/tests/tests_app/structures/test_structures.py @@ -381,10 +381,7 @@ def run(self): app.root.restarting = True MultiProcessRuntime(app, start_server=False).dispatch() - if run_once_iterable: - expected_value = 1 - else: - expected_value = 1 if cache_calls else 2 + expected_value = 1 if run_once_iterable else 1 if cache_calls else 2 assert app.root.iter[0 if use_list else "0"].counter == expected_value assert app.root.iter[1 if use_list else "1"].counter == expected_value assert app.root.iter[2 if use_list else "2"].counter == expected_value diff --git a/tests/tests_app/utilities/packaging/test_cloud_compute.py b/tests/tests_app/utilities/packaging/test_cloud_compute.py index d954b2507339f..6ce4d93ae8832 100644 --- a/tests/tests_app/utilities/packaging/test_cloud_compute.py +++ b/tests/tests_app/utilities/packaging/test_cloud_compute.py @@ -60,7 +60,7 @@ def test_cloud_compute_clone(): assert len(c1_dict) == len(c2_dict) - for k in c1_dict.keys(): + for k in c1_dict: if k == "_internal_id": assert c1_dict[k] != c2_dict[k] else: diff --git a/tests/tests_app/utilities/test_port.py b/tests/tests_app/utilities/test_port.py index df173aac55de7..8b47629413a8e 100644 --- a/tests/tests_app/utilities/test_port.py +++ b/tests/tests_app/utilities/test_port.py @@ -11,7 +11,7 @@ def test_find_lit_app_port(monkeypatch): client = MagicMock() monkeypatch.setattr(port, "LightningClient", MagicMock(return_value=client)) - assert 5701 == _find_lit_app_port(5701) + assert _find_lit_app_port(5701) == 5701 resp = MagicMock() lit_app = MagicMock() @@ -42,7 +42,7 @@ def test_enable_port(monkeypatch): client = MagicMock() monkeypatch.setattr(port, "LightningClient", MagicMock(return_value=client)) - assert 5701 == _find_lit_app_port(5701) + assert _find_lit_app_port(5701) == 5701 resp = MagicMock() lit_app = MagicMock() @@ -73,7 +73,7 @@ def test_disable_port(monkeypatch): client = MagicMock() monkeypatch.setattr(port, "LightningClient", MagicMock(return_value=client)) - assert 5701 == _find_lit_app_port(5701) + assert _find_lit_app_port(5701) == 5701 resp = MagicMock() lit_app = MagicMock() diff --git a/tests/tests_app/utilities/test_proxies.py b/tests/tests_app/utilities/test_proxies.py index 83b2827768cc5..d1ac86e42dfd0 100644 --- a/tests/tests_app/utilities/test_proxies.py +++ b/tests/tests_app/utilities/test_proxies.py @@ -1,3 +1,4 @@ +import contextlib import logging import os import pathlib @@ -147,10 +148,8 @@ def get(self, timeout: int = 0): copy_request_queue, copy_response_queue, ) - try: + with contextlib.suppress(Empty, Exception): work_runner() - except (Empty, Exception): - pass assert readiness_queue._queue[0] if parallel: @@ -217,9 +216,8 @@ def __init__(self): proxy_run = ProxyWorkRun(work.run, "some", work, Mock()) warn_ctx = 
pytest.warns(UserWarning, match="You passed a the value") if warning_expected else pytest.warns(None) - with warn_ctx as record: - with pytest.raises(CacheMissException): - proxy_run(path) + with warn_ctx as record, pytest.raises(CacheMissException): + proxy_run(path) assert warning_expected or all("You passed a the value" not in str(msg.message) for msg in record) @@ -349,10 +347,8 @@ def run(self, *args, **kwargs): copy_response_queue=_MockQueue(), ) - try: + with contextlib.suppress(ExitAppException): runner() - except ExitAppException: - pass path1.exists_remote.assert_called_once() path1.get.assert_not_called() @@ -440,10 +436,8 @@ def run(self): copy_response_queue=_MockQueue(), ) - try: + with contextlib.suppress(ExitAppException): runner() - except ExitAppException: - pass assert path_mock.get.call_count == expected_get @@ -638,9 +632,8 @@ def run(self): self.w.counter = 0 self.w.run("") self.counter = 2 - elif self.counter == 2: - if len(self.w.vars) == 10 and self.w.counter == 10: - self.stop() + elif self.counter == 2 and len(self.w.vars) == 10 and self.w.counter == 10: + self.stop() def test_state_observer(): @@ -700,10 +693,8 @@ def run(self): work_runner.setup() # The internal ip address only becomes available once the hardware is up / the work is running. assert work.internal_ip == "" - try: + with contextlib.suppress(Empty): work_runner.run_once() - except Empty: - pass assert work.internal_ip == expected_ip_addr diff --git a/tests/tests_fabric/plugins/environments/test_slurm.py b/tests/tests_fabric/plugins/environments/test_slurm.py index c80e2f9b276ed..b3ee02845d8dd 100644 --- a/tests/tests_fabric/plugins/environments/test_slurm.py +++ b/tests/tests_fabric/plugins/environments/test_slurm.py @@ -148,6 +148,7 @@ def test_srun_variable_validation(): """Test that we raise useful errors when `srun` variables are misconfigured.""" with mock.patch.dict(os.environ, {"SLURM_NTASKS": "1"}): SLURMEnvironment() - with mock.patch.dict(os.environ, {"SLURM_NTASKS": "2"}): - with pytest.raises(RuntimeError, match="You set `--ntasks=2` in your SLURM"): - SLURMEnvironment() + with mock.patch.dict(os.environ, {"SLURM_NTASKS": "2"}), pytest.raises( + RuntimeError, match="You set `--ntasks=2` in your SLURM" + ): + SLURMEnvironment() diff --git a/tests/tests_fabric/strategies/test_ddp.py b/tests/tests_fabric/strategies/test_ddp.py index f39c324d7b392..4ea9a151ea2b8 100644 --- a/tests/tests_fabric/strategies/test_ddp.py +++ b/tests/tests_fabric/strategies/test_ddp.py @@ -56,9 +56,8 @@ def test_ddp_no_backward_sync(): with pytest.raises( TypeError, match="is only possible if the module passed to .* is wrapped in `DistributedDataParallel`" - ): - with strategy._backward_sync_control.no_backward_sync(Mock()): - pass + ), strategy._backward_sync_control.no_backward_sync(Mock()): + pass module = MagicMock(spec=DistributedDataParallel) with strategy._backward_sync_control.no_backward_sync(module): @@ -120,9 +119,6 @@ def __instancecheck__(self, instance): ) @RunIf(standalone=True) def test_ddp_grad_clipping(clip_type, accelerator, precision): - if clip_type == "norm": - clipping_test_cls = _MyFabricGradNorm - else: - clipping_test_cls = _MyFabricGradVal + clipping_test_cls = _MyFabricGradNorm if clip_type == "norm" else _MyFabricGradVal fabric = clipping_test_cls(accelerator=accelerator, devices=2, precision=precision, strategy="ddp") fabric.run() diff --git a/tests/tests_fabric/strategies/test_dp.py b/tests/tests_fabric/strategies/test_dp.py index 9f76c43d252ce..e06834a31a283 100644 --- 
a/tests/tests_fabric/strategies/test_dp.py +++ b/tests/tests_fabric/strategies/test_dp.py @@ -84,9 +84,6 @@ def __instancecheck__(self, instance): @pytest.mark.parametrize("clip_type", ["norm", "val"]) @RunIf(min_cuda_gpus=2) def test_dp_grad_clipping(clip_type, precision): - if clip_type == "norm": - clipping_test_cls = _MyFabricGradNorm - else: - clipping_test_cls = _MyFabricGradVal + clipping_test_cls = _MyFabricGradNorm if clip_type == "norm" else _MyFabricGradVal fabric = clipping_test_cls(accelerator="cuda", devices=2, precision=precision, strategy="dp") fabric.run() diff --git a/tests/tests_fabric/strategies/test_fsdp.py b/tests/tests_fabric/strategies/test_fsdp.py index 24d9e243ed206..f0013aa4fba08 100644 --- a/tests/tests_fabric/strategies/test_fsdp.py +++ b/tests/tests_fabric/strategies/test_fsdp.py @@ -82,9 +82,8 @@ def test_fsdp_no_backward_sync(): with pytest.raises( TypeError, match="is only possible if the module passed to .* is wrapped in `FullyShardedDataParallel`" - ): - with strategy._backward_sync_control.no_backward_sync(Mock()): - pass + ), strategy._backward_sync_control.no_backward_sync(Mock()): + pass module = MagicMock(spec=FullyShardedDataParallel) with strategy._backward_sync_control.no_backward_sync(module): diff --git a/tests/tests_fabric/strategies/test_single_device.py b/tests/tests_fabric/strategies/test_single_device.py index a2240c2004d8b..005de6bbd66ef 100644 --- a/tests/tests_fabric/strategies/test_single_device.py +++ b/tests/tests_fabric/strategies/test_single_device.py @@ -144,9 +144,6 @@ def run(self): ) @pytest.mark.parametrize("clip_type", ["norm", "val"]) def test_single_device_grad_clipping(clip_type, precision): - if clip_type == "norm": - clipping_test_cls = _MyFabricGradNorm - else: - clipping_test_cls = _MyFabricGradVal + clipping_test_cls = _MyFabricGradNorm if clip_type == "norm" else _MyFabricGradVal fabric = clipping_test_cls(accelerator="auto", devices=1, precision=precision) fabric.run() diff --git a/tests/tests_fabric/test_connector.py b/tests/tests_fabric/test_connector.py index 4f8b2995a105e..7c231fedeba62 100644 --- a/tests/tests_fabric/test_connector.py +++ b/tests/tests_fabric/test_connector.py @@ -837,25 +837,30 @@ def test_devices_from_environment(*_): def test_arguments_from_environment_collision(): """Test that the connector raises an error when the CLI settings conflict with settings in the code.""" - with mock.patch.dict(os.environ, {"LT_ACCELERATOR": "cpu"}): - with pytest.raises(ValueError, match="`Fabric\\(accelerator='cuda', ...\\)` but .* `--accelerator=cpu`"): - _Connector(accelerator="cuda") + with mock.patch.dict(os.environ, {"LT_ACCELERATOR": "cpu"}), pytest.raises( + ValueError, match="`Fabric\\(accelerator='cuda', ...\\)` but .* `--accelerator=cpu`" + ): + _Connector(accelerator="cuda") - with mock.patch.dict(os.environ, {"LT_STRATEGY": "ddp"}): - with pytest.raises(ValueError, match="`Fabric\\(strategy='ddp_spawn', ...\\)` but .* `--strategy=ddp`"): - _Connector(strategy="ddp_spawn") + with mock.patch.dict(os.environ, {"LT_STRATEGY": "ddp"}), pytest.raises( + ValueError, match="`Fabric\\(strategy='ddp_spawn', ...\\)` but .* `--strategy=ddp`" + ): + _Connector(strategy="ddp_spawn") - with mock.patch.dict(os.environ, {"LT_DEVICES": "2"}): - with pytest.raises(ValueError, match="`Fabric\\(devices=3, ...\\)` but .* `--devices=2`"): - _Connector(devices=3) + with mock.patch.dict(os.environ, {"LT_DEVICES": "2"}), pytest.raises( + ValueError, match="`Fabric\\(devices=3, ...\\)` but .* `--devices=2`" + ): + 
_Connector(devices=3) - with mock.patch.dict(os.environ, {"LT_NUM_NODES": "3"}): - with pytest.raises(ValueError, match="`Fabric\\(num_nodes=2, ...\\)` but .* `--num_nodes=3`"): - _Connector(num_nodes=2) + with mock.patch.dict(os.environ, {"LT_NUM_NODES": "3"}), pytest.raises( + ValueError, match="`Fabric\\(num_nodes=2, ...\\)` but .* `--num_nodes=3`" + ): + _Connector(num_nodes=2) - with mock.patch.dict(os.environ, {"LT_PRECISION": "16-mixed"}): - with pytest.raises(ValueError, match="`Fabric\\(precision='64-true', ...\\)` but .* `--precision=16-mixed`"): - _Connector(precision="64-true") + with mock.patch.dict(os.environ, {"LT_PRECISION": "16-mixed"}), pytest.raises( + ValueError, match="`Fabric\\(precision='64-true', ...\\)` but .* `--precision=16-mixed`" + ): + _Connector(precision="64-true") @RunIf(min_torch="1.12") diff --git a/tests/tests_fabric/test_fabric.py b/tests/tests_fabric/test_fabric.py index e1776f109701e..95c3a4db81e84 100644 --- a/tests/tests_fabric/test_fabric.py +++ b/tests/tests_fabric/test_fabric.py @@ -610,17 +610,17 @@ def test_no_backward_sync(): """Test that `Fabric.no_backward_sync()` validates the strategy and model is compatible.""" fabric = Fabric() model = nn.Linear(3, 3) - with pytest.raises(TypeError, match="You need to set up the model first"): - with fabric.no_backward_sync(model): - pass + with pytest.raises(TypeError, match="You need to set up the model first"), fabric.no_backward_sync(model): + pass model = fabric.setup(model) # pretend that the strategy does not support skipping backward sync fabric._strategy = Mock(spec=ParallelStrategy, _backward_sync_control=None) - with pytest.warns(PossibleUserWarning, match="The `ParallelStrategy` does not support skipping the"): - with fabric.no_backward_sync(model): - pass + with pytest.warns( + PossibleUserWarning, match="The `ParallelStrategy` does not support skipping the" + ), fabric.no_backward_sync(model): + pass # for single-device strategies, it becomes a no-op without warning fabric._strategy = Mock(spec=SingleDeviceStrategy, _backward_sync_control=MagicMock()) diff --git a/tests/tests_fabric/utilities/test_data.py b/tests/tests_fabric/utilities/test_data.py index 09c9fc665efd2..542433e3e74cc 100644 --- a/tests/tests_fabric/utilities/test_data.py +++ b/tests/tests_fabric/utilities/test_data.py @@ -1,3 +1,4 @@ +import contextlib import random from unittest.mock import Mock @@ -270,10 +271,8 @@ def __setattr__(self, attr, val): dataloader.my_arg = 10 dataloader.another_arg = 100 del dataloader.dataset - try: + with contextlib.suppress(AttributeError): del dataloader.abc_arg - except AttributeError: - pass assert dataloader.__pl_saved_args == (range(10),) assert dataloader.__pl_saved_kwargs == {} diff --git a/tests/tests_pytorch/accelerators/test_cpu.py b/tests/tests_pytorch/accelerators/test_cpu.py index 0bbae1846754f..e724652a076ed 100644 --- a/tests/tests_pytorch/accelerators/test_cpu.py +++ b/tests/tests_pytorch/accelerators/test_cpu.py @@ -34,7 +34,7 @@ def test_get_device_stats(tmpdir): fields = ["cpu_vm_percent", "cpu_percent", "cpu_swap_percent"] for f in fields: - assert any(f in h for h in gpu_stats.keys()) + assert any(f in h for h in gpu_stats) @pytest.mark.parametrize("restore_after_pre_setup", [True, False]) diff --git a/tests/tests_pytorch/accelerators/test_gpu.py b/tests/tests_pytorch/accelerators/test_gpu.py index 7da631c2649fd..bd89406f0cb83 100644 --- a/tests/tests_pytorch/accelerators/test_gpu.py +++ b/tests/tests_pytorch/accelerators/test_gpu.py @@ -30,7 +30,7 @@ def 
test_get_torch_gpu_stats(): fields = ["allocated_bytes.all.freed", "inactive_split.all.peak", "reserved_bytes.large_pool.peak"] for f in fields: - assert any(f in h for h in gpu_stats.keys()) + assert any(f in h for h in gpu_stats) @RunIf(min_cuda_gpus=1) @@ -40,7 +40,7 @@ def test_get_nvidia_gpu_stats(): fields = ["utilization.gpu", "memory.used", "memory.free", "utilization.memory"] for f in fields: - assert any(f in h for h in gpu_stats.keys()) + assert any(f in h for h in gpu_stats) @RunIf(min_cuda_gpus=1) diff --git a/tests/tests_pytorch/accelerators/test_hpu.py b/tests/tests_pytorch/accelerators/test_hpu.py index 652e229b2d357..beca536912c83 100644 --- a/tests/tests_pytorch/accelerators/test_hpu.py +++ b/tests/tests_pytorch/accelerators/test_hpu.py @@ -315,4 +315,4 @@ def test_hpu_device_stats_monitor(): "TotalActiveAllocs", ] for f in fields: - assert any(f in h for h in hpu_stats.keys()) + assert any(f in h for h in hpu_stats) diff --git a/tests/tests_pytorch/accelerators/test_mps.py b/tests/tests_pytorch/accelerators/test_mps.py index 6e2f994d1309f..73453272b08d7 100644 --- a/tests/tests_pytorch/accelerators/test_mps.py +++ b/tests/tests_pytorch/accelerators/test_mps.py @@ -31,7 +31,7 @@ def test_get_mps_stats(): fields = ["M1_vm_percent", "M1_percent", "M1_swap_percent"] for f in fields: - assert any(f in h for h in device_stats.keys()) + assert any(f in h for h in device_stats) @RunIf(mps=True) diff --git a/tests/tests_pytorch/callbacks/progress/test_tqdm_progress_bar.py b/tests/tests_pytorch/callbacks/progress/test_tqdm_progress_bar.py index 9e56dcc7ff381..51f1e086877fb 100644 --- a/tests/tests_pytorch/callbacks/progress/test_tqdm_progress_bar.py +++ b/tests/tests_pytorch/callbacks/progress/test_tqdm_progress_bar.py @@ -211,24 +211,24 @@ def test_tqdm_progress_bar_fast_dev_run(tmpdir): pbar = trainer.progress_bar_callback - assert 1 == pbar.val_progress_bar.n - assert 1 == pbar.val_progress_bar.total + assert pbar.val_progress_bar.n == 1 + assert pbar.val_progress_bar.total == 1 # the train progress bar should display 1 batch - assert 1 == pbar.train_progress_bar.total - assert 1 == pbar.train_progress_bar.n + assert pbar.train_progress_bar.total == 1 + assert pbar.train_progress_bar.n == 1 trainer.validate(model) # the validation progress bar should display 1 batch - assert 1 == pbar.val_progress_bar.total - assert 1 == pbar.val_progress_bar.n + assert pbar.val_progress_bar.total == 1 + assert pbar.val_progress_bar.n == 1 trainer.test(model) # the test progress bar should display 1 batch - assert 1 == pbar.test_progress_bar.total - assert 1 == pbar.test_progress_bar.n + assert pbar.test_progress_bar.total == 1 + assert pbar.test_progress_bar.n == 1 @pytest.mark.parametrize("refresh_rate", [0, 1, 50]) diff --git a/tests/tests_pytorch/callbacks/test_early_stopping.py b/tests/tests_pytorch/callbacks/test_early_stopping.py index 888674860e1de..d991a6704308a 100644 --- a/tests/tests_pytorch/callbacks/test_early_stopping.py +++ b/tests/tests_pytorch/callbacks/test_early_stopping.py @@ -85,7 +85,7 @@ def test_resume_early_stopping_from_checkpoint(tmpdir): checkpoint = torch.load(checkpoint_filepath) # the checkpoint saves "epoch + 1" early_stop_callback_state = early_stop_callback.saved_states[checkpoint["epoch"]] - assert 4 == len(early_stop_callback.saved_states) + assert len(early_stop_callback.saved_states) == 4 es_name = "EarlyStoppingTestRestore{'monitor': 'train_loss', 'mode': 'min'}" assert checkpoint["callbacks"][es_name] == early_stop_callback_state diff --git 
a/tests/tests_pytorch/checkpointing/test_model_checkpoint.py b/tests/tests_pytorch/checkpointing/test_model_checkpoint.py index 2a2f9d550d415..173a5578cde58 100644 --- a/tests/tests_pytorch/checkpointing/test_model_checkpoint.py +++ b/tests/tests_pytorch/checkpointing/test_model_checkpoint.py @@ -324,7 +324,8 @@ def test_model_checkpoint_to_yaml(tmpdir, save_top_k: int): path_yaml = os.path.join(tmpdir, "best_k_models.yaml") checkpoint.to_yaml(path_yaml) - d = yaml.full_load(open(path_yaml)) + with open(path_yaml) as fo: + d = yaml.full_load(fo) best_k = dict(checkpoint.best_k_models.items()) assert d == best_k diff --git a/tests/tests_pytorch/conftest.py b/tests/tests_pytorch/conftest.py index 3792528e222e5..13446f3af74d9 100644 --- a/tests/tests_pytorch/conftest.py +++ b/tests/tests_pytorch/conftest.py @@ -223,7 +223,7 @@ def caplog(caplog): for name in logging.root.manager.loggerDict if name.startswith("lightning.pytorch") } - for name in propagation_dict.keys(): + for name in propagation_dict: logging.getLogger(name).propagate = True yield caplog diff --git a/tests/tests_pytorch/graveyard/test_legacy_import_unpickler.py b/tests/tests_pytorch/graveyard/test_legacy_import_unpickler.py index 43450dddcaaff..429859b69bee1 100644 --- a/tests/tests_pytorch/graveyard/test_legacy_import_unpickler.py +++ b/tests/tests_pytorch/graveyard/test_legacy_import_unpickler.py @@ -21,7 +21,7 @@ ) def test_imports_standalone(pl_version: str): assert any( - key.startswith("pytorch_lightning") for key in sys.modules.keys() + key.startswith("pytorch_lightning") for key in sys.modules ), f"Imported PL, so it has to be in sys.modules: {sorted(sys.modules.keys())}" path_legacy = os.path.join(LEGACY_CHECKPOINTS_PATH, pl_version) path_ckpts = sorted(glob.glob(os.path.join(path_legacy, f"*{CHECKPOINT_EXTENSION}"))) @@ -32,10 +32,10 @@ def test_imports_standalone(pl_version: str): torch.load(path_ckpt) assert any( - key.startswith("pytorch_lightning") for key in sys.modules.keys() + key.startswith("pytorch_lightning") for key in sys.modules ), f"Imported PL, so it has to be in sys.modules: {sorted(sys.modules.keys())}" assert not any( - key.startswith("lightning." + "pytorch") for key in sys.modules.keys() + key.startswith("lightning." + "pytorch") for key in sys.modules ), f"Did not import the unified package, so it should not be in sys.modules: {sorted(sys.modules.keys())}" @@ -43,10 +43,10 @@ def test_imports_standalone(pl_version: str): @pytest.mark.skipif(not module_available("lightning"), reason="This test is ONLY relevant for the UNIFIED package") def test_imports_unified(pl_version: str): assert any( - key.startswith("lightning." + "pytorch") for key in sys.modules.keys() + key.startswith("lightning." + "pytorch") for key in sys.modules ), f"Imported unified package, so it has to be in sys.modules: {sorted(sys.modules.keys())}" assert not any( - key.startswith("pytorch_lightning") for key in sys.modules.keys() + key.startswith("pytorch_lightning") for key in sys.modules ), "Should not import standalone package, all imports should be redirected to the unified package" path_legacy = os.path.join(LEGACY_CHECKPOINTS_PATH, pl_version) @@ -63,8 +63,8 @@ def test_imports_unified(pl_version: str): torch.load(path_ckpt) assert any( - key.startswith("lightning." + "pytorch") for key in sys.modules.keys() + key.startswith("lightning." 
+ "pytorch") for key in sys.modules ), f"Imported unified package, so it has to be in sys.modules: {sorted(sys.modules.keys())}" assert not any( - key.startswith("pytorch_lightning") for key in sys.modules.keys() + key.startswith("pytorch_lightning") for key in sys.modules ), "Should not import standalone package, all imports should be redirected to the unified package" diff --git a/tests/tests_pytorch/helpers/utils.py b/tests/tests_pytorch/helpers/utils.py index 7420175e85692..97676d5ad093a 100644 --- a/tests/tests_pytorch/helpers/utils.py +++ b/tests/tests_pytorch/helpers/utils.py @@ -34,10 +34,7 @@ def get_data_path(expt_logger, path_dir=None): # the other experiments... if not path_dir: - if hasattr(expt_logger, "save_dir") and expt_logger.save_dir: - path_dir = expt_logger.save_dir - else: - path_dir = _TEMP_PATH + path_dir = expt_logger.save_dir if hasattr(expt_logger, "save_dir") and expt_logger.save_dir else _TEMP_PATH path_expt = os.path.join(path_dir, name, "version_%s" % version) # try if the new sub-folder exists, typical case for test-tube diff --git a/tests/tests_pytorch/loggers/test_comet.py b/tests/tests_pytorch/loggers/test_comet.py index f85ecd5ebb006..b6ca1338210ef 100644 --- a/tests/tests_pytorch/loggers/test_comet.py +++ b/tests/tests_pytorch/loggers/test_comet.py @@ -112,14 +112,15 @@ def save_os_environ(*args, **kwargs): return DEFAULT # Test api_key given - with patch.dict(os.environ, {"COMET_EXPERIMENT_KEY": experiment_key}): - with patch("lightning.pytorch.loggers.comet.CometExperiment", side_effect=save_os_environ) as comet_experiment: - logger = CometLogger(api_key=api_key) - assert logger.version == experiment_key - assert logger._experiment is None - - _ = logger.experiment - comet_experiment.assert_called_once_with(api_key=api_key, project_name=None) + with patch.dict(os.environ, {"COMET_EXPERIMENT_KEY": experiment_key}), patch( + "lightning.pytorch.loggers.comet.CometExperiment", side_effect=save_os_environ + ) as comet_experiment: + logger = CometLogger(api_key=api_key) + assert logger.version == experiment_key + assert logger._experiment is None + + _ = logger.experiment + comet_experiment.assert_called_once_with(api_key=api_key, project_name=None) assert instantiation_environ["COMET_EXPERIMENT_KEY"] == experiment_key diff --git a/tests/tests_pytorch/loggers/test_tensorboard.py b/tests/tests_pytorch/loggers/test_tensorboard.py index b33445d99e2d6..c3f00d7e6fef0 100644 --- a/tests/tests_pytorch/loggers/test_tensorboard.py +++ b/tests/tests_pytorch/loggers/test_tensorboard.py @@ -141,8 +141,8 @@ def name(self): # test env var (`$`) handling test_env_dir = "some_directory" - os.environ["test_env_dir"] = test_env_dir - save_dir = "$test_env_dir/tmp" + os.environ["TEST_ENV_DIR"] = test_env_dir + save_dir = "$TEST_ENV_DIR/tmp" explicit_save_dir = f"{test_env_dir}/tmp" logger = TestLogger(save_dir, sub_dir="sub_dir") trainer = Trainer(**trainer_args, logger=logger) @@ -249,9 +249,8 @@ def __init__(self): def training_step(self, *args): self.log("foo", 1, on_step=True, on_epoch=True) - if not self.trainer.fit_loop._should_accumulate(): - if self.trainer._logger_connector.should_update_logs: - self.indexes.append(self.trainer.global_step) + if not self.trainer.fit_loop._should_accumulate() and self.trainer._logger_connector.should_update_logs: + self.indexes.append(self.trainer.global_step) return super().training_step(*args) model = TestModel() diff --git a/tests/tests_pytorch/loops/test_training_loop_flow_scalar.py 
b/tests/tests_pytorch/loops/test_training_loop_flow_scalar.py index 574893199d328..b6529f2614301 100644 --- a/tests/tests_pytorch/loops/test_training_loop_flow_scalar.py +++ b/tests/tests_pytorch/loops/test_training_loop_flow_scalar.py @@ -217,10 +217,7 @@ def __init__(self): self.counter = 0 def collate_none_when_even(self, batch): - if self.counter % 2 == 0: - result = None - else: - result = default_collate(batch) + result = None if self.counter % 2 == 0 else default_collate(batch) self.counter += 1 return result diff --git a/tests/tests_pytorch/models/test_hooks.py b/tests/tests_pytorch/models/test_hooks.py index 1297eb4a62d6c..968f45a4036da 100644 --- a/tests/tests_pytorch/models/test_hooks.py +++ b/tests/tests_pytorch/models/test_hooks.py @@ -58,13 +58,13 @@ def on_before_zero_grad(self, optimizer): model = CurrentTestModel() trainer = Trainer(default_root_dir=tmpdir, max_steps=max_steps, max_epochs=2) - assert 0 == model.on_before_zero_grad_called + assert model.on_before_zero_grad_called == 0 trainer.fit(model) assert max_steps == model.on_before_zero_grad_called model.on_before_zero_grad_called = 0 trainer.test(model) - assert 0 == model.on_before_zero_grad_called + assert model.on_before_zero_grad_called == 0 def test_on_train_epoch_end_metrics_collection(tmpdir): diff --git a/tests/tests_pytorch/models/test_restore.py b/tests/tests_pytorch/models/test_restore.py index 21ed0949f5531..05d503df57c53 100644 --- a/tests/tests_pytorch/models/test_restore.py +++ b/tests/tests_pytorch/models/test_restore.py @@ -142,7 +142,7 @@ def _is_equal(self, a, b): return torch.all(torch.eq(a, b)) if isinstance(a, Mapping): - return all(self._is_equal(a.get(k, None), b.get(k, None)) for k in b.keys()) + return all(self._is_equal(a.get(k, None), b.get(k, None)) for k in b) return a == b @@ -463,7 +463,7 @@ def test_load_model_from_checkpoint(tmpdir, model_template): # Since `BoringModel` has `_save_hparams = True` by default, check that ckpt has hparams ckpt = torch.load(last_checkpoint) - assert model_template.CHECKPOINT_HYPER_PARAMS_KEY in ckpt.keys(), "hyper_parameters missing from checkpoints" + assert model_template.CHECKPOINT_HYPER_PARAMS_KEY in ckpt, "hyper_parameters missing from checkpoints" # Ensure that model can be correctly restored from checkpoint pretrained_model = model_template.load_from_checkpoint(last_checkpoint) diff --git a/tests/tests_pytorch/profilers/test_profiler.py b/tests/tests_pytorch/profilers/test_profiler.py index 5ec89e91f3679..477775d80b93c 100644 --- a/tests/tests_pytorch/profilers/test_profiler.py +++ b/tests/tests_pytorch/profilers/test_profiler.py @@ -510,9 +510,8 @@ def __init__(self): model = model.cuda() input = input.cuda() - with pytorch_profiler.profile("a"): - with RegisterRecordFunction(model): - model(input) + with pytorch_profiler.profile("a"), RegisterRecordFunction(model): + model(input) pytorch_profiler.describe() event_names = [e.name for e in pytorch_profiler.function_events] diff --git a/tests/tests_pytorch/strategies/test_deepspeed_strategy.py b/tests/tests_pytorch/strategies/test_deepspeed_strategy.py index caab8fe351c5e..359ac7a2a8211 100644 --- a/tests/tests_pytorch/strategies/test_deepspeed_strategy.py +++ b/tests/tests_pytorch/strategies/test_deepspeed_strategy.py @@ -686,10 +686,7 @@ def test_deepspeed_multigpu_stage_3_manual_optimization(tmpdir, deepspeed_config @pytest.mark.parametrize(("accumulate_grad_batches", "automatic_optimization"), [(1, False), (2, True)]) @RunIf(min_cuda_gpus=2, standalone=True, deepspeed=True, sklearn=True) def 
test_deepspeed_multigpu_stage_3_checkpointing(tmpdir, automatic_optimization, accumulate_grad_batches): - if automatic_optimization: - model = ModelParallelClassificationModel() - else: - model = ManualModelParallelClassificationModel() + model = ModelParallelClassificationModel() if automatic_optimization else ManualModelParallelClassificationModel() dm = ClassifDataModule() ck = ModelCheckpoint(monitor="val_acc", mode="max", save_last=True, save_top_k=-1) trainer = Trainer( @@ -710,10 +707,7 @@ def test_deepspeed_multigpu_stage_3_checkpointing(tmpdir, automatic_optimization saved_results = trainer.test(ckpt_path=ck.best_model_path, datamodule=dm) assert saved_results == results - if automatic_optimization: - model = ModelParallelClassificationModel() - else: - model = ManualModelParallelClassificationModel() + model = ModelParallelClassificationModel() if automatic_optimization else ManualModelParallelClassificationModel() trainer = Trainer( default_root_dir=tmpdir, accelerator="gpu", diff --git a/tests/tests_pytorch/test_cli.py b/tests/tests_pytorch/test_cli.py index 7d42a0c6987a6..b307dad85216c 100644 --- a/tests/tests_pytorch/test_cli.py +++ b/tests/tests_pytorch/test_cli.py @@ -366,7 +366,7 @@ def test_lightning_cli_help(): assert "--data.help" in out skip_params = {"self"} - for param in inspect.signature(Trainer.__init__).parameters.keys(): + for param in inspect.signature(Trainer.__init__).parameters: if param not in skip_params: assert f"--trainer.{param}" in out diff --git a/tests/tests_pytorch/trainer/logging_/test_logger_connector.py b/tests/tests_pytorch/trainer/logging_/test_logger_connector.py index 2c889fd4cb5ca..292c363dba391 100644 --- a/tests/tests_pytorch/trainer/logging_/test_logger_connector.py +++ b/tests/tests_pytorch/trainer/logging_/test_logger_connector.py @@ -274,10 +274,7 @@ def val_dataloader(self): def validation_step(self, *args, **kwargs): output = super().validation_step(*args[:-1], **kwargs) - if add_dataloader_idx: - name = "val_loss" - else: - name = f"val_loss_custom_naming_{args[-1]}" + name = "val_loss" if add_dataloader_idx else f"val_loss_custom_naming_{args[-1]}" self.log(name, output["x"], add_dataloader_idx=add_dataloader_idx) return output diff --git a/tests/tests_pytorch/trainer/optimization/test_manual_optimization.py b/tests/tests_pytorch/trainer/optimization/test_manual_optimization.py index 5fa2d1bcdf223..e8c4095f04cc0 100644 --- a/tests/tests_pytorch/trainer/optimization/test_manual_optimization.py +++ b/tests/tests_pytorch/trainer/optimization/test_manual_optimization.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import collections +import contextlib from copy import deepcopy from unittest import mock from unittest.mock import ANY, call, patch @@ -187,12 +188,10 @@ def on_train_batch_end(self, *_): self.called["on_train_batch_end"] += 1 after_before = self.layer.weight.clone() if self.should_update: - try: + # TODO: Figure out why 1 every 3 runs, weights don't get updated on count = 4" + with contextlib.suppress(Exception): + # todo: specify the possible exception assert not torch.equal(self.weight_before, after_before), self.count - # todo: specify the possible exception - except Exception: - # TODO: Figure out why 1 every 3 runs, weights don't get updated on count = 4" - pass else: try: assert torch.equal(self.weight_before, after_before) diff --git a/tests/tests_pytorch/trainer/test_trainer.py b/tests/tests_pytorch/trainer/test_trainer.py index e4aae715571e3..497cb685c4362 100644 --- a/tests/tests_pytorch/trainer/test_trainer.py +++ b/tests/tests_pytorch/trainer/test_trainer.py @@ -104,7 +104,7 @@ def __init__(self, lr=1e-2): # assert ckpt has hparams ckpt = torch.load(new_weights_path) - assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in ckpt.keys(), "hyper_parameters missing from checkpoints" + assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in ckpt, "hyper_parameters missing from checkpoints" # load new model hparams_path = tutils.get_data_path(logger, path_dir=tmpdir) @@ -306,7 +306,7 @@ def test_model_checkpoint_options(tmpdir, save_top_k, save_last, expected_files) """Test ModelCheckpoint options.""" def mock_save_function(filepath, *args): - open(filepath, "a").close() + open(filepath, "a").close() # noqa: SIM115 # simulated losses losses = [10, 9, 2.8, 5, 2.5] @@ -906,7 +906,7 @@ def training_step(self, *args, **kwargs): after_state_dict = model.state_dict() - for key in before_state_dict.keys(): + for key in before_state_dict: assert torch.all(torch.eq(before_state_dict[key], after_state_dict[key])) # check that limit_train_batches=0 turns off training @@ -924,7 +924,7 @@ def training_step(self, *args, **kwargs): after_state_dict = model.state_dict() - for key in before_state_dict.keys(): + for key in before_state_dict: assert not torch.all(torch.eq(before_state_dict[key], after_state_dict[key])) assert trainer.state.finished, f"Training failed with {trainer.state}" @@ -1322,10 +1322,7 @@ def predict( with pytest.raises(ProcessRaisedException, match="`return_predictions` should be set to `False`"): trainer.predict(model, datamodule=dm, return_predictions=True) - if datamodule: - results = trainer.predict(model, datamodule=dm) - else: - results = trainer.predict(model, dataloaders=dataloaders) + results = trainer.predict(model, datamodule=dm) if datamodule else trainer.predict(model, dataloaders=dataloaders) if not isinstance(trainer.strategy.launcher, _MultiProcessingLauncher): if use_callbacks: @@ -1856,11 +1853,10 @@ def training_step(self, batch, batch_idx): model = NanModel() trainer = Trainer(default_root_dir=tmpdir, detect_anomaly=True) - with pytest.raises(RuntimeError, match=r"returned nan values in its 0th output."): - with pytest.warns( - UserWarning, match=r".*Error detected in.* Traceback of forward call that caused the error.*" - ): - trainer.fit(model) + with pytest.raises(RuntimeError, match=r"returned nan values in its 0th output."), pytest.warns( + UserWarning, match=r".*Error detected in.* Traceback of forward call that caused the error.*" + ): + trainer.fit(model) @pytest.mark.parametrize( @@ -2045,7 +2041,8 @@ def on_fit_start(self): raise exception trainer = 
Trainer() - with mock.patch("lightning.pytorch.strategies.strategy.Strategy.on_exception") as on_exception_mock: - with suppress(Exception): - trainer.fit(ExceptionModel()) + with mock.patch("lightning.pytorch.strategies.strategy.Strategy.on_exception") as on_exception_mock, suppress( + Exception + ): + trainer.fit(ExceptionModel()) on_exception_mock.assert_called_once_with(exception) diff --git a/tests/tests_pytorch/tuner/test_lr_finder.py b/tests/tests_pytorch/tuner/test_lr_finder.py index e1670b9f29c7d..c8fba73ff636b 100644 --- a/tests/tests_pytorch/tuner/test_lr_finder.py +++ b/tests/tests_pytorch/tuner/test_lr_finder.py @@ -69,7 +69,7 @@ def test_model_reset_correctly(tmpdir): after_state_dict = model.state_dict() - for key in before_state_dict.keys(): + for key in before_state_dict: assert torch.all( torch.eq(before_state_dict[key], after_state_dict[key]) ), "Model was not reset correctly after learning rate finder" @@ -129,10 +129,7 @@ def configure_optimizers(self): tuner = Tuner(trainer) tuner.lr_find(model, update_attr=True) - if use_hparams: - after_lr = model.hparams.lr - else: - after_lr = model.lr + after_lr = model.hparams.lr if use_hparams else model.lr assert after_lr is not None assert before_lr != after_lr, "Learning rate was not altered after running learning rate finder" @@ -160,10 +157,7 @@ def configure_optimizers(self): trainer = Trainer(default_root_dir=tmpdir, max_epochs=2) tuner = Tuner(trainer) tuner.lr_find(model, update_attr=True, attr_name="my_fancy_lr") - if use_hparams: - after_lr = model.hparams.my_fancy_lr - else: - after_lr = model.my_fancy_lr + after_lr = model.hparams.my_fancy_lr if use_hparams else model.my_fancy_lr assert after_lr is not None assert before_lr != after_lr, "Learning rate was not altered after running learning rate finder" @@ -364,7 +358,7 @@ def test_multiple_lr_find_calls_gives_same_results(tmpdir): assert all( all_res[0][k] == curr_lr_finder[k] and len(curr_lr_finder[k]) > 10 for curr_lr_finder in all_res[1:] - for k in all_res[0].keys() + for k in all_res[0] ) diff --git a/tests/tests_pytorch/tuner/test_scale_batch_size.py b/tests/tests_pytorch/tuner/test_scale_batch_size.py index 60632a95cc94f..31ad3a6c6bd46 100644 --- a/tests/tests_pytorch/tuner/test_scale_batch_size.py +++ b/tests/tests_pytorch/tuner/test_scale_batch_size.py @@ -113,7 +113,7 @@ def test_trainer_reset_correctly(tmpdir, trainer_fn): assert actual == expected after_state_dict = model.state_dict() - for key in before_state_dict.keys(): + for key in before_state_dict: assert torch.all( torch.eq(before_state_dict[key], after_state_dict[key]) ), "Model was not reset correctly after scaling batch size" diff --git a/tests/tests_pytorch/utilities/test_grads.py b/tests/tests_pytorch/utilities/test_grads.py index 69df44bf66fa2..f4b5ee2d035fb 100644 --- a/tests/tests_pytorch/utilities/test_grads.py +++ b/tests/tests_pytorch/utilities/test_grads.py @@ -71,7 +71,7 @@ def __init__(self): norms = grad_norm(model, norm_type) assert norms.keys() == expected.keys() - for k in norms.keys(): + for k in norms: assert norms[k] == approx(expected[k]) diff --git a/tests/tests_pytorch/utilities/test_model_summary.py b/tests/tests_pytorch/utilities/test_model_summary.py index 8bf3de07625f0..7bb4ccf6258be 100644 --- a/tests/tests_pytorch/utilities/test_model_summary.py +++ b/tests/tests_pytorch/utilities/test_model_summary.py @@ -292,7 +292,7 @@ def test_empty_model_size(max_depth): """Test empty model size is zero.""" model = EmptyModule() summary = summarize(model, max_depth=max_depth) - 
assert 0.0 == summary.model_size + assert summary.model_size == 0.0 @pytest.mark.parametrize( diff --git a/tests/tests_pytorch/utilities/test_upgrade_checkpoint.py b/tests/tests_pytorch/utilities/test_upgrade_checkpoint.py index 59b86b717e8ec..ffd048a7238f7 100644 --- a/tests/tests_pytorch/utilities/test_upgrade_checkpoint.py +++ b/tests/tests_pytorch/utilities/test_upgrade_checkpoint.py @@ -24,21 +24,21 @@ def test_upgrade_checkpoint_file_missing(tmp_path, caplog): # path to single file (missing) file = tmp_path / "checkpoint.ckpt" - with mock.patch("sys.argv", ["upgrade_checkpoint.py", str(file)]): - with caplog.at_level(logging.ERROR): - with pytest.raises(SystemExit): - upgrade_main() - assert f"The path {file} does not exist" in caplog.text + with mock.patch("sys.argv", ["upgrade_checkpoint.py", str(file)]), caplog.at_level(logging.ERROR): + with pytest.raises(SystemExit): + upgrade_main() + assert f"The path {file} does not exist" in caplog.text caplog.clear() # path to non-empty directory, but no checkpoints with matching extension file.touch() - with mock.patch("sys.argv", ["upgrade_checkpoint.py", str(tmp_path), "--extension", ".other"]): - with caplog.at_level(logging.ERROR): - with pytest.raises(SystemExit): - upgrade_main() - assert "No checkpoint files with extension .other were found" in caplog.text + with mock.patch("sys.argv", ["upgrade_checkpoint.py", str(tmp_path), "--extension", ".other"]), caplog.at_level( + logging.ERROR + ): + with pytest.raises(SystemExit): + upgrade_main() + assert "No checkpoint files with extension .other were found" in caplog.text @mock.patch("lightning.pytorch.utilities.upgrade_checkpoint.torch.save") From 039ed52661a44a0af26319d9e537ae6b11f59cce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Tue, 25 Apr 2023 09:21:21 +0200 Subject: [PATCH 32/93] Remove devel.txt requirements file (#17466) (cherry picked from commit 1145c450b5ed41a9a86509bc667df093df5170a9) --- dockers/base-cuda/Dockerfile | 4 +++- dockers/base-ipu/Dockerfile | 5 ++++- requirements/README.md | 1 - requirements/app/devel.txt | 14 -------------- requirements/fabric/devel.txt | 4 ---- requirements/pytorch/devel.txt | 8 -------- src/lightning/__setup__.py | 2 +- src/lightning_app/__setup__.py | 2 +- src/lightning_fabric/__setup__.py | 2 +- src/pytorch_lightning/__setup__.py | 2 +- tests/README.md | 2 +- 11 files changed, 12 insertions(+), 34 deletions(-) delete mode 100644 requirements/app/devel.txt delete mode 100644 requirements/fabric/devel.txt delete mode 100644 requirements/pytorch/devel.txt diff --git a/dockers/base-cuda/Dockerfile b/dockers/base-cuda/Dockerfile index 072728b954a76..0d24bd0d0d24a 100644 --- a/dockers/base-cuda/Dockerfile +++ b/dockers/base-cuda/Dockerfile @@ -95,7 +95,9 @@ RUN \ done && \ CUDA_VERSION_MM=${CUDA_VERSION%.*} && \ pip install \ - -r requirements/pytorch/devel.txt \ + -r requirements/pytorch/base.txt \ + -r requirements/pytorch/extra.txt \ + -r requirements/pytorch/test.txt \ -r requirements/pytorch/strategies.txt \ --find-links "https://download.pytorch.org/whl/cu${CUDA_VERSION_MM//'.'/''}/torch_stable.html" diff --git a/dockers/base-ipu/Dockerfile b/dockers/base-ipu/Dockerfile index 2132b7bacdc83..e44aa89740285 100644 --- a/dockers/base-ipu/Dockerfile +++ b/dockers/base-ipu/Dockerfile @@ -91,7 +91,10 @@ RUN \ python adjust-torch-versions.py $fpath ${PYTORCH_VERSION}; \ done && \ # install PL dependencies - pip install --requirement requirements/pytorch/devel.txt && \ + pip install \ + -r requirements/pytorch/base.txt \ + 
-r requirements/pytorch/extra.txt \ + -r requirements/pytorch/test.txt && \ cd .. && \ rm -rf lightning && \ rm -rf /root/.cache \ diff --git a/requirements/README.md b/requirements/README.md index 1c5403b41eb74..5b408f3971df7 100644 --- a/requirements/README.md +++ b/requirements/README.md @@ -3,7 +3,6 @@ This root requirements folder branches into sub-folders depending on the python package. Within the folder, we have grouped requirements files/lists per focus, which shall closely match package extra So, for example, when you install PL as `pip install pytorch-lightning[extra]`, this list is stored in `requirements/pytorch/extra.txt`. -The only exceptional requirement file is `devel.txt`, which aggregated all the needed requirements for development. ## CI/CD upper bounds diff --git a/requirements/app/devel.txt b/requirements/app/devel.txt deleted file mode 100644 index f21e5acf79afd..0000000000000 --- a/requirements/app/devel.txt +++ /dev/null @@ -1,14 +0,0 @@ -# install all mandatory dependencies --r ./base.txt - -# extended list of dependencies for development and run lint and tests --r ./test.txt - -# extended list of dependencies for Cloud --r ./cloud.txt - -# extended list of dependencies for UI --r ./ui.txt - -# extended list of dependencies for UI --r ./components.txt diff --git a/requirements/fabric/devel.txt b/requirements/fabric/devel.txt deleted file mode 100644 index c1e6c57fd5c4b..0000000000000 --- a/requirements/fabric/devel.txt +++ /dev/null @@ -1,4 +0,0 @@ -# install all mandatory dependencies --r ./base.txt - --r ./test.txt diff --git a/requirements/pytorch/devel.txt b/requirements/pytorch/devel.txt deleted file mode 100644 index fe3159645aaa7..0000000000000 --- a/requirements/pytorch/devel.txt +++ /dev/null @@ -1,8 +0,0 @@ -# install all mandatory dependencies --r ./base.txt - -# install all extra dependencies for full package testing --r ./extra.txt - -# extended list of dependencies for development and run lint and tests --r ./test.txt diff --git a/src/lightning/__setup__.py b/src/lightning/__setup__.py index 64ba78bbafe0a..25ad816cbffaf 100644 --- a/src/lightning/__setup__.py +++ b/src/lightning/__setup__.py @@ -37,7 +37,7 @@ def _prepare_extras() -> Dict[str, Any]: extras = { f"{p.parent.name}-{p.stem}": _ASSISTANT.load_requirements(file_name=p.name, path_dir=p.parent, **common_args) for p in req_files - if p.name not in ("docs.txt", "devel.txt", "base.txt") and not p.parent.name.startswith("_") + if p.name not in ("docs.txt", "base.txt") and not p.parent.name.startswith("_") } # project specific extras groups extras["fabric-all"] = extras["fabric-strategies"] + extras["fabric-examples"] diff --git a/src/lightning_app/__setup__.py b/src/lightning_app/__setup__.py index 4fd6b36c2a819..f7c509572513b 100644 --- a/src/lightning_app/__setup__.py +++ b/src/lightning_app/__setup__.py @@ -39,7 +39,7 @@ def _prepare_extras() -> Dict[str, Any]: extras = { p.stem: assistant.load_requirements(file_name=p.name, **common_args) for p in req_files - if p.name not in ("docs.txt", "devel.txt", "base.txt") + if p.name not in ("docs.txt", "base.txt") } extras["extra"] = extras["cloud"] + extras["ui"] + extras["components"] extras["all"] = extras["extra"] diff --git a/src/lightning_fabric/__setup__.py b/src/lightning_fabric/__setup__.py index 055e75ca0c199..cee30e58cac23 100644 --- a/src/lightning_fabric/__setup__.py +++ b/src/lightning_fabric/__setup__.py @@ -40,7 +40,7 @@ def _prepare_extras() -> Dict[str, Any]: extras = { p.stem: assistant.load_requirements(file_name=p.name, 
**common_args) for p in req_files - if p.name not in ("docs.txt", "devel.txt", "base.txt") + if p.name not in ("docs.txt", "base.txt") } for req in parse_requirements(extras["strategies"]): extras[req.key] = [str(req)] diff --git a/src/pytorch_lightning/__setup__.py b/src/pytorch_lightning/__setup__.py index 8835e3ac65c51..c0ec52e690e5d 100644 --- a/src/pytorch_lightning/__setup__.py +++ b/src/pytorch_lightning/__setup__.py @@ -40,7 +40,7 @@ def _prepare_extras() -> Dict[str, Any]: extras = { p.stem: assistant.load_requirements(file_name=p.name, **common_args) for p in req_files - if p.name not in ("docs.txt", "devel.txt", "base.txt") + if p.name not in ("docs.txt", "base.txt") } for req in parse_requirements(extras["strategies"]): extras[req.key] = [str(req)] diff --git a/tests/README.md b/tests/README.md index df2102872527c..637c7ccbc0834 100644 --- a/tests/README.md +++ b/tests/README.md @@ -74,7 +74,7 @@ There are certain standalone tests, which you can run using: Make sure to run coverage on a GPU machine with at least 2 GPUs. ```bash -# generate coverage (coverage is also installed as part of dev dependencies under requirements/pytorch/devel.txt) +# generate coverage (coverage is also installed as part of dev dependencies) coverage run --source src/lightning/pytorch -m pytest src/lightning/pytorch tests/tests_pytorch -v # print coverage stats From 1e6111ec33afbf20b223538c4fa66a6a2314db0a Mon Sep 17 00:00:00 2001 From: Jirka Borovec <6035284+borda@users.noreply.github.com> Date: Tue, 25 Apr 2023 14:35:02 +0200 Subject: [PATCH 33/93] drop contribution badge (#17471) (cherry picked from commit 843a167838915215352154bb51ea5a524bb58360) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 45af4b8d08f6f..ba27507cd4f17 100644 --- a/README.md +++ b/README.md @@ -30,8 +30,8 @@ ______________________________________________________________________ [![DockerHub](https://img.shields.io/docker/pulls/pytorchlightning/pytorch_lightning.svg)](https://hub.docker.com/r/pytorchlightning/pytorch_lightning) [![codecov](https://codecov.io/gh/Lightning-AI/lightning/branch/master/graph/badge.svg?token=SmzX8mnKlA)](https://codecov.io/gh/Lightning-AI/lightning) -[![ReadTheDocs](https://readthedocs.org/projects/pytorch-lightning/badge/?version=stable)](https://lightning.ai/docs/pytorch/stable/) [![Discord](https://img.shields.io/discord/1077906959069626439?style=plastic)](https://discord.gg/VptPCZkGNa) +![GitHub commit activity](https://img.shields.io/github/commit-activity/w/lightning-ai/lightning) [![license](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/Lightning-AI/lightning/blob/master/LICENSE)