diff --git a/.azure-pipelines/ipu-tests.yml b/.azure-pipelines/ipu-tests.yml
index c1474ee1c9187..065c6983a2abe 100644
--- a/.azure-pipelines/ipu-tests.yml
+++ b/.azure-pipelines/ipu-tests.yml
@@ -53,12 +53,9 @@ jobs:
       export GIT_TERMINAL_PROMPT=1
       python -c "fname = 'requirements/extra.txt' ; lines = [line for line in open(fname).readlines() if 'fairscale' not in line] ; open(fname, 'w').writelines(lines)"
       python -c "fname = 'requirements/extra.txt' ; lines = [line for line in open(fname).readlines() if 'horovod' not in line] ; open(fname, 'w').writelines(lines)"
-      python ./requirements/adjust_versions.py requirements/extra.txt
       python ./requirements/adjust_versions.py requirements/examples.txt
-
-      pip install --requirement ./requirements/devel.txt --upgrade-strategy only-if-needed
-
+      pip install . --requirement requirements/devel.txt
       pip list
     displayName: 'Install dependencies'
diff --git a/pytorch_lightning/core/datamodule.py b/pytorch_lightning/core/datamodule.py
index 9dd8066f15080..df3fa26a24a17 100644
--- a/pytorch_lightning/core/datamodule.py
+++ b/pytorch_lightning/core/datamodule.py
@@ -20,8 +20,8 @@
 from torch.utils.data import DataLoader, Dataset, IterableDataset
 
 from pytorch_lightning.core.hooks import CheckpointHooks, DataHooks
+from pytorch_lightning.utilities import rank_zero_deprecation
 from pytorch_lightning.utilities.argparse import add_argparse_args, from_argparse_args, get_init_arguments_and_types
-from pytorch_lightning.utilities.distributed import rank_zero_deprecation
 
 
 class LightningDataModule(CheckpointHooks, DataHooks):
diff --git a/pytorch_lightning/core/grads.py b/pytorch_lightning/core/grads.py
index 30a2f0ae7e38f..f6a0d41035460 100644
--- a/pytorch_lightning/core/grads.py
+++ b/pytorch_lightning/core/grads.py
@@ -18,7 +18,7 @@
 
 from torch.nn import Module
 
-from pytorch_lightning.utilities.distributed import rank_zero_deprecation
+from pytorch_lightning.utilities import rank_zero_deprecation
 from pytorch_lightning.utilities.grads import grad_norm as new_grad_norm
 
 
diff --git a/pytorch_lightning/core/lightning.py b/pytorch_lightning/core/lightning.py
index df9a3a0d362f1..6af8e8b308ea4 100644
--- a/pytorch_lightning/core/lightning.py
+++ b/pytorch_lightning/core/lightning.py
@@ -171,7 +171,7 @@ def datamodule(self) -> Any:
         warning_cache.deprecation(
             "The `LightningModule.datamodule` property is deprecated in v1.3 and will be removed in v1.5."
             " Access the datamodule through using `self.trainer.datamodule` instead.",
-            stacklevel=5,
+            stacklevel=6,
         )
         return self._datamodule
 
diff --git a/pytorch_lightning/loggers/csv_logs.py b/pytorch_lightning/loggers/csv_logs.py
index 4df672fa6e3b5..754a7cf892060 100644
--- a/pytorch_lightning/loggers/csv_logs.py
+++ b/pytorch_lightning/loggers/csv_logs.py
@@ -29,7 +29,8 @@
 
 from pytorch_lightning.core.saving import save_hparams_to_yaml
 from pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_experiment
-from pytorch_lightning.utilities.distributed import rank_zero_only, rank_zero_warn
+from pytorch_lightning.utilities import rank_zero_warn
+from pytorch_lightning.utilities.distributed import rank_zero_only
 
 log = logging.getLogger(__name__)
diff --git a/pytorch_lightning/loggers/test_tube.py b/pytorch_lightning/loggers/test_tube.py
index 84f231b0f16d7..1107a0bcb2c4c 100644
--- a/pytorch_lightning/loggers/test_tube.py
+++ b/pytorch_lightning/loggers/test_tube.py
@@ -20,8 +20,8 @@
 
 from pytorch_lightning.core.lightning import LightningModule
 from pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_experiment
-from pytorch_lightning.utilities import _module_available
-from pytorch_lightning.utilities.distributed import rank_zero_only, rank_zero_warn
+from pytorch_lightning.utilities import _module_available, rank_zero_warn
+from pytorch_lightning.utilities.distributed import rank_zero_only
 
 _TESTTUBE_AVAILABLE = _module_available("test_tube")
 
diff --git a/pytorch_lightning/loops/training_batch_loop.py b/pytorch_lightning/loops/training_batch_loop.py
index f049ca2132013..e0ff96ac9d43f 100644
--- a/pytorch_lightning/loops/training_batch_loop.py
+++ b/pytorch_lightning/loops/training_batch_loop.py
@@ -491,7 +491,7 @@ def build_train_args(self, batch: Any, batch_idx: int, opt_idx: int, hiddens: Te
                     self.warning_cache.deprecation(
                         "`training_step` hook signature has changed in v1.3."
                         " `optimizer_idx` argument has been removed in case of manual optimization. Support for"
-                        " the old signature will be removed in v1.5",
+                        " the old signature will be removed in v1.5"
                     )
                 args.append(opt_idx)
             elif not self.trainer.has_arg(
@@ -685,7 +685,7 @@ def _build_kwargs(self, batch: Any, batch_idx: int, opt_idx: int, hiddens: Optio
                     self.warning_cache.deprecation(
                         "`training_step` hook signature has changed in v1.3."
                         " `optimizer_idx` argument has been removed in case of manual optimization. Support for"
-                        " the old signature will be removed in v1.5",
+                        " the old signature will be removed in v1.5"
                     )
                 step_kwargs['optimizer_idx'] = opt_idx
             elif not has_opt_idx_in_train_step and lightning_module.automatic_optimization:
diff --git a/pytorch_lightning/plugins/training_type/ddp_spawn.py b/pytorch_lightning/plugins/training_type/ddp_spawn.py
index 8d2cc217835fb..c8ff9298a56bb 100644
--- a/pytorch_lightning/plugins/training_type/ddp_spawn.py
+++ b/pytorch_lightning/plugins/training_type/ddp_spawn.py
@@ -28,16 +28,15 @@
 from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment
 from pytorch_lightning.plugins.training_type.parallel import ParallelPlugin
 from pytorch_lightning.trainer.states import TrainerFn
-from pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_7, _TORCH_GREATER_EQUAL_1_8
-from pytorch_lightning.utilities.cloud_io import atomic_save
-from pytorch_lightning.utilities.cloud_io import load as pl_load
-from pytorch_lightning.utilities.distributed import (
+from pytorch_lightning.utilities import (
+    _TORCH_GREATER_EQUAL_1_7,
+    _TORCH_GREATER_EQUAL_1_8,
     rank_zero_deprecation,
-    rank_zero_only,
     rank_zero_warn,
-    ReduceOp,
-    sync_ddp_if_available,
 )
+from pytorch_lightning.utilities.cloud_io import atomic_save
+from pytorch_lightning.utilities.cloud_io import load as pl_load
+from pytorch_lightning.utilities.distributed import rank_zero_only, ReduceOp, sync_ddp_if_available
 from pytorch_lightning.utilities.seed import reset_seed
 
 if _TORCH_GREATER_EQUAL_1_8:
diff --git a/pytorch_lightning/plugins/training_type/deepspeed.py b/pytorch_lightning/plugins/training_type/deepspeed.py
index 86510fbed6fe2..c57e715eccf91 100644
--- a/pytorch_lightning/plugins/training_type/deepspeed.py
+++ b/pytorch_lightning/plugins/training_type/deepspeed.py
@@ -29,9 +29,10 @@
 from pytorch_lightning.trainer.optimizers import _get_default_scheduler_config
 from pytorch_lightning.utilities import AMPType
 from pytorch_lightning.utilities.apply_func import apply_to_collection
-from pytorch_lightning.utilities.distributed import _warn, rank_zero_info, rank_zero_only
+from pytorch_lightning.utilities.distributed import rank_zero_info, rank_zero_only
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from pytorch_lightning.utilities.imports import _DEEPSPEED_AVAILABLE
+from pytorch_lightning.utilities.warnings import _warn, LightningDeprecationWarning
 
 if _DEEPSPEED_AVAILABLE:
     import deepspeed
@@ -263,7 +264,7 @@ def __init__(
                 "The usage of `cpu_offload`, `cpu_offload_params`, and `cpu_offload_use_pin_memory` "
                 "is deprecated since v1.4 and will be removed in v1.5."
                 " From now on use `offload_optimizer`, `offload_parameters` and `pin_memory`.",
-                category=DeprecationWarning
+                category=LightningDeprecationWarning
             )
             offload_optimizer = cpu_offload
             offload_parameters = cpu_offload_params
diff --git a/pytorch_lightning/profiler/profilers.py b/pytorch_lightning/profiler/profilers.py
index c97dab0c8968b..3f534ce0bb425 100644
--- a/pytorch_lightning/profiler/profilers.py
+++ b/pytorch_lightning/profiler/profilers.py
@@ -1,7 +1,7 @@
-from pytorch_lightning.utilities.distributed import rank_zero_deprecation
+from pytorch_lightning.utilities import rank_zero_deprecation
 
 rank_zero_deprecation(
-    "Using ``import pytorch_lightning.profiler.profilers`` is depreceated in v1.4, and will be removed in v1.6. "
+    "Using ``import pytorch_lightning.profiler.profilers`` is deprecated in v1.4, and will be removed in v1.6. "
     "HINT: Use ``import pytorch_lightning.profiler`` directly."
 )
diff --git a/pytorch_lightning/profiler/pytorch.py b/pytorch_lightning/profiler/pytorch.py
index 0c0bde515a4dd..05a4d99b023a8 100644
--- a/pytorch_lightning/profiler/pytorch.py
+++ b/pytorch_lightning/profiler/pytorch.py
@@ -24,7 +24,7 @@
 from torch.autograd.profiler import record_function
 
 from pytorch_lightning.profiler.base import BaseProfiler
-from pytorch_lightning.utilities.distributed import rank_zero_deprecation, rank_zero_warn
+from pytorch_lightning.utilities import rank_zero_deprecation, rank_zero_warn
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from pytorch_lightning.utilities.imports import _KINETO_AVAILABLE
 
@@ -351,7 +351,7 @@ def __deprecation_check(
         if profiled_functions is not None:
             rank_zero_deprecation(
                 "`PyTorchProfiler.profiled_functions` has been renamed to"
-                " `record_functions` in v1.3 and will be removed in v1.5",
+                " `record_functions` in v1.3 and will be removed in v1.5"
             )
             if not record_functions:
                 record_functions |= set(profiled_functions)
diff --git a/pytorch_lightning/trainer/connectors/accelerator_connector.py b/pytorch_lightning/trainer/connectors/accelerator_connector.py
index eb5f0a2fc1a7d..13b996c6dbe95 100644
--- a/pytorch_lightning/trainer/connectors/accelerator_connector.py
+++ b/pytorch_lightning/trainer/connectors/accelerator_connector.py
@@ -67,8 +67,10 @@
     device_parser,
     DeviceType,
     DistributedType,
+    rank_zero_deprecation,
+    rank_zero_info,
+    rank_zero_warn,
 )
-from pytorch_lightning.utilities.distributed import rank_zero_deprecation, rank_zero_info, rank_zero_warn
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 
 if _HOROVOD_AVAILABLE:
diff --git a/pytorch_lightning/trainer/connectors/data_connector.py b/pytorch_lightning/trainer/connectors/data_connector.py
index 4ff7e5aa21a42..c21238f06fe8f 100644
--- a/pytorch_lightning/trainer/connectors/data_connector.py
+++ b/pytorch_lightning/trainer/connectors/data_connector.py
@@ -113,30 +113,28 @@ def attach_dataloaders(
 
     def attach_datamodule(
         self, model: 'pl.LightningModule', datamodule: Optional['pl.LightningDataModule'] = None
     ) -> None:
-        # We use datamodule if it's been provided, otherwise we check model for it
-        datamodule = datamodule or getattr(model, 'datamodule', None)
-        # If we have a datamodule, attach necessary hooks + dataloaders
-        if datamodule:
-
-            # Override loader hooks
-            dl_methods = ('train_dataloader', 'val_dataloader', 'test_dataloader', 'predict_dataloader')
-            for method in dl_methods:
-                if is_overridden(method, datamodule):
-                    setattr(model, method, getattr(datamodule, method))
-
-            # Override data transfer hooks if dataset-specific to_device logic has been defined in datamodule
-            batch_transfer_hooks = ('on_before_batch_transfer', 'transfer_batch_to_device', 'on_after_batch_transfer')
-            for hook in batch_transfer_hooks:
-                if is_overridden(hook, datamodule):
-                    setattr(model, hook, getattr(datamodule, hook))
-
-            self.trainer.datamodule = datamodule
-            datamodule.trainer = self.trainer
-
-            # experimental feature for Flash
-            if hasattr(datamodule, "data_pipeline"):
-                model.data_pipeline = datamodule.data_pipeline
+        if datamodule is None:
+            return
+
+        # Override loader hooks
+        dl_methods = ('train_dataloader', 'val_dataloader', 'test_dataloader', 'predict_dataloader')
+        for method in dl_methods:
+            if is_overridden(method, datamodule):
+                setattr(model, method, getattr(datamodule, method))
+
+        # Override data transfer hooks if dataset-specific to_device logic has been defined in datamodule
+        batch_transfer_hooks = ('on_before_batch_transfer', 'transfer_batch_to_device', 'on_after_batch_transfer')
+        for hook in batch_transfer_hooks:
+            if is_overridden(hook, datamodule):
+                setattr(model, hook, getattr(datamodule, hook))
+
+        self.trainer.datamodule = datamodule
+        datamodule.trainer = self.trainer
+
+        # experimental feature for Flash
+        if hasattr(datamodule, "data_pipeline"):
+            model.data_pipeline = datamodule.data_pipeline
 
 
 class _PatchDataLoader:
diff --git a/pytorch_lightning/trainer/connectors/logger_connector/result.py b/pytorch_lightning/trainer/connectors/logger_connector/result.py
index 096741d4a7486..0cbd663e07da7 100644
--- a/pytorch_lightning/trainer/connectors/logger_connector/result.py
+++ b/pytorch_lightning/trainer/connectors/logger_connector/result.py
@@ -370,10 +370,10 @@ def extra(self, extra: Mapping[str, Any]) -> None:
 
         def check_fn(v):
             if v.grad_fn is not None:
-                warning_cache.warn(
+                warning_cache.deprecation(
                     f"One of the returned values {set(extra.keys())} has a `grad_fn`. We will detach it automatically"
                     " but this behaviour will change in v1.6. Please detach it manually:"
-                    " `return {'loss': ..., 'something': something.detach()}`", DeprecationWarning
+                    " `return {'loss': ..., 'something': something.detach()}`"
                 )
                 return v.detach()
             return v
diff --git a/pytorch_lightning/trainer/connectors/training_trick_connector.py b/pytorch_lightning/trainer/connectors/training_trick_connector.py
index f27288d2b13f4..4d93fa5977d13 100644
--- a/pytorch_lightning/trainer/connectors/training_trick_connector.py
+++ b/pytorch_lightning/trainer/connectors/training_trick_connector.py
@@ -14,8 +14,7 @@
 from typing import Dict, List, Optional, Union
 
 from pytorch_lightning.callbacks import GradientAccumulationScheduler
-from pytorch_lightning.utilities import GradClipAlgorithmType
-from pytorch_lightning.utilities.distributed import rank_zero_deprecation
+from pytorch_lightning.utilities import GradClipAlgorithmType, rank_zero_deprecation
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 
 
diff --git a/pytorch_lightning/trainer/logging.py b/pytorch_lightning/trainer/logging.py
index 0a59b9d8d4c36..74603782f3293 100644
--- a/pytorch_lightning/trainer/logging.py
+++ b/pytorch_lightning/trainer/logging.py
@@ -14,7 +14,7 @@
 
 from abc import ABC
 
-from pytorch_lightning.utilities.distributed import rank_zero_deprecation
+from pytorch_lightning.utilities import rank_zero_deprecation
 from pytorch_lightning.utilities.metrics import metrics_to_scalars as new_metrics_to_scalars
 
 
diff --git a/pytorch_lightning/trainer/model_hooks.py b/pytorch_lightning/trainer/model_hooks.py
index 86cb1334a7067..cbf331913e597 100644
--- a/pytorch_lightning/trainer/model_hooks.py
+++ b/pytorch_lightning/trainer/model_hooks.py
@@ -16,7 +16,7 @@
 from typing import Optional
 
 from pytorch_lightning.core.lightning import LightningModule
-from pytorch_lightning.utilities.distributed import rank_zero_deprecation
+from pytorch_lightning.utilities import rank_zero_deprecation
 from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature
 
 
diff --git a/pytorch_lightning/utilities/__init__.py b/pytorch_lightning/utilities/__init__.py
index 613a5013d5198..c2e727d314396 100644
--- a/pytorch_lightning/utilities/__init__.py
+++ b/pytorch_lightning/utilities/__init__.py
@@ -16,13 +16,7 @@
 import numpy
 
 from pytorch_lightning.utilities.apply_func import move_data_to_device  # noqa: F401
-from pytorch_lightning.utilities.distributed import (  # noqa: F401
-    AllGatherGrad,
-    rank_zero_deprecation,
-    rank_zero_info,
-    rank_zero_only,
-    rank_zero_warn,
-)
+from pytorch_lightning.utilities.distributed import AllGatherGrad, rank_zero_info, rank_zero_only  # noqa: F401
 from pytorch_lightning.utilities.enums import (  # noqa: F401
     AMPType,
     DeviceType,
@@ -63,6 +57,7 @@
     _XLA_AVAILABLE,
 )
 from pytorch_lightning.utilities.parsing import AttributeDict, flatten_dict, is_picklable  # noqa: F401
+from pytorch_lightning.utilities.warnings import rank_zero_deprecation, rank_zero_warn  # noqa: F401
 
 FLOAT16_EPSILON = numpy.finfo(numpy.float16).eps
 FLOAT32_EPSILON = numpy.finfo(numpy.float32).eps
diff --git a/pytorch_lightning/utilities/distributed.py b/pytorch_lightning/utilities/distributed.py
index a507afa6bc895..eb35cdec6c13d 100644
--- a/pytorch_lightning/utilities/distributed.py
+++ b/pytorch_lightning/utilities/distributed.py
@@ -14,8 +14,8 @@
 
 import logging
 import os
-import warnings
-from functools import partial, wraps
+from functools import wraps
+from platform import python_version
 from typing import Any, Optional, Union
 
 import torch
@@ -65,22 +65,26 @@ def _get_rank() -> int:
 rank_zero_only.rank = getattr(rank_zero_only, 'rank', _get_rank())
 
 
-def _warn(*args, **kwargs):
-    warnings.warn(*args, **kwargs)
-
-
-def _info(*args, **kwargs):
+def _info(*args, stacklevel: int = 2, **kwargs):
+    if python_version() >= "3.8.0":
+        kwargs['stacklevel'] = stacklevel
     log.info(*args, **kwargs)
 
 
-def _debug(*args, **kwargs):
+def _debug(*args, stacklevel: int = 2, **kwargs):
+    if python_version() >= "3.8.0":
+        kwargs['stacklevel'] = stacklevel
     log.debug(*args, **kwargs)
 
 
-rank_zero_debug = rank_zero_only(_debug)
-rank_zero_info = rank_zero_only(_info)
-rank_zero_warn = rank_zero_only(_warn)
-rank_zero_deprecation = partial(rank_zero_warn, category=DeprecationWarning)
+@rank_zero_only
+def rank_zero_debug(*args, stacklevel: int = 4, **kwargs):
+    _debug(*args, stacklevel=stacklevel, **kwargs)
+
+
+@rank_zero_only
+def rank_zero_info(*args, stacklevel: int = 4, **kwargs):
+    _info(*args, stacklevel=stacklevel, **kwargs)
 
 
 def gather_all_tensors(result: Union[torch.Tensor], group: Optional[Any] = None):
@@ -294,6 +298,7 @@ def register_ddp_comm_hook(
             ddp_comm_wrapper=default.fp16_compress_wrapper,
         )
     """
+    from pytorch_lightning.utilities import rank_zero_warn
     if not _TORCH_GREATER_EQUAL_1_8:
         rank_zero_warn("Not registering DDP comm hook. To use communication hooks, please use pytorch>=1.8.0.")
         return
diff --git a/pytorch_lightning/utilities/parsing.py b/pytorch_lightning/utilities/parsing.py
index 7eb923eca5ec3..d498849ac1b1c 100644
--- a/pytorch_lightning/utilities/parsing.py
+++ b/pytorch_lightning/utilities/parsing.py
@@ -19,7 +19,7 @@
 from dataclasses import fields, is_dataclass
 from typing import Any, Dict, Optional, Sequence, Tuple, Union
 
-from pytorch_lightning.utilities import rank_zero_warn
+from pytorch_lightning.utilities.warnings import rank_zero_warn
 
 
 def str_to_bool_or_str(val: str) -> Union[str, bool]:
@@ -98,7 +98,7 @@ def clean_namespace(hparams):
 
     del_attrs = [k for k, v in hparams_dict.items() if not is_picklable(v)]
     for k in del_attrs:
-        rank_zero_warn(f"attribute '{k}' removed from hparams because it cannot be pickled", UserWarning)
+        rank_zero_warn(f"attribute '{k}' removed from hparams because it cannot be pickled")
         del hparams_dict[k]
 
diff --git a/pytorch_lightning/utilities/warnings.py b/pytorch_lightning/utilities/warnings.py
index 7017ef5c3100c..0595a41ea5aa0 100644
--- a/pytorch_lightning/utilities/warnings.py
+++ b/pytorch_lightning/utilities/warnings.py
@@ -11,17 +11,40 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from pytorch_lightning.utilities.distributed import rank_zero_deprecation, rank_zero_warn
+"""Warning-related utilities"""
+import warnings
+from functools import partial
+
+from pytorch_lightning.utilities.distributed import rank_zero_only
+
+
+def _warn(*args, stacklevel: int = 2, **kwargs):
+    warnings.warn(*args, stacklevel=stacklevel, **kwargs)
+
+
+@rank_zero_only
+def rank_zero_warn(*args, stacklevel: int = 4, **kwargs):
+    _warn(*args, stacklevel=stacklevel, **kwargs)
+
+
+class LightningDeprecationWarning(DeprecationWarning):
+    ...
+
+
+# enable our warnings
+warnings.simplefilter('default', LightningDeprecationWarning)
+
+rank_zero_deprecation = partial(rank_zero_warn, category=LightningDeprecationWarning)
 
 
 class WarningCache(set):
 
-    def warn(self, m, *args, **kwargs):
+    def warn(self, m, *args, stacklevel: int = 5, **kwargs):
         if m not in self:
             self.add(m)
-            rank_zero_warn(m, *args, **kwargs)
+            rank_zero_warn(m, *args, stacklevel=stacklevel, **kwargs)
 
-    def deprecation(self, m, *args, **kwargs):
+    def deprecation(self, m, *args, stacklevel: int = 5, **kwargs):
         if m not in self:
             self.add(m)
-            rank_zero_deprecation(m, *args, **kwargs)
+            rank_zero_deprecation(m, *args, stacklevel=stacklevel, **kwargs)
diff --git a/tests/special_tests.sh b/tests/special_tests.sh
index b6de1ca69ecef..9fca3b62bad40 100755
--- a/tests/special_tests.sh
+++ b/tests/special_tests.sh
@@ -72,6 +72,12 @@ if nvcc --version; then
     nvprof --profile-from-start off -o trace_name.prof -- python ${defaults} tests/test_profiler.py::test_pytorch_profiler_nested_emit_nvtx
 fi
 
+# needs to run outside of `pytest`
+python tests/utilities/test_warnings.py
+if [ $? -eq 0 ]; then
+    report+="Ran\ttests/utilities/test_warnings.py\n"
+fi
+
 # echo test report
 printf '=%.s' {1..80}
 printf "\n$report"
diff --git a/tests/utilities/test_warnings.py b/tests/utilities/test_warnings.py
new file mode 100644
index 0000000000000..2e0c372e5c39f
--- /dev/null
+++ b/tests/utilities/test_warnings.py
@@ -0,0 +1,52 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Test that the warnings actually appear and they have the correct `stacklevel`
+
+Needs to be run outside of `pytest` as it captures all the warnings.
+"""
+import os
+from contextlib import redirect_stderr
+from io import StringIO
+
+from pytorch_lightning.utilities.warnings import _warn, rank_zero_deprecation, rank_zero_warn, WarningCache
+
+running_special = os.getenv("PL_RUNNING_SPECIAL_TESTS", "0") == "1"
+if running_special:
+
+    stderr = StringIO()
+    with redirect_stderr(stderr):
+        _warn("test1")
+        _warn("test2", DeprecationWarning)
+
+        rank_zero_warn("test3")
+        rank_zero_warn("test4", DeprecationWarning)
+
+        rank_zero_deprecation("test5")
+
+        cache = WarningCache()
+        cache.warn("test6")
+        cache.deprecation("test7")
+
+    output = stderr.getvalue()
+    assert "test_warnings.py:30: UserWarning: test1" in output
+    assert "test_warnings.py:31: DeprecationWarning: test2" in output
+
+    assert "test_warnings.py:33: UserWarning: test3" in output
+    assert "test_warnings.py:34: DeprecationWarning: test4" in output
+
+    assert "test_warnings.py:36: LightningDeprecationWarning: test5" in output
+
+    assert "test_warnings.py:39: UserWarning: test6" in output
+    assert "test_warnings.py:40: LightningDeprecationWarning: test7" in output