diff --git a/.drone.yml b/.drone.yml index 49fda10c56e9b..e2d051ee53262 100644 --- a/.drone.yml +++ b/.drone.yml @@ -38,7 +38,7 @@ steps: #- pip install -r ./docs/requirements.txt --user -q - pip list - python -c "import torch ; print(' & '.join([torch.cuda.get_device_name(i) for i in range(torch.cuda.device_count())]) if torch.cuda.is_available() else 'only CPU')" - - coverage run --source pytorch_lightning -m py.test pytorch_lightning tests benchmarks -v # --flake8 + - coverage run --source pytorch_lightning -m py.test pytorch_lightning tests benchmarks -v --durations=25 # --flake8 #- cd docs; make doctest; make coverage - coverage report - codecov --token $CODECOV_TOKEN # --pr $DRONE_PULL_REQUEST --build $DRONE_BUILD_NUMBER --branch $DRONE_BRANCH --commit $DRONE_COMMIT --tag $DRONE_TAG diff --git a/.github/workflows/code-formatting-check.yml b/.github/workflows/code-formatting.yml similarity index 71% rename from .github/workflows/code-formatting-check.yml rename to .github/workflows/code-formatting.yml index d9096cf2f3618..68696a52c7848 100644 --- a/.github/workflows/code-formatting-check.yml +++ b/.github/workflows/code-formatting.yml @@ -1,5 +1,13 @@ -name: "Check Formatting" -on: [push, pull_request] +name: "Check Formatting - Black" +on: + # Trigger the workflow on push or pull request, + # but only for the master branch + push: + branches: + - master + pull_request: + branches: + - master jobs: check_code_formatting: diff --git a/CHANGELOG.md b/CHANGELOG.md index c70b280b2e73f..6d8c5108d6547 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -53,7 +53,12 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Removed - Removed unintended Trainer argument `progress_bar_callback`, the callback should be passed in by `Trainer(callbacks=[...])` instead ([#1855](https://github.com/PyTorchLightning/pytorch-lightning/pull/1855)) -- Remove obsolete `self._device` in Trainer ([#1849](https://github.com/PyTorchLightning/pytorch-lightning/pull/1849)) +- Removed obsolete `self._device` in Trainer ([#1849](https://github.com/PyTorchLightning/pytorch-lightning/pull/1849)) +- Removed deprecated API ([#2073](https://github.com/PyTorchLightning/pytorch-lightning/pull/2073)) + * Packages: `pytorch_lightning.pt_overrides`, `pytorch_lightning.root_module` + * Modules: `pytorch_lightning.logging.comet_logger`, `pytorch_lightning.logging.mlflow_logger`, `pytorch_lightning.logging.test_tube_logger`, `pytorch_lightning.overrides.override_data_parallel`, `pytorch_lightning.core.model_saving`, `pytorch_lightning.core.root_module` + * Trainer arguments: `add_row_log_interval`, `default_save_path`, `gradient_clip`, `nb_gpu_nodes`, `max_nb_epochs`, `min_nb_epochs`, `nb_sanity_val_steps` + * Trainer attributes: `nb_gpu_nodes`, `num_gpu_nodes`, `gradient_clip`, `max_nb_epochs`, `min_nb_epochs`, `nb_sanity_val_steps`, `default_save_path`, `tng_tqdm_dic` ### Fixed @@ -102,6 +107,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Deprecated - Deprecated `tags_csv` in favor of `hparams_file` ([#1271](https://github.com/PyTorchLightning/pytorch-lightning/pull/1271)) +- Deprecated `amp_level` in favor of native AMP ([#1561](https://github.com/PyTorchLightning/pytorch-lightning/pull/1561)) ### Fixed diff --git a/docs/source/conf.py b/docs/source/conf.py index a084e5e349e39..301ce61595338 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -141,13 +141,7 @@ 'api/modules.rst', # deprecated/renamed: - 'api/pytorch_lightning.loggers.comet_logger.rst', # TODO: remove in v0.8.0 - 'api/pytorch_lightning.loggers.mlflow_logger.rst', # TODO: remove in v0.8.0 - 'api/pytorch_lightning.loggers.test_tube_logger.rst', # TODO: remove in v0.8.0 - 'api/pytorch_lightning.callbacks.pt_callbacks.*', # TODO: remove in v0.8.0 - 'api/pytorch_lightning.pt_overrides.*', # TODO: remove in v0.8.0 - 'api/pytorch_lightning.root_module.*', # TODO: remove in v0.8.0 - 'api/pytorch_lightning.logging.*', # TODO: remove in v0.8.0 + 'api/pytorch_lightning.logging.*', # TODO: remove in v0.9.0 ] # The name of the Pygments (syntax highlighting) style to use. diff --git a/docs/source/weights_loading.rst b/docs/source/weights_loading.rst index 11844678397a9..88a04edfcfd87 100644 --- a/docs/source/weights_loading.rst +++ b/docs/source/weights_loading.rst @@ -31,7 +31,7 @@ To change the checkpoint path pass in: .. testcode:: - trainer = Trainer(default_save_path='/your/path/to/save/checkpoints') + trainer = Trainer(default_root_dir='/your/path/to/save/checkpoints') To modify the behavior of checkpointing pass in your own callback. diff --git a/pytorch_lightning/core/hooks.py b/pytorch_lightning/core/hooks.py index d3fea6d446845..04f284bc7a163 100644 --- a/pytorch_lightning/core/hooks.py +++ b/pytorch_lightning/core/hooks.py @@ -149,8 +149,6 @@ def backward(self, use_amp, loss, optimizer): if self.trainer.use_native_amp: self.trainer.scaler.scale(loss).backward() - - # TODO: remove in v0.8.0 else: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() diff --git a/pytorch_lightning/core/lightning.py b/pytorch_lightning/core/lightning.py index 7632822e462c3..2b858ac528b15 100644 --- a/pytorch_lightning/core/lightning.py +++ b/pytorch_lightning/core/lightning.py @@ -990,9 +990,7 @@ def configure_apex(self, amp, model, optimizers, amp_level): return model, optimizers """ - model, optimizers = amp.initialize( - model, optimizers, opt_level=amp_level, - ) + model, optimizers = amp.initialize(model, optimizers, opt_level=amp_level) return model, optimizers diff --git a/pytorch_lightning/core/model_saving.py b/pytorch_lightning/core/model_saving.py deleted file mode 100644 index 233ea2292fe80..0000000000000 --- a/pytorch_lightning/core/model_saving.py +++ /dev/null @@ -1,11 +0,0 @@ -""" -.. warning:: `model_saving` module has been renamed to `saving` since v0.6.0. - The deprecated module name will be removed in v0.8.0. -""" - -from pytorch_lightning.utilities import rank_zero_warn - -rank_zero_warn("`model_saving` module has been renamed to `saving` since v0.6.0." - " The deprecated module name will be removed in v0.8.0.", DeprecationWarning) - -from pytorch_lightning.core.saving import * # noqa: F403 E402 diff --git a/pytorch_lightning/core/root_module.py b/pytorch_lightning/core/root_module.py deleted file mode 100644 index b66cfd0d07cd8..0000000000000 --- a/pytorch_lightning/core/root_module.py +++ /dev/null @@ -1,11 +0,0 @@ -""" -.. warning:: `root_module` module has been renamed to `lightning` since v0.6.0. 
- The deprecated module name will be removed in v0.8.0. -""" - -from pytorch_lightning.utilities import rank_zero_warn - -rank_zero_warn("`root_module` module has been renamed to `lightning` since v0.6.0." - " The deprecated module name will be removed in v0.8.0.", DeprecationWarning) - -from pytorch_lightning.core.lightning import * # noqa: F403 E402 diff --git a/pytorch_lightning/logging/comet_logger.py b/pytorch_lightning/logging/comet_logger.py deleted file mode 100644 index 83360b36f8bc9..0000000000000 --- a/pytorch_lightning/logging/comet_logger.py +++ /dev/null @@ -1,11 +0,0 @@ -""" -.. warning:: `comet_logger` module has been renamed to `comet` since v0.6.0. - The deprecated module name will be removed in v0.8.0. -""" - -from pytorch_lightning.utilities import rank_zero_warn - -rank_zero_warn("`comet_logger` module has been renamed to `comet` since v0.6.0." - " The deprecated module name will be removed in v0.8.0.", DeprecationWarning) - -from pytorch_lightning.loggers.comet import CometLogger # noqa: E402 diff --git a/pytorch_lightning/logging/mlflow_logger.py b/pytorch_lightning/logging/mlflow_logger.py deleted file mode 100644 index 2e1b52126ecdf..0000000000000 --- a/pytorch_lightning/logging/mlflow_logger.py +++ /dev/null @@ -1,11 +0,0 @@ -""" -.. warning:: `mlflow_logger` module has been renamed to `mlflow` since v0.6.0. - The deprecated module name will be removed in v0.8.0. -""" - -from pytorch_lightning.utilities import rank_zero_warn - -rank_zero_warn("`mlflow_logger` module has been renamed to `mlflow` since v0.6.0." - " The deprecated module name will be removed in v0.8.0.", DeprecationWarning) - -from pytorch_lightning.loggers.mlflow import MLFlowLogger # noqa: E402 diff --git a/pytorch_lightning/logging/test_tube_logger.py b/pytorch_lightning/logging/test_tube_logger.py deleted file mode 100644 index 3280ac8dce632..0000000000000 --- a/pytorch_lightning/logging/test_tube_logger.py +++ /dev/null @@ -1,11 +0,0 @@ -""" -.. warning:: `test_tube_logger` module has been renamed to `test_tube` since v0.6.0. - The deprecated module name will be removed in v0.8.0. -""" - -from pytorch_lightning.utilities import rank_zero_warn - -rank_zero_warn("`test_tube_logger` module has been renamed to `test_tube` since v0.6.0." - " The deprecated module name will be removed in v0.8.0.", DeprecationWarning) - -from pytorch_lightning.loggers.test_tube import TestTubeLogger # noqa: E402 diff --git a/pytorch_lightning/overrides/override_data_parallel.py b/pytorch_lightning/overrides/override_data_parallel.py deleted file mode 100644 index bf08b1a528953..0000000000000 --- a/pytorch_lightning/overrides/override_data_parallel.py +++ /dev/null @@ -1,12 +0,0 @@ -""" -.. warning:: `override_data_parallel` module has been renamed to `data_parallel` since v0.6.0. - The deprecated module name will be removed in v0.8.0. -""" - -from pytorch_lightning.utilities import rank_zero_warn - -rank_zero_warn("`override_data_parallel` module has been renamed to `data_parallel` since v0.6.0." - " The deprecated module name will be removed in v0.8.0.", DeprecationWarning) - -from pytorch_lightning.overrides.data_parallel import ( # noqa: E402 - get_a_var, parallel_apply, LightningDataParallel, LightningDistributedDataParallel) diff --git a/pytorch_lightning/pt_overrides/__init__.py b/pytorch_lightning/pt_overrides/__init__.py deleted file mode 100644 index 5e2b3ddf02c31..0000000000000 --- a/pytorch_lightning/pt_overrides/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -""" -.. 
warning:: `pt_overrides` package has been renamed to `overrides` since v0.6.0. - The deprecated module name will be removed in v0.8.0. -""" - -from pytorch_lightning.utilities import rank_zero_warn - -rank_zero_warn("`pt_overrides` package has been renamed to `overrides` since v0.6.0." - " The deprecated module name will be removed in v0.8.0.", DeprecationWarning) diff --git a/pytorch_lightning/pt_overrides/override_data_parallel.py b/pytorch_lightning/pt_overrides/override_data_parallel.py deleted file mode 100644 index e6c8716456920..0000000000000 --- a/pytorch_lightning/pt_overrides/override_data_parallel.py +++ /dev/null @@ -1,12 +0,0 @@ -""" -.. warning:: `override_data_parallel` module has been renamed to `data_parallel` since v0.6.0. - The deprecated module name will be removed in v0.8.0. -""" - -from pytorch_lightning.utilities import rank_zero_warn - -rank_zero_warn("`override_data_parallel` module has been renamed to `data_parallel` since v0.6.0." - " The deprecated module name will be removed in v0.8.0.", DeprecationWarning) - -from pytorch_lightning.overrides.data_parallel import ( # noqa: F402 E402 - get_a_var, parallel_apply, LightningDataParallel, LightningDistributedDataParallel) diff --git a/pytorch_lightning/root_module/__init__.py b/pytorch_lightning/root_module/__init__.py deleted file mode 100644 index 41f741de6d460..0000000000000 --- a/pytorch_lightning/root_module/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -""" -.. warning:: `root_module` package has been renamed to `core` since v0.6.0. - The deprecated package name will be removed in v0.8.0. -""" - -from pytorch_lightning.utilities import rank_zero_warn - -rank_zero_warn("`root_module` package has been renamed to `core` since v0.6.0." - " The deprecated package name will be removed in v0.8.0.", DeprecationWarning) diff --git a/pytorch_lightning/root_module/decorators.py b/pytorch_lightning/root_module/decorators.py deleted file mode 100644 index a8fd39a219f75..0000000000000 --- a/pytorch_lightning/root_module/decorators.py +++ /dev/null @@ -1,11 +0,0 @@ -""" -.. warning:: `root_module.decorators` module has been renamed to `core.decorators` since v0.6.0. - The deprecated module name will be removed in v0.8.0. -""" - -from pytorch_lightning.utilities import rank_zero_warn - -rank_zero_warn("`root_module.decorators` module has been renamed to `core.decorators` since v0.6.0." - " The deprecated module name will be removed in v0.8.0.", DeprecationWarning) - -from pytorch_lightning.core.decorators import * # noqa: F403 E402 diff --git a/pytorch_lightning/root_module/grads.py b/pytorch_lightning/root_module/grads.py deleted file mode 100644 index 540becd8b8c42..0000000000000 --- a/pytorch_lightning/root_module/grads.py +++ /dev/null @@ -1,11 +0,0 @@ -""" -.. warning:: `root_module.grads` module has been renamed to `core.grads` since v0.6.0. - The deprecated module name will be removed in v0.8.0. -""" - -from pytorch_lightning.utilities import rank_zero_warn - -rank_zero_warn("`root_module.grads` module has been renamed to `core.grads` since v0.6.0." - " The deprecated module name will be removed in v0.8.0.", DeprecationWarning) - -from pytorch_lightning.core.grads import * # noqa: F403 E402 diff --git a/pytorch_lightning/root_module/hooks.py b/pytorch_lightning/root_module/hooks.py deleted file mode 100644 index 04f4c5a6e6f40..0000000000000 --- a/pytorch_lightning/root_module/hooks.py +++ /dev/null @@ -1,11 +0,0 @@ -""" -.. warning:: `root_module.hooks` module has been renamed to `core.hooks` since v0.6.0. 
- The deprecated module name will be removed in v0.8.0. -""" - -from pytorch_lightning.utilities import rank_zero_warn - -rank_zero_warn("`root_module.hooks` module has been renamed to `core.hooks` since v0.6.0." - " The deprecated module name will be removed in v0.8.0.", DeprecationWarning) - -from pytorch_lightning.core.hooks import * # noqa: F403 E402 diff --git a/pytorch_lightning/root_module/memory.py b/pytorch_lightning/root_module/memory.py deleted file mode 100644 index 00a06f64bd178..0000000000000 --- a/pytorch_lightning/root_module/memory.py +++ /dev/null @@ -1,11 +0,0 @@ -""" -.. warning:: `root_module.memory` module has been renamed to `core.memory` since v0.6.0. - The deprecated module name will be removed in v0.8.0. -""" - -from pytorch_lightning.utilities import rank_zero_warn - -rank_zero_warn("`root_module.memory` module has been renamed to `core.memory` since v0.6.0." - " The deprecated module name will be removed in v0.8.0.", DeprecationWarning) - -from pytorch_lightning.core.memory import * # noqa: F403 E402 diff --git a/pytorch_lightning/root_module/model_saving.py b/pytorch_lightning/root_module/model_saving.py deleted file mode 100644 index 93b346beaf267..0000000000000 --- a/pytorch_lightning/root_module/model_saving.py +++ /dev/null @@ -1,11 +0,0 @@ -""" -.. warning:: `root_module.model_saving` module has been renamed to `core.saving` since v0.6.0. - The deprecated module name will be removed in v0.8.0. -""" - -from pytorch_lightning.utilities import rank_zero_warn - -rank_zero_warn("`root_module.model_saving` module has been renamed to `core.saving` since v0.6.0." - " The deprecated module name will be removed in v0.8.0.", DeprecationWarning) - -from pytorch_lightning.core.saving import * # noqa: F403 E402 diff --git a/pytorch_lightning/root_module/root_module.py b/pytorch_lightning/root_module/root_module.py deleted file mode 100644 index 987507980a256..0000000000000 --- a/pytorch_lightning/root_module/root_module.py +++ /dev/null @@ -1,11 +0,0 @@ -""" -.. warning:: `root_module.root_module` module has been renamed to `core.lightning` since v0.6.0. - The deprecated module name will be removed in v0.8.0. -""" - -from pytorch_lightning.utilities import rank_zero_warn - -rank_zero_warn("`root_module.root_module` module has been renamed to `core.lightning` since v0.6.0." - " The deprecated module name will be removed in v0.8.0.", DeprecationWarning) - -from pytorch_lightning.core.lightning import * # noqa: F403 E402 diff --git a/pytorch_lightning/trainer/__init__.py b/pytorch_lightning/trainer/__init__.py index 3c37671982983..b9737b0557b75 100644 --- a/pytorch_lightning/trainer/__init__.py +++ b/pytorch_lightning/trainer/__init__.py @@ -428,13 +428,6 @@ def on_train_end(self, trainer, pl_module): # default used by the Trainer trainer = Trainer(gradient_clip_val=0.0) - -gradient_clip: - -.. warning:: .. deprecated:: 0.5.0 - - Use `gradient_clip_val` instead. Will remove 0.8.0. - log_gpu_memory ^^^^^^^^^^^^^^ Options: @@ -495,12 +488,6 @@ def on_train_end(self, trainer, pl_module): # default used by the Trainer trainer = Trainer(max_epochs=1000) -max_nb_epochs: - -.. warning:: .. deprecated:: 0.5.0 - - Use `max_epochs` instead. Will remove 0.8.0. - min_epochs ^^^^^^^^^^ Force training for at least these many epochs @@ -510,11 +497,6 @@ def on_train_end(self, trainer, pl_module): # default used by the Trainer trainer = Trainer(min_epochs=1) -min_nb_epochs: - -.. warning:: deprecated:: 0.5.0 - Use `min_epochs` instead. Will remove 0.8.0. 
- max_steps ^^^^^^^^^ Stop training after this number of steps @@ -559,12 +541,6 @@ def on_train_end(self, trainer, pl_module): # to train on 8 nodes trainer = Trainer(num_nodes=8) -nb_gpu_nodes: - -.. warning:: .. deprecated:: 0.5.0 - - Use `num_nodes` instead. Will remove 0.8.0. - num_processes ^^^^^^^^^^^^^ @@ -595,12 +571,6 @@ def on_train_end(self, trainer, pl_module): # turn it off trainer = Trainer(num_sanity_val_steps=0) -nb_sanity_val_steps: - -.. warning:: .. deprecated:: 0.5.0 - - Use `num_sanity_val_steps` instead. Will remove 0.8.0. - num_tpu_cores ^^^^^^^^^^^^^ .. warning:: .. deprecated:: 0.7.6 @@ -825,13 +795,6 @@ def on_train_end(self, trainer, pl_module): # default used by the Trainer trainer = Trainer(row_log_interval=10) - -add_row_log_interval: - -.. warning:: .. deprecated:: 0.5.0 - - Use `row_log_interval` instead. Will remove 0.8.0. - use_amp: .. warning:: .. deprecated:: 0.7.0 diff --git a/pytorch_lightning/trainer/auto_mix_precision.py b/pytorch_lightning/trainer/auto_mix_precision.py index 2551b8a22dd0f..bc96d39435d85 100644 --- a/pytorch_lightning/trainer/auto_mix_precision.py +++ b/pytorch_lightning/trainer/auto_mix_precision.py @@ -20,11 +20,9 @@ class TrainerAMPMixin(ABC): use_native_amp: bool def init_amp(self, use_amp): - # TODO: remove in v 0.8.0 if self.use_native_amp: - rank_zero_warn("`amp_level` has been deprecated since v0.7.4 " - "(native amp does not require it)" - " and this argument will be removed in v0.8.0", DeprecationWarning) + rank_zero_warn("`amp_level` has been deprecated since v0.7.4 (native amp does not require it)" + " and this argument will be removed in v0.9.0", DeprecationWarning) # Backward compatibility, TODO: remove in v0.9.0 if use_amp is not None: @@ -38,13 +36,12 @@ def init_amp(self, use_amp): log.info('Using 16bit precision.') return - # TODO: remove all below for v0.8.0 + # TODO: remove all below for v0.9.0 if use_amp and not APEX_AVAILABLE: # pragma: no-cover raise ModuleNotFoundError(""" You set `use_amp=True` but do not have apex installed. 
Install apex first using this guide and rerun with use_amp=True: https://github.com/NVIDIA/apex#linux - this run will NOT use 16 bit precision """) diff --git a/pytorch_lightning/trainer/deprecated_api.py b/pytorch_lightning/trainer/deprecated_api.py index 11536df2a7830..9938faca2ec1d 100644 --- a/pytorch_lightning/trainer/deprecated_api.py +++ b/pytorch_lightning/trainer/deprecated_api.py @@ -5,113 +5,9 @@ from pytorch_lightning.utilities import rank_zero_warn -class TrainerDeprecatedAPITillVer0_8(ABC): - - def __init__(self): - super().__init__() # mixin calls super too - - @property - def nb_gpu_nodes(self): - """Back compatibility, will be removed in v0.8.0""" - rank_zero_warn("Attribute `nb_gpu_nodes` has renamed to `num_nodes` since v0.5.0" - " and this method will be removed in v0.8.0", DeprecationWarning) - return self.num_nodes - - @property - def num_gpu_nodes(self): - """Back compatibility, will be removed in v0.8.0""" - rank_zero_warn("Attribute `num_gpu_nodes` has renamed to `num_nodes` since v0.5.0" - " and this method will be removed in v0.8.0", DeprecationWarning) - return self.num_nodes - - @num_gpu_nodes.setter - def num_gpu_nodes(self, num_nodes): - """Back compatibility, will be removed in v0.8.0""" - rank_zero_warn("Attribute `num_gpu_nodes` has renamed to `num_nodes` since v0.5.0" - " and this method will be removed in v0.8.0", DeprecationWarning) - self.num_nodes = num_nodes - - @property - def gradient_clip(self): - """Back compatibility, will be removed in v0.8.0""" - rank_zero_warn("Attribute `gradient_clip` has renamed to `gradient_clip_val` since v0.5.0" - " and this method will be removed in v0.8.0", DeprecationWarning) - return self.gradient_clip_val - - @gradient_clip.setter - def gradient_clip(self, gradient_clip): - """Back compatibility, will be removed in v0.8.0""" - rank_zero_warn("Attribute `gradient_clip` has renamed to `gradient_clip_val` since v0.5.0" - " and this method will be removed in v0.8.0", DeprecationWarning) - self.gradient_clip_val = gradient_clip - - @property - def max_nb_epochs(self): - """Back compatibility, will be removed in v0.8.0""" - rank_zero_warn("Attribute `max_nb_epochs` has renamed to `max_epochs` since v0.5.0" - " and this method will be removed in v0.8.0", DeprecationWarning) - return self.max_epochs - - @max_nb_epochs.setter - def max_nb_epochs(self, max_epochs): - """Back compatibility, will be removed in v0.8.0""" - rank_zero_warn("Attribute `max_nb_epochs` has renamed to `max_epochs` since v0.5.0" - " and this method will be removed in v0.8.0", DeprecationWarning) - self.max_epochs = max_epochs - - @property - def min_nb_epochs(self): - """Back compatibility, will be removed in v0.8.0""" - rank_zero_warn("Attribute `min_nb_epochs` has renamed to `min_epochs` since v0.5.0" - " and this method will be removed in v0.8.0", DeprecationWarning) - return self.min_epochs - - @min_nb_epochs.setter - def min_nb_epochs(self, min_epochs): - """Back compatibility, will be removed in v0.8.0""" - rank_zero_warn("Attribute `min_nb_epochs` has renamed to `min_epochs` since v0.5.0" - " and this method will be removed in v0.8.0", DeprecationWarning) - self.min_epochs = min_epochs - - @property - def nb_sanity_val_steps(self): - """Back compatibility, will be removed in v0.8.0""" - rank_zero_warn("Attribute `nb_sanity_val_steps` has renamed to " - "`num_sanity_val_steps` since v0.5.0" - " and this method will be removed in v0.8.0", DeprecationWarning) - return self.num_sanity_val_steps - - @nb_sanity_val_steps.setter - def 
nb_sanity_val_steps(self, nb): - """Back compatibility, will be removed in v0.8.0""" - rank_zero_warn("Attribute `nb_sanity_val_steps` has renamed to " - "`num_sanity_val_steps` since v0.5.0" - " and this method will be removed in v0.8.0", DeprecationWarning) - self.num_sanity_val_steps = nb - - @property - def default_save_path(self): - """Back compatibility, will be removed in v0.8.0""" - rank_zero_warn("Attribute `default_save_path` has renamed to `default_root_dir` since v0.5.x" - " and this method will be removed in v0.8.0", DeprecationWarning) - return self.default_root_dir - - @default_save_path.setter - def default_save_path(self, path): - """Back compatibility, will be removed in v0.8.0""" - rank_zero_warn("Attribute `default_save_path` has renamed to `default_root_dir` since v0.5.x" - " and this method will be removed in v0.8.0", DeprecationWarning) - self.default_root_dir = path - - @property - def tng_tqdm_dic(self): - """Back compatibility, will be removed in v0.8.0""" - rank_zero_warn("`tng_tqdm_dic` has renamed to `training_tqdm_dict` since v0.5.0" - " and this method will be removed in v0.8.0", DeprecationWarning) - return self.progress_bar_dict - - class TrainerDeprecatedAPITillVer0_9(ABC): + progress_bar_dict: ... + progress_bar_callback: ... def __init__(self): super().__init__() # mixin calls super too diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 209c56d30553a..fce67654f7a08 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -13,12 +13,11 @@ from pytorch_lightning.core.lightning import LightningModule from pytorch_lightning.loggers import LightningLoggerBase from pytorch_lightning.profiler import SimpleProfiler, PassThroughProfiler, BaseProfiler -from pytorch_lightning.trainer.seed import seed_everything from pytorch_lightning.trainer.auto_mix_precision import TrainerAMPMixin from pytorch_lightning.trainer.callback_config import TrainerCallbackConfigMixin from pytorch_lightning.trainer.callback_hook import TrainerCallbackHookMixin from pytorch_lightning.trainer.data_loading import TrainerDataLoadingMixin -from pytorch_lightning.trainer.deprecated_api import TrainerDeprecatedAPITillVer0_8, TrainerDeprecatedAPITillVer0_9 +from pytorch_lightning.trainer.deprecated_api import TrainerDeprecatedAPITillVer0_9 from pytorch_lightning.trainer.distrib_data_parallel import TrainerDDPMixin from pytorch_lightning.trainer.distrib_parts import ( TrainerDPMixin, parse_gpu_ids, determine_root_gpu_device, pick_multiple_gpus) @@ -73,73 +72,61 @@ class Trainer( TrainerCallbackConfigMixin, TrainerCallbackHookMixin, TrainerLRFinderMixin, - TrainerDeprecatedAPITillVer0_8, TrainerDeprecatedAPITillVer0_9, ): - DEPRECATED_IN_0_8 = ( - 'gradient_clip', 'nb_gpu_nodes', 'max_nb_epochs', 'min_nb_epochs', - 'add_row_log_interval', 'nb_sanity_val_steps', 'tng_tqdm_dic', - ) DEPRECATED_IN_0_9 = ('use_amp', 'show_progress_bar', 'training_tqdm_dict', 'num_tpu_cores') def __init__( - self, - logger: Union[LightningLoggerBase, Iterable[LightningLoggerBase], bool] = True, - checkpoint_callback: Union[ModelCheckpoint, bool] = True, - early_stop_callback: Optional[Union[EarlyStopping, bool]] = False, - callbacks: Optional[List[Callback]] = None, - default_root_dir: Optional[str] = None, - gradient_clip_val: float = 0, - process_position: int = 0, - num_nodes: int = 1, - num_processes: int = 1, - gpus: Optional[Union[List[int], str, int]] = None, - auto_select_gpus: bool = False, - tpu_cores: Optional[Union[List[int], 
int]] = None, - log_gpu_memory: Optional[str] = None, - progress_bar_refresh_rate: int = 1, - overfit_pct: float = 0.0, - track_grad_norm: Union[int, float, str] = -1, - check_val_every_n_epoch: int = 1, - fast_dev_run: bool = False, - accumulate_grad_batches: Union[int, Dict[int, int], List[list]] = 1, - max_epochs: int = 1000, - min_epochs: int = 1, - max_steps: Optional[int] = None, - min_steps: Optional[int] = None, - train_percent_check: float = 1.0, - val_percent_check: float = 1.0, - test_percent_check: float = 1.0, - val_check_interval: float = 1.0, - log_save_interval: int = 100, - row_log_interval: int = 10, - add_row_log_interval=None, # backward compatible, todo: remove in v0.8.0 - distributed_backend: Optional[str] = None, - precision: int = 32, - print_nan_grads: bool = False, # backward compatible, todo: remove in v0.9.0 - weights_summary: Optional[str] = 'top', - weights_save_path: Optional[str] = None, - num_sanity_val_steps: int = 2, - truncated_bptt_steps: Optional[int] = None, - resume_from_checkpoint: Optional[str] = None, - profiler: Optional[Union[BaseProfiler, bool]] = None, - benchmark: bool = False, - deterministic: bool = False, - reload_dataloaders_every_epoch: bool = False, - auto_lr_find: Union[bool, str] = False, - replace_sampler_ddp: bool = True, - terminate_on_nan: bool = False, - auto_scale_batch_size: Union[str, bool] = False, - num_tpu_cores: Optional[int] = None, # backward compatible, todo: remove in v0.9.0 - amp_level: str = 'O1', # backward compatible, todo: remove in v0.8.0 - default_save_path=None, # backward compatible, todo: remove in v0.8.0 - gradient_clip=None, # backward compatible, todo: remove in v0.8.0 - nb_gpu_nodes=None, # backward compatible, todo: remove in v0.8.0 - max_nb_epochs=None, # backward compatible, todo: remove in v0.8.0 - min_nb_epochs=None, # backward compatible, todo: remove in v0.8.0 - use_amp=None, # backward compatible, todo: remove in v0.9.0 - show_progress_bar=None, # backward compatible, todo: remove in v0.9.0 - nb_sanity_val_steps=None, # backward compatible, todo: remove in v0.8.0 + self, + logger: Union[LightningLoggerBase, Iterable[LightningLoggerBase], bool] = True, + checkpoint_callback: Union[ModelCheckpoint, bool] = True, + early_stop_callback: Optional[Union[EarlyStopping, bool]] = False, + callbacks: Optional[List[Callback]] = None, + default_root_dir: Optional[str] = None, + gradient_clip_val: float = 0, + process_position: int = 0, + num_nodes: int = 1, + num_processes: int = 1, + gpus: Optional[Union[List[int], str, int]] = None, + auto_select_gpus: bool = False, + tpu_cores: Optional[Union[List[int], int]] = None, + log_gpu_memory: Optional[str] = None, + progress_bar_refresh_rate: int = 1, + overfit_pct: float = 0.0, + track_grad_norm: Union[int, float, str] = -1, + check_val_every_n_epoch: int = 1, + fast_dev_run: bool = False, + accumulate_grad_batches: Union[int, Dict[int, int], List[list]] = 1, + max_epochs: int = 1000, + min_epochs: int = 1, + max_steps: Optional[int] = None, + min_steps: Optional[int] = None, + train_percent_check: float = 1.0, + val_percent_check: float = 1.0, + test_percent_check: float = 1.0, + val_check_interval: float = 1.0, + log_save_interval: int = 100, + row_log_interval: int = 10, + distributed_backend: Optional[str] = None, + precision: int = 32, + print_nan_grads: bool = False, # backward compatible, todo: remove in v0.9.0 + weights_summary: Optional[str] = 'top', + weights_save_path: Optional[str] = None, + num_sanity_val_steps: int = 2, + truncated_bptt_steps: 
Optional[int] = None, + resume_from_checkpoint: Optional[str] = None, + profiler: Optional[Union[BaseProfiler, bool]] = None, + benchmark: bool = False, + deterministic: bool = False, + reload_dataloaders_every_epoch: bool = False, + auto_lr_find: Union[bool, str] = False, + replace_sampler_ddp: bool = True, + terminate_on_nan: bool = False, + auto_scale_batch_size: Union[str, bool] = False, + amp_level: str = 'O1', # backward compatible, todo: remove in v1.0.0 + num_tpu_cores: Optional[int] = None, # backward compatible, todo: remove in v0.9.0 + use_amp=None, # backward compatible, todo: remove in v0.9.0 + show_progress_bar=None, # backward compatible, todo: remove in v0.9.0 ): r""" @@ -156,11 +143,6 @@ def __init__( default_root_dir: Default path for logs and weights when no logger/ckpt_callback passed - default_save_path: - .. warning:: .. deprecated:: 0.7.3 - - Use `default_root_dir` instead. Will remove 0.9.0. - gradient_clip_val: 0 means don't clip. gradient_clip: @@ -271,11 +253,6 @@ def __init__( num_sanity_val_steps: Sanity check runs n batches of val before starting the training routine. - nb_sanity_val_steps: - .. warning:: .. deprecated:: 0.7.0 - - Use `num_sanity_val_steps` instead. Will remove 0.8.0. - truncated_bptt_steps: Truncated back prop breaks performs backprop every k steps of resume_from_checkpoint: To resume training from a specific checkpoint pass in the path here. @@ -325,20 +302,9 @@ def __init__( # Transfer params self.num_nodes = num_nodes - # Backward compatibility, TODO: remove in v0.8.0 - if nb_gpu_nodes is not None: - rank_zero_warn("Argument `nb_gpu_nodes` has renamed to `num_nodes` since v0.5.0" - " and this method will be removed in v0.8.0", DeprecationWarning) - self.num_gpu_nodes = nb_gpu_nodes self.log_gpu_memory = log_gpu_memory self.gradient_clip_val = gradient_clip_val - # Backward compatibility, TODO: remove in v0.8.0 - if gradient_clip is not None: - rank_zero_warn("Argument `gradient_clip` has renamed to `gradient_clip_val` since v0.5.0" - " and this method will be removed in v0.8.0", DeprecationWarning) - self.gradient_clip = gradient_clip - self.check_val_every_n_epoch = check_val_every_n_epoch if not isinstance(track_grad_norm, (int, float)) and track_grad_norm != 'inf': @@ -370,30 +336,11 @@ def __init__( self.weights_summary = weights_summary self.max_epochs = max_epochs - # Backward compatibility, TODO: remove in v0.8.0 - if max_nb_epochs is not None: - rank_zero_warn("Argument `max_nb_epochs` has renamed to `max_epochs` since v0.5.0" - " and this method will be removed in v0.8.0", DeprecationWarning) - self.max_nb_epochs = max_nb_epochs - self.min_epochs = min_epochs - # Backward compatibility, TODO: remove in v0.8.0 - if min_nb_epochs is not None: - rank_zero_warn("Argument `min_nb_epochs` has renamed to `min_epochs` since v0.5.0" - " and this method will be removed in v0.8.0", DeprecationWarning) - self.min_nb_epochs = min_nb_epochs - self.max_steps = max_steps self.min_steps = min_steps self.num_sanity_val_steps = num_sanity_val_steps - # Backward compatibility, TODO: remove in v0.8.0 - if nb_sanity_val_steps is not None: - rank_zero_warn("Argument `nb_sanity_val_steps` has renamed to " - "`num_sanity_val_steps` since v0.5.0" - " and this method will be removed in v0.8.0", DeprecationWarning) - self.nb_sanity_val_steps = nb_sanity_val_steps - # Backward compatibility, TODO: remove in v0.9.0 if print_nan_grads: rank_zero_warn("Argument `print_nan_grads` has no effect and will be removed in v0.9.0." 
@@ -421,10 +368,6 @@ def __init__( # set default save path if user didn't provide one self.default_root_dir = default_root_dir - # Backward compatibility, TODO: remove in v0.8.0 - if default_save_path is not None: - self.default_root_dir = default_save_path - if self.default_root_dir is None: self.default_root_dir = os.getcwd() @@ -515,12 +458,6 @@ def __init__( self.log_save_interval = log_save_interval self.val_check_interval = val_check_interval - # backward compatibility - if add_row_log_interval is not None: - rank_zero_warn("`add_row_log_interval` has renamed to `row_log_interval` since v0.5.0" - " and this method will be removed in v0.8.0", DeprecationWarning) - if not row_log_interval: # in case you did not set the proper value - row_log_interval = add_row_log_interval self.row_log_interval = row_log_interval # how much of the data to use @@ -536,7 +473,6 @@ def __init__( self.precision = precision self.scaler = None - # TODO: remove for v0.8.0 self.amp_level = amp_level self.init_amp(use_amp) diff --git a/pytorch_lightning/trainer/training_io.py b/pytorch_lightning/trainer/training_io.py index f8fa97be5a3dd..d19dd8d4019e3 100644 --- a/pytorch_lightning/trainer/training_io.py +++ b/pytorch_lightning/trainer/training_io.py @@ -61,7 +61,7 @@ You can even change the logic of your model as long as the weights and "architecture" of the system isn't different. If you add a layer, for instance, it might not work. -At a rough level, here's what happens inside Trainer :py:mod:`pytorch_lightning.base_module.model_saving.py`: +At a rough level, here's what happens inside Trainer :py:mod:`pytorch_lightning.base_module.saving.py`: .. code-block:: python diff --git a/setup.cfg b/setup.cfg index d5277ad1afdc5..d30036c698fe4 100644 --- a/setup.cfg +++ b/setup.cfg @@ -9,7 +9,6 @@ python_files = addopts = --strict --doctest-modules - --durations=0 markers = slow remote_data diff --git a/tests/loggers/test_all.py b/tests/loggers/test_all.py index 54a54204fe28f..f8a8fead41f58 100644 --- a/tests/loggers/test_all.py +++ b/tests/loggers/test_all.py @@ -109,7 +109,7 @@ def test_logger_reset_correctly(tmpdir, extra_params): model = EvalModelTemplate() trainer = Trainer( - default_save_path=tmpdir, + default_root_dir=tmpdir, **extra_params ) logger1 = trainer.logger diff --git a/tests/test_deprecated.py b/tests/test_deprecated.py index 57c72d3fd08f7..3a540fb565afe 100644 --- a/tests/test_deprecated.py +++ b/tests/test_deprecated.py @@ -13,77 +13,6 @@ def _soft_unimport_module(str_module): del sys.modules[str_module] -def test_tbd_remove_in_v0_8_0_module_imports(): - _soft_unimport_module("pytorch_lightning.logging.comet_logger") - with pytest.deprecated_call(match='v0.8.0'): - from pytorch_lightning.logging.comet_logger import CometLogger # noqa: F811 - _soft_unimport_module("pytorch_lightning.logging.mlflow_logger") - with pytest.deprecated_call(match='v0.8.0'): - from pytorch_lightning.logging.mlflow_logger import MLFlowLogger # noqa: F811 - _soft_unimport_module("pytorch_lightning.logging.test_tube_logger") - with pytest.deprecated_call(match='v0.8.0'): - from pytorch_lightning.logging.test_tube_logger import TestTubeLogger # noqa: F811 - - _soft_unimport_module("pytorch_lightning.pt_overrides.override_data_parallel") - with pytest.deprecated_call(match='v0.8.0'): - from pytorch_lightning.pt_overrides.override_data_parallel import ( # noqa: F811 - LightningDataParallel, LightningDistributedDataParallel) - _soft_unimport_module("pytorch_lightning.overrides.override_data_parallel") - with 
pytest.deprecated_call(match='v0.8.0'): - from pytorch_lightning.overrides.override_data_parallel import ( # noqa: F811 - LightningDataParallel, LightningDistributedDataParallel) - - _soft_unimport_module("pytorch_lightning.core.model_saving") - with pytest.deprecated_call(match='v0.8.0'): - from pytorch_lightning.core.model_saving import ModelIO # noqa: F811 - _soft_unimport_module("pytorch_lightning.core.root_module") - with pytest.deprecated_call(match='v0.8.0'): - from pytorch_lightning.core.root_module import LightningModule # noqa: F811 - - _soft_unimport_module("pytorch_lightning.root_module.decorators") - with pytest.deprecated_call(match='v0.8.0'): - from pytorch_lightning.root_module.decorators import data_loader # noqa: F811 - _soft_unimport_module("pytorch_lightning.root_module.grads") - with pytest.deprecated_call(match='v0.8.0'): - from pytorch_lightning.root_module.grads import GradInformation # noqa: F811 - _soft_unimport_module("pytorch_lightning.root_module.hooks") - with pytest.deprecated_call(match='v0.8.0'): - from pytorch_lightning.root_module.hooks import ModelHooks # noqa: F811 - _soft_unimport_module("pytorch_lightning.root_module.memory") - with pytest.deprecated_call(match='v0.8.0'): - from pytorch_lightning.root_module.memory import ModelSummary # noqa: F811 - _soft_unimport_module("pytorch_lightning.root_module.model_saving") - with pytest.deprecated_call(match='v0.8.0'): - from pytorch_lightning.root_module.model_saving import ModelIO # noqa: F811 - _soft_unimport_module("pytorch_lightning.root_module.root_module") - with pytest.deprecated_call(match='v0.8.0'): - from pytorch_lightning.root_module.root_module import LightningModule # noqa: F811 - - -def test_tbd_remove_in_v0_8_0_trainer(): - mapping_old_new = { - 'gradient_clip': 'gradient_clip_val', - 'nb_gpu_nodes': 'num_nodes', - 'max_nb_epochs': 'max_epochs', - 'min_nb_epochs': 'min_epochs', - 'nb_sanity_val_steps': 'num_sanity_val_steps', - 'default_save_path': 'default_root_dir', - } - # skip 0 since it may be interested as False - kwargs = {k: (i + 1) for i, k in enumerate(mapping_old_new)} - - trainer = Trainer(**kwargs) - - for attr_old in mapping_old_new: - attr_new = mapping_old_new[attr_old] - with pytest.deprecated_call(match='v0.8.0'): - _ = getattr(trainer, attr_old) - assert kwargs[attr_old] == getattr(trainer, attr_old), \ - 'Missing deprecated attribute "%s"' % attr_old - assert kwargs[attr_old] == getattr(trainer, attr_new), \ - 'Wrongly passed deprecated argument "%s" to attribute "%s"' % (attr_old, attr_new) - - def test_tbd_remove_in_v0_9_0_trainer(): # test show_progress_bar set by progress_bar_refresh_rate with pytest.deprecated_call(match='v0.9.0'): diff --git a/tests/trainer/test_lr_finder.py b/tests/trainer/test_lr_finder.py index 61001f5c925a0..d0becff0918c6 100755 --- a/tests/trainer/test_lr_finder.py +++ b/tests/trainer/test_lr_finder.py @@ -14,7 +14,7 @@ def test_error_on_more_than_1_optimizer(tmpdir): # logger file to get meta trainer = Trainer( - default_save_path=tmpdir, + default_root_dir=tmpdir, max_epochs=1 ) @@ -29,7 +29,7 @@ def test_model_reset_correctly(tmpdir): # logger file to get meta trainer = Trainer( - default_save_path=tmpdir, + default_root_dir=tmpdir, max_epochs=1 ) @@ -51,7 +51,7 @@ def test_trainer_reset_correctly(tmpdir): # logger file to get meta trainer = Trainer( - default_save_path=tmpdir, + default_root_dir=tmpdir, max_epochs=1 ) @@ -81,7 +81,7 @@ def test_trainer_arg_bool(tmpdir): # logger file to get meta trainer = Trainer( - default_save_path=tmpdir, 
+ default_root_dir=tmpdir, max_epochs=2, auto_lr_find=True ) @@ -100,7 +100,7 @@ def test_trainer_arg_str(tmpdir): before_lr = model.my_fancy_lr # logger file to get meta trainer = Trainer( - default_save_path=tmpdir, + default_root_dir=tmpdir, max_epochs=2, auto_lr_find='my_fancy_lr' ) @@ -120,7 +120,7 @@ def test_call_to_trainer_method(tmpdir): before_lr = hparams.get('learning_rate') # logger file to get meta trainer = Trainer( - default_save_path=tmpdir, + default_root_dir=tmpdir, max_epochs=2, ) @@ -144,7 +144,7 @@ def test_accumulation_and_early_stopping(tmpdir): before_lr = hparams.get('learning_rate') # logger file to get meta trainer = Trainer( - default_save_path=tmpdir, + default_root_dir=tmpdir, accumulate_grad_batches=2, ) @@ -167,7 +167,7 @@ def test_suggestion_parameters_work(tmpdir): # logger file to get meta trainer = Trainer( - default_save_path=tmpdir, + default_root_dir=tmpdir, max_epochs=3, ) @@ -187,7 +187,7 @@ def test_suggestion_with_non_finite_values(tmpdir): # logger file to get meta trainer = Trainer( - default_save_path=tmpdir, + default_root_dir=tmpdir, max_epochs=3 ) diff --git a/tests/trainer/test_optimizers.py b/tests/trainer/test_optimizers.py index bf1b68629607b..be1f429c85c1b 100644 --- a/tests/trainer/test_optimizers.py +++ b/tests/trainer/test_optimizers.py @@ -234,6 +234,6 @@ def configure_optimizers(self): model = CurrentModel(hparams) # fit model - trainer = Trainer(default_save_path=tmpdir, max_epochs=1) + trainer = Trainer(default_root_dir=tmpdir, max_epochs=1) result = trainer.fit(model) assert result == 1 diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index caf1632ec3474..f391260c139f4 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -705,7 +705,7 @@ def _optimizer_step(*args, **kwargs): def test_gpu_choice(tmpdir): trainer_options = dict( - default_save_path=tmpdir, + default_root_dir=tmpdir, ) # Only run if CUDA is available if not torch.cuda.is_available(): diff --git a/tests/trainer/test_trainer_tricks.py b/tests/trainer/test_trainer_tricks.py index a66e8bbde8b7f..973ed32e7cd92 100755 --- a/tests/trainer/test_trainer_tricks.py +++ b/tests/trainer/test_trainer_tricks.py @@ -15,7 +15,7 @@ def test_model_reset_correctly(tmpdir): # logger file to get meta trainer = Trainer( - default_save_path=tmpdir, + default_root_dir=tmpdir, max_epochs=1 ) @@ -38,7 +38,7 @@ def test_trainer_reset_correctly(tmpdir): # logger file to get meta trainer = Trainer( - default_save_path=tmpdir, + default_root_dir=tmpdir, max_epochs=1 ) @@ -77,7 +77,7 @@ def test_trainer_arg(tmpdir, scale_arg): before_batch_size = hparams.get('batch_size') # logger file to get meta trainer = Trainer( - default_save_path=tmpdir, + default_root_dir=tmpdir, max_epochs=1, auto_scale_batch_size=scale_arg, ) @@ -99,7 +99,7 @@ def test_call_to_trainer_method(tmpdir, scale_method): before_batch_size = hparams.get('batch_size') # logger file to get meta trainer = Trainer( - default_save_path=tmpdir, + default_root_dir=tmpdir, max_epochs=1, )
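
Note for users upgrading across this change: the sketch below is an illustrative summary, not part of the patch. It shows the argument renames and import relocations implied by the removals above; the values passed to Trainer are placeholders, and the logger imports assume the corresponding optional packages (comet_ml, mlflow, test_tube) are installed.

    # Illustrative migration sketch (placeholder values; argument and module names
    # are taken from the diff above, everything else is an assumption).

    from pytorch_lightning import Trainer

    # Before this PR the deprecated spellings only raised DeprecationWarning;
    # after it they are removed, so code must use the current names:
    trainer = Trainer(
        default_root_dir='/tmp/checkpoints',  # was `default_save_path`
        gradient_clip_val=0.5,                # was `gradient_clip`
        num_nodes=2,                          # was `nb_gpu_nodes`
        max_epochs=10,                        # was `max_nb_epochs`
        min_epochs=1,                         # was `min_nb_epochs`
        num_sanity_val_steps=5,               # was `nb_sanity_val_steps`
        row_log_interval=10,                  # was `add_row_log_interval`
    )
    progress_bar = trainer.progress_bar_dict  # was `trainer.tng_tqdm_dic`

    # The deprecated alias modules were deleted; import from the canonical locations:
    from pytorch_lightning.loggers.comet import CometLogger         # was pytorch_lightning.logging.comet_logger
    from pytorch_lightning.loggers.mlflow import MLFlowLogger       # was pytorch_lightning.logging.mlflow_logger
    from pytorch_lightning.loggers.test_tube import TestTubeLogger  # was pytorch_lightning.logging.test_tube_logger
    from pytorch_lightning.overrides.data_parallel import (         # was overrides/pt_overrides.override_data_parallel
        LightningDataParallel, LightningDistributedDataParallel)
    from pytorch_lightning.core.lightning import LightningModule    # was core.root_module / root_module.root_module
    from pytorch_lightning.core.saving import ModelIO               # was core.model_saving / root_module.model_saving
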