Drop torch 1.6 testing #10390

Merged
merged 22 commits on Nov 13, 2021
Changes from 17 commits
1 change: 0 additions & 1 deletion CHANGELOG.md
@@ -89,7 +89,6 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).

- Removed deprecated method `master_params` from PrecisionPlugin ([#10372](https://github.com/PyTorchLightning/pytorch-lightning/pull/10372))


### Fixed

- Fixed `apply_to_collection(defaultdict)` ([#10316](https://github.com/PyTorchLightning/pytorch-lightning/issues/10316))
2 changes: 1 addition & 1 deletion pytorch_lightning/callbacks/quantization.py
@@ -28,7 +28,7 @@
if _TORCH_GREATER_EQUAL_1_8:
from torch.quantization import FakeQuantizeBase
else:
# For torch 1.6 and 1.7.
# For torch 1.7.
from torch.quantization import FakeQuantize as FakeQuantizeBase

import pytorch_lightning as pl
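The same guarded alias appears again in tests/callbacks/test_quantization.py below. A minimal, hedged sketch of why the alias is enough: downstream code only needs one name for isinstance checks on fake-quantization modules, on both torch 1.7 and >= 1.8. The QAT setup here is illustrative, not taken from this PR.

    import torch
    import torch.nn as nn
    from pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_8

    if _TORCH_GREATER_EQUAL_1_8:
        from torch.quantization import FakeQuantizeBase
    else:  # torch 1.7
        from torch.quantization import FakeQuantize as FakeQuantizeBase

    # prepare a toy model for quantization-aware training
    model = nn.Sequential(nn.Linear(4, 4), nn.ReLU())
    model.qconfig = torch.quantization.get_default_qat_qconfig("fbgemm")
    torch.quantization.prepare_qat(model, inplace=True)

    # one common base type works across all supported torch versions
    fake_quant_modules = [m for m in model.modules() if isinstance(m, FakeQuantizeBase)]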
5 changes: 3 additions & 2 deletions pytorch_lightning/distributed/dist.py
@@ -13,7 +13,8 @@
# limitations under the License.
from typing import Any

from pytorch_lightning.overrides.torch_distributed import broadcast_object_list
import torch.distributed

from pytorch_lightning.utilities import rank_zero_deprecation
from pytorch_lightning.utilities.distributed import group as _group

@@ -40,6 +41,6 @@ def broadcast(self, obj: Any, group=_group.WORLD):
if self.rank != 0:
obj = [None] * len(obj)

broadcast_object_list(obj, 0, group=group or _group.WORLD)
torch.distributed.broadcast_object_list(obj, 0, group=group or _group.WORLD)

return obj[0]
99 changes: 0 additions & 99 deletions pytorch_lightning/overrides/torch_distributed.py

This file was deleted.
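The deleted override supplied broadcast_object_list for Lightning's internal use; the plugins below now import it straight from torch.distributed. A minimal, hedged sketch of the pattern (the helper name is illustrative, not Lightning API), assuming a process group has already been initialised with init_process_group:

    import torch.distributed as dist

    def broadcast_from_rank_zero(obj, group=None):
        # every rank passes a one-element list; rank 0 supplies the payload,
        # the remaining ranks receive it in place
        buf = [obj] if dist.get_rank() == 0 else [None]
        dist.broadcast_object_list(buf, src=0, group=group)
        return buf[0]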

15 changes: 5 additions & 10 deletions pytorch_lightning/plugins/training_type/ddp.py
@@ -34,7 +34,6 @@
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.overrides import LightningDistributedModule
from pytorch_lightning.overrides.distributed import prepare_for_backward
from pytorch_lightning.overrides.torch_distributed import broadcast_object_list
from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment
from pytorch_lightning.plugins.io.checkpoint_plugin import CheckpointIO
from pytorch_lightning.plugins.training_type.parallel import ParallelPlugin
@@ -43,7 +42,6 @@
_FAIRSCALE_AVAILABLE,
_HYDRA_AVAILABLE,
_IS_WINDOWS,
_TORCH_GREATER_EQUAL_1_7,
_TORCH_GREATER_EQUAL_1_8,
_TORCH_GREATER_EQUAL_1_9,
_TORCH_GREATER_EQUAL_1_10,
@@ -280,15 +278,12 @@ def pre_configure_ddp(self):
# when not all parameter backward hooks are fired by the autograd engine even if require_grad is set to True.
# This flag does come with a performance hit, so it is suggested to disable in cases where it is possible.
self._ddp_kwargs["find_unused_parameters"] = self._ddp_kwargs.get("find_unused_parameters", True)
# todo: PyTorch 1.7.0 DDP introduces `self.reducer._rebuild_buckets()` breaking manual_optimization
if (
_TORCH_GREATER_EQUAL_1_7
and not self.lightning_module.automatic_optimization
and not self._ddp_kwargs.get("find_unused_parameters", False)
if not self.lightning_module.automatic_optimization and not self._ddp_kwargs.get(
"find_unused_parameters", False
):
# TODO: PyTorch 1.7.0 DDP introduces `self.reducer._rebuild_buckets()` breaking manual_optimization
rank_zero_warn(
"From PyTorch 1.7.0, Lightning ``manual_optimization`` needs to set ``find_unused_parameters=True`` "
"to properly work with DDP."
"Lightning `manual_optimization` needs to set `find_unused_parameters=True` to properly work with DDP."
)
self._ddp_kwargs["find_unused_parameters"] = True

@@ -396,7 +391,7 @@ def broadcast(self, obj: object, src: int = 0) -> object:
obj = [obj]
if self.global_rank != src:
obj = [None]
broadcast_object_list(obj, src, group=_group.WORLD)
torch.distributed.broadcast_object_list(obj, src, group=_group.WORLD)
return obj[0]

def pre_backward(self, closure_loss: torch.Tensor) -> None:
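The warning above now fires whenever manual optimization is combined with find_unused_parameters left at False, regardless of torch version. A hedged usage sketch of setting the flag explicitly from user code; the exact Trainer argument spelling depends on the Lightning release in use.

    from pytorch_lightning import Trainer
    from pytorch_lightning.plugins import DDPPlugin

    # opt in explicitly so the plugin does not have to flip the flag and warn;
    # with automatic optimization, leaving it False is usually faster
    trainer = Trainer(accelerator="gpu", devices=2, strategy=DDPPlugin(find_unused_parameters=True))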
16 changes: 6 additions & 10 deletions pytorch_lightning/plugins/training_type/ddp_spawn.py
@@ -27,12 +27,11 @@
import pytorch_lightning as pl
from pytorch_lightning.overrides import LightningDistributedModule
from pytorch_lightning.overrides.distributed import prepare_for_backward
from pytorch_lightning.overrides.torch_distributed import broadcast_object_list
from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment
from pytorch_lightning.plugins.io.checkpoint_plugin import CheckpointIO
from pytorch_lightning.plugins.training_type.parallel import ParallelPlugin
from pytorch_lightning.trainer.states import TrainerFn
from pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_7, _TORCH_GREATER_EQUAL_1_8, rank_zero_warn
from pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_8, rank_zero_warn
from pytorch_lightning.utilities.apply_func import apply_to_collection, move_data_to_device
from pytorch_lightning.utilities.cloud_io import atomic_save
from pytorch_lightning.utilities.cloud_io import load as pl_load
@@ -246,15 +245,12 @@ def pre_configure_ddp(self):
# when not all parameter backward hooks are fired by the autograd engine even if require_grad is set to True.
# This flag does come with a performance hit, so it is suggested to disable in cases where it is possible.
self._ddp_kwargs["find_unused_parameters"] = self._ddp_kwargs.get("find_unused_parameters", True)
# todo: PyTorch 1.7.0 DDP introduces `self.reducer._rebuild_buckets()` breaking manual_optimization
if (
_TORCH_GREATER_EQUAL_1_7
and not self.lightning_module.automatic_optimization
and not self._ddp_kwargs.get("find_unused_parameters", False)
if not self.lightning_module.automatic_optimization and not self._ddp_kwargs.get(
"find_unused_parameters", False
):
# TODO: PyTorch 1.7.0 DDP introduces `self.reducer._rebuild_buckets()` breaking manual_optimization
rank_zero_warn(
"From PyTorch 1.7.0, Lightning ``manual_optimization`` needs to set ``find_unused_parameters=True`` "
"to properly work with DDP."
"Lightning `manual_optimization` needs to set `find_unused_parameters=True` to properly work with DDP."
)
self._ddp_kwargs["find_unused_parameters"] = True

@@ -331,7 +327,7 @@ def broadcast(self, obj: object, src: int = 0) -> object:
obj = [obj]
if self.global_rank != src:
obj = [None]
broadcast_object_list(obj, src, group=_group.WORLD)
torch.distributed.broadcast_object_list(obj, src, group=_group.WORLD)
return obj[0]

def model_to_device(self):
@@ -74,7 +74,6 @@
from pytorch_lightning.utilities.imports import (
_HOROVOD_AVAILABLE,
_IPU_AVAILABLE,
_TORCH_GREATER_EQUAL_1_7,
_TORCH_GREATER_EQUAL_1_8,
_TPU_AVAILABLE,
)
@@ -190,10 +189,8 @@ def _init_deterministic(self, deterministic: bool) -> None:
self.deterministic = deterministic
if _TORCH_GREATER_EQUAL_1_8:
torch.use_deterministic_algorithms(deterministic)
elif _TORCH_GREATER_EQUAL_1_7:
else:
torch.set_deterministic(deterministic)
else: # the minimum version Lightning supports is PyTorch 1.6
torch._set_deterministic(deterministic)
if deterministic:
# fixing non-deterministic part of horovod
# https://github.com/PyTorchLightning/pytorch-lightning/pull/1572/files#r420279383
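With 1.6 gone, the connector only has to choose between the two public toggles above. From the user side nothing changes; a hedged sketch of the flag that reaches this branch. The CUBLAS variable is a requirement of torch's CUDA deterministic mode, not something this PR introduces.

    import os
    from pytorch_lightning import Trainer

    # torch's deterministic mode needs this on CUDA >= 10.2; harmless on CPU
    os.environ.setdefault("CUBLAS_WORKSPACE_CONFIG", ":4096:8")
    trainer = Trainer(deterministic=True)  # routed to use_deterministic_algorithms / set_deterministic above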
1 change: 0 additions & 1 deletion pytorch_lightning/utilities/__init__.py
@@ -44,7 +44,6 @@
_OMEGACONF_AVAILABLE,
_POPTORCH_AVAILABLE,
_RICH_AVAILABLE,
_TORCH_GREATER_EQUAL_1_7,
_TORCH_GREATER_EQUAL_1_8,
_TORCH_GREATER_EQUAL_1_9,
_TORCH_GREATER_EQUAL_1_10,
35 changes: 9 additions & 26 deletions pytorch_lightning/utilities/auto_restart.py
@@ -305,9 +305,6 @@ def _wrap_generator_samplers(self) -> None:
# access wrapped dataset attributes
dataset_dict = self.dataset.__dict__

# create a tuple of sampler names
samplers_names = tuple(v.__class__.__name__ for k, v in dataset_dict.items() if isinstance(v, Sampler))

# create a dictionary of generator present within the dataset attributes
dataset_sampler_generators = {k: v for k, v in dataset_dict.items() if isinstance(v, (Generator, Iterator))}

@@ -318,31 +315,17 @@ def _wrap_generator_samplers(self) -> None:
if isinstance(generator, Sampler):
continue

# used to handle a weird behaviour from PyTorch 1.6
# where the sampler is converted to a list_iterator
is_legacy = False

if isinstance(generator, Generator):
# Generator name have the the form `SamplerName.__iter__`
generator_name = generator.__qualname__.split(".")[0]
else:
# assume the retrieved iterator is coming from sampler.
is_legacy = True

# validate the base generator name matches a sampler name.
if is_legacy or any(sampler_name == generator_name for sampler_name in samplers_names):

# wrap the generator into a `FastForwardSampler`
sampler = FastForwardSampler(generator, attr_name=generator_attr_name)
# wrap the generator into a `FastForwardSampler`
sampler = FastForwardSampler(generator, attr_name=generator_attr_name)

# if `CaptureIterableDataset` was available, the sampler should reload its own state.
if self._state_dict is not None:
sampler.load_state_dict(self._state_dict[generator_attr_name])
# store the samplers
self.samplers[generator_attr_name] = sampler
# if `CaptureIterableDataset` was available, the sampler should reload its own state.
if self._state_dict is not None:
sampler.load_state_dict(self._state_dict[generator_attr_name])
# store the samplers
self.samplers[generator_attr_name] = sampler

# replace generator with the generator from the `FastForwardSampler`.
dataset_dict[generator_attr_name] = iter(sampler)
# replace generator with the generator from the `FastForwardSampler`.
dataset_dict[generator_attr_name] = iter(sampler)

self.reset_on_epoch()

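The rewrite above drops the torch 1.6 "list_iterator" special case, so every generator found on the dataset is now wrapped unconditionally. A hedged sketch of the wrap/reload/replace pattern, using only the calls visible in the hunk; the attribute name and state variable are made up for illustration.

    import itertools
    from pytorch_lightning.utilities.auto_restart import FastForwardSampler

    sample_iter = (i for i in itertools.count())        # stands in for a generator attribute on the dataset
    sampler = FastForwardSampler(sample_iter, attr_name="sample_iter")

    restored_state = None                               # would come from a fault-tolerant checkpoint
    if restored_state is not None:
        sampler.load_state_dict(restored_state["sample_iter"])

    sample_iter = iter(sampler)                         # the dataset attribute is replaced by the wrapped iterator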
9 changes: 1 addition & 8 deletions pytorch_lightning/utilities/cloud_io.py
@@ -19,7 +19,6 @@
import fsspec
import torch
from fsspec.implementations.local import AbstractFileSystem, LocalFileSystem
from packaging.version import Version


def load(
@@ -59,12 +58,6 @@ def atomic_save(checkpoint: Dict[str, Any], filepath: Union[str, Path]) -> None:
"""

bytesbuffer = io.BytesIO()
# Can't use the new zipfile serialization for 1.6.0 because there's a bug in
# torch.hub.load_state_dict_from_url() that prevents it from loading the new files.
# More details can be found here: https://github.com/pytorch/pytorch/issues/42239
if Version(torch.__version__).release[:3] == (1, 6, 0):
torch.save(checkpoint, bytesbuffer, _use_new_zipfile_serialization=False)
else:
torch.save(checkpoint, bytesbuffer)
torch.save(checkpoint, bytesbuffer)
with fsspec.open(filepath, "wb") as f:
f.write(bytesbuffer.getvalue())
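atomic_save now always uses torch's default zipfile serialization. Usage is unchanged; a small sketch, with a local path standing in for any fsspec-compatible target:

    import torch.nn as nn
    from pytorch_lightning.utilities.cloud_io import atomic_save

    model = nn.Linear(4, 2)
    # the checkpoint is serialized into an in-memory buffer first, so serialization
    # errors surface before the target file is ever opened
    atomic_save({"state_dict": model.state_dict()}, "example.ckpt")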
3 changes: 1 addition & 2 deletions pytorch_lightning/utilities/imports.py
@@ -70,7 +70,6 @@ def _compare_version(package: str, op: Callable, version: str, use_base_version:

_IS_WINDOWS = platform.system() == "Windows"
_IS_INTERACTIVE = hasattr(sys, "ps1") # https://stackoverflow.com/a/64523765
_TORCH_GREATER_EQUAL_1_7 = _compare_version("torch", operator.ge, "1.7.0")
_TORCH_GREATER_EQUAL_1_8 = _compare_version("torch", operator.ge, "1.8.0")
_TORCH_GREATER_EQUAL_1_8_1 = _compare_version("torch", operator.ge, "1.8.1")
_TORCH_GREATER_EQUAL_1_9 = _compare_version("torch", operator.ge, "1.9.0")
@@ -112,4 +111,4 @@ def _compare_version(package: str, op: Callable, version: str, use_base_version:

# experimental feature within PyTorch Lightning.
def _fault_tolerant_training() -> bool:
return _TORCH_GREATER_EQUAL_1_7 and int(os.getenv("PL_FAULT_TOLERANT_TRAINING", 0))
return bool(int(os.getenv("PL_FAULT_TOLERANT_TRAINING", 0)))
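With the 1.7 guard removed, fault-tolerant training is controlled purely by the environment variable. A minimal sketch of opting in; the flag is read each time the helper is called.

    import os

    os.environ["PL_FAULT_TOLERANT_TRAINING"] = "1"   # set before the Trainer starts; "0" or unset disables it

    from pytorch_lightning.utilities.imports import _fault_tolerant_training
    assert _fault_tolerant_training()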
6 changes: 2 additions & 4 deletions pytorch_lightning/utilities/seed.py
@@ -21,7 +21,7 @@
import numpy as np
import torch

from pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_7, rank_zero_warn
from pytorch_lightning.utilities import rank_zero_warn
from pytorch_lightning.utilities.distributed import rank_zero_only

log = logging.getLogger(__name__)
@@ -113,9 +113,7 @@ def pl_worker_init_function(worker_id: int, rank: Optional[int] = None) -> None:
np.random.seed(ss.generate_state(4))
# Spawn distinct SeedSequences for the PyTorch PRNG and the stdlib random module
torch_ss, stdlib_ss = ss.spawn(2)
# PyTorch 1.7 and above takes a 64-bit seed
dtype = np.uint64 if _TORCH_GREATER_EQUAL_1_7 else np.uint32
torch.manual_seed(torch_ss.generate_state(1, dtype=dtype)[0])
torch.manual_seed(torch_ss.generate_state(1, dtype=np.uint64)[0])
# use 128 bits expressed as an integer
stdlib_seed = (stdlib_ss.generate_state(2, dtype=np.uint64).astype(object) * [1 << 64, 1]).sum()
random.seed(stdlib_seed)
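Every supported torch version now takes the 64-bit worker seed. A hedged usage sketch of the worker init function on a plain DataLoader; Lightning is expected to attach it automatically when seed_everything(..., workers=True) is used, so the manual wiring here is only illustrative.

    import torch
    from torch.utils.data import DataLoader, TensorDataset
    from pytorch_lightning import seed_everything
    from pytorch_lightning.utilities.seed import pl_worker_init_function

    seed_everything(7, workers=True)
    dataset = TensorDataset(torch.arange(8).float())
    # each worker derives its own reproducible numpy / torch / stdlib seeds
    loader = DataLoader(dataset, num_workers=2, worker_init_fn=pl_worker_init_function)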
2 changes: 1 addition & 1 deletion tests/callbacks/test_quantization.py
@@ -31,7 +31,7 @@
if _TORCH_GREATER_EQUAL_1_8:
from torch.quantization import FakeQuantizeBase
else:
# For torch 1.6 and 1.7.
# For torch 1.7.
from torch.quantization import FakeQuantize as FakeQuantizeBase


6 changes: 2 additions & 4 deletions tests/conftest.py
@@ -22,7 +22,7 @@
import torch.distributed

from pytorch_lightning.plugins.environments.lightning_environment import find_free_network_port
from pytorch_lightning.utilities.imports import _TORCH_GREATER_EQUAL_1_7, _TORCH_GREATER_EQUAL_1_8
from pytorch_lightning.utilities.imports import _TORCH_GREATER_EQUAL_1_8
from tests import _PATH_DATASETS


@@ -95,10 +95,8 @@ def reset_deterministic_algorithm():
yield
if _TORCH_GREATER_EQUAL_1_8:
torch.use_deterministic_algorithms(False)
elif _TORCH_GREATER_EQUAL_1_7:
else:
torch.set_deterministic(False)
else: # the minimum version Lightning supports is PyTorch 1.6
torch._set_deterministic(False)


@pytest.fixture