Deprecate Trainer.gpus (#12436)
DuYicong515 and carmocca committed Mar 27, 2022
Co-authored-by: Carlos Mocholí <carlossmocholi@gmail.com>
1 parent e072336 commit 01d817c
Showing 6 changed files with 39 additions and 5 deletions.
3 changes: 3 additions & 0 deletions CHANGELOG.md
@@ -600,6 +600,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Deprecated passing only the callback state to `Callback.on_load_checkpoint(callback_state)` in favor of passing the callback state to `Callback.load_state_dict` and in 1.8, passing the entire checkpoint dictionary to `Callback.on_load_checkpoint(checkpoint)` ([#11887](https://github.com/PyTorchLightning/pytorch-lightning/pull/11887))


+- Deprecated `Trainer.gpus` in favor of `Trainer.device_ids` or `Trainer.num_devices` ([#12436](https://github.com/PyTorchLightning/pytorch-lightning/pull/12436))
+
+
 ### Removed

 - Removed deprecated parameter `method` in `pytorch_lightning.utilities.model_helpers.is_overridden` ([#10507](https://github.com/PyTorchLightning/pytorch-lightning/pull/10507))
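For downstream code the migration is mechanical: reads of `trainer.gpus` become `trainer.num_devices` (the resolved device count) or `trainer.device_ids` (the concrete device indices). A minimal before/after sketch, assuming a machine with two visible CUDA devices:

from pytorch_lightning import Trainer

trainer = Trainer(accelerator="gpu", devices=2)

# Before: warns from v1.6 on, stops working in v1.8
# n = trainer.gpus
# After:
n = trainer.num_devices    # 2
ids = trainer.device_ids   # [0, 1]
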
4 changes: 2 additions & 2 deletions pytorch_lightning/callbacks/gpu_stats_monitor.py
@@ -128,8 +128,8 @@ def setup(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", stage: O

         if trainer.strategy.root_device.type != "cuda":
             raise MisconfigurationException(
-                "You are using GPUStatsMonitor but are not running on GPU"
-                f" since gpus attribute in Trainer is set to {trainer.gpus}."
+                "You are using GPUStatsMonitor but are not running on GPU."
+                f" The root device type is {trainer.strategy.root_device.type}."
             )

         # The logical device IDs for selected devices
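The callback now derives its error message from the strategy's root device instead of the deprecated `trainer.gpus`. A minimal sketch of the same guard in isolation, assuming `trainer` is an already-constructed Trainer (a plain RuntimeError stands in for Lightning's MisconfigurationException):

root_device = trainer.strategy.root_device
if root_device.type != "cuda":
    # Same condition as the callback's setup() check above.
    raise RuntimeError(f"GPU stats requested, but the root device type is {root_device.type}.")
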
4 changes: 4 additions & 0 deletions pytorch_lightning/trainer/trainer.py
@@ -2195,6 +2195,10 @@ def scaler(self) -> Optional[Any]:

     @property
     def gpus(self) -> Optional[Union[List[int], str, int]]:
+        rank_zero_deprecation(
+            "`Trainer.gpus` was deprecated in v1.6 and will be removed in v1.8."
+            " Please use `Trainer.num_devices` or `Trainer.device_ids` to get device information instead."
+        )
         return self._accelerator_connector.gpus

     @property
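The property keeps returning the old value but warns on every read before delegating to the accelerator connector. A generic, standard-library sketch of this warn-on-read pattern (`rank_zero_deprecation` additionally restricts the warning to the rank-zero process, which this sketch does not reproduce):

import warnings

class Legacy:
    @property
    def old_value(self):
        # Warn at the access site, then delegate to the replacement attribute.
        warnings.warn("`old_value` is deprecated; use `new_value`.", DeprecationWarning)
        return self.new_value

    @property
    def new_value(self):
        return 42
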
4 changes: 2 additions & 2 deletions tests/accelerators/test_accelerator_connector.py
@@ -554,7 +554,7 @@ def test_accelerator_gpu_with_devices(devices, plugin):
 def test_accelerator_auto_with_devices_gpu():
     trainer = Trainer(accelerator="auto", devices=1)
     assert isinstance(trainer.accelerator, GPUAccelerator)
-    assert trainer.gpus == 1
+    assert trainer.num_devices == 1


 def test_validate_accelerator_and_devices():
@@ -946,8 +946,8 @@ def test_devices_auto_choice_cpu(
 @mock.patch("torch.cuda.device_count", return_value=2)
 def test_devices_auto_choice_gpu(is_gpu_available_mock, device_count_mock):
     trainer = Trainer(accelerator="auto", devices="auto")
     assert isinstance(trainer.accelerator, GPUAccelerator)
-    assert trainer.gpus == 2
+    assert trainer.num_devices == 2


 @pytest.mark.parametrize(
20 changes: 20 additions & 0 deletions tests/deprecated_api/test_remove_1-8.py
@@ -1106,3 +1106,23 @@ def on_save_checkpoint(self, trainer, pl_module, checkpoint):

     trainer.callbacks = [TestCallbackSaveHookOverride()]
     trainer.save_checkpoint(tmpdir + "/pathok.ckpt")
+
+
+@pytest.mark.parametrize(
+    "trainer_kwargs",
+    [
+        {"accelerator": "gpu", "devices": 2},
+        {"accelerator": "gpu", "devices": [0, 2]},
+        {"accelerator": "gpu", "devices": "2"},
+        {"accelerator": "gpu", "devices": "0,"},
+    ],
+)
+def test_trainer_gpus(monkeypatch, trainer_kwargs):
+    monkeypatch.setattr(torch.cuda, "is_available", lambda: True)
+    monkeypatch.setattr(torch.cuda, "device_count", lambda: 4)
+    trainer = Trainer(**trainer_kwargs)
+    with pytest.deprecated_call(
+        match="`Trainer.gpus` was deprecated in v1.6 and will be removed in v1.8."
+        " Please use `Trainer.num_devices` or `Trainer.device_ids` to get device information instead."
+    ):
+        assert trainer.gpus == trainer_kwargs["devices"]
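
The new test leans on `pytest.deprecated_call`, which fails unless the enclosed block emits a DeprecationWarning (or FutureWarning) whose message matches the given pattern via a regex search. A self-contained sketch of that mechanism, with a hypothetical `legacy()` helper:

import warnings

import pytest

def legacy():
    warnings.warn("`legacy` was deprecated in v1.6.", DeprecationWarning)
    return 7

def test_legacy_warns():
    with pytest.deprecated_call(match="deprecated in v1.6"):
        assert legacy() == 7
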
9 changes: 8 additions & 1 deletion tests/models/test_gpu.py
@@ -23,6 +23,7 @@
 import tests.helpers.pipelines as tpipes
 import tests.helpers.utils as tutils
 from pytorch_lightning import Trainer
+from pytorch_lightning.accelerators import CPUAccelerator, GPUAccelerator
 from pytorch_lightning.plugins.environments import TorchElasticEnvironment
 from pytorch_lightning.utilities import device_parser
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
@@ -192,10 +193,16 @@ def test_torchelastic_gpu_parsing(mocked_device_count, mocked_is_available, gpus
     sanitizing the gpus as only one of the GPUs is visible."""
     trainer = Trainer(gpus=gpus)
     assert isinstance(trainer._accelerator_connector.cluster_environment, TorchElasticEnvironment)
-    assert trainer.gpus == gpus
+    # when use gpu
+    if device_parser.parse_gpu_ids(gpus) is not None:
+        assert isinstance(trainer.accelerator, GPUAccelerator)
+        assert trainer.num_devices == len(gpus) if isinstance(gpus, list) else gpus
+        assert trainer.device_ids == device_parser.parse_gpu_ids(gpus)
+    # fall back to cpu
+    else:
+        assert isinstance(trainer.accelerator, CPUAccelerator)
+        assert trainer.num_devices == 1
+        assert trainer.device_ids == [0]


@RunIf(min_gpus=1)
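The branching in the updated test hinges on `device_parser.parse_gpu_ids`, which normalizes the `gpus` flag into a list of device indices, or None when no GPU is requested. The values below are assumptions inferred from the cases this test exercises, on a machine with enough visible CUDA devices:

from pytorch_lightning.utilities import device_parser

device_parser.parse_gpu_ids(2)       # [0, 1]  (an int requests that many GPUs)
device_parser.parse_gpu_ids([0, 2])  # [0, 2]  (a list names exact indices)
device_parser.parse_gpu_ids("0,")    # [0]     (a comma string lists indices)
device_parser.parse_gpu_ids(0)       # None    (no GPUs requested, CPU fallback)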
