9 changes: 6 additions & 3 deletions pytorch_lightning/accelerators/gpu.py
@@ -26,16 +26,19 @@
 class GPUAccelerator(Accelerator):
     """ Accelerator for GPU devices. """
 
+    def setup_environment(self) -> None:
+        super().setup_environment()
+        if "cuda" not in str(self.root_device):
+            raise MisconfigurationException(f"Device should be GPU, got {self.root_device} instead")
+        torch.cuda.set_device(self.root_device)
+
     def setup(self, trainer: 'pl.Trainer', model: 'pl.LightningModule') -> None:
         """
         Raises:
             MisconfigurationException:
                 If the selected device is not GPU.
         """
-        if "cuda" not in str(self.root_device):
-            raise MisconfigurationException(f"Device should be GPU, got {self.root_device} instead")
         self.set_nvidia_flags(trainer.local_rank)
-        torch.cuda.set_device(self.root_device)
         return super().setup(trainer, model)
 
     def on_train_start(self) -> None:
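The new `setup_environment` hook is what makes the per-plugin `torch.cuda.set_device` calls removed in the files below redundant. A minimal sketch of the assumed ordering (illustrative only; the driver loop is not the actual Trainer internals):

# Assumed hook order after this change (illustrative, not real Trainer code):
accelerator.setup_environment()    # GPUAccelerator validates the device and calls
                                   # torch.cuda.set_device(root_device) exactly once
accelerator.setup(trainer, model)  # sets NVIDIA flags, then runs base-class setup
plugin.model_to_device()           # plugins can now assume the CUDA device is already
                                   # pinned and only need model.to(root_device)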
2 changes: 0 additions & 2 deletions pytorch_lightning/plugins/training_type/ddp.py
@@ -367,8 +367,6 @@ def pre_backward(self, closure_loss: torch.Tensor, should_accumulate: bool, opti
         prepare_for_backward(self.model, closure_loss)
 
     def model_to_device(self):
-        if self.root_device.type == "cuda":
-            torch.cuda.set_device(self.root_device)
         self.model.to(self.root_device)
 
     def reduce(self, tensor, group: Optional[Any] = None, reduce_op: Union[ReduceOp, str] = "mean") -> torch.Tensor:
2 changes: 0 additions & 2 deletions pytorch_lightning/plugins/training_type/deepspeed.py
@@ -339,8 +339,6 @@ def setup_distributed(self):
         if not self._config_initialized:
             self._format_config()
             self._config_initialized = True
-        if self.on_gpu:
-            torch.cuda.set_device(self.root_device)
 
     def pre_dispatch(self):
         self.init_deepspeed()
1 change: 0 additions & 1 deletion pytorch_lightning/plugins/training_type/fully_sharded.py
@@ -118,7 +118,6 @@ def setup_distributed(self) -> None:
                 "You selected accelerator to be `ddp_fully_sharded`, but GPU is not available."
             )
         super().setup_distributed()
-        torch.cuda.set_device(self.root_device)
 
     @contextlib.contextmanager
     def model_sharded_context(self) -> Generator:
3 changes: 0 additions & 3 deletions pytorch_lightning/plugins/training_type/single_device.py
@@ -61,9 +61,6 @@ def root_device(self) -> torch.device:
         return self.device
 
     def model_to_device(self) -> None:
-        if self.on_gpu:
-            torch.cuda.set_device(self.root_device)
-
         self._model.to(self.root_device)
 
     def setup(self, model: torch.nn.Module) -> torch.nn.Module:
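A quick way to exercise the new contract is to assert that `setup_environment` alone pins the device. This is a hedged sketch: it assumes the `GPUAccelerator(precision_plugin=..., training_type_plugin=...)` constructor and the plugin exports of this era of Lightning, plus a machine with CUDA available.

from unittest import mock

import torch
from pytorch_lightning.accelerators import GPUAccelerator
from pytorch_lightning.plugins import PrecisionPlugin, SingleDevicePlugin

# Build an accelerator roughly the way the Trainer would
# (constructor signature is an assumption, not part of this diff).
accelerator = GPUAccelerator(
    precision_plugin=PrecisionPlugin(),
    training_type_plugin=SingleDevicePlugin(torch.device("cuda", 0)),
)

# After this PR the device is pinned once, in setup_environment; the
# plugin-level calls removed above would now be redundant second calls.
with mock.patch("torch.cuda.set_device") as set_device:
    accelerator.setup_environment()
set_device.assert_called_once_with(torch.device("cuda", 0))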