
add multi swa support for custom device #103297

Closed
6 changes: 3 additions & 3 deletions torch/optim/swa_utils.py
@@ -6,6 +6,7 @@
 import torch
 from torch.nn import Module
 from torch.optim.lr_scheduler import LRScheduler
+from torch.utils._foreach_utils import _get_foreach_kernels_supported_devices

 __all__ = [
     'AveragedModel',
@@ -184,8 +185,7 @@ def update_parameters(self, model):
         self_param_detached = []
         model_param_detached = []
         for p_averaged, p_model in zip(self_param, model_param):
-            device = p_averaged.device
-            p_model_ = p_model.detach().to(device)
+            p_model_ = p_model.detach().to(p_averaged.device)
             self_param_detached.append(p_averaged.detach())
             model_param_detached.append(p_model_)
             if self.n_averaged == 0:
@@ -197,7 +197,7 @@ def update_parameters(self, model):
                 for ((device, _), ([self_params, model_params], _)) in grouped_tensors.items():
                     if self.multi_avg_fn:
                         self.multi_avg_fn(self_params, model_params, self.n_averaged.to(device))
-                    elif device.type == 'cuda':
+                    elif device.type in _get_foreach_kernels_supported_devices():
                         multi_avg_fn = get_swa_multi_avg_fn()
                         multi_avg_fn(self_params, model_params, self.n_averaged.to(device))
                     else:
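For context, a minimal sketch of the path this diff widens (not part of the PR): with no custom averaging function supplied, AveragedModel.update_parameters now takes the fused multi-tensor branch on every device type reported by _get_foreach_kernels_supported_devices(), not only CUDA. The snippet below runs on CPU just to exercise the public API; the model and training loop are illustrative.

import torch
from torch.optim.swa_utils import AveragedModel

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# With avg_fn/multi_avg_fn left unset, update_parameters() groups parameters
# by device and dtype and, when the device type is listed by
# _get_foreach_kernels_supported_devices(), averages them with the fused
# get_swa_multi_avg_fn() kernel.
swa_model = AveragedModel(model)

for _ in range(5):
    optimizer.zero_grad()
    model(torch.randn(8, 4)).sum().backward()
    optimizer.step()
    swa_model.update_parameters(model)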
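The user-supplied branch still takes precedence over the built-in kernel. A hypothetical multi-tensor EMA function (ema_multi_avg_fn is an illustrative name, not part of this PR or the library) matching the (averaged_params, model_params, num_averaged) calling convention visible in the diff:

# Hypothetical EMA variant built on the same torch._foreach_* fused ops
# the default path uses; num_averaged is accepted but unused here.
def ema_multi_avg_fn(averaged_params, model_params, num_averaged):
    decay = 0.999
    torch._foreach_mul_(averaged_params, decay)
    torch._foreach_add_(averaged_params, model_params, alpha=1 - decay)

ema_model = AveragedModel(model, multi_avg_fn=ema_multi_avg_fn)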