Commit

fix init
jasperzhong committed Jan 21, 2021
1 parent 83c6c8a commit 7ac1dc7
Showing 1 changed file with 12 additions and 6 deletions.
byteps/torch/__init__.py: 12 additions and 6 deletions
@@ -55,7 +55,8 @@ def __init__(self, params, named_parameters, compression,
                              'tuples (name, parameter), usually produced by '
                              'model.named_parameters().')

-        dups = _DistributedOptimizer.find_duplicates([k for k, _ in named_parameters])
+        dups = _DistributedOptimizer.find_duplicates(
+            [k for k, _ in named_parameters])
         if len(dups) > 0:
             raise ValueError('Parameter names in named_parameters must be unique. '
                              'Found duplicates: %s' % ', '.join(dups))
@@ -70,7 +71,8 @@ def __init__(self, params, named_parameters, compression,
                 # https://github.com/pytorch/pytorch/issues/7733
                 self._parameter_names = {v.__hash__(): k for k, v
                                          in sorted(named_parameters)}
-                self._tensor_list = [tensor for name, tensor in named_parameters]
+                self._tensor_list = [tensor for name,
+                                     tensor in named_parameters]
             else:
                 self._is_tensor_instance = False
                 self._parameter_names = {v: k for k, v
@@ -134,7 +136,8 @@ def _push_pull_grad_async(self, p):
         else:
             tensor = p.grad
             tensor_compressed, ctx = self._compression.compress(tensor)
-            handle = byteps_push_pull(tensor_compressed, average=True, name="Gradient."+name)
+            handle = byteps_push_pull(
+                tensor_compressed, average=True, name="Gradient."+name)
         return handle, ctx

     def _make_hook(self, p):
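
For readers skimming the diff: the wrapped call above sits on BytePS's gradient path, where a backward hook compresses the gradient, launches a non-blocking push-pull under a "Gradient."-prefixed key, and synchronize() later waits on the returned handle and writes the decompressed result back into p.grad. Below is a minimal sketch of that pattern; the import paths and helper names are assumptions for illustration, not the file's exact code.

# Sketch only: paraphrases the gradient push-pull pattern in
# byteps/torch/__init__.py. Import paths are assumed; check your version.
from byteps.torch.ops import push_pull_async_inplace as byteps_push_pull
from byteps.torch.ops import synchronize


def launch_grad_push_pull(p, name, compression):
    # Compress the gradient, then start a non-blocking push-pull; the op
    # returns immediately with a handle that can be waited on later.
    tensor_compressed, ctx = compression.compress(p.grad)
    handle = byteps_push_pull(tensor_compressed, average=True,
                              name="Gradient." + name)
    return handle, ctx


def wait_and_decompress(p, handle, ctx, compression):
    # Block until the push-pull finishes, then write the averaged,
    # decompressed gradient back into the parameter's .grad tensor.
    output = synchronize(handle)
    p.grad.set_(compression.decompress(output, ctx))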
@@ -176,7 +179,8 @@ def synchronize(self):
     @contextmanager
     def skip_synchronize(self):
         if self._enable_async:
-            raise AssertionError("skip_synchronize cannot be used in async training")
+            raise AssertionError(
+                "skip_synchronize cannot be used in async training")
         self._should_sync = False
         try:
             yield
@@ -201,7 +205,8 @@ def step(self, closure=None):
                         name = self._parameter_names.get(p.__hash__())
                     else:
                         name = self._parameter_names.get(p)
-                    handle = byteps_push_pull(p, average=False, name="AsyncParam."+name)
+                    handle = byteps_push_pull(
+                        p, average=False, name="Parameter."+name)
                     _, ctx = self._compression.compress(p)
                     self._handles[p] = (handle, ctx)
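
This is the one hunk in the commit that changes behavior rather than line length: the push-pull key for asynchronously trained parameters moves from "AsyncParam." to "Parameter.". The sketch below paraphrases the async branch of step() that the change lands in; the standalone function, loop structure, and import path are assumptions based on the lines shown, not a verbatim copy of the file.

# Illustrative paraphrase: in async training, step() creates a push-pull
# handle for any parameter that does not have one yet, keyed "Parameter.<name>"
# (previously "AsyncParam.<name>"). Import path is assumed.
from byteps.torch.ops import push_pull_async_inplace as byteps_push_pull


def push_pull_async_params(handles, parameter_names, compression):
    for p, (handle, _) in handles.items():
        if handle is None:
            name = parameter_names.get(p)
            handle = byteps_push_pull(p, average=False,
                                      name="Parameter." + name)
            _, ctx = compression.compress(p)
            handles[p] = (handle, ctx)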

@@ -378,7 +383,8 @@ def _from_tensor():
             key = '%s.%d' % (option_key, index)
             dtypes = _get_types(option_value)
             option_tensor = torch.Tensor([option_value]).cuda()
-            callbacks[key] = _create_option_callback(index, option_key, option_tensor, dtypes)
+            callbacks[key] = _create_option_callback(
+                index, option_key, option_tensor, dtypes)
             params.append((key, option_tensor))

     # The params list here is ordered by the layers in the model
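
Since byteps/torch/__init__.py is the module users import directly, the optimizer being reformatted here is normally wired into a training script as in the sketch below. It follows the Horovod-style byteps.torch API (init, DistributedOptimizer, broadcast_parameters, broadcast_optimizer_state); treat the exact signatures as assumptions and check them against the installed version.

import torch
import byteps.torch as bps

bps.init()
torch.cuda.set_device(bps.local_rank())

model = torch.nn.Linear(10, 2).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01 * bps.size())

# Wrap the optimizer: every backward pass then goes through the push-pull
# calls touched by this commit, keyed as "Gradient.<parameter name>".
optimizer = bps.DistributedOptimizer(
    optimizer,
    named_parameters=model.named_parameters(),
    compression=bps.Compression.none)

# Start all workers from identical weights and optimizer options.
bps.broadcast_parameters(model.state_dict(), root_rank=0)
bps.broadcast_optimizer_state(optimizer, root_rank=0)

for _ in range(10):
    optimizer.zero_grad()
    loss = model(torch.randn(4, 10).cuda()).sum()
    loss.backward()
    optimizer.step()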
