Skip to content
This repository has been archived by the owner on Apr 19, 2023. It is now read-only.

Commit

Permalink
Merge pull request #150 from mys007/python37_async
Browse the repository at this point in the history
Python 3.7 compatibility
  • Loading branch information
constantinpape committed Oct 30, 2018
Merge commit 84e22bc (2 parents: 62edee6 + 624f316)
Show file tree
Hide file tree
Showing 2 changed files with 12 additions and 12 deletions.
4 changes: 2 additions & 2 deletions inferno/extensions/containers/graph.py
Original file line number Diff line number Diff line change
Expand Up @@ -357,7 +357,7 @@ def get_module_for_nodes(self, names):
modules.append(module)
return pyu.from_iterable(modules)

def to_device(self, names, target_device, device_ordinal=None, async=False):
def to_device(self, names, target_device, device_ordinal=None, asynchron=False):
"""Transfer nodes in the network to a specified device."""
names = pyu.to_iterable(names)
for name in names:
Expand All @@ -368,7 +368,7 @@ def to_device(self, names, target_device, device_ordinal=None, async=False):
# Transfer
module_on_device = OnDevice(module, target_device,
device_ordinal=device_ordinal,
async=async)
asynchron=asynchron)
setattr(self, name, module_on_device)
return self

Expand Down
20 changes: 10 additions & 10 deletions inferno/extensions/layers/device.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,16 +7,16 @@

class DeviceTransfer(nn.Module):
"""Layer to transfer variables to a specified device."""
def __init__(self, target_device, device_ordinal=None, async=False):
def __init__(self, target_device, device_ordinal=None, asynchron=False):
"""
Parameters
----------
target_device : {'cpu', 'cuda'}
Device to transfer to.
device_ordinal : int
Device ordinal if target_device == 'cuda'.
async : bool
Whether to use async transfers.
asynchron : bool
Whether to use asynchronous transfers.
"""
super(DeviceTransfer, self).__init__()
# Validate arguments
Expand All @@ -29,11 +29,11 @@ def __init__(self, target_device, device_ordinal=None, async=False):
DeviceError)
self.target_device = target_device
self.device_ordinal = device_ordinal
self.async = async
self.asynchron = asynchron

def forward(self, *inputs):
if self.target_device == 'cuda':
transferred = tuple(input_.cuda(device_id=self.device_ordinal, async=self.async)
transferred = tuple(input_.cuda(device_id=self.device_ordinal, asynchron=self.asynchron)
for input_ in inputs)
elif self.target_device == 'cpu':
transferred = tuple(input_.cpu() for input_ in inputs)
Expand All @@ -48,7 +48,7 @@ class OnDevice(nn.Module):
that the inputs are transferred to the same device as the module, enabling easy model
parallelism.
"""
def __init__(self, module, target_device, device_ordinal=None, async=False):
def __init__(self, module, target_device, device_ordinal=None, asynchron=False):
"""
Parameters
----------
Expand All @@ -58,8 +58,8 @@ def __init__(self, module, target_device, device_ordinal=None, async=False):
The device to move `module` to. Must be either 'cuda' or 'cpu'.
device_ordinal : int
Ordinal of the GPU device if `target_device = 'cuda'`.
async : bool
Whether to use async transfers.
asynchron : bool
Whether to use asynchronous transfers.
"""
super(OnDevice, self).__init__()
# Validate arguments
Expand All @@ -72,11 +72,11 @@ def __init__(self, module, target_device, device_ordinal=None, async=False):
DeviceError)
self.target_device = target_device
self.device_ordinal = device_ordinal
self.async = async
self.asynchron = asynchron
# This is a no-op if module is already in the right device
self.device_transfer = DeviceTransfer(self.target_device,
device_ordinal=self.device_ordinal,
async=self.async)
asynchron=self.asynchron)

self.module = self.transfer_module(module)

Expand Down

0 comments on commit 84e22bc

Please sign in to comment.