Skip to content

Commit

Permalink
Merge d773b97 into 5683281
Browse files Browse the repository at this point in the history
  • Loading branch information
niboshi committed Nov 14, 2017
2 parents 5683281 + d773b97 commit e16b073
Show file tree
Hide file tree
Showing 16 changed files with 1,179 additions and 548 deletions.
48 changes: 48 additions & 0 deletions chainer/__init__.py
Expand Up @@ -3,6 +3,8 @@
import threading
import warnings

import numpy

from chainer import _version
from chainer import configuration # NOQA
from chainer import cuda # NOQA
Expand Down Expand Up @@ -62,6 +64,7 @@


from chainer import _environment_check
from chainer import _ideep


# Check environment conditions
Expand All @@ -71,6 +74,7 @@
__version__ = _version.__version__

_thread_local = threading.local()
_array_types = None


def get_function_hooks():
Expand All @@ -82,6 +86,49 @@ def get_function_hooks():
return ret


def _load_array_types():
    """Lazily initializes the supported array-type tuples.

    Populates the module-level ``_array_types`` (all supported ndarray
    types) and ``_cpu_array_types`` (CPU-resident ndarray types only)
    exactly once.  ``cuda.ndarray`` is included only when CUDA is
    available; iDeep's ``mdarray`` is included only when iDeep is
    available.
    """
    global _array_types
    global _cpu_array_types

    if _array_types is None:
        # Build the lists in locals and publish them only as finished
        # tuples.  The original assigned a mutable, partially-built list
        # to the globals, so a concurrent caller of get_array_types()
        # could observe an incomplete (and still mutable) value.
        array_types = [numpy.ndarray]
        cpu_array_types = [numpy.ndarray]

        if cuda.available:
            array_types.append(cuda.ndarray)

        if _ideep.is_available():
            array_types.append(_ideep.ideep.mdarray)
            cpu_array_types.append(_ideep.ideep.mdarray)

        _array_types = tuple(array_types)
        _cpu_array_types = tuple(cpu_array_types)


def get_array_types():
    """Returns the tuple of ndarray types supported as function inputs.

    The tuple is built lazily on first use by ``_load_array_types``; it
    contains ``numpy.ndarray`` plus backend array types (CUDA / iDeep)
    when those backends are available.
    """
    _load_array_types()
    return _array_types


def get_cpu_array_types():
    """Returns the tuple of CPU-resident ndarray types.

    Like ``get_array_types`` but excludes GPU (CUDA) arrays; built
    lazily on first use by ``_load_array_types``.
    """
    _load_array_types()
    return _cpu_array_types


def is_arrays_compatible(arrays):
    """Checks whether the given arrays may be used together.

    ``None`` entries are ignored.  A GPU array is only compatible with
    other GPU arrays; CPU arrays (NumPy and, when iDeep is available,
    iDeep ``mdarray``) are only compatible with each other.
    """
    present = [a for a in arrays if a is not None]
    if not present:
        return True
    if type(present[0]) is cuda.ndarray:
        allowed = cuda.ndarray
    elif _ideep.is_available():
        allowed = (numpy.ndarray, _ideep.ideep.mdarray)
    else:
        allowed = numpy.ndarray
    return all(isinstance(a, allowed) for a in present)


# Default values for the global configuration.  Debug mode is enabled by
# setting the CHAINER_DEBUG environment variable to a nonzero integer.
global_config.debug = bool(int(os.environ.get('CHAINER_DEBUG', '0')))
global_config.cudnn_deterministic = False
global_config.enable_backprop = True
Expand All @@ -92,6 +139,7 @@ def get_function_hooks():
global_config.use_cudnn = os.environ.get('CHAINER_USE_CUDNN', 'auto')
global_config.use_cudnn_tensor_core = 'auto'
global_config.autotune = False
# iDeep use level; overridable via CHAINER_USE_IDEEP.  The accepted
# values ('always', 'auto', 'never') are validated in chainer._ideep.
global_config.use_ideep = os.environ.get('CHAINER_USE_IDEEP', 'auto')


_SHOULD_USE_CUDNN = {
Expand Down
64 changes: 64 additions & 0 deletions chainer/_ideep.py
@@ -0,0 +1,64 @@
# Import error captured when the optional ``ideep`` package cannot be
# imported; ``None`` means iDeep is importable and usable.
_error = None

from chainer.configuration import config


try:
    import ideep  # NOQA
except ImportError as e:
    # Keep the exception so callers (check_available) can report why
    # iDeep is unavailable.
    _error = e


# Maps an iDeep use level to which ``config.use_ideep`` values enable it:
# '==always' requires the config to be 'always'; '>=auto' also accepts
# 'auto'.
_SHOULD_USE_IDEEP = {
    '==always': {'always': True, 'auto': False, 'never': False},
    '>=auto': {'always': True, 'auto': True, 'never': False},
}


def is_available():
    """Returns ``True`` if the ``ideep`` package was imported successfully."""
    return _error is None


def check_available():
    """Checks if iDeep is available.

    When iDeep is correctly set up, nothing happens.
    Otherwise it raises ``RuntimeError``.
    """
    if _error is not None:
        # Report both the exception type and its message.  The original
        # format string had a single '{}' placeholder with two arguments,
        # so the actual error message was silently dropped.
        raise RuntimeError(
            'iDeep is not available.\n'
            'Reason: {}: {}'.format(type(_error).__name__, str(_error)))


def should_use_ideep(level):
    """Determines if we should use iDeep.

    This function checks ``chainer.config.use_ideep`` and availability
    of the ``ideep`` package.

    Args:
        level (str): iDeep use level. It must be either ``'==always'`` or
            ``'>=auto'``. ``'==always'`` indicates that the ``use_ideep``
            config must be ``'always'`` to use iDeep.

    Returns:
        bool: ``True`` if the caller should use iDeep.

    Raises:
        ValueError: If ``level`` is not a recognized use level, or the
            ``use_ideep`` configuration value is not one of ``'always'``,
            ``'auto'``, or ``'never'``.
    """
    # If the ideep package failed to import, never use it regardless of
    # the configuration.
    if not is_available():
        return False

    if level not in _SHOULD_USE_IDEEP:
        raise ValueError('invalid iDeep use level: %s '
                         '(must be either of "==always" or ">=auto")' %
                         repr(level))

    flags = _SHOULD_USE_IDEEP[level]

    use_ideep = config.use_ideep
    if use_ideep not in flags:
        raise ValueError('invalid use_ideep configuration: %s '
                         '(must be either of "always", "auto", or "never")' %
                         repr(use_ideep))
    return flags[use_ideep]
61 changes: 52 additions & 9 deletions chainer/cuda.py
Expand Up @@ -237,16 +237,17 @@ def to_gpu(array, device=None, stream=None):
"""Copies the given CPU array to the specified device.
Args:
array: Array to be sent to GPU.
array (numpy.ndarray, cupy.ndarray, list or tuple): Array or arrays to
be sent to GPU.
device: Device specifier.
stream (~cupy.cuda.Stream): *(deprecated since v3.0.0)*
CUDA stream. If not ``None``, the copy runs asynchronously.
Returns:
cupy.ndarray: Array on GPU.
cupy.ndarray, list or tuple: Array or arrays on GPU.
If ``array`` is already on the GPU device specified by ``device``,
this function just returns ``array`` without performing any copy.
If some of the arrays are already on GPU, then this function just
returns those arrays without performing any copy.
"""
if stream is not None:
Expand All @@ -255,6 +256,27 @@ def to_gpu(array, device=None, stream=None):
'Please remove it.', DeprecationWarning)

check_cuda_available()
with Device(device) as device_:
if isinstance(array, (list, tuple)):
d = {}
ret = []
for arr in array:
if arr is None:
ret.append(None)
else:
arr2 = d.get(id(arr))
if arr2 is None:
arr2 = _array_to_gpu(arr, device_, stream)
d[id(arr)] = arr2
ret.append(arr2)
return type(array)(ret)
else:
return _array_to_gpu(array, device_, stream)


def _array_to_gpu(array, device, stream):
if array is None:
return None
if isinstance(array, (numpy.number, numpy.bool_)):
array = numpy.asarray(array)
if not isinstance(array, (cupy.ndarray, numpy.ndarray)):
Expand Down Expand Up @@ -303,23 +325,44 @@ def to_cpu(array, stream=None):
"""Copies the given GPU array to host CPU.
Args:
array: Array to be sent to CPU.
array (numpy.ndarray, cupy.ndarray, list or tuple): Array or arrays to
be sent to CPU.
stream (cupy.cuda.Stream): CUDA stream.
Returns:
numpy.ndarray: Array on CPU.
numpy.ndarray, list or tuple: Array on CPU.
If given ``array`` is already on CPU, then this function just returns
``array`` without performing any copy.
If some of the arrays are already on CPU, then this function just
returns those arrays without performing any copy.
"""
if isinstance(array, (list, tuple)):
d = {}
ret = []
for arr in array:
if arr is None:
ret.append(None)
else:
arr2 = d.get(id(arr))
if arr2 is None:
arr2 = _array_to_cpu(arr, stream)
d[id(arr)] = arr2
ret.append(arr2)
return type(array)(ret)
else:
return _array_to_cpu(array, stream)


def _array_to_cpu(array, stream):
if array is None:
return None
if isinstance(array, ndarray):
check_cuda_available()
with get_device_from_array(array):
return array.get(stream)
elif isinstance(array, (numpy.number, numpy.bool_)):
return numpy.asarray(array)
elif isinstance(array, numpy.ndarray):
elif isinstance(array, chainer.get_cpu_array_types()):
return array
else:
raise TypeError(
Expand Down
5 changes: 5 additions & 0 deletions chainer/function_node.py
Expand Up @@ -284,6 +284,11 @@ def apply(self, inputs):
return ret

def _check_data_type_forward(self, in_data):
if not chainer.is_arrays_compatible(in_data):
raise ValueError('numpy and cupy must not be used together\n'
'{}'
.format(', '.join(str(type(_)) for _ in in_data)))

in_type = type_check.get_light_types(in_data)
try:
with type_check.light_mode:
Expand Down

0 comments on commit e16b073

Please sign in to comment.