
Commit

Merge pull request #1 from cupy/cupy-v1.19.0
Merge chainer v1.19.0
beam2d committed Dec 22, 2016
2 parents 8a0e9e3 + d35bdfb commit 353ea32
Showing 57 changed files with 3,523 additions and 1,267 deletions.
2 changes: 2 additions & 0 deletions README.md
@@ -100,6 +100,8 @@ The Twenty-ninth Annual Conference on Neural Information Processing Systems (NIP
- Forum (Japanese): https://groups.google.com/forum/#!forum/chainer-jp
- Twitter: https://twitter.com/ChainerOfficial
- Twitter (Japanese): https://twitter.com/chainerjp
- External examples: https://github.com/pfnet/chainer/wiki/External-examples
- Research projects using Chainer: https://github.com/pfnet/chainer/wiki/Research-projects-using-Chainer

## License

2 changes: 1 addition & 1 deletion appveyor.yml
@@ -68,4 +68,4 @@ test_script:
- "%CMD_IN_ENV% pip install nose mock hacking"
- "flake8"
- "cd tests\\install_tests"
- "nosetests -A \"gpu<1\""
- "nosetests -a \"!gpu,!slow\""
66 changes: 66 additions & 0 deletions chainer/functions/evaluation/r2_score.py
@@ -0,0 +1,66 @@
from chainer import cuda
from chainer import function
from chainer.utils import type_check


class R2_score(function.Function):

    def __init__(self, sample_weight, multioutput):
        if sample_weight is not None:
            raise NotImplementedError()
        if multioutput in ['uniform_average', 'raw_values']:
            self.multioutput = multioutput
        else:
            raise ValueError("invalid multioutput argument")

    def check_type_forward(self, in_types):
        type_check.expect(in_types.size() == 2)
        pred_type, true_type = in_types

        type_check.expect(
            pred_type.dtype.kind == 'f',
            true_type.dtype.kind == 'f'
        )

        type_check.expect(
            pred_type.shape == true_type.shape,
        )

    def forward(self, inputs):
        xp = cuda.get_array_module(*inputs)
        pred, true = inputs
        SS_res = xp.sum((pred - true) ** 2, axis=0)
        SS_tot = xp.sum((true - xp.mean(true, axis=0)) ** 2, axis=0)
        ret = xp.where(SS_tot != 0, 1 - SS_res / SS_tot, 0.0)\
            .astype(pred.dtype)
        if self.multioutput == 'uniform_average':
            return xp.asarray(ret.mean()),
        elif self.multioutput == 'raw_values':
            return ret,


def r2_score(pred, true, sample_weight=None, multioutput='uniform_average'):
    """Computes R^2 (coefficient of determination) regression score function.

    Args:
        pred (Variable): Variable holding a vector, matrix or tensor of
            estimated target values.
        true (Variable): Variable holding a vector, matrix or tensor of
            correct target values.
        sample_weight: This argument is for compatibility with scikit-learn's
            implementation of r2_score. The current implementation admits
            ``None`` only.
        multioutput (string): ['uniform_average', 'raw_values']. If
            'uniform_average', this function returns the average of the R^2
            scores over the multiple outputs. If 'raw_values', this function
            returns one R^2 score per output.

    Returns:
        Variable: A Variable holding a scalar array of the R^2 score if
        'multioutput' is 'uniform_average' or a vector of R^2 scores if
        'multioutput' is 'raw_values'.

    .. note:: This function is non-differentiable.

    """
    return R2_score(sample_weight=sample_weight,
                    multioutput=multioutput)(pred, true)
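A minimal usage sketch for the new function (not part of this commit; the two-column float32 inputs are illustrative):

import numpy as np
from chainer import Variable
from chainer.functions.evaluation.r2_score import r2_score

pred = Variable(np.array([[2.5, 0.0], [2.0, 8.0]], dtype=np.float32))
true = Variable(np.array([[3.0, -0.5], [2.0, 7.0]], dtype=np.float32))

# Average of the per-column R^2 scores (default 'uniform_average').
print(r2_score(pred, true).data)

# One R^2 score per output column.
print(r2_score(pred, true, multioutput='raw_values').data)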
163 changes: 163 additions & 0 deletions chainer/functions/pooling/max_pooling_nd.py
@@ -0,0 +1,163 @@
import numpy

import functools
from operator import mul
import six

from chainer import cuda
from chainer.functions.pooling import max_pooling_nd_kernel
from chainer.functions.pooling import pooling_nd
from chainer import utils
from chainer.utils import conv_nd


if cuda.cudnn_enabled:
    cudnn = cuda.cudnn
    libcudnn = cudnn.cudnn
    _cudnn_version = libcudnn.getVersion()


class MaxPoolingND(pooling_nd._PoolingND):

    """Max pooling over a set of N-dimensional planes."""

    def __init__(self, ndim, ksize, stride=None, pad=0, cover_all=True,
                 use_cudnn=True):
        utils.experimental('chainer.functions.pooling.MaxPoolingND')
        super(MaxPoolingND, self).__init__(
            ndim, ksize, stride=stride, pad=pad, cover_all=cover_all,
            use_cudnn=use_cudnn)

    def forward_cpu(self, x):
        col = conv_nd.im2col_nd_cpu(
            x[0], self.ksize, self.stride, self.pad, pval=-float('inf'),
            cover_all=self.cover_all)
        n, c = col.shape[:2]
        mid = (len(col.shape) - 2) // 2 + 2
        ksize = col.shape[2:mid]
        outs = col.shape[mid:]
        # (n, c, k_1 * k_2 * ... * k_N, out_1, out_2, ..., out_N)
        col_shape = (n, c) + (functools.reduce(mul, ksize),) + outs
        col = col.reshape(col_shape)

        # We select maximum twice, since the implementation using numpy.choose
        # hits its bug when kh * kw >= 32.
        self.indexes = col.argmax(axis=2)
        y = col.max(axis=2)
        return y,

    def forward_gpu(self, x):
        if (cuda.cudnn_enabled and self.use_cudnn and
                pooling_nd._check_cudnn_acceptable_type(x[0].dtype)):
            # With cuDNN v3 or greater, use cuDNN implementation for inputs
            # with spatial dimensions of two or more.
            if _cudnn_version >= 3000 and self.ndim >= 2:
                return super(MaxPoolingND, self).forward_gpu(x)
            # With cuDNN v2, use cuDNN implementation only for inputs with
            # spatial dimensions of two.
            elif self.ndim == 2:
                return super(MaxPoolingND, self).forward_gpu(x)

        n, c = x[0].shape[:2]
        dims = x[0].shape[2:]
        ys = tuple(conv_nd.get_conv_outsize(d, k, s, p, self.cover_all)
                   for (d, k, s, p) in six.moves.zip(
                       dims, self.ksize, self.stride, self.pad))
        # (n, c, y_1, y_2, ..., y_N)
        y_shape = (n, c) + ys
        y = cuda.cupy.empty(y_shape, dtype=x[0].dtype)
        self.indexes = cuda.cupy.empty(y_shape, dtype=numpy.int32)

        in_params, out_params, operation, name = \
            max_pooling_nd_kernel.MaxPoolingNDKernelForward.generate(self.ndim)
        cuda.elementwise(in_params, out_params, operation, name)(
            x[0].reduced_view(),
            *(dims + ys + self.ksize + self.stride + self.pad +
              (y, self.indexes)))

        return y,

    def backward_cpu(self, x, gy):
        ndim = self.ndim
        n, c = gy[0].shape[:2]
        outs = gy[0].shape[2:]
        dims = x[0].shape[2:]
        prod_outs = functools.reduce(mul, outs)
        prod_ksize = functools.reduce(mul, self.ksize)

        gcol = numpy.zeros(n * c * prod_outs * prod_ksize, dtype=x[0].dtype)

        indexes = self.indexes.ravel()
        indexes += numpy.arange(0, indexes.size * prod_ksize, prod_ksize)

        gcol[indexes] = gy[0].ravel()
        gcol_shape = (n, c) + outs + self.ksize
        gcol = gcol.reshape(gcol_shape)
        for i in six.moves.range(ndim):
            gcol = numpy.swapaxes(gcol, 2 + i, ndim + 2 + i)

        gx = conv_nd.col2im_nd_cpu(gcol, self.stride, self.pad, dims)
        return gx,

    def backward_gpu(self, x, gy):
        if (cuda.cudnn_enabled and self.use_cudnn and
                pooling_nd._check_cudnn_acceptable_type(x[0].dtype)):
            # With cuDNN v3 or greater, use cuDNN implementation for inputs
            # with spatial dimensions of two or more.
            if _cudnn_version >= 3000 and self.ndim >= 2:
                return super(MaxPoolingND, self).backward_gpu(x, gy)
            # With cuDNN v2, use cuDNN implementation only for inputs with
            # spatial dimensions of two.
            elif self.ndim == 2:
                return super(MaxPoolingND, self).backward_gpu(x, gy)

        n, c = x[0].shape[:2]
        dims = x[0].shape[2:]
        ys = gy[0].shape[2:]
        gx = cuda.cupy.empty_like(x[0])

        ndim = self.ndim
        in_params, out_params, operation, name = \
            max_pooling_nd_kernel.MaxPoolingNDKernelBackward.generate(ndim)
        cuda.elementwise(in_params, out_params, operation, name)(
            gy[0].reduced_view(), self.indexes.reduced_view(),
            *(dims + ys + self.ksize + self.stride + self.pad + (gx,)))
        return gx,

    def create_pool_desc(self):
        return cudnn.create_pooling_descriptor(
            self.ksize, self.stride, self.pad, libcudnn.CUDNN_POOLING_MAX)


def max_pooling_nd(x, ksize, stride=None, pad=0, cover_all=True,
                   use_cudnn=True):
    """N-dimensionally spatial max pooling function.

    This function provides an N-dimensionally generalized version of
    :func:`~functions.max_pooling_2d`. It acts similarly to
    :class:`~functions.ConvolutionND`, but it computes the maximum of the
    input spatial patch for each channel without any parameter instead of
    computing the inner products.

    Args:
        x (~chainer.Variable): Input variable.
        ksize (int or tuple of ints): Size of pooling window. ``ksize=k`` and
            ``ksize=(k, k, ..., k)`` are equivalent.
        stride (int or tuple of ints or None): Stride of pooling applications.
            ``stride=s`` and ``stride=(s, s, ..., s)`` are equivalent. If
            ``None`` is specified, then it uses the same stride as the pooling
            window size.
        pad (int or tuple of ints): Spatial padding width for the input array.
            ``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.
        cover_all (bool): If ``True``, all spatial locations are pooled into
            some output pixels. It may make the output size larger.
        use_cudnn (bool): If ``True`` and cuDNN is enabled, then this function
            uses cuDNN as the core implementation. cuDNN supports more than
            one-dimensional pooling.

    Returns:
        ~chainer.Variable: Output variable.

    """
    ndim = len(x.shape[2:])
    return MaxPoolingND(ndim, ksize, stride, pad, cover_all, use_cudnn)(x)
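A minimal CPU usage sketch for the new function (not part of this commit; the input shape is illustrative):

import numpy as np
from chainer.functions.pooling.max_pooling_nd import max_pooling_nd

# Batch of 2 samples, 3 channels, and an 8x8x8 spatial volume.
x = np.random.rand(2, 3, 8, 8, 8).astype(np.float32)

# 3D max pooling with a 2x2x2 window; stride defaults to the window size.
y = max_pooling_nd(x, ksize=2)
print(y.data.shape)  # (2, 3, 4, 4, 4)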
92 changes: 92 additions & 0 deletions chainer/functions/pooling/max_pooling_nd_kernel.py
@@ -0,0 +1,92 @@
import six

from chainer.functions.pooling import pooling_nd_kernel
from chainer.utils import conv_nd_kernel


class MaxPoolingNDKernelForward(pooling_nd_kernel.PoolingNDKernelForward):

    def name(self):
        # max_pool_{N}d_fwd
        return 'max'

    def out_params(self):
        # T out, S indexes
        return ['S indexes']

    def before(self):
        # 2D: T maxval = (T)-INFINITY;
        #     int argmax_0 = 0;
        #     int argmax_1 = 0;
        def aux(argmax):
            return 'int {} = 0;'.format(argmax)
        self.argmaxs = conv_nd_kernel.vars('argmax', self.ndim)
        argmax_decls = conv_nd_kernel.map_(aux, self.argmaxs)
        return '\n'.join(['T maxval = (T)-INFINITY;'] + argmax_decls)

    def main(self, offset, xs):
        # 2D: T v = in[offset_1];
        #     if (maxval < v) {
        #       maxval = v;
        #       argmax_0 = x_0;
        #       argmax_1 = x_1;
        #     }
        w = conv_nd_kernel.Writer()
        w.write('T v = in[{}];'.format(offset))
        w.write('if (maxval < v) {', 'inc')
        w.write('maxval = v;')
        for argmax, x in six.moves.zip(self.argmaxs, xs):
            w.write('{} = {};'.format(argmax, x))
        w.write('}', 'dec')
        return w.get()

    def after(self, out_xs):
        # 2D: out = maxval;
        #     int argmax_k_0 = argmax_0 + p_0 - out_x_0 * s_0;
        #     int argmax_k_1 = argmax_1 + p_1 - out_x_1 * s_1;
        #     indexes = (argmax_k_1 + k_1 * argmax_k_0);
        def aux(argmax_k, argmax, p, out_x, s):
            return 'int {} = {} + {} - {} * {};'.format(
                argmax_k, argmax, p, out_x, s)
        argmax_ks = conv_nd_kernel.vars('argmax_k', self.ndim)
        argmax_k_decls = conv_nd_kernel.map_(
            aux, argmax_ks, self.argmaxs, self.ps, out_xs, self.ss)
        indexes_set = 'indexes = {};'.format(
            conv_nd_kernel.muladdexp(self.ks[1:], argmax_ks[1:], argmax_ks[0]))
        return '\n'.join(['out = maxval;'] + argmax_k_decls + [indexes_set])


class MaxPoolingNDKernelBackward(pooling_nd_kernel.PoolingNDKernelBackward):

    def name(self):
        # max_pool_{N}d_bwd
        return 'max'

    def in_params(self):
        # 2D: raw T gy, raw S indexes, int32 d_0, int32 d_1, int32 out_0,
        #     int32 out_1, int32 k_0, int32 k_1, int32 s_0, int32 s_1,
        #     int32 p_0, int32 p_1
        return (['raw S indexes'], [])

    def before(self):
        return 'T val = 0;'

    def main(self, offset, xs, out_xs):
        # 2D: int kx = (x_1 - out_x_1 * s_1 + k_1 *
        #              (x_0 - out_x_0 * s_0 + k_0 * 0));
        #     if (indexes[offset_1] == kx) {
        #       val = val + gy[offset_1];
        #     }
        def aux(x, out_x, s):
            return '{} - {} * {}'.format(x, out_x, s)
        w = conv_nd_kernel.Writer()
        w.write('int kx = {};'.format(
            conv_nd_kernel.muladdexp(self.ks, conv_nd_kernel.map_(
                aux, xs, out_xs, self.ss), '0')))
        w.write('if (indexes[{}] == kx) {{'.format(offset), 'inc')
        w.write('val = val + gy[{}];'.format(offset))
        w.write('}', 'dec')
        return w.get()

    def after(self, xs):
        return 'gx = val;'
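The two classes above only supply the max-specific code fragments; the full ElementwiseKernel parameter lists and body are assembled by the generate classmethod of the base classes in pooling_nd_kernel, as used in max_pooling_nd.py. A short sketch for inspecting the generated 3-dimensional forward kernel (not part of this commit):

from chainer.functions.pooling.max_pooling_nd_kernel import \
    MaxPoolingNDKernelForward

# Build the CUDA source fragments for 3D max pooling.
in_params, out_params, operation, name = MaxPoolingNDKernelForward.generate(3)
print(name)       # e.g. 'max_pool_3d_fwd'
print(operation)  # generated loop with the maxval / argmax bookkeeping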
