
Commit

Merge pull request #2955 from mitmul/remove-use-cudnn-arg
Remove use_cudnn argument from SpatialTransformerGrid/Sampler
beam2d committed Sep 11, 2017
2 parents 6bd1230 + 1a053f1 commit dc30664
Showing 4 changed files with 89 additions and 79 deletions.
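
The change applies Chainer v2's configuration mechanism to these two functions: instead of threading a use_cudnn flag through each call, callers pick the cuDNN policy through the thread-local config. A minimal before/after sketch (the shapes are illustrative, not taken from this commit):

    import numpy as np

    import chainer
    from chainer import functions as F

    theta = np.zeros((1, 2, 3), dtype=np.float32)  # batch of 2x3 affine matrices

    # Before this commit:
    #   grid = F.spatial_transformer_grid(theta, (24, 24), use_cudnn=True)

    # After this commit, cuDNN use follows the thread-local config;
    # valid values are 'always', 'never', and 'auto'.
    with chainer.using_config('use_cudnn', 'auto'):
        grid = F.spatial_transformer_grid(theta, (24, 24))

    print(grid.shape)  # (1, 2, 24, 24)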
30 changes: 17 additions & 13 deletions chainer/functions/array/spatial_transformer_grid.py
@@ -1,22 +1,21 @@
 import numpy
 
+import chainer
 from chainer import cuda
 from chainer import function
+from chainer.utils import argument
 from chainer.utils import type_check
 
 
 if cuda.cudnn_enabled:
     cudnn = cuda.cudnn
     libcudnn = cudnn.cudnn
-    _cudnn_version = libcudnn.getVersion()
-    _sampler_type = libcudnn.CUDNN_SAMPLER_BILINEAR
 
 
 class SpatialTransformerGrid(function.Function):
 
-    def __init__(self, output_shape, use_cudnn=True):
+    def __init__(self, output_shape):
         self.output_shape = output_shape
-        self.use_cudnn = use_cudnn
 
     def check_type_forward(self, in_types):
         n_in = in_types.size()
@@ -34,8 +33,7 @@ def forward_cpu(self, inputs):
         return self._forward(inputs)
 
     def forward_gpu(self, inputs):
-        if not (cuda.cudnn_enabled and self.use_cudnn and
-                _cudnn_version >= 5000):
+        if not chainer.should_use_cudnn('>=auto', 5000):
             return self._forward(inputs)
         theta, = inputs
         B, _, _ = theta.shape
@@ -79,8 +77,7 @@ def backward_cpu(self, inputs, grad_outputs):
         return self._backward(inputs, grad_outputs)
 
     def backward_gpu(self, inputs, grad_outputs):
-        if not (cuda.cudnn_enabled and self.use_cudnn and
-                _cudnn_version >= 5000):
+        if not chainer.should_use_cudnn('>=auto', 5000):
             return self._backward(inputs, grad_outputs)
         theta, = inputs
         ggrid, = grad_outputs
@@ -115,7 +112,7 @@ def _backward(self, inputs, grad_outputs):
         return gtheta,
 
 
-def spatial_transformer_grid(theta, output_shape, use_cudnn=True):
+def spatial_transformer_grid(theta, output_shape, **kwargs):
     """2D Spatial Transformer grid.
 
     This function generates coordinates of the points sampled from an image
@@ -126,6 +123,10 @@ def spatial_transformer_grid(theta, output_shape, use_cudnn=True):
     sampled from the source image :math:`(x_i^s, y_i^s)` are calculated
     by the following equation.
 
+    .. note::
+
+        cuDNN supports SpatialTransformerGrid from version 5.0.0.
+
     .. math::
 
         \\left(\\begin{matrix} x_i^s \\\\
@@ -148,9 +149,6 @@ def spatial_transformer_grid(theta, output_shape, use_cudnn=True):
             This is a batch of :math:`2 \\times 3` matrix used for
             the warping described above.
         output_shape (tuple): A tuple of 2 elements: :math:`h_O, w_O`.
-        use_cudnn (bool): If ``True``, then this function uses cuDNN if
-            available. Note that, cuDNN supports SpatialTransformerGrid
-            from version 5.0.0.
 
     Returns:
         ~chainer.Variable: A variable of shape :math:`(n, 2, h_O, w_O)`.
@@ -162,4 +160,10 @@
         the upper-left corner of the input image.
 
     """
-    return SpatialTransformerGrid(output_shape, use_cudnn)(theta)
+    argument.check_unexpected_kwargs(
+        kwargs, use_cudnn="The argument \"use_cudnn\" is not "
+        "supported anymore. "
+        "Use chainer.using_config('use_cudnn', value) "
+        "context where value can be `always`, `never`, or `auto`.")
+    argument.assert_kwargs_empty(kwargs)
+    return SpatialTransformerGrid(output_shape)(theta)
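
With the **kwargs guard above, the old keyword is rejected at call time rather than silently ignored. A sketch of the resulting behaviour; the assumption that argument.check_unexpected_kwargs raises ValueError is mine, inferred from its message-style arguments:

    import numpy as np

    import chainer
    from chainer import functions as F

    theta = np.zeros((1, 2, 3), dtype=np.float32)

    try:
        # use_cudnn is now an unexpected keyword argument.
        F.spatial_transformer_grid(theta, (24, 24), use_cudnn=True)
    except ValueError as e:
        print(e)  # directs the caller to chainer.using_config('use_cudnn', ...)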
29 changes: 16 additions & 13 deletions chainer/functions/array/spatial_transformer_sampler.py
@@ -1,22 +1,20 @@
 import numpy
 
+import chainer
 from chainer import cuda
 from chainer import function
+from chainer.utils import argument
 from chainer.utils import type_check
 
 
 if cuda.cudnn_enabled:
     cudnn = cuda.cudnn
     libcudnn = cudnn.cudnn
-    _cudnn_version = libcudnn.getVersion()
     _sampler_type = libcudnn.CUDNN_SAMPLER_BILINEAR
 
 
 class SpatialTransformerSampler(function.Function):
 
-    def __init__(self, use_cudnn=True):
-        self.use_cudnn = use_cudnn
-
     def check_type_forward(self, in_types):
         n_in = in_types.size()
         type_check.expect(2 == n_in)
@@ -36,8 +34,7 @@ def forward_cpu(self, inputs):
         return self._forward(inputs)
 
     def forward_gpu(self, inputs):
-        if not (cuda.cudnn_enabled and self.use_cudnn and
-                _cudnn_version >= 5000):
+        if not chainer.should_use_cudnn('>=auto', 5000):
             return self._forward(inputs)
         x, grid = inputs
         out_shape = x.shape[:2] + grid.shape[2:]
@@ -124,8 +121,7 @@ def backward_cpu(self, inputs, grad_outputs):
         return self._backward(inputs, grad_outputs)
 
     def backward_gpu(self, inputs, grad_outputs):
-        if not (cuda.cudnn_enabled and self.use_cudnn and
-                _cudnn_version >= 5000):
+        if not chainer.should_use_cudnn('>=auto', 5000):
             return self._backward(inputs, grad_outputs)
         x, grid = inputs
         gy, = grad_outputs
@@ -253,7 +249,7 @@ def _backward(self, inputs, grad_outputs):
         return gx, ggrid
 
 
-def spatial_transformer_sampler(x, grid, use_cudnn=True):
+def spatial_transformer_sampler(x, grid, **kwargs):
     """2D Spatial Transformer sampler.
 
     This is a differentiable image sampler. With a set of sampling points
@@ -278,6 +274,10 @@ def spatial_transformer_sampler(x, grid, use_cudnn=True):
     See detail in the following paper: `Spatial Transformer Networks \
     <https://arxiv.org/abs/1506.02025>`_.
 
+    .. note::
+
+        cuDNN supports SpatialTransformerSampler from version 5.0.0.
+
     Args:
         x (~chainer.Variable): Input variable of shape :math:`(n, c_I, h, w)`.
         grid (~chainer.Variable): Coordinate variable of shape
@@ -294,13 +294,16 @@
             The coordinate :math:`(-1, -1)` corresponds to the upper-left
             corner of the input image.
-        use_cudnn (bool): If ``True``, then this function uses cuDNN if
-            available. Note that, cuDNN supports SpatialTransformerSampler
-            from version 5.0.0.
 
     Returns:
         ~chainer.Variable: Output feature map of shape \
             :math:`(n, c_I, h_O, w_O)`.
 
     """
-    return SpatialTransformerSampler(use_cudnn)(x, grid)
+    argument.check_unexpected_kwargs(
+        kwargs, use_cudnn="The argument \"use_cudnn\" is not "
+        "supported anymore. "
+        "Use chainer.using_config('use_cudnn', value) "
+        "context where value can be `always`, `never`, or `auto`.")
+    argument.assert_kwargs_empty(kwargs)
+    return SpatialTransformerSampler()(x, grid)
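
The sampler consumes the grid produced by spatial_transformer_grid, so the two functions are normally chained. A minimal end-to-end sketch under the new scheme (identity warp; 'never' forces the non-cuDNN path so it also runs on CPU-only setups):

    import numpy as np

    import chainer
    from chainer import functions as F

    x = np.random.uniform(size=(1, 3, 32, 32)).astype(np.float32)
    # Identity affine transform: the output should approximately reproduce x.
    theta = np.array([[[1, 0, 0], [0, 1, 0]]], dtype=np.float32)

    with chainer.using_config('use_cudnn', 'never'):
        grid = F.spatial_transformer_grid(theta, (32, 32))
        y = F.spatial_transformer_sampler(x, grid)

    print(y.shape)  # (1, 3, 32, 32)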
@@ -2,6 +2,7 @@
 
 import numpy
 
+import chainer
 from chainer import cuda
 from chainer import functions
 from chainer import gradient_check
@@ -11,7 +12,7 @@
 
 
 @testing.parameterize(*testing.product({
-    'use_cudnn': [True, False],
+    'use_cudnn': ['always', 'never'],
 }))
 class TestSpatialTransformerGrid(unittest.TestCase):
 
@@ -22,9 +23,8 @@ def setUp(self):
         self.grads = numpy.random.uniform(
             size=(B, 2) + self.output_shape).astype(self.theta.dtype)
 
-    def check_forward(self, theta, output_shape, use_cudnn=True):
-        grid = functions.spatial_transformer_grid(
-            theta, output_shape, use_cudnn).data
+    def check_forward(self, theta, output_shape):
+        grid = functions.spatial_transformer_grid(theta, output_shape).data
 
         theta = cuda.to_cpu(theta)
         B = theta.shape[0]
@@ -46,13 +46,13 @@ def test_forward_cpu(self):
 
     @attr.gpu
     def test_forward_gpu(self):
-        self.check_forward(
-            cuda.to_gpu(self.theta), self.output_shape, self.use_cudnn)
+        self.check_forward(cuda.to_gpu(self.theta), self.output_shape)
 
-    def check_backward(self, theta, output_shape, grads, use_cudnn=True):
-        gradient_check.check_backward(
-            functions.SpatialTransformerGrid(output_shape, use_cudnn),
-            (theta,), (grads,), atol=1e-4, rtol=1e-3)
+    def check_backward(self, theta, output_shape, grads):
+        with chainer.using_config('use_cudnn', self.use_cudnn):
+            gradient_check.check_backward(
+                functions.SpatialTransformerGrid(output_shape),
+                (theta,), (grads,), atol=1e-4, rtol=1e-3)
 
     @condition.retry(3)
     def test_backward_cpu(self):
@@ -61,10 +61,9 @@ def test_backward_cpu(self):
     @attr.gpu
     @condition.retry(3)
     def test_backward_gpu(self):
-        self.check_backward(cuda.to_gpu(self.theta),
-                            self.output_shape,
-                            cuda.to_gpu(self.grads),
-                            self.use_cudnn)
+        with chainer.using_config('use_cudnn', self.use_cudnn):
+            self.check_backward(cuda.to_gpu(self.theta), self.output_shape,
+                                cuda.to_gpu(self.grads))
 
 
 testing.run_module(__name__, __file__)
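
The test's 'always'/'never' values feed chainer.using_config, which in turn drives the chainer.should_use_cudnn('>=auto', 5000) predicate that the rewritten forward_gpu/backward_gpu methods consult. A small sketch of that interaction; the 'never' assertion holds whether or not cuDNN is installed:

    import chainer

    # should_use_cudnn('>=auto', 5000) is True only when cuDNN is available,
    # its version is at least 5000, and config.use_cudnn is 'always' or 'auto'.
    with chainer.using_config('use_cudnn', 'never'):
        assert not chainer.should_use_cudnn('>=auto', 5000)

    with chainer.using_config('use_cudnn', 'auto'):
        print(chainer.should_use_cudnn('>=auto', 5000))  # depends on the install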
