Merge pull request #5751 from takagi/dtype-spatial-transformer-sampler
Support all float dtypes in `F.spatial_transformer_sampler`
toslunar authored and takagi committed Jan 16, 2019
1 parent fcd9a0b commit 0da6335
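
As a rough usage sketch of what this change enables (shapes below are arbitrary, chosen only for illustration): `F.spatial_transformer_sampler` now accepts any floating-point dtype, with the added constraint that `grid` must share `x`'s dtype.

```python
import numpy
import chainer.functions as F

# x is (N, C, H, W); grid is (N, 2, H_out, W_out) with normalized coordinates in [-1, 1].
for dtype in (numpy.float16, numpy.float32, numpy.float64):
    x = numpy.random.uniform(size=(2, 3, 8, 8)).astype(dtype)
    grid = numpy.random.uniform(-1, 1, size=(2, 2, 5, 5)).astype(dtype)
    y = F.spatial_transformer_sampler(x, grid)
    print(y.shape, y.dtype)  # (2, 3, 5, 5), same float dtype as the inputs
```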
Showing 2 changed files with 35 additions and 19 deletions.
14 changes: 8 additions & 6 deletions chainer/functions/array/spatial_transformer_sampler.py
@@ -23,8 +23,8 @@ def check_type_forward(self, in_types):
         x_type = in_types[0]
         grid_type = in_types[1]
         type_check.expect(
-            x_type.dtype.char == 'f',
-            grid_type.dtype.char == 'f',
+            x_type.dtype.kind == 'f',
+            grid_type.dtype == x_type.dtype,
             x_type.ndim == 4,
             grid_type.ndim == 4,
             grid_type.shape[1] == 2,
@@ -53,8 +53,9 @@ def forward_gpu(self, inputs):
             cuda.cupy.cudnn.create_spatial_transformer_descriptor(
                 _sampler_type, grid.dtype, len(shape), shape.ctypes.data)
 
-        one = numpy.array(1, dtype=x.dtype).ctypes
-        zero = numpy.array(0, dtype=x.dtype).ctypes
+        dtype = numpy.float64 if x.dtype == numpy.float64 else numpy.float32
+        one = numpy.array(1, dtype=dtype).ctypes
+        zero = numpy.array(0, dtype=dtype).ctypes
         libcudnn.spatialTfSamplerForward(
             handle, self.st_desc.value, one.data,
             x_desc.value, x.data.ptr, grid_t.data.ptr, zero.data,
@@ -139,8 +140,9 @@ def backward_gpu(self, inputs, grad_outputs):
         dx_desc = cudnn.create_tensor_descriptor(gx)
         dy_desc = cudnn.create_tensor_descriptor(gy)
 
-        one = numpy.array(1, dtype=x.dtype).ctypes
-        zero = numpy.array(0, dtype=x.dtype).ctypes
+        dtype = numpy.float64 if x.dtype == numpy.float64 else numpy.float32
+        one = numpy.array(1, dtype=dtype).ctypes
+        zero = numpy.array(0, dtype=dtype).ctypes
         libcudnn.spatialTfSamplerBackward(
             handle, self.st_desc.value,
             one.data,
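
A short note on the `dtype` line added in `forward_gpu` and `backward_gpu` above: cuDNN takes the host-side `alpha`/`beta` scaling scalars as double precision only for double tensors and as single-precision float otherwise, so float16 inputs fall back to float32 scalars. A minimal sketch of that selection (the helper name is hypothetical; the expression mirrors the diff):

```python
import numpy

def _cudnn_scalar_dtype(x_dtype):
    # cuDNN expects host alpha/beta scalars as double only for float64 tensors;
    # float32 and float16 tensors both use float32 scalars.
    return numpy.float64 if x_dtype == numpy.float64 else numpy.float32

assert _cudnn_scalar_dtype(numpy.dtype(numpy.float16)) == numpy.float32
assert _cudnn_scalar_dtype(numpy.dtype(numpy.float64)) == numpy.float64
```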
40 changes: 27 additions & 13 deletions tests/chainer_tests/functions_tests/array_tests/test_spatial_transformer_sampler.py
@@ -42,6 +42,7 @@ def _rotate_BCHW(x):
 
 
 @testing.parameterize(*testing.product({
+    'dtype': [numpy.float16, numpy.float32, numpy.float64],
     'use_cudnn': ['always', 'never'],
 }))
 class TestSpatialTransformerSampler(unittest.TestCase):
@@ -52,11 +53,11 @@ class TestSpatialTransformerSampler(unittest.TestCase):
 
     def setUp(self):
         self.x = numpy.random.uniform(
-            size=self.in_shape).astype(numpy.float32)
+            size=self.in_shape).astype(self.dtype)
         self.grid = numpy.random.uniform(
-            low=-2., high=2., size=self.grid_shape).astype(numpy.float32)
+            low=-2., high=2., size=self.grid_shape).astype(self.dtype)
         self.grads = numpy.random.uniform(
-            size=self.out_shape).astype(numpy.float32)
+            size=self.out_shape).astype(self.dtype)
 
     def check_forward(self, x, grid):
         y = functions.spatial_transformer_sampler(x, grid)
@@ -90,19 +91,26 @@ def test_backward_gpu(self):
             cuda.to_gpu(self.grads))
 
 
+@testing.parameterize(*testing.product({
+    'dtype': [numpy.float16, numpy.float32, numpy.float64],
+}))
 class TestSpatialTransformerSamplerConsistencyWithCuDNN(unittest.TestCase):
 
     in_shape = (2, 2, 4, 4)
     out_shape = (2, 2, 3, 3)
     grid_shape = (2, 2, 3, 3)
 
     def setUp(self):
-        self.x = numpy.random.uniform(
-            size=self.in_shape).astype(numpy.float32)
+        self.x = numpy.random.uniform(size=self.in_shape).astype(self.dtype)
         self.grid = numpy.random.uniform(
-            low=-2, high=2, size=self.grid_shape).astype(numpy.float32)
+            low=-2, high=2, size=self.grid_shape).astype(self.dtype)
         self.grads = numpy.random.uniform(
-            size=self.out_shape).astype(numpy.float32)
+            size=self.out_shape).astype(self.dtype)
 
+        if self.dtype == numpy.float16:
+            self.assert_options = {'atol': 1e-2}
+        else:
+            self.assert_options = {}
+
     def _apply_backward(self, x, grid, grads):
         x = Variable(x)
@@ -125,9 +133,12 @@ def test_consistency_with_cudnn_cpu(self):
             cuda.to_gpu(self.x), cuda.to_gpu(self.grid),
             cuda.to_gpu(self.grads))
 
-        testing.assert_allclose(y_cpu.data, y_cudnn.data)
-        testing.assert_allclose(x_cpu.grad, x_cudnn.grad)
-        testing.assert_allclose(grid_cpu.grad, grid_cudnn.grad)
+        testing.assert_allclose(
+            y_cpu.data, y_cudnn.data, **self.assert_options)
+        testing.assert_allclose(
+            x_cpu.grad, x_cudnn.grad, **self.assert_options)
+        testing.assert_allclose(
+            grid_cpu.grad, grid_cudnn.grad, **self.assert_options)
 
     @attr.gpu
     @attr.cudnn
@@ -141,9 +152,12 @@ def test_consistency_with_cudnn_gpu(self):
             cuda.to_gpu(self.x), cuda.to_gpu(self.grid),
             cuda.to_gpu(self.grads))
 
-        testing.assert_allclose(y_gpu.data, y_cudnn.data)
-        testing.assert_allclose(x_gpu.grad, x_cudnn.grad)
-        testing.assert_allclose(grid_gpu.grad, grid_cudnn.grad)
+        testing.assert_allclose(
+            y_gpu.data, y_cudnn.data, **self.assert_options)
+        testing.assert_allclose(
+            x_gpu.grad, x_cudnn.grad, **self.assert_options)
+        testing.assert_allclose(
+            grid_gpu.grad, grid_cudnn.grad, **self.assert_options)
 
 
 @testing.parameterize(
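A brief sketch of how the test parameterization above expands, assuming `chainer.testing` is importable outside a test run: `testing.product` builds the cross product of the parameter lists, and `testing.parameterize` generates one test class per combination. The looser `atol` for float16 reflects its roughly three significant decimal digits.

```python
import numpy
from chainer import testing

# Cross product of the parameter axes used in TestSpatialTransformerSampler.
combos = testing.product({
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
    'use_cudnn': ['always', 'never'],
})
print(len(combos))  # 6 generated test classes, one per dtype/use_cudnn pair
```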
