convolution links tests refactored
Emilio Castillo committed Aug 6, 2019
1 parent 02bcef9 commit 49f659d
Showing 4 changed files with 404 additions and 454 deletions.
298 changes: 135 additions & 163 deletions tests/chainer_tests/links_tests/connection_tests/test_convolution_2d.py
@@ -5,137 +5,106 @@

import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.utils import conv


@testing.parameterize(*testing.product({
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestConvolution2D(unittest.TestCase):
@testing.inject_backend_tests(
['test_forward', 'test_backward', 'test_double_backward',
'test_pickling'],
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestConvolution2D(testing.LinkTestCase):

param_names = ('W', 'b')

def setUp(self):
self.link = links.Convolution2D(
3, 2, 3, stride=2, pad=1,
initialW=chainer.initializers.Normal(1, self.W_dtype),
initial_bias=chainer.initializers.Normal(1, self.x_dtype))
self.link.cleargrads()

self.x = numpy.random.uniform(-1, 1,
(2, 3, 4, 3)).astype(self.x_dtype)
self.gy = numpy.random.uniform(-1, 1,
(2, 2, 2, 2)).astype(self.x_dtype)
self.check_backward_options = {}
self.check_backward_options.update({
'atol': 5e-4, 'rtol': 5e-3
})
if self.x_dtype == numpy.float16 or self.W_dtype == numpy.float16:
self.check_backward_options = {'atol': 3e-2, 'rtol': 5e-2}
self.check_forward_options.update({'atol': 5e-3, 'rtol': 5e-2})
self.check_backward_options.update({
'atol': 1e-3, 'rtol': 1e-3
})

@attr.gpu
def test_im2col_consistency(self):
col_cpu = conv.im2col_cpu(self.x, 3, 3, 2, 2, 1, 1)
col_gpu = conv.im2col_gpu(cuda.to_gpu(self.x), 3, 3, 2, 2, 1, 1)
testing.assert_allclose(col_cpu, col_gpu.get(), atol=0, rtol=0)
def generate_params(self):
initialW = chainer.initializers.Normal(1, self.W_dtype)
initial_bias = chainer.initializers.Normal(1, self.x_dtype)
return initialW, initial_bias

@attr.gpu
def test_col2im_consistency(self):
col = conv.im2col_cpu(self.x, 3, 3, 2, 2, 1, 1)
h, w = self.x.shape[2:]
im_cpu = conv.col2im_cpu(col, 2, 2, 1, 1, h, w)
im_gpu = conv.col2im_gpu(cuda.to_gpu(col), 2, 2, 1, 1, h, w)
testing.assert_allclose(im_cpu, im_gpu.get())

def check_forward_consistency(self):
x_cpu = chainer.Variable(self.x)
y_cpu = self.link(x_cpu)
self.assertEqual(y_cpu.data.dtype, self.x_dtype)

self.link.to_gpu()
x_gpu = chainer.Variable(cuda.to_gpu(self.x))
y_gpu = self.link(x_gpu)
self.assertEqual(y_gpu.data.dtype, self.x_dtype)

testing.assert_allclose(y_cpu.data, y_gpu.data.get())

@attr.gpu
@condition.retry(3)
def test_forward_consistency(self):
self.check_forward_consistency()

@attr.gpu
@condition.retry(3)
def test_forward_consistency_im2col(self):
with chainer.using_config('use_cudnn', 'never'):
self.check_forward_consistency()

def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
self.link, x_data, y_grad, (self.link.W, self.link.b), eps=2 ** -3,
**self.check_backward_options)

@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)

@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))

@attr.gpu
@condition.retry(3)
def test_backward_gpu_im2col(self):
self.link.to_gpu()
with chainer.using_config('use_cudnn', 'never'):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def create_link(self, initializers):
initialW, initial_bias = initializers

def check_pickling(self, x_data):
x = chainer.Variable(x_data)
y = self.link(x)
y_data1 = y.data

del x, y

pickled = pickle.dumps(self.link, -1)
del self.link
self.link = pickle.loads(pickled)

x = chainer.Variable(x_data)
y = self.link(x)
y_data2 = y.data
link = links.Convolution2D(
3, 2, 3, stride=2, pad=1,
initialW=initialW,
initial_bias=initial_bias)

return link

def generate_inputs(self):
x = numpy.random.uniform(-1, 1,
(2, 3, 4, 3)).astype(self.x_dtype)
return x,

def forward_expected(self, link, inputs):
x, = inputs
y = link(x).array
return y,

def test_pickling(self, backend_config):
with backend_config:
x_data, = self.generate_inputs()

link = self.create_link(self.generate_params())
link.to_device(backend_config.device)

x = chainer.Variable(x_data)
x.to_device(backend_config.device)

y = link(x)
y_data1 = y.data
del x, y
pickled = pickle.dumps(link, -1)
del link
link = pickle.loads(pickled)
x = chainer.Variable(x_data)
x.to_device(backend_config.device)
y = link(x)
y_data2 = y.data

testing.assert_allclose(y_data1, y_data2, atol=0, rtol=0)

def test_pickling_cpu(self):
self.check_pickling(self.x)

@attr.gpu
def test_pickling_gpu(self):
self.link.to_gpu()
self.check_pickling(cuda.to_gpu(self.x))


@testing.parameterize(*testing.product({
'conv_args': [((None, 2, 3, 2, 1), {}),
((2, 3), {'stride': 2, 'pad': 1})],
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestConvolution2DParameterShapePlaceholder(unittest.TestCase):
class TestConvolution2DIm2ColConsistency(unittest.TestCase):

def setUp(self):
args, kwargs = self.conv_args
self.link = links.Convolution2D(*args, **kwargs)
self.x = numpy.random.uniform(-1, 1,
(2, 3, 4, 3)).astype(numpy.float32)
self.link(chainer.Variable(self.x))
b = self.link.b.data
b[...] = numpy.random.uniform(-1, 1, b.shape)
self.link.cleargrads()
self.gy = numpy.random.uniform(-1, 1,
(2, 2, 2, 2)).astype(numpy.float32)
(2, 3, 4, 3)).astype(self.x_dtype)

@attr.gpu
def test_im2col_consistency(self):
@@ -151,74 +120,77 @@ def test_col2im_consistency(self):
im_gpu = conv.col2im_gpu(cuda.to_gpu(col), 2, 2, 1, 1, h, w)
testing.assert_allclose(im_cpu, im_gpu.get())

def check_forward_consistency(self):
x_cpu = chainer.Variable(self.x)
y_cpu = self.link(x_cpu)
self.assertEqual(y_cpu.data.dtype, numpy.float32)

self.link.to_gpu()
x_gpu = chainer.Variable(cuda.to_gpu(self.x))
y_gpu = self.link(x_gpu)
self.assertEqual(y_gpu.data.dtype, numpy.float32)

testing.assert_allclose(y_cpu.data, y_gpu.data.get())

@attr.cudnn
@condition.retry(3)
def test_forward_consistency(self):
self.check_forward_consistency()

@attr.gpu
@condition.retry(3)
def test_forward_consistency_im2col(self):
with chainer.using_config('use_cudnn', 'never'):
self.check_forward_consistency()
@testing.parameterize(*testing.product({
'conv_args': [((None, 2, 3, 2, 1), {}),
((2, 3), {'stride': 2, 'pad': 1})],
}))
@testing.inject_backend_tests(
['test_forward', 'test_backward', 'test_double_backward',
'test_pickling'],
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestConvolution2DParameterShapePlaceholder(testing.LinkTestCase):

param_names = ('W', 'b')

def generate_params(self):
return ()

def create_link(self, initializers):

def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
self.link, x_data, y_grad, (self.link.W, self.link.b), eps=1e-2)
args, kwargs = self.conv_args
link = links.Convolution2D(*args, **kwargs)
b = link.b.data
b[...] = numpy.random.uniform(-1, 1, b.shape)

@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
return link

@attr.cudnn
@condition.retry(3)
def test_backward_gpu(self):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def generate_inputs(self):
x = numpy.random.uniform(-1, 1,
(2, 3, 4, 3)).astype(numpy.float32)
return x,

@attr.gpu
@condition.retry(3)
def test_backward_gpu_im2col(self):
self.link.to_gpu()
with chainer.using_config('use_cudnn', 'never'):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def forward_expected(self, link, inputs):
x, = inputs
y = link(x).array
return y,

def check_pickling(self, x_data):
x = chainer.Variable(x_data)
y = self.link(x)
y_data1 = y.data
def test_pickling(self, backend_config):
with backend_config:
x_data, = self.generate_inputs()

del x, y
link = self.create_link(self.generate_params())
link.to_device(backend_config.device)

pickled = pickle.dumps(self.link, -1)
del self.link
self.link = pickle.loads(pickled)
x = chainer.Variable(x_data)
x.to_device(backend_config.device)

x = chainer.Variable(x_data)
y = self.link(x)
y_data2 = y.data
y = link(x)
y_data1 = y.data
del x, y
pickled = pickle.dumps(link, -1)
del link
link = pickle.loads(pickled)
x = chainer.Variable(x_data)
x.to_device(backend_config.device)
y = link(x)
y_data2 = y.data

testing.assert_allclose(y_data1, y_data2, atol=0, rtol=0)

def test_pickling_cpu(self):
self.check_pickling(self.x)

@attr.gpu
def test_pickling_gpu(self):
self.link.to_gpu()
self.check_pickling(cuda.to_gpu(self.x))


testing.run_module(__name__, __file__)
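
For readers new to the harness this commit migrates to: testing.LinkTestCase generates test_forward and test_backward from a few overrides (param_names, generate_params, create_link, generate_inputs, forward_expected), and testing.inject_backend_tests repeats the listed tests across the given backend configurations. Below is a minimal sketch of the pattern; links.Linear, the shapes, and the backend list are illustrative assumptions, not taken from this commit.

# Minimal sketch of the LinkTestCase pattern this commit adopts.
# links.Linear, the shapes and the backend list are illustrative
# assumptions; they are not part of the commit itself.
import numpy

import chainer
from chainer import links
from chainer import testing


@testing.inject_backend_tests(
    ['test_forward', 'test_backward'],
    # One plain CPU configuration, then CUDA variants.
    [{}]
    + testing.product({
        'use_cuda': [True],
        'use_cudnn': ['never', 'always'],
    }))
class TestLinearSketch(testing.LinkTestCase):

    # Parameters whose gradients the generated tests check.
    param_names = ('W', 'b')

    def generate_params(self):
        # Initializers handed to create_link below.
        return chainer.initializers.Normal(1, numpy.float32),

    def create_link(self, initializers):
        # Linear stands in for Convolution2D to keep the sketch short.
        initialW, = initializers
        return links.Linear(3, 2, initialW=initialW)

    def generate_inputs(self):
        x = numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float32)
        return x,

    def forward_expected(self, link, inputs):
        # Reference output computed with the host-side copy of the link.
        x, = inputs
        return link(x).array,


testing.run_module(__name__, __file__)

Note that test_pickling in the diff above is not generated by the harness; it is an ordinary method taking backend_config, which inject_backend_tests parameterizes alongside the generated tests.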
