
Commit

Add the arctan2 function
muupan committed Aug 9, 2017
1 parent 0aa511a commit 76b18c4
Showing 4 changed files with 109 additions and 0 deletions.
2 changes: 2 additions & 0 deletions chainer/functions/__init__.py
@@ -240,6 +240,8 @@
from chainer.functions.math.trigonometric import Arcsin # NOQA
from chainer.functions.math.trigonometric import arctan # NOQA
from chainer.functions.math.trigonometric import Arctan # NOQA
from chainer.functions.math.trigonometric import arctan2 # NOQA
from chainer.functions.math.trigonometric import Arctan2 # NOQA
from chainer.functions.math.trigonometric import cos # NOQA
from chainer.functions.math.trigonometric import Cos # NOQA
from chainer.functions.math.trigonometric import sin # NOQA
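
With these re-exports the new function is reachable as chainer.functions.arctan2 (and its Function class as chainer.functions.Arctan2). A minimal usage sketch, not part of this commit, assuming Chainer and NumPy are installed:

import numpy as np
import chainer.functions as F

# As with numpy.arctan2, the first argument holds y-coordinates and the
# second holds x-coordinates; raw ndarrays are accepted and wrapped.
y = np.array([1.0, -1.0], dtype=np.float32)
x = np.array([1.0, 1.0], dtype=np.float32)
theta = F.arctan2(y, x)
print(theta.data)  # roughly [ 0.785 -0.785], i.e. pi/4 and -pi/4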
51 changes: 51 additions & 0 deletions chainer/functions/math/trigonometric.py
@@ -235,3 +235,54 @@ def arctan(x):
        ~chainer.Variable: Output variable.
    """
    return Arctan()(x)


class Arctan2(function.Function):

    @property
    def label(self):
        return 'arctan2'

    def check_type_forward(self, in_types):
        type_check.expect(in_types.size() == 2)
        type_check.expect(in_types[0].dtype.kind == 'f')
        type_check.expect(in_types[1].dtype.kind == 'f')

    def forward(self, x):
        xp = cuda.get_array_module(*x)
        return utils.force_array(xp.arctan2(x[0], x[1])),

    def backward_cpu(self, x, gy):
        x1, x2 = x
        sqnorm = x1 ** 2 + x2 ** 2
        gx1 = utils.force_array(x2 / sqnorm * gy[0])
        gx2 = utils.force_array(-x1 / sqnorm * gy[0])
        return gx1, gx2

    def backward_gpu(self, x, gy):
        gx1, gx2 = cuda.elementwise(
            'T x1, T x2, T gy',
            'T gx1, T gx2',
            ('T sqnorm = x1 * x1 + x2 * x2;'
             'gx1 = x2 / sqnorm * gy;'
             'gx2 = -x1 / sqnorm * gy;'),
            'arctan2_bwd'
        )(x[0], x[1], gy[0])
        return gx1, gx2


def arctan2(x1, x2):
    """Elementwise arctangent function with two arguments.

    Args:
        x1 (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
        :class:`cupy.ndarray`):
            Y-coordinates.
        x2 (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
        :class:`cupy.ndarray`):
            X-coordinates.

    Returns:
        ~chainer.Variable: Angles in radians, in the range [-pi, pi].

    """
    return Arctan2()(x1, x2)
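
Both backward passes above apply the analytic partial derivatives d(arctan2)/dx1 = x2 / (x1^2 + x2^2) and d(arctan2)/dx2 = -x1 / (x1^2 + x2^2). A NumPy-only sanity check of those formulas against central finite differences, illustrative and not part of this commit:

import numpy as np

# Keep x2 positive so the finite differences stay clear of the branch
# cut of arctan2 along the negative x-axis.
x1 = np.random.uniform(-10.0, 10.0, (3, 2))
x2 = np.random.uniform(0.5, 10.0, (3, 2))
gy = np.random.uniform(-1.0, 1.0, (3, 2))

# Analytic gradients, mirroring backward_cpu / backward_gpu.
sqnorm = x1 ** 2 + x2 ** 2
gx1 = x2 / sqnorm * gy
gx2 = -x1 / sqnorm * gy

# Numerical gradients via central differences.
eps = 1e-6
num_gx1 = (np.arctan2(x1 + eps, x2) - np.arctan2(x1 - eps, x2)) / (2 * eps) * gy
num_gx2 = (np.arctan2(x1, x2 + eps) - np.arctan2(x1, x2 - eps)) / (2 * eps) * gy

np.testing.assert_allclose(gx1, num_gx1, atol=1e-4, rtol=1e-4)
np.testing.assert_allclose(gx2, num_gx2, atol=1e-4, rtol=1e-4)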
1 change: 1 addition & 0 deletions docs/source/reference/functions.rst
@@ -163,6 +163,7 @@ Mathematical functions
chainer.functions.arccos
chainer.functions.arcsin
chainer.functions.arctan
chainer.functions.arctan2
chainer.functions.argmax
chainer.functions.argmin
chainer.functions.average
55 changes: 55 additions & 0 deletions tests/chainer_tests/functions_tests/math_tests/test_trigonometric.py
@@ -85,4 +85,59 @@ class TestArctan(unittest.TestCase):
    pass


@testing.parameterize(*testing.product({
    'shape': [(3, 2), ()],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestArctan2(unittest.TestCase):

    def setUp(self):
        self.x1 = numpy.random.uniform(
            -10.0, 10.0, self.shape).astype(self.dtype)
        self.x2 = numpy.random.uniform(
            -10.0, 10.0, self.shape).astype(self.dtype)
        self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
        if self.dtype == numpy.float16:
            self.backward_options = {
                'eps': 2 ** -4, 'atol': 2 ** -4, 'rtol': 2 ** -4}
        else:
            self.backward_options = {
                'atol': 1e-3, 'rtol': 1e-3}

    def check_forward(self, x1_data, x2_data):
        y = F.arctan2(x1_data, x2_data)
        numpy.testing.assert_array_less(
            cuda.to_cpu(y.data),
            numpy.full(y.shape, numpy.pi))
        numpy.testing.assert_array_less(
            numpy.full(y.shape, -numpy.pi),
            cuda.to_cpu(y.data))
        testing.assert_allclose(
            numpy.arctan2(self.x1, self.x2), y.data, atol=1e-4, rtol=1e-4)

    @condition.retry(3)
    def test_forward_cpu(self):
        self.check_forward(self.x1, self.x2)

    @attr.gpu
    @condition.retry(3)
    def test_forward_gpu(self):
        self.check_forward(cuda.to_gpu(self.x1), cuda.to_gpu(self.x2))

    def check_backward(self, x1_data, x2_data, y_grad):
        gradient_check.check_backward(
            F.arctan2, (x1_data, x2_data), y_grad, **self.backward_options)

    @condition.retry(3)
    def test_backward_cpu(self):
        self.check_backward(self.x1, self.x2, self.gy)

    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.x1),
                            cuda.to_gpu(self.x2),
                            cuda.to_gpu(self.gy))


testing.run_module(__name__, __file__)
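
For context, gradient_check.check_backward compares the gradients returned by F.arctan2's backward pass against gradients estimated numerically by perturbing the inputs; the looser backward_options chosen for float16 in setUp reflect that dtype's limited precision. A quick illustration, not part of this commit:

import numpy as np

print(np.finfo(np.float16).eps)  # ~9.77e-04: roughly 3 decimal digits
print(np.finfo(np.float32).eps)  # ~1.19e-07
print(np.finfo(np.float64).eps)  # ~2.22e-16
# With so little resolution, float16 needs a larger perturbation step
# (eps=2 ** -4) and looser tolerances (atol=rtol=2 ** -4) for the numerical
# and analytic gradients to agree reliably.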
