Commit

New style clip
hvy committed Oct 30, 2017
1 parent e7dcc12 commit de72088
Showing 2 changed files with 50 additions and 12 deletions.
40 changes: 29 additions & 11 deletions chainer/functions/math/clip.py
@@ -1,12 +1,12 @@
 import numpy
 
 from chainer import cuda
-from chainer import function
+from chainer import function_node
 from chainer import utils
 from chainer.utils import type_check
 
 
-class Clip(function.Function):
+class Clip(function_node.FunctionNode):
     """Clips (limits) elements of input variable."""
 
     def __init__(self, x_min, x_max):
@@ -25,23 +25,41 @@ def check_type_forward(self, in_types):
         type_check.expect(x_type.dtype.kind == 'f')
 
     def forward_cpu(self, x):
+        self.retain_inputs((0,))
         return utils.force_array(
             numpy.clip(x[0], self.x_min, self.x_max)),
 
-    def backward_cpu(self, x, gy):
-        return utils.force_array(
-            gy[0] * (self.x_min < x[0]) * (x[0] < self.x_max)),
-
     def forward_gpu(self, x):
+        self.retain_inputs((0,))
         return cuda.cupy.clip(x[0], self.x_min, self.x_max),
 
-    def backward_gpu(self, x, gy):
+    def backward(self, indexes, grad_outputs):
+        x, = self.get_retained_inputs()
+        return ClipGrad(x.data, self.x_min, self.x_max).apply(grad_outputs)
+
+
+class ClipGrad(function_node.FunctionNode):
+
+    def __init__(self, x, x_min, x_max):
+        self.cond = (x_min < x) * (x < x_max)
+
+    def check_type_forward(self, in_types):
+        type_check.expect(in_types.size() == 1)
+        type_check.expect(in_types[0].dtype.kind == 'f')
+
+    def forward_cpu(self, inputs):
+        return utils.force_array(inputs[0] * self.cond),
+
+    def forward_gpu(self, inputs):
         gx = cuda.elementwise(
-            'T x, T gy, T x_min, T x_max', 'T gx',
-            'gx = ((x > x_min) & (x < x_max)) ? gy : T(0)',
-            'clip_bwd')(x[0], gy[0], self.x_min, self.x_max)
+            'T gy, bool cond', 'T gx',
+            'gx = cond ? gy : T(0)',
+            'clip_bwd')(inputs[0], self.cond)
         return gx,
+
+    def backward(self, indexes, grad_outputs):
+        return grad_outputs[0] * self.cond,
 
 
 def clip(x, x_min, x_max):
     """Clips (limits) elements of input variable.
@@ -58,4 +76,4 @@ def clip(x, x_min, x_max):
         ~chainer.Variable: Output variable.
 
     """
-    return Clip(x_min, x_max)(x)
+    return Clip(x_min, x_max).apply((x,))[0]
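
With Clip rewritten as a FunctionNode whose backward is itself built from a FunctionNode (ClipGrad), the gradient of clip is a Variable that can be backpropagated through again. A minimal sketch of what this enables, squaring the clipped output the same way the updated test does so that the first-order gradient still depends on x; the input values here are illustrative, not from the commit:

import numpy as np

import chainer
import chainer.functions as F

# Illustrative input: one value inside the interval [-0.75, 0.75], two outside.
x = chainer.Variable(np.array([-1.0, 0.5, 2.0], dtype=np.float32))

y = F.clip(x, -0.75, 0.75)
z = F.sum(y * y)
z.backward(enable_double_backprop=True)

gx = x.grad_var  # 2 * clip(x) inside the interval, 0 outside; a Variable
print(gx.data)   # [0. 1. 0.]

# The gradient graph runs through ClipGrad, so it can be backpropped again.
x.cleargrad()
F.sum(gx).backward()
print(x.grad)    # [0. 2. 0.]  (second derivative of clip(x)**2 inside the interval)

Under the old-style function.Function implementation, backward_cpu/backward_gpu operated on raw arrays, so this second backward pass was not possible.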
22 changes: 21 additions & 1 deletion tests/chainer_tests/functions_tests/math_tests/test_clip.py
@@ -26,6 +26,7 @@ def setUp(self):
         elif 0.74 < self.x[ind] < 0.76:
             self.x[ind] = 0.5
         self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
+        self.ggx = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
         self.x_min = -0.75
         self.x_max = 0.75
 
@@ -51,8 +52,11 @@ def test_forward_gpu(self):
         self.check_forward(cuda.to_gpu(self.x))
 
     def check_backward(self, x_data, y_grad):
+        def f(x):
+            return functions.clip(x, self.x_min, self.x_max)
+
         gradient_check.check_backward(
-            functions.Clip(self.x_min, self.x_max), x_data, y_grad,
+            f, x_data, y_grad,
             dtype=numpy.float64)
 
     def test_backward_cpu(self):
@@ -62,6 +66,22 @@ def test_backward_cpu(self):
         self.check_backward(self.x, self.gy)
 
     @attr.gpu
     def test_backward_gpu(self):
         self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
 
+    def check_double_backward(self, x_data, y_grad, gx_grad):
+        def f(x):
+            y = functions.clip(x, self.x_min, self.x_max)
+            return y * y
+
+        gradient_check.check_double_backward(
+            f, x_data, y_grad, gx_grad, dtype=numpy.float64)
+
+    def test_double_backward_cpu(self):
+        self.check_double_backward(self.x, self.gy, self.ggx)
+
+    @attr.gpu
+    def test_double_backward_gpu(self):
+        self.check_double_backward(
+            cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))
+
 
 class TestClipInvalidInterval(unittest.TestCase):

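Note on the test design: check_double_backward wraps clip in y * y because clip is piecewise linear, so its own second derivative is zero almost everywhere; squaring the output makes the first-order gradient depend on x and gives the numerical check something non-trivial to verify. A standalone sketch of the same check follows; the shape and data are hypothetical, and the boundary nudge mirrors what setUp does:

import numpy as np

from chainer import gradient_check
import chainer.functions as F

x = np.random.uniform(-1, 1, (3, 2)).astype(np.float32)
# Keep samples away from the clip boundaries at +/-0.75, as setUp does,
# so the numerical gradient stays stable.
mask = np.abs(np.abs(x) - 0.75) < 0.01
x[mask] = 0.5 * np.sign(x[mask])

gy = np.random.uniform(-1, 1, x.shape).astype(np.float32)
ggx = np.random.uniform(-1, 1, x.shape).astype(np.float32)

def f(v):
    y = F.clip(v, -0.75, 0.75)
    return y * y  # squaring keeps the first-order gradient dependent on v

# Compares analytic first- and second-order gradients against numerical ones.
gradient_check.check_double_backward(f, x, gy, ggx, dtype=np.float64)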
