Optimize loss function
tsurumeso committed Jul 15, 2018
1 parent 6cc4cb7 commit 055c61d
Showing 1 changed file with 16 additions and 10 deletions.
26 changes: 16 additions & 10 deletions lib/loss/clipped_weighted_huber_loss.py
@@ -1,7 +1,8 @@
+import numpy
+
 from chainer import cuda
 from chainer import function
 from chainer.utils import type_check
-import numpy
 
 
 class ClippedWeightedHuberLoss(function.Function):
@@ -25,17 +26,22 @@ def forward(self, inputs):
         x0_c = xp.clip(x0, self.clip[0], self.clip[1])
         x1_c = xp.clip(x1, self.clip[0], self.clip[1])
         self.diff = (x0_c - x1_c) * self.weight
-        y = xp.square(self.diff)
-        mask = y > (self.delta ** 2)
-        y -= mask * xp.square(abs(self.diff) - self.delta)
-        y *= 0.5
-        return xp.array(y.sum() / y.dtype.type(y.size), dtype=y.dtype),
-
-    def backward(self, inputs, gy):
+        diff = xp.abs(self.diff)
+        y = xp.square(diff)
+        diff -= diff.dtype.type(self.delta)
+        xp.maximum(diff, 0, dtype=diff.dtype, out=diff)
+        xp.square(diff, out=diff)
+        y = (y - diff) * 0.5
+
+        return y.mean(),
+
+    def backward(self, inputs, grad_outputs):
         xp = cuda.get_array_module(*inputs)
-        mask = xp.abs(self.diff) <= self.delta
-        coeff = gy[0] * gy[0].dtype.type(1. / self.diff.size)
-        gx = coeff * xp.where(mask, self.diff, self.delta * xp.sign(self.diff))
+        gy, = grad_outputs
+        delta = float(self.delta)
+        gx = gy * xp.clip(self.diff, -delta, delta)
 
         return gx, -gx


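Note (illustrative, not part of the commit): the new forward pass drops the boolean mask in favour of in-place maximum/square operations, and the new backward pass expresses the Huber gradient with a clip instead of where/sign. The NumPy sketch below checks the two algebraic identities this relies on; d stands in for the weighted, clipped difference self.diff, delta for the Huber threshold, and all names are placeholders.

# Standalone sanity check (illustrative only, not from the repository).
import numpy

rng = numpy.random.RandomState(0)
d = rng.uniform(-0.5, 0.5, size=1000).astype(numpy.float32)  # plays the role of self.diff
delta = numpy.float32(0.1)                                   # Huber threshold

# Old forward: a boolean mask singles out the linear (|d| > delta) region.
y_old = numpy.square(d)
mask = y_old > delta ** 2
y_old = 0.5 * (y_old - mask * numpy.square(numpy.abs(d) - delta))
loss_old = y_old.sum() / y_old.dtype.type(y_old.size)

# New forward: in-place maximum/square, no boolean mask is materialized.
a = numpy.abs(d)
y_new = numpy.square(a)
a -= a.dtype.type(delta)
numpy.maximum(a, 0, dtype=a.dtype, out=a)
numpy.square(a, out=a)
loss_new = ((y_new - a) * 0.5).mean()

assert numpy.allclose(loss_old, loss_new)

# Backward identity: where(|d| <= delta, d, delta * sign(d)) == clip(d, -delta, delta).
g_old = numpy.where(numpy.abs(d) <= delta, d, delta * numpy.sign(d))
g_new = numpy.clip(d, -float(delta), float(delta))
assert numpy.allclose(g_old, g_new)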
