
Make sigma parameter of smooth L1 loss configurable.

Also fixes a bug in the computation of smooth L1 loss when sigma != 1.
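
For reference, the sigma-parameterized smooth L1 loss from the Fast R-CNN paper (as used in the py-faster-rcnn reference code) is

    smooth_l1(x) = 0.5 * sigma^2 * x^2     if |x| < 1 / sigma^2
                   |x| - 0.5 / sigma^2     otherwise

The previous code compared |x| against 1 / sigma^2 but still evaluated the sigma = 1 branches (0.5 * x^2 and |x| - 0.5), so the two pieces only met at the breakpoint when sigma = 1; for any other sigma the loss was discontinuous there.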
IanTayler authored and vierja committed Dec 28, 2017
1 parent 849b0b3 commit eec777db9531e0d4c4e739bc0dbbe9fff0999f2e
@@ -168,6 +168,7 @@ model:
       mean: 0.0
       stddev: 0.01
     l2_regularization_scale: 0.0005  # disable using 0
+    l1_sigma: 3.0
     activation_function: relu
 
   proposals:
@@ -208,6 +209,7 @@ model:
     dropout_keep_prob: 1.0
     activation_function: relu
     l2_regularization_scale: 0.0005
+    l1_sigma: 3.0
     use_mean: False
     initializer:
       _replace: True
@@ -49,6 +49,7 @@ def setUp(self):
             'dropout_keep_prob': 1.0,
             'activation_function': 'relu6',
             'l2_regularization_scale': 0.0005,
+            'l1_sigma': 3.0,
             'use_mean': False,
             'initializer': {
                 'type': 'variance_scaling_initializer',
@@ -97,6 +98,7 @@ def setUp(self):
             },
             'activation_function': 'relu6',
             'l2_regularization_scale': 0.0005,
+            'l1_sigma': 3.0,
             'proposals': {
                 'pre_nms_top_n': 12000,
                 'post_nms_top_n': 2000,
@@ -50,6 +50,8 @@ def __init__(self, num_classes, config, debug=False, seed=None,
         self.regularizer = tf.contrib.layers.l2_regularizer(
             scale=config.l2_regularization_scale)
 
+        self._l1_sigma = config.l1_sigma
+
         # Debug mode makes the module return more detailed Tensors which can be
         # useful for debugging.
         self._debug = debug
@@ -364,7 +366,9 @@ def loss(self, prediction_dict):
             # offsets (that means, the useful results) and the labeled
             # targets.
             reg_loss_per_proposal = smooth_l1_loss(
-                bbox_offset_cleaned, bbox_offsets_target_labeled)
+                bbox_offset_cleaned, bbox_offsets_target_labeled,
+                sigma=self._l1_sigma
+            )
 
             tf.summary.scalar(
                 'rcnn_foreground_samples',
@@ -32,6 +32,7 @@ def setUp(self):
                 'mode': 'FAN_AVG',
             },
             'l2_regularization_scale': 0.0005,
+            'l1_sigma': 3.0,
             'roi': {
                 'pooling_mode': 'crop',
                 'pooled_width': 7,
@@ -55,6 +55,8 @@ def __init__(self, num_anchors, config, debug=False, seed=None,
             scale=config.l2_regularization_scale
         )
 
+        self._l1_sigma = config.l1_sigma
+
         # We could use normal relu without any problems.
         self._rpn_activation = get_activation_function(
             config.activation_function
@@ -276,7 +278,7 @@ def loss(self, prediction_dict):
 
             # We apply smooth l1 loss as described by the Fast R-CNN paper.
             reg_loss_per_anchor = smooth_l1_loss(
-                rpn_bbox_pred, rpn_bbox_target
+                rpn_bbox_pred, rpn_bbox_target, sigma=self._l1_sigma
             )
 
             prediction_dict['reg_loss_per_anchor'] = reg_loss_per_anchor
@@ -33,6 +33,7 @@ def setUp(self):
                 'stddev': 0.01,
             },
             'l2_regularization_scale': 0.0005,
+            'l1_sigma': 3.0,
             'activation_function': 'relu6',
             'proposals': {
                 'pre_nms_top_n': 12000,
@@ -1,7 +1,7 @@
 import tensorflow as tf
 
 
-def smooth_l1_loss(bbox_prediction, bbox_target, sigma=1.0):
+def smooth_l1_loss(bbox_prediction, bbox_target, sigma=3.0):
     """
     Return Smooth L1 Loss for bounding box prediction.
@@ -24,8 +24,9 @@ def smooth_l1_loss(bbox_prediction, bbox_target, sigma=1.0):
     abs_diff_lt_sigma2 = tf.less(abs_diff, 1.0 / sigma2)
     bbox_loss = tf.reduce_sum(
         tf.where(
-            abs_diff_lt_sigma2, 0.5 * tf.square(abs_diff),
-            abs_diff - 0.5
+            abs_diff_lt_sigma2,
+            0.5 * sigma2 * tf.square(abs_diff),
+            abs_diff - 0.5 / sigma2
         ), [1]
     )
     return bbox_loss
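
As a sanity check on the fix, here is a minimal self-contained sketch of the corrected function in plain NumPy (smooth_l1_loss_np is a hypothetical name and the test values are illustrative; the real implementation above uses TensorFlow ops), plus an assertion that the quadratic and linear branches now agree at the breakpoint |x| = 1 / sigma^2:

import numpy as np

def smooth_l1_loss_np(bbox_prediction, bbox_target, sigma=3.0):
    # Mirrors the fixed TensorFlow code: quadratic branch
    # 0.5 * sigma^2 * x^2 where |x| < 1 / sigma^2, linear branch
    # |x| - 0.5 / sigma^2 elsewhere, summed per box over axis 1.
    sigma2 = sigma ** 2
    abs_diff = np.abs(bbox_prediction - bbox_target)
    per_coordinate = np.where(
        abs_diff < 1.0 / sigma2,
        0.5 * sigma2 * np.square(abs_diff),
        abs_diff - 0.5 / sigma2,
    )
    return per_coordinate.sum(axis=1)

# Both branches evaluate to 0.5 / sigma^2 at the breakpoint, which is
# exactly the continuity property the old code lost when sigma != 1.
sigma = 3.0
x = 1.0 / sigma ** 2
assert np.isclose(0.5 * sigma ** 2 * x ** 2, x - 0.5 / sigma ** 2)

# Illustrative usage: one loss value per box.
boxes_pred = np.array([[0.1, 0.2, 0.3, 0.4]])
boxes_true = np.array([[0.0, 0.0, 0.0, 0.0]])
print(smooth_l1_loss_np(boxes_pred, boxes_true))  # ~[0.778]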
