smooth_l1_loss_op.cc (forked from pytorch/pytorch)
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "smooth_l1_loss_op.h"
namespace caffe2 {
REGISTER_CPU_OPERATOR(SmoothL1Loss, SmoothL1LossOp<float, CPUContext>);
REGISTER_CPU_OPERATOR(
SmoothL1LossGradient,
SmoothL1LossGradientOp<float, CPUContext>);

OPERATOR_SCHEMA(SmoothL1Loss)
    .NumInputs(4)
    .NumOutputs(1)
    .SetDoc(R"DOC(
Smooth L1 Loss is a minor variation of Huber loss in which the point of
transition between L2 loss and L1 loss is adjustable by a hyper-parameter beta:

  SmoothL1(x) = 0.5 * x^2 / beta      if |x| < beta
                |x| - 0.5 * beta      otherwise.

SmoothL1 is used in Fast R-CNN and descendants as the loss function for
bounding box regression.

The loss computed by this op has a flexible form:

  scale / N * sum_i alpha_out[i] * SmoothL1(alpha_in[i] * (y_hat[i] - y[i])).

The weights alpha_in and alpha_out are called the "inside" and "outside"
weights, respectively. The inside weights are typically set to either 0 or 1;
a weight of 0 ignores that sample. The outside weights can be used to
implement a per-sample loss weight. The overall loss is scaled by scale / N,
where N is the number of batch elements in the input predictions.
)DOC")
    .Arg(
        "beta",
        "(float) default 1.0; L2 to L1 transition point.")
    .Arg(
        "scale",
        "(float) default 1.0; multiply the loss by this scale factor.")
    .Input(
        0,
        "Y_hat",
        "Tensor of predictions (at least 1D).")
    .Input(
        1,
        "Y",
        "Tensor of labels with the same shape as Y_hat.")
    .Input(
        2,
        "alpha_in",
        "Tensor of inside weights with the same shape as Y.")
    .Input(
        3,
        "alpha_out",
        "Tensor of outside weights with the same shape as Y.")
    .Output(
        0,
        "loss",
        "Scalar loss.");

OPERATOR_SCHEMA(SmoothL1LossGradient)
    .NumInputs(5)
    .NumOutputs(1)
    .Input(
        0,
        "Y_hat",
        "See SmoothL1Loss.")
    .Input(
        1,
        "Y",
        "See SmoothL1Loss.")
    .Input(
        2,
        "alpha_in",
        "See SmoothL1Loss.")
    .Input(
        3,
        "alpha_out",
        "See SmoothL1Loss.")
    .Input(
        4,
        "d_loss",
        "Gradient of forward output 0 (loss).")
    .Output(
        0,
        "d_Y_hat",
        "Gradient of forward input 0 (Y_hat).");

class GetSmoothL1LossGradient : public GradientMakerBase {
  using GradientMakerBase::GradientMakerBase;
  vector<OperatorDef> GetGradientDefs() override {
    // The gradient op consumes the four forward inputs plus the gradient of
    // the forward output (GO(0), i.e. d_loss) and produces the gradient of
    // the forward input Y_hat (GI(0), i.e. d_Y_hat).
    return SingleGradientDef(
        "SmoothL1LossGradient",
        "",
        vector<string>{I(0), I(1), I(2), I(3), GO(0)},
        vector<string>{GI(0)});
  }
};

REGISTER_GRADIENT(SmoothL1Loss, GetSmoothL1LossGradient);

} // namespace caffe2
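
// Usage sketch (illustrative and untested; assumes the era's TensorCPU API
// and the caffe2/core/workspace.h / operator.h headers — adapt to your
// Caffe2 version):
//
//   caffe2::Workspace ws;
//   for (const char* name : {"Y_hat", "Y", "alpha_in", "alpha_out"}) {
//     auto* t = ws.CreateBlob(name)->GetMutable<caffe2::TensorCPU>();
//     t->Resize(8);
//     std::fill_n(t->mutable_data<float>(), 8, 0.5f);
//   }
//   caffe2::OperatorDef def;
//   def.set_type("SmoothL1Loss");
//   for (const char* name : {"Y_hat", "Y", "alpha_in", "alpha_out"}) {
//     def.add_input(name);
//   }
//   def.add_output("loss");
//   CAFFE_ENFORCE(ws.RunOperatorOnce(def));
//   const auto& loss = ws.GetBlob("loss")->Get<caffe2::TensorCPU>();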