Skip to content

Commit

Permalink
add paddle.nn.functional.mse_loss
Browse files Browse the repository at this point in the history
  • Loading branch information
baiyfbupt committed Aug 11, 2020
1 parent 65b97d6 commit 56a9aa7
Show file tree
Hide file tree
Showing 2 changed files with 206 additions and 1 deletion.
110 changes: 110 additions & 0 deletions python/paddle/fluid/tests/unittests/test_mse_loss.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,7 @@ def test_NNMseLoss_mean(self):
for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
label_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
Expand Down Expand Up @@ -106,6 +107,7 @@ def test_NNMseLoss_sum(self):
for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
label_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
Expand Down Expand Up @@ -143,6 +145,7 @@ def test_NNMseLoss_none(self):
for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
label_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
Expand Down Expand Up @@ -177,5 +180,112 @@ def test_NNMseLoss_none(self):
self.assertTrue(dy_result.shape, [1])


class TestNNFunctionalMseLoss(unittest.TestCase):
    """Tests paddle.nn.functional.mse_loss for all three reduction modes.

    Each reduction is checked in both static-graph and dygraph mode
    against a NumPy reference computed from the same random inputs.
    """

    def _check_mse_loss(self, reduction, np_reduce):
        """Run mse_loss with `reduction` and compare against NumPy.

        Args:
            reduction (str): 'mean', 'sum' or 'none'; forwarded to
                paddle.nn.functional.mse_loss.
            np_reduce (callable): maps the element-wise squared
                difference to the expected result (np.mean, np.sum,
                or identity for 'none').
        """
        for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
            input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            target_np = np.random.uniform(0.1, 0.5, dim).astype("float32")

            # Static graph mode.
            paddle.enable_static()
            prog = paddle.static.Program()
            startup_prog = paddle.static.Program()
            place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
            ) else paddle.CPUPlace()
            with paddle.static.program_guard(prog, startup_prog):
                input = paddle.data(name='input', shape=dim, dtype='float32')
                target = paddle.data(
                    name='target', shape=dim, dtype='float32')
                mse_loss = paddle.nn.functional.mse_loss(
                    input, target, reduction)

            exe = paddle.static.Executor(place)
            exe.run(startup_prog)
            static_result = exe.run(
                prog,
                feed={"input": input_np,
                      "target": target_np},
                fetch_list=[mse_loss])

            # Dynamic graph mode.
            paddle.disable_static()
            dy_ret = paddle.nn.functional.mse_loss(
                paddle.to_variable(input_np),
                paddle.to_variable(target_np), reduction)
            dy_result = dy_ret.numpy()

            sub = input_np - target_np
            expected = np_reduce(sub * sub)
            self.assertTrue(np.allclose(static_result, expected))
            self.assertTrue(np.allclose(static_result, dy_result))
            self.assertTrue(np.allclose(dy_result, expected))
            # BUG FIX: the original wrote assertTrue(dy_result.shape, [1]),
            # which passes [1] as the failure *message* and never asserts
            # anything.  Check the shape explicitly instead.
            if reduction == 'none':
                self.assertEqual(dy_result.shape, tuple(dim))
            else:
                # Reduced result is a single element; its exact rank
                # ((1,) vs ()) depends on the framework version, so
                # assert on size rather than shape.
                self.assertEqual(dy_result.size, 1)

    def test_NNFunctionalMseLoss_mean(self):
        self._check_mse_loss('mean', np.mean)

    def test_NNFunctionalMseLoss_sum(self):
        self._check_mse_loss('sum', np.sum)

    def test_NNFunctionalMseLoss_none(self):
        # Identity: 'none' keeps the unreduced element-wise squared error.
        self._check_mse_loss('none', lambda x: x)


# Allow running this test module directly; unittest discovers and runs
# every TestCase defined above.
if __name__ == "__main__":
    unittest.main()
97 changes: 96 additions & 1 deletion python/paddle/nn/functional/loss.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle

# TODO: define loss functions of neural network
from ...fluid.layers import bpr_loss #DEFINE_ALIAS
from ...fluid.layers import center_loss #DEFINE_ALIAS
Expand All @@ -20,7 +22,6 @@
from ...fluid.layers import iou_similarity #DEFINE_ALIAS
from ...fluid.layers import kldiv_loss #DEFINE_ALIAS
from ...fluid.layers import log_loss #DEFINE_ALIAS
from ...fluid.layers import mse_loss #DEFINE_ALIAS
from ...fluid.layers import npair_loss #DEFINE_ALIAS
from ...fluid.layers import rank_loss #DEFINE_ALIAS
from ...fluid.layers import sigmoid_cross_entropy_with_logits #DEFINE_ALIAS
Expand Down Expand Up @@ -60,3 +61,97 @@
'ssd_loss',
'teacher_student_sigmoid_loss'
]


def mse_loss(input, label, reduction='mean'):
    # Raw docstring: \operatorname contains backslash escapes that are
    # invalid in a normal string literal.
    r"""
    Accepts input predictions and label and returns the mean square error.

    If :attr:`reduction` is set to ``'none'``, loss is calculated as:

    .. math::
        Out = (input - label)^2

    If :attr:`reduction` is set to ``'mean'``, loss is calculated as:

    .. math::
        Out = \operatorname{mean}((input - label)^2)

    If :attr:`reduction` is set to ``'sum'``, loss is calculated as:

    .. math::
        Out = \operatorname{sum}((input - label)^2)

    Parameters:
        input (Tensor): Input tensor, the data type should be float32 or float64.
        label (Tensor): Label tensor, the data type should be float32 or float64.
        reduction (string, optional): The reduction method for the output,
            could be 'none' | 'mean' | 'sum'.
            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned.
            If :attr:`reduction` is ``'sum'``, the reduced sum loss is returned.
            If :attr:`reduction` is ``'none'``, the unreduced loss is returned.
            Default is ``'mean'``.

    Returns:
        Tensor: The tensor storing the mean square error difference of input and label.

    Raises:
        ValueError: If :attr:`reduction` is not 'none', 'mean' or 'sum'.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle

            input_data = np.array([1.5]).astype("float32")
            label_data = np.array([1.7]).astype("float32")

            # static graph mode
            paddle.enable_static()
            input = paddle.data(name="input", shape=[1])
            label = paddle.data(name="label", shape=[1])
            place = paddle.CPUPlace()
            output = paddle.nn.functional.mse_loss(input, label)
            exe = paddle.static.Executor(place)
            exe.run(paddle.static.default_startup_program())
            output_data = exe.run(
                paddle.static.default_main_program(),
                feed={"input": input_data, "label": label_data},
                fetch_list=[output],
                return_numpy=True)
            print(output_data)
            # [array([0.04000002], dtype=float32)]

            # dynamic graph mode
            paddle.disable_static()
            input = paddle.to_variable(input_data)
            label = paddle.to_variable(label_data)
            output = paddle.nn.functional.mse_loss(input, label)
            print(output.numpy())
            # [0.04000002]
    """

    if reduction not in ['sum', 'mean', 'none']:
        raise ValueError(
            "'reduction' in 'mse_loss' should be 'sum', 'mean' or 'none', "
            "but received {}.".format(reduction))

    # Dtype checks only run in static graph mode; in dygraph the
    # underlying ops validate their inputs directly.
    if not paddle.fluid.framework.in_dygraph_mode():
        paddle.fluid.data_feeder.check_variable_and_dtype(
            input, 'input', ['float32', 'float64'], 'mse_loss')
        paddle.fluid.data_feeder.check_variable_and_dtype(
            label, 'label', ['float32', 'float64'], 'mse_loss')

    # Element-wise squared difference, then apply the requested reduction.
    square_out = paddle.fluid.layers.square(
        paddle.fluid.layers.elementwise_sub(input, label))
    if reduction == 'none':
        return square_out
    if reduction == 'mean':
        return paddle.fluid.layers.reduce_mean(square_out)
    return paddle.fluid.layers.reduce_sum(square_out)

0 comments on commit 56a9aa7

Please sign in to comment.