From 1132b5a8ecf347aa2f4b9fcd7118a0298a062e30 Mon Sep 17 00:00:00 2001 From: yangguohao <1901212980@pku.edu.cn> Date: Thu, 28 Apr 2022 17:29:12 +0800 Subject: [PATCH 01/12] 2022-04-28 --- .../test_multi_label_soft_margin_loss.py | 190 ++++++++++++++++++ python/paddle/nn/__init__.py | 2 + python/paddle/nn/functional/__init__.py | 2 + python/paddle/nn/functional/loss.py | 76 +++++++ python/paddle/nn/layer/__init__.py | 1 + python/paddle/nn/layer/loss.py | 66 ++++++ 6 files changed, 337 insertions(+) create mode 100644 python/paddle/fluid/tests/unittests/test_multi_label_soft_margin_loss.py diff --git a/python/paddle/fluid/tests/unittests/test_multi_label_soft_margin_loss.py b/python/paddle/fluid/tests/unittests/test_multi_label_soft_margin_loss.py new file mode 100644 index 0000000000000..a74813976768d --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_multi_label_soft_margin_loss.py @@ -0,0 +1,190 @@ +import paddle +import numpy as np +import unittest + +def call_MultiLabelSoftMarginLoss_layer(input, + label, + weight=None, + reduction='mean',): + multilabel_margin_loss = paddle.nn.MultiLabelSoftMarginLoss(weight=weight,reduction=reduction) + res = multilabel_margin_loss(input=input,label=label,) + return res + + +def call_MultiLabelSoftMarginLoss_functional(input, + label, + weight=None, + reduction='mean',): + res = paddle.nn.functional.multi_label_soft_margin_loss( + input, + label, + reduction=reduction, + weight=weight,) + return res + + +def test_static(place, + input_np, + label_np, + weight_np = None, + reduction='mean', + functional=False): + paddle.enable_static() + prog = paddle.static.Program() + startup_prog = paddle.static.Program() + with paddle.static.program_guard(prog, startup_prog): + input = paddle.static.data( + name='input', shape=input_np.shape, dtype='float64') + label = paddle.static.data( + name='label', shape=label_np.shape, dtype='float64') + feed_dict = {"input": input_np, "label": label_np,} + weight = None + if weight_np is not None: + weight = paddle.static.data(name='weight', shape=weight_np.shape,dtype='float64') + feed_dict['weight']= weight_np + + if functional: + res = call_MultiLabelSoftMarginLoss_functional(input=input,label=label,weight=weight,reduction=reduction) + else: + res = call_MultiLabelSoftMarginLoss_layer(input=input,label=label,weight=weight,reduction=reduction) + + exe = paddle.static.Executor(place) + static_result = exe.run(prog, feed=feed_dict, fetch_list=[res]) + return static_result + +def test_dygraph(place, + input_np, + label_np, + weight = None, + reduction='mean', + functional=False): + with paddle.fluid.dygraph.base.guard(): + input = paddle.to_tensor(input_np) + label = paddle.to_tensor(label_np) + if weight is not None: + weight = paddle.to_tensor(weight) + + if functional: + dy_res = call_MultiLabelSoftMarginLoss_functional(input=input, label=label, weight=weight, reduction=reduction) + else: + dy_res = call_MultiLabelSoftMarginLoss_layer(input=input, label=label, weight=weight, reduction=reduction) + dy_result = dy_res.numpy() + return dy_result + + +def calc_multilabel_margin_loss(input, + label, + weight = None, + reduction = "mean",): + + def LogSigmoid(x): + return np.log(1/(1+np.exp(-x))) + + loss = -(label * LogSigmoid(input) + (1 - label) * LogSigmoid(-input)) + + if weight is not None: + loss = loss * weight + + + loss = loss.mean(axis=-1) # only return N loss values + + if reduction == "none": + return loss + elif reduction == "mean": + return np.mean(loss) + elif reduction == "sum": + return np.sum(loss) 
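# Illustrative aside: a hand-worked check of the reference formula above,
# assuming {0, 1} labels as generated by the tests below. For one sample
# x = [1., -2., 3.] with y = [0., 1., 0.], the per-class terms are
#   -log(sigmoid(-1)) ~= 1.3133
#   -log(sigmoid(-2)) ~= 2.1269
#   -log(sigmoid(-3)) ~= 3.0486
# so the per-sample loss (mean over classes) is ~= 2.1629, which is the value
# calc_multilabel_margin_loss(x[None, :], y[None, :], reduction='none')
# is expected to return for that row.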
+ + +class TestMultiLabelMarginLoss(unittest.TestCase): + def test_MultiLabelSoftMarginLoss(self): + input = np.random.uniform(0.1, 0.8, size=(20, 30)).astype(np.float64) + label = np.random.randint(0, 2, size=(20, 30)).astype(np.float64) + + places = ['cpu'] + if paddle.device.is_compiled_with_cuda(): + places.append('gpu') + reductions = ['sum', 'mean', 'none'] + for place in places: + for reduction in reductions: + expected = calc_multilabel_margin_loss(input=input, label=label, + reduction=reduction) + + dy_result = test_dygraph(place=place, + input_np=input, label_np=label, + reduction=reduction) + + static_result = test_static(place=place, + input_np=input, label_np=label, + reduction=reduction) + self.assertTrue(np.allclose(static_result, expected)) + self.assertTrue(np.allclose(static_result, dy_result)) + self.assertTrue(np.allclose(dy_result, expected)) + static_functional = test_static(place=place, + input_np=input, label_np=label, + reduction=reduction,functional=True) + dy_functional = test_dygraph(place=place, + input_np=input, label_np=label, + reduction=reduction,functional=True) + self.assertTrue(np.allclose(static_functional, expected)) + self.assertTrue(np.allclose(static_functional, dy_functional)) + self.assertTrue(np.allclose(dy_functional, expected)) + + def test_MultiLabelSoftMarginLoss_error(self): + paddle.disable_static() + self.assertRaises( + ValueError, + paddle.nn.MultiLabelSoftMarginLoss, + reduction="unsupport reduction") + input = paddle.to_tensor([[0.1, 0.3]], dtype='float32') + label = paddle.to_tensor([[0.0, 1.0]], dtype='float32') + self.assertRaises( + ValueError, + paddle.nn.functional.multi_label_soft_margin_loss, + input=input, + label = label, + reduction="unsupport reduction") + paddle.enable_static() + + def test_MultiLabelSoftMarginLoss_weights(self): + input = np.random.uniform(0.1, 0.8, size=(20, 30)).astype(np.float64) + label = np.random.randint(0, 2, size=(20, 30)).astype(np.float64) + weight = np.random.randint(0, 2, size=(20, 30)).astype(np.float64) + place='cpu' + reduction = 'mean' + expected = calc_multilabel_margin_loss(input=input, label=label,weight=weight, + reduction=reduction) + + dy_result = test_dygraph(place=place, + input_np=input, label_np=label,weight=weight, + reduction=reduction) + + static_result = test_static(place=place, + input_np=input, label_np=label,weight_np=weight, + reduction=reduction) + self.assertTrue(np.allclose(static_result, expected)) + self.assertTrue(np.allclose(static_result, dy_result)) + self.assertTrue(np.allclose(dy_result, expected)) + static_functional = test_static(place=place, + input_np=input, label_np=label,weight_np=weight, + reduction=reduction, functional=True) + dy_functional = test_dygraph(place=place, + input_np=input, label_np=label,weight=weight, + reduction=reduction, functional=True) + self.assertTrue(np.allclose(static_functional, expected)) + self.assertTrue(np.allclose(static_functional, dy_functional)) + self.assertTrue(np.allclose(dy_functional, expected)) + def test_MultiLabelSoftMarginLoss_dimension(self): + paddle.disable_static() + + input = paddle.to_tensor([[0.1, 0.3], [1, 2]], dtype='float32') + label = paddle.to_tensor([[0.2, 0.1]], dtype='float32') + self.assertRaises( + ValueError, + paddle.nn.functional.multi_label_soft_margin_loss, + input=input, + label=label) + paddle.enable_static() + +if __name__ == "__main__": + unittest.main() \ No newline at end of file diff --git a/python/paddle/nn/__init__.py b/python/paddle/nn/__init__.py index 
bceee4b964a33..dded115e2e740 100644 --- a/python/paddle/nn/__init__.py +++ b/python/paddle/nn/__init__.py @@ -106,6 +106,7 @@ from .layer.loss import CTCLoss # noqa: F401 from .layer.loss import SmoothL1Loss # noqa: F401 from .layer.loss import HingeEmbeddingLoss # noqa: F401 +from .layer.loss import MultiLabelSoftMarginLoss from .layer.norm import BatchNorm # noqa: F401 from .layer.norm import SyncBatchNorm # noqa: F401 from .layer.norm import GroupNorm # noqa: F401 @@ -313,4 +314,5 @@ def weight_norm(*args): 'MaxUnPool3D', 'HingeEmbeddingLoss', 'Identity', + 'MultiLabelSoftMarginLoss', ] diff --git a/python/paddle/nn/functional/__init__.py b/python/paddle/nn/functional/__init__.py index 68213d831c550..bd6f8503d5e79 100644 --- a/python/paddle/nn/functional/__init__.py +++ b/python/paddle/nn/functional/__init__.py @@ -89,6 +89,7 @@ from .loss import square_error_cost # noqa: F401 from .loss import ctc_loss # noqa: F401 from .loss import hinge_embedding_loss # noqa: F401 +from .loss import multi_label_soft_margin_loss from .norm import batch_norm # noqa: F401 from .norm import instance_norm # noqa: F401 from .norm import layer_norm # noqa: F401 @@ -228,4 +229,5 @@ 'class_center_sample', 'sparse_attention', 'fold', + 'multi_label_soft_margin_loss', ] diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py index ca3ac1772829d..4642d79580f6a 100755 --- a/python/paddle/nn/functional/loss.py +++ b/python/paddle/nn/functional/loss.py @@ -2225,3 +2225,79 @@ def hinge_embedding_loss(input, label, margin=1.0, reduction='mean', name=None): return paddle.sum(loss, name=name) elif reduction == 'none': return loss + +def multi_label_soft_margin_loss( + input, + label, + weight = None, + reduction = "mean", + name = None): + r""" + Parameters: + input (Tensor): Input tensor, the data type is float32 or float64. Shape is (N, C), where C is number of classes, and if shape is more than 2D, this is (N, C, D1, D2,..., Dk), k >= 1. + label (Tensor): Label tensor, the data type is float32 or float64. The shape of label is the same as the shape of input. + weight (Tensor,optional): a manual rescaling weight given to each class. + If given, has to be a Tensor of size C and the data type is float32, float64. + Default is ``'None'`` . + reduction (str, optional): Indicate how to average the loss by batch_size, + the candicates are ``'none'`` | ``'mean'`` | ``'sum'``. + If :attr:`reduction` is ``'none'``, the unreduced loss is returned; + If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned; + If :attr:`reduction` is ``'sum'``, the summed loss is returned. + Default: ``'mean'`` + name (str, optional): Name for the operation (optional, default is None). + For more information, please refer to :ref:`api_guide_Name`. + Shape: + input: N-D Tensor, the shape is [N, \*], N is batch size and `\*` means number of classes, available dtype is float32, float64. The sum operationoperates over all the elements. + label: N-D Tensor, same shape as the input. + weight:N-D Tensor, the shape is [N,1] + output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the input. + Returns: + Tensor, The tensor variable storing the multi_label_soft_margin_loss of input and label. + Examples: + .. 
code-block:: python + import paddle + import paddle.nn.functional as F + input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32) + # label elements in {1., -1.} + label = paddle.to_tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]], dtype=paddle.float32) + loss = F.multi_label_soft_margin_loss(input, label, reduction='none') + print(loss) + # Tensor([3.49625897, 0.71111226, 0.43989015]) + loss = F.multi_label_soft_margin_loss(input, label, reduction='mean') + print(loss) + # Tensor([1.54908717]) + """ + if reduction not in ['sum', 'mean', 'none']: + raise ValueError( + "'reduction' in 'multi_label_soft_margin_loss' should be 'sum', 'mean' or 'none', " + "but received {}.".format(reduction)) + + if not(input.shape==label.shape): + raise ValueError( + "The input and label should have same dimension," + "but received {}!={}".format(input.shape,label.shape) + ) + + if not _non_static_mode(): + check_variable_and_dtype(input, 'input', ['float32', 'float64'], + 'multilabel_soft_margin_loss') + check_variable_and_dtype(label, 'label', ['float32', 'float64'], + 'multilabel_soft_margin_loss') + + loss = -(label * paddle.nn.functional.log_sigmoid(input) + (1 - label) * paddle.nn.functional.log_sigmoid(-input)) + + if weight is not None: + if not _non_static_mode(): + check_variable_and_dtype(weight,'weight',['float32','float64'], + 'multilabel_soft_margin_loss') + loss = loss * weight + + loss = loss.mean(axis=-1) # only return N loss values + + if reduction == "none": + return loss + elif reduction == "mean": + return paddle.mean(loss) + elif reduction == "sum": + return paddle.sum(loss) diff --git a/python/paddle/nn/layer/__init__.py b/python/paddle/nn/layer/__init__.py index 31364f0281c8a..239989b44e121 100644 --- a/python/paddle/nn/layer/__init__.py +++ b/python/paddle/nn/layer/__init__.py @@ -78,6 +78,7 @@ from .loss import CTCLoss # noqa: F401 from .loss import SmoothL1Loss # noqa: F401 from .loss import HingeEmbeddingLoss # noqa: F401 +from .loss import MultiLabelSoftMarginLoss from .norm import BatchNorm1D # noqa: F401 from .norm import BatchNorm2D # noqa: F401 from .norm import BatchNorm3D # noqa: F401 diff --git a/python/paddle/nn/layer/loss.py b/python/paddle/nn/layer/loss.py index d4e059b6dfa49..d710df8414aa7 100644 --- a/python/paddle/nn/layer/loss.py +++ b/python/paddle/nn/layer/loss.py @@ -1302,3 +1302,69 @@ def forward(self, input, label): reduction=self.reduction, margin=self.margin, name=self.name) + + +class MultiLabelSoftMarginLoss(Layer): + r"""Creates a criterion that optimizes a multi-class multi-classification + hinge loss (margin-based loss) between input :math:`x` (a 2D mini-batch `Tensor`) + and output :math:`y` (which is a 2D `Tensor` of target class indices). + For each sample in the mini-batch: + .. math:: + \text{loss}(x, y) = \sum_{ij}\frac{\max(0, 1 - (x[y[j]] - x[i]))}{\text{x.size}(0)} + where :math:`x \in \left\{0, \; \cdots , \; \text{x.size}(0) - 1\right\}`, \ + :math:`y \in \left\{0, \; \cdots , \; \text{y.size}(0) - 1\right\}`, \ + :math:`0 \leq y[j] \leq \text{x.size}(0)-1`, \ + and :math:`i \neq y[j]` for all :math:`i` and :math:`j`. + :math:`y` and :math:`x` must have the same size. + Parameters: + reduction (str, optional): Indicate how to average the loss by batch_size, + the candicates are ``'none'`` | ``'mean'`` | ``'sum'``. + If :attr:`reduction` is ``'none'``, the unreduced loss is returned; + If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned; + If :attr:`reduction` is ``'sum'``, the summed loss is returned. 
+ Default: ``'mean'`` + name (str, optional): Name for the operation (optional, default is None). + For more information, please refer to :ref:`api_guide_Name`. + Call parameters: + input (Tensor): Input tensor, the data type is float32 or float64. Shape is (N, C), where C is number of classes, and if shape is more than 2D, this is (N, C, D1, D2,..., Dk), k >= 1. + label (Tensor): Label tensor containing 1 or -1, the data type is float32 or float64. The shape of label is the same as the shape of input. + Shape: + input: N-D Tensor, the shape is [N, \*], N is batch size and `\*` means number of classes, available dtype is float32, float64. The sum operationoperates over all the elements. + label: N-D Tensor, same shape as the input. + output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the input. + Returns: + Tensor, The tensor variable storing the multi_label_soft_margin_loss of input and label. + Examples: + .. code-block:: python + import paddle + import paddle.nn as nn + input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32) + # label elements in {1., -1.} + label = paddle.to_tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]], dtype=paddle.float32) + multi_label_soft_margin_loss = nn.MultiLabelSoftMarginLoss(reduction='none') + loss = multi_label_soft_margin_loss(input, label) + print(loss) + # Tensor([3.49625897, 0.71111226, 0.43989015]) + multi_label_soft_margin_loss = nn.MultiLabelSoftMarginLoss(reduction='mean') + loss = multi_label_soft_margin_loss(input, label) + print(loss) + # Tensor([1.54908717]) + """ + def __init__(self, weight=None, reduction="mean", name=None): + super(MultiLabelSoftMarginLoss, self).__init__() + if reduction not in ['sum', 'mean', 'none']: + raise ValueError( + "'reduction' in 'MultiLabelSoftMarginloss' should be 'sum', 'mean' or 'none', " + "but received {}.".format(reduction)) + self.weight=weight + self.reduction = reduction + self.name = name + + def forward(self, input, label): + return F.multi_label_soft_margin_loss( + input, + label, + reduction=self.reduction, + weight=self.weight, + name=self.name) + From c3c12282e1116937b189e9faeaee8713181d94ac Mon Sep 17 00:00:00 2001 From: yangguohao <1901212980@pku.edu.cn> Date: Wed, 4 May 2022 14:53:58 +0800 Subject: [PATCH 02/12] 2022-05-04 --- python/paddle/nn/layer/loss.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/python/paddle/nn/layer/loss.py b/python/paddle/nn/layer/loss.py index d710df8414aa7..5fd108a644786 100644 --- a/python/paddle/nn/layer/loss.py +++ b/python/paddle/nn/layer/loss.py @@ -1316,6 +1316,7 @@ class MultiLabelSoftMarginLoss(Layer): :math:`0 \leq y[j] \leq \text{x.size}(0)-1`, \ and :math:`i \neq y[j]` for all :math:`i` and :math:`j`. :math:`y` and :math:`x` must have the same size. + Parameters: reduction (str, optional): Indicate how to average the loss by batch_size, the candicates are ``'none'`` | ``'mean'`` | ``'sum'``. @@ -1325,26 +1326,33 @@ class MultiLabelSoftMarginLoss(Layer): Default: ``'mean'`` name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. + Call parameters: input (Tensor): Input tensor, the data type is float32 or float64. Shape is (N, C), where C is number of classes, and if shape is more than 2D, this is (N, C, D1, D2,..., Dk), k >= 1. label (Tensor): Label tensor containing 1 or -1, the data type is float32 or float64. The shape of label is the same as the shape of input. 
+ Shape: input: N-D Tensor, the shape is [N, \*], N is batch size and `\*` means number of classes, available dtype is float32, float64. The sum operationoperates over all the elements. label: N-D Tensor, same shape as the input. output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the input. + Returns: Tensor, The tensor variable storing the multi_label_soft_margin_loss of input and label. + Examples: .. code-block:: python + import paddle import paddle.nn as nn + input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32) - # label elements in {1., -1.} label = paddle.to_tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]], dtype=paddle.float32) + multi_label_soft_margin_loss = nn.MultiLabelSoftMarginLoss(reduction='none') loss = multi_label_soft_margin_loss(input, label) print(loss) # Tensor([3.49625897, 0.71111226, 0.43989015]) + multi_label_soft_margin_loss = nn.MultiLabelSoftMarginLoss(reduction='mean') loss = multi_label_soft_margin_loss(input, label) print(loss) From 64577abb23ba6d2ac928588b05b2359aaa66d8c6 Mon Sep 17 00:00:00 2001 From: yangguohao <1901212980@pku.edu.cn> Date: Thu, 5 May 2022 11:36:40 +0800 Subject: [PATCH 03/12] 2022-05-05_V1 --- python/paddle/nn/layer/loss.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/nn/layer/loss.py b/python/paddle/nn/layer/loss.py index 5fd108a644786..f2946170cd46e 100644 --- a/python/paddle/nn/layer/loss.py +++ b/python/paddle/nn/layer/loss.py @@ -1347,7 +1347,7 @@ class MultiLabelSoftMarginLoss(Layer): input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32) label = paddle.to_tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]], dtype=paddle.float32) - + multi_label_soft_margin_loss = nn.MultiLabelSoftMarginLoss(reduction='none') loss = multi_label_soft_margin_loss(input, label) print(loss) From 94236c56e2a79bed5357d9ba70f0dde6595a92af Mon Sep 17 00:00:00 2001 From: yangguohao <1901212980@pku.edu.cn> Date: Thu, 5 May 2022 16:14:33 +0800 Subject: [PATCH 04/12] 2022-05-05_V1 --- .../unittests/test_multi_label_soft_margin_loss.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_multi_label_soft_margin_loss.py b/python/paddle/fluid/tests/unittests/test_multi_label_soft_margin_loss.py index a74813976768d..6799918711352 100644 --- a/python/paddle/fluid/tests/unittests/test_multi_label_soft_margin_loss.py +++ b/python/paddle/fluid/tests/unittests/test_multi_label_soft_margin_loss.py @@ -98,8 +98,8 @@ def LogSigmoid(x): class TestMultiLabelMarginLoss(unittest.TestCase): def test_MultiLabelSoftMarginLoss(self): - input = np.random.uniform(0.1, 0.8, size=(20, 30)).astype(np.float64) - label = np.random.randint(0, 2, size=(20, 30)).astype(np.float64) + input = np.random.uniform(0.1, 0.8, size=(5, 5)).astype(np.float64) + label = np.random.randint(0, 2, size=(5, 5)).astype(np.float64) places = ['cpu'] if paddle.device.is_compiled_with_cuda(): @@ -147,9 +147,9 @@ def test_MultiLabelSoftMarginLoss_error(self): paddle.enable_static() def test_MultiLabelSoftMarginLoss_weights(self): - input = np.random.uniform(0.1, 0.8, size=(20, 30)).astype(np.float64) - label = np.random.randint(0, 2, size=(20, 30)).astype(np.float64) - weight = np.random.randint(0, 2, size=(20, 30)).astype(np.float64) + input = np.random.uniform(0.1, 0.8, size=(5, 5)).astype(np.float64) + label = np.random.randint(0, 2, size=(5, 5)).astype(np.float64) + weight = np.random.randint(0, 2, size=(5, 5)).astype(np.float64) place='cpu' 
reduction = 'mean' expected = calc_multilabel_margin_loss(input=input, label=label,weight=weight, @@ -174,6 +174,7 @@ def test_MultiLabelSoftMarginLoss_weights(self): self.assertTrue(np.allclose(static_functional, expected)) self.assertTrue(np.allclose(static_functional, dy_functional)) self.assertTrue(np.allclose(dy_functional, expected)) + def test_MultiLabelSoftMarginLoss_dimension(self): paddle.disable_static() From 3b59cfdf325265985613fc7a9a9c504483f83b5a Mon Sep 17 00:00:00 2001 From: yangguohao <70266361+yangguohao@users.noreply.github.com> Date: Mon, 9 May 2022 14:59:58 +0800 Subject: [PATCH 05/12] Update loss.py --- python/paddle/nn/functional/loss.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py index 4642d79580f6a..4f2282b5245e8 100755 --- a/python/paddle/nn/functional/loss.py +++ b/python/paddle/nn/functional/loss.py @@ -2233,6 +2233,7 @@ def multi_label_soft_margin_loss( reduction = "mean", name = None): r""" + Parameters: input (Tensor): Input tensor, the data type is float32 or float64. Shape is (N, C), where C is number of classes, and if shape is more than 2D, this is (N, C, D1, D2,..., Dk), k >= 1. label (Tensor): Label tensor, the data type is float32 or float64. The shape of label is the same as the shape of input. @@ -2247,15 +2248,19 @@ def multi_label_soft_margin_loss( Default: ``'mean'`` name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. - Shape: + + Shape: input: N-D Tensor, the shape is [N, \*], N is batch size and `\*` means number of classes, available dtype is float32, float64. The sum operationoperates over all the elements. label: N-D Tensor, same shape as the input. weight:N-D Tensor, the shape is [N,1] output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the input. - Returns: + + Returns: Tensor, The tensor variable storing the multi_label_soft_margin_loss of input and label. - Examples: + + Examples: .. code-block:: python + import paddle import paddle.nn.functional as F input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32) From 2d6cd548e61d137bab6bbebdfb9f94f3b5a3cc15 Mon Sep 17 00:00:00 2001 From: yangguohao <70266361+yangguohao@users.noreply.github.com> Date: Mon, 9 May 2022 15:12:04 +0800 Subject: [PATCH 06/12] Update loss.py --- python/paddle/nn/layer/loss.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/python/paddle/nn/layer/loss.py b/python/paddle/nn/layer/loss.py index f2946170cd46e..26d3102cbac09 100644 --- a/python/paddle/nn/layer/loss.py +++ b/python/paddle/nn/layer/loss.py @@ -1309,8 +1309,10 @@ class MultiLabelSoftMarginLoss(Layer): hinge loss (margin-based loss) between input :math:`x` (a 2D mini-batch `Tensor`) and output :math:`y` (which is a 2D `Tensor` of target class indices). For each sample in the mini-batch: + .. math:: \text{loss}(x, y) = \sum_{ij}\frac{\max(0, 1 - (x[y[j]] - x[i]))}{\text{x.size}(0)} + where :math:`x \in \left\{0, \; \cdots , \; \text{x.size}(0) - 1\right\}`, \ :math:`y \in \left\{0, \; \cdots , \; \text{y.size}(0) - 1\right\}`, \ :math:`0 \leq y[j] \leq \text{x.size}(0)-1`, \ @@ -1318,6 +1320,9 @@ class MultiLabelSoftMarginLoss(Layer): :math:`y` and :math:`x` must have the same size. Parameters: + weight (Tensor,optional): a manual rescaling weight given to each class. + If given, has to be a Tensor of size C and the data type is float32, float64. 
+ Default is ``'None'`` . reduction (str, optional): Indicate how to average the loss by batch_size, the candicates are ``'none'`` | ``'mean'`` | ``'sum'``. If :attr:`reduction` is ``'none'``, the unreduced loss is returned; @@ -1337,7 +1342,7 @@ class MultiLabelSoftMarginLoss(Layer): output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the input. Returns: - Tensor, The tensor variable storing the multi_label_soft_margin_loss of input and label. + A callable object of MultiLabelSoftMarginLoss. Examples: .. code-block:: python @@ -1372,7 +1377,7 @@ def forward(self, input, label): return F.multi_label_soft_margin_loss( input, label, + weight=self.weight, reduction=self.reduction, - weight=self.weight, name=self.name) From 1ae87b0bd055e53774a76b727c200d7ebbfedd9a Mon Sep 17 00:00:00 2001 From: yangguohao <1901212980@pku.edu.cn> Date: Wed, 1 Jun 2022 22:43:39 +0800 Subject: [PATCH 07/12] 2022-06-01_hook --- python/paddle/nn/layer/loss.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/python/paddle/nn/layer/loss.py b/python/paddle/nn/layer/loss.py index 124b2c0c8f726..31f928e87055d 100644 --- a/python/paddle/nn/layer/loss.py +++ b/python/paddle/nn/layer/loss.py @@ -1367,13 +1367,14 @@ class MultiLabelSoftMarginLoss(Layer): print(loss) # Tensor([1.54908717]) """ + def __init__(self, weight=None, reduction="mean", name=None): super(MultiLabelSoftMarginLoss, self).__init__() if reduction not in ['sum', 'mean', 'none']: raise ValueError( "'reduction' in 'MultiLabelSoftMarginloss' should be 'sum', 'mean' or 'none', " "but received {}.".format(reduction)) - self.weight=weight + self.weight = weight self.reduction = reduction self.name = name @@ -1381,7 +1382,6 @@ def forward(self, input, label): return F.multi_label_soft_margin_loss( input, label, - weight=self.weight, + weight=self.weight, reduction=self.reduction, name=self.name) - From 8196f514324e87d96b6437706c6831a2dd443c1a Mon Sep 17 00:00:00 2001 From: yangguohao <1901212980@pku.edu.cn> Date: Sun, 5 Jun 2022 18:41:09 +0800 Subject: [PATCH 08/12] 2022-06-05 --- .../test_multi_label_soft_margin_loss.py | 193 +++++++++++------- 1 file changed, 123 insertions(+), 70 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_multi_label_soft_margin_loss.py b/python/paddle/fluid/tests/unittests/test_multi_label_soft_margin_loss.py index 6799918711352..5e8a8fa189a1f 100644 --- a/python/paddle/fluid/tests/unittests/test_multi_label_soft_margin_loss.py +++ b/python/paddle/fluid/tests/unittests/test_multi_label_soft_margin_loss.py @@ -1,32 +1,52 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
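# This test module cross-checks four code paths against the NumPy reference
# implementation (calc_multilabel_margin_loss) defined further down: the
# paddle.nn.MultiLabelSoftMarginLoss layer and the
# paddle.nn.functional.multi_label_soft_margin_loss functional API, each run
# in both dygraph and static-graph mode, on CPU and, when Paddle is compiled
# with CUDA, on GPU.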
+ import paddle import numpy as np import unittest -def call_MultiLabelSoftMarginLoss_layer(input, - label, - weight=None, - reduction='mean',): - multilabel_margin_loss = paddle.nn.MultiLabelSoftMarginLoss(weight=weight,reduction=reduction) - res = multilabel_margin_loss(input=input,label=label,) + +def call_MultiLabelSoftMarginLoss_layer( + input, + label, + weight=None, + reduction='mean', ): + multilabel_margin_loss = paddle.nn.MultiLabelSoftMarginLoss( + weight=weight, reduction=reduction) + res = multilabel_margin_loss( + input=input, + label=label, ) return res -def call_MultiLabelSoftMarginLoss_functional(input, - label, - weight=None, - reduction='mean',): +def call_MultiLabelSoftMarginLoss_functional( + input, + label, + weight=None, + reduction='mean', ): res = paddle.nn.functional.multi_label_soft_margin_loss( - input, - label, - reduction=reduction, - weight=weight,) + input, + label, + reduction=reduction, + weight=weight, ) return res def test_static(place, input_np, label_np, - weight_np = None, + weight_np=None, reduction='mean', functional=False): paddle.enable_static() @@ -37,27 +57,34 @@ def test_static(place, name='input', shape=input_np.shape, dtype='float64') label = paddle.static.data( name='label', shape=label_np.shape, dtype='float64') - feed_dict = {"input": input_np, "label": label_np,} + feed_dict = { + "input": input_np, + "label": label_np, + } weight = None if weight_np is not None: - weight = paddle.static.data(name='weight', shape=weight_np.shape,dtype='float64') - feed_dict['weight']= weight_np + weight = paddle.static.data( + name='weight', shape=weight_np.shape, dtype='float64') + feed_dict['weight'] = weight_np if functional: - res = call_MultiLabelSoftMarginLoss_functional(input=input,label=label,weight=weight,reduction=reduction) + res = call_MultiLabelSoftMarginLoss_functional( + input=input, label=label, weight=weight, reduction=reduction) else: - res = call_MultiLabelSoftMarginLoss_layer(input=input,label=label,weight=weight,reduction=reduction) + res = call_MultiLabelSoftMarginLoss_layer( + input=input, label=label, weight=weight, reduction=reduction) exe = paddle.static.Executor(place) static_result = exe.run(prog, feed=feed_dict, fetch_list=[res]) return static_result + def test_dygraph(place, - input_np, - label_np, - weight = None, - reduction='mean', - functional=False): + input_np, + label_np, + weight=None, + reduction='mean', + functional=False): with paddle.fluid.dygraph.base.guard(): input = paddle.to_tensor(input_np) label = paddle.to_tensor(label_np) @@ -65,27 +92,28 @@ def test_dygraph(place, weight = paddle.to_tensor(weight) if functional: - dy_res = call_MultiLabelSoftMarginLoss_functional(input=input, label=label, weight=weight, reduction=reduction) + dy_res = call_MultiLabelSoftMarginLoss_functional( + input=input, label=label, weight=weight, reduction=reduction) else: - dy_res = call_MultiLabelSoftMarginLoss_layer(input=input, label=label, weight=weight, reduction=reduction) + dy_res = call_MultiLabelSoftMarginLoss_layer( + input=input, label=label, weight=weight, reduction=reduction) dy_result = dy_res.numpy() return dy_result -def calc_multilabel_margin_loss(input, - label, - weight = None, - reduction = "mean",): - +def calc_multilabel_margin_loss( + input, + label, + weight=None, + reduction="mean", ): def LogSigmoid(x): - return np.log(1/(1+np.exp(-x))) + return np.log(1 / (1 + np.exp(-x))) loss = -(label * LogSigmoid(input) + (1 - label) * LogSigmoid(-input)) if weight is not None: loss = loss * weight - loss = 
loss.mean(axis=-1) # only return N loss values if reduction == "none": @@ -107,25 +135,35 @@ def test_MultiLabelSoftMarginLoss(self): reductions = ['sum', 'mean', 'none'] for place in places: for reduction in reductions: - expected = calc_multilabel_margin_loss(input=input, label=label, - reduction=reduction) - - dy_result = test_dygraph(place=place, - input_np=input, label_np=label, - reduction=reduction) - - static_result = test_static(place=place, - input_np=input, label_np=label, - reduction=reduction) + expected = calc_multilabel_margin_loss( + input=input, label=label, reduction=reduction) + + dy_result = test_dygraph( + place=place, + input_np=input, + label_np=label, + reduction=reduction) + + static_result = test_static( + place=place, + input_np=input, + label_np=label, + reduction=reduction) self.assertTrue(np.allclose(static_result, expected)) self.assertTrue(np.allclose(static_result, dy_result)) self.assertTrue(np.allclose(dy_result, expected)) - static_functional = test_static(place=place, - input_np=input, label_np=label, - reduction=reduction,functional=True) - dy_functional = test_dygraph(place=place, - input_np=input, label_np=label, - reduction=reduction,functional=True) + static_functional = test_static( + place=place, + input_np=input, + label_np=label, + reduction=reduction, + functional=True) + dy_functional = test_dygraph( + place=place, + input_np=input, + label_np=label, + reduction=reduction, + functional=True) self.assertTrue(np.allclose(static_functional, expected)) self.assertTrue(np.allclose(static_functional, dy_functional)) self.assertTrue(np.allclose(dy_functional, expected)) @@ -142,7 +180,7 @@ def test_MultiLabelSoftMarginLoss_error(self): ValueError, paddle.nn.functional.multi_label_soft_margin_loss, input=input, - label = label, + label=label, reduction="unsupport reduction") paddle.enable_static() @@ -150,31 +188,45 @@ def test_MultiLabelSoftMarginLoss_weights(self): input = np.random.uniform(0.1, 0.8, size=(5, 5)).astype(np.float64) label = np.random.randint(0, 2, size=(5, 5)).astype(np.float64) weight = np.random.randint(0, 2, size=(5, 5)).astype(np.float64) - place='cpu' + place = 'cpu' reduction = 'mean' - expected = calc_multilabel_margin_loss(input=input, label=label,weight=weight, - reduction=reduction) - - dy_result = test_dygraph(place=place, - input_np=input, label_np=label,weight=weight, - reduction=reduction) - - static_result = test_static(place=place, - input_np=input, label_np=label,weight_np=weight, - reduction=reduction) + expected = calc_multilabel_margin_loss( + input=input, label=label, weight=weight, reduction=reduction) + + dy_result = test_dygraph( + place=place, + input_np=input, + label_np=label, + weight=weight, + reduction=reduction) + + static_result = test_static( + place=place, + input_np=input, + label_np=label, + weight_np=weight, + reduction=reduction) self.assertTrue(np.allclose(static_result, expected)) self.assertTrue(np.allclose(static_result, dy_result)) self.assertTrue(np.allclose(dy_result, expected)) - static_functional = test_static(place=place, - input_np=input, label_np=label,weight_np=weight, - reduction=reduction, functional=True) - dy_functional = test_dygraph(place=place, - input_np=input, label_np=label,weight=weight, - reduction=reduction, functional=True) + static_functional = test_static( + place=place, + input_np=input, + label_np=label, + weight_np=weight, + reduction=reduction, + functional=True) + dy_functional = test_dygraph( + place=place, + input_np=input, + label_np=label, + weight=weight, + 
reduction=reduction, + functional=True) self.assertTrue(np.allclose(static_functional, expected)) self.assertTrue(np.allclose(static_functional, dy_functional)) self.assertTrue(np.allclose(dy_functional, expected)) - + def test_MultiLabelSoftMarginLoss_dimension(self): paddle.disable_static() @@ -187,5 +239,6 @@ def test_MultiLabelSoftMarginLoss_dimension(self): label=label) paddle.enable_static() + if __name__ == "__main__": - unittest.main() \ No newline at end of file + unittest.main() From 46261ef6ac110a9ac4bba1289610b6a6c91af3d2 Mon Sep 17 00:00:00 2001 From: yangguohao <1901212980@pku.edu.cn> Date: Tue, 7 Jun 2022 00:44:19 +0800 Subject: [PATCH 09/12] 2022-06-07 --- .../test_multi_label_soft_margin_loss.py | 202 +++++++++--------- 1 file changed, 105 insertions(+), 97 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_multi_label_soft_margin_loss.py b/python/paddle/fluid/tests/unittests/test_multi_label_soft_margin_loss.py index 5e8a8fa189a1f..1eae5eb97dbed 100644 --- a/python/paddle/fluid/tests/unittests/test_multi_label_soft_margin_loss.py +++ b/python/paddle/fluid/tests/unittests/test_multi_label_soft_margin_loss.py @@ -18,28 +18,32 @@ def call_MultiLabelSoftMarginLoss_layer( - input, - label, - weight=None, - reduction='mean', ): + input, + label, + weight=None, + reduction='mean', +): multilabel_margin_loss = paddle.nn.MultiLabelSoftMarginLoss( weight=weight, reduction=reduction) res = multilabel_margin_loss( input=input, - label=label, ) + label=label, + ) return res def call_MultiLabelSoftMarginLoss_functional( - input, - label, - weight=None, - reduction='mean', ): + input, + label, + weight=None, + reduction='mean', +): res = paddle.nn.functional.multi_label_soft_margin_loss( input, label, reduction=reduction, - weight=weight, ) + weight=weight, + ) return res @@ -53,26 +57,33 @@ def test_static(place, prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.static.data( - name='input', shape=input_np.shape, dtype='float64') - label = paddle.static.data( - name='label', shape=label_np.shape, dtype='float64') + input = paddle.static.data(name='input', + shape=input_np.shape, + dtype='float64') + label = paddle.static.data(name='label', + shape=label_np.shape, + dtype='float64') feed_dict = { "input": input_np, "label": label_np, } weight = None if weight_np is not None: - weight = paddle.static.data( - name='weight', shape=weight_np.shape, dtype='float64') + weight = paddle.static.data(name='weight', + shape=weight_np.shape, + dtype='float64') feed_dict['weight'] = weight_np if functional: - res = call_MultiLabelSoftMarginLoss_functional( - input=input, label=label, weight=weight, reduction=reduction) + res = call_MultiLabelSoftMarginLoss_functional(input=input, + label=label, + weight=weight, + reduction=reduction) else: - res = call_MultiLabelSoftMarginLoss_layer( - input=input, label=label, weight=weight, reduction=reduction) + res = call_MultiLabelSoftMarginLoss_layer(input=input, + label=label, + weight=weight, + reduction=reduction) exe = paddle.static.Executor(place) static_result = exe.run(prog, feed=feed_dict, fetch_list=[res]) @@ -95,17 +106,21 @@ def test_dygraph(place, dy_res = call_MultiLabelSoftMarginLoss_functional( input=input, label=label, weight=weight, reduction=reduction) else: - dy_res = call_MultiLabelSoftMarginLoss_layer( - input=input, label=label, weight=weight, reduction=reduction) + dy_res = call_MultiLabelSoftMarginLoss_layer(input=input, + 
label=label, + weight=weight, + reduction=reduction) dy_result = dy_res.numpy() return dy_result def calc_multilabel_margin_loss( - input, - label, - weight=None, - reduction="mean", ): + input, + label, + weight=None, + reduction="mean", +): + def LogSigmoid(x): return np.log(1 / (1 + np.exp(-x))) @@ -125,6 +140,7 @@ def LogSigmoid(x): class TestMultiLabelMarginLoss(unittest.TestCase): + def test_MultiLabelSoftMarginLoss(self): input = np.random.uniform(0.1, 0.8, size=(5, 5)).astype(np.float64) label = np.random.randint(0, 2, size=(5, 5)).astype(np.float64) @@ -135,53 +151,48 @@ def test_MultiLabelSoftMarginLoss(self): reductions = ['sum', 'mean', 'none'] for place in places: for reduction in reductions: - expected = calc_multilabel_margin_loss( - input=input, label=label, reduction=reduction) - - dy_result = test_dygraph( - place=place, - input_np=input, - label_np=label, - reduction=reduction) - - static_result = test_static( - place=place, - input_np=input, - label_np=label, - reduction=reduction) + expected = calc_multilabel_margin_loss(input=input, + label=label, + reduction=reduction) + + dy_result = test_dygraph(place=place, + input_np=input, + label_np=label, + reduction=reduction) + + static_result = test_static(place=place, + input_np=input, + label_np=label, + reduction=reduction) self.assertTrue(np.allclose(static_result, expected)) self.assertTrue(np.allclose(static_result, dy_result)) self.assertTrue(np.allclose(dy_result, expected)) - static_functional = test_static( - place=place, - input_np=input, - label_np=label, - reduction=reduction, - functional=True) - dy_functional = test_dygraph( - place=place, - input_np=input, - label_np=label, - reduction=reduction, - functional=True) + static_functional = test_static(place=place, + input_np=input, + label_np=label, + reduction=reduction, + functional=True) + dy_functional = test_dygraph(place=place, + input_np=input, + label_np=label, + reduction=reduction, + functional=True) self.assertTrue(np.allclose(static_functional, expected)) self.assertTrue(np.allclose(static_functional, dy_functional)) self.assertTrue(np.allclose(dy_functional, expected)) def test_MultiLabelSoftMarginLoss_error(self): paddle.disable_static() - self.assertRaises( - ValueError, - paddle.nn.MultiLabelSoftMarginLoss, - reduction="unsupport reduction") + self.assertRaises(ValueError, + paddle.nn.MultiLabelSoftMarginLoss, + reduction="unsupport reduction") input = paddle.to_tensor([[0.1, 0.3]], dtype='float32') label = paddle.to_tensor([[0.0, 1.0]], dtype='float32') - self.assertRaises( - ValueError, - paddle.nn.functional.multi_label_soft_margin_loss, - input=input, - label=label, - reduction="unsupport reduction") + self.assertRaises(ValueError, + paddle.nn.functional.multi_label_soft_margin_loss, + input=input, + label=label, + reduction="unsupport reduction") paddle.enable_static() def test_MultiLabelSoftMarginLoss_weights(self): @@ -190,39 +201,37 @@ def test_MultiLabelSoftMarginLoss_weights(self): weight = np.random.randint(0, 2, size=(5, 5)).astype(np.float64) place = 'cpu' reduction = 'mean' - expected = calc_multilabel_margin_loss( - input=input, label=label, weight=weight, reduction=reduction) - - dy_result = test_dygraph( - place=place, - input_np=input, - label_np=label, - weight=weight, - reduction=reduction) - - static_result = test_static( - place=place, - input_np=input, - label_np=label, - weight_np=weight, - reduction=reduction) + expected = calc_multilabel_margin_loss(input=input, + label=label, + weight=weight, + reduction=reduction) + + 
dy_result = test_dygraph(place=place, + input_np=input, + label_np=label, + weight=weight, + reduction=reduction) + + static_result = test_static(place=place, + input_np=input, + label_np=label, + weight_np=weight, + reduction=reduction) self.assertTrue(np.allclose(static_result, expected)) self.assertTrue(np.allclose(static_result, dy_result)) self.assertTrue(np.allclose(dy_result, expected)) - static_functional = test_static( - place=place, - input_np=input, - label_np=label, - weight_np=weight, - reduction=reduction, - functional=True) - dy_functional = test_dygraph( - place=place, - input_np=input, - label_np=label, - weight=weight, - reduction=reduction, - functional=True) + static_functional = test_static(place=place, + input_np=input, + label_np=label, + weight_np=weight, + reduction=reduction, + functional=True) + dy_functional = test_dygraph(place=place, + input_np=input, + label_np=label, + weight=weight, + reduction=reduction, + functional=True) self.assertTrue(np.allclose(static_functional, expected)) self.assertTrue(np.allclose(static_functional, dy_functional)) self.assertTrue(np.allclose(dy_functional, expected)) @@ -232,11 +241,10 @@ def test_MultiLabelSoftMarginLoss_dimension(self): input = paddle.to_tensor([[0.1, 0.3], [1, 2]], dtype='float32') label = paddle.to_tensor([[0.2, 0.1]], dtype='float32') - self.assertRaises( - ValueError, - paddle.nn.functional.multi_label_soft_margin_loss, - input=input, - label=label) + self.assertRaises(ValueError, + paddle.nn.functional.multi_label_soft_margin_loss, + input=input, + label=label) paddle.enable_static() From 016fa330c15bbf26222df23199b6f1ea8be54622 Mon Sep 17 00:00:00 2001 From: yangguohao <1901212980@pku.edu.cn> Date: Tue, 7 Jun 2022 21:08:37 +0800 Subject: [PATCH 10/12] 2022-06-07_V2 --- python/paddle/nn/__init__.py | 3 +-- python/paddle/nn/functional/__init__.py | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/python/paddle/nn/__init__.py b/python/paddle/nn/__init__.py index 0cf1fc0d91baa..81b43ee1b2c19 100644 --- a/python/paddle/nn/__init__.py +++ b/python/paddle/nn/__init__.py @@ -191,8 +191,7 @@ def weight_norm(*args): return utils.weight_norm(*args) - -__all__ = [ #noqa +__all__ = [ # noqa 'BatchNorm', 'CELU', 'GroupNorm', diff --git a/python/paddle/nn/functional/__init__.py b/python/paddle/nn/functional/__init__.py index b83b5082f08f4..6684f2c66bfc7 100644 --- a/python/paddle/nn/functional/__init__.py +++ b/python/paddle/nn/functional/__init__.py @@ -125,8 +125,7 @@ from .sparse_attention import sparse_attention - -__all__ = [ #noqa +__all__ = [ # noqa 'celu', 'conv1d', 'conv1d_transpose', From 13292480e9e1aef4401582f60a2c9ad387969f5d Mon Sep 17 00:00:00 2001 From: yangguohao <1901212980@pku.edu.cn> Date: Tue, 7 Jun 2022 21:36:38 +0800 Subject: [PATCH 11/12] 2022-06-07_V2 --- python/paddle/nn/functional/loss.py | 31 ++++++++++++++--------------- python/paddle/nn/layer/loss.py | 13 ++++++------ 2 files changed, 21 insertions(+), 23 deletions(-) diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py index 0c3b9a3440550..d37627cc7bf11 100755 --- a/python/paddle/nn/functional/loss.py +++ b/python/paddle/nn/functional/loss.py @@ -2764,12 +2764,12 @@ def hinge_embedding_loss(input, label, margin=1.0, reduction='mean', name=None): elif reduction == 'none': return loss -def multi_label_soft_margin_loss( - input, - label, - weight = None, - reduction = "mean", - name = None): + +def multi_label_soft_margin_loss(input, + label, + weight=None, + reduction="mean", + 
name=None): r""" Parameters: @@ -2816,24 +2816,23 @@ def multi_label_soft_margin_loss( "'reduction' in 'multi_label_soft_margin_loss' should be 'sum', 'mean' or 'none', " "but received {}.".format(reduction)) - if not(input.shape==label.shape): - raise ValueError( - "The input and label should have same dimension," - "but received {}!={}".format(input.shape,label.shape) - ) + if not (input.shape == label.shape): + raise ValueError("The input and label should have same dimension," + "but received {}!={}".format(input.shape, label.shape)) if not _non_static_mode(): check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'multilabel_soft_margin_loss') + 'multi_label_soft_margin_loss') check_variable_and_dtype(label, 'label', ['float32', 'float64'], - 'multilabel_soft_margin_loss') + 'multi_label_soft_margin_loss') - loss = -(label * paddle.nn.functional.log_sigmoid(input) + (1 - label) * paddle.nn.functional.log_sigmoid(-input)) + loss = -(label * paddle.nn.functional.log_sigmoid(input) + + (1 - label) * paddle.nn.functional.log_sigmoid(-input)) if weight is not None: if not _non_static_mode(): - check_variable_and_dtype(weight,'weight',['float32','float64'], - 'multilabel_soft_margin_loss') + check_variable_and_dtype(weight, 'weight', ['float32', 'float64'], + 'multi_label_soft_margin_loss') loss = loss * weight loss = loss.mean(axis=-1) # only return N loss values diff --git a/python/paddle/nn/layer/loss.py b/python/paddle/nn/layer/loss.py index 485d22a211231..b6ce6a0a5f46a 100644 --- a/python/paddle/nn/layer/loss.py +++ b/python/paddle/nn/layer/loss.py @@ -1375,16 +1375,15 @@ def __init__(self, weight=None, reduction="mean", name=None): super(MultiLabelSoftMarginLoss, self).__init__() if reduction not in ['sum', 'mean', 'none']: raise ValueError( - "'reduction' in 'MultiLabelSoftMarginloss' should be 'sum', 'mean' or 'none', " + "'reduction' in 'MultiLabelSoftMarginLoss' should be 'sum', 'mean' or 'none', " "but received {}.".format(reduction)) self.weight = weight self.reduction = reduction self.name = name def forward(self, input, label): - return F.multi_label_soft_margin_loss( - input, - label, - weight=self.weight, - reduction=self.reduction, - name=self.name) + return F.multi_label_soft_margin_loss(input, + label, + weight=self.weight, + reduction=self.reduction, + name=self.name) From ce13bcf2443f84e4cfcacc892c367040c5d80544 Mon Sep 17 00:00:00 2001 From: yangguohao <1901212980@pku.edu.cn> Date: Fri, 17 Jun 2022 10:16:35 +0800 Subject: [PATCH 12/12] 2022-06-17_codestyle --- python/paddle/nn/__init__.py | 4 +- python/paddle/nn/functional/__init__.py | 2 +- python/paddle/nn/functional/loss.py | 160 ++++++++++++------------ python/paddle/nn/layer/__init__.py | 2 +- python/paddle/nn/layer/loss.py | 156 +++++++++++------------ 5 files changed, 162 insertions(+), 162 deletions(-) diff --git a/python/paddle/nn/__init__.py b/python/paddle/nn/__init__.py index a9714a4451744..708cffd10b771 100644 --- a/python/paddle/nn/__init__.py +++ b/python/paddle/nn/__init__.py @@ -104,12 +104,12 @@ from .layer.loss import BCELoss # noqa: F401 from .layer.loss import KLDivLoss # noqa: F401 from .layer.loss import MarginRankingLoss # noqa: F401 +from .layer.loss import MultiLabelSoftMarginLoss from .layer.loss import CTCLoss # noqa: F401 from .layer.loss import SmoothL1Loss # noqa: F401 from .layer.loss import HingeEmbeddingLoss # noqa: F401 from .layer.loss import CosineEmbeddingLoss # noqa: F401 from .layer.loss import TripletMarginWithDistanceLoss -from .layer.loss import 
MultiLabelSoftMarginLoss from .layer.norm import BatchNorm # noqa: F401 from .layer.norm import SyncBatchNorm # noqa: F401 from .layer.norm import GroupNorm # noqa: F401 @@ -312,10 +312,10 @@ def weight_norm(*args): 'MaxUnPool1D', 'MaxUnPool2D', 'MaxUnPool3D', + 'MultiLabelSoftMarginLoss', 'HingeEmbeddingLoss', 'Identity', 'CosineEmbeddingLoss', 'RReLU', - 'MultiLabelSoftMarginLoss', 'TripletMarginWithDistanceLoss', ] diff --git a/python/paddle/nn/functional/__init__.py b/python/paddle/nn/functional/__init__.py index c469984415c79..2192abbd32d22 100644 --- a/python/paddle/nn/functional/__init__.py +++ b/python/paddle/nn/functional/__init__.py @@ -206,6 +206,7 @@ 'log_loss', 'mse_loss', 'margin_ranking_loss', + 'multi_label_soft_margin_loss', 'nll_loss', 'npair_loss', 'sigmoid_focal_loss', @@ -235,5 +236,4 @@ 'cosine_embedding_loss', 'rrelu', 'triplet_margin_with_distance_loss', - 'multi_label_soft_margin_loss', ] diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py index 7f06d5965d61d..79cb9c236ddbf 100755 --- a/python/paddle/nn/functional/loss.py +++ b/python/paddle/nn/functional/loss.py @@ -2668,6 +2668,86 @@ def sigmoid_focal_loss(logit, return loss +def multi_label_soft_margin_loss(input, + label, + weight=None, + reduction="mean", + name=None): + r""" + + Parameters: + input (Tensor): Input tensor, the data type is float32 or float64. Shape is (N, C), where C is number of classes, and if shape is more than 2D, this is (N, C, D1, D2,..., Dk), k >= 1. + label (Tensor): Label tensor, the data type is float32 or float64. The shape of label is the same as the shape of input. + weight (Tensor,optional): a manual rescaling weight given to each class. + If given, has to be a Tensor of size C and the data type is float32, float64. + Default is ``'None'`` . + reduction (str, optional): Indicate how to average the loss by batch_size, + the candicates are ``'none'`` | ``'mean'`` | ``'sum'``. + If :attr:`reduction` is ``'none'``, the unreduced loss is returned; + If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned; + If :attr:`reduction` is ``'sum'``, the summed loss is returned. + Default: ``'mean'`` + name (str, optional): Name for the operation (optional, default is None). + For more information, please refer to :ref:`api_guide_Name`. + + Shape: + input: N-D Tensor, the shape is [N, \*], N is batch size and `\*` means number of classes, available dtype is float32, float64. The sum operationoperates over all the elements. + label: N-D Tensor, same shape as the input. + weight:N-D Tensor, the shape is [N,1] + output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the input. + + Returns: + Tensor, The tensor variable storing the multi_label_soft_margin_loss of input and label. + + Examples: + .. 
code-block:: python + + import paddle + import paddle.nn.functional as F + input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32) + # label elements in {1., -1.} + label = paddle.to_tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]], dtype=paddle.float32) + loss = F.multi_label_soft_margin_loss(input, label, reduction='none') + print(loss) + # Tensor([3.49625897, 0.71111226, 0.43989015]) + loss = F.multi_label_soft_margin_loss(input, label, reduction='mean') + print(loss) + # Tensor([1.54908717]) + """ + if reduction not in ['sum', 'mean', 'none']: + raise ValueError( + "'reduction' in 'multi_label_soft_margin_loss' should be 'sum', 'mean' or 'none', " + "but received {}.".format(reduction)) + + if not (input.shape == label.shape): + raise ValueError("The input and label should have same dimension," + "but received {}!={}".format(input.shape, label.shape)) + + if not _non_static_mode(): + check_variable_and_dtype(input, 'input', ['float32', 'float64'], + 'multilabel_soft_margin_loss') + check_variable_and_dtype(label, 'label', ['float32', 'float64'], + 'multilabel_soft_margin_loss') + + loss = -(label * paddle.nn.functional.log_sigmoid(input) + + (1 - label) * paddle.nn.functional.log_sigmoid(-input)) + + if weight is not None: + if not _non_static_mode(): + check_variable_and_dtype(weight, 'weight', ['float32', 'float64'], + 'multilabel_soft_margin_loss') + loss = loss * weight + + loss = loss.mean(axis=-1) # only return N loss values + + if reduction == "none": + return loss + elif reduction == "mean": + return paddle.mean(loss) + elif reduction == "sum": + return paddle.sum(loss) + + def hinge_embedding_loss(input, label, margin=1.0, reduction='mean', name=None): r""" This operator calculates hinge_embedding_loss. Measures the loss given an input tensor :math:`x` and a labels tensor :math:`y`(containing 1 or -1). @@ -2999,83 +3079,3 @@ def triplet_margin_with_distance_loss(input, return paddle.sum(loss, name=name) elif reduction == 'none': return loss - - -def multi_label_soft_margin_loss(input, - label, - weight=None, - reduction="mean", - name=None): - r""" - - Parameters: - input (Tensor): Input tensor, the data type is float32 or float64. Shape is (N, C), where C is number of classes, and if shape is more than 2D, this is (N, C, D1, D2,..., Dk), k >= 1. - label (Tensor): Label tensor, the data type is float32 or float64. The shape of label is the same as the shape of input. - weight (Tensor,optional): a manual rescaling weight given to each class. - If given, has to be a Tensor of size C and the data type is float32, float64. - Default is ``'None'`` . - reduction (str, optional): Indicate how to average the loss by batch_size, - the candicates are ``'none'`` | ``'mean'`` | ``'sum'``. - If :attr:`reduction` is ``'none'``, the unreduced loss is returned; - If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned; - If :attr:`reduction` is ``'sum'``, the summed loss is returned. - Default: ``'mean'`` - name (str, optional): Name for the operation (optional, default is None). - For more information, please refer to :ref:`api_guide_Name`. - - Shape: - input: N-D Tensor, the shape is [N, \*], N is batch size and `\*` means number of classes, available dtype is float32, float64. The sum operationoperates over all the elements. - label: N-D Tensor, same shape as the input. - weight:N-D Tensor, the shape is [N,1] - output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the input. 
- - Returns: - Tensor, The tensor variable storing the multi_label_soft_margin_loss of input and label. - - Examples: - .. code-block:: python - - import paddle - import paddle.nn.functional as F - input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32) - # label elements in {1., -1.} - label = paddle.to_tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]], dtype=paddle.float32) - loss = F.multi_label_soft_margin_loss(input, label, reduction='none') - print(loss) - # Tensor([3.49625897, 0.71111226, 0.43989015]) - loss = F.multi_label_soft_margin_loss(input, label, reduction='mean') - print(loss) - # Tensor([1.54908717]) - """ - if reduction not in ['sum', 'mean', 'none']: - raise ValueError( - "'reduction' in 'multi_label_soft_margin_loss' should be 'sum', 'mean' or 'none', " - "but received {}.".format(reduction)) - - if not (input.shape == label.shape): - raise ValueError("The input and label should have same dimension," - "but received {}!={}".format(input.shape, label.shape)) - - if not _non_static_mode(): - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'multi_label_soft_margin_loss') - check_variable_and_dtype(label, 'label', ['float32', 'float64'], - 'multi_label_soft_margin_loss') - - loss = -(label * paddle.nn.functional.log_sigmoid(input) + - (1 - label) * paddle.nn.functional.log_sigmoid(-input)) - - if weight is not None: - if not _non_static_mode(): - check_variable_and_dtype(weight, 'weight', ['float32', 'float64'], - 'multi_label_soft_margin_loss') - loss = loss * weight - - loss = loss.mean(axis=-1) # only return N loss values - - if reduction == "none": - return loss - elif reduction == "mean": - return paddle.mean(loss) - elif reduction == "sum": - return paddle.sum(loss) diff --git a/python/paddle/nn/layer/__init__.py b/python/paddle/nn/layer/__init__.py index 6c2c3dbed3679..b1890d14a9816 100644 --- a/python/paddle/nn/layer/__init__.py +++ b/python/paddle/nn/layer/__init__.py @@ -76,11 +76,11 @@ from .loss import BCELoss # noqa: F401 from .loss import KLDivLoss # noqa: F401 from .loss import MarginRankingLoss # noqa: F401 +from .loss import MultiLabelSoftMarginLoss from .loss import CTCLoss # noqa: F401 from .loss import SmoothL1Loss # noqa: F401 from .loss import HingeEmbeddingLoss # noqa: F401 from .loss import TripletMarginWithDistanceLoss -from .loss import MultiLabelSoftMarginLoss from .norm import BatchNorm1D # noqa: F401 from .norm import BatchNorm2D # noqa: F401 from .norm import BatchNorm3D # noqa: F401 diff --git a/python/paddle/nn/layer/loss.py b/python/paddle/nn/layer/loss.py index 471c88804bd13..6cc8dae75fc5b 100644 --- a/python/paddle/nn/layer/loss.py +++ b/python/paddle/nn/layer/loss.py @@ -1217,6 +1217,84 @@ def forward(self, input, label): name=self.name) +class MultiLabelSoftMarginLoss(Layer): + r"""Creates a criterion that optimizes a multi-class multi-classification + hinge loss (margin-based loss) between input :math:`x` (a 2D mini-batch `Tensor`) + and output :math:`y` (which is a 2D `Tensor` of target class indices). + For each sample in the mini-batch: + + .. math:: + \text{loss}(x, y) = \sum_{ij}\frac{\max(0, 1 - (x[y[j]] - x[i]))}{\text{x.size}(0)} + + where :math:`x \in \left\{0, \; \cdots , \; \text{x.size}(0) - 1\right\}`, \ + :math:`y \in \left\{0, \; \cdots , \; \text{y.size}(0) - 1\right\}`, \ + :math:`0 \leq y[j] \leq \text{x.size}(0)-1`, \ + and :math:`i \neq y[j]` for all :math:`i` and :math:`j`. + :math:`y` and :math:`x` must have the same size. 
diff --git a/python/paddle/nn/layer/loss.py b/python/paddle/nn/layer/loss.py
index 471c88804bd13..6cc8dae75fc5b 100644
--- a/python/paddle/nn/layer/loss.py
+++ b/python/paddle/nn/layer/loss.py
@@ -1217,6 +1217,84 @@ def forward(self, input, label):
             name=self.name)
 
 
+class MultiLabelSoftMarginLoss(Layer):
+    r"""Creates a criterion that measures a multi-label soft margin loss
+    between input :math:`x` (a 2D mini-batch `Tensor` of shape (N, C)) and
+    label :math:`y` (a 2D `Tensor` of the same shape, containing 1 or -1),
+    based on the log-sigmoid of the input. For each sample in the mini-batch:
+
+    .. math::
+        \text{loss}(x, y) = -\frac{1}{C}\sum_{i}\left(y[i] \cdot \log\sigma(x[i]) + (1 - y[i]) \cdot \log\sigma(-x[i])\right)
+
+    where :math:`\sigma` is the sigmoid function, :math:`C` is the number of
+    classes, and :math:`i \in \left\{0, \; \cdots , \; C - 1\right\}`.
+    :math:`y` and :math:`x` must have the same shape.
+
+    Parameters:
+        weight (Tensor, optional): a manual rescaling weight given to each class.
+            If given, has to be a Tensor of size C and the data type is float32, float64.
+            Default is ``None`` .
+        reduction (str, optional): Indicate how to average the loss by batch_size,
+            the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
+            If :attr:`reduction` is ``'none'``, the unreduced loss is returned;
+            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;
+            If :attr:`reduction` is ``'sum'``, the summed loss is returned.
+            Default: ``'mean'``
+        name (str, optional): Name for the operation (optional, default is None).
+            For more information, please refer to :ref:`api_guide_Name`.
+
+    Call parameters:
+        input (Tensor): Input tensor, the data type is float32 or float64. Shape is (N, C), where C is number of classes, and if shape is more than 2D, this is (N, C, D1, D2,..., Dk), k >= 1.
+        label (Tensor): Label tensor containing 1 or -1, the data type is float32 or float64. The shape of label is the same as the shape of input.
+
+    Shape:
+        input: N-D Tensor, the shape is [N, \*], N is batch size and `\*` means number of classes, available dtype is float32, float64. The sum operation operates over all the elements.
+        label: N-D Tensor, same shape as the input.
+        output: scalar by default. If :attr:`reduction` is ``'none'``, the shape is [N], one loss value per sample.
+
+    Returns:
+        A callable object of MultiLabelSoftMarginLoss.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+            import paddle.nn as nn
+
+            input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32)
+            label = paddle.to_tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]], dtype=paddle.float32)
+
+            multi_label_soft_margin_loss = nn.MultiLabelSoftMarginLoss(reduction='none')
+            loss = multi_label_soft_margin_loss(input, label)
+            print(loss)
+            # Tensor([3.49625897, 0.71111226, 0.43989015])
+
+            multi_label_soft_margin_loss = nn.MultiLabelSoftMarginLoss(reduction='mean')
+            loss = multi_label_soft_margin_loss(input, label)
+            print(loss)
+            # Tensor([1.54908717])
+    """
+
+    def __init__(self, weight=None, reduction="mean", name=None):
+        super(MultiLabelSoftMarginLoss, self).__init__()
+        if reduction not in ['sum', 'mean', 'none']:
+            raise ValueError(
+                "'reduction' in 'MultiLabelSoftMarginLoss' should be 'sum', 'mean' or 'none', "
+                "but received {}.".format(reduction))
+        self.weight = weight
+        self.reduction = reduction
+        self.name = name
+
+    def forward(self, input, label):
+        return F.multi_label_soft_margin_loss(input,
+                                              label,
+                                              weight=self.weight,
+                                              reduction=self.reduction,
+                                              name=self.name)
+
+
 class HingeEmbeddingLoss(Layer):
     r"""
     This operator calculates hinge_embedding_loss. Measures the loss given an input tensor :math:`x` and a labels tensor :math:`y` (containing 1 or -1).
@@ -1507,81 +1585,3 @@ def forward(self, input, positive, negative):
                swap=self.swap,
                reduction=self.reduction,
                name=self.name)
-
-
-class MultiLabelSoftMarginLoss(Layer):
-    r"""Creates a criterion that optimizes a multi-class multi-classification
-    hinge loss (margin-based loss) between input :math:`x` (a 2D mini-batch `Tensor`)
-    and output :math:`y` (which is a 2D `Tensor` of target class indices).
-    For each sample in the mini-batch:
-
-    .. math::
-        \text{loss}(x, y) = \sum_{ij}\frac{\max(0, 1 - (x[y[j]] - x[i]))}{\text{x.size}(0)}
-
-    where :math:`x \in \left\{0, \; \cdots , \; \text{x.size}(0) - 1\right\}`, \
-    :math:`y \in \left\{0, \; \cdots , \; \text{y.size}(0) - 1\right\}`, \
-    :math:`0 \leq y[j] \leq \text{x.size}(0)-1`, \
-    and :math:`i \neq y[j]` for all :math:`i` and :math:`j`.
-    :math:`y` and :math:`x` must have the same size.
-
-    Parameters:
-        weight (Tensor,optional): a manual rescaling weight given to each class.
-            If given, has to be a Tensor of size C and the data type is float32, float64.
-            Default is ``'None'`` .
-        reduction (str, optional): Indicate how to average the loss by batch_size,
-            the candicates are ``'none'`` | ``'mean'`` | ``'sum'``.
-            If :attr:`reduction` is ``'none'``, the unreduced loss is returned;
-            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;
-            If :attr:`reduction` is ``'sum'``, the summed loss is returned.
-            Default: ``'mean'``
-        name (str, optional): Name for the operation (optional, default is None).
-            For more information, please refer to :ref:`api_guide_Name`.
-
-    Call parameters:
-        input (Tensor): Input tensor, the data type is float32 or float64. Shape is (N, C), where C is number of classes, and if shape is more than 2D, this is (N, C, D1, D2,..., Dk), k >= 1.
-        label (Tensor): Label tensor containing 1 or -1, the data type is float32 or float64. The shape of label is the same as the shape of input.
-
-    Shape:
-        input: N-D Tensor, the shape is [N, \*], N is batch size and `\*` means number of classes, available dtype is float32, float64. The sum operationoperates over all the elements.
-        label: N-D Tensor, same shape as the input.
-        output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the input.
-
-    Returns:
-        A callable object of MultiLabelSoftMarginLoss.
-
-    Examples:
-        .. code-block:: python
-
-            import paddle
-            import paddle.nn as nn
-
-            input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32)
-            label = paddle.to_tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]], dtype=paddle.float32)
-
-            multi_label_soft_margin_loss = nn.MultiLabelSoftMarginLoss(reduction='none')
-            loss = multi_label_soft_margin_loss(input, label)
-            print(loss)
-            # Tensor([3.49625897, 0.71111226, 0.43989015])
-
-            multi_label_soft_margin_loss = nn.MultiLabelSoftMarginLoss(reduction='mean')
-            loss = multi_label_soft_margin_loss(input, label)
-            print(loss)
-            # Tensor([1.54908717])
-    """
-
-    def __init__(self, weight=None, reduction="mean", name=None):
-        super(MultiLabelSoftMarginLoss, self).__init__()
-        if reduction not in ['sum', 'mean', 'none']:
-            raise ValueError(
-                "'reduction' in 'MultiLabelSoftMarginLoss' should be 'sum', 'mean' or 'none', "
-                "but received {}.".format(reduction))
-        self.weight = weight
-        self.reduction = reduction
-        self.name = name
-
-    def forward(self, input, label):
-        return F.multi_label_soft_margin_loss(input,
-                                              label,
-                                              weight=self.weight,
-                                              reduction=self.reduction,
-                                              name=self.name)
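For completeness, the layer interface simply forwards to the functional API; a minimal sketch with a constructor-time weight (again with illustrative weight values, assuming the patch is applied):

.. code-block:: python

    import paddle
    import paddle.nn as nn

    input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32)
    label = paddle.to_tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]], dtype=paddle.float32)
    weight = paddle.to_tensor([1.0, 0.5, 2.0])

    # weight is fixed at construction and rescales each class before reduction
    loss_fn = nn.MultiLabelSoftMarginLoss(weight=weight, reduction='mean')
    loss = loss_fn(input, label)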