
clear fluid api: sigmoid_cross_entropy_with_logits (#48146)
* clear fluid api: sigmoid_cross_entropy_with_logits

* fix loss.py

* change paddle.nn.functional.sigmoid_cross_entropy_with_logits

* delete sigmoid_cross_entropy_with_logits

* fix binary_cross_entropy_with_logits

* fix ci bug

* fix ci bug
yuehuayingxueluo committed Nov 30, 2022
1 parent 41da96c commit 9ff99e9
Showing 11 changed files with 61 additions and 233 deletions.
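The migration this commit asks of downstream code is mechanical; a minimal sketch with illustrative tensors (note the keyword rename from x to logit, and that the replacement defaults to reduction='mean' while the removed op returned the elementwise loss):

    import paddle
    import paddle.nn.functional as F

    logits = paddle.rand([8, 4], dtype='float32')
    labels = paddle.rand([8, 4], dtype='float32')

    # Removed in this commit:
    #   loss = paddle.fluid.layers.sigmoid_cross_entropy_with_logits(x=logits, label=labels)
    # Replacement; reduction='none' keeps the elementwise output the old op produced.
    loss = F.binary_cross_entropy_with_logits(
        logit=logits, label=labels, reduction='none'
    )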
64 changes: 0 additions & 64 deletions python/paddle/fluid/layers/loss.py
@@ -38,7 +38,6 @@
'cross_entropy',
'square_error_cost',
'softmax_with_cross_entropy',
'sigmoid_cross_entropy_with_logits',
]

kIgnoreIndex = -100
@@ -292,66 +291,3 @@ def softmax_with_cross_entropy(
return_softmax,
axis,
)


@templatedoc()
def sigmoid_cross_entropy_with_logits(
x, label, ignore_index=kIgnoreIndex, name=None, normalize=False
):
"""
${comment}
Args:
x(Tensor): a 2-D tensor with shape N x D, where N is the batch size and
D is the number of classes. This input is a tensor of logits computed
by the previous operator. Logits are unscaled log probabilities given
as log(p/(1-p)). The data type should be float32 or float64.
label (Tensor): a 2-D tensor of the same type and shape as X.
This input is a tensor of probabilistic labels for each logit.
ignore_index(int): Specifies a target value that is ignored and
does not contribute to the input gradient.
name(str|None): The default value is None. Normally there is
no need for the user to set this property. For more information,
please refer to :ref:`api_guide_Name`.
normalize(bool): If True, divide the output by the number of
targets != ignore_index.
Returns:
out(Tensor): ${out_comment}
Examples:
.. code-block:: python

    import paddle

    input = paddle.rand(shape=[10], dtype='float32')
    label = paddle.rand(shape=[10], dtype='float32')
    loss = paddle.fluid.layers.sigmoid_cross_entropy_with_logits(
        input, label, ignore_index=-1, normalize=True)
    print(loss)
"""

if in_dygraph_mode():
return _C_ops.sigmoid_cross_entropy_with_logits(
x, label, normalize, int(ignore_index)
)
check_variable_and_dtype(
x,
'input',
['float16', 'float32', 'float64'],
'sigmoid_cross_entropy_with_logits',
)

helper = LayerHelper("sigmoid_cross_entropy_with_logits", **locals())

out = helper.create_variable_for_type_inference(dtype=x.dtype)

helper.append_op(
type="sigmoid_cross_entropy_with_logits",
inputs={"X": x, "Label": label},
attrs={"ignore_index": ignore_index, 'normalize': normalize},
outputs={"Out": out},
)
return out
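For reference, the removed op computes the numerically stable elementwise sigmoid cross entropy, max(x, 0) - x * label + log(1 + exp(-|x|)). A NumPy sketch of that formula together with the ignore_index and normalize behavior described in the docstring above (an illustration of the semantics, not the actual kernel code):

    import numpy as np

    def sigmoid_xent_reference(x, label, ignore_index=-100, normalize=False):
        # Stable form of -label*log(sigmoid(x)) - (1-label)*log(1-sigmoid(x)).
        loss = np.maximum(x, 0) - x * label + np.log1p(np.exp(-np.abs(x)))
        # Targets equal to ignore_index contribute nothing to the loss.
        mask = label != ignore_index
        loss = np.where(mask, loss, 0.0)
        if normalize:
            # Divide by the number of targets != ignore_index, per the docstring.
            loss = loss / max(mask.sum(), 1)
        return loss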
@@ -14,6 +14,7 @@

import math
import random
import paddle
import numpy as np
import paddle
import paddle.fluid as fluid
@@ -262,7 +263,9 @@ def forward(self, center_words, target_words, label):

pred = paddle.nn.functional.sigmoid(word_sim)

loss = fluid.layers.sigmoid_cross_entropy_with_logits(word_sim, label)
loss = paddle.nn.functional.binary_cross_entropy_with_logits(
word_sim, label
)
loss = fluid.layers.reduce_mean(loss)

return pred, loss
@@ -271,7 +271,7 @@ def create_model(self, use_ipu=False):

class TestWithoutIdentityLoss5(TestBase):
def set_op_attrs(self):
self.loss_op = paddle.fluid.layers.sigmoid_cross_entropy_with_logits
self.loss_op = paddle.nn.functional.binary_cross_entropy_with_logits

def set_data_feed(self):
self.data = paddle.uniform((8, 3, 10, 10), dtype='float32')

This file was deleted.

4 changes: 2 additions & 2 deletions python/paddle/fluid/tests/unittests/test_dist_transpiler.py
@@ -427,10 +427,10 @@ def net_conf(self):
true_logits, shape=[-1, neg_num], value=0.0, dtype='float32'
)

true_xent = fluid.layers.sigmoid_cross_entropy_with_logits(
true_xent = paddle.nn.functional.binary_cross_entropy_with_logits(
true_logits, label_ones
)
neg_xent = fluid.layers.sigmoid_cross_entropy_with_logits(
neg_xent = paddle.nn.functional.binary_cross_entropy_with_logits(
neg_logits, label_zeros
)
cost = fluid.layers.elementwise_add(
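The word2vec-style NCE loss above pairs a positive-sample term against a negative-sample term. A condensed sketch of the same construction in the 2.x API (shapes and names are illustrative, not taken from the test):

    import paddle
    import paddle.nn.functional as F

    neg_num = 5
    true_logits = paddle.rand([32, 1])        # scores for positive pairs
    neg_logits = paddle.rand([32, neg_num])   # scores for sampled negatives

    # Positives are labeled 1, negatives 0; keep elementwise losses so
    # they can be summed per example.
    true_xent = F.binary_cross_entropy_with_logits(
        true_logits, paddle.ones_like(true_logits), reduction='none'
    )
    neg_xent = F.binary_cross_entropy_with_logits(
        neg_logits, paddle.zeros_like(neg_logits), reduction='none'
    )
    cost = paddle.sum(true_xent, axis=1) + paddle.sum(neg_xent, axis=1)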
40 changes: 22 additions & 18 deletions python/paddle/fluid/tests/unittests/test_imperative_gan.py
@@ -80,8 +80,8 @@ def func_test_gan_float32(self):

d_real = discriminator(img)
d_loss_real = fluid.layers.reduce_mean(
fluid.layers.sigmoid_cross_entropy_with_logits(
x=d_real,
paddle.nn.functional.binary_cross_entropy_with_logits(
logit=d_real,
label=fluid.layers.fill_constant(
shape=[2, 1], dtype='float32', value=1.0
),
@@ -90,8 +90,8 @@ def func_test_gan_float32(self):

d_fake = discriminator(generator(noise))
d_loss_fake = fluid.layers.reduce_mean(
fluid.layers.sigmoid_cross_entropy_with_logits(
x=d_fake,
paddle.nn.functional.binary_cross_entropy_with_logits(
logit=d_fake,
label=fluid.layers.fill_constant(
shape=[2, 1], dtype='float32', value=0.0
),
@@ -113,8 +113,8 @@ def func_test_gan_float32(self):

d_fake = discriminator(generator(noise))
g_loss = fluid.layers.reduce_mean(
fluid.layers.sigmoid_cross_entropy_with_logits(
x=d_fake,
paddle.nn.functional.binary_cross_entropy_with_logits(
logit=d_fake,
label=fluid.layers.fill_constant(
shape=[2, 1], dtype='float32', value=1.0
),
@@ -165,17 +165,18 @@ def func_test_gan_float32(self):

d_real = discriminator(to_variable(np.ones([2, 1], np.float32)))
d_loss_real = fluid.layers.reduce_mean(
fluid.layers.sigmoid_cross_entropy_with_logits(
x=d_real, label=to_variable(np.ones([2, 1], np.float32))
paddle.nn.functional.binary_cross_entropy_with_logits(
logit=d_real, label=to_variable(np.ones([2, 1], np.float32))
)
)

d_fake = discriminator(
generator(to_variable(np.ones([2, 2], np.float32)))
)
d_loss_fake = fluid.layers.reduce_mean(
fluid.layers.sigmoid_cross_entropy_with_logits(
x=d_fake, label=to_variable(np.zeros([2, 1], np.float32))
paddle.nn.functional.binary_cross_entropy_with_logits(
logit=d_fake,
label=to_variable(np.zeros([2, 1], np.float32)),
)
)

@@ -189,8 +190,8 @@ def func_test_gan_float32(self):
generator(to_variable(np.ones([2, 2], np.float32)))
)
g_loss = fluid.layers.reduce_mean(
fluid.layers.sigmoid_cross_entropy_with_logits(
x=d_fake, label=to_variable(np.ones([2, 1], np.float32))
paddle.nn.functional.binary_cross_entropy_with_logits(
logit=d_fake, label=to_variable(np.ones([2, 1], np.float32))
)
)
g_loss.backward()
@@ -219,17 +220,19 @@ def func_test_gan_float32(self):

d_real2 = discriminator2(to_variable(np.ones([2, 1], np.float32)))
d_loss_real2 = fluid.layers.reduce_mean(
fluid.layers.sigmoid_cross_entropy_with_logits(
x=d_real2, label=to_variable(np.ones([2, 1], np.float32))
paddle.nn.functional.binary_cross_entropy_with_logits(
logit=d_real2,
label=to_variable(np.ones([2, 1], np.float32)),
)
)

d_fake2 = discriminator2(
generator2(to_variable(np.ones([2, 2], np.float32)))
)
d_loss_fake2 = fluid.layers.reduce_mean(
fluid.layers.sigmoid_cross_entropy_with_logits(
x=d_fake2, label=to_variable(np.zeros([2, 1], np.float32))
paddle.nn.functional.binary_cross_entropy_with_logits(
logit=d_fake2,
label=to_variable(np.zeros([2, 1], np.float32)),
)
)

@@ -243,8 +246,9 @@ def func_test_gan_float32(self):
generator2(to_variable(np.ones([2, 2], np.float32)))
)
g_loss2 = fluid.layers.reduce_mean(
fluid.layers.sigmoid_cross_entropy_with_logits(
x=d_fake2, label=to_variable(np.ones([2, 1], np.float32))
paddle.nn.functional.binary_cross_entropy_with_logits(
logit=d_fake2,
label=to_variable(np.ones([2, 1], np.float32)),
)
)
g_loss2.backward()
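Every call in this GAN test follows the same shape: a loss built from binary_cross_entropy_with_logits against all-ones or all-zeros labels. A condensed sketch of the idiom (stand-in tensors; the default reduction='mean' already averages, which is why the surrounding reduce_mean wrappers become no-ops after this commit):

    import paddle
    import paddle.nn.functional as F

    d_real = paddle.rand([2, 1])   # stand-in for discriminator(img)
    d_fake = paddle.rand([2, 1])   # stand-in for discriminator(generator(noise))

    # Discriminator: real samples should score 1, fakes 0.
    d_loss = F.binary_cross_entropy_with_logits(d_real, paddle.ones([2, 1])) \
           + F.binary_cross_entropy_with_logits(d_fake, paddle.zeros([2, 1]))

    # Generator: fakes should fool the discriminator into scoring 1.
    g_loss = F.binary_cross_entropy_with_logits(d_fake, paddle.ones([2, 1]))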
@@ -381,7 +381,9 @@ def loss_cls(cls, label, cfg):
cls_shape = cls.shape
cls = paddle.reshape(cls, [-1, cls_shape[1] * cls_shape[2] * cls_shape[3]])
return (
paddle.sum(fluid.layers.sigmoid_cross_entropy_with_logits(cls, label))
paddle.sum(
paddle.nn.functional.binary_cross_entropy_with_logits(cls, label)
)
/ cfg.batch_size
)

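One subtlety in the loss_cls hunk above: binary_cross_entropy_with_logits defaults to reduction='mean', whereas the removed op returned an elementwise tensor, so the surrounding paddle.sum now sums a scalar. A hypothetical rewrite (not part of this commit) that preserves the original sum-then-divide semantics:

    import paddle
    import paddle.nn.functional as F

    def loss_cls(cls, label, cfg):
        cls = paddle.reshape(cls, [cls.shape[0], -1])
        # reduction='none' restores the elementwise loss the removed op
        # produced, so the explicit sum / batch_size scaling is unchanged.
        per_elem = F.binary_cross_entropy_with_logits(cls, label, reduction='none')
        return paddle.sum(per_elem) / cfg.batch_size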
11 changes: 0 additions & 11 deletions python/paddle/fluid/tests/unittests/test_layers.py
@@ -3152,17 +3152,6 @@ def make_word_embedding(self):
avg_cost = paddle.mean(cost)
return avg_cost

def make_sigmoid_cross_entropy(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
):
dat = self._get_data(name='data', shape=[10], dtype='float32')
lbl = self._get_data(name='label', shape=[10], dtype='float32')
ignore_index = -1
return layers.sigmoid_cross_entropy_with_logits(
x=dat, label=lbl, ignore_index=ignore_index
)

def make_pool2d(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
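The deleted make_sigmoid_cross_entropy test exercised the ignore_index argument, which binary_cross_entropy_with_logits does not expose. A sketch of one way to reproduce that behavior with a weight mask (bce_with_ignore_index is a hypothetical helper, not code from this repository):

    import paddle
    import paddle.nn.functional as F

    def bce_with_ignore_index(logit, label, ignore_index=-1):
        # Zero-weight positions whose label equals ignore_index so they
        # contribute neither to the loss nor to the gradient.
        mask = (label != ignore_index).astype(logit.dtype)
        return F.binary_cross_entropy_with_logits(
            logit, label * mask, weight=mask, reduction='none'
        )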
