rename the inputs of allclose #26360

Merged
3 commits merged on Aug 20, 2020
15 changes: 9 additions & 6 deletions paddle/fluid/operators/allclose_op.cc
@@ -22,9 +22,11 @@ namespace operators {
class AllcloseOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("Input", "The first input tensor to compare.");
AddInput("Other", "The second input tensor to compare.");
AddOutput("Out", "The output tensor of allclose op.");
AddInput("Input",
"The input tensor, it's data type should be float32, float64.");
AddInput("Other",
"The input tensor, it's data type should be float32, float64.");
AddOutput("Out", "The output tensor, it's data type is bool.");

AddAttr<float>("rtol", "The relative tolerance. Default: :math:`1e-5` .")
.SetDefault(1e-5);
@@ -36,11 +38,12 @@ class AllcloseOpMaker : public framework::OpProtoAndCheckerMaker {
.SetDefault(false);

AddComment(R"DOC(
This operator checks if all :math:`input` and :math:`other` satisfy the condition:
This operator checks if all :math:`x` and :math:`y` satisfy the condition:

:math:`\left| input - other \right| \leq atol + rtol \times \left| other \right|`
.. math::
\left| x - y \right| \leq atol + rtol \times \left| y \right|

elementwise, for all elements of :math:`input` and :math:`other`. The behaviour of this
elementwise, for all elements of :math:`x` and :math:`y`. The behaviour of this
operator is analogous to :math:`numpy.allclose`, namely that it returns :math:`True` if
two tensors are elementwise equal within a tolerance.
)DOC");
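As a side note (not part of the diff), the condition described in the DOC block above can be checked with plain NumPy. The sketch below is only an illustration of the documented formula; the helper name allclose_sketch is made up for this example:

    import numpy as np

    def allclose_sketch(x, y, rtol=1e-05, atol=1e-08, equal_nan=False):
        # elementwise check of |x - y| <= atol + rtol * |y|
        close = np.abs(x - y) <= atol + rtol * np.abs(y)
        if equal_nan:
            # optionally treat NaN values at matching positions as equal
            close = close | (np.isnan(x) & np.isnan(y))
        return bool(np.all(close))

    # allclose_sketch(np.array([10000., 1e-07]), np.array([10000.1, 1e-08]))  # -> False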
54 changes: 54 additions & 0 deletions python/paddle/fluid/tests/unittests/test_allclose_op.py
@@ -15,6 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
import paddle


class TestAllcloseOp(OpTest):
@@ -76,5 +77,58 @@ def set_args(self):
self.equal_nan = True


class TestAllcloseDygraph(unittest.TestCase):
def test_api_case(self):
paddle.disable_static()
x_data = np.random.rand(10, 10)
y_data = np.random.rand(10, 10)
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
out = paddle.allclose(x, y, rtol=1e-05, atol=1e-08)
expected_out = np.allclose(x_data, y_data, rtol=1e-05, atol=1e-08)
self.assertTrue((out.numpy() == expected_out).all(), True)
paddle.enable_static()


class TestAllcloseError(unittest.TestCase):
def test_input_dtype(self):
def test_x_dtype():
with paddle.static.program_guard(paddle.static.Program(),
paddle.static.Program()):
x = paddle.data(name='x', shape=[10, 10], dtype='float16')
y = paddle.data(name='y', shape=[10, 10], dtype='float64')
result = paddle.allclose(x, y)

self.assertRaises(TypeError, test_x_dtype)

def test_y_dtype():
with paddle.static.program_guard(paddle.static.Program(),
paddle.static.Program()):
x = paddle.data(name='x', shape=[10, 10], dtype='float64')
y = paddle.data(name='y', shape=[10, 10], dtype='int32')
result = paddle.allclose(x, y)

self.assertRaises(TypeError, test_y_dtype)

def test_attr(self):
x = paddle.data(name='x', shape=[10, 10], dtype='float64')
y = paddle.data(name='y', shape=[10, 10], dtype='float64')

def test_rtol():
result = paddle.allclose(x, y, rtol=True)

self.assertRaises(TypeError, test_rtol)

def test_atol():
result = paddle.allclose(x, y, atol=True)

self.assertRaises(TypeError, test_atol)

def test_equal_nan():
result = paddle.allclose(x, y, equal_nan=1)

self.assertRaises(TypeError, test_equal_nan)


if __name__ == "__main__":
unittest.main()
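As a complement to the dygraph test above (and not part of the PR), a static-graph check could look like the following sketch. It assumes the same 2.0-era API surface already used in TestAllcloseError (paddle.static.Program, paddle.data) plus paddle.static.Executor and paddle.CPUPlace:

    import numpy as np
    import paddle

    paddle.enable_static()
    main_prog, startup_prog = paddle.static.Program(), paddle.static.Program()
    with paddle.static.program_guard(main_prog, startup_prog):
        # build the graph: two float32 inputs and the allclose result
        x = paddle.data(name='x', shape=[2], dtype='float32')
        y = paddle.data(name='y', shape=[2], dtype='float32')
        out = paddle.allclose(x, y, rtol=1e-05, atol=1e-08)

    exe = paddle.static.Executor(paddle.CPUPlace())
    exe.run(startup_prog)
    res, = exe.run(main_prog,
                   feed={'x': np.array([10000., 1e-07], dtype='float32'),
                         'y': np.array([10000.1, 1e-08], dtype='float32')},
                   fetch_list=[out])
    # res should be a bool array matching np.allclose on the same inputs, i.e. [False] here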
100 changes: 50 additions & 50 deletions python/paddle/tensor/logic.py
@@ -13,9 +13,11 @@
# limitations under the License.

from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import check_type
from ..fluid.data_feeder import check_type, check_variable_and_dtype
from ..fluid.layers.layer_function_generator import templatedoc
from .. import fluid
from ..fluid.framework import in_dygraph_mode
from paddle.common_ops_import import *

# TODO: define logic functions of a tensor
from ..fluid.layers import is_empty #DEFINE_ALIAS
@@ -91,83 +93,81 @@ def equal_all(x, y, name=None):


@templatedoc()
def allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
"""
:alias_main: paddle.allclose
:alias: paddle.allclose,paddle.tensor.allclose,paddle.tensor.logic.allclose

Contributor:
The ":alias_main:" and ":alias:" lines can be removed; they will be added automatically later.

Contributor (Author):
The documentation issues will be addressed in a separate PR.
${comment}

Args:
input(inputtype):{input_comment}.
other(othertype):{other_comment}.
rtol(rtoltype,optional):{rtol_comment}.
atol(atoltype,optional):{atol_comment}.
equal_nan(equalnantype,optional):{equal_nan_comment}.
name(STR, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`.
x(Tensor): ${input_comment}.
y(Tensor): ${other_comment}.
rtol(rtoltype, optional): ${rtol_comment}.
atol(atoltype, optional): ${atol_comment}.
equal_nan(equalnantype, optional): ${equal_nan_comment}.
name (str, optional): Name for the operation. For more information, please
refer to :ref:`api_guide_Name`. Default: None.

Returns:
${out_comment}.
Tensor: ${out_comment}.

Raises:
TypeError: The data type of ``x`` must be one of float32, float64.
TypeError: The data type of ``y`` must be one of float32, float64.
TypeError: The type of ``rtol`` must be float.
TypeError: The type of ``atol`` must be float.
TypeError: The type of ``equal_nan`` must be bool.

Return Type:
${out_type}

Examples:
.. code-block:: python

import paddle
import paddle.fluid as fluid
import numpy as np

use_cuda = fluid.core.is_compiled_with_cuda()

a = fluid.data(name="a", shape=[2], dtype='float32')
b = fluid.data(name="b", shape=[2], dtype='float32')
paddle.disable_static()

result = paddle.allclose(a, b, rtol=1e-05, atol=1e-08,
np_x = np.array([10000., 1e-07]).astype("float32")
np_y = np.array([10000.1, 1e-08]).astype("float32")
x = paddle.to_tensor(np_x)
y = paddle.to_tensor(np_y)
result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
equal_nan=False, name="ignore_nan")
result_nan = paddle.allclose(a, b, rtol=1e-05, atol=1e-08,
np_result1 = result1.numpy()
# [False]
result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
equal_nan=True, name="equal_nan")

place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())

x = np.array([10000., 1e-07]).astype("float32")
y = np.array([10000.1, 1e-08]).astype("float32")
result_v, result_nan_v = exe.run(
feed={'a': x, 'b': y},
fetch_list=[result, result_nan])
print(result_v, result_nan_v)
# Output: (array([False]), array([False]))

x = np.array([10000., 1e-08]).astype("float32")
y = np.array([10000.1, 1e-09]).astype("float32")
result_v, result_nan_v = exe.run(
feed={'a': x, 'b': y},
fetch_list=[result, result_nan])
print(result_v, result_nan_v)
# Output: (array([ True]), array([ True]))

x = np.array([1.0, float('nan')]).astype("float32")
y = np.array([1.0, float('nan')]).astype("float32")
result_v, result_nan_v = exe.run(
feed={'a': x, 'b': y},
fetch_list=[result, result_nan])
print(result_v, result_nan_v)
# Output: (array([False]), array([ True]))
np_result2 = result2.numpy()
# [False]

np_x = np.array([1.0, float('nan')]).astype("float32")
np_y = np.array([1.0, float('nan')]).astype("float32")
x = paddle.to_tensor(np_x)
y = paddle.to_tensor(np_y)
result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
equal_nan=False, name="ignore_nan")
np_result1 = result1.numpy()
# [False]
result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
equal_nan=True, name="equal_nan")
np_result2 = result2.numpy()
# [True]
"""

if in_dygraph_mode():
return core.ops.allclose(x, y, 'rtol', rtol, 'atol', atol, 'equal_nan',
equal_nan)

check_variable_and_dtype(x, "input", ['float32', 'float64'], 'allclose')
check_variable_and_dtype(y, "input", ['float32', 'float64'], 'allclose')
check_type(rtol, 'rtol', float, 'allclose')
check_type(atol, 'atol', float, 'allclose')
check_type(equal_nan, 'equal_nan', bool, 'allclose')

helper = LayerHelper("allclose", **locals())
out = helper.create_variable_for_type_inference(dtype='bool')

inputs = {'Input': input, 'Other': other}
inputs = {'Input': x, 'Other': y}
outputs = {'Out': out}
attrs = {'rtol': rtol, 'atol': atol, 'equal_nan': equal_nan}
helper.append_op(
Expand Down