Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Eager] Support allclose and linalg_cond to eager mode #41545

Merged
merged 1 commit into from
Apr 9, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 8 additions & 2 deletions python/paddle/fluid/tests/unittests/test_allclose_layer.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
import paddle.fluid as fluid
import unittest
import numpy as np
from paddle.fluid.framework import _test_eager_guard


class TestAllcloseLayer(unittest.TestCase):
Expand Down Expand Up @@ -95,7 +96,7 @@ def test_allclose_gpu_fp64(self):
with fluid.program_guard(main, startup):
self.allclose_check(use_cuda=True, dtype='float64')

def test_dygraph_mode(self):
def func_dygraph_mode(self):
x_1 = np.array([10000., 1e-07]).astype("float32")
y_1 = np.array([10000.1, 1e-08]).astype("float32")
x_2 = np.array([10000., 1e-08]).astype("float32")
Expand Down Expand Up @@ -171,9 +172,14 @@ def test_dygraph_mode(self):
x_v_5 = paddle.to_tensor(x_5)
y_v_5 = paddle.to_tensor(y_5)
ret_5 = paddle.allclose(
x_v_5, y_v_5, rtol=0.01, atol=0.0, name='test_8')
x_v_5, y_v_5, rtol=0.015, atol=0.0, name='test_8')
self.assertEqual(ret_5.numpy()[0], True)

def test_dygraph_mode(self):
    """Run the allclose dygraph checks twice: once under the eager
    guard (new eager mode) and once in legacy dygraph mode."""
    runner = self.func_dygraph_mode
    with _test_eager_guard():
        runner()  # eager mode
    runner()  # legacy dygraph mode


# Allow the test module to be executed directly as a script.
if __name__ == "__main__":
    unittest.main()
22 changes: 19 additions & 3 deletions python/paddle/fluid/tests/unittests/test_linalg_cond.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
import numpy as np
import paddle
import paddle.static as static
from paddle.fluid.framework import _test_eager_guard

p_list_n_n = ("fro", "nuc", 1, -1, np.inf, -np.inf)
p_list_m_n = (None, 2, -2)
Expand Down Expand Up @@ -89,16 +90,21 @@ def test_out(self):


class API_TestDygraphCond(unittest.TestCase):
def test_out(self):
def func_out(self):
    """Check paddle.linalg.cond results in dynamic mode for both
    square (n x n) and rectangular (m x n) inputs."""
    paddle.disable_static()
    square_inputs, rect_inputs = gen_input()
    # Square matrices support every norm order; rectangular ones
    # only the subset in p_list_m_n.
    test_dygraph_assert_true(self, square_inputs, p_list_n_n + p_list_m_n)
    test_dygraph_assert_true(self, rect_inputs, p_list_m_n)

def test_out(self):
    """Exercise func_out in eager mode, then in legacy dygraph mode."""
    runner = self.func_out
    with _test_eager_guard():
        runner()  # eager mode
    runner()  # legacy dygraph mode


class TestCondAPIError(unittest.TestCase):
def test_dygraph_api_error(self):
def func_dygraph_api_error(self):
paddle.disable_static()
# test raising errors when 'cond' is called in dygraph mode
p_list_error = ('fro_', '_nuc', -0.7, 0, 1.5, 3)
Expand All @@ -113,6 +119,11 @@ def test_dygraph_api_error(self):
x_tensor = paddle.to_tensor(x)
self.assertRaises(ValueError, paddle.linalg.cond, x_tensor, p)

def test_dygraph_api_error(self):
    """Verify cond's dygraph error handling in eager mode, then in
    legacy dygraph mode."""
    runner = self.func_dygraph_api_error
    with _test_eager_guard():
        runner()  # eager mode
    runner()  # legacy dygraph mode

def test_static_api_error(self):
paddle.enable_static()
# test raising errors when 'cond' is called in static mode
Expand Down Expand Up @@ -149,13 +160,18 @@ def test_static_empty_input_error(self):


class TestCondEmptyTensorInput(unittest.TestCase):
def test_dygraph_empty_tensor_input(self):
def func_dygraph_empty_tensor_input(self):
    """Check paddle.linalg.cond on empty tensors in dynamic mode."""
    paddle.disable_static()
    empty_square, empty_rect = gen_empty_input()
    # Square empty tensors accept all norm orders; rectangular
    # empty tensors only those in p_list_m_n.
    test_dygraph_assert_true(self, empty_square, p_list_n_n + p_list_m_n)
    test_dygraph_assert_true(self, empty_rect, p_list_m_n)

def test_dygraph_empty_tensor_input(self):
    """Run the empty-tensor cond checks in eager mode, then in
    legacy dygraph mode."""
    runner = self.func_dygraph_empty_tensor_input
    with _test_eager_guard():
        runner()  # eager mode
    runner()  # legacy dygraph mode


if __name__ == "__main__":
paddle.enable_static()
Expand Down