Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

rm unittests eager guard test part15 layers2maxout #48837

Merged
merged 3 commits into from
Dec 21, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
559 changes: 4 additions & 555 deletions python/paddle/fluid/tests/unittests/test_layers.py

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,6 @@
import paddle
from paddle.distributed.models.moe import utils
from paddle.fluid import core
from paddle.fluid.framework import _test_eager_guard


def limit_by_capacity(expert_count, _capacity, n_worker):
Expand Down Expand Up @@ -88,7 +87,7 @@ def test_static_api(self):

assert all_close(self.out, res[0], self.n_worker)

def func_dygraph_api(self):
def test_dygraph_api(self):
paddle.disable_static(self.place)
capacity = paddle.to_tensor(self.capacity)
expert_count_tensor = paddle.to_tensor(self.expert_count)
Expand All @@ -97,11 +96,6 @@ def func_dygraph_api(self):
)
assert all_close(self.out, out.numpy(), self.n_worker)

def test_dygraph_api(self):
with _test_eager_guard():
self.func_dygraph_api()
self.func_dygraph_api()


@unittest.skipIf(
not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
Expand Down
22 changes: 3 additions & 19 deletions python/paddle/fluid/tests/unittests/test_linalg_cond.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,6 @@

import paddle
import paddle.static as static
from paddle.fluid.framework import _test_eager_guard

p_list_n_n = ("fro", "nuc", 1, -1, np.inf, -np.inf)
p_list_m_n = (None, 2, -2)
Expand Down Expand Up @@ -92,21 +91,16 @@ def test_out(self):


class API_TestDygraphCond(unittest.TestCase):
def func_out(self):
def test_out(self):
    """Check that paddle.linalg.cond matches the NumPy reference in dygraph mode.

    Covers square inputs under the full norm list and non-square inputs
    under the subset of norms defined for rectangular matrices.
    """
    paddle.disable_static()
    # Generated fixtures: square (n x n) and rectangular (m x n) matrices.
    square_inputs, rect_inputs = gen_input()
    test_dygraph_assert_true(self, square_inputs, p_list_n_n + p_list_m_n)
    test_dygraph_assert_true(self, rect_inputs, p_list_m_n)

def test_out(self):
with _test_eager_guard():
self.func_out()
self.func_out()


class TestCondAPIError(unittest.TestCase):
def func_dygraph_api_error(self):
def test_dygraph_api_error(self):
paddle.disable_static()
# test raising errors when 'cond' is called in dygraph mode
p_list_error = ('fro_', '_nuc', -0.7, 0, 1.5, 3)
Expand All @@ -121,11 +115,6 @@ def func_dygraph_api_error(self):
x_tensor = paddle.to_tensor(x)
self.assertRaises(ValueError, paddle.linalg.cond, x_tensor, p)

def test_dygraph_api_error(self):
with _test_eager_guard():
self.func_dygraph_api_error()
self.func_dygraph_api_error()

def test_static_api_error(self):
paddle.enable_static()
# test raising errors when 'cond' is called in static mode
Expand Down Expand Up @@ -162,18 +151,13 @@ def test_static_empty_input_error(self):


class TestCondEmptyTensorInput(unittest.TestCase):
def func_dygraph_empty_tensor_input(self):
def test_dygraph_empty_tensor_input(self):
    """Check paddle.linalg.cond on empty tensors in dygraph mode.

    Empty square and empty rectangular inputs are each exercised against
    the norm orders valid for their shape class.
    """
    paddle.disable_static()
    # Empty fixtures: zero-sized square and rectangular matrices.
    empty_square, empty_rect = gen_empty_input()
    test_dygraph_assert_true(self, empty_square, p_list_n_n + p_list_m_n)
    test_dygraph_assert_true(self, empty_rect, p_list_m_n)

def test_dygraph_empty_tensor_input(self):
with _test_eager_guard():
self.func_dygraph_empty_tensor_input()
self.func_dygraph_empty_tensor_input()


if __name__ == "__main__":
paddle.enable_static()
Expand Down
6 changes: 0 additions & 6 deletions python/paddle/fluid/tests/unittests/test_linspace.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, core, program_guard
from paddle.fluid.framework import _test_eager_guard


class TestLinspaceOpCommonCase(OpTest):
Expand Down Expand Up @@ -128,11 +127,6 @@ def test_imperative(self):
self.assertEqual((out2.numpy() == np_out2).all(), True)
self.assertEqual((out3.numpy() == np_out3).all(), True)

def test_api_eager_dygraph(self):
with _test_eager_guard():
self.test_variable_input2()
self.test_imperative()


class TestLinspaceOpError(unittest.TestCase):
def test_errors(self):
Expand Down
18 changes: 8 additions & 10 deletions python/paddle/fluid/tests/unittests/test_logical_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@
import numpy as np

import paddle
from paddle.fluid.framework import _test_eager_guard
from paddle.framework import _non_static_mode
from paddle.static import Executor, Program, program_guard

Expand Down Expand Up @@ -106,15 +105,14 @@ def run_eager(x_np, y_np, op_str, use_gpu=False, binary_op=True):
if use_gpu and paddle.is_compiled_with_cuda():
place = paddle.CUDAPlace(0)
paddle.disable_static(place)
with _test_eager_guard():
op = getattr(paddle, op_str)
x = paddle.to_tensor(x_np, dtype=x_np.dtype)
if not binary_op:
dygraph_result = op(x)
else:
y = paddle.to_tensor(y_np, dtype=y_np.dtype)
dygraph_result = op(x, y)
return dygraph_result
op = getattr(paddle, op_str)
x = paddle.to_tensor(x_np, dtype=x_np.dtype)
if not binary_op:
dygraph_result = op(x)
else:
y = paddle.to_tensor(y_np, dtype=y_np.dtype)
dygraph_result = op(x, y)
return dygraph_result


def np_data_generator(np_shape, dtype, *args, **kwargs):
Expand Down
6 changes: 0 additions & 6 deletions python/paddle/fluid/tests/unittests/test_logit_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,6 @@
from op_test import OpTest

import paddle
from paddle.fluid.framework import _test_eager_guard

np.random.seed(10)

Expand Down Expand Up @@ -117,11 +116,6 @@ def test_errors(self):
x = paddle.fluid.data(name='X2', shape=[100], dtype='float32')
self.assertRaises(TypeError, paddle.logit, x, dtype='int32')

def test_api_eager_dygraph(self):
with _test_eager_guard():
self.test_check_api()
self.test_errors()


if __name__ == "__main__":
unittest.main()
8 changes: 1 addition & 7 deletions python/paddle/fluid/tests/unittests/test_lookahead.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,6 @@
import paddle
import paddle.fluid as fluid
import paddle.nn as nn
from paddle.fluid.framework import _test_eager_guard

LOOKAHEAD_K = 5
LOOKAHEAD_ALPHA = 0.2
Expand Down Expand Up @@ -71,7 +70,7 @@ def test_lookahead_static(self):
)
fast_param = latest_b - SGD_LR * b_grad

def func_test_look_ahead_dygraph(self):
def test_look_ahead_dygraph(self):
BATCH_SIZE = 16
BATCH_NUM = 4
EPOCH_NUM = 4
Expand Down Expand Up @@ -152,11 +151,6 @@ def train(layer, loader, loss_fn, opt):

train(layer, loader, loss_fn, lookahead)

def test_look_ahead_dygraph(self):
with _test_eager_guard():
self.func_test_look_ahead_dygraph()
self.func_test_look_ahead_dygraph()


if __name__ == "__main__":
unittest.main()
10 changes: 0 additions & 10 deletions python/paddle/fluid/tests/unittests/test_matmul_v2_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.framework import _test_eager_guard
from paddle.fluid.tests.unittests.testsuite import create_op


Expand Down Expand Up @@ -559,11 +558,6 @@ def test_compute_type_fp16_nan(self):
{'FLAGS_gemm_use_half_precision_compute_type': False}
)

def test_api_eager_dygraph(self):
with _test_eager_guard():
self.test_dygraph()
self.test_dygraph_fp16()


class TestComplexMatMulOp(OpTest):
def setUp(self):
Expand Down Expand Up @@ -732,10 +726,6 @@ def func_dygraph_matmul(self):

paddle.enable_static()

def func_dygraph_matmul(self): # noqa: F811
with _test_eager_guard():
self.func_dygraph_matmul()

yjjiang11 marked this conversation as resolved.
Show resolved Hide resolved

if __name__ == "__main__":
paddle.enable_static()
Expand Down
5 changes: 0 additions & 5 deletions python/paddle/fluid/tests/unittests/test_max_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@

import paddle
import paddle.fluid.core as core
from paddle.fluid.framework import _test_eager_guard


class ApiMaxTest(unittest.TestCase):
Expand Down Expand Up @@ -83,10 +82,6 @@ def test_imperative_api(self):
z_expected = np.array(np.max(np_x, axis=0))
self.assertEqual((np_z == z_expected).all(), True)

def test_eager_api(self):
with _test_eager_guard():
self.test_imperative_api()

def test_big_dimension(self):
paddle.disable_static()
x = paddle.rand(shape=[2, 2, 2, 2, 2, 2, 2])
Expand Down
8 changes: 1 addition & 7 deletions python/paddle/fluid/tests/unittests/test_maxout_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@
import paddle
import paddle.fluid.core as core
import paddle.nn.functional as F
from paddle.fluid.framework import _test_eager_guard

paddle.enable_static()
np.random.seed(1)
Expand Down Expand Up @@ -108,7 +107,7 @@ def test_static_api(self):
for r in res:
np.testing.assert_allclose(out_ref, r, rtol=1e-05)

def func_test_dygraph_api(self):
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.maxout(x, self.groups, self.axis)
Expand Down Expand Up @@ -136,11 +135,6 @@ def test_errors(self):
x_float32 = paddle.fluid.data(name='x_float32', shape=[2, 4, 6, 8])
self.assertRaises(ValueError, F.maxout, x_float32, 2, 2)

def test_dygraph_api(self):
with _test_eager_guard():
self.func_test_dygraph_api()
self.func_test_dygraph_api()


if __name__ == '__main__':
unittest.main()