rm unittests eager guard tests part9 histogram2imperative_dataloader (#48825)

* rm unittests eager guard tests part9 histogram2imperative_dataloader

* rm basic
yjjiang11 committed Dec 14, 2022
1 parent 30674b0 commit 627eaa0
Showing 11 changed files with 45 additions and 236 deletions.
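Every hunk below applies the same mechanical change: the duplicated run wrapped in "with _test_eager_guard():" is deleted, and the remaining func_* helper becomes an ordinary test_* method. A minimal sketch of the before/after pattern, using an illustrative method name rather than one taken from a specific file:

# Before: each case ran twice, once under the eager guard and once more without it.
def func_example(self):
    ...  # the actual checks

def test_example(self):
    with _test_eager_guard():
        self.func_example()
    self.func_example()

# After: the checks run once, as a plain unittest method.
def test_example(self):
    ...  # the actual checks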
17 changes: 7 additions & 10 deletions python/paddle/fluid/tests/unittests/test_histogram_op.py
@@ -20,7 +20,6 @@
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import _test_eager_guard


class TestHistogramOpAPI(unittest.TestCase):
@@ -59,15 +58,13 @@ def test_dygraph(self):
msg='histogram output is wrong, out =' + str(actual.numpy()),
)

with _test_eager_guard():
inputs_np = np.array([[2, 4, 2], [2, 5, 4]]).astype(np.int64)
inputs = paddle.to_tensor(inputs_np)
actual = paddle.histogram(inputs, bins=5, min=1, max=5)
self.assertTrue(
(actual.numpy() == expected).all(),
msg='histogram output is wrong, out ='
+ str(actual.numpy()),
)
inputs_np = np.array([[2, 4, 2], [2, 5, 4]]).astype(np.int64)
inputs = paddle.to_tensor(inputs_np)
actual = paddle.histogram(inputs, bins=5, min=1, max=5)
self.assertTrue(
(actual.numpy() == expected).all(),
msg='histogram output is wrong, out =' + str(actual.numpy()),
)


class TestHistogramOpError(unittest.TestCase):
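For reference, the surviving eager-mode check in test_dygraph can be exercised on its own; the expected counts below are inferred from the bin arithmetic (five equal-width bins over [1, 5]) rather than copied from the visible hunk:

import numpy as np
import paddle

inputs = paddle.to_tensor(np.array([[2, 4, 2], [2, 5, 4]]).astype(np.int64))
actual = paddle.histogram(inputs, bins=5, min=1, max=5)
# Bin width is 0.8: the three 2s land in bin 1, the two 4s in bin 3, and the 5 in the last bin.
expected = np.array([0, 3, 0, 2, 1]).astype(np.int64)
assert (actual.numpy() == expected).all()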
105 changes: 17 additions & 88 deletions python/paddle/fluid/tests/unittests/test_imperative_auto_prune.py
@@ -18,7 +18,6 @@

import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import _test_eager_guard
from paddle.nn import Embedding
from paddle.tensor import random

@@ -169,7 +168,7 @@ def embed_linear0(self, x):


class TestImperativeAutoPrune(unittest.TestCase):
def func_auto_prune(self):
def test_auto_prune(self):
with fluid.dygraph.guard():
case1 = AutoPruneLayer0(input_size=5)
value1 = np.arange(25).reshape(5, 5).astype("float32")
@@ -181,12 +180,7 @@ def func_auto_prune(self):
self.assertIsNotNone(case1.linear2.weight._grad_ivar())
self.assertIsNotNone(case1.linear1.weight._grad_ivar())

def test_auto_prune(self):
with _test_eager_guard():
self.func_auto_prune()
self.func_auto_prune()

def func_auto_prune2(self):
def test_auto_prune2(self):
with fluid.dygraph.guard():
case2 = AutoPruneLayer1(input_size=5)
value1 = np.arange(25).reshape(5, 5).astype("float32")
@@ -199,13 +193,9 @@ def func_auto_prune2(self):
self.assertIsNone(case2.linear2.weight._grad_ivar())
self.assertIsNotNone(case2.linear1.weight._grad_ivar())

def test_auto_prune2(self):
with _test_eager_guard():
self.func_auto_prune2()
self.func_auto_prune2()

# TODO(jiabin): Support this when we support better split tensor
def func_auto_prune3(self):
def test_auto_prune3(self):
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
with fluid.dygraph.guard():
case3 = AutoPruneLayer3(input_size=784)
value1 = np.arange(784).reshape(1, 784).astype("float32")
@@ -216,15 +206,10 @@ def func_auto_prune3(self):
loss.backward()
self.assertIsNotNone(case3.linear.weight._grad_ivar())
self.assertTrue((part2.gradient() == 0).all())

def test_auto_prune3(self):
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
with _test_eager_guard():
self.func_auto_prune3()
self.func_auto_prune3()
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

def func_auto_prune4(self):
def test_auto_prune4(self):
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
with fluid.dygraph.guard():
case4 = AutoPruneLayer3(input_size=784)
value1 = np.arange(784).reshape(1, 784).astype("float32")
@@ -235,15 +220,10 @@ def func_auto_prune4(self):
part2.backward()
self.assertIsNotNone(case4.linear.weight._grad_ivar())
self.assertTrue((part2.gradient() == 1).all())

def test_auto_prune4(self):
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
with _test_eager_guard():
self.func_auto_prune4()
self.func_auto_prune4()
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

def func_auto_prune5(self):
def test_auto_prune5(self):
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
with fluid.dygraph.guard():
case4 = AutoPruneLayer3(input_size=784)
value1 = np.arange(784).reshape(1, 784).astype("float32")
@@ -254,15 +234,9 @@ def func_auto_prune5(self):
part1.backward()
self.assertIsNotNone(case4.linear.weight._grad_ivar())
self.assertTrue((part2.gradient() == 0).all())

def test_auto_prune5(self):
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
with _test_eager_guard():
self.func_auto_prune5()
self.func_auto_prune5()
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

def func_auto_prune6(self):
def test_auto_prune6(self):
with fluid.dygraph.guard():
value0 = np.arange(26).reshape(2, 13).astype("float32")
value1 = np.arange(6).reshape(2, 3).astype("float32")
@@ -280,12 +254,7 @@ def func_auto_prune6(self):
self.assertIsNone(linear.weight.gradient())
self.assertIsNone(out1.gradient())

def test_auto_prune6(self):
with _test_eager_guard():
self.func_auto_prune6()
self.func_auto_prune6()

def func_auto_prune7(self):
def test_auto_prune7(self):
with fluid.dygraph.guard():
value0 = np.arange(26).reshape(2, 13).astype("float32")
value1 = np.arange(6).reshape(2, 3).astype("float32")
@@ -303,12 +272,7 @@ def func_auto_prune7(self):
self.assertIsNone(linear.weight.gradient())
self.assertIsNone(out1.gradient())

def test_auto_prune7(self):
with _test_eager_guard():
self.func_auto_prune7()
self.func_auto_prune7()

def func_auto_prune8(self):
def test_auto_prune8(self):
with fluid.dygraph.guard():
value0 = np.arange(26).reshape(2, 13).astype("float32")
value1 = np.arange(6).reshape(2, 3).astype("float32")
@@ -336,12 +300,7 @@ def func_auto_prune8(self):
np.array_equal(linear_origin, linear.weight.numpy())
)

def test_auto_prune8(self):
with _test_eager_guard():
self.func_auto_prune8()
self.func_auto_prune8()

def func_auto_prune9(self):
def test_auto_prune9(self):
with fluid.dygraph.guard():
value0 = np.arange(26).reshape(2, 13).astype("float32")
value1 = np.arange(6).reshape(2, 3).astype("float32")
@@ -371,12 +330,7 @@ def func_auto_prune9(self):
except ValueError as e:
assert type(e) == ValueError

def test_auto_prune9(self):
with _test_eager_guard():
self.func_auto_prune9()
self.func_auto_prune9()

def func_auto_prune10(self):
def test_auto_prune10(self):
with fluid.dygraph.guard():
value0 = np.arange(26).reshape(2, 13).astype("float32")
value1 = np.arange(6).reshape(2, 3).astype("float32")
@@ -396,12 +350,7 @@ def func_auto_prune10(self):
self.assertIsNone(linear.weight.gradient())
self.assertIsNone(out1.gradient())

def test_auto_prune10(self):
with _test_eager_guard():
self.func_auto_prune10()
self.func_auto_prune10()

def func_auto_prune_with_optimizer(self):
def test_auto_prune_with_optimizer(self):
vocab_size = 100
size = 20
batch_size = 16
@@ -451,12 +400,7 @@ def func_auto_prune_with_optimizer(self):
assert model.embed1.weight._grad_ivar() is None
assert model.linear_1.weight._grad_ivar() is None

def test_auto_prune_with_optimizer(self):
with _test_eager_guard():
self.func_auto_prune_with_optimizer()
self.func_auto_prune_with_optimizer()

def func_case2_prune_no_grad_branch(self):
def test_case2_prune_no_grad_branch(self):
with fluid.dygraph.guard():
value1 = np.arange(784).reshape(1, 784)
value2 = np.arange(1).reshape(1, 1)
@@ -468,12 +412,7 @@ def func_case2_prune_no_grad_branch(self):
self.assertIsNone(case3.linear2.weight._grad_ivar())
self.assertIsNotNone(case3.linear.weight._grad_ivar())

def test_case2_prune_no_grad_branch(self):
with _test_eager_guard():
self.func_case2_prune_no_grad_branch()
self.func_case2_prune_no_grad_branch()

def func_case3_prune_no_grad_branch2(self):
def test_case3_prune_no_grad_branch2(self):
with fluid.dygraph.guard():
value1 = np.arange(1).reshape(1, 1)
linear = paddle.nn.Linear(1, 1)
@@ -486,23 +425,13 @@ def func_case3_prune_no_grad_branch2(self):
loss.backward()
self.assertIsNone(linear.weight._grad_ivar())

def test_case3_prune_no_grad_branch2(self):
with _test_eager_guard():
self.func_case3_prune_no_grad_branch2()
self.func_case3_prune_no_grad_branch2()

def func_case4_with_no_grad_op_maker(self):
def test_case4_with_no_grad_op_maker(self):
with fluid.dygraph.guard():
out = random.gaussian(shape=[20, 30])
loss = paddle.mean(out)
loss.backward()
self.assertIsNone(out._grad_ivar())

def test_case4_with_no_grad_op_maker(self):
with _test_eager_guard():
self.func_case4_with_no_grad_op_maker()
self.func_case4_with_no_grad_op_maker()


if __name__ == '__main__':
unittest.main()
@@ -16,11 +16,10 @@
from collections import OrderedDict

import paddle
from paddle.fluid.framework import _test_eager_guard


class TestLayerDict(unittest.TestCase):
def func_layer_dict(self):
def test_layer_dict(self):
layers = OrderedDict(
[
('conv1d', paddle.nn.Conv1D(3, 2, 3)),
@@ -92,12 +91,7 @@ def check_layer_dict():
layers_dicts.update(list_format_layers)
check_layer_dict()

def test_layer_dict(self):
with _test_eager_guard():
self.func_layer_dict()
self.func_layer_dict()

def func_layer_dict_error_inputs(self):
def test_layer_dict_error_inputs(self):
layers = [
('conv1d', paddle.nn.Conv1D(3, 2, 3), "conv1d"),
('conv2d', paddle.nn.Conv2D(3, 2, 3)),
@@ -108,11 +102,6 @@ def func_layer_dict_error_inputs(self):

self.assertRaises(AssertionError, layers_dicts.update, 1)

def test_layer_dict_error_inputs(self):
with _test_eager_guard():
self.func_layer_dict_error_inputs()
self.func_layer_dict_error_inputs()


if __name__ == '__main__':
unittest.main()
@@ -18,7 +18,6 @@

import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import _test_eager_guard


class MyLayer(fluid.Layer):
@@ -94,15 +93,10 @@ def layer_list(self, use_fluid_api):
self.assertListEqual(res11.shape, [5, 4])
res11.backward()

def func_test_layer_list(self):
def test_test_layer_list(self):
self.layer_list(True)
self.layer_list(False)

def test_layer_list(self):
with _test_eager_guard():
self.func_test_layer_list()
self.func_test_layer_list()


if __name__ == '__main__':
unittest.main()
@@ -19,7 +19,6 @@
import paddle
import paddle.fluid as fluid
from paddle import _legacy_C_ops
from paddle.fluid.framework import _test_eager_guard


class MyLayer(fluid.Layer):
@@ -68,9 +67,6 @@ def paramter_list(self, use_fluid_api):
loss.backward()

def test_paramter_list(self):
with _test_eager_guard():
self.paramter_list(False)
self.paramter_list(True)
self.paramter_list(False)
self.paramter_list(True)

@@ -18,12 +18,11 @@

import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import _test_eager_guard
from paddle.nn import Linear


class TestImperativeContainerSequential(unittest.TestCase):
def func_sequential(self):
def test_sequential(self):
data = np.random.uniform(-1, 1, [5, 10]).astype('float32')
with fluid.dygraph.guard():
data = fluid.dygraph.to_variable(data)
@@ -57,12 +56,7 @@ def func_sequential(self):
loss2 = paddle.mean(res2)
loss2.backward()

def test_sequential(self):
with _test_eager_guard():
self.func_sequential()
self.func_sequential()

def func_sequential_list_params(self):
def test_sequential_list_params(self):
data = np.random.uniform(-1, 1, [5, 10]).astype('float32')
with fluid.dygraph.guard():
data = fluid.dygraph.to_variable(data)
@@ -96,11 +90,6 @@ def func_sequential_list_params(self):
loss2 = paddle.mean(res2)
loss2.backward()

def test_sequential_list_params(self):
with _test_eager_guard():
self.func_sequential_list_params()
self.func_sequential_list_params()


if __name__ == '__main__':
unittest.main()
(Diffs for the remaining changed files were not loaded on this page.)
