Commit 0852475: remove fleet eager guard tests (#48765)

yjjiang11 committed Dec 13, 2022
1 parent f3982a9 commit 0852475
Showing 13 changed files with 14 additions and 46 deletions.
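
Every file in this commit receives the same two-part edit: the import of
_test_eager_guard from paddle.fluid.framework is deleted, and the
with _test_eager_guard(): block around the test entry point is unwrapped or
dropped. The guard is a test-only context manager that switched a test into
eager dygraph mode; presumably because eager mode is now the default, the
guarded duplicate run is redundant. A minimal before/after sketch of the most
common shape (names taken from the hunks below; illustrative, not a verbatim
file):

    # before: run once under the eager-mode guard, then once more without it
    if __name__ == '__main__':
        with _test_eager_guard():
            test_sharding_api()
        test_sharding_api()

    # after: a single run in the (now default) eager mode
    if __name__ == '__main__':
        test_sharding_api()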

@@ -24,7 +24,6 @@
     group_sharded_parallel,
     save_group_sharded_model,
 )
-from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
 
 epoch = 10
@@ -196,7 +195,5 @@ def test_sharding_api():
 
 
 if __name__ == '__main__':
-    with _test_eager_guard():
-        pass
     fleet.init(is_collective=True)
     test_sharding_api()
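
In this first file (and in the fleet.init variants later on) the guarded
branch was already empty:

    # the guard ran nothing, so removing it changes no behavior
    with _test_eager_guard():
        pass

The eager-mode duplicate run had evidently been hollowed out before this
commit, so the deletion here is pure dead-code removal; only the unguarded
fleet.init(...) and the single test call survive.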

@@ -22,7 +22,6 @@
     group_sharded_parallel,
     save_group_sharded_model,
 )
-from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
 
 epoch = 10
@@ -199,5 +198,4 @@ def test_sharding_api():
 
 
 if __name__ == '__main__':
-    with _test_eager_guard():
-        test_sharding_api()
+    test_sharding_api()

@@ -28,7 +28,6 @@
 from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage2 import (
     GroupShardedStage2,
 )
-from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
 
 seed = 2022
@@ -246,5 +245,4 @@ def test_dp_stage2():
 
 
 if __name__ == '__main__':
-    with _test_eager_guard():
-        test_dp_stage2()
+    test_dp_stage2()

@@ -28,7 +28,6 @@
 from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage2 import (
     GroupShardedStage2,
 )
-from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
 
 seed = 2022
@@ -250,5 +249,4 @@ def test_dp_stage2():
 
 
 if __name__ == '__main__':
-    with _test_eager_guard():
-        test_dp_stage2()
+    test_dp_stage2()

@@ -31,7 +31,6 @@
 from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_utils import (
     GroupShardedScaler,
 )
-from paddle.fluid.framework import _test_eager_guard
 
 seed = 2021
 epoch = 2
@@ -115,5 +114,4 @@ def test_sharding_stage2_offload():
 
 
 if __name__ == '__main__':
-    with _test_eager_guard():
-        test_sharding_stage2_offload()
+    test_sharding_stage2_offload()

@@ -34,7 +34,6 @@
 from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_utils import (
     GroupShardedScaler,
 )
-from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
 
 epoch = 10
@@ -320,5 +319,4 @@ def test_stage2_stage3():
 
 
 if __name__ == '__main__':
-    with _test_eager_guard():
-        test_stage2_stage3()
+    test_stage2_stage3()

@@ -24,7 +24,6 @@
 from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_utils import (
     GroupShardedScaler,
 )
-from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
 
 epoch = 10
@@ -220,5 +219,4 @@ def test_stage3_offload():
 
 
 if __name__ == '__main__':
-    with _test_eager_guard():
-        test_stage3_offload()
+    test_stage3_offload()

@@ -23,7 +23,6 @@
     ShardingOptimizerStage2,
 )
 from paddle.distributed.fleet.utils.internal_storage import GradStorage
-from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
 
 base_lr = 0.1
@@ -142,6 +141,4 @@ def train_mlp():
 
 
 if __name__ == '__main__':
-    with _test_eager_guard():
-        pass
     train_mlp()

@@ -29,7 +29,6 @@
 from paddle.distributed.fleet.meta_parallel.sharding.sharding_stage2 import (
     ShardingStage2,
 )
-from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
 
 seed = 2022
@@ -239,7 +238,5 @@ def test_dp_stage2():
 
 
 if __name__ == '__main__':
-    with _test_eager_guard():
-        pass
     fleet.init(is_collective=True, strategy=strategy)
     test_dp_stage2()

@@ -28,7 +28,6 @@
 from paddle.distributed.fleet.meta_parallel.sharding.sharding_utils import (
     ShardingScaler,
 )
-from paddle.fluid.framework import _test_eager_guard
 
 seed = 2021
 epoch = 2
@@ -119,7 +118,5 @@ def test_sharding_stage2_offload():
 
 
 if __name__ == '__main__':
-    with _test_eager_guard():
-        pass
     fleet.init(is_collective=True, strategy=strategy)
     test_sharding_stage2_offload()

@@ -35,7 +35,6 @@
 from paddle.distributed.fleet.meta_parallel.sharding.sharding_utils import (
     ShardingScaler,
 )
-from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
 
 epoch = 10
@@ -316,7 +315,5 @@ def test_stage2_stage3():
 
 
 if __name__ == '__main__':
-    with _test_eager_guard():
-        pass
     fleet.init(is_collective=True)
     test_stage2_stage3()

@@ -25,7 +25,6 @@
 from paddle.distributed.fleet.meta_parallel.sharding.sharding_utils import (
     ShardingScaler,
 )
-from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
 
 epoch = 10
@@ -216,7 +215,5 @@ def test_stage3_offload():
 
 
 if __name__ == '__main__':
-    with _test_eager_guard():
-        pass
     fleet.init(is_collective=True)
     test_stage3_offload()

@@ -15,7 +15,6 @@
 import unittest
 
 import paddle
-from paddle.fluid.framework import _test_eager_guard
 
 
 class TestProcessGroupFp32(unittest.TestCase):
@@ -26,15 +25,14 @@ def config(self):
         pass
 
     def test_init_process_group(self):
-        with _test_eager_guard():
-            paddle.distributed.init_parallel_env()
-            paddle.distributed.new_group()
-            group = paddle.distributed.new_group([-1, -2])
-            assert group.process_group is None
-
-            group = paddle.distributed.collective.Group(-1, 2, 0, [-1, -2])
-            ret = paddle.distributed.barrier(group)
-            assert ret is None
+        paddle.distributed.init_parallel_env()
+        paddle.distributed.new_group()
+        group = paddle.distributed.new_group([-1, -2])
+        assert group.process_group is None
+
+        group = paddle.distributed.collective.Group(-1, 2, 0, [-1, -2])
+        ret = paddle.distributed.barrier(group)
+        assert ret is None
         paddle.enable_static()
         in_tensor = paddle.empty((1, 2))
         in_tensor2 = paddle.empty((1, 2))
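
In this last file the guard wrapped the body of a unittest method rather than
a module-level call, so the change is a one-level dedent of the whole block.
Reassembled from the added (+) lines above, the resulting method reads roughly
as follows (context past the end of the hunk elided):

    def test_init_process_group(self):
        paddle.distributed.init_parallel_env()
        paddle.distributed.new_group()
        group = paddle.distributed.new_group([-1, -2])
        assert group.process_group is None

        group = paddle.distributed.collective.Group(-1, 2, 0, [-1, -2])
        ret = paddle.distributed.barrier(group)
        assert ret is None
        paddle.enable_static()
        in_tensor = paddle.empty((1, 2))
        in_tensor2 = paddle.empty((1, 2))
        ...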
