From e7c35a3363e1e728f7f5d69e49858ce284269816 Mon Sep 17 00:00:00 2001
From: "Rong Rong (AI Infra)"
Date: Sat, 22 May 2021 07:46:13 -0700
Subject: [PATCH] Revert D28617214: [Gradient Compression] Do not skip the
 comm hook tests on Gloo backend

Test Plan: revert-hammer

Differential Revision:
D28617214 (https://github.com/pytorch/pytorch/commit/3e88acbf05734fa6949c238e20feea0d1cd79049)

Original commit changeset: 3bafb0c837a1

fbshipit-source-id: 0b6254e9766436633faea63cd64c454b739f74b4
---
 .../_internal/distributed/distributed_test.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/torch/testing/_internal/distributed/distributed_test.py b/torch/testing/_internal/distributed/distributed_test.py
index ada44f9283ff..fcccd481d290 100644
--- a/torch/testing/_internal/distributed/distributed_test.py
+++ b/torch/testing/_internal/distributed/distributed_test.py
@@ -3330,8 +3330,8 @@ def test_DistributedDataParallel_non_default_stream(self):
         )
 
     @unittest.skipIf(
-        BACKEND != "nccl" and BACKEND != "gloo",
-        "MPI backend does not support DDP communication hook on CUDA devices",
+        BACKEND != "nccl",
+        "Only NCCL backend supports DDP communication hook",
     )
     @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
     @skip_if_rocm
@@ -3441,8 +3441,8 @@ def _test_ddp_hook_parity(self, state, hook):
         )
 
     @unittest.skipIf(
-        BACKEND != "nccl" and BACKEND != "gloo",
-        "MPI backend does not support DDP communication hook on CUDA devices",
+        BACKEND != "nccl",
+        "Only NCCL backend supports DDP communication hook",
     )
     @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
     @skip_if_rocm
@@ -3450,8 +3450,8 @@ def test_ddp_hook_parity_allreduce(self):
         self._test_ddp_hook_parity(state=None, hook=default.allreduce_hook)
 
     @unittest.skipIf(
-        BACKEND != "nccl" and BACKEND != "gloo",
-        "MPI backend does not support DDP communication hook on CUDA devices",
+        BACKEND != "nccl",
+        "Only NCCL backend supports DDP communication hook",
     )
     @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
     @skip_if_rocm
@@ -3463,8 +3463,8 @@ def test_ddp_hook_parity_allreduce_process_group(self):
         self._test_ddp_hook_parity(state=process_group, hook=default.allreduce_hook)
 
     @unittest.skipIf(
-        BACKEND != "nccl" and BACKEND != "gloo",
-        "MPI backend does not support DDP communication hook on CUDA devices",
+        BACKEND != "nccl",
+        "Only NCCL backend supports DDP communication hook",
     )
     @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
     @skip_if_rocm
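
Note (not part of the patch): every hunk above toggles the same unittest.skipIf guard, restoring the NCCL-only condition that the reverted commit had widened to also allow Gloo. A minimal standalone sketch of that guard pattern follows; the BACKEND constant, its "gloo" fallback, and the CommHookTest/test_ddp_comm_hook names are illustrative stand-ins for this sketch, not code taken from distributed_test.py.

import os
import unittest

# Stand-in for the module-level constant that the real test file derives
# from its test environment; the "gloo" fallback is an assumption here.
BACKEND = os.environ.get("BACKEND", "gloo")

class CommHookTest(unittest.TestCase):
    # After the revert, the guard skips on every backend except NCCL; the
    # reverted change had loosened the condition to
    # BACKEND != "nccl" and BACKEND != "gloo".
    @unittest.skipIf(
        BACKEND != "nccl",
        "Only NCCL backend supports DDP communication hook",
    )
    def test_ddp_comm_hook(self):
        self.assertTrue(True)  # placeholder for the real comm hook assertions

if __name__ == "__main__":
    unittest.main()

Because skipIf evaluates its condition at class-definition time, BACKEND must be set before the module is imported, which is why the real suite reads it from the environment rather than from a fixture.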