diff --git a/torch/testing/_internal/distributed/distributed_test.py b/torch/testing/_internal/distributed/distributed_test.py
index ada44f9283ff..fcccd481d290 100644
--- a/torch/testing/_internal/distributed/distributed_test.py
+++ b/torch/testing/_internal/distributed/distributed_test.py
@@ -3330,8 +3330,8 @@ def test_DistributedDataParallel_non_default_stream(self):
         )
 
     @unittest.skipIf(
-        BACKEND != "nccl" and BACKEND != "gloo",
-        "MPI backend does not support DDP communication hook on CUDA devices",
+        BACKEND != "nccl",
+        "Only NCCL backend supports DDP communication hook",
     )
     @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
     @skip_if_rocm
@@ -3441,8 +3441,8 @@ def _test_ddp_hook_parity(self, state, hook):
         )
 
     @unittest.skipIf(
-        BACKEND != "nccl" and BACKEND != "gloo",
-        "MPI backend does not support DDP communication hook on CUDA devices",
+        BACKEND != "nccl",
+        "Only NCCL backend supports DDP communication hook",
     )
     @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
     @skip_if_rocm
@@ -3450,8 +3450,8 @@ def test_ddp_hook_parity_allreduce(self):
         self._test_ddp_hook_parity(state=None, hook=default.allreduce_hook)
 
     @unittest.skipIf(
-        BACKEND != "nccl" and BACKEND != "gloo",
-        "MPI backend does not support DDP communication hook on CUDA devices",
+        BACKEND != "nccl",
+        "Only NCCL backend supports DDP communication hook",
     )
     @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
     @skip_if_rocm
@@ -3463,8 +3463,8 @@ def test_ddp_hook_parity_allreduce_process_group(self):
         self._test_ddp_hook_parity(state=process_group, hook=default.allreduce_hook)
 
     @unittest.skipIf(
-        BACKEND != "nccl" and BACKEND != "gloo",
-        "MPI backend does not support DDP communication hook on CUDA devices",
+        BACKEND != "nccl",
+        "Only NCCL backend supports DDP communication hook",
     )
     @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
     @skip_if_rocm