From 7e73fd273d02dd4d0c3611589d5a696b6939b053 Mon Sep 17 00:00:00 2001
From: Rohan Varma
Date: Thu, 7 Jan 2021 21:25:40 -0800
Subject: [PATCH] Address clang-tidy warnings in ProcessGroupNCCL (#50131)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/50131

Noticed that in the internal diff for https://github.com/pytorch/pytorch/pull/49069 there was a clang-tidy warning to use emplace instead of push_back. This can save us a copy, since emplace_back constructs the element in place and eliminates the unnecessary temporary.

ghstack-source-id: 119560979

Test Plan: CI

Reviewed By: pritamdamania87

Differential Revision: D25800134

fbshipit-source-id: 243e57318f5d6e43de524d4e5409893febe6164c
---
 torch/lib/c10d/ProcessGroupNCCL.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/torch/lib/c10d/ProcessGroupNCCL.cpp b/torch/lib/c10d/ProcessGroupNCCL.cpp
index b9ac5aa77150..f7c6658d715e 100644
--- a/torch/lib/c10d/ProcessGroupNCCL.cpp
+++ b/torch/lib/c10d/ProcessGroupNCCL.cpp
@@ -1413,7 +1413,7 @@ c10::intrusive_ptr<ProcessGroup::Work> ProcessGroupNCCL::barrier(
   // Use user defined GPU device ids if provided
   if (!opts.device_ids.empty()) {
     for (auto device : opts.device_ids) {
-      devices.push_back(at::Device(at::DeviceType::CUDA, device));
+      devices.emplace_back(at::DeviceType::CUDA, device);
     }
   } else if (usedDeviceIdxs_.empty()) {
     // This means there is not yet a NCCL collective being called
@@ -1423,10 +1423,10 @@ c10::intrusive_ptr<ProcessGroup::Work> ProcessGroupNCCL::barrier(
     // ensure that each process is on a different GPU
     auto numGPUs = at::cuda::getNumGPUs();
    int16_t deviceIdx = static_cast<int16_t>(rank_ % numGPUs);
-    devices.push_back(at::Device(at::DeviceType::CUDA, deviceIdx));
+    devices.emplace_back(at::DeviceType::CUDA, deviceIdx);
   } else {
     for (auto usedDeviceIdx : usedDeviceIdxs_) {
-      devices.push_back(at::Device(at::DeviceType::CUDA, usedDeviceIdx));
+      devices.emplace_back(at::DeviceType::CUDA, usedDeviceIdx);
     }
   }
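
For reference, here is a minimal standalone sketch (not part of the patch) of the push_back-vs-emplace_back distinction the summary describes. It uses a toy `Device` struct with a logging copy constructor rather than `at::Device`, purely to make the extra temporary visible:

```cpp
// Illustration of why clang-tidy prefers emplace_back over push_back:
// push_back(T(...)) builds a temporary and then copies/moves it into the
// vector, while emplace_back(...) forwards the arguments and constructs
// the element directly in the vector's storage.
#include <iostream>
#include <vector>

struct Device {
  int type;
  int index;
  Device(int t, int i) : type(t), index(i) { std::cout << "construct\n"; }
  Device(const Device& other) : type(other.type), index(other.index) {
    std::cout << "copy\n";
  }
};

int main() {
  std::vector<Device> devices;
  devices.reserve(2);  // avoid reallocation noise in the output

  // Temporary Device is constructed, then copied into the vector.
  devices.push_back(Device(0, 0));  // prints "construct" then "copy"

  // Element is constructed in place; no temporary, no copy.
  devices.emplace_back(0, 1);       // prints only "construct"
  return 0;
}
```

For a small trivially copyable type like `at::Device` the saving is minor, but the emplace form is the idiomatic pattern clang-tidy's modernize checks recommend.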