-
Notifications
You must be signed in to change notification settings - Fork 2.7k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
custom allreduce cuda kernel #20703
base: main
Are you sure you want to change the base?
custom allreduce cuda kernel #20703
Conversation
ead5e90
to
9ba3637
Compare
orttraining/orttraining/core/optimizer/compute_optimizer/padding_elimination.cc
Outdated
Show resolved
Hide resolved
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
🕐
#if defined(USE_MPI) || defined(USE_NCCL) | ||
|
||
struct CudaDeleter { | ||
void operator()(void* ptr) { |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
}; | ||
|
||
struct IpcDeleter { | ||
void operator()(void* ptr) { |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
// A global resource pack for IPC memory used in custom reduce kernel. | ||
// Resource retrieval and deserialization are made atomic to ensure thread safety when accessing it. | ||
struct IPCMemoryResourcePack { | ||
InlinedVector<std::shared_ptr<IpcMemory>> m_ipc_momery_handles; |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
|
||
Status IpcMemory::AllocateIpcMemory() { | ||
CUDA_RETURN_IF_ERROR(cudaMalloc(&m_buffer_ptr_, mbuffer_size_)); | ||
m_buffer_uptr_ = std::move(CudaMemPtrT{m_buffer_ptr_}); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
} | ||
|
||
Status IpcMemory::AllocateIpcMemory() { | ||
CUDA_RETURN_IF_ERROR(cudaMalloc(&m_buffer_ptr_, mbuffer_size_)); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
int world_size_; | ||
InlinedVector<void*> m_comm_ptrs_; | ||
std::size_t mbuffer_size_; | ||
void* m_buffer_ptr_{nullptr}; |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
|
||
for (size_t node_id = 0; node_id < handles.size(); node_id++) { | ||
if ((int)node_id == rank_) { | ||
m_comm_ptrs_[node_id] = m_buffer_ptr_; |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
uint8_t* foreign_buffer; | ||
CUDA_RETURN_IF_ERROR(cudaIpcOpenMemHandle( | ||
reinterpret_cast<void**>(&foreign_buffer), handles[node_id], cudaIpcMemLazyEnablePeerAccess)); | ||
m_ipc_uptrs_.emplace_back(IpcMemPtrT{foreign_buffer}); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
|
||
Status IpcMemory::AllocateIpcMemory() { | ||
CUDA_RETURN_IF_ERROR(cudaMalloc(&m_buffer_ptr_, mbuffer_size_)); | ||
m_buffer_uptr_ = std::move(CudaMemPtrT{m_buffer_ptr_}); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Description
Conditionally route to the custom AllReduce kernel when the buffer size and the number of GPUs meet certain requirements. Otherwise, keep using NCCL's AllReduce.
Motivation and Context