Skip to content

Commit

Permalink
Continue fix for #1296 (avoid using std::vector directly in MPI communication)
Browse files Browse the repository at this point in the history
  • Loading branch information
vincentchabannes committed Jul 2, 2020
1 parent cdf8da7 commit 01bfdb4
Show file tree
Hide file tree
Showing 2 changed files with 56 additions and 13 deletions.
24 changes: 21 additions & 3 deletions feelpp/feel/feeldiscr/meshimpl.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -2430,15 +2430,33 @@ void Mesh<Shape, T, Tag, IndexT>::updateEntitiesCoDimensionGhostCellByUsingNonBl
int nbRequest = 2 * neighborSubdomains;
mpi::request* reqs = new mpi::request[nbRequest];
int cptRequest = 0;

// get size of data to transfer
std::map<rank_type,size_type> sizeRecv;
for ( rank_type neighborRank : this->neighborSubdomains() )
{
reqs[cptRequest++] = MeshBase<IndexT>::worldComm().localComm().isend( neighborRank, 0, (size_type)dataToSend[neighborRank].size() );
reqs[cptRequest++] = MeshBase<IndexT>::worldComm().localComm().irecv( neighborRank, 0, sizeRecv[neighborRank] );
}
// wait all requests
mpi::wait_all( reqs, reqs + cptRequest );

// first send/recv
cptRequest = 0;
for ( rank_type neighborRank : this->neighborSubdomains() )
{
reqs[cptRequest++] = MeshBase<IndexT>::worldComm().localComm().isend( neighborRank, 0, dataToSend[neighborRank] );
reqs[cptRequest++] = MeshBase<IndexT>::worldComm().localComm().irecv( neighborRank, 0, dataToRecv[neighborRank] );
int nSendData = dataToSend[neighborRank].size();
if ( nSendData > 0 )
reqs[cptRequest++] = MeshBase<IndexT>::worldComm().localComm().isend( neighborRank, 2, &(dataToSend[neighborRank][0]), nSendData );

int nRecvData = sizeRecv[neighborRank];
dataToRecv[neighborRank].resize( nRecvData );
if ( nRecvData > 0 )
reqs[cptRequest++] = MeshBase<IndexT>::worldComm().localComm().irecv( neighborRank, 2, &(dataToRecv[neighborRank][0]), nRecvData );
}
//------------------------------------------------------------------------------------------------//
// wait all requests
mpi::wait_all( reqs, reqs + nbRequest );
mpi::wait_all( reqs, reqs + cptRequest );
//------------------------------------------------------------------------------------------------//
// build the container to ReSend
std::map<rank_type, std::vector<boost::tuple<resultghost_point_type, resultghost_edge_type, resultghost_face_type>>> dataToReSend;
Expand Down
45 changes: 35 additions & 10 deletions feelpp/feel/feelfilters/importergmsh.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -2961,14 +2961,34 @@ ImporterGmsh<MeshType>::updateGhostCellInfo( mesh_type* mesh, std::map<int,int>

mpi::request * reqs = new mpi::request[nbRequest];
int cptRequest=0;

for ( auto const& [proc,thedata] : dataToSend )
{
int nSendData = thedata.size();
reqs[cptRequest++] = this->worldComm().localComm().isend( proc , 0, nSendData );
}

std::map<rank_type,size_type> sizeRecv;
for ( rank_type proc=0; proc<nProc; ++proc )
{
if ( nbMsgToRecv[proc] > 0 )
{
reqs[cptRequest++] = this->worldComm().localComm().irecv( proc , 0, sizeRecv[proc] );
}
}
// wait all requests
mpi::wait_all(reqs, reqs + cptRequest);

//-----------------------------------------------------------//
cptRequest=0;
// first send
auto itDataToSend = dataToSend.begin();
auto const enDataToSend = dataToSend.end();
for ( ; itDataToSend!=enDataToSend ; ++itDataToSend )
{
reqs[cptRequest] = this->worldComm().localComm().isend( itDataToSend->first , 0, itDataToSend->second );
++cptRequest;
int nSendData = itDataToSend->second.size();
if ( nSendData > 0 )
reqs[cptRequest++] = this->worldComm().localComm().isend( itDataToSend->first , 0, &(itDataToSend->second[0]), nSendData );
}
//-----------------------------------------------------------//
// first recv
Expand All @@ -2977,13 +2997,15 @@ ImporterGmsh<MeshType>::updateGhostCellInfo( mesh_type* mesh, std::map<int,int>
{
if ( nbMsgToRecv[proc] > 0 )
{
reqs[cptRequest] = this->worldComm().localComm().irecv( proc , 0, dataToRecv[proc] );
++cptRequest;
int nRecvData = sizeRecv[proc];
dataToRecv[proc].resize( nRecvData );
if ( nRecvData > 0 )
reqs[cptRequest++] = this->worldComm().localComm().irecv( proc , 0, &(dataToRecv[proc][0]), nRecvData );
}
}
//-----------------------------------------------------------//
// wait all requests
mpi::wait_all(reqs, reqs + nbRequest);
mpi::wait_all(reqs, reqs + cptRequest);
//-----------------------------------------------------------//
// build the container to ReSend
std::map<rank_type, std::vector<int> > dataToReSend;
Expand All @@ -3005,8 +3027,9 @@ ImporterGmsh<MeshType>::updateGhostCellInfo( mesh_type* mesh, std::map<int,int>
auto const enDataToReSend = dataToReSend.end();
for ( ; itDataToReSend!=enDataToReSend ; ++itDataToReSend )
{
reqs[cptRequest] = this->worldComm().localComm().isend( itDataToReSend->first , 0, itDataToReSend->second );
++cptRequest;
int nSendData = itDataToReSend->second.size();
if ( nSendData > 0 )
reqs[cptRequest++] = this->worldComm().localComm().isend( itDataToReSend->first , 0, &(itDataToReSend->second[0]), nSendData );
}
//-----------------------------------------------------------//
// recv the initial request
Expand All @@ -3015,12 +3038,14 @@ ImporterGmsh<MeshType>::updateGhostCellInfo( mesh_type* mesh, std::map<int,int>
for ( ; itDataToSend!=enDataToSend ; ++itDataToSend )
{
const rank_type idProc = itDataToSend->first;
reqs[cptRequest] = this->worldComm().localComm().irecv( idProc, 0, finalDataToRecv[idProc] );
++cptRequest;
int nRecvData = itDataToSend->second.size();
finalDataToRecv[idProc].resize( nRecvData );
if ( nRecvData > 0 )
reqs[cptRequest++] = this->worldComm().localComm().irecv( idProc, 0, &(finalDataToRecv[idProc][0]), nRecvData );
}
//-----------------------------------------------------------//
// wait all requests
mpi::wait_all(reqs, reqs + nbRequest);
mpi::wait_all(reqs, reqs + cptRequest);
// delete reqs because finish comm
delete [] reqs;
//-----------------------------------------------------------//
Expand Down

0 comments on commit 01bfdb4

Please sign in to comment.