
Fixed bug in bvals_mhd with MPI: added wait for non-blocking sends to check that they complete before proceeding -- caused errors with multiple MPI blocks in more than 1D. Also extended VL integrators to use new lr_states API (needed with cylindrical coords).
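For context, the pattern the fix adds is sketched below as a minimal standalone MPI program (not Athena code: the neighbor ranks, tags, and buffer sizes are illustrative). Each process posts non-blocking receives, packs and posts non-blocking sends, and then, as in the patched bvals_mhd(), waits on the send requests before the send buffers could be reused, and finally drains the receives in whatever order they complete.

/* Minimal standalone sketch of the completion-wait pattern this commit adds.
 * Not Athena code: ranks, tags, and buffer sizes are illustrative only.
 * Build with: mpicc waitdemo.c -o waitdemo */
#include <mpi.h>
#include <stdio.h>

#define CNT 64

int main(int argc, char **argv)
{
  int rank, size, left, right, i;
  double send_buf[2][CNT], recv_buf[2][CNT];
  MPI_Request send_rq[2], recv_rq[2];

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  left  = (rank - 1 + size) % size;   /* periodic L/R neighbors */
  right = (rank + 1) % size;

  /* Post non-blocking receives for data from L and R neighbors */
  MPI_Irecv(&(recv_buf[0][0]), CNT, MPI_DOUBLE, left,  1, MPI_COMM_WORLD, &(recv_rq[0]));
  MPI_Irecv(&(recv_buf[1][0]), CNT, MPI_DOUBLE, right, 2, MPI_COMM_WORLD, &(recv_rq[1]));

  /* Pack and send data L and R */
  for (i=0; i<CNT; i++) send_buf[0][i] = (double)rank;
  MPI_Isend(&(send_buf[0][0]), CNT, MPI_DOUBLE, left,  2, MPI_COMM_WORLD, &(send_rq[0]));
  for (i=0; i<CNT; i++) send_buf[1][i] = (double)rank;
  MPI_Isend(&(send_buf[1][0]), CNT, MPI_DOUBLE, right, 1, MPI_COMM_WORLD, &(send_rq[1]));

  /* The fix: make sure the non-blocking sends have completed before
   * send_buf could be repacked for the next coordinate direction. */
  MPI_Waitall(2, send_rq, MPI_STATUSES_IGNORE);

  /* Unpack the receives in whatever order they complete. */
  for (i=0; i<2; i++) {
    int idx;
    MPI_Waitany(2, recv_rq, &idx, MPI_STATUS_IGNORE);
    printf("rank %d: buffer %d arrived, first value %g\n", rank, idx, recv_buf[idx][0]);
  }

  MPI_Finalize();
  return 0;
}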
jmstone committed Feb 27, 2010
1 parent a25e5fa commit 4ca16151ab879c06cf4a68cdf4506c0706e3b269
Showing with 81 additions and 43 deletions.
  1. +0 −1 Makefile.in
  2. +75 −36 src/bvals_mhd.c
  3. +1 −1 src/integrators/integrate_1d_vl.c
  4. +2 −2 src/integrators/integrate_2d_vl.c
  5. +3 −3 src/integrators/integrate_3d_vl.c
@@ -1,4 +1,3 @@
-include Makefile.config
# @WARNING1@
# @WARNING2@
#===============================================================================
@@ -156,20 +156,23 @@ void bvals_mhd(DomainS *pD)
if (pGrid->rx1_id >= 0 && pGrid->lx1_id >= 0) {
/* Post non-blocking receives for data from L and R Grids */
-ierr = MPI_Irecv(recv_buf[0], cnt, MPI_DOUBLE, pGrid->lx1_id, LtoR_tag,
+ierr = MPI_Irecv(&(recv_buf[0][0]),cnt,MPI_DOUBLE,pGrid->lx1_id,LtoR_tag,
pD->Comm_Domain, &(recv_rq[0]));
-ierr = MPI_Irecv(recv_buf[1], cnt, MPI_DOUBLE, pGrid->rx1_id, RtoL_tag,
+ierr = MPI_Irecv(&(recv_buf[1][0]),cnt,MPI_DOUBLE,pGrid->rx1_id,RtoL_tag,
pD->Comm_Domain, &(recv_rq[1]));
/* pack and send data L and R */
pack_ix1(pGrid);
-ierr = MPI_Isend(send_buf[0], cnt, MPI_DOUBLE, pGrid->lx1_id, RtoL_tag,
+ierr = MPI_Isend(&(send_buf[0][0]),cnt,MPI_DOUBLE,pGrid->lx1_id,RtoL_tag,
pD->Comm_Domain, &(send_rq[0]));
pack_ox1(pGrid);
-ierr = MPI_Isend(send_buf[1], cnt, MPI_DOUBLE, pGrid->rx1_id, LtoR_tag,
+ierr = MPI_Isend(&(send_buf[1][0]),cnt,MPI_DOUBLE,pGrid->rx1_id,LtoR_tag,
pD->Comm_Domain, &(send_rq[1]));
+/* check non-blocking sends have completed. */
+ierr = MPI_Waitall(2, send_rq, MPI_STATUS_IGNORE);
/* check non-blocking receives and unpack data in any order. */
ierr = MPI_Waitany(2,recv_rq,&mIndex,MPI_STATUS_IGNORE);
if (mIndex == 0) unpack_ix1(pGrid);
@@ -184,17 +187,20 @@ void bvals_mhd(DomainS *pD)
if (pGrid->rx1_id >= 0 && pGrid->lx1_id < 0) {
/* Post non-blocking receive for data from R Grid */
-ierr = MPI_Irecv(recv_buf[1], cnt, MPI_DOUBLE, pGrid->rx1_id, RtoL_tag,
+ierr = MPI_Irecv(&(recv_buf[1][0]),cnt,MPI_DOUBLE,pGrid->rx1_id,RtoL_tag,
pD->Comm_Domain, &(recv_rq[1]));
/* pack and send data R */
pack_ox1(pGrid);
-ierr = MPI_Isend(send_buf[1], cnt, MPI_DOUBLE, pGrid->rx1_id, LtoR_tag,
+ierr = MPI_Isend(&(send_buf[1][0]),cnt,MPI_DOUBLE,pGrid->rx1_id,LtoR_tag,
pD->Comm_Domain, &(send_rq[1]));
/* set physical boundary */
(*(pD->ix1_BCFun))(pGrid);
+/* check non-blocking send has completed. */
+ierr = MPI_Wait(&(send_rq[1]), MPI_STATUS_IGNORE);
/* wait on non-blocking receive from R and unpack data */
ierr = MPI_Wait(&(recv_rq[1]), MPI_STATUS_IGNORE);
unpack_ox1(pGrid);
@@ -205,17 +211,20 @@ void bvals_mhd(DomainS *pD)
if (pGrid->rx1_id < 0 && pGrid->lx1_id >= 0) {
/* Post non-blocking receive for data from L grid */
-ierr = MPI_Irecv(recv_buf[0], cnt, MPI_DOUBLE, pGrid->lx1_id, LtoR_tag,
+ierr = MPI_Irecv(&(recv_buf[0][0]),cnt,MPI_DOUBLE,pGrid->lx1_id,LtoR_tag,
pD->Comm_Domain, &(recv_rq[0]));
/* pack and send data L */
pack_ix1(pGrid);
-ierr = MPI_Isend(send_buf[0], cnt, MPI_DOUBLE, pGrid->lx1_id, RtoL_tag,
+ierr = MPI_Isend(&(send_buf[0][0]),cnt,MPI_DOUBLE,pGrid->lx1_id,RtoL_tag,
pD->Comm_Domain, &(send_rq[0]));
/* set physical boundary */
(*(pD->ox1_BCFun))(pGrid);
+/* check non-blocking send has completed. */
+ierr = MPI_Wait(&(send_rq[0]), MPI_STATUS_IGNORE);
/* wait on non-blocking receive from L and unpack data */
ierr = MPI_Wait(&(recv_rq[0]), MPI_STATUS_IGNORE);
unpack_ix1(pGrid);
@@ -249,20 +258,23 @@ void bvals_mhd(DomainS *pD)
if (pGrid->rx2_id >= 0 && pGrid->lx2_id >= 0) {
/* Post non-blocking receives for data from L and R Grids */
-ierr = MPI_Irecv(recv_buf[0], cnt, MPI_DOUBLE, pGrid->lx2_id, LtoR_tag,
+ierr = MPI_Irecv(&(recv_buf[0][0]),cnt,MPI_DOUBLE,pGrid->lx2_id,LtoR_tag,
pD->Comm_Domain, &(recv_rq[0]));
-ierr = MPI_Irecv(recv_buf[1], cnt, MPI_DOUBLE, pGrid->rx2_id, RtoL_tag,
+ierr = MPI_Irecv(&(recv_buf[1][0]),cnt,MPI_DOUBLE,pGrid->rx2_id,RtoL_tag,
pD->Comm_Domain, &(recv_rq[1]));
/* pack and send data L and R */
pack_ix2(pGrid);
-ierr = MPI_Isend(send_buf[0], cnt, MPI_DOUBLE, pGrid->lx2_id, RtoL_tag,
+ierr = MPI_Isend(&(send_buf[0][0]),cnt,MPI_DOUBLE,pGrid->lx2_id,RtoL_tag,
pD->Comm_Domain, &(send_rq[0]));
pack_ox2(pGrid);
-ierr = MPI_Isend(send_buf[1], cnt, MPI_DOUBLE, pGrid->rx2_id, LtoR_tag,
+ierr = MPI_Isend(&(send_buf[1][0]),cnt,MPI_DOUBLE,pGrid->rx2_id,LtoR_tag,
pD->Comm_Domain, &(send_rq[1]));
+/* check non-blocking sends have completed. */
+ierr = MPI_Waitall(2, send_rq, MPI_STATUS_IGNORE);
/* check non-blocking receives and unpack data in any order. */
ierr = MPI_Waitany(2,recv_rq,&mIndex,MPI_STATUS_IGNORE);
if (mIndex == 0) unpack_ix2(pGrid);
@@ -277,17 +289,20 @@ void bvals_mhd(DomainS *pD)
if (pGrid->rx2_id >= 0 && pGrid->lx2_id < 0) {
/* Post non-blocking receive for data from R Grid */
-ierr = MPI_Irecv(recv_buf[1], cnt, MPI_DOUBLE, pGrid->rx2_id, RtoL_tag,
+ierr = MPI_Irecv(&(recv_buf[1][0]),cnt,MPI_DOUBLE,pGrid->rx2_id,RtoL_tag,
pD->Comm_Domain, &(recv_rq[1]));
/* pack and send data R */
pack_ox2(pGrid);
-ierr = MPI_Isend(send_buf[1], cnt, MPI_DOUBLE, pGrid->rx2_id, LtoR_tag,
+ierr = MPI_Isend(&(send_buf[1][0]),cnt,MPI_DOUBLE,pGrid->rx2_id,LtoR_tag,
pD->Comm_Domain, &(send_rq[1]));
/* set physical boundary */
(*(pD->ix2_BCFun))(pGrid);
+/* check non-blocking send has completed. */
+ierr = MPI_Wait(&(send_rq[1]), MPI_STATUS_IGNORE);
/* wait on non-blocking receive from R and unpack data */
ierr = MPI_Wait(&(recv_rq[1]), MPI_STATUS_IGNORE);
unpack_ox2(pGrid);
@@ -298,17 +313,20 @@ void bvals_mhd(DomainS *pD)
if (pGrid->rx2_id < 0 && pGrid->lx2_id >= 0) {
/* Post non-blocking receive for data from L grid */
-ierr = MPI_Irecv(recv_buf[0], cnt, MPI_DOUBLE, pGrid->lx2_id, LtoR_tag,
+ierr = MPI_Irecv(&(recv_buf[0][0]),cnt,MPI_DOUBLE,pGrid->lx2_id,LtoR_tag,
pD->Comm_Domain, &(recv_rq[0]));
/* pack and send data L */
pack_ix2(pGrid);
-ierr = MPI_Isend(send_buf[0], cnt, MPI_DOUBLE, pGrid->lx2_id, RtoL_tag,
+ierr = MPI_Isend(&(send_buf[0][0]),cnt,MPI_DOUBLE,pGrid->lx2_id,RtoL_tag,
pD->Comm_Domain, &(send_rq[0]));
/* set physical boundary */
(*(pD->ox2_BCFun))(pGrid);
+/* check non-blocking send has completed. */
+ierr = MPI_Wait(&(send_rq[0]), MPI_STATUS_IGNORE);
/* wait on non-blocking receive from L and unpack data */
ierr = MPI_Wait(&(recv_rq[0]), MPI_STATUS_IGNORE);
unpack_ix2(pGrid);
@@ -352,20 +370,23 @@ void bvals_mhd(DomainS *pD)
if (pGrid->rx3_id >= 0 && pGrid->lx3_id >= 0) {
/* Post non-blocking receives for data from L and R Grids */
-ierr = MPI_Irecv(recv_buf[0], cnt, MPI_DOUBLE, pGrid->lx3_id, LtoR_tag,
+ierr = MPI_Irecv(&(recv_buf[0][0]),cnt,MPI_DOUBLE,pGrid->lx3_id,LtoR_tag,
pD->Comm_Domain, &(recv_rq[0]));
-ierr = MPI_Irecv(recv_buf[1], cnt, MPI_DOUBLE, pGrid->rx3_id, RtoL_tag,
+ierr = MPI_Irecv(&(recv_buf[1][0]),cnt,MPI_DOUBLE,pGrid->rx3_id,RtoL_tag,
pD->Comm_Domain, &(recv_rq[1]));
/* pack and send data L and R */
pack_ix3(pGrid);
-ierr = MPI_Isend(send_buf[0], cnt, MPI_DOUBLE, pGrid->lx3_id, RtoL_tag,
+ierr = MPI_Isend(&(send_buf[0][0]),cnt,MPI_DOUBLE,pGrid->lx3_id,RtoL_tag,
pD->Comm_Domain, &(send_rq[0]));
pack_ox3(pGrid);
-ierr = MPI_Isend(send_buf[1], cnt, MPI_DOUBLE, pGrid->rx3_id, LtoR_tag,
+ierr = MPI_Isend(&(send_buf[1][0]),cnt,MPI_DOUBLE,pGrid->rx3_id,LtoR_tag,
pD->Comm_Domain, &(send_rq[1]));
+/* check non-blocking sends have completed. */
+ierr = MPI_Waitall(2, send_rq, MPI_STATUS_IGNORE);
/* check non-blocking receives and unpack data in any order. */
ierr = MPI_Waitany(2,recv_rq,&mIndex,MPI_STATUS_IGNORE);
if (mIndex == 0) unpack_ix3(pGrid);
@@ -380,17 +401,20 @@ void bvals_mhd(DomainS *pD)
if (pGrid->rx3_id >= 0 && pGrid->lx3_id < 0) {
/* Post non-blocking receive for data from R Grid */
-ierr = MPI_Irecv(recv_buf[1], cnt, MPI_DOUBLE, pGrid->rx3_id, RtoL_tag,
+ierr = MPI_Irecv(&(recv_buf[1][0]),cnt,MPI_DOUBLE,pGrid->rx3_id,RtoL_tag,
pD->Comm_Domain, &(recv_rq[1]));
/* pack and send data R */
pack_ox3(pGrid);
-ierr = MPI_Isend(send_buf[1], cnt, MPI_DOUBLE, pGrid->rx3_id, LtoR_tag,
+ierr = MPI_Isend(&(send_buf[1][0]),cnt,MPI_DOUBLE,pGrid->rx3_id,LtoR_tag,
pD->Comm_Domain, &(send_rq[1]));
/* set physical boundary */
(*(pD->ix3_BCFun))(pGrid);
+/* check non-blocking send has completed. */
+ierr = MPI_Wait(&(send_rq[1]), MPI_STATUS_IGNORE);
/* wait on non-blocking receive from R and unpack data */
ierr = MPI_Wait(&(recv_rq[1]), MPI_STATUS_IGNORE);
unpack_ox3(pGrid);
@@ -401,17 +425,20 @@ void bvals_mhd(DomainS *pD)
if (pGrid->rx3_id < 0 && pGrid->lx3_id >= 0) {
/* Post non-blocking receive for data from L grid */
-ierr = MPI_Irecv(recv_buf[0], cnt, MPI_DOUBLE, pGrid->lx3_id, LtoR_tag,
+ierr = MPI_Irecv(&(recv_buf[0][0]),cnt,MPI_DOUBLE,pGrid->lx3_id,LtoR_tag,
pD->Comm_Domain, &(recv_rq[0]));
/* pack and send data L */
pack_ix3(pGrid);
-ierr = MPI_Isend(send_buf[0], cnt, MPI_DOUBLE, pGrid->lx3_id, RtoL_tag,
+ierr = MPI_Isend(&(send_buf[0][0]),cnt,MPI_DOUBLE,pGrid->lx3_id,RtoL_tag,
pD->Comm_Domain, &(send_rq[0]));
/* set physical boundary */
(*(pD->ox3_BCFun))(pGrid);
+/* check non-blocking send has completed. */
+ierr = MPI_Wait(&(send_rq[0]), MPI_STATUS_IGNORE);
/* wait on non-blocking receive from L and unpack data */
ierr = MPI_Wait(&(recv_rq[0]), MPI_STATUS_IGNORE);
unpack_ix3(pGrid);
@@ -2219,7 +2246,8 @@ static void pack_ix1(GridS *pG)
#if (NSCALARS > 0)
int n;
#endif
-double *pSnd = send_buf[0];
+double *pSnd;
+pSnd = (double*)&(send_buf[0][0]);
for (k=ks; k<=ke; k++){
for (j=js; j<=je; j++){
@@ -2290,7 +2318,8 @@ static void pack_ox1(GridS *pG)
#if (NSCALARS > 0)
int n;
#endif
-double *pSnd = send_buf[1];
+double *pSnd;
+pSnd = (double*)&(send_buf[1][0]);
for (k=ks; k<=ke; k++){
for (j=js; j<=je; j++){
@@ -2360,7 +2389,8 @@ static void pack_ix2(GridS *pG)
#if (NSCALARS > 0)
int n;
#endif
-double *pSnd = send_buf[0];
+double *pSnd;
+pSnd = (double*)&(send_buf[0][0]);
for (k=ks; k<=ke; k++) {
for (j=js; j<=js+(nghost-1); j++) {
@@ -2431,7 +2461,8 @@ static void pack_ox2(GridS *pG)
#if (NSCALARS > 0)
int n;
#endif
-double *pSnd = send_buf[1];
+double *pSnd;
+pSnd = (double*)&(send_buf[1][0]);
for (k=ks; k<=ke; k++){
for (j=je-(nghost-1); j<=je; j++){
@@ -2499,7 +2530,8 @@ static void pack_ix3(GridS *pG)
#if (NSCALARS > 0)
int n;
#endif
-double *pSnd = send_buf[0];
+double *pSnd;
+pSnd = (double*)&(send_buf[0][0]);
for (k=ks; k<=ks+(nghost-1); k++) {
for (j=js-nghost; j<=je+nghost; j++) {
@@ -2567,7 +2599,8 @@ static void pack_ox3(GridS *pG)
#if (NSCALARS > 0)
int n;
#endif
-double *pSnd = send_buf[1];
+double *pSnd;
+pSnd = (double*)&(send_buf[1][0]);
for (k=ke-(nghost-1); k<=ke; k++) {
for (j=js-nghost; j<=je+nghost; j++) {
@@ -2638,7 +2671,8 @@ static void unpack_ix1(GridS *pG)
#if (NSCALARS > 0)
int n;
#endif
-double *pRcv = recv_buf[0];
+double *pRcv;
+pRcv = (double*)&(recv_buf[0][0]);
for (k=ks; k<=ke; k++){
for (j=js; j<=je; j++){
@@ -2709,7 +2743,8 @@ static void unpack_ox1(GridS *pG)
#if (NSCALARS > 0)
int n;
#endif
-double *pRcv = recv_buf[1];
+double *pRcv;
+pRcv = (double*)&(recv_buf[1][0]);
for (k=ks; k<=ke; k++) {
for (j=js; j<=je; j++) {
@@ -2780,7 +2815,8 @@ static void unpack_ix2(GridS *pG)
#if (NSCALARS > 0)
int n;
#endif
-double *pRcv = recv_buf[0];
+double *pRcv;
+pRcv = (double*)&(recv_buf[0][0]);
for (k=ks; k<=ke; k++) {
for (j=js-nghost; j<=js-1; j++) {
@@ -2851,7 +2887,8 @@ static void unpack_ox2(GridS *pG)
#if (NSCALARS > 0)
int n;
#endif
-double *pRcv = recv_buf[1];
+double *pRcv;
+pRcv = (double*)&(recv_buf[1][0]);
for (k=ks; k<=ke; k++) {
for (j=je+1; j<=je+nghost; j++) {
@@ -2919,7 +2956,8 @@ static void unpack_ix3(GridS *pG)
#if (NSCALARS > 0)
int n;
#endif
-double *pRcv = recv_buf[0];
+double *pRcv;
+pRcv = (double*)&(recv_buf[0][0]);
for (k=ks-nghost; k<=ks-1; k++) {
for (j=js-nghost; j<=je+nghost; j++) {
@@ -2987,7 +3025,8 @@ static void unpack_ox3(GridS *pG)
#if (NSCALARS > 0)
int n;
#endif
-double *pRcv = recv_buf[1];
+double *pRcv;
+pRcv = (double*)&(recv_buf[1][0]);
for (k=ke+1; k<=ke+nghost; k++) {
for (j=js-nghost; j<=je+nghost; j++) {
@@ -221,7 +221,7 @@ void integrate_1d_vl(DomainS *pD)
W[i] = Cons1D_to_Prim1D(&U1d[i],&Bxc[i]);
}
-lr_states(W,Bxc,0.0,is,ie,Wl,Wr);
+lr_states(pG,W,Bxc,pG->dt,pG->dx1,is,ie,Wl,Wr,cart_x1);
for (i=is; i<=ie+1; i++) {
Wl_x1Face[i] = Wl[i];
@@ -443,7 +443,7 @@ void integrate_2d_vl(DomainS *pD)
W1d[i] = Cons1D_to_Prim1D(&U1d[i],&Bxc[i]);
}
-lr_states(W1d,Bxc,0.0,is,ie,Wl,Wr);
+lr_states(pG,W1d,Bxc,pG->dt,pG->dx1,is,ie,Wl,Wr,cart_x1);
for (i=is; i<=ie+1; i++) {
Wl_x1Face[j][i] = Wl[i];
@@ -486,7 +486,7 @@ void integrate_2d_vl(DomainS *pD)
W1d[j] = Cons1D_to_Prim1D(&U1d[j],&Bxc[j]);
}
-lr_states(W1d,Bxc,0.0,js,je,Wl,Wr);
+lr_states(pG,W1d,Bxc,pG->dt,pG->dx2,js,je,Wl,Wr,cart_x2);
for (j=js; j<=je+1; j++) {
Wl_x2Face[j][i] = Wl[j];
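For reference, the new call sites above imply an lr_states prototype along the following lines. This is inferred from the arguments shown in this diff rather than copied from Athena's prototypes.h, so the exact type qualifiers and the type of the final direction argument (cart_x1/cart_x2 above) are assumptions.

/* Inferred from the call sites in this diff -- an assumption, not a verbatim
 * declaration.  The reconstruction now takes the Grid, the time step, the
 * cell width, and a direction flag so that geometric terms can be applied
 * in cylindrical coordinates. */
void lr_states(const GridS *pG, const Prim1DS W[], const Real Bxc[],
               const Real dt, const Real dx, const int il, const int iu,
               Prim1DS Wl[], Prim1DS Wr[], const int dir);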