
bvals_mhd.c

00001 #include "copyright.h"
00002 /*============================================================================*/
00003 /*! \file bvals_mhd.c
00004  *  \brief Sets boundary conditions (quantities in ghost zones) on each edge
00005  *   of a Grid for the MHD variables.
00006  *
00007  * PURPOSE: Sets boundary conditions (quantities in ghost zones) on each edge
00008  *   of a Grid for the MHD variables.  Each edge of a Grid represents either:
00009  *  - (1) a physical boundary at the edge of the Mesh; in which case BCs are
00010  *        specified by an integer flag input by the user (or by a user-defined
00011  *        BC function in the problem file)
00012  *  - (2) the boundary between Grids resulting from decomposition of a larger
00013  *        Domain using MPI; in which case BCs are handled by MPI calls
00014  *  - (3) an internal boundary between fine/coarse grid levels in a nested Mesh;
00015  *        in which case the BCs require a prolongation operator from the parent
00016  *
00017  *   This file contains functions that can handle the first two cases.  Case (3)
00018  *   is handled in the Prolongate() function called from the main loop.
00019  *   The naming convention of the integer flags for BCs is:
00020  *    -  bc_ix1 = Boundary Condition for Inner x1 (at i=is)
00021  *    -  bc_ox1 = Boundary Condition for Outer x1 (at i=ie)
00022  *   and similarly for bc_ix2, bc_ox2, bc_ix3, and bc_ox3.
00023  *
00024  * For case (1) -- PHYSICAL BOUNDARIES
00025  *   The values of the integer flags (bc_ix1, etc.) are:
00026  *   - 1 = reflecting; 2 = outflow; 4 = periodic; 5 = conductor
00027  *   For flow-in boundaries (ghost zones set to pre-determined values),
00028  *   pointers to user-defined functions in the problem file are used. 
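 *   As an illustration (a sketch; exact input syntax depends on the problem
 *   setup), a root-level input block selecting these flags might read:
 *       <domain1>
 *       bc_ix1 = 4       (periodic at inner x1)
 *       bc_ox1 = 4       (periodic at outer x1)
 *       bc_ix2 = 1       (reflecting at inner x2)
 *       bc_ox2 = 2       (outflow at outer x2)
 *   where the flag names follow the convention above and <domain1> is the
 *   block name referenced by the shearing-box code in bvals_mhd() below.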
00029  *
00030  * For case (2) -- MPI BOUNDARIES
00031  *   We do the parallel synchronization by having every grid:
00032  *   - 1) Post non-blocking receives for data from both L and R Grids
00033  *   - 2) Pack and send data to the Grids on both L and R
00034  *   - 3) Check for receives and unpack data in order of first to finish
00035  *   If the Grid is at the edge of the Domain, we set BCs as in case (1) or (3).
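 *   A compressed sketch of one such exchange (this is the x1 pass of
 *   bvals_mhd() below, with cnt, the tags, and the buffers as defined there;
 *   Comm abbreviates pD->Comm_Domain):
 *
 *     MPI_Irecv(recv_buf[0], cnt, MPI_DOUBLE, pG->lx1_id, LtoR_tag, Comm, &recv_rq[0]);
 *     MPI_Irecv(recv_buf[1], cnt, MPI_DOUBLE, pG->rx1_id, RtoL_tag, Comm, &recv_rq[1]);
 *     pack_ix1(pG); MPI_Isend(send_buf[0], cnt, MPI_DOUBLE, pG->lx1_id, RtoL_tag, Comm, &send_rq[0]);
 *     pack_ox1(pG); MPI_Isend(send_buf[1], cnt, MPI_DOUBLE, pG->rx1_id, LtoR_tag, Comm, &send_rq[1]);
 *     MPI_Waitall(2, send_rq, MPI_STATUSES_IGNORE);
 *     MPI_Waitany(2, recv_rq, &mIndex, MPI_STATUS_IGNORE);   then unpack_ix1() or unpack_ox1()
 *     MPI_Waitany(2, recv_rq, &mIndex, MPI_STATUS_IGNORE);   then unpack the other side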
00036  *
00037  * For case (3) -- INTERNAL GRID LEVEL BOUNDARIES
00038  *   This step is complicated and must be handled separately, in the function
00039  *   Prolongate() called from the main loop.  In the algorithm below, nothing is
00040  *   done for this case; the BCs are left to be set later.
00041  *
00042  * With SELF-GRAVITY: BCs for Phi are set independently of the MHD variables
00043  *   in a separate function bvals_grav(). 
00044  *
00045  * CONTAINS PUBLIC FUNCTIONS: 
00046  * - bvals_mhd()      - calls appropriate functions to set ghost cells
00047  * - bvals_mhd_init() - sets function pointers used by bvals_mhd()
00048  * - bvals_mhd_fun()  - enrolls a pointer to a user-defined BC function
00049  *
00050  * PRIVATE FUNCTION PROTOTYPES:
00051  * - reflect_ix1()  - reflecting BCs at boundary ix1
00052  * - reflect_ox1()  - reflecting BCs at boundary ox1
00053  * - reflect_ix2()  - reflecting BCs at boundary ix2
00054  * - reflect_ox2()  - reflecting BCs at boundary ox2
00055  * - reflect_ix3()  - reflecting BCs at boundary ix3
00056  * - reflect_ox3()  - reflecting BCs at boundary ox3
00057  * - outflow_ix1()  - outflow BCs at boundary ix1
00058  * - outflow_ox1()  - outflow BCs at boundary ox1
00059  * - outflow_ix2()  - outflow BCs at boundary ix2
00060  * - outflow_ox2()  - outflow BCs at boundary ox2
00061  * - outflow_ix3()  - outflow BCs at boundary ix3
00062  * - outflow_ox3()  - outflow BCs at boundary ox3
00063  * - periodic_ix1() - periodic BCs at boundary ix1
00064  * - periodic_ox1() - periodic BCs at boundary ox1
00065  * - periodic_ix2() - periodic BCs at boundary ix2
00066  * - periodic_ox2() - periodic BCs at boundary ox2
00067  * - periodic_ix3() - periodic BCs at boundary ix3
00068  * - periodic_ox3() - periodic BCs at boundary ox3
00069  * - conduct_ix1()  - conducting BCs at boundary ix1
00070  * - conduct_ox1()  - conducting BCs at boundary ox1
00071  * - conduct_ix2()  - conducting BCs at boundary ix2
00072  * - conduct_ox2()  - conducting BCs at boundary ox2
00073  * - conduct_ix3()  - conducting BCs at boundary ix3
00074  * - conduct_ox3()  - conducting BCs at boundary ox3
00075  * - pack_ix1()     - pack data for MPI non-blocking send at ix1 boundary
00076  * - pack_ox1()     - pack data for MPI non-blocking send at ox1 boundary
00077  * - pack_ix2()     - pack data for MPI non-blocking send at ix2 boundary
00078  * - pack_ox2()     - pack data for MPI non-blocking send at ox2 boundary
00079  * - pack_ix3()     - pack data for MPI non-blocking send at ix3 boundary
00080  * - pack_ox3()     - pack data for MPI non-blocking send at ox3 boundary
00081  * - unpack_ix1()   - unpack data for MPI non-blocking receive at ix1 boundary
00082  * - unpack_ox1()   - unpack data for MPI non-blocking receive at ox1 boundary
00083  * - unpack_ix2()   - unpack data for MPI non-blocking receive at ix2 boundary
00084  * - unpack_ox2()   - unpack data for MPI non-blocking receive at ox2 boundary
00085  * - unpack_ix3()   - unpack data for MPI non-blocking receive at ix3 boundary
00086  * - unpack_ox3()   - unpack data for MPI non-blocking receive at ox3 boundary*/
00087 /*============================================================================*/
00088 
00089 #include <stdio.h>
00090 #include <stdlib.h>
00091 #include "defs.h"
00092 #include "athena.h"
00093 #include "globals.h"
00094 #include "prototypes.h"
00095 
00096 #ifdef MPI_PARALLEL
00097 /* MPI send and receive buffers */
00098 static double **send_buf = NULL, **recv_buf = NULL;
00099 static MPI_Request *recv_rq, *send_rq;
00100 #endif /* MPI_PARALLEL */
00101 
00102 /*==============================================================================
00103  * PRIVATE FUNCTION PROTOTYPES:
00104  *   reflect_???()  - reflecting BCs at boundary ???
00105  *   outflow_???()  - outflow BCs at boundary ???
00106  *   periodic_???() - periodic BCs at boundary ???
00107  *   conduct_???()  - conducting BCs at boundary ???
00108  *   pack_???()     - pack data for MPI non-blocking send at ??? boundary
00109  *   unpack_???()   - unpack data for MPI non-blocking receive at ??? boundary
00110  *============================================================================*/
00111 
00112 static void reflect_ix1(GridS *pG);
00113 static void reflect_ox1(GridS *pG);
00114 static void reflect_ix2(GridS *pG);
00115 static void reflect_ox2(GridS *pG);
00116 static void reflect_ix3(GridS *pG);
00117 static void reflect_ox3(GridS *pG);
00118 
00119 static void outflow_ix1(GridS *pG);
00120 static void outflow_ox1(GridS *pG);
00121 static void outflow_ix2(GridS *pG);
00122 static void outflow_ox2(GridS *pG);
00123 static void outflow_ix3(GridS *pG);
00124 static void outflow_ox3(GridS *pG);
00125 
00126 static void periodic_ix1(GridS *pG);
00127 static void periodic_ox1(GridS *pG);
00128 static void periodic_ix2(GridS *pG);
00129 static void periodic_ox2(GridS *pG);
00130 static void periodic_ix3(GridS *pG);
00131 static void periodic_ox3(GridS *pG);
00132 
00133 static void conduct_ix1(GridS *pG);
00134 static void conduct_ox1(GridS *pG);
00135 static void conduct_ix2(GridS *pG);
00136 static void conduct_ox2(GridS *pG);
00137 static void conduct_ix3(GridS *pG);
00138 static void conduct_ox3(GridS *pG);
00139 
00140 static void ProlongateLater(GridS *pG);
00141 
00142 #ifdef MPI_PARALLEL
00143 static void pack_ix1(GridS *pG);
00144 static void pack_ox1(GridS *pG);
00145 static void pack_ix2(GridS *pG);
00146 static void pack_ox2(GridS *pG);
00147 static void pack_ix3(GridS *pG);
00148 static void pack_ox3(GridS *pG);
00149 
00150 static void unpack_ix1(GridS *pG);
00151 static void unpack_ox1(GridS *pG);
00152 static void unpack_ix2(GridS *pG);
00153 static void unpack_ox2(GridS *pG);
00154 static void unpack_ix3(GridS *pG);
00155 static void unpack_ox3(GridS *pG);
00156 #endif /* MPI_PARALLEL */
00157 
00158 /*=========================== PUBLIC FUNCTIONS ===============================*/
00159 
00160 /*----------------------------------------------------------------------------*/
00161 /*! \fn void bvals_mhd(DomainS *pD)
00162  *  \brief Calls appropriate functions to set ghost zones.  
00163  *
00164  *   The function
00165  *   pointers (*(pD->???_BCFun)) are set by bvals_mhd_init() to be either a
00166  *   user-defined function, or one of the functions corresponding to reflecting,
00167  *   periodic, or outflow.  If the left- or right-Grid ID numbers are >= 0
00168  *   (neighboring Grids exist), then MPI calls are used.
00169  *
00170  * The order for updating boundary conditions must always be x1-x2-x3, so that
00171  * the corner ghost cells are filled properly.
00172  */
00173 
00174 void bvals_mhd(DomainS *pD)
00175 {
00176   GridS *pGrid = (pD->Grid);
00177 #ifdef SHEARING_BOX
00178   int myL,myM,myN,BCFlag;
00179 #endif
00180 #ifdef MPI_PARALLEL
00181   int cnt, cnt2, cnt3, ierr, mIndex;
00182 #endif /* MPI_PARALLEL */
00183 
00184 /*--- Step 1. ------------------------------------------------------------------
00185  * Boundary Conditions in x1-direction */
00186 
00187   if (pGrid->Nx[0] > 1){
00188 
00189 #ifdef MPI_PARALLEL
00190     cnt = nghost*(pGrid->Nx[1])*(pGrid->Nx[2])*(NVAR);
00191 #ifdef MHD
00192     cnt2 = (pGrid->Nx[1] > 1) ? (pGrid->Nx[1] + 1) : 1;
00193     cnt3 = (pGrid->Nx[2] > 1) ? (pGrid->Nx[2] + 1) : 1;
00194     cnt += (nghost-1)*(pGrid->Nx[1])*(pGrid->Nx[2]);
00195     cnt += nghost*cnt2*(pGrid->Nx[2]);
00196     cnt += nghost*(pGrid->Nx[1])*cnt3;
00197 #endif
00198 
00199 /* MPI blocks to both left and right */
00200     if (pGrid->rx1_id >= 0 && pGrid->lx1_id >= 0) {
00201 
00202       /* Post non-blocking receives for data from L and R Grids */
00203       ierr = MPI_Irecv(&(recv_buf[0][0]),cnt,MPI_DOUBLE,pGrid->lx1_id,LtoR_tag,
00204         pD->Comm_Domain, &(recv_rq[0]));
00205       ierr = MPI_Irecv(&(recv_buf[1][0]),cnt,MPI_DOUBLE,pGrid->rx1_id,RtoL_tag,
00206         pD->Comm_Domain, &(recv_rq[1]));
00207 
00208       /* pack and send data L and R */
00209       pack_ix1(pGrid);
00210       ierr = MPI_Isend(&(send_buf[0][0]),cnt,MPI_DOUBLE,pGrid->lx1_id,RtoL_tag,
00211         pD->Comm_Domain, &(send_rq[0]));
00212 
00213       pack_ox1(pGrid); 
00214       ierr = MPI_Isend(&(send_buf[1][0]),cnt,MPI_DOUBLE,pGrid->rx1_id,LtoR_tag,
00215         pD->Comm_Domain, &(send_rq[1]));
00216 
00217       /* check non-blocking sends have completed. */
00218       ierr = MPI_Waitall(2, send_rq, MPI_STATUSES_IGNORE);
00219 
00220       /* check non-blocking receives and unpack data in any order. */
00221       ierr = MPI_Waitany(2,recv_rq,&mIndex,MPI_STATUS_IGNORE);
00222       if (mIndex == 0) unpack_ix1(pGrid);
00223       if (mIndex == 1) unpack_ox1(pGrid);
00224       ierr = MPI_Waitany(2,recv_rq,&mIndex,MPI_STATUS_IGNORE);
00225       if (mIndex == 0) unpack_ix1(pGrid);
00226       if (mIndex == 1) unpack_ox1(pGrid);
00227 
00228     }
00229 
00230 /* Physical boundary on left, MPI block on right */
00231     if (pGrid->rx1_id >= 0 && pGrid->lx1_id < 0) {
00232 
00233       /* Post non-blocking receive for data from R Grid */
00234       ierr = MPI_Irecv(&(recv_buf[1][0]),cnt,MPI_DOUBLE,pGrid->rx1_id,RtoL_tag,
00235         pD->Comm_Domain, &(recv_rq[1]));
00236 
00237       /* pack and send data R */
00238       pack_ox1(pGrid); 
00239       ierr = MPI_Isend(&(send_buf[1][0]),cnt,MPI_DOUBLE,pGrid->rx1_id,LtoR_tag,
00240         pD->Comm_Domain, &(send_rq[1]));
00241 
00242       /* set physical boundary */
00243       (*(pD->ix1_BCFun))(pGrid);
00244 
00245       /* check non-blocking send has completed. */
00246       ierr = MPI_Wait(&(send_rq[1]), MPI_STATUS_IGNORE);
00247 
00248       /* wait on non-blocking receive from R and unpack data */
00249       ierr = MPI_Wait(&(recv_rq[1]), MPI_STATUS_IGNORE);
00250       unpack_ox1(pGrid);
00251 
00252     }
00253 
00254 /* MPI block on left, Physical boundary on right */
00255     if (pGrid->rx1_id < 0 && pGrid->lx1_id >= 0) {
00256 
00257       /* Post non-blocking receive for data from L grid */
00258       ierr = MPI_Irecv(&(recv_buf[0][0]),cnt,MPI_DOUBLE,pGrid->lx1_id,LtoR_tag,
00259         pD->Comm_Domain, &(recv_rq[0]));
00260 
00261       /* pack and send data L */
00262       pack_ix1(pGrid); 
00263       ierr = MPI_Isend(&(send_buf[0][0]),cnt,MPI_DOUBLE,pGrid->lx1_id,RtoL_tag,
00264         pD->Comm_Domain, &(send_rq[0]));
00265 
00266       /* set physical boundary */
00267       (*(pD->ox1_BCFun))(pGrid);
00268 
00269       /* check non-blocking send has completed. */
00270       ierr = MPI_Wait(&(send_rq[0]), MPI_STATUS_IGNORE);
00271 
00272       /* wait on non-blocking receive from L and unpack data */
00273       ierr = MPI_Wait(&(recv_rq[0]), MPI_STATUS_IGNORE);
00274       unpack_ix1(pGrid);
00275 
00276     }
00277 #endif /* MPI_PARALLEL */
00278 
00279 /* Physical boundaries on both left and right */
00280     if (pGrid->rx1_id < 0 && pGrid->lx1_id < 0) {
00281       (*(pD->ix1_BCFun))(pGrid);
00282       (*(pD->ox1_BCFun))(pGrid);
00283     } 
00284 
00285   }
00286 
00287 /*--- Step 2. ------------------------------------------------------------------
00288  * Boundary Conditions in x2-direction */
00289 
00290   if (pGrid->Nx[1] > 1){
00291 
00292 #ifdef MPI_PARALLEL
00293     cnt = (pGrid->Nx[0] + 2*nghost)*nghost*(pGrid->Nx[2])*(NVAR);
00294 #ifdef MHD
00295     cnt3 = (pGrid->Nx[2] > 1) ? (pGrid->Nx[2] + 1) : 1;
00296     cnt += (pGrid->Nx[0] + 2*nghost - 1)*nghost*(pGrid->Nx[2]);
00297     cnt += (pGrid->Nx[0] + 2*nghost)*(nghost-1)*(pGrid->Nx[2]);
00298     cnt += (pGrid->Nx[0] + 2*nghost)*nghost*cnt3;
00299 #endif
00300 
00301 /* MPI blocks to both left and right */
00302     if (pGrid->rx2_id >= 0 && pGrid->lx2_id >= 0) {
00303 
00304       /* Post non-blocking receives for data from L and R Grids */
00305       ierr = MPI_Irecv(&(recv_buf[0][0]),cnt,MPI_DOUBLE,pGrid->lx2_id,LtoR_tag,
00306         pD->Comm_Domain, &(recv_rq[0]));
00307       ierr = MPI_Irecv(&(recv_buf[1][0]),cnt,MPI_DOUBLE,pGrid->rx2_id,RtoL_tag,
00308         pD->Comm_Domain, &(recv_rq[1]));
00309 
00310       /* pack and send data L and R */
00311       pack_ix2(pGrid);
00312       ierr = MPI_Isend(&(send_buf[0][0]),cnt,MPI_DOUBLE,pGrid->lx2_id,RtoL_tag,
00313         pD->Comm_Domain, &(send_rq[0]));
00314 
00315       pack_ox2(pGrid); 
00316       ierr = MPI_Isend(&(send_buf[1][0]),cnt,MPI_DOUBLE,pGrid->rx2_id,LtoR_tag,
00317         pD->Comm_Domain, &(send_rq[1]));
00318 
00319       /* check non-blocking sends have completed. */
00320       ierr = MPI_Waitall(2, send_rq, MPI_STATUSES_IGNORE);
00321 
00322       /* check non-blocking receives and unpack data in any order. */
00323       ierr = MPI_Waitany(2,recv_rq,&mIndex,MPI_STATUS_IGNORE);
00324       if (mIndex == 0) unpack_ix2(pGrid);
00325       if (mIndex == 1) unpack_ox2(pGrid);
00326       ierr = MPI_Waitany(2,recv_rq,&mIndex,MPI_STATUS_IGNORE);
00327       if (mIndex == 0) unpack_ix2(pGrid);
00328       if (mIndex == 1) unpack_ox2(pGrid);
00329 
00330     }
00331 
00332 /* Physical boundary on left, MPI block on right */
00333     if (pGrid->rx2_id >= 0 && pGrid->lx2_id < 0) {
00334 
00335       /* Post non-blocking receive for data from R Grid */
00336       ierr = MPI_Irecv(&(recv_buf[1][0]),cnt,MPI_DOUBLE,pGrid->rx2_id,RtoL_tag,
00337         pD->Comm_Domain, &(recv_rq[1]));
00338 
00339       /* pack and send data R */
00340       pack_ox2(pGrid); 
00341       ierr = MPI_Isend(&(send_buf[1][0]),cnt,MPI_DOUBLE,pGrid->rx2_id,LtoR_tag,
00342         pD->Comm_Domain, &(send_rq[1]));
00343 
00344       /* set physical boundary */
00345       (*(pD->ix2_BCFun))(pGrid);
00346 
00347       /* check non-blocking send has completed. */
00348       ierr = MPI_Wait(&(send_rq[1]), MPI_STATUS_IGNORE);
00349 
00350       /* wait on non-blocking receive from R and unpack data */
00351       ierr = MPI_Wait(&(recv_rq[1]), MPI_STATUS_IGNORE);
00352       unpack_ox2(pGrid);
00353 
00354     }
00355 
00356 /* MPI block on left, Physical boundary on right */
00357     if (pGrid->rx2_id < 0 && pGrid->lx2_id >= 0) {
00358 
00359       /* Post non-blocking receive for data from L grid */
00360       ierr = MPI_Irecv(&(recv_buf[0][0]),cnt,MPI_DOUBLE,pGrid->lx2_id,LtoR_tag,
00361         pD->Comm_Domain, &(recv_rq[0]));
00362 
00363       /* pack and send data L */
00364       pack_ix2(pGrid); 
00365       ierr = MPI_Isend(&(send_buf[0][0]),cnt,MPI_DOUBLE,pGrid->lx2_id,RtoL_tag,
00366         pD->Comm_Domain, &(send_rq[0]));
00367 
00368       /* set physical boundary */
00369       (*(pD->ox2_BCFun))(pGrid);
00370 
00371       /* check non-blocking send has completed. */
00372       ierr = MPI_Wait(&(send_rq[0]), MPI_STATUS_IGNORE);
00373 
00374       /* wait on non-blocking receive from L and unpack data */
00375       ierr = MPI_Wait(&(recv_rq[0]), MPI_STATUS_IGNORE);
00376       unpack_ix2(pGrid);
00377 
00378     }
00379 #endif /* MPI_PARALLEL */
00380 
00381 /* Physical boundaries on both left and right */
00382     if (pGrid->rx2_id < 0 && pGrid->lx2_id < 0) {
00383       (*(pD->ix2_BCFun))(pGrid);
00384       (*(pD->ox2_BCFun))(pGrid);
00385     } 
00386 
00387 /* shearing sheet BCs; function defined in problem generator.
00388  * Enroll outflow BCs if periodic BCs are NOT selected.  This assumes the
00389  * root-level grid is specified by the <domain1> block in the input file */
00390 #ifdef SHEARING_BOX
00391     BCFlag = par_geti_def("domain1","bc_ix1",0);
00392     get_myGridIndex(pD, myID_Comm_world, &myL, &myM, &myN);
00393     if (myL == 0 && BCFlag == 4) {
00394       ShearingSheet_ix1(pD);
00395     }
00396     BCFlag = par_geti_def("domain1","bc_ox1",0);
00397     if (myL == ((pD->NGrid[0])-1) && BCFlag == 4) {
00398       ShearingSheet_ox1(pD);
00399     }
00400 #endif
00401 
00402   }
00403 
00404 /*--- Step 3. ------------------------------------------------------------------
00405  * Boundary Conditions in x3-direction */
00406 
00407   if (pGrid->Nx[2] > 1){
00408 
00409 #ifdef MPI_PARALLEL
00410     cnt = (pGrid->Nx[0] + 2*nghost)*(pGrid->Nx[1] + 2*nghost)*nghost*(NVAR);
00411 #ifdef MHD
00412     cnt += (pGrid->Nx[0] + 2*nghost - 1)*(pGrid->Nx[1] + 2*nghost)*nghost;
00413     cnt += (pGrid->Nx[0] + 2*nghost)*(pGrid->Nx[1] + 2*nghost - 1)*nghost;
00414     cnt += (pGrid->Nx[0] + 2*nghost)*(pGrid->Nx[1] + 2*nghost)*(nghost-1);
00415 #endif
00416 
00417 /* MPI blocks to both left and right */
00418     if (pGrid->rx3_id >= 0 && pGrid->lx3_id >= 0) {
00419 
00420       /* Post non-blocking receives for data from L and R Grids */
00421       ierr = MPI_Irecv(&(recv_buf[0][0]),cnt,MPI_DOUBLE,pGrid->lx3_id,LtoR_tag,
00422         pD->Comm_Domain, &(recv_rq[0]));
00423       ierr = MPI_Irecv(&(recv_buf[1][0]),cnt,MPI_DOUBLE,pGrid->rx3_id,RtoL_tag,
00424         pD->Comm_Domain, &(recv_rq[1]));
00425 
00426       /* pack and send data L and R */
00427       pack_ix3(pGrid);
00428       ierr = MPI_Isend(&(send_buf[0][0]),cnt,MPI_DOUBLE,pGrid->lx3_id,RtoL_tag,
00429         pD->Comm_Domain, &(send_rq[0]));
00430 
00431       pack_ox3(pGrid); 
00432       ierr = MPI_Isend(&(send_buf[1][0]),cnt,MPI_DOUBLE,pGrid->rx3_id,LtoR_tag,
00433         pD->Comm_Domain, &(send_rq[1]));
00434 
00435       /* check non-blocking sends have completed. */
00436       ierr = MPI_Waitall(2, send_rq, MPI_STATUSES_IGNORE);
00437 
00438       /* check non-blocking receives and unpack data in any order. */
00439       ierr = MPI_Waitany(2,recv_rq,&mIndex,MPI_STATUS_IGNORE);
00440       if (mIndex == 0) unpack_ix3(pGrid);
00441       if (mIndex == 1) unpack_ox3(pGrid);
00442       ierr = MPI_Waitany(2,recv_rq,&mIndex,MPI_STATUS_IGNORE);
00443       if (mIndex == 0) unpack_ix3(pGrid);
00444       if (mIndex == 1) unpack_ox3(pGrid);
00445 
00446     }
00447 
00448 /* Physical boundary on left, MPI block on right */
00449     if (pGrid->rx3_id >= 0 && pGrid->lx3_id < 0) {
00450 
00451       /* Post non-blocking receive for data from R Grid */
00452       ierr = MPI_Irecv(&(recv_buf[1][0]),cnt,MPI_DOUBLE,pGrid->rx3_id,RtoL_tag,
00453         pD->Comm_Domain, &(recv_rq[1]));
00454 
00455       /* pack and send data R */
00456       pack_ox3(pGrid); 
00457       ierr = MPI_Isend(&(send_buf[1][0]),cnt,MPI_DOUBLE,pGrid->rx3_id,LtoR_tag,
00458         pD->Comm_Domain, &(send_rq[1]));
00459 
00460       /* set physical boundary */
00461       (*(pD->ix3_BCFun))(pGrid);
00462 
00463       /* check non-blocking send has completed. */
00464       ierr = MPI_Wait(&(send_rq[1]), MPI_STATUS_IGNORE);
00465 
00466       /* wait on non-blocking receive from R and unpack data */
00467       ierr = MPI_Wait(&(recv_rq[1]), MPI_STATUS_IGNORE);
00468       unpack_ox3(pGrid);
00469 
00470     }
00471 
00472 /* MPI block on left, Physical boundary on right */
00473     if (pGrid->rx3_id < 0 && pGrid->lx3_id >= 0) {
00474 
00475       /* Post non-blocking receive for data from L grid */
00476       ierr = MPI_Irecv(&(recv_buf[0][0]),cnt,MPI_DOUBLE,pGrid->lx3_id,LtoR_tag,
00477         pD->Comm_Domain, &(recv_rq[0]));
00478 
00479       /* pack and send data L */
00480       pack_ix3(pGrid); 
00481       ierr = MPI_Isend(&(send_buf[0][0]),cnt,MPI_DOUBLE,pGrid->lx3_id,RtoL_tag,
00482         pD->Comm_Domain, &(send_rq[0]));
00483 
00484       /* set physical boundary */
00485       (*(pD->ox3_BCFun))(pGrid);
00486 
00487       /* check non-blocking send has completed. */
00488       ierr = MPI_Wait(&(send_rq[0]), MPI_STATUS_IGNORE);
00489 
00490       /* wait on non-blocking receive from L and unpack data */
00491       ierr = MPI_Wait(&(recv_rq[0]), MPI_STATUS_IGNORE);
00492       unpack_ix3(pGrid);
00493 
00494     }
00495 #endif /* MPI_PARALLEL */
00496 
00497 /* Physical boundaries on both left and right */
00498     if (pGrid->rx3_id < 0 && pGrid->lx3_id < 0) {
00499       (*(pD->ix3_BCFun))(pGrid);
00500       (*(pD->ox3_BCFun))(pGrid);
00501     } 
00502 
00503   }
00504 
00505   return;
00506 }
00507 
00508 /*----------------------------------------------------------------------------*/
00509 /*! \fn void bvals_mhd_init(MeshS *pM)
00510  *  \brief Sets function pointers for physical boundaries during
00511  *   initialization, allocates memory for send/receive buffers with MPI.
00512  */
00513 
00514 void bvals_mhd_init(MeshS *pM)
00515 {
00516   GridS *pG;
00517   DomainS *pD;
00518   int i,nl,nd,irefine;
00519 #ifdef MPI_PARALLEL
00520   int myL,myM,myN,l,m,n,nx1t,nx2t,nx3t,size;
00521   int x1cnt=0, x2cnt=0, x3cnt=0; /* Number of words passed in x1/x2/x3-dir. */
00522 #endif /* MPI_PARALLEL */
00523 
00524 /* Cycle through all the Domains that have active Grids on this proc */
00525 
00526   for (nl=0; nl<(pM->NLevels); nl++){
00527   for (nd=0; nd<(pM->DomainsPerLevel[nl]); nd++){
00528   if (pM->Domain[nl][nd].Grid != NULL) {
00529     pD = (DomainS*)&(pM->Domain[nl][nd]);  /* ptr to Domain */
00530     pG = pM->Domain[nl][nd].Grid;          /* ptr to Grid */
00531     irefine = 1;
00532     for (i=1;i<=nl;i++) irefine *= 2;   /* C pow fn only takes doubles !! */
00533 #ifdef MPI_PARALLEL
00534 /* get (l,m,n) coordinates of Grid being updated on this processor */
00535     get_myGridIndex(pD, myID_Comm_world, &myL, &myM, &myN);
00536 #endif /* MPI_PARALLEL */
00537 
00538 /* Set function pointers for physical boundaries in x1-direction -------------*/
00539 
00540     if(pG->Nx[0] > 1) {
00541 
00542 /*---- ix1 boundary ----------------------------------------------------------*/
00543 
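      /* An illustrative check of the edge tests used below (the numbers are
       * hypothetical): on a root Mesh with pM->Nx[0]=64, a level-1 (irefine=2)
       * Domain with Disp[0]=32 and Nx[0]=96 gives (32+96)/2 = 64 = pM->Nx[0],
       * so its outer x1 edge lies on the root edge, while Disp[0]=32 != 0
       * means its inner x1 edge is interior to the root Domain, so ix1_BCFun
       * is deferred to ProlongateLater. */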
00544       if(pD->ix1_BCFun == NULL){    /* BCFun ptr was not set in prob gen */
00545 
00546 /* Domain boundary is in interior of root */
00547         if(pD->Disp[0] != 0) {      
00548           pD->ix1_BCFun = ProlongateLater;
00549 
00550 /* Domain is at L-edge of root Domain, but not R-edge and periodic BC  */
00551         } else {
00552           if(((pD->Disp[0] + pD->Nx[0])/irefine != pM->Nx[0]) && 
00553                pM->BCFlag_ix1 == 4) {
00554             ath_error("[bvals_init]:level=%d Domain touching ix1b but not ox1b and periodic BC not allowed\n",nl); 
00555 
00556 /* Domain is at L-edge of root Domain */
00557           } else {                    
00558             switch(pM->BCFlag_ix1){
00559 
00560             case 1: /* Reflecting, B_normal=0 */
00561               pD->ix1_BCFun = reflect_ix1;
00562             break;
00563 
00564             case 2: /* Outflow */
00565               pD->ix1_BCFun = outflow_ix1;
00566             break;
00567 
00568             case 4: /* Periodic. Handle with MPI calls for parallel jobs. */
00569               pD->ix1_BCFun = periodic_ix1;
00570 #ifdef MPI_PARALLEL
00571               if(pG->lx1_id < 0 && pD->NGrid[0] > 1){
00572                 pG->lx1_id = pD->GData[myN][myM][pD->NGrid[0]-1].ID_Comm_Domain;
00573               }
00574 #endif /* MPI_PARALLEL */
00575             break;
00576 
00577             case 5: /* Reflecting, B_normal!=0 */
00578               pD->ix1_BCFun = reflect_ix1;
00579             break;
00580 
00581             default:
00582               ath_perr(-1,"[bvals_init]:bc_ix1=%d unknown\n",pM->BCFlag_ix1);
00583               exit(EXIT_FAILURE);
00584             }
00585           }
00586         }
00587       }
00588 
00589 /*---- ox1 boundary ----------------------------------------------------------*/
00590 
00591       if(pD->ox1_BCFun == NULL){    /* BCFun ptr was not set in prob gen */
00592 
00593 /* Domain boundary is in interior of root */
00594         if((pD->Disp[0] + pD->Nx[0])/irefine != pM->Nx[0]) {
00595           pD->ox1_BCFun = ProlongateLater;
00596 
00597 /* Domain is at R-edge of root Domain, but not L-edge and periodic BC */
00598         } else {
00599           if((pD->Disp[0] != 0) && (pM->BCFlag_ox1 == 4)) {      
00600             ath_error("[bvals_init]:level=%d Domain touching ox1b but not ix1b and periodic BC not allowed\n",nl); 
00601 
00602 
00603 /* Domain is at R-edge of root Domain */
00604           } else {
00605             switch(pM->BCFlag_ox1){
00606 
00607             case 1: /* Reflecting, B_normal=0 */
00608               pD->ox1_BCFun = reflect_ox1;
00609             break;
00610 
00611             case 2: /* Outflow */
00612               pD->ox1_BCFun = outflow_ox1;
00613             break;
00614 
00615             case 4: /* Periodic. Handle with MPI calls for parallel jobs. */
00616               pD->ox1_BCFun = periodic_ox1;
00617 #ifdef MPI_PARALLEL
00618               if(pG->rx1_id < 0 && pD->NGrid[0] > 1){
00619                 pG->rx1_id = pD->GData[myN][myM][0].ID_Comm_Domain;
00620               }
00621 #endif /* MPI_PARALLEL */
00622             break;
00623 
00624             case 5: /* Reflecting, B_normal!=0 */
00625               pD->ox1_BCFun = reflect_ox1;
00626             break;
00627 
00628             default:
00629               ath_perr(-1,"[bvals_init]:bc_ox1=%d unknown\n",pM->BCFlag_ox1);
00630               exit(EXIT_FAILURE);
00631             }
00632           }
00633         }
00634       }
00635     }
00636 
00637 /* Set function pointers for physical boundaries in x2-direction -------------*/
00638 
00639     if(pG->Nx[1] > 1) {
00640 
00641 /*---- ix2 boundary ----------------------------------------------------------*/
00642 
00643       if(pD->ix2_BCFun == NULL){    /* BCFun ptr was not set in prob gen */
00644 
00645 /* Domain boundary is in interior of root */
00646         if(pD->Disp[1] != 0) {
00647           pD->ix2_BCFun = ProlongateLater;
00648 
00649 /* Domain is at L-edge of root Domain, but not R-edge and periodic BC  */
00650         } else {
00651           if(((pD->Disp[1] + pD->Nx[1])/irefine != pM->Nx[1]) &&
00652                pM->BCFlag_ix2 == 4) {
00653             ath_error("[bvals_init]:level=%d Domain touching ix2b but not ox2b and periodic BC not allowed\n",nl); 
00654 
00655 
00656 /* Domain is at L-edge of root Domain */
00657           } else {
00658             switch(pM->BCFlag_ix2){
00659 
00660             case 1: /* Reflecting, B_normal=0 */
00661               pD->ix2_BCFun = reflect_ix2;
00662             break;
00663 
00664             case 2: /* Outflow */
00665               pD->ix2_BCFun = outflow_ix2;
00666             break;
00667 
00668             case 4: /* Periodic. Handle with MPI calls for parallel jobs. */
00669               pD->ix2_BCFun = periodic_ix2;
00670 #ifdef MPI_PARALLEL
00671               if(pG->lx2_id < 0 && pD->NGrid[1] > 1){
00672                 pG->lx2_id = pD->GData[myN][pD->NGrid[1]-1][myL].ID_Comm_Domain;
00673               }
00674 #endif /* MPI_PARALLEL */
00675             break;
00676   
00677             case 5: /* Reflecting, B_normal!=0 */
00678               pD->ix2_BCFun = reflect_ix2;
00679             break;
00680 
00681             default:
00682               ath_perr(-1,"[bvals_init]:bc_ix2=%d unknown\n",pM->BCFlag_ix2);
00683               exit(EXIT_FAILURE);
00684             }
00685           }
00686         }
00687       }
00688 
00689 /*---- ox2 boundary ----------------------------------------------------------*/
00690 
00691       if(pD->ox2_BCFun == NULL){    /* BCFun ptr was not set in prob gen */
00692 
00693 /* Domain boundary is in interior of root */
00694         if((pD->Disp[1] + pD->Nx[1])/irefine != pM->Nx[1]) {
00695           pD->ox2_BCFun = ProlongateLater;
00696 
00697 /* Domain is at R-edge of root Domain, but not L-edge and periodic BC */
00698         } else {
00699           if((pD->Disp[1] != 0) && (pM->BCFlag_ox2 == 4)) {
00700             ath_error("[bvals_init]:level=%d Domain touching ox2b but not ix2b and periodic BC not allowed\n",nl); 
00701 
00702 /* Domain is at R-edge of root Domain */
00703           } else {
00704             switch(pM->BCFlag_ox2){
00705 
00706             case 1: /* Reflecting, B_normal=0 */
00707               pD->ox2_BCFun = reflect_ox2;
00708             break;
00709 
00710             case 2: /* Outflow */
00711               pD->ox2_BCFun = outflow_ox2;
00712             break;
00713 
00714             case 4: /* Periodic. Handle with MPI calls for parallel jobs. */
00715               pD->ox2_BCFun = periodic_ox2;
00716 #ifdef MPI_PARALLEL
00717               if(pG->rx2_id < 0 && pD->NGrid[1] > 1){
00718                 pG->rx2_id = pD->GData[myN][0][myL].ID_Comm_Domain;
00719               }
00720 #endif /* MPI_PARALLEL */
00721             break;
00722 
00723             case 5: /* Reflecting, B_normal!=0 */
00724               pD->ox2_BCFun = reflect_ox2;
00725             break;
00726 
00727             default:
00728               ath_perr(-1,"[bvals_init]:bc_ox2=%d unknown\n",pM->BCFlag_ox2);
00729               exit(EXIT_FAILURE);
00730             }
00731           }
00732         }
00733       }
00734     }
00735 
00736 /* Set function pointers for physical boundaries in x3-direction -------------*/
00737 
00738     if(pG->Nx[2] > 1) {
00739 
00740 /*---- ix3 boundary ----------------------------------------------------------*/
00741 
00742       if(pD->ix3_BCFun == NULL){    /* BCFun ptr was not set in prob gen */
00743 
00744 /* Domain boundary is in interior of root */
00745         if(pD->Disp[2] != 0) {
00746           pD->ix3_BCFun = ProlongateLater;
00747 
00748 /* Domain is at L-edge of root Domain, but not R-edge and periodic BC  */
00749         } else {
00750           if(((pD->Disp[2] + pD->Nx[2])/irefine != pM->Nx[2]) &&
00751                pM->BCFlag_ix3 == 4) {
00752             ath_error("[bvals_init]:level=%d Domain touching ix3b but not ox3b and periodic BC not allowed\n",nl); 
00753 
00754 /* Domain is at L-edge of root Domain */
00755           } else {
00756             switch(pM->BCFlag_ix3){
00757 
00758             case 1: /* Reflecting, B_normal=0 */
00759               pD->ix3_BCFun = reflect_ix3;
00760             break;
00761 
00762             case 2: /* Outflow */
00763               pD->ix3_BCFun = outflow_ix3;
00764             break;
00765 
00766             case 4: /* Periodic. Handle with MPI calls for parallel jobs. */
00767               pD->ix3_BCFun = periodic_ix3;
00768 #ifdef MPI_PARALLEL
00769               if(pG->lx3_id < 0 && pD->NGrid[2] > 1){
00770                 pG->lx3_id = pD->GData[pD->NGrid[2]-1][myM][myL].ID_Comm_Domain;
00771               }
00772 #endif /* MPI_PARALLEL */
00773             break;
00774 
00775             case 5: /* Reflecting, B_normal!=0 */
00776               pD->ix3_BCFun = reflect_ix3;
00777             break;
00778 
00779             default:
00780               ath_perr(-1,"[bvals_init]:bc_ix3=%d unknown\n",pM->BCFlag_ix3);
00781               exit(EXIT_FAILURE);
00782             }
00783           }
00784         }
00785       }
00786 
00787 /*---- ox3 boundary ----------------------------------------------------------*/
00788 
00789       if(pD->ox3_BCFun == NULL){    /* BCFun ptr was not set in prob gen */
00790 
00791 /* Domain boundary is in interior of root */
00792         if((pD->Disp[2] + pD->Nx[2])/irefine != pM->Nx[2]) {
00793           pD->ox3_BCFun = ProlongateLater;
00794 
00795 /* Domain is at R-edge of root Domain, but not L-edge and periodic BC */
00796         } else {
00797           if((pD->Disp[2] != 0) && (pM->BCFlag_ox3 == 4)) {
00798             ath_error("[bvals_init]:level=%d Domain touching ox3b but not ix3b and periodic BC not allowed\n",nl); 
00799 
00800 /* Domain is at R-edge of root Domain */
00801           } else {
00802             switch(pM->BCFlag_ox3){
00803 
00804             case 1: /* Reflecting, B_normal=0 */
00805               pD->ox3_BCFun = reflect_ox3;
00806             break;
00807 
00808             case 2: /* Outflow */
00809               pD->ox3_BCFun = outflow_ox3;
00810             break;
00811 
00812             case 4: /* Periodic. Handle with MPI calls for parallel jobs. */
00813               pD->ox3_BCFun = periodic_ox3;
00814 #ifdef MPI_PARALLEL
00815               if(pG->rx3_id < 0 && pD->NGrid[2] > 1){
00816                 pG->rx3_id = pD->GData[0][myM][myL].ID_Comm_Domain;
00817               }
00818 #endif /* MPI_PARALLEL */
00819             break;
00820 
00821             case 5: /* Reflecting, B_normal!=0 */
00822               pD->ox3_BCFun = reflect_ox3;
00823             break;
00824 
00825             default:
00826               ath_perr(-1,"[bvals_init]:bc_ox3=%d unknown\n",pM->BCFlag_ox3);
00827               exit(EXIT_FAILURE);
00828             }
00829           }
00830         }
00831       }
00832     }
00833 
00834 /* Figure out largest size needed for send/receive buffers with MPI ----------*/
00835 
00836 #ifdef MPI_PARALLEL
00837 
00838     for (n=0; n<(pD->NGrid[2]); n++){
00839     for (m=0; m<(pD->NGrid[1]); m++){
00840       for (l=0; l<(pD->NGrid[0]); l++){
00841 
00842 /* x1cnt is surface area of x1 faces */
00843         if(pD->NGrid[0] > 1){
00844           nx2t = pD->GData[n][m][l].Nx[1];
00845           if(nx2t > 1) nx2t += 1;
00846 
00847           nx3t = pD->GData[n][m][l].Nx[2];
00848           if(nx3t > 1) nx3t += 1;
00849 
00850           if(nx2t*nx3t > x1cnt) x1cnt = nx2t*nx3t;
00851         }
00852 
00853 /* x2cnt is surface area of x2 faces */
00854         if(pD->NGrid[1] > 1){
00855           nx1t = pD->GData[n][m][l].Nx[0];
00856           if(nx1t > 1) nx1t += 2*nghost;
00857 
00858           nx3t = pD->GData[n][m][l].Nx[2];
00859           if(nx3t > 1) nx3t += 1;
00860 
00861           if(nx1t*nx3t > x2cnt) x2cnt = nx1t*nx3t;
00862         }
00863 
00864 /* x3cnt is surface area of x3 faces */
00865         if(pD->NGrid[2] > 1){
00866           nx1t = pD->GData[n][m][l].Nx[0];
00867           if(nx1t > 1) nx1t += 2*nghost;
00868 
00869           nx2t = pD->GData[n][m][l].Nx[1];
00870           if(nx2t > 1) nx2t += 2*nghost;
00871 
00872           if(nx1t*nx2t > x3cnt) x3cnt = nx1t*nx2t;
00873         }
00874       }
00875     }}
00876 #endif /* MPI_PARALLEL */
00877 
00878   }}}  /* End loop over all Domains with active Grids -----------------------*/
00879 
00880 #ifdef MPI_PARALLEL
00881 /* Allocate memory for send/receive buffers and MPI_Requests */
00882 
00883   size = x1cnt > x2cnt ? x1cnt : x2cnt;
00884   size = x3cnt >  size ? x3cnt : size;
00885 
00886 #ifdef MHD
00887   size *= nghost*((NVAR)+3);
00888 #else
00889   size *= nghost*(NVAR);
00890 #endif
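  /* For scale (an illustrative estimate, not computed by the code): with a
   * 64^3 Grid per process, nghost=4, and NVAR=8 (adiabatic MHD), the largest
   * face is the x3 face, (64+2*4)*(64+2*4) = 5184 cells, so each buffer holds
   * 5184*4*(8+3) = 228,096 doubles, or about 1.7 MB. */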
00891 
00892   if (size > 0) {
00893     if((send_buf = (double**)calloc_2d_array(2,size,sizeof(double))) == NULL)
00894       ath_error("[bvals_init]: Failed to allocate send buffer\n");
00895 
00896     if((recv_buf = (double**)calloc_2d_array(2,size,sizeof(double))) == NULL)
00897       ath_error("[bvals_init]: Failed to allocate recv buffer\n");
00898   }
00899 
00900   if((recv_rq = (MPI_Request*) calloc_1d_array(2,sizeof(MPI_Request))) == NULL)
00901     ath_error("[bvals_init]: Failed to allocate recv MPI_Request array\n");
00902   if((send_rq = (MPI_Request*) calloc_1d_array(2,sizeof(MPI_Request))) == NULL)
00903     ath_error("[bvals_init]: Failed to allocate send MPI_Request array\n");
00904 
00905 #endif /* MPI_PARALLEL */
00906 
00907   return;
00908 }
00909 
00910 /*----------------------------------------------------------------------------*/
00911 /*! \fn void bvals_mhd_fun(DomainS *pD, enum BCDirection dir, VGFun_t prob_bc)
00912  *  \brief Sets function ptrs for user-defined BCs.
00913  */
00914 
00915 void bvals_mhd_fun(DomainS *pD, enum BCDirection dir, VGFun_t prob_bc)
00916 {
00917   switch(dir){
00918   case left_x1:
00919     pD->ix1_BCFun = prob_bc;
00920     break;
00921   case right_x1:
00922     pD->ox1_BCFun = prob_bc;
00923     break;
00924   case left_x2:
00925     pD->ix2_BCFun = prob_bc;
00926     break;
00927   case right_x2:
00928     pD->ox2_BCFun = prob_bc;
00929     break;
00930   case left_x3:
00931     pD->ix3_BCFun = prob_bc;
00932     break;
00933   case right_x3:
00934     pD->ox3_BCFun = prob_bc;
00935     break;
00936   default:
00937     ath_perr(-1,"[bvals_fun]: Unknown direction = %d\n",dir);
00938     exit(EXIT_FAILURE);
00939   }
00940   return;
00941 }
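/* Example of enrolling a user-defined BC (a sketch, not part of this file):
 * a problem generator would typically call bvals_mhd_fun() before
 * bvals_mhd_init() runs, since the init routine only fills in pointers that
 * are still NULL.  The names my_bc_ix1() and problem() are hypothetical.
 *
 *   static void my_bc_ix1(GridS *pG);         sets ghost zones at inner x1
 *
 *   void problem(DomainS *pD)
 *   {
 *     bvals_mhd_fun(pD, left_x1, my_bc_ix1);  user BC replaces flag bc_ix1
 *   }
 */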
00942 
00943 /*=========================== PRIVATE FUNCTIONS ==============================*/
00944 /* Following are the functions:
00945  *   reflecting_???:   where ???=[ix1,ox1,ix2,ox2,ix3,ox3]
00946  *   outflow_???
00947  *   periodic_???
00948  *   conduct_???
00949  *   pack_???
00950  *   unpack_???
00951  */
00952 
00953 /*----------------------------------------------------------------------------*/
00954 /*! \fn static void reflect_ix1(GridS *pGrid)
00955  *  \brief  REFLECTING boundary conditions, Inner x1 boundary (bc_ix1=1) */
00956 
00957 static void reflect_ix1(GridS *pGrid)
00958 {
00959   int is = pGrid->is;
00960   int js = pGrid->js, je = pGrid->je;
00961   int ks = pGrid->ks, ke = pGrid->ke;
00962   int i,j,k;
00963 #ifdef MHD
00964   int ju,ku; /* j-upper, k-upper */
00965 #endif
00966 
00967   for (k=ks; k<=ke; k++) {
00968     for (j=js; j<=je; j++) {
00969       for (i=1; i<=nghost; i++) {
00970         pGrid->U[k][j][is-i]    =  pGrid->U[k][j][is+(i-1)];
00971         pGrid->U[k][j][is-i].M1 = -pGrid->U[k][j][is-i].M1; /* reflect 1-mom. */
00972 #ifdef MHD
00973         pGrid->U[k][j][is-i].B1c= -pGrid->U[k][j][is-i].B1c;/* reflect 1-fld. */
00974 #endif
00975       }
00976     }
00977   }
00978 
00979 #ifdef MHD
00980 /* reflect normal component of B field, B1i not set at i=is-nghost */
00981   for (k=ks; k<=ke; k++) {
00982     for (j=js; j<=je; j++) {
00983       pGrid->B1i[k][j][is] = 0.0;
00984       for (i=1; i<=nghost-1; i++) {
00985         pGrid->B1i[k][j][is-i]   = -pGrid->B1i[k][j][is+i];
00986       }
00987     }
00988   }
00989 
00990   if (pGrid->Nx[1] > 1) ju=je+1; else ju=je;
00991   for (k=ks; k<=ke; k++) {
00992     for (j=js; j<=ju; j++) {
00993       for (i=1; i<=nghost; i++) {
00994         pGrid->B2i[k][j][is-i] = pGrid->B2i[k][j][is+(i-1)];
00995       }
00996     }
00997   }
00998 
00999   if (pGrid->Nx[2] > 1) ku=ke+1; else ku=ke;
01000   for (k=ks; k<=ku; k++) {
01001     for (j=js; j<=je; j++) {
01002       for (i=1; i<=nghost; i++) {
01003         pGrid->B3i[k][j][is-i] = pGrid->B3i[k][j][is+(i-1)];
01004       }
01005     }
01006   }
01007 #endif /* MHD */
01008 
01009   return;
01010 }
01011 
01012 /*----------------------------------------------------------------------------*/
01013 /*! \fn static void reflect_ox1(GridS *pGrid)
01014  *  \brief REFLECTING boundary conditions, Outer x1 boundary (bc_ox1=1). */
01015 
01016 static void reflect_ox1(GridS *pGrid)
01017 {
01018   int ie = pGrid->ie;
01019   int js = pGrid->js, je = pGrid->je;
01020   int ks = pGrid->ks, ke = pGrid->ke;
01021   int i,j,k;
01022 #ifdef MHD
01023   int ju,ku; /* j-upper, k-upper */
01024 #endif
01025 
01026   for (k=ks; k<=ke; k++) {
01027     for (j=js; j<=je; j++) {
01028       for (i=1; i<=nghost; i++) {
01029         pGrid->U[k][j][ie+i]    =  pGrid->U[k][j][ie-(i-1)];
01030         pGrid->U[k][j][ie+i].M1 = -pGrid->U[k][j][ie+i].M1; /* reflect 1-mom. */
01031 #ifdef MHD
01032         pGrid->U[k][j][ie+i].B1c= -pGrid->U[k][j][ie+i].B1c;/* reflect 1-fld. */
01033 #endif
01034       }
01035     }
01036   }
01037 
01038 #ifdef MHD
01039 /* reflect normal component of B field */
01040   for (k=ks; k<=ke; k++) {
01041     for (j=js; j<=je; j++) {
01042       pGrid->B1i[k][j][ie+1] = 0.0;
01043       for (i=2; i<=nghost; i++) {
01044         pGrid->B1i[k][j][ie+i] = -pGrid->B1i[k][j][ie-(i-2)];
01045       }
01046     }
01047   }
01048 
01049   if (pGrid->Nx[1] > 1) ju=je+1; else ju=je;
01050   for (k=ks; k<=ke; k++) {
01051     for (j=js; j<=ju; j++) {
01052       for (i=1; i<=nghost; i++) {
01053         pGrid->B2i[k][j][ie+i] = pGrid->B2i[k][j][ie-(i-1)];
01054       }
01055     }
01056   }
01057 
01058   if (pGrid->Nx[2] > 1) ku=ke+1; else ku=ke;
01059   for (k=ks; k<=ku; k++) {
01060     for (j=js; j<=je; j++) {
01061       for (i=1; i<=nghost; i++) {
01062         pGrid->B3i[k][j][ie+i] = pGrid->B3i[k][j][ie-(i-1)];
01063       }
01064     }
01065   }
01066 #endif /* MHD */
01067 
01068   return;
01069 }
01070 
01071 /*----------------------------------------------------------------------------*/
01072 /*! \fn static void reflect_ix2(GridS *pGrid)
01073  *  \brief REFLECTING boundary conditions, Inner x2 boundary (bc_ix2=1) */
01074 
01075 static void reflect_ix2(GridS *pGrid)
01076 {
01077   int is = pGrid->is, ie = pGrid->ie;
01078   int js = pGrid->js;
01079   int ks = pGrid->ks, ke = pGrid->ke;
01080   int i,j,k;
01081 #ifdef MHD
01082   int ku; /* k-upper */
01083 #endif
01084 
01085   for (k=ks; k<=ke; k++) {
01086     for (j=1; j<=nghost; j++) {
01087       for (i=is-nghost; i<=ie+nghost; i++) {
01088         pGrid->U[k][js-j][i]    =  pGrid->U[k][js+(j-1)][i];
01089         pGrid->U[k][js-j][i].M2 = -pGrid->U[k][js-j][i].M2; /* reflect 2-mom. */
01090 #ifdef MHD
01091         pGrid->U[k][js-j][i].B2c= -pGrid->U[k][js-j][i].B2c;/* reflect 2-fld. */
01092 #endif
01093       }
01094     }
01095   }
01096 
01097 #ifdef MHD
01098 /* B1i is not set at i=is-nghost */
01099   for (k=ks; k<=ke; k++) {
01100     for (j=1; j<=nghost; j++) {
01101       for (i=is-(nghost-1); i<=ie+nghost; i++) {
01102         pGrid->B1i[k][js-j][i] = pGrid->B1i[k][js+(j-1)][i];
01103       }
01104     }
01105   }
01106 
01107 /* reflect normal component of B field, B2i not set at j=js-nghost */
01108   for (k=ks; k<=ke; k++) {
01109     for (i=is-nghost; i<=ie+nghost; i++) {
01110       pGrid->B2i[k][js][i] = 0.0;
01111     }
01112     for (j=1; j<=nghost-1; j++) {
01113       for (i=is-nghost; i<=ie+nghost; i++) {
01114         pGrid->B2i[k][js-j][i] = -pGrid->B2i[k][js+j][i];
01115       }
01116     }
01117   }
01118 
01119   if (pGrid->Nx[2] > 1) ku=ke+1; else ku=ke;
01120   for (k=ks; k<=ku; k++) {
01121     for (j=1; j<=nghost; j++) {
01122       for (i=is-nghost; i<=ie+nghost; i++) {
01123         pGrid->B3i[k][js-j][i] = pGrid->B3i[k][js+(j-1)][i];
01124       }
01125     }
01126   }
01127 #endif /* MHD */
01128 
01129   return;
01130 }
01131 
01132 /*----------------------------------------------------------------------------*/
01133 /*! \fn static void reflect_ox2(GridS *pGrid)
01134  *  \brief REFLECTING boundary conditions, Outer x2 boundary (bc_ox2=1) */
01135 
01136 static void reflect_ox2(GridS *pGrid)
01137 {
01138   int is = pGrid->is, ie = pGrid->ie;
01139   int je = pGrid->je;
01140   int ks = pGrid->ks, ke = pGrid->ke;
01141   int i,j,k;
01142 #ifdef MHD
01143   int ku; /* k-upper */
01144 #endif
01145 
01146   for (k=ks; k<=ke; k++) {
01147     for (j=1; j<=nghost; j++) {
01148       for (i=is-nghost; i<=ie+nghost; i++) {
01149         pGrid->U[k][je+j][i]    =  pGrid->U[k][je-(j-1)][i];
01150         pGrid->U[k][je+j][i].M2 = -pGrid->U[k][je+j][i].M2; /* reflect 2-mom. */
01151 #ifdef MHD
01152         pGrid->U[k][je+j][i].B2c= -pGrid->U[k][je+j][i].B2c;/* reflect 2-fld. */
01153 #endif
01154       }
01155     }
01156   }
01157 
01158 #ifdef MHD
01159 /* B1i is not set at i=is-nghost */
01160   for (k=ks; k<=ke; k++) {
01161     for (j=1; j<=nghost; j++) {
01162       for (i=is-(nghost-1); i<=ie+nghost; i++) {
01163         pGrid->B1i[k][je+j][i] = pGrid->B1i[k][je-(j-1)][i];
01164       }
01165     }
01166   }
01167 
01168 /* reflect normal component of B field */
01169   for (k=ks; k<=ke; k++) {
01170     for (i=is-nghost; i<=ie+nghost; i++) {
01171       pGrid->B2i[k][je+1][i] = 0.0;
01172     }
01173     for (j=2; j<=nghost; j++) {
01174       for (i=is-nghost; i<=ie+nghost; i++) {
01175         pGrid->B2i[k][je+j][i] = -pGrid->B2i[k][je-(j-2)][i];
01176       }
01177     }
01178   }
01179 
01180   if (pGrid->Nx[2] > 1) ku=ke+1; else ku=ke;
01181   for (k=ks; k<=ku; k++) {
01182     for (j=1; j<=nghost; j++) {
01183       for (i=is-nghost; i<=ie+nghost; i++) {
01184         pGrid->B3i[k][je+j][i] = pGrid->B3i[k][je-(j-1)][i];
01185       }
01186     }
01187   }
01188 #endif /* MHD */
01189 
01190   return;
01191 }
01192 
01193 /*----------------------------------------------------------------------------*/
01194 /*! \fn static void reflect_ix3(GridS *pGrid)
01195  *  \brief REFLECTING boundary conditions, Inner x3 boundary (bc_ix3=1) */
01196 
01197 static void reflect_ix3(GridS *pGrid)
01198 {
01199   int is = pGrid->is, ie = pGrid->ie;
01200   int js = pGrid->js, je = pGrid->je;
01201   int ks = pGrid->ks;
01202   int i,j,k;
01203 
01204   for (k=1; k<=nghost; k++) {
01205     for (j=js-nghost; j<=je+nghost; j++) {
01206       for (i=is-nghost; i<=ie+nghost; i++) {
01207         pGrid->U[ks-k][j][i]    =  pGrid->U[ks+(k-1)][j][i];
01208         pGrid->U[ks-k][j][i].M3 = -pGrid->U[ks-k][j][i].M3; /* reflect 3-mom. */
01209 #ifdef MHD
01210         pGrid->U[ks-k][j][i].B3c= -pGrid->U[ks-k][j][i].B3c;/* reflect 3-fld.*/
01211 #endif
01212       }
01213     }
01214   }
01215 
01216 #ifdef MHD
01217 /* B1i is not set at i=is-nghost */
01218   for (k=1; k<=nghost; k++) {
01219     for (j=js-nghost; j<=je+nghost; j++) {
01220       for (i=is-(nghost-1); i<=ie+nghost; i++) {
01221         pGrid->B1i[ks-k][j][i] = pGrid->B1i[ks+(k-1)][j][i];
01222       }
01223     }
01224   }
01225 
01226 /* B2i is not set at j=js-nghost */
01227   for (k=1; k<=nghost; k++) {
01228     for (j=js-(nghost-1); j<=je+nghost; j++) {
01229       for (i=is-nghost; i<=ie+nghost; i++) {
01230         pGrid->B2i[ks-k][j][i] = pGrid->B2i[ks+(k-1)][j][i];
01231       }
01232     }
01233   }
01234 
01235 /* reflect normal component of B field, B3i not set at k=ks-nghost */
01236   for (j=js-nghost; j<=je+nghost; j++) {
01237     for (i=is-nghost; i<=ie+nghost; i++) {
01238       pGrid->B3i[ks][j][i] = 0.0;
01239     }
01240   }
01241   for (k=1; k<=nghost-1; k++) {
01242     for (j=js-nghost; j<=je+nghost; j++) {
01243       for (i=is-nghost; i<=ie+nghost; i++) {
01244         pGrid->B3i[ks-k][j][i]   = -pGrid->B3i[ks+k][j][i];
01245       }
01246     }
01247   }
01248 #endif /* MHD */
01249 
01250   return;
01251 }
01252 
01253 /*----------------------------------------------------------------------------*/
01254 /*! \fn static void reflect_ox3(GridS *pGrid)
01255  *  \brief REFLECTING boundary conditions, Outer x3 boundary (bc_ox3=1) */
01256 
01257 static void reflect_ox3(GridS *pGrid)
01258 {
01259   int is = pGrid->is, ie = pGrid->ie;
01260   int js = pGrid->js, je = pGrid->je;
01261   int ke = pGrid->ke;
01262   int i,j,k;
01263 
01264   for (k=1; k<=nghost; k++) {
01265     for (j=js-nghost; j<=je+nghost; j++) {
01266       for (i=is-nghost; i<=ie+nghost; i++) {
01267         pGrid->U[ke+k][j][i]    =  pGrid->U[ke-(k-1)][j][i];
01268         pGrid->U[ke+k][j][i].M3 = -pGrid->U[ke+k][j][i].M3; /* reflect 3-mom. */
01269 #ifdef MHD
01270         pGrid->U[ke+k][j][i].B3c= -pGrid->U[ke+k][j][i].B3c;/* reflect 3-fld. */
01271 #endif
01272       }
01273     }
01274   }
01275 
01276 #ifdef MHD
01277 /* B1i is not set at i=is-nghost */
01278   for (k=1; k<=nghost; k++) {
01279     for (j=js-nghost; j<=je+nghost; j++) {
01280       for (i=is-(nghost-1); i<=ie+nghost; i++) {
01281         pGrid->B1i[ke+k][j][i] = pGrid->B1i[ke-(k-1)][j][i];
01282       }
01283     }
01284   }
01285 
01286 /* B2i is not set at j=js-nghost */
01287   for (k=1; k<=nghost; k++) {
01288     for (j=js-(nghost-1); j<=je+nghost; j++) {
01289       for (i=is-nghost; i<=ie+nghost; i++) {
01290         pGrid->B2i[ke+k][j][i] = pGrid->B2i[ke-(k-1)][j][i];
01291       }
01292     }
01293   }
01294 
01295 /* reflect normal component of B field */
01296   for (j=js-nghost; j<=je+nghost; j++) {
01297     for (i=is-nghost; i<=ie+nghost; i++) {
01298       pGrid->B3i[ke+1][j][i] = 0.0;
01299     }
01300   }
01301   for (k=2; k<=nghost; k++) {
01302     for (j=js-nghost; j<=je+nghost; j++) {
01303       for (i=is-nghost; i<=ie+nghost; i++) {
01304         pGrid->B3i[ke+k][j][i]   = -pGrid->B3i[ke-(k-2)][j][i];
01305       }
01306     }
01307   }
01308 #endif /* MHD */
01309 
01310   return;
01311 }
01312 
01313 /*----------------------------------------------------------------------------*/
01314 /*! \fn static void outflow_ix1(GridS *pGrid)
01315  *  \brief OUTFLOW boundary condition, Inner x1 boundary (bc_ix1=2) */
01316 
01317 static void outflow_ix1(GridS *pGrid)
01318 {
01319   int is = pGrid->is;
01320   int js = pGrid->js, je = pGrid->je;
01321   int ks = pGrid->ks, ke = pGrid->ke;
01322   int i,j,k;
01323 #ifdef MHD
01324   int ju, ku; /* j-upper, k-upper */
01325 #endif
01326 
01327   for (k=ks; k<=ke; k++) {
01328     for (j=js; j<=je; j++) {
01329       for (i=1; i<=nghost; i++) {
01330         pGrid->U[k][j][is-i] = pGrid->U[k][j][is];
01331       }
01332     }
01333   }
01334 
01335 #ifdef MHD
01336 /* B1i is not set at i=is-nghost */
01337   for (k=ks; k<=ke; k++) {
01338     for (j=js; j<=je; j++) {
01339       for (i=1; i<=nghost-1; i++) {
01340         pGrid->B1i[k][j][is-i] = pGrid->B1i[k][j][is];
01341       }
01342     }
01343   }
01344 
01345   if (pGrid->Nx[1] > 1) ju=je+1; else ju=je;
01346   for (k=ks; k<=ke; k++) {
01347     for (j=js; j<=ju; j++) {
01348       for (i=1; i<=nghost; i++) {
01349         pGrid->B2i[k][j][is-i] = pGrid->B2i[k][j][is];
01350       }
01351     }
01352   }
01353 
01354   if (pGrid->Nx[2] > 1) ku=ke+1; else ku=ke;
01355   for (k=ks; k<=ku; k++) {
01356     for (j=js; j<=je; j++) {
01357       for (i=1; i<=nghost; i++) {
01358         pGrid->B3i[k][j][is-i] = pGrid->B3i[k][j][is];
01359       }
01360     }
01361   }
01362 #endif /* MHD */
01363 
01364   return;
01365 }
01366 
01367 /*----------------------------------------------------------------------------*/
01368 /*! \fn static void outflow_ox1(GridS *pGrid)
01369  *  \brief OUTFLOW boundary conditions, Outer x1 boundary (bc_ox1=2) */
01370 
01371 static void outflow_ox1(GridS *pGrid)
01372 {
01373   int ie = pGrid->ie;
01374   int js = pGrid->js, je = pGrid->je;
01375   int ks = pGrid->ks, ke = pGrid->ke;
01376   int i,j,k;
01377 #ifdef MHD
01378   int ju, ku; /* j-upper, k-upper */
01379 #endif
01380 
01381   for (k=ks; k<=ke; k++) {
01382     for (j=js; j<=je; j++) {
01383       for (i=1; i<=nghost; i++) {
01384         pGrid->U[k][j][ie+i] = pGrid->U[k][j][ie];
01385       }
01386     }
01387   }
01388 
01389 #ifdef MHD
01390 /* i=ie+1 is not a boundary condition for the interface field B1i */
01391   for (k=ks; k<=ke; k++) {
01392     for (j=js; j<=je; j++) {
01393       for (i=2; i<=nghost; i++) {
01394         pGrid->B1i[k][j][ie+i] = pGrid->B1i[k][j][ie];
01395       }
01396     }
01397   }
01398 
01399   if (pGrid->Nx[1] > 1) ju=je+1; else ju=je;
01400   for (k=ks; k<=ke; k++) {
01401     for (j=js; j<=ju; j++) {
01402       for (i=1; i<=nghost; i++) {
01403         pGrid->B2i[k][j][ie+i] = pGrid->B2i[k][j][ie];
01404       }
01405     }
01406   }
01407 
01408   if (pGrid->Nx[2] > 1) ku=ke+1; else ku=ke;
01409   for (k=ks; k<=ku; k++) {
01410     for (j=js; j<=je; j++) {
01411       for (i=1; i<=nghost; i++) {
01412         pGrid->B3i[k][j][ie+i] = pGrid->B3i[k][j][ie];
01413       }
01414     }
01415   }
01416 #endif /* MHD */
01417 
01418   return;
01419 }
01420 
01421 /*----------------------------------------------------------------------------*/
01422 /*! \fn static void outflow_ix2(GridS *pGrid)
01423  *  \brief OUTFLOW boundary conditions, Inner x2 boundary (bc_ix2=2) */
01424 
01425 static void outflow_ix2(GridS *pGrid)
01426 {
01427   int is = pGrid->is, ie = pGrid->ie;
01428   int js = pGrid->js;
01429   int ks = pGrid->ks, ke = pGrid->ke;
01430   int i,j,k;
01431 #ifdef MHD
01432   int ku; /* k-upper */
01433 #endif
01434 
01435   for (k=ks; k<=ke; k++) {
01436     for (j=1; j<=nghost; j++) {
01437       for (i=is-nghost; i<=ie+nghost; i++) {
01438         pGrid->U[k][js-j][i] = pGrid->U[k][js][i];
01439       }
01440     }
01441   }
01442 
01443 #ifdef MHD
01444 /* B1i is not set at i=is-nghost */
01445   for (k=ks; k<=ke; k++) {
01446     for (j=1; j<=nghost; j++) {
01447       for (i=is-(nghost-1); i<=ie+nghost; i++) {
01448         pGrid->B1i[k][js-j][i] = pGrid->B1i[k][js][i];
01449       }
01450     }
01451   }
01452 
01453 /* B2i is not set at j=js-nghost */
01454   for (k=ks; k<=ke; k++) {
01455     for (j=1; j<=nghost-1; j++) {
01456       for (i=is-nghost; i<=ie+nghost; i++) {
01457         pGrid->B2i[k][js-j][i] = pGrid->B2i[k][js][i];
01458       }
01459     }
01460   }
01461 
01462   if (pGrid->Nx[2] > 1) ku=ke+1; else ku=ke;
01463   for (k=ks; k<=ku; k++) {
01464     for (j=1; j<=nghost; j++) {
01465       for (i=is-nghost; i<=ie+nghost; i++) {
01466         pGrid->B3i[k][js-j][i] = pGrid->B3i[k][js][i];
01467       }
01468     }
01469   }
01470 #endif /* MHD */
01471 
01472   return;
01473 }
01474 
01475 /*----------------------------------------------------------------------------*/
01476 /*! \fn static void outflow_ox2(GridS *pGrid)
01477  *  \brief OUTFLOW boundary conditions, Outer x2 boundary (bc_ox2=2) */
01478 
01479 static void outflow_ox2(GridS *pGrid)
01480 {
01481   int is = pGrid->is, ie = pGrid->ie;
01482   int je = pGrid->je;
01483   int ks = pGrid->ks, ke = pGrid->ke;
01484   int i,j,k;
01485 #ifdef MHD
01486   int ku; /* k-upper */
01487 #endif
01488 
01489   for (k=ks; k<=ke; k++) {
01490     for (j=1; j<=nghost; j++) {
01491       for (i=is-nghost; i<=ie+nghost; i++) {
01492         pGrid->U[k][je+j][i] = pGrid->U[k][je][i];
01493       }
01494     }
01495   }
01496 
01497 #ifdef MHD
01498 /* B1i is not set at i=is-nghost */
01499   for (k=ks; k<=ke; k++) {
01500     for (j=1; j<=nghost; j++) {
01501       for (i=is-(nghost-1); i<=ie+nghost; i++) {
01502         pGrid->B1i[k][je+j][i] = pGrid->B1i[k][je][i];
01503       }
01504     }
01505   }
01506 
01507 /* j=je+1 is not a boundary condition for the interface field B2i */
01508   for (k=ks; k<=ke; k++) {
01509     for (j=2; j<=nghost; j++) {
01510       for (i=is-nghost; i<=ie+nghost; i++) {
01511         pGrid->B2i[k][je+j][i] = pGrid->B2i[k][je][i];
01512       }
01513     }
01514   }
01515 
01516   if (pGrid->Nx[2] > 1) ku=ke+1; else ku=ke;
01517   for (k=ks; k<=ku; k++) {
01518     for (j=1; j<=nghost; j++) {
01519       for (i=is-nghost; i<=ie+nghost; i++) {
01520         pGrid->B3i[k][je+j][i] = pGrid->B3i[k][je][i];
01521       }
01522     }
01523   }
01524 #endif /* MHD */
01525 
01526   return;
01527 }
01528 
01529 /*----------------------------------------------------------------------------*/
01530 /*! \fn static void outflow_ix3(GridS *pGrid)
01531  *  \brief OUTFLOW boundary conditions, Inner x3 boundary (bc_ix3=2) */
01532 
01533 static void outflow_ix3(GridS *pGrid)
01534 {
01535   int is = pGrid->is, ie = pGrid->ie;
01536   int js = pGrid->js, je = pGrid->je;
01537   int ks = pGrid->ks;
01538   int i,j,k;
01539 
01540   for (k=1; k<=nghost; k++) {
01541     for (j=js-nghost; j<=je+nghost; j++) {
01542       for (i=is-nghost; i<=ie+nghost; i++) {
01543         pGrid->U[ks-k][j][i] = pGrid->U[ks][j][i];
01544       }
01545     }
01546   }
01547 
01548 #ifdef MHD
01549 /* B1i is not set at i=is-nghost */
01550   for (k=1; k<=nghost; k++) {
01551     for (j=js-nghost; j<=je+nghost; j++) {
01552       for (i=is-(nghost-1); i<=ie+nghost; i++) {
01553         pGrid->B1i[ks-k][j][i] = pGrid->B1i[ks][j][i];
01554       }
01555     }
01556   }
01557 
01558 /* B2i is not set at j=js-nghost */
01559   for (k=1; k<=nghost; k++) {
01560     for (j=js-(nghost-1); j<=je+nghost; j++) {
01561       for (i=is-nghost; i<=ie+nghost; i++) {
01562         pGrid->B2i[ks-k][j][i] = pGrid->B2i[ks][j][i];
01563       }
01564     }
01565   }
01566 
01567 /* B3i is not set at k=ks-nghost */
01568   for (k=1; k<=nghost-1; k++) {
01569     for (j=js-nghost; j<=je+nghost; j++) {
01570       for (i=is-nghost; i<=ie+nghost; i++) {
01571         pGrid->B3i[ks-k][j][i] = pGrid->B3i[ks][j][i];
01572       }
01573     }
01574   }
01575 #endif /* MHD */
01576 
01577   return;
01578 }
01579 
01580 /*----------------------------------------------------------------------------*/
01581 /*! \fn static void outflow_ox3(GridS *pGrid)
01582  *  \brief OUTFLOW boundary conditions, Outer x3 boundary (bc_ox3=2) */
01583 
01584 static void outflow_ox3(GridS *pGrid)
01585 {
01586   int is = pGrid->is, ie = pGrid->ie;
01587   int js = pGrid->js, je = pGrid->je;
01588   int ke = pGrid->ke;
01589   int i,j,k;
01590 
01591   for (k=1; k<=nghost; k++) {
01592     for (j=js-nghost; j<=je+nghost; j++) {
01593       for (i=is-nghost; i<=ie+nghost; i++) {
01594         pGrid->U[ke+k][j][i] = pGrid->U[ke][j][i];
01595       }
01596     }
01597   }
01598 
01599 #ifdef MHD
01600 /* B1i is not set at i=is-nghost */
01601   for (k=1; k<=nghost; k++) {
01602     for (j=js-nghost; j<=je+nghost; j++) {
01603       for (i=is-(nghost-1); i<=ie+nghost; i++) {
01604         pGrid->B1i[ke+k][j][i] = pGrid->B1i[ke][j][i];
01605       }
01606     }
01607   }
01608 
01609 /* B2i is not set at j=js-nghost */
01610   for (k=1; k<=nghost; k++) {
01611     for (j=js-(nghost-1); j<=je+nghost; j++) {
01612       for (i=is-nghost; i<=ie+nghost; i++) {
01613         pGrid->B2i[ke+k][j][i] = pGrid->B2i[ke][j][i];
01614       }
01615     }
01616   }
01617 
01618 /* k=ke+1 is not a boundary condition for the interface field B3i */
01619   for (k=2; k<=nghost; k++) {
01620     for (j=js-nghost; j<=je+nghost; j++) {
01621       for (i=is-nghost; i<=ie+nghost; i++) {
01622         pGrid->B3i[ke+k][j][i] = pGrid->B3i[ke][j][i];
01623       }
01624     }
01625   }
01626 #endif /* MHD */
01627 
01628   return;
01629 }
01630 
01631 /*----------------------------------------------------------------------------*/
01632 /*! \fn static void periodic_ix1(GridS *pGrid)
01633  *  \brief PERIODIC boundary conditions, Inner x1 boundary (bc_ix1=4) */
01634 
01635 static void periodic_ix1(GridS *pGrid)
01636 {
01637   int is = pGrid->is, ie = pGrid->ie;
01638   int js = pGrid->js, je = pGrid->je;
01639   int ks = pGrid->ks, ke = pGrid->ke;
01640   int i,j,k;
01641 #ifdef MHD
01642   int ju, ku; /* j-upper, k-upper */
01643 #endif
01644 
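      /* Periodic wrap in x1: ghost zones i=is-1,...,is-nghost are filled from
       * the active zones i=ie,...,ie-(nghost-1) next to the outer-x1 edge. */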
01645   for (k=ks; k<=ke; k++) {
01646     for (j=js; j<=je; j++) {
01647       for (i=1; i<=nghost; i++) {
01648         pGrid->U[k][j][is-i] = pGrid->U[k][j][ie-(i-1)];
01649       }
01650     }
01651   }
01652 
01653 #ifdef MHD
01654 /* B1i is not set at i=is-nghost */
01655   for (k=ks; k<=ke; k++) {
01656     for (j=js; j<=je; j++) {
01657       for (i=1; i<=nghost-1; i++) {
01658         pGrid->B1i[k][j][is-i] = pGrid->B1i[k][j][ie-(i-1)];
01659       }
01660     }
01661   }
01662 
01663   if (pGrid->Nx[1] > 1) ju=je+1; else ju=je;
01664   for (k=ks; k<=ke; k++) {
01665     for (j=js; j<=ju; j++) {
01666       for (i=1; i<=nghost; i++) {
01667         pGrid->B2i[k][j][is-i] = pGrid->B2i[k][j][ie-(i-1)];
01668       }
01669     }
01670   }
01671 
01672   if (pGrid->Nx[2] > 1) ku=ke+1; else ku=ke;
01673   for (k=ks; k<=ku; k++) {
01674     for (j=js; j<=je; j++) {
01675       for (i=1; i<=nghost; i++) {
01676         pGrid->B3i[k][j][is-i] = pGrid->B3i[k][j][ie-(i-1)];
01677       }
01678     }
01679   }
01680 #endif /* MHD */
01681 
01682   return;
01683 }
01684 
01685 /*----------------------------------------------------------------------------*/
01686 /*! \fn static void periodic_ox1(GridS *pGrid)
01687  *  \brief PERIODIC boundary conditions (cont), Outer x1 boundary (bc_ox1=4) */
01688 
01689 static void periodic_ox1(GridS *pGrid)
01690 {
01691   int is = pGrid->is, ie = pGrid->ie;
01692   int js = pGrid->js, je = pGrid->je;
01693   int ks = pGrid->ks, ke = pGrid->ke;
01694   int i,j,k;
01695 #ifdef MHD
01696   int ju, ku; /* j-upper, k-upper */
01697 #endif
01698 
01699   for (k=ks; k<=ke; k++) {
01700     for (j=js; j<=je; j++) {
01701       for (i=1; i<=nghost; i++) {
01702         pGrid->U[k][j][ie+i] = pGrid->U[k][j][is+(i-1)];
01703       }
01704     }
01705   }
01706 
01707 #ifdef MHD
01708 /* B1i is not set at i=ie+1 */
01709   for (k=ks; k<=ke; k++) {
01710     for (j=js; j<=je; j++) {
01711       for (i=2; i<=nghost; i++) {
01712         pGrid->B1i[k][j][ie+i] = pGrid->B1i[k][j][is+(i-1)];
01713       }
01714     }
01715   }
01716 
01717   if (pGrid->Nx[1] > 1) ju=je+1; else ju=je;
01718   for (k=ks; k<=ke; k++) {
01719     for (j=js; j<=ju; j++) {
01720       for (i=1; i<=nghost; i++) {
01721         pGrid->B2i[k][j][ie+i] = pGrid->B2i[k][j][is+(i-1)];
01722       }
01723     }
01724   }
01725 
01726   if (pGrid->Nx[2] > 1) ku=ke+1; else ku=ke;
01727   for (k=ks; k<=ku; k++) {
01728     for (j=js; j<=je; j++) {
01729       for (i=1; i<=nghost; i++) {
01730         pGrid->B3i[k][j][ie+i] = pGrid->B3i[k][j][is+(i-1)];
01731       }
01732     }
01733   }
01734 #endif /* MHD */
01735 
01736   return;
01737 }
01738 
01739 /*----------------------------------------------------------------------------*/
01740 /*! \fn static void periodic_ix2(GridS *pGrid)
01741  *  \brief PERIODIC boundary conditions (cont), Inner x2 boundary (bc_ix2=4) */
01742 
01743 static void periodic_ix2(GridS *pGrid)
01744 {
01745   int is = pGrid->is, ie = pGrid->ie;
01746   int js = pGrid->js, je = pGrid->je;
01747   int ks = pGrid->ks, ke = pGrid->ke;
01748   int i,j,k;
01749 #ifdef MHD
01750   int ku; /* k-upper */
01751 #endif
01752 
01753   for (k=ks; k<=ke; k++) {
01754     for (j=1; j<=nghost; j++) {
01755       for (i=is-nghost; i<=ie+nghost; i++) {
01756         pGrid->U[k][js-j][i] = pGrid->U[k][je-(j-1)][i];
01757       }
01758     }
01759   }
01760 
01761 #ifdef MHD
01762 /* B1i is not set at i=is-nghost */
01763   for (k=ks; k<=ke; k++) {
01764     for (j=1; j<=nghost; j++) {
01765       for (i=is-(nghost-1); i<=ie+nghost; i++) {
01766         pGrid->B1i[k][js-j][i] = pGrid->B1i[k][je-(j-1)][i];
01767       }
01768     }
01769   }
01770 
01771 /* B2i is not set at j=js-nghost */
01772   for (k=ks; k<=ke; k++) {
01773     for (j=1; j<=nghost-1; j++) {
01774       for (i=is-nghost; i<=ie+nghost; i++) {
01775         pGrid->B2i[k][js-j][i] = pGrid->B2i[k][je-(j-1)][i];
01776       }
01777     }
01778   }
01779 
01780   if (pGrid->Nx[2] > 1) ku=ke+1; else ku=ke;
01781   for (k=ks; k<=ku; k++) {
01782     for (j=1; j<=nghost; j++) {
01783       for (i=is-nghost; i<=ie+nghost; i++) {
01784         pGrid->B3i[k][js-j][i] = pGrid->B3i[k][je-(j-1)][i];
01785       }
01786     }
01787   }
01788 #endif /* MHD */
01789 
01790   return;
01791 }
01792 
01793 /*----------------------------------------------------------------------------*/
01794 /*! \fn static void periodic_ox2(GridS *pGrid)
01795  *  \brief PERIODIC boundary conditions (cont), Outer x2 boundary (bc_ox2=4) */
01796 
01797 static void periodic_ox2(GridS *pGrid)
01798 {
01799   int is = pGrid->is, ie = pGrid->ie;
01800   int js = pGrid->js, je = pGrid->je;
01801   int ks = pGrid->ks, ke = pGrid->ke;
01802   int i,j,k;
01803 #ifdef MHD
01804   int ku; /* k-upper */
01805 #endif
01806 
01807   for (k=ks; k<=ke; k++) {
01808     for (j=1; j<=nghost; j++) {
01809       for (i=is-nghost; i<=ie+nghost; i++) {
01810         pGrid->U[k][je+j][i] = pGrid->U[k][js+(j-1)][i];
01811       }
01812     }
01813   }
01814 
01815 #ifdef MHD
01816 /* B1i is not set at i=is-nghost */
01817   for (k=ks; k<=ke; k++) {
01818     for (j=1; j<=nghost; j++) {
01819       for (i=is-(nghost-1); i<=ie+nghost; i++) {
01820         pGrid->B1i[k][je+j][i] = pGrid->B1i[k][js+(j-1)][i];
01821       }
01822     }
01823   }
01824 
01825 /* B2i is not set at j=je+1 */
01826   for (k=ks; k<=ke; k++) {
01827     for (j=2; j<=nghost; j++) {
01828       for (i=is-nghost; i<=ie+nghost; i++) {
01829         pGrid->B2i[k][je+j][i] = pGrid->B2i[k][js+(j-1)][i];
01830       }
01831     }
01832   }
01833 
01834   if (pGrid->Nx[2] > 1) ku=ke+1; else ku=ke;
01835   for (k=ks; k<=ku; k++) {
01836     for (j=1; j<=nghost; j++) {
01837       for (i=is-nghost; i<=ie+nghost; i++) {
01838         pGrid->B3i[k][je+j][i] = pGrid->B3i[k][js+(j-1)][i];
01839       }
01840     }
01841   }
01842 #endif /* MHD */
01843 
01844   return;
01845 }
01846 
01847 /*----------------------------------------------------------------------------*/
01848 /*! \fn static void periodic_ix3(GridS *pGrid)
01849  *  \brief PERIODIC boundary conditions (cont), Inner x3 boundary (bc_ix3=4) */
01850 
01851 static void periodic_ix3(GridS *pGrid)
01852 {
01853   int is = pGrid->is, ie = pGrid->ie;
01854   int js = pGrid->js, je = pGrid->je;
01855   int ks = pGrid->ks, ke = pGrid->ke;
01856   int i,j,k;
01857 
01858   for (k=1; k<=nghost; k++) {
01859     for (j=js-nghost; j<=je+nghost; j++) {
01860       for (i=is-nghost; i<=ie+nghost; i++) {
01861         pGrid->U[ks-k][j][i] = pGrid->U[ke-(k-1)][j][i];
01862       }
01863     }
01864   }
01865 
01866 #ifdef MHD
01867 /* B1i is not set at i=is-nghost */
01868   for (k=1; k<=nghost; k++) {
01869     for (j=js-nghost; j<=je+nghost; j++) {
01870       for (i=is-(nghost-1); i<=ie+nghost; i++) {
01871         pGrid->B1i[ks-k][j][i] = pGrid->B1i[ke-(k-1)][j][i];
01872       }
01873     }
01874   }
01875 
01876 /* B2i is not set at j=js-nghost */
01877   for (k=1; k<=nghost; k++) {
01878     for (j=js-(nghost-1); j<=je+nghost; j++) {
01879       for (i=is-nghost; i<=ie+nghost; i++) {
01880         pGrid->B2i[ks-k][j][i] = pGrid->B2i[ke-(k-1)][j][i];
01881       }
01882     }
01883   }
01884 
01885 /* B3i is not set at k=ks-nghost */
01886   for (k=1; k<=nghost-1; k++) {
01887     for (j=js-nghost; j<=je+nghost; j++) {
01888       for (i=is-nghost; i<=ie+nghost; i++) {
01889         pGrid->B3i[ks-k][j][i] = pGrid->B3i[ke-(k-1)][j][i];
01890       }
01891     }
01892   }
01893 #endif /* MHD */
01894 
01895   return;
01896 }
01897 
01898 /*----------------------------------------------------------------------------*/
01899 /*! \fn static void periodic_ox3(GridS *pGrid)
01900  *  \brief PERIODIC boundary conditions (cont), Outer x3 boundary (bc_ox3=4) */
01901 
01902 static void periodic_ox3(GridS *pGrid)
01903 {
01904   int is = pGrid->is, ie = pGrid->ie;
01905   int js = pGrid->js, je = pGrid->je;
01906   int ks = pGrid->ks, ke = pGrid->ke;
01907   int i,j,k;
01908 
01909   for (k=1; k<=nghost; k++) {
01910     for (j=js-nghost; j<=je+nghost; j++) {
01911       for (i=is-nghost; i<=ie+nghost; i++) {
01912         pGrid->U[ke+k][j][i] = pGrid->U[ks+(k-1)][j][i];
01913       }
01914     }
01915   }
01916 
01917 #ifdef MHD
01918 /* B1i is not set at i=is-nghost */
01919   for (k=1; k<=nghost; k++) {
01920     for (j=js-nghost; j<=je+nghost; j++) {
01921       for (i=is-(nghost-1); i<=ie+nghost; i++) {
01922         pGrid->B1i[ke+k][j][i] = pGrid->B1i[ks+(k-1)][j][i];
01923       }
01924     }
01925   }
01926 
01927 /* B2i is not set at j=js-nghost */
01928   for (k=1; k<=nghost; k++) {
01929     for (j=js-(nghost-1); j<=je+nghost; j++) {
01930       for (i=is-nghost; i<=ie+nghost; i++) {
01931         pGrid->B2i[ke+k][j][i] = pGrid->B2i[ks+(k-1)][j][i];
01932       }
01933     }
01934   }
01935 
01936 /* B3i is not set at k=ke+1 */
01937   for (k=2; k<=nghost; k++) {
01938     for (j=js-nghost; j<=je+nghost; j++) {
01939       for (i=is-nghost; i<=ie+nghost; i++) {
01940         pGrid->B3i[ke+k][j][i] = pGrid->B3i[ks+(k-1)][j][i];
01941       }
01942     }
01943   }
01944 #endif /* MHD */
01945 
01946   return;
01947 }
01948 
01949 /*----------------------------------------------------------------------------*/
01950 /*! \fn static void conduct_ix1(GridS *pGrid)
01951  *  \brief CONDUCTOR boundary conditions, Inner x1 boundary (bc_ix1=5) */
01952 
01953 static void conduct_ix1(GridS *pGrid)
01954 {
01955   int is = pGrid->is;
01956   int js = pGrid->js, je = pGrid->je;
01957   int ks = pGrid->ks, ke = pGrid->ke;
01958   int i,j,k;
01959 #ifdef MHD
01960   int ju,ku; /* j-upper, k-upper */
01961 #endif
01962 
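      /* Conductor BC: ghost zones mirror the interior, the normal momentum M1
       * changes sign (no flow through the wall), and the tangential field
       * components (B2, B3) change sign, while the normal interface field B1i
       * is copied symmetrically across the face at i=is. */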
01963   for (k=ks; k<=ke; k++) {
01964     for (j=js; j<=je; j++) {
01965       for (i=1; i<=nghost; i++) {
01966         pGrid->U[k][j][is-i]    =  pGrid->U[k][j][is+(i-1)];
01967         pGrid->U[k][j][is-i].M1 = -pGrid->U[k][j][is-i].M1; /* reflect 1-mom. */
01968 #ifdef MHD
01969         pGrid->U[k][j][is-i].B2c= -pGrid->U[k][j][is-i].B2c;/* reflect fld */
01970         pGrid->U[k][j][is-i].B3c= -pGrid->U[k][j][is-i].B3c;
01971 #endif
01972       }
01973     }
01974   }
01975 
01976 #ifdef MHD
01977 /* B1i not set at i=is-nghost */
01978   for (k=ks; k<=ke; k++) {
01979     for (j=js; j<=je; j++) {
01980       for (i=1; i<=nghost-1; i++) {
01981         pGrid->B1i[k][j][is-i] = pGrid->B1i[k][j][is+i];
01982       }
01983     }
01984   }
01985 
01986   if (pGrid->Nx[1] > 1) ju=je+1; else ju=je;
01987   for (k=ks; k<=ke; k++) {
01988     for (j=js; j<=ju; j++) {
01989       for (i=1; i<=nghost; i++) {
01990         pGrid->B2i[k][j][is-i]   = -pGrid->B2i[k][j][is+(i-1)];
01991       }
01992     }
01993   }
01994 
01995   if (pGrid->Nx[2] > 1) ku=ke+1; else ku=ke;
01996   for (k=ks; k<=ku; k++) {
01997     for (j=js; j<=je; j++) {
01998       for (i=1; i<=nghost; i++) {
01999         pGrid->B3i[k][j][is-i]   = -pGrid->B3i[k][j][is+(i-1)];
02000       }
02001     }
02002   }
02003 #endif /* MHD */
02004 
02005   return;
02006 }
02007 
02008 /*----------------------------------------------------------------------------*/
02009 /*! \fn static void conduct_ox1(GridS *pGrid)
02010  *  \brief CONDUCTOR boundary conditions, Outer x1 boundary (bc_ox1=5) */
02011 
02012 static void conduct_ox1(GridS *pGrid)
02013 {
02014   int ie = pGrid->ie;
02015   int js = pGrid->js, je = pGrid->je;
02016   int ks = pGrid->ks, ke = pGrid->ke;
02017   int i,j,k;
02018 #ifdef MHD
02019   int ju,ku; /* j-upper, k-upper */
02020 #endif
02021 
02022   for (k=ks; k<=ke; k++) {
02023     for (j=js; j<=je; j++) {
02024       for (i=1; i<=nghost; i++) {
02025         pGrid->U[k][j][ie+i]    =  pGrid->U[k][j][ie-(i-1)];
02026         pGrid->U[k][j][ie+i].M1 = -pGrid->U[k][j][ie+i].M1; /* reflect 1-mom. */
02027 #ifdef MHD
02028         pGrid->U[k][j][ie+i].B2c= -pGrid->U[k][j][ie+i].B2c;/* reflect fld */
02029         pGrid->U[k][j][ie+i].B3c= -pGrid->U[k][j][ie+i].B3c;
02030 #endif
02031       }
02032     }
02033   }
02034 
02035 #ifdef MHD
02036 /* i=ie+1 is not set for the interface field B1i */
02037   for (k=ks; k<=ke; k++) {
02038     for (j=js; j<=je; j++) {
02039       for (i=2; i<=nghost; i++) {
02040         pGrid->B1i[k][j][ie+i] = pGrid->B1i[k][j][ie-(i-2)];
02041       }
02042     }
02043   }
02044 
02045   if (pGrid->Nx[1] > 1) ju=je+1; else ju=je;
02046   for (k=ks; k<=ke; k++) {
02047     for (j=js; j<=ju; j++) {
02048       for (i=1; i<=nghost; i++) {
02049         pGrid->B2i[k][j][ie+i]   = -pGrid->B2i[k][j][ie-(i-1)];
02050       }
02051     }
02052   }
02053 
02054   if (pGrid->Nx[2] > 1) ku=ke+1; else ku=ke;
02055   for (k=ks; k<=ku; k++) {
02056     for (j=js; j<=je; j++) {
02057       for (i=1; i<=nghost; i++) {
02058         pGrid->B3i[k][j][ie+i]   = -pGrid->B3i[k][j][ie-(i-1)];
02059       }
02060     }
02061   }
02062 #endif /* MHD */
02063 
02064   return;
02065 }
02066 
02067 /*----------------------------------------------------------------------------*/
02068 /*! \fn static void conduct_ix2(GridS *pGrid)
02069  *  \brief CONDUCTOR boundary conditions, Inner x2 boundary (bc_ix2=5) */
02070 
02071 static void conduct_ix2(GridS *pGrid)
02072 {
02073   int is = pGrid->is, ie = pGrid->ie;
02074   int js = pGrid->js;
02075   int ks = pGrid->ks, ke = pGrid->ke;
02076   int i,j,k;
02077 #ifdef MHD
02078   int ku; /* k-upper */
02079 #endif
02080 
02081   for (k=ks; k<=ke; k++) {
02082     for (j=1; j<=nghost; j++) {
02083       for (i=is-nghost; i<=ie+nghost; i++) {
02084         pGrid->U[k][js-j][i]    =  pGrid->U[k][js+(j-1)][i];
02085         pGrid->U[k][js-j][i].M2 = -pGrid->U[k][js-j][i].M2; /* reflect 2-mom. */
02086 #ifdef MHD
02087         pGrid->U[k][js-j][i].B1c= -pGrid->U[k][js-j][i].B1c;/* reflect fld */
02088         pGrid->U[k][js-j][i].B3c= -pGrid->U[k][js-j][i].B3c;
02089 #endif
02090       }
02091     }
02092   }
02093 
02094 #ifdef MHD
02095 /* B1i is not set at i=is-nghost */
02096   for (k=ks; k<=ke; k++) {
02097     for (j=1; j<=nghost; j++) {
02098       for (i=is-(nghost-1); i<=ie+nghost; i++) {
02099         pGrid->B1i[k][js-j][i] = -pGrid->B1i[k][js+(j-1)][i];
02100       }
02101     }
02102   }
02103 
02104 /* B2i not set at j=js-nghost */
02105   for (k=ks; k<=ke; k++) {
02106     for (j=1; j<=nghost-1; j++) {
02107       for (i=is-nghost; i<=ie+nghost; i++) {
02108         pGrid->B2i[k][js-j][i] = pGrid->B2i[k][js+j][i];
02109       }
02110     }
02111   }
02112 
02113   if (pGrid->Nx[2] > 1) ku=ke+1; else ku=ke;
02114   for (k=ks; k<=ku; k++) {
02115     for (j=1; j<=nghost; j++) {
02116       for (i=is-nghost; i<=ie+nghost; i++) {
02117         pGrid->B3i[k][js-j][i] = -pGrid->B3i[k][js+(j-1)][i];
02118       }
02119     }
02120   }
02121 #endif /* MHD */
02122 
02123   return;
02124 }
02125 
02126 /*----------------------------------------------------------------------------*/
02127 /*! \fn static void conduct_ox2(GridS *pGrid)
02128  *  \brief CONDUCTOR boundary conditions, Outer x2 boundary (bc_ox2=5) */
02129 
02130 static void conduct_ox2(GridS *pGrid)
02131 {
02132   int is = pGrid->is, ie = pGrid->ie;
02133   int je = pGrid->je;
02134   int ks = pGrid->ks, ke = pGrid->ke;
02135   int i,j,k;
02136 #ifdef MHD
02137   int ku; /* k-upper */
02138 #endif
02139 
02140   for (k=ks; k<=ke; k++) {
02141     for (j=1; j<=nghost; j++) {
02142       for (i=is-nghost; i<=ie+nghost; i++) {
02143         pGrid->U[k][je+j][i]    =  pGrid->U[k][je-(j-1)][i];
02144         pGrid->U[k][je+j][i].M2 = -pGrid->U[k][je+j][i].M2; /* reflect 2-mom. */
02145 #ifdef MHD
02146         pGrid->U[k][je+j][i].B1c= -pGrid->U[k][je+j][i].B1c;/* reflect fld */
02147         pGrid->U[k][je+j][i].B3c= -pGrid->U[k][je+j][i].B3c;
02148 #endif
02149       }
02150     }
02151   }
02152 
02153 #ifdef MHD
02154 /* B1i is not set at i=is-nghost */
02155   for (k=ks; k<=ke; k++) {
02156     for (j=1; j<=nghost; j++) {
02157       for (i=is-(nghost-1); i<=ie+nghost; i++) {
02158         pGrid->B1i[k][je+j][i]   = -pGrid->B1i[k][je-(j-1)][i];
02159       }
02160     }
02161   }
02162 
02163 /* j=je+1 is not set for the interface field B2i */
02164   for (k=ks; k<=ke; k++) {
02165     for (j=2; j<=nghost; j++) {
02166       for (i=is-nghost; i<=ie+nghost; i++) {
02167         pGrid->B2i[k][je+j][i]   = pGrid->B2i[k][je-(j-2)][i];
02168       }
02169     }
02170   }
02171 
02172   if (pGrid->Nx[2] > 1) ku=ke+1; else ku=ke;
02173   for (k=ks; k<=ku; k++) {
02174     for (j=1; j<=nghost; j++) {
02175       for (i=is-nghost; i<=ie+nghost; i++) {
02176         pGrid->B3i[k][je+j][i]   = -pGrid->B3i[k][je-(j-1)][i];
02177       }
02178     }
02179   }
02180 #endif /* MHD */
02181 
02182   return;
02183 }
02184 
02185 /*----------------------------------------------------------------------------*/
02186 /*! \fn static void conduct_ix3(GridS *pGrid)
02187  *  \brief CONDUCTOR boundary conditions, Inner x3 boundary (bc_ix3=5) */
02188 
02189 static void conduct_ix3(GridS *pGrid)
02190 {
02191   int is = pGrid->is, ie = pGrid->ie;
02192   int js = pGrid->js, je = pGrid->je;
02193   int ks = pGrid->ks;
02194   int i,j,k;
02195 
02196   for (k=1; k<=nghost; k++) {
02197     for (j=js-nghost; j<=je+nghost; j++) {
02198       for (i=is-nghost; i<=ie+nghost; i++) {
02199         pGrid->U[ks-k][j][i]    =  pGrid->U[ks+(k-1)][j][i];
02200         pGrid->U[ks-k][j][i].M3 = -pGrid->U[ks-k][j][i].M3; /* reflect 3-mom. */
02201 #ifdef MHD
02202         pGrid->U[ks-k][j][i].B1c= -pGrid->U[ks-k][j][i].B1c;/* reflect fld */
02203         pGrid->U[ks-k][j][i].B2c= -pGrid->U[ks-k][j][i].B2c;
02204 #endif
02205       }
02206     }
02207   }
02208 
02209 #ifdef MHD
02210 /* B1i is not set at i=is-nghost */
02211   for (k=1; k<=nghost; k++) {
02212     for (j=js-nghost; j<=je+nghost; j++) {
02213       for (i=is-(nghost-1); i<=ie+nghost; i++) {
02214         pGrid->B1i[ks-k][j][i]   = -pGrid->B1i[ks+(k-1)][j][i];
02215       }
02216     }
02217   }
02218 
02219 /* B2i is not set at j=js-nghost */
02220   for (k=1; k<=nghost; k++) {
02221     for (j=js-(nghost-1); j<=je+nghost; j++) {
02222       for (i=is-nghost; i<=ie+nghost; i++) {
02223         pGrid->B2i[ks-k][j][i]   = -pGrid->B2i[ks+(k-1)][j][i];
02224       }
02225     }
02226   }
02227 
02228 /* B3i is not set at k=ks-nghost */
02229   for (k=1; k<=nghost-1; k++) {
02230     for (j=js-nghost; j<=je+nghost; j++) {
02231       for (i=is-nghost; i<=ie+nghost; i++) {
02232         pGrid->B3i[ks-k][j][i] = pGrid->B3i[ks+k][j][i];
02233       }
02234     }
02235   }
02236 #endif /* MHD */
02237 
02238   return;
02239 }
02240 
02241 /*----------------------------------------------------------------------------*/
02242 /*! \fn static void conduct_ox3(GridS *pGrid)
02243  *  \brief CONDUCTOR boundary conditions, Outer x3 boundary (bc_ox3=5) */
02244 
02245 static void conduct_ox3(GridS *pGrid)
02246 {
02247   int is = pGrid->is, ie = pGrid->ie;
02248   int js = pGrid->js, je = pGrid->je;
02249   int ke = pGrid->ke;
02250   int i,j,k;
02251 
02252   for (k=1; k<=nghost; k++) {
02253     for (j=js-nghost; j<=je+nghost; j++) {
02254       for (i=is-nghost; i<=ie+nghost; i++) {
02255         pGrid->U[ke+k][j][i]    =  pGrid->U[ke-(k-1)][j][i];
02256         pGrid->U[ke+k][j][i].M3 = -pGrid->U[ke+k][j][i].M3; /* reflect 3-mom. */
02257 #ifdef MHD
02258         pGrid->U[ke+k][j][i].B1c= -pGrid->U[ke+k][j][i].B1c;/* reflect fld */
02259         pGrid->U[ke+k][j][i].B2c= -pGrid->U[ke+k][j][i].B2c;
02260 #endif
02261       }
02262     }
02263   }
02264 
02265 #ifdef MHD
02266 /* B1i is not set at i=is-nghost */
02267   for (k=1; k<=nghost; k++) {
02268     for (j=js-nghost; j<=je+nghost; j++) {
02269       for (i=is-(nghost-1); i<=ie+nghost; i++) {
02270         pGrid->B1i[ke+k][j][i]   = -pGrid->B1i[ke-(k-1)][j][i];
02271       }
02272     }
02273   }
02274 
02275 /* B2i is not set at j=js-nghost */
02276   for (k=1; k<=nghost; k++) {
02277     for (j=js-(nghost-1); j<=je+nghost; j++) {
02278       for (i=is-nghost; i<=ie+nghost; i++) {
02279         pGrid->B2i[ke+k][j][i]   = -pGrid->B2i[ke-(k-1)][j][i];
02280       }
02281     }
02282   }
02283 
02284 /* k=ke+1 is not set for the interface field B3i */
02285   for (k=2; k<=nghost; k++) {
02286     for (j=js-nghost; j<=je+nghost; j++) {
02287       for (i=is-nghost; i<=ie+nghost; i++) {
02288         pGrid->B3i[ke+k][j][i]   = pGrid->B3i[ke-(k-2)][j][i];
02289       }
02290     }
02291   }
02292 #endif /* MHD */
02293 
02294   return;
02295 }
02296 
02297 /*----------------------------------------------------------------------------*/
02298 /*! \fn static void ProlongateLater(GridS *pGrid)
02299  *  \brief PROLONGATION boundary conditions.  
02300  *
02301  *  Nothing is actually done here; the prolongation is handled by
02302  * ProlongateGhostZones() in the main loop, so this is just a no-op Grid
02303  * function.  */
02304 
02305 static void ProlongateLater(GridS *pGrid)
02306 {
02307   return;
02308 }
02309 
02310 #ifdef MPI_PARALLEL  /* This ifdef wraps the next 12 funs; ~800 lines */
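      /* A minimal sketch of the assumed calling pattern around one pack/unpack
       * pair (this is not the actual driver code; cnt, leftID, tag, comm,
       * rq_in, rq_out and stat are placeholders):
       *
       *   MPI_Irecv(&(recv_buf[0][0]), cnt, MPI_DOUBLE, leftID, tag, comm, &rq_in);
       *   pack_ix1(pG);
       *   MPI_Isend(&(send_buf[0][0]), cnt, MPI_DOUBLE, leftID, tag, comm, &rq_out);
       *   MPI_Wait(&rq_in, &stat);
       *   unpack_ix1(pG);
       *
       * where cnt must equal the number of doubles written by pack_ix1(). */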
02311 /*----------------------------------------------------------------------------*/
02312 /*! \fn static void pack_ix1(GridS *pG)
02313  *  \brief PACK boundary conditions for MPI_Isend, Inner x1 boundary */
02314 
02315 static void pack_ix1(GridS *pG)
02316 {
02317   int is = pG->is, ie = pG->ie;
02318   int js = pG->js, je = pG->je;
02319   int ks = pG->ks, ke = pG->ke;
02320   int i,j,k;
02321 #ifdef MHD
02322   int ju, ku; /* j-upper, k-upper */
02323 #endif
02324 #if (NSCALARS > 0)
02325   int n;
02326 #endif
02327   double *pSnd;
02328   pSnd = (double*)&(send_buf[0][0]);
02329 
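      /* The packing order below (cell-centered variables, then B1i, B2i, B3i)
       * and the per-field index ranges must match, double for double, the
       * unpacking order in unpack_ox1() on the receiving Grid. */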
02330   for (k=ks; k<=ke; k++){
02331     for (j=js; j<=je; j++){
02332       for (i=is; i<=is+(nghost-1); i++){
02333         *(pSnd++) = pG->U[k][j][i].d;
02334         *(pSnd++) = pG->U[k][j][i].M1;
02335         *(pSnd++) = pG->U[k][j][i].M2;
02336         *(pSnd++) = pG->U[k][j][i].M3;
02337 #ifndef BAROTROPIC
02338         *(pSnd++) = pG->U[k][j][i].E;
02339 #endif /* BAROTROPIC */
02340 #ifdef MHD
02341         *(pSnd++) = pG->U[k][j][i].B1c;
02342         *(pSnd++) = pG->U[k][j][i].B2c;
02343         *(pSnd++) = pG->U[k][j][i].B3c;
02344 #endif /* MHD */
02345 #if (NSCALARS > 0)
02346         for (n=0; n<NSCALARS; n++) *(pSnd++) = pG->U[k][j][i].s[n];
02347 #endif
02348       }
02349     }
02350   }
02351 
02352 #ifdef MHD
02353 /* B1i at i=is maps to B1i at i=ie+1 and is not passed */
02354   for (k=ks; k<=ke; k++){
02355     for (j=js; j<=je; j++){
02356       for (i=is+1; i<=is+(nghost-1); i++){
02357         *(pSnd++) = pG->B1i[k][j][i];
02358       }
02359     }
02360   }
02361 
02362   if (pG->Nx[1] > 1) ju=je+1; else ju=je;
02363   for (k=ks; k<=ke; k++){
02364     for (j=js; j<=ju; j++){
02365       for (i=is; i<=is+(nghost-1); i++){
02366         *(pSnd++) = pG->B2i[k][j][i];
02367       }
02368     }
02369   }
02370 
02371   if (pG->Nx[2] > 1) ku=ke+1; else ku=ke;
02372   for (k=ks; k<=ku; k++){
02373     for (j=js; j<=je; j++){
02374       for (i=is; i<=is+(nghost-1); i++){
02375         *(pSnd++) = pG->B3i[k][j][i];
02376       }
02377     }
02378   }
02379 #endif
02380 
02381   return;
02382 }
02383 
02384 /*----------------------------------------------------------------------------*/
02385 /*! \fn static void pack_ox1(GridS *pG)
02386  *  \brief PACK boundary conditions for MPI_Isend, Outer x1 boundary */
02387 
02388 static void pack_ox1(GridS *pG)
02389 {
02390   int is = pG->is, ie = pG->ie;
02391   int js = pG->js, je = pG->je;
02392   int ks = pG->ks, ke = pG->ke;
02393   int i,j,k;
02394 #ifdef MHD
02395   int ju, ku; /* j-upper, k-upper */
02396 #endif
02397 #if (NSCALARS > 0)
02398   int n;
02399 #endif
02400   double *pSnd;
02401   pSnd = (double*)&(send_buf[1][0]);
02402 
02403   for (k=ks; k<=ke; k++){
02404     for (j=js; j<=je; j++){
02405       for (i=ie-(nghost-1); i<=ie; i++){
02406         *(pSnd++) = pG->U[k][j][i].d;
02407         *(pSnd++) = pG->U[k][j][i].M1;
02408         *(pSnd++) = pG->U[k][j][i].M2;
02409         *(pSnd++) = pG->U[k][j][i].M3;
02410 #ifndef BAROTROPIC
02411         *(pSnd++) = pG->U[k][j][i].E;
02412 #endif /* BAROTROPIC */
02413 #ifdef MHD
02414         *(pSnd++) = pG->U[k][j][i].B1c;
02415         *(pSnd++) = pG->U[k][j][i].B2c;
02416         *(pSnd++) = pG->U[k][j][i].B3c;
02417 #endif /* MHD */
02418 #if (NSCALARS > 0)
02419         for (n=0; n<NSCALARS; n++) *(pSnd++) = pG->U[k][j][i].s[n];
02420 #endif
02421       }
02422     }
02423   }
02424 
02425 #ifdef MHD
02426 /* B1i at i=ie-(nghost-1) maps to B1i at i=is-nghost and is not passed */
02427   for (k=ks; k<=ke; k++){
02428     for (j=js; j<=je; j++){
02429       for (i=ie-(nghost-2); i<=ie; i++){
02430         *(pSnd++) = pG->B1i[k][j][i];
02431       }
02432     }
02433   }
02434 
02435   if (pG->Nx[1] > 1) ju=je+1; else ju=je;
02436   for (k=ks; k<=ke; k++){
02437     for (j=js; j<=ju; j++){
02438       for (i=ie-(nghost-1); i<=ie; i++){
02439         *(pSnd++) = pG->B2i[k][j][i];
02440       }
02441     }
02442   }
02443 
02444   if (pG->Nx[2] > 1) ku=ke+1; else ku=ke;
02445   for (k=ks; k<=ku; k++){
02446     for (j=js; j<=je; j++){
02447       for (i=ie-(nghost-1); i<=ie; i++){
02448         *(pSnd++) = pG->B3i[k][j][i];
02449       }
02450     }
02451   }
02452 #endif /* MHD */
02453   return;
02454 }
02455 
02456 /*----------------------------------------------------------------------------*/
02457 /*! \fn static void pack_ix2(GridS *pG)
02458  *  \brief PACK boundary conditions for MPI_Isend, Inner x2 boundary */
02459 
02460 static void pack_ix2(GridS *pG)
02461 {
02462   int is = pG->is, ie = pG->ie;
02463   int js = pG->js, je = pG->je;
02464   int ks = pG->ks, ke = pG->ke;
02465   int i,j,k;
02466 #ifdef MHD
02467   int ku; /* k-upper */
02468 #endif
02469 #if (NSCALARS > 0)
02470   int n;
02471 #endif
02472   double *pSnd;
02473   pSnd = (double*)&(send_buf[0][0]);
02474 
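      /* Note the i-range spans is-nghost..ie+nghost, so the x1 ghost zones are
       * part of the x2 exchange; the corner ghost cells come out right provided
       * the x1 boundaries have already been filled. */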
02475   for (k=ks; k<=ke; k++) {
02476     for (j=js; j<=js+(nghost-1); j++) {
02477       for (i=is-nghost; i<=ie+nghost; i++) {
02478         *(pSnd++) = pG->U[k][j][i].d;
02479         *(pSnd++) = pG->U[k][j][i].M1;
02480         *(pSnd++) = pG->U[k][j][i].M2;
02481         *(pSnd++) = pG->U[k][j][i].M3;
02482 #ifndef BAROTROPIC
02483         *(pSnd++) = pG->U[k][j][i].E;
02484 #endif /* BAROTROPIC */
02485 #ifdef MHD
02486         *(pSnd++) = pG->U[k][j][i].B1c;
02487         *(pSnd++) = pG->U[k][j][i].B2c;
02488         *(pSnd++) = pG->U[k][j][i].B3c;
02489 #endif /* MHD */
02490 #if (NSCALARS > 0)
02491         for (n=0; n<NSCALARS; n++) *(pSnd++) = pG->U[k][j][i].s[n];
02492 #endif
02493       }
02494     }
02495   }
02496 
02497 #ifdef MHD
02498 /* B1i is not set at i=is-nghost */
02499   for (k=ks; k<=ke; k++) {
02500     for (j=js; j<=js+(nghost-1); j++) {
02501       for (i=is-(nghost-1); i<=ie+nghost; i++) {
02502         *(pSnd++) = pG->B1i[k][j][i];
02503       }
02504     }
02505   }
02506 
02507 /* B2i at j=js maps to B2i at j=je+1 and is not passed */
02508   for (k=ks; k<=ke; k++) {
02509     for (j=js+1; j<=js+(nghost-1); j++) {
02510       for (i=is-nghost; i<=ie+nghost; i++) {
02511         *(pSnd++) = pG->B2i[k][j][i];
02512       }
02513     }
02514   }
02515 
02516   if (pG->Nx[2] > 1) ku=ke+1; else ku=ke;
02517   for (k=ks; k<=ku; k++) {
02518     for (j=js; j<=js+(nghost-1); j++) {
02519       for (i=is-nghost; i<=ie+nghost; i++) {
02520         *(pSnd++) = pG->B3i[k][j][i];
02521       }
02522     }
02523   }
02524 #endif /* MHD */
02525 
02526   return;
02527 }
02528 
02529 /*----------------------------------------------------------------------------*/
02530 /*! \fn static void pack_ox2(GridS *pG)
02531  *  \brief PACK boundary conditions for MPI_Isend, Outer x2 boundary */
02532 
02533 static void pack_ox2(GridS *pG)
02534 {
02535   int is = pG->is, ie = pG->ie;
02536   int js = pG->js, je = pG->je;
02537   int ks = pG->ks, ke = pG->ke;
02538   int i,j,k;
02539 #ifdef MHD
02540   int ku; /* k-upper */
02541 #endif
02542 #if (NSCALARS > 0)
02543   int n;
02544 #endif
02545   double *pSnd;
02546   pSnd = (double*)&(send_buf[1][0]);
02547 
02548   for (k=ks; k<=ke; k++){
02549     for (j=je-(nghost-1); j<=je; j++){
02550       for (i=is-nghost; i<=ie+nghost; i++){
02551         *(pSnd++) = pG->U[k][j][i].d;
02552         *(pSnd++) = pG->U[k][j][i].M1;
02553         *(pSnd++) = pG->U[k][j][i].M2;
02554         *(pSnd++) = pG->U[k][j][i].M3;
02555 #ifndef BAROTROPIC
02556         *(pSnd++) = pG->U[k][j][i].E;
02557 #endif /* BAROTROPIC */
02558 #ifdef MHD
02559         *(pSnd++) = pG->U[k][j][i].B1c;
02560         *(pSnd++) = pG->U[k][j][i].B2c;
02561         *(pSnd++) = pG->U[k][j][i].B3c;
02562 #endif /* MHD */
02563 #if (NSCALARS > 0)
02564         for (n=0; n<NSCALARS; n++) *(pSnd++) = pG->U[k][j][i].s[n];
02565 #endif
02566       }
02567     }
02568   }
02569 
02570 #ifdef MHD
02571 /* B1i is not set at i=is-nghost */
02572   for (k=ks; k<=ke; k++) {
02573     for (j=je-(nghost-1); j<=je; j++){
02574       for (i=is-(nghost-1); i<=ie+nghost; i++) {
02575         *(pSnd++) = pG->B1i[k][j][i];
02576       }
02577     }
02578   }
02579 
02580 /* B2i at j=je-(nghost-1) maps to B2i at j=js-nghost and is not passed */
02581   for (k=ks; k<=ke; k++) {
02582     for (j=je-(nghost-2); j<=je; j++){
02583       for (i=is-nghost; i<=ie+nghost; i++) {
02584         *(pSnd++) = pG->B2i[k][j][i];
02585       }
02586     }
02587   }
02588 
02589   if (pG->Nx[2] > 1) ku=ke+1; else ku=ke;
02590   for (k=ks; k<=ku; k++) {
02591     for (j=je-(nghost-1); j<=je; j++){
02592       for (i=is-nghost; i<=ie+nghost; i++) {
02593         *(pSnd++) = pG->B3i[k][j][i];
02594       }
02595     }
02596   }
02597 #endif /* MHD */
02598 
02599   return;
02600 }
02601 
02602 /*----------------------------------------------------------------------------*/
02603 /*! \fn static void pack_ix3(GridS *pG)
02604  *  \brief PACK boundary conditions for MPI_Isend, Inner x3 boundary */
02605 
02606 static void pack_ix3(GridS *pG)
02607 {
02608   int is = pG->is, ie = pG->ie;
02609   int js = pG->js, je = pG->je;
02610   int ks = pG->ks, ke = pG->ke;
02611   int i,j,k;
02612 #if (NSCALARS > 0)
02613   int n;
02614 #endif
02615   double *pSnd;
02616   pSnd = (double*)&(send_buf[0][0]);
02617 
02618   for (k=ks; k<=ks+(nghost-1); k++) {
02619     for (j=js-nghost; j<=je+nghost; j++) {
02620       for (i=is-nghost; i<=ie+nghost; i++) {
02621         *(pSnd++) = pG->U[k][j][i].d;
02622         *(pSnd++) = pG->U[k][j][i].M1;
02623         *(pSnd++) = pG->U[k][j][i].M2;
02624         *(pSnd++) = pG->U[k][j][i].M3;
02625 #ifndef BAROTROPIC
02626         *(pSnd++) = pG->U[k][j][i].E;
02627 #endif /* BAROTROPIC */
02628 #ifdef MHD
02629         *(pSnd++) = pG->U[k][j][i].B1c;
02630         *(pSnd++) = pG->U[k][j][i].B2c;
02631         *(pSnd++) = pG->U[k][j][i].B3c;
02632 #endif /* MHD */
02633 #if (NSCALARS > 0)
02634         for (n=0; n<NSCALARS; n++) *(pSnd++) = pG->U[k][j][i].s[n];
02635 #endif
02636       }
02637     }
02638   }
02639 
02640 #ifdef MHD
02641 /* B1i is not set at i=is-nghost */
02642   for (k=ks; k<=ks+(nghost-1); k++) {
02643     for (j=js-nghost; j<=je+nghost; j++) {
02644       for (i=is-(nghost-1); i<=ie+nghost; i++) {
02645         *(pSnd++) = pG->B1i[k][j][i];
02646       }
02647     }
02648   }
02649 
02650 /* B2i is not set at j=js-nghost */
02651   for (k=ks; k<=ks+(nghost-1); k++) {
02652     for (j=js-(nghost-1); j<=je+nghost; j++) {
02653       for (i=is-nghost; i<=ie+nghost; i++) {
02654         *(pSnd++) = pG->B2i[k][j][i];
02655       }
02656     }
02657   }
02658 
02659 /* B3i at k=ks maps to B3i at k=ke+1 and is not passed */
02660   for (k=ks+1; k<=ks+(nghost-1); k++) {
02661     for (j=js-nghost; j<=je+nghost; j++) {
02662       for (i=is-nghost; i<=ie+nghost; i++) {
02663         *(pSnd++) = pG->B3i[k][j][i];
02664       }
02665     }
02666   }
02667 #endif /* MHD */
02668 
02669   return;
02670 }
02671 
02672 /*----------------------------------------------------------------------------*/
02673 /*! \fn static void pack_ox3(GridS *pG)
02674  *  \brief PACK boundary conditions for MPI_Isend, Outer x3 boundary */
02675 
02676 static void pack_ox3(GridS *pG)
02677 {
02678   int is = pG->is, ie = pG->ie;
02679   int js = pG->js, je = pG->je;
02680   int ks = pG->ks, ke = pG->ke;
02681   int i,j,k;
02682 #if (NSCALARS > 0)
02683   int n;
02684 #endif
02685   double *pSnd;
02686   pSnd = (double*)&(send_buf[1][0]);
02687 
02688   for (k=ke-(nghost-1); k<=ke; k++) {
02689     for (j=js-nghost; j<=je+nghost; j++) {
02690       for (i=is-nghost; i<=ie+nghost; i++) {
02691         *(pSnd++) = pG->U[k][j][i].d;
02692         *(pSnd++) = pG->U[k][j][i].M1;
02693         *(pSnd++) = pG->U[k][j][i].M2;
02694         *(pSnd++) = pG->U[k][j][i].M3;
02695 #ifndef BAROTROPIC
02696         *(pSnd++) = pG->U[k][j][i].E;
02697 #endif /* BAROTROPIC */
02698 #ifdef MHD
02699         *(pSnd++) = pG->U[k][j][i].B1c;
02700         *(pSnd++) = pG->U[k][j][i].B2c;
02701         *(pSnd++) = pG->U[k][j][i].B3c;
02702 #endif /* MHD */
02703 #if (NSCALARS > 0)
02704         for (n=0; n<NSCALARS; n++) *(pSnd++) = pG->U[k][j][i].s[n];
02705 #endif
02706       }
02707     }
02708   }
02709 
02710 #ifdef MHD
02711 /* B1i is not set at i=is-nghost */
02712   for (k=ke-(nghost-1); k<=ke; k++) {
02713     for (j=js-nghost; j<=je+nghost; j++) {
02714       for (i=is-(nghost-1); i<=ie+nghost; i++) {
02715         *(pSnd++) = pG->B1i[k][j][i];
02716       }
02717     }
02718   }
02719 
02720 /* B2i is not set at j=js-nghost */
02721   for (k=ke-(nghost-1); k<=ke; k++) {
02722     for (j=js-(nghost-1); j<=je+nghost; j++) {
02723       for (i=is-nghost; i<=ie+nghost; i++) {
02724         *(pSnd++) = pG->B2i[k][j][i];
02725       }
02726     }
02727   }
02728 
02729 /* B3i at k=ke-(nghost-1) maps to B3i at k=ks-nghost and is not passed */
02730   for (k=ke-(nghost-2); k<=ke; k++) {
02731     for (j=js-nghost; j<=je+nghost; j++) {
02732       for (i=is-nghost; i<=ie+nghost; i++) {
02733         *(pSnd++) = pG->B3i[k][j][i];
02734       }
02735     }
02736   }
02737 #endif /* MHD */
02738 
02739   return;
02740 }
02741 
02742 /*----------------------------------------------------------------------------*/
02743 /*! \fn static void unpack_ix1(GridS *pG)
02744  *  \brief UNPACK boundary conditions after MPI_Irecv, Inner x1 boundary */
02745 
02746 static void unpack_ix1(GridS *pG)
02747 {
02748   int is = pG->is;
02749   int js = pG->js, je = pG->je;
02750   int ks = pG->ks, ke = pG->ke;
02751   int i,j,k;
02752 #ifdef MHD
02753   int ju, ku; /* j-upper, k-upper */
02754 #endif
02755 #if (NSCALARS > 0)
02756   int n;
02757 #endif
02758   double *pRcv;
02759   pRcv = (double*)&(recv_buf[0][0]);
02760 
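      /* Unpack in exactly the order the sending Grid packed the buffer
       * (normally its pack_ox1()): cell-centered variables, then B1i, B2i, B3i. */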
02761   for (k=ks; k<=ke; k++){
02762     for (j=js; j<=je; j++){
02763       for (i=is-nghost; i<=is-1; i++){
02764         pG->U[k][j][i].d  = *(pRcv++);
02765         pG->U[k][j][i].M1 = *(pRcv++);
02766         pG->U[k][j][i].M2 = *(pRcv++);
02767         pG->U[k][j][i].M3 = *(pRcv++);
02768 #ifndef BAROTROPIC
02769         pG->U[k][j][i].E  = *(pRcv++);
02770 #endif /* BAROTROPIC */
02771 #ifdef MHD
02772         pG->U[k][j][i].B1c = *(pRcv++);
02773         pG->U[k][j][i].B2c = *(pRcv++);
02774         pG->U[k][j][i].B3c = *(pRcv++);
02775 #endif /* MHD */
02776 #if (NSCALARS > 0)
02777         for (n=0; n<NSCALARS; n++) pG->U[k][j][i].s[n] = *(pRcv++);
02778 #endif
02779       }
02780     }
02781   }
02782 
02783 #ifdef MHD
02784 /* B1i is not set at i=is-nghost */
02785   for (k=ks; k<=ke; k++) {
02786     for (j=js; j<=je; j++) {
02787       for (i=is-(nghost-1); i<=is-1; i++){
02788         pG->B1i[k][j][i] = *(pRcv++);
02789       }
02790     }
02791   }
02792 
02793   if (pG->Nx[1] > 1) ju=je+1; else ju=je;
02794   for (k=ks; k<=ke; k++) {
02795     for (j=js; j<=ju; j++) {
02796       for (i=is-nghost; i<=is-1; i++){
02797         pG->B2i[k][j][i] = *(pRcv++);
02798       }
02799     }
02800   }
02801 
02802   if (pG->Nx[2] > 1) ku=ke+1; else ku=ke;
02803   for (k=ks; k<=ku; k++) {
02804     for (j=js; j<=je; j++) {
02805       for (i=is-nghost; i<=is-1; i++){
02806         pG->B3i[k][j][i] = *(pRcv++);
02807       }
02808     }
02809   }
02810 #endif /* MHD */
02811 
02812   return;
02813 }
02814 
02815 /*----------------------------------------------------------------------------*/
02816 /*! \fn static void unpack_ox1(GridS *pG)
02817  *  \brief UNPACK boundary conditions after MPI_Irecv, Outer x1 boundary */
02818 
02819 static void unpack_ox1(GridS *pG)
02820 {
02821   int ie = pG->ie;
02822   int js = pG->js, je = pG->je;
02823   int ks = pG->ks, ke = pG->ke;
02824   int i,j,k;
02825 #ifdef MHD
02826   int ju, ku; /* j-upper, k-upper */
02827 #endif
02828 #if (NSCALARS > 0)
02829   int n;
02830 #endif
02831   double *pRcv;
02832   pRcv = (double*)&(recv_buf[1][0]);
02833 
02834   for (k=ks; k<=ke; k++) {
02835     for (j=js; j<=je; j++) {
02836       for (i=ie+1; i<=ie+nghost; i++) {
02837         pG->U[k][j][i].d  = *(pRcv++);
02838         pG->U[k][j][i].M1 = *(pRcv++);
02839         pG->U[k][j][i].M2 = *(pRcv++);
02840         pG->U[k][j][i].M3 = *(pRcv++);
02841 #ifndef BAROTROPIC
02842         pG->U[k][j][i].E  = *(pRcv++);
02843 #endif /* BAROTROPIC */
02844 #ifdef MHD
02845         pG->U[k][j][i].B1c = *(pRcv++);
02846         pG->U[k][j][i].B2c = *(pRcv++);
02847         pG->U[k][j][i].B3c = *(pRcv++);
02848 #endif /* MHD */
02849 #if (NSCALARS > 0)
02850         for (n=0; n<NSCALARS; n++) pG->U[k][j][i].s[n] = *(pRcv++);
02851 #endif
02852       }
02853     }
02854   }
02855 
02856 #ifdef MHD
02857 /* B1i is not set at i=ie+1 */
02858   for (k=ks; k<=ke; k++) {
02859     for (j=js; j<=je; j++) {
02860       for (i=ie+2; i<=ie+nghost; i++) {
02861         pG->B1i[k][j][i] = *(pRcv++);
02862       }
02863     }
02864   }
02865 
02866   if (pG->Nx[1] > 1) ju=je+1; else ju=je;
02867   for (k=ks; k<=ke; k++) {
02868     for (j=js; j<=ju; j++) {
02869       for (i=ie+1; i<=ie+nghost; i++) {
02870         pG->B2i[k][j][i] = *(pRcv++);
02871       }
02872     }
02873   }
02874 
02875   if (pG->Nx[2] > 1) ku=ke+1; else ku=ke;
02876   for (k=ks; k<=ku; k++) {
02877     for (j=js; j<=je; j++) {
02878       for (i=ie+1; i<=ie+nghost; i++) {
02879         pG->B3i[k][j][i] = *(pRcv++);
02880       }
02881     }
02882   }
02883 #endif /* MHD */
02884 
02885   return;
02886 }
02887 
02888 /*----------------------------------------------------------------------------*/
02889 /*! \fn static void unpack_ix2(GridS *pG)
02890  *  \brief UNPACK boundary conditions after MPI_Irecv, Inner x2 boundary */
02891 
02892 static void unpack_ix2(GridS *pG)
02893 {
02894   int is = pG->is, ie = pG->ie;
02895   int js = pG->js;
02896   int ks = pG->ks, ke = pG->ke;
02897   int i,j,k;
02898 #ifdef MHD
02899   int ku; /* k-upper */
02900 #endif
02901 #if (NSCALARS > 0)
02902   int n;
02903 #endif
02904   double *pRcv;
02905   pRcv = (double*)&(recv_buf[0][0]);
02906 
02907   for (k=ks; k<=ke; k++) {
02908     for (j=js-nghost; j<=js-1; j++) {
02909       for (i=is-nghost; i<=ie+nghost; i++) {
02910         pG->U[k][j][i].d  = *(pRcv++);
02911         pG->U[k][j][i].M1 = *(pRcv++);
02912         pG->U[k][j][i].M2 = *(pRcv++);
02913         pG->U[k][j][i].M3 = *(pRcv++);
02914 #ifndef BAROTROPIC
02915         pG->U[k][j][i].E  = *(pRcv++);
02916 #endif /* BAROTROPIC */
02917 #ifdef MHD
02918         pG->U[k][j][i].B1c = *(pRcv++);
02919         pG->U[k][j][i].B2c = *(pRcv++);
02920         pG->U[k][j][i].B3c = *(pRcv++);
02921 #endif /* MHD */
02922 #if (NSCALARS > 0)
02923         for (n=0; n<NSCALARS; n++) pG->U[k][j][i].s[n] = *(pRcv++);
02924 #endif
02925       }
02926     }
02927   }
02928 
02929 #ifdef MHD
02930 /* B1i is not set at i=is-nghost */
02931   for (k=ks; k<=ke; k++) {
02932     for (j=js-nghost; j<=js-1; j++) {
02933       for (i=is-(nghost-1); i<=ie+nghost; i++) {
02934         pG->B1i[k][j][i] = *(pRcv++);
02935       }
02936     }
02937   }
02938 
02939 /* B2i is not set at j=js-nghost */
02940   for (k=ks; k<=ke; k++) {
02941     for (j=js-(nghost-1); j<=js-1; j++) {
02942       for (i=is-nghost; i<=ie+nghost; i++) {
02943         pG->B2i[k][j][i] = *(pRcv++);
02944       }
02945     }
02946   }
02947 
02948   if (pG->Nx[2] > 1) ku=ke+1; else ku=ke;
02949   for (k=ks; k<=ku; k++) {
02950     for (j=js-nghost; j<=js-1; j++) {
02951       for (i=is-nghost; i<=ie+nghost; i++) {
02952         pG->B3i[k][j][i] = *(pRcv++);
02953       }
02954     }
02955   }
02956 #endif /* MHD */
02957 
02958   return;
02959 }
02960 
02961 /*----------------------------------------------------------------------------*/
02962 /*! \fn static void unpack_ox2(GridS *pG)
02963  *  \brief UNPACK boundary conditions after MPI_Irecv, Outer x2 boundary */
02964 
02965 static void unpack_ox2(GridS *pG)
02966 {
02967   int is = pG->is, ie = pG->ie;
02968   int je = pG->je;
02969   int ks = pG->ks, ke = pG->ke;
02970   int i,j,k;
02971 #ifdef MHD
02972   int ku; /* k-upper */
02973 #endif
02974 #if (NSCALARS > 0)
02975   int n;
02976 #endif
02977   double *pRcv;
02978   pRcv = (double*)&(recv_buf[1][0]);
02979 
02980   for (k=ks; k<=ke; k++) {
02981     for (j=je+1; j<=je+nghost; j++) {
02982       for (i=is-nghost; i<=ie+nghost; i++) {
02983         pG->U[k][j][i].d  = *(pRcv++);
02984         pG->U[k][j][i].M1 = *(pRcv++);
02985         pG->U[k][j][i].M2 = *(pRcv++);
02986         pG->U[k][j][i].M3 = *(pRcv++);
02987 #ifndef BAROTROPIC
02988         pG->U[k][j][i].E  = *(pRcv++);
02989 #endif /* BAROTROPIC */
02990 #ifdef MHD
02991         pG->U[k][j][i].B1c = *(pRcv++);
02992         pG->U[k][j][i].B2c = *(pRcv++);
02993         pG->U[k][j][i].B3c = *(pRcv++);
02994 #endif /* MHD */
02995 #if (NSCALARS > 0)
02996         for (n=0; n<NSCALARS; n++) pG->U[k][j][i].s[n] = *(pRcv++);
02997 #endif
02998       }
02999     }
03000   }
03001 
03002 #ifdef MHD
03003 /* B1i is not set at i=is-nghost */
03004   for (k=ks; k<=ke; k++) {
03005     for (j=je+1; j<=je+nghost; j++) {
03006       for (i=is-(nghost-1); i<=ie+nghost; i++) {
03007         pG->B1i[k][j][i] = *(pRcv++);
03008       }
03009     }
03010   }
03011 
03012 /* B2i is not set at j=je+1 */
03013   for (k=ks; k<=ke; k++) {
03014     for (j=je+2; j<=je+nghost; j++) {
03015       for (i=is-nghost; i<=ie+nghost; i++) {
03016         pG->B2i[k][j][i] = *(pRcv++);
03017       }
03018     }
03019   }
03020 
03021   if (pG->Nx[2] > 1) ku=ke+1; else ku=ke;
03022   for (k=ks; k<=ku; k++) {
03023     for (j=je+1; j<=je+nghost; j++) {
03024       for (i=is-nghost; i<=ie+nghost; i++) {
03025         pG->B3i[k][j][i] = *(pRcv++);
03026       }
03027     }
03028   }
03029 #endif /* MHD */
03030 
03031   return;
03032 }
03033 
03034 /*----------------------------------------------------------------------------*/
03035 /*! \fn static void unpack_ix3(GridS *pG)
03036  *  \brief UNPACK boundary conditions after MPI_Irecv, Inner x3 boundary */
03037 
03038 static void unpack_ix3(GridS *pG)
03039 {
03040   int is = pG->is, ie = pG->ie;
03041   int js = pG->js, je = pG->je;
03042   int ks = pG->ks;
03043   int i,j,k;
03044 #if (NSCALARS > 0)
03045   int n;
03046 #endif
03047   double *pRcv;
03048   pRcv = (double*)&(recv_buf[0][0]);
03049 
03050   for (k=ks-nghost; k<=ks-1; k++) {
03051     for (j=js-nghost; j<=je+nghost; j++) {
03052       for (i=is-nghost; i<=ie+nghost; i++) {
03053         pG->U[k][j][i].d  = *(pRcv++);
03054         pG->U[k][j][i].M1 = *(pRcv++);
03055         pG->U[k][j][i].M2 = *(pRcv++);
03056         pG->U[k][j][i].M3 = *(pRcv++);
03057 #ifndef BAROTROPIC
03058         pG->U[k][j][i].E  = *(pRcv++);
03059 #endif /* BAROTROPIC */
03060 #ifdef MHD
03061         pG->U[k][j][i].B1c = *(pRcv++);
03062         pG->U[k][j][i].B2c = *(pRcv++);
03063         pG->U[k][j][i].B3c = *(pRcv++);
03064 #endif /* MHD */
03065 #if (NSCALARS > 0)
03066         for (n=0; n<NSCALARS; n++) pG->U[k][j][i].s[n] = *(pRcv++);
03067 #endif
03068       }
03069     }
03070   }
03071 
03072 #ifdef MHD
03073 /* B1i is not set at i=is-nghost */
03074   for (k=ks-nghost; k<=ks-1; k++) {
03075     for (j=js-nghost; j<=je+nghost; j++) {
03076       for (i=is-(nghost-1); i<=ie+nghost; i++) {
03077         pG->B1i[k][j][i] = *(pRcv++);
03078       }
03079     }
03080   }
03081 
03082 /* B2i is not set at j=js-nghost */
03083   for (k=ks-nghost; k<=ks-1; k++) {
03084     for (j=js-(nghost-1); j<=je+nghost; j++) {
03085       for (i=is-nghost; i<=ie+nghost; i++) {
03086         pG->B2i[k][j][i] = *(pRcv++);
03087       }
03088     }
03089   }
03090 
03091 /* B3i is not set at k=ks-nghost */
03092   for (k=ks-(nghost-1); k<=ks-1; k++) {
03093     for (j=js-nghost; j<=je+nghost; j++) {
03094       for (i=is-nghost; i<=ie+nghost; i++) {
03095         pG->B3i[k][j][i] = *(pRcv++);
03096       }
03097     }
03098   }
03099 #endif /* MHD */
03100 
03101   return;
03102 }
03103 
03104 /*----------------------------------------------------------------------------*/
03105 /*! \fn static void unpack_ox3(GridS *pG)
03106  *  \brief UNPACK boundary conditions after MPI_Irecv, Outer x3 boundary */
03107 
03108 static void unpack_ox3(GridS *pG)
03109 {
03110   int is = pG->is, ie = pG->ie;
03111   int js = pG->js, je = pG->je;
03112   int ke = pG->ke;
03113   int i,j,k;
03114 #if (NSCALARS > 0)
03115   int n;
03116 #endif
03117   double *pRcv;
03118   pRcv = (double*)&(recv_buf[1][0]);
03119 
03120   for (k=ke+1; k<=ke+nghost; k++) {
03121     for (j=js-nghost; j<=je+nghost; j++) {
03122       for (i=is-nghost; i<=ie+nghost; i++) {
03123         pG->U[k][j][i].d  = *(pRcv++);
03124         pG->U[k][j][i].M1 = *(pRcv++);
03125         pG->U[k][j][i].M2 = *(pRcv++);
03126         pG->U[k][j][i].M3 = *(pRcv++);
03127 #ifndef BAROTROPIC
03128         pG->U[k][j][i].E  = *(pRcv++);
03129 #endif /* BAROTROPIC */
03130 #ifdef MHD
03131         pG->U[k][j][i].B1c = *(pRcv++);
03132         pG->U[k][j][i].B2c = *(pRcv++);
03133         pG->U[k][j][i].B3c = *(pRcv++);
03134 #endif /* MHD */
03135 #if (NSCALARS > 0)
03136         for (n=0; n<NSCALARS; n++) pG->U[k][j][i].s[n] = *(pRcv++);
03137 #endif
03138       }
03139     }
03140   }
03141 
03142 #ifdef MHD
03143 /* B1i is not set at i=is-nghost */
03144   for (k=ke+1; k<=ke+nghost; k++) {
03145     for (j=js-nghost; j<=je+nghost; j++) {
03146       for (i=is-(nghost-1); i<=ie+nghost; i++) {
03147         pG->B1i[k][j][i] = *(pRcv++);
03148       }
03149     }
03150   }
03151 
03152 /* B2i is not set at j=js-nghost */
03153   for (k=ke+1; k<=ke+nghost; k++) {
03154     for (j=js-(nghost-1); j<=je+nghost; j++) {
03155       for (i=is-nghost; i<=ie+nghost; i++) {
03156         pG->B2i[k][j][i] = *(pRcv++);
03157       }
03158     }
03159   }
03160 
03161 /* B3i is not set at k=ke+1 */
03162   for (k=ke+2; k<=ke+nghost; k++) {
03163     for (j=js-nghost; j<=je+nghost; j++) {
03164       for (i=is-nghost; i<=ie+nghost; i++) {
03165         pG->B3i[k][j][i] = *(pRcv++);
03166       }
03167     }
03168   }
03169 #endif /* MHD */
03170 
03171   return;
03172 }
03173 #endif /* MPI_PARALLEL */
