bvals_shear.c

00001 #include "copyright.h"
00002 /*============================================================================*/
00003 /*! \file bvals_shear.c
00004  *  \brief Shearing sheet boundary conditions at ix1 and ox1 for both 2D and 3D
00005  *
00006  * PURPOSE: Shearing sheet boundary conditions at ix1 and ox1 for both 2D and 3D
00007  *   Called by bvals_mhd.  Decomposition of the Domain into MPI grids in X,Y
00007  *   Called by bvals_mhd.  Decomposition of the Domain into MPI grids in X,Y
00008  *   and/or Z is allowed.  The RemapEy_ix1() and RemapEy_ox1() functions (which
00009  *   apply the shearing sheet boundary conditions to the y-component of the EMF
00010  *   to keep <Bz>=const) are called directly by the 3D integrator.  Configure
00011  *   code with --enable-shearing-box to use.
00012  *
00013  * FARGO (orbital advection) algorithm is implemented in the Fargo() function
00014  *   called in the main loop directly after the integrator and before bvals_mhd.
00015  *   Code must be configured with --enable-shearing-box --enable-fargo to use
00016  *   this option.
00017  *
00018  * CONTAINS PUBLIC FUNCTIONS:
00019  * - ShearingSheet_ix1() - shearing sheet BCs on ix1
00020  * - ShearingSheet_ox1() - shearing sheet BCs on ox1
00021  * - RemapEy_ix1()       - sets Ey at ix1 in integrator to keep <Bz>=const. 
00022  * - RemapEy_ox1()       - sets Ey at ox1 in integrator to keep <Bz>=const. 
00023  * - Fargo()             - implements FARGO algorithm for background flow
00024  * - bvals_shear_init() - allocates memory for arrays used here
00025  * - bvals_shear_destruct() - frees memory for arrays used here               
00026  *
00027  * PRIVATE FUNCTION PROTOTYPES:
00028  * - RemapFlux() - 2nd or 3rd order reconstruction for remap in ghost zones   */
00029 /*============================================================================*/
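
For orientation, here is a minimal sketch (not part of bvals_shear.c) of the shear-offset bookkeeping that Step 1 of every routine below performs: the accumulated azimuthal shear is reduced modulo the Domain length Ly, then split into a whole number of grid cells plus a cell fraction.  The numbers in the trailing comment are made up purely for illustration.

/* Sketch only: mirrors Step 1 of ShearingSheet_ix1()/ShearingSheet_ox1().
 * Assumes pD, pG, qshear and Omega_0 are in scope as in the functions below. */
Real Lx = pD->RootMaxX[0] - pD->RootMinX[0];
Real Ly = pD->RootMaxX[1] - pD->RootMinX[1];
Real qomL    = qshear*Omega_0*Lx;             /* velocity jump across the box */
Real yshear  = qomL*pG->time;                 /* total distance sheared in y  */
Real deltay  = fmod(yshear, Ly);              /* Grid is periodic in y        */
int  joffset = (int)(deltay/pG->dx2);         /* whole grid cells             */
Real epsi    = fmod(deltay, pG->dx2)/pG->dx2; /* cell fraction, 0 <= epsi < 1 */
/* e.g. qomL = 1.5, time = 10, Ly = 4, dx2 = 0.08:
 *      yshear = 15, deltay = 3, joffset = 37, epsi = 0.5 */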
00030 
00031 #include <float.h>
00032 #include <math.h>
00033 
00034 #include <stdlib.h>
00035 #include <string.h>
00036 #include "defs.h"
00037 #include "athena.h"
00038 #include "globals.h"
00039 #include "prototypes.h"
00040 
00041 /* The functions in this file will only work with the shearing box */
00042 #if defined(SHEARING_BOX) || (defined(CYLINDRICAL) && defined(FARGO))
00043 
00044 /* Define number of variables to be remapped */
00045 #ifdef BAROTROPIC /* BAROTROPIC EOS */
00046 #ifdef HYDRO
00047  enum {NREMAP = 4, NFARGO = 4};
00048 #endif
00049 #ifdef MHD
00050  enum {NREMAP = 8, NFARGO = 6};
00051 #endif
00052 #else /* ADIABATIC or other EOS */
00053 #ifdef HYDRO
00054  enum {NREMAP = 5, NFARGO = 5};
00055 #endif
00056 #ifdef MHD
00057  enum {NREMAP = 9, NFARGO = 7};
00058 #endif
00059 #endif /* EOS */
00060 
00061 #ifdef MHD
00062 #define NVAR_SHARE (NVAR + 3)
00063 #else
00064 #define NVAR_SHARE NVAR
00065 #endif
00066 
00067 /*! \struct Remap
00068  *  \brief Define structure which holds variables remapped 
00069  *  by shearing sheet BCs */
00070 typedef struct Remap_s{
00071   Real U[NREMAP];
00072 #if (NSCALARS > 0)
00073   Real s[NSCALARS];
00074 #endif
00075 }Remap;
00076 
00077 /*! \struct FConsS
00078  *  \brief Define structure for variables used in FARGO algorithm */
00079 typedef struct FCons_s{
00080   Real U[NFARGO];
00081 #if (NSCALARS > 0)
00082   Real s[NSCALARS];
00083 #endif
00084 }FConsS;
00085 
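As a reading aid, the hypothetical helper below (not part of the file) shows how one Remap entry is filled in the ADIABATIC MHD case (NREMAP = 9); it simply restates Step 2 of ShearingSheet_ix1(), and the name pack_remap is invented for this sketch.

/* Hypothetical, illustration only: slot layout of Remap.U[] for ADIABATIC MHD */
static void pack_remap(Remap *r, const ConsS *u, Real B1i, Real B2i, Real B3i)
{
  r->U[0] = u->d;            /* density                         */
  r->U[1] = u->M1;           /* momenta                         */
  r->U[2] = u->M2;
  r->U[3] = u->M3;
  r->U[4] = u->E;            /* total energy (ADIABATIC only)   */
  r->U[NREMAP-4] = u->B1c;   /* cell-centered B1 (MHD only)     */
  r->U[NREMAP-3] = B1i;      /* face-centered fields (MHD only) */
  r->U[NREMAP-2] = B2i;
  r->U[NREMAP-1] = B3i;
}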
00086 /* The memory for all the arrays below is allocated in bvals_shear_init */
00087 /* Arrays of ghost zones containing remapped conserved quantities */
00088 static Remap ***GhstZns=NULL, ***GhstZnsBuf=NULL;
00089 /* 1D vectors for reconstruction in conservative remap step */
00090 static Real *U=NULL, *Flx=NULL;
00091 /* Arrays of Ey remapped at ix1/ox1 edges of Domain */
00092 #ifdef MHD
00093 static Real **tEyBuf=NULL;
00094 #endif
00095 /* temporary vector needed for 3rd order reconstruction in ghost zones */
00096 #if defined(THIRD_ORDER_CHAR) || defined(THIRD_ORDER_PRIM)
00097 static Real *Uhalf=NULL;
00098 #endif
00099 /* MPI send and receive buffers */
00100 #ifdef MPI_PARALLEL
00101 static double *send_buf = NULL, *recv_buf = NULL;
00102 #endif
00103 #ifdef FARGO
00104 int nfghost;
00105 static FConsS ***FargoVars=NULL, ***FargoFlx=NULL;
00106 #endif
00107 
00108 /*==============================================================================
00109  * PRIVATE FUNCTION PROTOTYPES:
00110  * RemapFlux() - 2nd or 3rd order reconstruction for remap in ghost zones
00111  *============================================================================*/
00112 
00113 void RemapFlux(const Real *U,const Real eps,const int ji,const int jo, Real *F);
00114 
00115 #endif
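
RemapFlux() is defined later in this file with 2nd or 3rd order reconstruction; as a hedged illustration of the calling convention and of the conservative update used in Step 3 of the routines below, a first-order (donor-cell) stand-in would look like this:

/* Sketch only, NOT the code's RemapFlux(): donor-cell remap flux */
static void RemapFlux_donor(const Real *U, const Real eps,
                            const int ji, const int jo, Real *F)
{
  int j;
  if (eps >= 0.0) {                         /* shift toward larger j  */
    for (j=ji; j<=jo; j++) F[j] = eps*U[j-1];
  } else {                                  /* shift toward smaller j */
    for (j=ji; j<=jo; j++) F[j] = eps*U[j];
  }
}

Used as in Step 3, Unew[j] = U[j] - (F[j+1]-F[j]) gives (1-|eps|)*U[j] + |eps|*U[j-1] for eps >= 0 (and U[j+1] for eps < 0): a conservative shift of the profile by eps of a cell.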
00116 
00117 #ifdef SHEARING_BOX
00118 /*=========================== PUBLIC FUNCTIONS ===============================*/
00119 /*----------------------------------------------------------------------------*/
00120 /*! \fn void ShearingSheet_ix1(DomainS *pD)
00121  *  \brief Shearing-sheet BCs at ix1, for 2D and 3D.
00122  *
00123  * It applies a remap
00124  * in Y after the ghost cells have been set by the usual periodic BCs in X and
00125  * Y implemented in bvals_mhd.c
00126  *
00127  * This is a public function which is called by bvals_mhd() inside a
00128  * SHEARING_BOX macro.                                                        */
00129 /*----------------------------------------------------------------------------*/
00130 void ShearingSheet_ix1(DomainS *pD)
00131 {
00132   GridS *pG = pD->Grid;
00133   int is = pG->is, ie = pG->ie;
00134   int js = pG->js, je = pG->je;
00135   int ks = pG->ks, ke = pG->ke;
00136   int i,ii,j,k,ku,n,joffset,jremap;
00137   Real xmin,xmax,Lx,Ly,qomL,yshear,deltay,epsi;
00138 #ifdef MPI_PARALLEL
00139   int my_iproc,my_jproc,my_kproc,cnt,jproc,joverlap,Ngrids;
00140   int ierr,sendto_id,getfrom_id;
00141   double *pSnd,*pRcv;
00142   Remap *pRemap;
00143   ConsS *pCons;
00144   MPI_Request rq;
00145 #endif
00146 
00147 /*--- Step 1. ------------------------------------------------------------------
00148  * Compute the distance the computational domain has sheared in y */
00149 
00150   xmin = pD->RootMinX[0];
00151   xmax = pD->RootMaxX[0];
00152   Lx = xmax - xmin;
00153 
00154   xmin = pD->RootMinX[1];
00155   xmax = pD->RootMaxX[1];
00156   Ly = xmax - xmin;
00157 
00158   qomL = qshear*Omega_0*Lx;
00159   yshear = qomL*pG->time;
00160 
00161 /* Split this into integer and fractional pieces of the Domain in y.  Ignore
00162  * the integer piece because the Grid is periodic in y */
00163 
00164   deltay = fmod(yshear, Ly);
00165 
00166 /* further decompose the fractional piece into integer and fractional pieces of
00167  * a grid cell.  Note 0.0 <= epsi < 1.0.  If Domain has MPI decomposition in Y,
00168  * then it is possible that:  pD->Nx[1] > joffset > pG->Nx[1]   */
00169 
00170   joffset = (int)(deltay/pG->dx2);
00171   epsi = (fmod(deltay,pG->dx2))/pG->dx2;
00172 
00173 /*--- Step 2. ------------------------------------------------------------------
00174  * Copy data into GhstZns array.  Note i and j indices are switched.
00175  * Steps 2-10 are for 3D or 2D xy runs.  Step 11 handles 2D xz separately */
00176 
00177   if (pG->Nx[2] > 1 || ShBoxCoord==xy) {  /* this if ends at end of step 10 */
00178 
00179   if (pG->Nx[2] > 1) ku=ke+1; else ku=ke;
00180   for(k=ks; k<=ku; k++) {
00181     for(j=js-nghost; j<=je+nghost; j++){
00182       for(i=0; i<nghost; i++){
00183         ii = is-nghost+i;
00184         GhstZns[k][i][j].U[0] = pG->U[k][j][ii].d;
00185         GhstZns[k][i][j].U[1] = pG->U[k][j][ii].M1;
00186         GhstZns[k][i][j].U[2] = pG->U[k][j][ii].M2;
00187 #ifndef FARGO
00188         GhstZns[k][i][j].U[2] += qomL*pG->U[k][j][ii].d;
00189 #endif
00190         GhstZns[k][i][j].U[3] = pG->U[k][j][ii].M3;
00191 #ifdef ADIABATIC
00192 /* No change in the internal energy */
00193         GhstZns[k][i][j].U[4] = pG->U[k][j][ii].E + (0.5/GhstZns[k][i][j].U[0])*
00194           (SQR(GhstZns[k][i][j].U[2]) - SQR(pG->U[k][j][ii].M2));
00195 #endif /* ADIABATIC */
00196 #ifdef MHD
00197         GhstZns[k][i][j].U[NREMAP-4] = pG->U[k][j][ii].B1c;
00198         GhstZns[k][i][j].U[NREMAP-3] = pG->B1i[k][j][ii];
00199         GhstZns[k][i][j].U[NREMAP-2] = pG->B2i[k][j][ii];
00200         GhstZns[k][i][j].U[NREMAP-1] = pG->B3i[k][j][ii];
00201 #endif /* MHD */
00202 #if (NSCALARS > 0)
00203         for(n=0; n<NSCALARS; n++) GhstZns[k][i][j].s[n] = pG->U[k][j][ii].s[n];
00204 #endif
00205       }
00206     }
00207   }
00208 
00209 /*--- Step 3. ------------------------------------------------------------------
00210  * Copy GhstZns into buffer, at the same time apply a conservative remap of
00211  * solution over the fractional part of grid cell */
00212 
00213   for(k=ks; k<=ku; k++) {
00214     for(i=0; i<nghost; i++){
00215 
00216       for (n=0; n<(NREMAP); n++) {
00217         for (j=js-nghost; j<=je+nghost; j++) U[j] = GhstZns[k][i][j].U[n];
00218         RemapFlux(U,epsi,js,je+1,Flx);
00219         for(j=js; j<=je; j++){
00220           GhstZnsBuf[k][i][j].U[n] = GhstZns[k][i][j].U[n] - (Flx[j+1]-Flx[j]);
00221         }
00222       }
00223 
00224 #if (NSCALARS > 0)
00225       for (n=0; n<(NSCALARS); n++) {
00226         for (j=js-nghost; j<=je+nghost; j++) U[j] = GhstZns[k][i][j].s[n];
00227         RemapFlux(U,epsi,js,je+1,Flx);
00228         for(j=js; j<=je; j++){
00229           GhstZnsBuf[k][i][j].s[n] = GhstZns[k][i][j].s[n] - (Flx[j+1]-Flx[j]);
00230         }
00231       }
00232 #endif
00233 
00234     }
00235   }
00236 
00237 /*--- Step 4. ------------------------------------------------------------------
00238  * If no MPI decomposition in Y, apply shift over integer number of
00239  * grid cells during copy from buffer back into GhstZns.  */
00240 
00241   if (pD->NGrid[1] == 1) {
00242 
00243     for(k=ks; k<=ku; k++) {
00244       for(j=js; j<=je; j++){
00245         jremap = j - joffset;
00246         if (jremap < (int)js) jremap += pG->Nx[1];
00247 
00248         for(i=0; i<nghost; i++){
00249           for (n=0; n<(NREMAP); n++) {
00250             GhstZns[k][i][j].U[n]  = GhstZnsBuf[k][i][jremap].U[n];
00251           }
00252 #if (NSCALARS > 0)
00253           for (n=0; n<NSCALARS; n++) { 
00254             GhstZns[k][i][j].s[n] = GhstZnsBuf[k][i][jremap].s[n];
00255           }
00256 #endif
00257         }
00258 
00259       }
00260     }
00261 
00262 #ifdef MPI_PARALLEL
00263   } else {
00264 
00265 /*--- Step 5. ------------------------------------------------------------------
00266  * If Domain contains MPI decomposition in Y, then MPI calls are required for
00267  * the cyclic shift needed to apply shift over integer number of grid cells
00268  * during copy from buffer back into GhstZns.  */
00269 
00270     get_myGridIndex(pD, myID_Comm_world, &my_iproc, &my_jproc, &my_kproc);
00271 
00272 /* Find integer and fractional number of grids over which offset extends.
00273  * This assumes every grid has same number of cells in x2-direction! */
00274     Ngrids = (int)(joffset/pG->Nx[1]);
00275     joverlap = joffset - Ngrids*pG->Nx[1];
00276 
00277 /*--- Step 5a. -----------------------------------------------------------------
00278  * Find ids of processors that data in [je-(joverlap-1):je] is sent to, and
00279  * data in [js:js+(joverlap-1)] is received from.  Only execute if joverlap>0 */
00280 /* This can result in send/receive to self -- we rely on MPI to handle this
00281  * properly */
00282 
00283     if (joverlap != 0) {
00284 
00285       jproc = my_jproc + (Ngrids + 1);
00286       if (jproc > (pD->NGrid[1]-1)) jproc -= pD->NGrid[1]; 
00287       sendto_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
00288 
00289       jproc = my_jproc - (Ngrids + 1);
00290       if (jproc < 0) jproc += pD->NGrid[1]; 
00291       getfrom_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
00292 
00293 /*--- Step 5b. -----------------------------------------------------------------
00294  * Pack send buffer and send data in [je-(joverlap-1):je] from GhstZnsBuf */
00295 
00296       cnt = nghost*joverlap*(ku-ks+1)*(NREMAP+NSCALARS);
00297 /* Post a non-blocking receive for the input data */
00298       ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, getfrom_id,
00299                       shearing_sheet_ix1_tag, pD->Comm_Domain, &rq);
00300 
00301       pSnd = send_buf;
00302       for (k=ks; k<=ku; k++) {
00303         for (j=je-(joverlap-1); j<=je; j++) {
00304           for(i=0; i<nghost; i++){
00305             /* Get a pointer to the Remap structure */
00306             pRemap = &(GhstZnsBuf[k][i][j]);
00307 
00308             for (n=0; n<NREMAP; n++) *(pSnd++) = pRemap->U[n];
00309 #if (NSCALARS > 0)
00310             for (n=0; n<NSCALARS; n++) *(pSnd++) = pRemap->s[n];
00311 #endif
00312           }
00313         }
00314       }
00315       ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, sendto_id,
00316                      shearing_sheet_ix1_tag, pD->Comm_Domain);
00317 
00318 /*--- Step 5c. -----------------------------------------------------------------
00319  * unpack data sent from [je-(joverlap-1):je], and remap into cells in
00320  * [js:js+(joverlap-1)] in GhstZns */
00321 
00322       ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
00323 
00324       pRcv = recv_buf;
00325       for (k=ks; k<=ku; k++) {
00326         for (j=js; j<=js+(joverlap-1); j++) {
00327           for(i=0; i<nghost; i++){
00328             /* Get a pointer to the Remap structure */
00329             pRemap = &(GhstZns[k][i][j]);
00330   
00331             for (n=0; n<NREMAP; n++) pRemap->U[n] = *(pRcv++);
00332 #if (NSCALARS > 0)
00333             for (n=0; n<NSCALARS; n++) pRemap->s[n] = *(pRcv++);
00334 #endif
00335           }
00336         }
00337       }
00338 
00339     }
00340 
00341 /*--- Step 5d. -----------------------------------------------------------------
00342  * If shear is less than one full Grid, remap cells which remain on same
00343  * processor
00343  * from GhstZnsBuf into GhstZns.  Cells in [js:je-joverlap] are shifted by
00344  * joverlap into [js+joverlap:je] */
00345 
00346     if (Ngrids == 0) {
00347 
00348       for(k=ks; k<=ku; k++) {
00349         for(j=js+joverlap; j<=je; j++){
00350           jremap = j-joverlap;
00351           for(i=0; i<nghost; i++){
00352             for (n=0; n<(NREMAP); n++) {
00353               GhstZns[k][i][j].U[n]  = GhstZnsBuf[k][i][jremap].U[n];
00354             }
00355 #if (NSCALARS > 0)
00356             for (n=0; n<NSCALARS; n++) { 
00357               GhstZns[k][i][j].s[n] = GhstZnsBuf[k][i][jremap].s[n];
00358             }
00359 #endif
00360           }
00361         }
00362       }
00363 
00364 /*--- Step 5e. -----------------------------------------------------------------
00365  * If shear is more than one Grid, pack and send data from [js:je-joverlap]
00366  * from GhstZnsBuf (this step replaces 5d) */
00367 
00368     } else {
00369 
00370 /* indices of sendto and getfrom processors in GData are -/+1 from Step 5a */
00371 
00372       jproc = my_jproc + Ngrids;
00373       if (jproc > (pD->NGrid[1]-1)) jproc -= pD->NGrid[1];
00374       sendto_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
00375 
00376       jproc = my_jproc - Ngrids;
00377       if (jproc < 0) jproc += pD->NGrid[1];
00378       getfrom_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
00379 
00380       cnt = nghost*(pG->Nx[1]-joverlap)*(ku-ks+1)*(NREMAP+NSCALARS);
00381 /* Post a non-blocking receive for the input data from the left grid */
00382       ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, getfrom_id,
00383                       shearing_sheet_ix1_tag, pD->Comm_Domain, &rq);
00384 
00385       pSnd = send_buf;
00386       for (k=ks; k<=ku; k++) {
00387         for (j=js; j<=je-joverlap; j++) {
00388           for(i=0; i<nghost; i++){
00389             /* Get a pointer to the Remap structure */
00390             pRemap = &(GhstZnsBuf[k][i][j]);
00391             for (n=0; n<NREMAP; n++) *(pSnd++) = pRemap->U[n];
00392 #if (NSCALARS > 0)
00393             for (n=0; n<NSCALARS; n++) *(pSnd++) = pRemap->s[n];
00394 #endif
00395           }
00396         }
00397       }
00398       ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, sendto_id,
00399                      shearing_sheet_ix1_tag, pD->Comm_Domain);
00400 
00401 /* unpack data sent from [js:je-joverlap], and remap into cells in
00402  * [js+joverlap:je] in GhstZns */
00403 
00404       ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
00405 
00406       pRcv = recv_buf;
00407       for (k=ks; k<=ku; k++) {
00408         for (j=js+joverlap; j<=je; j++) {
00409           for(i=0; i<nghost; i++){
00410             /* Get a pointer to the Remap structure */
00411             pRemap = &(GhstZns[k][i][j]);
00412             for (n=0; n<NREMAP; n++) pRemap->U[n] = *(pRcv++);
00413 #if (NSCALARS > 0)
00414             for (n=0; n<NSCALARS; n++) pRemap->s[n] = *(pRcv++);
00415 #endif
00416           }
00417         }
00418       }
00419     } /* end of step 5e - shear is more than one Grid */
00420 
00421 #endif /* MPI_PARALLEL */
00422   } /* end of step 5 - MPI decomposition in Y */
00423 
00424 /*--- Step 6. ------------------------------------------------------------------
00425  * Now copy remapped variables back into ghost cells */
00426 
00427   for(k=ks; k<=ke; k++) {
00428     for(j=js; j<=je; j++){
00429       for(i=0; i<nghost; i++){
00430         pG->U[k][j][is-nghost+i].d  = GhstZns[k][i][j].U[0];
00431         pG->U[k][j][is-nghost+i].M1 = GhstZns[k][i][j].U[1];
00432         pG->U[k][j][is-nghost+i].M2 = GhstZns[k][i][j].U[2];
00433         pG->U[k][j][is-nghost+i].M3 = GhstZns[k][i][j].U[3];
00434 #ifdef ADIABATIC
00435         pG->U[k][j][is-nghost+i].E  = GhstZns[k][i][j].U[4];
00436 #endif /* ADIABATIC */
00437 #ifdef MHD
00438         pG->U[k][j][is-nghost+i].B1c = GhstZns[k][i][j].U[NREMAP-4];
00439         pG->B1i[k][j][is-nghost+i] = GhstZns[k][i][j].U[NREMAP-3];
00440         pG->B2i[k][j][is-nghost+i] = GhstZns[k][i][j].U[NREMAP-2];
00441         pG->B3i[k][j][is-nghost+i] = GhstZns[k][i][j].U[NREMAP-1];
00442 #endif /* MHD */
00443 #if (NSCALARS > 0)
00444         for (n=0; n<NSCALARS; n++) {
00445           pG->U[k][j][is-nghost+i].s[n] = GhstZns[k][i][j].s[n];
00446         }
00447 #endif
00448       }
00449     }
00450   }
00451 
00452 /* Copy the face-centered B3 component of the field at k=ke+1 in 3D */
00453 #ifdef MHD
00454   if (pG->Nx[2] > 1) {
00455     for(j=js; j<=je; j++){
00456       for(i=0; i<nghost; i++){
00457         pG->B3i[ke+1][j][is-nghost+i] = GhstZns[ke+1][i][j].U[NREMAP-1];
00458       }
00459     }
00460   }
00461 #endif /* MHD */
00462 
00463 /*--- Step 7. ------------------------------------------------------------------
00464  * compute cell-centered B as average of remapped face centered B, except B1.
00465  * The value of B2c at j=je is incorrect since B2i[je+1] not yet set -- fix in
00466  * step 10 below */
00467 
00468 #ifdef MHD
00469   for (k=ks; k<=ke; k++) {
00470     for (j=js; j<=je; j++) {
00471       for(i=is-nghost; i<is; i++){
00472         pG->U[k][j][i].B2c = 0.5*(pG->B2i[k][j][i]+pG->B2i[k][j+1][i]);
00473       }
00474     }
00475   }
00476   if (pG->Nx[2] > 1) {
00477     for (k=ks; k<=ke; k++) {
00478       for (j=js; j<=je; j++) {
00479         for(i=is-nghost; i<is; i++){
00480           pG->U[k][j][i].B3c = 0.5*(pG->B3i[k][j][i]+pG->B3i[k+1][j][i]);
00481         }
00482       }
00483     }
00484   }
00485 #endif /* MHD */
00486 
00487 /*--- Step 8. ------------------------------------------------------------------
00488  * With no MPI decomposition in Y, apply periodic BCs in Y (similar to
00489  * periodic_ix2() and periodic_ox2() in bvals_mhd.c) */
00490 
00491   if (pD->NGrid[1] == 1) {
00492 
00493     for(k=ks; k<=ke; k++) {
00494       for(j=1; j<=nghost; j++){
00495         for(i=is-nghost; i<is; i++){
00496           pG->U[k][js-j][i] = pG->U[k][je-(j-1)][i];
00497           pG->U[k][je+j][i] = pG->U[k][js+(j-1)][i];
00498 #ifdef MHD
00499           pG->B1i[k][js-j][i] = pG->B1i[k][je-(j-1)][i];
00500           pG->B2i[k][js-j][i] = pG->B2i[k][je-(j-1)][i];
00501           pG->B3i[k][js-j][i] = pG->B3i[k][je-(j-1)][i];
00502 
00503           pG->B1i[k][je+j][i] = pG->B1i[k][js+(j-1)][i];
00504           pG->B2i[k][je+j][i] = pG->B2i[k][js+(j-1)][i];
00505           pG->B3i[k][je+j][i] = pG->B3i[k][js+(j-1)][i];
00506 #endif /* MHD */
00507         }
00508       }
00509     }
00510 #ifdef MHD
00511     if (pG->Nx[2] > 1) {
00512       for (j=1; j<=nghost; j++) {
00513         for (i=is-nghost; i<is; i++) {
00514           pG->B3i[ke+1][js-j][i] = pG->B3i[ke+1][je-(j-1)][i];
00515           pG->B3i[ke+1][je+j][i] = pG->B3i[ke+1][js+(j-1)][i];
00516         }
00517       }
00518     }
00519 #endif /* MHD */
00520 
00521 #ifdef MPI_PARALLEL
00522   } else {
00523 
00524 /*--- Step 9. ------------------------------------------------------------------
00525  * With MPI decomposition in Y, use MPI calls to handle periodic BCs in Y (like
00526  * send_ox2/receive_ix2 and send_ix2/receive_ox2 pairs in bvals_mhd.c) */
00527 
00528 
00529 /* Post a non-blocking receive for the input data from the left grid */
00530     cnt = nghost*nghost*(ku-ks+1)*NVAR_SHARE;
00531     ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->lx2_id,
00532                     shearing_sheet_ix1_tag, pD->Comm_Domain, &rq);
00533 
00534     pSnd = send_buf;
00535     for (k=ks; k<=ku; k++){
00536       for (j=je-nghost+1; j<=je; j++){
00537         for (i=is-nghost; i<is; i++){
00538           /* Get a pointer to the ConsS cell */
00539           pCons = &(pG->U[k][j][i]);
00540 
00541           *(pSnd++) = pCons->d;
00542           *(pSnd++) = pCons->M1;
00543           *(pSnd++) = pCons->M2;
00544           *(pSnd++) = pCons->M3;
00545 #ifndef BAROTROPIC
00546           *(pSnd++) = pCons->E;
00547 #endif /* BAROTROPIC */
00548 #ifdef MHD
00549           *(pSnd++) = pCons->B1c;
00550           *(pSnd++) = pCons->B2c;
00551           *(pSnd++) = pCons->B3c;
00552           *(pSnd++) = pG->B1i[k][j][i];
00553           *(pSnd++) = pG->B2i[k][j][i];
00554           *(pSnd++) = pG->B3i[k][j][i];
00555 #endif /* MHD */
00556 #if (NSCALARS > 0)
00557           for (n=0; n<NSCALARS; n++) *(pSnd++) = pCons->s[n];
00558 #endif
00559         }
00560       }
00561     }
00562 
00563 /* send contents of buffer to the neighboring grid on R-x2 */
00564     ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->rx2_id,
00565                    shearing_sheet_ix1_tag, pD->Comm_Domain);
00566 
00567 /* Wait to receive the input data from the left grid */
00568     ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
00569 
00570     pRcv = recv_buf;
00571     for (k=ks; k<=ku; k++){
00572       for (j=js-nghost; j<=js-1; j++){
00573         for (i=is-nghost; i<is; i++){
00574           /* Get a pointer to the ConsS cell */
00575           pCons = &(pG->U[k][j][i]);
00576 
00577           pCons->d  = *(pRcv++);
00578           pCons->M1 = *(pRcv++);
00579           pCons->M2 = *(pRcv++);
00580           pCons->M3 = *(pRcv++);
00581 #ifndef BAROTROPIC
00582           pCons->E  = *(pRcv++);
00583 #endif /* BAROTROPIC */
00584 #ifdef MHD
00585           pCons->B1c = *(pRcv++);
00586           pCons->B2c = *(pRcv++);
00587           pCons->B3c = *(pRcv++);
00588           pG->B1i[k][j][i] = *(pRcv++);
00589           pG->B2i[k][j][i] = *(pRcv++);
00590           pG->B3i[k][j][i] = *(pRcv++);
00591 #endif /* MHD */
00592 #if (NSCALARS > 0)
00593           for (n=0; n<NSCALARS; n++) pCons->s[n] = *(pRcv++);
00594 #endif
00595         }
00596       }
00597     }
00598 
00599 /* Post a non-blocking receive for the input data from the right grid */
00600     ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->rx2_id,
00601                     shearing_sheet_ix1_tag, pD->Comm_Domain, &rq);
00602 
00603     pSnd = send_buf;
00604     for (k=ks; k<=ku; k++){
00605       for (j=js; j<=js+nghost-1; j++){
00606         for (i=is-nghost; i<is; i++){
00607           /* Get a pointer to the ConsS cell */
00608           pCons = &(pG->U[k][j][i]);
00609 
00610           *(pSnd++) = pCons->d;
00611           *(pSnd++) = pCons->M1;
00612           *(pSnd++) = pCons->M2;
00613           *(pSnd++) = pCons->M3;
00614 #ifndef BAROTROPIC
00615           *(pSnd++) = pCons->E;
00616 #endif /* BAROTROPIC */
00617 #ifdef MHD
00618           *(pSnd++) = pCons->B1c;
00619           *(pSnd++) = pCons->B2c;
00620           *(pSnd++) = pCons->B3c;
00621           *(pSnd++) = pG->B1i[k][j][i];
00622           *(pSnd++) = pG->B2i[k][j][i];
00623           *(pSnd++) = pG->B3i[k][j][i];
00624 #endif /* MHD */
00625 #if (NSCALARS > 0)
00626           for (n=0; n<NSCALARS; n++) *(pSnd++) = pCons->s[n];
00627 #endif
00628         }
00629       }
00630     }
00631 
00632 /* send contents of buffer to the neighboring grid on L-x2 */
00633     ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->lx2_id,
00634                    shearing_sheet_ix1_tag, pD->Comm_Domain);
00635 
00636 /* Wait to receive the input data from the right grid */
00637     ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
00638 
00639     pRcv = recv_buf;
00640     for (k=ks; k<=ku; k++){
00641       for (j=je+1; j<=je+nghost; j++){
00642         for (i=is-nghost; i<is; i++){
00643           /* Get a pointer to the ConsS cell */
00644           pCons = &(pG->U[k][j][i]);
00645 
00646           pCons->d  = *(pRcv++);
00647           pCons->M1 = *(pRcv++);
00648           pCons->M2 = *(pRcv++);
00649           pCons->M3 = *(pRcv++);
00650 #ifndef BAROTROPIC
00651           pCons->E  = *(pRcv++);
00652 #endif /* BAROTROPIC */
00653 #ifdef MHD
00654           pCons->B1c = *(pRcv++);
00655           pCons->B2c = *(pRcv++);
00656           pCons->B3c = *(pRcv++);
00657           pG->B1i[k][j][i] = *(pRcv++);
00658           pG->B2i[k][j][i] = *(pRcv++);
00659           pG->B3i[k][j][i] = *(pRcv++);
00660 #endif /* MHD */
00661 #if (NSCALARS > 0)
00662           for (n=0; n<NSCALARS; n++) pCons->s[n] = *(pRcv++);
00663 #endif
00664         }
00665       }
00666     }
00667 #endif /* MPI_PARALLEL */
00668 
00669   } /* end of step 9 - periodic BC in Y with MPI */
00670 
00671 /*--- Step 10 ------------------------------------------------------------------
00672  * Fix B2c at j=je,js-1, now that B2i[je+1] has been set properly  */
00673 
00674 #ifdef MHD
00675   for (k=ks; k<=ke; k++) {
00676     for(i=is-nghost; i<is; i++){
00677       pG->U[k][je  ][i].B2c = 0.5*(pG->B2i[k][je+1][i]+pG->B2i[k][je][i]);
00678       pG->U[k][js-1][i].B2c = 0.5*(pG->B2i[k][js-1][i]+pG->B2i[k][js][i]);
00679     }
00680   }
00681 #endif /* MHD */
00682 
00683   } /* end of if */
00684 
00685 /*--- Step 11 ------------------------------------------------------------------
00686  * Shearing sheet BC in 2D xz.  Periodic BC already applied in x1 and x2 in
00687  * bvals_mhd (including for MPI parallel jobs).  Now just have to add offset
00688  * to azimuthal velocity when FARGO not defined */
00689 
00690 #ifndef FARGO
00691   if (pG->Nx[2] == 1 && ShBoxCoord==xz) {
00692 
00693     for (j=js-nghost; j<=je+nghost; j++) {
00694       for (i=1; i<=nghost; i++) {
00695 #ifdef ADIABATIC
00696 /* No change in the internal energy */
00697         pG->U[ks][j][is-i].E += (0.5/pG->U[ks][j][is-i].d)*
00698          (SQR((pG->U[ks][j][is-i].M3 + qomL*pG->U[ks][j][is-i].d))
00699         - SQR(pG->U[ks][j][is-i].M3));
00700 #endif
00701         pG->U[ks][j][is-i].M3 += qomL*pG->U[ks][j][is-i].d;
00702       }
00703     }
00704 
00705   }
00706 #endif /* FARGO */
00707 
00708   return;
00709 }
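
A worked example (made-up numbers, illustration only) of the Step 5 bookkeeping above: suppose joffset = 37, pG->Nx[1] = 16 cells per Grid, and pD->NGrid[1] = 4 Grids in y.

/* Ngrids   = joffset/pG->Nx[1]          = 2
 * joverlap = joffset - Ngrids*pG->Nx[1] = 5
 * Step 5a/5b/5c: GhstZnsBuf[je-4:je] is sent to jproc = (my_jproc+3) mod 4 and
 *                received into GhstZns[js:js+4] from jproc = (my_jproc-3) mod 4.
 * Step 5e (Ngrids > 0): the remaining [js:je-5] is sent to (my_jproc+2) mod 4
 *                and received into [js+5:je] from (my_jproc-2) mod 4. */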
00710 
00711 
00712 /*----------------------------------------------------------------------------*/
00713 /*! \fn void ShearingSheet_ox1(DomainS *pD)
00714  *  \brief Shearing-sheet BCs at ox1, for 2D and 3D.
00715  *
00716  * It applies a remap
00717  * in Y after the ghost cells have been set by the usual periodic BCs in X and
00718  * Y implemented in bvals_mhd.c
00719  *
00720  * This is a public function which is called by bvals_mhd() inside a
00721  * SHEARING_BOX macro.                                                        */
00722 /*----------------------------------------------------------------------------*/
00723 
00724 void ShearingSheet_ox1(DomainS *pD)
00725 {
00726   GridS *pG = pD->Grid;
00727   int is = pG->is, ie = pG->ie;
00728   int js = pG->js, je = pG->je;
00729   int ks = pG->ks, ke = pG->ke;
00730   int i,ii,j,k,ku,n,joffset,jremap;
00731   Real xmin,xmax,Lx,Ly,qomL,yshear,deltay,epso;
00732 #ifdef MPI_PARALLEL
00733   int my_iproc,my_jproc,my_kproc,cnt,jproc,joverlap,Ngrids;
00734   int ierr,sendto_id,getfrom_id;
00735   double *pSnd,*pRcv;
00736   Remap *pRemap;
00737   ConsS *pCons;
00738   MPI_Request rq;
00739 #endif
00740 
00741 /*--- Step 1. ------------------------------------------------------------------
00742  * Compute the distance the computational domain has sheared in y */
00743 
00744   xmin = pD->RootMinX[0];
00745   xmax = pD->RootMaxX[0];
00746   Lx = xmax - xmin;
00747 
00748   xmin = pD->RootMinX[1];
00749   xmax = pD->RootMaxX[1];
00750   Ly = xmax - xmin;
00751 
00752   qomL = qshear*Omega_0*Lx;
00753   yshear = qomL*pG->time;
00754 
00755 /* Split this into integer and fractional pieces of the Domain in y.  Ignore
00756  * the integer piece because the Grid is periodic in y */
00757 
00758   deltay = fmod(yshear, Ly);
00759 
00760 /* further decompose the fractional piece into integer and fractional pieces of
00761  * a grid cell.  Note -1.0 < epso <= 0.0.  If Domain has MPI decomposition in Y,
00762  * then it is possible that:  pD->Nx[1] > joffset > pG->Nx[1]   */
00763 
00764   joffset = (int)(deltay/pG->dx2);
00765   epso = -(fmod(deltay,pG->dx2))/pG->dx2;
00766 
00767 /*--- Step 2. ------------------------------------------------------------------
00768  * Copy data into GhstZns array.  Note i and j indices are switched.
00769  * Steps 2-10 are for 3D or 2D xy runs.  Step 11 handles 2D xz separately */
00770 
00771   if (pG->Nx[2] > 1 || ShBoxCoord==xy) {  /* this if ends at end of step 10 */
00772 
00773   if (pG->Nx[2] > 1) ku=ke+1; else ku=ke;
00774   for(k=ks; k<=ku; k++) {
00775     for(j=js-nghost; j<=je+nghost; j++){
00776       for(i=0; i<nghost; i++){
00777         ii = ie+1+i;
00778         GhstZns[k][i][j].U[0] = pG->U[k][j][ii].d;
00779         GhstZns[k][i][j].U[1] = pG->U[k][j][ii].M1;
00780         GhstZns[k][i][j].U[2] = pG->U[k][j][ii].M2;
00781 #ifndef FARGO
00782         GhstZns[k][i][j].U[2] -= qomL*pG->U[k][j][ii].d;
00783 #endif
00784         GhstZns[k][i][j].U[3] = pG->U[k][j][ii].M3;
00785 #ifdef ADIABATIC
00786 /* No change in the internal energy */
00787         GhstZns[k][i][j].U[4] = pG->U[k][j][ii].E + (0.5/GhstZns[k][i][j].U[0])*
00788           (SQR(GhstZns[k][i][j].U[2]) - SQR(pG->U[k][j][ii].M2));
00789 #endif /* ADIABATIC */
00790 #ifdef MHD
00791         GhstZns[k][i][j].U[NREMAP-4] = pG->U[k][j][ii].B1c;
00792         GhstZns[k][i][j].U[NREMAP-3] = pG->B1i[k][j][ii];
00793         GhstZns[k][i][j].U[NREMAP-2] = pG->B2i[k][j][ii];
00794         GhstZns[k][i][j].U[NREMAP-1] = pG->B3i[k][j][ii];
00795 #endif /* MHD */
00796 #if (NSCALARS > 0)
00797         for(n=0; n<NSCALARS; n++) GhstZns[k][i][j].s[n] = pG->U[k][j][ii].s[n];
00798 #endif
00799       }
00800     }
00801   }
00802 
00803 /*--- Step 3. ------------------------------------------------------------------
00804  * Copy GhstZns into buffer, at the same time apply a conservative remap of
00805  * solution over the fractional part of grid cell */
00806 
00807   for(k=ks; k<=ku; k++) {
00808     for(i=0; i<nghost; i++){
00809 
00810       for (n=0; n<(NREMAP); n++) {
00811         for (j=js-nghost; j<=je+nghost; j++) U[j] = GhstZns[k][i][j].U[n];
00812         RemapFlux(U,epso,js,je+1,Flx);
00813         for(j=js; j<=je; j++){
00814           GhstZnsBuf[k][i][j].U[n] = GhstZns[k][i][j].U[n] - (Flx[j+1]-Flx[j]);
00815         }
00816       }
00817 
00818 #if (NSCALARS > 0)
00819       for (n=0; n<(NSCALARS); n++) {
00820         for (j=js-nghost; j<=je+nghost; j++) U[j] = GhstZns[k][i][j].s[n];
00821         RemapFlux(U,epso,js,je+1,Flx);
00822         for(j=js; j<=je; j++){
00823           GhstZnsBuf[k][i][j].s[n] = GhstZns[k][i][j].s[n] - (Flx[j+1]-Flx[j]);
00824         }
00825       }
00826 #endif
00827 
00828     }
00829   }
00830 
00831 /*--- Step 4. ------------------------------------------------------------------
00832  * If no MPI decomposition in Y, apply shift over integer number of
00833  * grid cells during copy from buffer back into GhstZns.  */
00834 
00835   if (pD->NGrid[1] == 1) {
00836 
00837     for(k=ks; k<=ku; k++) {
00838       for(j=js; j<=je; j++){
00839         jremap = j + joffset;
00840         if (jremap > (int)je) jremap -= pG->Nx[1];
00841 
00842         for(i=0; i<nghost; i++){
00843           for (n=0; n<(NREMAP); n++) {
00844             GhstZns[k][i][j].U[n]  = GhstZnsBuf[k][i][jremap].U[n];
00845           }
00846 #if (NSCALARS > 0)
00847           for (n=0; n<NSCALARS; n++) { 
00848             GhstZns[k][i][j].s[n] = GhstZnsBuf[k][i][jremap].s[n];
00849           }
00850 #endif
00851         }
00852 
00853       }
00854     }
00855 
00856 #ifdef MPI_PARALLEL
00857   } else {
00858 
00859 /*--- Step 5. ------------------------------------------------------------------
00860  * If Domain contains MPI decomposition in Y, then MPI calls are required for
00861  * the cyclic shift needed to apply shift over integer number of grid cells
00862  * during copy from buffer back into GhstZns.  */
00863 
00864     get_myGridIndex(pD, myID_Comm_world, &my_iproc, &my_jproc, &my_kproc);
00865 
00866 /* Find integer and fractional number of grids over which offset extends.
00867  * This assumes every grid has same number of cells in x2-direction! */
00868     Ngrids = (int)(joffset/pG->Nx[1]);
00869     joverlap = joffset - Ngrids*pG->Nx[1];
00870 
00871 /*--- Step 5a. -----------------------------------------------------------------
00872  * Find ids of processors that data in [js:js+(joverlap-1)] is sent to, and
00873  * data in [je-(joverlap-1):je] is received from.  Only execute if joverlap>0  */
00874 /* This can result in send/receive to self -- we rely on MPI to handle this
00875  * properly */
00876 
00877     if (joverlap != 0) {
00878 
00879       jproc = my_jproc - (Ngrids + 1);
00880       if (jproc < 0) jproc += pD->NGrid[1]; 
00881       sendto_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
00882 
00883       jproc = my_jproc + (Ngrids + 1);
00884       if (jproc > (pD->NGrid[1]-1)) jproc -= pD->NGrid[1]; 
00885       getfrom_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
00886 
00887 /*--- Step 5b. -----------------------------------------------------------------
00888  * Pack send buffer and send data in [js:js+(joverlap-1)] from GhstZnsBuf */
00889 
00890       cnt = nghost*joverlap*(ku-ks+1)*(NREMAP+NSCALARS);
00891 /* Post a non-blocking receive for the input data */
00892       ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, getfrom_id,
00893                       shearing_sheet_ox1_tag, pD->Comm_Domain, &rq);
00894 
00895       pSnd = send_buf;
00896       for (k=ks; k<=ku; k++) {
00897         for (j=js; j<=js+(joverlap-1); j++) {
00898           for(i=0; i<nghost; i++){
00899             /* Get a pointer to the Remap structure */
00900             pRemap = &(GhstZnsBuf[k][i][j]);
00901 
00902             for (n=0; n<NREMAP; n++) *(pSnd++) = pRemap->U[n];
00903 #if (NSCALARS > 0)
00904             for (n=0; n<NSCALARS; n++) *(pSnd++) = pRemap->s[n];
00905 #endif
00906           }
00907         }
00908       }
00909       ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, sendto_id,
00910                      shearing_sheet_ox1_tag, pD->Comm_Domain);
00911 
00912 
00913 /*--- Step 5c. -----------------------------------------------------------------
00914  * unpack data sent from [js:js+(joverlap-1)], and remap into cells in
00915  * [je-(joverlap-1):je] in GhstZns
00916  */
00917 
00918       ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
00919 
00920       pRcv = recv_buf;
00921       for (k=ks; k<=ku; k++) {
00922         for (j=je-(joverlap-1); j<=je; j++) {
00923           for(i=0; i<nghost; i++){
00924             /* Get a pointer to the Remap structure */
00925             pRemap = &(GhstZns[k][i][j]);
00926   
00927             for (n=0; n<NREMAP; n++) pRemap->U[n] = *(pRcv++);
00928 #if (NSCALARS > 0)
00929             for (n=0; n<NSCALARS; n++) pRemap->s[n] = *(pRcv++);
00930 #endif
00931           }
00932         }
00933       }
00934 
00935     }
00936 
00937 /*--- Step 5d. -----------------------------------------------------------------
00938  * If shear is less than one full Grid, remap cells which remain on same
00939  * processor
00939  * from GhstZnsBuf into GhstZns.  Cells in [js+joverlap:je] are shifted by
00940  * joverlap into [js:je-joverlap] */
00941 
00942     if (Ngrids == 0) {
00943 
00944       for(k=ks; k<=ku; k++) {
00945         for(j=js; j<=je-joverlap; j++){
00946           jremap = j+joverlap;
00947           for(i=0; i<nghost; i++){
00948             for (n=0; n<(NREMAP); n++) {
00949               GhstZns[k][i][j].U[n]  = GhstZnsBuf[k][i][jremap].U[n];
00950             }
00951 #if (NSCALARS > 0)
00952             for (n=0; n<NSCALARS; n++) { 
00953               GhstZns[k][i][j].s[n] = GhstZnsBuf[k][i][jremap].s[n];
00954             }
00955 #endif
00956           }
00957         }
00958       }
00959 
00960 /*--- Step 5e. -----------------------------------------------------------------
00961  * If shear is more than one Grid, pack and send data from [js+joverlap:je]
00962  * from GhstZnsBuf (this step replaces 5d) */
00963 
00964     } else {
00965 
00966 /* indices of sendto and getfrom processors in GData are -/+1 from Step 5a */
00967 
00968       jproc = my_jproc - Ngrids;
00969       if (jproc < 0) jproc += pD->NGrid[1];
00970       sendto_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
00971 
00972       jproc = my_jproc + Ngrids;
00973       if (jproc > (pD->NGrid[1]-1)) jproc -= pD->NGrid[1];
00974       getfrom_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
00975 
00976       cnt = nghost*(pG->Nx[1]-joverlap)*(ku-ks+1)*(NREMAP+NSCALARS);
00977 /* Post a non-blocking receive for the input data from the left grid */
00978       ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, getfrom_id,
00979                       shearing_sheet_ox1_tag, pD->Comm_Domain, &rq);
00980 
00981       pSnd = send_buf;
00982       for (k=ks; k<=ku; k++) {
00983         for (j=js+joverlap; j<=je; j++) {
00984           for(i=0; i<nghost; i++){
00985             /* Get a pointer to the Remap structure */
00986             pRemap = &(GhstZnsBuf[k][i][j]);
00987             for (n=0; n<NREMAP; n++) *(pSnd++) = pRemap->U[n];
00988 #if (NSCALARS > 0)
00989             for (n=0; n<NSCALARS; n++) *(pSnd++) = pRemap->s[n];
00990 #endif
00991           }
00992         }
00993       }
00994       ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, sendto_id,
00995                      shearing_sheet_ox1_tag, pD->Comm_Domain);
00996 
00997 /* unpack data sent from [js+joverlap:je], and remap into cells in
00998  * [js:je-joverlap] in GhstZns */
00999 
01000       ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
01001 
01002       pRcv = recv_buf;
01003       for (k=ks; k<=ku; k++) {
01004         for (j=js; j<=je-joverlap; j++) {
01005           for(i=0; i<nghost; i++){
01006             /* Get a pointer to the Remap structure */
01007             pRemap = &(GhstZns[k][i][j]);
01008             for (n=0; n<NREMAP; n++) pRemap->U[n] = *(pRcv++);
01009 #if (NSCALARS > 0)
01010             for (n=0; n<NSCALARS; n++) pRemap->s[n] = *(pRcv++);
01011 #endif
01012           }
01013         }
01014       }
01015     } /* end of step 5e - shear is more than one Grid */
01016 
01017 #endif /* MPI_PARALLEL */
01018   } /* end of step 5 - MPI decomposition in Y */
01019 
01020 /*--- Step 6. ------------------------------------------------------------------
01021  * Now copy remapped variables back into ghost cells, except B1i[ie+1] */
01022 
01023   for(k=ks; k<=ke; k++) {
01024     for(j=js; j<=je; j++){
01025       for(i=0; i<nghost; i++){
01026         pG->U[k][j][ie+1+i].d  = GhstZns[k][i][j].U[0];
01027         pG->U[k][j][ie+1+i].M1 = GhstZns[k][i][j].U[1];
01028         pG->U[k][j][ie+1+i].M2 = GhstZns[k][i][j].U[2];
01029         pG->U[k][j][ie+1+i].M3 = GhstZns[k][i][j].U[3];
01030 #ifdef ADIABATIC
01031         pG->U[k][j][ie+1+i].E  = GhstZns[k][i][j].U[4];
01032 #endif /* ADIABATIC */
01033 #ifdef MHD
01034         pG->U[k][j][ie+1+i].B1c = GhstZns[k][i][j].U[NREMAP-4];
01035         if(i>0) pG->B1i[k][j][ie+1+i] = GhstZns[k][i][j].U[NREMAP-3];
01036         pG->B2i[k][j][ie+1+i] = GhstZns[k][i][j].U[NREMAP-2];
01037         pG->B3i[k][j][ie+1+i] = GhstZns[k][i][j].U[NREMAP-1];
01038 #endif /* MHD */
01039 #if (NSCALARS > 0)
01040         for (n=0; n<NSCALARS; n++) {
01041           pG->U[k][j][ie+1+i].s[n] = GhstZns[k][i][j].s[n];
01042         }
01043 #endif
01044       }
01045     }
01046   }
01047 
01048 /* Copy the face-centered B3 component of the field at k=ke+1 in 3D */
01049 #ifdef MHD
01050   if (pG->Nx[2] > 1) {
01051     for(j=js; j<=je; j++){
01052       for(i=0; i<nghost; i++){
01053         pG->B3i[ke+1][j][ie+1+i] = GhstZns[ke+1][i][j].U[NREMAP-1];
01054       }
01055     }
01056   }
01057 #endif /* MHD */
01058 
01059 /*--- Step 7. ------------------------------------------------------------------
01060  * compute cell-centered B as average of remapped face centered B, except B1.
01061  * The value of B2c at j=je is incorrect since B2i[je+1] not yet set -- fix in
01062  * step 10 below */
01063 
01064 #ifdef MHD
01065   for (k=ks; k<=ke; k++) {
01066     for (j=js; j<=je; j++) {
01067       for(i=ie+1; i<=ie+nghost; i++){
01068         pG->U[k][j][i].B2c = 0.5*(pG->B2i[k][j][i]+pG->B2i[k][j+1][i]);
01069       }
01070     }
01071   }
01072   if (pG->Nx[2] > 1) {
01073     for (k=ks; k<=ke; k++) {
01074       for (j=js; j<=je; j++) {
01075         for(i=ie+1; i<=ie+nghost; i++){
01076           pG->U[k][j][i].B3c = 0.5*(pG->B3i[k][j][i]+pG->B3i[k+1][j][i]);
01077         }
01078       }
01079     }
01080   }
01081 #endif /* MHD */
01082 
01083 /*--- Step 8. ------------------------------------------------------------------
01084  * With no MPI decomposition in Y, apply periodic BCs in Y (similar to
01085  * periodic_ix2() and periodic_ox2() in bvals_mhd.c) */
01086 
01087   if (pD->NGrid[1] == 1) {
01088 
01089     for(k=ks; k<=ke; k++) {
01090       for(j=1; j<=nghost; j++){
01091         for(i=ie+1; i<=ie+nghost; i++){
01092           pG->U[k][js-j][i] = pG->U[k][je-(j-1)][i];
01093           pG->U[k][je+j][i] = pG->U[k][js+(j-1)][i];
01094 #ifdef MHD
01095           pG->B1i[k][js-j][i] = pG->B1i[k][je-(j-1)][i];
01096           pG->B2i[k][js-j][i] = pG->B2i[k][je-(j-1)][i];
01097           pG->B3i[k][js-j][i] = pG->B3i[k][je-(j-1)][i];
01098 
01099           pG->B1i[k][je+j][i] = pG->B1i[k][js+(j-1)][i];
01100           pG->B2i[k][je+j][i] = pG->B2i[k][js+(j-1)][i];
01101           pG->B3i[k][je+j][i] = pG->B3i[k][js+(j-1)][i];
01102 #endif /* MHD */
01103         }
01104       }
01105     }
01106 #ifdef MHD
01107     if (pG->Nx[2] > 1) for (j=1; j<=nghost; j++) { /* 3D only, as at ix1 */
01108       for (i=ie+1; i<=ie+nghost; i++) {
01109         pG->B3i[ke+1][js-j][i] = pG->B3i[ke+1][je-(j-1)][i];
01110         pG->B3i[ke+1][je+j][i] = pG->B3i[ke+1][js+(j-1)][i];
01111       }
01112     }
01113 #endif /* MHD */
01114 
01115 #ifdef MPI_PARALLEL
01116   } else {
01117 
01118 /*--- Step 9. ------------------------------------------------------------------
01119  * With MPI decomposition in Y, use MPI calls to handle periodic BCs in Y (like
01120  * send_ox2/receive_ix2 and send_ix2/receive_ox2 pairs in bvals_mhd.c) */
01121 
01122 
01123 /* Post a non-blocking receive for the input data from the left grid */
01124     cnt = nghost*nghost*(ku-ks+1)*NVAR_SHARE;
01125     ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->lx2_id,
01126                     shearing_sheet_ox1_tag, pD->Comm_Domain, &rq);
01127 
01128     pSnd = send_buf;
01129     for (k=ks; k<=ku; k++){
01130       for (j=je-nghost+1; j<=je; j++){
01131         for (i=ie+1; i<=ie+nghost; i++){
01132           /* Get a pointer to the ConsS cell */
01133           pCons = &(pG->U[k][j][i]);
01134 
01135           *(pSnd++) = pCons->d;
01136           *(pSnd++) = pCons->M1;
01137           *(pSnd++) = pCons->M2;
01138           *(pSnd++) = pCons->M3;
01139 #ifndef BAROTROPIC
01140           *(pSnd++) = pCons->E;
01141 #endif /* BAROTROPIC */
01142 #ifdef MHD
01143           *(pSnd++) = pCons->B1c;
01144           *(pSnd++) = pCons->B2c;
01145           *(pSnd++) = pCons->B3c;
01146           *(pSnd++) = pG->B1i[k][j][i];
01147           *(pSnd++) = pG->B2i[k][j][i];
01148           *(pSnd++) = pG->B3i[k][j][i];
01149 #endif /* MHD */
01150 #if (NSCALARS > 0)
01151           for (n=0; n<NSCALARS; n++) *(pSnd++) = pCons->s[n];
01152 #endif
01153         }
01154       }
01155     }
01156 
01157 /* send contents of buffer to the neighboring grid on R-x2 */
01158     ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->rx2_id,
01159                    shearing_sheet_ox1_tag, pD->Comm_Domain);
01160 
01161 /* Wait to receive the input data from the left grid */
01162     ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
01163 
01164     pRcv = recv_buf;
01165     for (k=ks; k<=ku; k++){
01166       for (j=js-nghost; j<=js-1; j++){
01167         for (i=ie+1; i<=ie+nghost; i++){
01168           /* Get a pointer to the ConsS cell */
01169           pCons = &(pG->U[k][j][i]);
01170 
01171           pCons->d  = *(pRcv++);
01172           pCons->M1 = *(pRcv++);
01173           pCons->M2 = *(pRcv++);
01174           pCons->M3 = *(pRcv++);
01175 #ifndef BAROTROPIC
01176           pCons->E  = *(pRcv++);
01177 #endif /* BAROTROPIC */
01178 #ifdef MHD
01179           pCons->B1c = *(pRcv++);
01180           pCons->B2c = *(pRcv++);
01181           pCons->B3c = *(pRcv++);
01182           pG->B1i[k][j][i] = *(pRcv++);
01183           pG->B2i[k][j][i] = *(pRcv++);
01184           pG->B3i[k][j][i] = *(pRcv++);
01185 #endif /* MHD */
01186 #if (NSCALARS > 0)
01187           for (n=0; n<NSCALARS; n++) pCons->s[n] = *(pRcv++);
01188 #endif
01189         }
01190       }
01191     }
01192 
01193 /* Post a non-blocking receive for the input data from the right grid */
01194     ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->rx2_id,
01195                     shearing_sheet_ox1_tag, pD->Comm_Domain, &rq);
01196 
01197     pSnd = send_buf;
01198     for (k=ks; k<=ku; k++){
01199       for (j=js; j<=js+nghost-1; j++){
01200         for (i=ie+1; i<=ie+nghost; i++){
01201           /* Get a pointer to the ConsS cell */
01202           pCons = &(pG->U[k][j][i]);
01203 
01204           *(pSnd++) = pCons->d;
01205           *(pSnd++) = pCons->M1;
01206           *(pSnd++) = pCons->M2;
01207           *(pSnd++) = pCons->M3;
01208 #ifndef BAROTROPIC
01209           *(pSnd++) = pCons->E;
01210 #endif /* BAROTROPIC */
01211 #ifdef MHD
01212           *(pSnd++) = pCons->B1c;
01213           *(pSnd++) = pCons->B2c;
01214           *(pSnd++) = pCons->B3c;
01215           *(pSnd++) = pG->B1i[k][j][i];
01216           *(pSnd++) = pG->B2i[k][j][i];
01217           *(pSnd++) = pG->B3i[k][j][i];
01218 #endif /* MHD */
01219 #if (NSCALARS > 0)
01220           for (n=0; n<NSCALARS; n++) *(pSnd++) = pCons->s[n];
01221 #endif
01222         }
01223       }
01224     }
01225 
01226 /* send contents of buffer to the neighboring grid on L-x2 */
01227     ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->lx2_id,
01228                    shearing_sheet_ox1_tag, pD->Comm_Domain);
01229 
01230 /* Wait to receive the input data from the right grid */
01231     ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
01232 
01233     pRcv = recv_buf;
01234     for (k=ks; k<=ku; k++){
01235       for (j=je+1; j<=je+nghost; j++){
01236         for (i=ie+1; i<=ie+nghost; i++){
01237           /* Get a pointer to the ConsS cell */
01238           pCons = &(pG->U[k][j][i]);
01239 
01240           pCons->d  = *(pRcv++);
01241           pCons->M1 = *(pRcv++);
01242           pCons->M2 = *(pRcv++);
01243           pCons->M3 = *(pRcv++);
01244 #ifndef BAROTROPIC
01245           pCons->E  = *(pRcv++);
01246 #endif /* BAROTROPIC */
01247 #ifdef MHD
01248           pCons->B1c = *(pRcv++);
01249           pCons->B2c = *(pRcv++);
01250           pCons->B3c = *(pRcv++);
01251           pG->B1i[k][j][i] = *(pRcv++);
01252           pG->B2i[k][j][i] = *(pRcv++);
01253           pG->B3i[k][j][i] = *(pRcv++);
01254 #endif /* MHD */
01255 #if (NSCALARS > 0)
01256           for (n=0; n<NSCALARS; n++) pCons->s[n] = *(pRcv++);
01257 #endif
01258         }
01259       }
01260     }
01261 #endif /* MPI_PARALLEL */
01262 
01263   } /* end of step 9 - periodic BC in Y with MPI */
01264 
01265 /*--- Step 10 ------------------------------------------------------------------
01266  * Fix B2c at j=je,js-1, now that B2i[je+1] has been set properly  */
01267 
01268 #ifdef MHD
01269   for (k=ks; k<=ke; k++) {
01270     for (i=ie+1; i<=ie+nghost; i++){
01271       pG->U[k][je  ][i].B2c = 0.5*(pG->B2i[k][je+1][i]+pG->B2i[k][je][i]);
01272       pG->U[k][js-1][i].B2c = 0.5*(pG->B2i[k][js-1][i]+pG->B2i[k][js][i]);
01273     }
01274   }
01275 #endif /* MHD */
01276 
01277   } /* end of if */
01278 
01279 /*--- Step 11 ------------------------------------------------------------------
01280  * Shearing sheet BC in 2D xz.  Periodic BC already applied in x1 and x2 in
01281  * bvals_mhd (including for MPI parallel jobs).  Now just have to add offset
01282  * to azimuthal velocity when FARGO not defined */
01283 
01284 #ifndef FARGO
01285   if (pG->Nx[2] == 1 && ShBoxCoord==xz) {
01286 
01287     for (j=js-nghost; j<=je+nghost; j++) {
01288       for (i=1; i<=nghost; i++) {
01289 #ifdef ADIABATIC
01290 /* No change in the internal energy */
01291         pG->U[ks][j][ie+i].E += (0.5/pG->U[ks][j][ie+i].d)*
01292           (SQR((pG->U[ks][j][ie+i].M3 - qomL*pG->U[ks][j][ie+i].d))
01293          - SQR(pG->U[ks][j][ie+i].M3));
01294 #endif
01295         pG->U[ks][j][ie+i].M3 -= qomL*pG->U[ks][j][ie+i].d;
01296       }
01297     }
01298 
01299   }
01300 #endif /* FARGO */
01301 
01302   return;
01303 }
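
Note the sign conventions that distinguish the two routines above: at ix1 the fractional remap uses epsi = +fmod(deltay,dx2)/dx2 with the integer shift jremap = j - joffset, while at ox1 it uses epso = -fmod(deltay,dx2)/dx2 with jremap = j + joffset, so the ghost zones on the two x1 faces are displaced in opposite azimuthal directions by the same |deltay|.  A worked example with made-up numbers:

/* e.g. deltay = 3.0, dx2 = 0.08 (so joffset = 37):
 *   ix1: epsi = +0.5, jremap = j - 37
 *   ox1: epso = -0.5, jremap = j + 37 */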
01304 
01305 
01306 /*----------------------------------------------------------------------------*/
01307 /*! \fn void RemapEy_ix1(DomainS *pD, Real ***emfy, Real **tEy)
01308  *  \brief Remaps Ey at [is] due to background shear, and then
01309  *   averages remapped and original field.  This guarantees the sums of Ey
01310  * along the x1 boundaries at [is] and [ie+1] are identical -- thus net Bz is
01311  * conserved
01312  *
01313  * This is a public function which is called by integrator (inside a
01314  * SHEARING_BOX macro).                                                       */
01315 /*----------------------------------------------------------------------------*/
01316 
01317 #ifdef MHD
01318 void RemapEy_ix1(DomainS *pD, Real ***emfy, Real **tEy)
01319 {
01320   GridS *pG = pD->Grid;
01321   int ie = pG->ie;
01322   int js = pG->js, je = pG->je;
01323   int ks = pG->ks, ke = pG->ke;
01324   int j,k,joffset,jremap;
01325   Real xmin,xmax,Lx,Ly,qomL,yshear,deltay,epsi;
01326 #ifdef MPI_PARALLEL
01327   int is = pG->is;
01328   int my_iproc,my_jproc,my_kproc,cnt,jproc,joverlap,Ngrids;
01329   int ierr,sendto_id,getfrom_id;
01330   double *pSnd,*pRcv;
01331   Real *pEy;
01332   MPI_Request rq;
01333 #endif
01334 
01335 /* Compute the distance the computational domain has sheared in y in integer
01336  * and fractional pieces of a cell.  Same code as in ShearingSheet_ix1()  */
01337 
01338   xmin = pD->RootMinX[0];
01339   xmax = pD->RootMaxX[0];
01340   Lx = xmax - xmin;
01341 
01342   xmin = pD->RootMinX[1];
01343   xmax = pD->RootMaxX[1];
01344   Ly = xmax - xmin;
01345 
01346   qomL = qshear*Omega_0*Lx;
01347   yshear = qomL*pG->time;
01348   deltay = fmod(yshear, Ly);
01349   joffset = (int)(deltay/pG->dx2);
01350   epsi = (fmod(deltay,pG->dx2))/pG->dx2;
01351 
01352 /*--- Step 1. ------------------------------------------------------------------
01353  * Copy Ey from [ie+1] into temporary array, using periodic BC in x1.
01354  * Requires MPI calls if NGrid_x1 > 1   */
01355 
01356   if (pD->NGrid[0] == 1) {
01357     for(k=ks; k<=ke+1; k++) {
01358       for(j=js; j<=je; j++){
01359         tEy[k][j] = emfy[k][j][ie+1];
01360       }
01361     }
01362 
01363 #ifdef MPI_PARALLEL
01364   } else {
01365 
01366 /* MPI calls to swap data */
01367 
01368     cnt = pG->Nx[1]*(pG->Nx[2]+1);
/* Post a non-blocking receive for the input data from remapEy_ox1 (listen L) */
01369     ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->lx1_id,
01370                     remapEy_tag, pD->Comm_Domain, &rq);
01371 
01372 /* send Ey at [is] to ox1 (send L) -- this data is needed by remapEy_ox1 */
01373 
01374     pSnd = send_buf;
01375     for (k=ks; k<=ke+1; k++) {
01376       for (j=js; j<=je; j++) {
01377         pEy = &(emfy[k][j][is]);
01378         *(pSnd++) = *pEy;
01379       }
01380     }
01381     ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->lx1_id,
01382                    remapEy_tag, pD->Comm_Domain);
01383 
01384 /* Listen for data from ox1 (listen L), unpack and set temporary array */
01385 
01386     ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
01387 
01388     pRcv = recv_buf;
01389     for (k=ks; k<=ke+1; k++) {
01390       for (j=js; j<=je; j++) {
01391           pEy = &(tEy[k][j]);
01392           *pEy = *(pRcv++);
01393       }
01394     }
01395 #endif /* MPI_PARALLEL */
01396   }
01397 
01398 /*--- Step 2. ------------------------------------------------------------------
01399  * Apply periodic BC in x2 to temporary array.  Requires MPI calls if 
01400  * NGrid_x2 > 1 */
01401 
01402   if (pD->NGrid[1] == 1) {
01403 
01404     for(k=ks; k<=ke+1; k++) {
01405       for(j=1; j<=nghost; j++){
01406         tEy[k][js-j] = tEy[k][je-(j-1)];
01407         tEy[k][je+j] = tEy[k][js+(j-1)];
01408       }
01409     }
01410 
01411 #ifdef MPI_PARALLEL
01412   } else {
01413 
01414 /* MPI calls to swap data */
01415 
01416     cnt = nghost*(pG->Nx[2] + 1);
01417 /* Post a non-blocking receive for the input data from the left grid */
01418     ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->lx2_id,
01419                     remapEy_tag, pD->Comm_Domain, &rq);
01420 
01421     pSnd = send_buf;
01422     for (k=ks; k<=ke+1; k++){
01423       for (j=je-nghost+1; j<=je; j++){
01424         pEy = &(tEy[k][j]);
01425         *(pSnd++) = *pEy;
01426       }
01427     }
01428 
01429 /* send contents of buffer to the neighboring grid on R-x2 */
01430     ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->rx2_id,
01431                    remapEy_tag, pD->Comm_Domain);
01432 
01433 /* Wait to receive the input data from the left grid */
01434     ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
01435 
01436     pRcv = recv_buf;
01437     for (k=ks; k<=ke+1; k++){
01438       for (j=js-nghost; j<=js-1; j++){
01439         pEy = &(tEy[k][j]);
01440         *pEy = *(pRcv++);
01441       }
01442     }
01443 
01444 /* Post a non-blocking receive for the input data from the right grid */
01445     ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->rx2_id,
01446                     remapEy_tag, pD->Comm_Domain, &rq);
01447 
01448     pSnd = send_buf;
01449     for (k=ks; k<=ke+1; k++){
01450       for (j=js; j<=js+nghost-1; j++){
01451         pEy = &(tEy[k][j]);
01452         *(pSnd++) = *pEy;
01453       }
01454     }
01455 
01456 /* send contents of buffer to the neighboring grid on L-x2 */
01457     ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->lx2_id,
01458                    remapEy_tag, pD->Comm_Domain);
01459 
01460 /* Wait to receive the input data from the right grid */
01461     ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
01462 
01463     pRcv = recv_buf;
01464     for (k=ks; k<=ke+1; k++){
01465       for (j=je+1; j<=je+nghost; j++){
01466         pEy = &(tEy[k][j]);
01467         *pEy = *(pRcv++);
01468       }
01469     }
01470 #endif /* MPI_PARALLEL */
01471   }
01472 
01473 /*--- Step 3. ------------------------------------------------------------------
01474  * Copy tEy into buffer, at the same time apply a conservative remap of
01475  * solution over the fractional part of grid cell */
01476 
01477   for(k=ks; k<=ke+1; k++) {
01478     RemapFlux(tEy[k],epsi,js,je+1,Flx);
01479     for(j=js; j<=je; j++){
01480       tEyBuf[k][j] = tEy[k][j] - (Flx[j+1] - Flx[j]);
01481     }
01482   }
01483 
01484 /*--- Step 4. ------------------------------------------------------------------
01485  * If no MPI decomposition in Y, apply shift over integer number of
01486  * grid cells during copy from buffer back into tEy.  */
01487 
01488   if (pD->NGrid[1] == 1) {
01489 
01490     for(k=ks; k<=ke+1; k++) {
01491       for(j=js; j<=je; j++){
01492         jremap = j - joffset;
01493         if (jremap < (int)js) jremap += pG->Nx[1];
01494         tEy[k][j]  = tEyBuf[k][jremap];
01495       }
01496     }
01497 
01498 #ifdef MPI_PARALLEL
01499   } else {
01500 
01501 /*--- Step 5. ------------------------------------------------------------------
01502  * If Domain contains MPI decomposition in Y, then MPI calls are required for
01503  * the cyclic shift needed to apply shift over integer number of grid cells
01504  * during copy from buffer back into tEy.  */
01505 
01506     get_myGridIndex(pD, myID_Comm_world, &my_iproc, &my_jproc, &my_kproc);
01507 
01508 /* Find integer and fractional number of grids over which offset extends.
01509  * This assumes every Grid has the same number of cells in x2-direction! */
01510     Ngrids = (int)(joffset/pG->Nx[1]);
01511     joverlap = joffset - Ngrids*pG->Nx[1];
01512 
01513 /*--- Step 5a. -----------------------------------------------------------------
01514  * Find ids of the processors to which data in [je-(joverlap-1):je] is sent,
01515  * and from which data in [js:js+(joverlap-1)] is received.  Only execute if joverlap>0 */
01516 /* This can result in send/receive to self -- we rely on MPI to handle this
01517  * properly */
01518 
01519     if (joverlap != 0) {
01520 
01521       jproc = my_jproc + (Ngrids + 1);
01522       if (jproc > (pD->NGrid[1]-1)) jproc -= pD->NGrid[1];
01523       sendto_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
01524 
01525       jproc = my_jproc - (Ngrids + 1);
01526       if (jproc < 0) jproc += pD->NGrid[1];
01527       getfrom_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
01528 
01529 /*--- Step 5b. -----------------------------------------------------------------
01530  * Pack send buffer and send data in [je-(joverlap-1):je] from tEyBuf */
01531 
01532       cnt = joverlap*(pG->Nx[2]+1);
01533 /* Post a non-blocking receive for the input data */
01534       ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, getfrom_id,
01535                       remapEy_tag, pD->Comm_Domain, &rq);
01536 
01537       pSnd = send_buf;
01538       for (k=ks; k<=ke+1; k++) {
01539         for (j=je-(joverlap-1); j<=je; j++) {
01540           pEy = &(tEyBuf[k][j]);
01541           *(pSnd++) = *pEy;
01542         }
01543       }
01544       ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, sendto_id,
01545                      remapEy_tag, pD->Comm_Domain);
01546 
01547 
01548 /*--- Step 5c. -----------------------------------------------------------------
01549  * unpack data sent from [je-(joverlap-1):je], and remap into cells in
01550  * [js:js+(joverlap-1)] in tEy
01551  */
01552 
01553       ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
01554 
01555       pRcv = recv_buf;
01556       for (k=ks; k<=ke+1; k++) {
01557         for (j=js; j<=js+(joverlap-1); j++) {
01558             pEy = &(tEy[k][j]);
01559             *pEy = *(pRcv++);
01560         }
01561       }
01562 
01563     }
01564 
01565 /*--- Step 5d. -----------------------------------------------------------------
01566  * If shear is less than one full Grid, remap cells which remain on the same processor
01567  * from tEyBuf into tEy.  Cells in [js:je-joverlap] are shifted by
01568  * joverlap into [js+joverlap:je] */
01569 
01570     if (Ngrids == 0) {
01571 
01572       for(k=ks; k<=ke+1; k++) {
01573         for(j=js+joverlap; j<=je; j++){
01574           jremap = j-joverlap;
01575           tEy[k][j]  = tEyBuf[k][jremap];
01576         }
01577       }
01578 
01579 /*--- Step 5e. -----------------------------------------------------------------
01580  * If shear is more than one Grid, pack and send data from [js:je-joverlap]
01581  * from tEyBuf (this step replaces 5d) */
01582 
01583     } else {
01584 
01585 /* indices of sendto and getfrom processors in GData are -/+1 from Step 5a */
01586 
01587       jproc = my_jproc + Ngrids;
01588       if (jproc > (pD->NGrid[1]-1)) jproc -= pD->NGrid[1];
01589       sendto_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
01590 
01591       jproc = my_jproc - Ngrids;
01592       if (jproc < 0) jproc += pD->NGrid[1];
01593       getfrom_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
01594 
01595       cnt = (pG->Nx[1]-joverlap)*(pG->Nx[2]+1);
01596 /* Post a non-blocking receive for the input data from the getfrom_id Grid */
01597       ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, getfrom_id,
01598                       remapEy_tag, pD->Comm_Domain, &rq);
01599 
01600       pSnd = send_buf;
01601       for (k=ks; k<=ke+1; k++) {
01602         for (j=js; j<=je-joverlap; j++) {
01603           pEy = &(tEyBuf[k][j]);
01604           *(pSnd++) = *pEy;
01605         }
01606       }
01607       ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, sendto_id,
01608                      remapEy_tag, pD->Comm_Domain);
01609 
01610 /* unpack data sent from [js:je-joverlap], and remap into cells in
01611  * [js+joverlap:je] in tEy */
01612 
01613       ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
01614 
01615       pRcv = recv_buf;
01616       for (k=ks; k<=ke+1; k++) {
01617         for (j=js+joverlap; j<=je; j++) {
01618           pEy = &(tEy[k][j]);
01619           *pEy = *(pRcv++);
01620         }
01621       }
01622     } /* end of step 5e - shear is more than one Grid */
01623 
01624 #endif /* MPI_PARALLEL */
01625   } /* end of step 5 - MPI decomposition in Y */
01626 
01627 /*--- Step 6. ------------------------------------------------------------------
01628  * Now return remapped Ey */
01629 
01630   return;
01631 }
01632 #endif /* MHD */
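
/* [Editor's note] A minimal sketch of the Step 5 index arithmetic above: the
 * total shift of joffset cells is split into Ngrids whole Grids plus a
 * remainder joverlap, and the send/receive ranks are found by wrapping the
 * j-index of this process around the NGrid[1] processes in x2.  The helper
 * below is illustrative only (it is not part of bvals_shear.c and is guarded
 * out of compilation); the name cyclic_shift_partners() is invented.        */
#if 0
static void cyclic_shift_partners(int joffset, int Nx2, int NGrid2,
                                  int my_jproc, int *Ngrids, int *joverlap,
                                  int *send_jproc, int *recv_jproc)
{
  int jp;

  *Ngrids   = joffset/Nx2;             /* whole Grids covered by the shift   */
  *joverlap = joffset - (*Ngrids)*Nx2; /* leftover cells, 0 <= joverlap < Nx2 */

  jp = my_jproc + (*Ngrids + 1);       /* partner receiving the joverlap cells */
  if (jp > NGrid2-1) jp -= NGrid2;     /* periodic wrap in the x2 direction  */
  *send_jproc = jp;

  jp = my_jproc - (*Ngrids + 1);       /* partner those cells come from      */
  if (jp < 0) jp += NGrid2;
  *recv_jproc = jp;
}
#endif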
01633 
01634 
01635 /*----------------------------------------------------------------------------*/
01636 /*! \fn void RemapEy_ox1(DomainS *pD, Real ***emfy, Real **tEy)
01637  *  \brief Remaps Ey at [ie+1] due to background shear, and then
01638  * averages remapped and original field.  This guarantees the sums of Ey
01639  * along the x1 boundaries at [is] and [ie+1] are identical -- thus net Bz is
01640  * conserved
01641  *
01642  * This is a public function which is called by integrator (inside a
01643  * SHEARING_BOX macro).                                                       */
01644 /*----------------------------------------------------------------------------*/
01645 
01646 #ifdef MHD
01647 void RemapEy_ox1(DomainS *pD, Real ***emfy, Real **tEy)
01648 {
01649   GridS *pG = pD->Grid;
01650   int is = pG->is;
01651   int js = pG->js, je = pG->je;
01652   int ks = pG->ks, ke = pG->ke;
01653   int j,k,joffset,jremap;
01654   Real xmin,xmax,Lx,Ly,qomL,yshear,deltay,epso;
01655 #ifdef MPI_PARALLEL
01656   int ie = pG->ie;
01657   int my_iproc,my_jproc,my_kproc,cnt,jproc,joverlap,Ngrids;
01658   int ierr,sendto_id,getfrom_id;
01659   double *pSnd,*pRcv;
01660   Real *pEy;
01661   MPI_Request rq;
01662 #endif
01663 
01664 /* Compute the distance the computational domain has sheared in y in integer
01665  * and fractional pieces of a cell.  Same code as in ShearingSheet_ox1()  */
01666 
01667   xmin = pD->RootMinX[0];
01668   xmax = pD->RootMaxX[0];
01669   Lx = xmax - xmin;
01670 
01671   xmin = pD->RootMinX[1];
01672   xmax = pD->RootMaxX[1];
01673   Ly = xmax - xmin;
01674 
01675   qomL = qshear*Omega_0*Lx;
01676   yshear = qomL*pG->time;
01677   deltay = fmod(yshear, Ly);
01678   joffset = (int)(deltay/pG->dx2);
01679   epso = -(fmod(deltay,pG->dx2))/pG->dx2;
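
/* [Editor's note] Worked example of the decomposition above (illustrative
 * numbers only): if deltay = 3.25 and dx2 = 0.1, then joffset = 32 whole
 * cells and epso = -0.5, i.e. the remap is an integer shift of 32 cells plus
 * a conservative remap over half a cell.  epso is defined negative here so
 * that RemapFlux() takes its outer-boundary branch (eps < 0).               */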
01680 
01681 /*--- Step 1. ------------------------------------------------------------------
01682  * Copy Ey from [is] into temporary array, using periodic BC in x1.
01683  * Requires MPI calls if NGrid_x1 > 1   */
01684 
01685   if (pD->NGrid[0] == 1) {
01686     for(k=ks; k<=ke+1; k++) {
01687       for(j=js; j<=je; j++){
01688         tEy[k][j] = emfy[k][j][is];
01689       }
01690     }
01691 
01692 #ifdef MPI_PARALLEL
01693   } else {
01694 
01695 /* MPI calls to swap data */
01696 
01697     cnt = pG->Nx[1]*(pG->Nx[2]+1);
01698 /* Post a non-blocking receive for the input data from remapEy_ix1 (listen R) */
01699     ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->rx1_id,
01700                     remapEy_tag, pD->Comm_Domain, &rq);
01701 
01702 /* send Ey at [ie+1] to ix1 (send R) -- this data is needed by remapEy_ix1 */
01703 
01704     pSnd = send_buf;
01705     for (k=ks; k<=ke+1; k++) {
01706       for (j=js; j<=je; j++) {
01707         pEy = &(emfy[k][j][ie+1]);
01708         *(pSnd++) = *pEy;
01709       }
01710     }
01711     ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->rx1_id,
01712                    remapEy_tag, pD->Comm_Domain);
01713 
01714 /* Listen for data from ix1 (listen R), unpack and set temporary array */
01715 
01716     ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
01717 
01718     pRcv = recv_buf;
01719     for (k=ks; k<=ke+1; k++) {
01720       for (j=js; j<=je; j++) {
01721           pEy = &(tEy[k][j]);
01722           *pEy = *(pRcv++);
01723       }
01724     }
01725 #endif /* MPI_PARALLEL */
01726   }
01727 
01728 /*--- Step 2. ------------------------------------------------------------------
01729  * Apply periodic BC in x2 to temporary array.  Requires MPI calls if 
01730  * NGrid_x2 > 1 */
01731 
01732   if (pD->NGrid[1] == 1) {
01733 
01734     for(k=ks; k<=ke+1; k++) {
01735       for(j=1; j<=nghost; j++){
01736         tEy[k][js-j] = tEy[k][je-(j-1)];
01737         tEy[k][je+j] = tEy[k][js+(j-1)];
01738       }
01739     }
01740 
01741 #ifdef MPI_PARALLEL
01742   } else {
01743 
01744 /* MPI calls to swap data */
01745 
01746     cnt = nghost*(pG->Nx[2] + 1);
01747 /* Post a non-blocking receive for the input data from the left grid */
01748     ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->lx2_id,
01749                     remapEy_tag, pD->Comm_Domain, &rq);
01750 
01751     pSnd = send_buf;
01752     for (k=ks; k<=ke+1; k++){
01753       for (j=je-nghost+1; j<=je; j++){
01754         pEy = &(tEy[k][j]);
01755         *(pSnd++) = *pEy;
01756       }
01757     }
01758 
01759 /* send contents of buffer to the neighboring grid on R-x2 */
01760     ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->rx2_id,
01761                    remapEy_tag, pD->Comm_Domain);
01762 
01763 /* Wait to receive the input data from the left grid */
01764     ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
01765 
01766     pRcv = recv_buf;
01767     for (k=ks; k<=ke+1; k++){
01768       for (j=js-nghost; j<=js-1; j++){
01769         pEy = &(tEy[k][j]);
01770         *pEy = *(pRcv++);
01771       }
01772     }
01773 
01774 /* Post a non-blocking receive for the input data from the right grid */
01775     ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->rx2_id,
01776                     remapEy_tag, pD->Comm_Domain, &rq);
01777 
01778     pSnd = send_buf;
01779     for (k=ks; k<=ke+1; k++){
01780       for (j=js; j<=js+nghost-1; j++){
01781         pEy = &(tEy[k][j]);
01782         *(pSnd++) = *pEy;
01783       }
01784     }
01785 
01786 /* send contents of buffer to the neighboring grid on L-x2 */
01787     ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->lx2_id,
01788                    remapEy_tag, pD->Comm_Domain);
01789 
01791 /* Wait to receive the input data from the right grid */
01791     ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
01792 
01793     pRcv = recv_buf;
01794     for (k=ks; k<=ke+1; k++){
01795       for (j=je+1; j<=je+nghost; j++){
01796         pEy = &(tEy[k][j]);
01797         *pEy = *(pRcv++);
01798       }
01799     }
01800 #endif /* MPI_PARALLEL */
01801   }
01802 
01803 /*--- Step 3. ------------------------------------------------------------------
01804  * Copy tEy into buffer, at the same time apply a conservative remap of
01805  * solution over the fractional part of grid cell */
01806 
01807   for(k=ks; k<=ke+1; k++) {
01808     RemapFlux(tEy[k],epso,js,je+1,Flx);
01809     for(j=js; j<=je; j++){
01810       tEyBuf[k][j] = tEy[k][j] - (Flx[j+1] - Flx[j]);
01811     }
01812   }
01813 
01814 /*--- Step 4. ------------------------------------------------------------------
01815  * If no MPI decomposition in Y, apply shift over integer number of
01816  * grid cells during copy from buffer back into tEy.  */
01817 
01818   if (pD->NGrid[1] == 1) {
01819 
01820     for(k=ks; k<=ke+1; k++) {
01821       for(j=js; j<=je; j++){
01822         jremap = j + joffset;
01823         if (jremap > (int)je) jremap -= pG->Nx[1];
01824         tEy[k][j]  = tEyBuf[k][jremap];
01825       }
01826     }
01827 
01828 #ifdef MPI_PARALLEL
01829   } else {
01830 
01831 /*--- Step 5. ------------------------------------------------------------------
01832  * If Domain contains MPI decomposition in Y, then MPI calls are required for
01833  * the cyclic shift needed to apply shift over integer number of grid cells
01834  * during copy from buffer back into tEy.  */
01835 
01836     get_myGridIndex(pD, myID_Comm_world, &my_iproc, &my_jproc, &my_kproc);
01837 
01838 /* Find integer and fractional number of grids over which offset extends.
01839  * This assumes every Grid has the same number of cells in x2-direction! */
01840     Ngrids = (int)(joffset/pG->Nx[1]);
01841     joverlap = joffset - Ngrids*pG->Nx[1];
01842 
01843 /*--- Step 5a. -----------------------------------------------------------------
01844  * Find ids of the processors to which data in [js:js+(joverlap-1)] is sent,
01845  * and from which data in [je-(joverlap-1):je] is received.  Only execute if joverlap>0 */
01846 /* This can result in send/receive to self -- we rely on MPI to handle this
01847  * properly */
01848 
01849     if (joverlap != 0) {
01850 
01851       jproc = my_jproc - (Ngrids + 1);
01852       if (jproc < 0) jproc += pD->NGrid[1];
01853       sendto_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
01854 
01855       jproc = my_jproc + (Ngrids + 1);
01856       if (jproc > (pD->NGrid[1]-1)) jproc -= pD->NGrid[1];
01857       getfrom_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
01858 
01859 /*--- Step 5b. -----------------------------------------------------------------
01860  * Pack send buffer and send data in [js:js+(joverlap-1)] from tEyBuf */
01861 
01862       cnt = joverlap*(pG->Nx[2]+1);
01863 /* Post a non-blocking receive for the input data */
01864       ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, getfrom_id,
01865                       remapEy_tag, pD->Comm_Domain, &rq);
01866 
01867       pSnd = send_buf;
01868       for (k=ks; k<=ke+1; k++) {
01869         for (j=js; j<=js+(joverlap-1); j++) {
01870           pEy = &(tEyBuf[k][j]);
01871           *(pSnd++) = *pEy;
01872         }
01873       }
01874       ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, sendto_id,
01875                      remapEy_tag, pD->Comm_Domain);
01876 
01877 /*--- Step 5c. -----------------------------------------------------------------
01878  * unpack data sent from [js:js+(joverlap-1)], and remap into cells in
01879  * [je-(joverlap-1):je] in tEy
01880  */
01881 
01882       ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
01883 
01884       pRcv = recv_buf;
01885       for (k=ks; k<=ke+1; k++) {
01886         for (j=je-(joverlap-1); j<=je; j++) {
01887             pEy = &(tEy[k][j]);
01888             *pEy = *(pRcv++);
01889         }
01890       }
01891 
01892     }
01893 
01894 /*--- Step 5d. -----------------------------------------------------------------
01895  * If shear is less than one full Grid, remap cells which remain on the same
01896  * processor from tEyBuf into tEy.  Cells in [js+joverlap:je] are shifted by
01897  * joverlap into [js:je-joverlap] */
01898 
01899     if (Ngrids == 0) {
01900 
01901       for(k=ks; k<=ke+1; k++) {
01902         for(j=js; j<=je-joverlap; j++){
01903           jremap = j+joverlap;
01904           tEy[k][j]  = tEyBuf[k][jremap];
01905         }
01906       }
01907 
01908 /*--- Step 5e. -----------------------------------------------------------------
01909  * If shear is more than one Grid, pack and send data from [js+joverlap:je]
01910  * from tEyBuf (this step replaces 5d) */
01911 
01912     } else {
01913 
01914 /* indices of sendto and getfrom processors in GData are -/+1 from Step 5a */
01915 
01916       jproc = my_jproc - Ngrids;
01917       if (jproc < 0) jproc += pD->NGrid[1];
01918       sendto_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
01919 
01920       jproc = my_jproc + Ngrids;
01921       if (jproc > (pD->NGrid[1]-1)) jproc -= pD->NGrid[1];
01922       getfrom_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
01923 
01924       cnt = (pG->Nx[1]-joverlap)*(pG->Nx[2]+1);
01925 /* Post a non-blocking receive for the input data from the getfrom_id Grid */
01926       ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, getfrom_id,
01927                       remapEy_tag, pD->Comm_Domain, &rq);
01928 
01929       pSnd = send_buf;
01930       for (k=ks; k<=ke+1; k++) {
01931         for (j=js+joverlap; j<=je; j++) {
01932           pEy = &(tEyBuf[k][j]);
01933           *(pSnd++) = *pEy;
01934         }
01935       }
01936       ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, sendto_id,
01937                      remapEy_tag, pD->Comm_Domain);
01938 
01939 /* unpack data sent from [js+joverlap:je], and remap into cells in
01940  * [js:je-joverlap] in tEy */
01941 
01942       ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
01943 
01944       pRcv = recv_buf;
01945       for (k=ks; k<=ke+1; k++) {
01946         for (j=js; j<=je-joverlap; j++) {
01947           pEy = &(tEy[k][j]);
01948           *pEy = *(pRcv++);
01949         }
01950       }
01951     } /* end of step 5e - shear is more than one Grid */
01952 
01953 #endif /* MPI_PARALLEL */
01954   } /* end of step 5 - MPI decomposition in Y */
01955 
01956 /*--- Step 6. ------------------------------------------------------------------
01957  * Now return remapped Ey */
01958 
01959   return;
01960 }
01961 #endif /* MHD */
01962 
01963 #endif /* Shearing Box */
01964 
01965 /*----------------------------------------------------------------------------*/
01966 /*! \fn void Fargo(DomainS *pD)
01967  *  \brief Implements FARGO algorithm.  Only works in 3D or 2D xy.  Called in
01968  * the main loop after the integrator (and before bvals_mhd).
01969  *  Written in Munich 24-27.4.2008                                            */
01970 /*----------------------------------------------------------------------------*/
01971 
01972 #ifdef FARGO
01973 void Fargo(DomainS *pD)
01974 {
01975   GridS *pG = pD->Grid;
01976   int is = pG->is, ie = pG->ie;
01977   int js = pG->js, je = pG->je;
01978   int ks = pG->ks, ke = pG->ke;
01979   int jfs = nfghost, jfe = pG->Nx[1] + nfghost - 1;
01980   int i,j,k,ku,jj,n,joffset;
01981   Real x1,x2,x3,yshear,eps;
01982 #if defined(ADIABATIC) && defined(SHEARING_BOX)
01983   Real qom_dt = qshear*Omega_0*pG->dt;
01984 #endif
01985 #ifdef CYLINDRICAL
01986   const Real *r=pG->r, *ri=pG->ri;
01987 #endif
01988 #ifdef MPI_PARALLEL
01989   int ierr,cnt;
01990   double *pSnd,*pRcv;
01991   FConsS *pFCons;
01992   MPI_Request rq;
01993 #endif
01994 
01995 /*--- Step 1. ------------------------------------------------------------------
01996  * Copy data into FargoVars array.  Note i and j indices are switched.
01997  * Since the FargoVars array has extra ghost zones in y, it must be indexed
01998  * over [jfs:jfe] instead of [js:je]  */
01999   if (pG->Nx[2] > 1) ku=ke+1; else ku=ke;
02000   for(k=ks; k<=ku; k++) {
02001     for(j=jfs; j<=jfe+1; j++){
02002       for(i=is; i<=ie+1; i++){
02003         jj = j-(jfs-js);
02004         FargoVars[k][i][j].U[0] = pG->U[k][jj][i].d;
02005         FargoVars[k][i][j].U[1] = pG->U[k][jj][i].M1;
02006         FargoVars[k][i][j].U[2] = pG->U[k][jj][i].M2;
02007         FargoVars[k][i][j].U[3] = pG->U[k][jj][i].M3;
02008 #if defined(ADIABATIC) && defined(SHEARING_BOX)
02009 #ifdef MHD
02010 /* Add energy equation source term in MHD */
02011         pG->U[k][jj][i].E -= qom_dt*pG->U[k][jj][i].B1c*
02012          (pG->U[k][jj][i].B2c - (qom_dt/2.)*pG->U[k][jj][i].B1c);
02013 #endif /* MHD */
02014         FargoVars[k][i][j].U[4] = pG->U[k][jj][i].E;
02015 #endif /* ADIABATIC */
02016 /* Only store Bz and Bx in that order.  This is to match order in FargoFlx:
02017  *  FargoFlx.U[NFARGO-2] = emfx = -Vy*Bz
02018  *  FargoFlx.U[NFARGO-1] = emfz = Vy*Bx  */
02019 #ifdef MHD
02020         FargoVars[k][i][j].U[NFARGO-2] = pG->B3i[k][jj][i];
02021         FargoVars[k][i][j].U[NFARGO-1] = pG->B1i[k][jj][i];
02022 #endif /* MHD */
02023 #if (NSCALARS > 0)
02024         for(n=0;n<NSCALARS;n++) FargoVars[k][i][j].s[n] = pG->U[k][jj][i].s[n];
02025 #endif
02026       }
02027     }
02028   }
02029 /*--- Step 2. ------------------------------------------------------------------
02030  * With no MPI decomposition in Y, apply periodic BCs in Y to FargoVars array
02031  * (method is similar to periodic_ix2() and periodic_ox2() in bvals_mhd).
02032  * Note there are extra ghost zones in Y in the FargoVars array  */
02033 
02034   if (pD->NGrid[1] == 1) {
02035 
02036     for(k=ks; k<=ku; k++) {
02037       for(i=is; i<=ie+1; i++){
02038         for(j=1; j<=nfghost; j++){
02039           FargoVars[k][i][jfs-j] = FargoVars[k][i][jfe-(j-1)];
02040           FargoVars[k][i][jfe+j] = FargoVars[k][i][jfs+(j-1)];
02041         }
02042       }
02043     }
02044 
02045 #ifdef MPI_PARALLEL
02046   } else {
02047 
02048 /*--- Step 3. ------------------------------------------------------------------
02049  * With MPI decomposition in Y, use MPI calls to handle periodic BCs in Y (like
02050  * send_ox2/receive_ix2 and send_ix2/receive_ox2 pairs in bvals_mhd.c) */
02051 
02052 /* Post a non-blocking receive for the input data from the left grid */
02053     cnt = (pG->Nx[0]+1)*nfghost*(ku-ks+1)*(NFARGO + NSCALARS);
02054     ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->lx2_id,
02055                     fargo_tag, pD->Comm_Domain, &rq);
02056 
02057     pSnd = send_buf;
02058     for (k=ks; k<=ku; k++){
02059       for (i=is; i<=ie+1; i++){
02060         for (j=jfe-nfghost+1; j<=jfe; j++){
02061           /* Get a pointer to the FargoVars cell */
02062           pFCons = &(FargoVars[k][i][j]);
02063           for (n=0; n<NFARGO; n++) *(pSnd++) = pFCons->U[n];
02064 #if (NSCALARS > 0)
02065           for (n=0; n<NSCALARS; n++) *(pSnd++) = pFCons->s[n];
02066 #endif
02067         }
02068       }
02069     }
02070 
02071 /* send contents of buffer to the neighboring grid on R-x2 */
02072     ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->rx2_id,
02073                    fargo_tag, pD->Comm_Domain);
02074 
02075 /* Wait to receive the input data from the left grid */
02076     ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
02077 
02078     pRcv = recv_buf;
02079     for (k=ks; k<=ku; k++){
02080       for (i=is; i<=ie+1; i++){
02081         for (j=jfs-nfghost; j<=jfs-1; j++){
02082           /* Get a pointer to the FargoVars cell */
02083           pFCons = &(FargoVars[k][i][j]);
02084           for (n=0; n<NFARGO; n++) pFCons->U[n] = *(pRcv++);
02085 #if (NSCALARS > 0)
02086           for (n=0; n<NSCALARS; n++) pFCons->s[n] = *(pRcv++);
02087 #endif
02088         }
02089       }
02090     }
02091 
02092 /* Post a non-blocking receive for the input data from the right grid */
02093     ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->rx2_id,
02094                     fargo_tag, pD->Comm_Domain, &rq);
02095 
02096     pSnd = send_buf;
02097     for (k=ks; k<=ku; k++){
02098       for (i=is; i<=ie+1; i++){
02099         for (j=jfs; j<=jfs+nfghost-1; j++){
02100           /* Get a pointer to the FargoVars cell */
02101           pFCons = &(FargoVars[k][i][j]);
02102           for (n=0; n<NFARGO; n++) *(pSnd++) = pFCons->U[n];
02103 #if (NSCALARS > 0)
02104           for (n=0; n<NSCALARS; n++) *(pSnd++) = pFCons->s[n];
02105 #endif
02106         }
02107       }
02108     }
02109 
02110 /* send contents of buffer to the neighboring grid on L-x2 */
02111     ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->lx2_id,
02112                    fargo_tag, pD->Comm_Domain);
02113 
02114 /* Wait to receive the input data from the right grid */
02115     ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
02116 
02117     pRcv = recv_buf;
02118     for (k=ks; k<=ku; k++){
02119       for (i=is; i<=ie+1; i++){
02120         for (j=jfe+1; j<=jfe+nfghost; j++){
02121           /* Get a pointer to the FargoVars cell */
02122           pFCons = &(FargoVars[k][i][j]);
02123           for (n=0; n<NFARGO; n++) pFCons->U[n] = *(pRcv++);
02124 #if (NSCALARS > 0)
02125           for (n=0; n<NSCALARS; n++) pFCons->s[n] = *(pRcv++);
02126 #endif
02127         }
02128       }
02129     }
02130 #endif /* MPI_PARALLEL */
02131   }
02132 
02133 /*--- Step 4. ------------------------------------------------------------------
02134  * Compute fluxes, including both (1) the fractional part of grid cell, and
02135  * (2) the sum over integer number of cells  */
02136   for(k=ks; k<=ku; k++) {
02137     for(i=is; i<=ie+1; i++){
02138 
02139 /* Compute integer and fractional pieces of a cell covered by shear */
02140       cc_pos(pG, i, js, ks, &x1,&x2,&x3);
02141 #ifdef SHEARING_BOX
02142       yshear = -qshear*Omega_0*x1*pG->dt;
02143 #endif
02144 #ifdef CYLINDRICAL
02145       yshear = ((*OrbitalProfile)(x1))*pG->dt;
02146 #endif
02147       joffset = (int)(yshear/pG->dx2);
02148       if (abs(joffset) > (jfs-js))
02149         ath_error("[bvals_shear]: FARGO offset exceeded # of gh zns\n");
02150       eps = (fmod(yshear,pG->dx2))/pG->dx2;
02151 
02152 /* Compute fluxes of hydro variables  */
02153 #ifdef MHD
02154       for (n=0; n<(NFARGO-2); n++) {
02155 #else
02156       for (n=0; n<(NFARGO); n++) {
02157 #endif
02158         for (j=jfs-nfghost; j<=jfe+nfghost; j++) U[j] = FargoVars[k][i][j].U[n];
02159         RemapFlux(U,eps,(jfs-joffset),(jfe+1-joffset),Flx);
02160 
02161         for(j=jfs; j<=jfe+1; j++){
02162           FargoFlx[k][i][j].U[n] = Flx[j-joffset];
02163 
02164 /* Sum the flux from integer offset: +/- for +/- joffset */
02165           for (jj=1; jj<=joffset; jj++) {
02166             FargoFlx[k][i][j].U[n] += FargoVars[k][i][j-jj].U[n];
02167           }
02168           for (jj=(joffset+1); jj<=0; jj++) {
02169             FargoFlx[k][i][j].U[n] -= FargoVars[k][i][j-jj].U[n];
02170           }
02171         }
02172       }
02173 
02174 /* Compute fluxes of passive scalars */
02175 #if (NSCALARS > 0)
02176       for (n=0; n<(NSCALARS); n++) {
02177 
02178         for (j=jfs-nfghost; j<=jfe+nfghost; j++) U[j] = FargoVars[k][i][j].s[n];
02179         RemapFlux(U,eps,(jfs-joffset),(jfe+1-joffset),Flx);
02180 
02181         for(j=jfs; j<=jfe+1; j++){
02182           FargoFlx[k][i][j].s[n] = Flx[j-joffset];
02183           for (jj=1; jj<=joffset; jj++) {
02184             FargoFlx[k][i][j].s[n] += FargoVars[k][i][j-jj].s[n];
02185           }
02186           for (jj=(joffset+1); jj<=0; jj++) {
02187             FargoFlx[k][i][j].s[n] -= FargoVars[k][i][j-jj].s[n];
02188           }
02189         }
02190 
02191       }
02192 #endif
02193 
02194 #ifdef MHD
02195 /* Compute emfx = -VyBz, which is at cell-center in x1-direction */
02196 
02197       for (j=jfs-nfghost; j<=jfe+nfghost; j++) {
02198         U[j] = FargoVars[k][i][j].U[NFARGO-2];
02199       }
02200       RemapFlux(U,eps,(jfs-joffset),(jfe+1-joffset),Flx);
02201 
02202       for(j=jfs; j<=jfe+1; j++){
02203         FargoFlx[k][i][j].U[NFARGO-2] = -Flx[j-joffset];
02204         for (jj=1; jj<=joffset; jj++) {
02205           FargoFlx[k][i][j].U[NFARGO-2] -= FargoVars[k][i][j-jj].U[NFARGO-2];
02206         }
02207         for (jj=(joffset+1); jj<=0; jj++) {
02208           FargoFlx[k][i][j].U[NFARGO-2] += FargoVars[k][i][j-jj].U[NFARGO-2];
02209         }
02210       }
02211 
02212 /* Compute emfz =  VyBx, which is at cell-face in x1-direction  */
02213 #ifdef SHEARING_BOX
02214       yshear = -qshear*Omega_0*(x1 - 0.5*pG->dx1)*pG->dt;
02215 #endif
02216 #ifdef CYLINDRICAL
02217       yshear = ((*OrbitalProfile)(ri[i]))*pG->dt;
02218 #endif
02219       joffset = (int)(yshear/pG->dx2);
02220       if (abs(joffset) > (jfs-js))
02221         ath_error("[bvals_shear]: FARGO offset exceeded # of gh zns\n");
02222       eps = (fmod(yshear,pG->dx2))/pG->dx2;
02223 
02224       for (j=jfs-nfghost; j<=jfe+nfghost; j++) {
02225         U[j] = FargoVars[k][i][j].U[NFARGO-1];
02226       }
02227       RemapFlux(U,eps,(jfs-joffset),(jfe+1-joffset),Flx);
02228 
02229       for(j=jfs; j<=jfe+1; j++){
02230         FargoFlx[k][i][j].U[NFARGO-1] = Flx[j-joffset];
02231         for (jj=1; jj<=joffset; jj++) {
02232           FargoFlx[k][i][j].U[NFARGO-1] += FargoVars[k][i][j-jj].U[NFARGO-1];
02233         }
02234         for (jj=(joffset+1); jj<=0; jj++) {
02235           FargoFlx[k][i][j].U[NFARGO-1] -= FargoVars[k][i][j-jj].U[NFARGO-1];
02236         }
02237       }
02238 #endif
02239 
02240     }
02241   }
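/* [Editor's note] Interpretation of the fluxes built above: for a shift of
 * joffset whole cells plus a fraction eps, the total transport through face
 * j is the sum over the joffset complete upwind cells (the jj loops) plus the
 * reconstructed flux over the leftover fraction (Flx[j-joffset]).  Step 5
 * then differences these totals, so advection by the background flow reduces
 * to an exact conservative shift of each azimuthal profile.                  */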
02242 /*--- Step 5. ------------------------------------------------------------------
02243  * Update cell centered variables with flux gradient.  Note i/j are swapped */
02244 
02245   for(k=ks; k<=ke; k++) {
02246     for(j=js; j<=je; j++){
02247       jj = j-js+jfs;
02248       for(i=is; i<=ie; i++){
02249         pG->U[k][j][i].d  -=(FargoFlx[k][i][jj+1].U[0]-FargoFlx[k][i][jj].U[0]);
02250         pG->U[k][j][i].M1 -=(FargoFlx[k][i][jj+1].U[1]-FargoFlx[k][i][jj].U[1]);
02251         pG->U[k][j][i].M2 -=(FargoFlx[k][i][jj+1].U[2]-FargoFlx[k][i][jj].U[2]);
02252         pG->U[k][j][i].M3 -=(FargoFlx[k][i][jj+1].U[3]-FargoFlx[k][i][jj].U[3]);
02253 #ifdef ADIABATIC
02254         pG->U[k][j][i].E  -=(FargoFlx[k][i][jj+1].U[4]-FargoFlx[k][i][jj].U[4]);
02255 #endif /* ADIABATIC */
02256 #if (NSCALARS > 0)
02257         for (n=0; n<NSCALARS; n++) {
02258          pG->U[k][j][i].s[n]-=FargoFlx[k][i][jj+1].s[n]-FargoFlx[k][i][jj].s[n];
02259         }
02260 #endif
02261       }
02262     }
02263   }
02264 
02265 /*--- Step 6. ------------------------------------------------------------------
02266  * Update face centered field using CT.  Note i/j are swapped.
02267  *  FargoFlx.U[NFARGO-2] = emfx
02268  *  FargoFlx.U[NFARGO-1] = emfz  */
02269 
02270 #ifdef MHD
02271   if (pG->Nx[2]==1) ath_error("[Fargo] only works in 3D with MHD\n");
02272   for (k=ks; k<=ke; k++) {
02273     for (j=js; j<=je; j++) {
02274       jj = j-js+jfs;
02275       for (i=is; i<=ie; i++) {
02276         pG->B1i[k][j][i] -= 
02277           (FargoFlx[k][i][jj+1].U[NFARGO-1] - FargoFlx[k][i][jj].U[NFARGO-1]);
02278 #ifdef SHEARING_BOX
02279         pG->B2i[k][j][i] += (pG->dx2/pG->dx1)* 
02280           (FargoFlx[k][i+1][jj].U[NFARGO-1] - FargoFlx[k][i][jj].U[NFARGO-1]);
02281         pG->B2i[k][j][i] -= (pG->dx2/pG->dx3)* 
02282           (FargoFlx[k+1][i][jj].U[NFARGO-2] - FargoFlx[k][i][jj].U[NFARGO-2]);
02283 #endif 
02284 #ifdef CYLINDRICAL
02285         pG->B2i[k][j][i] += (pG->dx2/pG->dx1)*
02286                   (ri[i+1]*FargoFlx[k][i+1][jj].U[NFARGO-1] - ri[i]*FargoFlx[k][i][jj].U[NFARGO-1]);
02287         pG->B2i[k][j][i] -= (r[i]*pG->dx2/pG->dx3)*
02288           (FargoFlx[k+1][i][jj].U[NFARGO-2] - FargoFlx[k][i][jj].U[NFARGO-2]);
02289 #endif
02290         pG->B3i[k][j][i] += 
02291           (FargoFlx[k][i][jj+1].U[NFARGO-2] - FargoFlx[k][i][jj].U[NFARGO-2]);
02292       }
02293       pG->B1i[k][j][ie+1] -= 
02294         (FargoFlx[k][ie+1][jj+1].U[NFARGO-1]-FargoFlx[k][ie+1][jj].U[NFARGO-1]);
02295     }
02296     for (i=is; i<=ie; i++) {
02297 #ifdef SHEARING_BOX
02298       pG->B2i[k][je+1][i] += (pG->dx2/pG->dx1)* 
02299         (FargoFlx[k][i+1][jfe+1].U[NFARGO-1]-FargoFlx[k][i][jfe+1].U[NFARGO-1]);
02300       pG->B2i[k][je+1][i] -= (pG->dx2/pG->dx3)* 
02301         (FargoFlx[k+1][i][jfe+1].U[NFARGO-2]-FargoFlx[k][i][jfe+1].U[NFARGO-2]);
02302 #endif
02303 #ifdef CYLINDRICAL
02304       pG->B2i[k][je+1][i] += (pG->dx2/pG->dx1)*
02305         (ri[i+1]*FargoFlx[k][i+1][jfe+1].U[NFARGO-1]-ri[i]*FargoFlx[k][i][jfe+1].U[NFARGO-1]);
02306       pG->B2i[k][je+1][i] -= (r[i]*pG->dx2/pG->dx3)*
02307         (FargoFlx[k+1][i][jfe+1].U[NFARGO-2]-FargoFlx[k][i][jfe+1].U[NFARGO-2]);
02308 #endif
02309     }
02310   }
02311   for (j=js; j<=je; j++) {
02312     jj = j-js+jfs;
02313     for (i=is; i<=ie; i++) {
02314       pG->B3i[ke+1][j][i] += 
02315         (FargoFlx[ke+1][i][jj+1].U[NFARGO-2]-FargoFlx[ke+1][i][jj].U[NFARGO-2]);
02316     }
02317   }
02318 #endif /* MHD */
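
/* [Editor's note] Because Step 6 updates the face-centered fields from
 * differences of the same edge-centered EMFs (the usual CT stencil), the
 * discrete divergence of B in each cell is left unchanged by the FARGO shift
 * to round-off, just as in the main integrator.                              */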
02319 
02320 /*--- Step 7. ------------------------------------------------------------------
02321  * compute cell-centered B  */
02322 
02323 #ifdef MHD
02324   for (k=ks; k<=ke; k++) {
02325     for (j=js; j<=je; j++) {
02326       for(i=is; i<=ie; i++){
02327 #ifdef SHEARING_BOX
02328         pG->U[k][j][i].B1c = 0.5*(pG->B1i[k][j][i]+pG->B1i[k][j][i+1]);
02329 #endif
02330 #ifdef CYLINDRICAL
02331         pG->U[k][j][i].B1c = 0.5*(1.0/r[i])*(ri[i]*pG->B1i[k][j][i] + ri[i+1]*pG->B1i[k][j][i+1]);
02332 #endif
02333         pG->U[k][j][i].B2c = 0.5*(pG->B2i[k][j][i]+pG->B2i[k][j+1][i]);
02334         pG->U[k][j][i].B3c = 0.5*(pG->B3i[k][j][i]+pG->B3i[k+1][j][i]);
02335       }
02336     }
02337   }
02338 #endif /* MHD */
02339 
02340   return;
02341 }
02342 #endif /* FARGO */
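
/* [Editor's note] The core of the FARGO update above is a conservative shift
 * of a periodic 1D profile by an integer number of cells plus a fraction of a
 * cell.  The sketch below shows that structure in isolation, using plain
 * doubles, periodic indexing, and a first-order donor-cell fractional flux in
 * place of Athena's higher-order RemapFlux(); it is illustrative only, is not
 * part of the original source, and is guarded out of compilation.  It uses
 * malloc()/free() from <stdlib.h>.                                           */
#if 0
/* Shift the periodic profile u[0..n-1] by (m + eps) cells, with m >= 0 and
 * 0 <= eps < 1.  F[j] is the transport through the j-1/j interface, and
 * u_new[j] = u[j] - (F[j+1] - F[j]); the sum of u is preserved exactly.      */
static void conservative_shift(double *u, int n, int m, double eps)
{
  int j, l;
  double *F     = (double*)malloc((n+1)*sizeof(double));
  double *u_new = (double*)malloc(n*sizeof(double));

  for (j=0; j<=n; j++) {
    F[j] = eps*u[((j-m-1) % n + n) % n];               /* fractional (donor) part */
    for (l=1; l<=m; l++) F[j] += u[((j-l) % n + n) % n]; /* whole upwind cells    */
  }
  for (j=0; j<n; j++) u_new[j] = u[j] - (F[j+1] - F[j]);
  for (j=0; j<n; j++) u[j] = u_new[j];

  free(F); free(u_new);
}
#endif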
02343 
02344 #if defined(SHEARING_BOX) || (defined(CYLINDRICAL) && defined(FARGO))
02345 /*----------------------------------------------------------------------------*/
02346 /*! \fn void bvals_shear_init(MeshS *pM)
02347  *  \brief Allocates memory for temporary arrays/buffers
02348  */
02349 
02350 void bvals_shear_init(MeshS *pM)
02351 {
02352   GridS *pG;
02353   int nl,nd,nx1,nx2,nx3,max1=0,max2=0,max3=0;
02354 #ifdef MPI_PARALLEL
02355   int size1=0,size2=0,size;
02356 #endif
02357 #ifdef FARGO
02358   Real xmin,xmax,x2,x3;
02359 #endif
02360 #ifdef CYLINDRICAL
02361   Real MachKep;
02362 #endif
02363 
02364 /* Loop over all Grids on this processor to find maximum size of arrays */
02365 
02366   for (nl=0; nl<(pM->NLevels); nl++){
02367     for (nd=0; nd<(pM->DomainsPerLevel[nl]); nd++){
02368       if (pM->Domain[nl][nd].Grid != NULL) { /* there is a Grid on this proc */
02369         pG=pM->Domain[nl][nd].Grid;          /* set pointer to Grid */
02370 
02371         nx1 = pG->Nx[0] + 2*nghost;
02372         nx2 = pG->Nx[1] + 2*nghost;
02373         nx3 = pG->Nx[2] + 2*nghost;
02374         max1 = MAX(max1,nx1);
02375         max2 = MAX(max2,nx2);
02376         max3 = MAX(max3,nx3);
02377       }
02378     }
02379   }
02380 
02381 /* Allocate memory for temporary arrays and vectors */
02382 
02383   if((GhstZns=(Remap***)calloc_3d_array(max3,nghost,max2,sizeof(Remap)))==NULL)
02384     ath_error("[bvals_shear_init]: malloc returned a NULL pointer\n");
02385 
02386   if((GhstZnsBuf=(Remap***)calloc_3d_array(max3,nghost,max2,sizeof(Remap))) ==
02387     NULL) ath_error("[bvals_shear_init]: malloc returned a NULL pointer\n");
02388 
02389 #ifdef MHD
02390   if ((tEyBuf=(Real**)calloc_2d_array(max3,max2,sizeof(Real))) == NULL)
02391     ath_error("[bvals_shear_init]: malloc returned a NULL pointer\n");
02392 #endif /* MHD */
02393 
02394 /* estimate extra ghost cells needed by FARGO, allocate arrays accordingly */
02395 #ifdef FARGO
02396   xmin = pM->RootMinX[0];
02397   xmax = pM->RootMaxX[0];
02398 #ifdef SHEARING_BOX
02399   nfghost = nghost + ((int)(1.5*CourNo*MAX(fabs(xmin),fabs(xmax))) + 1);
02400 #endif
02401 #ifdef CYLINDRICAL
02402   MachKep = MAX( xmin*(*OrbitalProfile)(xmin), xmax*(*OrbitalProfile)(xmax) )/Iso_csound;
02403   nfghost = nghost + 1 + ((int)(CourNo*MachKep));
02404 #endif
02405   max2 = max2 + 2*nfghost;
02406 
02407   if((FargoVars=(FConsS***)calloc_3d_array(max3,max1,max2,sizeof(FConsS)))
02408     ==NULL) ath_error("[bvals_shear_init]: malloc returned a NULL pointer\n");
02409 
02410   if((FargoFlx=(FConsS***)calloc_3d_array(max3,max1,max2,sizeof(FConsS)))==NULL)
02411     ath_error("[bvals_shear_init]: malloc returned a NULL pointer\n");
02412 #endif
02413 
02414   if((U = (Real*)malloc(max2*sizeof(Real))) == NULL)
02415     ath_error("[bvals_shear_init]: malloc returned a NULL pointer\n");
02416 
02417   if((Flx = (Real*)malloc(max2*sizeof(Real))) == NULL)
02418     ath_error("[bvals_shear_init]: malloc returned a NULL pointer\n");
02419 
02420 #if defined(THIRD_ORDER_CHAR) || defined(THIRD_ORDER_PRIM)
02421   if ((Uhalf = (Real*)malloc(max2*sizeof(Real))) == NULL)
02422     ath_error("[bvals_shear_init]: malloc returned a NULL pointer\n");
02423 #endif
02424 
02425 /* allocate memory for send/receive buffers in MPI parallel calculations */
02426 
02427 #ifdef MPI_PARALLEL
02428   size1 = nghost*pG->Nx[1]*(pG->Nx[2]+1)*(NREMAP+NSCALARS);
02429 #ifdef FARGO
02430   size2 = nfghost*(pG->Nx[0]+1)*(pG->Nx[2]+1)*(NFARGO+NSCALARS);
02431 #endif
02432   size = MAX(size1,size2);
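
/* [Editor's note] size1 covers the shearing-sheet remap exchanges (nghost
 * planes of Nx[1]*(Nx[2]+1) cells, NREMAP+NSCALARS values per cell); size2
 * covers the FARGO exchanges (nfghost planes of (Nx[0]+1)*(Nx[2]+1) cells,
 * NFARGO+NSCALARS values per cell).  One pair of buffers of the larger size
 * is shared by all of the MPI exchanges in this file.                        */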
02433 
02434   if((send_buf = (double*)malloc(size*sizeof(double))) == NULL)
02435     ath_error("[bvals_shear_init]: Failed to allocate send buffer\n");
02436 
02437   if((recv_buf = (double*)malloc(size*sizeof(double))) == NULL)
02438     ath_error("[bvals_shear_init]: Failed to allocate receive buffer\n");
02439 #endif /* MPI_PARALLEL */
02440 
02441   return;
02442 }
02443 
02444 /*----------------------------------------------------------------------------*/
02445 /*! \fn void bvals_shear_destruct(void)
02446  *  \brief Free temporary arrays
02447  */
02448 
02449 void bvals_shear_destruct(void)
02450 {
02451   if (GhstZns    != NULL) free_3d_array(GhstZns);
02452   if (GhstZnsBuf != NULL) free_3d_array(GhstZnsBuf);
02453 #ifdef MHD
02454   if (tEyBuf != NULL) free_2d_array(tEyBuf);
02455 #endif
02456 #if defined(THIRD_ORDER_CHAR) || defined(THIRD_ORDER_PRIM)
02457   if (Uhalf != NULL) free(Uhalf);
02458 #endif
02459 #ifdef FARGO
02460   if (FargoVars != NULL) free_3d_array(FargoVars);
02461   if (FargoFlx  != NULL) free_3d_array(FargoFlx);
02462 #endif
02463   if (U   != NULL) free(U);
02464   if (Flx != NULL) free(Flx);
02465 #ifdef MPI_PARALLEL
02466   if (send_buf != NULL) free(send_buf);
02467   if (recv_buf != NULL) free(recv_buf);
02468 #endif /* MPI_PARALLEL */
02469 
02470   return;
02471 }
02472 
02473 
02474 /*=========================== PRIVATE FUNCTIONS ==============================*/
02475 
02476 /*------------------------------------------------------------------------------
02477  * RemapFlux: computes "fluxes" of conserved variables for conservative remap
02478  * Input Arguments:
02479  *   U = 1D vector of conserved variable at cell centers along 1-D slice
02480  *   eps = fraction of a cell to be remapped
02481  * Output Arguments:
02482  *   Flux = fluxes of conserved variable at interfaces over [jinner:jouter]
02483  */
02484 
02485 #if defined(SECOND_ORDER_CHAR) || defined (SECOND_ORDER_PRIM)
02486 /*----------------------------------------------------------------------------*/
02487 /*! \fn void RemapFlux(const Real *U, const Real eps,
02488  *             const int jinner, const int jouter, Real *Flux)
02489  *  \brief Second order reconstruction for conservative remap,
02490  *   using piecewise linear interpolation and min/mod limiters
02491  */
02492 
02493 void RemapFlux(const Real *U, const Real eps,
02494                const int jinner, const int jouter, Real *Flux)
02495 {
02496   int j,jl,ju;
02497   Real dUc,dUl,dUr,dUm,lim_slope;
02498 
02499 /* jinner,jouter are index range over which flux must be returned.  Set loop
02500  * limits depending on direction of upwind differences  */
02501 
02502   if (eps > 0.0) { /* eps always > 0 for inner i boundary */
02503     jl = jinner-1;
02504     ju = jouter-1;
02505   } else {         /* eps always < 0 for outer i boundary */
02506     jl = jinner;
02507     ju = jouter;
02508   }
02509 
02510   for (j=jl; j<=ju; j++) {
02511       dUc = U[j+1] - U[j-1];
02512       dUl = U[j  ] - U[j-1];
02513       dUr = U[j+1] - U[j  ];
02514 
02515       dUm = 0.0;
02516       if (dUl*dUr > 0.0) {
02517         lim_slope = MIN(fabs(dUl),fabs(dUr));
02518         dUm = SIGN(dUc)*MIN(0.5*fabs(dUc),2.0*lim_slope);
02519       }
02520  
02521     if (eps > 0.0) { /* eps always > 0 for inner i boundary */
02522       Flux[j+1] = eps*(U[j] + 0.5*(1.0 - eps)*dUm);
02523     } else {         /* eps always < 0 for outer i boundary */
02524       Flux[j  ] = eps*(U[j] - 0.5*(1.0 + eps)*dUm);
02525     }
02526   }
02527 
02528   return;
02529 }
02530 
02531 #endif /* SECOND_ORDER */
02532 
02533 #if defined(THIRD_ORDER_CHAR) || defined(THIRD_ORDER_PRIM)
02534 /*----------------------------------------------------------------------------*/
02535 /*! \fn void RemapFlux(const Real *U, const Real eps,
02536  *             const int jinner, const int jouter, Real *Flux)
02537  *  \brief Third order reconstruction for conservative remap,
02538  *   using the Colella & Sekora extremum-preserving algorithm (PPME)
02539  */
02540 
02541 void RemapFlux(const Real *U, const Real eps,
02542                const int jinner, const int jouter, Real *Flux)
02543 {
02544   int j,jl,ju;
02545   Real d2Uc,d2Ul,d2Ur,d2U,d2Ulim,lim_slope,Ulv,Urv,dU,U6,qa,qb,qc,qx;
02546 
02547 /* jinner,jouter are index range over which flux must be returned.  Set loop
02548  * limits depending on direction of upwind differences  */
02549 
02550   if (eps > 0.0) { /* eps always > 0 for inner i boundary */
02551     jl = jinner-1;
02552     ju = jouter-1;
02553   } else {         /* eps always < 0 for outer i boundary */
02554     jl = jinner;
02555     ju = jouter;
02556   }
02557 
02558   for (j=jl; j<=ju+1; j++) {
02559     Uhalf[j]=(7.0*(U[j-1]+U[j]) - (U[j-2]+U[j+1]))/12.0;
02560     d2Uc = 3.0*(U[j-1] - 2.0*Uhalf[j] + U[j]);
02561     d2Ul = (U[j-2] - 2.0*U[j-1] + U[j  ]);
02562     d2Ur = (U[j-1] - 2.0*U[j  ] + U[j+1]);
02563     d2Ulim = 0.0;
02564     lim_slope = MIN(fabs(d2Ul),fabs(d2Ur));
02565     if (d2Uc > 0.0 && d2Ul > 0.0 && d2Ur > 0.0) {
02566       d2Ulim = SIGN(d2Uc)*MIN(1.25*lim_slope,fabs(d2Uc));
02567     }
02568     if (d2Uc < 0.0 && d2Ul < 0.0 && d2Ur < 0.0) {
02569       d2Ulim = SIGN(d2Uc)*MIN(1.25*lim_slope,fabs(d2Uc));
02570     }
02571     Uhalf[j] = 0.5*((U[j-1]+U[j]) - d2Ulim/3.0);
02572   }
02573 
02574   for (j=jl; j<=ju; j++) {
02575     Ulv = Uhalf[j  ];
02576     Urv = Uhalf[j+1];
02577 
02578     qa = (Urv-U[j])*(U[j]-Ulv);
02579     qb = (U[j-1]-U[j])*(U[j]-U[j+1]);
02580     if (qa <= 0.0 && qb <= 0.0) {
02581       qc = 6.0*(U[j] - 0.5*(Ulv+Urv));
02582       d2U  = -2.0*qc;
02583       d2Uc = (U[j-1] - 2.0*U[j  ] + U[j+1]);
02584       d2Ul = (U[j-2] - 2.0*U[j-1] + U[j  ]);
02585       d2Ur = (U[j  ] - 2.0*U[j+1] + U[j+2]);
02586       d2Ulim = 0.0;
02587       lim_slope = MIN(fabs(d2Ul),fabs(d2Ur));
02588       lim_slope = MIN(fabs(d2Uc),lim_slope);
02589       if (d2Uc > 0.0 && d2Ul > 0.0 && d2Ur > 0.0 && d2U > 0.0) {
02590         d2Ulim = SIGN(d2U)*MIN(1.25*lim_slope,fabs(d2U));
02591       }
02592       if (d2Uc < 0.0 && d2Ul < 0.0 && d2Ur < 0.0 && d2U < 0.0) {
02593         d2Ulim = SIGN(d2U)*MIN(1.25*lim_slope,fabs(d2U));
02594       }
02595       if (d2U == 0.0) {
02596         Ulv = U[j];
02597         Urv = U[j];
02598       } else {
02599         Ulv = U[j] + (Ulv - U[j])*d2Ulim/d2U;
02600         Urv = U[j] + (Urv - U[j])*d2Ulim/d2U;
02601       }
02602     }
02603 
02604     qa = (Urv-U[j])*(U[j]-Ulv);
02605     qb = Urv-Ulv;
02606     qc = 6.0*(U[j] - 0.5*(Ulv+Urv));
02607     if (qa <= 0.0) {
02608       Ulv = U[j];
02609       Urv = U[j];
02610     } else if ((qb*qc) > (qb*qb)) {
02611       Ulv = 3.0*U[j] - 2.0*Urv;
02612     } else if ((qb*qc) < -(qb*qb)) {
02613       Urv = 3.0*U[j] - 2.0*Ulv;
02614     }
02615 
02616     dU = Urv - Ulv;
02617     U6 = 6.0*(U[j] - 0.5*(Ulv + Urv));
02618 
02619     if (eps > 0.0) { /* eps always > 0 for inner i boundary */
02620       qx = TWO_3RDS*eps;
02621       Flux[j+1] = eps*(Urv - 0.75*qx*(dU - (1.0 - qx)*U6));
02622 
02623     } else {         /* eps always < 0 for outer i boundary */
02624       qx = -TWO_3RDS*eps;
02625       Flux[j  ] = eps*(Ulv + 0.75*qx*(dU + (1.0 - qx)*U6));
02626     }
02627   }
02628 
02629   return;
02630 }
02631 
02632 #endif /* THIRD_ORDER */
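
/* [Editor's note] How the Flux array returned by either RemapFlux() version
 * is meant to be used (mirroring Step 3 of the RemapEy routines): fill a 1D
 * array with periodic ghost zones, request fluxes over [js:je+1], and
 * difference them.  Illustrative only and guarded out of compilation;
 * remap_profile(), q, and qnew are invented names.                           */
#if 0
static void remap_profile(Real *q, Real *qnew, Real eps, int js, int je)
{
  int j;
  Real *Flx1d = (Real*)malloc((je+2)*sizeof(Real));

  /* q must carry at least nghost periodic ghost cells on each side of
   * [js:je]; |eps| < 1, and its sign selects the upwind branch in RemapFlux */
  RemapFlux(q, eps, js, je+1, Flx1d);
  for (j=js; j<=je; j++) qnew[j] = q[j] - (Flx1d[j+1] - Flx1d[j]);

  /* The interior fluxes telescope and the end fluxes match for periodic
   * data, so the sum of qnew over [js:je] equals the sum of q: the remap is
   * conservative.                                                           */
  free(Flx1d);
}
#endif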
02633 
02634 #endif /* SHEARING_BOX || Cylindrical + Fargo */
